# coding:utf8
from celery import Celery
from celery.schedules import crontab
app = Celery('tasks', broker='redis://localhost:6379/0')
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
# Calls test('hello') every 10s
sender.add_periodic_task(10.0, test.s('hello'), name='add every 10')
# Call test('world') every 30s
sender.add_periodic_task(30.0, test.s('world'), expires=10)
# Executes every Monday morning at 7:30 am
sender.add_periodic_task(
crontab(hour=7, minute=30, day_of_week=1),
test.s('Happy Monday')
)
@app.task
def test(arg):
print(arg)
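# To actually run these schedules you typically start a worker with an embedded
# beat scheduler (a common invocation; adjust the app/module name to your project):
#   celery -A tasks worker --beat --loglevel=info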
|
""" [fabric](fabfile.org) script for deploying the app to a server
This script can be used to deploy the app.
One needs to call `deploy` with the fabric cli.
It compresses the app into a .tar.gz file, uploads and unpacks it
and sets a symlink to the latest revision.
It is also possible to use the `list_versions` command to see which
revisions are published to the server.
"""
from fabric.api import run, env, local, put, cd
BASE_PATH = '/home/deploy/versions'
def deploy():
current_revision = local('git rev-parse HEAD', capture=True)
create_artifact(current_revision)
upload_artifact(current_revision)
make_active(current_revision)
def create_artifact(current_revision):
""" compress all files into a .tar.gz archive for uploading """
archive_path = '/tmp/{revision}.tar.gz'.format(revision=current_revision)
local('tar -czf {archive_path} --exclude=.git *'.format(archive_path=archive_path))
def upload_artifact(revision):
""" upload the archive to the server and extract it """
# we upload the file from the local /tmp to the remote /tmp dir
tmp_path = '/tmp/{revision}.tar.gz'.format(revision=revision)
put(tmp_path, tmp_path)
destination_path = '{base}/{revision}'.format(base=BASE_PATH,
revision=revision)
untar(tmp_path, destination_path)
# remove both local and remote archives
run('rm {}'.format(tmp_path))
local('rm {}'.format(tmp_path))
def untar(source_path, destination_path):
run('mkdir -p %s' % destination_path)
run('tar xfz %s -C %s' % (source_path, destination_path))
def make_active(revision):
""" change the `newest` symlink to point to this revision """
run('ln -sfn {base}/{revision}/ {base}/newest'.format(base=BASE_PATH,
revision=revision))
def list_versions():
run('ls -l {base}'.format(base=BASE_PATH))
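# Typical usage with the Fabric 1.x CLI (host and user are illustrative):
#   fab -H deploy@example.com deploy
#   fab -H deploy@example.com list_versions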
|
class Gui:
def __init__(self):
pass
|
# Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
from ..utils import fromtimestamp
from .model import Model
class Movie(Model):
"""Movie Object
Attributes
----------
id: :class:`str`
| |movie_id|
user_id: :class:`str`
| |id|
title: :class:`str`
| Live title
subtitle: :class:`str`
| Live subtitle (telop)
last_owner_comment: :class:`str` or :class:`None`
| Live streamer's latest comment text
category: :class:`str` or :class:`None`
| category id
link: :class:`str`
| Link URL to live (or recording)
is_live: :class:`bool`
| Whether live streaming now
is_recorded: :class:`bool`
| Whether the recording is public
comment_count: :class:`int`
| Total number of comments
large_thumbnail: :class:`str`
| URL of thumbnail image (large)
small_thumbnail: :class:`str`
| URL of thumbnail image (small)
country: :class:`str`
| stream area (country code)
duration: :class:`int`
| stream time (seconds)
created: :class:`datetime.datetime`
| Converted created_time to :class:`datetime.datetime` type
created_time: :class:`int`
| Unix time stamp of stream start datetime
is_collabo: :class:`bool`
| Whether it is a collaboration stream
is_protected: :class:`bool`
| Whether to need the secret word
max_view_count: :class:`int`
| Maximum number of simultaneous viewers
| (0 if streaming now.)
current_view_count: :class:`int`
| Current number of simultaneous viewers
| (0 if not streaming now.)
total_view_count: :class:`int`
| Total number of viewers
hls_url: :class:`str` or :class:`None`
| URL for HTTP Live Streaming playback
| `2019-04-17 update <https://github.com/twitcasting/PublicApiV2/blob/master/CHANGELOG.md#2019-04-17>`_
| Changed the URL of the hls_url parameter from `http` to `https` |google_translate_ja_en|
References
----------
https://apiv2-doc.twitcasting.tv/#movie-object
"""
@classmethod
def parse(cls, api, json):
movie = cls(api)
setattr(movie, '_json', json)
for k, v in json.items():
if k == 'created':
setattr(movie, k, fromtimestamp(v))
setattr(movie, f'{k}_time', v)
else:
setattr(movie, k, v)
return movie
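# A minimal sketch of what parse() produces (hypothetical api=None and sample JSON):
#   movie = Movie.parse(None, {'id': '123', 'created': 1609459200})
#   movie.created       # datetime built via fromtimestamp(1609459200)
#   movie.created_time  # 1609459200 (the raw unix timestamp)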
|
import secrets
import string
def generate(length):
# candidate characters: digits, letters and punctuation (same set as the original literal list)
char_list = string.digits + string.ascii_letters + string.punctuation
password = ''
for _ in range(length):
password += secrets.choice(char_list)
return password
def logger():
filename = input("Enter the file name: ")
with open(filename, 'a') as file:
file.write(generateOutput)
file.write('\n')
print("Done!")
length = int(input("Enter the length of the password you wish to generate: "))
generateOutput = generate(length)
print("Generated password:\n" + generateOutput)
logging = input("Do you wish to log this password into a file?\nEnter 'yes' if you do: ").lower()
if logging == "yes":
logger()
|
from .providers import Gmail, Hotmail, Live, Provider, create_provider
VERSION = "0.0.1"
__all__ = ["Gmail", "Hotmail", "Live", "Provider", "VERSION", "create_provider"]
|
import numpy as np
# enumerate results filenames
result_files = ('modern_net_drop_bin_scaled_results.np',
'modern_net_predicted_bin_scaled_results.np',
'modern_net_mode_bin_scaled_results.np',
'net_drop_bin_scaled_results.np',
'net_replace_bin_scaled_results.np',
'net_predicted_bin_scaled_results.np',
'net_mode_bin_scaled_results.np',
'net_facanal_bin_scaled_results.np')
# save the filename list as plain text with numpy
np.savetxt('result_filenames.np', result_files, fmt="%s")
|
import time
# Minimal timing loop; the work to be timed goes between the two time.time() calls.
for i in range(3):
t1 = time.time()
# ... code to time ...
t2 = time.time()
print("Iteration=%s, Time=%s" % (i, t2 - t1))
|
def handler(event, context):
print('Dummy ETL')
|
import numpy as np
class Mesh():
def __init__(self, filename):
# parse the .obj file
V, T = [], []
with open(filename) as f:
for line in f.readlines():
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
V.append([float(x) for x in values[1:4]])
elif values[0] == 'f':
T.append([int(x) for x in values[1:4]])
self.V, self.T = np.array(V), np.array(T)-1
@property
def nverts(self):
return len(self.V)
@property
def ntriangles(self):
return len(self.T)
@property
def ncorners(self):
return self.ntriangles*3
def normal(self, t):
n = np.cross(self.V[self.T[t][1]]-self.V[self.T[t][0]], self.V[self.T[t][2]]-self.V[self.T[t][0]])
return n / np.linalg.norm(n)
def org(self, c):
return self.T[c//3][c%3]
def dst(self, c):
return self.T[c//3][(c+1)%3]
def __str__(self):
ret = ""
for v in self.V:
ret = ret + ("v %f %f %f\n" % (v[0], v[1], v[2]))
for t in self.T:
ret = ret + ("f %d %d %d\n" % (t[0]+1, t[1]+1, t[2]+1))
return ret
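# A minimal usage sketch (assumes a triangulated Wavefront file at a hypothetical path):
#   mesh = Mesh('model.obj')
#   print(mesh.nverts, mesh.ntriangles)
#   print(mesh.normal(0))   # unit normal of the first triangle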
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
RIP (Routing Information Protocol).
"""
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import UDP
class RIP(Packet):
name = "RIP header"
fields_desc = [
ByteEnumField("cmd", 1, {1: "req", 2: "resp", 3: "traceOn", 4: "traceOff",
5: "sun", 6: "trigReq", 7: "trigResp", 8: "trigAck",
9: "updateReq", 10: "updateResp", 11: "updateAck"}),
ByteField("version", 1),
ShortField("null", 0),
]
def guess_payload_class(self, payload):
if payload[:2] == b"\xff\xff":
return RIPAuth
else:
return Packet.guess_payload_class(self, payload)
class RIPEntry(RIP):
name = "RIP entry"
fields_desc = [
ShortEnumField("AF", 2, {2: "IP"}),
ShortField("RouteTag", 0),
IPField("addr", "0.0.0.0"),
IPField("mask", "0.0.0.0"),
IPField("nextHop", "0.0.0.0"),
IntEnumField("metric", 1, {16: "Unreach"}),
]
class RIPAuth(Packet):
name = "RIP authentication"
fields_desc = [
ShortEnumField("AF", 0xffff, {0xffff: "Auth"}),
ShortEnumField("authtype", 2, {1: "md5authdata", 2: "simple", 3: "md5"}),
ConditionalField(StrFixedLenField("password", None, 16),
lambda pkt: pkt.authtype == 2),
ConditionalField(ShortField("digestoffset", 0),
lambda pkt: pkt.authtype == 3),
ConditionalField(ByteField("keyid", 0),
lambda pkt: pkt.authtype == 3),
ConditionalField(ByteField("authdatalen", 0),
lambda pkt: pkt.authtype == 3),
ConditionalField(IntField("seqnum", 0),
lambda pkt: pkt.authtype == 3),
ConditionalField(StrFixedLenField("zeropad", None, 8),
lambda pkt: pkt.authtype == 3),
ConditionalField(StrLenField("authdata", None,
length_from=lambda pkt: pkt.md5datalen),
lambda pkt: pkt.authtype == 1)
]
def pre_dissect(self, s):
if s[2:4] == b"\x00\x01":
self.md5datalen = len(s) - 4
return s
bind_bottom_up(UDP, RIP, dport=520)
bind_bottom_up(UDP, RIP, sport=520)
bind_layers(UDP, RIP, sport=520, dport=520)
bind_layers(RIP, RIPEntry,)
bind_layers(RIPEntry, RIPEntry,)
bind_layers(RIPAuth, RIPEntry,)
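# A minimal sketch of building a RIPv2 full-table request (values are illustrative;
# IP additionally needs `from scapy.layers.inet import IP`):
#   pkt = IP(dst="224.0.0.9") / UDP(sport=520, dport=520) / RIP(cmd=1, version=2) / RIPEntry(AF=0, metric=16)
#   pkt.show()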
|
import pytest
from spacy.language import Language
from spacy.lang.en import English
from spacy.training import Example
from thinc.api import ConfigValidationError
from pydantic import StrictBool
def test_initialize_arguments():
name = "test_initialize_arguments"
class CustomTokenizer:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
self.from_initialize = None
def __call__(self, text):
return self.tokenizer(text)
def initialize(self, get_examples, nlp, custom: int):
self.from_initialize = custom
class Component:
def __init__(self):
self.from_initialize = None
def initialize(
self, get_examples, nlp, custom1: str, custom2: StrictBool = False
):
self.from_initialize = (custom1, custom2)
Language.factory(name, func=lambda nlp, name: Component())
nlp = English()
nlp.tokenizer = CustomTokenizer(nlp.tokenizer)
example = Example.from_dict(nlp("x"), {})
get_examples = lambda: [example]
nlp.add_pipe(name)
# The settings here will typically come from the [initialize] block
init_cfg = {"tokenizer": {"custom": 1}, "components": {name: {}}}
nlp.config["initialize"].update(init_cfg)
with pytest.raises(ConfigValidationError) as e:
# Empty config for component, no required custom1 argument
nlp.initialize(get_examples)
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ("custom1",)
assert errors[0]["type"] == "value_error.missing"
init_cfg = {
"tokenizer": {"custom": 1},
"components": {name: {"custom1": "x", "custom2": 1}},
}
nlp.config["initialize"].update(init_cfg)
with pytest.raises(ConfigValidationError) as e:
# Wrong type of custom 2
nlp.initialize(get_examples)
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ("custom2",)
assert errors[0]["type"] == "value_error.strictbool"
init_cfg = {
"tokenizer": {"custom": 1},
"components": {name: {"custom1": "x"}},
}
nlp.config["initialize"].update(init_cfg)
nlp.initialize(get_examples)
assert nlp.tokenizer.from_initialize == 1
pipe = nlp.get_pipe(name)
assert pipe.from_initialize == ("x", False)
|
# -*- coding=utf-8 -*-
"""
RestService signals module
"""
import django.dispatch
from django.conf import settings
from onadata.apps.restservice.tasks import call_service_async
ASYNC_POST_SUBMISSION_PROCESSING_ENABLED = \
getattr(settings, 'ASYNC_POST_SUBMISSION_PROCESSING_ENABLED', False)
# pylint: disable=C0103
trigger_webhook = django.dispatch.Signal(providing_args=['instance'])
def call_webhooks(sender, **kwargs): # pylint: disable=W0613
"""
Call webhooks signal.
"""
instance_id = kwargs['instance'].pk
if ASYNC_POST_SUBMISSION_PROCESSING_ENABLED:
call_service_async.apply_async(args=[instance_id], countdown=1)
else:
call_service_async(instance_id)
trigger_webhook.connect(call_webhooks, dispatch_uid='call_webhooks')
|
import multiprocessing
def worker(num):
print('Worker %s, argument: %s' % (multiprocessing.current_process().name, num))
return
if __name__ == '__main__':
processes = []
for i in range(5):
# as with threading, args must be an iterable; for a single argument make sure
# it is a one-element tuple like (i,) (the trailing comma matters), or just use a list: [i]
p = multiprocessing.Process(target=worker, args=(i,))
processes.append(p)
p.start()
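# A common follow-up (an addition, not part of the original snippet): wait for every
# worker to finish before the parent process exits.
for p in processes:
p.join()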
|
# we start by importing the unittest module
import unittest
# Next, let's import the function that we intend to test
#
# **We could also import all functions by using * or just import the module
# itself, but for now let's just import the function
from challenge_7 import missing_int
# let's define our suite of tests as a class that inherits from unittest.TestCase
class TestBinaryMethods(unittest.TestCase):
def test_missingInt(self):
self.assertEqual(missing_int([1,3,4,0]), 2)
self.assertEqual(missing_int([1,2,3]), 0)
self.assertEqual(missing_int([0,1,2,4]), 3)
# if the python file is run by itself, run unittest
# This allows us to import the members of the file without running main if we need to
if __name__ == '__main__':
unittest.main()
|
from alpha_vantage.timeseries import TimeSeries
import yfinance as yf
from datetime import datetime
#
# Thanks, Barry!
#
# A simple robo-broker that'll tell you when interesting stuff happens
#
# He's too dumb to tell you what to buy, but he watches the market like a hawk
# and will tell you when something you own has shot up and when sentiment changes and you should probably sell
#
#
#
##
##
##
#
def askBarry(fbuySell, fticker, fmonitorDate, ftargetPrice, favKey='', fdestinationTimeZone='Australia/Sydney', fmonitorThreshold=0.05, fnoiseThreshold=.025):
# monitor a stock at a threshold
# buy: notify me when it bottoms out and starts heading up
# sell: notify me when it tops out and starts heading down
# how it works: dumb rolling min/max and a noise threshold % so it doesn't notify until the transition is > the %
# set a % threshold for monitoring and a 'noise' band to remove unnecessary notifications
# the monitor threshold tells us when the stock moves from our set price (default of 5%)
# the noise band waits until we're 2.5% (default) from the min/max before flagging it as the turning point
monitorThreshold = 1-(fmonitorThreshold if fbuySell == 'buy' else -fmonitorThreshold if fbuySell == 'sell' else 0)
noiseThreshold = 1+(fnoiseThreshold if fbuySell == 'buy' else -fnoiseThreshold if fbuySell == 'sell' else 0)
act = 'none'
now = datetime.now().strftime("%b %d %Y %H:%M:%S")
# the function returns a list of all the variables and outputs of the process so we can do other stuff with it if required
outputList = {'now': now,'ticker':fticker.upper(), 'monitorDate':fmonitorDate, 'targetPrice':ftargetPrice, 'status':"query"}
# create a generic message that we can show by default that explains what's happened
outputData = ('{3}\n{0} target set at ${2:,.2f} commencing from {1}').format(fticker.upper(),fmonitorDate,ftargetPrice,now)
# a mapping of the conformed attributes to the corresponding attributes which come from the ticker price external api (alpha vantage/yahoo finance)
dataMapping = {
"Datetime": "Datetime",
"Open": "Open",
"Low": "Low",
"High": "High",
"Close": "Close",
"Volume": "Volume"
}
# check inputs. only process if its a buy or a sell
if fbuySell == 'buy' :
whichCol = dataMapping['Low']
elif fbuySell == 'sell' :
whichCol = dataMapping['High']
else :
return ('error',outputList,'ERROR - bad action. Check config\n'+outputData)
#get the intraday data from alpha vantage. It comes back as a dataframe in format date, open, low, high, close, volume
#ts = TimeSeries(key=favKey, output_format='pandas')
#data, meta_data = ts.get_intraday(symbol=fticker,interval='1min', outputsize='full')
# get price data from yahoo
data = yf.download( # or pdr.get_data_yahoo(...
# tickers list or string as well
tickers = fticker,
# use "period" instead of start/end
# valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
# (optional, default is '1mo')
period = "5d",
# fetch data by interval (including intraday if period < 60 days)
# valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
# (optional, default is '1d')
interval = "1m",
# group by ticker (to access via data['SPY'])
# (optional, default is 'column')
group_by = 'column',
# adjust all OHLC automatically
# (optional, default is False)
auto_adjust = True,
# download pre/post regular market hours data
# (optional, default is False)
prepost = True,
# use threads for mass downloading? (True/False/Integer)
# (optional, default is True)
threads = True,
# proxy URL scheme to use when downloading
# (optional, default is None)
proxy = None
)
# no data/bad ticker. exit
if data.empty:
print ('empty')
return ('error',outputList,'ERROR - bad ticker. Check config\n'+outputData)
# change it to be in ascending order (it comes in descending). I want to test the data in the same 'direction' as it arrives
data.sort_index(inplace=True,ascending=True)
#convert the date index into a real column; i need to do date comparisons
data.reset_index(level=0, inplace=True)
#print('my raw data')
#pprint(data)
# remove the columns we dont need
data.drop([dataMapping['Open'],dataMapping['Volume']],axis=1,inplace=True)
# alpha vantage comes in EST datetime. Convert it to the ASX timezone (sydney)
#data['date']=(data['datetime'].dt.tz_localize('US/Eastern').dt.tz_convert(fdestinationTimeZone) )
# Remove data prior to my buy date. Don't need it
data = data.loc[data[dataMapping['Datetime']]>= fmonitorDate]
# add a rolling max. I want to know how high it gets and test how far it drops from that high
if fbuySell == 'buy' :
data['rollingAgg'] = data[whichCol].cummin(axis=0)
if fbuySell == 'sell' :
data['rollingAgg'] = data[whichCol].cummax(axis=0)
# trigger notifications only after the stock reaches a certain amount compared to the input price. This prevents excessive early notifications. E.g. I only care about the stock after
# it's gone up x%
if fbuySell == 'buy' :
monitor = data.loc[data[whichCol] <= (ftargetPrice*monitorThreshold)].head(1)
if fbuySell == 'sell' :
monitor = data.loc[data[whichCol] >= (ftargetPrice*monitorThreshold)].head(1)
# monitor threshold reached. Add the corresponding attributes to the output list and append relevant text to the output string
if not monitor.empty :
act = 'watch'
monitorStartDate = monitor[dataMapping['Datetime']].tolist()[0]
outputList["monitorStartDate"]=monitorStartDate
outputList["monitorStartPrice"]=monitor[whichCol].tolist()[0]
outputList["status"]='monitor'
outputData = outputData + (' reached the monitor threshold of ${0:,.2f} on {1} (${2:,.2f}).\n').format(ftargetPrice*monitorThreshold,
outputList["monitorStartDate"], outputList["monitorStartPrice"])
#prune data so it only holds info after the monitor date (less filters needed for subsequent testing)
data = data[data[dataMapping['Datetime']]>= monitorStartDate]
# Check if the sentiment has changed and the stock price has changed direction. The noise band ignores minor wiggles
if fbuySell == 'buy' :
final = data.loc[data[whichCol] >= (data['rollingAgg'] * noiseThreshold)].head(1)
if fbuySell == 'sell' :
final = data.loc[data[whichCol] <= (data['rollingAgg'] * noiseThreshold)].head(1)
# price has changed direction. Add the relevant attributes to the output list and append the generic text to the output string
if not final.empty :
act = 'act'
# pull scalars out of the data frame. Need it for the notifications
actPrice = final['rollingAgg'].tolist()[0]
##print('max price')
dateMonitorReached = (data.loc[(data[whichCol] == actPrice) & (data[dataMapping['Datetime']] >= monitorStartDate)].head(1))[dataMapping['Datetime']].tolist()[0]
outputList["actPrice"] = actPrice
outputList["dateMonitorReached"] = dateMonitorReached
outputList["actTriggerDate"] = final[dataMapping['Datetime']].tolist()[0]
outputList["actTriggerPrice"] = final[whichCol].tolist()[0]
outputList["status"]=fbuySell
outputData = outputData + ('{5} of ${0:,.2f} reached on {1}.\n'
'{6} signal threshold of ${2:,.2f} reached on {3} (${4:,.2f}).\n').format(outputList["actPrice"],
outputList["dateMonitorReached"], outputList["actPrice"]*noiseThreshold, outputList["actTriggerDate"], outputList["actTriggerPrice"]
,whichCol,fbuySell)
else :
outputData = outputData + ('No signal to {0}\n').format(fbuySell)
else :
outputData = outputData + ('\nmonitor threshold ${0:,.2f} not reached\n').format(ftargetPrice*monitorThreshold)
#Add the current position and date just so we know what it is
outputList["currentPrice"] = data.tail(1)[dataMapping['Close']].tolist()[0]
outputList["currentDate"] = data.tail(1)[dataMapping['Datetime']].tolist()[0]
outputData = outputData + ('{0} is currently at ${1:,.2f} on {2}\n').format(outputList["ticker"],outputList["currentPrice"],outputList["currentDate"])
return(act,outputList,outputData)
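# Example invocation (hypothetical ticker/date/target price; not advice):
#   act, details, message = askBarry('buy', 'MSFT', '2021-06-01', 250.00)
#   print(message)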
|
import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
S = input().split()
d = defaultdict(str)
d["Left"] = "<"
d["Right"] = ">"
d["AtCoder"] = "A"
ans = [d[s] for s in S]
print(*ans)
|
#The tee() function returns several independent iterators (defaults to 2) based on a single original input.
from itertools import *
r = islice(count(),5)
print(r)
i1,i2 = tee(r)
print('r:',end=" ")
for i in r:
print(i, end=' ')
if i >1:
break
print()
print('i1:', list(i1))
print('i2:', list(i2))
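# Note: once tee() has made the split, consuming the original iterator `r` (as in the
# loop above) bypasses tee's buffers, so i1/i2 only yield the values not already consumed.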
|
from functools import reduce, lru_cache
import numpy as np
from typing import List, Any, Callable
import scipy.integrate as integrate
from mule_local.rexi.pcirexi.gauss_cache import GaussCache
from mule_local.rexi.pcirexi.section import section
def _complex_quad(func, a, b):
real = integrate.quad((lambda a: func(a).real), a, b)[0]
imag = integrate.quad((lambda a: func(a).imag), a, b)[0]
return real + 1j * imag
class CauchyIntegrator:
target_function: Callable[[Any], Any] # exp for REXI
bs: List[complex] # numerator of terms
a_s: List[complex] # part of the denominator
terms_number: int # overall number of REXI terms
terms_section_list: List[int]  # REXI terms per section
sections: List[section.Section] # list of contour functions
def __init__(self, sections, target_function, terms_number=20, integral='trapeze', arc_length_distribution=False,
normalize_for_zero=False, g_c=GaussCache()):
self.g_c = g_c
self.target_function = target_function
self.sections = sections
self.terms_number = terms_number
self.integral = integral
self._calculate_quad_points_for_sections(arc_length_distribution)
if integral.startswith("trapeze") or integral.startswith("lobatto"):
self.edge_to_edge = True
# Temporarily increase terms. Later undone by merging values from bs and a_s
self.terms_section_list = [t + 1 for t in self.terms_section_list]
else:
self.edge_to_edge = False
self._calculateTable()
if normalize_for_zero:
self._normalize_for_zero()
@lru_cache(maxsize=32)
def get_quadrature_points_and_weights(self, g_c, integral, terms_number):
# if terms_number % self.terms_per_section != 0:
# print("#terms is not dividable by operations_per_section")
if (not integral.startswith('trapeze')) and (not integral.startswith('midpoint')):
if integral.startswith('lobatto'):
if terms_number < 2:
print("Lobatto needs at least 2 base_nodes")
raise
print("Ig: lobatto")
base_nodes, weights_original = g_c.gauss_lobatto(terms_number, 20)
elif integral.startswith('legendre'):
base_nodes, weights_original = g_c.gauss_legendre(terms_number, 20)
elif integral.startswith('chebychev'):
raise NotImplementedError("Chebyshev integration is not implemented")
else:
print("Unknown Interation")
raise
weights = [float(w) / 2 for w in weights_original]
base_nodes = [float(b) / 2 + 0.5 for b in base_nodes]
elif integral.startswith('trapeze'):
if terms_number == 0:
base_nodes = []
weights = []
else:
base_nodes = np.linspace(0, 1, terms_number)
if terms_number <= 2:
weights = [1 / terms_number] * terms_number
else:
divisor = (2 * (terms_number - 1))
weights = [1 / divisor] + [2 / divisor] * (terms_number - 2) + [1 / divisor]
elif integral.startswith('midpoint'):
if terms_number == 0:
base_nodes = []
weights = []
else:
base_nodes = np.linspace(1 / (2 * terms_number), 1 - 1 / (2 * terms_number),
terms_number)
weights = [1 / (terms_number)] * terms_number
else:
raise ValueError("Unknown integration rule: " + integral)
return (base_nodes, weights)
def _calculateTable(self):
# calculates numerator (bs) and denominator addend (as)
self.bs = []
self.a_s = []
for j in range(len(self.sections)):
current_section = self.sections[j]
terms = self.terms_section_list[j]
base_nodes, weights = self.get_quadrature_points_and_weights(self.g_c, self.integral, terms)
for i in range(terms):
# prepare one REXI term
alpha_for_current_term = base_nodes[i]
contour_pos = current_section.interpolate(alpha_for_current_term)
contour_derivative = current_section.evaluateDerivative(alpha_for_current_term)
function_evaluation_at_contour_pos = self.target_function(contour_pos)
b = -1 / (2j * np.pi) * function_evaluation_at_contour_pos * contour_derivative * weights[i]
self.bs.append(b)
self.a_s.append(-contour_pos)
if self.edge_to_edge:
# Undo temporary increase of terms
current_transition = 0
for i in range(len(self.sections)):
# Merge values at equal contour position
self.bs[current_transition] += self.bs[current_transition - 1]
self.a_s[current_transition] = self.a_s[current_transition] / 2 + self.a_s[current_transition - 1] / 2
current_transition += self.terms_section_list[i]
current_unwanted = 0
for i in range(len(self.sections)):
# Pop unwanted values
current_unwanted += self.terms_section_list[i] - 1
self.bs.pop(current_unwanted)
self.a_s.pop(current_unwanted)
def _normalize_for_zero(self):
current = self.approximate_target_function(0)
actual = self.target_function(0)
factor = actual.real / current.real
self.bs = [b * factor for b in self.bs]
def approximate_target_function(self, x):
total: complex = 0j
for s in range(0, len(self.bs)):
total += self.bs[s] / (self.a_s[s] + x)
return total
def approximate_target_function_using_scipy_quad(self, x):
sections_sum = 0j
for current_section in self.sections:
def cauchy_integrand(alpha):
contour_pos = current_section.interpolate(alpha)
contour_derivative = current_section.evaluateDerivative(alpha)
return self.target_function(contour_pos) * contour_derivative / (contour_pos - x)
sections_sum += _complex_quad(cauchy_integrand, 0, 1)
return sections_sum / (2j * np.pi)
def _get_section(self, a):
jump_size = len(self.sections) // 2
current_pos = jump_size
jump_size //= 2
if jump_size == 0:
jump_size = 1
while True:
if a < self.sections[current_pos].start_a:
current_pos -= jump_size
elif current_pos == len(self.sections) - 1 or a < self.sections[current_pos].end_a:
return current_pos
else:
current_pos += jump_size
jump_size //= 2
if jump_size == 0:
jump_size = 1
def calc_max_error_in_interval(self, lower_i=-8, higher_i=8, samples=1000):
values = np.linspace(lower_i * 1j, higher_i * 1j, samples)
deviations = [abs(self.target_function(a) - self.approximate_target_function(a)) for a in values]
max_deviation = max(deviations)
return max_deviation
def calc_max_error_in_intervall_via_scipy_quad(self, lower_i=-8, higher_i=8, samples=1000):
values = np.linspace(lower_i * 1j, higher_i * 1j, samples)
deviations = [abs(self.target_function(a) - self.approximate_target_function_using_scipy_quad(a)) for a in
values]
max_deviation = max(deviations)
return max_deviation
def _calculate_quad_points_for_sections(self, arc_length_distribution):
if not arc_length_distribution:
self.terms_section_list = [self.terms_number // len(self.sections)] * len(self.sections)
return
self.terms_section_list = [0] * len(self.sections)
section_lengths = [s.arc_length_start_end(0, 1) for s in self.sections]
contour_length = reduce(float.__add__, section_lengths, 0.0)
length_per_quad_point = contour_length / self.terms_number
self.terms_section_list = [int(l / length_per_quad_point) for l in section_lengths]
current_terms = reduce(int.__add__, self.terms_section_list, 0)
for _ in range(0, self.terms_number - current_terms):
max_value = -1
max_i_so_far = 0
for i in range(0, len(section_lengths)):
current_length = section_lengths[i] - self.terms_section_list[i] * length_per_quad_point
if max_value < current_length:
max_i_so_far = i
max_value = current_length
self.terms_section_list[max_i_so_far] += 1
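# The coefficients computed above define a rational approximation of the standard
# REXI form  f(x) ≈ sum_i bs[i] / (x + a_s[i]),  which is exactly what
# approximate_target_function() evaluates term by term.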
|
"""
configurations
"""
import os
import tensorflow as tf
from prepare import prepare
from main import test, train
if not os.path.exists("data/vocab"):
os.system("mkdir data/vocab")
if not os.path.exists("data/experimental"):
os.system("mkdir data/experimental")
if not os.path.exists("train"):
os.system("mkdir train")
flags = tf.flags
flags.DEFINE_string("mode", "train", "Running mode")
# common resources
flags.DEFINE_string(
"bert_config_file", "../../LIB/bert/uncased_L-12_H-768_A-12/bert_config.json",
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", "../../LIB/bert/uncased_L-12_H-768_A-12/vocab.txt",
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"init_checkpoint", "../../LIB/bert/uncased_L-12_H-768_A-12/bert_model.ckpt",
"Initial checkpoint (usually from a pre-trained BERT model).")
# processed data
flags.DEFINE_string("train_para_file", "data/processed_bert/train/paras", "Paragraphs for training")
flags.DEFINE_string("train_question_file", "data/processed_bert/train/questions", "Questions for training")
flags.DEFINE_string("dev_para_file", "data/processed_bert/dev/paras", "Paragraphs for evalutation")
flags.DEFINE_string("dev_question_file", "data/processed_bert/dev/questions", "Questions for evalutation")
flags.DEFINE_string("test_para_file", "data/processed_bert/test/paras", "Paragraphs for testing")
flags.DEFINE_string("test_question_file", "data/processed_bert/test/questions", "Questions for testing")
# vocab files
flags.DEFINE_string("word_dictionary", "data/vocab/word_dictionary.json", "Word dictionary")
flags.DEFINE_string("pos_dictionary", "data/vocab/pos_dictionary.json", "Pos dictionary")
flags.DEFINE_string("ner_dictionary", "data/vocab/ner_dictionary.json", "Ner dictionary")
flags.DEFINE_string("label_dictionary", "data/vocab/label_dictionary.json", "Label dictionary")
flags.DEFINE_string("word_emb_file", "data/vocab/word_emb.pkl", "Bert word embedding file")
flags.DEFINE_string("pos_emb_file", "data/vocab/pos_emb.json", "Pos embedding file")
flags.DEFINE_string("ner_emb_file", "data/vocab/ner_emb.json", "Ner embedding file")
flags.DEFINE_string("label_emb_file", "data/vocab/label_emb.json", "Label embedding file")
flags.DEFINE_string("map_to_orig", "data/vocab/map_to_orig.pkl", "Word pieces to normal word mapping")
# experimental data
flags.DEFINE_string("train_record_file", "data/experimental/train.tf_record", "Out file for train data")
flags.DEFINE_string("dev_record_file", "data/experimental/dev.tf_record", "Out file for dev data")
flags.DEFINE_string("test_record_file", "data/experimental/test.tf_record", "Out file for test data")
flags.DEFINE_string("train_eval_file", "data/experimental/train_eval.json", "Out file for train eval")
flags.DEFINE_string("dev_eval_file", "data/experimental/dev_eval.json", "Out file for dev eval")
flags.DEFINE_string("test_eval_file", "data/experimental/test_eval.json", "Out file for test eval")
flags.DEFINE_string("train_meta", "data/experimental/train_meta.json", "Out file for train meta")
flags.DEFINE_string("dev_meta", "data/experimental/dev_meta.json", "Out file for dev meta")
flags.DEFINE_string("test_meta", "data/experimental/test_meta.json", "Out file for test meta")
# data parameters
flags.DEFINE_integer("para_limit", 400, "Limit length for paragraph")
flags.DEFINE_integer("ques_limit", 64, "Limit length for question")
flags.DEFINE_integer("pos_dim", 16, "Embedding dimension for Glove")
flags.DEFINE_integer("ner_dim", 16, "Embedding dimension for Glove")
flags.DEFINE_integer("label_dim", 4, "Embedding dimension for answer tag")
flags.DEFINE_integer("word_dim", 768, "Embedding dimension for Bert")
# experimental parameters
flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_integer("test_batch_size", 32, "Batch size")
flags.DEFINE_integer("val_num_batches", 64, "Number of batches to evaluate the model")
flags.DEFINE_integer("hidden", 600, "Hidden size")
flags.DEFINE_integer("decoder_layers", 2, "The number of model decoder")
flags.DEFINE_boolean("word_trainable", False, "Train word embeddings along or not")
flags.DEFINE_integer("beam_size", 1, "Beam size")
flags.DEFINE_boolean("diverse_beam", False, "Use diverse beam search or not")
flags.DEFINE_float("diverse_rate", 0., "Dropout prob across the layers")
flags.DEFINE_boolean("sample", False, "Do multinominal sample or not")
flags.DEFINE_integer("sample_size", 1, "Sample size")
flags.DEFINE_float("temperature", 1.0, "Softmax temperature")
# training settings
flags.DEFINE_integer("num_steps", 20000, "Number of steps")
flags.DEFINE_integer("period", 100, "period to save batch loss")
flags.DEFINE_integer("checkpoint", 1000, "checkpoint to save and evaluate the model")
flags.DEFINE_float("dropout", 0.3, "Dropout prob across the layers")
flags.DEFINE_float("ml_learning_rate", 0.001, "Learning rate")
flags.DEFINE_float("rl_learning_rate", 0.00001, "Learning rate")
flags.DEFINE_float("grad_clip", 5.0, "Global Norm gradient clipping rate")
flags.DEFINE_float("mixing_ratio", 0.99, "The mixing ratio between ml loss and rl loss")
flags.DEFINE_string("rl_metric", "bleu", "The metric used for RL")
# training directory
flags.DEFINE_string("output_dir", "train/model", "Directory for tf event")
flags.DEFINE_string("best_ckpt", "train/model/best.json", "The best checkpoint of QG model")
# QPC training directory
flags.DEFINE_string("output_dir_qpc", "", "Directory for tf event")
flags.DEFINE_string("best_ckpt_qpc", "", "The best checkpoint")
# QA training directory
flags.DEFINE_string("output_dir_qa", "", "Directory for tf event")
flags.DEFINE_string("best_ckpt_qa", "", "The best checkpoint")
def main(_):
config = flags.FLAGS
if config.mode == "prepare":
prepare(config)
elif config.mode == "test":
test(config)
elif config.mode == "train":
train(config)
else:
print("Unknown mode")
exit(0)
if __name__ == "__main__":
tf.app.run()
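# Typical invocations with the flags defined above (the module filename is an assumption):
#   python config.py --mode prepare
#   python config.py --mode train
#   python config.py --mode test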
|
# Generated by Django 2.2.6 on 2020-08-03 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_post_author'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, default='default.jpeg', upload_to=''),
),
]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy
import unittest
import paddle.fluid as fluid
from paddle.fluid.dygraph.jit import dygraph_to_static_graph
def dyfunc_tensor_shape_1(x):
x = fluid.dygraph.to_variable(x)
res = fluid.layers.reshape(x, shape=x.shape)
return res
def dyfunc_tensor_shape_2(x):
x = fluid.dygraph.to_variable(x)
shape = x.shape
shape2 = shape
res = fluid.layers.reshape(x, shape2)
return res
def dyfunc_tensor_shape_3(x):
# Don't transform y.shape because y is numpy.ndarray
x = fluid.dygraph.to_variable(x)
y = numpy.ones(5)
res = fluid.layers.reshape(x, shape=y.shape)
return res
def dyfunc_tensor_shape_4(x):
x = fluid.dygraph.to_variable(x)
res = fluid.layers.reshape(x, shape=(-1, x.shape[0], len(x.shape)))
return res
def dyfunc_tensor_shape_5(x):
# `res = fluid.layers.reshape(x, shape=(-1, s))` to
# `res = fluid.layers.reshape(x, shape=(-1, fluid.layers.shape(x)[0]))`
x = fluid.dygraph.to_variable(x)
s = x.shape[0]
res = fluid.layers.reshape(x, shape=(-1, s))
return res
test_funcs = [
dyfunc_tensor_shape_1, dyfunc_tensor_shape_2, dyfunc_tensor_shape_3,
dyfunc_tensor_shape_4, dyfunc_tensor_shape_5
]
class TestTensorShape(unittest.TestCase):
def setUp(self):
self.input = numpy.ones(5).astype("int32")
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
def get_dygraph_output(self):
with fluid.dygraph.guard():
res = self.dygraph_func(self.input).numpy()
return res
def get_static_output(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
static_out = dygraph_to_static_graph(self.dygraph_func)(self.input)
exe = fluid.Executor(self.place)
static_res = exe.run(main_program, fetch_list=static_out)
return static_res[0]
def test_transformed_static_result(self):
for func in test_funcs:
self.dygraph_func = func
static_res = self.get_static_output()
dygraph_res = self.get_dygraph_output()
self.assertTrue(
numpy.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
static_res))
if __name__ == '__main__':
unittest.main()
|
#
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# This file is part of NeuralDB.
# See https://github.com/facebookresearch/NeuralDB for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
def setup_logging():
h = logging.StreamHandler(None)
logging.root.addHandler(h)
logging.root.setLevel(os.environ.get("LOGLEVEL", "INFO"))
if __name__ == "__main__":
setup_logging()
logger = logging.getLogger(__name__)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("This is a critical message")
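# Verbosity is controlled by the LOGLEVEL environment variable read in setup_logging(), e.g.:
#   LOGLEVEL=DEBUG python <this script>   # also shows the debug message above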
|
# How to print multiple non-consecutive values from a list with Python
from operator import itemgetter
animals = ['bear', 'python', 'peacock',
'kangaroo', 'whale', 'dog', 'cat', 'cardinal']
print(itemgetter(0, 3, 4, 1, 5)(animals))
print(animals)
print(type(animals))
# Slicing indexes
course_name = "Python Programming"
print(len(course_name))
print(course_name[0])
print(course_name[-1])
# Note that the slice ends at "t" (index 2) and not at "h" (index 3) because the end index is exclusive
print(course_name[0:3])
# if you leave off the opening number, Python will automatically assume it to be [0]
print(course_name[:3])
# This returns a copy of the original string
print(course_name[:])
# ESC character
new_course = "\"Intermediate\" Programming"
print(new_course)
# How to use backslash in a str
new__new_course = "Python\\Programming"
print(new__new_course)
# Breaks on a new line
new_new_new_course = "Python\nProgramming"
print(new_new_new_course)
|
"""All unit tests for the gaslines display module."""
import pytest
from gaslines.display import Cursor, reveal
from gaslines.grid import Grid
from tests.utility import draw_path
# Note: these automated tests merely verify that the functions under test behave
# consistently. Given the functions' visual nature, verification of correct behavior
# initially required manual testing.
UNSOLVED_GRID_STRING = """\
3 · ·
\n\
· 2 ·
\n\
* · ·\
"""
SOLVED_GRID_STRING = """\
3---·---·
|
·---2 ·
| |
*---·---·\
"""
@pytest.mark.parametrize("number_of_rows", (1, 2, 11, 101))
def test_move_up_with_positive_number_prints_correct_code(capsys, number_of_rows):
"""Verifies that `move_up` prints the expected terminal control code."""
Cursor.move_up(number_of_rows)
assert capsys.readouterr().out == f"\x1b[{number_of_rows}A"
@pytest.mark.parametrize("number_of_rows", (0, -1, -2, -11, -101))
def test_move_up_with_non_positive_number_does_nothing(capsys, number_of_rows):
"""Verifies that `move_up` with a non-positive number prints nothing."""
Cursor.move_up(number_of_rows)
assert capsys.readouterr().out == ""
def test_clear_below_prints_correct_code(capsys):
"""Verifies that `clear_below` prints the expected terminal control code."""
Cursor.clear_below()
assert capsys.readouterr().out == "\x1b[J"
def test_reveal_with_incomplete_puzzle_prints_and_backtracks(capsys):
"""Verifies that `reveal` on an incomplete puzzle prints as expected."""
grid = Grid(((3, -1, -1), (-1, 2, -1), (0, -1, -1)))
# Test reveal with no delay
reveal(grid, delay=0)
assert capsys.readouterr().out == f"\x1b[J{UNSOLVED_GRID_STRING}\n\x1b[5A"
def test_reveal_with_complete_puzzle_prints_but_does_not_backtrack(capsys):
"""Verifies that `reveal` on a complete puzzle prints as expected."""
grid = Grid(((3, -1, -1), (-1, 2, -1), (0, -1, -1)))
# Set path from "3"
draw_path(grid, ((0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)))
# Set path from "2"
draw_path(grid, ((1, 1), (1, 0), (2, 0)))
# Test reveal with no delay
reveal(grid, delay=0)
assert capsys.readouterr().out == f"\x1b[J{SOLVED_GRID_STRING}\n"
|
from datetime import datetime
from flask import (
render_template,
flash,
redirect,
url_for,
request,
g,
jsonify,
current_app,
)
from flask_login import current_user, login_required
from app import db
from app.main.forms import EditProfileForm, CreateResourceForm
from app.models import User, Resource, Post
from app.main import bp
@bp.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@bp.route("/", methods=["GET", "POST"])
@bp.route("/index", methods=["GET", "POST"])
@login_required
def index():
return render_template("index.html")
@bp.route("/explore")
@login_required
def explore():
page = request.args.get("page", 1, type=int)
posts = Post.query.order_by(Post.timestamp.desc()).paginate(
page, current_app.config["POSTS_PER_PAGE"], False
)
next_url = url_for("main.explore", page=posts.next_num) if posts.has_next else None
prev_url = url_for("main.explore", page=posts.prev_num) if posts.has_prev else None
return render_template(
"index.html",
title="Explore",
posts=posts.items,
next_url=next_url,
prev_url=prev_url,
)
@bp.route("/user/<username>")
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get("page", 1, type=int)
return render_template("user.html", user=user)
@bp.route("/edit_profile", methods=["GET", "POST"])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash("Your changes have been saved.")
return redirect(url_for("main.edit_profile"))
elif request.method == "GET":
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template("edit_profile.html", title="Edit Profile", form=form)
@bp.route("/resources", methods=["GET", "POST"])
@login_required
def list_resources():
"""
List all resources for a user
"""
resources = current_user.resources
return render_template(
"resources.html", resources=resources, title="Resources", user=current_user
)
@bp.route("/resources/add", methods=["GET", "POST"])
@login_required
def create_resource():
form = CreateResourceForm()
if form.validate_on_submit():
print(form)
print(form.name.data)
print(form.description.data)
resource = Resource(
name=form.name.data,
description=form.description.data,
user_id=current_user.id,
)
try:
db.session.add(resource)
db.session.commit()
flash("Resource added.")
except Exception:
flash("error adding resource. :(")
return redirect(url_for("main.list_resources"))
elif request.method == "GET":
# load editing data into form
pass
return render_template(
"create_resource.html", title="Add a bookable resource", form=form
)
@bp.route("/resources/edit/<int:id>", methods=["GET", "POST"])
@login_required
def edit_resource(id):
"""
Edit a resource
"""
# check_admin()
# add_department = False
resource = Resource.query.get_or_404(id)
# form = CreateResourceForm(obj=resource)
form = CreateResourceForm()
print(form)
if form.validate_on_submit():
resource.name = form.name.data
resource.description = form.description.data
db.session.commit()
flash("You have successfully edited the resource.")
# redirect to the resource page
return redirect(url_for("main.list_resources"))
form.description.data = resource.description
form.name.data = resource.name
# return render_template('create_resource.html', resource=resource, title='Resources', user=current_user)
return render_template(
"create_resource.html", title="Edit a bookable resource", form=form
)
@bp.route("/resources/delete/<int:id>", methods=["GET", "POST"])
@login_required
def delete_resource(id):
"""
Delete a resource from the database
"""
# check_admin()
try:
resource = Resource.query.get_or_404(id)
db.session.delete(resource)
db.session.commit()
flash("You have successfully deleted the resource.")
except Exception:
flash("error deleting resource. :(")
# redirect to the resources page
return redirect(url_for("main.list_resources"))
@bp.route("/follow/<username>")
@login_required
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash("User {} not found.".format(username))
return redirect(url_for("index"))
if user == current_user:
flash("You cannot follow yourself!")
return redirect(url_for("main.user", username=username))
current_user.follow(user)
db.session.commit()
flash("You are following {}!".format(username))
return redirect(url_for("main.user", username=username))
@bp.route("/unfollow/<username>")
@login_required
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash("User {} not found.".format(username))
return redirect(url_for("main.index"))
if user == current_user:
flash("You cannot unfollow yourself!")
return redirect(url_for("main.user", username=username))
current_user.unfollow(user)
db.session.commit()
flash("You are not following {}.".format(username))
return redirect(url_for("main.user", username=username))
|
# coding=utf-8
import time
from selenium import webdriver
# driver = webdriver.Chrome()  # open the browser
from common.pub import readconfig
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver import ActionChains
from common.pub.selenium_rewrite import isElementExist
from common.pub.readconfig import ReadConfig
Prodir = readconfig.proDir
class Login():
def __init__(self, driver):
localreadconfig = ReadConfig()
self.name = localreadconfig.get_login('username')
self.passwd = localreadconfig.get_login('password')
self.url = localreadconfig.get_login('url')
self.driver = driver
def login_chrome(self):
driver = self.driver
driver.implicitly_wait(10)
driver.maximize_window()
driver.get(self.url)  # open the url
user_name = driver.find_element_by_name('username')
password = driver.find_element_by_name('password')
login_but = driver.find_element_by_tag_name('button')
time.sleep(1)
user_name.send_keys(self.name)  # enter the username
password.send_keys(self.passwd)  # enter the password
try:
move_block = driver.find_element_by_class_name('verify-move-block')  # the captcha is a slider block
print("Captcha is a slider block")
while True:
action = ActionChains(driver)
action.click_and_hold(move_block)
action.move_by_offset(300, 0)
action.release()
action.perform()
login_but.click()
time.sleep(2)
flag = isElementExist(driver.find_element_by_class_name, 'location')
if flag:
break
except NoSuchElementException as e:
code = driver.find_element_by_name('code')  # captcha input
print("Captcha is a text code")
code.send_keys("_unlock")  # enter the universal captcha code _unlock
login_but.click()
for i in range(10):  # wait at most 20s
time.sleep(2)
flag = isElementExist(driver.find_element_by_class_name, 'location')
if flag:
break
if i == 9:
print("等待20s还未正常进入主界面")
|
# -*- test-case-name: mimic.test.test_nova -*-
"""
Canned responses for nova's GET limits API
"""
from __future__ import absolute_import, division, unicode_literals
def get_limit():
"""
Canned response for limits for servers. Returns only the absolute limits
"""
return {"limits":
{"absolute": {"maxServerMeta": 40,
"maxPersonality": 5,
"totalPrivateNetworksUsed": 0,
"maxImageMeta": 40,
"maxPersonalitySize": 1000,
"maxSecurityGroupRules": -1,
"maxTotalKeypairs": 100,
"totalCoresUsed": 5,
"totalRAMUsed": 2560,
"totalInstancesUsed": 5,
"maxSecurityGroups": -1,
"totalFloatingIpsUsed": 0,
"maxTotalCores": -1,
"totalSecurityGroupsUsed": 0,
"maxTotalPrivateNetworks": 3,
"maxTotalFloatingIps": -1,
"maxTotalInstances": 200,
"maxTotalRAMSize": 256000}}}
def get_version_v2(uri):
"""
Canned response nova v2 version.
Cf: http://developer.openstack.org/api-ref-compute-v2.1.html
#listVersionsv2.1
"""
return {"version":
{"status": "SUPPORTED",
"updated": "2011-01-21T11:33:21Z",
"links": [{"href": uri,
"rel": "self"},
{"href": "http://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"}],
"min_version": "",
"version": "",
"media-types":
[{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2"
}],
}
}
|
SQL_LITE_DB = 'sqlite:///db.sqlite3'
|
from config import CITIES_COLLECTION_NAME
from .base import CommandBase
class CommandCity(CommandBase):
async def __call__(self, payload):
self.sdk.log("/city handler fired with payload {}".format(payload))
if not payload['params']:
message = "Enter city ID\n\nExample: /city 498817"
else:
city_id = int(payload['params'])
registered_city = self.sdk.db.find_one(CITIES_COLLECTION_NAME, {'chat': payload['chat']})
if not registered_city:
self.sdk.db.insert(CITIES_COLLECTION_NAME, {'chat': payload['chat'], 'city_id': city_id})
else:
self.sdk.db.update(CITIES_COLLECTION_NAME,
{'chat': payload['chat']},
{'chat': payload['chat'], 'city_id': city_id}
)
message = "You choose city with ID = {}".format(city_id)
await self.sdk.send_text_to_chat(
payload["chat"],
message
)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2012, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
from collections import defaultdict
from milk.utils import get_nprandom
import numpy as np
from .base import supervised_model
__all__ = [
'kNN',
'knn_learner',
'approximate_knn_learner',
]
def _plurality(xs):
counts = defaultdict(int)
counts = defaultdict(int)
for x in xs: counts[x] += 1
best,_ = max(iter(counts.items()), key=(lambda k_v: k_v[1]))
return best
class kNN(object):
'''
k-Nearest Neighbour Classifier
Naive implementation of a k-nearest neighbour classifier.
C = kNN(k)
Attributes:
-----------
k : integer
number of neighbours to consider
'''
def __init__(self, k=1):
self.k = k
def train(self, features, labels, normalisedlabels=False, copy_features=False):
features = np.asanyarray(features)
labels = np.asanyarray(labels)
if copy_features:
features = features.copy()
labels = labels.copy()
features2 = np.sum(features**2, axis=1)
return kNN_model(self.k, features, features2, labels)
knn_learner = kNN
class kNN_model(supervised_model):
def __init__(self, k, features, features2, labels):
self.k = k
self.features = features
self.f2 = features2
self.labels = labels
def apply(self, features):
features = np.asanyarray(features)
diff2 = np.dot(self.features, (-2.)*features)
diff2 += self.f2
neighbours = diff2.argsort()[:self.k]
labels = self.labels[neighbours]
return _plurality(labels)
class approximate_knn_model(supervised_model):
def __init__(self, k, X, projected):
self.k = k
self.X = X
self.projected = projected
self.p2 = np.array([np.dot(p,p) for p in projected])
def apply(self, t):
tx = np.dot(self.X.T, t)
d = np.dot(self.projected,tx)
d *= -2
d += self.p2
if self.k == 1:
return np.array([d.argmin()])
d = d.argsort()
return d[:self.k]
class approximate_knn_classification_model(supervised_model):
def __init__(self, k, X, projected, labels):
self.base = approximate_knn_model(k, X, projected)
self.labels = labels
def apply(self, f):
idxs = self.base.apply(f)
return _plurality(self.labels[idxs])
class approximate_knn_learner(object):
'''
approximate_knn_learner
Learns a k-nearest neighbour classifier, where the proximity is approximate
as it is computed on a small dimensional subspace (random subspace
projection). For many datasets, this is acceptable.
'''
def __init__(self, k, ndims=8):
self.k = k
self.ndims = ndims
def train(self, features, labels, **kwargs):
labels = np.asanyarray(labels)
R = get_nprandom(kwargs.get('R'))
_, n_features = features.shape
X = R.random_sample((n_features, self.ndims))
projected = np.dot(features, X)
return approximate_knn_classification_model(self.k, X, projected, labels.copy())
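# A minimal usage sketch with toy data (purely illustrative):
#   learner = kNN(k=3)
#   model = learner.train(np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]]),
#                         np.array([0, 1, 1, 0]))
#   model.apply(np.array([0.9, 0.9]))   # plurality vote among the 3 nearest neighbours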
|
# This file is part of DagAmendment, the reference implementation of:
#
# Michel, Élie and Boubekeur, Tamy (2021).
# DAG Amendment for Inverse Control of Parametric Shapes
# ACM Transactions on Graphics (Proc. SIGGRAPH 2021), 173:1-173:14.
#
# Copyright (c) 2020-2021 -- Télécom Paris (Élie Michel <elie.michel@telecom-paris.fr>)
#
# The MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided “as is”, without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and non-infringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the Software.
# no bpy here
from .profiling import Timer
class ParametricShape:
"""Wraps the Blender scene to provide an interface whose names
better match the terms of the paper and that may be used to
more easily port this to other software.
Use ParametricShape.from_scene(scene) to get the parametric shape
of a given scene."""
__init_guard = object() # to prevent one from using __init__
@classmethod
def from_scene(cls, scene):
"""Get the parametric shape from the current Blender scene"""
import bpy
scene.diffparam.ensure_view_layer_depsgraph(bpy.context)
view_layer = scene.diffparam.view_layer
depsgraph = view_layer.depsgraph
shape = ParametricShape(ParametricShape.__init_guard)
shape.hyperparams = scene.diffparam_parameters
shape._scene = scene
shape._depsgraph = depsgraph
shape._view_layer = view_layer
return shape
def __init__(self, init_guard):
if init_guard != ParametricShape.__init_guard:
raise Exception("Don't create ParametricShape instances manually, " +
"use ParametricShape.from_scene() instead")
self.hyperparams = None
self._scene = None
self._depsgraph = None
self._view_layer = None
def set_hyperparams(self, values):
assert(len(values) == len(self.hyperparams))
for hparam, val in zip(self.hyperparams, values):
hparam.update(set=val)
def update(self):
"""
Update the currently evaluated static geometry from the hyper parameters
"""
timer = Timer()
self._depsgraph.update()
self._scene.profiling["ParametricShape:update"].add_sample(timer)
def cast_ray(self, ray, make_coparam=None):
"""
Cast a ray onto the currently evaluated geometry (call update()
to reevaluate)
@param ray: Ray to intersect with the shape
@param make_coparam: Optional callback returning a coparam from a hit point
@return (hit position, hit coparam)
"""
self._view_layer.update()
assert(self._scene == self._view_layer.id_data)
assert(self._depsgraph == self._view_layer.depsgraph)
hit = self._scene.ray_cast(self._depsgraph, ray.origin, ray.direction)
success, location, normal, poly_index, obj, matrix = hit
if not success:
return None
if make_coparam is not None:
coparam = make_coparam(location, normal, poly_index, obj.evaluated_get(self._depsgraph), matrix)
else:
coparam = None
return location, coparam
|
"""
Parser which works with tokens
"""
import sys
from lexer import Lexer, TokenType
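# Grammar implemented by the recursive-descent methods below
# (each statement is terminated by a SEMICOLON; statement() returns a list of nodes):
#   statement  : PRINT expr | ID ASSIGN expr | IF logic_expr THEN statement [ELSE statement] END
#   logic_expr : expr ((MORE | LESS | EMORE | ELESS) expr)*
#   expr       : slag ((ADD | SUB) slag)*
#   slag       : mnog ((MUL | DIV) mnog)*      # "slag" = summand / additive term
#   mnog       : NUMBER | ID | LPAR expr RPAR  # "mnog" = factor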
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.pos = 0
def match(self, expectedTypes):
if self.pos < len(self.tokens):
token = self.tokens[self.pos]
if token.type in expectedTypes:
self.pos += 1
return True
return False
def expr(self):
e1 = self.slag()
while self.match([TokenType.ADD, TokenType.SUB]):
op = self.tokens[self.pos-1]
e2 = self.slag()
e1 = BinOpNode(op.text, e1, e2)
return e1
def logic_expr(self):
e1 = self.expr()
while self.match([TokenType.MORE, TokenType.LESS, TokenType.EMORE, TokenType.ELESS]):
op = self.tokens[self.pos-1]
e2 = self.expr()
e1 = BinOpNode(op.text, e1, e2)
return e1
def statement(self):
list = []
while self.match([TokenType.PRINT, TokenType.ID, TokenType.IF]):
statement = self.tokens[self.pos-1]
if statement.type == TokenType.PRINT:
expr = self.expr()
node = StatementNode(statement, expr)
elif statement.type == TokenType.ID:
self.require([TokenType.ASSIGN])
expr = self.expr()
node = StatementNode(statement, expr)
elif statement.type == TokenType.IF:
expr = self.logic_expr()
self.require([TokenType.THEN])
node = StatementNode(statement, expr)
node.s_then = self.statement()
if self.match([TokenType.ELSE]):
node.s_else = self.statement()
self.require([TokenType.END])
list.append(node)
self.require([TokenType.SEMICOLON])
return list
def mnog(self):
if self.match([TokenType.LPAR]):
e = self.expr()
self.require([TokenType.RPAR])
return e
else:
e = self.require([TokenType.NUMBER, TokenType.ID])
if e.type == TokenType.ID:
e = VarNode(e.text)
else:
e = NumberNode(e.text)
return e
def slag(self):
e1 = self.mnog()
while self.match([TokenType.MUL, TokenType.DIV]):
op = self.tokens[self.pos-1]
e2 = self.mnog()
e1 = BinOpNode(op.text, e1, e2)
return e1
def require(self, expecteds):
if not self.match(expecteds):
if not isinstance(expecteds[0].value, str):
value = expecteds[0].value[0]
else:
value = expecteds[0].value
self.error("Ожидалось {}".format(value))
return self.tokens[self.pos-1]
def error(self, msg):
print(self.tokens[self.pos-1].text)
raise Exception(
"{} в строке {} на позиции {}"
.format(
msg, self.tokens[self.pos-1].line+1,
len(self.tokens[self.pos-1].text)+self.tokens[self.pos-1].pos_in_line-1
)
)
def runtime_error(self, msg):
raise Exception(msg)
# AST node classes
class NumberNode:
#number;
def __init__(self, number):
self.number = number
class VarNode:
#id
def __init__(self, var):
self.var = var
class BinOpNode:
#op
#left
#right
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
class StatementNode:
#statement;
def __init__(self, statement, ast):
self.statement = statement
self.expr = ast
self.s_then = []
self.s_else = []
def append(self, statement):
self.list.append(statement)
|
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from utils import RepeatTimer, transform_time_string
from .monitor_logger import logger
class Monitor:
"""
    This class is used for monitoring multiple metrics.
"""
def __init__(self):
self._tasks = dict()
def apply(self, instance, *args, **kwargs):
if instance in self._tasks:
return False
logger.info('add [{task}] in Monitor task......'.format(task=getattr(instance, 'metric_name')))
interval = getattr(instance, 'forecast_interval')
try:
interval = transform_time_string(interval, mode='to_second')
except ValueError as e:
logger.error(e, exc_info=True)
            return False
timer = RepeatTimer(interval, instance.run, *args, **kwargs)
self._tasks[instance] = timer
return True
def start(self):
for instance, timer in self._tasks.items():
timer.start()
logger.info('begin to monitor [{task}]'.format(task=getattr(instance, 'metric_name')))
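# --- Hedged usage sketch (names below are hypothetical stand-ins, not project code) ---
# Any object exposing `metric_name`, `forecast_interval` (a string accepted by
# transform_time_string(..., mode='to_second')) and a `run()` method can be registered:
#
#   class DummyDetector:
#       metric_name = 'dummy_metric'
#       forecast_interval = '60S'   # assumed format; see transform_time_string for accepted values
#       def run(self):
#           pass
#
#   monitor = Monitor()
#   monitor.apply(DummyDetector())
#   monitor.start()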
|
import dnest4.classic as dn4
from pylab import *
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 16
plt.rc("text", usetex=True)
data = loadtxt('galaxies.txt')
posterior_sample = atleast_2d(dn4.my_loadtxt('posterior_sample.txt'))
x = linspace(0., 50.0, 10001)
def mixture(x, params):
N = int(params[7])
centers = params[8:108][0:N]
widths = exp(params[108:208][0:N])
weights = exp(params[208:308][0:N])
weights /= weights.sum()
y = zeros(x.shape)
for i in range(0, N):
# Don't plot flukey narrow things (which ought to eventually average
# out, but won't in a finite sample)
if widths[i] >= 0.02:
y += weights[i]/widths[i]/sqrt(2.*pi)*exp(-0.5*(x - centers[i])**2/widths[i]**2)
return y
clf()
hist(data, 100, alpha=0.2, color="k", density=True)
y_tot = zeros(len(x))
for i in range(0, posterior_sample.shape[0]):
y = mixture(x, posterior_sample[i, :])
y_tot += y
plot(x, y_tot/posterior_sample.shape[0], 'g', linewidth=2)
xlabel("Velocity (1000 km/s)")
ylabel("Density")
savefig("galaxies.pdf", bbox_inches="tight")
show()
width = 0.3
bins = arange(0, 101) - 0.5*width
hist(posterior_sample[:,7], bins, width=width, density=True, color="k", alpha=0.2)
xlim([0, 100.5])
ylim([0, 0.05])
xlabel("Number of gaussians, $N$")
ylabel("Posterior Probability")
savefig("galaxies_N.pdf", bbox_inches="tight")
show()
|
from itertools import chain
import argspec
S_INVOKE = '(Error invoking event %s on %r with args %r: %r)'
# events: dict of (event_name:handler_levels)
# handler_levels: 3-tuple of sets of functions
class Events(object):
EVENT_LEVELS = BLOCK, CONSUME, NOTIFY = range(3)
def __init__(self, default = NOTIFY):
self.default = default
self.events = {}
@property
def default(self):
return self._default[0] if hasattr(self, "_default") else self.NOTIFY
@default.setter
def default(self, value):
if not hasattr(self, "_default"):
self._default = [self.NOTIFY]
if value in self.EVENT_LEVELS:
self._default[0] = value
def _subscribe(self, func, name, level):
argspec.set(func)
self.events[name][level].add(func)
# def subscribe(self, func, name = None, level = None):
def subscribe(self, *args):
args = list(args)
func = args.pop(0) if len(args) and hasattr(args[0], "__call__") else None
cname = args.pop(0) if len(args) else None
level = args.pop(0) if len(args) and args[0] in self.EVENT_LEVELS else self.default
def sub(func):
name = (cname or func.__name__).lower()
if not self.events.has_key(name):
self.events.setdefault(name, (set(), set(), set()))
self._subscribe(func, name, level)
return func
return sub(func) if func else sub
def _unsubscribe(self, func, name, level):
self.events[name][level].discard(func)
if not any(self.events[name]):
self.events.pop(name)
def unsubscribe(self, func, name = None, level = None):
name = name.lower()
if level not in self.EVENT_LEVELS:
level = self.default
if self.events.has_key(name):
self._unsubscribe(func, name, level)
def invoke(self, name, *args, **kwargs):
strict = bool(kwargs.get('strict', False))
if not self.events.has_key(name):
return None
for level in self.EVENT_LEVELS:
for func in self.events[name][level]:
if not argspec.iscompat(func, len(args)):
if strict:
raise argspec.ArgCountError(func, len(args))
print S_INVOKE % (name, func, args, 'Invalid number of args')
continue
try:
result = func(*args)
except Exception as e:
if strict:
raise
print S_INVOKE % (name, func, args, e)
else:
if level < self.NOTIFY and result is not None:
return result
result = None
return None
def recorder(self):
class Recorder(Events):
def __init__(self, existing):
                # these attributes are shared by reference with the parent Events
                # instance, so changes made through either object apply to both
self.events = existing.events
self._default = existing._default
self.recorded = set()
def _subscribe(self, func, name, level):
Events._subscribe(self, func, name, level)
self.recorded.add((func, name, level))
def _unsubscribe(self, func, name, level):
Events._unsubscribe(self, func, name, level)
self.recorded.discard((func, name, level))
def unsubscribe_all(self):
for args in self.recorded.copy():
self._unsubscribe(*args)
return Recorder(self)
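# --- Hedged usage sketch (illustrative only; the handler name is made up) ---
#   events = Events()
#
#   @events.subscribe
#   def greet(who):
#       print 'hello %s' % who
#
#   events.invoke('greet', 'world')   # event name defaults to the handler's lowercased __name__
#   events.unsubscribe(greet, 'greet')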
|
'''
State-change event types built on top of the observer module's Event class.
'''
from observer import Event
class StateChangeEvent(Event):
    '''
    Event emitted when an object's state changes; carries both the old and the new state.
    '''
def __init__(self, emitter, old_state, new_state):
super().__init__(emitter)
self.old_state = old_state
self.new_state = new_state
|
import torch
from sklearn.metrics import r2_score
def my_metric(output, target):
with torch.no_grad():
#pred = torch.argmax(output, dim=1)
#assert pred.shape[0] == len(target)
#correct = 0
#correct += torch.sum(output == target).item()
output = output.cpu()
target = target.cpu()
output = output.detach().numpy()
target = target.detach().numpy()
        # sklearn's r2_score expects (y_true, y_pred): ground truth first, prediction second
        R2 = r2_score(target, output)
return R2#correct / len(target)
'''
def my_metric2(output, target, k=3):
with torch.no_grad():
#pred = torch.topk(output, k, dim=1)[1]
#assert pred.shape[0] == len(target)
correct = 0
#for i in range(k):
correct += torch.sum(output == target).item()
return correct / len(target)
'''
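# Minimal sanity check (illustrative only): R2 should be close to 1 for an
# almost-perfect prediction. Uses random dummy tensors, not project data.
if __name__ == '__main__':
    target = torch.randn(32, 1)
    output = target + 0.05 * torch.randn(32, 1)  # near-perfect "prediction"
    print(my_metric(output, target))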
|
#! /usr/bin/env python3
from setuptools import setup
import sys
if sys.version_info[0] < 3:
raise Exception("Sorry, you must use Python 3")
import pathlib
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text(encoding='utf-8')
setup(
name='charityvillage_jobs',
description="""This is a Scrapy project to scrape jobs from [charityvillage.com](https://charityvillage.com).""",
url='https://github.com/tristanlatr/charityvillage_jobs',
maintainer='tristanlatr',
version='1',
packages=['charityvillage_jobs','charityvillage_jobs.spiders'],
install_requires=[ 'scrapy', 'bs4', 'scrapy-selenium' ],
classifiers=[ "Programming Language :: Python :: 3", ],
license='The MIT License',
long_description=README,
long_description_content_type="text/markdown"
)
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
tree.py
---------------------
Date : November 2013
Copyright : (C) 2013-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2013'
__copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class Tree(object):
'''An object representing a tree path for a given commit'''
ROOT = None
def __init__(self, repo, ref, path=ROOT, size=None):
self.repo = repo
self.ref = ref
self.path = path
self.size = size
@property
def trees(self):
return self.repo._trees(self.ref, self.path)
@property
def features(self):
return self.repo.features(self.ref, self.path)
@property
def featuretype(self):
return self.repo.featuretype(self.ref, self.path)
@property
def children(self):
return self.repo.children(self.ref, self.path)
@property
def count(self):
return self.repo.count(self.ref, self.path)
def exportshp(self, shapefile):
'''exports this tree to the specified shapefile'''
self.repo.exportshp(self.ref, self.path, shapefile)
def __str__(self):
return self.ref + ":" + self.path
|
import halfedge_mesh
from halfedge_mesh.halfedge_mesh import distance
mesh = halfedge_mesh.HalfedgeMesh("cube.off")
#def dijkstra(mesh, s):
    # create the distance list
#d = [ None for v in mesh.vertices ]
    # the distance to the first (source) point is zero
#d[s.index] = 0
    # the predecessors are not known yet
#pred = [ None for v in mesh.vertices]
#openVerts = [ [d.index, d[s.index]] ]
#while len(openVerts) != 0:
        # take the closest point v in openVerts
        # TODO
        # iterate over its neighbours
        # if this neighbour is not yet closed
        # then compute this neighbour's distance to the source
        # going through the point v
        # update d and pred, and add this new
        # element to openVerts
        # TODO
# v: vertex
v = mesh.vertices[0]
h = v.halfedge
first = True
print("on part du point " + str(v.index))
# iterate over all neighbours of v
while first or h != v.halfedge:
first = False
v2 = h.opposite.vertex
print(v2.index)
h = h.next_around_vertex()
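# --- Hedged sketch of the missing Dijkstra body (illustrative; not part of the original) ---
# It reuses the neighbour-iteration pattern shown above and assumes that
# distance(a, b) returns the length of the edge between two adjacent vertices.
#
#   def dijkstra(mesh, s):
#       d = [None for v in mesh.vertices]
#       pred = [None for v in mesh.vertices]
#       d[s.index] = 0
#       openVerts = [[s.index, 0]]
#       closed = set()
#       while openVerts:
#           openVerts.sort(key=lambda e: e[1])     # take the closest open vertex
#           idx, dist = openVerts.pop(0)
#           if idx in closed:
#               continue
#           closed.add(idx)
#           v = mesh.vertices[idx]
#           h, first = v.halfedge, True
#           while first or h != v.halfedge:        # visit every neighbour of v
#               first = False
#               v2 = h.opposite.vertex
#               if v2.index not in closed:
#                   alt = dist + distance(v, v2)   # assumed signature of the imported helper
#                   if d[v2.index] is None or alt < d[v2.index]:
#                       d[v2.index], pred[v2.index] = alt, idx
#                       openVerts.append([v2.index, alt])
#               h = h.next_around_vertex()
#       return d, pred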
|
import arrow
import cassiopeia
from cassiopeia import Region, Platform
import pytest
from merakicommons.container import SearchableList
from datapipelines.common import NotFoundError
from .constants import CHAMP_NAME, SUMMONER_NAME, UNKNOWN_SUMMONER_NAME
def test_masteries_correct_type():
summoner = cassiopeia.get_summoner(name=SUMMONER_NAME, region="NA")
champ_masteries = cassiopeia.get_champion_masteries(
summoner=summoner.id, region="NA"
)
assert isinstance(champ_masteries, SearchableList)
assert all(isinstance(cm, cassiopeia.ChampionMastery) for cm in champ_masteries)
def test_masteries_contains_all_champions():
champions = cassiopeia.get_champions(region="NA")
summoner = cassiopeia.get_summoner(name=SUMMONER_NAME, region="NA")
champ_masteries = cassiopeia.get_champion_masteries(
summoner=summoner.id, region="NA"
)
for cm in champ_masteries:
assert cm.champion in champions
for champion in champions:
assert champion in champ_masteries
def test_mastery_return():
summoner = cassiopeia.get_summoner(name=SUMMONER_NAME, region="NA")
champion = cassiopeia.get_champion(CHAMP_NAME, region="NA")
champion_mastery = cassiopeia.get_champion_mastery(
summoner=summoner.id, champion=champion, region="NA"
)
assert isinstance(champion_mastery, cassiopeia.ChampionMastery)
assert isinstance(champion_mastery.summoner, cassiopeia.Summoner)
assert isinstance(champion_mastery.champion, cassiopeia.Champion)
assert champion_mastery.summoner == summoner
assert champion_mastery.champion == champion
assert isinstance(champion_mastery.platform, Platform)
assert isinstance(champion_mastery.region, Region)
assert isinstance(champion_mastery.chest_granted, bool)
assert isinstance(champion_mastery.last_played, arrow.Arrow)
assert isinstance(champion_mastery.level, int) and champion_mastery.level <= 7
assert isinstance(champion_mastery.points, int)
assert isinstance(champion_mastery.points_since_last_level, int)
assert isinstance(champion_mastery.points_until_next_level, int)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-28 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parkstay', '0034_auto_20161222_1642'),
]
operations = [
migrations.AlterField(
model_name='campsite',
name='tent',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='campsiteclass',
name='tent',
field=models.BooleanField(default=True),
),
]
|
import csv
import requests
import lxml
from bs4 import BeautifulSoup
def get_page(url):
response = requests.get(url)
if not response.ok:
print('Server responded:', response.status_code)
else:
soup = BeautifulSoup(response.text, 'lxml')
return soup
def get_detail_data(soup):
# title
# price
# items sold
try:
soup.find('span', class_='g-hdn').extract()
title = soup.find('h1', id='itemTitle').text.strip()
except:
title = ''
try:
try:
p = soup.find('span', id='prcIsum').text.strip().replace('/ea', '')
except:
p = soup.find('span', id='mm-saleDscPrc').text.strip()
currency, price = p.split(' ')
except:
currency = ''
price = ''
try:
try:
sold = soup.find('span', class_='vi-qtyS-hot-red').find('a').text.strip().split(' ')[0]
except:
sold = soup.find('span', class_='vi-qtyS-hot').find('a').text.strip().split(' ')[0]
except:
sold = ''
data = {
'title': title,
'price': price,
'currency': currency,
'total sold': sold
}
return data
def get_index_data(soup):
try:
links = soup.find_all('a', class_='s-item__link')
except:
links = []
urls = [item.get('href') for item in links]
return urls
def write_csv(data, url):
with open('output.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
row = [data['title'], data['price'], data['currency'], data['total sold'], url]
writer.writerow(row)
def main():
url = 'https://www.ebay.com/sch/i.html?_nkw=mens+watches&rt=nc&LH_BIN=1&_pgn=1'
products = get_index_data(get_page(url))
for link in products:
data = get_detail_data(get_page(link))
write_csv(data, link)
if __name__ == '__main__':
main()
|
#One-player game of Tic-Tac-Toe where the player plays
#against an AI. Uses a console text interface.
import random #Used to generate random variables
gameBoard = None #Variable to hold the grid representing the game board
playerVariable = None #Variable that represents the player
computerVariable = None #Variable that represents the AI
#Prints the game board to the console
def printBoard():
rowString = ["R","O","W"]
rowCount = 0
print("")
print(" C O L")
print(" 1 2 3")
print(" - - - - - - -")
for row in gameBoard:
print(rowString[rowCount]+" "+str((rowCount+1)), end=" | ")
rowCount += 1
for column in row:
print(column, end=" | ")
print("\n - - - - - - -")
#Allows the user to choose what symbol they wish to play as. Either "X" or "O"
def choosePlayingSymbol():
global playerVariable
global computerVariable
print("\nChoose a playing symbol")
print(" 1. X")
print(" 2. O")
choice = int(input("Enter choice: "))
if choice == 1:
playerVariable = "X"
computerVariable = "O"
elif choice == 2:
playerVariable = "O"
computerVariable = "X"
else:
print("Invalid Option.\nPlease Try Again.")
choosePlayingSymbol()
#Function that allows the player to take their turn
def playerTurn():
print("\nYour Turn")
row = int(input("Enter row: ")) - 1
column = int(input("Enter column: ")) - 1
if row >= 0 and row <= 2 and column >= 0 and column <=2:
if gameBoard[row][column] == "*":
gameBoard[row][column] = playerVariable
else:
print("\nSpot is taken.")
print("Please try again.")
printBoard()
playerTurn()
else:
print("\nSpot does not exist")
print("Please try again.")
printBoard()
playerTurn()
#Function that checks if the game is over and someone has won
def checkGameOver():
tL = gameBoard[0][0]
tM = gameBoard[0][1]
tR = gameBoard[0][2]
cL = gameBoard[1][0]
cM = gameBoard[1][1]
cR = gameBoard[1][2]
bL = gameBoard[2][0]
bM = gameBoard[2][1]
bR = gameBoard[2][2]
#Checks top row
if tL==tM and tL==tR and tL=="O" or tL==tM and tL==tR and tL=="X":
winner(tL)
#Checks center row
elif cL==cM and cL==cR and cL=="O" or cL==cM and cL==cR and cL=="X":
winner(cL)
#Checks bottom row
elif bL==bM and bL==bR and bL=="O" or bL==bM and bL==bR and bL=="X":
winner(bL)
#Checks left column
elif tL==cL and tL==bL and tL=="O" or tL==cL and tL==bL and tL=="X":
winner(tL)
#Checks middle column
elif tM==cM and tM==bM and tM=="O" or tM==cM and tM==bM and tM=="X":
winner(tM)
#Checks right column
elif tR==cR and tR==bR and tR=="O" or tR==cR and tR==bR and tR=="X":
winner(tR)
#Checks left to right diagonal
elif tL==cM and tL==bR and tL=="O" or tL==cM and tL==bR and tL=="X":
winner(tL)
#Checks right to left diagonal
elif bL==cM and bL==tR and bL=="O" or bL==cM and bL==tR and bL=="X":
winner(bL)
else:
count = 0
for x in range(3):
for y in range(3):
if gameBoard[x][y] == "*":
count = 1
if count == 0:
printBoard()
print("\nTIE")
print("GAME OVER\n")
gameOver()
#Called when a winner is determined and ends the game
def winner(player):
printBoard()
if player == playerVariable:
print("\nYOU WIN")
elif player == computerVariable:
print("\nK.A.R.E.N WINS")
print("GAMEOVER\n")
gameOver()
#This is the function that holds the algorithm that allows the AI to choose its move
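#The move priority implemented below is: take a winning move if one exists,
#otherwise block a winning move by the player, otherwise take a random free corner,
#otherwise the center, otherwise a random free side spot.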
def computerTurn():
turnTaken = 0 #variable to determine if turn has been taken
topLeft = gameBoard[0][0]
topMiddle = gameBoard[0][1]
topRight = gameBoard[0][2]
centerLeft = gameBoard[1][0]
centerMiddle = gameBoard[1][1]
centerRight = gameBoard[1][2]
bottomLeft = gameBoard[2][0]
bottomMiddle = gameBoard[2][1]
bottomRight = gameBoard[2][2]
#Check for a winning move
if turnTaken == 0:
#Check Left Column
if topLeft==computerVariable and topLeft==centerLeft and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
elif topLeft==computerVariable and topLeft==bottomLeft and centerLeft=="*":
gameBoard[1][0]=computerVariable
turnTaken=1
elif centerLeft==computerVariable and centerLeft==bottomLeft and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Middle Column
elif topMiddle==computerVariable and topMiddle==centerMiddle and bottomMiddle=="*":
gameBoard[2][1]=computerVariable
turnTaken=1
elif topMiddle==computerVariable and topMiddle==bottomMiddle and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==computerVariable and centerMiddle==bottomMiddle and topMiddle=="*":
gameBoard[0][1]=computerVariable
turnTaken=1
#Check Right Column
elif topRight==computerVariable and topRight==centerRight and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif topRight==computerVariable and topRight==bottomRight and centerRight=="*":
gameBoard[1][2]=computerVariable
turnTaken=1
elif centerRight==computerVariable and centerRight==bottomRight and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
#Check Top Row
elif topLeft==computerVariable and topLeft==topMiddle and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
elif topLeft==computerVariable and topLeft==topRight and topMiddle=="*":
gameBoard[0][1]=computerVariable
turnTaken=1
elif topMiddle==computerVariable and topMiddle==topRight and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Middle Row
elif centerLeft==computerVariable and centerLeft==centerMiddle and centerRight=="*":
gameBoard[1][2]=computerVariable
turnTaken=1
elif centerLeft==computerVariable and centerLeft==centerRight and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==computerVariable and centerMiddle==centerRight and centerLeft=="*":
gameBoard[1][0]=computerVariable
turnTaken=1
#Check Bottom Row
elif bottomLeft==computerVariable and bottomLeft==bottomMiddle and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif bottomLeft==computerVariable and bottomLeft==bottomRight and bottomMiddle=="*":
gameBoard[2][1]=computerVariable
turnTaken=1
elif bottomMiddle==computerVariable and bottomMiddle==bottomRight and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
#Check Left Diagonal
elif topLeft==computerVariable and topLeft==centerMiddle and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif topLeft==computerVariable and topLeft==bottomRight and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==computerVariable and centerMiddle==bottomRight and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Right Diagonal
elif topRight==computerVariable and topRight==centerMiddle and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
elif topRight==computerVariable and topRight==bottomLeft and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==computerVariable and centerMiddle==bottomLeft and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
#if turnTaken == 1:
#print("Win move taken")
#Checks for a winning move by the user and blocks it
if turnTaken == 0:
#Check Left Column
if topLeft==playerVariable and topLeft==centerLeft and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
elif topLeft==playerVariable and topLeft==bottomLeft and centerLeft=="*":
gameBoard[1][0]=computerVariable
turnTaken=1
elif centerLeft==playerVariable and centerLeft==bottomLeft and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Middle Column
elif topMiddle==playerVariable and topMiddle==centerMiddle and bottomMiddle=="*":
gameBoard[2][1]=computerVariable
turnTaken=1
elif topMiddle==playerVariable and topMiddle==bottomMiddle and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==playerVariable and centerMiddle==bottomMiddle and topMiddle=="*":
gameBoard[0][1]=computerVariable
turnTaken=1
#Check Right Column
elif topRight==playerVariable and topRight==centerRight and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif topRight==playerVariable and topRight==bottomRight and centerRight=="*":
gameBoard[1][2]=computerVariable
turnTaken=1
elif centerRight==playerVariable and centerRight==bottomRight and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
#Check Top Row
elif topLeft==playerVariable and topLeft==topMiddle and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
elif topLeft==playerVariable and topLeft==topRight and topMiddle=="*":
gameBoard[0][1]=computerVariable
turnTaken=1
elif topMiddle==playerVariable and topMiddle==topRight and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Middle Row
elif centerLeft==playerVariable and centerLeft==centerMiddle and centerRight=="*":
gameBoard[1][2]=computerVariable
turnTaken=1
elif centerLeft==playerVariable and centerLeft==centerRight and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==playerVariable and centerMiddle==centerRight and centerLeft=="*":
gameBoard[1][0]=computerVariable
turnTaken=1
#Check Bottom Row
elif bottomLeft==playerVariable and bottomLeft==bottomMiddle and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif bottomLeft==playerVariable and bottomLeft==bottomRight and bottomMiddle=="*":
gameBoard[2][1]=computerVariable
turnTaken=1
elif bottomMiddle==playerVariable and bottomMiddle==bottomRight and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
#Check Left Diagonal
elif topLeft==playerVariable and topLeft==centerMiddle and bottomRight=="*":
gameBoard[2][2]=computerVariable
turnTaken=1
elif topLeft==playerVariable and topLeft==bottomRight and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==playerVariable and centerMiddle==bottomRight and topLeft=="*":
gameBoard[0][0]=computerVariable
turnTaken=1
#Check Right Diagonal
elif topRight==playerVariable and topRight==centerMiddle and bottomLeft=="*":
gameBoard[2][0]=computerVariable
turnTaken=1
elif topRight==playerVariable and topRight==bottomLeft and centerMiddle=="*":
gameBoard[1][1]=computerVariable
turnTaken=1
elif centerMiddle==playerVariable and centerMiddle==bottomLeft and topRight=="*":
gameBoard[0][2]=computerVariable
turnTaken=1
#if turnTaken == 1:
#print("Block move taken")
#Randomly chooses a corner spot, if available
if turnTaken == 0:
possible_corners = [[0,0],[0,2],[2,0],[2,2]]
random.shuffle(possible_corners)
#for x in range(len(possible_corners)):
#print (possible_corners[x])
while len(possible_corners) > 0:
possibleCorner = possible_corners.pop(0)
if gameBoard[possibleCorner[0]][possibleCorner[1]]=="*":
gameBoard[possibleCorner[0]][possibleCorner[1]]=computerVariable
turnTaken=1
#print("Corner move taken")
break
#Chooses the center spot, if available
if turnTaken == 0:
if centerMiddle == "*":
gameBoard[1][1] = computerVariable
turnTaken=1
#print("Center move taken")
#Randomly chooses a side spot, if available
if turnTaken == 0:
possible_sides = [[0,1],[1,0],[1,2],[2,1]]
random.shuffle(possible_sides)
#for x in range(len(possible_sides)):
#print (possible_sides[x])
while len(possible_sides) > 0:
possibleSide = possible_sides.pop(0)
if gameBoard[possibleSide[0]][possibleSide[1]]=="*":
gameBoard[possibleSide[0]][possibleSide[1]]=computerVariable
turnTaken=1
#print("Side move taken")
break
    #Informs the user that the AI has taken its turn
if turnTaken == 1:
print("\nK.A.R.E.N has taken her turn")
#Function starts the while loop that runs the game until the game is over
def startGame(firstTurn):
if firstTurn == 0:
printBoard()
while True:
playerTurn()
#printBoard()
checkGameOver()
computerTurn()
checkGameOver()
printBoard()
elif firstTurn == 1:
while True:
computerTurn()
checkGameOver()
printBoard()
playerTurn()
#printBoard()
checkGameOver()
#Initializes the game board and prints out rules and instructions to the user
def initalizeGame():
global gameBoard
gameBoard = [["*","*","*"], ["*","*","*"],["*","*","*"]]
print("\nHere are the rules:")
print(" You will be playing against the AI known as K.A.R.E.N.")
print(" The first turn will be chosen at random.")
print(" 3 of your symbols must be placeed together inorder to win.")
print(" Symbols can be placed by row, columns, or diagonaly.")
print(" Rows go from left to right. Columns from top to down.")
choosePlayingSymbol() #Player chooses their playing symbol
firstTurn = random.randint(0,1) #Whoever goes first is decided at random
if firstTurn == 0:
print("\nYou Go First")
startGame(firstTurn)
elif firstTurn == 1:
print("\nK.A.R.E.N Goes First")
startGame(firstTurn)
#Function that asks user if they would like to start a new game
def startNewGame():
print("Would you like to start a new game?")
print(" 1. Yes")
print(" 2. No")
choice = int(input("Enter choice: "))
if choice == 1:
initalizeGame()
elif choice == 2:
print("Thank you for playing!\n")
exit()
else:
print("\nInvalid Option.\nPlease Try Again.\n")
startNewGame()
#Called when game is over and calls the start new game function
def gameOver():
startNewGame()
#Main function and start of the whole program
def main():
print("\nWelcome To Tic-Tac-Toe!\nAI Edition\n")
startNewGame()
main() #starts the program
|
from pathlib import Path
from PIL import Image
import numpy as np
import xml.etree.ElementTree as ET
import random
import pickle
import torch
import sys
from utils.config import cfg
anno_path = cfg.CUB.KPT_ANNO_DIR
img_path = cfg.CUB.ROOT_DIR + 'images'
ori_anno_path = cfg.CUB.ROOT_DIR + 'Annotations_original'
set_path = cfg.CUB.SET_SPLIT
cache_path = cfg.CUB.ROOT_DIR + 'cache'
KPT_NAMES = {
'Flycatcher': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Gull': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Sparrow': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Tern': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Vireo': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Warbler': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Woodpecker': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat'],
'Wren': ['back', 'beak', 'belly', 'breast', 'crown', 'forehead', 'lefteye', 'leftleg', 'leftwing', 'nape', 'righteye', 'rightleg', 'rightwing', 'tail', 'throat']
}
class Cub_Voc:
def __init__(self, sets, obj_resize):
"""
:param sets: 'train' or 'test'
:param obj_resize: resized object size
"""
self.classes = cfg.CUB.CLASSES
self.kpt_len = [len(KPT_NAMES[_]) for _ in cfg.CUB.CLASSES]
self.classes_kpts = {cls: len(KPT_NAMES[cls]) for cls in self.classes}
self.anno_path = Path(anno_path)
self.img_path = Path(img_path)
self.ori_anno_path = Path(ori_anno_path)
self.obj_resize = obj_resize
self.sets = sets
        assert sets in ('train', 'test'), 'No match found for dataset {}'.format(sets)
cache_name = 'voc_db_' + sets + '.pkl'
self.cache_path = Path(cache_path)
self.cache_file = self.cache_path / cache_name
if self.cache_file.exists():
with self.cache_file.open(mode='rb') as f:
self.xml_list = pickle.load(f)
print('xml list loaded from {}'.format(self.cache_file))
else:
print('Caching xml list to {}...'.format(self.cache_file))
self.cache_path.mkdir(exist_ok=True, parents=True)
with np.load(set_path, allow_pickle=True) as f:
self.xml_list = f[sets]
before_filter = sum([len(k) for k in self.xml_list])
self.filter_list()
after_filter = sum([len(k) for k in self.xml_list])
with self.cache_file.open(mode='wb') as f:
pickle.dump(self.xml_list, f)
print('Filtered {} images to {}. Annotation saved.'.format(before_filter, after_filter))
def filter_list(self):
"""
Filter out 'truncated', 'occluded' and 'difficult' images following the practice of previous works.
        In addition, this dataset has uncleaned labels (in the person category). They are omitted as suggested by the README.
"""
for cls_id in range(len(self.classes)):
to_del = []
for xml_name in self.xml_list[cls_id]:
xml_comps = xml_name.split('/')[-1].strip('.xml').split('_')
ori_xml_name = '_'.join(xml_comps[:-1]) + '.xml'
voc_idx = int(xml_comps[-1])
xml_file = self.ori_anno_path / ori_xml_name
assert xml_file.exists(), '{} does not exist.'.format(xml_file)
tree = ET.parse(xml_file.open())
root = tree.getroot()
obj = root.findall('object')[voc_idx - 1]
difficult = obj.find('difficult')
if difficult is not None: difficult = int(difficult.text)
occluded = obj.find('occluded')
if occluded is not None: occluded = int(occluded.text)
truncated = obj.find('truncated')
if truncated is not None: truncated = int(truncated.text)
if difficult or occluded or truncated:
to_del.append(xml_name)
continue
# Exclude uncleaned images
if self.classes[cls_id] == 'person' and int(xml_comps[0]) > 2008:
to_del.append(xml_name)
continue
# Exclude overlapping images in Willow
#if self.sets == 'train' and (self.classes[cls_id] == 'motorbike' or self.classes[cls_id] == 'car') \
# and int(xml_comps[0]) == 2007:
# to_del.append(xml_name)
# continue
for x in to_del:
self.xml_list[cls_id].remove(x)
def get_pair(self, cls=None, shuffle=True):
"""
Randomly get a pair of objects from VOC-Berkeley keypoints dataset
:param cls: None for random class, or specify for a certain set
:param shuffle: random shuffle the keypoints
:return: (pair of data, groundtruth permutation matrix)
"""
if cls is None:
cls = random.randrange(0, len(self.classes))
elif type(cls) == str:
cls = self.classes.index(cls)
assert type(cls) == int and 0 <= cls < len(self.classes)
anno_pair = []
for xml_name in random.sample(self.xml_list[cls], 2):
anno_dict = self.__get_anno_dict(xml_name, cls)
if shuffle:
random.shuffle(anno_dict['keypoints'])
anno_pair.append(anno_dict)
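        # Ground-truth permutation: perm_mat[i, j] = 1 iff keypoint i of the first
        # annotation and keypoint j of the second refer to the same named keypoint.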
perm_mat = np.zeros([len(_['keypoints']) for _ in anno_pair], dtype=np.float32)
row_list = []
col_list = []
for i, keypoint in enumerate(anno_pair[0]['keypoints']):
for j, _keypoint in enumerate(anno_pair[1]['keypoints']):
if keypoint['name'] == _keypoint['name']:
perm_mat[i, j] = 1
row_list.append(i)
col_list.append(j)
break
row_list.sort()
col_list.sort()
perm_mat = perm_mat[row_list, :]
perm_mat = perm_mat[:, col_list]
anno_pair[0]['keypoints'] = [anno_pair[0]['keypoints'][i] for i in row_list]
anno_pair[1]['keypoints'] = [anno_pair[1]['keypoints'][j] for j in col_list]
return anno_pair, perm_mat
def __get_anno_dict(self, xml_name, cls):
"""
Get an annotation dict from xml file
"""
xml_file = self.anno_path / xml_name
assert xml_file.exists(), '{} does not exist.'.format(xml_file)
# print("------------------------------------xml file----------------------")
# print(xml_file)
# print("------------------------------------xml file----------------------")
tree = ET.parse(xml_file.open())
root = tree.getroot()
img_name = root.find('./dir_out_cub').text
img_file = self.img_path / img_name
bounds = root.find('./visible_bounds').attrib
h = float(bounds['height'])
w = float(bounds['width'])
xmin = float(bounds['xmin'])
ymin = float(bounds['ymin'])
with Image.open(str(img_file)) as img:
ori_sizes = img.size
# obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
if(((xmin + w) > ori_sizes[0]) or ((ymin + h) > ori_sizes[1])):
w = ori_sizes[0] - xmin
h = ori_sizes[1] - ymin
obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, w, h))
else:
obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
# try:
# obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
# except ValueError:
# obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, ori_sizes[1] - xmin, ori_sizes[0] - ymin))
# w = ori_sizes[0]
# h = ori_sizes[1]
# obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
# obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
keypoint_list = []
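        # Shift each keypoint into the cropped box's frame (origin at xmin, ymin)
        # and rescale it to the resized object resolution.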
for keypoint in root.findall('./keypoints/keypoint'):
attr = keypoint.attrib
attr['x'] = (float(attr['x']) - xmin) * self.obj_resize[0] / w
attr['y'] = (float(attr['y']) - ymin) * self.obj_resize[1] / h
keypoint_list.append(attr)
anno_dict = dict()
anno_dict['image'] = obj
anno_dict['keypoints'] = keypoint_list
anno_dict['bounds'] = xmin, ymin, w, h
anno_dict['ori_sizes'] = ori_sizes
anno_dict['cls'] = self.classes[cls]
return anno_dict
if __name__ == '__main__':
dataset = Cub_Voc('train', (256, 256))
a = dataset.get_pair()
pass
|
# import section
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import pyttsx3
import pywhatkit
import pyjokes
import rotatescreen
import os
import time  # used by the screen-rotation command below
import PyPDF2
from textblob import TextBlob
import platform
import calendar
import cowsay
from translate import Translator
import sounddevice
from scipy.io.wavfile import write
from speedtest import Speedtest
import psutil
print('Initializing Julie')
# variables section
home = 'Panchagarh'
live_in = 'Dinajpur'
boss = 'Sir'
ai_name = 'Julie'
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# speak function
def speak(text):
engine.say(text)
engine.runAndWait()
# wishMe function
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak('Good morning sir')
elif hour>=12 and hour<18:
speak('Good afternoon sir')
else:
speak('Good evening sir')
speak('How can I help you')
# command taking function
def takeCommand():
    query = ""  # fallback value so the function always returns a string
    r = sr.Recognizer()
with sr.Microphone() as source:
print('Listening...')
audio = r.listen(source)
try :
print('Recognizing...')
query = r.recognize_google(audio, language= 'en-in')
query = query.lower()
print(f"User said: {query}\n")
except Exception as e:
print(e)
print("Say that again please")
return query
# programme start
speak('Initializing')
speak(ai_name)
wishMe()
# if elif section
while True:
query = takeCommand()
print(query)
if 'wikipedia' in query:
speak('Searching wikipedia...')
query = query.replace('wikipedia', '')
results = wikipedia.summary(query, sentences=2)
print(results)
speak(results)
elif 'open youtube' in query.lower():
speak('Opening youtube')
url = 'youtube.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open facebook' in query.lower():
speak('Opening facebook')
url = 'facebook.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open google' in query.lower():
speak('Opening google')
url = 'google.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open stackoverflow' in query.lower():
speak('Opening stackoverflow')
url = 'stackoverflow.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'joke' in query:
speak(pyjokes.get_joke())
elif 'play' in query:
song = query.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
elif 'time' in query:
time = datetime.datetime.now().strftime('%I:%M %p')
speak('Current time is ' + time)
elif 'who is' in query:
speak('Searching wikipedia...')
query = query.replace('who is', '')
results = wikipedia.summary(query, sentences=2)
print(results)
speak(results)
elif "where is" in query:
query = query.replace("where is", "")
location = query
speak("User asked to Locate")
speak(location)
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open('https://www.google.com/maps/place/' + location)
elif 'go on a date' in query:
speak('sorry sir, I can not go with you, because i am an AI')
elif 'who are you' in query:
speak('i am an ai assistant created by Jibon')
elif 'created you' in query:
speak('i have been created by Jibon')
elif 'are you single' in query:
speak('I am finding the perfect one')
elif 'be my girlfriend' in query:
speak('Maybe you should give me some time')
elif 'how are you' in query:
speak("I am fine, Thank you")
speak("How are you, Sir")
elif 'fine' in query or "good" in query:
speak("It's good to know that your fine")
elif 'exit' in query or 'stop' in query:
speak("Thanks for giving me your time")
exit()
elif 'search' in query or 'play' in query:
query = query.replace("search", "")
query = query.replace("play", "")
webbrowser.open(query)
elif "who i am" in query:
speak("If you can talk then definitely your human.")
elif "why you came to world" in query:
speak("Thanks to Jibon. further It's a secret")
elif ai_name in query:
wishMe()
speak(f"{ai_name} 1 point o in your service Mister")
elif "can you help me" in query:
speak("of course sir, it is my pleasure")
elif "my favourite song" in query:
speak("your favourite song is mood")
elif 'hi' in query:
speak('hello sir')
elif 'rotate the screen' in query:
speak('ok sir')
screen = rotatescreen.get_primary_display()
for i in range(13):
time.sleep(1)
screen.rotate_to(i * 90 % 360)
elif 'what is your name' in query:
speak('My friends call me')
speak(ai_name)
elif 'exit' in query or 'close' in query:
speak('Thanks for giving me your time')
exit()
elif 'say whatever i write' in query:
while True:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
text = input('Say something:')
engine.say(text)
engine.runAndWait()
if 'stop' in text:
exit()
elif 'exit' in text:
exit()
elif 'my songs' in query:
        speak('Here you go with music')
music_dir = 'links'
songs = os.listdir(music_dir)
print(songs)
        os.startfile(os.path.join(music_dir, songs[0]))
elif 'reason for you' in query.lower():
speak("I was created as the first big project by Mister Jibon")
elif 'how to hack' in query:
speak("no sir, you didn't programmed me to do these things")
elif 'good morning' in query:
speak('Good morning sir')
elif 'i love you' in query:
speak("It's hard to understand")
elif 'is love' in query:
        speak('It is the 7th sense that destroys all other senses')
elif "why you came to world" in query:
speak("Thanks to Jibon. further It's a secret")
elif 'want to change your name' in query:
speak('If you want to change my name you have to go to the variable section and change the ai name.')
elif 'think about love' in query:
        speak('Love is a useless thing. It will destroy your life')
elif 'where is my home' in query:
speak(f'Your home is in {home}')
elif 'i live' in query:
speak(f'You live in {live_in}')
elif 'know hacking' in query:
speak("No, I don't")
elif 'pdf reader' in query:
speak('opening pdf reader')
book = open("name.pdf", "rb")
pdfReader = PyPDF2.PdfFileReader(book)
pages = pdfReader.numPages
print(pages)
elif 'open spell checker' in query:
a = input("Input text:")
print('Your word:' + str(a))
b = TextBlob(a)
print('Corrected text:' + str(b.correct()))
elif 'system information' in query:
myS = platform.uname()
print(f'System: {myS.system}')
print(f'Node name: {myS.node}')
print(f'Release: {myS.release}')
print(f'Version: {myS.version}')
print(f'Machine: {myS.machine}')
print(f'Processor: {myS.processor}')
elif 'a pattern' in query:
def pattern(n):
for i in range(n):
print((i+1)*'*')
for i in range(n-1,0,-1):
print(i*'*')
pattern(5)
elif 'open calendar' in query:
try:
speak('tell me the number of the year')
y = int(takeCommand())
speak('Tell me the number of the month')
m = int(takeCommand())
print(calendar.month(y, m))
except Exception as e:
print(e)
speak("Sorry sir, I didn't understand")
elif 'cowsay' in query:
cowsay.daemon(input('Enter word:'))
elif 'record voice' in query:
fs = 44100
sc = int(input("Enter the duration in seconds: "))
print("Recording...\n")
recordVoice = sounddevice.rec(int(sc * fs),samplerate = fs, channels = 2)
sounddevice.wait()
write("out.wav",fs,recordVoice)
print("Finished...\nPlease check it")
elif 'check the internet speed' in query:
st = Speedtest()
speak("Checking speed....")
print("Your connection's download speed is:", st.download())
speak("Your connection's download speed is:" + str(st.download()))
print("Your connection's upload speed is:", st.upload())
speak("Your connection's upload speed is:" + str(st.upload()))
elif "check battery percentage" in query:
battery = psutil.sensors_battery()
percent = str(battery.percent)
print("Your battery is running on "+percent+"% battery level")
speak("Your battery is running on "+percent+"% battery level")
elif "open obs" in query:
os.startfile("C:\\Program Files\\obs-studio\\bin\\64bit\\obs64.exe")
elif 'open canva' in query:
os.startfile("C:\\Users\\Dinesh\\AppData\\Local\\Programs\\Canva\\Canva.exe")
else:
pass
|
from spacemapping_curve.quadtree import *
from spacemapping_curve.distance import FuckYouBernhardFunction
from spacemapping_curve.distance import DistanceToCurve
from spacemapping_curve.hilbertcurve import draw_hc
import Rhino.Geometry as rg
pts_1 = [
rg.Point3d(-100, 0, 0),
rg.Point3d(-50, 50, 0),
rg.Point3d(50, -50, 0),
rg.Point3d(100, 50, 0),
rg.Point3d(150, 150, 0)
]
pts_2 = [
rg.Point3d(100, 0, 0),
rg.Point3d(50, 50, 0),
rg.Point3d(-50, -50, 0),
rg.Point3d(100, 50, 0),
rg.Point3d(0, 150, 0)
]
crv_1 = rg.NurbsCurve.CreateControlPointCurve(pts_1)
crv_2 = rg.NurbsCurve.CreateControlPointCurve(pts_2)
crv_distance = DistanceToCurve([crv_1, crv_2])
# gyroid = FuckYouBernhardFunction()
draw_hc(200, 10, crv_distance)
|
# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
""" SDFG API sample that showcases state machine creation and the `simplify` call, which will fuse them. """
import dace
import numpy as np
# Define a symbol to be used in the SDFG
T = dace.symbol('T')
# Create an empty SDFG
sdfg = dace.SDFG('cflow')
# Add global arrays and the symbol from above to the SDFG
sdfg.add_array('A', [2], dace.float32)
sdfg.add_array('B', [2], dace.float32)
sdfg.add_symbol('T', T.dtype)
# Sample state contents
def mystate(state, src, dst):
# Create access nodes for reading and writing
src_node = state.add_read(src)
dst_node = state.add_write(dst)
# Create a map in which a tasklet will reside
me, mx = state.add_map('aaa', dict(i='0:2'))
# Create the tasklet
tasklet = state.add_tasklet('aaa2', {'a'}, {'b'}, 'b = a')
# input path (src->me->tasklet[a])
state.add_memlet_path(src_node, me, tasklet, dst_conn='a', memlet=dace.Memlet(data=src, subset='i'))
# output path (tasklet[b]->mx->dst)
state.add_memlet_path(tasklet, mx, dst_node, src_conn='b', memlet=dace.Memlet(data=dst, subset='i'))
# End state contents
def endstate(state):
# Only reading A into the tasklet
A = state.add_read('A')
t = state.add_tasklet('endtask', {'a'}, {}, 'printf("done %f\\n", a)')
state.add_edge(A, None, t, 'a', dace.Memlet(data='A', subset='0'))
# State construction
state0 = sdfg.add_state('s0')
mystate(state0, 'A', 'B')
# For an easier creation of loops, see the `sdfg.add_loop` helper function.
guard = sdfg.add_state('guard')
loopstate0 = sdfg.add_state('loops0')
mystate(loopstate0, 'A', 'B')
loopstate1 = sdfg.add_state('loops1')
mystate(loopstate1, 'B', 'A')
state2 = sdfg.add_state('s2')
endstate(state2)
# State connection (control flow)
# Note: dataflow (arrays) CAN affect control flow assignments and conditions,
# but not the other way around (you cannot change an interstate variable
# inside a state). The following code works as well:
#sdfg.add_edge(state0, guard, dace.InterstateEdge(assignments=dict(k='A[0]')))
# Loop initialization (k=0)
sdfg.add_edge(state0, guard, dace.InterstateEdge(assignments=dict(k='0')))
# Loop condition (k < T / k >= T)
sdfg.add_edge(guard, loopstate0, dace.InterstateEdge('k < T'))
sdfg.add_edge(guard, state2, dace.InterstateEdge('k >= T'))
# Loop incrementation (k++)
sdfg.add_edge(loopstate1, guard, dace.InterstateEdge(assignments=dict(k='k+1')))
# Loop-internal interstate edges
sdfg.add_edge(loopstate0, loopstate1, dace.InterstateEdge())
# Validate correctness of initial SDFG
sdfg.validate()
# Fuses redundant states and removes unnecessary transient arrays
sdfg.simplify()
######################################
if __name__ == '__main__':
a = np.random.rand(2).astype(np.float32)
b = np.random.rand(2).astype(np.float32)
print(a, b)
# Don't forget the symbols!
sdfg(A=a, B=b, T=5)
print(b - a)
|
import logging
from collections import namedtuple
from pathlib import Path
import requests
from tqdm import tqdm
logger = logging.getLogger(__name__)
SingleLineFixExample = namedtuple(
"SingleLineFixExample", ("id", "buggy_code", "fixed_line", "lineno")
)
def download(url, out_path, *, size_estimate=None):
out_path = Path(out_path)
if out_path.exists():
logger.warning(f"File {out_path} already exists, skip download.")
return
try:
response = requests.get(url, stream=True)
total = response.headers.get("Content-Length")
logger.info(f"Downloading {url} to {out_path}.")
with open(out_path, "wb") as out_file, tqdm(
desc="Progress",
total=total and int(total),
disable=not logger.isEnabledFor(logging.INFO),
unit="b",
unit_scale=True,
postfix=not total
and size_estimate
and f"(should be around {size_estimate}Mb)",
) as progress:
for chunk in response.iter_content(chunk_size=2 ** 18):
out_file.write(chunk)
progress.update(len(chunk))
logger.info("Done!")
except BaseException as e:
logger.error(f"Something went wrong, deleting {out_path}.")
if out_path.exists():
out_path.unlink()
raise e from None
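# Hedged usage sketch (the URL and size below are placeholders, not real project endpoints):
#   download("https://example.com/data/archive.tar.gz", "archive.tar.gz", size_estimate=150)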
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import unittest
import sys
from mock import MagicMock
# Stub out the SDK before importing the module under test so the import succeeds
# even when spotinst_sdk is not installed.
sys.modules['spotinst_sdk'] = MagicMock()
from ansible_collections.spot.cloud_modules.plugins.modules.event_subscription import expand_subscription_request
class MockModule:
def __init__(self, input_dict):
self.params = input_dict
class TestSpotinstEventSubscription(unittest.TestCase):
"""Unit test for the event_subscription module"""
def test_expand_subscription_request(self):
"""Format input into proper json structure"""
input_dict = dict(
resource_id="test_resource_id",
protocol="test_protocol",
endpoint="test_endpoint",
event_type="test_event_type",
event_format="test_event_format"
)
module = MockModule(input_dict=input_dict)
actual_event_subscription = expand_subscription_request(module=module)
self.assertEqual("test_resource_id", actual_event_subscription.resource_id)
self.assertEqual("test_protocol", actual_event_subscription.protocol)
self.assertEqual("test_endpoint", actual_event_subscription.endpoint)
self.assertEqual("test_event_type", actual_event_subscription.event_type)
self.assertEqual("test_event_format", actual_event_subscription.event_format)
|
#!/usr/bin/env python
"""Import required modules."""
from traininglabs import defaultlab
def main():
"""Main function."""
defaultlab.create_lab()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
Snaps test suite including openstack client tests, api tests and
integration tests.
add_openstack_client_tests: for connection_check
add_openstack_api_tests: for api_check
add_openstack_integration_tests: for snaps_smoke
"""
import logging
from snaps.openstack.tests.create_flavor_tests import (
CreateFlavorTests)
from snaps.openstack.tests.create_image_tests import (
CreateImageSuccessTests, CreateImageNegativeTests,
CreateMultiPartImageTests)
from snaps.openstack.tests.create_instance_tests import (
CreateInstanceOnComputeHost,
CreateInstanceSimpleTests, InstanceSecurityGroupTests,
CreateInstancePortManipulationTests, SimpleHealthCheck,
CreateInstanceFromThreePartImage, CreateInstanceTwoNetTests,
CreateInstanceVolumeTests)
from snaps.openstack.tests.create_keypairs_tests import (
CreateKeypairsTests, CreateKeypairsCleanupTests)
from snaps.openstack.tests.create_network_tests import (
CreateNetworkSuccessTests)
from snaps.openstack.tests.create_project_tests import (
CreateProjectSuccessTests, CreateProjectUserTests)
from snaps.openstack.tests.create_qos_tests import (
CreateQoSTests)
from snaps.openstack.tests.create_router_tests import (
CreateRouterSuccessTests, CreateRouterNegativeTests)
from snaps.openstack.tests.create_security_group_tests import (
CreateSecurityGroupTests)
from snaps.openstack.tests.create_stack_tests import (
CreateStackSuccessTests, CreateStackNegativeTests,
CreateStackFlavorTests, CreateStackFloatingIpTests,
CreateStackKeypairTests, CreateStackVolumeTests,
CreateStackSecurityGroupTests)
from snaps.openstack.tests.create_user_tests import (
CreateUserSuccessTests)
from snaps.openstack.tests.create_volume_tests import (
CreateSimpleVolumeSuccessTests,
CreateVolumeWithTypeTests, CreateVolumeWithImageTests,
CreateSimpleVolumeFailureTests)
from snaps.openstack.tests.create_volume_type_tests import (
CreateSimpleVolumeTypeSuccessTests,
CreateVolumeTypeComplexTests)
from snaps.openstack.tests.os_source_file_test import (
OSComponentTestCase, OSIntegrationTestCase)
from snaps.openstack.utils.tests.cinder_utils_tests import (
CinderSmokeTests, CinderUtilsQoSTests, CinderUtilsSimpleVolumeTypeTests,
CinderUtilsAddEncryptionTests, CinderUtilsVolumeTypeCompleteTests,
CinderUtilsVolumeTests)
from snaps.openstack.utils.tests.glance_utils_tests import (
GlanceSmokeTests, GlanceUtilsTests)
from snaps.openstack.utils.tests.heat_utils_tests import (
HeatSmokeTests, HeatUtilsCreateSimpleStackTests,
HeatUtilsCreateComplexStackTests, HeatUtilsFlavorTests,
HeatUtilsKeypairTests, HeatUtilsSecurityGroupTests)
from snaps.openstack.utils.tests.keystone_utils_tests import (
KeystoneSmokeTests, KeystoneUtilsTests)
from snaps.openstack.utils.tests.neutron_utils_tests import (
NeutronSmokeTests, NeutronUtilsNetworkTests, NeutronUtilsSubnetTests,
NeutronUtilsRouterTests, NeutronUtilsSecurityGroupTests,
NeutronUtilsFloatingIpTests)
from snaps.openstack.utils.tests.nova_utils_tests import (
NovaSmokeTests, NovaUtilsKeypairTests, NovaUtilsFlavorTests,
NovaUtilsInstanceTests, NovaUtilsInstanceVolumeTests)
from snaps.provisioning.tests.ansible_utils_tests import (
AnsibleProvisioningTests)
def add_openstack_client_tests(suite, os_creds, ext_net_name,
use_keystone=True, log_level=logging.INFO):
"""
Adds tests written to exercise OpenStack client retrieval
:param suite: the unittest.TestSuite object to which to add the tests
    :param os_creds: an instance of OSCreds that holds the credentials
required by OpenStack
:param ext_net_name: the name of an external network on the cloud under
test
:param use_keystone: when True, tests requiring direct access to Keystone
are added as these need to be running on a host that
has access to the cloud's private network
:param log_level: the logging level
:return: None as the tests will be adding to the 'suite' parameter object
"""
# Basic connection tests
suite.addTest(
OSComponentTestCase.parameterize(
GlanceSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
if use_keystone:
suite.addTest(
OSComponentTestCase.parameterize(
KeystoneSmokeTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(
OSComponentTestCase.parameterize(
NeutronSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(
OSComponentTestCase.parameterize(
NovaSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(
OSComponentTestCase.parameterize(
HeatSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(
OSComponentTestCase.parameterize(
CinderSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
def add_openstack_api_tests(suite, os_creds, ext_net_name, use_keystone=True,
image_metadata=None, log_level=logging.INFO):
# pylint: disable=too-many-arguments
"""
Adds tests written to exercise all existing OpenStack APIs
:param suite: the unittest.TestSuite object to which to add the tests
:param os_creds: Instance of OSCreds that holds the credentials
required by OpenStack
:param ext_net_name: the name of an external network on the cloud under
test
:param use_keystone: when True, tests requiring direct access to Keystone
are added as these need to be running on a host that
has access to the cloud's private network
:param image_metadata: dict() object containing metadata for creating an
image with custom config
(see YAML files in examples/image-metadata)
:param log_level: the logging level
    :return: None as the tests will be added to the 'suite' parameter object
"""
# Tests the OpenStack API calls
if use_keystone:
suite.addTest(OSComponentTestCase.parameterize(
KeystoneUtilsTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
CreateUserSuccessTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
CreateProjectSuccessTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
CreateProjectUserTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
GlanceUtilsTests, os_creds=os_creds, ext_net_name=ext_net_name,
image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NeutronUtilsNetworkTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NeutronUtilsSubnetTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NeutronUtilsRouterTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NeutronUtilsSecurityGroupTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NeutronUtilsFloatingIpTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NovaUtilsKeypairTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NovaUtilsFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
NovaUtilsInstanceTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level, image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
NovaUtilsInstanceVolumeTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CreateFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
log_level=log_level))
suite.addTest(OSComponentTestCase.parameterize(
HeatUtilsCreateSimpleStackTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
HeatUtilsCreateComplexStackTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
HeatUtilsFlavorTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
HeatUtilsKeypairTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
HeatUtilsSecurityGroupTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CinderUtilsQoSTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CinderUtilsVolumeTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CinderUtilsSimpleVolumeTypeTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CinderUtilsAddEncryptionTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
suite.addTest(OSComponentTestCase.parameterize(
CinderUtilsVolumeTypeCompleteTests, os_creds=os_creds,
ext_net_name=ext_net_name, log_level=log_level,
image_metadata=image_metadata))
def add_openstack_integration_tests(suite, os_creds, ext_net_name,
use_keystone=True, flavor_metadata=None,
image_metadata=None, use_floating_ips=True,
netconf_override=None,
log_level=logging.INFO):
# pylint: disable=too-many-arguments
"""
    Adds tests written to exercise all long-running OpenStack integration tests,
    meaning they will create VM instances and potentially perform some SSH
    functions through floating IPs
:param suite: the unittest.TestSuite object to which to add the tests
    :param os_creds: an instance of OSCreds that holds the credentials
required by OpenStack
:param ext_net_name: the name of an external network on the cloud under
test
:param use_keystone: when True, tests requiring direct access to Keystone
are added as these need to be running on a host that
has access to the cloud's private network
:param image_metadata: dict() object containing metadata for creating an
image with custom config
(see YAML files in examples/image-metadata)
:param flavor_metadata: dict() object containing the metadata required by
your flavor based on your configuration:
(i.e. {'hw:mem_page_size': 'large'})
:param use_floating_ips: when true, all tests requiring Floating IPs will
be added to the suite
:param netconf_override: dict() containing the reconfigured network_type,
physical_network and segmentation_id
:param log_level: the logging level
    :return: None as the tests will be added to the 'suite' parameter object
"""
# Tests the OpenStack API calls via a creator. If use_keystone, objects
# will be created with a custom user and project
# Creator Object tests
suite.addTest(OSIntegrationTestCase.parameterize(
CreateSecurityGroupTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateImageSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateImageNegativeTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateMultiPartImageTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateKeypairsTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateKeypairsCleanupTests, os_creds=os_creds,
ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateNetworkSuccessTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateRouterSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateRouterNegativeTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateQoSTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateSimpleVolumeTypeSuccessTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateVolumeTypeComplexTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateSimpleVolumeSuccessTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateSimpleVolumeFailureTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateVolumeWithTypeTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateVolumeWithImageTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
# VM Instances
suite.addTest(OSIntegrationTestCase.parameterize(
SimpleHealthCheck, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstanceTwoNetTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstanceSimpleTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstancePortManipulationTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
InstanceSecurityGroupTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstanceOnComputeHost, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstanceFromThreePartImage, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateInstanceVolumeTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
netconf_override=netconf_override, log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackVolumeTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackKeypairTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackSecurityGroupTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackNegativeTests, os_creds=os_creds, ext_net_name=ext_net_name,
use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
if use_floating_ips:
# https://jira.opnfv.org/browse/SNAPS-322
# suite.addTest(OSIntegrationTestCase.parameterize(
# CreateInstanceSingleNetworkTests, os_creds=os_creds,
# ext_net_name=ext_net_name, use_keystone=use_keystone,
# flavor_metadata=flavor_metadata, image_metadata=image_metadata,
# log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
CreateStackFloatingIpTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
suite.addTest(OSIntegrationTestCase.parameterize(
AnsibleProvisioningTests, os_creds=os_creds,
ext_net_name=ext_net_name, use_keystone=use_keystone,
flavor_metadata=flavor_metadata, image_metadata=image_metadata,
log_level=log_level))
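# Illustrative usage of the suite builders above (a sketch; the OSCreds instance and
# external network name are placeholders, not values defined in this module):
#
#   import unittest
#   suite = unittest.TestSuite()
#   add_openstack_client_tests(suite, os_creds=my_os_creds, ext_net_name='ext-net')
#   add_openstack_api_tests(suite, os_creds=my_os_creds, ext_net_name='ext-net')
#   unittest.TextTestRunner(verbosity=2).run(suite)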
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shlex
import logging
logger = logging.getLogger(__name__)
from pdo.client.SchemeExpression import SchemeExpression
from pdo.client.controller.commands.send import send_to_contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def __command_exchange__(state, bindings, pargs) :
"""controller command to interact with an exchange contract
"""
parser = argparse.ArgumentParser(prog='exchange')
parser.add_argument('-e', '--enclave', help='URL of the enclave service to use', type=str)
parser.add_argument('-f', '--save_file', help='File where contract data is stored', type=str)
parser.add_argument('-q', '--quiet', help='Suppress printing the result', action='store_true')
parser.add_argument('-w', '--wait', help='Wait for the transaction to commit', action='store_true')
subparsers = parser.add_subparsers(dest='command')
subparser = subparsers.add_parser('get_verifying_key')
subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)
subparser = subparsers.add_parser('get_offered_asset')
subparser = subparsers.add_parser('get_requested_asset')
subparser = subparsers.add_parser('initialize')
subparser.add_argument('-r', '--root', help='key for the root authority for requested issuer', type=str, required=True)
subparser.add_argument('-t', '--type_id', help='contract identifier for the requested asset type', type=str, required=True)
subparser.add_argument('-o', '--owner', help='identity of the asset owner; ECDSA key', type=str, default="")
subparser.add_argument('-c', '--count', help='amount requested', type=int, required=True)
subparser = subparsers.add_parser('offer')
subparser.add_argument('-a', '--asset', help='serialized escrowed asset', type=str, required=True)
subparser = subparsers.add_parser('claim_offer')
subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)
subparser = subparsers.add_parser('exchange')
subparser.add_argument('-a', '--asset', help='serialized escrowed asset', type=str, required=True)
subparser = subparsers.add_parser('claim_exchange')
subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)
subparser = subparsers.add_parser('cancel')
subparser = subparsers.add_parser('cancel_attestation')
subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)
options = parser.parse_args(pargs)
extraparams={'quiet' : options.quiet, 'wait' : options.wait}
# -------------------------------------------------------
if options.command == 'get_verifying_key' :
extraparams['commit'] = False
message = "'(get-verifying-key)"
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
if result and options.symbol :
bindings.bind(options.symbol, result)
return
# -------------------------------------------------------
if options.command == 'get_offered_asset' :
extraparams['commit'] = False
message = "'(examine-offered-asset)"
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'get_requested_asset' :
extraparams['commit'] = False
message = "'(examine-requested-asset)"
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'initialize' :
asset_request = "(\"{0}\" {1} \"{2}\")".format(options.type_id, options.count, options.owner)
message = "'(initialize {0} \"{1}\")".format(asset_request, options.root)
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'offer' :
message = "'(offer-asset {0})".format(options.asset)
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'claim_offer' :
extraparams['commit'] = False
message = "'(claim-offer)"
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
if result and options.symbol :
bindings.bind(options.symbol, result)
return
# -------------------------------------------------------
if options.command == 'exchange' :
message = "'(exchange-asset {0})".format(options.asset)
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'claim_exchange' :
extraparams['commit'] = False
message = "'(claim-exchange)"
result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
if result and options.symbol :
bindings.bind(options.symbol, result)
return
# -------------------------------------------------------
if options.command == 'cancel' :
message = "'(cancel)"
send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
return
# -------------------------------------------------------
if options.command == 'cancel_attestation' :
extraparams['commit'] = False
message = "'(cancel-attestation)"
        result = send_to_contract(state, options.save_file, options.enclave, message, **extraparams)
if result and options.symbol :
bindings.bind(options.symbol, result)
return
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def do_exchange(self, args) :
"""
exchange -- invoke methods from the exchange contract
"""
pargs = shlex.split(self.bindings.expand(args))
try :
__command_exchange__(self.state, self.bindings, pargs)
except SystemExit as se :
if se.code > 0 : print('An error occurred processing {0}: {1}'.format(args, str(se)))
return
except Exception as e :
print('An error occurred processing {0}: {1}'.format(args, str(e)))
return
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def load_commands(cmdclass) :
setattr(cmdclass, 'do_exchange', do_exchange)
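## Illustrative shell usage (a sketch; the contract file name and binding symbols
## below are placeholders, and the exact invocation depends on the surrounding
## pdo client shell environment):
##
##   exchange -f exchange_contract.pdo get_verifying_key -s offered_vk
##   exchange -f exchange_contract.pdo offer -a <serialized escrowed asset>
##   exchange -f exchange_contract.pdo claim_offer -s offer_attestation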
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import re
from oslo_log import log
from jacket.storage.i18n import _LI
LOG = log.getLogger(__name__)
def get_friendly_zone_name(zoning_policy, initiator, target,
host_name, storage_system, zone_name_prefix,
supported_chars):
"""Utility function implementation of _get_friendly_zone_name.
Get friendly zone name is used to form the zone name
based on the details provided by the caller
    :param zoning_policy - determines whether the zoning policy is
        initiator-target or initiator
:param initiator - initiator WWN
:param target - target WWN
:param host_name - Host name returned from Volume Driver
:param storage_system - Storage name returned from Volume Driver
:param zone_name_prefix - user defined zone prefix configured
in storage.conf
:param supported_chars - Supported character set of FC switch vendor.
Example: 'abc123_-$'. These are defined in the FC zone drivers.
"""
if host_name is None:
host_name = ''
if storage_system is None:
storage_system = ''
if zoning_policy == 'initiator-target':
host_name = host_name[:14]
storage_system = storage_system[:14]
if len(host_name) > 0 and len(storage_system) > 0:
zone_name = (host_name + "_"
+ initiator.replace(':', '') + "_"
+ storage_system + "_"
+ target.replace(':', ''))
else:
zone_name = (zone_name_prefix
+ initiator.replace(':', '')
+ target.replace(':', ''))
LOG.info(_LI("Zone name created using prefix because either "
"host name or storage system is none."))
else:
host_name = host_name[:47]
if len(host_name) > 0:
zone_name = (host_name + "_"
+ initiator.replace(':', ''))
else:
zone_name = (zone_name_prefix
+ initiator.replace(':', ''))
LOG.info(_LI("Zone name created using prefix because host "
"name is none."))
LOG.info(_LI("Friendly zone name after forming: %(zonename)s"),
{'zonename': zone_name})
zone_name = re.sub('[^%s]' % supported_chars, '', zone_name)
return zone_name
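# Worked example (illustrative only; the WWNs and names below are made up):
#   get_friendly_zone_name(
#       'initiator-target',
#       '10:00:00:90:fa:34:56:78', '20:00:00:25:b5:ab:cd:ef',
#       'compute-1', 'array-1', 'openstack', 'abcdefghijklmnopqrstuvwxyz0123456789_-')
#   returns 'compute-1_10000090fa345678_array-1_20000025b5abcdef'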
|
def solve(n: int) -> int:
if n == 0:
return 1
else:
return n * solve(n - 1)
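# Example usage: solve(5) computes 5! recursively and returns 120; solve(0) is the
# base case and returns 1.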
|
import urllib.request
from bs4 import BeautifulSoup
from pprint import pprint
import json
url = 'http://www.thesaurus.com/browse/'
words = ['sentence']
def lookForSynonyms(word):
try:
synonyms = []
content = urllib.request.urlopen(url + word)
#print(fp.geturl())
#print(fp.info())
#print(content.getcode())
data = content.read().decode('utf-8')
content.close()
soup = BeautifulSoup(data, 'html.parser')
#print(soup.get_text())
results = soup.find_all("script")
#print(len(results))
result = results[22].string
json_txt = result.replace("window.INITIAL_STATE = ","").replace("};","}")
structure = json.loads(json_txt)
for synonym in structure['searchData']['tunaApiData']['posTabs']:
for term in synonym['synonyms']:
if int(term['similarity']) == 100:
synonyms.append(term['term'])
return synonyms
except urllib.error.HTTPError:
        raise
except Exception as e1:
print(f"There is an error in lookForSynonyms: {str(e1)}")
try:
for word in words:
print("Synonyms for <" + word + ">:")
pprint(lookForSynonyms(word))
except Exception as e_thesaurus:
print(f"There is an error: {str(e_thesaurus)}")
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_test_api
class IsolatedTestApi(recipe_test_api.RecipeTestApi):
def properties(self,
server='https://example.isolateserver.appspot.com',
version='test_version'):
return self.m.properties(**{
'$recipe_engine/isolated': {
'server': server,
'version': version,
},
})
def archive(self):
return self.m.raw_io.output_text('[dummy hash]')
|
import argparse
import gc
import time
import torch.backends.cudnn as cudnn
from models import create_model
from entry import experiment_main
from load_config import load_config
from typing import List, Union
import numpy as np
import decord
def count_experiments(series: Union[dict, List[dict]]) -> int:
if type(series) != list:
series = [series]
n = 0
for item in series:
if type(item) is list:
# Implicit series
n += count_experiments(item)
elif 'series' in item:
# Composite experiment with explicit series
n += 1 + sum(count_experiments(load_config(path))
for path in item['series'])
else:
# Single experiment
n += 1
if 'base_experiment' in item:
n += count_experiments(load_config(item['base_experiment']))
return n
def run_series(series: Union[dict, List[dict]],
exp_no: int,
**kwargs) -> int:
if type(series) != list:
series = [series]
for config in series:
if type(config) is list:
exp_no = run_series(config, exp_no=exp_no, **kwargs)
else:
experiment_main(config, dict(**kwargs, exp_no=exp_no))
gc.collect()
exp_no += 1
return exp_no
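# Illustrative sketch of how nested series are counted and run (the config shape
# below is hypothetical and only mirrors the type checks in the two functions above):
#
#   series = [
#       {'name': 'exp_a'},                       # single experiment -> counts as 1
#       [{'name': 'exp_b'}, {'name': 'exp_c'}],  # implicit series   -> counts as 2
#   ]
#   count_experiments(series)                      # -> 3
#   run_series(series, exp_no=0, save_dir='logs')  # runs exp_a, exp_b, exp_c in order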
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='thavlik portfolio entrypoint')
parser.add_argument('--config', '-c',
dest="config",
metavar='FILE',
help='path to the experiment config file',
default='experiments/all.yaml')
parser.add_argument('--save-dir',
dest="save_dir",
metavar='SAVE_DIR',
help='save directory for logs and screenshots',
default='logs')
parser.add_argument('--num-samples',
dest="num_samples",
metavar='NUM_SAMPLES',
type=int,
help='number of times to repeat the experiment (default to experiment config num_samples)',
default=None)
parser.add_argument('--num-threads',
dest="num_threads",
metavar='NUM_THREADS',
type=int,
help='number of cpu threads to use (defaults to 4)',
default=4)
parser.add_argument('--visdom-host',
dest="visdom_host",
metavar='VISDOM_HOST',
type=str,
help='visdom host name',
default='https://visdom.foldy.dev')
parser.add_argument('--visdom-port',
dest="visdom_port",
metavar='VISDOM_PORT',
type=int,
help='visdom port',
default=80)
parser.add_argument('--smoke-test',
dest="smoke_test",
metavar='DRY_RUN',
help='smoke test mode (stop after a couple steps)',
default=False)
parser.add_argument('--gpu',
dest="gpu",
metavar='GPU_NUM',
help='gpu number to use',
default=0)
parser.add_argument('--validate',
dest="validate",
metavar='VALIDATE',
help='validation mode',
default=False)
args = parser.parse_args()
if args.smoke_test:
print('Executing smoke test - training will stop after a couple steps.')
cudnn.deterministic = True
cudnn.benchmark = True
decord.bridge.set_bridge('torch')
config = load_config(args.config)
total_experiments = count_experiments(config)
num_samples = args.num_samples or 1
deltas = []
for i in range(num_samples):
print(f'Running sample {i+1}/{num_samples} on cuda:{args.gpu}')
start = time.time()
run_series(config,
save_dir=args.save_dir,
num_threads=args.num_threads,
gpu=args.gpu,
exp_no=0,
total_experiments=total_experiments,
smoke_test=args.smoke_test,
validate=args.validate)
delta = time.time() - start
deltas.append(delta)
print(f'Sample {i+1}/{num_samples} completed in {delta} seconds')
print(f'Each sample took {np.mean(deltas)} seconds on average')
print(f"============== Completed ==============")
|
import contextlib
import dataclasses
import logging
import os
import pkg_resources
import tempfile
import typing as t
import requests
from databind.core import annotations as A
from shut.data import load_string
from shut.model.requirements import Requirement
from shut.renderers.core import Renderer
from shut.test.base import BaseTestDriver, Runtime, TestRun, run_program_as_testcase
from shut.utils.io.virtual import VirtualFiles
if t.TYPE_CHECKING:
from shut.model.package import PackageModel
log = logging.getLogger(__name__)
@dataclasses.dataclass
class RcfileSettings:
DEFAULT_NAME = '.pylintrc'
#: A template to use for the `.pylintrc` file. Must either be the name of a template delivered
#: with shut (currently this is only `"shut"`) or a URL.
template: t.Optional[str] = None
#: If enabled, the rcfile will be rendered into the project directory on `shut pkg update`.
render: bool = False
#: The name under which to render the rcfile.
name: t.Optional[str] = None
# TODO (@NiklasRosenstein): Support overrides in the rcfile template.
def __post_init__(self) -> None:
if self.render and not self.template:
raise ValueError(f'RcfileSettings.template must be set if RcfileSettings.render is enabled')
def load_template(self) -> str:
if not self.template:
raise ValueError(f'RcfileSettings.template is not set')
if self.template.startswith('http://') or self.template.startswith('https://'):
response = requests.get(self.template)
response.raise_for_status()
return response.text
try:
return load_string(f'pylintrc_templates/{self.template}.ini')
except FileNotFoundError:
raise ValueError(f'.pylintrc template {self.template!r} does not exist')
@dataclasses.dataclass
class PylintTestDriver(BaseTestDriver):
"""
Runs Pylint.
"""
NAME = 'pylint'
#: Environment variables when calling PyLint.
env: t.Dict[str, str] = dataclasses.field(default_factory=dict)
#: Additional arguments when calling Pylint.
args: t.List[str] = dataclasses.field(default_factory=list)
  #: The pylint RC file to use. If not specified, no explicit rcfile is passed to the pylint CLI.
rcfile: t.Optional[str] = None
def test_package(self, package: 'PackageModel', runtime: Runtime, capture: bool) -> TestRun:
directory = package.get_directory()
metadata = package.get_python_package_metadata()
path = metadata.package_directory if not metadata.is_single_module else metadata.filename
command = runtime.python + ['-m', 'pylint', os.path.relpath(path, directory)] + self.args
if self.rcfile:
command += ['--rcfile', self.rcfile]
test_run = run_program_as_testcase(
environment=runtime.get_environment(),
filename=package.get_source_directory(),
test_run_name='pylint',
command=command,
env=self.env,
cwd=package.get_directory(),
capture=capture)
return test_run
def get_test_requirements(self) -> t.List[Requirement]:
return [Requirement('pylint')]
|
from pyramid import pyramid
import numpy
from sp0Filters import sp0Filters
from sp1Filters import sp1Filters
from sp3Filters import sp3Filters
from sp5Filters import sp5Filters
import os
from maxPyrHt import maxPyrHt
from corrDn import corrDn
import math
from LB2idx import LB2idx
import matplotlib
from showIm import showIm
import JBhelpers
from upConv import upConv
import pyPyrUtils
class Spyr(pyramid):
filt = ''
edges = ''
#constructor
def __init__(self, image, height='auto', filter='sp1Filters', edges='reflect1'):
"""Steerable pyramid. image parameter is required, others are optional
- `image` - a 2D numpy array
- `height` - an integer denoting number of pyramid levels desired. 'auto' (default) uses
maxPyrHt from pyPyrUtils.
- `filter` - The name of one of the steerable pyramid filters in pyPyrUtils:
`'sp0Filters'`, `'sp1Filters'`, `'sp3Filters'`, `'sp5Filters'`. Default is `'sp1Filters'`.
- `edges` - specifies edge-handling. Options are:
* `'circular'` - circular convolution
* `'reflect1'` - reflect about the edge pixels
* `'reflect2'` - reflect, doubling the edge pixels
* `'repeat'` - repeat the edge pixels
* `'zero'` - assume values of zero outside image boundary
* `'extend'` - reflect and invert
* `'dont-compute'` - zero output when filter overhangs input boundaries.
"""
self.pyrType = 'steerable'
self.image = numpy.array(image)
if filter == 'sp0Filters':
filters = sp0Filters()
elif filter == 'sp1Filters':
filters = sp1Filters()
elif filter == 'sp3Filters':
filters = sp3Filters()
elif filter == 'sp5Filters':
filters = sp5Filters()
elif os.path.isfile(filter):
raise Exception("Filter files not supported yet")
else:
raise Exception("filter parameters value %s not supported" % (filter))
self.filt = filters
self.edges = edges
harmonics = filters['harmonics']
lo0filt = filters['lo0filt']
hi0filt = filters['hi0filt']
lofilt = filters['lofilt']
bfilts = filters['bfilts']
steermtx = filters['mtx']
max_ht = maxPyrHt(self.image.shape, lofilt.shape)
if height == 'auto':
ht = max_ht
elif height > max_ht:
raise Exception("cannot build pyramid higher than %d levels." % (max_ht))
else:
ht = height
nbands = bfilts.shape[1]
self.pyr = []
self.pyrSize = []
for n in range((ht*nbands)+2):
self.pyr.append([])
self.pyrSize.append([])
im = self.image
im_sz = im.shape
pyrCtr = 0
hi0 = corrDn(image = im, filt = hi0filt, edges = edges);
self.pyr[pyrCtr] = hi0
self.pyrSize[pyrCtr] = hi0.shape
pyrCtr += 1
lo = corrDn(image = im, filt = lo0filt, edges = edges)
for i in range(ht):
lo_sz = lo.shape
# assume square filters -- start of buildSpyrLevs
bfiltsz = int(math.floor(math.sqrt(bfilts.shape[0])))
for b in range(bfilts.shape[1]):
filt = bfilts[:,b].reshape(bfiltsz,bfiltsz).T
band = corrDn(image = lo, filt = filt, edges = edges)
self.pyr[pyrCtr] = numpy.array(band)
self.pyrSize[pyrCtr] = (band.shape[0], band.shape[1])
pyrCtr += 1
lo = corrDn(image = lo, filt = lofilt, edges = edges, step = (2,2))
self.pyr[pyrCtr] = numpy.array(lo)
self.pyrSize[pyrCtr] = lo.shape
# methods
def set(self, band, location, value):
"""set value at specified band and location
band and value must be integers, location can be an int or a tuple
"""
if isinstance(location, (int, long)):
self.pyr[band][0][location] = value
elif isinstance(location, tuple):
self.pyr[band][location[0]][location[1]] = value
else:
raise Exception('location parameter must be int or tuple!')
def spyrLev(self, lev):
if lev < 0 or lev > self.spyrHt()-1:
raise Exception('level parameter must be between 0 and %d!' % (self.spyrHt()-1))
levArray = []
for n in range(self.numBands()):
levArray.append(self.spyrBand(lev, n))
levArray = numpy.array(levArray)
return levArray
def spyrBand(self, lev, band):
if lev < 0 or lev > self.spyrHt()-1:
raise Exception('level parameter must be between 0 and %d!' % (self.spyrHt()-1))
if band < 0 or band > self.numBands()-1:
raise Exception('band parameter must be between 0 and %d!' % (self.numBands()-1))
return self.band( ((lev*self.numBands())+band)+1 )
def spyrHt(self):
if len(self.pyrSize) > 2:
spHt = (len(self.pyrSize)-2)/self.numBands()
else:
spHt = 0
return spHt
def numBands(self):
if len(self.pyrSize) == 2:
return 0
else:
b = 2
while ( b <= len(self.pyrSize) and
self.pyrSize[b] == self.pyrSize[1] ):
b += 1
return b-1
def pyrLow(self):
return numpy.array(self.band(len(self.pyrSize)-1))
def pyrHigh(self):
return numpy.array(self.band(0))
def reconPyr(self, *args):
# defaults
if len(args) > 0:
if args[0] == 'sp0Filters':
filters = sp0Filters()
elif args[0] == 'sp1Filters':
filters = sp1Filters()
elif args[0] == 'sp3Filters':
filters = sp3Filters()
elif args[0] == 'sp5Filters':
filters = sp5Filters()
elif os.path.isfile(args[0]):
print "Filter files not supported yet"
return
else:
print "filter %s not supported" % (args[0])
return
else:
filters = sp1Filters()
lo0filt = filters['lo0filt']
hi0filt = filters['hi0filt']
lofilt = filters['lofilt']
bfilts = filters['bfilts']
steermtx = filters['mtx']
# assume square filters -- start of buildSpyrLevs
bfiltsz = int(math.floor(math.sqrt(bfilts.shape[0])))
if len(args) > 1:
edges = args[1]
else:
edges = 'reflect1'
if len(args) > 2:
levs = args[2]
else:
levs = 'all'
if len(args) > 3:
bands = args[3]
else:
bands = 'all'
#---------------------------------------------------------
maxLev = 2 + self.spyrHt()
if levs == 'all':
levs = numpy.array(range(maxLev))
else:
levs = numpy.array(levs)
if (levs < 0).any() or (levs >= maxLev).any():
raise Exception("level numbers must be in the range [0, %d]." % (maxLev-1))
else:
levs = numpy.array(levs)
if len(levs) > 1 and levs[0] < levs[1]:
levs = levs[::-1] # we want smallest first
if bands == 'all':
bands = numpy.array(range(self.numBands()))
else:
bands = numpy.array(bands)
if (bands < 0).any() or (bands > bfilts.shape[1]).any():
raise Exception("band numbers must be in the range [0, %d]." % (self.numBands()-1))
else:
bands = numpy.array(bands)
# make a list of all pyramid layers to be used in reconstruction
Nlevs = self.spyrHt()
Nbands = self.numBands()
reconList = [] # pyr indices used in reconstruction
for lev in levs:
if lev == 0:
reconList.append(0)
elif lev == Nlevs+1:
# number of levels times number of bands + top and bottom
# minus 1 for 0 starting index
reconList.append( (Nlevs*Nbands) + 2 - 1)
else:
for band in bands:
reconList.append( ((lev-1) * Nbands) + band + 1)
reconList = numpy.sort(reconList)[::-1] # deepest level first
# initialize reconstruction
if len(self.pyr)-1 in reconList:
recon = numpy.array(self.pyr[len(self.pyrSize)-1])
else:
recon = numpy.zeros(self.pyr[len(self.pyrSize)-1].shape)
# recursive subsystem
# we need to loop over recursive subsystem pairs
for level in range(Nlevs):
maxLevIdx = ((maxLev-2) * Nbands) + 1
resSzIdx = maxLevIdx - (level * Nbands) - 1
recon = upConv(image = recon, filt = lofilt, edges = edges,
step = (2,2), start = (0,0),
stop = self.pyrSize[resSzIdx])
bandImageIdx = 1 + (((Nlevs-1)-level) * Nbands)
for band in range(Nbands-1,-1,-1):
if bandImageIdx in reconList:
filt = bfilts[:,(Nbands-1)-band].reshape(bfiltsz,
bfiltsz,
order='F')
recon = upConv(image = self.pyr[bandImageIdx],
filt = filt, edges = edges,
stop = (self.pyrSize[bandImageIdx][0],
self.pyrSize[bandImageIdx][1]),
result = recon)
bandImageIdx += 1
# apply lo0filt
sz = recon.shape
recon = upConv(image = recon, filt = lo0filt, edges = edges, stop = sz)
# apply hi0filt if needed
if 0 in reconList:
recon = upConv(image = self.pyr[0], filt = hi0filt, edges = edges,
start = (0,0), step = (1,1), stop = recon.shape,
result = recon)
return recon
def showPyr(self, prange = 'auto2', gap = 1, scale = 2, disp = 'qt'):
ht = self.spyrHt()
nind = len(self.pyr)
nbands = self.numBands()
## Auto range calculations:
if prange == 'auto1':
prange = numpy.ones((nind,1))
band = self.pyrHigh()
mn = numpy.amin(band)
mx = numpy.amax(band)
for lnum in range(1,ht+1):
for bnum in range(nbands):
idx = pyPyrUtils.LB2idx(lnum, bnum, ht+2, nbands)
band = self.band(idx)/(numpy.power(scale,lnum))
prange[(lnum-1)*nbands+bnum+1] = numpy.power(scale,lnum-1)
bmn = numpy.amin(band)
bmx = numpy.amax(band)
mn = min([mn, bmn])
mx = max([mx, bmx])
prange = numpy.outer(prange, numpy.array([mn, mx]))
band = self.pyrLow()
mn = numpy.amin(band)
mx = numpy.amax(band)
prange[nind-1,:] = numpy.array([mn, mx])
elif prange == 'indep1':
prange = numpy.zeros((nind,2))
for bnum in range(nind):
band = self.band(bnum)
mn = band.min()
mx = band.max()
prange[bnum,:] = numpy.array([mn, mx])
elif prange == 'auto2':
prange = numpy.ones(nind)
band = self.pyrHigh()
sqsum = numpy.sum( numpy.power(band, 2) )
numpixels = band.shape[0] * band.shape[1]
for lnum in range(1,ht+1):
for bnum in range(nbands):
band = self.band(LB2idx(lnum, bnum, ht+2, nbands))
band = band / numpy.power(scale,lnum-1)
sqsum += numpy.sum( numpy.power(band, 2) )
numpixels += band.shape[0] * band.shape[1]
prange[(lnum-1)*nbands+bnum+1] = numpy.power(scale, lnum-1)
stdev = numpy.sqrt( sqsum / (numpixels-1) )
prange = numpy.outer(prange, numpy.array([-3*stdev, 3*stdev]))
band = self.pyrLow()
av = numpy.mean(band)
stdev = numpy.sqrt( numpy.var(band) )
prange[nind-1,:] = numpy.array([av-2*stdev, av+2*stdev])
elif prange == 'indep2':
prange = numpy.zeros((nind,2))
for bnum in range(nind-1):
band = self.band(bnum)
stdev = numpy.sqrt( numpy.var(band) )
prange[bnum,:] = numpy.array([-3*stdev, 3*stdev])
band = self.pyrLow()
av = numpy.mean(band)
stdev = numpy.sqrt( numpy.var(band) )
prange[nind-1,:] = numpy.array([av-2*stdev, av+2*stdev])
elif isinstance(prange, basestring):
raise Exception("Bad RANGE argument: %s'" % (prange))
elif prange.shape[0] == 1 and prange.shape[1] == 2:
scales = numpy.power(scale, range(ht))
scales = numpy.outer( numpy.ones((nbands,1)), scales )
scales = numpy.array([1, scales, numpy.power(scale, ht)])
prange = numpy.outer(scales, prange)
band = self.pyrLow()
prange[nind,:] += numpy.mean(band) - numpy.mean(prange[nind,:])
colormap = matplotlib.cm.Greys_r
# compute positions of subbands
llpos = numpy.ones((nind,2));
ncols = int(numpy.ceil((nbands+1)/2))
nrows = int(numpy.ceil(nbands/2))
a = numpy.array(range(1-nrows, 1))
b = numpy.zeros((1,ncols))[0]
ab = numpy.concatenate((a,b))
c = numpy.zeros((1,nrows))[0]
d = range(-1, -ncols-1, -1)
cd = numpy.concatenate((c,d))
relpos = numpy.vstack((ab,cd)).T
if nbands > 1:
mvpos = numpy.array([-1, -1]).reshape(1,2)
else:
mvpos = numpy.array([0, -1]).reshape(1,2)
basepos = numpy.array([0, 0]).reshape(1,2)
for lnum in range(1,ht+1):
ind1 = (lnum-1)*nbands + 1
sz = numpy.array(self.pyrSize[ind1]) + gap
basepos = basepos + mvpos * sz
if nbands < 5: # to align edges
sz += gap * (ht-lnum)
llpos[ind1:ind1+nbands, :] = numpy.dot(relpos, numpy.diag(sz)) + ( numpy.ones((nbands,1)) * basepos )
# lowpass band
sz = numpy.array(self.pyrSize[nind-1]) + gap
basepos += mvpos * sz
llpos[nind-1,:] = basepos
# make position list positive, and allocate appropriate image:
llpos = llpos - ((numpy.ones((nind,2)) * numpy.amin(llpos, axis=0)) + 1) + 1
llpos[0,:] = numpy.array([1, 1])
# we want to cast it as ints, since we'll be using these as indices
llpos = llpos.astype(int)
urpos = llpos + self.pyrSize
d_im = numpy.zeros((numpy.amax(urpos), numpy.amax(urpos)))
# paste bands into image, (im-r1)*(nshades-1)/(r2-r1) + 1.5
nshades = 64;
for bnum in range(1,nind):
mult = (nshades-1) / (prange[bnum,1]-prange[bnum,0])
d_im[llpos[bnum,0]:urpos[bnum,0],
llpos[bnum,1]:urpos[bnum,1]] = mult * self.band(bnum) + (1.5-mult*prange[bnum,0])
if disp == 'qt':
showIm(d_im[:self.pyrSize[0][0]*2,:])
elif disp == 'nb':
JBhelpers.showIm(d_im[:self.pyrSize[0][0]*2,:])
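# Illustrative usage (a sketch; the input array below is a placeholder, any 2D
# numpy array will do):
#   img = numpy.random.rand(256, 256)
#   pyr = Spyr(img, height='auto', filter='sp3Filters', edges='reflect1')
#   low = pyr.pyrLow()                  # coarsest (lowpass) residual band
#   recon = pyr.reconPyr('sp3Filters')  # reconstruct from all levels and bands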
|
import os
import time
from urllib.parse import urlparse
import requests
from auth import get_auth
def get_resource_list(url):
"""
Returns a list of HC resources specified by the url basename (such as .../articles.json)
:param url: A full endpoint url, such as 'https://support.zendesk.com/api/v2/help_center/articles.json'
:return: List of resources, or False if the request failed.
"""
session = requests.Session()
session.auth = get_auth()
o = urlparse(url)
resource = os.path.splitext(os.path.basename(o.path))[0] # e.g., 'articles'
record_list = {resource: []}
while url:
response = session.get(url)
if response.status_code == 429:
print('Rate limited! Please wait.')
time.sleep(int(response.headers['retry-after']))
response = session.get(url)
if response.status_code != 200:
print('Error with status code {}'.format(response.status_code))
exit()
data = response.json()
if data[resource]: # guard against empty record list
record_list[resource].extend(data[resource])
url = data['next_page']
return record_list[resource]
def get_resource(url):
"""
Returns a single HC resource
:param url: A full endpoint url, such as 'https://support.zendesk.com/api/v2/help_center/articles/2342572.json'
:return: Dict of a resource, or False if the request failed.
"""
resource = None
response = requests.get(url, auth=get_auth())
if response.status_code == 429:
print('Rate limited! Please wait.')
time.sleep(int(response.headers['retry-after']))
response = requests.get(url, auth=get_auth())
if response.status_code != 200:
print('Failed to get record with error {}:'.format(response.status_code))
print(response.text)
return False
for k, v in response.json().items():
resource = v
if type(resource) is dict:
return resource
return None
def post_resource(url, data, status=201):
"""
:param url:
:param data:
:param status: HTTP status. Normally 201 but some POST requests return 200
:return: Python data, or False if the request failed.
"""
resource = None
headers = {'Content-Type': 'application/json'}
response = requests.post(url, json=data, auth=get_auth(), headers=headers)
if response.status_code == 429:
print('Rate limited! Please wait.')
time.sleep(int(response.headers['retry-after']))
response = requests.post(url, json=data, auth=get_auth(), headers=headers)
if response.status_code != status:
print('Failed to create record with error {}:'.format(response.status_code))
print(response.text)
return False
for k, v in response.json().items():
resource = v
if type(resource) is dict:
return resource
return None
def put_resource(url, data):
"""
:param url:
:param data:
:return: Python data, or False if the request failed.
"""
resource = None
headers = {'Content-Type': 'application/json'}
response = requests.put(url, json=data, auth=get_auth(), headers=headers)
if response.status_code == 429:
print('Rate limited! Please wait.')
time.sleep(int(response.headers['retry-after']))
        response = requests.put(url, json=data, auth=get_auth(), headers=headers)
if response.status_code != 200:
print('Failed to update record with error {}:'.format(response.status_code))
print(response.text)
return False
for k, v in response.json().items():
resource = v
if type(resource) is dict:
return resource
return None
def delete_resource(url):
"""
Runs a DELETE request on any Delete endpoint in the Zendesk API
:param url: A full endpoint url, such as 'https://support.zendesk.com/api/v2/help_center/articles/2342572.json'
:return: If successful, a 204 status code. If not, None
"""
response = requests.delete(url, auth=get_auth())
if response.status_code == 429:
print('Rate limited! Please wait.')
time.sleep(int(response.headers['retry-after']))
response = requests.delete(url, auth=get_auth())
if response.status_code != 204:
print('Failed to delete record with error {}'.format(response.status_code))
print(response.text)
return False
return None
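# Illustrative usage (a sketch; the subdomain, ids and payload below are placeholders):
#   articles = get_resource_list('https://your_subdomain.zendesk.com/api/v2/help_center/articles.json')
#   article = get_resource('https://your_subdomain.zendesk.com/api/v2/help_center/articles/123.json')
#   created = post_resource('https://your_subdomain.zendesk.com/api/v2/help_center/sections/456/articles.json',
#                           {'article': {'title': 'New article', 'locale': 'en-us'}})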
|
'''
This is the input node that receives spikes and converts to Baxter joint angles.
'''
import argparse
from brian_multiprocess_udp import BrianConnectUDP
def from_spikes_to_baxter(spikes_pipe_in, spikes_pipe_out):
"""
    This function takes the place of run_brian_simulation and converts the received spikes into Baxter joint angles.
"""
import numpy
import select
import time
import rospy
import baxter_interface
# Initialization of the ROS node.
print("Initializing Baxter node... ")
rospy.init_node("spikes_to_joint_position", disable_signals=True) # This node name must be UNIQUE!
# the option "disable_signals=True" is important to make ROS ignore the signals from my program.
print("Getting robot state... ")
rs = baxter_interface.RobotEnable()
init_state = rs.state().enabled
# def clean_shutdown():
# print("\nExiting example...")
# if not init_state:
# print("Disabling robot...")
# rs.disable()
# rospy.on_shutdown(clean_shutdown)
if not init_state:
print("Enabling robot... ")
rs.enable()
# Initializes the left arm
left = baxter_interface.Limb('left')
time.sleep(1) #gives some time to ROS...
# Gets the joint names used inside the joint_command
lj = left.joint_names()
# lj[0] => s0
# lj[1] => s1
# lj[2] => e0
# lj[3] => e1
# lj[4] => w0
# lj[5] => w1
# lj[6] => w2
joint_weights = [0.1,0.1,1,0.1,1,1,1] #these numbers will be multiplicated by the steps each joint is going to do
joint_weights = [1,1,0,1,0,1,0] #these numbers will be multiplicated by the steps each joint is going to do
# ['left_s0','left_s1','left_e0','left_e1','left_w0','left_w1','left_w2']
def converts_spikes_into_joints(spike_numpy_array, limb, step_up, step_down):
"""
        Receives a numpy.array with 7 elements. If an element is 1, the value of that joint is increased; if it is 0, it is decreased.
spike_numpy_array => the numpy array received directly from my system (a numpy array with 1's and 0's) and the
same length as NumOfNeuronsInput.
joint_angles => list of the angles following the same order as joint_names
step_up => amount to be increased at the angle if a spike is received
step_down => amount to be decreased at the angle if a spike is received
returns a dict with all the joint angles values
"""
joint_angles = limb.joint_angles() #gets the actual joint angles
joint_names = ['left_s0','left_s1','left_e0','left_e1','left_w0','left_w1','left_w2']
movement = spike_numpy_array*step_up - (spike_numpy_array*(-1)+1)*step_down
# Creates a dictionary with the joint names and the necessary steps (up or down)
final_step = dict(zip(joint_names, movement*joint_weights))
# Returns the actual joint angles summed by the steps according to the spike_numpy_array received
return {key:value+final_step[key] for key,value in joint_angles.iteritems()}
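    # Worked sketch (hypothetical numbers): with step_up = step_down = 0.005 and
    # joint_weights = [1,1,0,1,0,1,0], a spike vector [1,0,1,0,1,0,1] moves
    # left_s0 by +0.005, left_s1 by -0.005 and leaves left_e0 unchanged, since
    # each joint's step is scaled by its weight before being added to the current
    # joint angle.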
print "Node started!"
while True:
if select.select([spikes_pipe_in],[],[]):
# Receives the spike train from the pipe, converts according to the converts_spikes_into_joints function
# and them apply the dictionary with the joint names and angles to Baxter using the set_joint_positions method
# from the limb object (left).
#print "spikes in!"
cmd = converts_spikes_into_joints(spikes_pipe_in.recv(), left, 0.005, 0.005)
#print "set joints!",
left.set_joint_positions( cmd )
#print "Done!"
# Prints the current endpoint position.
print "Baxter Input Node"
print cmd
if __name__=="__main__":
#my_randseed=int(''.join(map(str,numpy.random.randint(0,9,15)))) # creates a long integer converted random seed, but it is a crazy dumb code :)
# Process the information received from the command line arguments.
    parser = argparse.ArgumentParser(description="Sets up and launches the Baxter OUTPUT node.")
parser.add_argument("--input_neurons", help="The total number of INPUT neurons.", type=int, default=7)
parser.add_argument("--input_addr", help="Input addresses as a tuple with (\"IP\", PORT).", type=list, default=("192.168.1.123", 33333))
parser.add_argument("--input_clock", help="An integer number used as the INPUT clock (mS)", type=int, default=100)
parser.add_argument("--brian_addr", help="A number from 0 to 255 used to identify the node.", type=int, default=200)
parser.add_argument("--ttime", help="Total time of the simulation.", type=int, required=True)
args=parser.parse_args()
my_inputclock_dt = args.input_clock
Number_of_Neurons_Input = args.input_neurons
my_simulation = BrianConnectUDP(main_NeuronGroup=None, NumOfNeuronsInput=Number_of_Neurons_Input,
input_addresses=[(args.input_addr[0], args.input_addr[1], Number_of_Neurons_Input)],
inputclock_dt=my_inputclock_dt, TotalSimulationTime=args.ttime, brian_bypass=from_spikes_to_baxter, brian_address=args.brian_addr)
|
# -*- coding: utf-8 -*-
from openerp import tools, models, fields, api, exceptions
############################################################################################################################ Item-shelf relation ###
class LegrandCikkPolc(models.Model):
_name = 'legrand.cikk_polc'
_order = 'id desc'
hely_id = fields.Many2one('legrand.hely', u'Üzem', required=True, domain=[('belso_szallitas_e', '=', True)])
cikk_id = fields.Many2one('legrand.cikk', u'Cikkszám', required=True)
polc = fields.Char(u'Polc')
van_e = fields.Boolean(u'Van-e?', default=True)
# virtual fields
cikknev = fields.Char(u'Cikknév', related='cikk_id.cikknev', readonly=True)
@api.one
def toggle_van_e(self):
self.van_e = not self.van_e
return True
############################################################################################################################ Items on multiple shelves ###
class LegrandCikkTobbPolc(models.Model):
_name = 'legrand.cikk_tobb_polc'
_auto = False
_order = 'hely_id, cikk_id'
hely_id = fields.Many2one('legrand.hely', u'Üzem', required=True, domain=[('belso_szallitas_e', '=', True)])
cikk_id = fields.Many2one('legrand.cikk', u'Cikkszám', required=True)
polcok = fields.Char(u'Polcok')
# virtual fields
cikknev = fields.Char(u'Cikknév', related='cikk_id.cikknev', readonly=True)
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
cr.execute(
"""CREATE or REPLACE VIEW %s as (
SELECT
row_number() over() AS id,
hely_id,
cikk_id,
string_agg(polc, ', ') AS polcok
FROM legrand_cikk_polc
GROUP BY hely_id, cikk_id having count(*) > 1
)"""
% (self._table)
)
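    # Illustrative result of the view (hypothetical data): an item stored on
    # shelves 'A1' and 'B3' within the same plant collapses into a single row
    # with polcok = 'A1, B3'; items stored on only one shelf are filtered out
    # by the HAVING count(*) > 1 clause.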
|
import logging
import os
logging.basicConfig(format='%(levelname)s %(message)s')
logging.getLogger('__main__').setLevel(logging.INFO)
logger = logging.getLogger(__name__)
from bravado.client import SwaggerClient
logger.info('trying to get swagger definition')
client = SwaggerClient.from_url(os.getenv('SWAGGERURL', 'http://toxcast-api.cloud.douglasconnect.com/beta/swagger.json'))
logger.info('testing endpoints')
result = client.compound.compounds_get().result()
result = client.assay.assays_get().result()
result = client.result.results_get().result()
logger.info('all tests completed without exceptions')
|
import os
import shutil
import zipfile
# First import the required packages
# shutil is needed later for moving files; this import can be skipped otherwise
# Source path
parent_path = r'G:/KittiRaw_zip/2011_10_03'
# Target path
target_path = r'H:/KittiRaw/2011_10_03'
# Archive type selection
# The archive type can be changed; other packages such as tarfile would then be required
# zip is used here because it is the most common format on Windows; adjust for other types as needed
file_flag = '.zip'
# Delete a zip file that has already been extracted
# Not recommended on first use; enable it once the script is confirmed to work correctly
def del_old_zip(file_path):
os.remove(file_path)
# Extract
def decompress(file_path, root):
    # Start
    # Open the zip archive with zipfile
    z = zipfile.ZipFile(f'{file_path}', 'r')
    # Extract
    z.extractall(path=f"{root}") # path is the extraction directory; the unpacked files end up under it
    # Check whether another round of unpacking is needed (nested archives)
for names in z.namelist():
if names.endswith(file_flag):
z.close()
return 1
    # Done
z.close()
return 0
# In practice some zips end up mixed together after unpacking,
# and manual extraction may also prompt about overwriting files.
# The two functions below address this problem.
# Start by creating a top-level folder with the same name as the archive
# to avoid confusion and trouble later on.
def start_dir_make(root, dirname):
os.chdir(root)
os.mkdir(dirname)
return os.path.join(root, dirname)
# Remove redundant folder levels
def rem_dir_extra(root, father_dir_name):
    # Be careful with recursion: an earlier call may already have changed things while a later call still assumes the old data
    try:
        # Check for folders with duplicate names - start
        for item in os.listdir(os.path.join(root, father_dir_name)):
            # First check whether this is a directory; if not, skip this iteration
            if not os.path.isdir(os.path.join(root, father_dir_name, item)):
                continue
            # Decide whether to strip one level of directory structure:
            # the folder names must match and the subdirectory must contain exactly one folder
            if item == father_dir_name and len(os.listdir(os.path.join(root, father_dir_name))) == 1:
                # Change the working directory
                os.chdir(root)
                # Rename the redundant folder first, since moving it directly would cause a name collision
                os.rename(father_dir_name, father_dir_name + '-old')
                # Move the files, then delete the empty folder
                shutil.move(os.path.join(root, father_dir_name + '-old', item), os.path.join(root))
                os.rmdir(os.path.join(root, father_dir_name + '-old'))
                # Keep recursing with the folder that has had one level stripped as the new parent
                # Note: the data has already been modified above, so the recursive arguments must be correct!
                rem_dir_extra(root, item)
            else:
                # Handle folders that do not meet the condition above
                rem_dir_extra(os.path.join(root, father_dir_name), item)
    except Exception as e:
        # Print the error message
        print("Error while cleaning up folders: " + str(e))
# Entry point
def unzipWithzipfile():
flag = 1
while flag:
        # Walk through the folders in a loop
for root, dirs, files in os.walk(parent_path):
            # Read the file names
nums = len(files)
current = 1
for name in files:
if name.endswith(file_flag):
                    # Create the folder
                    new_ws = start_dir_make(target_path, name.replace(file_flag, ''))
                    # Path of the zip file
                    zip_path = os.path.join(root, name)
                    # Extract
                    print("[{}/{}] unzip:{}...".format(current, nums, f'{root}\\{name}'))
                    flag = decompress(zip_path, new_ws)
                    # Delete the archive after extraction
                    # Somewhat risky
                    # But if it is not deleted the script may process it again on the next run
                    # Make a backup or test first, otherwise data may be lost; enable at your own risk
                    # del_old_zip(zip_path)
                    # Strip the redundant directory structure
rem_dir_extra(target_path, name.replace(file_flag, ''))
print("{} finished!".format(f'{name}'))
current += 1
        # Because extraction may have run several times, an already-extracted parent directory with the same name may remain unhandled, so process it once more here
rem_dir_extra(os.path.split(parent_path)[0], os.path.split(parent_path)[1])
print("全部解压完成!\n")
if __name__ == '__main__':
if not os.path.exists(target_path):
os.makedirs(target_path)
for root, dirs, files in os.walk(parent_path):
        # Read the file names
nums = len(files)
current = 1
for name in files:
if name.endswith(file_flag):
tmp_name = name
if not os.path.exists(os.path.join(target_path,tmp_name.replace(file_flag, ''))):
print("[{}/{}] unzip:{}...".format(current, nums, f'{root}\\{name}'))
zip_path = os.path.join(root, name)
cmd = "Bandizip.exe x -y -o:{} {}".format(target_path, zip_path)
os.system(cmd)
print("{} finished!".format(f'{name}'))
else:
print("[{}/{}] {} is already unziped!".format(current, nums, f'{name}'))
else:
print("[{}/{}] {} is not a zip file!".format(current, nums, f'{name}'))
current += 1
print("全部解压完成!\n")
|
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
from pyspark.sql.types import StructType, DataType
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_complex_type_base import FhirComplexTypeBase
from spark_fhir_schemas.r4.complex_types.usagecontext import UsageContextSchema
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# code (Coding)
from spark_auto_mapper_fhir.complex_types.coding import Coding
# Import for CodeableConcept for code
from spark_auto_mapper_fhir.value_sets.usage_context_type import (
UsageContextTypeCode,
)
# End Import for CodeableConcept for code
# valueCodeableConcept (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# Import for CodeableConcept for valueCodeableConcept
from spark_auto_mapper_fhir.value_sets.context_of_use_value_set import (
ContextOfUseValueSetCode,
)
# End Import for CodeableConcept for valueCodeableConcept
# valueQuantity (Quantity)
from spark_auto_mapper_fhir.complex_types.quantity import Quantity
# valueRange (Range)
from spark_auto_mapper_fhir.complex_types.range import Range
# valueReference (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for valueReference
from spark_auto_mapper_fhir.resources.plan_definition import PlanDefinition
from spark_auto_mapper_fhir.resources.research_study import ResearchStudy
from spark_auto_mapper_fhir.resources.insurance_plan import InsurancePlan
from spark_auto_mapper_fhir.resources.healthcare_service import HealthcareService
from spark_auto_mapper_fhir.resources.group import Group
from spark_auto_mapper_fhir.resources.location import Location
from spark_auto_mapper_fhir.resources.organization import Organization
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class UsageContext(FhirComplexTypeBase):
"""
UsageContext
fhir-base.xsd
Specifies clinical/business/etc. metadata that can be used to retrieve, index and/or categorize an artifact. This metadata can either be specific to the applicable population (e.g., age category, DRG) or the specific context of care (e.g., venue, care setting, provider of care).
If the element is present, it must have a value for at least one of the defined elements, an @id referenced from the Narrative, or extensions
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
code: Coding[UsageContextTypeCode],
valueCodeableConcept: Optional[
CodeableConcept[ContextOfUseValueSetCode]
] = None,
valueQuantity: Optional[Quantity] = None,
valueRange: Optional[Range] = None,
valueReference: Optional[
Reference[
Union[
PlanDefinition,
ResearchStudy,
InsurancePlan,
HealthcareService,
Group,
Location,
Organization,
]
]
] = None,
) -> None:
"""
Specifies clinical/business/etc. metadata that can be used to retrieve, index
and/or categorize an artifact. This metadata can either be specific to the
applicable population (e.g., age category, DRG) or the specific context of
care (e.g., venue, care setting, provider of care).
If the element is present, it must have a value for at least one of the
defined elements, an @id referenced from the Narrative, or extensions
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param code: A code that identifies the type of context being specified by this usage
context.
:param valueCodeableConcept: None
:param valueQuantity: None
:param valueRange: None
:param valueReference: None
"""
super().__init__(
id_=id_,
extension=extension,
code=code,
valueCodeableConcept=valueCodeableConcept,
valueQuantity=valueQuantity,
valueRange=valueRange,
valueReference=valueReference,
)
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
return UsageContextSchema.get_schema(include_extension=include_extension)
|
# Do nothing; this file exists only to make "classes" a package.
|
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time
import logging
import sys
class API:
def __init__(self, token, api_url='https://api.tinybird.co', version='v0'):
self.api_url = api_url.rstrip('/')
self.version = version
TOKEN_ERROR = f"Token must be a valid Tinybird token for {self.api_url}. Check the `api_url` param is correct and the token has the right permissions. {self.ui_url()}/tokens"
if not token:
logging.critical(TOKEN_ERROR)
sys.exit(-1)
self.token = token
retry = Retry(total=5, backoff_factor=0.2)
adapter = HTTPAdapter(max_retries=retry)
self._session = requests.Session()
self._session.mount('http://', adapter)
self._session.mount('https://', adapter)
# Rate limit
self.rate_limit_points = 6
self.rate_limit_remaining = self.rate_limit_points
self.rate_limit_reset = 0
self.retry_after = 1
# check the token is valid
try:
self.get('/datasources')
except requests.HTTPError as e:
if e.response.status_code == 403:
logging.error(TOKEN_ERROR)
sys.exit(-1)
def ui_url(self):
return self.api_url.replace('api', 'ui')
def _handle_rate_limit(self) -> None:
if self.rate_limit_remaining == 0:
time_to_sleep = min((self.rate_limit_reset - time.time()), 10)
time_to_sleep = max(time_to_sleep, 1) + 1
logging.info(f'Waiting {str(time_to_sleep)} seconds before retrying...')
time.sleep(time_to_sleep)
logging.info('Retrying')
def _set_rate_limit(self, response: requests.Response) -> None:
# Update rate limit fields
if 'X-Ratelimit-Limit' in response.headers.keys():
self.rate_limit_points = int(response.headers.get('X-Ratelimit-Limit'))
self.rate_limit_remaining = int(response.headers.get('X-Ratelimit-Remaining'))
self.rate_limit_reset = int(response.headers.get('X-Ratelimit-Reset'))
self.retry_after = int(response.headers.get('Retry-After', 0))
def send(self, path, method='POST', **kwargs):
self._handle_rate_limit()
headers = {
'Authorization': 'Bearer ' + self.token
}
while True:
url = f"{self.api_url}/{self.version}/{path.lstrip('/')}"
if method == 'POST':
response = self._session.post(url, headers=headers, **kwargs)
else:
response = self._session.get(url, headers=headers, **kwargs)
self._set_rate_limit(response)
if response.status_code == 429:
logging.warning(f'Too many requests, you can do {self.rate_limit_points} requests per minute...')
self._handle_rate_limit()
else:
break
response.raise_for_status()
return response
def post(self, path, **kwargs):
return self.send(path, method='POST', **kwargs)
def get(self, path, **kwargs):
return self.send(path, method='GET', **kwargs)
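
# Minimal usage sketch (not part of the original module). The TB_TOKEN
# environment variable name below is an assumption; '/datasources' is the same
# endpoint the constructor already calls to validate the token.
if __name__ == '__main__':
    import os
    api = API(token=os.environ.get('TB_TOKEN', ''))
    response = api.get('/datasources')
    logging.info('datasources response: %s', response.json())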
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import webapp2
import jinja2
from google.appengine.ext import ndb
from google.appengine.api import users
import logging
import datetime
template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.getcwd()))
#import firebase_admin
#from firebase_admin import credentials
#cred = credentials.Certificate('templates/firebase/service_account.json')
#default_app = firebase_admin.initialize_app(cred)
class LoginHandler(webapp2.RequestHandler):
def get(self):
template = template_env.get_template('templates/auth/login.html')
context = {}
self.response.write(template.render(context))
def post(self):
#from firebase_admin import auth
vstrChoice = self.request.get("vstrChoice")
if vstrChoice == "0":
template = template_env.get_template('templates/auth/loggedin.html')
context = {}
self.response.write(template.render(context))
elif vstrChoice == "1":
template = template_env.get_template('templates/auth/loggedout.html')
context = {}
self.response.write(template.render(context))
elif vstrChoice == "2":
vstrDisplayName = self.request.get('vstrDisplayName')
vstrEmail = self.request.get('vstrEmail')
vstremailVerified = self.request.get('vstremailVerified')
vstrUserID = self.request.get('vstrUserID')
vstrPhoneNumber = self.request.get('vstrPhoneNumber')
vstrProviderData = self.request.get('vstrProviderData')
vstrAccessToken = self.request.get('vstrAccessToken')
#decode_token = auth.verify_id_token(vstrAccessToken)
#uid = decode_token['uid']
class LogoutHandler(webapp2.RequestHandler):
def get(self):
template = template_env.get_template('templates/auth/logout.html')
context = {}
self.response.write(template.render(context))
app = webapp2.WSGIApplication([
('/login', LoginHandler),
('/logout', LogoutHandler)
], debug=True)
|
"""empty message
Revision ID: ad467baf7ec8
Revises: 4b483a762fed
Create Date: 2021-12-02 16:32:50.884324
"""
import sqlalchemy_utils
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad467baf7ec8'
down_revision = '4b483a762fed'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('phone_country',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.Column('updated_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
sa.Column('name', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('phone_number',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.Column('updated_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
sa.Column('country_id', sa.Integer(), nullable=False),
sa.Column('number', sa.String(length=128), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['country_id'], ['phone_country.id'], ondelete='cascade'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('number')
)
op.create_table('phone_message',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.Column('updated_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
sa.Column('number_id', sa.Integer(), nullable=False),
sa.Column('from_number', sa.String(length=128), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['number_id'], ['phone_number.id'], ondelete='cascade'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('phone_reservation',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.Column('updated_at', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
sa.Column('number_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('start', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.Column('end', sqlalchemy_utils.types.arrow.ArrowType(), nullable=False),
sa.ForeignKeyConstraint(['number_id'], ['phone_number.id'], ondelete='cascade'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='cascade'),
sa.PrimaryKeyConstraint('id')
)
op.add_column('users', sa.Column('can_use_phone', sa.Boolean(), server_default='0', nullable=False))
op.add_column('users', sa.Column('phone_quota', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'phone_quota')
op.drop_column('users', 'can_use_phone')
op.drop_table('phone_reservation')
op.drop_table('phone_message')
op.drop_table('phone_number')
op.drop_table('phone_country')
# ### end Alembic commands ###
|
from typing import NoReturn
import click
from watchgod import RegExpWatcher, watch
from bock.helpers import absolute_paths_to_articles_in, click_option_article_root
from bock.logger import log
from bock.search import (
create_search_index,
get_search_index,
update_index_incrementally,
)
def run(article_root: str) -> NoReturn:
search_index = get_search_index(article_root)
paths = absolute_paths_to_articles_in(article_root)
log.info(f"Started watching {article_root} for changes")
log.info(f"Found {len(paths)} articles in {article_root}")
if not search_index:
search_index = create_search_index(article_root)
update_index_incrementally(
article_root,
search_index,
paths,
)
for changes in watch(
article_root,
watcher_cls=RegExpWatcher,
watcher_kwargs=dict(re_files=r"^.*(\.md)$"),
):
update_index_incrementally(
article_root,
search_index,
absolute_paths_to_articles_in(article_root),
)
# This is for when this module is run by itself in production
@click.command()
@click_option_article_root
def main(article_root):
run(article_root.rstrip("/"))
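
# The comment above says this module can be run by itself in production, but no
# entry point follows it; a minimal sketch of that guard is added here. The
# project may instead expose `main` through a console-script entry point.
if __name__ == "__main__":
    main()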
|
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_restful import Resource
from flask import request, g
from v.tools.exception import ExceptionRest
from v.tools.v import processing_rest_exception, processing_rest_success, type_of_insert_rest, type_of_update_rest
from v.tools.validate import validate_rest
from v.project.model.projectTaskMdl import ProjectTaskMdl
class ProjectTaskListRst(Resource, ProjectTaskMdl):
def get(self):
try:
# owner
# by project id
_where = " WHERE deleted_at is null "
_by = request.args.get("by", False)
if _by:
if _by == 'project_id':
_project_id = request.args.get('project_id', False)
_where = _where + " and project_id=%s "% (_project_id, )
else:
_where = _where + " and create_id =%s " % (g.user.id,)
else:
_where = _where + " and create_id =%s " % (g.user.id, )
_completed = request.args.get("completed")
if _completed == 'True' or _completed == 'true':
_where = _where + " and completed_at is not null "
elif _completed == 'False' or _completed == 'false':
_where = _where + " and completed_at is null "
_qrg = self._query_get % _where
g.db_conn.execute(_qrg)
if g.db_conn.count() > 0:
_collection = g.db_conn.one()[0]
if _collection:
_data = {self._table: _collection}
_get = processing_rest_success(data=_data)
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
except (Exception, ExceptionRest), e:
_get = processing_rest_exception(e)
return _get
def post(self):
_request = request.json
try:
_errors = validate_rest(fields=self._fields, request=_request)
if not _errors:
_col, _val = type_of_insert_rest(self._fields, _request)
_qrp = """
INSERT INTO %s (create_id, %s) VALUES (%s, %s)
RETURNING (select row_to_json(collection) FROM (VALUES(id)) collection(id));
""" % (self._table, _col, g.user.id, _val)
g.db_conn.execute(_qrp)
if g.db_conn.count() > 0:
_data = {self._table: g.db_conn.one()}
_post = processing_rest_success(data=_data, message='Fue creada correctamente',
status_code=201)
else:
raise ExceptionRest(status_code=500, message='No se ha podido registrar.')
else:
raise ExceptionRest(status_code=400, errors=_errors)
except (Exception, ExceptionRest), e:
_post = processing_rest_exception(e)
return _post
class ProjectTaskRst(Resource, ProjectTaskMdl):
def get(self, id):
try:
_qrg = self._query_get + " WHERE deleted_at IS NULL and id = %s" % (id, )
g.db_conn.execute(_qrg)
if g.db_conn.count() > 0:
_collection = g.db_conn.one()[0]
if _collection:
_data = {self._table: _collection}
_get = processing_rest_success(data=_data)
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
else:
raise ExceptionRest(status_code=404, message="No se han encontrado resultados")
except (Exception, ExceptionRest), e:
_get = processing_rest_exception(e)
return _get
def put(self, id):
_request = request.json
try:
_errors = validate_rest(fields=self._fields, request=_request, method="put")
if not _errors:
_val = type_of_update_rest(self._fields, _request)
_qrp = "UPDATE %s SET %s WHERE id=%s;" % (self._table, _val, id,)
g.db_conn.execute(_qrp)
if g.db_conn.count() > 0:
_put = processing_rest_success(status_code=201, message="El registro fue actualizado correctamente")
else:
raise ExceptionRest(status_code=404,
message="No se ha podio encontrar el registro, para actualizar.")
else:
raise ExceptionRest(status_code=400, errors=_errors)
except (Exception, ExceptionRest), e:
_put = processing_rest_exception(e)
return _put
def delete(self, id):
try:
_qrd = "UPDATE %s SET deleted_at=current_timestamp WHERE id=%s;" % (self._table, id,)
g.db_conn.execute(_qrd)
if g.db_conn.count() > 0:
_delete = processing_rest_success(status_code=201, message="El registro fue eliminado correctamente")
else:
raise ExceptionRest(status_code=404,
message="No se ha podio encontrar el registro, para eliminar.")
except (Exception, ExceptionRest), e:
_delete = processing_rest_exception(e)
return _delete
|
#from django.conf import settings
#settings.DEBUG = True
from django.core.management import call_command
from testcases import (
TestServerTestCase,
get_client
)
class ExistsTestCase(TestServerTestCase):
def setUp(self):
self.start_test_server()
self.client = get_client()
call_command('loaddata', 'small_data.json')
def tearDown(self):
self.stop_test_server()
def test_exists1(self):
message = self.client.message.objects.exists()
self.assertTrue(message is True)
message = self.client.message.objects.all().exists()
self.assertTrue(message is True)
message = self.client.message.objects.filter(id=10000000)
exists = message.exists()
self.assertTrue(exists is False)
|
import torch
import torch.nn as nn
import numpy as np
from Layers.bottlenecks import LinearBottleneckLayer
class ReactionDotProduction(nn.Module):
    ''' Scaled dot-product attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, query, key, value):
'''
Arguments:
key {Tensor, shape [n_head * batch, d_features, n_depth_per_head]} -- expansion
query {Tensor, shape [n_head * batch, 1, n_depth_per_head]} -- depth
value {Tensor, shape [n_head * batch, 1, d_features]} -- value
Returns:
output {Tensor, shape [n_head * batch, 1, d_features]} -- output
attn {Tensor, shape [n_head * batch, 1, d_features]} -- reaction attention
'''
attn = torch.bmm(query, key.transpose(1, 2)) # [n_head * batch, 1, d_features]
# How should we set the temperature
attn = attn / self.temperature
attn = self.softmax(attn) # softmax over d_f1
attn = self.dropout(attn)
output = torch.mul(attn, value)
return output, attn
class ReactionAttentionLayerV1(nn.Module):
'''Reaction Attention'''
def __init__(self,expansion_layer, n_depth, d_features, d_meta, n_head, dropout,
use_bottleneck=True, d_bottleneck=None):
super().__init__()
self.d_features = d_features
self.d_meta = d_meta
self.n_head = n_head
self.n_depth = n_depth
self.use_bottleneck = use_bottleneck
self.expansion = expansion_layer(d_features=d_features, n_channel=n_head, n_depth=n_depth) # output [batch, d_features, n_channel * n_depth]
self.expansion.initialize_param(nn.init.xavier_normal_)
# query, value map
self.query = nn.Linear(d_meta, n_head * self.n_depth)
self.value = nn.Linear(d_features, n_head * d_features)
nn.init.xavier_normal_(self.query.weight)
nn.init.xavier_normal_(self.value.weight)
self.attention = ReactionDotProduction(temperature=np.power(self.n_depth, 0.5))
self.layer_norm = nn.LayerNorm(d_features)
self.fc = nn.Linear(n_head * d_features, d_features)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
if use_bottleneck:
self.bottleneck = LinearBottleneckLayer(d_features, d_bottleneck)
    def forward(self, features, meta):
        '''
        Arguments:
            features {Tensor, shape [batch, d_features]} -- feature part 1
            meta {Tensor, shape [batch, d_meta]} -- feature part 2, can be categorical data
        Returns:
            output {Tensor, shape [batch, d_features]} -- output
            attn {Tensor, shape [n_head * batch, 1, d_features]} -- self attention
        '''
d_features, d_meta, n_head, n_depth_per_head = self.d_features, self.d_meta, self.n_head, self.n_depth
batch_size, _ = features.size()
residual = features
query = self.query(meta).view(batch_size, 1, n_head, n_depth_per_head)
key = self.expansion(features).view(batch_size, n_depth_per_head, d_features, n_head) # [batch, n_depth, n_head, d_features]
value = self.value(features).view(batch_size, 1, n_head, d_features)
# value = feature_1.repeat(1, n_head).view(batch_size, 1, n_head, d_features)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, 1, n_depth_per_head)
key = key.permute(2, 0, 3, 1).contiguous().view(-1, d_features, n_depth_per_head)
value = value.permute(2, 0, 1, 3).contiguous().view(-1, 1, d_features)
output, attn = self.attention(query, key, value)
output = output.view(n_head, batch_size, 1, d_features)
output = output.permute(1, 2, 0, 3).contiguous().view(batch_size, -1)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
if self.use_bottleneck:
output = self.bottleneck(output)
return output, attn
|
from abc import ABCMeta, abstractmethod
from enum import Enum
class ActionType(Enum):
SHELL = 'shell'
RELEASE = 'release'
class Action(metaclass=ABCMeta):
@abstractmethod
def run(self, path: str, config=None, system_config=None, erlang_vsn: str = None) -> bool:
pass
@abstractmethod
def export(self) -> dict:
pass
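
# Illustrative sketch only (not part of this module): a minimal concrete Action
# showing how the abstract interface above is implemented. The behaviour of
# run()/export() here is a placeholder, not the project's real shell action.
class EchoAction(Action):
    def run(self, path: str, config=None, system_config=None, erlang_vsn: str = None) -> bool:
        # A real action would perform work under `path`; this sketch only reports it.
        print(f"echo action in {path} (erlang {erlang_vsn})")
        return True

    def export(self) -> dict:
        return {'type': ActionType.SHELL.value}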
|
c.JupyterHub.authenticator_class = 'ldapauthenticator.LDAPAuthenticator'
c.LDAPAuthenticator.server_address = '172.17.0.2'
c.LDAPAuthenticator.bind_dn_template = [
"uid={username},dc=example,dc=org"
]
c.JupyterHub.spawner_class = 'simplespawner.SimpleLocalProcessSpawner'
c.SimpleLocalProcessSpawner.args = ['--allow-root']
|
from http.server import BaseHTTPRequestHandler, HTTPServer
hostName = "localhost"
serverPort = 8080
class MyServer(BaseHTTPRequestHandler):
state = "Stopped"
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
self.send_response(200)
if(self.path == "/minecraft/start"):
if(MyServer.state == "Stopped"):
MyServer.state = "Starting"
elif(self.path == "/minecraft/stop"):
if(MyServer.state == "Running"):
MyServer.state = "Stopping"
else:
if(MyServer.state == "Starting"):
MyServer.state = "Running"
if(MyServer.state == "Stopping"):
MyServer.state = "Stopped"
if (self.path == "/minecraft/status"):
responseJson = '{"status" : "' + MyServer.state + '"}';
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(responseJson, "utf-8"));
else:
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(MyServer.state, "utf-8"));
if __name__ == "__main__":
webServer = HTTPServer((hostName, serverPort), MyServer)
print("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
print("Server stopped.")
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common exception types.
"""
__all__ = [
"ArgumentError",
"OperationError",
"NotSupportedError",
"SecurityError",
"PermissionError",
"TimeoutError",
"prepare_for_reraise",
"reraise",
]
import sys
class ArgumentError(RuntimeError):
"""An error related to one of the provided arguments."""
pass
class OperationError(RuntimeError):
"""Indicates the impossibility to perform the action."""
pass
class NotSupportedError(OperationError):
"""An attempt to use an unsupported feature."""
pass
class SecurityError(OperationError):
"""The action can't be performed due to security restrictions."""
pass
class PermissionError(SecurityError):
"""The action can't be performed because of lack of required permissions."""
pass
class TimeoutError(RuntimeError):
"""An error indicating that function was interrupted because of timeout."""
pass
def prepare_for_reraise(error, exc_info=None):
"""Prepares the exception for re-raising with reraise method.
This method attaches type and traceback info to the error object
so that reraise can properly reraise it using this info.
"""
if not hasattr(error, "_type_"):
if exc_info is None:
exc_info = sys.exc_info()
error._type_ = exc_info[0]
error._traceback = exc_info[2]
return error
__traceback_hide__ = True
def reraise(error):
"""Re-raises the error that was processed by prepare_for_reraise earlier."""
if hasattr(error, "_type_"):
raise error.with_traceback(error._traceback)
raise error
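
# Usage sketch (not part of the original module): capture an exception in one
# place with prepare_for_reraise and re-raise it later, with its original
# traceback, via reraise.
if __name__ == "__main__":
    captured = []
    try:
        raise ArgumentError("bad argument")
    except ArgumentError as error:
        captured.append(prepare_for_reraise(error))
    try:
        reraise(captured[0])
    except ArgumentError:
        # The re-raised exception carries the traceback of the original `raise`.
        pass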
|
import preprocessing as prep
def kldplot(ax, kld_data_loc, kld_max, countmin, countmax, rearr_start, rearr_period, rearr_idc = True):
kld_data = prep.data(kld_data_loc, 2)
x3 = list(map(lambda d: d[0], kld_data))
y3 = list(map(lambda d: abs(d[1]) , kld_data))
ax.plot(x3, y3)
ax.set_ylabel("KL divergence")
ax.set_xlabel("update count")
ax.set_xlim(countmin, countmax)
ax.set_ylim(0.0, kld_max)
if rearr_idc:
for i in range(rearr_start, countmax, rearr_period):
ax.axvline(i, color='r', linestyle=':', linewidth=1)
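
# Usage sketch (matplotlib assumed; 'kld.dat' and the axis limits below are
# placeholders, and prep.data is expected to return rows of (update_count, kld)):
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    kldplot(ax, 'kld.dat', kld_max=1.0, countmin=0, countmax=10000,
            rearr_start=1000, rearr_period=2000)
    plt.show()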
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the Mac OS X user plist plugin."""
# TODO: Only plists from Mac OS X 10.8 and 10.9 were tested. Look at other
# versions as well.
import binascii
from binplist import binplist
from dfvfs.file_io import fake_file_io
from dfvfs.path import fake_path_spec
from dfvfs.resolver import context
from xml.etree import ElementTree
from plaso.events import plist_event
from plaso.lib import timelib
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class MacUserPlugin(interface.PlistPlugin):
"""Basic plugin to extract timestamp Mac user information."""
NAME = 'plist_macuser'
DESCRIPTION = u'Parser for Mac OS X user plist files.'
# The PLIST_PATH is dynamic, "user".plist is the name of the
# Mac OS X user.
PLIST_KEYS = frozenset([
'name', 'uid', 'home',
'passwordpolicyoptions', 'ShadowHashData'])
_ROOT = u'/'
def Process(self, parser_context, plist_name=None, top_level=None, **kwargs):
"""Check if it is a valid Mac OS X system account plist file name.
Args:
parser_context: A parser context object (instance of ParserContext).
plist_name: name of the plist file.
top_level: dictionary with the plist file parsed.
"""
super(MacUserPlugin, self).Process(
parser_context, plist_name=self.PLIST_PATH, top_level=top_level,
**kwargs)
    # Generated events:
    # name: string with the system user.
    # uid: user ID.
    # passwordpolicyoptions: XML Plist structures with the timestamp.
    #   passwordLastSetTime: last time the password was changed.
    #   lastLoginTimestamp: last time the user was authenticated (*).
    #   failedLoginTimestamp: last time the user entered an incorrect password (*).
    #   failedLoginCount: number of incorrect password attempts.
    # (*): depending on the situation, these timestamps are reset (0 value).
    #      It is translated by the library as 2001-01-01 00:00:00 (Cocoa
    #      zero time representation). If this happens, the event is not yielded.
def GetEntries(self, parser_context, match=None, **unused_kwargs):
"""Extracts relevant user timestamp entries.
Args:
parser_context: A parser context object (instance of ParserContext).
match: Optional dictionary containing keys extracted from PLIST_KEYS.
The default is None.
"""
account = match['name'][0]
uid = match['uid'][0]
cocoa_zero = (
timelib.Timestamp.COCOA_TIME_TO_POSIX_BASE *
timelib.Timestamp.MICRO_SECONDS_PER_SECOND)
    # INFO: binplist returns a string with the Plist XML.
for policy in match['passwordpolicyoptions']:
xml_policy = ElementTree.fromstring(policy)
for dict_elements in xml_policy.iterfind('dict'):
key_values = [value.text for value in dict_elements.getchildren()]
policy_dict = dict(zip(key_values[0::2], key_values[1::2]))
if policy_dict.get('passwordLastSetTime', 0):
timestamp = timelib.Timestamp.FromTimeString(
policy_dict.get('passwordLastSetTime', '0'))
if timestamp > cocoa_zero:
            # Extract the hash password information.
            # It is stored in the ShadowHashData attribute, which holds binary
            # plist data; however, binplist only extracts one level of binary
            # plist, so it returns this information as a string.
# TODO: change this into a DataRange instead. For this we
# need the file offset and size of the ShadowHashData value data.
resolver_context = context.Context()
fake_file = fake_file_io.FakeFile(
resolver_context, match['ShadowHashData'][0])
fake_file.open(path_spec=fake_path_spec.FakePathSpec(
location=u'ShadowHashData'))
try:
plist_file = binplist.BinaryPlist(file_obj=fake_file)
top_level = plist_file.Parse()
except binplist.FormatError:
top_level = dict()
salted_hash = top_level.get('SALTED-SHA512-PBKDF2', None)
if salted_hash:
password_hash = u'$ml${0:d}${1:s}${2:s}'.format(
salted_hash['iterations'],
binascii.hexlify(salted_hash['salt']),
binascii.hexlify(salted_hash['entropy']))
else:
password_hash = u'N/A'
description = (
u'Last time {0:s} ({1!s}) changed the password: {2!s}').format(
account, uid, password_hash)
event_object = plist_event.PlistTimeEvent(
self._ROOT, u'passwordLastSetTime', timestamp, description)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
if policy_dict.get('lastLoginTimestamp', 0):
timestamp = timelib.Timestamp.FromTimeString(
policy_dict.get('lastLoginTimestamp', '0'))
description = u'Last login from {0:s} ({1!s})'.format(account, uid)
if timestamp > cocoa_zero:
event_object = plist_event.PlistTimeEvent(
self._ROOT, u'lastLoginTimestamp', timestamp, description)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
if policy_dict.get('failedLoginTimestamp', 0):
timestamp = timelib.Timestamp.FromTimeString(
policy_dict.get('failedLoginTimestamp', '0'))
description = (
u'Last failed login from {0:s} ({1!s}) ({2!s} times)').format(
account, uid, policy_dict['failedLoginCount'])
if timestamp > cocoa_zero:
event_object = plist_event.PlistTimeEvent(
self._ROOT, u'failedLoginTimestamp', timestamp, description)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
plist.PlistParser.RegisterPlugin(MacUserPlugin)
|
from collections import defaultdict
from statistics import mean
from record_helper import *
import vcfpy
def generate_sv_record(records, comparison_result, sample_names):
"""
This method generates a single SV record after a call has been made over a set of input records
:param records: the input records involved in the SV call
:param comparison_result:
:param sample_names:
:return:
"""
# Build a map to easily find the records by the sample name. It can be multi-valued
sample_names_to_records = group_by(records, lambda record: get_sample_name(record))
# Generate calls for each sample in this group
calls = [get_sample_call(sample_name, sample_names_to_records.get(sample_name, None))
for sample_name in sample_names]
first_record_of_the_group = records[0]
chrom = first_record_of_the_group.CHROM
id_of_new_record = generate_id(chrom, comparison_result.initial_position)
info = vcfpy.OrderedDict()
info["SVTYPE"] = comparison_result.svtype
info["END"] = comparison_result.final_position
if comparison_result.insseq is not None:
info["INSSEQ"] = comparison_result.insseq
return vcfpy.Record(
CHROM=chrom, # by construction, all the grouped records have the same
POS=comparison_result.initial_position, # by construction, all the grouped records have the same
ID=[id_of_new_record],
REF=first_record_of_the_group.REF, # by construction, all the grouped records have the same
ALT=[vcfpy.Substitution(type_=comparison_result.svtype, value='<{}>'.format(comparison_result.svtype))],
QUAL=maximum_qual(records),
FILTER=["PASS"],
INFO=info,
FORMAT=["GT", "TRANCHE2", "VAF"],
calls=calls)
def generate_non_sv_records(colocated_records, sample_names):
"""
This function processes records that have not been used to call a SV.
:param colocated_records:
:param sample_names:
:return:
"""
# The co-located records need to be re-grouped based not just on their true position (CHROM+POS) but also similarity
subgrouping_function = lambda record: (record.CHROM,
record.POS,
record.REF,
str(record.ALT),
record.INFO.get("END", None),
record.INFO.get("INSSEQ", None))
records_grouped_by_all_coordinates = group_by(colocated_records, key=subgrouping_function)
# Once the regrouping has happened, each group will generate exactly one line in the output. These lines
# may be produced out-of-order, but we don't care because we will sort them later before generating the VCF.
output = []
for subkey, group in records_grouped_by_all_coordinates.items():
# Build a map to easily find the records by the sample name
sample_names_to_record = group_by(group, get_sample_name)
# Generate calls for each sample in this group
calls = [get_sample_call(sample_name, sample_names_to_record.get(sample_name, []))
for sample_name in sample_names]
# Add a record to the output
first_record_of_the_group = group[0]
id_of_new_record = generate_id(first_record_of_the_group.CHROM, first_record_of_the_group.POS)
info = vcfpy.OrderedDict()
info["SVTYPE"] = "BND"
info["TRANCHE2"] = maximum_tranche(group)
info["BNDVAF"] = get_average_vaf(group)
if "END" in first_record_of_the_group.INFO:
# by construction, all the grouped records have the same
info["END"] = first_record_of_the_group.INFO["END"]
if "INSSEQ" in first_record_of_the_group.INFO:
# by construction, all the grouped records have the same
info["INSSEQ"] = first_record_of_the_group.INFO["INSSEQ"]
output.append(vcfpy.Record(
CHROM=first_record_of_the_group.CHROM, # by construction, all the grouped records have the same
POS=first_record_of_the_group.POS, # by construction, all the grouped records have the same
ID=[id_of_new_record],
REF=first_record_of_the_group.REF, # by construction, all the grouped records have the same
ALT=first_record_of_the_group.ALT, # by construction, all the grouped records have the same
QUAL=maximum_qual(group),
FILTER=["PASS"],
INFO=info,
FORMAT=["GT", "TRANCHE2", "VAF"],
calls=calls))
return output
def group_by(iterable, key):
result = defaultdict(list)
for item in iterable:
result[key(item)].append(item)
return result
def get_gt(original_bndvat):
if original_bndvat > 0.85:
return "1/1"
elif original_bndvat < 0.15:
return "0/0"
else:
return "0/1"
def maximum_qual(records):
return max([record.QUAL for record in records if record.QUAL is not None], default=None)
def maximum_tranche(records):
tranches = set([get_tranche_2(record) for record in records])
if "HIGH" in tranches:
return "HIGH"
elif "INTERMEDIATE" in tranches:
return "INTERMEDIATE"
elif "LOW" in tranches:
return "LOW"
else:
return None
def get_sample_call(sample_name, records):
"""
    This function generates the Call for a single sample at a given location, given a single record, multiple records or no record at all
:param sample_name:
:param records:
:return:
"""
call_data = vcfpy.OrderedDict.fromkeys(["GT", "TRANCHE2", "VAF"])
if records:
average_vaf = get_average_vaf(records)
call_data["GT"] = get_gt(average_vaf)
call_data["TRANCHE2"] = maximum_tranche(records)
call_data["VAF"] = average_vaf
return vcfpy.Call(sample=sample_name, data=call_data)
def get_average_vaf(records):
return mean([float(record.INFO["BNDVAF"]) for record in records])
def generate_id(chrom, pos):
return chrom + "_" + str(pos)
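
# Sanity sketch for the small helpers above (synthetic values only, no VCF
# records involved; the 0.85 / 0.15 thresholds come from get_gt):
if __name__ == '__main__':
    grouped = group_by(["chr1_100", "chr1_200", "chr2_50"], key=lambda s: s.split("_")[0])
    assert grouped["chr1"] == ["chr1_100", "chr1_200"]
    assert get_gt(0.9) == "1/1"
    assert get_gt(0.5) == "0/1"
    assert get_gt(0.1) == "0/0"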
|
#!/usr/bin/env python
"""
Example for commanding robot without moveit
"""
import sys
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import JointState
from trac_ik_python.trac_ik import IK
GRIPPER_LINK = "gripper_link"
ARM_BASE_LINK = "arm_base_link"
MOVE_GROUP_NAME = 'arm'
ROSTOPIC_SET_ARM_JOINT = '/goal_dynamixel_position'
IK_POSITION_TOLERANCE = 0.01
IK_ORIENTATION_TOLERANCE = np.pi
def home_arm(pub):
rospy.loginfo('Going to arm home pose')
set_arm_joint(pub, np.zeros(5))
rospy.sleep(5)
def compute_ik(ik_solver, target_pose, current_joint):
"""
Parameters
----------
ik_solver: trac_ik_python.trac_ik Ik object
target_pose: type geometry_msgs/Pose
current_joint: list with length the number of joints (i.e. 5)
Returns
----------
IK solution (a list of joint angles for target_pose)
if found, None otherwise
"""
result = ik_solver.get_ik(current_joint,
target_pose.position.x,
target_pose.position.y,
target_pose.position.z,
target_pose.orientation.x,
target_pose.orientation.y,
target_pose.orientation.z,
target_pose.orientation.w,
IK_POSITION_TOLERANCE,
IK_POSITION_TOLERANCE,
IK_POSITION_TOLERANCE,
IK_ORIENTATION_TOLERANCE,
IK_ORIENTATION_TOLERANCE,
IK_ORIENTATION_TOLERANCE)
if result:
rospy.loginfo('IK solution found')
else:
rospy.logerr('No IK solution found')
return result
def set_arm_joint(pub, joint_target):
joint_state = JointState()
joint_state.position = tuple(joint_target)
pub.publish(joint_state)
def main():
rospy.init_node('position_control_example', anonymous=True)
target_joints = [
[0.408, 0.721, -0.471, -1.4, 0.920],
[-0.675, 0, 0.23, 1, -0.70]
]
target_poses = [Pose(Point(0.279, 0.176, 0.217),
Quaternion(-0.135, 0.350, 0.329, 0.866)),
Pose(Point(0.339, 0.0116, 0.255),
Quaternion(0.245, 0.613, -0.202, 0.723))]
# ik_solver = IK(ARM_BASE_LINK, GRIPPER_LINK)
pub = rospy.Publisher(ROSTOPIC_SET_ARM_JOINT,
JointState, queue_size=1)
rospy.sleep(2)
home_arm(pub)
for joint in target_joints:
set_arm_joint(pub, joint)
rospy.sleep(2)
# for pose in target_poses:
# rospy.loginfo('Commanding arm to pose {}'.format(pose))
# # target_joint = compute_ik(
# # ik_solver, pose, group.get_current_joint_values())
# if target_joint:
# set_arm_joint(pub, target_joint)
# rospy.sleep(5)
home_arm(pub)
if __name__ == "__main__":
main()
|
# Generated by Django 2.2.24 on 2021-11-30 19:06
from django.db import migrations
# The SQL commands in this migration file create a function to update
# the re-use flags on cwts if the cwt has been reused for different
# species, year classes, or strains, in different lakes, or by different
# agencies. The first statement creates the function and associates it
# with a trigger. The reverse block removes those elements in reverse
# order.
class Migration(migrations.Migration):
dependencies = [
("stocking", "0018_fill_colorfields"),
]
operations = [
migrations.RunSQL(
"""
CREATE FUNCTION update_reused_cwt_flags_trigger_fct () RETURNS TRIGGER LANGUAGE PLPGSQL
AS
' BEGIN
UPDATE common_cwt
SET multiple_makers = CASE
WHEN cwt_number IN (SELECT DISTINCT mm.cwt_number
FROM (SELECT DISTINCT cwt_number,
manufacturer
FROM common_cwt
WHERE manufacturer = ''mm'') AS mm
JOIN common_cwt AS cwt ON cwt.cwt_number = mm.cwt_number
WHERE cwt.manufacturer = ''nmt'') THEN TRUE
ELSE FALSE
END,
multiple_strains = CASE
WHEN cwt_number IN (SELECT cwt_number
--, common_name,
-- COUNT(strain_label) AS strains
FROM (SELECT DISTINCT cwt_number,
species.common_name,
strain.strain_label
FROM stocking_stockingevent AS event
JOIN common_cwtsequence_events ON common_cwtsequence_events.stockingevent_id = event.id
JOIN common_cwtsequence ON common_cwtsequence_events.cwtsequence_id = common_cwtsequence.id
JOIN common_cwt AS cwt ON cwt.id = common_cwtsequence.cwt_id
JOIN common_species AS species ON species.id = event.species_id
JOIN common_strainraw AS strainraw ON strainraw.id = event.strain_raw_id
JOIN common_strain AS strain ON strain.id = strainraw.strain_id) AS tmp
GROUP BY cwt_number,
common_name
HAVING COUNT(cwt_number) > 1) THEN TRUE
ELSE FALSE
END,
multiple_agencies = CASE
WHEN cwt_number IN (SELECT cwt_number
-- ,COUNT(agency_id) AS Agencies
FROM (SELECT DISTINCT cwt_number,
events.agency_id
FROM stocking_stockingevent AS events
JOIN common_cwtsequence_events ON common_cwtsequence_events.stockingevent_id = events.id
JOIN common_cwtsequence ON common_cwtsequence_events.cwtsequence_id = common_cwtsequence.id
JOIN common_cwt AS cwt ON cwt.id = common_cwtsequence.cwt_id) AS tmp
GROUP BY cwt_number
HAVING COUNT(cwt_number) > 1) THEN TRUE
ELSE FALSE
END,
multiple_species = CASE
WHEN cwt_number IN (SELECT cwt_number
--,COUNT(species_id) AS Species
FROM (SELECT DISTINCT cwt_number,
events.species_id
FROM stocking_stockingevent AS events
JOIN common_cwtsequence_events ON common_cwtsequence_events.stockingevent_id = events.id
JOIN common_cwtsequence ON common_cwtsequence_events.cwtsequence_id = common_cwtsequence.id
JOIN common_cwt AS cwt ON cwt.id = common_cwtsequence.cwt_id) AS tmp
GROUP BY cwt_number
HAVING COUNT(species_id) > 1) THEN TRUE
ELSE FALSE
END,
multiple_lakes = CASE
WHEN cwt_number IN (SELECT cwt_number
--,COUNT(lake_id) AS Lakesb
FROM (SELECT DISTINCT cwt_number,
jurisdiction.lake_id
FROM stocking_stockingevent AS events
JOIN common_jurisdiction AS jurisdiction ON jurisdiction.id = events.jurisdiction_id
JOIN common_cwtsequence_events ON common_cwtsequence_events.stockingevent_id = events.id
JOIN common_cwtsequence ON common_cwtsequence_events.cwtsequence_id = common_cwtsequence.id
JOIN common_cwt AS cwt ON cwt.id = common_cwtsequence.cwt_id) AS tmp
GROUP BY cwt_number
HAVING COUNT(lake_id) > 1) THEN TRUE
ELSE FALSE
END,
multiple_yearclasses = CASE
WHEN cwt_number IN (SELECT cwt_number
-- ,COUNT(year_class) AS YearClasses
FROM (SELECT DISTINCT cwt_number,
events.species_id,
events.year_class
FROM stocking_stockingevent AS events
JOIN common_cwtsequence_events ON common_cwtsequence_events.stockingevent_id = events.id
JOIN common_cwtsequence ON common_cwtsequence_events.cwtsequence_id = common_cwtsequence.id
JOIN common_cwt AS cwt ON cwt.id = common_cwtsequence.cwt_id) AS tmp
GROUP BY cwt_number,
species_id
HAVING COUNT(cwt_number) > 1) THEN TRUE
ELSE FALSE
END;
UPDATE common_cwt
SET tag_reused = case when
cwt_number IN (SELECT distinct cwt_number
FROM common_cwt
WHERE multiple_species = TRUE
OR multiple_strains = TRUE
OR multiple_yearclasses = TRUE
OR multiple_makers = TRUE
OR multiple_agencies = TRUE
OR multiple_lakes = TRUE)
THEN TRUE
ELSE FALSE
END;
return NEW;
END;
';
CREATE TRIGGER update_reused_cwt_flags_trigger
AFTER INSERT OR UPDATE OR DELETE ON stocking_stockingevent
FOR EACH STATEMENT
EXECUTE PROCEDURE update_reused_cwt_flags_trigger_fct();
""",
"""
DROP TRIGGER update_reused_cwt_flags_trigger on stocking_stockingevent;
DROP FUNCTION update_reused_cwt_flags_trigger_fct;
""",
)
]
|
"""
Tests for pecoregex.pcre.
"""
import re
from ctypes import CDLL
import pytest
from pecoregex import pcre
# pylint: disable=C0111
def test_nametable_entry():
entry = bytes((1, 0)) + b'hello' + bytes(28)
index, name = pcre.nametable_entry(entry)
assert index == 256
assert name == 'hello'
def test_pcre_lib_constructor():
lib = pcre.PCRELibrary(ovector_size=40, soname='libpcre.so.1', encode='ascii')
assert lib.ovector_size == 40
assert lib.encode == 'ascii'
assert lib.decode == 'utf-8'
assert lib.shared_object_name == 'libpcre.so.1'
def test_pcre_lib_get_lib():
lib = pcre.PCRELibrary().get_lib()
assert isinstance(lib, CDLL)
def test_pcre_lib_version():
version = pcre.PCRELibrary().version()
print(version)
assert isinstance(version, str)
assert re.match(r'''
^
\d+\.\d+ # version itself
\s+ # whitespace
\d{4}-\d\d-\d\d # date
$
''', version, re.VERBOSE)
def test_pcre_lib_config():
lib = pcre.PCRELibrary()
exceptions = (pcre.PCRE_CONFIG_UTF16, pcre.PCRE_CONFIG_UTF32)
for i in exceptions:
with pytest.raises(pcre.PCREErrorBadOption):
lib.config(i)
for i in range(13):
if i in exceptions:
continue
value = lib.config(i)
assert isinstance(value, pcre.CONFIG_OUTPUT_TYPE[i])
# Not testing pcre.PCRE_CONFIG_PARENS_LIMIT (13) as it may not be present everywhere
def test_pcre_lib_caseless():
lib = pcre.PCRELibrary()
assert isinstance(lib.supports_caseless_utf8(), bool)
def test_pcre_lib_compile():
lib = pcre.PCRELibrary()
# Simple pattern:
assert lib.compile('^hello') is not None
# Simple pattern with legal options:
assert lib.compile('hello', pcre.PCRE_ANCHORED|pcre.PCRE_CASELESS) is not None
# Incorrect patterns:
for pattern in ('^hello(', '^(?<namé>hello)'):
with pytest.raises(pcre.PCRECompileException):
lib.compile(pattern)
# Correct pattern with conflicting options:
with pytest.raises(pcre.PCRECompileException):
lib.compile('hello', pcre.PCRE_UTF8|pcre.PCRE_NEVER_UTF)
def test_pcre_lib_exec():
lib = pcre.PCRELibrary()
# Simple pattern:
pcre_code = lib.compile(r'^(?i)hello')
assert lib.exec(pcre_code, 'Hello!')
assert lib.exec(pcre_code, 'Oh, hello!') is False
# Simple pattern with options:
pcre_code = lib.compile(r'hello', pcre.PCRE_CASELESS|pcre.PCRE_ANCHORED)
assert lib.exec(pcre_code, 'Hello!')
assert lib.exec(pcre_code, 'Oh, hello!') is False
# Captures:
pcre_code = lib.compile(r'It is raining (?<rain1>\S+) and (?<rain2>\S+)')
captures = lib.exec(pcre_code, 'It is raining cats and dogs')
assert captures
assert captures['by_index'][0] == 'It is raining cats and dogs'
assert captures['by_index'][1] == 'cats'
assert captures['by_index'][2] == 'dogs'
assert captures['by_name']['rain1'] == 'cats'
assert captures['by_name']['rain2'] == 'dogs'
# UTF-8 matching:
if lib.supports_caseless_utf8():
pcre_code = lib.compile(r'éléphant', pcre.PCRE_UTF8|pcre.PCRE_CASELESS|pcre.PCRE_ANCHORED)
assert lib.exec(pcre_code, 'Éléphant!')
assert lib.exec(pcre_code, 'Oh, éléphant!') is False
def test_pcre_lib_exec_data_unit():
lib = pcre.PCRELibrary(encode='utf-8', decode='iso-8859-15')
pcre_code = lib.compile(r'\C(\Cl\C)\Cphant', pcre.PCRE_UTF8|pcre.PCRE_CASELESS)
captures = lib.exec(pcre_code, 'éléphant')
assert captures
assert captures['by_index'][1] == '©lÃ'
captures = lib.exec(pcre_code, 'ÉLÉPHANT')
assert captures
assert captures['by_index'][1] == '\x89LÃ'
lib.free(pcre_code)
assert True # still alive / no segfault
def test_pcre_lib_info_nametable():
lib = pcre.PCRELibrary()
pcre_code = lib.compile(r'''(?x)
^
(?<all>
(?<dotdotdot>
\.\.\.
)
(?<dashdashdash>
---
)
(?<dotdotdotagain>
\.\.\.
)
)
$
''')
expected_entries = 4
expected_entry_size = 2 + len('dotdotdotagain\x00')
expected_table_size = expected_entries * expected_entry_size
assert lib.info_namecount(pcre_code) == expected_entries
assert lib.info_nameentrysize(pcre_code) == expected_entry_size
nametable = lib.info_nametable_data(pcre_code)
nametable_bytes = nametable[0:expected_table_size]
assert len(nametable_bytes) == expected_table_size # still alive
nametable = lib.nametable(pcre_code)
assert nametable
assert len(nametable) == expected_entries
ordered_nametable = lib.ordered_nametable(pcre_code)
assert ordered_nametable == [None, 'all', 'dotdotdot', 'dashdashdash', 'dotdotdotagain']
def test_pcre_lib_match():
res = pcre.PCRELibrary().match(r'''
/(?<prefix>[^/]+)
/(?<action>[^/]+)
/(?<value>.+)
''', '/just/do/it', pcre.PCRE_EXTENDED, pcre.PCRE_ANCHORED)
assert res
assert res['by_index'] == ['/just/do/it', 'just', 'do', 'it']
assert res['by_name'] == {'action': 'do', 'prefix': 'just', 'value': 'it'}
def test_alternative_match():
lib = pcre.PCRELibrary()
alt_match_re = r'''
^(?:
(?<name1>abc)
(?<name2>def)
|
(?<name3>ghi)
(?<name4>jkl)
)$
'''
pcre_code = lib.compile(alt_match_re, pcre.PCRE_EXTENDED)
res = lib.exec(pcre_code, 'abcdef')
assert res
assert res['by_index'] == ['abcdef', 'abc', 'def']
assert res['by_name'] == {'name1': 'abc', 'name2': 'def', 'name3': None, 'name4': None}
res = lib.exec(pcre_code, 'ghijkl')
assert res
assert res['by_index'] == ['ghijkl', '', '', 'ghi', 'jkl']
assert res['by_name'] == {'name1': '', 'name2': '', 'name3': 'ghi', 'name4': 'jkl'}
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : kill.py
@Contact : 379327479@qq.com
@License : MIT
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2021/10/26 9:49 zxd 1.0 None
"""
import uiautomator2 as u2
from uiautomator2 import Device
from app.com_ss_android_ugc_aweme import constant
from utils import device_util
def main(device: Device, params: str):
device_util.stop_app(device, constant.APP_PACKAGE)
if __name__ == '__main__':
main(u2.connect(), None)
|
from itertools import combinations
from solvatore import Solvatore
from cipher_description import CipherDescription
from ciphers import present
cipher = present.present
rounds = 9
solver = Solvatore()
solver.load_cipher(cipher)
solver.set_rounds(rounds)
# Loop over all combinations with one non-active bit
for bits in combinations(range(64), 1):
constant_bits = bits
active_bits = {i for i in range(64) if i not in constant_bits}
# Find all balanced bits
balanced_bits = []
for i in range(cipher.state_size):
if solver.is_bit_balanced(i, rounds, active_bits):
balanced_bits.append(i)
if len(balanced_bits) > 0:
print("Found distinguisher!")
print("Constant Bits: ", len(constant_bits),constant_bits)
print("Balanced Bits: ", len(balanced_bits),balanced_bits)
|
import numpy as np
import talib
import math
class Algorithm:
# @staticmethod
# def lm_kdj(df, n,ksgn='close'):
# lowList= pd.rolling_min(df['low'], n)
# lowList.fillna(value=pd.expanding_min(df['low']), inplace=True)
# highList = pd.rolling_max(df['high'], n)
# highList.fillna(value=pd.expanding_max(df['high']), inplace=True)
# rsv = (df[ksgn] - lowList) / (highList - lowList) * 100
# df['kdj_k'] = pd.ewma(rsv,com=2)
# df['kdj_d'] = pd.ewma(df['kdj_k'],com=2)
# df['kdj_j'] = 3.0 * df['kdj_k'] - 2.0 * df['kdj_d']
# #print('n df',len(df))
# return df
@staticmethod
def kdj(high_array, low_array, close_array, fastk_period=9, slowk_period=3, slowd_period=3):
        # Compute the KD indicator
# high_prices = np.array([v['high'] for v in data])
# low_prices = np.array([v['low'] for v in data])
# close_prices = np.array([v['close'] for v in data])
max_close = talib.MAX(high_array, timeperiod=fastk_period)
min_close = talib.MIN(low_array, timeperiod=fastk_period)
for k in range(len(low_array)):
if k<fastk_period and k>1:
aaa = talib.MIN(low_array,timeperiod=k)
bbb = talib.MAX(high_array,timeperiod=k)
min_close[k]= aaa[k]
max_close[k]= bbb[k]
elif k==1 or k==0:
min_close[k]=low_array[k]
max_close[k]=high_array[k]
# rsv = maxmin(data, fastk_period)
diff = max_close - min_close
diff[diff == 0] = 1
# diff = 1 if diff == 0 else diff
fast_k = (close_array - min_close)/diff *100
ppp = max_close - min_close
for t in range(len(close_array)):
if max_close[t] == min_close[t]:
fast_k[t] = 0
slow_k1 = np.full_like(close_array,50)
slow_d1 = np.full_like(close_array,50)
for k in range(1,len(fast_k)):
slow_k1[k] = slow_k1[k-1]*2/3+fast_k[k]/3
slow_d1[k] = slow_d1[k-1]*2/3+slow_k1[k]/3
indicators= {
'rsv':fast_k,
'max':max_close,
'min':min_close,
'k': slow_k1,
'd': slow_d1,
'j': 3 * slow_k1 - 2 * slow_d1
}
return indicators
@staticmethod
def wave(data, window = 0.0003):
if len(data) <= 0:
return
# r = array[::-1]
v_list = []
p_list = []
r = data
l = len(data) - 1
now = r[0]
# v_list.append(now)
# p_list.append(0)
pos = 1
vol = 0
u_tag = None
d_tag = None
end_tag = None
start_pos = 0
while pos < l:
if math.isnan(now):
now = r[pos]
pos += 1
continue
else:
start_pos = pos - 1
break
while pos < l:
if now < r[pos]:
u_tag = pos
if d_tag:
diff = r[start_pos] - r[d_tag]
if abs(diff / r[start_pos]) > window:
end_tag = d_tag
elif now > r[pos]:
d_tag = pos
if u_tag:
diff = r[start_pos] - r[u_tag]
if abs(diff / r[start_pos]) > window:
end_tag = u_tag
if not end_tag is None:
# print("point = {},start = {}, end = {}, vol = {:.2%}".format(
# r[end_tag],start_pos, end_tag, vol/r[start_pos]))
start_pos = end_tag
v_list.append(r[end_tag])
p_list.append(end_tag)
end_tag = None
vol += r[pos] - now
now = r[pos]
pos += 1
# print(v_list)
# print(p_list)
return v_list, p_list
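
# Usage sketch with synthetic prices (values are made up; a real caller would
# pass OHLC arrays taken from market data). It relies on the numpy and talib
# imports already used above.
if __name__ == '__main__':
    closes = 10.0 + np.sin(np.linspace(0.0, 6.0, 60))
    highs = closes + 0.5
    lows = closes - 0.5
    ind = Algorithm.kdj(highs, lows, closes)
    print('last K/D/J:', ind['k'][-1], ind['d'][-1], ind['j'][-1])
    points, positions = Algorithm.wave(closes, window=0.01)
    print('turning points found:', len(points))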
|
# -*- coding: utf-8 -*-
'''
Created on 2019-03-02
@author: Zhukun Luo
Jiangxi university of finance and economics
'''
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SparkSession
conf = SparkConf().setAppName("Spark App").setMaster("local[4]")  # run locally on 4 threads
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF,Tokenizer
# Prepare training documents from a list of (id, text, label) tuples.
training = spark.createDataFrame([
    (0, "a b c d e spark", 1.0),
    (1, "b d", 0.0),
    (2, "spark f g h", 1.0),
    (3, "hadoop mapreduce", 0.0)], ["id", "text", "label"])
# Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
tokenizer =Tokenizer(inputCol="text", outputCol="words")
hashingTF =HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr =LogisticRegression(maxIter=10, regParam=0.01)
pipeline =Pipeline(stages=[tokenizer, hashingTF, lr])
# Fit the pipeline to the training documents.
model = pipeline.fit(training)
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame([
    (4, "spark i j k"),
    (5, "l m n"),
    (6, "mapreduce spark"),
    (7, "apache hadoop")], ["id", "text"])
# Make predictions on the test documents and print the columns of interest.
prediction = model.transform(test)
selected = prediction.select("id","text","prediction")
for row in selected.collect():
print(row)
|
from tkinter import *
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import wikipedia
import pyjokes
import requests
import bs4
import threading
import os
#threading
def threading1():
t1= threading.Thread (target = run_tom)
t1.start()
listener = sr.Recognizer()
engine = pyttsx3.init()
#exit function
def stop():
os._exit(0)
def talk(text):
engine.say(text)
engine.runAndWait()
def take_command():
global dis
global label
print('welcome')
    engine.say('Hey I am Tom, you can ask me anything. But if you want to quit, say STOP')
    engine.say('What can I do for you?')
engine.runAndWait()
try:
with sr.Microphone(device_index=0) as source:
label = Label(dis, text = " Listening..........").place(x = 40, y = 60)
voice = listener.listen(source,timeout=1, phrase_time_limit=5)
command = listener.recognize_google(voice)
command=command.lower()
label = Label(dis, text = " ").place(x = 40, y = 60)
except:
print("Say that again please...")
return "None"
return command
def run_tom():
command = take_command()
print("I got It")
print(command)
if 'play' in command:
song = command.replace('play','')
talk('playing'+ song)
pywhatkit.playonyt(song)
elif 'time' in command:
time = datetime.datetime.now().strftime('%H:%M %p')
talk('Current time is'+ time)
print(time)
elif 'who is' in command:
preson=command.replace('who is','')
info = wikipedia.summary(preson,2)
print(info)
talk(info)
pywhatkit.search(preson)
talk('opening browser')
text= preson
url = 'https://google.com/search?q=' + text
        request_result = requests.get(url)
soup = bs4.BeautifulSoup(request_result.text, 'lxml')
des = soup.get_text()
talk(des[230:700])
elif 'what is' in command:
product=command.replace('what is','')
info = wikipedia.summary(product,2)
print(info)
talk(info)
pywhatkit.search(product)
talk('opening browser')
text= product
url = 'https://google.com/search?q=' + text
        request_result = requests.get(url)
soup = bs4.BeautifulSoup(request_result.text, 'lxml')
des = soup.get_text()
talk(des[230:700])
elif 'what are' in command:
product=command.replace('what are','')
info = wikipedia.summary(product,2)
print(info)
talk(info)
pywhatkit.search(product)
talk('opening browser')
text= product
url = 'https://google.com/search?q=' + text
request_result=requests.get( url )
soup = bs4.BeautifulSoup(request_result.text, 'lxml')
des = soup.get_text()
talk(des[230:500])
elif 'who are you' in command:
talk('I am TOM version 1.2 made by suibhsai with python')
        talk("I am sad that you don't know anything about me")
        talk('but I know who you are.')
elif 'who i am' in command:
talk('haha, Homo sapiens LOL')
elif 'who am i' in command:
talk('haha, Homo sapiens LOL')
elif 'joke' in command:
talk(pyjokes.get_joke())
print('ha ha')
elif 'stop' in command:
exit()
else:
user_name = Label(dis, text = "Please try again").place(x = 40, y = 60)
talk('sorry, i could not understand')
talk('please try again')
label = Label(dis, text = " ").place(x = 40, y = 60)
dis = Tk()
dis.title("TOM_Version_1.1")
label = Label(dis, text="Hey, I am Your Assistant!", font=('Helvetica', 20)).pack()
dis.geometry("550x600")
dis.maxsize(550,600)
bg = PhotoImage(file = "tom.png")
log = PhotoImage(file = "logo.png")
con = PhotoImage(file = "con2.png")
cont = PhotoImage(file = "con3.png")
photoimage = con.subsample(10, 10)
photoimage2 = cont.subsample(10, 10)
can1 = Canvas( dis, width = 550, height= 600)
can1.pack(fill = "both", expand = True)
can1.create_image( 0, 0, image = bg,anchor = "nw")
can1.create_image( 180, 10, image = log,anchor = "nw")
button1 = Button( dis, text = "ASK ME ", font = ('Verdana', 15),image = photoimage, bg = 'grey' ,compound = LEFT, command = threading1)
button1_canvas = can1.create_window( 230, 400, anchor = "nw", window = button1)
button2 = Button( dis, text = "Quit!", font = ('Verdana', 15),image = photoimage2, bg = 'grey' ,compound = LEFT, command = stop)
button2_canvas = can1.create_window( 247, 470, anchor = "nw", window = button2)
dis.mainloop()  # make the GUI interactive
|
#!/usr/bin/env python3
import datetime
import os
import shutil
import subprocess
import stat
def get_real_path(path):
return os.path.expandvars(os.path.expanduser(path))
def get_install_dir():
install_dir = os.environ.get('INSTALL_DIR')
if not install_dir:
install_dir = os.path.dirname(os.path.abspath(__file__)) + "/.."
return install_dir + "/"
def run_command(cmd, echo = 'on_error'):
echo_values = ['yes', 'no', 'on_error']
if echo not in echo_values:
raise ValueError("Invalid echo option. Expected one of %s" % echo_values)
    result = subprocess.run(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
ok = result.returncode == 0
msg = [s for s in result.stdout.split('\n') if s]
if ((echo == 'yes') or ((not ok) and (echo != 'no'))):
if not ok: print ("*** ERROR ***")
for line in msg:
print(line)
if not ok: print ("*** ERROR ***")
return [ok, ' '.join(msg)]
def command_is_available(command):
ok, _ = run_command('which ' + command, echo='no')
return ok
def run_local_script(script_name):
return run_command(get_install_dir() + "install_utils/" + script_name)
def backup_config(file_name, remove = True):
if not os.path.exists(file_name):
return
backup_dir = get_real_path("~/.backup_config")
if not os.path.isdir(backup_dir):
os.mkdir(backup_dir)
new_name = backup_dir + "/" + os.path.basename(file_name) + "_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
if os.path.islink(file_name):
shutil.copy(file_name, new_name)
if remove:
os.remove(file_name)
else:
shutil.move(file_name, new_name)
if not remove:
shutil.copy(new_name, file_name)
def files_are_different(file_a, file_b):
ok, msg = run_command("diff " + file_a + " " + file_b, echo = 'no')
return ((not ok) or (msg != ""))
def set_config_file(src_file, dst_file, link = True):
if not files_are_different(src_file, dst_file):
return
backup_config(dst_file)
if link:
os.symlink(src_file, dst_file)
else:
        with open(src_file, 'r') as src, open(dst_file, "w+") as dst:
            for line in src.readlines():
                dst.write(line)  # readlines() already keeps the trailing newline
def append_config_if(dst_file, text, function):
if not os.path.isfile(dst_file):
return [False, False]
if not function(dst_file):
return [True, False]
backup_config(dst_file, remove = False)
with open(dst_file, "a") as f:
for line in text:
f.write(line + "\n")
return [True, True]
def create_exec_file(file_name, contents, overwrite = True):
bin_dir = get_real_path("~/bin")
if not os.path.isdir(bin_dir):
os.mkdir(bin_dir)
file_path = bin_dir + "/" + file_name
if os.path.isfile(file_path):
if overwrite:
os.remove(file_path)
else:
return
with open(file_path, "w") as f:
for line in contents:
f.write(line + "\n")
st = os.stat(file_path)
os.chmod(file_path, st.st_mode | stat.S_IEXEC)
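# Minimal usage sketch (the file names below are assumptions, not part of this
# script): symlink a repo-provided config into the home directory, letting
# set_config_file() back up any existing file first via backup_config().
#   set_config_file(get_install_dir() + "install_utils/example.conf",
#                   get_real_path("~/.example.conf"))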
|
class InvalidSubscription(Exception):
pass
|
"""
flowRecorder system tests
"""
# Handle tests being in different directory branch to app code:
import sys
import struct
# For file handling:
import os
import csv
# For system calls to run commands:
import subprocess
# flowRecorder imports:
import config
# test packet imports:
import http1 as groundtruth_http1
import groundtruth_PING1
import groundtruth_tcp_syn_only
import groundtruth_tcp_syn_flow_expiration
sys.path.insert(0, '../flowRecorder')
import logging
# Instantiate Config class:
config = config.Config()
logger = logging.getLogger(__name__)
PYTHON2 = 'python2'
PYTHON3 = 'python3'
FLOWRECORDER = "../flowRecorder/flowRecorder.py"
TEST_PCAP_HTTP1 = 'packet_captures/http1.pcap'
TEST_PCAP_PING1 = 'packet_captures/PING1.pcap'
TEST_PCAP_TCP_SYN_ONLY = 'packet_captures/tcp_syn_only.pcap'
TEST_PCAP_TCP_SYN_FLOW_EXPIRATION = 'packet_captures/tcp_syn_flow_expiration.pcap'
RESULT_FILE = 'temp/temp_test_output.csv'
UNIDIR = 'u'
BIDIR = 'b'
# MARGIN is used to allow for small differences in results due to
# use of float type, rounding etc. Applies on both sides of result:
MARGIN = 0.0001
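# Illustrative helper (a sketch only; the assertions below spell this pattern
# out inline rather than calling a helper):
def within_margin(actual, expected):
    """Return True if actual is within MARGIN of expected on either side."""
    return abs(float(actual) - float(expected)) < MARGIN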
#======================== flowRecorder System Tests ============================
def test_http1_unidir():
"""
Test output for unidirectional processing of http1.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_HTTP1,
"-d", UNIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_unidir(RESULT_FILE, groundtruth_http1, 2)
def test_http1_bidir():
"""
Test output for bidirectional processing of http1.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_HTTP1,
"-d", BIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_bidir(RESULT_FILE, groundtruth_http1, 1)
def test_PING1_unidir():
"""
Test output for unidirectional processing of PING1.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_PING1,
"-d", UNIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_unidir(RESULT_FILE, groundtruth_PING1, 2)
def test_PING1_bidir():
"""
Test output for bidirectional processing of PING1.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_PING1,
"-d", BIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_bidir(RESULT_FILE, groundtruth_PING1, 1)
def test_tcp_syn_only_unidir():
"""
Test output for unidirectional processing of tcp_syn_only.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_TCP_SYN_ONLY,
"-d", UNIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_unidir(RESULT_FILE, groundtruth_tcp_syn_only, 1)
def test_tcp_syn_only_bidir():
"""
Test output for bidirectional processing of tcp_syn_only.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_TCP_SYN_ONLY,
"-d", BIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_bidir(RESULT_FILE, groundtruth_tcp_syn_only, 1)
def test_tcp_flow_expiration_unidir():
"""
    Test output for unidirectional processing of tcp_syn_flow_expiration.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_TCP_SYN_FLOW_EXPIRATION,
"-d", UNIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_unidir(RESULT_FILE, groundtruth_tcp_syn_flow_expiration, 2)
def test_tcp_flow_expiration_bidir():
"""
    Test output for bidirectional processing of tcp_syn_flow_expiration.pcap file
"""
for python_ver in (PYTHON2, PYTHON3):
# System call to remove old result file if exists:
if os.path.isfile(RESULT_FILE):
logger.info("deleting RESULT_FILE=%s", RESULT_FILE)
os.remove(RESULT_FILE)
# Run flowRecorder to generate output file:
try:
result = subprocess.check_output([python_ver, FLOWRECORDER,
"-f" , TEST_PCAP_TCP_SYN_FLOW_EXPIRATION,
"-d", BIDIR,
"-o", RESULT_FILE])
logger.info("flowRecorder result is %s", result)
except subprocess.CalledProcessError as e:
logger.critical("Stdout output: %s", e.output)
# Check results file exists:
assert os.path.isfile(RESULT_FILE)
# Call helper function to validate the results file values:
validate_results_file_bidir(RESULT_FILE, groundtruth_tcp_syn_flow_expiration, 2)
#================= HELPER FUNCTIONS ===========================================
def validate_results_file_unidir(filename, ground_truth, results_length):
"""
Validate a unidirectional results file against ground truth values from
a separate ground truth object
"""
logger.debug("Validating unidir results filename=%s against %s", filename, ground_truth.name)
# Read in results file:
with open(filename) as csv_file:
csv_reader = list(csv.DictReader(csv_file))
# Validate results file has correct number of rows (excl header):
assert len(csv_reader) == results_length
row_number = 0
# Iterate through rows of result data, checking values:
for row in csv_reader:
logger.debug("Validating row=%s", row_number)
assert row['src_ip'] == ground_truth.UNIDIR_SRC_IP[row_number]
assert row['src_port'] == ground_truth.UNIDIR_SRC_PORT[row_number]
assert row['dst_ip'] == ground_truth.UNIDIR_DST_IP[row_number]
assert row['dst_port'] == ground_truth.UNIDIR_DST_PORT[row_number]
assert row['proto'] == ground_truth.UNIDIR_PROTO[row_number]
assert row['pktTotalCount'] == ground_truth.UNIDIR_PKTTOTALCOUNT[row_number]
assert row['octetTotalCount'] == ground_truth.UNIDIR_OCTETTOTALCOUNT[row_number]
assert row['min_ps'] == ground_truth.UNIDIR_MIN_PS[row_number]
assert row['max_ps'] == ground_truth.UNIDIR_MAX_PS[row_number]
# Average needs leeway to cope with floats/division/rounding etc:
assert float(row['avg_ps']) < float(ground_truth.UNIDIR_AVG_PS[row_number]) + MARGIN
assert float(row['avg_ps']) > float(ground_truth.UNIDIR_AVG_PS[row_number]) - MARGIN
# Std Dev needs leeway to cope with floats/division/rounding etc:
assert float(row['std_dev_ps']) < float(ground_truth.UNIDIR_STD_DEV_PS[row_number]) + MARGIN
assert float(row['std_dev_ps']) > float(ground_truth.UNIDIR_STD_DEV_PS[row_number]) - MARGIN
assert row['flowStart'] == ground_truth.UNIDIR_FLOWSTART[row_number]
assert row['flowEnd'] == ground_truth.UNIDIR_FLOWEND[row_number]
# Flow Duration needs leeway to cope with floats/division/rounding etc:
assert float(row['flowDuration']) < float(ground_truth.UNIDIR_FLOWDURATION[row_number]) + MARGIN
assert float(row['flowDuration']) > float(ground_truth.UNIDIR_FLOWDURATION[row_number]) - MARGIN
# Inter-packet arrival times need leeway to cope with floats/division/rounding etc:
assert float(row['min_piat']) < float(ground_truth.UNIDIR_MIN_PIAT[row_number]) + MARGIN
assert float(row['min_piat']) > float(ground_truth.UNIDIR_MIN_PIAT[row_number]) - MARGIN
assert float(row['max_piat']) < float(ground_truth.UNIDIR_MAX_PIAT[row_number]) + MARGIN
assert float(row['max_piat']) > float(ground_truth.UNIDIR_MAX_PIAT[row_number]) - MARGIN
assert float(row['avg_piat']) < float(ground_truth.UNIDIR_AVG_PIAT[row_number]) + MARGIN
assert float(row['avg_piat']) > float(ground_truth.UNIDIR_AVG_PIAT[row_number]) - MARGIN
assert float(row['std_dev_piat']) < float(ground_truth.UNIDIR_STD_DEV_PIAT[row_number]) + MARGIN
assert float(row['std_dev_piat']) > float(ground_truth.UNIDIR_STD_DEV_PIAT[row_number]) - MARGIN
row_number += 1
def validate_results_file_bidir(filename, ground_truth, results_length):
"""
Validate a bidirectional results file against ground truth values from
a separate ground truth object
"""
logger.debug("Validating bidir results filename=%s against %s", filename, ground_truth.name)
# Read in results file:
with open(filename) as csv_file:
csv_reader = list(csv.DictReader(csv_file))
# Validate results file has correct number of rows (excl header):
assert len(csv_reader) == results_length
row_number = 0
# Iterate through rows of result data, checking values:
for row in csv_reader:
logger.debug("Validating row=%s", row_number)
# Combined values:
assert row['src_ip'] == ground_truth.BIDIR_SRC_IP[row_number]
assert row['src_port'] == ground_truth.BIDIR_SRC_PORT[row_number]
assert row['dst_ip'] == ground_truth.BIDIR_DST_IP[row_number]
assert row['dst_port'] == ground_truth.BIDIR_DST_PORT[row_number]
assert row['proto'] == ground_truth.BIDIR_PROTO[row_number]
assert row['min_ps'] == ground_truth.BIDIR_MIN_PS[row_number]
assert row['max_ps'] == ground_truth.BIDIR_MAX_PS[row_number]
# Average needs leeway to cope with floats/division/rounding etc:
assert float(row['avg_ps']) < float(ground_truth.BIDIR_AVG_PS[row_number]) + MARGIN
assert float(row['avg_ps']) > float(ground_truth.BIDIR_AVG_PS[row_number]) - MARGIN
# Std Dev needs leeway to cope with floats/division/rounding etc:
assert float(row['std_dev_ps']) < float(ground_truth.BIDIR_STD_DEV_PS[row_number]) + MARGIN
assert float(row['std_dev_ps']) > float(ground_truth.BIDIR_STD_DEV_PS[row_number]) - MARGIN
assert row['flowStart'] == ground_truth.BIDIR_FLOWSTART[row_number]
assert row['flowEnd'] == ground_truth.BIDIR_FLOWEND[row_number]
# Flow Duration needs leeway to cope with floats/division/rounding etc:
assert float(row['flowDuration']) < float(ground_truth.BIDIR_FLOWDURATION[row_number]) + MARGIN
assert float(row['flowDuration']) > float(ground_truth.BIDIR_FLOWDURATION[row_number]) - MARGIN
# Inter-packet arrival times need leeway to cope with floats/division/rounding etc:
assert float(row['min_piat']) < float(ground_truth.BIDIR_MIN_PIAT[row_number]) + MARGIN
assert float(row['min_piat']) > float(ground_truth.BIDIR_MIN_PIAT[row_number]) - MARGIN
assert float(row['max_piat']) < float(ground_truth.BIDIR_MAX_PIAT[row_number]) + MARGIN
assert float(row['max_piat']) > float(ground_truth.BIDIR_MAX_PIAT[row_number]) - MARGIN
assert float(row['avg_piat']) < float(ground_truth.BIDIR_AVG_PIAT[row_number]) + MARGIN
assert float(row['avg_piat']) > float(ground_truth.BIDIR_AVG_PIAT[row_number]) - MARGIN
assert float(row['std_dev_piat']) < float(ground_truth.BIDIR_STD_DEV_PIAT[row_number]) + MARGIN
assert float(row['std_dev_piat']) > float(ground_truth.BIDIR_STD_DEV_PIAT[row_number]) - MARGIN
# Forward values:
assert row['f_pktTotalCount'] == ground_truth.BIDIR_F_PKTTOTALCOUNT[row_number]
assert row['f_octetTotalCount'] == ground_truth.BIDIR_F_OCTETTOTALCOUNT[row_number]
assert row['f_min_ps'] == ground_truth.BIDIR_F_MIN_PS[row_number]
assert row['f_max_ps'] == ground_truth.BIDIR_F_MAX_PS[row_number]
# Average needs leeway to cope with floats/division/rounding etc:
assert float(row['f_avg_ps']) < float(ground_truth.BIDIR_F_AVG_PS[row_number]) + MARGIN
assert float(row['f_avg_ps']) > float(ground_truth.BIDIR_F_AVG_PS[row_number]) - MARGIN
# Std Dev needs leeway to cope with floats/division/rounding etc:
assert float(row['f_std_dev_ps']) < float(ground_truth.BIDIR_F_STD_DEV_PS[row_number]) + MARGIN
assert float(row['f_std_dev_ps']) > float(ground_truth.BIDIR_F_STD_DEV_PS[row_number]) - MARGIN
assert row['f_flowStart'] == ground_truth.BIDIR_F_FLOWSTART[row_number]
assert row['f_flowEnd'] == ground_truth.BIDIR_F_FLOWEND[row_number]
# Flow Duration needs leeway to cope with floats/division/rounding etc:
assert float(row['f_flowDuration']) < float(ground_truth.BIDIR_F_FLOWDURATION[row_number]) + MARGIN
assert float(row['f_flowDuration']) > float(ground_truth.BIDIR_F_FLOWDURATION[row_number]) - MARGIN
# Inter-packet arrival times need leeway to cope with floats/division/rounding etc:
assert float(row['f_min_piat']) < float(ground_truth.BIDIR_F_MIN_PIAT[row_number]) + MARGIN
assert float(row['f_min_piat']) > float(ground_truth.BIDIR_F_MIN_PIAT[row_number]) - MARGIN
assert float(row['f_max_piat']) < float(ground_truth.BIDIR_F_MAX_PIAT[row_number]) + MARGIN
assert float(row['f_max_piat']) > float(ground_truth.BIDIR_F_MAX_PIAT[row_number]) - MARGIN
assert float(row['f_avg_piat']) < float(ground_truth.BIDIR_F_AVG_PIAT[row_number]) + MARGIN
assert float(row['f_avg_piat']) > float(ground_truth.BIDIR_F_AVG_PIAT[row_number]) - MARGIN
assert float(row['f_std_dev_piat']) < float(ground_truth.BIDIR_F_STD_DEV_PIAT[row_number]) + MARGIN
assert float(row['f_std_dev_piat']) > float(ground_truth.BIDIR_F_STD_DEV_PIAT[row_number]) - MARGIN
# Backward values:
assert row['b_pktTotalCount'] == ground_truth.BIDIR_B_PKTTOTALCOUNT[row_number]
assert row['b_octetTotalCount'] == ground_truth.BIDIR_B_OCTETTOTALCOUNT[row_number]
assert row['b_min_ps'] == ground_truth.BIDIR_B_MIN_PS[row_number]
assert row['b_max_ps'] == ground_truth.BIDIR_B_MAX_PS[row_number]
# Average needs leeway to cope with floats/division/rounding etc:
assert float(row['b_avg_ps']) < float(ground_truth.BIDIR_B_AVG_PS[row_number]) + MARGIN
assert float(row['b_avg_ps']) > float(ground_truth.BIDIR_B_AVG_PS[row_number]) - MARGIN
# Std Dev needs leeway to cope with floats/division/rounding etc:
assert float(row['b_std_dev_ps']) < float(ground_truth.BIDIR_B_STD_DEV_PS[row_number]) + MARGIN
assert float(row['b_std_dev_ps']) > float(ground_truth.BIDIR_B_STD_DEV_PS[row_number]) - MARGIN
assert row['b_flowStart'] == ground_truth.BIDIR_B_FLOWSTART[row_number]
assert row['b_flowEnd'] == ground_truth.BIDIR_B_FLOWEND[row_number]
# Flow Duration needs leeway to cope with floats/division/rounding etc:
assert float(row['b_flowDuration']) < float(ground_truth.BIDIR_B_FLOWDURATION[row_number]) + MARGIN
assert float(row['b_flowDuration']) > float(ground_truth.BIDIR_B_FLOWDURATION[row_number]) - MARGIN
# Inter-packet arrival times need leeway to cope with floats/division/rounding etc:
assert float(row['b_min_piat']) < float(ground_truth.BIDIR_B_MIN_PIAT[row_number]) + MARGIN
assert float(row['b_min_piat']) > float(ground_truth.BIDIR_B_MIN_PIAT[row_number]) - MARGIN
assert float(row['b_max_piat']) < float(ground_truth.BIDIR_B_MAX_PIAT[row_number]) + MARGIN
assert float(row['b_max_piat']) > float(ground_truth.BIDIR_B_MAX_PIAT[row_number]) - MARGIN
assert float(row['b_avg_piat']) < float(ground_truth.BIDIR_B_AVG_PIAT[row_number]) + MARGIN
assert float(row['b_avg_piat']) > float(ground_truth.BIDIR_B_AVG_PIAT[row_number]) - MARGIN
assert float(row['b_std_dev_piat']) < float(ground_truth.BIDIR_B_STD_DEV_PIAT[row_number]) + MARGIN
assert float(row['b_std_dev_piat']) > float(ground_truth.BIDIR_B_STD_DEV_PIAT[row_number]) - MARGIN
row_number += 1
#f_pktTotalCount,f_octetTotalCount,f_min_ps,f_max_ps,f_avg_ps,f_std_dev_ps,
#f_flowStart,f_flowEnd,f_flowDuration,f_min_piat,f_max_piat,f_avg_piat,f_std_dev_piat,
#b_pktTotalCount,b_octetTotalCount,b_min_ps,b_max_ps,b_avg_ps,b_std_dev_ps,
#b_flowStart,b_flowEnd,b_flowDuration,b_min_piat,b_max_piat,b_avg_piat,b_std_dev_piat
|
# Building paths
# Besides splitting existing paths apart, you often need to build paths from
# other strings. To combine multiple path components into a single value, use join():
import os.path
PATHS = [
('one', 'two', 'three'),
('/', 'one', 'two', 'three'),
('/one', '/two', '/three'),
]
for parts in PATHS:
print('{} : {!r}'.format(parts, os.path.join(*parts)))
"""
output:
('one', 'two', 'three') : 'one\\two\\three'
('/', 'one', 'two', 'three') : '/one\\two\\three'
('/one', '/two', '/three') : '/three'
"""
|
import os
import sys
import tempfile
import time
from pathlib import Path
from typing import IO, List, Set
import csv
import pandas
from util.jtl_convertor import jtl_validator
from util.project_paths import ENV_TAURUS_ARTIFACT_DIR
LABEL = 'Label'
SAMPLES = '# Samples'
AVERAGE = 'Average'
MEDIAN = 'Median'
PERC_90 = '90% Line'
PERC_95 = '95% Line'
PERC_99 = '99% Line'
MIN = 'Min'
MAX = 'Max'
ERROR_RATE = 'Error %'
LABEL_JTL = 'label'
ELAPSED_JTL_TMP = 'elapsed_tmp'
ELAPSED_JTL = 'elapsed'
SUCCESS_JTL = 'success'
SUCCESS_JTL_TMP = 'success_tmp'
FALSE_JTL = 'false'
CSV_HEADER = f'{LABEL},{SAMPLES},{AVERAGE},{MEDIAN},{PERC_90},{PERC_95},{PERC_99},{MIN},{MAX},{ERROR_RATE}\n'
RESULTS_CSV_NAME = 'results.csv'
def __count_file_lines(stream: IO) -> int:
return sum(1 for _ in stream)
def __reset_file_stream(stream: IO) -> None:
stream.seek(0)
def __convert_jtl_to_csv(input_file_path: Path, output_file_path: Path) -> None:
if not input_file_path.exists():
        raise SystemExit(f'Input file {input_file_path} does not exist')
start = time.time()
convert_to_csv(output_csv=output_file_path, input_jtl=input_file_path)
if not output_file_path.exists():
raise SystemExit(f'Something went wrong. Output file {output_file_path} does not exist')
    print(f'Created file {output_file_path}. Converted from jtl to csv in {time.time() - start} seconds')
def __change_file_extension(file_name: str, new_extension) -> str:
return __get_file_name_without_extension(file_name) + new_extension
def __get_file_name_without_extension(file_name):
return os.path.splitext(file_name)[0]
def __read_csv_without_first_line(results_file_stream, input_csv):
with input_csv.open(mode='r') as file_stream:
__reset_file_stream(file_stream)
for cnt, line in enumerate(file_stream, 1):
if cnt != 1:
results_file_stream.write(line)
print(f'File {input_csv} successfully read')
def __create_results_csv(csv_list: List[Path], results_file_path: Path) -> None:
with results_file_path.open(mode='w') as results_file_stream:
results_file_stream.write(CSV_HEADER)
for temp_csv_path in csv_list:
__read_csv_without_first_line(results_file_stream, temp_csv_path)
if not results_file_path.exists():
raise SystemExit(f'Something went wrong. Output file {results_file_path} does not exist')
print(f'Created file {results_file_path}')
def __validate_file_names(file_names: List[str]):
file_names_set: Set[str] = set()
for file_name in file_names:
if '.' not in file_name:
raise SystemExit(f'File name {file_name} does not have extension')
file_name_without_extension = __get_file_name_without_extension(file_name)
if file_name_without_extension in file_names_set:
raise SystemExit(f'Duplicated file name {file_name_without_extension}')
file_names_set.add(file_name_without_extension)
def convert_to_csv(input_jtl: Path, output_csv: Path):
reader = csv.DictReader(input_jtl.open(mode='r'))
jtl_list = [row for row in reader]
csv_list = []
for jtl_sample in jtl_list:
sample = {}
if jtl_sample[LABEL_JTL] not in [processed_sample[LABEL] for processed_sample in csv_list]:
sample[LABEL] = jtl_sample[LABEL_JTL]
sample[SAMPLES] = 1
sample[ELAPSED_JTL_TMP] = [int(jtl_sample[ELAPSED_JTL])] # Temp list with 'elapsed' value for current label
sample[SUCCESS_JTL_TMP] = [jtl_sample[SUCCESS_JTL]] # Temp list with 'success' value for current label
csv_list.append(sample)
else:
# Get and update processed row with current label
processed_sample = [row for row in csv_list if row[LABEL] == jtl_sample['label']][0]
processed_sample[SAMPLES] = processed_sample[SAMPLES] + 1 # Count samples
processed_sample[ELAPSED_JTL_TMP].append(int(jtl_sample[ELAPSED_JTL])) # list of elapsed values
processed_sample[SUCCESS_JTL_TMP].append(jtl_sample[SUCCESS_JTL]) # list of success values
# Calculation after the last row in kpi.jtl is processed
if jtl_sample == jtl_list[-1]:
for processed_sample in csv_list:
elapsed_df = pandas.Series(processed_sample[ELAPSED_JTL_TMP])
processed_sample[AVERAGE] = int(round(elapsed_df.mean()))
processed_sample[MEDIAN] = int(round(elapsed_df.quantile(0.5)))
processed_sample[PERC_90] = int(round(elapsed_df.quantile(0.9)))
processed_sample[PERC_95] = int(round(elapsed_df.quantile(0.95)))
processed_sample[PERC_99] = int(round(elapsed_df.quantile(0.99)))
processed_sample[MIN] = min(processed_sample[ELAPSED_JTL_TMP])
processed_sample[MAX] = max(processed_sample[ELAPSED_JTL_TMP])
success_list = processed_sample[SUCCESS_JTL_TMP]
processed_sample[ERROR_RATE] = round(success_list.count(FALSE_JTL) / len(success_list), 2) * 100.00
del processed_sample[SUCCESS_JTL_TMP]
del processed_sample[ELAPSED_JTL_TMP]
headers = csv_list[0].keys()
with output_csv.open('w') as output_file:
dict_writer = csv.DictWriter(output_file, headers)
dict_writer.writeheader()
for row in csv_list:
dict_writer.writerow(row)
def main():
file_names = sys.argv[1:]
__validate_file_names(file_names)
with tempfile.TemporaryDirectory() as tmp_dir:
temp_csv_list: List[Path] = []
for file_name in file_names:
jtl_file_path = ENV_TAURUS_ARTIFACT_DIR / file_name
jtl_validator.validate(jtl_file_path)
csv_file_path = Path(tmp_dir) / __change_file_extension(file_name, '.csv')
__convert_jtl_to_csv(jtl_file_path, csv_file_path)
temp_csv_list.append(csv_file_path)
results_file_path = ENV_TAURUS_ARTIFACT_DIR / RESULTS_CSV_NAME
__create_results_csv(temp_csv_list, results_file_path)
if __name__ == "__main__":
start_time = time.time()
main()
print(f'Done in {time.time() - start_time} seconds')
|
#!/usr/bin/env python
import os
from distutils.core import setup
def get_package_data_files():
files = []
pkgdir = os.path.join(os.path.dirname(__file__), "src", "cloc")
for root, _, filenames in os.walk(os.path.join(pkgdir, "config")):
for f in filenames:
files.append(os.path.join(root[len(pkgdir) + 1 :], f)) # plus 1 for the slash
return files
setup(
name="cloc",
version="1.0",
author="Oliver Hulett",
author_email="oliver.hulett@gmail.com",
url="https://github.com/oliverhulett/classify_lines_of_code",
description="Classify Lines Of Code",
package_dir={"": "src"},
packages=["cloc"],
package_data={"cloc": get_package_data_files()},
entry_points={"console_scripts": ["cloc=cloc.__main__:main"]},
install_requires=["pyyaml"],
)
|
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
# Start SparkSession
spark = SparkSession.builder.appName("missingdata").getOrCreate()
df = spark.read.csv("ContainsNull.csv", header=True, inferSchema=True)
df.show()
# Drop any row that contains missing data
df.na.drop().show()
# Has to have at least 2 NON-null values
df.na.drop(thresh=2).show()
# Drop rows where the "Sales" column contains missing data
df.na.drop(subset=["Sales"]).show()
# Drop any row that contains missing data (how='any' is the default)
df.na.drop(how='any').show()
# Drop rows where every column is missing
df.na.drop(how='all').show()
# Fill missing data with "NEW VALUE"
df.na.fill('NEW VALUE').show()
# Fill missing data with 0
df.na.fill(0).show()
# Fill missing data in the "Name" column with "No Name"
df.na.fill('No Name', subset=['Name']).show()
# Fill missing values in the "Sales" column with its mean
mean_val = df.select(F.mean(df['Sales'])).collect()
# Weird nested formatting of Row object!
mean_val[0][0]
mean_sales = mean_val[0][0]
df.na.fill(mean_sales, ["Sales"]).show()
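# A slightly more compact way to pull the mean out of the Row (a sketch; gives
# the same result as the collect()-based approach above):
mean_sales_alt = df.select(F.mean(df['Sales'])).first()[0]
df.na.fill(mean_sales_alt, ["Sales"]).show()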
|
#!/usr/bin/env python3
import argparse
'''
* Stream of numbers.
* First N numbers (N=25 for this code) are part of the preamble.
* Every number following the preamble must be a sum of two of the N numbers prior to it
'''
g_args = None
#------------------------------------------------------------------------------
def options():
#------------------------------------------------------------------------------
global g_args
    l_parser = argparse.ArgumentParser(description='XMAS decoder (Advent of Code 2020, day 9)')
l_parser.add_argument('--file', dest='m_file', default='input.txt', help="Input file")
l_parser.add_argument('--debug', dest='m_debug', default=False, action='store_true', help='Debug verbosity')
l_parser.add_argument('--preamble', dest='m_preamble', default=25, type=int, help='Preamble length')
g_args = l_parser.parse_args()
#------------------------------------------------------------------------------
def run():
#------------------------------------------------------------------------------
l_stream = open(g_args.m_file).readlines()
l_stream = [ x.rstrip() for x in l_stream ]
l_stream = [ int(x) for x in l_stream ]
l_start_idx = g_args.m_preamble
l_pass = True
for l_idx in range(l_start_idx, len(l_stream)):
(l_pass, l_idx1, l_idx2) = check_val(l_stream, l_idx)
if not l_pass:
print("Index {} (value {}) failed".format(l_idx, l_stream[l_idx]))
break
print("Code {}".format("passed" if l_pass else "failed"))
#------------------------------------------------------------------------------
def check_val(x_stream, x_idx):
#------------------------------------------------------------------------------
l_num = x_stream[x_idx]
l_pass = False
l_start_idx = x_idx - g_args.m_preamble
for l_idx1 in range(l_start_idx, x_idx - 1):
for l_idx2 in range(l_idx1 + 1, x_idx):
l_pass = (x_stream[l_idx1] + x_stream[l_idx2] == l_num)
if g_args.m_debug:
print("Checking {}+{} ([{}]+[{}]) = {} (pass={})".format(x_stream[l_idx1], x_stream[l_idx2], l_idx1, l_idx2, l_num, l_pass))
if l_pass:
return (l_pass, l_idx1, l_idx2)
return (l_pass, None, None)
#------------------------------------------------------------------------------
def main():
#------------------------------------------------------------------------------
options()
run()
main()
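# Example invocation (the script file name here is an assumption):
#   python3 xmas.py --file input.txt --preamble 25 --debug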
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtWidgets import (QApplication, QComboBox,
QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QPushButton,
QStyleFactory,
QVBoxLayout, QFileDialog)
import sys
import AudioToTest
import os
import threading
import audio_v3
class WidgetGallery(QDialog):
def __init__(self, parent=None):
super(WidgetGallery, self).__init__(parent)
self.originalPalette = QApplication.palette()
styleComboBox = QComboBox()
styleComboBox.addItems(QStyleFactory.keys())
styleLabel = QLabel("&Style:")
styleLabel.setBuddy(styleComboBox)
self.createTopRightGroupBox()
self.createBottomRightGroupBox()
self.createLeftBox()
styleComboBox.activated[str].connect(self.changeStyle)
topLayout = QHBoxLayout()
topLayout.addWidget(styleLabel)
topLayout.addWidget(styleComboBox)
topLayout.addStretch(1)
mainLayout = QGridLayout()
mainLayout.addLayout(topLayout, 0, 0, 1, 2)
mainLayout.addWidget(self.topRightGroupBox,1, 1)
mainLayout.addWidget(self.bottomRightGroupBox, 2, 1)
mainLayout.addWidget(self.LeftBox,1,0)
mainLayout.setRowStretch(1, 1)
mainLayout.setRowStretch(2, 1)
mainLayout.setColumnStretch(0, 1)
mainLayout.setColumnStretch(1, 1)
self.setLayout(mainLayout)
self.re = audio_v3.Recorder()
self.setWindowTitle("Styles")
self.changeStyle('Windows')
self.A = AudioToTest.AudioToTest()
def changeStyle(self, styleName):
QApplication.setStyle(QStyleFactory.create(styleName))
self.changePalette()
def changePalette(self):
QApplication.setPalette(QApplication.style().standardPalette())
def advanceProgressBar(self):
curVal = self.progressBar.value()
maxVal = self.progressBar.maximum()
self.progressBar.setValue(curVal + (maxVal - curVal) / 100)
def createTopRightGroupBox(self):
self.topRightGroupBox = QGroupBox("Recording")
self.recordingButton = QPushButton("Start Recording")
self.recordingButton.setCheckable(True)
self.recordingButton.setDefault(True)
self.recordingButton.clicked.connect(self.recording_button_clicked)
self.lineEdit = QLineEdit('wav file saving name')
self.savingButton = QPushButton("Save wav")
self.savingButton.setDefault(True)
layout = QVBoxLayout()
layout.addWidget(self.recordingButton)
layout.addWidget(self.lineEdit)
layout.addWidget(self.savingButton)
layout.addStretch(1)
self.topRightGroupBox.setLayout(layout)
def createLeftBox(self):
self.LeftBox = QGroupBox("Text")
self.speechText = QLineEdit("blank")
layout = QGridLayout()
layout.addWidget(self.speechText,0,0,1,2)
self.LeftBox.setLayout(layout)
def createBottomRightGroupBox(self):
self.bottomRightGroupBox = QGroupBox("Speech to Text")
self.lineEdit = QLineEdit('')
self.chooseButton = QPushButton("Choose File")
self.chooseButton.setDefault(True)
self.chooseButton.clicked.connect(self.choose_button_clicked)
self.transferButton = QPushButton("Transfer")
self.transferButton.setDefault(True)
self.transferButton.clicked.connect(self.transfer_button)
self.filetransferButton = QPushButton("File Transfer")
self.filetransferButton.clicked.connect(self.File_Trans)
self.JsonButton = QPushButton("Open Json")
self.JsonButton.clicked.connect(self.Open_Json)
layout = QGridLayout()
layout.addWidget(self.lineEdit, 0, 0, 1, 2)
layout.addWidget(self.chooseButton, 1, 0, 1, 2)
layout.addWidget(self.transferButton, 2, 0, 1, 2)
layout.addWidget(self.filetransferButton,3,0,1,2)
layout.addWidget(self.JsonButton,4,0,1,2)
layout.setRowStretch(5, 1)
self.bottomRightGroupBox.setLayout(layout)
def recording_button_clicked(self):
if self.recordingButton.isChecked():
self.recordingButton.setText("Stop Recording")
self.re.start()
# T = threading.Thread(target=self.audio.recorder())
# T.start()
else:
self.re.stop()
self.re.save("test.wav")
self.recordingButton.setText("Start Recording")
def choose_button_clicked(self):
absolute_path = QFileDialog.getOpenFileName(self, 'Open file')
# '.', "wav files (*.wav)")
self.lineEdit.setText(absolute_path[0])
def transfer_button(self):
try:
absolute_path = QFileDialog.getOpenFileName(self, 'Open file',
'.', "wav files (*.wav)")
self.lineEdit.setText(absolute_path[0])
self.A.recognize(absolute_path[0])
except FileNotFoundError:
print("no such file")
def File_Trans(self):
absolute_path = QFileDialog.getOpenFileName(self, 'Open file',
'.', "wav files (*.wav)")
if absolute_path[0]:
os.system("ffmpeg -y -i "+absolute_path[0]+ " out.wav")
else:
print("No such file")
def Open_Json(self):
try:
absolute_path = QFileDialog.getOpenFileName(self, 'Open file',
'.', "wav files (*.json)")[0]
self.lineEdit.setText(absolute_path)
txt = self.A.openjson(absolute_path)
self.speechText.setText(txt)
except FileNotFoundError:
print("no such file")
def GO():
app = QApplication(sys.argv)
gallery = WidgetGallery()
gallery.show()
sys.exit(app.exec_())
if __name__ == '__main__':
    T1 = threading.Thread(target=GO)
T1.start()
|
'''
Slide lexer defines tokens used on all but the first level of the input document
(inside slides and inside all subsequent nested hierarchical nodes).
Some document tokens are reused.
Created on 1 Feb 2018
@author: Teodor G Nistor
@copyright: 2018 Teodor G Nistor
@license: MIT License
'''
from __future__ import unicode_literals
from ply import lex
from beamr.lexers.generic import t_error # Used internally by lex() @UnusedImport
from beamr.lexers.document import t_COMMENT, t_RAW, t_MACRO, _argLineno # Used internally by lex() @UnusedImport
import beamr
tokens = (
'COMMENT',
'AUTORAW',
'ESCAPE',
'ART',
'STRETCH',
'EMPH',
'CITATION',
'FOOTNOTE',
'URL',
'LISTITEM',
'COLUMN',
'IMGENV',
'PLUSENV',
'TABENV',
'ORGTABLE',
'RAW',
'VERBATIM',
'MACRO',
'BOX',
'ANTIESCAPE',
'TEXT',
)
def t_AUTORAW(t):
r'\\[a-zA-Z]+\*?(\{.*?\}|<.*?>|\[.*?\])*(?=[^\]}>]|$)' # e.g. \color{blue}
t.value = beamr.interpreters.Text(t.value, **_argLineno(t.lexer, t.value))
return t
def t_ESCAPE(t):
r'\\[^0-9A-Za-z\s\\]' # e.g. \# Inspired from https://github.com/Khan/simple-markdown/blob/master/simple-markdown.js
t.value = beamr.interpreters.Escape(t.value, **_argLineno(t.lexer, t.value))
return t
def t_ART(t):
r'-->|<->|<--|\|->|<-\||==>|<=>|<==|:\.\.|\.\.\.|:::|\\{2,3}' # e.g. -->, <=>, ...
t.value = beamr.interpreters.AsciiArt(t.value, **_argLineno(t.lexer, t.value))
return t
# For historical reasons called stretch; square bracket constructs were initially only for stretching and alignment, but that evolved
def t_STRETCH(t):
r'\[(?P<STRETCH_FLAG_S>[<>_^:+*~.=!|@]{1,3})((?P<STRETCH_TXT>.*?[^\\])(?P<STRETCH_FLAG_F>(?P=STRETCH_FLAG_S)|[<>]))??\]'
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Stretch(
flagS=gd['STRETCH_FLAG_S'],
flagF=gd['STRETCH_FLAG_F'],
txt=gd['STRETCH_TXT'],
**_argLineno(t.lexer, t.value))
return t
def t_EMPH(t):
r'(?P<EMPH_FLAG>[*_]{1,2})(?P<EMPH_TXT>[\S](.*?[^\s\\])?)(?P=EMPH_FLAG)' # e.g. *Bold text*, ~Strikethrough text~
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Emph(
flag=gd['EMPH_FLAG'],
txt=gd['EMPH_TXT'],
**_argLineno(t.lexer, t.value))
return t
def t_CITATION(t):
r'\[--(?P<CITE_TXT>.+?)(:(?P<CITE_OPTS>.+?))?\]' # e.g. [--einstein:p.241]
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Citation(
gd['CITE_TXT'], opts=gd['CITE_OPTS'], **_argLineno(t.lexer, t.value))
return t
def t_FOOTNOTE(t):
r'\[-((?P<FN_LABEL>.*?):)?(?P<FN_TXT>.*?)-\](?P<FN_OVRL>\<.*?\>)?' # e.g. [-24:See attached docs-]
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Footnote(
label=gd['FN_LABEL'],
text=gd['FN_TXT'],
overlay=gd['FN_OVRL'],
**_argLineno(t.lexer, t.value))
return t
def t_URL(t):
r'\[(?P<URL_TEXT>\[.+?\])?(?P<URL_TARGET>.+?)\]' # e.g. [https://www.example.com/], [[example]https://www.example.com/]
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Url(gd['URL_TARGET'], text=gd['URL_TEXT'], **_argLineno(t.lexer, t.value))
return t
# e.g.:
# - One
# *. Two
# -,+ Three
def t_LISTITEM(t):
r'\n(?P<LI_INDENT> *)(\*|-)(|\.|,|=)(|\+) .*(\n((?P=LI_INDENT) .*| *))*(?=\n|$)'
t.value = beamr.interpreters.ListItem(txt=t.value, **_argLineno(t.lexer, t.value))
return t
# e.g.:
# |1.5
# Column content
# |20%
# Column content
def t_COLUMN(t):
r'\n(?P<COL_INDENT> *)\| *((?P<COL_WNUM>\d*\.?\d+)(?P<COL_WUNIT>%)?)? *(?P<COL_ALIGN>[_^])? *(?P<COL_OVRL>\<.*\>)?(?P<COL_CONTENT>(\n((?P=COL_INDENT) .*| *))+)(?=\n|$)'
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Column(
widthNum=gd['COL_WNUM'],
widthUnit=gd['COL_WUNIT'],
align=gd['COL_ALIGN'],
overlay=gd['COL_OVRL'],
content=gd['COL_CONTENT'],
**_argLineno(t.lexer, t.value))
return t
def t_IMGENV(t):
r'~{[\s\S]*?}(\<.*\>)?'
t.value = beamr.interpreters.ImageEnv(t.value, **_argLineno(t.lexer, t.value))
return t
def t_PLUSENV(t):
r'\n(?P<PLUS_INDENT> *)\[(?P<PLUS_TXT>[\s\S]+?\n)(?P=PLUS_INDENT)\]'
t.value = beamr.interpreters.PlusEnv(
t.lexer.lexmatch.group('PLUS_TXT'),
**_argLineno(t.lexer, t.value))
return t
def t_TABENV(t):
r'={[\s\S]+?(?<!\\)}'
# _trackLineNo(t.lexer, t.value, False)
# t.value = beamr.interpreters.TableEnv(t.value[2:-1].replace(r'\}','}'))
# return t
def t_ORGTABLE(t):
r'\n(?P<ORGTAB_INDENT> *)\|.*(\n(?P=ORGTAB_INDENT)\|.*)+'
t.value = beamr.interpreters.OrgTable(txt=t.value, **_argLineno(t.lexer, t.value))
return t
def t_VERBATIM(t):
r'\n(?P<VBTM_INDENT> *){{(?P<VBTM_HEAD>.*)\n(?P<VBTM_BODY>[\s\S]+?)\n(?P=VBTM_INDENT)}}'
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.VerbatimEnv(
head=gd['VBTM_HEAD'].strip(), txt=gd['VBTM_BODY'], **_argLineno(t.lexer, t.value))
return t
def t_BOX(t):
r'\n(?P<BOX_INDENT> *)\((?P<BOX_KIND>\*|!|\?)(?P<BOX_TITLE>.*)(?P<BOX_CONTENT>[\s\S]+?)\n(?P=BOX_INDENT)\)(?P<BOX_OVRL>\<.*?\>)?'
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Box(
kind=gd['BOX_KIND'].strip(),
title=gd['BOX_TITLE'],
content=gd['BOX_CONTENT'],
overlay=gd['BOX_OVRL'],
**_argLineno(t.lexer, t.value))
return t
def t_ANTIESCAPE(t):
r'[^0-9A-Za-z\u00c0-\uffff\s]'
t.value = beamr.interpreters.Antiescape(t.value, **_argLineno(t.lexer, t.value))
return t
def t_TEXT(t):
r'[\s\S]+?(?=[^0-9A-Za-z\u00c0-\uffff\s]|\n|$)' # Inspired loosely from https://github.com/Khan/simple-markdown/blob/master/simple-markdown.js
t.value = beamr.interpreters.Text(t.value, **_argLineno(t.lexer, t.value))
return t
lexer = lex.lex(debug=beamr.debug.verbose, reflags=0)
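# Minimal usage sketch (illustrative only; within beamr this lexer is normally
# driven by the yacc-based parser rather than called directly):
#   lexer.input('Some *bold* text and a [https://www.example.com/] link')
#   for tok in lexer:
#       print(tok.type, tok.value)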
|