content stringlengths 5 1.05M |
|---|
"""
Promotion API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
"""
import os
import logging
import unittest
from datetime import datetime
from unittest import TestCase
from flask_api import status # HTTP Status Codes
from service.models import Promotion, DataValidationError, db, PromoType, Product
from service import app
from service.service import init_db
from .factories import PromotionFactory, ProductFactory
from freezegun import freeze_time
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgres://postgres:postgres@localhost:5432/postgres"
)
######################################################################
# T E S T C A S E S
######################################################################
@freeze_time("2020-11-03")
class TestPromotionService(TestCase):
""" REST API Server Tests """
@classmethod
def setUpClass(cls):
    """ Run once before all tests """
    # Configure the Flask app for testing against the configured Postgres URI
    app.config["TESTING"] = True
    app.config["DEBUG"] = False
    app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
    # silence application logging so test output stays readable
    app.logger.setLevel(logging.CRITICAL)
    init_db()
@classmethod
def tearDownClass(cls):
    """ Run once after all tests; nothing to clean up at class level """
    pass
def setUp(self):
    """ Runs before each test """
    db.drop_all()  # clean up the last tests
    db.create_all()  # create new tables
    # fresh test client per test so requests are isolated
    self.app = app.test_client()
def tearDown(self):
    """ Runs after each test: detach the session and drop all tables """
    db.session.remove()
    db.drop_all()
def _create_promotions(self, count):
    """ Factory method to create promotions in bulk via the REST API """
    promotions = []
    for _ in range(count):
        candidate = PromotionFactory()
        logging.debug(candidate.serialize())
        response = self.app.post(
            "/promotions",
            json=candidate.serialize(),
            content_type="application/json",
        )
        self.assertEqual(
            response.status_code,
            status.HTTP_201_CREATED,
            "Could not create test promotion",
        )
        # adopt the server-assigned id so callers can reference the record
        candidate.id = response.get_json()["id"]
        promotions.append(candidate)
    return promotions
######################################################################
# P L A C E T E S T C A S E S H E R E
######################################################################
def test_index(self):
    """ Test index call """
    response = self.app.get("/")
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # the landing page should identify the service by name
    self.assertIn(b"NYU DevOps eCommerce Promotions", response.data)
def test_create_promotion(self):
    """ Create a new Promotion """
    test_promotion = PromotionFactory()
    logging.debug(test_promotion)
    resp = self.app.post(
        "/promotions",
        json=test_promotion.serialize(),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    # Verify every serialized field of the response against the factory object
    new_promotion = resp.get_json()
    expected_fields = [
        ("title", test_promotion.title, "Titles do not match"),
        ("description", test_promotion.description, "Descriptions do not match"),
        ("promo_code", test_promotion.promo_code, "Promo Codes do not match"),
        ("promo_type", test_promotion.promo_type.name, "Promo Types do not match"),
        ("amount", test_promotion.amount, "Amounts do not match"),
        ("start_date", test_promotion.start_date.isoformat(), "Start Date does not match"),
        ("end_date", test_promotion.end_date.isoformat(), "End Date does not match"),
        ("is_site_wide", test_promotion.is_site_wide, "Is Site Wide bool does not match"),
    ]
    for field, value, message in expected_fields:
        self.assertEqual(new_promotion[field], value, message)
def test_create_promotion_with_product(self):
    """ Create a new Promotion With Product """
    # payload includes two product ids that the server should associate
    payload = {
        "id": 1,
        "title": "Halloween Special",
        "description": "Some items off in honor of the spookiest month.",
        "promo_code": "hween",
        "promo_type": "DISCOUNT",
        "amount": 25,
        "start_date": "2020-10-20T00:00:00",
        "end_date": "2020-11-01T00:00:00",
        "is_site_wide": False,
        "products": [123, 456],
    }
    response = self.app.post(
        "/promotions",
        json=payload,
        content_type="application/json",
    )
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # the created promotion should echo back the title and product ids
    created = response.get_json()
    self.assertEqual(created["title"], "Halloween Special", "Title does not match")
    self.assertEqual(created["products"], [123, 456], "Products does not match")
def test_get_promotion(self):
    """ Get a single Promotion """
    # seed one promotion, then read it back by its id
    promotion = self._create_promotions(1)[0]
    response = self.app.get(
        "/promotions/{}".format(promotion.id), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    payload = response.get_json()
    self.assertEqual(payload["title"], promotion.title)
def test_get_promotion_not_found(self):
    """ Get a Promotion thats not found """
    # id 0 is never assigned, so the lookup must return 404
    response = self.app.get("/promotions/0")
    self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_list_promotion(self):
    """ List all promotions in the database """
    # seed two promotions, then expect them back in insertion order
    first = self._create_promotions(1)[0]
    second = self._create_promotions(1)[0]
    response = self.app.get("/promotions")
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    payload = response.get_json()
    self.assertEqual(payload[0]["id"], first.id)
    self.assertEqual(payload[1]["id"], second.id)
def test_update_promotion(self):
    """ Update an existing Promotion """
    # create a promotion to update
    resp = self.app.post(
        "/promotions",
        json=PromotionFactory().serialize(),
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    # change two fields and send the record back
    updated_payload = resp.get_json()
    updated_payload["title"] = "unknown"
    updated_payload["products"] = [123]
    resp = self.app.put(
        "/promotions/{}".format(updated_payload["id"]),
        json=updated_payload,
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    returned = resp.get_json()
    self.assertEqual(returned["title"], "unknown")
    self.assertEqual(returned["products"], [123])
    # updating a promotion that does not exist must return 404 Not Found
    resp = self.app.put(
        "/promotions/{}".format("999999999999999"),
        json=updated_payload,
        content_type="application/json",
    )
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_promotion(self):
    """ Delete a Promotion """
    promotion = self._create_promotions(1)[0]
    url = "/promotions/{}".format(promotion.id)
    response = self.app.delete(url, content_type="application/json")
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    # 204 responses must carry no body
    self.assertEqual(len(response.data), 0)
    # a follow-up read must report the record as gone
    response = self.app.get(url, content_type="application/json")
    self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# If you call the DELETE function on a promotion that doesn't exist, should return OK
def test_delete_promotion_not_exist(self):
    """ Delete a Promotion that does not exist """
    # the service treats deleting a missing promotion as a no-op success (200)
    response = self.app.delete(
        "/promotions/{}".format("9999999999999999"), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_query_promotion_list_by_site_wide(self):
    """ Query all promotions in the database by site-wide """
    # seed promotions alternating the is_site_wide flag
    is_site_wide_list = [True, False, True]
    promotions = []
    for flag in is_site_wide_list:
        promotion = PromotionFactory()
        promotion.is_site_wide = flag
        response = self.app.post(
            "/promotions",
            json=promotion.serialize(),
            content_type="application/json",
        )
        created = response.get_json()
        promotions.append(created)
        logging.debug(created)
    response = self.app.get("/promotions", query_string=f"is_site_wide={True}")
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    payload = response.get_json()
    # only the site-wide promotions should come back from the filtered query
    for created, flag in zip(promotions, is_site_wide_list):
        if flag:
            self.assertIn(created, payload)
        else:
            self.assertNotIn(created, payload)
def test_query_promotion(self):
    """ Query all promotions in the database by multiple parameters.

    Seeds two products and four promotions, then runs a table of
    query-string filters and checks only the result count of each.
    """
    product_1 = Product()
    product_1.id = 100
    product_2 = Product()
    product_2.id = 200
    db.session.add(product_1)
    db.session.add(product_2)
    # Define the test cases (promotion fixtures)
    test_cases = [
        {
            "title": "0",
            "promo_code": "XYZ0000",
            "promo_type": PromoType.DISCOUNT,
            "amount": 50,
            "is_site_wide": False,
            "start_date": datetime(2020, 10, 17),
            "end_date": datetime(2020, 10, 21),
        },
        {
            "title": "1",
            "promo_code": "XYZ0001",
            "promo_type": PromoType.DISCOUNT,
            "amount": 10,
            "is_site_wide": True,
            "start_date": datetime(2020, 10, 21),
            "end_date": datetime(2021, 10, 23),
        },
        {
            "title": "2",
            "promo_code": "XYZ0002",
            "promo_type": PromoType.BOGO,
            "amount": 2,
            "is_site_wide": False,
            "start_date": datetime(2020, 10, 14),
            "end_date": datetime(2020, 10, 18),
        },
        {
            "title": "3",
            "promo_code": "XYZ0003",
            "promo_type": PromoType.DISCOUNT,
            "amount": 20,
            "is_site_wide": False,
            "start_date": datetime(2020, 10, 14),
            "end_date": datetime(2021, 10, 18),
        },
    ]
    # (query string, expected number of matching promotions)
    tests = [
        ("is_site_wide=true", 1),
        ("is_site_wide=true&product=100", 0),
        ("is_site_wide=true&active=1", 1),
        ("is_site_wide=false&active=0", 2),
        ("is_site_wide=true&title=3", 0),
        ("is_site_wide=false", 3),
        ("is_site_wide=false&product=200", 1),
        ("promo_code=XYZ0004", 0),
        ("promo_code=XYZ0003", 1),
        ("promo_code=XYZ0003&is_site_wide=false", 1),
        ("amount=20&is_site_wide=false", 1),
        ("amount=20&is_site_wide=true", 0),
        ("promo_type=DISCOUNT&is_site_wide=true", 1),
        ("start_date=2020-10-17T00:00:00", 1),
        ("promo_type=BOGO", 1),
        ("start_date=Sat, 17 Oct 2020 00:00:00 GMT", 1),
        (
            "start_date=Tue, 14 Oct 2020 00:00:00 GMT&end_date=Wed, 18 Oct 2020 00:00:00 GMT",
            1,
        ),
        ("duration=4", 2),
        ("active=0", 2),
        ("active=1", 2),
        ("product=100", 3),
        ("product=200", 1),
        ("", 4),
    ]
    # Create the set of Promotions; non-site-wide ones get product_1 and
    # promotion XYZ0003 additionally gets product_2
    for test_case in test_cases:
        test_promotion = Promotion()
        if not test_case["is_site_wide"]:
            test_promotion.products = [product_1]
            if test_case["promo_code"] == "XYZ0003":
                test_promotion.products.append(product_2)
        for attribute in test_case:
            setattr(test_promotion, attribute, test_case[attribute])
        resp = self.app.post(
            "/promotions",
            json=test_promotion.serialize(),
            content_type="application/json",
        )
    # Carry out the tests (debug print removed; logging.debug already traces the query)
    for query_str, length_of_result in tests:
        logging.debug(query_str)
        resp = self.app.get("/promotions", query_string=query_str)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), length_of_result)
def test_cancel_promotion(self):
    """ Cancel a promotion """
    # cancelling a promotion that does not exist yet must return 404
    response = self.app.post(
        "/promotions/{}/cancel".format(1), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    # once the promotion exists, cancelling it succeeds with 200
    promotion = self._create_promotions(1)[0]
    response = self.app.post(
        "/promotions/{}/cancel".format(promotion.id),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_apply_best_promotions(self):
    """ Test Apply Best Promotion """
    # API: /promotions/apply?product_id=product_price
    # Product - Promotion mapping
    # Product 1
    ## Available: Promo 1, Promo 2 (Store-wide), Promo 3, Promo 6 (Store-wide, Expired)
    ## Best: Promo 3 (BOGO)
    # Product 2
    ## Available: Promo 1, Promo 2 (Store-wide), Promo_4, Promo 6 (Store-wide, Expired)
    ## Best: Promo 4 (80%)
    # Product 3
    ## Available: Promo 2 (Store-wide), Promo 6 (Store-wide, Expired)
    ## Best: Promo 2 (10%)
    # Product 4
    ## Available: Promo 2 (Store-wide), Promo 5, Promo 6 (Store-wide, Expired)
    ## Best: Promo 5 (FIXED, 150)
    # seed four products the promotions can attach to
    product_1 = Product()
    product_1.id = 100
    product_2 = Product()
    product_2.id = 200
    product_3 = Product()
    product_3.id = 300
    product_4 = Product()
    product_4.id = 400
    db.session.add(product_1)
    db.session.add(product_2)
    db.session.add(product_3)
    db.session.add(product_4)
    # Define the promotions (promo 6 is expired relative to the frozen
    # 2020-11-03 clock set by @freeze_time on the class)
    promotions = [
        {
            "promo_code": "promo_code_1",
            "promo_type": PromoType.DISCOUNT,
            "amount": 40,
            "is_site_wide": False,
            "start_date": datetime(2020, 9, 2),
            "end_date": datetime(2021, 10, 21),
        },
        {
            "promo_code": "promo_code_2",
            "promo_type": PromoType.DISCOUNT,
            "amount": 10,
            "is_site_wide": True,
            "start_date": datetime(2020, 8, 21),
            "end_date": datetime(2021, 10, 23),
        },
        {
            "promo_code": "promo_code_3",
            "promo_type": PromoType.BOGO,
            "amount": 1,
            "is_site_wide": False,
            "start_date": datetime(2020, 9, 1),
            "end_date": datetime(2021, 5, 30),
        },
        {
            "promo_code": "promo_code_4",
            "promo_type": PromoType.DISCOUNT,
            "amount": 80,
            "is_site_wide": False,
            "start_date": datetime(2020, 10, 14),
            "end_date": datetime(2021, 5, 18),
        },
        {
            "promo_code": "promo_code_5",
            "promo_type": PromoType.FIXED,
            "amount": 150,
            "is_site_wide": False,
            "start_date": datetime(2020, 10, 14),
            "end_date": datetime(2021, 10, 18),
        },
        {
            "promo_code": "promo_code_6",
            "promo_type": PromoType.DISCOUNT,
            "amount": 80,
            "is_site_wide": True,
            "start_date": datetime(2020, 9, 14),
            "end_date": datetime(2020, 10, 15),
        },
    ]
    # (cart query string, expected best-promotion mapping per product)
    tests = [
        ("100=1000&200=5000", []),
        (
            "100=1000&200=5000&300=268&400=255",
            [
                {"100": "promo_code_3"},
                {"200": "promo_code_4"},
                {"300": "promo_code_2"},
                {"400": "promo_code_5"},
            ],
        ),
        ("", []),
    ]
    # Carry out the tests without promotions in the system
    for cart, result in tests[:1]:
        resp = self.app.get("/promotions/apply", query_string=cart)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(data, result)
    # Create the set of Promotions
    logging.debug("Creating promotions")
    for promo in promotions:
        test_promotion = PromotionFactory()
        for attribute in promo:
            setattr(test_promotion, attribute, promo[attribute])
        if promo["promo_code"] == "promo_code_1":
            test_promotion.products.append(product_1)
            test_promotion.products.append(product_2)
        elif promo["promo_code"] == "promo_code_3":
            test_promotion.products.append(product_1)
        elif promo["promo_code"] == "promo_code_4":
            test_promotion.products.append(product_2)
        elif promo["promo_code"] == "promo_code_5":
            test_promotion.products.append(product_4)
        logging.debug(
            f" Promo: {promo['promo_code']} (Promo ID: {test_promotion.id}): Products - {test_promotion.products}"
        )
        self.app.post(
            "/promotions",
            json=test_promotion.serialize(),
            content_type="application/json",
        )
    logging.debug("Promotions created")
    # Carry out the tests
    for cart, result in tests[1:]:
        logging.debug(cart)
        resp = self.app.get("/promotions/apply", query_string=cart)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(data, result)
# ---------------------------------------------------------------
# > Test Cases for Error Handlers <
# ---------------------------------------------------------------
def test_invalid_content_type(self):
    """ Test Invalid Content Type """
    # posting non-JSON content must be rejected with 415
    # (leftover debug prints removed)
    resp = self.app.post(
        "/promotions", json="This is a string", content_type="text/html"
    )
    self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def test_bad_request(self):
    """ Test Bad Request """
    # a JSON string (not an object) is rejected as a bad request
    response = self.app.post(
        "/promotions", json="{'test': 'promotion'}", content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_method_not_allowed(self):
    """ Test Method Not Allowed """
    # PUT on the collection endpoint is not supported
    response = self.app.put("/promotions")
    self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
######################################################################
# M A I N
######################################################################
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
|
#!/usr/bin/env python3
import sys
import math
data = [3, 1, 4, 1, 5]
data.sort()
mean = 0
sd = 0
med = 0;
for i in range(len(data)):
mean += data[i]
mean /= len(data)
for i in range(len(data)):
sd += ((mean - data[i]) ** 2)
if(len(data) % 2 == 0):
med = (data[len(data) // 2] + data[len(data) // 2 + 1]) / 2
else:
med = data[len(data) // 2]
print(f'Count: {len(data)} \nMinimum: {data[0]} \nMaximum: {data[len(data) - 1]} \nMean: {mean : .2f} \nStd. dev: {math.sqrt(sd / len(data)) : .2f} \nMedian: {med : .2f}')
|
from typing import Union

import pyapr
def opening(apr: pyapr.APR,
            parts: Union[pyapr.ShortParticles, pyapr.FloatParticles],
            binary: bool = False,
            radius: int = 1,
            inplace: bool = False):
    """Morphological opening: erosion followed by dilation.

    The original annotated `parts` with a tuple of types, which is not a
    valid PEP 484 hint; `Union` expresses the intended "either type".

    Parameters
    ----------
    apr : the APR structure the particles live on
    parts : particle values to transform
    binary : treat the particles as a binary mask
    radius : structuring-element radius passed to erosion/dilation
    inplace : if True, modify `parts` directly; otherwise work on a copy

    Returns the transformed particle set (the input object when inplace).
    """
    # reference assignment when in place, otherwise copy input particles
    parts_copy = parts if inplace else parts.copy()
    # morphological opening
    pyapr.numerics.transform.erosion(apr, parts_copy, binary=binary, radius=radius)
    pyapr.numerics.transform.dilation(apr, parts_copy, binary=binary, radius=radius)
    return parts_copy
def closing(apr: pyapr.APR,
            parts: Union[pyapr.ShortParticles, pyapr.FloatParticles],
            binary: bool = False,
            radius: int = 1,
            inplace: bool = False):
    """Morphological closing: dilation followed by erosion.

    The original annotated `parts` with a tuple of types, which is not a
    valid PEP 484 hint; `Union` expresses the intended "either type".

    Parameters mirror `opening`; when `inplace` is True the input particle
    set is modified and returned, otherwise a copy is transformed.
    """
    # reference assignment when in place, otherwise copy input particles
    parts_copy = parts if inplace else parts.copy()
    # morphological closing
    pyapr.numerics.transform.dilation(apr, parts_copy, binary=binary, radius=radius)
    pyapr.numerics.transform.erosion(apr, parts_copy, binary=binary, radius=radius)
    return parts_copy
def tophat(apr: pyapr.APR,
           parts: Union[pyapr.ShortParticles, pyapr.FloatParticles],
           binary: bool = False,
           radius: int = 1):
    """White top-hat transform: input minus its morphological opening.

    The tuple-style annotation on `parts` was replaced with a valid
    `Union` hint. The input `parts` is never modified (opening copies).
    """
    # morphological opening on a copy of the input
    tmp = opening(apr, parts, binary=binary, radius=radius, inplace=False)
    # return difference
    return parts - tmp
def bottomhat(apr: pyapr.APR,
              parts: Union[pyapr.ShortParticles, pyapr.FloatParticles],
              binary: bool = False,
              radius: int = 1):
    """Black top-hat (bottom-hat) transform: morphological closing minus input.

    The tuple-style annotation on `parts` was replaced with a valid
    `Union` hint. The input `parts` is never modified (closing copies).
    """
    # morphological closing on a copy of the input
    tmp = closing(apr, parts, binary=binary, radius=radius, inplace=False)
    # return difference
    return tmp - parts
|
import datetime
from typing import Dict, Optional, cast
import pytz
from ee.models.license import License, LicenseManager
from posthog.models import Organization
from posthog.test.base import APIBaseTest
class LicensedTestMixin:
    """
    Test API using Django REST Framework test suite, for licensed PostHog (mainly enterprise edition).
    """
    # plan to install for the test run; set to a falsy value to skip license creation
    CONFIG_LICENSE_PLAN: Optional[str] = "enterprise"
    license: License = None  # type: ignore

    def license_required_response(
        self,
        message: str = "This feature is part of the premium PostHog offering. To use it, get a self-hosted license: https://license.posthog.com",
    ) -> Dict[str, Optional[str]]:
        """Return the expected error payload for an unlicensed premium-feature call."""
        return {
            "type": "server_error",
            "code": "payment_required",
            "detail": message,
            "attr": None,
        }

    @classmethod
    def setUpTestData(cls):
        """Create a long-lived test license (valid until 2038) before the test data."""
        super().setUpTestData()  # type: ignore
        if cls.CONFIG_LICENSE_PLAN:
            # call the base manager's create via super(LicenseManager, ...) --
            # NOTE(review): presumably bypasses LicenseManager's custom create
            # (e.g. key validation); confirm against ee.models.license
            cls.license = super(LicenseManager, cast(LicenseManager, License.objects)).create(
                key=cls.CONFIG_LICENSE_PLAN,
                plan=cls.CONFIG_LICENSE_PLAN,
                valid_until=datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=pytz.UTC),
            )
            if hasattr(cls, "organization") and cls.organization:  # type: ignore
                # refresh the org's feature flags now that a license exists
                cls.organization.update_available_features()  # type: ignore
class APILicensedTest(LicensedTestMixin, APIBaseTest):
    """APIBaseTest variant that runs with an enterprise license installed."""
    pass
|
"""API Router is responsible for obtaining the connection values for each domain name.
This module obtains the IP, port and status of a microservice. Using the domain name,
it performs a Redis lookup by key value. The value is stored in Redis as JSON.
Typical usage example:
class OrdersMinosApiRouter(MinosApiRouter):
pass
foo = OrdersMinosApiRouter('order')
bar = foo.conn_values()
"""
import json
import aioredis
from minos.api_gateway.common import (
MinosConfig,
)
class MinosRedisClient:
    """Async Redis client that stores and looks up microservice connection
    data (JSON values) keyed by domain name.

    Connection parameters (host, port, password) are read from the
    ``discovery.database`` section of the provided :class:`MinosConfig`.
    """

    # NOTE: only ``redis`` is ever assigned; the other slot names are kept
    # for backward compatibility with any external code relying on them.
    __slots__ = "address", "port", "password", "redis"

    def __init__(self, config: MinosConfig):
        """Perform initial configuration and connection to Redis."""
        address = config.discovery.database.host
        port = config.discovery.database.port
        password = config.discovery.database.password
        pool = aioredis.ConnectionPool.from_url(f"redis://{address}:{port}", password=password, max_connections=10)
        self.redis = aioredis.Redis(connection_pool=pool)

    async def get_data(self, key: str) -> dict:
        """Return the JSON-decoded value stored under *key*.

        Fixed annotation: the method has always returned a dict, not a str.
        Best-effort: a missing key, connection error, or malformed JSON all
        yield an empty dict rather than raising.
        """
        json_data = {}
        try:
            redis_data = await self.redis.get(key)
            json_data = json.loads(redis_data)
        except Exception:
            pass
        return json_data

    async def set_data(self, key: str, data: dict) -> bool:
        """Store *data* as JSON under *key*; return True on success."""
        try:
            await self.redis.set(key, json.dumps(data))
        except Exception:  # pragma: no cover
            return False
        return True

    async def update_data(self):  # pragma: no cover
        """Update specific value (not implemented)."""
        pass

    async def delete_data(self, key: str) -> bool:
        """Delete *key*; return True if at least one entry was removed."""
        deleted_elements = await self.redis.delete(key)
        return deleted_elements > 0

    def get_redis_connection(self):
        """Return the underlying Redis connection object."""
        return self.redis

    async def flush_db(self):
        """Remove every key from the current Redis database."""
        await self.redis.flushdb()

    async def ping(self):
        """Health-check the Redis server; returns the PING response."""
        return await self.redis.ping()
|
#!/usr/bin/env python
###############################################################################
# Copyright (C) 2013-2014 by gempa GmbH
#
# Author: Stephan Herrnkind
# Email: herrnkind@gempa.de
###############################################################################
from __future__ import absolute_import, division, print_function
import sys
from fdsnwstest import FDSNWSTest
###############################################################################
class TestStation(FDSNWSTest):
    """FDSNWS station-service smoke test: runs a table of GET queries and
    compares each response against a stored result file."""
    #--------------------------------------------------------------------------
    def test(self):
        print('Testing station service')
        query = '{}/station/1/query'.format(self.url)
        ctTXT = 'text/plain'
        ctXML = 'application/xml'
        resFile = self.rootdir + '/results/station-'
        i = 1
        # (query suffix, expected content type, byte ranges to ignore, run concurrently)
        tests = [
            ('?format=text&level=channel', ctTXT, [], False),
            ('?format=text&includerestricted=false', ctTXT, [], True),
            ('?format=text&startbefore=2019-07-01', ctTXT, [], False),
            ('?level=channel&includeavailability=true', ctXML, [(172, 198, 7, 0)], False),
            ('?format=sc3ml&network=AM&station=R0F05&location=00&channel=SHZ&latitude=52&longitude=13&maxradius=0.5&level=response&includeavailability=true', ctXML, [], True),
        ]
        for q, ct, ignoreRanges, concurrent in tests:
            # result files are numbered sequentially: station-1.txt, station-2.txt, ...
            self.testGET('{}{}'.format(query, q), ct, ignoreRanges, concurrent,
                         dataFile='{}{}.txt'.format(resFile, i), testID=i)
            i += 1
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # run the station test suite and propagate its exit status to the shell
    app = TestStation()
    sys.exit(app())
# vim: ts=4 et tw=79
|
from django.db import models
# Create your models here.
class Schema(models.Model):
    """A named coding schema that groups related codes."""
    name = models.CharField(max_length=200)
    description = models.TextField()
class Code(models.Model):
    """A single code belonging to a Schema."""
    name = models.CharField(max_length=200)
    description = models.TextField()
    # cached count of currently-active instances of this code
    active_instances = models.PositiveIntegerField(default=0)
    schema = models.ForeignKey(Schema, related_name="codes")
    code_type = models.IntegerField(default=0)

    def __unicode__(self):
        # "schema_id/name (id)" with the description appended when present
        if self.description:
            return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
        else:
            return "%s/%s (%d)" % (self.schema_id, self.name, self.id)
class DataSet(models.Model):
    """A named collection of sessions."""
    name = models.CharField(max_length=100)
    created = models.DateTimeField()
class Session(models.Model):
    """A time-bounded recording session within a DataSet."""
    set = models.ForeignKey(DataSet)
    started = models.DateTimeField()
    ended = models.DateTimeField()

    def __unicode__(self):
        return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))
class Participant(models.Model):
    """A message author within the corpus."""
    name = models.CharField(max_length=100)
    description = models.TextField()

    def __unicode__(self):
        return self.name
class Message(models.Model):
    """A single message in a session, orderable by (time, session, idx)."""
    session = models.ForeignKey(Session)
    # position of the message within its session
    idx = models.IntegerField()
    time = models.DateTimeField()
    type = models.IntegerField()
    participant = models.ForeignKey(Participant, related_name='messages')
    message = models.TextField()
    codes = models.ManyToManyField(Code, through='CodeInstance')

    @classmethod
    def get_between(cls, start, end):
        """
        Get messages that are inclusively between the two messages, or two dates.
        Takes into account the exact ordering of messages,
        meaning that you won't get messages at the same time but after the last message, for example.
        """
        # When a Message is given, break timestamp ties using idx, but only
        # within the same session (other sessions are compared by time alone).
        if isinstance(start, Message):
            after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
            after_first = models.Q(time__gte=start.time) & after_first
        else:
            after_first = models.Q(time__gte=start)
        if isinstance(end, Message):
            before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
            before_last = models.Q(time__lte=end.time) & before_last
        else:
            before_last = models.Q(time__lte=end)
        return cls.objects.filter(after_first, before_last)

    # Aliases exposing a common interface (text / user_name / created_at)
    @property
    def text(self):
        return self.message

    @property
    def user_name(self):
        return self.participant.name

    @property
    def created_at(self):
        return self.time
class User(models.Model):
    """An annotator account (distinct from message Participants)."""
    name = models.CharField(max_length=100)
    full_name = models.CharField(max_length=250)
    email = models.CharField(max_length=250)

    def __unicode__(self):
        return self.name
class AbstractCodeInstance(models.Model):
    """Abstract base: a code applied to a message at a point in time."""
    class Meta:
        abstract = True
    code = models.ForeignKey(Code)
    message = models.ForeignKey(Message)
    added = models.DateTimeField()
class CodeInstance(AbstractCodeInstance):
    """Concrete code assignment made by a User during a task."""
    user = models.ForeignKey(User)
    task_id = models.PositiveIntegerField()
    intensity = models.FloatField()
    flag = models.IntegerField()
|
DEBUG = False
def merge(match_scheme, sum_match_scheme):
    """Fold each node list of match_scheme into the corresponding list of
    sum_match_scheme (in place, skipping duplicates) and return it."""
    for source_nodes, accumulated in zip(match_scheme, sum_match_scheme):
        # extend consumes lazily, so the membership check sees prior appends
        accumulated.extend(node for node in source_nodes if node not in accumulated)
    return sum_match_scheme
def bfs(node1, node2, graph1, graph2):
    """Recursively match the children of node2 (graph2) onto children of node1 (graph1).

    Returns a match scheme indexed like graph1 (per node, the list of graph1
    children matched under it), or None when no matching exists.
    NOTE(review): despite the name this is a recursive depth-first search.
    """
    if DEBUG:
        print(node1, node2)
    adj_nodes1 = graph1[node1]
    adj_nodes2 = graph2[node2]
    # graph2 needs more children than graph1 offers, or graph2 is a leaf
    # while graph1 is not -> no match possible at this pair
    if len(adj_nodes2) > len(adj_nodes1) or not adj_nodes2 and adj_nodes1:
        return None
    # matchs1[i] marks graph1 child i as already consumed by some graph2 child
    matchs1 = [False for _ in range(len(adj_nodes1))]
    sum_match_scheme = [[] for _ in range(len(graph1))]
    for node2_id in range(len(adj_nodes2)):
        # greedily find the first unconsumed graph1 child that matches
        for node1_id in range(len(adj_nodes1)):
            match_scheme = bfs(adj_nodes1[node1_id], adj_nodes2[node2_id], graph1, graph2)
            if not matchs1[node1_id] and match_scheme:
                matchs1[node1_id] = True
                if DEBUG:
                    print("match {} {}".format(adj_nodes1[node1_id], adj_nodes2[node2_id]))
                    print("match_scheme", match_scheme)
                break
        if not match_scheme:
            # this graph2 child could not be placed anywhere
            return None
        else:
            # accumulate the child's scheme into the running one
            sum_match_scheme = merge(match_scheme, sum_match_scheme)
    # record which graph1 children were matched under node1
    matchs1_id_list = []
    for i in range(len(matchs1)):
        if matchs1[i]:
            matchs1_id_list.append(adj_nodes1[i])
    sum_match_scheme[node1] = matchs1_id_list
    if DEBUG:
        print("sum_match_scheme", sum_match_scheme)
    return sum_match_scheme
def minus(graph, sub_graph):
    """Per-node edge difference: for each node, the successors present in
    graph but absent from sub_graph (order within each list is unspecified)."""
    return [list(set(full) - set(part)) for full, part in zip(graph, sub_graph)]
def subtraction(graph1, graph2):
    """Return the additive edges of graph1 relative to graph2 (graph1 - graph2),
    matching the two DAGs structurally from their shared root (node 0)."""
    # graph1(DAG), graph2(DAG)
    # graph1 = graph2 + some edges,
    # but their encode way maybe not same
    # so you must shield their encode way by your code
    # you can assume the first node is root of DAG
    # so you don't have to find the root by your self
    # return: graph1 - graph2 (the additive edges)
    assert len(graph1) == len(graph2), "node_num of two graphs do not equal"
    node1, node2 = 0, 0
    # express graph2's edges in graph1's node numbering, then diff
    match_scheme_in_graph1_pattern = bfs(node1, node2, graph1, graph2)
    additive_edge = minus(graph1, match_scheme_in_graph1_pattern)
    return additive_edge
if __name__ == '__main__':
    # demo: graph1 is graph2 plus extra edges, under different node numberings
    # graph2 = [[1], [2, 4, 5], [3], [6], [3], [3], []]
    # graph2 = [[4], [2], [6], [2], [1, 3, 5], [2], []]
    graph2 = [[6], [4], [4], [], [3], [4], [2, 5, 1]]
    graph1 = [[1, 2], [2, 4, 5, 3, 6], [3, 6], [6], [3], [3], []]
    additive_edge = subtraction(graph1, graph2)
    print(additive_edge)
|
# -*- coding: utf-8 -*-
import numpy as np
from operator import add
from functools import reduce
def AUC(scores):
    """Area Under the Curve

    params
    ------
    scores :
        np.ndarray, shape = (n_combination, 3)
        Each 3d row consists of (idx, jdx, indicator).
        If rank(idx|c) < rank(jdx|c), the indicator is 1;
        0 otherwise.
        You have to reshape scores in advance.
        e.g.
        [[0, 1, 1], [0, 2, 1], [0, 4, 0], ...]

    returns
    -------
    AUC :
        float, The higher the AUC score, the better.
    """
    # validate the shape before indexing columns (the original indexed first)
    assert scores.shape[1] == 3, "# of dimenstion in rows must be 3"
    pos_indices = np.unique(scores[:, 0])
    neg_indices = np.unique(scores[:, 1])
    indicators = scores[:, 2]
    assert len(set(pos_indices) & set(neg_indices)) == 0, "pos_indices AND neg_indices must be empty set"
    assert set(np.unique(indicators)) | {0, 1} == {0, 1}, "indicators must be subset of {0, 1}"
    n_pos = len(pos_indices)
    n_neg = len(neg_indices)
    # ndarray.sum() replaces reduce(add, ...): runs at C speed and yields 0
    # (instead of raising TypeError) for an empty indicator array
    return indicators.sum() / (n_pos * n_neg)
|
# ---------------
# Date: 10/06/2019
# Place: Biella/Torino/Ventimiglia
# Author: Vittorio Mazzia
# Project: Python in The Lab Project
# ---------------
# import some important libraries
import os
######################################################
# !! run this BEFORE importing TF or keras !!
# run code only on a single, particular GPU
######################################################
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
"""
Environment Variable Syntax Results
CUDA_VISIBLE_DEVICES=1 Only device 1 will be seen
CUDA_VISIBLE_DEVICES=0,1 Devices 0 and 1 will be visible
CUDA_VISIBLE_DEVICES="0,1" Same as above, quotation marks are optional
CUDA_VISIBLE_DEVICES=0,2,3 Devices 0, 2, 3 will be visible; device 1 is masked
CUDA_VISIBLE_DEVICES="" None GPU, only CPU
"""
# choose an available GPU
os.environ["CUDA_VISIBLE_DEVICES"] = ''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import logging
# configure the level of logging to INFO
logging.basicConfig(level=logging.INFO)
class Model(object):
"""
A class used to load and, if needed, train new models.
Using the available attributes is possible to train models only for digits or
for 47 different characters (all uppercase letters and digits plus some lowercase letters).
...
Attributes
----------
mainDir: str
path of the main folder of the project
datasetObj: obj
dataset object. It is used as source of data for new training
dirBin: str
path of the bin directory where all trained weights are stored
char: bool
if False a model for only digits recognition is loaded
epochs: int
how many iterations over the dataset are performed before concluding the training
batchSize: int
how many images are processed before updating weights values
train: bool
if True a forced training is performed even if already trained weights are available in the bin folder
verbose: bool
if True print some useful information
Methods
-------
buildDNN(n_classes)
Build a deep neural network object tailored for the specified number of classes
trainModel()
Train a new model with the specified attributes of the instantiated object
loadModel()
Try to load a model from the dirBin folder. If not found an automatic training is launched
plotHistory(history)
Plot the loss and accuracy curves for the training dataset
"""
def __init__(self, mainDir, datasetObj, dirBin = 'bin', char=False, epochs = 20, batchSize = 128, train=False, verbose=False):
    """
    Parameters
    ----------
    mainDir: str
        path of the main folder of the project
    datasetObj: obj
        dataset object. It is used as source of data for new training
    dirBin: str
        path of the bin directory where all trained weights are stored
    char: bool
        if False a model for only digits recognition is loaded
    epochs: int
        how many iterations over the dataset are performed before concluding the training
    batchSize: int
        how many images are processed before updating weights values
    train: bool
        if True a forced training is performed even if already trained weights are available in the bin folder
    verbose: bool
        if True print some useful information
    """
    self.dirBin = os.path.join(mainDir, dirBin)
    self.verbose = verbose
    self.dataset = datasetObj
    self.train = train
    self.batch_size = batchSize
    self.epochs = epochs
    self.char = char
    # ensure the weights folder exists at its FULL path; the original used the
    # bare `dirBin` name, which created the folder relative to the CWD instead
    # of under mainDir
    if not os.path.exists(self.dirBin):
        os.mkdir(self.dirBin)
def buildDNN(self, n_classes):
"""
Build a deep neural network object tailored for the specified number of classes
Parameters
----------
n_classes: int
number of classes as output of the network
"""
# create a model object
model = Sequential([
Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu', input_shape = (28,28,1)),
Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu'),
MaxPool2D(pool_size=(2,2)),
Dropout(0.25),
Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu'),
Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu'),
MaxPool2D(pool_size=(2,2), strides=(2,2)),
Dropout(0.25),
Flatten(),
Dense(256, activation = "relu"),
Dropout(0.5),
Dense(n_classes, activation = "softmax")])
if self.verbose == True:
model.summary()
return model
    def trainModel(self):
        """
        Train a new model with the specified attributes of the instantiated object.

        Returns
        -------
        model: obj
            the trained Keras model; it is also saved (with separate weights)
            as .h5 files inside the bin folder
        """
        # load data through the provided dataset object
        X, y = self.dataset.load()
        # count number of classes
        unique, _ = np.unique(y, return_counts=True)
        # call buildDNN method
        model = self.buildDNN(n_classes=len(unique))
        # preprocess data through the dataset class
        X_train = self.dataset.preProcessData(X)
        y_train = self.dataset.labalEncoding(y, n_classes=len(unique))
        # define optimizer
        optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        # compile the model
        model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
        # set a learning rate annealer: halve the LR when accuracy plateaus
        learning_rate_reduction = ReduceLROnPlateau(monitor='acc',
                                                    patience=3,
                                                    verbose=1,
                                                    factor=0.5,
                                                    min_lr=0.00001)
        # data augmentation in order to improve model performance
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=10,  # randomly rotate images
            zoom_range=0.1,  # randomly zoom image
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=False,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(X_train)
        # fit the model on augmented batches
        history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=self.batch_size),
                                      epochs=self.epochs, steps_per_epoch=X_train.shape[0] // self.batch_size,
                                      callbacks=[learning_rate_reduction])
        # save the model (and its weights separately) under the bin folder;
        # file names depend on whether this is a char or digit recognizer
        if self.char:
            model.save(os.path.join(self.dirBin, 'model_char.h5'))
            model.save_weights(os.path.join(self.dirBin, 'model_char_weights.h5'))
        else:
            model.save(os.path.join(self.dirBin, 'model_digit.h5'))
            model.save_weights(os.path.join(self.dirBin, 'model_digit_weights.h5'))
        logging.info('Model saved in {}'.format(self.dirBin))
        if self.verbose:
            self.plotHistory(history)
        return model
def loadModel(self):
"""
Try to load a model from the dirBin folder. If not found an automatic training is launched
Rasises
-------
[WARNING] Model not found:
if no pre-trained is found in the dirBin folder
"""
if not self.train:
try:
if self.char:
model = load_model(os.path.join(self.dirBin, 'model_char.h5'))
model.load_weights(os.path.join(self.dirBin, 'model_char_weights.h5'))
else:
model = load_model(os.path.join(self.dirBin, 'model_digit.h5'))
model.load_weights(os.path.join(self.dirBin, 'model_digit_weights.h5'))
if self.verbose:
model.summary()
return model
except Exception as e:
logging.warning('Model not found')
logging.info('Train a new model')
model = self.trainModel()
return model
else:
logging.info('Train a new model')
model = self.trainModel()
return model
def plotHistory(self, history):
"""
Plot the loss and accuracy curves for the training dataset
Parameters
----------
history: obj
history object with all information of the training process
"""
fig, ax = plt.subplots(2,1, figsize=(20,15))
ax[0].plot(history.history['loss'], color='b', label="Training loss")
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
legend = ax[1].legend(loc='best', shadow=True) |
'''
Find all anagrams in a list for a given lookup word
(can be a file)
'''
from __future__ import print_function
# sample corpus searched by both anagram approaches below
wordlist = ['abba',
            'baba',
            'caret',
            'cater']
# Using a lookup table
def approach1(lookup, words=None):
    """Print and return the anagrams of ``lookup`` found in ``words``.

    Parameters
    ----------
    lookup: str
        word to search anagrams for
    words: list of str, optional
        candidate words; defaults to the module-level ``wordlist``

    Returns
    -------
    list of str or None
        the anagrams found, or None when there are none
    """
    if words is None:
        words = wordlist
    signatures = {}
    # O(n) over the word list
    for w in words:
        # O(k log k) to build the sorted-character signature of each word
        w_s = ''.join(sorted(w))
        # anagrams share the same signature, so they group together
        signatures.setdefault(w_s, []).append(w)
    anagrams = signatures.get(''.join(sorted(lookup)))
    if anagrams:
        print(anagrams)
    return anagrams
# sort the word and just read through the list of words
def approach2(lookup, words=None):
    """Print and return every word in ``words`` that is an anagram of ``lookup``.

    Parameters
    ----------
    lookup: str
        word to search anagrams for
    words: list of str, optional
        candidate words; defaults to the module-level ``wordlist``

    Returns
    -------
    list of str
        all matching words (possibly empty)
    """
    if words is None:
        words = wordlist
    # O(k log k): sort the lookup word once, outside the scan
    lookup_sorted = ''.join(sorted(lookup))
    matches = []
    # O(n) scan with an O(k log k) sort per candidate
    for w in words:
        if lookup_sorted == ''.join(sorted(w)):
            print(w)
            matches.append(w)
    return matches
# using primes
# Primes are multiplicatively unique
# So if we assign a prime number to each character, and find
# the product: anagrams will have the same product.
# This will prevent sorting
# demo: both approaches print the anagrams of 'terac' found in wordlist
approach1('terac')
approach2('terac')
|
from ModSecurity import ModSecurity
from ModSecurity import Rules
from ModSecurity import Transaction
# Create the ModSecurity engine and print its version banner
modsec = ModSecurity()
print(modsec.whoAmI())
# Load the rule set from a local file and report any parse error
rules = Rules()
rules.loadFromUri("basic_rules.conf")
print(rules.getParserError())
# Run a sample GET request through a transaction and print the outcome
transaction = Transaction(modsec, rules)
print(transaction.processURI("http://www.modsecurity.org/test?key1=value1&key2=value2&key3=value3&test=args&test=test", "GET", "2.0"))
|
#!/opt/local/bin/python
import argparse
import MySQLdb
import os, os.path
from HTMLParser import HTMLParser
import codecs
from nltk import RegexpTokenizer
from nltk.corpus import stopwords
__author__ = 'guglielmo'
"""
This script helps generating a Categorized corpus (like the Reuters NLTK corpus)
that can be used for machine learning algorithms to assign tags to Openparlamento's acts.
See Chapter 6 of Natural Language Processing With Python
and Chapter 7 of Python Text Processing With NLTK 2.0 Cookbook
usage::
python generate_corpus.py --help
examples::
python generate_corpus.py --db=op_openparlamento --act-types=3,4,5,6 --delete --limit=5000 ../corpora/opp_interrogazioni
python generate_corpus.py --db=op_openparlamento --macro --act-types=2,3,4,5,6 --delete --limit=5000 ../corpora/opp_interrogazioni_macro
TODO: the acts and contents are directly fetched from a Mysql DB.
Accessing them through an API would decouple the script, avoiding the necessity
of having it run on Openparlamento's server.
"""
##
# HTML tag stripping through standard library's HTMLParser
##
class MLStripper(HTMLParser):
    # Collects only the text nodes fed to it; markup is discarded.
    def __init__(self):
        self.reset()  # HTMLParser.reset() initialises the parser state
        self.fed = []  # accumulated text fragments
    def handle_data(self, d):
        # called by HTMLParser for every text node encountered
        self.fed.append(d)
    def get_data(self):
        # join the collected fragments back into a single string
        return ''.join(self.fed)
def strip_tags(html):
    """Return ``html`` with all markup removed, keeping only the text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
##
# MySQL extraction functions
##
def get_acts(**kwargs):
    """
    Returns list of act_ids of given act types,
    sorted by presentation date.

    Expected kwargs: db (connection), limit, offset, act_types_ids.
    """
    db_conn = kwargs['db']
    limit = kwargs['limit']
    offset = kwargs['offset']
    act_types_ids = kwargs['act_types_ids']
    # sql is built this way because the ids list must be inserted;
    # normally the placeholders should be evaluated safely inside
    # cursor.execute(sql, parameters).  act_types_ids comes from the command
    # line, so this is tolerable here but unsafe for untrusted input.
    sql = """
        select id as act_id
        from opp_atto
        where tipo_atto_id in (%s) order by data_pres
    """ % (act_types_ids, )
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    # the original used `is not 0`, which compares object identity and only
    # works on small ints by a CPython implementation accident
    if limit != 0:
        if offset != 0:
            sql += " limit {0}, {1}".format(offset, limit)
        else:
            sql += " limit {0}".format(limit)
    cursor.execute(sql)
    rows = cursor.fetchall()
    cursor.close()
    return [row['act_id'] for row in rows]
def get_tags(act_id, **kwargs):
    """
    Returns the list of tags associated with acts of the given act.

    Geographic tags (namespace containing 'geo') are excluded and the
    tag ids are returned as strings.
    """
    db_conn = kwargs['db']
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    # %% escapes the literal % needed by the SQL LIKE pattern
    sql = """
        select t.id as tag_id, t.triple_value as tag_name, t.triple_namespace as tag_namespace
        from sf_tag t, sf_tagging g
        where g.taggable_model='OppAtto'
        and t.triple_namespace not like '%%geo%%'
        and g.tag_id=t.id and g.taggable_id=%s
    """
    # act_id is passed as a bound parameter (safe interpolation)
    cursor.execute(sql, act_id)
    rows = cursor.fetchall()
    cursor.close()
    return [str(row['tag_id']) for row in rows]
def get_macro_tags(act_id, **kwargs):
    """
    Returns the list of unique top-tags (macro categories) associated
    with acts of the given act, as strings.
    """
    db_conn = kwargs['db']
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    # joins taggings to their "teseo" top-level categories
    sql = """
        select distinct tt.teseott_id as tag_id, ttt.denominazione as tag_name
        from sf_tag t, sf_tagging g, opp_tag_has_tt tt, opp_teseott ttt
        where g.taggable_model='OppAtto' and
        g.tag_id=t.id and tt.tag_id=t.id and ttt.id=tt.teseott_id and
        g.taggable_id=%s;
    """
    cursor.execute(sql, act_id)
    rows = cursor.fetchall()
    cursor.close()
    return [str(row['tag_id']) for row in rows]
def get_documents_text(act_id, **kwargs):
    """
    Returns the concatenated, tag-stripped text of all documents related to act_id.

    Italian stopwords are removed from the resulting text.
    """
    db_conn = kwargs['db']
    # NOTE(review): the stopword set is rebuilt on every call; it could be
    # hoisted to module level if this becomes a bottleneck
    italian_stops = set(stopwords.words('italian'))
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    sql = """
        select d.testo
        from opp_documento as d
        where d.atto_id=%s
    """
    cursor.execute(sql, act_id)
    rows = cursor.fetchall()
    cursor.close()
    testo = u''
    for row in rows:
        # strip html tags from texts, if present
        testo += unicode(
            strip_tags(
                row['testo']
            )
        )
    # tokenize on word characters, then drop Italian stopwords
    tokenizer = RegexpTokenizer("[\w]+")
    words = tokenizer.tokenize(testo)
    filtered_testo = " ".join([word for word in words if word.lower() not in italian_stops])
    return filtered_testo
##
# generating function
##
def generate(**kwargs):
    """
    Extract texts from acts' documents in db and produces files in the specified prefixed path.
    Write a cats.txt categories file.

    PATH
    |- ID1
    |- ID2
    |- ...
    |- cats.txt

    Note: this module is Python 2 code (print statements, `except E, e`).
    """
    path = kwargs['path']
    macro = kwargs['macro']
    if not os.path.exists(path):
        os.mkdir(path)
    # delete all files under path, if required
    if kwargs['delete']:
        for the_file in os.listdir(path):
            file_path = os.path.join(path, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception, e:
                print e
    # write tags on cats.txt file
    cat_file = os.path.join(path, "cats.txt")
    f = codecs.open(cat_file, "w", "utf-8")
    for c, act_id in enumerate(get_acts(**kwargs)):
        print "{0}) {1}".format(c, act_id)
        # extract tags ids list (macro categories or plain tags)
        if macro:
            tags_ids_list = ",".join(get_macro_tags(act_id, **kwargs))
        else:
            tags_ids_list = ",".join(get_tags(act_id, **kwargs))
        # only writes acts tags in cats file if there are some
        if tags_ids_list:
            # extract all texts from documents' acts
            testo = get_documents_text(act_id, **kwargs)
            # write to files only if there is a testo
            if testo:
                # write act's tags in file
                f.write(u"{0},{1}\n".format(act_id, tags_ids_list))
                # build text file name
                text_file_path = os.path.join(path, str(act_id))
                # open text file in append mode, append content to it, close the file
                tf = codecs.open(text_file_path, "a", "utf-8")
                tf.write(testo)
                tf.close()
    f.close()
##
# Main function, called when directly calling the script
##
def main():
    """Parse the command line, connect to MySQL and run the corpus generation."""
    # setup command-line args parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", "--quiet", dest="quiet",
                        help="do not print to stdout", action="store_true")
    parser.add_argument("--db", dest="db",
                        help="the openparlamento database to extract data from, defaults to op_openparlamento",
                        default='op_openparlamento')
    parser.add_argument("--act-types-ids", dest="act_types_ids",
                        help="a comma separated list of acts types ids",
                        default='1')
    parser.add_argument("--limit", dest="limit",
                        help="limit the number of acts analyzed (0 = no limit)", type=int,
                        default=0)
    parser.add_argument("--offset", dest="offset",
                        help="offset of acts analyzed (0 = no offset)", type=int,
                        default=0)
    parser.add_argument("--delete", dest="delete",
                        help="remove previously created file in path and prefix", action='store_true',
                        default=False)
    parser.add_argument("--macro", dest="macro",
                        help="indicates that macro (top) categories should be extracted", action='store_true',
                        default=False)
    parser.add_argument("path",
                        help="where produced files and categories will be written, an absolute path")
    args = parser.parse_args()
    # connect to RDBMS server
    # NOTE(review): hard-coded root user with empty password; fine for a
    # local dev box but should come from configuration
    db = MySQLdb.connect(
        host="localhost",
        user="root",
        passwd="",
        db=args.db,
        charset='utf8',
        use_unicode=True
    )
    # build kwargs list for functions
    kwargs = {
        'act_types_ids': args.act_types_ids,
        'path': args.path,
        'delete': args.delete,
        'macro': args.macro,
        'limit': args.limit, 'offset': args.offset,
        'db': db,
    }
    # call generating function
    generate(**kwargs)
    # disconnect from server
    db.close()
if __name__ == "__main__":
    main()
|
from datetime import datetime
from model.Candle import Candle
from storage.KafkaStorage import KafkaStorage
import logging
log = logging.getLogger(__name__)
class CandleStorage(KafkaStorage):
    """Publishes candles to the Kafka 'candle' topic, sending for each
    instrument only candles newer than the last one already sent."""

    def __init__(self, bootstrap_servers='localhost:9092'):
        super(CandleStorage, self).__init__(bootstrap_servers=bootstrap_servers,
                                            value_serializer=lambda x: str(x).encode())
        self._last_candle_date = dict()  # instrument -> date of last sent candle
        # NOTE(review): the three attributes below are not used in this class;
        # presumably kept for the KafkaStorage contract — confirm before removing
        self._dict = dict()
        self._counter = dict()
        self._flush_num = 10

    def flush(self):
        # nothing to flush explicitly; the producer handles batching
        pass

    def save(self, candles_map):
        """Send all new candles per instrument and remember the newest date."""
        for instrument in candles_map.keys():
            if instrument in self._last_candle_date:
                last_date = self._last_candle_date[instrument]
                # keep only candles newer than the last one already sent
                income_candles = [c for c in candles_map[instrument]
                                  if datetime.strptime(c['d'], '%Y-%m-%d %H:%M:%S') > last_date]
            else:
                income_candles = candles_map[instrument]
            candles_to_send = [Candle(**c) for c in income_candles]
            if not candles_to_send:
                # nothing new for this instrument; the original indexed
                # candles_to_send[len-1] here and raised IndexError
                continue
            self._last_candle_date[instrument] = candles_to_send[-1].date
            for value in candles_to_send:
                try:
                    self._producer.send("candle", key=instrument, value=value)
                except Exception as e:
                    log.error(e, exc_info=True)
|
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.interpolate import UnivariateSpline as interpolate
from settings import tableau20
from data import exo
# "small planet" cutoffs — presumably Neptune's mass and radius in the
# catalogue's units; TODO confirm against the `exo` data source
m_nep = 0.0539531012
r_nep = 0.35219064
# Set dates for KOIs
# KOI -> 1609: 2011
#     -> 2841: 2012
#     -> 3149: 2012.5
#     -> 4914: 2013
#     -> 6251: 2014
#     -> 7620: 2015
# Boolean masks over KOI number; each release covers a growing KOI range
B11 = exo.KOI < 1610# & kepname
Q6 = exo.KOI <= 2841# & kepname
Q8 = exo.KOI <= 3149# & kepname
Q12 = exo.KOI <= 4914# & kepname
Q16 = exo.KOI <= 6251# & kepname
Q17 = exo.KOI <= 7620# & kepname
# assign from the broadest mask down to the narrowest so the earliest
# applicable release date wins for each row
exo.loc[Q17, 'DATE'] = 2015
exo.loc[Q16, 'DATE'] = 2014
exo.loc[Q12, 'DATE'] = 2013
exo.loc[Q8, 'DATE'] = 2012.5
exo.loc[Q6, 'DATE'] = 2012
exo.loc[B11, 'DATE'] = 2011
# split the catalogue by name: confirmed Kepler planets, KOI candidates,
# their union, and everything else
kepname = (exo.NAME.str.contains('Kepler')).values
koiname = (exo.NAME.str.contains('KOI')).values
kepler = exo[kepname].copy()
koi = exo[koiname].copy()
all_kepler = exo[koiname | kepname].copy()
no_kepler = exo[~(kepname | koiname)].copy()
def cum_draw(df=exo, color='k', mindate=1995, maxdate=2015, ax=None, norm=False, fill=True, kois=False,
             alpha=0.2, interp=False, kepler=False, kepsmall=False, label=None, xylabel=(0.1, 0.8),
             zorder=0):
    """Draw a symmetric cumulative-count "wedge" of discoveries over time.

    Counts rows of ``df`` whose DATE column is <= each observed date and
    fills the area between +count and -count.  Returns the figure.

    NOTE(review): fill, kois, kepler and kepsmall are currently unused —
    confirm whether they can be dropped from the signature.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    else:
        fig = ax.figure
    # cumulative number of discoveries at each distinct date
    dates = np.sort(df.DATE)
    ds = np.unique(dates).astype(float)
    ns = np.zeros(len(ds))
    for i, d in enumerate(ds):
        ns[i] = (dates <= d).sum()
    if norm:
        # rescale the curve so its maximum equals ``norm``
        ns /= ns.max()
        ns *= norm
    if interp:
        # smooth the staircase with a spline on a fine date grid
        dgrid = np.arange(mindate, maxdate, 0.1)
        fn = interpolate(ds, ns, s=0)
        y1 = fn(dgrid)
        y2 = -y1
    else:
        dgrid = ds
        y1 = ns
        y2 = -ns
    #ax.plot(dgrid,y1,color=color)
    #ax.plot(dgrid,y2,color=color)
    ax.fill_between(dgrid, y1, y2, alpha=alpha, color=color, zorder=zorder)
    ax.set_xlim(xmin=mindate, xmax=maxdate)
    ax.set_yticks([])
    date_ticks = np.arange(mindate, maxdate + 1, 2)
    #for d in date_ticks:
    #    ax.axvline(d, ls=':', color='k', alpha=0.2)
    ax.set_xticks(date_ticks)
    ax.tick_params(labelsize=16)
    # hide the frame; only the bottom date axis is shown
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.xaxis.set_tick_params(width=3, length=10)
    if label is not None:
        label = '%s (%i)' % (label, ns.max())
        # the original called pl.annotate, but no `pl` name exists in this
        # module, so passing a label raised NameError
        ax.annotate(label, xy=xylabel, xycoords='axes fraction', fontsize=18, color=color)
    return fig
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--nosmall', action='store_true')
    args = parser.parse_args()
    # with --nosmall the cutoffs become 0, emptying the "small" subsets
    if args.nosmall:
        small_m = 0
        small_r = 0
    else:
        small_m = m_nep
        small_r = r_nep
    # "small" = below the Neptune-based mass/radius cutoffs
    no_kepler_small = no_kepler.query('MASS < {}'.format(small_m))
    all_kepler_small = all_kepler.query('R < {}'.format(small_r))
    fig, ax = plt.subplots(1, 1, figsize=(12, 8))
    subsets = [no_kepler, no_kepler_small, all_kepler, all_kepler_small]
    n = len(subsets)
    # draw larger subsets further back (more negative zorder)
    for i, d in enumerate(subsets):
        fig = cum_draw(d, ax=ax, color=tableau20[i], alpha=1, zorder=-len(d));
    tag = ''
    if args.nosmall:
        tag = '_nosmall'
    filename = 'planet_wedge{}.png'.format(tag)
    fig.savefig(filename, transparent=True)
|
from typing import Dict, List
from mlagents.trainers.env_manager import EnvironmentStep
from mlagents.trainers.simple_env_manager import SimpleEnvManager
from mlagents_envs.side_channel.float_properties_channel import FloatPropertiesChannel
from mlagents.trainers.action_info import ActionInfo
from animalai.envs.arena_config import ArenaConfig
from animalai.envs.environment import AnimalAIEnvironment
class SimpleEnvManagerAAI(SimpleEnvManager):
    """Single-environment manager for AnimalAI that resets the environment
    with an ArenaConfig instead of the generic ml-agents reset parameters."""
    def __init__(
        self, env: AnimalAIEnvironment, float_prop_channel: FloatPropertiesChannel
    ):
        # NOTE(review): SimpleEnvManager.__init__ is not called; state is
        # initialised directly here — confirm this matches the base class.
        self.shared_float_properties = float_prop_channel
        self.env = env
        self.previous_step: EnvironmentStep = EnvironmentStep.empty(0)
        self.previous_all_action_info: Dict[str, ActionInfo] = {}
    def _reset_env(self, config: ArenaConfig = None) -> List[EnvironmentStep]:
        # pass the arena configuration straight through to the env reset
        self.env.reset(arenas_configurations=config)
        all_step_result = self._generate_all_results()
        self.previous_step = EnvironmentStep(all_step_result, 0, {})
        return [self.previous_step]
|
"""Unit tests for Yandex.Weather custom integration."""
|
import datetime
import random
import string
from lona import LonaView
from lona_bootstrap_5 import show_alert
from pillowfort.response_formatter import ResponseFormatter
class Endpoint(LonaView):
    """Base class for HTTP endpoints: wraps handle_request so every request
    (and any raised error) is recorded in the server-wide activity log."""

    URL = ''                 # route pattern, set by subclasses
    ROUTE_NAME = ''          # symbolic route name, set by subclasses
    INTERACTIVE = False      # plain request/response views, not interactive
    VARIABLES = []           # variable descriptors used by subclasses
    RESPONSE_FORMATTER = ResponseFormatter

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # wrap the subclass handler so every call is logged via _add_activity
        self.handle_request = self._handle_request_wrapper(self.handle_request)

    def _generate_random_id(self, length=8):
        # random ASCII-letter id used to key activity entries
        return ''.join(
            random.choice(string.ascii_letters)
            for i in range(length)
        )

    def _add_activity(self, request, response, error):
        # skip browser favicon probes so they don't pollute the log
        if request.url.path == '/favicon.ico':
            return
        self.server.state['activities'].append({
            'id': self._generate_random_id(),
            'endpoint': self,
            'timestamp': str(datetime.datetime.now()),
            'url': str(request.url.path),
            'method': request.method,
            'post': request.POST,
            'get': request.GET,
            'response': response,
            'error': error,
        })

    def _handle_request_wrapper(self, handle_request):
        # Decorates handle_request: records the request/response pair and
        # re-raises any exception only after the activity has been logged.
        def run_handle_request(request):
            error = None
            try:
                response = handle_request(request)
            except Exception as e:
                # placeholder response so the activity entry stays complete
                response = {
                    'text': '[No Response]'
                }
                error = e
            self._add_activity(
                request=request,
                response=response,
                error=error,
            )
            if error:
                raise error
            return response
        return run_handle_request

    def get_model(self, name):
        # look up a model object in the server-wide model registry
        return self.server.state['models'].get_model(name)

    def get_variable(self, name, section=''):
        # variables are stored per section in the server state
        return self.server.state['variables'][section][name]

    def show_alert(
        self,
        text,
        type='info',
        timeout=None,
        broadcast=False,
        filter_connections=lambda connection: True,
        wait=True,
    ):
        # thin delegate to lona_bootstrap_5.show_alert with this view bound
        return show_alert(
            lona_view=self,
            text=text,
            type=type,
            timeout=timeout,
            broadcast=broadcast,
            filter_connections=filter_connections,
            wait=wait,
        )
"""
Created on Dec 20, 2017
@author: nhan.nguyen
Verify that system returns 'True' when
checking a pairwise that exists in wallet.
"""
import pytest
import json
from indy import did, pairwise
from utilities import utils, common
from test_scripts.functional_tests.pairwise.pairwise_test_base \
import PairwiseTestBase
class TestCheckPairwiseExist(PairwiseTestBase):
    """Verify that pairwise.is_pairwise_exists returns True for a pairwise
    that was previously stored in the wallet."""
    @pytest.mark.asyncio
    async def test(self):
        # 1. Create wallet.
        # 2. Open wallet.
        self.wallet_handle = await common.create_and_open_wallet_for_steps(
            self.steps, self.wallet_name, self.pool_name, credentials=self.wallet_credentials)
        # 3. Create and store 'my_did' by random seed.
        self.steps.add_step("Create and store 'my_did' by random seed")
        (my_did, _) = await utils.perform(self.steps,
                                          did.create_and_store_my_did,
                                          self.wallet_handle, "{}")
        # 4. Create and "their_did".
        self.steps.add_step("Create 'their_did'")
        (their_did, _) = await utils.perform(self.steps,
                                             did.create_and_store_my_did,
                                             self.wallet_handle, '{}')
        # 5. Store 'their_did'.
        self.steps.add_step("Store 'their_did")
        await utils.perform(self.steps, did.store_their_did,
                            self.wallet_handle, json.dumps({"did": their_did}))
        # 6. Create pairwise.
        self.steps.add_step("Creare pairwise between 'my_did' and 'their_did'")
        await utils.perform(self.steps, pairwise.create_pairwise,
                            self.wallet_handle, their_did, my_did, None)
        # 7. Verify that 'is_pairwise_exists' return 'True'.
        self.steps.add_step("Verify that 'is_pairwise_exists' return 'True'")
        pairwise_exists = await utils.perform(self.steps,
                                              pairwise.is_pairwise_exists,
                                              self.wallet_handle,
                                              their_did,
                                              ignore_exception=False)
        utils.check(self.steps,
                    error_message="'False' is returned instead of 'True'",
                    condition=lambda: pairwise_exists is True)
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.proto import caffe2_pb2
class TestONNXWhile(hu.HypothesisTestCase):
    """Property-based test of the ONNXWhile operator against a Python
    reference implementation of a bounded Fibonacci loop."""
    @given(
        condition=st.booleans(),
        max_trip_count=st.integers(0, 100),
        save_scopes=st.booleans(),
        seed=st.integers(0, 65535),
        **hu.gcs_cpu_only)
    def test_onnx_while_fibb(
            self, condition, max_trip_count, save_scopes, seed, gc, dc):
        np.random.seed(seed)
        # Create body net
        body_net = caffe2_pb2.NetDef()
        # Two loop carried dependencies: first and second
        body_net.external_input.extend(['i', 'cond', 'first', 'second'])
        body_net.external_output.extend(['cond_new', 'second', 'third', 'third'])
        add_op = core.CreateOperator(
            'Add',
            ['first', 'second'],
            ['third'],
        )
        print3 = core.CreateOperator(
            'Print',
            ['third'],
            [],
        )
        limit_const = core.CreateOperator(
            'ConstantFill',
            [],
            ['limit_const'],
            shape=[1],
            dtype=caffe2_pb2.TensorProto.FLOAT,
            value=100.0,
        )
        # loop continues while the newest Fibonacci value is < 100
        cond = core.CreateOperator(
            'LT',
            ['third', 'limit_const'],
            ['cond_new'],
        )
        body_net.op.extend([add_op, print3, limit_const, cond])
        while_op = core.CreateOperator(
            'ONNXWhile',
            ['max_trip_count', 'condition', 'first_init', 'second_init'],
            ['first_a', 'second_a', 'third_a'],
            body=body_net,
            has_cond=True,
            has_trip_count=True,
            save_scopes=save_scopes,
        )
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the documented replacement and yields the same dtype
        condition_arr = np.array(condition).astype(bool)
        max_trip_count_arr = np.array(max_trip_count).astype(np.int64)
        first_init = np.array([1]).astype(np.float32)
        second_init = np.array([1]).astype(np.float32)

        def ref(max_trip_count, condition, first_init, second_init):
            # Python mirror of the loop body above
            first = 1
            second = 1
            results = []
            if condition:
                for _ in range(max_trip_count):
                    third = first + second
                    first = second
                    second = third
                    results.append(third)
                    if third > 100:
                        break
            return (first, second, np.array(results).astype(np.float32))

        self.assertReferenceChecks(
            gc,
            while_op,
            [max_trip_count_arr, condition_arr, first_init, second_init],
            ref,
        )
        # loop-internal blobs must not leak into the outer workspace
        self.assertFalse(workspace.HasBlob("cond_new"))
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) 2017-2018 Martin Olejar
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
import os
class HotPlugBase:
    """Do-nothing base interface for USB hot-plug monitors."""

    def __init__(self):
        pass

    def attach(self, callback):
        """Register a callback; the base implementation ignores it."""
        pass

    def start(self):
        """Begin monitoring; the base implementation does nothing."""
        pass

    def stop(self):
        """Stop monitoring; the base implementation does nothing."""
        pass
if os.name == 'posix':
    import pyudev
    # import syslog

    class HotPlug(HotPlugBase):
        """Linux implementation: watches the 'usb' subsystem via udev."""
        def __init__(self, callback=None):
            super().__init__()
            context = pyudev.Context()
            # context.log_priority = syslog.LOG_DEBUG
            self.monitor = pyudev.Monitor.from_netlink(context)
            self.monitor.filter_by(subsystem='usb')
            self.callback = None
            self.observer = None
            if callback is not None:
                self.attach(callback)
        def attach(self, callback):
            # the callback is fired once on start() and then for every event
            self.callback = callback
        def start(self):
            # NOTE(review): assert with an empty message; asserts are stripped
            # under `python -O` — consider raising explicitly instead
            assert self.callback is not None, ""
            self.callback()
            self.observer = pyudev.MonitorObserver(self.monitor, callback=self.callback, name='monitor-observer')
            self.observer.start()
        def stop(self):
            assert self.callback is not None, ""
            if self.observer is not None:
                self.observer.stop()
                self.observer = None
elif os.name == 'nt':
    class HotPlug(HotPlugBase):
        """Windows implementation: no real monitoring, start() only fires the
        initial callback once."""
        def __init__(self, callback=None):
            super().__init__()
            self.callback = None
            if callback is not None:
                self.attach(callback)
        def attach(self, callback):
            self.callback = callback
        def start(self):
            assert self.callback is not None, ""
            self.callback()
else:
    raise OSError('Not supported OS type')
|
import circuits # import local file
from qiskit import transpile
from qiskit.providers.aer import StatevectorSimulator
from qiskit.visualization import plot_histogram
import itertools
import matplotlib.pyplot as plt
if __name__ == "__main__":
#circuit = circuits.no_overlap()
#circuit = circuits.no_1_1_2_tiles()
#circuit = circuits.no_1_1_1_tile()
circuit = circuits.combined_constraints_v1()
#circuit = circuits.constraint_free()
#circuit = circuits.combined_constraints_v2()
# Draw the quantum circuit
circuit.draw("mpl", plot_barriers=False)
# Use the StatevectorSimulator
sim = StatevectorSimulator()
circuit = transpile(circuit, sim)
result = sim.run(circuit).result()
# Retrieve the probabilities (N.b. it won't return null values)
sequence_probabilities = result.get_counts(circuit)
# Generate all the binary sequences for n bits
binary_sequences = ["".join(seq) for seq in itertools.product("01", repeat=4)]
# Map (combine) the probabilities as we're only interested in the 4 bits sequences
final_results = {}
for s in binary_sequences:
final_results[s] = 0.
for k, v in sequence_probabilities.items():
reverse = k[::-1]
final_results[reverse[0:4]] += v
for k, v in final_results.items():
print(k, v)
# Plot the histogram
plot_histogram(final_results)
plt.tight_layout()
# Show the plots
plt.show() |
# rename A.execute into A.calculate (Rename ⇧F6)
# Beware of not including wrong dynamic references!
# Add type annotation to run(a, b), try rename again (Rename ⇧F6)
class A:
    # target of the rename exercise described at the top of this file
    def execute(self):
        print('execute A')
class B:
    # has the same method name as A on purpose, to trip up dynamic renames
    def execute(self):
        print('execute B')
def run(a, b):
    # duck-typed calls: without type annotations the IDE cannot tell whether
    # these refer to A.execute or B.execute (the point of the exercise)
    a.execute()
    b.execute()
if __name__ == '__main__':
    # exercise both dynamic call sites
    a = A()
    b = B()
    run(a, b)
|
#!/usr/bin/env python
"""
Faça um Programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o usuário.
"""
def float_or_int(number):
    """Return ``number`` as an int when it has no fractional part."""
    if number.is_integer():
        return int(number)
    return float(number)
def get_number(input_question):
    """Prompt the user with ``input_question`` and return the typed number,
    converted to int when it has no fractional part.

    Raises
    ------
    TypeError
        when the input cannot be parsed as a number.
    """
    numero = None
    while not numero:
        try:
            numero = float(input(input_question))
            return float_or_int(numero)
        except ValueError:
            # float() raises ValueError on bad input; the original caught
            # TypeError, which float() never raises here, so invalid input
            # crashed with an unhandled ValueError instead of this message
            raise TypeError("O valor informado deve ser um número (separador de casas decimais deve ser ponto)")
if __name__ == "__main__":
lado = get_number("Informe o lado do quadrado: ")
area = float(lado ** 2)
print("A área é de {} u.a.".format(float_or_int(round(area, 3))))
print("O dobro da área é {}".format(float_or_int(round(area * 2, 3))))
|
import time
import chess
import chess.pgn
import pickle
import sys
import numpy as np
import argparse
from .utils import Tic
def convert_games(source='', save_path='', start_name='chess', block_size=1000000, blocks=0, inter_map=None):
    """
    Read games from a PGN file and dump per-position data as pickled blocks.

    Parameters
    ----------
    source: str
        path of the PGN file to read; nothing is done when empty
    save_path: str
        directory/prefix where the pickle files are written
    start_name: str
        base name of the output files
    block_size: int
        number of board positions stored per block
    blocks: int
        stop after this many full blocks (0 = read the whole file)
    inter_map: dict
        maps each board character from python-chess' ASCII board to an int8 code
    """
    if source == '':
        print('There is not source specified')
        return
    # per-position parallel arrays, filled one block at a time
    game_nb = np.zeros([block_size], dtype=np.int32)   # game index
    turn_nb = np.zeros([block_size], dtype=np.int16)   # ply number within the game
    state = np.zeros([block_size, 64], dtype=np.int8)  # encoded 8x8 board
    result = np.zeros([block_size, 3], dtype=np.int8)  # one-hot [white, black, draw]
    elo = np.zeros([block_size, 2], dtype=np.int16)    # [white elo, black elo]
    pgn = open(source)
    game = chess.pgn.read_game(pgn)
    i = 0      # index inside the current block
    cont = 1   # current block number
    nb = 0     # number of games processed
    tic = Tic()
    while game:
        try:
            # games missing Elo headers raise KeyError and are skipped below
            temp_elo = [game.headers['WhiteElo'], game.headers['BlackElo']]
            result_str = game.headers['Result']
            nb += 1
            j = 0
            sys.stdout.write(f'\r {cont} block reading: {100*i/block_size:.2f}%')
            sys.stdout.flush()
            board = game.board()
            moves = list(game.mainline_moves())
            if '1-0' in result_str:  # White wins
                winner = [1, 0, 0]
            elif '0-1' in result_str:  # Black wins
                winner = [0, 1, 0]
            else:
                winner = [0, 0, 1]
            for v in moves:
                board.push(v)
                # flatten python-chess' ASCII board into a 64-char string
                b = str(board).replace(' ', '').replace('\n', '')
                # the comprehension variable shadows the outer i but is
                # scoped to the comprehension in Python 3
                d = np.array([inter_map[i] for i in list(b)], dtype=np.int8)
                state[i] = d
                result[i] = winner
                game_nb[i] = nb
                turn_nb[i] = j
                elo[i] = temp_elo
                i += 1
                j += 1
                # int16 overflow guard for the per-game ply counter
                if j > 32767:
                    print('Hay partidas con más de 32767 movimientos, por lo que no es posible guardar el turno con 16 bits')
                    return
                if i % block_size == 0:
                    # block full: dump all five arrays and start the next block
                    i = 0
                    with open(f'{save_path}{start_name}_game.{cont}.pkl', 'wb') as outfile:
                        pickle.dump(game_nb, outfile, pickle.HIGHEST_PROTOCOL)
                    with open(f'{save_path}{start_name}_turn.{cont}.pkl', 'wb') as outfile:
                        pickle.dump(turn_nb, outfile, pickle.HIGHEST_PROTOCOL)
                    with open(f'{save_path}{start_name}_state.{cont}.pkl', 'wb') as outfile:
                        pickle.dump(state, outfile, pickle.HIGHEST_PROTOCOL)
                    with open(f'{save_path}{start_name}_elo.{cont}.pkl', 'wb') as outfile:
                        pickle.dump(elo, outfile, pickle.HIGHEST_PROTOCOL)
                    with open(f'{save_path}{start_name}_result.{cont}.pkl', 'wb') as outfile:
                        pickle.dump(result, outfile, pickle.HIGHEST_PROTOCOL)
                    sys.stdout.write(f'\r {cont} block reading: 100.00%')
                    tic.toc()
                    if cont == blocks:
                        return
                    cont += 1
                    tic.tic()
        except KeyError:
            pass
        game = chess.pgn.read_game(pgn)
    # trim the last, partially filled block and dump it as the final files
    game_nb = game_nb[:i]
    turn_nb = turn_nb[:i]
    state = state[:i, :]
    elo = elo[:i, :]
    result = result[:i, :]
    with open(f'{save_path}{start_name}_game.{cont}.pkl', 'wb') as outfile:
        pickle.dump(game_nb, outfile, pickle.HIGHEST_PROTOCOL)
    with open(f'{save_path}{start_name}_turn.{cont}.pkl', 'wb') as outfile:
        pickle.dump(turn_nb, outfile, pickle.HIGHEST_PROTOCOL)
    with open(f'{save_path}{start_name}_state.{cont}.pkl', 'wb') as outfile:
        pickle.dump(state, outfile, pickle.HIGHEST_PROTOCOL)
    with open(f'{save_path}{start_name}_elo.{cont}.pkl', 'wb') as outfile:
        pickle.dump(elo, outfile, pickle.HIGHEST_PROTOCOL)
    with open(f'{save_path}{start_name}_result.{cont}.pkl', 'wb') as outfile:
        pickle.dump(result, outfile, pickle.HIGHEST_PROTOCOL)
    tic.toc()
|
class Node:
    """Singly linked list node holding ``data`` and a ``next`` pointer."""
    def __init__(self, data=None):
        self.data = data
        self.next = None
    def __str__(self):
        return str(self.data)


class Stack:
    """Fixed-capacity stack backed by a singly linked list of Node objects.

    Index 0 in peek()/change() refers to the BOTTOM of the stack.
    """
    def __init__(self, size):
        self.head = None    # top of the stack
        self.size = size    # maximum capacity
        self.length = 0     # current number of elements
    def __len__(self):
        return self.length
    def __str__(self):
        # comma-separated values from top to bottom
        node = self.head
        parts = []
        while node is not None:
            parts.append(str(node))
            node = node.next
        return ','.join(parts)
    def is_empty(self):
        return self.length == 0
    def is_full(self):
        return self.length == self.size
    def push(self, data):
        """Push ``data`` on top; raises IndexError when at capacity."""
        if self.is_full():
            raise IndexError('Stack is full')
        node = Node(data)
        node.next = self.head
        self.head = node
        self.length += 1
    def pop(self):
        """Remove and return the top element; raises IndexError when empty.

        The original raised the wrong message ('Stack is full') on an empty
        stack and discarded the popped value; it is now returned.
        """
        if self.is_empty():
            raise IndexError('Stack is empty')
        node = self.head
        self.head = node.next
        self.length -= 1
        return node.data
    def peek(self, index):
        """Return the value at ``index`` (0 = bottom) without removing it."""
        if index >= self.length:
            raise IndexError(f'index: {index} greater than the stack length: {self.length}')
        count = self.length - 1  # head holds the highest index
        node = self.head
        while count >= index:
            if count == index:
                return node.data
            count -= 1
            node = node.next
    def change(self, index, value):
        """Overwrite the value at ``index`` (0 = bottom) with ``value``."""
        if index >= self.length:
            raise IndexError(f'index: {index} greater than the stack length: {self.length}')
        count = self.length - 1
        node = self.head
        while count >= index:
            if count == index:
                node.data = value
                return
            count -= 1
            node = node.next
from django.conf.urls import patterns, include, url
from django.contrib import admin
from design import views
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10, so this URLconf targets an older Django release.
# NOTE(review): several routes below are unanchored (no trailing '$'), e.g.
# r'^project' also matches 'projectXYZ' -- confirm that is intended.
urlpatterns = patterns('',
    url(r'^search$', views.searchParts),
    url(r'^get$', views.getParts),
    url(r'^dashboard$', views.dashboardView),
    url(r'^updateChain$', views.saveChain),
    url(r'^newProject$', views.createProject),
    url(r'^getChain$', views.getProjectChain),
    url(r'^getUserProject$', views.getUserProjects),
    url(r'^getProject$', views.getProject),
    url(r'^arecommend$', views.getARecommend),
    url(r'^seqRecommend$', views.getMRecommend),
    url(r'^tracks$', views.getTracks),
    url(r'^project', views.projectView),
    url(r'^getChainList', views.getProjectChains),
    url(r'^newDevice', views.createNewDevice),
    url(r'^getResultImage', views.getResultImage),
    url(r'^getChainLength', views.getChainLength),
    url(r'^changeProjectname', views.changeProjectName),
    url(r'^changeTrack', views.changeProjectTrack),
    url(r'^deleteProject', views.deleteProject),
    url(r'^getTrackFunctions', views.getTrackFunctions),
    url(r'^simulation$', views.simulationView),
    url(r'^simulate$', views.simulate)
)
|
"""
Helpers for tests.
"""
import itertools
import unittest
import numpy as np
import pandas as pd
import pandas.testing
from vlnm.normalizers.base import Normalizer
# There are issues wth pandas and pylint
# See https://github.com/PyCQA/pylint/issues/2198 for some discussion.
#
#
# Short aliases so test modules can refer to the pandas types tersely.
DataFrame = pd.DataFrame
Series = pd.Series
def assert_frame_equal(*args, **kwargs):
    """Delegate to :func:`pandas.testing.assert_frame_equal`."""
    return pandas.testing.assert_frame_equal(*args, **kwargs)
def assert_series_equal(*args, **kwargs):
    """Delegate to :func:`pandas.testing.assert_series_equal`."""
    return pandas.testing.assert_series_equal(*args, **kwargs)
def concat_df(*args, **kwargs):
    """Delegate to :func:`pandas.concat` for data-frame concatenation."""
    return pd.concat(*args, **kwargs)
def make_set_up(set_up=None):
    """Make the set-up hook for a repeating test.

    Returns *set_up* itself when one is supplied; otherwise a thin shim
    that invokes the test object's own ``setUp``.
    """
    if set_up:
        return set_up
    return lambda obj: obj.setUp()
def repeat_test(iterations=1, seed=None, set_up=None):
    """
    Decorator for repeating tests with (reproducible) random numbers.

    :param iterations: how many times to run the decorated test body.
    :param seed: passed to ``np.random.seed`` once, before the first
        iteration, so repeated runs are deterministic when not None.
    :param set_up: optional per-iteration set-up callable; defaults to
        invoking the test object's own ``setUp``.
    """
    import functools  # local import: keeps the module's import block unchanged
    # Resolve the per-iteration set-up hook once, up front.
    do_set_up = set_up if set_up else (lambda obj: obj.setUp())

    def _decorator(method):
        # BUG FIX: the original copied only __name__ onto the wrapper, losing
        # the docstring and other metadata; functools.wraps preserves them all.
        @functools.wraps(method)
        def _wrapper(self, *args, **kwargs):
            np.random.seed(seed)
            for _ in range(iterations):
                do_set_up(self)
                method(self, *args, **kwargs)
        return _wrapper
    return _decorator
def generate_data_frame(
        speakers=1,
        genders=None,
        factors=None,
        na_percentage=1.):
    """
    Generate a random(ish) data-frame for testing.

    :param speakers: number of distinct speaker ids (0..speakers-1).
    :param genders: sequence of gender labels assigned round-robin by
        speaker id; defaults to ``['M', 'F']``.
    :param factors: mapping of factor name -> list of levels; the frame
        holds the cartesian product of all levels and the speaker ids.
        Defaults to no extra factors.
    :param na_percentage: percentage (0-100) of each formant column
        replaced by NaN at random.
    """
    # BUG FIX / generalization: the original crashed with the declared
    # defaults (factors=None -> None.copy(); genders=None -> len(None)).
    genders = list(genders) if genders else ['M', 'F']
    df_factors = dict(factors) if factors else {}
    df_factors.update(speaker=list(range(speakers)))
    base_df = pd.DataFrame(
        list(itertools.product(*df_factors.values())),
        columns=df_factors.keys())
    # Round-robin gender assignment by speaker id.
    index = base_df['speaker'] % len(genders)
    base_df['gender'] = np.array(genders)[index]
    formants = ['f0', 'f1', 'f2', 'f3']
    for f, formant in enumerate(formants):
        # Base value depends on the gender index; add +/-25 jitter.
        base_df[formant] = (index + 1) * 250 + f * 400
        base_df[formant] += np.random.randint(50, size=len(base_df)) - 25
        # Blank out roughly na_percentage% of the cells at random.
        i = np.random.random(len(base_df)) > (1. - na_percentage / 100.)
        base_df.loc[i, formant] = np.nan
    return base_df
def get_test_dataframe(speakers=8):
    """Generate a test dataframe with the standard factor layout."""
    return generate_data_frame(
        speakers=speakers,
        genders=['M', 'F'],
        factors=dict(
            group=['HV', 'LV'],
            test=['pre', 'post'],
            vowel=['a', 'e', 'i', 'o', 'u']))
# Fixed dataframe shared by every test in this run; individual tests take a
# .copy() in setUp so the shared frame is never mutated.
DATA_FRAME = get_test_dataframe()
class Helper:
    """Wrapper class for the base test classes.

    Nesting the abstract TestCase subclasses inside ``Helper`` appears
    intended to keep them out of top-level unittest discovery; concrete
    suites subclass them at module level.
    """
    class TestNormalizerBase(unittest.TestCase):
        """Common tests for the speaker normalizers."""
        # Default normalizer under test; concrete suites override the
        # class-level ``normalizer`` attribute.
        class TestNormalier(Normalizer):
            pass
        normalizer = TestNormalier
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Bind the class-level normalizer onto the instance for tests.
            self.normalizer = self.__class__.normalizer
        def setUp(self):
            # Work on a copy so tests never mutate the shared DATA_FRAME.
            self.df = DATA_FRAME.copy()
            self.formants = ['f0', 'f1', 'f2', 'f3']
            self.kwargs = dict(formants=self.formants)
        def test_column_missing(self):
            """
            Missing columns raises ValueError.
            """
            columns = self.normalizer().config['columns']
            for column in columns:
                df = self.df.copy()
                df = df.drop(column, axis=1)
                with self.assertRaises(ValueError):
                    self.normalizer().normalize(df, **self.kwargs)
        def test_default_columns(self):
            """Check default columns returned."""
            expected = self.df.columns
            actual = self.normalizer().normalize(
                self.df, **self.kwargs).columns
            # Compare sorted: column order is not part of the contract.
            expected = sorted(expected)
            actual = sorted(actual)
            self.assertListEqual(actual, expected)
        def test_new_columns(self):
            """Check new (renamed) columns are added alongside the originals."""
            rename = '{}*'
            expected = (list(self.df.columns) +
                        list(rename.format(f) for f in self.formants))
            actual = self.normalizer(rename=rename, **self.kwargs).normalize(
                self.df).columns
            expected = sorted(expected)
            actual = sorted(actual)
            self.assertListEqual(actual, expected)
        def test_call(self):
            """Test calling the normalizer instance directly (``__call__``)."""
            expected = self.df.columns
            actual = self.normalizer()(
                self.df, **self.kwargs).columns
            expected = sorted(expected)
            actual = sorted(actual)
            self.assertListEqual(actual, expected)
    class TestFormantSpecificNormalizerBase(TestNormalizerBase):
        """Common tests for normalizer classes that accept f0..f3 keywords."""
        def test_fx_spec(self):
            """
            Specify formants using individual keys.
            """
            df = self.df.copy()
            normalizer = self.normalizer(f0='f0', f1='f1', f2='f2', f3='f3', **self.kwargs)
            normalizer.normalize(df)
            self.assertListEqual(
                normalizer.params['formants'],
                ['f0', 'f1', 'f2', 'f3'])
        def test_fx_list_spec(self):
            """
            Specify formants using individual keys with list values.
            """
            df = self.df.copy()
            normalizer = self.normalizer(
                f0=['f0'], f1=['f1'], f2=['f2'], f3=['f3'], **self.kwargs)
            normalizer.normalize(df)
            self.assertListEqual(
                normalizer.params['formants'],
                ['f0', 'f1', 'f2', 'f3'])
    class TestFormantNormalizerBase(TestNormalizerBase):
        """Common tests for the formant normalizers."""
        # Identity transform by default; concrete suites override ``transform``
        # with the normalizer's expected formant mapping.
        @staticmethod
        def transform(x):
            return x
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.formant_transform = self.__class__.transform
        def test_normalize(self):
            """Test normalize output against the expected transform."""
            expected = self.df.copy()
            expected[self.formants] = self.formant_transform(expected[self.formants])
            actual = self.normalizer().normalize(self.df)
            assert_frame_equal(actual, expected)
    class SpeakerNormalizerTests(TestNormalizerBase):
        """Common tests for the speaker normalizers."""
        def setUp(self):
            self.df = DATA_FRAME.copy()
            self.formants = ['f0', 'f1', 'f2', 'f3']
            # Speaker normalizers take explicit per-formant keyword aliases.
            self.kwargs = dict(f0='f0', f1='f1', f2='f2', f3='f3')
        def test_incorrect_alias(self):
            """
            Missing aliased column raises ValueError.
            """
            df = self.df.copy()
            with self.assertRaises(ValueError):
                self.normalizer(speaker='talker', **self.kwargs).normalize(df)
|
'''Test notes for file checking.

If we request the channels from the file and there are no channels,
that case must be handled. When you call the channels from a file, you
specify which file (i.e. which list) to call from. If the stream is
already present, it should not be added again, and the user should be
notified that it already exists.
'''
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RouteCompilationError(Model):
    """A compilation error produced while evaluating an IoT Hub route.

    :param message: Route error message
    :type message: str
    :param severity: Severity of the route error. Possible values include:
     'error', 'warning'
    :type severity: str or ~azure.mgmt.iothub.models.RouteErrorSeverity
    :param location: Location where the route error happened
    :type location: ~azure.mgmt.iothub.models.RouteErrorRange
    """

    # Serialization map consumed by msrest's Model machinery.
    _attribute_map = {
        'message': {'key': 'message', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'location': {'key': 'location', 'type': 'RouteErrorRange'},
    }

    def __init__(self, **kwargs):
        super(RouteCompilationError, self).__init__(**kwargs)
        self.message = kwargs.get('message')
        self.severity = kwargs.get('severity')
        self.location = kwargs.get('location')
|
"""Simple 1D & 2D plotting utilities package for "Synchrotron Radiation Workshop" (SRW).
``uti_plot`` currently wraps ``matplotlib``, but other backends are
planned. If no suitable backend is available, ``uti_plot_init`` sets
the backend to ``uti_plot_none`` so that the calling program is still
functional. This is useful for systems where there is no graphing
library available, but you still want to see the results of the
SRW program.
Usage:
import uti_plot as up
up.uti_plot_init()
uti_plot1d(...)
uti_plot_show()
Modules:
uti_plot
This module, which loads all other modules dynamically
uti_plot_matplotlib
        Does the actual plotting using matplotlib.pyplot. Currently, loaded in all cases except
when ``backend`` is ``None``
test_uti_plot
Simple tests for uti_plot
.. moduleauthor:: Rob Nagler <nagler@radiasoft.net>
"""
import sys
import uti_plot_com
import traceback
# Module-level backend singleton; reassigned by uti_plot_init() and replaced
# with a lazy _BackendMissing instance at the bottom of this module.
_backend = None
# Sentinel telling uti_plot_init() to choose an appropriate backend itself.
DEFAULT_BACKEND = '<default>'
def uti_plot_init(backend=DEFAULT_BACKEND, fname_format=None):
    """Initializes plotting engine with backend and, optionally, save plots to fname_format

    Tries to initialize `backend` as the plotting engine. If not found, an
    error will be printed, and this module's functions will be no-ops. If
    DEFAULT_BACKEND provided, an appropriate backend will be chosen and printed.
    Plots may also be saved if fname_format is supplied.
    You may call ``uti_plot_init(None)`` explicitly so that no plotting occurs.

    :param str backend: a matplot backend (TkAgg, etc.) or ``inline`` in IPython
    :param str fname_format: where to save plots. format field is a sequential plot number, starting at 0.
    :raises ValueError: if fname_format is given while backend is None
    """
    global _backend
    if backend is not None:
        try:
            import uti_plot_matplotlib
            _backend = uti_plot_matplotlib.Backend(backend, fname_format)
            return
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; only real errors should fall through to
            # the no-op backend below.
            traceback.print_exc()
            print(backend + ': unable to import specified backend (or its dependency); no plots')
    elif fname_format is not None:
        raise ValueError(fname_format + ': fname_format must be null if backend is None')
    _backend = _BackendNone()
def uti_plot_show():
    """Display the plots accumulated on the active backend."""
    #if '_backend' not in locals(): uti_plot_init() #?
    _backend.uti_plot_show()
def uti_plot1d(ar1d, x_range, labels=('Photon Energy [eV]', 'ph/s/0.1%bw'), units=None):
    """Generate a one-dimensional line plot from the given array.

    :param array ar1d: data points
    :param list x_range: passed to numpy.linspace (start, stop, num samples)
    :param tuple labels: [x-axis, y-axis(, title)]
    :param units: optional [x-unit, y-unit]; triggers axis rescaling
    """
    if units is not None:
        # Rescale the abscissa and fold the units into the axis labels.
        x_range, x_unit = uti_plot_com.rescale_dim(x_range, units[0])
        units = [x_unit, units[1]]
        title = labels[2] if len(labels) > 2 else ''
        labels = (labels[0] + ' [' + units[0] + ']', labels[1] + ' [' + units[1] + ']', title)
    _backend.uti_plot1d(ar1d, x_range, labels)
def uti_plot1d_ir(ary, arx, labels=('Longitudinal Position [m]', 'Horizontal Position [m]'), units=None): #OC15112017
    """Plot ordinate *ary* against abscissa *arx* as a 1D line plot.

    :param array ary: ordinate array
    :param array arx: abscissa array
    :param tuple labels: [x-axis, y-axis(, title)]
    :param units: optional [x-unit, y-unit]; appended to the axis labels
    """
    if units is not None:
        # Axis rescaling is currently disabled here; only the labels get the
        # unit suffixes and an optional title appended.
        title = labels[2] if len(labels) > 2 else ''
        labels = (labels[0] + ' [' + units[0] + ']', labels[1] + ' [' + units[1] + ']', title)
    _backend.uti_plot1d_ir(ary, arx, labels)
def uti_plot2d(ar2d, x_range, y_range, labels=('Horizontal Position [m]','Vertical Position [m]'), units=None):
    """Generate a quad-mesh plot from a "flattened" 2D array.

    :param array ar2d: data points
    :param list x_range: passed to numpy.linspace (start, stop, num samples)
    :param list y_range: y axis (same structure as x_range)
    :param tuple labels: [x-axis, y-axis(, title)]
    :param units: optional [x-unit, y-unit, z-unit]; triggers axis rescaling
    """
    if units is not None:
        # Rescale both axes and fold the units into the axis labels.
        x_range, x_unit = uti_plot_com.rescale_dim(x_range, units[0])
        y_range, y_unit = uti_plot_com.rescale_dim(y_range, units[1])
        units = [x_unit, y_unit, units[2]]
        title = labels[2] if len(labels) > 2 else ''
        labels = (labels[0] + ' [' + units[0] + ']', labels[1] + ' [' + units[1] + ']', title)
    _backend.uti_plot2d(ar2d, x_range, y_range, labels)
def uti_plot2d1d(ar2d, x_range, y_range, x=0, y=0, labels=('Horizontal Position', 'Vertical Position', 'Intensity'), units=None, graphs_joined=True):
    """Generate 2d quad mesh plot from given "flattened" array, and 1d cuts passing through (x, y)

    :param array ar2d: data points
    :param list x_range: Passed to numpy.linspace(start sequence, stop sequence, num samples)
    :param list y_range: y axis (same structure as x_range)
    :param x: x value for 1d cut
    :param y: y value for 1d cut
    :param tuple labels: [x-axis, y-axis, z-axis]
    :param tuple units: [x-axis, y-axis, z-axis]
    :param graphs_joined: switch specifying whether the 2d plot and 1d cuts have to be displayed in one panel or separately
    """
    #if '_backend' not in locals(): uti_plot_init() #?
    if(units is not None): #checking / re-scaling x, y
        x_range, x_unit = uti_plot_com.rescale_dim(x_range, units[0])
        y_range, y_unit = uti_plot_com.rescale_dim(y_range, units[1])
        units = [x_unit, y_unit, units[2]]
        # 2D panel label: both axis names with units, plus the z-axis title.
        strTitle = labels[2]
        label2D = (labels[0] + ' [' + units[0]+ ']', labels[1] + ' [' + units[1] + ']', strTitle)
        # Horizontal cut (fixed y); the unit suffix is omitted when y == 0.
        strTitle = 'At ' + labels[1] + ': ' + str(y)
        if y != 0: strTitle += ' ' + units[1]
        label1X = (labels[0] + ' [' + units[0] + ']', labels[2] + ' [' + units[2] + ']', strTitle)
        # Vertical cut (fixed x); same convention for x == 0.
        strTitle = 'At ' + labels[0] + ': ' + str(x)
        if x != 0: strTitle += ' ' + units[0]
        label1Y = (labels[1] + ' [' + units[1] + ']', labels[2] + ' [' + units[2] + ']', strTitle)
    else: #OC081115
        # No units supplied: build the same three label tuples without units.
        strTitle = labels[2]
        label2D = (labels[0], labels[1], strTitle)
        strTitle = 'At ' + labels[1] + ': ' + str(y)
        label1X = (labels[0], labels[2], strTitle)
        strTitle = 'At ' + labels[0] + ': ' + str(x)
        label1Y = (labels[1], labels[2], strTitle)
    labels = [label2D, label1X, label1Y]
    _backend.uti_plot2d1d(ar2d, x_range, y_range, x, y, labels, graphs_joined)
def uti_plot_data_file(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True, #Same as uti_data_file_plot, but better fits function name decoration rules in this module (uti_plot*)
                       _multicolumn_data=False, _column_x=None, _column_y=None, #MR31102017
                       _scale='linear', _width_pixels=None):
    """Generate plot from configuration in _fname

    :param str _fname: config loaded from here
    :param bool _read_labels: whether to read labels from _fname
    :param float _e: photon energy adjustment
    :param float _x: horizontal position adjustment
    :param float _y: vertical position adjustment
    :param bool _graphs_joined: if true, all plots in a single figure
    :param bool _multicolumn_data: if true, visualize multicolumn data
    :param str _column_x: column for horizontal axis
    :param str _column_y: column for vertical axis
    :param str _scale: the scale to use for plotting data (linear by default, but could use log, log2, log10)
    :param int _width_pixels: the width of the final plot in pixels
    """
    #if '_backend' not in locals(): uti_plot_init() #?
    _backend.uti_plot_data_file(_fname, _read_labels, _e, _x, _y, _graphs_joined,
                                _multicolumn_data, _column_x, _column_y, #MR31102017
                                _scale, _width_pixels)
#def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True):
#def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True, _traj_report=False, _traj_axis='x'): #MR29072016
#def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True, _traj_report=False, _traj_axis='x', _scale='linear', _width_pixels=None): #MR20012017
def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True,
                       _multicolumn_data=False, _column_x=None, _column_y=None, #MR31102017
                       _scale='linear', _width_pixels=None):
    """Generate plot from configuration in _fname (legacy name; delegates to uti_plot_data_file)

    :param str _fname: config loaded from here
    :param bool _read_labels: whether to read labels from _fname
    :param float _e: photon energy adjustment
    :param float _x: horizontal position adjustment
    :param float _y: vertical position adjustment
    :param bool _graphs_joined: if true, all plots in a single figure
    :param bool _multicolumn_data: if true, visualize multicolumn data
    :param str _column_x: column for horizontal axis
    :param str _column_y: column for vertical axis
    :param str _scale: the scale to use for plotting data (linear by default, but could use log, log2, log10)
    :param int _width_pixels: the width of the final plot in pixels
    """
    #if '_backend' not in locals(): uti_plot_init() #?
    #_backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined)
    #_backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined, _traj_report, _traj_axis) #MR29072016
    #_backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined, _traj_report, _traj_axis, _scale, _width_pixels) #MR20012017
    #_backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined,
    #                            _multicolumn_data, _column_x, _column_y, #MR31102017
    #                            _scale, _width_pixels)
    uti_plot_data_file(_fname, _read_labels, _e, _x, _y, _graphs_joined, _multicolumn_data, _column_x, _column_y, _scale, _width_pixels) #OC16112017
class _BackendBase(object):
def __getattr__(self, attr):
return self._backend_call
class _BackendMissing(_BackendBase):
    """Placeholder backend used before uti_plot_init() has been called:
    the first plotting call triggers initialization and is then replayed
    on the real backend.
    """
    def _backend_call(self, *args, **kwargs):
        uti_plot_init()
        # Recover the name of the module-level uti_plot_* function one frame
        # up and forward the same call to the freshly initialized backend.
        # NOTE: keyword arguments are dropped in the forwarded call.
        method_name = sys._getframe(1).f_code.co_name
        func = getattr(_backend, method_name)
        return func(*args)
class _BackendNone(_BackendBase):
    """No-op backend: silently ignores every plotting call."""
    def _backend_call(*args, **kwargs):
        return None
_backend = _BackendMissing()
|
# Generated by Django 3.0.7 on 2020-09-06 03:20
from django.db import migrations
class Migration(migrations.Migration):
    """Point each exams model at an explicit CamelCase database table name
    instead of Django's default app_model naming."""
    dependencies = [
        ('exams', '0015_remove_abilitytest_code'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='abilitytest',
            table='AbilityTest',
        ),
        migrations.AlterModelTable(
            name='exam',
            table='Exam',
        ),
        migrations.AlterModelTable(
            name='questionresponse',
            table='QuestionResponse',
        ),
        migrations.AlterModelTable(
            name='studentexam',
            table='StudentExam',
        ),
        migrations.AlterModelTable(
            name='studentexamresult',
            table='StudentExamResult',
        ),
        migrations.AlterModelTable(
            name='userabilitytest',
            table='UserAbilityTest',
        ),
        migrations.AlterModelTable(
            name='userresponse',
            table='UserResponse',
        ),
    ]
|
import argparse
import GeneTree
def subparser(subparsers):
    """Register the 'matrice' sub-command on *subparsers*.

    Adds the required/optional argument groups used to build the
    gene-by-strain matrix from Microscope TSV exports.
    """
    desc = 'Create the matrice genes against strains file'
    cmd = subparsers.add_parser(
        'matrice', description=desc,
        formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
    required_group = cmd.add_argument_group('required arguments')
    optional_group = cmd.add_argument_group('optional arguments')
    required_group.add_argument(
        '-tsv', '--tsv_files', nargs='+', metavar='IN_FILE1', type=str,
        required=True,
        help='Tsv files from Microscope (without the pan files).')
    optional_group.add_argument(
        '-out_dir', '--output_directory', metavar='OUT_DIR',
        default='<out_dir>',
        help='name of the output directory.')
|
from sub_units.bayes_model_implementations.moving_window_model import \
MovingWindowModel # want to make an instance of this class for each state / set of params
from sub_units.utils import run_everything as run_everything_imported # for plotting the report across all states
from sub_units.utils import Region
import sub_units.load_data_country as load_data # only want to load this once, so import as singleton pattern
import datetime
#####
# Set up model
#####
# Fitting configuration, passed straight through to run_everything_imported.
n_bootstraps = 100
n_likelihood_samples = 100000
moving_window_size = 21 # three weeks
# Force recomputation / re-plotting even when cached artifacts exist.
opt_force_calc = False
opt_force_plot = False
opt_simplified = False # set to True to just do statsmodels as a simplified daily service
# Optional overrides: run only these regions / pin the max date (None = auto).
override_run_states = None
override_max_date_str = None
# Example override list:
# ['total', 'Virginia', 'Arkansas', 'Connecticut', 'Alaska', 'South Dakota', 'Hawaii', 'Vermont', 'Wyoming'] # None
###
# Execute
###
def run_everything():
    """Run the full pipeline, returning a {Region: plot_subfolder} map.

    When ``override_run_states`` is set, only the countries run is executed
    for exactly those regions; otherwise countries and US states are run
    with their default region lists.
    """
    if override_run_states is None:
        countries_folder = _run_everything_sub(region=Region.countries)
        us_states_folder = _run_everything_sub(region=Region.US_states)
        # County- and province-level runs are currently disabled.
        return {
            Region.US_states: us_states_folder,
            Region.countries: countries_folder,
        }
    countries_folder = _run_everything_sub(
        region=Region.countries, override_run_states=override_run_states)
    return {Region.countries: countries_folder}
def _run_everything_sub(region=Region.US_states, override_run_states=None):
    """Fit and plot moving-window models for every state/country in *region*;
    returns the plot subfolder produced by run_everything_imported."""
    # load_data sometimes exposes these rankings as (list, ...) tuples;
    # unwrap to the plain list so the filtering below works.
    if type(load_data.current_cases_ranked_us_counties) == tuple:
        load_data.current_cases_ranked_us_counties = load_data.current_cases_ranked_us_counties[0]
    if type(load_data.current_cases_ranked_non_us_provinces) == tuple:
        load_data.current_cases_ranked_non_us_provinces = load_data.current_cases_ranked_non_us_provinces[0]
    # Drop US sub-entries from the non-US province ranking.
    load_data.current_cases_ranked_non_us_provinces = [x for x in load_data.current_cases_ranked_non_us_provinces \
                                                       if not x.startswith('US:')]
    # Remove provinces without enough data (fewer than 3 series columns or
    # fewer than 30 days of history), plus all 'China:' provinces.
    new_provinces = list()
    for province in load_data.current_cases_ranked_non_us_provinces:
        tmp_dict = load_data.get_state_data(province)
        if tmp_dict['series_data'].shape[1] < 3 or tmp_dict['series_data'].shape[0] < 30 or province.startswith('China:'):
            print(f'Removing province {province}')
            if tmp_dict['series_data'].shape[1] >= 3:
                print(f' with tmp_dict["series_data"].shape = {tmp_dict["series_data"].shape}')
        else:
            print(f'Keeping province {province}')
            print(f' with tmp_dict["series_data"].shape = {tmp_dict["series_data"].shape}')
            new_provinces.append(province)
    load_data.current_cases_ranked_non_us_provinces = new_provinces
    print('load_data.current_cases_ranked_non_us_provinces')
    [print(x) for x in sorted(load_data.current_cases_ranked_non_us_provinces)]
    # Pick the default run list for the requested region (top 50 by cases,
    # except US states which are all run).
    if override_run_states is None:
        if region == Region.US_states:
            override_run_states = load_data.current_cases_ranked_us_states
        elif region == Region.US_counties:
            override_run_states = load_data.current_cases_ranked_us_counties[:50]
        elif region == Region.countries:
            # NOTE(review): countries use ...non_us_states here while provinces
            # use ...non_us_provinces below -- confirm the attribute name.
            override_run_states = load_data.current_cases_ranked_non_us_states[:50]
        elif region == Region.provinces:
            override_run_states = load_data.current_cases_ranked_non_us_provinces[:50]
    # Names starting with a space are filtered out as malformed entries.
    override_run_states = [x for x in override_run_states if not x.startswith(' ')]
    print('Gonna run these states:')
    [print(x) for x in sorted(override_run_states)]
    model_type_name = f'moving_window_{moving_window_size}_days_{region}_region'
    # Default the hyperparameter cutoff date to today unless overridden.
    if override_max_date_str is None:
        hyperparameter_max_date_str = datetime.datetime.today().strftime('%Y-%m-%d')
    else:
        hyperparameter_max_date_str = override_max_date_str
    state_models_filename = f'state_models_smoothed_moving_window_{region}_{n_bootstraps}_bootstraps_{n_likelihood_samples}_likelihood_samples_{hyperparameter_max_date_str.replace("-", "_")}_max_date.joblib'
    state_report_filename = f'state_report_smoothed_moving_window_{region}_{n_bootstraps}_bootstraps_{n_likelihood_samples}_likelihood_samples_{hyperparameter_max_date_str.replace("-", "_")}_max_date.joblib'
    # Fixing parameters we don't want to train saves a lot of compute.
    extra_params = dict()
    static_params = {'day0_positive_multiplier': 1,
                     'day0_deceased_multiplier': 1}
    # Parameters fit in log space (strictly positive quantities).
    logarithmic_params = ['positive_intercept',
                          'deceased_intercept',
                          'sigma_positive',
                          'sigma_deceased',
                          # 'day0_positive_multiplier',
                          'day1_positive_multiplier',
                          'day2_positive_multiplier',
                          'day3_positive_multiplier',
                          'day4_positive_multiplier',
                          'day5_positive_multiplier',
                          'day6_positive_multiplier',
                          # 'day0_deceased_multiplier',
                          'day1_deceased_multiplier',
                          'day2_deceased_multiplier',
                          'day3_deceased_multiplier',
                          'day4_deceased_multiplier',
                          'day5_deceased_multiplier',
                          'day6_deceased_multiplier',
                          ]
    plot_param_names = ['positive_slope',
                        'positive_intercept',
                        'deceased_slope',
                        'deceased_intercept',
                        'sigma_positive',
                        'sigma_deceased'
                        ]
    # Simplified (statsmodels-only) runs plot a reduced parameter set.
    if opt_simplified:
        plot_param_names = ['positive_slope',
                            'deceased_slope',
                            'positive_intercept',
                            'deceased_intercept', ]
    sorted_init_condit_names = list()
    sorted_param_names = ['positive_slope',
                          'positive_intercept',
                          'deceased_slope',
                          'deceased_intercept',
                          'sigma_positive',
                          'sigma_deceased',
                          # 'day0_positive_multiplier',
                          'day1_positive_multiplier',
                          'day2_positive_multiplier',
                          'day3_positive_multiplier',
                          'day4_positive_multiplier',
                          'day5_positive_multiplier',
                          'day6_positive_multiplier',
                          # 'day0_deceased_multiplier',
                          'day1_deceased_multiplier',
                          'day2_deceased_multiplier',
                          'day3_deceased_multiplier',
                          'day4_deceased_multiplier',
                          'day5_deceased_multiplier',
                          'day6_deceased_multiplier']
    curve_fit_bounds = {'positive_slope': (-10, 10),
                        'positive_intercept': (0, 1000000),
                        'deceased_slope': (-10, 10),
                        'deceased_intercept': (0, 1000000),
                        'sigma_positive': (0, 100),
                        'sigma_deceased': (0, 100),
                        # 'day0_positive_multiplier': (0, 10),
                        'day1_positive_multiplier': (0, 10),
                        'day2_positive_multiplier': (0, 10),
                        'day3_positive_multiplier': (0, 10),
                        'day4_positive_multiplier': (0, 10),
                        'day5_positive_multiplier': (0, 10),
                        'day6_positive_multiplier': (0, 10),
                        # 'day0_deceased_multiplier': (0, 10),
                        'day1_deceased_multiplier': (0, 10),
                        'day2_deceased_multiplier': (0, 10),
                        'day3_deceased_multiplier': (0, 10),
                        'day4_deceased_multiplier': (0, 10),
                        'day5_deceased_multiplier': (0, 10),
                        'day6_deceased_multiplier': (0, 10)
                        }
    # Sanity-check starting point for the optimizer.
    test_params = {'positive_slope': 0,
                   'positive_intercept': 2500,
                   'deceased_slope': 0,
                   'deceased_intercept': 250,
                   'sigma_positive': 0.05,
                   'sigma_deceased': 0.1,
                   # 'day0_positive_multiplier': 1,
                   'day1_positive_multiplier': 1,
                   'day2_positive_multiplier': 1,
                   'day3_positive_multiplier': 1,
                   'day4_positive_multiplier': 1,
                   'day5_positive_multiplier': 1,
                   'day6_positive_multiplier': 1,
                   # 'day0_deceased_multiplier': 1,
                   'day1_deceased_multiplier': 1,
                   'day2_deceased_multiplier': 1,
                   'day3_deceased_multiplier': 1,
                   'day4_deceased_multiplier': 1,
                   'day5_deceased_multiplier': 1,
                   'day6_deceased_multiplier': 1
                   }
    # uniform priors with bounds:
    priors = curve_fit_bounds
    plot_subfolder = run_everything_imported(override_run_states,
                                             MovingWindowModel,
                                             load_data,
                                             model_type_name=model_type_name,
                                             state_models_filename=state_models_filename,
                                             state_report_filename=state_report_filename,
                                             moving_window_size=moving_window_size,
                                             n_bootstraps=n_bootstraps,
                                             n_likelihood_samples=n_likelihood_samples,
                                             load_data_obj=load_data,
                                             sorted_param_names=sorted_param_names,
                                             sorted_init_condit_names=sorted_init_condit_names,
                                             curve_fit_bounds=curve_fit_bounds,
                                             priors=priors,
                                             test_params=test_params,
                                             static_params=static_params,
                                             opt_force_calc=opt_force_calc,
                                             opt_force_plot=opt_force_plot,
                                             logarithmic_params=logarithmic_params,
                                             extra_params=extra_params,
                                             plot_param_names=plot_param_names,
                                             opt_statsmodels=True,
                                             opt_simplified=opt_simplified,
                                             override_max_date_str=override_max_date_str,
                                             )
    return plot_subfolder
# Script entry point: run the full pipeline when executed directly.
if __name__ == '__main__':
    run_everything()
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class GameCollector(models.Model):
    """Account record for a game collector (referenced by Game.gameCreator)."""
    # NOTE(review): passwords are stored as plain CharFields and duplicated
    # (Password1/Password2); confirmation belongs in a form and hashing in
    # Django's auth system -- consider relying on the linked User instead.
    username = models.CharField(max_length=70)
    Password1 = models.CharField(max_length=70)
    Password2 = models.CharField(max_length=70)
    # timezone.now is passed as a callable, so the default is evaluated at
    # row-creation time, not at module import.
    dateAccountCreated = models.DateField(default=timezone.now)
    # Optional link to Django's built-in User; PROTECT blocks deleting a User
    # that still has a GameCollector.
    userTableForeignKey = models.ForeignKey(User, on_delete=models.PROTECT,
                                            null=True, blank=True)
    def __str__(self):
        return self.username
class Game(models.Model):
    """A game record, optionally owned by a GameCollector."""
    name = models.CharField(max_length=70)
    developer = models.CharField(max_length=70)
    dateMade = models.DateField()
    # Age restriction for the game (presumably a minimum age) -- confirm.
    ageLimit = models.PositiveIntegerField()
    # Owning collector; PROTECT blocks deleting a collector who still has games.
    gameCreator = models.ForeignKey(GameCollector, on_delete=models.PROTECT, null=True, blank=True)
    def __str__(self):
        return self.name
|
from django.urls import path
from . import views
urlpatterns = [
    # Landing page for the burst app.
    path('', views.burst_home, name='burst'),
    # Burst-set listing, creation, and per-set detail/update/calculate views.
    path('sets/', views.burst_sets_view, name='burst_sets'),
    path('sets/add/', views.burst_set_add, name='add_burst_set'),
    path('sets/<int:burst_set_id>', views.burst_set_interfaces_view, name='burst_set'),
    path('sets/<int:burst_set_id>/update/', views.burst_set_update_view, name='burst_set_update'),
    path('sets/<int:burst_set_id>/calculate/', views.burst_set_calculate_view, name='burst_set_calculate'),
    # Stand-alone calculation endpoint.
    path('calculate/', views.calculate, name='calculate'),
]
|
"""
urlresolver XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
reusable captcha methods
"""
from urlresolver import common
import re
import xbmcgui
import os
import recaptcha_v2
import helpers
import urlresolver
# Shared HTTP client used by every captcha fetch in this module.
net = common.Net()
# File name (under the addon profile directory) used to cache the captcha image.
IMG_FILE = 'captcha_img.gif'
def get_response(img):
    """Display a captcha image in a Kodi dialog and prompt the user to type it.

    :param img: path or URL of the captcha image to display
    :return: the text the user entered
    :raises Exception: 'captcha_error' if the user cancels / enters nothing
    """
    # Create the dialog before the try-block so `wdlg` is always bound when
    # the finally clause runs (the original could hit an unbound local).
    wdlg = xbmcgui.WindowDialog()
    try:
        image_ctrl = xbmcgui.ControlImage(450, 0, 400, 130, img)
        wdlg.addControl(image_ctrl)
        wdlg.show()
        common.kodi.sleep(3000)
        solution = common.kodi.get_keyboard(common.i18n('letters_image'))
        if not solution:
            raise Exception('captcha_error')
        # BUG FIX: the original fell off the end and returned None, so every
        # caller posted an empty captcha response.  Return the user's answer.
        return solution
    finally:
        wdlg.close()
def do_captcha(html):
    """Detect which captcha type *html* contains and dispatch to its solver.

    Returns a dict of form fields to submit, or an empty dict when nothing
    solvable is found or popups are disallowed.
    """
    solvemedia = re.search('<iframe[^>]+src="((?:https?:)?//api.solvemedia.com[^"]+)', html)
    recaptcha = re.search('<script\s+type="text/javascript"\s+src="(http://www.google.com[^"]+)', html)
    # NOTE: this local name shadows the module-level `recaptcha_v2` import
    # inside this function only; do_recaptcha_v2 still sees the module.
    recaptcha_v2 = re.search('data-sitekey="([^"]+)', html)
    xfilecaptcha = re.search('<img\s+src="([^"]+/captchas/[^"]+)', html)
    if urlresolver.ALLOW_POPUPS:
        if solvemedia:
            return do_solvemedia_captcha(solvemedia.group(1))
        elif recaptcha:
            return do_recaptcha(recaptcha.group(1))
        elif recaptcha_v2:
            return do_recaptcha_v2(recaptcha_v2.group(1))
        elif xfilecaptcha:
            return do_xfilecaptcha(xfilecaptcha.group(1))
        else:
            # Fallback: positional text captcha -- characters encoded as HTML
            # entities absolutely positioned via `left:`; sort by x offset and
            # map entity code points back to digits (code point - 48).
            captcha = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(html)
            result = sorted(captcha, key=lambda ltr: int(ltr[0]))
            solution = ''.join(str(int(num[1]) - 48) for num in result)
            if solution:
                return {'code': solution}
            else:
                return {}
    else:
        return {}
def do_solvemedia_captcha(captcha_url):
    """Solve a SolveMedia captcha: fetch the puzzle image, ask the user for
    the answer, verify it with SolveMedia, and return the form fields the
    host site expects.

    :param str captcha_url: the SolveMedia iframe URL scraped from the page
    :return: dict with 'adcopy_challenge' and 'adcopy_response' fields
    """
    common.logger.log_debug('SolveMedia Captcha: %s' % (captcha_url))
    if captcha_url.startswith('//'): captcha_url = 'http:' + captcha_url
    html = net.http_GET(captcha_url).content
    data = {
        'adcopy_challenge': ''  # set to blank just in case not found; avoids exception on return
    }
    # BUG FIX: include_submit=False was previously passed as a keyword to
    # dict.update(), which injected a bogus 'include_submit': False form
    # field instead of telling get_hidden() to skip submit inputs.
    data.update(helpers.get_hidden(html, include_submit=False))
    captcha_img = os.path.join(common.profile_path, IMG_FILE)
    # Best-effort removal of a stale cached image (narrowed from bare except).
    try: os.remove(captcha_img)
    except OSError: pass
    # Check for alternate puzzle type - stored in a div
    alt_frame = re.search('<div><iframe src="(/papi/media[^"]+)', html)
    if alt_frame:
        html = net.http_GET("http://api.solvemedia.com%s" % alt_frame.group(1)).content
    alt_puzzle = re.search('<div\s+id="typein">\s*<img\s+src="data:image/png;base64,([^"]+)', html, re.DOTALL)
    if alt_puzzle:
        # Inline base64 puzzle image.
        open(captcha_img, 'wb').write(alt_puzzle.group(1).decode('base64'))
    else:
        # Regular puzzle: download the referenced media image.
        open(captcha_img, 'wb').write(net.http_GET("http://api.solvemedia.com%s" % re.search('<img src="(/papi/media[^"]+)"', html).group(1)).content)
    solution = get_response(captcha_img)
    data['adcopy_response'] = solution
    # Verify with SolveMedia first; the host site is then handed
    # 'manual_challenge' as the response, per the noscript flow.
    html = net.http_POST('http://api.solvemedia.com/papi/verify.noscript', data)
    return {'adcopy_challenge': data['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
def do_recaptcha(captcha_url):
    """Ask the user to solve an old-style Google ReCaptcha image.

    @param captcha_url: URL of the recaptcha challenge script
    @return: dict with the challenge/response form fields
    """
    common.logger.log_debug('Google ReCaptcha: %s' % (captcha_url))
    if captcha_url.startswith('//'):
        captcha_url = 'http:' + captcha_url
    # An optional personal NID cookie is sent along with the request.
    personal_nid = common.get_setting('personal_nid')
    headers = {'Cookie': 'NID=' + personal_nid} if personal_nid else {}
    html = net.http_GET(captcha_url, headers=headers).content
    challenge = re.search("challenge \: \\'(.+?)\\'", html).group(1)
    solution = get_response('http://www.google.com/recaptcha/api/image?c=' + challenge)
    return {'recaptcha_challenge_field': challenge, 'recaptcha_response_field': solution}
def do_recaptcha_v2(sitekey):
    """Solve a ReCaptcha v2 challenge for *sitekey*; empty dict on failure."""
    token = recaptcha_v2.UnCaptchaReCaptcha().processCaptcha(sitekey, lang='en')
    return {'g-recaptcha-response': token} if token else {}
def do_xfilecaptcha(captcha_url):
    """Prompt the user to solve an XFileLoad image captcha."""
    common.logger.log_debug('XFileLoad ReCaptcha: %s' % (captcha_url))
    if captcha_url.startswith('//'):
        captcha_url = 'http:' + captcha_url
    return {'code': get_response(captcha_url)}
|
from django.conf.urls import url
from django.urls import include
from rest_framework.routers import DefaultRouter
from api.views.candidate import CandidateViewSet
from api.views.election import ElectionViewSet
from api.views.login import LoginView
from api.views.subelection import SubElectionViewSet
# Router generates the standard list/detail routes for each ViewSet.
router = DefaultRouter()
router.register(r'elections', ElectionViewSet)
router.register(r'subelections', SubElectionViewSet)
router.register(r'candidates', CandidateViewSet)
urlpatterns = [
    # /api
    url('^', include(router.urls)),
    # Login is a plain APIView, so it is wired up directly.
    url('^login/$', LoginView.as_view()),
]
|
"""
For in em python
Iterando strings com for
Função range recebe três argumentos (star = a,stop, step=1)
"""
# texto = 'Python'
# for letra in texto:
# print( letra )
'''texto = 'Python'
for n,letra in enumerate(texto) :
print(n,letra) '''
# print("Escolha um número e veja a sua tabuada ")
# mult = int(input("Digite um numero :"))
# for n in range(1, 11):
# print( f"{mult} X {n} = {mult * n}")
# continue - pua o próximo laço
# break - termina o laço
texto = 'python'
novo_string =''
for letra in texto:
if letra=='t' :
novo_string =novo_string +letra.upper()
elif letra =='p':
novo_string +=letra.upper()
else:
novo_string += letra
print(novo_string) |
from pygame import *
from random import randint
from time import time as timer
# background music
mixer.init()
mixer.music.load('muzlo.mp3')
mixer.music.play()
# sprite image files
racket = 'racket.png'
ten_ball = 'tenis_ball.png'
# parent class for the other sprites
class GameSprite(sprite.Sprite):
    """Base sprite: an image scaled to (size_x, size_y), placed at
    (player_x, player_y), moving at player_speed."""
    # class constructor
    def __init__(self, player_image, player_x, player_y, player_speed,size_x, size_y):
        # call the parent (Sprite) constructor:
        sprite.Sprite.__init__(self)
        # every sprite must store an 'image' property
        self.image = transform.scale(image.load(player_image), (size_x, size_y))
        self.speed = player_speed
        # every sprite must store a 'rect' property - the rectangle it is drawn in
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y
    # method that draws the sprite onto the window
    def reset(self):
        window.blit(self.image, (self.rect.x, self.rect.y))
# main player class
class Player(GameSprite):
    # keyboard-control methods
    # NOTE(review): the names are swapped relative to usage in the main
    # loop below: update_left() reads the ARROW keys but is called on the
    # right-hand player, while update_right() reads W/S and is called on
    # the left-hand player. Renaming would change the public interface, so
    # it is only flagged here.
    def update_left(self):
        keys = key.get_pressed()
        if keys[K_UP] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[K_DOWN] and self.rect.y < win_height - 150:
            self.rect.y += self.speed
    def update_right(self):
        keys = key.get_pressed()
        if keys[K_w] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[K_s] and self.rect.y < win_height - 150:
            self.rect.y += self.speed
# window setup: cyan 600x500 playfield
back = (0,255,255)
win_width = 600
win_height = 500
window = display.set_mode((win_width,win_height))
window.fill(back)
# fonts and state variables
sc1 = 0
sc2 = 0
font.init()
font1 = font.SysFont('Calibri', 50)
win1 = font1.render('Player 1 win!',True,(255,0,0))
win2 = font1.render('Player 2 win!',True,(255,0,0))
# players (left racket, right racket) and the ball
player1 = Player(racket,30,200,4,50,150)
player2 = Player(racket,520,200,4,50,150)
ball = GameSprite(ten_ball,200,200,2,50,50)
run = True
finish = False
speed_x = 4
speed_y = 4
FPS = 60
clock = time.Clock()
while run:
    last_time = timer()
    # "Close" window button event
    for e in event.get():
        if e.type == QUIT:
            run = False
    last_time = timer()  # NOTE(review): overwrites the value captured above
    if not finish:
        score_p1 = font1.render(str(sc1),True,(255,255,0))
        score_p2 = font1.render(str(sc2),True,(255,255,0))
        player1_text = font1.render('1',True,(0,0,255))
        player2_text = font1.render('2',True,(255,0,0))
        window.fill(back)
        # left player uses W/S, right player uses the arrow keys
        player1.update_right()
        player2.update_left()
        ball.rect.x += speed_x
        ball.rect.y += speed_y
        window.blit(score_p1,(200,0))
        window.blit(score_p2,(410,0))
        window.blit(player1_text,(100, 50))
        window.blit(player2_text,(450, 50))
        # racket hits reverse the ball horizontally and score a point
        if sprite.collide_rect(ball,player1):
            speed_x *= -1
            sc1 += 1
        if sprite.collide_rect(ball,player2):
            speed_x *= -1
            sc2 += 1
        # bounce off the top/bottom walls
        if ball.rect.y > win_height-50 or ball.rect.y < 0:
            speed_y *= -1
        # ball left the field: the opposite player wins
        if ball.rect.x < 0:
            finish = True
            window.blit(win2,(200,200))
        if ball.rect.x > win_width:
            finish = True
            window.blit(win1,(200,200))
        player1.reset()
        player2.reset()
        ball.reset()
    now_time = timer()
    # restart the music once the track (~3 min) should have finished
    if now_time - last_time >= 181:
        mixer.music.play()
    display.update()
    clock.tick(FPS)
# 剑指 Offer 04:二维数组中的查找
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Search a matrix whose rows and columns are sorted ascending.

        Starts at the bottom-left corner: moving up decreases the value,
        moving right increases it, so each step discards a row or column.
        """
        row, col = len(matrix) - 1, 0
        while row >= 0 and col < len(matrix[0]):
            current = matrix[row][col]
            if current == target:
                return True
            if current > target:
                row -= 1
            else:
                col += 1
        return False
if __name__ == "__main__":
Solution.findNumberIn2DArray(Solution,
[[1, 4, 7, 11, 15], [2, 5, 8, 12, 19], [3, 6, 9, 16, 22], [10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]], 5)
# leetcode submit region end(Prohibit modification and deletion)
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
import urllib.request
import sys
import time
import json
# ---------------------------------------------------------------------------
# Design notes. These were originally pasted as bare text, which is a syntax
# error and made the whole module fail to import; preserved here as comments.
#
# template:
#     OOS Alert
#     # Stores
#     # Skus
#     Store1
#         Skus w/ links
#     Store2
#         skus w/ links
#
# architecture:
#     Loop through stores, with urls.
#     Parse Brand page (and get OOS from that if possible)
#     Parse each product page
#     When OOS, write to dict:
#         store
#         sku
#         link
#     write report-
#         count stores in dict
#         count skus in dict
#         write out skus for each store
#     send
# ---------------------------------------------------------------------------
class StoreOOS(object):
    """ Store out of stock class """

    def __init__(self, sitename='SephoraUSA', siteurl='https://sephora.com'):
        # Human-readable store name and the store's base URL.
        self.sitename = sitename
        self.siteurl = siteurl

    def getBrandPage(self):
        """Fetch this store's brand page (not implemented yet).

        BUG FIX: the original body was the bare name ``case`` — a leftover
        placeholder that raised NameError at call time. Raise an explicit
        NotImplementedError instead so callers get a meaningful error.
        """
        raise NotImplementedError("getBrandPage() is not implemented for %s" % self.sitename)
def BirchBoxBrandPage(url, oos_list):
    """Scrape the Birchbox Briogeo brand page and print product links.

    NOTE(review): `url` is immediately overwritten with a hard-coded brand
    URL and `oos_list` is never updated — both kept for signature
    compatibility with the other *BrandPage functions; flagged for follow-up.
    """
    url = f"https://www.birchbox.com/brand/4614"  # BIRCHBOX
    driver = webdriver.Firefox()
    delay = 10
    driver.get(url)
    try:
        wait = WebDriverWait(driver, delay)
        # DERMSTORE variant: wait.until(EC.presence_of_element_located((By.ID, "tab_products")))
        wait.until(EC.presence_of_element_located((By.CLASS_NAME, "vertical__content___2lOQc")))  # BIRCHBOX
        print("page is ready")
    except TimeoutException:
        print("Loading took too much time")
    driver.execute_script("window.scrollTo(0, 400);")
    # BUG FIX: scrollY was read before ever being assigned (NameError) and
    # np (numpy) was never imported. Start from the 400px position scrolled
    # above and use the stdlib random module for the sleep jitter.
    import random
    scrollY = 400
    while scrollY < 2400:
        time.sleep(random.random() + 1)
        driver.execute_script("window.scrollTo(0, {0});".format(scrollY))
        scrollY += 400
    html = driver.execute_script("return document.body.outerHTML;")
    soup = BeautifulSoup(html, 'lxml')
    base_url = 'https://www.birchbox.com/'
    prod_thumb = soup.find_all('a', attrs={'class': 'productThumb__title___1D-Rj'})
    for this_one in prod_thumb:
        print(base_url + this_one.attrs['href'])
        print(this_one.text)
def RileyRoseBrandPage(url, oos_list):
    """Scrape the Riley Rose Briogeo page and append out-of-stock items.

    Appends ('RileyRose', display_name, share_url) tuples to *oos_list* and
    returns it. NOTE(review): `url` is ignored in favour of a hard-coded
    brand URL; kept for signature compatibility.
    """
    rr_url = f"https://www.rileyrose.com/us/shop/catalog/category/rr/promo-branded-briogeo"
    # BUG FIX: the original called requests.get(), but this module imports
    # urllib.request (not requests), so the call raised NameError.
    rr_content = urllib.request.urlopen(rr_url).read()
    rr_soup = BeautifulSoup(rr_content, 'lxml')
    rr_scripts = rr_soup.find_all('script', attrs={'type': 'text/javascript'})
    # Product data lives in a JS variable assignment inside the last script.
    start_pos = rr_scripts[-1].text.find('var cData =')
    end_pos = rr_scripts[-1].text.find('"};', start_pos)
    jsondata = json.loads(rr_scripts[-1].text[start_pos + 12:end_pos + 2])
    for p in jsondata['CatalogProducts']:
        print(p['DisplayName'], p['IsOOS'])
        if p['IsOOS'] == True:
            oos_list.append(('RileyRose', p['DisplayName'], p['ProductShareLinkUrl']))
    return oos_list
|
"""
ulmo.cuahsi.wof
~~~~~~~~~~~~~~~
`CUAHSI WaterOneFlow`_ web services
.. _CUAHSI WaterOneFlow: http://his.cuahsi.org/wofws.html
"""
from __future__ import absolute_import
from . import core
from .core import (get_sites, get_site_info, get_values, get_variable_info)
|
'''
Created on 07-May-2017
@author: Sathesh Rgs
'''
print("Program to print digits of a number")
try:
count=0
num=int(input("Enter a number.."))
while num > 0:
num=num//10
count += 1
print("Length is ",count)
except:
print("Enter a valid number") |
from utils.model_forward import forward
from sklearn.cluster import KMeans
class UpdateReps(object):
    """Training callback: every `every` steps, re-fit the per-class
    representative vectors ("reps") of the train loss by running k-means
    on the current model's outputs for each class."""
    def __init__(self, every, dataset, batch_size=64):
        # every: refresh period in steps; dataset: data to run through the
        # model; batch_size: forward-pass batch size.
        self.every = every
        self.dataset = dataset
        self.batch_size = batch_size
    def __call__(self, epoch, batch, step, model, dataloaders, losses, optimizer, data, stats):
        if step % self.every == 0:
            print('Updating Reps')
            # Run the whole dataset through the current model.
            outputs, labels = forward(model=model,
                                      dataset=self.dataset,
                                      batch_size=self.batch_size)
            # N and k come from the train loss; presumably N = number of
            # classes and k = reps per class -- TODO confirm against loss impl.
            N = losses['train'].N # todo might be a nicer way to get these
            k = losses['train'].k
            for c in range(N):
                # Cluster the outputs belonging to class c into k centers.
                class_mask = labels == c
                class_examples = outputs[class_mask]
                kmeans = KMeans(n_clusters=k, init='k-means++', n_init=1, max_iter=20)
                kmeans.fit(class_examples)
                # Reps for class c occupy rows [c*k, (c+1)*k) of the reps matrix.
                start = c * k
                stop = (c+1) * k
                # losses['train'].reps.data[start:stop] = torch.Tensor(kmeans.cluster_centers_).cuda().float()
                losses['train'].set_reps(kmeans.cluster_centers_, start, stop)
class UpdateValReps(object):
    """Callback that periodically mirrors the training reps into the
    validation loss so both losses share the same representatives."""
    def __init__(self, every):
        # Sync period, in steps.
        self.every = every
    def __call__(self, epoch, batch, step, model, dataloaders, losses, optimizer, data, stats):
        if step % self.every != 0:
            return
        losses['val'].reps = losses['train'].reps
# Count how many times the digit 2 appears among the integers 2..n inclusive.
n = 2020
cnt = 0
for i in range(2, n + 1):
    tmp = i
    while tmp > 0:
        if tmp % 10 == 2:
            cnt += 1
        # BUG FIX: the original used float division (tmp /= 10), which turned
        # tmp into a float so later digits never compared equal to 2
        # (e.g. 21 -> 2.1), undercounting. Integer division is intended.
        tmp //= 10
print(cnt)
|
#!/usr/bin/env python
import argparse
from ansipants import ANSIDecoder, PALETTE_NAMES
parser = argparse.ArgumentParser(description="Convert ANSI art to HTML.")
parser.add_argument('filename')
parser.add_argument(
'--encoding', default='cp437',
help="Character encoding of input (default: cp437)"
)
parser.add_argument(
'--palette', default='vga', choices=PALETTE_NAMES,
help="Colour palette (default: vga)"
)
parser.add_argument(
'--width', type=int, default=80,
help="Number of columns of output (default: 80)"
)
parser.add_argument(
'--strict', action='store_true',
help="Fail loudly on encountering a decoding error"
)
args = parser.parse_args()
with open(args.filename, 'rt', encoding=args.encoding) as f:
decoder = ANSIDecoder(f, palette=args.palette, width=args.width, strict=args.strict)
print(decoder.as_html())
|
#
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .machinery import BehavioralEndpointDefinitionRouter
import django_declarative_apis.authentication
# don't require resources unless this adapter is in use
try:
from django_declarative_apis.resources.resource import Resource
except ImportError as e: # noqa
import traceback
traceback.print_exc()
Resource = object
class BaseHandler(object):
    """Minimal base declaring which HTTP verbs an endpoint may bind."""
    # Lower-case HTTP verbs accepted as keyword arguments when building a handler.
    defined_methods = {"get", "put", "patch", "post", "delete"}
class EndpointHandler(object):
    """
    Glue for combining the new-style endpoint definitions into the old-style
    piston handler. Keyword arguments map HTTP method names ('get', 'post',
    ...) to one or more endpoint definitions.
    """

    def __init__(self, **kwargs):
        super(EndpointHandler, self).__init__()
        self.method_handlers = {}
        for method, handler in kwargs.items():
            if method not in BaseHandler.defined_methods:
                raise TypeError(
                    "Unexpected keyword argument {0}: valid arguments are {1}".format(
                        method, BaseHandler.defined_methods
                    )
                )
            # A single definition and a sequence of definitions are both
            # routed through the same behavioral router.
            handlers = handler if isinstance(handler, (list, tuple)) else (handler,)
            self.method_handlers[method.upper()] = BehavioralEndpointDefinitionRouter(*handlers)
        self.allowed_methods = self.method_handlers.keys()

    def __call__(self, *args, **kwargs):
        # The resource layer calls its handler factory; this instance is its own factory.
        return self

    def handle_request(self, method, *args, **kwargs):
        # Dispatch to the router registered for this (upper-cased) HTTP method.
        return self.method_handlers[method](*args, **kwargs)

    @property
    def documentation(self):
        """Per-method documentation, keyed by HTTP method name."""
        docs = {}
        for method, handler in self.method_handlers.items():
            docs[method] = handler.documentation
        return docs
class EndpointResource(Resource):
    """:code:`EndpointResource` is the DDA default resource adapter.

    It validates the configuration of the authentication handler, and in combination with Django’s native urls.py
    routes requests (through behavioral routing) to the same URL but to different handlers based on request attributes.
    """

    def __init__(self, authentication=None, **kwargs):
        # kwargs are the per-HTTP-method endpoint definitions; EndpointHandler
        # validates and routes them.
        super(EndpointResource, self).__init__(EndpointHandler(**kwargs))
        if authentication is not None:
            # Fail fast on a malformed authentication configuration.
            django_declarative_apis.authentication.validate_authentication_config(
                authentication
            )
            self.authentication = authentication
def resource_adapter(*args, **kwargs):
    """:code:`resource_adapter()` is a helper function that finds the endpoint resource adapter from settings.py and calls that resource adapter.

    **resource_adapter takes two arguments:**

    Handler/Resource
        **Required |** The :code:`EndpointDefinition` implementation along with an HTTP verb.

    Authentication Handler
        **Optional |** If not specified, :code:`OAuth1.0a` will be used by default.

    **Example:**

    Handler defined in a separate file named :code:`handlers.py`.

    .. code-block::

        TodoEndpoint = resource_adapter(
            post=resources.TodoUpdateDefinition,
            get=resources.TodoDefinition,
            authentication={None: (NoAuth(),)},
        )

    Django app’s :code:`urls.py`.

    .. code-block::

        url(
            r"^tasks/$",
            handlers.TodoEndpoint,
        )
    """
    setting_name = "DECLARATIVE_ENDPOINT_RESOURCE_ADAPTER"
    adapter_name = getattr(settings, setting_name, None)
    if not adapter_name:
        raise ImproperlyConfigured(setting_name)
    # Resolve the dotted path "package.module.ClassName" into a class object.
    parts = adapter_name.split(".")
    module = import_module(".".join(parts[:-1]))
    adapter_class = getattr(module, parts[-1])
    return adapter_class(*args, **kwargs)
|
# Generated by Django 3.0.5 on 2021-08-02 14:46
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the 'attempt' field from both the Course and Result models."""

    dependencies = [
        ('quiz', '0007_course_attempt'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='course',
            name='attempt',
        ),
        migrations.RemoveField(
            model_name='result',
            name='attempt',
        ),
    ]
|
import numpy as np
import datetime as dt
import csv
from database import SQLite3Database
import smtplib
import mimetypes
from email.mime.multipart import MIMEMultipart
from email.message import Message
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
def send_report_email(validations=0,failures=0,admin=0,date1=dt.datetime(2020,11,1),date2=dt.datetime(2020,12,1),source_email=None,destination_emails=None,email_password=None,attachments=None):
    """
    Sends an email reporting on the parking validation statistics

    This requires a gmail account with 2FA set up so that app passwords can be used
    The app password should be encrypted by running
            python crypto.py passwd_email
    (this will insert the encrypted password into the config file as well)

    @param validations/failures/admin: counts included in the report body
    @param date1: report month (used in the subject line); date2 is currently unused here
    @param source_email: gmail address used as sender and SMTP login
    @param destination_emails: iterable of recipient addresses
    @param email_password: gmail app password for source_email
    @param attachments: optional iterable of file paths to attach
    @return: None; prints a status message either way

    TODO: clean this up a bit and rework the function arguments; maybe should be a proper class?
    """
    if source_email is None or destination_emails is None or email_password is None:
        print("Failed to send email; one or more required emails and/or passwords was invalid!")
        return
    # body of the email
    mail_content = f"This is your monthly automated parking validation report.\n\ntotal successful validations: {validations}\ntotal failed attempts: {failures}\ntotal admin mode activations: {admin}\n\nDetailed hourly data is attached."
    # setup the MIME and fill out the fields
    msg = MIMEMultipart()
    msg['From'] = source_email
    msg['To'] = f"{','.join(destination_emails)}"
    msg['Subject'] = f"Parking Validation Report for {date1:%Y-%b}"
    # insert the body of the email
    msg.attach(MIMEText(mail_content, 'plain'))
    # go through the specified attachments and attach them
    if attachments is not None:
        for filename in attachments:
            msg.attach(_build_attachment(filename))
    # Create SMTP session for sending the mail
    session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
    session.starttls()  # enable security
    session.login(source_email, email_password)  # login with mail_id and password
    session.sendmail(source_email, f"{','.join(destination_emails)}", msg.as_string())
    session.quit()
    print(f"report email for {date1:%Y-%b} sent!")


def _build_attachment(filename):
    """Build a MIME part for *filename*, guessing its content type.

    Strictly speaking this is overkill for CSV files, but it handles other
    attachment types too. Files are opened with context managers so handles
    are not leaked (the original left them open on exceptions).
    """
    ctype, encoding = mimetypes.guess_type(filename)
    if ctype is None or encoding is not None:
        ctype = "application/octet-stream"
    maintype, subtype = ctype.split("/", 1)
    if maintype == "text":
        # Note: we should handle calculating the charset
        with open(filename) as fp:
            attachment = MIMEText(fp.read(), _subtype=subtype)
    elif maintype == "image":
        with open(filename, "rb") as fp:
            attachment = MIMEImage(fp.read(), _subtype=subtype)
    elif maintype == "audio":
        with open(filename, "rb") as fp:
            attachment = MIMEAudio(fp.read(), _subtype=subtype)
    else:
        with open(filename, "rb") as fp:
            attachment = MIMEBase(maintype, subtype)
            attachment.set_payload(fp.read())
    attachment.add_header("Content-Disposition", "attachment", filename=filename)
    return attachment
# functions to work on a dummy database to test report generation (actual function in database class)
def custom_db(db_file="test.db"):
"""
Create custom test db
"""
db=SQLite3Database(db_file)
db.open_db()
# create log table
db.create_log_table()
return db
def generate_random_log(db,num_start=15,num_end=75,hour_start=8,hour_end=18,day_start=1,day_end=30):
    """
    Generate random log entries for custom db

    For each day in [day_start, day_end] (November 2020), inserts a random
    number of successful-validation entries (between num_start and num_end)
    whose times are drawn from a normal distribution centred on the middle
    of the [hour_start, hour_end] working window.
    """
    rng=np.random.default_rng()
    for j in range(day_start,day_end+1):
        for k in range(rng.integers(num_start,num_end)):
            # hour-of-day ~ Normal(midday, quarter of half the working span)
            h=rng.normal(hour_start+(hour_end-hour_start)/2,(hour_end-hour_start)/2/4)
            # split the fractional hour into hour/minute/second components
            hour=int(h)
            minute=int((h-hour)*60)
            second=int((h-hour-minute/60)*60)
            date=dt.datetime(2020,11,j,hour,minute,second,rng.integers(0,9999))
            db.log_entry_at_time(date,db.Logs.VALIDATION_SUCCESS.value)
if __name__ == "__main__":
db=custom_db()
reset=False
if reset:
db.drop_log_table(run=True)
db.create_log_table()
generate_random_log(db)
date1=dt.datetime(2020,11,1)
date2=dt.datetime(2020,12,1)
validations=db.hist_between(date1=date1,date2=date2,data_type="validation")
failures=db.hist_between(date1=date1,date2=date2,data_type="failed")
admin=db.hist_between(date1=date1,date2=date2,data_type="admin")
|
from .classic_actor_critic import *
from .deep_actor_critic import *
|
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
import uuid
import os
from InvoiceParser.settings import MEDIA_URL
# Create your models here.
UserModel=get_user_model()
def get_image_file_name(instance, filename):
    """Upload-path callback: unique name under 'invoices/', keeping the extension."""
    extension = filename.split('.')[-1]
    unique_name = "{}.{}".format(uuid.uuid4(), extension)
    return os.path.join('invoices/', unique_name)
class Invoice(models.Model):
    """An uploaded invoice image belonging to a user."""
    User=models.ForeignKey(UserModel,on_delete=models.CASCADE,verbose_name=_('This Model Belongs to?'),
                           help_text=_('This invoice belongs to above chosen user'))
    # Unique identifier for the invoice; defaults to a fresh UUID4.
    model_number=models.CharField(max_length=36,verbose_name=_('Model Number'),
                                  unique=True,blank=False,default=uuid.uuid4,
                                  help_text=_('This is used to identify this model'))
    invoice_image=models.ImageField(upload_to=get_image_file_name,blank=True,null=True)
    created_at=models.DateField(auto_now_add=True,verbose_name=_('Creation Date')
                                ,help_text=_('This is the date when model was created'))
    def get_image_url(self):
        # NOTE(review): ImageField.url usually already includes MEDIA_URL for
        # the default storage, so this join may duplicate the prefix -- confirm.
        return os.path.join(MEDIA_URL,self.invoice_image.url)
    def __str__(self):
        return self.model_number
class InvoiceLabel(models.Model):
    """A labelled key/value region extracted from an invoice, with its
    bounding box (x/y origin plus width/height) on the invoice image."""
    invoice_model=models.ForeignKey(Invoice,verbose_name=_("Invoice Number"),
                                    help_text=_("This field tells the model number of invoice"),
                                    on_delete=models.CASCADE)
    key=models.CharField(max_length=50,blank=False,null=False,verbose_name=_("Label Name"))
    value=models.TextField(verbose_name=_("Label Value"))
    # Bounding-box coordinates, in pixels on the invoice image.
    x_axis=models.PositiveIntegerField(default=0,verbose_name=_("X-Coordinate"))
    y_axis=models.PositiveIntegerField(default=0,verbose_name=_("Y-Coordinate"))
    width=models.PositiveIntegerField(default=0,verbose_name=_("Bounding Box Width"))
    height=models.PositiveIntegerField(default=0,verbose_name=_("Bounding Box Height"))
    def __str__(self):
        return self.invoice_model.model_number+' | '+self.key
|
"""
Analyse the explicit projections calculated
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
directory = "D:/uni/tomography-calibration/solid-angle-calculation/"
sangles = np.load(directory + "solid-angle-top-and-out-sensors-1-and-16.npy")
coords = np.load(directory + "out-coordinates-sensor-1.npy")
sangles = sangles[0:len(coords)]
# Summing over zz
# gridx,gridy=np.mgrid[-100:101,1:205].reshape(2,(101+100)*(205-1))
proj_values = []
summed_proj = []
gridx = np.arange(-100, 100.5, 0.5)
gridy = np.arange(-80, 80.5, 0.5)
for x in gridx:
proj_values.append([])
for y in gridy:
proj_values[-1].append(
np.sum(sangles[(np.abs(coords[:, 0] - x) < 0.0001) & (np.abs(coords[:, 1] - y) < 0.0001)]))
# %%
summed_proj = np.array(proj_values)
for column in summed_proj:
column[np.argmax(column)] = np.sum(column)
print(np.sum(column))
plot_gridx = np.arange(-100.25, 100.5, 0.5)
plot_gridy = np.arange(-80.25, 80.5, 0.5)
plt.figure()
plt.pcolormesh(plot_gridx, plot_gridy, np.array(proj_values).T)
plt.colorbar()
plt.xlabel("x (mm)")
plt.ylabel("y (mm)")
plt.show()
plt.figure()
plt.pcolormesh(plot_gridx, plot_gridy, summed_proj.T)
plt.colorbar()
plt.xlabel("x (mm)")
plt.ylabel("y (mm)")
plt.show()
# gridx,gridy,gridz=np.mgrid[-100:101:20,0:200:5,0:70:1]
#
# values=griddata(coords,sangles,(gridx,gridy,gridz),method='linear',fill_value=0.0)
##
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
##x = coords[:,0]
##y = coords[:,1]
##z = coords[:,2]
##c = sangles
#
# x = gridx.flatten()
# y = gridy.flatten()
# z = gridz.flatten()
# c = values.flatten()
#
# x = x[c>0]
# y = y[c>0]
# z = z[c>0]
# c = c[c>0]
#
# ax.scatter(x, y, z, c=c, cmap=plt.hot())
# plt.show()
|
#!/usr/bin/python3
import RPi.GPIO as GPIO
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from pubnub.callbacks import SubscribeCallback
pnconfig = PNConfiguration()
# PubNub credentials — replace the placeholders with your own keys.
pnconfig.subscribe_key = 'Your-Pubnub-Subscribe-Key-Here'
pnconfig.publish_key = 'Your-Pubnub-Publish-Key-Here'
pubnub = PubNub(pnconfig)
# Channel this device listens on for GPIO on/off commands.
channel = 'Channel-Name-Here'
def setup_gpio():
    """Configure pins 11, 13 and 15 (BOARD numbering) as outputs."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(11, GPIO.OUT)
    GPIO.setup(13, GPIO.OUT)
    GPIO.setup(15, GPIO.OUT)
def on(gpio):
    """Drive the given (BOARD-numbered) pin high."""
    message = "Turning ON GPIO {gpio}".format(gpio=gpio)
    print(message)
    GPIO.output(int(gpio), GPIO.HIGH)
def off(gpio):
    """Drive the given (BOARD-numbered) pin low."""
    message = "Turning OFF GPIO {gpio}".format(gpio=gpio)
    print(message)
    GPIO.output(int(gpio), GPIO.LOW)
class Listener(SubscribeCallback):
    """PubNub callback that switches GPIO pins based on incoming messages."""
    def message(self, pubnub, message):
        payload = message.message
        if 'action' not in payload:
            return
        if payload['action'] == 'on':
            on(payload['gpio'])
        elif payload['action'] == 'off':
            off(payload['gpio'])
# Initialise the hardware, then block listening for PubNub messages.
setup_gpio()
print('Listening...')
pubnub.add_listener(Listener())
pubnub.subscribe().channels(channel).execute()
|
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Reader supports reading metadata and key info for key sets.
@author: arkajit.dey@gmail.com (Arkajit Dey)
"""
import os
import errors
import keydata
import keyinfo
import keys
import util
def CreateReader(location):
    """Factory function for Reader's

    @param location: where (file, uri, etc) the reader should read from
    @type location: string
    """
    # make sure all readers are available
    util.ImportBackends()
    # return the first Reader subclass that accepts this location
    for subclass in Reader.__subclasses__():
        candidate = subclass.CreateReader(location)
        if candidate:
            return candidate
    raise errors.KeyczarError(
      "Unable to create a reader for %s. Does the location exist?" % location)
class Reader(object):
  """Interface providing supported methods (no implementation)."""
  # Python 2-style metaclass assignment; util.ABCMeta provides
  # abc.ABCMeta-like enforcement of @util.abstractmethod.
  __metaclass__ = util.ABCMeta

  @util.abstractmethod
  def GetMetadata(self):
    """
    Return the KeyMetadata for the key set being read.

    @return: JSON string representation of KeyMetadata object
    @rtype: string

    @raise KeyczarError: if unable to read metadata (e.g. IOError)
    """
    return

  @util.abstractmethod
  def GetKey(self, version_number):
    """
    Return the key corresponding to the given version.

    @param version_number: the version number of the desired key
    @type version_number: integer

    @return: JSON string representation of a Key object
    @rtype: string

    @raise KeyczarError: if unable to read key info (e.g. IOError)
    """
    return

  @util.abstractmethod
  def Close(self):
    """
    Clean up this reader

    @raise KeyczarError: if error during close
    """
    return

  @classmethod
  def CreateReader(cls, location):
    """
    Return an instance of this class if it handles the location

    @param location: where (file, uri, etc) the reader should read from
    @type location: string
    """
    raise NotImplementedError('CreateReader() class method MUST be implemented for:%s' %cls)
class FileReader(Reader):
  """Reader that reads key data from files."""

  def __init__(self, location):
    self._location = location

  def GetMetadata(self):
    # Key set metadata lives in a file literally named "meta".
    meta_path = os.path.join(self._location, "meta")
    return util.ReadFile(meta_path)

  def GetKey(self, version_number):
    # Each key version is stored in a file named after its number.
    key_path = os.path.join(self._location, str(version_number))
    return util.ReadFile(key_path)

  def Close(self):
    # Nothing to close - util.ReadFile() closes it
    return

  @classmethod
  def CreateReader(cls, location):
    # str() handles path-like objects (e.g. Path from django-environ).
    location = str(location)
    if not os.path.exists(location):
      return None
    return FileReader(location)
class StaticKeyReader(Reader):
  """Reader that returns a static key"""

  def __init__(self, key, purpose):
    # Wrap the given key in minimal metadata marking it as the primary
    # version (number 1) of an "Imported" key set.
    self._key = key
    self._meta = keydata.KeyMetadata("Imported", purpose, key.type)
    self._meta.AddVersion(keydata.KeyVersion(1, keyinfo.PRIMARY, False))

  def GetMetadata(self):
    return str(self._meta)

  def GetKey(self, version_number):
    # The same key is returned regardless of the requested version.
    return str(self._key)

  def Close(self):
    # Nothing to close
    return

  @classmethod
  def CreateReader(cls, location):
    # cannot be instantiated indirectly
    return
class EncryptedReader(Reader):
  """Reader that decrypts key data supplied by an inner reader."""

  def __init__(self, reader, crypter):
    self._reader = reader
    self._crypter = crypter

  def GetMetadata(self):
    # Metadata is stored in plaintext; delegate untouched.
    return self._reader.GetMetadata()

  def GetKey(self, version_number):
    encrypted = self._reader.GetKey(version_number)
    return self._crypter.Decrypt(encrypted)

  def Close(self):
    # Nothing to close - util.ReadFile() closes it
    return

  @classmethod
  def CreateReader(cls, location):
    # cannot be instantiated from a location; construct directly instead
    return
class MockReader(Reader):
  """Mock reader used for testing Keyczart."""

  def __init__(self, name, purpose, key_type, encrypted=False):
    # In-memory metadata plus private/public key maps keyed by version number.
    self.kmd = keydata.KeyMetadata(name, purpose, key_type, encrypted)
    self.pubkmd = None
    self.keys = {}
    self.pubkeys = {}

  @property
  def numkeys(self):
    # Number of key versions currently stored.
    return len(self.keys)

  def GetMetadata(self):
    return str(self.kmd)

  def GetKey(self, version_number):
    try:
      return str(self.keys[version_number])
    except KeyError:
      raise errors.KeyczarError("Unrecognized Version Number")

  def GetStatus(self, version_number):
    """Return the status of the given key version."""
    return self.kmd.GetVersion(version_number).status

  def Close(self):
    # Nothing to close
    return

  def SetKey(self, version_number, key):
    self.keys[version_number] = key

  def SetPubKey(self, version_number, key):
    self.pubkeys[version_number] = key

  def AddKey(self, version_number, status, size=None):
    """Utility method for testing."""
    key = keys.GenKey(self.kmd.type, size)
    self.keys[version_number] = key
    return self.kmd.AddVersion(keydata.KeyVersion(version_number, status,
                                                  False))

  def RemoveKey(self, version_number):
    """Mocks out deleting revoked key files."""
    self.keys.pop(version_number)

  def ExistsVersion(self, version_number):
    return version_number in self.keys

  def HasPubKey(self, version_number):
    # True when the stored public key matches the private key's public half.
    priv = self.keys[version_number]
    pub = self.pubkeys[version_number]
    return priv.public_key == pub

  def GetKeySize(self, version_number):
    return self.keys[version_number].size

  @classmethod
  def CreateReader(cls, location):
    # cannot be instantiated
    return
|
import numpy as np
import cv2
class DensePoseEstimator(object):
    """Derives a rough per-person binary mask from a depth image by
    thresholding depth around each tracked person's estimated distance."""
    def __init__(self):
        pass

    def estimate(self, depth_image, person_tracks, view_pose):
        # view_pose is currently unused. Mutates each confirmed, located
        # track in place by setting its `mask` attribute.
        if depth_image is not None:
            for p in person_tracks:
                if p.is_located() and p.is_confirmed():
                    # Crop the depth image to the person's bounding box.
                    xmin = int(p.bbox.xmin)
                    ymin = int(p.bbox.ymin)
                    h = int(p.bbox.height())
                    w = int(p.bbox.width())
                    depth_image_cropped = depth_image[ymin:ymin+h, xmin:xmin+w]
                    # assumes depth pixels are in millimetres; /1000.0
                    # converts to the same unit as p.bbox.depth -- TODO confirm
                    mask = depth_image_cropped.copy()/1000.0
                    # Keep only pixels within +/-0.4 of the person's depth;
                    # everything kept becomes 255, the rest 0.
                    mask[mask > p.bbox.depth + 0.4] = 0
                    mask[mask < p.bbox.depth - 0.4] = 0
                    mask[mask != 0] = 255
                    p.mask = mask
|
# coding=utf-8
import io
import os
import re
from setuptools import setup
PACKAGE = 'moar'
def get_path(*args):
return os.path.join(os.path.dirname(__file__), *args)
def read_from(filepath):
    """Return the full UTF-8 text content of *filepath*."""
    with io.open(filepath, 'rt', encoding='utf8') as handle:
        contents = handle.read()
    return contents
def get_version():
    """Extract the package's __version__ string from its __init__.py."""
    source = read_from(get_path(PACKAGE, '__init__.py'))
    match = re.search(r"__version__\s*=\s*u?'([^']+)'", source)
    return str(match.group(1))
def find_package_data(root, include_files=('.gitignore', )):
    """Collect data-file paths (relative to *root*) for package_data.

    Skips hidden/private directories and dotfiles/.pyc files, except for
    names listed in *include_files* which are always kept.
    """
    files = []
    src_root = get_path(root).rstrip('/') + '/'
    for dirpath, subdirs, filenames in os.walk(src_root):
        # BUG FIX: prune hidden/private directories in place so os.walk does
        # not descend into them. The original only skipped the directory
        # itself, so files nested deeper (e.g. ".git/objects/...") whose own
        # directory name looked normal were still collected.
        subdirs[:] = [d for d in subdirs if not d.startswith(('.', '_'))]
        path, dirname = os.path.split(dirpath)
        if dirname.startswith(('.', '_')):
            continue
        dirpath = dirpath.replace(src_root, '')
        for filename in filenames:
            is_valid_filename = not (
                filename.startswith('.') or
                filename.endswith('.pyc')
            )
            include_it_anyway = filename in include_files
            if is_valid_filename or include_it_anyway:
                files.append(os.path.join(dirpath, filename))
    return files
def find_packages_data(*roots):
    """Map each root package name to its collected data-file list."""
    return {root: find_package_data(root) for root in roots}
def get_description():
    """Return the package's module docstring, stripped, for use as long_description."""
    source = read_from(get_path(PACKAGE, '__init__.py'))
    match = re.search('"""(.+)"""', source, re.DOTALL)
    return match.group(1).strip()
def get_requirements(filename='requirements.txt'):
    """Parse a pip requirements file, dropping blanks and comment lines."""
    data = read_from(get_path(filename))
    requirements = []
    for raw_line in data.splitlines():
        line = raw_line.strip()
        if line and not line.startswith('#'):
            requirements.append(line)
    return requirements
# Package metadata; data files are collected from both the package and tests.
setup(
    name='Moar',
    version=get_version(),
    author='Juan-Pablo Scaletti',
    author_email='juanpablo@lucumalabs.com',
    packages=[PACKAGE],
    package_data=find_packages_data(PACKAGE, 'tests'),
    zip_safe=False,
    url='http://github.com/lucuma/Moar',
    license='MIT license (see LICENSE)',
    description='Easy thumbnails for everyone',
    # long_description comes from the package docstring in __init__.py
    long_description=get_description(),
    install_requires=get_requirements(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
# python3 main.py -m ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model -l mscoco_label_map.pbtxt -t 0.5 -roi 0.5 -v example.mp4 -a
import cv2
import numpy as np
import argparse
import tensorflow as tf
import dlib
import imutils
import socket
import time
import base64
import requests
from object_detection.utils import label_map_util
from object_detection.utils import ops as utils_ops
from trackable_object import TrackableObject
from centroidtracker import CentroidTracker
# Server video
# UDP socket used to stream base64-encoded JPEG frames to a remote viewer.
BUFF_SIZE = 65536  # receive-buffer size in bytes (max datagram we accept)
server_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,BUFF_SIZE)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
print(host_ip)
port = 9999
socket_address = (host_ip,port)
server_socket.bind(socket_address)
print('Listening at:',socket_address)
# Swab Test Location
# NOTE(review): `id` shadows the builtin; it selects the site profile used
# inside run_inference(). `maks` is the site capacity (max crowd size).
id = 1002
maks = 8
url_reg = 'http://mas-saco.herokuapp.com/api/auth/hospital/register'
url_add = 'http://mas-saco.herokuapp.com/api/update/hospital/' + str(id)
# database
# url_con = 'http://localhost/massaco/connect.php'
# url_add = 'http://localhost/massaco/add.php'
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
# Global Variable
# Debounce flags for the once-a-minute database push. NOTE(review): they are
# also assigned inside run_inference(), which makes them locals there -- these
# module-level values are never actually read by that function.
send = 0
check = 0
def load_model(model_path):
    """Load a TensorFlow SavedModel from *model_path* on a fresh session."""
    tf.keras.backend.clear_session()
    return tf.saved_model.load(model_path)
def run_inference_for_single_image(model, image):
    """Run the detection model on one image and return numpy outputs.

    A batch dimension is added before inference and stripped afterwards;
    detection classes are cast to int64 and, when present, detection masks
    are reframed to image coordinates and thresholded at 0.5.
    """
    frame = np.asarray(image)
    # The input needs to be a tensor, and the model expects a batch of
    # images, so add a leading axis.
    batched = tf.convert_to_tensor(frame)[tf.newaxis, ...]
    # Run inference
    raw = model(batched)
    # All outputs are batched tensors: take index [0] to drop the batch
    # dimension and keep only the first num_detections rows.
    num_detections = int(raw.pop('num_detections'))
    outputs = {name: tensor[0, :num_detections].numpy()
               for name, tensor in raw.items()}
    outputs['num_detections'] = num_detections
    # detection_classes should be ints.
    outputs['detection_classes'] = outputs['detection_classes'].astype(np.int64)
    # Handle models with masks:
    if 'detection_masks' in outputs:
        # Reframe the bbox masks to the full image size.
        reframed = utils_ops.reframe_box_masks_to_image_masks(
            outputs['detection_masks'], outputs['detection_boxes'],
            frame.shape[0], frame.shape[1])
        outputs['detection_masks_reframed'] = tf.cast(
            reframed > 0.5, tf.uint8).numpy()
    return outputs
def run_inference(model, category_index, cap, labels, roi_position=0.6, threshold=0.5, x_axis=True, skip_frames=20, save_path='', show=True):
    """Detect, track and count people crossing an ROI line in `cap`.

    Every `skip_frames` frames the TF `model` runs a full detection; in
    between, dlib correlation trackers follow the boxes.  A CentroidTracker
    assigns stable IDs, and crossings of the ROI line (at `roi_position`, a
    0-1 fraction of width/height depending on `x_axis`) update the
    left/right/up/down counters.  Annotated frames are JPEG-encoded and
    streamed over the module-level UDP `server_socket`; once a minute a
    report is printed (the database push is currently commented out).

    `labels` restricts counting to the given class names (None = all
    classes); detections scoring below `threshold` are ignored.  If
    `save_path` is non-empty the annotated video is also written there;
    `show` opens a local preview window.  Reads module globals `id`
    (site profile) and `maks` (site capacity).
    """
    counter = [0, 0, 0, 0]  # left, right, up, down
    total_frames = 0
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}
    # BUGFIX: `send` and `check` are assigned later in the loop, which makes
    # them locals; the original read `check` before its first assignment and
    # raised UnboundLocalError on the first minute boundary.
    send = 0
    check = 0
    # Check if results should be saved
    if save_path:
        width = int(cap.get(3))
        height = int(cap.get(4))
        fps = cap.get(cv2.CAP_PROP_FPS)
        out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(
            'M', 'J', 'P', 'G'), fps, (width, height))
    # Site profile selected by the module-level `id`.
    if(id == 1001):
        nama = 'pkm_mulyorejo'
        lokasi = 'Puskemas Mulyorejo Surabaya'
        telp = '(031) 381 6885'
        latitude = '-7.26067'
        langitude = '112.7826471'
        pcr = 705000
        antigen = 250000
    elif(id == 1002):
        nama = 'pkm_keputih'
        lokasi = 'Puskemas Keputih Surabaya'
        telp = '5820 1517'
        latitude = '-7.2905265'
        langitude = '112.7981101'
        pcr = 650000
        antigen = 245000
    elif(id == 1003):
        nama = 'pkm_dr_soetomo'
        lokasi = 'Puskemas Dr. Soetomo Surabaya'
        telp = '(031) 567 8279'
        latitude = '-7.2773688'
        langitude = '112.731944'
        pcr = 830000
        antigen = 260000
    elif(id == 1004):
        nama = 'pkm_kenjeran'
        lokasi = 'Puskemas Kenjeran Surabaya'
        telp = '(031) 382 2103'
        latitude = '-7.2323769'
        langitude = '112.7712687'
        pcr = 800000
        antigen = 230000
    else:
        # BUGFIX: an unknown id previously fell through and crashed with a
        # NameError on `nama` at the startup banner below.
        raise ValueError('Unknown swab test location id: {}'.format(id))
    print()
    print()
    print("====================================================================")
    print("==================== ====================")
    print("==================== Mas Saco System Starting ====================")
    print("==================== ====================")
    print("====================================================================")
    print()
    named_tuple = time.localtime() # get struct_time
    time_daynow = time.strftime("%m/%d/%Y", named_tuple)
    time_string = time.strftime("%H:%M:%S", named_tuple)
    print("System Start at")
    print("id :", id)
    print("nama :", nama)
    print("Waktu :", time_string)
    print("Tanggal :", time_daynow)
    print("Lokasi :", lokasi)
    # resp = requests.post(url=url_reg, data={'id':str(id), 'nama':str(nama), 'lokasi':str(lokasi), 'latitude':str(latitude), 'longitude':str(langitude), 'telp':str(telp)})
    # data = resp.json()
    # key = data['api_key']
    # print('Response :', str(resp))
    # print('Key :', str(key))
    print("Status : Connected to database")
    print()
    print("====================================================================")
    print()
    while True:
        # Wait for a client hello datagram; frames are streamed back to it.
        msg,client_addr = server_socket.recvfrom(BUFF_SIZE)
        print('GOT connection from ',client_addr)
        while (cap.isOpened()):
            ret, image_np = cap.read()
            if not ret:
                break
            height, width, _ = image_np.shape
            rgb = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            status = "Waiting"
            rects = []
            if total_frames % skip_frames == 0:
                # Periodic full detection: rebuild the dlib trackers.
                status = "Detecting"
                trackers = []
                # Actual detection.
                output_dict = run_inference_for_single_image(model, image_np)
                for i, (y_min, x_min, y_max, x_max) in enumerate(output_dict['detection_boxes']):
                    if output_dict['detection_scores'][i] > threshold and (labels == None or category_index[output_dict['detection_classes'][i]]['name'] in labels):
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(
                            int(x_min * width), int(y_min * height), int(x_max * width), int(y_max * height))
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
            else:
                status = "Tracking"
                for tracker in trackers:
                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()
                    # unpack the position object
                    x_min, y_min, x_max, y_max = int(pos.left()), int(
                        pos.top()), int(pos.right()), int(pos.bottom())
                    # add the bounding box coordinates to the rectangles list
                    rects.append((x_min, y_min, x_max, y_max))
            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)
                else:
                    # BUGFIX: count crossings against the `roi_position`
                    # parameter; the original referenced the global
                    # `args.roi_position` (4 occurrences), which breaks when
                    # run_inference() is imported and called directly.
                    if x_axis and not to.counted:
                        x = [c[0] for c in to.centroids]
                        direction = centroid[0] - np.mean(x)
                        if centroid[0] > roi_position*width and direction > 0 and np.mean(x) < roi_position*width:
                            counter[1] += 1
                            to.counted = True
                        elif centroid[0] < roi_position*width and direction < 0 and np.mean(x) > roi_position*width:
                            counter[0] += 1
                            to.counted = True
                    elif not x_axis and not to.counted:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        if centroid[1] > roi_position*height and direction > 0 and np.mean(y) < roi_position*height:
                            counter[3] += 1
                            to.counted = True
                        elif centroid[1] < roi_position*height and direction < 0 and np.mean(y) > roi_position*height:
                            counter[2] += 1
                            to.counted = True
                to.centroids.append(centroid)
                trackableObjects[objectID] = to
                text = "Detected ID {}".format(objectID)
                cv2.putText(image_np, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.circle(image_np, (centroid[0], centroid[1]), 8, (0, 0, 255), -1)
            # Draw ROI line
            if x_axis:
                cv2.line(image_np, (int(roi_position*width), 0),
                         (int(roi_position*width), height), (0xFF, 0, 0), 5)
            else:
                cv2.line(image_np, (0, int(roi_position*height)),
                         (width, int(roi_position*height)), (0xFF, 0, 0), 5)
            # Timer
            named_tuple = time.localtime() # get struct_time
            time_daynam = time.strftime("%A", named_tuple)
            time_daynow = time.strftime("%m/%d/%Y", named_tuple)
            time_string = time.strftime("%H:%M:%S", named_tuple)
            time_second = time.strftime("%S", named_tuple)
            # display count and status
            font = cv2.FONT_HERSHEY_SIMPLEX
            if x_axis:
                total_keramaian = counter[1] - counter[0]
                cv2.putText(image_np, f'Kiri: {counter[0]}; Kanan: {counter[1]}', (
                    10, 35), font, 0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            else:
                total_keramaian = counter[3] - counter[2]
                cv2.putText(image_np, f'Up: {counter[2]}; Down: {counter[3]}', (
                    10, 35), font, 0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            cv2.putText(image_np, 'Status: ' + status, (10, 70), font,
                        0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            cv2.putText(image_np, f'Total: {total_keramaian}', (10, 110), font,
                        0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            cv2.putText(image_np, f'Time: {time_string}', (10, 150), font,
                        0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            cv2.putText(image_np, f'Date: {time_daynow}', (10, 190), font,
                        0.8, (0, 0xFF, 0xFF), 2, cv2.FONT_HERSHEY_SIMPLEX)
            # Crowd can't be negative (more exits than entries so far).
            if( total_keramaian < 1):
                total_keramaian = 0
            tersedia = maks - total_keramaian  # remaining capacity (currently unused)
            # Send to database once a minute, debounced via send/check so the
            # report fires only on the first frame of second "00".
            if(time_second == "00"):
                send = 1
            else:
                send = 0
            if(send):
                if(send != check):
                    print()
                    print("====================================================================")
                    print("Lokasi :", lokasi)
                    print("Hari :", time_daynam)
                    print("Tanggal :", time_daynow)
                    print("Waktu :", time_string)
                    print("====================================================================")
                    print("======================== Total Keramaian =========================")
                    if x_axis:
                        print("Masuk : " + str(counter[1]) + " orang")
                        print("Keluar : " + str(counter[0]) + " orang")
                    else:
                        print("Masuk : " + str(counter[3]) + " orang")
                        print("Keluar : " + str(counter[2]) + " orang")
                    print("Total : " + str(total_keramaian) + " orang")
                    print("====================================================================")
                    # print("Send to database...")
                    # x = requests.get(str(url_add) + '?&id=' + str(id) + '&nama=' + str(nama) + '&waktu=' + str(time_string) + '&hari=' + str(time_daynam) + '&tanggal=' + str(time_daynow) + '&keramaian=' + str(total_keramaian))
                    # print(x.text)
                    # resp = requests.put(url=url_add, data={'keramaian':int(total_keramaian), 'kapasitas':int(maks), 'pcr':int(pcr), 'antigen':int(antigen), 'waktu':str(time_string), 'tanggal':str(time_daynow)})
                    # print('Response :', str(resp))
                    print("====================================================================")
                    print()
                check = send
            else:
                check = 0
            # Stream the annotated frame to the connected client.
            frame = imutils.resize(image_np,width=500)
            encoded,buffer = cv2.imencode('.jpg',frame,[cv2.IMWRITE_JPEG_QUALITY,80])
            message = base64.b64encode(buffer)
            server_socket.sendto(message,client_addr)
            if show:
                cv2.imshow('Mas Saco System', image_np)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    print()
                    print("====================================================================")
                    print("================= Mas Saco System Stoped =================")
                    print("================= " + time_string + " " + time_daynow + " =================")
                    print("================= Copyright 2021 @ Yuk Swab Team =================")
                    print("====================================================================")
                    # NOTE(review): this only breaks the capture loop; the
                    # outer `while True` then calls recvfrom() on the closed
                    # socket -- confirm the intended shutdown behaviour.
                    server_socket.close()
                    break
            if save_path:
                out.write(image_np)
            total_frames += 1
        cap.release()
        if save_path:
            out.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # -m ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model -l mscoco_label_map.pbtxt -t 0.5 -roi 0.5 -v example.mp4 -a
    parser = argparse.ArgumentParser(
        description='Detect objects inside webcam videostream')
    parser.add_argument('-m', '--model', type=str,
                        default='ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model', help='Model Path')
    parser.add_argument('-l', '--labelmap', type=str,
                        default='mscoco_label_map.pbtxt', help='Path to Labelmap')
    parser.add_argument('-v', '--video_path', type=str, default='example.mp4',
                        help='Path to video. If None camera will be used')
    parser.add_argument('-t', '--threshold', type=float,
                        default=0.5, help='Detection threshold')
    parser.add_argument('-roi', '--roi_position', type=float,
                        default=0.5, help='ROI Position (0-1)')
    parser.add_argument('-la', '--labels', nargs='+', type=str,
                        help='Label names to detect (default="all-labels")')
    # NOTE: --axis and --show use action="store_false" with default=True,
    # so passing the flag turns the feature OFF.
    parser.add_argument('-a', '--axis', default=True, action="store_false",
                        help='Axis for cumulative counting (default=x axis)')
    parser.add_argument('-s', '--skip_frames', type=int, default=20,
                        help='Number of frames to skip between using object detection model')
    parser.add_argument('-sh', '--show', default=True,
                        action="store_false", help='Show output')
    parser.add_argument('-sp', '--save_path', type=str, default='',
                        help='Path to save the output. If None output won\'t be saved')
    args = parser.parse_args()
    detection_model = load_model(args.model)
    category_index = label_map_util.create_category_index_from_labelmap(
        args.labelmap, use_display_name=True)
    # if args.video_path != '':
    #     cap = cv2.VideoCapture(args.video_path)
    # else:
    # NOTE(review): --video_path is parsed but ignored; the camera (index 0)
    # is always used -- confirm whether the commented branch should be live.
    cap = cv2.VideoCapture(0)
    # NOTE(review): execution continues even if the capture failed to open.
    if not cap.isOpened():
        print("Error opening video stream or file")
    run_inference(detection_model, category_index, cap, labels=args.labels, threshold=args.threshold,
                  roi_position=args.roi_position, x_axis=args.axis, skip_frames=args.skip_frames, save_path=args.save_path, show=args.show)
|
from app import db, students
# Create all tables defined on the SQLAlchemy metadata, then insert one
# sample row to smoke-test the database setup.
db.create_all()
# assumes students(name, city, address, zip) positional order -- TODO confirm
# against the model definition in app.
test_rec = students(
    'Marco Hemken',
    'Los Angeles',
    '123 Foobar Ave',
    '12345'
)
db.session.add(test_rec)
db.session.commit()
|
import locale
import os
from pathlib import Path
import click
from tqdm import tqdm # type: ignore
from kaleidoscope.gallery import generate_gallery_ini, generate_album_ini
from kaleidoscope.generator import generate, DefaultListener
from kaleidoscope.reader import read_gallery
gallery_path = "."
@click.group()
@click.option('--gallery', type=click.Path())
@click.pass_context
def cli(ctx, gallery):
    """Entry point: set the user locale and remember the gallery path."""
    global gallery_path
    locale.setlocale(locale.LC_ALL, '')
    if gallery is not None:
        gallery_path = gallery
@cli.command()
def build():
    """Build gallery."""
    # Render the whole gallery into an "output" folder inside it.
    gallery = read_gallery(gallery_path)
    destination = os.path.join(gallery_path, "output")
    generate(gallery, destination, ProgressReporter())
@cli.command(name='init-gallery')
def init_gallery():
    """Generate gallery configuration file."""
    # Write a template gallery.ini at the gallery root.
    root = Path(gallery_path)
    generate_gallery_ini(root)
@cli.command(name='init-album')
@click.argument('directory',
                type=click.Path(exists=True, file_okay=False, dir_okay=True))
def init_album(directory):
    """Generate album configuration file with list of photos."""
    # The album directory is resolved relative to the gallery root.
    album_dir = Path(gallery_path) / directory
    generate_album_ini(album_dir)
class ProgressReporter(DefaultListener):
    """Reports progress of gallery generation to a user."""

    def __init__(self):
        # Lazily created tqdm bar; only exists while an album is resizing.
        self._progressbar = None

    def starting_album(self, album, photos_to_process):
        print(f"Generating album {album.title}")
        if photos_to_process > 0:
            self._progressbar = tqdm(desc="Resizing", unit="photo",
                                     total=photos_to_process)

    def resizing_photo(self, photo):
        # One tick per resized photo.
        self._progressbar.update(1)

    def finishing_album(self):
        if self._progressbar:
            self._progressbar.close()
            self._progressbar = None
if __name__ == '__main__':
    cli()  # delegate to the click command group
|
'''
Praveen Manimaran
CIS 41A Spring 2020
Exercise I
'''
import math
class Circle:
    """A circle defined by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def getArea(self):
        """Return the area, pi * r^2."""
        return math.pi * self.radius ** 2
class Cylinder(Circle):
    """A cylinder: a circular base extruded to a height."""

    def __init__(self, radius, height):
        super().__init__(radius)
        self.height = height

    def getVolume(self):
        """Return the volume, base area * height."""
        return super().getArea() * self.height
# Demo: exercise both classes and print the formatted results.
c1 = Circle(4)
print(f'Circle area is: {c1.getArea():.2f}')
c2 = Cylinder(2,5)
print(f'Cylinder volume is: {c2.getVolume():.2f}')
'''
Execution results:
Circle area is: 50.27
Cylinder volume is: 62.83
''' |
import os
import sys
from django_fakery.compat import HAS_GEOS
# psycopg2 has no PyPy support; psycopg2cffi provides a drop-in replacement.
if hasattr(sys, "pypy_version_info"):
    from psycopg2cffi import compat

    compat.register()

# Server-side cursors don't work under the PyPy driver, so disable them there.
DISABLE_SERVER_SIDE_CURSORS = False
if os.environ.get("PYTHON_VERSION", "").startswith("pypy"):
    DISABLE_SERVER_SIDE_CURSORS = True

DEFAULT_AUTO_FIELD = "django.db.models.AutoField"

DATABASES = {
    "default": {
        # GIS backend only when GEOS is available.
        "ENGINE": "django.contrib.gis.db.backends.postgis"
        if HAS_GEOS
        else "django.db.backends.postgresql_psycopg2",
        "NAME": "django_fakery",
        "USER": "postgres",
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD", None),
        # BUGFIX: was hard-coded to True, which ignored the PyPy detection
        # above; use the computed flag instead.
        "DISABLE_SERVER_SIDE_CURSORS": DISABLE_SERVER_SIDE_CURSORS,
        "HOST": os.environ.get("POSTGRES_HOST", None),
    }
}

USE_TZ = True
# NOTE(review): Django's setting is spelled TIME_ZONE; "TIMEZONE" is ignored
# by Django -- confirm intent before renaming.
TIMEZONE = "America/Chicago"
INSTALLED_APPS = ["django.contrib.auth", "django.contrib.contenttypes", "tests"]
SILENCED_SYSTEM_CHECKS = ["1_7.W001"]
SECRET_KEY = "itsasecret"
|
import time
from ....models.models import Speaker
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from ..projector_countdown.mixins import CountdownControl
@register_action("speaker.end_speech")
class SpeakerEndSpeach(CountdownControl, UpdateAction):
    """
    Action to stop speakers.
    """

    model = Speaker()
    schema = DefaultSchema(Speaker()).get_default_schema(
        required_properties=["id"],
        title="End speach schema",
        description="Schema to stop a speaker's speach.",
    )
    permission = Permissions.ListOfSpeakers.CAN_MANAGE

    def get_updated_instances(self, action_data: ActionData) -> ActionData:
        for instance in action_data:
            speaker_fqid = FullQualifiedId(self.model.collection, instance["id"])
            speaker = self.datastore.get(
                speaker_fqid,
                mapped_fields=["begin_time", "end_time", "meeting_id"],
            )
            # A speech can only be ended while it is running: it must have
            # started and must not already have ended.
            not_speaking = (
                speaker.get("begin_time") is None
                or speaker.get("end_time") is not None
            )
            if not_speaking:
                raise ActionException(
                    f"Speaker {instance['id']} is not speaking at the moment."
                )
            instance["end_time"] = round(time.time())
            # reset projector_countdown
            meeting_fqid = FullQualifiedId(
                Collection("meeting"), speaker["meeting_id"]
            )
            meeting = self.datastore.get(
                meeting_fqid,
                [
                    "list_of_speakers_couple_countdown",
                    "list_of_speakers_countdown_id",
                ],
            )
            countdown_id = meeting.get("list_of_speakers_countdown_id")
            if meeting.get("list_of_speakers_couple_countdown") and countdown_id:
                self.control_countdown(countdown_id, "reset")
            yield instance
|
#!/usr/bin/env python
# @package nurbs
# Approximate a 3D curve with a B-Spline curve from either a set of data points or a set of control points
#
# If a set of data points is given, it generates a B-spline that either approximates the curve in the least square
# sense or interpolates the curve. It also computes the derivative of the 3D curve.
# getCourbe3D() returns the 3D fitted curve. The fitted z coordinate corresponds to the initial z, and the x and y are
# averaged for a given z. getCourbe3D_deriv() returns the derivative of the 3D fitted curve, also averaged along the z-axis.
#
# USAGE
# ---------------------------------------------------------------------------------------
# from spinalcordtoolbox.centerline.nurbs import *
# nurbs=NURBS(degree,precision,data)
#
# MANDATORY ARGUMENTS
# ---------------------------------------------------------------------------------------
# degree the degree of the fitting B-spline curve
# precision number of points before averaging data
# data 3D list [x,y,z] of the data requiring fitting
#
# EXAMPLES
# ---------------------------------------------------------------------------------------
# from spinalcordtoolbox.centerline.nurbs import *
# nurbs = NURBS(3,1000,[[x_centerline[n],y_centerline[n],z_centerline[n]] for n in range(len(x_centerline))])
# P = nurbs.getCourbe3D()
# x_centerline_fit = P[0]
# y_centerline_fit = P[1]
# z_centerline_fit = P[2]
# D = nurbs.getCourbe3D_deriv()
# x_centerline_fit_der = D[0]
# y_centerline_fit_der = D[1]
# z_centerline_fit_der = D[2]
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 NeuroPoly, Polytechnique Montreal <www.neuropoly.info>
# Authors: Benjamin De Leener, Julien Touati
import os
import numpy as np
import logging
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.types import Centerline
logger = logging.getLogger(__name__)
class ReconstructionError(RuntimeError):
    """Raised by the construct* methods when the NURBS evaluation
    degenerates (basis functions sum to near zero), signalling an
    unusable reconstruction."""
    pass
class NURBS:
def __init__(self, degre=3, precision=1000, liste=None, sens=False, nbControl=None, verbose=1, tolerance=0.01,
maxControlPoints=50, all_slices=True, twodim=False, weights=True):
"""
Ce constructeur initialise une NURBS et la construit.
Si la variable sens est True : On construit la courbe en fonction des points de controle
Si la variable sens est False : On reconstruit les points de controle en fonction de la courbe
"""
self.degre = degre + 1
self.sens = sens
self.pointsControle = []
self.pointsControleRelatif = []
self.courbe3D, self.courbe3D_deriv = [], []
self.courbe2D, self.courbe2D_deriv = [], []
self.nbControle = 10 # correspond au nombre de points de controle calcules.
self.precision = precision
self.tolerance = tolerance # in mm
self.maxControlPoints = maxControlPoints
self.verbose = verbose
self.all_slices = all_slices
self.twodim = twodim
if sens: # si on donne les points de controle#####
if type(liste[0][0]).__name__ == 'list':
self.pointsControle = liste
else:
self.pointsControle.append(liste)
for li in self.pointsControle:
if twodim:
[[P_x, P_y], [P_x_d, P_y_d]] = self.construct2D(li, degre, self.precision)
self.courbe2D.append([[P_x[i], P_y[i]] for i in len(P_x)])
self.courbe2D_deriv.append([[P_x_d[i], P_y_d[i]] for i in len(P_x_d)])
else:
[[P_x, P_y, P_z], [P_x_d, P_y_d, P_z_d]] = self.construct3D(li, degre, self.precision)
self.courbe3D.append([[P_x[i], P_y[i], P_z[i]] for i in len(P_x)])
self.courbe3D_deriv.append([[P_x_d[i], P_y_d[i], P_z_d[i]] for i in len(P_x_d)])
else:
# La liste est sous la forme d'une liste de points
P_x = [x[0] for x in liste]
P_y = [x[1] for x in liste]
if not twodim:
P_z = [x[2] for x in liste]
self.P_z = P_z
if nbControl is None:
# self.nbControl = len(P_z)/5 ## ordre 3 -> len(P_z)/10, 4 -> len/7, 5-> len/5 permet d'obtenir
# une bonne approximation sans trop "interpoler" la courbe compute the ideal number of control points
# based on tolerance
error_curve = 1000.0
self.nbControle = self.degre + 1
nb_points = len(P_x)
if self.nbControle > nb_points - 1:
raise ArithmeticError('There are too few points to compute. The number of points of the curve must '
'be strictly superior to degre + 2, in this case: ' + str(self.nbControle)
+ '. Either change degree to a lower value, or add points to the curve.')
# compute weights based on curve density
w = [1.0] * len(P_x)
if weights:
if not twodim:
for i in range(1, len(P_x) - 1):
dist_before = np.sqrt(
(P_x[i - 1] - P_x[i]) ** 2 + (P_y[i - 1] - P_y[i]) ** 2 + (P_z[i - 1] - P_z[i]) ** 2)
dist_after = np.sqrt(
(P_x[i] - P_x[i + 1]) ** 2 + (P_y[i] - P_y[i + 1]) ** 2 + (P_z[i] - P_z[i + 1]) ** 2)
w[i] = (dist_before + dist_after) / 2.0
else:
for i in range(1, len(P_x) - 1):
dist_before = np.sqrt((P_x[i - 1] - P_x[i]) ** 2 + (P_y[i - 1] - P_y[i]) ** 2)
dist_after = np.sqrt((P_x[i] - P_x[i + 1]) ** 2 + (P_y[i] - P_y[i + 1]) ** 2)
w[i] = (dist_before + dist_after) / 2.0
w[0], w[-1] = w[1], w[-2]
list_param_that_worked = []
last_error_curve = 0.0
second_last_error_curve = 0.0
while self.nbControle < len(P_x) and self.nbControle <= self.maxControlPoints:
if abs(error_curve - last_error_curve) <= self.tolerance and abs(
error_curve - second_last_error_curve) <= self.tolerance and error_curve <= last_error_curve and error_curve <= second_last_error_curve:
break
second_last_error_curve = last_error_curve
last_error_curve = error_curve
# compute the nurbs based on input data and number of controle points
logger.debug('Test: # of control points = ' + str(self.nbControle))
try:
if not twodim:
self.pointsControle = self.reconstructGlobalApproximation(P_x, P_y, P_z, self.degre,
self.nbControle, w)
self.courbe3D, self.courbe3D_deriv = self.construct3D(self.pointsControle, self.degre,
self.precision / 3) # generate curve with low resolution
else:
self.pointsControle = self.reconstructGlobalApproximation2D(P_x, P_y, self.degre,
self.nbControle, w)
self.courbe2D, self.courbe2D_deriv = self.construct2D(self.pointsControle, self.degre,
self.precision / 3)
# compute error between the input data and the nurbs
error_curve = 0.0
if not twodim:
for i in range(0, len(P_x)):
min_dist = 10000.0
for k in range(0, len(self.courbe3D[0])):
dist = (self.courbe3D[0][k] - P_x[i]) ** 2 + (self.courbe3D[1][k] - P_y[i]) ** 2 + (
self.courbe3D[2][k] - P_z[i]) ** 2
if dist < min_dist:
min_dist = dist
error_curve += min_dist
else:
for i in range(0, len(P_x)):
min_dist = 10000.0
for k in range(0, len(self.courbe2D[0])):
dist = (self.courbe2D[0][k] - P_x[i]) ** 2 + (self.courbe2D[1][k] - P_y[i]) ** 2
if dist < min_dist:
min_dist = dist
error_curve += min_dist
error_curve /= float(len(P_x))
if verbose >= 1:
logger.info('Error on approximation = ' + str(np.round(error_curve, 2)) + ' mm')
# Create a list of parameters that have worked in order to call back the last one that has worked
list_param_that_worked.append([self.nbControle, self.pointsControle, error_curve])
except ReconstructionError:
logger.warning('NURBS instability -> wrong reconstruction')
error_curve = last_error_curve + 10000.0
except np.linalg.LinAlgError as err_linalg: # if there is a linalg error
if 'singular matrix' in str(err_linalg): # and if it is a singular matrix
logger.warning('Singular Matrix in NURBS algorithm -> wrong reconstruction')
error_curve = last_error_curve + 10000.0
else:
raise # if it is another linalg error, raises it (so it stops the script)
# prepare for next iteration
self.nbControle += 1
self.nbControle -= 1 # last addition does not count
# self.courbe3D, self.courbe3D_deriv = self.construct3D(self.pointsControle, self.degre, self.precision) # generate curve with hig resolution
# select number of control points that gives the best results
list_param_that_worked_sorted = sorted(list_param_that_worked,
key=lambda list_param_that_worked: list_param_that_worked[2])
nbControle_that_last_worked = list_param_that_worked_sorted[0][0]
pointsControle_that_last_worked = list_param_that_worked_sorted[0][1]
self.error_curve_that_last_worked = list_param_that_worked_sorted[0][2]
if not twodim:
self.courbe3D, self.courbe3D_deriv = self.construct3D_uniform(pointsControle_that_last_worked,
self.degre,
self.precision) # generate curve with hig resolution
else:
self.courbe2D, self.courbe2D_deriv = self.construct2D(pointsControle_that_last_worked, self.degre,
self.precision)
self.pointsControle = pointsControle_that_last_worked
if self.nbControle != nbControle_that_last_worked:
logger.debug("The fitting of the curve was done using {} control points: the number that gave "
"the best results. \nError on approximation = {} mm".
format(nbControle_that_last_worked, np.round(self.error_curve_that_last_worked, 2)))
else:
logger.debug('Number of control points of the optimal NURBS = {}'.format(self.nbControle))
else:
logger.debug('In NURBS we get nurbs_ctl_points = {}'.format(nbControl))
w = [1.0] * len(P_x)
self.nbControl = nbControl # increase nbeControle if "short data"
if not twodim:
self.pointsControle = self.reconstructGlobalApproximation(P_x, P_y, P_z, self.degre,
self.nbControle, w)
self.courbe3D, self.courbe3D_deriv = self.construct3D(self.pointsControle, self.degre,
self.precision)
else:
self.pointsControle = self.reconstructGlobalApproximation2D(P_x, P_y, self.degre, self.nbControle,
w)
self.courbe2D, self.courbe2D_deriv = self.construct2D(self.pointsControle, self.degre,
self.precision)
    def getControle(self):
        """Return the control points."""
        return self.pointsControle
    def setControle(self, pointsControle):
        """Replace the control points."""
        self.pointsControle = pointsControle
    def getCourbe3D(self):
        """Return the fitted 3D curve [P_x, P_y, P_z]."""
        return self.courbe3D
    def getCourbe3D_deriv(self):
        """Return the derivative of the fitted 3D curve."""
        return self.courbe3D_deriv
    def getCourbe2D(self):
        """Return the fitted 2D curve [P_x, P_y]."""
        return self.courbe2D
    def getCourbe2D_deriv(self):
        """Return the derivative of the fitted 2D curve."""
        return self.courbe2D_deriv
# Multiplie deux polynomes
def multipolynome(self, polyA, polyB):
result = []
for r in polyB:
temp = polyA * r[0]
result.append([temp, r[-1]])
return result
    def N(self, i, k, x):
        """Recursively build the B-spline basis function N_{i,k} over knot
        vector ``x`` as a list of [polynomial, knot-span-index] pieces
        (Cox-de Boor recursion), memoised in the module-global ``Nik_temp``
        table that construct3D/construct2D initialise before calling."""
        global Nik_temp
        if k == 1:
            # Order-1 basis: the constant polynomial 1 on span i.
            tab = [[np.poly1d(1), i + 1]]
        else:
            tab = []
            den_g = x[i + k - 1] - x[i]  # left-term denominator
            den_d = x[i + k] - x[i + 1]  # right-term denominator
            if den_g != 0:
                # memoise N_{i,k-1} on first use
                if Nik_temp[i][k - 1] == -1:
                    Nik_temp[i][k - 1] = self.N(i, k - 1, x)
                tab_b = self.multipolynome(np.poly1d([1 / den_g, -x[i] / den_g]), Nik_temp[i][k - 1])
                tab.extend(tab_b)
            if den_d != 0:
                if Nik_temp[i + 1][k - 1] == -1:
                    Nik_temp[i + 1][k - 1] = self.N(i + 1, k - 1, x)
                tab_d = self.multipolynome(np.poly1d([-1 / den_d, x[i + k] / den_d]), Nik_temp[i + 1][k - 1])
                tab.extend(tab_d)
        return tab
    def Np(self, i, k, x):
        """Build the derivative basis N'_{i,k} over knot vector ``x`` as a
        list of [polynomial, knot-span-index] pieces, memoising the
        lower-order N pieces in the module-global ``Nik_temp_deriv`` table.

        NOTE(review): the multipliers use ``k`` where the classic B-spline
        derivative formula uses ``k - 1``; since self.degre is already
        degree + 1 this may be deliberate -- confirm before changing.
        """
        global Nik_temp_deriv, Nik_temp
        if k == 1:
            # Order-1 basis is piecewise constant, so its derivative is 0.
            tab = [[np.poly1d(0), i + 1]]
        else:
            tab = []
            den_g = x[i + k - 1] - x[i]  # left-term denominator
            den_d = x[i + k] - x[i + 1]  # right-term denominator
            if den_g != 0:
                # memoise N_{i,k-1} on first use
                if Nik_temp_deriv[i][-1] == -1:
                    Nik_temp_deriv[i][-1] = self.N(i, k - 1, x)
                tab_b = self.multipolynome(np.poly1d([k / den_g]), Nik_temp_deriv[i][-1])
                tab.extend(tab_b)
            if den_d != 0:
                if Nik_temp_deriv[i + 1][-1] == -1:
                    Nik_temp_deriv[i + 1][-1] = self.N(i + 1, k - 1, x)
                tab_d = self.multipolynome(np.poly1d([-k / den_d]), Nik_temp_deriv[i + 1][-1])
                tab.extend(tab_d)
        return tab
def evaluateN(self, Ni, t, x):
result = 0
for Ni_temp in Ni:
if x[Ni_temp[-1] - 1] <= t <= x[Ni_temp[-1]]:
result += Ni_temp[0](t)
return result
def calculX3D(self, P, k):
n = len(P) - 1
c = []
sumC = 0
for i in range(n):
dist = np.sqrt((P[i + 1][0] - P[i][0]) ** 2 + (P[i + 1][1] - P[i][1]) ** 2 + (P[i + 1][2] - P[i][2]) ** 2)
c.append(dist)
sumC += dist
x = [0] * k
sumCI = 0
for i in range(n - k + 1):
sumCI += c[i + 1]
x.append((n - k + 2) / sumC * ((i + 1) * c[i + 1] / (n - k + 2) + sumCI))
x.extend([n - k + 2] * k)
return x
def calculX2D(self, P, k):
n = len(P) - 1
c = []
sumC = 0
for i in range(n):
dist = np.sqrt((P[i + 1][0] - P[i][0]) ** 2 + (P[i + 1][1] - P[i][1]) ** 2)
c.append(dist)
sumC += dist
x = [0] * k
sumCI = 0
for i in range(n - k + 1):
sumCI += c[i + 1]
x.append((n - k + 2) / sumC * ((i + 1) * c[i + 1] / (n - k + 2) + sumCI))
x.extend([n - k + 2] * k)
return x
    def construct3D(self, P, k, prec):  # P: control points
        """Evaluate the B-spline curve (and its derivative) defined by
        control points ``P`` and order ``k`` at ~``prec`` samples, then (when
        self.all_slices) round z to integers, fill any missing slice by
        neighbour interpolation, and average x/y per z slice.

        Returns ([P_x, P_y, P_z], [P_x_d, P_y_d, P_z_d]).
        Raises ReconstructionError when the basis sum degenerates (<= 0.05).
        Uses the module globals ``Nik_temp`` / ``Nik_temp_deriv`` as the
        memoisation tables consumed by N() and Np().
        """
        global Nik_temp, Nik_temp_deriv
        n = len(P)  # number of control points
        # knot vector from chord lengths
        x = self.calculX3D(P, k)
        # basis functions N(i,k), memoised in the global table
        Nik_temp = [[-1 for j in range(k)] for i in range(n)]
        for i in range(n):
            Nik_temp[i][-1] = self.N(i, k, x)
        Nik = []
        for i in range(n):
            Nik.append(Nik_temp[i][-1])
        # derivative basis functions N'(i,k)
        Nik_temp_deriv = [[-1] for i in range(n)]
        for i in range(n):
            Nik_temp_deriv[i][-1] = self.Np(i, k, x)
        Nikp = []
        for i in range(n):
            Nikp.append(Nik_temp_deriv[i][-1])
        # evaluate the curve at evenly spaced parameter values
        param = np.linspace(x[0], x[-1], int(round(prec)))
        P_x, P_y, P_z = [], [], []  # fitted coordinates
        P_x_d, P_y_d, P_z_d = [], [], []  # derivatives
        for i in range(len(param)):
            sum_num_x, sum_num_y, sum_num_z, sum_den = 0, 0, 0, 0
            sum_num_x_der, sum_num_y_der, sum_num_z_der, sum_den_der = 0, 0, 0, 0
            for l in range(n - k + 1):  # only the non-zero spans contribute
                if x[l + k - 1] <= param[i] < x[l + k]:
                    debut = l
                    fin = debut + k - 1
                    for j, point in enumerate(P[debut:fin + 1]):
                        j = j + debut
                        N_temp = self.evaluateN(Nik[j], param[i], x)
                        N_temp_deriv = self.evaluateN(Nikp[j], param[i], x)
                        sum_num_x += N_temp * point[0]
                        sum_num_y += N_temp * point[1]
                        sum_num_z += N_temp * point[2]
                        sum_den += N_temp
                        sum_num_x_der += N_temp_deriv * point[0]
                        sum_num_y_der += N_temp_deriv * point[1]
                        sum_num_z_der += N_temp_deriv * point[2]
                        sum_den_der += N_temp_deriv
            # NOTE(review): the span test above is strict at the right edge,
            # so the last sample (param == x[-1]) can leave sum_den == 0 --
            # confirm how this is avoided in practice.
            P_x.append(sum_num_x / sum_den)  # sum_den = 1 !
            P_y.append(sum_num_y / sum_den)
            P_z.append(sum_num_z / sum_den)
            P_x_d.append(sum_num_x_der)
            P_y_d.append(sum_num_y_der)
            P_z_d.append(sum_num_z_der)
            if sum_den <= 0.05:
                raise ReconstructionError()
        # sort every output array by increasing z
        P_x = [P_x[i] for i in np.argsort(P_z)]
        P_y = [P_y[i] for i in np.argsort(P_z)]
        P_x_d = [P_x_d[i] for i in np.argsort(P_z)]
        P_y_d = [P_y_d[i] for i in np.argsort(P_z)]
        P_z_d = [P_z_d[i] for i in np.argsort(P_z)]
        P_z = np.sort(P_z)
        # The fitted coordinates must keep the same z as the input: round z
        # to integers and average x and y per slice.
        P_x = np.array(P_x)
        P_y = np.array(P_y)
        P_x_d = np.array(P_x_d)
        P_y_d = np.array(P_y_d)
        P_z_d = np.array(P_z_d)
        if self.all_slices:
            P_z = np.array([int(np.round(P_z[i])) for i in range(0, len(P_z))])
            # not perfect but works (if "enough" points), in order to deal with missing z slices
            for i in range(min(P_z), max(P_z) + 1, 1):
                if i not in P_z:
                    # interpolate a missing slice from its two neighbours
                    P_z_temp = np.insert(P_z, np.where(P_z == i - 1)[-1][-1] + 1, i)
                    P_x_temp = np.insert(P_x, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_x[np.where(P_z == i - 1)[-1][-1]] + P_x[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_y_temp = np.insert(P_y, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_y[np.where(P_z == i - 1)[-1][-1]] + P_y[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_x_d_temp = np.insert(P_x_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_x_d[np.where(P_z == i - 1)[-1][-1]] + P_x_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_y_d_temp = np.insert(P_y_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_y_d[np.where(P_z == i - 1)[-1][-1]] + P_y_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_z_d_temp = np.insert(P_z_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_z_d[np.where(P_z == i - 1)[-1][-1]] + P_z_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_x, P_y, P_z, P_x_d, P_y_d, P_z_d = P_x_temp, P_y_temp, P_z_temp, P_x_d_temp, P_y_d_temp, P_z_d_temp
            # average x/y (and derivatives) over all samples of each z slice
            coord_mean = np.array(
                [[np.mean(P_x[P_z == i]), np.mean(P_y[P_z == i]), i] for i in range(min(P_z), max(P_z) + 1, 1)])
            P_x = coord_mean[:, :][:, 0]
            P_y = coord_mean[:, :][:, 1]
            coord_mean_d = np.array(
                [[np.mean(P_x_d[P_z == i]), np.mean(P_y_d[P_z == i]), np.mean(P_z_d[P_z == i])] for i in
                 range(min(P_z), max(P_z) + 1, 1)])
            P_z = coord_mean[:, :][:, 2]
            P_x_d = coord_mean_d[:, :][:, 0]
            P_y_d = coord_mean_d[:, :][:, 1]
            P_z_d = coord_mean_d[:, :][:, 2]
        return [P_x, P_y, P_z], [P_x_d, P_y_d, P_z_d]
    def construct2D(self, P, k, prec):  # P: control points
        """Evaluate the 2D NURBS curve (and its derivative) defined by control points P.

        :param P: list of 2D control points [[x, y], ...]
        :param k: spline order (degree + 1)
        :param prec: number of samples to evaluate along the curve
        :return: ([P_x, P_y], [P_x_d, P_y_d]) fitted coordinates and derivatives
        :raises ReconstructionError: if the basis denominator collapses (empty knot span)
        """
        global Nik_temp, Nik_temp_deriv
        n = len(P)  # number of control points - 1
        # Compute the knot vector
        x = self.calculX2D(P, k)
        # Compute the basis functions N(i,k)
        Nik_temp = [[-1 for j in range(k)] for i in range(n)]
        for i in range(n):
            Nik_temp[i][-1] = self.N(i, k, x)
        Nik = []
        for i in range(n):
            Nik.append(Nik_temp[i][-1])
        # Compute the basis function derivatives Nik,p'
        Nik_temp_deriv = [[-1] for i in range(n)]
        for i in range(n):
            Nik_temp_deriv[i][-1] = self.Np(i, k, x)
        Nikp = []
        for i in range(n):
            Nikp.append(Nik_temp_deriv[i][-1])
        # Evaluate the curve at `prec` uniformly spaced parameter values
        param = np.linspace(x[0], x[-1], prec)
        P_x, P_y = [], []  # fitted coordinates
        P_x_d, P_y_d = [], []  # derivatives
        for i in range(len(param)):
            sum_num_x, sum_num_y, sum_den = 0, 0, 0
            sum_num_x_der, sum_num_y_der, sum_den_der = 0, 0, 0
            for l in range(n - k + 1):  # only the non-zero basis functions contribute
                if x[l + k - 1] <= param[i] < x[l + k]:
                    debut = l
                    fin = debut + k - 1
                    for j, point in enumerate(P[debut:fin + 1]):
                        j = j + debut
                        N_temp = self.evaluateN(Nik[j], param[i], x)
                        N_temp_deriv = self.evaluateN(Nikp[j], param[i], x)
                        sum_num_x += N_temp * point[0]
                        sum_num_y += N_temp * point[1]
                        sum_den += N_temp
                        sum_num_x_der += N_temp_deriv * point[0]
                        sum_num_y_der += N_temp_deriv * point[1]
                        sum_den_der += N_temp_deriv
            P_x.append(sum_num_x / sum_den)  # sum_den = 1 !
            P_y.append(sum_num_y / sum_den)
            P_x_d.append(sum_num_x_der)
            P_y_d.append(sum_num_y_der)
            if sum_den <= 0.05:
                raise ReconstructionError()
        # Sort all arrays by increasing y
        P_x = [P_x[i] for i in np.argsort(P_y)]
        P_x_d = [P_x_d[i] for i in np.argsort(P_y)]
        P_y_d = [P_y_d[i] for i in np.argsort(P_y)]
        P_y = np.sort(P_y)
        # We want the fitted coordinates to share the same y values as the input
        # coordinates, so round y to integers and average x over each slice.
        P_x = np.array(P_x)
        P_y = np.array(P_y)
        P_x_d = np.array(P_x_d)
        P_y_d = np.array(P_y_d)
        if self.all_slices:
            P_y = np.array([int(np.round(P_y[i])) for i in range(0, len(P_y))])
            # not perfect but works (if "enough" points), in order to deal with missing slices
            for i in range(min(P_y), max(P_y) + 1, 1):
                if i not in P_y:
                    # Insert missing slice i, interpolating x and the derivatives
                    # as the mean of the two neighbouring samples.
                    P_y_temp = np.insert(P_y, np.where(P_y == i - 1)[-1][-1] + 1, i)
                    P_x_temp = np.insert(P_x, np.where(P_y == i - 1)[-1][-1] + 1, (
                        P_x[np.where(P_y == i - 1)[-1][-1]] + P_x[np.where(P_y == i - 1)[-1][-1] + 1]) / 2)
                    P_x_d_temp = np.insert(P_x_d, np.where(P_y == i - 1)[-1][-1] + 1, (
                        P_x_d[np.where(P_y == i - 1)[-1][-1]] + P_x_d[np.where(P_y == i - 1)[-1][-1] + 1]) / 2)
                    P_y_d_temp = np.insert(P_y_d, np.where(P_y == i - 1)[-1][-1] + 1, (
                        P_y_d[np.where(P_y == i - 1)[-1][-1]] + P_y_d[np.where(P_y == i - 1)[-1][-1] + 1]) / 2)
                    P_x, P_y, P_x_d, P_y_d = P_x_temp, P_y_temp, P_x_d_temp, P_y_d_temp
            # Average x (and the derivatives) over each integer y slice
            coord_mean = np.array([[np.mean(P_x[P_y == i]), i] for i in range(min(P_y), max(P_y) + 1, 1)])
            P_x = coord_mean[:, :][:, 0]
            coord_mean_d = np.array(
                [[np.mean(P_x_d[P_y == i]), np.mean(P_y_d[P_y == i])] for i in range(min(P_y), max(P_y) + 1, 1)])
            P_y = coord_mean[:, :][:, 1]
            P_x_d = coord_mean_d[:, :][:, 0]
            P_y_d = coord_mean_d[:, :][:, 1]
        return [P_x, P_y], [P_x_d, P_y_d]
def Tk(self, k, Q, Nik, ubar, u):
return Q[k] - self.evaluateN(Nik[-1], ubar, u) * Q[-1] - self.evaluateN(Nik[0], ubar, u) * Q[0]
def isXinY(self, y, x):
result = True
for i in range(0, len(y) - 1):
if y[i] - y[i + 1] != 0.0:
result_temp = False
for j in range(0, len(x)):
if y[i] - y[i + 1] != 0.0 and y[i] <= x[j] <= y[i + 1]:
result_temp = True
break
result = result and result_temp
return result
    def reconstructGlobalApproximation(self, P_x, P_y, P_z, p, n, w):
        """Fit a 3D NURBS of degree p with n control points to the data (P_x, P_y, P_z).

        Weighted least-squares global approximation: the first and last control
        points are clamped to the data endpoints and the interior control
        points are solved for in closed form.

        :param P_x: data point x coordinates
        :param P_y: data point y coordinates
        :param P_z: data point z coordinates
        :param p: degree of the NURBS
        :param n: desired number of control points
        :param w: weight applied to each data point
        :return: control points as [[x, y, z], ...]
        :raises ReconstructionError: if the solved control points look numerically unstable
        """
        global Nik_temp
        m = len(P_x)
        # Total chord length of the polyline through the data points
        di = 0.0
        for k in range(m - 1):
            di += np.sqrt((P_x[k + 1] - P_x[k]) ** 2 + (P_y[k + 1] - P_y[k]) ** 2 + (P_z[k + 1] - P_z[k]) ** 2)
        ubar = [0]
        for k in range(m - 1):
            # ubar.append((k+1)/float(m)) # uniform method
            # ubar.append(ubar[-1]+abs((P_x[k+1]-P_x[k])**2 + (P_y[k+1]-P_y[k])**2 + (P_z[k+1]-P_z[k])**2)/di) # chord length method
            ubar.append(ubar[-1] + np.sqrt((P_x[k + 1] - P_x[k]) ** 2 + (P_y[k + 1] - P_y[k]) ** 2 + (
                P_z[k + 1] - P_z[k]) ** 2) / di)  # centripetal method
        # the knot vector should reflect the distribution of ubar
        d = (m + 1) / (n - p + 1)
        u_nonuniform = [0.0] * p
        for j in range(n - p):
            i = int((j + 1) * d)
            alpha = (j + 1) * d - i
            u_nonuniform.append((1 - alpha) * ubar[i - 1] + alpha * ubar[i])
        u_nonuniform.extend([1.0] * p)
        # the knot vector can also be uniformly distributed
        u_uniform = [0.0] * p
        for j in range(n - p):
            u_uniform.append((float(j) + 1) / float(n - p))
        u_uniform.extend([1.0] * p)
        # The only condition for NURBS to work here is that there is at least one point P_.. in each knot span.
        # The uniform knot vector does not ensure this condition, while the nonuniform knot vector ensures it
        # but lacks uniformity in case of variable density of points.
        # We need a compromise between the two methods: the knot vector must be as uniform as possible, with
        # at least one point between each pair of knots.
        # Algorithm:
        #   knotVector = uniformKnotVector
        #   while isKnotSpaceEmpty:
        #       knotVector += gamma * (nonuniformKnotVector - uniformKnotVector)
        #       # where gamma is a ratio [0,1] multiplier of an integer: 1/gamma = int
        u_uniform = np.array(u_uniform)
        u_nonuniform = np.array(u_nonuniform)
        u = np.array(u_uniform, copy=True)
        gamma = 1.0 / 10.0
        n_iter = 0
        # Blend the uniform knot vector toward the nonuniform one until every
        # knot span contains at least one ubar (iteration count is bounded).
        while not self.isXinY(y=u, x=ubar) and n_iter <= 10000:
            u += gamma * (u_nonuniform - u_uniform)
            n_iter += 1
        # Basis functions N(i,p) on the final knot vector
        Nik_temp = [[-1 for j in range(p)] for i in range(n)]  # noqa
        for i in range(n):
            Nik_temp[i][-1] = self.N(i, p, u)
        Nik = []
        for i in range(n):
            Nik.append(Nik_temp[i][-1])
        # Rational basis matrix R, normalized per parameter value
        R = []
        for k in range(m - 1):
            Rtemp = []
            den = 0
            for Ni in Nik:
                den += self.evaluateN(Ni, ubar[k], u)
            for i in range(n - 1):
                Rtemp.append(self.evaluateN(Nik[i], ubar[k], u) / den)
            R.append(Rtemp)
        R = np.matrix(R)
        # create W diagonal matrix of point weights
        W = np.diag(w[0:-1])
        # denominators per ubar value
        denU = []
        for k in range(m - 1):
            temp = 0
            for Ni in Nik:
                temp += self.evaluateN(Ni, ubar[k], u)
            denU.append(temp)
        # Right-hand-side vectors (endpoint contributions removed via Tk)
        Tx = []
        for i in range(n - 1):
            somme = 0
            for k in range(m - 1):
                somme += w[k] * self.evaluateN(Nik[i], ubar[k], u) * self.Tk(k, P_x, Nik, ubar[k], u) / denU[k]
            Tx.append(somme)
        Tx = np.matrix(Tx)
        Ty = []
        for i in range(n - 1):
            somme = 0
            for k in range(m - 1):
                somme += w[k] * self.evaluateN(Nik[i], ubar[k], u) * self.Tk(k, P_y, Nik, ubar[k], u) / denU[k]
            Ty.append(somme)
        Ty = np.matrix(Ty)
        Tz = []
        for i in range(n - 1):
            somme = 0
            for k in range(m - 1):
                somme += w[k] * self.evaluateN(Nik[i], ubar[k], u) * self.Tk(k, P_z, Nik, ubar[k], u) / denU[k]
            Tz.append(somme)
        Tz = np.matrix(Tz)
        # Weighted normal equations, solved with the pseudo-inverse for stability
        P_xb = np.linalg.pinv(R.T * W * R) * Tx.T
        P_yb = np.linalg.pinv(R.T * W * R) * Ty.T
        P_zb = np.linalg.pinv(R.T * W * R) * Tz.T
        # Clamp first and last control points to the data endpoints
        P_xb[0], P_yb[0], P_zb[0] = P_x[0], P_y[0], P_z[0]
        P_xb[-1], P_yb[-1], P_zb[-1] = P_x[-1], P_y[-1], P_z[-1]
        # Sanity check: reject solutions whose control points spread much wider
        # than the data itself, a symptom of numerical instability.
        from numpy import std
        std_factor = 10.0
        std_Px, std_Py, std_Pz, std_x, std_y, std_z = std(P_xb), std(P_yb), std(P_zb), std(np.array(P_x)), std(
            np.array(P_y)), std(np.array(P_z))
        if std_x >= 0.1 and std_y >= 0.1 and std_z >= 0.1 and (
                std_Px > std_factor * std_x or std_Py > std_factor * std_y or std_Pz > std_factor * std_z):
            raise ReconstructionError()
        P = [[P_xb[i, 0], P_yb[i, 0], P_zb[i, 0]] for i in range(len(P_xb))]
        return P
def reconstructGlobalApproximation2D(self, P_x, P_y, p, n, w):
# p = degre de la NURBS
# n = nombre de points de controle desires
# w is the weigth on each point P
global Nik_temp
m = len(P_x)
# Calcul des chords
di = 0.0
for k in range(m - 1):
di += np.sqrt((P_x[k + 1] - P_x[k]) ** 2 + (P_y[k + 1] - P_y[k]) ** 2)
ubar = [0]
for k in range(m - 1):
# ubar.append((k+1)/float(m)) # uniform method
# ubar.append(ubar[-1]+abs((P_x[k+1]-P_x[k])**2 + (P_y[k+1]-P_y[k])**2 + (P_z[k+1]-P_z[k])**2)/di) # chord length method
ubar.append(
ubar[-1] + np.sqrt((P_x[k + 1] - P_x[k]) ** 2 + (P_y[k + 1] - P_y[k]) ** 2) / di) # centripetal method
# the knot vector should reflect the distribution of ubar
d = (m + 1) / (n - p + 1)
u_nonuniform = [0.0] * p
for j in range(n - p):
i = int((j + 1) * d)
alpha = (j + 1) * d - i
u_nonuniform.append((1 - alpha) * ubar[i - 1] + alpha * ubar[i])
u_nonuniform.extend([1.0] * p)
# the knot vector can also is uniformly distributed
u_uniform = [0.0] * p
for j in range(n - p):
u_uniform.append((float(j) + 1) / float(n - p))
u_uniform.extend([1.0] * p)
# The only condition for NURBS to work here is that there is at least one point P_.. in each knot space.
# The uniform knot vector does not ensure this condition while the nonuniform knot vector ensure it but lack of uniformity in case of variable density of points.
# We need a compromise between the two methods: the knot vector must be as uniform as possible, with at least one point between each pair of knots.
# New algo:
# knotVector = uniformKnotVector
# while isKnotSpaceEmpty:
# knotVector += gamma * (nonuniformKnotVector - nonuniformKnotVector)
# # where gamma is a ratio [0,1] multiplier of an integer: 1/gamma = int
u_uniform = np.array(u_uniform)
u_nonuniform = np.array(u_nonuniform)
u = np.array(u_uniform, copy=True)
gamma = 1.0 / 10.0
n_iter = 0
while not self.isXinY(y=u, x=ubar) and n_iter <= 10000:
u += gamma * (u_nonuniform - u_uniform)
n_iter += 1
Nik_temp = [[-1 for j in range(p)] for i in range(n)] # noqa
for i in range(n):
Nik_temp[i][-1] = self.N(i, p, u)
Nik = []
for i in range(n):
Nik.append(Nik_temp[i][-1])
R = []
for k in range(m - 1):
Rtemp = []
den = 0
for Ni in Nik:
den += self.evaluateN(Ni, ubar[k], u)
for i in range(n - 1):
Rtemp.append(self.evaluateN(Nik[i], ubar[k], u) / den)
R.append(Rtemp)
R = np.matrix(R)
# create W diagonal matrix
W = np.diag(w[0:-1])
# calcul des denominateurs par ubar
denU = []
for k in range(m - 1):
temp = 0
for Ni in Nik:
temp += self.evaluateN(Ni, ubar[k], u)
denU.append(temp)
Tx = []
for i in range(n - 1):
somme = 0
for k in range(m - 1):
somme += w[k] * self.evaluateN(Nik[i], ubar[k], u) * self.Tk(k, P_x, Nik, ubar[k], u) / denU[k]
Tx.append(somme)
Tx = np.matrix(Tx)
Ty = []
for i in range(n - 1):
somme = 0
for k in range(m - 1):
somme += w[k] * self.evaluateN(Nik[i], ubar[k], u) * self.Tk(k, P_y, Nik, ubar[k], u) / denU[k]
Ty.append(somme)
Ty = np.matrix(Ty)
P_xb = (R.T * W * R).I * Tx.T
P_yb = (R.T * W * R).I * Ty.T
# Modification of first and last control points
P_xb[0], P_yb[0] = P_x[0], P_y[0]
P_xb[-1], P_yb[-1] = P_x[-1], P_y[-1]
# At this point, we need to check if the control points are in a correct range or if there were instability.
# Typically, control points should be far from the data points. One way to do so is to ensure that the
from numpy import std
std_factor = 10.0
std_Px, std_Py, std_x, std_y = std(P_xb), std(P_yb), std(np.array(P_x)), std(np.array(P_y))
if std_x >= 0.1 and std_y >= 0.1 and (std_Px > std_factor * std_x or std_Py > std_factor * std_y):
raise ReconstructionError()
P = [[P_xb[i, 0], P_yb[i, 0]] for i in range(len(P_xb))]
return P
def reconstructGlobalInterpolation(self, P_x, P_y, P_z, p): # now in 3D
global Nik_temp
n = 13
l = len(P_x)
newPx = P_x[::int(np.round(l / (n - 1)))]
newPy = P_y[::int(np.round(l / (n - 1)))]
newPz = P_y[::int(np.round(l / (n - 1)))]
newPx.append(P_x[-1])
newPy.append(P_y[-1])
newPz.append(P_z[-1])
n = len(newPx)
# Calcul du vecteur de noeuds
di = 0
for k in range(n - 1):
di += np.sqrt(
(newPx[k + 1] - newPx[k]) ** 2 + (newPy[k + 1] - newPy[k]) ** 2 + (newPz[k + 1] - newPz[k]) ** 2)
u = [0] * p
ubar = [0]
for k in range(n - 1):
ubar.append(ubar[-1] + np.sqrt(
(newPx[k + 1] - newPx[k]) ** 2 + (newPy[k + 1] - newPy[k]) ** 2 + (newPz[k + 1] - newPz[k]) ** 2) / di)
for j in range(n - p):
sumU = 0
for i in range(p):
sumU = sumU + ubar[j + i]
u.append(sumU / p)
u.extend([1] * p)
# Construction des fonctions basiques
Nik_temp = [[-1 for j in range(p)] for i in range(n)]
for i in range(n):
Nik_temp[i][-1] = self.N(i, p, u)
Nik = []
for i in range(n):
Nik.append(Nik_temp[i][-1])
# Construction des matrices
M = []
for i in range(n):
ligneM = []
for j in range(n):
ligneM.append(self.evaluateN(Nik[j], ubar[i], u))
M.append(ligneM)
M = np.matrix(M)
# Matrice des points interpoles
Qx = np.matrix(newPx).T
Qy = np.matrix(newPy).T
Qz = np.matrix(newPz).T
# Calcul des points de controle
P_xb = M.I * Qx
P_yb = M.I * Qy
P_zb = M.I * Qz
return [[P_xb[i, 0], P_yb[i, 0], P_zb[i, 0]] for i in range(len(P_xb))]
    def compute_curve_from_parametrization(self, P, k, x, Nik, Nikp, param):
        """Evaluate a 3D NURBS curve and its derivative at the given parameter values.

        :param P: control points [[x, y, z], ...]
        :param k: spline order (degree + 1)
        :param x: knot vector
        :param Nik: basis functions
        :param Nikp: basis function derivatives
        :param param: parameter values at which to evaluate the curve
        :return: (P_x, P_y, P_z, P_x_d, P_y_d, P_z_d) as numpy arrays, sorted by increasing z
        :raises ReconstructionError: if the basis denominator collapses (empty knot span)
        """
        n = len(P)  # number of control points - 1
        P_x, P_y, P_z = [], [], []  # fitted coordinates
        P_x_d, P_y_d, P_z_d = [], [], []  # derivatives
        for i in range(len(param)):
            sum_num_x, sum_num_y, sum_num_z, sum_den = 0, 0, 0, 0
            sum_num_x_der, sum_num_y_der, sum_num_z_der, sum_den_der = 0, 0, 0, 0
            for l in range(n - k + 1):  # only the non-zero basis functions contribute
                if x[l + k - 1] <= param[i] < x[l + k]:
                    debut = l  # TODO: can yield UnboundLocalError: local variable 'debut' referenced before assignment
                    fin = debut + k - 1
                    for j, point in enumerate(P[debut:fin + 1]):
                        j = j + debut
                        N_temp = self.evaluateN(Nik[j], param[i], x)
                        N_temp_deriv = self.evaluateN(Nikp[j], param[i], x)
                        sum_num_x += N_temp * point[0]
                        sum_num_y += N_temp * point[1]
                        sum_num_z += N_temp * point[2]
                        sum_den += N_temp
                        sum_num_x_der += N_temp_deriv * point[0]
                        sum_num_y_der += N_temp_deriv * point[1]
                        sum_num_z_der += N_temp_deriv * point[2]
                        sum_den_der += N_temp_deriv
            P_x.append(sum_num_x / sum_den)  # sum_den = 1 !
            P_y.append(sum_num_y / sum_den)
            P_z.append(sum_num_z / sum_den)
            P_x_d.append(sum_num_x_der)
            P_y_d.append(sum_num_y_der)
            P_z_d.append(sum_num_z_der)
            if sum_den <= 0.05:
                raise ReconstructionError()
        # Sort all arrays by increasing z
        P_x = [P_x[i] for i in np.argsort(P_z)]
        P_y = [P_y[i] for i in np.argsort(P_z)]
        P_x_d = [P_x_d[i] for i in np.argsort(P_z)]
        P_y_d = [P_y_d[i] for i in np.argsort(P_z)]
        P_z_d = [P_z_d[i] for i in np.argsort(P_z)]
        P_z = np.sort(P_z)
        # We want the fitted coordinates to share the same z values as the input
        # coordinates; callers round z to integers and average x and y per slice.
        P_x = np.array(P_x)
        P_y = np.array(P_y)
        P_x_d = np.array(P_x_d)
        P_y_d = np.array(P_y_d)
        P_z_d = np.array(P_z_d)
        return P_x, P_y, P_z, P_x_d, P_y_d, P_z_d
    def construct3D_uniform(self, P, k, prec):  # P: control points
        """Evaluate the 3D NURBS curve defined by control points P, with samples
        approximately equidistant along the curve's arc length.

        The curve is first sampled uniformly in parameter space, its cumulative
        arc length is measured, then the parametrization is warped so the final
        samples are uniformly spaced along the curve itself.

        :param P: list of 3D control points [[x, y, z], ...]
        :param k: spline order (degree + 1)
        :param prec: number of samples to evaluate along the curve
        :return: ([P_x, P_y, P_z], [P_x_d, P_y_d, P_z_d]) coordinates and derivatives
        """
        global Nik_temp, Nik_temp_deriv
        n = len(P)  # number of control points - 1
        # Knot vector
        x = self.calculX3D(P, k)
        # Basis functions N(i,k)
        Nik_temp = [[-1 for j in range(k)] for i in range(n)]
        for i in range(n):
            Nik_temp[i][-1] = self.N(i, k, x)
        Nik = []
        for i in range(n):
            Nik.append(Nik_temp[i][-1])
        # Basis function derivatives Nik,p'
        Nik_temp_deriv = [[-1] for i in range(n)]
        for i in range(n):
            Nik_temp_deriv[i][-1] = self.Np(i, k, x)
        Nikp = []
        for i in range(n):
            Nikp.append(Nik_temp_deriv[i][-1])
        # First pass: evaluate the curve with a uniform parametrization
        param = np.linspace(x[0], x[-1], prec)
        P_x, P_y, P_z, P_x_d, P_y_d, P_z_d = self.compute_curve_from_parametrization(P, k, x, Nik, Nikp, param)
        # Measure cumulative (normalized) arc length of the first pass
        centerline = Centerline(P_x, P_y, P_z, P_x_d, P_y_d, P_z_d)
        distances_between_points = centerline.progressive_length[1:]
        range_points = np.linspace(0.0, 1.0, prec)
        dist_curved = np.zeros(prec)
        for i in range(1, prec):
            dist_curved[i] = dist_curved[i - 1] + distances_between_points[i - 1] / centerline.length
        # Second pass: re-evaluate with parameters warped toward uniform arc length
        param = x[0] + (x[-1] - x[0]) * np.interp(range_points, dist_curved, range_points)
        P_x, P_y, P_z, P_x_d, P_y_d, P_z_d = self.compute_curve_from_parametrization(P, k, x, Nik, Nikp, param)
        if self.all_slices:
            # Round z to integer slices; fill missing slices by averaging the
            # two neighbouring samples (same scheme as construct2D).
            P_z = np.array([int(np.round(P_z[i])) for i in range(0, len(P_z))])
            # not perfect but works (if "enough" points), in order to deal with missing z slices
            for i in range(min(P_z), max(P_z) + 1, 1):
                if i not in P_z:
                    P_z_temp = np.insert(P_z, np.where(P_z == i - 1)[-1][-1] + 1, i)
                    P_x_temp = np.insert(P_x, np.where(P_z == i - 1)[-1][-1] + 1,
                                         (P_x[np.where(P_z == i - 1)[-1][-1]] + P_x[
                                             np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_y_temp = np.insert(P_y, np.where(P_z == i - 1)[-1][-1] + 1,
                                         (P_y[np.where(P_z == i - 1)[-1][-1]] + P_y[
                                             np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_x_d_temp = np.insert(P_x_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_x_d[np.where(P_z == i - 1)[-1][-1]] + P_x_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_y_d_temp = np.insert(P_y_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_y_d[np.where(P_z == i - 1)[-1][-1]] + P_y_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_z_d_temp = np.insert(P_z_d, np.where(P_z == i - 1)[-1][-1] + 1, (
                        P_z_d[np.where(P_z == i - 1)[-1][-1]] + P_z_d[np.where(P_z == i - 1)[-1][-1] + 1]) / 2)
                    P_x, P_y, P_z, P_x_d, P_y_d, P_z_d = P_x_temp, P_y_temp, P_z_temp, P_x_d_temp, P_y_d_temp, P_z_d_temp
            # Average x, y (and the derivatives) over each integer z slice
            coord_mean = np.array(
                [[np.mean(P_x[P_z == i]), np.mean(P_y[P_z == i]), i] for i in range(min(P_z), max(P_z) + 1, 1)])
            P_x = coord_mean[:, :][:, 0]
            P_y = coord_mean[:, :][:, 1]
            coord_mean_d = np.array(
                [[np.mean(P_x_d[P_z == i]), np.mean(P_y_d[P_z == i]), np.mean(P_z_d[P_z == i])] for i in
                 range(min(P_z), max(P_z) + 1, 1)])
            P_z = coord_mean[:, :][:, 2]
            P_x_d = coord_mean_d[:, :][:, 0]
            P_y_d = coord_mean_d[:, :][:, 1]
            P_z_d = coord_mean_d[:, :][:, 2]
            # check if slice should be in the result, based on self.P_z
            indexes_to_remove = []
            for i, ind_z in enumerate(P_z):
                if ind_z not in self.P_z:
                    indexes_to_remove.append(i)
            P_x = np.delete(P_x, indexes_to_remove)
            P_y = np.delete(P_y, indexes_to_remove)
            P_z = np.delete(P_z, indexes_to_remove)
            P_x_d = np.delete(P_x_d, indexes_to_remove)
            P_y_d = np.delete(P_y_d, indexes_to_remove)
            P_z_d = np.delete(P_z_d, indexes_to_remove)
        return [P_x, P_y, P_z], [P_x_d, P_y_d, P_z_d]
def getSize(x, y, z, file_name=None):
    """Return the physical length of the centerline defined by points (x, y, z).

    :param x, y, z: centerline point coordinates (voxel space)
    :param file_name: optional image file; when given, its pixel dimensions are
        used to scale each axis, otherwise isotropic 1.0 mm spacing is assumed
    :return: total length of the piecewise-linear centerline
    """
    from math import sqrt
    # get pixdim
    if file_name is not None:
        im_seg = Image(file_name)
        p1, p2, p3 = im_seg.dim[0:3]
    else:
        p1, p2, p3 = 1.0, 1.0, 1.0
    # compute size of centerline
    s = 0
    for i in range(len(x) - 1):
        # BUGFIX: the z term was previously (p3 * dz**2), i.e. the scaled z
        # difference was not squared like the x and y terms; square it so the
        # Euclidean distance is computed correctly.
        s += sqrt((p1 * (x[i + 1] - x[i]))**2 + (p2 * (y[i + 1] - y[i]))**2 + (p3 * (z[i + 1] - z[i]))**2)
    logger.debug('Centerline size: {}'.format(s))
    return s
def b_spline_nurbs(x, y, z, fname_centerline=None, degree=3, point_number=3000, nbControl=-1, verbose=1,
                   all_slices=True, path_qc='.'):
    """Fit a centerline with a B-spline (NURBS) approximation, in 3D or 2D.

    :param x: centerline x coordinates
    :param y: centerline y coordinates
    :param z: centerline z coordinates, or None for a 2D fit
    :param fname_centerline: optional image file used to scale the centerline length
    :param degree: degree of the spline
    :param point_number: number of points to sample along the fitted curve
    :param nbControl: number of control points; -1 derives it from the centerline length
    :param verbose: 2 saves a QC figure of the fit and control points
    :param all_slices: passed through to NURBS (one output point per integer slice)
    :param path_qc: directory where the QC figure is saved
    :return: (x_fit, y_fit[, z_fit], x_deriv, y_deriv[, z_deriv], error_curve)
    """
    from math import log
    twodim = False
    if z is None:
        twodim = True
    """x.reverse()
    y.reverse()
    z.reverse()"""
    logger.info('Fitting centerline using B-spline approximation')
    if not twodim:
        data = [[x[n], y[n], z[n]] for n in range(len(x))]
    else:
        data = [[x[n], y[n]] for n in range(len(x))]
    if nbControl == -1:
        # Empirical rule: scale the number of control points with the log of
        # the centerline length.
        centerlineSize = getSize(x, y, z, fname_centerline)
        nbControl = 30 * log(centerlineSize, 10) - 42
        nbControl = np.round(nbControl)
    nurbs = NURBS(degree, point_number, data, False, nbControl, verbose, all_slices=all_slices, twodim=twodim)
    if not twodim:
        P = nurbs.getCourbe3D()
        x_fit = P[0]
        y_fit = P[1]
        z_fit = P[2]
        Q = nurbs.getCourbe3D_deriv()
        x_deriv = Q[0]
        y_deriv = Q[1]
        z_deriv = Q[2]
    else:
        P = nurbs.getCourbe2D()
        x_fit = P[0]
        y_fit = P[1]
        Q = nurbs.getCourbe2D_deriv()
        x_deriv = Q[0]
        y_deriv = Q[1]
    """x_fit = x_fit[::-1]
    y_fit = x_fit[::-1]
    z_fit = x_fit[::-1]
    x_deriv = x_fit[::-1]
    y_deriv = x_fit[::-1]
    z_deriv = x_fit[::-1]"""
    if verbose == 2:
        # TODO qc
        # Save a QC figure: raw points, fitted curve and control points.
        PC = nurbs.getControle()
        PC_x = [p[0] for p in PC]
        PC_y = [p[1] for p in PC]
        if not twodim:
            PC_z = [p[2] for p in PC]
        import matplotlib
        matplotlib.use('Agg')  # prevent display figure
        import matplotlib.pyplot as plt
        if not twodim:
            plt.figure(1)
            #ax = plt.subplot(211)
            plt.subplot(211)
            plt.plot(z, x, 'r.')
            plt.plot(z_fit, x_fit)
            plt.plot(PC_z, PC_x, 'go')
            # ax.set_aspect('equal')
            plt.xlabel('z')
            plt.ylabel('x')
            plt.legend(["centerline", "NURBS", "control points"])
            #ay = plt.subplot(212)
            plt.subplot(212)
            plt.plot(z, y, 'r.')
            plt.plot(z_fit, y_fit)
            plt.plot(PC_z, PC_y, 'go')
            # ay.set_aspect('equal')
            plt.xlabel('z')
            plt.ylabel('y')
            plt.legend(["centerline", "NURBS", "control points"],loc=4)
            # plt.show()
        else:
            plt.figure(1)
            plt.plot(y, x, 'r.')
            plt.plot(y_fit, x_fit)
            plt.plot(PC_y, PC_x, 'go')
            # ax.set_aspect('equal')
            plt.xlabel('y')
            plt.ylabel('x')
            plt.legend(["centerline", "NURBS", "control points"])
            # plt.show()
        plt.savefig(os.path.join(path_qc, 'fig_b_spline_nurbs.png'))
        plt.close()
    if not twodim:
        return x_fit, y_fit, z_fit, x_deriv, y_deriv, z_deriv, nurbs.error_curve_that_last_worked
    else:
        return x_fit, y_fit, x_deriv, y_deriv, nurbs.error_curve_that_last_worked
|
import datetime
import glob
import random
import pandas as pd
# Load modules
from psychopy import core, event, gui, sound, visual
|
"""Defines the class that handles ingest trigger rules"""
from __future__ import unicode_literals
import logging
from django.db import transaction
from data.data.data import Data
from data.data.json.data_v6 import convert_data_to_v6_json
from data.data.value import FileValue
from ingest.models import IngestEvent, Scan, Strike
from messaging.manager import CommandMessageManager
from recipe.messages.create_recipes import create_recipes_messages
from recipe.models import RecipeType, RecipeTypeRevision
from trigger.models import TriggerEvent
logger = logging.getLogger(__name__)
RECIPE_TYPE = 'RECIPE'
class IngestRecipeHandler(object):
    """Handles ingest trigger rules: kicks off recipes when source files are ingested."""

    def __init__(self):
        """Constructor
        """
        super(IngestRecipeHandler, self).__init__()

    def process_manual_ingested_source_file(self, ingest_id, source_file, when, recipe_type_id):
        """Processes a manual ingest where a strike or scan is not involved. All database
        changes are made in an atomic transaction

        :param ingest_id: ID of the ingest record
        :type ingest_id: int
        :param source_file: The source file that was ingested
        :type source_file: :class:`source.models.SourceFile`
        :param when: When the source file was ingested
        :type when: :class:`datetime.datetime`
        :param recipe_type_id: id of the Recipe type to kick off
        :type recipe_type_id: int
        """
        recipe_type = RecipeType.objects.get(id=recipe_type_id)
        if recipe_type and recipe_type.is_active:
            recipe_data = Data()
            # Assuming one input per recipe, so pull the first defined input
            input_name = recipe_type.get_definition().get_input_keys()[0]
            recipe_data.add_value(FileValue(input_name, [source_file.id]))
            event = self._create_trigger_event(None, source_file, when)
            ingest_event = self._create_ingest_event(ingest_id, None, source_file, when)
            messages = create_recipes_messages(recipe_type.name, recipe_type.revision_num,
                                               convert_data_to_v6_json(recipe_data).get_dict(),
                                               event.id, ingest_event.id)
            CommandMessageManager().send_messages(messages)
        else:
            logger.info('No recipe type found for id %s or recipe type is inactive' % recipe_type_id)

    def process_ingested_source_file(self, ingest_id, source, source_file, when):
        """Processes the given ingested source file by kicking off its recipe.
        All database changes are made in an atomic transaction.

        :param ingest_id: ID of the ingest record
        :type ingest_id: int
        :param source: The strike or scan that triggered the ingest
        :type source: :class:`ingest.models.Strike` or :class:`ingest.models.Scan`
        :param source_file: The source file that was ingested
        :type source_file: :class:`source.models.SourceFile`
        :param when: When the source file was ingested
        :type when: :class:`datetime.datetime`
        """
        # Create the recipe handler associated with the ingest strike/scan
        source_recipe_config = source.configuration['recipe']
        recipe_name = source_recipe_config['name']
        # Fall back to the latest revision when the configuration pins none
        recipe_revision = source_recipe_config['revision_num'] if 'revision_num' in source_recipe_config else None
        recipe_type = RecipeType.objects.get(name=recipe_name)
        if recipe_revision:
            recipe_type = RecipeTypeRevision.objects.get_revision(recipe_name, recipe_revision).recipe_type
        if len(recipe_type.get_definition().get_input_keys()) == 0:
            logger.info('No inputs defined for recipe %s. Recipe will not be run.' % recipe_name)
            return
        if recipe_type and recipe_type.is_active:
            # Assuming one input per recipe, so pull the first defined input you find
            recipe_data = Data()
            input_name = recipe_type.get_definition().get_input_keys()[0]
            recipe_data.add_value(FileValue(input_name, [source_file.id]))
            event = self._create_trigger_event(source, source_file, when)
            ingest_event = self._create_ingest_event(ingest_id, source, source_file, when)
            # This can cause a race condition with a slow DB.
            messages = create_recipes_messages(recipe_type.name, recipe_type.revision_num,
                                               convert_data_to_v6_json(recipe_data).get_dict(),
                                               event.id, ingest_event.id)
            CommandMessageManager().send_messages(messages)
        else:
            logger.info('No recipe type found for %s %s or recipe type is inactive' % (recipe_name, recipe_revision))

    def _create_ingest_event(self, ingest_id, source, source_file, when):
        """Creates in the database and returns an ingest event model for the given ingested source file

        :param ingest_id: ID of the ingest record (used when no strike/scan source is given)
        :type ingest_id: int
        :param source: The strike or scan that triggered the ingest, or None for a manual ingest
        :type source: :class:`ingest.models.Strike` or :class:`ingest.models.Scan` or None
        :param source_file: The source file that was ingested
        :type source_file: :class:`source.models.SourceFile`
        :param when: When the source file was ingested
        :type when: :class:`datetime.datetime`
        :returns: The new ingest event
        :rtype: :class:`ingest.models.IngestEvent`
        """
        event = None
        description = {'version': '1.0', 'file_id': source_file.id, 'file_name': source_file.file_name}
        with transaction.atomic():
            if type(source) is Strike:
                event = IngestEvent.objects.create_strike_ingest_event(ingest_id, source, description, when)
            elif type(source) is Scan:
                event = IngestEvent.objects.create_scan_ingest_event(ingest_id, source, description, when)
            elif ingest_id:
                event = IngestEvent.objects.create_manual_ingest_event(ingest_id, description, when)
            else:
                # No event is created in this case; callers receive None
                logger.info('No valid source event for source file %s', source_file.file_name)
        return event

    def _create_trigger_event(self, source, source_file, when):
        """Creates in the database and returns a trigger event model for the given ingested source file

        :param source: The source of the ingest (strike, scan, or None for manual)
        :type source: :class:`ingest.models.Strike` or :class:`ingest.models.Scan` or None
        :param source_file: The source file that was ingested
        :type source_file: :class:`source.models.SourceFile`
        :param when: When the source file was ingested
        :type when: :class:`datetime.datetime`
        :returns: The new trigger event
        :rtype: :class:`trigger.models.TriggerEvent`
        """
        description = {'version': '1.0', 'file_id': source_file.id, 'file_name': source_file.file_name}
        event_type = ''
        if type(source) is Strike:
            event_type = 'STRIKE_INGEST'
        elif type(source) is Scan:
            event_type = 'SCAN_INGEST'
        else:
            event_type = 'MANUAL_INGEST'
        with transaction.atomic():
            event = TriggerEvent.objects.create_trigger_event(event_type, None, description, when)
        return event
|
"""Raspberry Pi Face Recognition Attendence System Configuration
import cv2
import glob
import os
import sys
import select
import config
import face
import Database_1
def is_letter_input(letter):
# Utility function to check if a specific character is available on stdin.
# Comparison is case insensitive.
if select.select([sys.stdin,],[],[],0.0)[0]:
input_char = sys.stdin.read(1)
return input_char.lower() == letter.lower()
return False
if __name__ == '__main__':
    count = 0
    # Load training data into model
    print 'Loading training data...'
    model = cv2.createLBPHFaceRecognizer()
    model.load(config.Training_Xml)
    print 'Training data loaded!'
    # Initialize camera and box.
    camera = config.get_camera()
    # Move box to locked position.
    print 'Running box...'
    #print 'Press button to lock (if unlocked), or unlock if the correct face is detected.'
    print 'Press Ctrl-C to quit.'
    while True:
        # Check if capture should be made.
        # Check if button is pressed ('c' key on stdin).
        if is_letter_input('c'):
            print 'Button pressed, looking for face...'
            # Check for the positive face and unlock if found.
            image = camera.read()
            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                print 'Could not detect single face! Check the image in capture.pgm' \
                    ' to see what was captured and try again with only one face visible.'
                continue
            x, y, w, h = result
            # Crop and resize image to face.
            crop = face.resize(face.crop(image, x, y, w, h))
            # Test face against model; lower confidence means a closer match.
            label, confidence = model.predict(crop)
            if confidence < config.Threshold:
                print 'Recognized face!'
            else:
                print 'Did not recognize face!'
            # Record the recognition attempt in the attendance database.
            print 'name = {0} with confidence {1}'.format(label,confidence)
            Database_1.data_entry(label)
|
from flask import Flask
from flask import request
from generate_conditional import conditional
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Generate a conditional text completion for the `input` query parameter.

    Query parameters:
        input: the raw prompt text (None when absent -- passed through as-is)
        top_k: sampling cutoff, defaults to 100; coerced to int by Flask
    """
    raw_text = request.args.get('input')  # renamed local: `input` shadowed the builtin
    # Flask's get(key, default, type) already applies the int conversion, so
    # the previous extra int(top_k) call was redundant.
    top_k = request.args.get('top_k', 100, int)
    return conditional(raw_text_input=raw_text, top_k=top_k)
@app.route('/ping')
def hello_test():
    """Liveness probe: always answers with a constant body."""
    response_body = 'pong'
    return response_body
def shutdown_server():
    """Stop the Werkzeug development server serving the current request.

    Raises RuntimeError when the app is not running under the Werkzeug
    development server (e.g. behind a production WSGI server).
    """
    shutdown_func = request.environ.get('werkzeug.server.shutdown')
    if shutdown_func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    shutdown_func()
@app.route('/shutdown')
def shutdown():
    """Trigger a server shutdown, then acknowledge it to the client."""
    shutdown_server()
    acknowledgement = 'Server shutting down...'
    return acknowledgement
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is intended for development only.
    app.run(debug=True,host='0.0.0.0')
"""BC alias for previous msearch_daemon command name"""
import logging
from mjolnir.utilities.kafka_msearch_daemon import arg_parser, main
if __name__ == '__main__':
    logging.basicConfig()
    # Copy the parsed CLI namespace into a plain dict and forward it to the
    # real daemon entry point as keyword arguments.
    kwargs = dict(vars(arg_parser().parse_args()))
    main(**kwargs)
|
#!/usr/bin/env python
###############################################################################
## Databrowse: An Extensible Data Management Platform ##
## Copyright (C) 2012-2016 Iowa State University Research Foundation, Inc. ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are ##
## met: ##
## 1. Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## 2. Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## 3. Neither the name of the copyright holder nor the names of its ##
## contributors may be used to endorse or promote products derived from ##
## this software without specific prior written permission. ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ##
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ##
## TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A ##
## PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER ##
## OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ##
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ##
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ##
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ##
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
## ##
## This material is based on work supported by the Air Force Research ##
## Laboratory under Contract #FA8650-10-D-5210, Task Order #023 and ##
## performed at Iowa State University. ##
## ##
## DISTRIBUTION A. Approved for public release: distribution unlimited; ##
## 19 Aug 2016; 88ABW-2016-4051. ##
## ##
## This material is based on work supported by NASA under Contract ##
## NNX16CL31C and performed by Iowa State University as a subcontractor ##
## to TRI Austin. ##
## ##
## Approved for public release by TRI Austin: distribution unlimited; ##
## 01 June 2018; by Carl W. Magnuson (NDE Division Director). ##
###############################################################################
""" plugins/renderers/db_file_ops.py - File Operations Utility Plugin """
import os
import os.path
from databrowse.support.renderer_support import renderer_class
import re
import json
import shutil
import databrowse.support.zipstream as zipstream
class db_file_ops(renderer_class):
    """ File Operations Utility Plugin

    Dispatches filesystem operations requested via the 'operation' form
    field -- upload, download (as a streamed zip), newdir, rename, and
    delete -- always confined to paths inside the configured dataroot.
    """
    _namespace_uri = "http://thermal.cnde.iastate.edu/databrowse/fileops"
    _namespace_local = "fileops"
    _default_content_mode = "raw"
    _default_style_mode = "list"
    _default_recursion_depth = 2

    def get_file_size(self, file):
        """ Return the size in bytes of an open file object.

        The file position is left at the beginning of the file afterwards. """
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def getContent(self):
        """ Execute the operation named by the 'operation' form field.

        Returns the response body iterable and sets response headers
        directly on the request object. Raises RendererException for a
        missing/invalid operation or a disallowed path. """
        if "operation" not in self._web_support.req.form:
            raise self.RendererException("Operation Must Be Specified")
        operation = self._web_support.req.form['operation'].value
        if operation == "upload":
            if not os.path.isdir(self._fullpath):
                raise self.RendererException("Uploads Must Be in Folder")
            elif "files[]" not in self._web_support.req.form:
                raise self.RendererException("No Uploads Found")
            fieldStorage = self._web_support.req.form["files[]"]
            fullfilename = os.path.abspath(self._fullpath + "/" + fieldStorage.filename)
            # Reject path-traversal attempts that escape the dataroot
            if not fullfilename.startswith(os.path.normpath(self._web_support.dataroot)):
                raise self.RendererException("Attempt to Save File Outside of Dataroot")
            # Let's check on the directory and make sure its writable and it exists
            if not os.access(self._fullpath, os.W_OK) and os.path.exists(self._fullpath):
                raise self.RendererException("Save Directory Not Writable")
            # If the file already exists, rename the old version to .01 .02 .03 etc.
            if os.path.exists(fullfilename):
                filenum = 1
                while os.path.exists("%s.%.2d%s" % (os.path.splitext(fullfilename)[0], filenum, os.path.splitext(fullfilename)[1])):
                    filenum += 1
                os.rename(fullfilename, "%s.%.2d%s" % (os.path.splitext(fullfilename)[0], filenum, os.path.splitext(fullfilename)[1]))
            # BUGFIX: previously `f.close` (missing parentheses) never closed
            # the handle; the context manager guarantees close and flush.
            with open(fullfilename, "wb") as f:
                f.write(fieldStorage.value)
            result = {}
            # Strip any Windows-style path prefix from the client filename
            result['name'] = re.sub(r'^.*\\', '', fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            result['url'] = self.getURL(os.path.join(self._relpath, fieldStorage.filename), handler=None)
            if os.path.splitext(fieldStorage.filename)[1][1:] in ["png", "jpg", "jpeg", "gif", "bmp", "tif", "tiff"]:
                result['thumbnail_url'] = self.getURL(os.path.join(self._relpath, fieldStorage.filename), content_mode="raw", thumbnail="small", handler=None)
            resultwrapper = {'files': [result]}
            s = json.dumps(resultwrapper, separators=(',', ':'))
            # NOTE(review): JSON is deliberately served as text/plain --
            # presumably for an uploader iframe transport; confirm before changing.
            self._web_support.req.response_headers['Content-Type'] = 'text/plain'
            self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            return [s]
        elif operation == "download":
            def zipdir(path, ziph):
                # Add every file beneath path to the zip stream
                for root, dirs, files in os.walk(path):
                    for f in files:
                        ziph.write(os.path.join(root, f))
            z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED, allowZip64=True)
            if os.path.isdir(self._fullpath):
                zipdir(self._fullpath, z)
            else:
                z.write(self._fullpath)
            self._web_support.req.response_headers['Content-Disposition'] = "attachment; filename=" + os.path.basename(self._fullpath) + ".zip"
            self._web_support.req.response_headers['Content-Type'] = 'application/zip'
            self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            # zipstream.ZipFile is itself an iterable of compressed chunks
            return z
        elif operation == "newdir":
            outputmsg = ""
            if not os.path.isdir(self._fullpath):
                outputmsg = "ERROR: Directories Must Be Created Inside Directories"
            elif "dirname" not in self._web_support.req.form or self._web_support.req.form["dirname"].value == "":
                outputmsg = "ERROR: No Directory Name Supplied"
            elif not os.access(self._fullpath, os.W_OK) and not os.path.exists(self._fullpath):
                # NOTE(review): this only errors when the path is BOTH
                # unwritable and missing -- an existing read-only directory
                # slips through; confirm the intended logic.
                outputmsg = "ERROR: Directory '" + self._fullpath + "' Not Writable"
            else:
                newdirpath = os.path.abspath(os.path.join(self._fullpath, self._web_support.req.form["dirname"].value))
                if not newdirpath.startswith(os.path.normpath(self._web_support.dataroot)):
                    outputmsg = "ERROR: Cannot Write Outside Of Dataroot"
                elif os.path.exists(newdirpath):
                    outputmsg = "ERROR: Directory Already Exists"
                else:
                    try:
                        os.makedirs(newdirpath)
                    except Exception as err:
                        outputmsg = "ERROR: " + repr(err)
                    else:
                        outputmsg = "Directory Created Successfully"
            self._web_support.req.response_headers['Content-Type'] = 'text/plain'
            self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            return [outputmsg]
        elif operation == "rename":
            outputmsg = ""
            if "newname" not in self._web_support.req.form or self._web_support.req.form["newname"].value == "":
                outputmsg = "ERROR: Name Must Be Specified"
            elif not os.access(self._fullpath, os.W_OK) and not os.path.exists(self._fullpath):
                outputmsg = "ERROR: Directory '" + self._fullpath + "' Not Writable"
            else:
                newpath = os.path.abspath(os.path.join(os.path.dirname(self._fullpath), self._web_support.req.form["newname"].value))
                if not newpath.startswith(os.path.normpath(self._web_support.dataroot)):
                    outputmsg = "ERROR: Cannot Write Outside Of Dataroot"
                elif os.path.exists(newpath):
                    outputmsg = "ERROR: File or Directory Already Exists"
                else:
                    try:
                        # os.renames also creates any missing intermediate dirs
                        os.renames(self._fullpath, newpath)
                    except Exception as err:
                        outputmsg = "ERROR: " + repr(err)
                    else:
                        outputmsg = "Item Renamed Successfully"
            self._web_support.req.response_headers['Content-Type'] = 'text/plain'
            self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            return [outputmsg]
        elif operation == "delete":
            outputmsg = ""
            if not os.access(self._fullpath, os.W_OK) and not os.path.exists(self._fullpath):
                outputmsg = "ERROR: Directory '" + self._fullpath + "' Not Writable"
            elif self._fullpath == self._web_support.dataroot:
                outputmsg = "ERROR: Databrowse Data Root Directory '" + self._fullpath + "' Cannot Be Deleted"
            else:
                # Items are moved to a sibling .databrowse/trash directory
                # rather than being removed outright.
                trashdir = os.path.abspath(os.path.dirname(self._fullpath) + "/.databrowse/trash/")
                if not os.path.exists(trashdir):
                    try:
                        os.makedirs(trashdir)
                    except Exception:
                        outputmsg = "ERROR: Unable to Create Trash Directory - Check File Permissions"
                if outputmsg == "":
                    try:
                        shutil.move(self._fullpath, trashdir)
                    except Exception as err:
                        outputmsg = "ERROR: " + repr(err)
                    else:
                        outputmsg = "Item Moved To Trash Successfully"
            self._web_support.req.response_headers['Content-Type'] = 'text/plain'
            self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            return [outputmsg]
        else:
            # BUGFIX: corrected "Specificed" typo in the error message
            raise self.RendererException("Invalid Operation Specified")
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global level manager of shared numpy arrays."""
from typing import TYPE_CHECKING
import warnings
from multiprocessing import Lock, RawArray # type: ignore
import numpy as np
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import Any, Optional, Tuple, List
class SharedMemManager(object):
    """Manager of global shared numpy arrays.

    Multiprocessing requires that shared memory needs to be inherited, and to
    use this with pools (not processes), this requires that it is global. This
    class is responsible for managing this global memory.
    """

    _INITIAL_SIZE = 1024
    _instance = None  # type: SharedMemManager

    def __new__(cls, *args, **kwargs):
        # Classic singleton: create the instance once, then always reuse it.
        if not cls._instance:
            cls._instance = super(SharedMemManager, cls).__new__(cls, *args, **kwargs)
        return cls._instance

    def __init__(self):
        self._lock = Lock()
        self._current = 0
        self._count = 0
        # Each occupied slot holds a (RawArray, shape) pair; None marks free.
        self._arrays = (
            SharedMemManager._INITIAL_SIZE * [None]
        )  # type: List[Optional[Tuple[Any, Tuple[int, ...]]]]

    def _create_array(self, arr: np.ndarray) -> int:
        """Returns the handle of a RawArray created from the given numpy array.

        Args:
            arr: A numpy ndarray.

        Returns:
            The handle (int) of the array.

        Raises:
            ValueError: if arr is not a ndarray or of an unsupported dtype. If
            the array is of an unsupported type, using a view of the array to
            another dtype and then converting on get is often a work around.
        """
        if not isinstance(arr, np.ndarray):
            raise ValueError('Array is not a numpy ndarray.')
        try:
            c_arr = np.ctypeslib.as_ctypes(arr)
        except (KeyError, NotImplementedError):
            raise ValueError(
                'Array has unsupported dtype {}.'.format(arr.dtype))
        # pylint: disable=protected-access
        shared = RawArray(c_arr._type_, c_arr)
        with self._lock:
            # Capacity doubling: append an equal-sized run of free slots.
            if self._count >= len(self._arrays):
                self._arrays += len(self._arrays) * [None]
            self._get_next_free()
            # Storing the shape alongside the buffer is a workaround for an
            # issue encountered when upgrading to numpy 1.15.
            # See https://github.com/numpy/numpy/issues/11636
            self._arrays[self._current] = (shared, arr.shape)
            self._count += 1
            return self._current

    def _get_next_free(self):
        """Advance self._current to the next free slot.

        Raises:
            RuntimeError: if a full scan finds no free slot.
        """
        scanned = 0
        while self._arrays[self._current] is not None:
            self._current = (self._current + 1) % len(self._arrays)
            scanned += 1
            if scanned == len(self._arrays):
                raise RuntimeError(
                    'Cannot find free space to allocate new array.')

    def _free_array(self, handle: int):
        """Frees the memory for the array with the given handle.

        Args:
            handle: The handle of the array whose memory should be freed. This
            handle must come from the _create_array method.
        """
        with self._lock:
            if self._arrays[handle] is not None:
                self._arrays[handle] = None
                self._count -= 1

    def _get_array(self, handle: int) -> np.ndarray:
        """Returns the array with the given handle.

        Args:
            handle: The handle of the array to fetch. This handle must come
            from the _create_array method.

        Returns:
            The numpy ndarray with the handle given from _create_array.
        """
        entry = self._arrays[handle]
        assert entry is not None
        raw_buffer, shape = entry
        with warnings.catch_warnings():
            # as_array can emit a spurious RuntimeWarning; silence it locally.
            warnings.simplefilter('ignore', RuntimeWarning)
            result = np.ctypeslib.as_array(raw_buffer)
        result.shape = shape
        return result

    @staticmethod
    def get_instance() -> 'SharedMemManager':
        """Get the SharedMemManager instance."""
        if not SharedMemManager._instance:
            SharedMemManager._instance = SharedMemManager()
        return SharedMemManager._instance

    @staticmethod
    def create_array(arr: np.ndarray) -> int:
        """Returns the handle of a RawArray created from the given numpy array.

        Args:
            arr: A numpy ndarray. Only arrays with a dtype supported by numpy
            ctypeslib as_ctypes can be used.

        Returns:
            The handle (int) of the array.

        Raises:
            ValueError: if arr is not a ndarray or of an unsupported dtype. If
            the array is of an unsupported type, using a view of the array to
            another dtype and then converting on get is often a work around.
        """
        # pylint: disable=protected-access
        return SharedMemManager._instance._create_array(arr)

    @staticmethod
    def free_array(handle: int):
        """Frees the memory for the array with the given handle.

        Args:
            handle: The handle of the array whose memory should be freed. This
            handle must come from the create_array method.
        """
        # pylint: disable=protected-access
        SharedMemManager._instance._free_array(handle)

    @staticmethod
    def get_array(handle: int) -> np.ndarray:
        """Returns the shared array registered under the given handle.

        Args:
            handle: The handle of the array to fetch. This handle must come
            from the create_array method.

        Returns:
            The numpy ndarray with the handle given from create_array.
        """
        # pylint: disable=protected-access
        return SharedMemManager._instance._get_array(handle)
# Create instance on module load. Per the class docstring, the shared memory
# must exist in the parent process so worker pools can inherit it.
SharedMemManager.get_instance()
|
from django import forms
from django.core.exceptions import ValidationError
class CustomForm(forms.Form):
    """Form base that tags errored fields with the 'is-invalid' CSS class and
    validates that password1/password2 match when both were changed."""
    error_css_class = 'is-invalid'

    def clean(self):
        """Add matching-password errors to both password fields on mismatch."""
        cleaned_data = super(CustomForm, self).clean()
        if 'password1' in self.changed_data and 'password2' in self.changed_data:
            if cleaned_data.get('password1') != cleaned_data.get('password2'):
                # NOTE(review): "coicidem" looks like a typo for "coincidem";
                # left unchanged since it is user-visible text.
                self.add_error('password1', ValidationError("As senhas não coicidem!"))
                self.add_error('password2', ValidationError("As senhas não coicidem!"))
        return cleaned_data

    def is_valid(self):
        """Return True if the form has no errors, or False otherwise."""
        for field in self.errors:
            # Skip non-field errors ('__all__'): they have no bound widget.
            if field not in self.fields:
                continue
            attrs = self[field].field.widget.attrs
            # BUGFIX: '+=' raised KeyError when the widget had no class set.
            attrs['class'] = (attrs.get('class', '') + ' is-invalid').strip()
        return self.is_bound and not self.errors
class CustomModelForm(forms.ModelForm):
    """ModelForm base that tags errored fields with the 'is-invalid' CSS class
    and validates that password1/password2 match when both were changed."""
    error_css_class = 'is-invalid'

    def clean(self):
        """Add matching-password errors to both password fields on mismatch."""
        cleaned_data = super(CustomModelForm, self).clean()
        if 'password1' in self.changed_data and 'password2' in self.changed_data:
            if cleaned_data.get('password1') != cleaned_data.get('password2'):
                # NOTE(review): "coicidem" looks like a typo for "coincidem";
                # left unchanged since it is user-visible text.
                self.add_error('password1', ValidationError("As senhas não coicidem!"))
                self.add_error('password2', ValidationError("As senhas não coicidem!"))
        return cleaned_data

    def is_valid(self):
        """Return True if the form has no errors, or False otherwise."""
        for field in self.errors:
            # Skip non-field errors ('__all__'): they have no bound widget.
            if field not in self.fields:
                continue
            attrs = self[field].field.widget.attrs
            # BUGFIX: '+=' raised KeyError when the widget had no class set.
            attrs['class'] = (attrs.get('class', '') + ' is-invalid').strip()
        return self.is_bound and not self.errors
|
#models
import uuid
import django_tables2 as tables
import datetime
from django.db import models
from django.utils import timezone
from django.urls import reverse
#from django.utils.timesince import timesince
from django.utils.timezone import utc
from phonenumber_field.modelfields import PhoneNumberField
#from django.contrib.auth.models import AbstractUser
#from django_tables2 import MultiTableMixin
#from django.forms import ModelForm
class Restaurant(models.Model):
    """A restaurant record: owning user, contact number, slug, and location."""
    # NOTE(review): despite the name, this is a ForeignKey to the owning
    # auth.User, not a name string — confirm intent before renaming.
    Restaurant_name = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    contact = PhoneNumberField(blank=True)
    slug = models.SlugField()
    location = models.CharField(max_length=30)
    city = models.CharField(max_length=30)
    # features = models.ManyToManyField() # dinner, launch, nightlife,
    # timing = models.ManyToManyField() # sunday, monday, tuesday,
    delivery = models.BooleanField(default=False)
    # image = models.ImageField()
# Waiting-list status choices used by Customer.status:
# True = still waiting, False = seated.
BOOL_CHOICES = ((True, 'Waiting'), (False, 'Seated'))
class Customer(models.Model):
    """A waiting-list entry: party details, arrival time, and seating status."""
    name = models.CharField(primary_key=True, max_length=30)
    # unique_id = models.UUIDField(default=uuid.uuid4, editable=False)
    partysize = models.IntegerField()
    arrival_time = models.DateTimeField(auto_now_add=True, blank=True)
    contact = PhoneNumberField(blank=True)
    status = models.BooleanField(choices=BOOL_CHOICES)

    def get_time_diff(self):
        """Return seconds elapsed since arrival, or None if arrival_time is unset."""
        if self.arrival_time:
            # Modernization: datetime.utcnow() is naive (and deprecated since
            # Python 3.12); now(timezone.utc) yields the same aware UTC
            # instant without the .replace(tzinfo=utc) step.
            now = datetime.datetime.now(datetime.timezone.utc)
            timediff = now - self.arrival_time
            return timediff.total_seconds()
        return None

    # Alias so templates/tables can refer to the wait time by a short name.
    wait = get_time_diff

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Customer'
        verbose_name_plural = 'Customers'
        ordering = ['-arrival_time']
|
from Inheritance.class_Inheritance.project_zoo.reptile import Reptile


class Lizard(Reptile):
    """A lizard; inherits all behavior from Reptile unchanged."""
    pass
class Chess:
    """Plain data holder for a chess piece: tag id, camp (side), and x/y position."""

    # All attributes default to zero until assigned by game logic.
    tag = camp = x = y = 0
|
# Generated by Django 3.0.7 on 2020-07-17 06:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.7).

    Removes Status.judge_detail and adds StatusDetail.main_state as a FK back
    to Status; re-declares the owner/problem FKs with CASCADE deletion.
    """

    dependencies = [
        ('problem', '0012_auto_20200716_1903'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('status', '0003_auto_20200717_1048'),
    ]
    operations = [
        # Drop the old relation field from Status.
        migrations.RemoveField(
            model_name='status',
            name='judge_detail',
        ),
        # Re-create the relation from the detail side instead.
        migrations.AddField(
            model_name='statusdetail',
            name='main_state',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE,
                                    related_name='judge_detail', to='status.Status'),
            # default=1 is only used to backfill existing rows; it is not
            # kept as a model default.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='status',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='status',
            name='problem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problem.Problem'),
        ),
    ]
|
from __future__ import print_function
import logging
import sys
import os
import unittest
import torch
import torch_mlu.core.mlu_model as ct
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
logging.basicConfig(level=logging.DEBUG)
from common_utils import testinfo, TestCase # pylint: disable=C0413
class TestCloneOp(TestCase):
    """Verifies tensor.clone() on MLU matches CPU results, including
    channels-last and non-contiguous (sliced) layouts."""

    # @unittest.skip("not test")
    @testinfo()
    def test_clone(self):
        """clone() of a fresh tensor and of an elementwise-add result."""
        cpu_a = torch.randn(1, 3, 512, 224, dtype=torch.float)
        cpu_b = torch.zeros(1, 3, 512, 224, dtype=torch.float)
        mlu_a = cpu_a.to(ct.mlu_device())
        mlu_b = cpu_b.to(ct.mlu_device())
        self.assertTensorsEqual(cpu_a.clone(), mlu_a.clone().cpu(), 0.0, use_MSE=True)
        cpu_sum = cpu_a + cpu_b
        mlu_sum = mlu_a + mlu_b
        self.assertTensorsEqual(cpu_sum.clone(), mlu_sum.clone().cpu(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_clone_channels_last(self):
        """clone() must preserve channels-last strides and storage offset."""
        base = torch.randn(1, 3, 512, 224, dtype=torch.float)
        cpu_t = base.to(memory_format=torch.channels_last)
        mlu_t = cpu_t.to('mlu')
        cpu_clone = cpu_t.clone()
        mlu_clone = mlu_t.clone()
        self.assertTensorsEqual(cpu_clone, mlu_clone.cpu(), 0.0, use_MSE=True)
        self.assertTrue(cpu_clone.stride() == mlu_clone.stride())
        self.assertTrue(cpu_clone.storage_offset() == mlu_clone.storage_offset())

    # @unittest.skip("not test")
    @testinfo()
    def test_clone_not_dense(self):
        """clone() of a sliced (non-dense) view keeps strides and offset."""
        base = torch.randn(1, 3, 512, 224, dtype=torch.float)
        cpu_view = base[:, :, :, 100:200]
        mlu_view = base.to('mlu')[:, :, :, 100:200]
        cpu_clone = cpu_view.clone()
        mlu_clone = mlu_view.clone()
        self.assertTensorsEqual(cpu_clone, mlu_clone.cpu(), 0.0, use_MSE=True)
        self.assertTrue(cpu_clone.stride() == mlu_clone.stride())
        self.assertTrue(cpu_clone.storage_offset() == mlu_clone.storage_offset())
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import requests
from bs4 import BeautifulSoup
import os
import re
import getpass
class Evaluation:
    """A parsed course-evaluation record (year, course identity, instructor,
    quality scores, weekly workload, and most common grade)."""

    def toJson(self):
        """Serialize this record to a compact single-line JSON object string."""
        layout = ('{{"yr":{},"su":"{}","cr":"{}","sc":"{}","in":"{}",'
                  '"rc":{:.1f},"ri":{:.1f},"wl":{:.0f},"gr":"{}"}}')
        values = (self.year, self.subject, self.course, self.section,
                  self.instructor, self.courseQuality, self.instructorQuality,
                  self.workload, self.grade)
        return layout.format(*values)

    def toFilename(self):
        """Build the canonical archive filename: year-subject-course-section.html."""
        parts = (self.year, self.subject, self.course, self.section)
        return "-".join(str(part) for part in parts) + ".html"
class InvalidLoginError(Exception):
    """Raised when BannerWeb rejects the supplied credentials."""
    pass
# Shared HTTP session so the BannerWeb login cookie persists across requests.
session = requests.Session()
# BannerWeb endpoints used throughout the scraper.
pages = {
    'home': 'https://bannerweb.wpi.edu',
    'login': 'https://bannerweb.wpi.edu/pls/prod/twbkwbis.P_WWWLogin',
    'validate': 'https://bannerweb.wpi.edu/pls/prod/twbkwbis.P_ValLogin',
    'year': 'https://bannerweb.wpi.edu/pls/prod/hwwkscevrp.P_Select_Year',
    'course': 'https://bannerweb.wpi.edu/pls/prod/hwwkscevrp.P_Select_CrseInst',
    'section': 'https://bannerweb.wpi.edu/pls/prod/hwwkscevrp.P_Select_CrseSect'
}
def runGetUrls():
    """Log in interactively and write every section-report URL to urls.txt."""
    username = input('username: ')
    password = getpass.getpass('password: ')
    try:
        login(username, password)
    except InvalidLoginError:
        print('Login failed')
        return
    print('Login succeeded')
    with open('urls.txt', 'w') as output:
        print('Getting academic year listing')
        for year in years():
            print('Getting course listing for ' + year)
            for course in courses(year):
                print('Getting sections for ' + course + ' in ' + year)
                for section in sections(course, year):
                    output.write(section + '\n')
def runDownloadUrls():
    """Prompt for credentials and a URL file, then download every listed page."""
    username = input('username: ')
    password = getpass.getpass('password: ')
    filename = input('url file: ')
    try:
        login(username, password)
    except InvalidLoginError:
        print('Login failed')
        return
    print('Login succeeded')
    downloadEvaluations(filename)
def runProcessDownloads():
    """Parse every downloaded page in temp/ and write them to courses.json.

    Successfully parsed pages are also renamed into evaluations/ using the
    canonical filename; pages that fail to parse are reported and skipped.
    """
    try:
        os.mkdir('evaluations')
    except Exception:
        pass
    filenames = os.listdir(os.path.join(os.getcwd(), 'temp'))
    entries = []
    for name in filenames:
        try:
            path = os.path.join('temp', name)
            with open(path, 'r') as html:
                evaluation = parseEvaluation(html.read())
            entries.append(evaluation.toJson())
            try:
                os.rename(path, os.path.join('evaluations', evaluation.toFilename()))
            except OSError:
                # The naming scheme can't handle multiple instructors teaching a course
                # Just ignore this case if it happens
                pass
        except Exception:
            print('Error processing ' + name)
    # BUGFIX: the old code appended a trailing comma before ']', producing
    # invalid JSON; joining entries puts commas only between elements.
    json = '[\n' + ',\n'.join(entries) + '\n]'
    with open('courses.json', 'w') as courses:
        courses.write(json)
def parseEvaluation(text):
    """Parse one evaluation HTML page into an Evaluation record.

    Extracts the academic year, subject/course/section, instructor, the two
    quality scores, an estimated average weekly workload, and the most common
    grade. Raises AttributeError or StopIteration if the page layout does not
    match the expected patterns.
    """
    result = Evaluation()
    result.year = int(re.search(r'Academic Year \d{4}-(\d{4})', text).group(1))
    # Course identifier appears as e.g. "CS-2102"
    match = re.search(r'([A-Z]+)-(\w+)', text)
    result.subject = match.group(1)
    result.course = match.group(2)
    result.section = re.search(r'Section (\w+)', text).group(1)
    result.instructor = re.search(r'Prof\. ([^<]+)', text).group(1)
    # The first two numeric <p> blocks are course then instructor quality
    matches = re.finditer(r'<p.*?> ([\d\.]+)</p>', text)
    match = next(matches)
    result.courseQuality = float(match.group(1))
    match = next(matches)
    result.instructorQuality = float(match.group(1))
    # Expected-grade distribution counts
    aCount = int(re.search(r'A</p>.*?(\d+)', text, re.DOTALL).group(1))
    bCount = int(re.search(r'B</p>.*?(\d+)', text, re.DOTALL).group(1))
    cCount = int(re.search(r'C</p>.*?(\d+)', text, re.DOTALL).group(1))
    nrCount = int(re.search(r'NR/D/F</p>.*?(\d+)', text, re.DOTALL).group(1))
    otherCount = int(re.search(r"Other/Don't know</p>.*?(\d+)", text, re.DOTALL)
                     .group(1))
    # Newer pages split time into in-class (26A) and out-of-class (26B) buckets
    inClassTime = re.search(r'26A\..*?3 hr/wk or less</p>.*?(\d+).*?' +
                            r'4 hr/wk</p>.*?(\d+).*?5 hr/wk</p>.*?(\d+).*?' +
                            r'6 hr/wk</p>.*?(\d+).*?' +
                            r'7 hr/wk or more</p>.*?(\d+)', text, re.DOTALL)
    outClassTime = re.search(r'26B\..*?0 hr/wk</p>.*?(\d+).*?' +
                             r'1-5 hr/wk</p>.*?(\d+).*?' +
                             r'6-10 hr/wk</p>.*?(\d+).*?' +
                             r'11-15 hr/wk</p>.*?(\d+).*?' +
                             r'16-20 hr/wk</p>.*?(\d+).*?' +
                             r'21 hr/wk or more</p>.*?(\d+)', text, re.DOTALL)
    # Older pages report one combined question 26
    oldClassTime = re.search(r'26\..*?8 hrs\. or fewer</p>.*?(\d+).*?' +
                             r'9-12 hrs\.</p>.*?(\d+).*?' +
                             r'13-16 hrs\.</p>.*?(\d+).*?' +
                             r'17-20 hrs\.</p>.*?(\d+).*?' +
                             r'21 hrs\. or more</p>.*?(\d+)', text, re.DOTALL)
    if inClassTime and outClassTime:
        # Workload = weighted average of bucket values, in-class + out-of-class.
        # NOTE(review): the bucket weights ([2,4,5,6,7] etc.) look like chosen
        # representative hours per bucket -- confirm against the survey wording.
        inCount = sum([int(inClassTime.group(i+1)) for i in range(5)])
        inTime = sum([int(inClassTime.group(i+1))*[2,4,5,6,7][i] for i in range(5)])
        outCount = sum([int(outClassTime.group(i+1)) for i in range(6)])
        outTime = sum([int(outClassTime.group(i+1))*[0,3,8,13,18,22][i] for i in range(6)])
        result.workload = float(inTime) / inCount + float(outTime) / outCount
    else:
        count = sum([int(oldClassTime.group(i+1)) for i in range(5)])
        time = sum([int(oldClassTime.group(i+1))*[6,10,15,18,22][i] for i in range(5)])
        result.workload = float(time) / count
    # Most common grade bucket; ties resolve toward A because later checks
    # overwrite earlier assignments.
    if nrCount >= aCount and nrCount >= bCount and nrCount >= cCount:
        result.grade = "NR"
    if cCount >= aCount and cCount >= bCount and cCount >= nrCount:
        result.grade = "C"
    if bCount >= aCount and bCount >= cCount and bCount >= nrCount:
        result.grade = "B"
    if aCount >= bCount and aCount >= cCount and aCount >= nrCount:
        result.grade = "A"
    return result
def downloadEvaluations(urlFile):
    """Download every evaluation URL in *urlFile* into temp/1.htm, temp/2.htm, ..."""
    with open(urlFile, 'r') as urls:
        directory = 'temp'
        try:
            os.mkdir(directory)
        except Exception:
            pass
        for number, url in enumerate(urls.read().splitlines(), start=1):
            # '\r' keeps the progress counter on a single console line
            print('Downloading evaluation ' + str(number), end='\r')
            downloadEvaluation(url, os.path.join(directory, str(number) + '.htm'))
        print('Done')
def downloadEvaluation(url, name):
    """Stream one evaluation page (dataroot-relative *url*) into file *name*."""
    response = session.get(pages['home'] + url, stream=True)
    try:
        with open(name, 'wb') as output:
            for block in response.iter_content(1024):
                if block:
                    output.write(block)
            # The context manager flushes and closes; no per-chunk flush needed.
    finally:
        # BUGFIX: the streamed response was never closed, leaking the
        # underlying connection.
        response.close()
def login(username, password):
    """Authenticate against BannerWeb; raises InvalidLoginError on failure."""
    # Prime cookies by visiting the home page first.
    session.get(pages['home'])
    session.post(
        pages['validate'],
        params={'sid': username, 'PIN': password},
        headers={'referer': pages['login']},
    )
    # Check if the login succeeded by looking for a session cookie
    if 'SESSID' not in session.cookies:
        raise InvalidLoginError
def years():
    """Yield each academic-year value from the year-selection dropdown."""
    document = BeautifulSoup(session.get(pages['year']).text)
    selector = document.find('select', {'name': 'IN_ACYR'})
    for option in selector.find_all('option'):
        yield option['value']
def courses(year):
    """Yield each non-empty course value offered in the given academic year."""
    response = session.post(pages['course'], params={
        'IN_ACYR': year,
        'IN_ADLN_OIX': 'X',
    })
    selector = BeautifulSoup(response.text).find('select', {'name': 'IN_SUBCRSE'})
    for option in selector.find_all('option'):
        if option['value']:
            yield option['value']
def sections(course, year):
    """Yield the report URL (href) for each section row of *course* in *year*.

    The link lives in the fifth cell (index 4) of each data-table row.
    """
    response = session.post(pages['section'], params = {
        'IN_SUBCRSE': course,
        'IN_PIDM': '',
        'IN_ACYR': year,
        'IN_ADLN_OIX': 'X'
    })
    document = BeautifulSoup(response.text)
    table = document.find('table', { 'class': 'datadisplaytable' })
    rows = table.find_all('tr')
    for row in rows:
        columns = row.find_all('td')
        # BUGFIX: the guard previously allowed len == 4 while indexing
        # columns[4], raising IndexError on exactly-4-column rows.
        if len(columns) > 4:
            yield columns[4].a['href']
|
"""
For this project we have multiple ice datasets. This script contains the helper functions for the notebook which is used to combine them to a new dataset:
Antarctic Ice Concentration (AIC)
"""
import numpy as np
import xarray as xr
import glob
from tqdm import tqdm
from pyproj import Proj, transform
def load_seaice():
    """Return the sea-ice concentration (sic) variable from the processed dataset."""
    return xr.open_dataset('processed_data/seaice.nc').sic
def load_landice():
    """Load all land-ice files, mask to land cells, and return the computed
    liquid-water-equivalent thickness."""
    dataset = xr.open_mfdataset(glob.glob('data/landice2/*.nc'))
    thickness = dataset.where(dataset.land_mask == 1).lwe_thickness
    return thickness.compute()
def latlon_to_polarstereo(da):
    """Regrid *da* from lat/lon onto an Antarctic polar-stereographic grid
    (EPSG:3031), one timestep at a time.

    NOTE(review): Proj(init=...) and pyproj.transform are deprecated in
    pyproj 2+, and xr.DataArray is constructed below without a data argument,
    which newer xarray versions may reject -- confirm pinned library versions.
    """
    # Target grid coordinates in the polar-stereographic projection (metres)
    Y, X = [10*np.arange(435000,-395000,-2500),
            10*np.arange(-395000,395000,2500)]
    x,y = np.meshgrid(X,Y)
    inProj = Proj(init='epsg:3031')
    outProj = Proj(init='epsg:4326')
    # Convert each target grid point to lon/lat for interpolation
    x,y = transform(inProj,outProj,x,y)
    x = x.flatten()
    y = y.flatten()
    # Wrap negative longitudes into [0, 360)
    x[x<0] = x[x<0]+360
    x = xr.DataArray(x, dims='z')
    y = xr.DataArray(y, dims='z')
    newdata = xr.DataArray(dims=('time','y','x'),coords = [da.time,Y,X])
    for time in tqdm(da.time.values):
        subdata = da.sel(time=time)
        # Bilinear interpolation; points outside the source grid become 0.0
        variable_data = subdata.interp(lon=x, lat=y, method = 'linear', kwargs={"fill_value": 0.0})
        newdata.loc[newdata.time==time] = variable_data.values.reshape([1,len(Y),len(X)])
    return newdata
# Support for the Numato Opsis - The first HDMI2USB production board
from mibuild.generic_platform import *
from mibuild.xilinx import XilinxPlatform
from mibuild.openocd import OpenOCD
from mibuild.xilinx import UrJTAG
from mibuild.xilinx import iMPACT
_io = [
## FXO-HC536R - component U17
# 100MHz - CMOS Crystal Oscillator
#NET "clk" LOC = "AB13" |IOSTANDARD = None; # (/FPGA_Bank_1_2/USRCLK)
("clk100", 0, Pins("AB13"), IOStandard("LVCMOS33")),
## FXO-HC536R - component U26
# 27MHz - CMOS Crystal Oscillator
#NET "clk" LOC = "N19" |IOSTANDARD = None; # (/SPI_Flash/27MHz)
("clk27", 0, Pins("N19"), IOStandard("LVCMOS33")),
## SW_PUSH - component SW1
# Connected to Bank 3 - 1.5V bank
#NET "???" LOC = "Y3" |IOSTANDARD = None; # (/FPGA_Bank_0_3/SWITCH | Net-(R54-Pad2))
("cpu_reset", 0, Pins("Y3"), IOStandard("LVCMOS15"), Misc("PULLUP")),
# CY7C68013A_100AC - component U2
("fx2", 0,
#NET "fx2_ifclk" LOC = "P20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY-IFCLK)
Subsignal("ifclk", Pins("P20"), IOStandard("LVCMOS33")),
#NET "fx2_fd<0>" LOC = "C20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD0)
#NET "fx2_fd<1>" LOC = "C22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD1)
#NET "fx2_fd<2>" LOC = "L15" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD2)
#NET "fx2_fd<3>" LOC = "K16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD3)
#NET "fx2_fd<4>" LOC = "D21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD4)
#NET "fx2_fd<5>" LOC = "D22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD5)
#NET "fx2_fd<6>" LOC = "G19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD6)
#NET "fx2_fd<7>" LOC = "F20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD7)
#NET "fx2_fd<8>" LOC = "H18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD8)
#NET "fx2_fd<9>" LOC = "H19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD9)
#NET "fx2_fd<10>" LOC = "F21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD10)
#NET "fx2_fd<11>" LOC = "F22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD11)
#NET "fx2_fd<12>" LOC = "E20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD12)
#NET "fx2_fd<13>" LOC = "E22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD13)
#NET "fx2_fd<14>" LOC = "J19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD14)
#NET "fx2_fd<15>" LOC = "H20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_FD15)
Subsignal("data", Pins("C20 C22 L15 K16 D21 D22 G19 F20 H18 H19 F21 F22 E20 E22 J19 H20"), IOStandard("LVCMOS33")),
#NET "fx2_fifoadr<0>" LOC = "B21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA4)
#NET "fx2_fifoadr<1>" LOC = "B22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA5)
Subsignal("addr", Pins("B21 B22"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_flaga" LOC = "N16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL0)
#NET "fx2_flagb" LOC = "P16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL1)
#NET "fx2_flagc" LOC = "R15" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL2)
Subsignal("flaga", Pins("N16"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
Subsignal("flagb", Pins("P16"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
Subsignal("flagc", Pins("R15"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_flagd/slcs_n" LOC = "J17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA7)
Subsignal("cs_n", Pins("J17"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_slrd" LOC = "P19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD0)
Subsignal("rd_n", Pins("P19"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_slwr" LOC = "R19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD1)
Subsignal("wr_n", Pins("R19"), IOStandard("LVCMOS33")),
#NET "fx2_sloe" LOC = "H16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA2)
Subsignal("oe_n", Pins("H16"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_pktend" LOC = "J16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA6)
Subsignal("pktend_n", Pins("J16"), IOStandard("LVCMOS33"), Misc("DRIVE=12")),
#NET "fx2_ctl<3>" LOC = "M18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL3)
#NET "fx2_ctl<4>" LOC = "M17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL4)
#NET "fx2_ctl<5>" LOC = "R16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_CTL5)
#NET "fx2_init5_n" LOC = "T19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_INT5)
#NET "fx2_int<0>" LOC = "F18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA0)
#NET "fx2_int<1>" LOC = "F19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA1)
#NET "fx2_wu<2>" LOC = "H17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PA3)
#NET "fx2_gpifadr<0>" LOC = "U20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC0)
#NET "fx2_gpifadr<1>" LOC = "U22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC1)
#NET "fx2_gpifadr<2>" LOC = "V21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC2)
#NET "fx2_gpifadr<3>" LOC = "V22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC3)
#NET "fx2_gpifadr<4>" LOC = "W20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC4)
#NET "fx2_gpifadr<5>" LOC = "W22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC5)
#NET "fx2_gpifadr<6>" LOC = "Y21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC6)
#NET "fx2_gpifadr<7>" LOC = "Y22" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_PC7)
#NET "fx2_gpifadr<8>" LOC = "AB21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Power/DONE | Net-(R28-Pad1))
# Timers
#NET "fx2_t<0>" LOC = "G17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/SPI_Flash/TDO-FPGA/TDO-JTAG | Net-(P3-Pad8) | Net-(R14-Pad1))
## \/ Strongly pulled (4k) to VCC3V3 via R56
#NET "fx2_t<1>" LOC = "AB2" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Power/PROG_B | Net-(R15-Pad1))
#NET "fx2_t<2>" LOC = "E18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/SPI_Flash/TDO-USB/TDI-FPGA | Net-(P3-Pad10) | Net-(R23-Pad1))
#NET "fx2_rd_n" LOC = "K19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD)
#NET "fx2_rdy<2>" LOC = "M16" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD2)
#NET "fx2_rdy<3>" LOC = "N15" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD3)
#NET "fx2_rdy<4>" LOC = "U19" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD4)
#NET "fx2_rdy<5>" LOC = "T20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RD5)
## UART0
#NET "fx2_rxd0" LOC = "P18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RXD1)
#NET "fx2_txd0" LOC = "T17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_TXD1)
## UART1
#NET "fx2_rxd1" LOC = "P17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_RXD0)
#NET "fx2_txd1" LOC = "R17" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_TXD0)
#
#NET "fx2_t0" LOC = "G20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_T0)
#NET "fx2_wr_n" LOC = "K18" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/CY_WR)
# JTAG
# - TMS?
#NET "fx2_rxd<0>" LOC = "D20" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Power/TMS | Net-(P3-Pad4) | Net-(R24-Pad1))
# - TCK
#NET "fx2_rxd<1>" LOC = "A21" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Power/TCK | Net-(P3-Pad6) | Net-(R26-Pad1))
## \/ Strongly pulled (4k) to VCC3V3 via R52
#NET "fx2_t<2>" LOC = "Y4" |IOSTANDARD = LVCMOS33 |SLEW=SLOW |DRIVE=12 ; # (/FPGA_Bank_1_2/INIT_B | Net-(R27-Pad1))
## Same pins as the EEPROM
## \/ Strongly pulled (2k) to VCC3V3 via R34
#NET "fx2_scl" LOC = "G6" |IOSTANDARD = I2C; # (/Ethernet/MAC_SCL)
#Subsignal("scl", Pins("G6"), IOStandard("I2C")),
#NET "fx2_sda" LOC = "C1" |IOSTANDARD = I2C; # (/Ethernet/MAC_SDA)
#Subsignal("sda", Pins("C1"), IOStandard("I2C")),
),
("fx2_reset", 0, Pins("G22"), IOStandard("LVCMOS33"), Misc("PULLUP"), Misc("DRIVE=24"), Misc("SLEW=SLOW")),
## onBoard Quad-SPI Flash
## W25Q128FVEIG - component U3
## 128M (16M x 8) - 104MHz
("spiflash4x", 0,
## \/ Strongly pulled (10k) to VCC3V3 via R18
#NET "???" LOC = "AA3" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_CS_N)
Subsignal("cs_n", Pins("AA3")),
#NET "???" LOC = "Y20" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_CLK)
Subsignal("clk", Pins("Y20")),
#NET "???" LOC = "AB20" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_MOSI_CSI_N_MISO0)
## \/ Strongly pulled (10k) to VCC3V3 via R19
#NET "???" LOC = "AA20" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_DO_DIN_MISO1 | Net-(R16-Pad1))
## \/ Strongly pulled (10k) to VCC3V3 via R20
#NET "???" LOC = "R13" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_D1_MISO2 | Net-(R17-Pad1))
## \/ Strongly pulled (10k) to VCC3V3 via R21
#NET "???" LOC = "T14" |IOSTANDARD = None; # (/FPGA_Bank_1_2/SPI_D2_MISO3)
Subsignal("dq", Pins("AB20", "AA20", "R13", "T14")),
IOStandard("LVCMOS33"), Misc("SLEW=FAST")
),
## onBoard Leds
# NET "Led<0>" LOC = "U18"; # Bank = 1, Pin name = IO_L52N_M1DQ15, Sch name = LD0
#("user_led", 0, Pins("U18")),
## TEMAC Ethernet MAC - FIXME
# 10/100/1000 Ethernet PHY
## RTL8211E-VL - component U20 - RGMII
("eth_clocks", 0,
Subsignal("tx", Pins("AB12")),
Subsignal("rx", Pins("AA12")),
IOStandard("LVCMOS33")
),
("eth", 0,
Subsignal("rst_n", Pins("U8")),
Subsignal("int_n", Pins("V9")),
Subsignal("mdio", Pins("T8")),
Subsignal("mdc", Pins("V7")),
Subsignal("rx_ctl", Pins("U9")),
Subsignal("rx_data", Pins("R9 R8 W6 Y6")),
Subsignal("tx_ctl", Pins("W8")),
Subsignal("tx_data", Pins("W9 Y8 AA6 AB6")),
IOStandard("LVCMOS33")
),
## 24AA02E48 - component U23
## 2 Kbit Electrically Erasable PROM
## Pre-programmed Globally Unique, 48-bit Node Address
## The device is organized as two blocks of 128 x 8-bit memory with a 2-wire serial interface.
##
## \/ Strongly pulled (2k) to VCC3V3 via R34
#NET "eeprom_scl" LOC = "G6" |IOSTANDARD = I2C; # (/Ethernet/MAC_SCL)
#NET "eeprom_sda" LOC = "C1" |IOSTANDARD = I2C; # (/Ethernet/MAC_SDA)
("opsis_eeprom", 0,
Subsignal("scl", Pins("G6"), IOStandard("I2C")),
Subsignal("sda", Pins("C1"), IOStandard("I2C")),
),
## DDR3
# MT41J128M16JT-125:K - 16 Meg x 16 x 8 Banks - DDR3-1600 11-11-11
# FBGA Code: D9PSL, Part Number: MT41J128M16 - http://www.micron.com/support/fbga
("ddram_clock", 0,
Subsignal("p", Pins("K4")),
Subsignal("n", Pins("K3")),
IOStandard("DIFF_SSTL15_II"), Misc("IN_TERM=NONE")
),
("ddram", 0,
Subsignal("cke", Pins("F2"), IOStandard("SSTL15_II")),
Subsignal("ras_n", Pins("M5"), IOStandard("SSTL15_II")),
Subsignal("cas_n", Pins("M4"), IOStandard("SSTL15_II")),
Subsignal("we_n", Pins("H2"), IOStandard("SSTL15_II")),
Subsignal("ba", Pins("J3 J1 H1"), IOStandard("SSTL15_II")),
Subsignal("a", Pins("K2 K1 K5 M6 H3 L4 M3 K6 G3 G1 J4 E1 F1 J6 H5"), IOStandard("SSTL15_II")),
Subsignal("dq", Pins(
"R3 R1 P2 P1 L3 L1 M2 M1",
"T2 T1 U3 U1 W3 W1 Y2 Y1"), IOStandard("SSTL15_II")),
Subsignal("dqs", Pins("N3 V2"), IOStandard("DIFF_SSTL15_II")),
Subsignal("dqs_n", Pins("N1 V1"), IOStandard("DIFF_SSTL15_II")),
Subsignal("dm", Pins("N4 P3"), IOStandard("SSTL15_II")),
Subsignal("odt", Pins("L6"), IOStandard("SSTL15_II")),
Subsignal("reset_n", Pins("E3"), IOStandard("LVCMOS15")),
Misc("SLEW=FAST"),
Misc("VCCAUX_IO=HIGH")
),
## onboard HDMI IN1
## HDMI - connector J5 - Direction RX
("hdmi_in", 0,
Subsignal("clk_p", Pins("L20"), IOStandard("TMDS_33")),
Subsignal("clk_n", Pins("L22"), IOStandard("TMDS_33")),
Subsignal("data0_p", Pins("M21"), IOStandard("TMDS_33")),
Subsignal("data0_n", Pins("M22"), IOStandard("TMDS_33")),
Subsignal("data1_p", Pins("N20"), IOStandard("TMDS_33")),
Subsignal("data1_n", Pins("N22"), IOStandard("TMDS_33")),
Subsignal("data2_p", Pins("P21"), IOStandard("TMDS_33")),
Subsignal("data2_n", Pins("P22"), IOStandard("TMDS_33")),
Subsignal("scl", Pins("T21"), IOStandard("LVCMOS33")),
Subsignal("sda", Pins("R22"), IOStandard("LVCMOS33")),
Subsignal("hpd_en", Pins("R20"), IOStandard("LVCMOS33"))
),
## onboard HDMI IN2
## HDMI - connector J4 - Direction RX
("hdmi_in", 1,
Subsignal("clk_p", Pins("M20"), IOStandard("TMDS_33")),
Subsignal("clk_n", Pins("M19"), IOStandard("TMDS_33")),
Subsignal("data0_p", Pins("J20"), IOStandard("TMDS_33")),
Subsignal("data0_n", Pins("J22"), IOStandard("TMDS_33")),
Subsignal("data1_p", Pins("H21"), IOStandard("TMDS_33")),
Subsignal("data1_n", Pins("H22"), IOStandard("TMDS_33")),
Subsignal("data2_p", Pins("K20"), IOStandard("TMDS_33")),
Subsignal("data2_n", Pins("L19"), IOStandard("TMDS_33")),
Subsignal("scl", Pins("L17"), IOStandard("LVCMOS33")),
Subsignal("sda", Pins("T18"), IOStandard("LVCMOS33")),
Subsignal("hpd_en", Pins("V19"), IOStandard("LVCMOS33"))
),
# Debug header?
#("debug", 0, Pins("AA2"), IOStandard("LVCMOS15")), # (/FPGA_Bank_0_3/DEBUG_IO0)
## UARTs
# To Cypress FX2 UART0
# WARNING: This was labelled incorrectly - https://github.com/timvideos/HDMI2USB-numato-opsis-hardware/issues/13
# Current use FX2 firmware from https://github.com/mithro/fx2lib/tree/cdc-usb-serialno-from-eeprom/examples/cdc/to-uart
# FIXME: Will be supported by opsis-mode-switch --mode=serial soon.
# FIXME: Will be supported by opsis-mode-siwtch --mode=jtag longer term.
("serial_fx2", 0,
# CY_RXD1 - P18 - Cypress RXD0
Subsignal("tx", Pins("P18"), IOStandard("LVCMOS33")),
# CY_TXD1 - T17 - Cypress TXD0
Subsignal("rx", Pins("T17"), IOStandard("LVCMOS33"), Misc("PULLUP")),
),
# To Cypress FX2 UART1
#("serial", 1,
# Subsignal("rx", Pins("A16"), IOStandard("LVCMOS33")),
# Subsignal("tx", Pins("B16"), IOStandard("LVCMOS33")),
#),
#
# Florent's UART (requires desoldering 2 resistors on the SD card connector)
("serial_sd_card", 0,
# SD_CMD
Subsignal("tx", Pins("U6"), IOStandard("LVCMOS33")),
# SD_DAT0
Subsignal("rx", Pins("AA4"), IOStandard("LVCMOS33"), Misc("PULLUP")),
),
## onboard HDMI OUT1
## HDMI - connector J3 - Direction TX
("hdmi_out", 0,
Subsignal("clk_p", Pins("Y11"), IOStandard("TMDS_33")),
Subsignal("clk_n", Pins("AB11"), IOStandard("TMDS_33")),
Subsignal("data0_p", Pins("W12"), IOStandard("TMDS_33")),
Subsignal("data0_n", Pins("Y12"), IOStandard("TMDS_33")),
Subsignal("data1_p", Pins("AA10"), IOStandard("TMDS_33")),
Subsignal("data1_n", Pins("AB10"), IOStandard("TMDS_33")),
Subsignal("data2_p", Pins("Y9"), IOStandard("TMDS_33")),
Subsignal("data2_n", Pins("AB9"), IOStandard("TMDS_33")),
Subsignal("scl", Pins("Y7"), IOStandard("I2C")),
Subsignal("sda", Pins("Y10"), IOStandard("I2C")),
Subsignal("hpd_notif", Pins("AB7"), IOStandard("LVCMOS33"))
),
## onboard HDMI OUT2
## HDMI - connector J2 - Direction TX
("hdmi_out", 1,
Subsignal("clk_p", Pins("T12"), IOStandard("TMDS_33")),
Subsignal("clk_n", Pins("U12"), IOStandard("TMDS_33")),
Subsignal("data0_p", Pins("Y15"), IOStandard("TMDS_33")),
Subsignal("data0_n", Pins("AB15"), IOStandard("TMDS_33")),
Subsignal("data1_p", Pins("AA16"), IOStandard("TMDS_33")),
Subsignal("data1_n", Pins("AB16"), IOStandard("TMDS_33")),
Subsignal("data2_p", Pins("U14"), IOStandard("TMDS_33")),
Subsignal("data2_n", Pins("U13"), IOStandard("TMDS_33")),
Subsignal("scl", Pins("Y17"), IOStandard("I2C")),
Subsignal("sda", Pins("AB17"), IOStandard("I2C")),
Subsignal("hpd_notif", Pins("AB18"), IOStandard("LVCMOS33"))
),
# TOFE connector
("tofe_eeprom", 0,
Subsignal("scl", Pins("N6"), IOStandard("I2C")),
Subsignal("sda", Pins("N7"), IOStandard("I2C")),
),
# ("fpga_cfg",
# Subsignal("din", Pins("T14")),
# Subsignal("cclk", Pins("R14")),
# Subsignal("init_b", Pins("T12")),
# Subsignal("prog_b", Pins("A2")),
# Subsignal("done", Pins("T15")),
# ),
# ("jtag",
# Subsignal("tms", Pins("B2")),
# Subsignal("tdo", Pins("B16")),
# Subsignal("tdi", Pins("B1")),
# Subsignal("tck", Pins("A15")),
# ),
# TOFE LowSpeedIO board, USB UART
# ("serial", 0,
# # TX(USB->FPGA) == DIFF_IO_XP == C19
# Subsignal("rx", Pins("C19"), IOStandard("LVCMOS33")),
# # RX(USB<-FPGA) == DIFF_IO_XN == A19
# Subsignal("tx", Pins("A19"), IOStandard("LVCMOS33")),
# )
]
# No off-board connector pin mappings are defined for this platform.
_connectors = []
# Human-readable descriptions of the four HDMI ports, keyed by the resource
# name/index used in the _io list.  Port order matches the physical
# left-to-right layout on the Opsis board.
_hdmi_infos = {
    "HDMI_OUT0_MNEMONIC": "TX1",
    "HDMI_OUT0_DESCRIPTION" : (
        " The *first* HDMI port from the left.\\r\\n"
        " Labeled J3 and HDMI Out 1.\\r\\n"
    ),
    "HDMI_OUT1_MNEMONIC": "TX2",
    "HDMI_OUT1_DESCRIPTION" : (
        " The *second* HDMI port from the left.\\r\\n"
        " Labeled J2 and HDMI Out 2.\\r\\n"
    ),
    "HDMI_IN0_MNEMONIC": "RX1",
    "HDMI_IN0_DESCRIPTION" : (
        " The *third* HDMI port from the left.\\r\\n"
        " Labeled J5 and HDMI In 1.\\r\\n"
    ),
    "HDMI_IN1_MNEMONIC": "RX2",
    "HDMI_IN1_DESCRIPTION" : (
        " The *fourth* HDMI port from the left. (Closest to the USB.)\\r\\n"
        " Labeled J4 and HDMI In 2.\\r\\n"
    ),
}
class Platform(XilinxPlatform):
    """Numato Opsis platform (Spartan-6 XC6SLX45T-3FGG484C)."""

    default_clk_name = "clk100"
    default_clk_period = 10.0
    hdmi_infos = _hdmi_infos

    # W25Q128FVEIG (component U3): 128M (16M x 8), 104MHz.
    # Pretends to be a Micron N25Q128 (ID 0x0018ba20).
    # FIXME: Create a "spi flash module" object in the same way we have
    # SDRAM module objects.
    spiflash_read_dummy_bits = 10
    spiflash_clock_div = 4
    spiflash_total_size = int((128/8)*1024*1024)  # 128Mbit
    spiflash_page_size = 256
    spiflash_sector_size = 0x10000

    # The Opsis has a XC6SLX45 whose bitstream takes up ~12Mbit
    # (1484472 bytes); a 0x200000 offset (16Mbit) gives plenty of space.
    gateware_size = 0x200000

    def __init__(self, programmer="openocd"):
        # XC6SLX45T-3FGG484C
        XilinxPlatform.__init__(self, "xc6slx45t-fgg484-3", _io, _connectors)
        self.programmer = programmer
        pull_settings = {
            'ProgPin': 'PullUp',
            'DonePin': 'PullUp',
            'TckPin': 'PullNone',
            'TdiPin': 'PullNone',
            'TdoPin': 'PullNone',
            'TmsPin': 'PullNone',
            'UnusedPin': 'PullNone',
        }
        for pin_name, pull in pull_settings.items():
            self.toolchain.bitgen_opt += " -g %s:%s " % (pin_name, pull)
        # FPGA AUX is connected to the 3.3V supply
        self.add_platform_command("""CONFIG VCCAUX="3.3";""")

    def create_programmer(self):
        """Return a programmer object matching self.programmer."""
        proxy = "bscan_spi_{}.bit".format(self.device.split('-')[0])
        # Preferred programmer - needs ixo-usb-jtag and latest openocd.
        if self.programmer == "openocd":
            return OpenOCD(config="board/numato_opsis.cfg", flash_proxy_basename=proxy)
        # Alternative programmers - not regularly tested.
        if self.programmer == "urjtag":
            return UrJTAG(cable="USBBlaster")
        if self.programmer == "impact":
            return iMPACT()
        raise ValueError("{} programmer is not supported".format(self.programmer))

    def do_finalize(self, fragment):
        XilinxPlatform.do_finalize(self, fragment)

        def constrain(lookup, period):
            # lookup_request raises ConstraintError when the resource was
            # never requested; silently skip the constraint in that case.
            try:
                self.add_period_constraint(lookup(), period)
            except ConstraintError:
                pass

        for i in range(2):
            constrain(lambda i=i: self.lookup_request("hdmi_in", i).clk_p, 12)
        constrain(lambda: self.lookup_request("eth_clocks").rx, 40.0)
        constrain(lambda: self.lookup_request("fx2").ifclk, 20.8)
|
#from scapy.all import *
class ASN1_object(object):
    """Base class for objects that can serialize themselves to BER.

    Attributes:
        tag    -- identifier octet: class_number * 64 + kind_of_type * 32 + type
        length -- length of the encoded value, in octets
        value  -- raw value (simple types) or pre-encoded child octets
                  (constructed types)

    Universal type numbers used by this module include: BOOLEAN (1),
    INTEGER (2), BIT STRING (3), OCTET STRING (4), NULL (5),
    OBJECT IDENTIFIER (6), SEQUENCE/SEQUENCE OF (16), SET/SET OF (17).

    NOTE(review): the original comments listed PRIVATE as class 2 and
    CONTEXT-SPECIFIC as class 3; X.690 defines context-specific as 2
    (binary 10) and private as 3 -- confirm before adding new classes.
    """

    def __init__(self, class_number, kind_of_type, type, length, value):
        self.class_number = class_number
        self.kind_of_type = kind_of_type
        self.type = type
        # Identifier octet: class in bits 7-6, constructed flag in bit 5,
        # tag number in bits 4-0.
        self.tag = (self.class_number * 64) + (self.kind_of_type * 32) + self.type
        self.length = length
        self.value = value

    def encode_value(self):
        """Default encoding: the value is already a list of octets."""
        return self.value

    def get_ber(self):
        """Return the full TLV encoding as a list of integer octets."""
        return [self.tag, self.length] + self.encode_value()
class INTEGER(ASN1_object):
    """ASN.1 INTEGER (universal tag 2), encoded big-endian.

    Fix: the original declared length ``int(value/255)+1`` disagreed with
    its single-octet ``encode_value`` for values >= 128 (e.g. 255 claimed
    length 2 but emitted one octet, and 256+ emitted an octet > 0xFF).
    Non-negative values are now encoded big-endian with a leading 0x00
    octet when the high bit of the first content octet would otherwise be
    set (BER rule for positive integers), and the declared length always
    matches the emitted octets.  Values 0..127 encode exactly as before.
    Negative values are not supported (same as the original).
    """

    def __init__(self, value):
        class_number = 0  # Universal
        kind_of_type = 0  # Simple (primitive)
        type = 2          # INTEGER
        # Length is the number of content octets actually emitted.
        length = len(self._octets(value))
        ASN1_object.__init__(self, class_number, kind_of_type, type, length, value)

    @staticmethod
    def _octets(value):
        # Minimal big-endian encoding for non-negative integers: one extra
        # leading 0x00 byte when bit 7 of the top byte is set.
        n = max(1, (value.bit_length() // 8) + 1)
        return list(value.to_bytes(n, 'big'))

    def encode_value(self):
        """Return the big-endian content octets for this integer."""
        return self._octets(self.value)
class OCTET_STRING(ASN1_object):
    """ASN.1 OCTET STRING (universal tag 4): one octet per character."""

    def __init__(self, value):
        # Universal class (0), simple/primitive form (0), tag number 4;
        # length is simply the character count.
        ASN1_object.__init__(self, 0, 0, 4, len(value), value)

    def encode_value(self):
        """Encode each character as its ordinal value."""
        return [ord(ch) for ch in self.value]
class OBJECT_IDENTIFIER(ASN1_object):
    """ASN.1 OBJECT IDENTIFIER (universal tag 6).

    Fix: the original ``encode_value`` returned the dotted string split
    into *strings* (so ``get_ber()`` mixed str with int octets), and its
    element count disagreed with the declared length.  Per X.690, the
    first two arcs combine into a single octet (40*a + b) and every later
    arc is base-128 encoded with the high bit set on all but its last
    octet.  For small OIDs such as '1.3.6' the declared length
    (arc count minus one) is unchanged.
    """

    def __init__(self, value):
        class_number = 0  # Universal
        kind_of_type = 0  # Simple (primitive)
        type = 6          # OBJECT IDENTIFIER
        # Length is the number of content octets actually emitted.
        length = len(self._octets(value))
        ASN1_object.__init__(self, class_number, kind_of_type, type, length, value)

    @staticmethod
    def _octets(dotted):
        arcs = [int(a) for a in dotted.split('.')]
        # First content octet combines the first two arcs.
        encoded = [40 * arcs[0] + arcs[1]]
        for arc in arcs[2:]:
            # Base-128 encoding, most significant group first; high bit
            # set on every octet except the last of the arc.
            chunk = [arc & 0x7F]
            arc >>= 7
            while arc:
                chunk.insert(0, (arc & 0x7F) | 0x80)
                arc >>= 7
            encoded += chunk
        return encoded

    def encode_value(self):
        """Return the X.690 content octets for this OID."""
        return self._octets(self.value)
class VarBind(ASN1_object):
    """SNMP VarBind: a SEQUENCE pairing an OID name with its value."""

    def __init__(self, name, value):
        self.name = name
        self.val = value
        # Children are pre-encoded; the sequence simply wraps their octets.
        encoded = name.get_ber() + value.get_ber()
        # Universal class, constructed form, SEQUENCE (16).
        ASN1_object.__init__(self, 0, 1, 16, len(encoded), encoded)
class VarBindList(ASN1_object):
    """SNMP VarBindList: a SEQUENCE of VarBind encodings."""

    def __init__(self, varBinds):
        self.varBinds = varBinds
        # Concatenate the pre-encoded children; length is the octet count.
        encoded = []
        for var_bind in varBinds:
            encoded.extend(var_bind.get_ber())
        # Universal class, constructed form, SEQUENCE (16).
        ASN1_object.__init__(self, 0, 1, 16, len(encoded), encoded)
class PDU(ASN1_object):
    """SNMP PDU (GetRequest, GetNextRequest, ...).

    Fix: ``pdu_type`` was stored but never used, and the PDU was emitted
    as a universal SEQUENCE (identifier octet 0x30).  RFC 1157 defines
    PDUs as context-specific constructed types, so the identifier octet
    must be 0xA0 + pdu_type (0xA0 for GetRequest).  Under this module's
    tag formula that is class_number=2, kind_of_type=1, type=pdu_type.
    """

    def __init__(self, pdu_type, request_id, error_status, error_index, variable_bindings):
        self.pdu_type = pdu_type
        self.request_id = request_id
        self.error_status = error_status
        self.error_index = error_index
        self.variable_bindings = variable_bindings
        class_number = 2   # Context-specific (BER class bits 10)
        kind_of_type = 1   # Constructed
        type = pdu_type    # PDU selector: 0=GetRequest, 1=GetNextRequest, ...
        value = (request_id.get_ber() + error_status.get_ber()
                 + error_index.get_ber() + variable_bindings.get_ber())
        length = len(value)
        ASN1_object.__init__(self, class_number, kind_of_type, type, length, value)
class GetRequest(PDU):
    """SNMP GetRequest PDU (pdu_type 0)."""

    def __init__(self, request_id, error_status, error_index, variable_bindings):
        PDU.__init__(self, 0, request_id, error_status, error_index, variable_bindings)
class Message(ASN1_object):
    """Top-level SNMP message: version, community string, and one PDU."""

    def __init__(self, version, community, data):
        self.version = version
        self.community = community
        self.data = data
        # Concatenate the pre-encoded children in wire order.
        body = version.get_ber() + community.get_ber() + data.get_ber()
        # Universal class, constructed form, SEQUENCE (16).
        ASN1_object.__init__(self, 0, 1, 16, len(body), body)
# Demo: build and print the BER encoding of an SNMPv1 GetRequest for OID 1.3.6
# with community string 'public'.
oid = OBJECT_IDENTIFIER('1.3.6')
requested = INTEGER(2)
binding = VarBind(oid, requested)
bindings = VarBindList([binding])
pdu = GetRequest(INTEGER(0), INTEGER(0), INTEGER(0), bindings)
message = Message(INTEGER(1), OCTET_STRING('public'), pdu)
print(message.get_ber())
from dubhe_sdk.config import *
from dubhe_sdk.service.ADCkafka import *
from dubhe_sdk.service import context
from dubhe_sdk.service.service_prepare import *
from concurrent.futures import ThreadPoolExecutor
import socketserver
import sys
import importlib
import random
import typing
import inspect
import traceback
def image_run():
    """Entry point for serving a user model.

    Discovers pipeline classes defined in the main script (sys.argv[0]),
    builds an execution context from the SDK configuration globals
    (INSTANCE_ID, PLATFORM_TYPE, model_path, ...), instantiates the user
    model whose entrance matches TASK_TYPE, then serves it over HTTP or
    TCP depending on the ``protocol`` environment variable.

    On any exception the traceback is logged and, for non-inference tasks
    on the AI platform, a failure status is pushed to kafka.
    """
    try:
        serve_flag = False
        main_file_path = sys.argv[0]
        pipelines = _find_pipelines(main_file_path)
        entrances = _find_entrances(pipelines)
        # Directory for intermediate/debug results.
        TMP_DIR = '/temporary/debug'
        ctxb = context.ContextBuilder(None)\
            .add(context.ContextKey.ENV_INSTANCE_ID, INSTANCE_ID)\
            .add(context.ContextKey.ENV_PLATFORM_TYPE, PLATFORM_TYPE)\
            .add(context.ContextKey.ENV_RELY_MODEL_DATA, RELY_MODEL_DATA)\
            .add(context.ContextKey.ENV_MODEL_PATH, model_path)\
            .add(context.ContextKey.ENV_SPECIAL_PARAMS, special_params)\
            .add(context.ContextKey.KEY_TMP_DIR, TMP_DIR)
        ctx = ctxb.build()
        for entrance in entrances:
            func_type = entrance[2]
            # INSTANCE_ID examples:
            #   training:  modelid1412-algorithm112345540-train-1412-4
            #   inference: taskid84-application170644901-inference-84
            if func_type == TASK_TYPE:
                module = entrance[0]
                name = entrance[1]
                user_Model = module.__getattribute__(name)
                Model = user_Model(ctx)
                function = entrance[3]
                # Bind the unbound pipeline function to the model instance.
                user_func = function.__get__(Model)
                model_infos = (func_type, user_func, ctxb)
                if 'protocol' in os.environ and str(os.environ['protocol']) == "http":
                    # HTTP transport
                    from dubhe_sdk.http_server import app, setModel
                    setModel(value=model_infos)
                    ThreadPoolExecutor(1).submit(open_browser)
                    app.run(host=IP, port=PORT, threaded=THREADED)
                else:
                    # TCP transport
                    from dubhe_sdk.tcp_server import TcpHandler, setModel
                    setModel(value=model_infos)
                    ThreadPoolExecutor(1).submit(connect_tcp, IP, PORT)
                    server = socketserver.ThreadingTCPServer((IP, PORT), TcpHandler)
                    server.serve_forever()
                # NOTE(review): app.run() and serve_forever() both block, so
                # this line is only reached after the server stops -- confirm
                # the 'Serve not found' check below ever fires as intended.
                serve_flag = True
        if serve_flag == False:
            raise Exception('Serve not found!')
    except Exception as ex:
        traceback.print_exc()
        exceptionMSG = traceback.format_exc()
        # For non-inference tasks on the AI platform, report the failure to
        # kafka so the platform can mark the run as failed.
        if TASK_TYPE != TASK_INFERENCE_TYPE and PLATFORM_TYPE == AI_PLATFORM:
            end_json = train_exception_end_data('%s current model error %s' % (INSTANCE_ID, ex))
            send_kafka(MODEL_STATUS, end_json, TOPIC_MODEL_STATUS)
        logger.error(exceptionMSG)
def _find_pipelines(filename: str) -> typing.List[typing.Tuple]:
pipelines = []
assert filename[-3:] == '.py' and os.path.isfile(filename), 'Python entry point is not correct'
_, module_name_with_ext = os.path.split(filename)
module_name, _ = os.path.splitext(module_name_with_ext)
module_spec = importlib.util.spec_from_file_location(module_name, filename)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
for name in dir(module):
if name.startswith('__'):
continue
obj = module.__getattribute__(name)
if hasattr(obj, '_is_pipeline_func'):
pipelines.append((module, name))
return pipelines
def _find_entrances(pipelines: typing.List[typing.Tuple]) -> typing.List[typing.Tuple]:
entrances = []
for pipe in pipelines:
module = pipe[0]
name = pipe[1]
obj = module.__getattribute__(name)
entrance = None
for _, function in inspect.getmembers(
obj,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x),
):
func_type = None
if hasattr(function, "_is_train_func"):
func_type = TASK_TRAIN_TYPE
elif hasattr(function, "_is_predict_func"):
func_type = TASK_PREDICT_TYPE
elif hasattr(function, '_is_inference_func'):
func_type = TASK_INFERENCE_TYPE
if func_type is not None:
entrance = (module, name, func_type, function)
entrances.append(entrance)
return entrances
# def taskType():
# curType = INSTANCE_ID.split('-')[2]
# if curType == PREFIX_TRAIN:
# return TASK_TRAIN_TYPE
# elif curType == PREFIX_PREDICT:
# return TASK_PREDICT_TYPE
# elif curType == PREFIX_INFERENCE or curType == PREFIX_AUTOMARK:
# return TASK_INFERENCE_TYPE
# else:
# logger.error("TASK %s PREFIX not defined!"%curType)
# return -1 |
import os
import re
import emoji
import twitter
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
from font_fredoka_one import FredokaOne
# Resolve paths relative to this file and load Twitter credentials from the
# mclare env file one directory up.
PATH = os.path.dirname(os.path.abspath(__file__))
load_dotenv(PATH + '/../mclare.env')

# Set font: prefer the TTF pointed to by DANK_MONO_ITALIC, falling back to
# the bundled Fredoka One face.
# NOTE(review): ImageFont.truetype raises OSError for a missing/unreadable
# file and TypeError/AttributeError when the env var is unset -- confirm the
# ValueError handler actually covers the intended failure mode.
try:
    ttf = ImageFont.truetype(os.getenv("DANK_MONO_ITALIC"), size=36)
except ValueError:
    ttf = ImageFont.truetype(FredokaOne, size=36)
def strip_emoji(text):
    """Return *text* with every emoji character removed."""
    return re.sub(emoji.get_emoji_regexp(), r"", text)
def format_line(font, msg, width):
    """Greedy word-wrap *msg* to fit *width* pixels when rendered in *font*.

    Args:
        font: object exposing ``getsize(str) -> (w, h)`` (PIL ImageFont API).
        msg: the text to wrap.
        width: maximum line width in pixels.

    Returns:
        list[str]: the wrapped lines.  When wrapping occurs, each line keeps
        its trailing space (preserving the original behavior).

    Fix: the original appended an empty first line whenever the very first
    token was already wider than *width* (``cur_line`` was flushed while
    still empty); empty segments are no longer emitted.
    """
    lines = []
    w, h = font.getsize(msg)
    if w <= width:
        lines.append(msg)
    else:
        toks = msg.split()
        cur_line = ''
        for tok in toks:
            cur_w, _ = font.getsize(cur_line + tok + ' ')
            if cur_w <= width:
                cur_line = cur_line + tok + ' '
            else:
                # Flush the current line, but never emit an empty one
                # (happens when a single token alone exceeds the width).
                if cur_line:
                    lines.append(cur_line)
                cur_line = tok + ' '
        lines.append(cur_line)
    return lines
def get_tweet_img(width, height, toFile=True):
    """Render the latest care tweet centered in a white width x height image.

    When *toFile* is true the image is also saved to assets/tweet.png.
    Returns the PIL Image either way.
    """
    font = ttf
    text = get_recent_care_tweet()
    padding = 2 * 10  # 10 px margin on each side
    lines = format_line(font, text, width - padding)

    # Vertical offset so the whole text block is centered.
    _, line_height = font.getsize(lines[0])
    y_cursor = (height / 2) - ((line_height * len(lines)) / 2)

    img = Image.new("RGB", (width, height), (255, 255, 255))
    draw = ImageDraw.Draw(img)
    for line in lines:
        line_w, line_h = font.getsize(line)
        # Center each line horizontally.
        draw.text(((width / 2) - (line_w / 2), y_cursor), line, (0, 0, 0), font)
        y_cursor += line_h

    if toFile:
        img.save(PATH + '/../assets/tweet.png', format='png')
    return img
def get_recent_care_tweet():
    """Fetch the text of the most recent tweet from the TTC bot account,
    stripped of emoji and surrounding whitespace."""
    api = twitter.Api(
        consumer_key=os.getenv("TTC_CONSUMER_KEY"),
        consumer_secret=os.getenv("TTC_CONSUMER_SECRET"),
        application_only_auth=True,
    )
    latest = api.GetUserTimeline(screen_name=os.getenv("TTC_BOT"))[0].text.strip()
    return strip_emoji(latest)
if __name__ == "__main__":
    # Fix: the function defined above is get_tweet_img; the original called
    # the undefined name get_tweet_image, raising NameError at runtime.
    get_tweet_img(400, 400)
|
# for getting inputs passed to program
import sys
# for coercing bytestring<->python dict
from json import dumps, loads
# for retrieving/setting environment variables
from os import environ
# for encoding
from base64 import b64decode, b64encode
# for making request
import http.client
from dotenv import find_dotenv
# for encrypting token
from nacl import public, encoding
# for verifying token
from jose import jwt
# GITHUB
ACCESS_TOKEN = sys.argv[1]  # personal access token (from the developer tab)
TESTING_TOKEN = sys.argv[2]  # current testing token stored in GitHub secrets
# AUTH0
DOMAIN = sys.argv[3]  # Auth0 tenant domain
# AUDIENCE = sys.argv[4] # audience of auth0
AUDIENCE_TESTING_TOKEN= sys.argv[4]  # Auth0 API audience for the testing token
CLIENT_ID = sys.argv[5]  # Auth0 client id
CLIENT_SECRET = sys.argv[6]  # Auth0 client secret
GRANT_TYPE = sys.argv[7]  # Auth0 grant type (e.g. client_credentials)
ALGORITHMS = sys.argv[8].split(',')  # signing algorithms accepted by Auth0
def set_testing_token():
    """Persist TESTING_TOKEN from the environment into .testing_token_env.

    Django tests later locate this file with find_dotenv and load_dotenv it,
    which makes the token available via the environment.

    Fix: the original guard was ``if env_location in '':`` -- a substring
    test that is only true for the empty string by accident.  Replaced with
    an explicit falsiness check.
    """
    env_location = find_dotenv('.testing_token_env')
    if not env_location:
        # find_dotenv returns '' when the file does not exist yet;
        # create it in the current directory.
        env_location = '.testing_token_env'
    with open(env_location, 'w') as f:
        f.write(f'TESTING_TOKEN={environ.get("TESTING_TOKEN")}')
def fetch_new_testing_token():
    """POST to the Auth0 /oauth/token endpoint for a fresh testing token and
    store it in the TESTING_TOKEN environment variable."""
    conn = http.client.HTTPSConnection(DOMAIN)
    payload = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'grant_type': GRANT_TYPE,
        'audience': AUDIENCE_TESTING_TOKEN,
    }
    conn.request('POST', '/oauth/token', dumps(payload), headers={'content-type': 'application/json'})
    response_body = loads(conn.getresponse().read().decode())
    environ['TESTING_TOKEN'] = response_body.get('access_token')
def save_key():
    """
    Consecutively executes three important steps in order to save a new testing key to github secrets, to be used in future github action workflows.
    Steps:
    - Define a header dict to be used throughout method
    - Create a pending connection to api.github.com
    1. Get repo public key to encrypt new token
        - make a request to: GET '/repos/{owner}/{repo}/actions/secrets/public-key
        - read response, decode back to utf-8, now string, coerce value to dict
        - retrieve following: key_id, key
    2. Encrypt new token with public key (https://pynacl.readthedocs.io/en/stable/public/#nacl-public-sealedbox)
        - Create instance of PublicKey which corresponds with private key (public_key bytes, encoder that is able to decode the public_key)
        - create instance of a SealedBox using receiver key (public_key received from api call in step 1)
        - encrypt using the sealedbox (plaintext), encrypt using base64, then decode to string
    3. Update secret/Create secret with encrypted token
        - make a request to: PUT '/repos/{owner}/{repo}/actions/secrets/{SECRET_NAME}
        - needs body: key_id, encrypted_value
    """
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': f'token {ACCESS_TOKEN}',
        'user-agent': 'python'
    }
    conn = http.client.HTTPSConnection('api.github.com')
    """
    >>>> Get public key, and key_id
    """
    conn.request('GET',
                 '/repos/molinitomario/feynman-it/actions/secrets/public-key',
                 headers=headers)
    res = loads(conn.getresponse().read().decode())
    public_key_id = res.get('key_id')
    public_key = res.get('key')
    """
    >>>> Encrypt new token with public key from step above
    """
    # SealedBox encrypts to the repo's public key so only GitHub can decrypt.
    public_key = public.PublicKey(public_key.encode(), encoding.Base64Encoder())
    sealed_box = public.SealedBox(public_key)
    token_encrypted = b64encode(sealed_box.encrypt(environ.get('TESTING_TOKEN').encode())).decode()
    """
    >>>> Update secret with encrypted value
    """
    conn.request('PUT',
                 '/repos/molinitomario/feynman-it/actions/secrets/TESTING_TOKEN',
                 dumps({'key_id': public_key_id,
                        'encrypted_value': token_encrypted}),
                 headers=headers)
    res = conn.getresponse()
    print(res.status)
    # 204 = response when updating a secret
    # 200 = response when creating a secret
    if res.status == 200 or res.status == 204:
        print('Successfully set secret with new encrypted value')
    else:
        # this should stop the test from running and prevent workflow from finishing thus preventing the push to the repo
        raise Exception('Could not set secret')
def verify_jwt():
    """Validate TESTING_TOKEN against the Auth0 JWKS; on failure fetch a new
    token, store it in GitHub secrets, and persist it locally.

    Fix: JWT segments are base64url-encoded *without* padding, so the
    original ``b64decode(TESTING_TOKEN.split('.')[0])`` raised
    binascii.Error for most tokens.  The header segment is now padded back
    to a multiple of 4 and decoded with the URL-safe alphabet ('-_').
    """
    # get jwks that are available at the .well-known/jwks.json route
    conn = http.client.HTTPSConnection(DOMAIN)
    conn.request('GET', '/.well-known/jwks.json')
    jwks = loads(conn.getresponse().read().decode()).get('keys')
    # decode the JWT header (first dot-separated segment) to get its kid
    header_segment = TESTING_TOKEN.split('.')[0]
    header_segment += '=' * (-len(header_segment) % 4)  # restore stripped padding
    kid = loads(b64decode(header_segment, altchars='-_').decode()).get('kid')
    # keep only RSA signing keys that carry a kid
    jwks = [key for key in jwks if 'kid' in key and key.get('use') == 'sig' and key.get('kty') == 'RSA']
    # select the key whose kid matches the token header
    jwk = list(filter(lambda key: key.get('kid') == kid, jwks))[0]
    print('>>> Below is the testing token')
    print(TESTING_TOKEN)
    try:
        # verify signature, audience, and issuer with the matching JWK
        jwt.decode(token=TESTING_TOKEN, key=jwk, audience=AUDIENCE_TESTING_TOKEN, algorithms=ALGORITHMS, issuer=f'https://{DOMAIN}/')
        # token is still valid: reuse it as-is
        environ['TESTING_TOKEN'] = TESTING_TOKEN
        print('No need to set or create a testing token, current token is valid.')
    except Exception as e:
        print('Was invalid')
        # invalid/expired: mint a new token from Auth0 ...
        fetch_new_testing_token()
        # ... encrypt and store it in the repo's secrets ...
        save_key()
        # ... and write it to the local env file for the test run
        set_testing_token()
# Entry point: validate the current testing token and rotate it if invalid.
if __name__ == '__main__':
    verify_jwt()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from cadastro import Cadastro
from pessoa import Pessoa
class Ui_TelaCadastro(object):
    # Registration ("cadastro") form: the widget layout in setupUi/retranslateUi
    # is pyuic5-generated; the slot handlers botaoCadastra/botaoBusca and the
    # wiring at the end of setupUi are manual additions.
    def setupUi(self, TelaCadastro):
        """Build all widgets of the form and connect the two button handlers."""
        TelaCadastro.setObjectName("TelaCadastro")
        TelaCadastro.resize(656, 488)
        font = QtGui.QFont()
        font.setFamily("Hack")
        TelaCadastro.setFont(font)
        TelaCadastro.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        # "CADASTRO" section heading
        self.label = QtWidgets.QLabel(TelaCadastro)
        self.label.setGeometry(QtCore.QRect(280, 0, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # registration inputs: lineEdit=name, lineEdit_2=CPF,
        # lineEdit_3=address, lineEdit_4=birth date (see retranslateUi labels)
        self.lineEdit = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit.setGeometry(QtCore.QRect(200, 50, 261, 21))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(TelaCadastro)
        self.label_2.setGeometry(QtCore.QRect(160, 50, 41, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.lineEdit_2 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_2.setGeometry(QtCore.QRect(200, 90, 261, 21))
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.label_3 = QtWidgets.QLabel(TelaCadastro)
        self.label_3.setGeometry(QtCore.QRect(170, 90, 31, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.lineEdit_3 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_3.setGeometry(QtCore.QRect(200, 130, 261, 21))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.label_4 = QtWidgets.QLabel(TelaCadastro)
        self.label_4.setGeometry(QtCore.QRect(120, 130, 81, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.lineEdit_4 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_4.setGeometry(QtCore.QRect(200, 170, 261, 21))
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.label_5 = QtWidgets.QLabel(TelaCadastro)
        self.label_5.setGeometry(QtCore.QRect(100, 170, 101, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        # "CADASTRAR" (register) button
        self.pushButton = QtWidgets.QPushButton(TelaCadastro)
        self.pushButton.setGeometry(QtCore.QRect(280, 210, 91, 25))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        # "BUSCAR" (search) section heading
        self.label_6 = QtWidgets.QLabel(TelaCadastro)
        self.label_6.setGeometry(QtCore.QRect(290, 250, 71, 31))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        # search input: lineEdit_5 = CPF to look up
        self.lineEdit_5 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_5.setGeometry(QtCore.QRect(150, 300, 261, 21))
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.label_7 = QtWidgets.QLabel(TelaCadastro)
        self.label_7.setGeometry(QtCore.QRect(120, 300, 31, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        # "Buscar" (search) button
        self.pushButton_2 = QtWidgets.QPushButton(TelaCadastro)
        self.pushButton_2.setGeometry(QtCore.QRect(430, 300, 81, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        # horizontal separator between the register and search sections
        self.line = QtWidgets.QFrame(TelaCadastro)
        self.line.setGeometry(QtCore.QRect(7, 240, 641, 20))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        # search result outputs: lineEdit_6=name, lineEdit_7=address,
        # lineEdit_8=birth date (filled by botaoBusca)
        self.lineEdit_6 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_6.setGeometry(QtCore.QRect(200, 370, 261, 21))
        self.lineEdit_6.setObjectName("lineEdit_6")
        self.label_8 = QtWidgets.QLabel(TelaCadastro)
        self.label_8.setGeometry(QtCore.QRect(100, 450, 101, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.lineEdit_7 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_7.setGeometry(QtCore.QRect(200, 410, 261, 21))
        self.lineEdit_7.setObjectName("lineEdit_7")
        self.lineEdit_8 = QtWidgets.QLineEdit(TelaCadastro)
        self.lineEdit_8.setGeometry(QtCore.QRect(200, 450, 261, 21))
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.label_9 = QtWidgets.QLabel(TelaCadastro)
        self.label_9.setGeometry(QtCore.QRect(150, 370, 41, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_9.setFont(font)
        self.label_9.setObjectName("label_9")
        self.label_10 = QtWidgets.QLabel(TelaCadastro)
        self.label_10.setGeometry(QtCore.QRect(120, 410, 71, 21))
        font = QtGui.QFont()
        font.setFamily("Hack")
        self.label_10.setFont(font)
        self.label_10.setObjectName("label_10")
        self.retranslateUi(TelaCadastro)
        QtCore.QMetaObject.connectSlotsByName(TelaCadastro)
        # --- manual additions ("modificações") below, not generated by pyuic5 ---
        """modificações"""
        self.cad = Cadastro()
        self.pushButton.clicked.connect(self.botaoCadastra)
        self.pushButton_2.clicked.connect(self.botaoBusca)
    def botaoCadastra(self):
        """Register-button handler: validate the four inputs and store a Pessoa.

        Shows a message box on success, on duplicate CPF, or when any field
        is left empty; clears the inputs only after a successful registration.
        """
        nome = self.lineEdit.text()
        endereco = self.lineEdit_3.text()
        cpf = self.lineEdit_2.text()
        nascimento = self.lineEdit_4.text()
        if not(nome == '' or endereco == '' or cpf == '' or nascimento == ''):
            p = Pessoa(nome, endereco, cpf, nascimento)
            # Cadastro.cadastra is expected to return falsy when the CPF
            # already exists -- TODO confirm against the Cadastro class
            if(self.cad.cadastra(p)):
                QMessageBox.information(None, 'POOII', 'Cadastro realizado com sucesso!')
                self.lineEdit.setText('')
                self.lineEdit_2.setText('')
                self.lineEdit_3.setText('')
                self.lineEdit_4.setText('')
            else:
                QMessageBox.information(None, 'POOII', 'CPF informado já existe!')
        else:
            QMessageBox.information(None, 'POOII', 'Todas as informações devem ser preenchidas!')
    def botaoBusca(self):
        """Search-button handler: look up a Pessoa by CPF and show its fields."""
        cpf = self.lineEdit_5.text()
        pessoa = self.cad.busca(cpf)
        if(pessoa != None):
            self.lineEdit_6.setText(pessoa.nome)
            self.lineEdit_7.setText(pessoa.endereco)
            self.lineEdit_8.setText(pessoa.nascimento)
        else:
            QMessageBox.information(None,'POOII', 'CPF não encontrado!')
    def retranslateUi(self, TelaCadastro):
        """Set all user-visible texts (pyuic5-generated)."""
        _translate = QtCore.QCoreApplication.translate
        TelaCadastro.setWindowTitle(_translate("TelaCadastro", "Tela de Cadastro"))
        self.label.setText(_translate("TelaCadastro", "CADASTRO"))
        self.label_2.setText(_translate("TelaCadastro", "NOME"))
        self.label_3.setText(_translate("TelaCadastro", "CPF"))
        self.label_4.setText(_translate("TelaCadastro", "ENDEREÇO"))
        self.label_5.setText(_translate("TelaCadastro", "NASCIMENTO"))
        self.pushButton.setText(_translate("TelaCadastro", "CADASTRAR"))
        self.label_6.setText(_translate("TelaCadastro", "BUSCAR"))
        self.label_7.setText(_translate("TelaCadastro", "CPF"))
        self.pushButton_2.setText(_translate("TelaCadastro", "Buscar"))
        self.label_8.setText(_translate("TelaCadastro", "NASCIMENTO"))
        self.label_9.setText(_translate("TelaCadastro", "NOME"))
        self.label_10.setText(_translate("TelaCadastro", "ENDEREÇO"))
if __name__ == "__main__":
    # Manual smoke test: host the generated UI in a bare QFrame and run the
    # Qt event loop until the window is closed.
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    TelaCadastro = QtWidgets.QFrame()
    form = Ui_TelaCadastro()
    form.setupUi(TelaCadastro)
    TelaCadastro.show()
    sys.exit(qt_app.exec_())
|
# Copyright 2018 Michael DeHaan LLC, <michael@michaeldehaan.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from opsmop.core.errors import ValidationError
class Validators(object):
    """Validation helpers bound to a single resource.

    Each check either passes quietly or raises ``ValidationError`` carrying
    the offending resource, so callers get a uniform error type.
    """

    def __init__(self, resource):
        # The resource whose fields are validated; it is attached to any
        # ValidationError raised so error messages can identify the source.
        self.resource = resource

    def mutually_exclusive(self, fields):
        """Raise ``ValidationError`` if more than one of *fields* is truthy on the resource."""
        values = [f for f in fields if getattr(self.resource, f)]
        if len(values) > 1:
            raise ValidationError(self.resource, "fields are mutually exclusive: %s" % fields)

    def path_exists(self, path):
        """Check that *path* exists after env-var and ``~`` expansion.

        Returns ``False`` when *path* is ``None`` (nothing to check) and
        ``True`` when the expanded path exists; raises ``ValidationError``
        otherwise.
        """
        if path is None:
            return False
        # FIXME use the FileTest module, don't duplicate this here
        path = os.path.expandvars(os.path.expanduser(path))
        if not os.path.exists(path):
            raise ValidationError(self.resource, "path does not exist: %s" % path)
        # BUG FIX: previously fell through and returned None on success,
        # making the "exists" and "nothing to check" outcomes both falsy.
        return True
|
import logging
import sys
from contextvars import copy_context
from typing import Optional
import os
# Name of the context variable that carries the request trace id
# (looked up by KubricLogAdapter.process below).
TRACE_ID_KEY = 'trace_id'
# APP_ENV value (also the fallback when APP_ENV is unset) that selects the
# colorful ANSI formatter in get_logger.
TEST_ENV = 'test'
# Default log line layout used by get_logger.
DEFAULT_FORMAT_STR = '%(levelname)s:%(filename)s:%(lineno)d: %(message)s'
class KubricLogAdapter(logging.LoggerAdapter):
    """
    Logger adapter that prefixes log messages with the current trace id.

    If a ``trace_id`` context variable is set in the active context, the
    message becomes ``[<trace_id>] <msg>``; otherwise it is left untouched.
    """

    def process(self, msg, kwargs):
        """Prepend the context's trace id (if any) to *msg*."""
        trace_id = None
        # contextvars has no by-name lookup, so scan every variable in a
        # snapshot of the current context and keep the one named TRACE_ID_KEY.
        for var, value in copy_context().items():
            if var.name == TRACE_ID_KEY:
                trace_id = value
        if trace_id:
            return f'[{trace_id}] {msg}', kwargs
        return msg, kwargs
class Colors:
    """ANSI escape sequences used by ColorfulFormatter to colorize output."""
    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"  # clears any active color/style attributes
class ColorfulFormatter(logging.Formatter):
    """Logging Formatter to add colors and count warning / errors.

    Wraps the configured format string in a per-severity ANSI color escape
    (see ``Colors``) and formats each record with the variant matching its
    level.
    """

    def __init__(self, fmt: str, datefmt: Optional[str] = None, style: str = '%',
                 validate: bool = True) -> None:
        super().__init__(fmt=fmt, datefmt=datefmt, style=style,
                         validate=validate)
        # Per-level format strings, each wrapped in the color for that level.
        self.FORMATS = {
            logging.DEBUG: Colors.grey + fmt + Colors.reset,
            logging.INFO: Colors.grey + fmt + Colors.reset,
            logging.WARNING: Colors.yellow + fmt + Colors.reset,
            logging.ERROR: Colors.red + fmt + Colors.reset,
            logging.CRITICAL: Colors.bold_red + fmt + Colors.reset
        }

    def format(self, record):
        """Format *record* with the colorized format for its level."""
        log_fmt = self.FORMATS.get(record.levelno)
        # BUG FIX: propagate self.datefmt to the per-call formatter; the
        # original built logging.Formatter(log_fmt) and silently dropped any
        # datefmt passed to __init__, so %(asctime)s ignored it.
        formatter = logging.Formatter(log_fmt, datefmt=self.datefmt)
        return formatter.format(record)
def get_logger(name: Optional[str] = None, handler: Optional[logging.Handler] = None, level=logging.INFO,
               formatting: str = DEFAULT_FORMAT_STR, propagate: bool = False,
               print_trace_id: bool = True):
    """Create and configure a logger for this service.

    Replaces any previously attached handlers with a single handler (a
    stdout StreamHandler by default) and, when ``print_trace_id`` is true,
    wraps the logger in a KubricLogAdapter so messages carry the context's
    trace id. Returns the logger (or the adapter wrapping it).
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.propagate = propagate
    if not handler:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    # If APP_ENV is `test` or unset we use a colorful formatter 🌈.
    # Else we use a plain formatter to avoid passing ANSI color characters
    # into staging/prod env logs
    app_env = os.getenv('APP_ENV', TEST_ENV)
    if app_env == TEST_ENV:
        formatter = ColorfulFormatter(formatting)
    else:
        formatter = logging.Formatter(formatting)
    handler.setFormatter(formatter)
    if logger.hasHandlers():
        # To prevent the same stream handler from being added multiple times to the
        # same logger. If the same handler (stdout in this case) is added multiple
        # times to the same logger then each log will show up more and more times in
        # that stream.
        logger.handlers.clear()
    logger.addHandler(handler)
    if print_trace_id:
        # LoggerAdapter.process prepends the trace id to every message
        logger = KubricLogAdapter(logger, {})
    return logger
|
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
from astropy import units as u
from regions import CircleAnnulusSkyRegion, RectangleSkyRegion
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.maps import MapAxis
from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
from gammapy.stats import CashCountsStatistic, WStatCountsStatistic
from gammapy.utils.table import table_from_row_data
from .core import Estimator
__all__ = ["ExcessProfileEstimator"]
class ExcessProfileEstimator(Estimator):
    """Estimate profile from a DataSet.
    Parameters
    ----------
    regions : list of `regions`
        regions to use
    energy_edges : `~astropy.units.Quantity`
        Energy edges of the profiles to be computed.
    n_sigma : float (optional)
        Number of sigma to compute errors. By default, it is 1.
    n_sigma_ul : float (optional)
        Number of sigma to compute upper limit. By default, it is 3.
    spectrum : `~gammapy.modeling.models.SpectralModel` (optional)
        Spectral model to compute the fluxes or brightness.
        Default is power-law with spectral index of 2.
    selection_optional : list of str
        Additional quantities to be estimated. Possible options are:
        * "errn-errp": estimate asymmetric errors.
        * "ul": estimate upper limits.
        By default all quantities are estimated.
    Examples
    --------
    This example shows how to compute a counts profile for the Fermi galactic
    center region::
        import matplotlib.pyplot as plt
        from astropy import units as u
        from astropy.coordinates import SkyCoord
        from gammapy.data import GTI
        from gammapy.estimators import ExcessProfileEstimator, ImageProfile
        from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions
        from gammapy.datasets import Datasets
        # load example data
        datasets = Datasets.read("$GAMMAPY_DATA/fermi-3fhl-crab/",
            "Fermi-LAT-3FHL_datasets.yaml", "Fermi-LAT-3FHL_models.yaml")
        # configuration
        datasets[0].gti = GTI.create("0s", "1e7s", "2010-01-01")
        # creation of the boxes and axis
        start_line = SkyCoord(182.5, -5.8, unit='deg', frame='galactic')
        end_line = SkyCoord(186.5, -5.8, unit='deg', frame='galactic')
        boxes, axis = make_orthogonal_rectangle_sky_regions(start_line,
                                        end_line,
                                        datasets[0].counts.geom.wcs,
                                        1.*u.deg,
                                        11)
        # set up profile estimator and run
        prof_maker = ExcessProfileEstimator(boxes, axis)
        fermi_prof = prof_maker.run(datasets[0])
        # smooth and plot the data using the ImageProfile class
        fermi_prof.peek()
        plt.show()
        ax = plt.gca()
        ax.set_yscale('log')
        ax = fermi_prof.plot("flux", ax=ax)
    """
    tag = "ExcessProfileEstimator"
    _available_selection_optional = ["errn-errp", "ul", "scan"]
    def __init__(
        self,
        regions,
        energy_edges=None,
        spectrum=None,
        n_sigma=1.0,
        n_sigma_ul=3.0,
        selection_optional="all",
    ):
        self.regions = regions
        self.n_sigma = n_sigma
        self.n_sigma_ul = n_sigma_ul
        # normalize energy edges to an astropy Quantity (None = no resampling)
        self.energy_edges = (
            u.Quantity(energy_edges) if energy_edges is not None else None
        )
        if spectrum is None:
            spectrum = PowerLawSpectralModel()
        self.spectrum = spectrum
        self.selection_optional = selection_optional
    def get_spectrum_datasets(self, dataset):
        """ Utility to make the final `~gammapy.datasts.Datasets`
        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            the dataset to use for profile extraction
        Returns
        -------
        sp_datasets : array of `~gammapy.datasets.SpectrumDataset`
            the list of `~gammapy.datasets.SpectrumDataset` computed in each box
        """
        datasets = Datasets()
        # one spectrum dataset per profile region
        for reg in self.regions:
            spectrum_dataset = dataset.to_spectrum_dataset(reg)
            datasets.append(spectrum_dataset)
        return datasets
    def _get_projected_distance(self):
        # Build the profile x-axis: annuli use their mean radius, any other
        # region its angular separation from the first region's center.
        distances = []
        center = self.regions[0].center
        for idx, region in enumerate(self.regions):
            if isinstance(region, CircleAnnulusSkyRegion):
                distance = (region.inner_radius + region.outer_radius) / 2.0
            else:
                distance = center.separation(region.center)
            distances.append(distance)
        return MapAxis.from_nodes(
            u.Quantity(distances, "deg"), name="projected distance"
        )
    def make_prof(self, sp_datasets):
        """ Utility to make the profile in each region
        Parameters
        ----------
        sp_datasets : `~gammapy.datasets.MapDatasets` of `~gammapy.datasets.SpectrumDataset` or \
        `~gammapy.datasets.SpectrumDatasetOnOff`
            the dataset to use for profile extraction
        Returns
        -------
        results : list of dictionary
            the list of results (list of keys: x_min, x_ref, x_max, alpha, counts, background, excess, ts, sqrt_ts, \
        err, errn, errp, ul, exposure, solid_angle)
        """
        results = []
        distance = self._get_projected_distance()
        for index, spds in enumerate(sp_datasets):
            # temporarily install the reference spectral model; the dataset's
            # own models are restored at the end of the iteration
            old_model = None
            if spds.models is not None:
                old_model = spds.models
            spds.models = SkyModel(spectral_model=self.spectrum)
            e_reco = spds.counts.geom.axes["energy"].edges
            # ToDo: When the function to_spectrum_dataset will manage the masks, use the following line
            # mask = spds.mask if spds.mask is not None else slice(None)
            mask = slice(None)
            # choose the counts statistic matching the dataset type:
            # WStat for on/off data, Cash otherwise
            if isinstance(spds, SpectrumDatasetOnOff):
                stats = WStatCountsStatistic(
                    spds.counts.data[mask][:, 0, 0],
                    spds.counts_off.data[mask][:, 0, 0],
                    spds.alpha.data[mask][:, 0, 0],
                )
            else:
                stats = CashCountsStatistic(
                    spds.counts.data[mask][:, 0, 0],
                    spds.npred_background().data[mask][:, 0, 0],
                )
            result = {
                "x_min": distance.edges[index],
                "x_max": distance.edges[index + 1],
                "x_ref": distance.center[index],
                "energy_edge": e_reco,
            }
            if isinstance(spds, SpectrumDatasetOnOff):
                result["alpha"] = stats.alpha
            result.update(
                {
                    "counts": stats.n_on,
                    "background": stats.n_bkg,
                    "excess": stats.n_sig,
                }
            )
            result["ts"] = stats.ts
            result["sqrt_ts"] = stats.sqrt_ts
            result["err"] = stats.error * self.n_sigma
            if "errn-errp" in self.selection_optional:
                result["errn"] = stats.compute_errn(self.n_sigma)
                result["errp"] = stats.compute_errp(self.n_sigma)
            if "ul" in self.selection_optional:
                result["ul"] = stats.compute_upper_limit(self.n_sigma_ul)
            # convert excess counts to flux by scaling the reference model's
            # integral flux with the measured/predicted counts ratio
            npred = spds.npred().data[mask][:, 0, 0]
            e_reco_lo = e_reco[:-1]
            e_reco_hi = e_reco[1:]
            flux = (
                stats.n_sig
                / npred
                * spds.models[0].spectral_model.integral(e_reco_lo, e_reco_hi).value
            )
            result["flux"] = flux
            result["flux_err"] = stats.error / stats.n_sig * flux
            if "errn-errp" in self.selection_optional:
                result["flux_errn"] = np.abs(result["errn"]) / stats.n_sig * flux
                result["flux_errp"] = result["errp"] / stats.n_sig * flux
            if "ul" in self.selection_optional:
                result["flux_ul"] = result["ul"] / stats.n_sig * flux
            solid_angle = spds.counts.geom.solid_angle()
            result["solid_angle"] = (
                np.full(result["counts"].shape, solid_angle.to_value("sr")) * u.sr
            )
            results.append(result)
            if old_model is not None:
                spds.models = old_model
        return results
    def run(self, dataset):
        """Make the profiles
        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            the dataset to use for profile extraction
        Returns
        -------
        imageprofile : `~gammapy.estimators.ImageProfile`
            Return an image profile class containing the result
        """
        # resample to the requested energy binning, or collapse to an image
        if self.energy_edges is not None:
            axis = MapAxis.from_energy_edges(self.energy_edges)
            dataset = dataset.resample_energy_axis(energy_axis=axis)
        else:
            dataset = dataset.to_image()
        spectrum_datasets = self.get_spectrum_datasets(dataset)
        results = self.make_prof(spectrum_datasets)
        table = table_from_row_data(results)
        if isinstance(self.regions[0], RectangleSkyRegion):
            table.meta["PROFILE_TYPE"] = "orthogonal_rectangle"
        table.meta["SPECTRAL_MODEL"] = self.spectrum.to_dict()
        # return ImageProfile(table)
        return table
|
from functools import lru_cache
from dwave.cloud import Client
from dwave.cloud.exceptions import ConfigFileError
@lru_cache(maxsize=None)
def qpu_available():
    """Return True if a D-Wave QPU solver is reachable, False otherwise.

    The result is cached for the lifetime of the process, so the (slow)
    connection attempt is made at most once.
    """
    try:
        with Client.from_config() as client:
            # get_solver raises if no QPU solver matches; we only care
            # whether it succeeds, so the result is discarded.
            client.get_solver(qpu=True)
    except (ConfigFileError, ValueError):
        # no usable config file, or no QPU solver available
        return False
    return True
|
from __future__ import annotations
import os
import sys
import time
import discord
import psutil
from pgbot import common, embed_utils, utils
from pgbot.commands.base import CodeBlock, String, MentionableID
from pgbot.commands.user import UserCommand
from pgbot.commands.emsudo import EmsudoCommand
# Handle to the bot's own OS process; cmd_heap reads its memory usage from it.
process = psutil.Process(os.getpid())
class AdminCommand(UserCommand, EmsudoCommand):
    # Admin-only command handlers; combines the regular user commands with the
    # emsudo (embed) commands and adds privileged pg!* commands on top.
    async def handle_cmd(self):
        """
        Temporary function, to divert paths for emsudo commands and other
        commands
        """
        if self.cmd_str.startswith("emsudo"):
            await EmsudoCommand.handle_cmd(self)
        else:
            await UserCommand.handle_cmd(self)
    async def cmd_eval(self, code: CodeBlock):
        """
        ->type Admin commands
        ->signature pg!eval [command]
        ->description Execute a line of command without restrictions
        -----
        Implement pg!eval, for admins to run arbitrary code on the bot
        """
        # SECURITY: eval of admin-supplied input by design; must stay
        # restricted to trusted admins.
        try:
            script = compile(code.code, "<string>", "eval")  # compile script
            script_start = time.perf_counter()
            eval_output = eval(script)  # pylint: disable = eval-used
            total = time.perf_counter() - script_start
            await embed_utils.replace(
                self.response_msg,
                f"Return output (code executed in {utils.format_time(total)}):",
                utils.code_block(repr(eval_output))
            )
        except Exception as ex:
            # report the exception type and args back in the response embed
            await embed_utils.replace(
                self.response_msg,
                common.EXC_TITLES[1],
                utils.code_block(
                    type(ex).__name__ + ": " + ", ".join(map(str, ex.args))
                )
            )
    async def cmd_sudo(self, msg: String):
        """
        ->type More admin commands
        ->signature pg!sudo [message]
        ->description Send a message trough the bot
        -----
        Implement pg!sudo, for admins to send messages via the bot
        """
        await self.invoke_msg.channel.send(msg.string)
        # remove both the bot's placeholder response and the invoking message
        await self.response_msg.delete()
        await self.invoke_msg.delete()
    async def cmd_sudo_edit(self, msg_id: MentionableID, msg: String):
        """
        ->type More admin commands
        ->signature pg!sudo_edit [msg_id] [message]
        ->description Edit a message that the bot sent.
        -----
        Implement pg!sudo_edit, for admins to edit messages via the bot
        """
        edit_msg = await self.invoke_msg.channel.fetch_message(msg_id.id)
        await edit_msg.edit(content=msg.string)
        await self.response_msg.delete()
        await self.invoke_msg.delete()
    async def cmd_sudo_get(
        self,
        msg_id: MentionableID,
        channel_id: MentionableID = None,
        attach: bool = False,
    ):
        """
        ->type More admin commands
        ->signature pg!sudo_get [msg_id] [channel_id] [attach]
        ->description Get the text of a message through the bot
        Get the contents of the embed of a message from the given arguments and send it as another message
        (as an embed code block or a message with a `.txt` file attachment containing the message data)
        to the channel where this command was invoked.
        -----
        Implement pg!sudo_get, to return the the contents of a message in a text file.
        """
        # default to the invoking channel when no channel id was given
        channel = self.invoke_msg.channel if channel_id is None else \
            self.invoke_msg.guild.get_channel(channel_id.id)
        if channel is None:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid channel id!"
            )
            return
        try:
            msg = await channel.fetch_message(msg_id.id)
        except discord.NotFound:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid message id!"
            )
            return
        msg_link = f"https://discord.com/channels/{msg.guild.id}/{channel.id}/{msg.id}"
        if attach:
            # ship the raw content as a .txt attachment; the temp file lives
            # in the CWD and is removed again in the finally block
            try:
                with open("messagedata.txt", "w", encoding="utf-8") as msg_txt:
                    msg_txt.write(msg.content)
                await self.response_msg.channel.send(
                    file=discord.File("messagedata.txt"),
                    embed=await embed_utils.send_2(
                        None,
                        author_name="Message data",
                        description=f"**[View Original]({msg_link})**",
                        color=0xFFFFAA
                    )
                )
            finally:
                if os.path.exists("messagedata.txt"):
                    os.remove("messagedata.txt")
        else:
            # inline variant: escape backticks so the content cannot break
            # out of the surrounding code block
            await embed_utils.send_2(
                self.response_msg.channel,
                author_name="Message data",
                description="```\n{0}```".format(
                    msg.content.replace("```", "\\`\\`\\`")
                ),
                fields=(
                    (
                        "\u2800",
                        f"**[View Original]({msg_link})**",
                        False
                    ),
                )
            )
        await self.response_msg.delete()
    async def cmd_sudo_clone(
        self,
        msg_id: MentionableID,
        channel_id: MentionableID = None,
        embeds: bool = True,
        attach: bool = True,
        spoiler: bool = False,
    ):
        """
        ->type More admin commands
        ->signature pg!sudo_clone [msg_id] [channel_id] [embeds] [attach] [spoiler]
        ->description Clone a message through the bot
        Get a message from the given arguments and send it as another message to the channel where this command was invoked.
        -----
        Implement pg!sudo_clone, to get the content of a message and send it.
        """
        channel = self.invoke_msg.channel if channel_id is None else \
            self.invoke_msg.guild.get_channel(channel_id.id)
        if channel is None:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid channel id!"
            )
            return
        try:
            msg = await channel.fetch_message(msg_id.id)
        except discord.NotFound:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid message id!"
            )
            return
        msg_files = None
        if msg.attachments and attach:
            # re-download each attachment so it can be re-uploaded
            msg_files = [await a.to_file(spoiler=spoiler) for a in msg.attachments]
        await self.response_msg.channel.send(
            content=msg.content,
            embed=msg.embeds[0] if msg.embeds and embeds else None,
            files=msg_files
        )
        await self.response_msg.delete()
    async def cmd_heap(self):
        """
        ->type Admin commands
        ->signature pg!heap
        ->description Show the memory usage of the bot
        -----
        Implement pg!heap, for admins to check memory taken up by the bot
        """
        # resident set size of this process, in bytes
        mem = process.memory_info().rss
        await embed_utils.replace(
            self.response_msg,
            "Total memory used:",
            f"**{utils.format_byte(mem, 4)}**\n({mem} B)"
        )
    async def cmd_stop(self):
        """
        ->type Admin commands
        ->signature pg!stop
        ->description Stop the bot
        -----
        Implement pg!stop, for admins to stop the bot
        """
        await embed_utils.replace(
            self.response_msg,
            "Stopping bot...",
            "Change da world,\nMy final message,\nGoodbye."
        )
        sys.exit(0)
    async def cmd_archive(
        self,
        origin: MentionableID,
        quantity: int,
        destination: MentionableID = None,
    ):
        """
        ->type Admin commands
        ->signature pg!archive [origin] [quantity] [destination]
        ->description Archive messages to another channel
        -----
        Implement pg!archive, for admins to archive messages
        """
        origin_channel = None
        destination_channel = None
        if destination is None:
            # default destination is the channel the command was invoked in
            destination = MentionableID("0")
            destination.id = self.invoke_msg.channel.id
        if destination.id == origin.id:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Origin and destination channels are same"
            )
            return
        # resolve both channel ids against every channel the bot can see
        for channel in common.bot.get_all_channels():
            if channel.id == origin.id:
                origin_channel = channel
            if channel.id == destination.id:
                destination_channel = channel
        if not origin_channel:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid origin channel!"
            )
            return
        elif not destination_channel:
            await embed_utils.replace(
                self.response_msg,
                "Cannot execute command:",
                "Invalid destination channel!"
            )
            return
        messages = await origin_channel.history(limit=quantity).flatten()
        # history() yields newest-first; archive oldest-first
        messages.reverse()
        message_list = await utils.format_archive_messages(messages)
        # join the formatted messages with '=' ruler lines, then split into
        # chunks small enough for Discord's message length limit
        archive_str = f"+{'=' * 40}+\n" + \
            f"+{'=' * 40}+\n".join(message_list) + f"+{'=' * 40}+\n"
        archive_list = utils.split_long_message(archive_str)
        for message in archive_list:
            await destination_channel.send(message)
        await embed_utils.replace(
            self.response_msg,
            f"Successfully archived {len(messages)} message(s)!",
            ""
        )
|
""" Various configuration routines for musync.
"""
import logging
import time
import musync
# Top-level URL of the Mutopia Project FTP archive.
ARCHIVE_TOP = 'http://www.mutopiaproject.org/ftp'
# Log message and timestamp layouts shared by the 'utc' and 'local'
# formatter configurations in LOGGING below.
_MSG_FORMAT='%(asctime)s - %(name)s %(levelname)s - %(message)s'
_DATE_FORMAT='%Y-%m-%d %H:%M:%S'
class UTCFormatter(logging.Formatter):
    """logging.Formatter that renders %(asctime)s timestamps in UTC."""
    converter = time.gmtime  # replaces the default local-time converter
# dictConfig-style logging configuration: INFO-level console output plus a
# rotating 'musync.log' file (3 backups, 2048 bytes each), both stamped in
# UTC via UTCFormatter. The 'local' formatter is defined but not attached
# to any handler here.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'utc': {
            '()': UTCFormatter,  # custom factory: UTC timestamps
            'format': _MSG_FORMAT,
            'datefmt': _DATE_FORMAT,
        },
        'local': {
            'format': _MSG_FORMAT,
            'datefmt': _DATE_FORMAT,
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'utc',
            'level': 'INFO',
        },
        'logfile': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'musync.log',
            'formatter': 'utc',
            'level': 'INFO',
            'backupCount': 3,
            'maxBytes': 2048,
        },
    },
    'root': {
        'handlers': ['console', 'logfile'],
    }
}
|
from flask import Blueprint
# Flask blueprints for the pbxd application: the main pages plus one
# blueprint per API version. The second argument is the import name.
main = Blueprint("main", "pbxd.main")
v2 = Blueprint("v2", "pbxd.v2")
v3 = Blueprint("v3", "pbxd.v3")
|
# Generated by Django 3.1.2 on 2021-01-14 17:39
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the ``similarities`` field from ``Seminar``."""
    dependencies = [
        ('backend', '0042_seminar_similarities'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='seminar',
            name='similarities',
        ),
    ]
|
"""A custom turtle.Turtle.
Author: Henrik Abel Christensen
"""
import turtle
from typing import Tuple
class Tortoise(turtle.Turtle):
    """Custom Turtle class.

    Parameters
    ----------
    shape : str
        Shape used to draw the turtle (default ``'turtle'``).
    visible : bool
        Whether the turtle is shown (default ``True``).
    """
    def __init__(self, shape: str = 'turtle', visible: bool = True) -> None:
        super().__init__(shape=shape, visible=visible)

    def step(self, move: Tuple[int, int]) -> None:
        """Move the turtle by the given deltas.

        Negative/positive x deltas move along headings 270/90 and
        negative/positive y deltas along headings 180/0, by the delta's
        magnitude. (This heading mapping is kept from the original code;
        TODO confirm it matches the caller's grid orientation.)

        Parameters
        ----------
        move : Tuple[int, int]
            Delta x and delta y values.
        """
        delta_x, delta_y = move
        if delta_x < 0:
            self.setheading(270)
            # forward(-delta_x) moves |delta_x| units along heading 270;
            # equivalent to the original backward() with a negative argument,
            # but without the double negation.
            self.forward(-delta_x)
        elif delta_x > 0:
            self.setheading(90)
            self.forward(delta_x)
        if delta_y < 0:
            self.setheading(180)
            # BUG FIX: the original moved by delta_x in both y branches
            # (copy-paste error), conflating vertical and horizontal moves;
            # use the y delta's magnitude.
            self.forward(-delta_y)
        elif delta_y > 0:
            self.setheading(0)
            # BUG FIX: was forward(delta_x); see above.
            self.forward(delta_y)
|
import numpy as np
from functions.global_settings import settings, us_in_1_second
from tkinter import filedialog
from datetime import date
# Ideal tracking rate in arc-seconds per second: 1296000 arcsec per full
# turn (360 * 3600) divided by 86164 s (one sidereal day).
ideal_speed_as_const = 1296000.0 / 86164.0
def get_data_from_correction_file(filepath):
    """Parse a correction-data file into ``(times, data)``.

    The file contains '};'-terminated sections: a comma-separated list of
    integer times, a comma-separated list of (possibly 'UL'-suffixed) data
    values, and a trailing model section that is ignored.
    """
    print(f"Loaded correction data from {filepath}")
    with open(filepath) as src:
        merged = ' '.join(line.strip() for line in src)
    # split on the section terminators and drop empty trailing pieces
    times_text, data_text, _model_text = (
        section.strip() for section in merged.split('};') if section
    )
    old_times = [int(token) for token in times_text.split(',')]
    # strip the 'UL' suffix (and any other non-digit characters) per value
    old_data = np.array(
        [int(''.join(ch for ch in token if ch.isnumeric()))
         for token in data_text.split(',')]
    )
    print(f"Times = {old_times}")
    print(f"Data = {old_data}")
    return old_times, old_data
def get_new_correction_data(old_data, error_data):
    """Recompute correction values from the old data and measured errors.

    Converts the old per-step microsecond values into an angular worm speed,
    scales the ideal speed by the ratio of worm speed to the error-corrected
    real speed, and converts back to per-step microsecond values.
    """
    # old data is microseconds per tick -> ticks per second
    tics_per_s = (us_in_1_second * np.ones_like(old_data)) / old_data
    speed_factor = settings.get_worm_speed_factor()
    worm_speed_as = speed_factor * tics_per_s
    ideal_speed_as = ideal_speed_as_const * np.ones_like(worm_speed_as)
    # the real speed is the ideal one plus the measured error
    real_speed_as = ideal_speed_as + error_data
    new_model = worm_speed_as / real_speed_as
    new_worm_speed = ideal_speed_as * new_model
    new_ticks_pers_s = new_worm_speed / speed_factor
    # back to microseconds per tick
    return (us_in_1_second * np.ones_like(new_ticks_pers_s)) / new_ticks_pers_s
def write_correction_data(times, new_data):
    """Write times and data as C-style array initializers to a timestamped file.

    Each value of *new_data* is truncated to int and written with a ``UL``
    suffix; both lines are terminated with ``};``. The output file is named
    ``<timestamp>_correction_data.mdl`` in the current directory.
    """
    new_times_str = ', '.join(map(str, times)) + "};\n"
    # BUG FIX: the original used 'UL, '.join(...), which only puts 'UL'
    # *between* items, so the last value had no suffix; emit it per element.
    new_data_str = ', '.join(f"{int(value)}UL" for value in new_data) + "};\n"
    # BUG FIX: date.today() carries no time-of-day, so %H-%M-%S always
    # rendered 00-00-00; use a full datetime for a unique per-run filename.
    from datetime import datetime
    d = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    with open(d + '_correction_data.mdl', 'w') as f:
        f.write(new_times_str)
        f.write(new_data_str)
# Pre-seed this with a path (as in the commented example) to skip the file
# dialog during development/testing.
#correction_data_path_mock = "C:/Users/Florek/Desktop/_STEROWANIE/PEliminator/old_data.txt"
correction_data_path_mock = None


class TimesGenerator:
    """Collects measured errors and regenerates the correction-data file."""

    def __init__(self):
        self._times = None   # error timestamps pushed via push_errors()
        self._data = None    # error values pushed via push_errors()
        self._old = None
        # path of the previous correction file; chosen interactively in
        # load() unless pre-seeded with the mock above
        self._old_path = correction_data_path_mock

    def push_errors(self, errors):
        """Store the ``(times, data)`` error tuple for a later load()."""
        self._times, self._data = errors

    def save(self):
        # not implemented yet
        pass

    def load(self):
        """Read the old correction file, apply the errors, write a new file."""
        # kept from the original; the value is unused here -- TODO confirm
        # whether the direction should influence the correction
        direction = settings.get_correction_direction()
        if self._old_path is None:
            self._old_path = filedialog.askopenfilename(title="Select file with previous data:")
        # BUG FIX: askopenfilename returns '' (not None) when the dialog is
        # cancelled, which previously slipped past the None check and crashed
        # on open(''); treat any falsy path as "no file chosen".
        if not self._old_path:
            self._old_path = None
            return
        old_times, old_data = get_data_from_correction_file(self._old_path)
        # BUG FIX: get_new_correction_data returns a single array; the old
        # two-name unpack (new_data, new_model = ...) only worked by accident
        # for length-2 results.
        new_data = get_new_correction_data(old_data, self._data)
        write_correction_data(old_times, new_data)
|
# Generated by Django 3.1.1 on 2020-09-08 04:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter ``Projects.image`` to an optional ImageField
    storing uploads under ``media/project_imgs/<year>/<month>/<day>``."""
    dependencies = [
        ('projects', '0002_auto_20200908_0405'),
    ]
    operations = [
        migrations.AlterField(
            model_name='projects',
            name='image',
            field=models.ImageField(blank=True, upload_to='media/project_imgs/%Y/%m/%d'),
        ),
    ]
|
import unittest
from webob.compat import text_type
class text_Tests(unittest.TestCase):
    """Tests for ``webob.compat.text_`` (bytes/str -> text coercion)."""

    def _callFUT(self, *arg, **kw):
        # imported lazily so an import failure surfaces as a test failure,
        # not a collection error
        from webob.compat import text_
        return text_(*arg, **kw)

    def test_binary(self):
        result = self._callFUT(b'123')
        # assertIsInstance gives a clearer failure message than the former
        # assertTrue(isinstance(...)) pattern
        self.assertIsInstance(result, text_type)
        self.assertEqual(result, text_type(b'123', 'ascii'))

    def test_binary_alternate_decoding(self):
        result = self._callFUT(b'La Pe\xc3\xb1a', 'utf-8')
        self.assertIsInstance(result, text_type)
        self.assertEqual(result, text_type(b'La Pe\xc3\xb1a', 'utf-8'))

    def test_binary_decoding_error(self):
        # 0xff is not valid UTF-8, so decoding must fail
        self.assertRaises(UnicodeDecodeError, self._callFUT, b'\xff', 'utf-8')

    def test_text(self):
        result = self._callFUT(text_type(b'123', 'ascii'))
        self.assertIsInstance(result, text_type)
        self.assertEqual(result, text_type(b'123', 'ascii'))
class bytes_Tests(unittest.TestCase):
    """Tests for ``webob.compat.bytes_`` (text/bytes -> bytes coercion)."""

    def _callFUT(self, *arg, **kw):
        # imported lazily so an import failure surfaces as a test failure,
        # not a collection error
        from webob.compat import bytes_
        return bytes_(*arg, **kw)

    def test_binary(self):
        result = self._callFUT(b'123')
        # assertIsInstance gives a clearer failure message than the former
        # assertTrue(isinstance(...)) pattern
        self.assertIsInstance(result, bytes)
        self.assertEqual(result, b'123')

    def test_text(self):
        val = text_type(b'123', 'ascii')
        result = self._callFUT(val)
        self.assertIsInstance(result, bytes)
        self.assertEqual(result, b'123')

    def test_text_alternate_encoding(self):
        val = text_type(b'La Pe\xc3\xb1a', 'utf-8')
        result = self._callFUT(val, 'utf-8')
        self.assertIsInstance(result, bytes)
        self.assertEqual(result, b'La Pe\xc3\xb1a')
|
import requests
from bs4 import BeautifulSoup as bs
import csv
import pandas as pd
# Workbook of hotel pages to scrape; 'Sheet1' is expected to expose a
# 'hotel_website' column with one listing URL per row (consumed by the
# scraping loop below).
file = "Hotel-2.0.xlsx"
hotel_names = pd.ExcelFile(file)
df1 = hotel_names.parse('Sheet1')

# Per-hotel accumulators, one entry appended per scraped page and flushed to
# the CSV files at the end of the script.
hotel, hotel_amenities, hotel_room_types = [], [], []
hotel_nearby_hotels, hotel_nearby_restaurants, hotel_nearby_attractions = [], [], []
for page in df1['hotel_website']:
    # One iteration per hotel listing URL from the spreadsheet.
    print(page)
    page_data = requests.get(page)
    soup = bs(page_data.text, "html.parser")
    # Listing id: the digits between '-d' and the next '-' in the URL
    # (TripAdvisor-style URL scheme — TODO confirm against the actual site).
    hotel_id = page.split('-d')[1].split('-')[0]
    # Page header block: name, aggregate user rating, and address parts.
    hotel_info_container = soup.find('div', attrs={'id': 'taplc_location_detail_header_hotels_0'})
    hotel_name = hotel_info_container.find('h1', attrs={'id': 'HEADING'}).text
    # Rating is read from the 'alt' text of an inline-styled span; fragile if
    # the site changes its inline styles.
    hotel_user_rating = hotel_info_container.find('span', attrs={'style': 'font-size:16px;'})['alt'].split(' ')[0]
    hotel_add_str = hotel_info_container.find('span', attrs={'class': 'street-address'}).text.split(",")[0]
    hotel_add_city = hotel_info_container.find('span', attrs={'class': 'locality'}).text.split(",")[0]
    hotel_add_state = hotel_info_container.find('span', attrs={'class': 'locality'}).text.split(", ")[-1].split(" ")[0]
    # First five characters of the token after the state -> 5-digit ZIP.
    hotel_add_zip = hotel_info_container.find('span', attrs={'class': 'locality'}).text.split(", ")[-1].split(" ")[1][0:5]
    # Amenities: unique [hotel_id, text] pairs; the single-class check skips
    # <li> entries carrying extra CSS classes.
    amenities_table = soup.find('div', attrs={'class': 'ui_columns section_content'})
    amenities = []
    for row in amenities_table.findAll("li"):
        if (not [hotel_id, row.get_text().strip()] in amenities) and (len(row['class']) == 1):
            amenities.append([hotel_id, row.get_text().strip()])
    # "Details" sidebar: price range, star class, room types, room count.
    hotel_details_container = soup.find('div', attrs={'class': 'details-top ui_column is-4'})
    # Price text like "$123 - $456 (...)": strip the parenthetical, spaces and
    # '$', then split on '-' for the lower/upper bound.
    hotel_price_lower = \
        hotel_details_container.find('ul', attrs={'class': 'list price_range'}).find_all('li', attrs={'class': 'item'})[
            1].get_text().split(" (")[0].replace(' ', '').split('-')[0].replace('$', '')
    hotel_price_upper = \
        hotel_details_container.find('ul', attrs={'class': 'list price_range'}).find_all('li', attrs={'class': 'item'})[
            1].get_text().split(" (")[0].replace(' ', '').split('-')[1].replace('$', '')
    # Star class is encoded in the second CSS class token after '_' (e.g.
    # '..._45' -> 4.5 — presumably; verify against live markup). '' when the
    # element is absent.
    hotel_expedia_rating = hotel_details_container.find('div', attrs={'title': 'Hotel class'})
    if (hotel_expedia_rating != None):
        hotel_expedia_rating = float(hotel_expedia_rating['class'][1].split('_')[1].strip()) / 10
    else:
        hotel_expedia_rating = ''
    room_types_table = hotel_details_container.find('ul', attrs={'class': 'list room_types'})
    room_types = []
    for row in room_types_table.findAll("li"):
        # NOTE(review): this membership test compares the raw text against a
        # list of [hotel_id, text] pairs, so it can never match and duplicates
        # are NOT filtered — compare with the amenities loop above.
        if (not row.get_text() in room_types) and (len(row['class']) == 1):
            room_types.append([hotel_id, row.get_text().replace(',','').strip()])
    hotel_total_rooms = \
        hotel_details_container.find('ul', attrs={'class': 'list number_of_rooms'}).find_all('li', attrs={'class': 'item'})[
            1].get_text().replace(',','').strip()

    def nearby_adder(row, nearby_list):
        """Append [hotel_id, name, rating, total_reviews, distance] for one
        nearby-POI card to ``nearby_list``, using '' for any missing field.

        Defined inside the loop so it closes over the current ``hotel_id``.
        """
        name = row.div.find('div', attrs={'class': 'poiName'})
        if (name != None):
            name = name.get_text().strip()
        else:
            name = ''
        rating = row.div.find('div', attrs={'class': 'prw_rup prw_common_bubble_rating rating'})
        if (rating != None):
            rating = rating.span['alt'].split(' ')[0].strip()
        else:
            rating = ''
        total_reviews = row.div.find('div', attrs={'class': 'reviewCount'})
        if (total_reviews != None):
            total_reviews = total_reviews.get_text().split(' ')[0].replace(',', '').strip()
        else:
            total_reviews = ''
        distance = row.div.find('div', attrs={'class': 'distance'})
        if (distance != None):
            distance = distance.get_text().split(' ')[0].strip()
        else:
            distance = ''
        # list.append returns None; callers ignore the return value.
        return nearby_list.append([hotel_id, name, rating, total_reviews, distance])

    # Nearby hotels / restaurants / attractions: each grid contributes one
    # list of rows per hotel (an empty list when the grid is absent), keeping
    # the outer accumulators aligned with `hotel`.
    nearby_hotels = soup.find_all('div', attrs={'class': 'prw_rup prw_common_btf_nearby_poi_grid poiGrid hotel'})
    if (nearby_hotels != []):
        nearby_hotels = nearby_hotels[0].find_all('div')[1]
    nearby_hotels_list = []
    for row in nearby_hotels:
        nearby_adder(row, nearby_hotels_list)
    hotel_nearby_hotels.append(nearby_hotels_list)
    nearby_restaurants = soup.find_all('div', attrs={'class': 'prw_rup prw_common_btf_nearby_poi_grid poiGrid eatery'})
    if (nearby_restaurants != []):
        nearby_restaurants = nearby_restaurants[0].find_all('div')[1]
    nearby_restaurants_list = []
    for row in nearby_restaurants:
        nearby_adder(row, nearby_restaurants_list)
    hotel_nearby_restaurants.append(nearby_restaurants_list)
    nearby_attractions = soup.find_all('div',
                                       attrs={'class': 'prw_rup prw_common_btf_nearby_poi_grid poiGrid attraction'})
    if (nearby_attractions != []):
        nearby_attractions = nearby_attractions[0].find_all('div')[1]
    nearby_attractions_list = []
    for row in nearby_attractions:
        nearby_adder(row, nearby_attractions_list)
    hotel_nearby_attractions.append(nearby_attractions_list)
    # Flat summary row for this hotel, plus the per-hotel nested lists.
    hotel.append(
        [hotel_id, hotel_name, hotel_user_rating, hotel_expedia_rating, hotel_add_str, hotel_add_city, hotel_add_state,
         hotel_add_zip, hotel_total_rooms, hotel_price_lower, hotel_price_upper])
    hotel_amenities.append(amenities)
    hotel_room_types.append(room_types)
def file_writer(data):
    """Write scraped rows through the module-level ``writer``.

    ``data`` is either the flat ``hotel`` table (one row per hotel) or one of
    the nested accumulators (one list of rows per hotel), which is flattened
    one level before writing.
    """
    # Dispatch on identity, not equality: only the ``hotel`` list object
    # itself is flat. The original used ``==``, which would also send a
    # distinct list that merely *compares equal* down the flat path.
    if data is hotel:
        for row in data:
            writer.writerow(row)
    else:
        for per_hotel_rows in data:
            for row in per_hotel_rows:
                writer.writerow(row)
# Flush every accumulator to its own CSV. file_writer() reads the module-level
# ``writer`` rebound inside each ``with`` block.
# NOTE(review): files are opened in append mode ('a'), so re-running the
# script writes a second header row into each file — confirm whether
# cross-run appending is intended.
with open('hotel.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id','hotel_name', 'hotel_user_rating', 'hotel_expedia_rating', 'hotel_add_str', 'hotel_add_city', 'hotel_add_state', 'hotel_add_zip', 'hotel_total_rooms', 'hotel_price_lower($)', 'hotel_price_upper($)'])
    file_writer(hotel)
with open('amenities.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id','amenity'])
    file_writer(hotel_amenities)
with open('room_types.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id', 'room_type'])
    file_writer(hotel_room_types)
with open('nearby_hotels.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id','nearby_hotel_name','rating','reviews','distance'])
    file_writer(hotel_nearby_hotels)
with open('nearby_restaurants.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id', 'nearby_restaurants_name', 'rating', 'reviews', 'distance'])
    file_writer(hotel_nearby_restaurants)
with open('nearby_attractions.csv', 'a') as csv_file:
    writer = csv.writer(csv_file,lineterminator='\n')
    writer.writerow(['hotel_id', 'nearby_attraction_name', 'rating', 'reviews', 'distance'])
    file_writer(hotel_nearby_attractions)
import AnalogShield as AS
import matplotlib.pyplot as plt
import numpy as np
import operator
import RigolInstruments as RI
import time
# Sweep the DAC from -5 V to +5 V, compare the shield's ADC readings against a
# reference multimeter, and plot the ADC error curve to adc_error.png.
a = AS.AnalogShield("/dev/analog_shield", "D784216")
a.adc_calibrate(0, "/dev/multimeter")

# Connect to the multimeter
multimeter = RI.DM3058("/dev/multimeter")

actual_readings = []  # reference voltages reported by the multimeter
adc_readings = []     # averaged raw ADC readings at the same DAC setting

# A big jump in DAC output occurs going to -5V, so give the multimeter extra time to adjust
a.analog_write(0, -5)
time.sleep(2)

for v_out in range(-5, 6):  # Go from -5 to 5V in 1V steps
    a.analog_write(0, v_out)
    # Collect data
    time.sleep(0.01)  # Delay to let the multimeter adjust
    v_actual = multimeter.voltage()
    v_adc = np.mean(a.analog_read(0, 500, correct=False))  # Average 500 readings to reduce noise
    # Save data
    actual_readings.append(v_actual)
    adc_readings.append(v_adc)

# BUG FIX: on Python 3, map() returns a lazy iterator, which matplotlib cannot
# treat as a data series — compute the per-point error eagerly instead.
error = np.asarray(adc_readings) - np.asarray(actual_readings)

plt.plot(actual_readings, error, ".")
plt.title("ADC error - without calibration")
plt.xlabel("Input [V]")
plt.ylabel("Error (ADC - input) [V]")
plt.savefig("adc_error.png")
|
# -*- coding: utf-8 -*-
import asyncio
import numpy as np
from poke_env.player.random_player import RandomPlayer
from poke_env.teambuilder.teambuilder import Teambuilder
class RandomTeamFromPool(Teambuilder):
    """Teambuilder that serves a uniformly random team from a fixed pool."""

    def __init__(self, teams):
        # Parse and pack every showdown-format export once, up front, so
        # yield_team() only has to pick from ready-made packed strings.
        packed = []
        for team in teams:
            packed.append(self.join_team(self.parse_showdown_team(team)))
        self.teams = packed

    def yield_team(self):
        # Uniform draw over the pre-packed pool.
        return np.random.choice(self.teams)
# Two fixed Pokémon Showdown team exports used to seed the team pool below.
# The string contents are runtime data consumed by parse_showdown_team and
# must not be reformatted.
# NOTE(review): Showdown exports normally separate Pokémon with blank lines;
# none appear here — confirm the separators were not lost in a paste.
team_1 = """
Goodra (M) @ Assault Vest
Ability: Sap Sipper
EVs: 248 HP / 252 SpA / 8 Spe
Modest Nature
IVs: 0 Atk
- Dragon Pulse
- Flamethrower
- Sludge Wave
- Thunderbolt
Sylveon (M) @ Leftovers
Ability: Pixilate
EVs: 248 HP / 244 Def / 16 SpD
Calm Nature
IVs: 0 Atk
- Hyper Voice
- Mystical Fire
- Protect
- Wish
Cinderace (M) @ Life Orb
Ability: Blaze
EVs: 252 Atk / 4 SpD / 252 Spe
Jolly Nature
- Pyro Ball
- Sucker Punch
- U-turn
- High Jump Kick
Toxtricity (M) @ Throat Spray
Ability: Punk Rock
EVs: 4 Atk / 252 SpA / 252 Spe
Rash Nature
- Overdrive
- Boomburst
- Shift Gear
- Fire Punch
Seismitoad (M) @ Leftovers
Ability: Water Absorb
EVs: 252 HP / 252 Def / 4 SpD
Relaxed Nature
- Stealth Rock
- Scald
- Earthquake
- Toxic
Corviknight (M) @ Leftovers
Ability: Pressure
EVs: 248 HP / 80 SpD / 180 Spe
Impish Nature
- Defog
- Brave Bird
- Roost
- U-turn
"""
team_2 = """
Togekiss @ Leftovers
Ability: Serene Grace
EVs: 248 HP / 8 SpA / 252 Spe
Timid Nature
IVs: 0 Atk
- Air Slash
- Nasty Plot
- Substitute
- Thunder Wave
Galvantula @ Focus Sash
Ability: Compound Eyes
EVs: 252 SpA / 4 SpD / 252 Spe
Timid Nature
IVs: 0 Atk
- Sticky Web
- Thunder Wave
- Thunder
- Energy Ball
Cloyster @ King's Rock
Ability: Skill Link
EVs: 252 Atk / 4 SpD / 252 Spe
Adamant Nature
- Icicle Spear
- Rock Blast
- Ice Shard
- Shell Smash
Sandaconda @ Focus Sash
Ability: Sand Spit
EVs: 252 Atk / 4 SpD / 252 Spe
Jolly Nature
- Stealth Rock
- Glare
- Earthquake
- Rock Tomb
Excadrill @ Focus Sash
Ability: Sand Rush
EVs: 252 Atk / 4 SpD / 252 Spe
Adamant Nature
- Iron Head
- Rock Slide
- Earthquake
- Rapid Spin
Cinccino @ King's Rock
Ability: Skill Link
EVs: 252 Atk / 4 Def / 252 Spe
Jolly Nature
- Bullet Seed
- Knock Off
- Rock Blast
- Tail Slap
"""
# Shared builder: each player draws a random team from the two-team pool.
custom_builder = RandomTeamFromPool([team_1, team_2])
async def main():
    """Create two random players sharing the team pool and battle them.

    Runs 5 gen8ou battles between two RandomPlayer instances, each drawing
    teams from ``custom_builder``.
    """
    # We create two players
    player_1 = RandomPlayer(
        battle_format="gen8ou", team=custom_builder, max_concurrent_battles=10
    )
    player_2 = RandomPlayer(
        battle_format="gen8ou", team=custom_builder, max_concurrent_battles=10
    )
    await player_1.battle_against(player_2, n_battles=5)


if __name__ == "__main__":
    # asyncio.run() creates and closes the event loop itself;
    # get_event_loop().run_until_complete() is deprecated for this use
    # since Python 3.10.
    asyncio.run(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.