import ctypes
import mmap
def create_callable_from_machine_code(machine_code, doc=None, restype=None, argtypes=None, use_errno=False, use_last_error=False):
if argtypes is None:
argtypes = []
exec_code = mmap.mmap(-1, len(machine_code), prot=mmap.PROT_WRITE | mmap.PROT_READ | mmap.PROT_EXEC)
exec_code.write(machine_code)
c_type = ctypes.c_byte * len(machine_code)
c_var = c_type.from_buffer(exec_code)
address = ctypes.addressof(c_var)
c_func_factory = ctypes.CFUNCTYPE(restype, *argtypes, use_errno=use_errno, use_last_error=use_last_error)
func = c_func_factory(address)
func._exec_code = exec_code # prevent GC of code
func.__doc__ = doc
return func
breakpoint = create_callable_from_machine_code(b'\xCC\xC3')  # x86: int3 (debug trap) followed by ret
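# A minimal usage sketch (not part of the original snippet). On x86-64 the bytes
# below encode `mov eax, 42; ret`, so the resulting callable returns 42. This
# assumes a platform where mmap exposes the PROT_* flags used above (e.g. Linux
# or macOS) running on an x86-64 CPU.
return_42 = create_callable_from_machine_code(
    b'\xB8\x2A\x00\x00\x00'  # mov eax, 42
    b'\xC3',                 # ret
    doc="Return the integer 42.",
    restype=ctypes.c_int,
)
assert return_42() == 42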
|
#!/usr/bin/env python3
"""
https://www.hackerrank.com/challenges/greedy-florist
A group of friends want to buy a bouquet of flowers.
The florist wants to maximize his number of new customers and the money he makes.
To do this, he decides he'll multiply the price of each flower by the number of that
customer's previously purchased flowers plus 1. The first flower will be original price:
(0 + 1) x price, the next will be
(1 + 1) x price
and so on.
Given the size of the group of friends,
the number of flowers they want to purchase and the original prices of the flowers,
determine the minimum cost to purchase all of the flowers.
The number of flowers they want equals the length of the
array.
"""
import math
import os
import random
import re
import sys
import heapq
def getMinimumCost(number_of_friends, flower_costs):
"""
Args:
number_of_friends (int): how many friends do we have
flower_costs (list): list of ints
Returns:
int: min cost for all flowers"""
friends_heap = [0] * number_of_friends
heapq.heapify(friends_heap)
total_cost = 0
# the idea is to purchase the most expensive flowers
# as the first flower for each of friends
# this way we minimize total cost
for cost in sorted(flower_costs, reverse=True):
# heap is very efficient for consecutive get_min operations
smallest_friend = heapq.heappop(friends_heap)
total_cost += (1 + smallest_friend) * cost
heapq.heappush(friends_heap, smallest_friend + 1)
return total_cost
if __name__ == "__main__":
ex_friends = 2
ex_flowers = [1, 2, 3]
result = getMinimumCost(ex_friends, ex_flowers)
print(result)
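# Worked trace of the example above (2 friends, prices [1, 2, 3]):
#   prices sorted descending -> [3, 2, 1]; heap of purchase counts starts as [0, 0]
#   price 3: friend with 0 purchases pays (0 + 1) * 3 = 3, heap -> [0, 1]
#   price 2: friend with 0 purchases pays (0 + 1) * 2 = 2, heap -> [1, 1]
#   price 1: friend with 1 purchase  pays (1 + 1) * 1 = 2, heap -> [1, 2]
#   total = 3 + 2 + 2 = 7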
|
# -*- coding: utf-8 -*-
import os.path
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone # Timezone.
from django.core.management import call_command
from foundation_tenant.models.bizmula.document import Document
from foundation_tenant.models.bizmula.workspace import Workspace
from foundation_tenant.models.bizmula.questionanswer import QuestionAnswer
from foundation_tenant.models.base.s3file import S3File
from foundation_tenant.bizmula_utils import BizmulaAPI
from foundation_tenant.utils import int_or_none
from foundation_tenant.utils import get_random_string
from smegurus import constants
class Command(BaseCommand):
help = _('Docxpresso processing command for the particular document in a tenant.')
def add_arguments(self, parser):
parser.add_argument('id', nargs='+')
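        # Expected positional arguments, in order: <schema_name> <workspace_id>.
        # Example invocation (the command name here is hypothetical):
        #   python manage.py docxpresso_stage_10 acme_corp 42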
def handle(self, *args, **options):
"""
        Read the tenant schema name and workspace ID from the command
        arguments, switch the database connection to that tenant schema,
        and begin processing the particular document.
"""
schema_name = options['id'][0]
workspace_id = int_or_none(options['id'][1])
        # Fetch the database connection and switch it to the tenant schema
        # where this tenant's data is stored.
        from django.db import connection
        connection.set_schema(schema_name, True)  # Switch to Tenant.
api = BizmulaAPI(
settings.DOCXPRESSO_PUBLIC_KEY,
settings.DOCXPRESSO_PRIVATE_KEY,
settings.DOCXPRESSO_URL
)
self.begin_processing(workspace_id, api)
# Return a success message to the console.
self.stdout.write(
self.style.SUCCESS(_('Finished processing stage 10 for workspace_id #%s.') % str(workspace_id))
)
def get_workspace(self, workspace_id):
"""
Utility function will return the Workspace for the parameter ID.
"""
try:
return Workspace.objects.get(id=workspace_id)
except Workspace.DoesNotExist:
raise CommandError(_('Cannot find a workspace.'))
except Exception as e:
            raise CommandError(_('Unknown error occurred: %s.' % e))
def get_document(self, workspace_id):
        """
        Utility function will return the stage 10 Document for the parameter workspace ID.
        """
        try:
            return Document.objects.get(
                workspace_id=workspace_id,
                document_type__stage_num=10
            )
        except Document.DoesNotExist:
            raise CommandError(_('Cannot find a document.'))
        except Exception as e:
            raise CommandError(_('Unknown error occurred: %s.' % e))
def get_answers(self, workspace_id):
"""
Utility function will return all answers for the parameter workspace ID.
"""
return QuestionAnswer.objects.filter(workspace_id=workspace_id)
def begin_processing(self, workspace_id, api):
workspace = self.get_workspace(workspace_id)
answers = self.get_answers(workspace_id)
self.process(workspace, answers, api)
def process(self, workspace, answers, api):
api.new(
name="workspace_" + str(workspace.id) + "_stage_10",
format="odt",
template="templates/stage10.odt"
)
# Take our content and populate docxpresso with it.
self.set_answers(workspace, answers, api)
# Generate our document!
doc_filename = api.get_filename()
doc_modified_filename = settings.SMEGURUS_APP_DOCXPRESSO_FILE_PREFIX+doc_filename
doc_bin_data = api.generate()
        # DEVELOPERS NOTE:
        # The imports below pull in the 'default_storage' class Django uses
        # for saving files. Because we overloaded this default class with S3
        # storage, any file saved through it ends up in S3.
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
# Fetch the document and then atomically modify it.
with transaction.atomic():
# Fetch the document.
document = self.get_document(workspace.id)
# If the file already exists then delete it from S3.
if document.docxpresso_file:
# Try deleting the previously uploaded file and if the file
# does not exist or ANY error occurs then catch it here and
# safely continue our application.
try:
document.docxpresso_file.delete()
except Exception as e:
print("WARNING: ", str(e))
# Save our file to DB.
docxpresso_file = S3File.objects.create(
stem=doc_filename,
suffix='odt',
owner=document.owner,
key=doc_modified_filename
)
docxpresso_file.upload_file(doc_bin_data)
# Generate our new file.
document.docxpresso_file = docxpresso_file
document.save()
def set_answers(self, workspace, answers, api):
# Assign date.
self.do_date(api)
# Some questions need to be stored for later reuse.
qid_099_answer = None
qid_104_answer = None
qid_136_answer = None
qid_137_answer = None
qid_138_answer = None
# Iterate through all the answers and transcode the business plan.
for answer in answers.all():
if answer.question.pk == 1:
api.do_q1(answer, api)
elif answer.question.pk == 2:
api.do_q2(answer, api)
elif answer.question.pk == 3:
api.do_q3(answer, api)
elif answer.question.pk == 4:
api.do_q4(answer, api)
elif answer.question.pk == 5:
api.do_q5(answer, api)
elif answer.question.pk == 6:
api.do_q6(answer, api)
elif answer.question.pk == 7:
api.do_q7(answer, api)
elif answer.question.pk == 8:
api.do_q8(answer, api)
elif answer.question.pk == 9:
api.do_q9(answer, api)
elif answer.question.pk == 10:
api.do_q10(answer, api)
elif answer.question.pk == 21:
api.do_q21(answer, api)
elif answer.question.pk == 25:
api.do_q25(answer, api)
elif answer.question.pk == 26:
api.do_q26(answer, api)
elif answer.question.pk == 27:
api.do_q27(answer, api)
elif answer.question.pk == 28:
api.do_q28(answer, api)
elif answer.question.pk == 29:
api.do_q29(answer, api)
elif answer.question.pk == 30:
api.do_q30(answer, api)
# elif answer.question.pk == 32:
# api.do_q32(answer, api)
elif answer.question.pk == 33:
api.do_q33(answer, api)
elif answer.question.pk == 34:
api.do_q34(answer, api)
elif answer.question.pk == 35:
api.do_q35(answer, api)
elif answer.question.pk == 36:
api.do_q36(answer, api)
elif answer.question.pk == 37:
api.do_q37(answer, api)
elif answer.question.pk == 38:
api.do_q38(answer, api)
elif answer.question.pk == 39:
api.do_q39(answer, api)
elif answer.question.pk == 40:
api.do_q40(answer, api)
elif answer.question.pk == 41:
api.do_q41(answer, api)
elif answer.question.pk == 42:
api.do_q42(answer, api)
elif answer.question.pk == 43:
api.do_q43(answer, api)
elif answer.question.pk == 44:
api.do_q44(answer, api)
elif answer.question.pk == 45:
api.do_q45(answer, api)
elif answer.question.pk == 46:
api.do_q46(answer, api)
elif answer.question.pk == 47:
api.do_q47(answer, api)
elif answer.question.pk == 48:
api.do_q48(answer, api)
elif answer.question.pk == 49:
api.do_q49(answer, api)
elif answer.question.pk == 50:
api.do_q50(answer, api)
elif answer.question.pk == 51:
api.do_q51(answer, api)
elif answer.question.pk == 52:
api.do_q52(answer, api)
elif answer.question.pk == 53:
api.do_q53(answer, api)
elif answer.question.pk == 54:
api.do_q54(answer, api)
elif answer.question.pk == 55:
api.do_q55(answer, api)
elif answer.question.pk == 56:
# actual_contact_number
api.do_q56(answer, api)
elif answer.question.pk == 58:
# actual_supported_number
# validation_outcome_met
api.do_q58(answer, api, answers)
elif answer.question.pk == 59:
# validation_lessons_learned
api.do_q59(answer, api)
elif answer.question.pk == 61:
api.do_q61(answer, api)
elif answer.question.pk == 62:
api.do_q62(answer, api)
elif answer.question.pk == 63:
api.do_q63(answer, api)
elif answer.question.pk == 64:
api.do_q64(answer, api)
elif answer.question.pk == 65:
api.do_q65(answer, api)
elif answer.question.pk == 66:
api.do_q66(answer, api)
elif answer.question.pk == 67:
api.do_q67(answer, api)
elif answer.question.pk == 68:
api.do_q68(answer, api)
elif answer.question.pk == 69:
api.do_q69(answer, api)
elif answer.question.pk == 70:
api.do_q70(answer, api)
elif answer.question.pk == 71:
api.do_q71(answer, api)
elif answer.question.pk == 72:
api.do_q72(answer, api)
elif answer.question.pk == 73:
api.do_q73(answer, api)
elif answer.question.pk == 74:
api.do_q74(answer, api)
elif answer.question.pk == 75:
api.do_q75(answer, api)
elif answer.question.pk == 76:
api.do_q76(answer, api)
elif answer.question.pk == 77:
api.do_q77(answer, api)
elif answer.question.pk == 78:
api.do_q78(answer, api)
elif answer.question.pk == 79:
api.do_q79(answer, api)
elif answer.question.pk == 80:
api.do_q80(answer, api)
elif answer.question.pk == 81:
api.do_q81(answer, api)
elif answer.question.pk == 82:
api.do_q82(answer, api)
elif answer.question.pk == 83:
api.do_q83(answer, api)
elif answer.question.pk == 84:
api.do_q84(answer, api)
elif answer.question.pk == 85:
api.do_q85(answer, api)
elif answer.question.pk == 86:
api.do_q86(answer, api)
elif answer.question.pk == 87:
api.do_q87(answer, api)
elif answer.question.pk == 88:
api.do_q88(answer, api)
elif answer.question.pk == 89:
api.do_q89(answer, api)
elif answer.question.pk == 90:
api.do_q90(answer, api)
elif answer.question.pk == 91:
api.do_q91(answer, api)
elif answer.question.pk == 92:
api.do_q92(answer, api)
elif answer.question.pk == 93:
api.do_q93(answer, api)
elif answer.question.pk == 94:
api.do_q94(answer, api)
elif answer.question.pk == 95:
api.do_q95(answer, api)
elif answer.question.pk == 97:
api.do_q97(answer, api)
elif answer.question.pk == 98:
api.do_q98(answer, api)
elif answer.question.pk == 99:
api.do_q99(answer, api)
qid_099_answer = answer
elif answer.question.pk == 100:
api.do_q100(answer, api)
elif answer.question.pk == 101:
api.do_q101(answer, api)
elif answer.question.pk == 102:
api.do_q102(answer, api)
elif answer.question.pk == 103:
api.do_q103(answer, api)
elif answer.question.pk == 104:
api.do_q104(answer, api)
qid_104_answer = answer
elif answer.question.pk == 105:
api.do_q105(answer, api)
elif answer.question.pk == 106:
api.do_q106(answer, api)
elif answer.question.pk == 107:
api.do_q107(answer, api)
elif answer.question.pk == 109:
api.do_q109(answer, api)
elif answer.question.pk == 108:
api.do_q108(answer, api)
elif answer.question.pk == 110:
api.do_q110(answer, api)
elif answer.question.pk == 111:
api.do_q111(answer, api)
elif answer.question.pk == 112:
api.do_q112(answer, api)
elif answer.question.pk == 113:
api.do_q113(answer, api)
elif answer.question.pk == 114:
api.do_q114(answer, api)
elif answer.question.pk == 116:
api.do_q116(answer, api)
elif answer.question.pk == 117:
api.do_q117(answer, api)
elif answer.question.pk == 118:
api.do_q118(answer, api)
elif answer.question.pk == 119:
api.do_q119(answer, api)
elif answer.question.pk == 120:
api.do_q120(answer, api)
elif answer.question.pk == 121:
api.do_q121(answer, api)
elif answer.question.pk == 122:
api.do_q122(answer, api)
elif answer.question.pk == 123:
api.do_q123(answer, api)
elif answer.question.pk == 124:
api.do_q124(answer, api)
elif answer.question.pk == 125:
api.do_q125(answer, api)
elif answer.question.pk == 126:
api.do_q126(answer, api)
elif answer.question.pk == 127:
api.do_q127(answer, api)
elif answer.question.pk == 128:
api.do_q128(answer, api)
elif answer.question.pk == 129:
api.do_q129(answer, api)
elif answer.question.pk == 130:
api.do_q130(answer, api)
elif answer.question.pk == 131:
api.do_q131(answer, api)
elif answer.question.pk == 132:
api.do_q132(answer, api)
elif answer.question.pk == 133:
api.do_q133(answer, api)
elif answer.question.pk == 134:
api.do_q134(answer, api)
elif answer.question.pk == 135:
api.do_q135(answer, api)
elif answer.question.pk == 136:
api.do_q136(answer, api)
qid_136_answer = answer
elif answer.question.pk == 137:
api.do_q137(answer, api)
qid_137_answer = answer
elif answer.question.pk == 138:
api.do_q138(answer, api)
qid_138_answer = answer
elif answer.question.pk == 139:
api.do_q139(answer, api)
elif answer.question.pk == 140:
api.do_q140(answer, api)
elif answer.question.pk == 141:
api.do_q141(answer, api)
elif answer.question.pk == 142:
api.do_q142(answer, api)
elif answer.question.pk == 143:
api.do_q143(answer, api)
elif answer.question.pk == 144:
api.do_q144(answer, api)
elif answer.question.pk == 145:
api.do_q145(answer, api)
elif answer.question.pk == 146:
api.do_q146(answer, api)
elif answer.question.pk == 147:
api.do_q147(answer, api)
elif answer.question.pk == 148:
api.do_q148(answer, api)
elif answer.question.pk == 149:
api.do_q149(answer, api)
elif answer.question.pk == 150:
api.do_q150(answer, api)
elif answer.question.pk == 151:
api.do_q151(answer, api)
elif answer.question.pk == 152:
api.do_q152(answer, api)
elif answer.question.pk == 153:
api.do_q153(answer, api)
elif answer.question.pk == 154:
api.do_q154(answer, api)
elif answer.question.pk == 155:
api.do_q155(answer, api)
elif answer.question.pk == 156:
api.do_q156(answer, api)
elif answer.question.pk == 157:
api.do_q157(answer, api)
elif answer.question.pk == 158:
api.do_q158(answer, api)
elif answer.question.pk == 161:
api.do_q161(answer, api)
elif answer.question.pk == 162:
api.do_q162(answer, api)
elif answer.question.pk == 165:
api.do_q165(answer, api)
elif answer.question.pk == 166:
api.do_q166(answer, api)
elif answer.question.pk == 167:
api.do_q167(answer, api)
elif answer.question.pk == 168:
api.do_q168(answer, api)
elif answer.question.pk == 169:
api.do_q169(answer, api)
# Perform specific computations based on previous saved answers.
api.do_q136_q137_q138(qid_136_answer, qid_137_answer, qid_138_answer, api)
api.do_marketing_roi(qid_099_answer, qid_104_answer, api)
def do_date(self, api):
today = timezone.now()
api.add_text("date", "{:%Y-%m-%d}".format(today))
def do_owner_names(self, workspace, api):
names = ""
for me in workspace.mes.all():
if names == "":
names += str(me)
else:
names += ", " + str(me)
api.add_text("owner_names", names)
|
import scrapy
from kingfisher_scrapy.base_spider import SimpleSpider, browser_user_agent
from kingfisher_scrapy.util import (append_path_components, components, handle_http_error, join, parameters,
replace_parameters)
class Ukraine(SimpleSpider):
"""
Domain
ProZorro OpenProcurement API
Caveats
      The API returns OCDS-like contracting process data, but no ocid is set. Therefore, this spider uses
      data.tenderID as the ocid and concatenates the data.id and data.dateModified fields to build the release id.
Spider arguments
from_date
Download only data from this time onward (YYYY-MM-DDThh:mm:ss format).
API documentation
https://prozorro-api-docs.readthedocs.io/uk/latest/tendering/index.html
"""
name = 'ukraine'
user_agent = browser_user_agent # to avoid HTTP 412 errors
# BaseSpider
encoding = 'utf-16'
data_type = 'release'
date_format = 'datetime'
ocds_version = '1.0'
def start_requests(self):
# A https://public.api.openprocurement.org/api/0/contracts endpoint also exists but the data returned from
# there is already included in the tenders endpoint. If we would like to join both, the tender_id field from
# the contract endpoint can be used with the id field from the tender endpoint.
url = 'https://public.api.openprocurement.org/api/0/tenders'
if self.from_date:
url = f'{url}?offset={self.from_date.strftime(self.date_format)}'
yield scrapy.Request(url, meta={'file_name': 'list.json'}, callback=self.parse_list)
@handle_http_error
def parse_list(self, response):
data = response.json()
for item in data['data']:
url = append_path_components(replace_parameters(response.request.url, offset=None), item['id'])
yield self.build_request(url, formatter=components(-2))
yield self.build_request(data['next_page']['uri'], formatter=join(components(-1), parameters('offset')),
callback=self.parse_list)
@handle_http_error
def parse(self, response):
data = response.json()
# The data looks like:
# {
# "data": {
# "id": "..",
# "dateModified": "...",
# "tenderID": "",
# other tender fields,
# "awards": ...,
# "contracts": ...
# }
# }
awards = data['data'].pop('awards', None)
contracts = data['data'].pop('contracts', None)
ocds_data = {
# The data.id field corresponds to the internal identifier. The data.dateModified is concatenated to ensure
# the id's uniqueness.
'id': f"{data['data']['id']}-{data['data']['dateModified']}",
# The data.tenderID field corresponds to the official identifier.
'ocid': data['data']['tenderID'],
'date': data['data']['dateModified'],
'tender': data['data'],
}
if contracts:
ocds_data['contracts'] = contracts
if awards:
ocds_data['awards'] = awards
yield self.build_file_from_response(response, data=ocds_data, data_type=self.data_type)
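        # The yielded file data then has roughly this shape (an illustration
        # based on the mapping above, not captured API output):
        # {
        #     "id": "<data.id>-<data.dateModified>",
        #     "ocid": "<data.tenderID>",
        #     "date": "<data.dateModified>",
        #     "tender": { ...remaining tender fields... },
        #     "awards": [...],      # only if present
        #     "contracts": [...]    # only if present
        # }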
|
"""
Django settings for project ers_backend.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ul2)0@*k-3snu(fijr8)9t1ozwuk3&4wmp_l=uikt426boodl@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# REST API
'rest_framework',
'rest_framework_swagger',
'corsheaders',
# Tests
'testing',
'model_mommy',
# Websockets
'swampdragon',
# Help
'annoying',
# Apps
'dataset_manager',
'video_processor',
'arousal_modeler',
'timeframe_annotator',
'emotion_annotator'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ers_backend.urls'
WSGI_APPLICATION = 'ers_backend.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'()': 'djangocolors_formatter.DjangoColorsFormatter',
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'()': 'djangocolors_formatter.DjangoColorsFormatter',
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'dataset_manager': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
},
'video_processor': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
},
'ers_backend': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
}
}
}
# REST
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3333',  # trailing comma keeps this a tuple rather than a plain string
)
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'X-CSRFToken'
)
CORS_ALLOW_CREDENTIALS = True
REST_FRAMEWORK = {
'UNICODE_JSON': False,
}
# Swampdragon
SWAMP_DRAGON_CONNECTION = ('swampdragon.connections.sockjs_connection.DjangoSubscriberConnection', '/data')
DRAGON_URL = 'http://localhost:9999/'
# Celery
USE_CELERY = True
BROKER_URL = 'redis://localhost:6379/1'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ers_backend_db',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '', # If mamp under OS X: /Applications/MAMP/tmp/mysql/mysql.sock
'PORT': '',
}
}
# Modify PATH if under OS X to have access to libraries such as ffmpeg
#os.environ["PATH"] += os.pathsep + os.pathsep.join(["/opt/local/bin", "/usr/local/bin"])
# Constants
VIDEO_EXTENSIONS = ("avi", "mkv", "mov", "mp4", "m4v", "mpeg", "mpg", "wmv")
DATASET_DEFAULT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'datasets'))
WEBCLIENT_VIDEOS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'ers_frontend/_public/datasets/$datasetId$/videos'))
|
import pytest
from wtforms.validators import UUID
from wtforms.validators import ValidationError
@pytest.mark.parametrize(
"uuid_val",
["2bc1c94f-0deb-43e9-92a1-4775189ec9f8", "2bc1c94f0deb43e992a14775189ec9f8"],
)
def test_valid_uuid_passes(uuid_val, dummy_form, dummy_field):
"""
Valid UUID should pass without raising
"""
validator = UUID()
dummy_field.data = uuid_val
validator(dummy_form, dummy_field)
@pytest.mark.parametrize(
"uuid_val",
[
"2bc1c94f-deb-43e9-92a1-4775189ec9f8",
"2bc1c94f-0deb-43e9-92a1-4775189ec9f",
"gbc1c94f-0deb-43e9-92a1-4775189ec9f8",
"2bc1c94f 0deb-43e9-92a1-4775189ec9f8",
],
)
def test_bad_uuid_raises(uuid_val, dummy_form, dummy_field):
"""
    Bad UUID should raise ValidationError
"""
validator = UUID()
dummy_field.data = uuid_val
with pytest.raises(ValidationError):
validator(dummy_form, dummy_field)
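# A minimal sketch of the fixtures these tests rely on; in the real test suite
# they are provided by conftest.py, so the exact shapes below are assumptions.
class _DummyField:
    data = None
    def gettext(self, string):
        # The UUID validator calls this to build its error message.
        return string
@pytest.fixture
def dummy_form():
    return object()
@pytest.fixture
def dummy_field():
    return _DummyField()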
|
import re
import sys
import click
from pyftdi.ftdi import Ftdi
from pyftdi.eeprom import FtdiEeprom
def load_eeprom(vid, pid, sn):
ftdi = Ftdi()
eeprom = FtdiEeprom()
ftdi.open(vid, pid, serial=sn)
eeprom.connect(ftdi)
return eeprom
def scan_devices(sn):
ftdi = Ftdi()
devices = [d[0] for d in ftdi.list_devices()]
# If the caller gave us a serial or a regex, keep only matching devices
if sn:
devices = [d for d in devices if re.search(sn, d.sn)]
    # Sort devices by their serials so that the presentation order does not
# depend on the order in which the devices were connected or reset.
devices = sorted(devices, key=lambda d: d.sn)
# Construct the FtdiEeprom object for each matching device
devices = [load_eeprom(d.vid, d.pid, d.sn) for d in devices]
return devices
def print_devices(devices):
for i, device in enumerate(devices):
s = "%i:" % i
sep = " "
if device.serial:
s += "%sserial: %s" % (sep, device.serial)
sep = ", "
if device.product:
s += "%sproduct: %s" % (sep, device.product)
sep = ", "
if device.manufacturer:
s += "%smanufacturer: %s" % (sep, device.manufacturer)
click.echo(s)
def select_device(sn):
devices = scan_devices(sn)
# We found no device we could work with, bail.
if len(devices) == 0:
raise Exception("No compatible/matching FTDI devices found")
# No need to ask the user to confirm the selection if we only have one device left.
if len(devices) == 1:
return devices[0]
print_devices(devices)
d = int(click.prompt('Please choose device (line number)'))
return devices[d]
def update_eeprom(sn, manufacturer=None, product=None, serial=None, reset=False):
eeprom = select_device(sn)
updated = False
if manufacturer is not None and manufacturer != eeprom.manufacturer:
eeprom.set_manufacturer_name(manufacturer)
updated = True
if product is not None and product != eeprom.product:
eeprom.set_product_name(product)
updated = True
if serial is not None and serial != eeprom.serial:
eeprom.set_serial_number(serial)
updated = True
if updated:
eeprom.commit(dry_run=False)
click.echo("The FTDI device EEPROM was updated.")
# On Darwin (Mac OS) we need to reset the USB device if the EEPROM was
# updated in order for the USB host to pick up the new USB descriptors.
if reset or (updated and sys.platform == 'darwin'):
click.echo("Resetting the USB device.")
eeprom.reset_device()
# On Linux, modifying the EEPROM or resetting the USB device will cause the
    # kernel to unbind the ftdi_sio driver from the device. Thus, the character
# device ttyUSBx will not be available. To remedy this situation, one needs
# to either unplug and replug the device or write to
# /sys/bus/usb/drivers/ftdi_sio/bind to manually rebind the driver to the
# USB device.
if (reset or updated) and sys.platform == 'linux':
click.echo("You may need to disconnect and reconnect the device.")
def list_devices(sn):
devices = scan_devices(sn)
if len(devices) == 0:
click.echo("No compatible/matching FTDI devices found.")
else:
print_devices(devices)
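# A minimal sketch (not part of the original module) of wiring these helpers
# into a click command line; the command and option names are assumptions.
@click.group()
def cli():
    """Inspect and update FTDI device EEPROMs."""
@cli.command('list')
@click.option('--sn', default=None, help='Serial number (or regex) to match.')
def cli_list(sn):
    list_devices(sn)
@cli.command('update')
@click.option('--sn', default=None, help='Serial number (or regex) to match.')
@click.option('--manufacturer', default=None)
@click.option('--product', default=None)
@click.option('--serial', default=None)
@click.option('--reset', is_flag=True, default=False)
def cli_update(sn, manufacturer, product, serial, reset):
    update_eeprom(sn, manufacturer=manufacturer, product=product, serial=serial, reset=reset)
if __name__ == '__main__':
    cli()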
|
import io
from omnibus import iterables as it
from .. import js
JS_WITH_MINML_HEADER = """
/*
args: {
x: int,
y: int,
]
*/
call_count = 0;
function add2(x, y) {
call_count += 1;
return x + 2;
}
"""
def test_fallback_js(tmpdir):
loop = js.build_loop('function(s){ return s + "!" }')
proc = js.launch_node(loop)
lines = it.cut_lines()(iter(io.TextIOWrapper(proc.stdout).readline, ''))
proc.stdin.write(b'"abc"\n')
proc.stdin.write(b'"def"\n')
proc.stdin.write(b'"ghi"\n')
proc.stdin.flush()
proc.stdin.close()
print(list(lines))
rc = proc.wait()
print(rc)
|
from sqlalchemy import String, Numeric, Integer, Boolean
from sqlalchemy.types import TypeDecorator, CHAR, VARCHAR
|
from get_params import get_params
import numpy as np
from get_features import get_features
from eval_rankings import eval_rankings
import matplotlib.pyplot as plt
def grafic(params, etiqueta):
    # 'etiqueta' is the dataset split to evaluate ('train' or 'val').
    params['split'] = etiqueta
    get_features(params)
    ap_list, dict_ = eval_rankings(params)
    mean = np.mean(dict_)
    return mean
if __name__ == "__main__":
    params = get_params()
    # Evaluate each descriptor size on both splits. Collecting the means in the
    # same order as the sizes keeps the plotted points correctly paired.
    sizes = [256, 512, 1024, 2048, 3072]
    train = []
    val = []
    for descriptor_size in sizes:
        params['descriptor_size'] = descriptor_size
        train.append(grafic(params, 'train'))
        val.append(grafic(params, 'val'))
    plt.figure('scatter')
    plt.scatter(train, sizes)
    plt.figure('plot')
    plt.plot(val, sizes)
    plt.show()
|
import FWCore.ParameterSet.Config as cms
selectedModules = []
selectedModules4cosmics = []
pluginsMonName = {}
modulesLabel = {}
categories = {}
### LocalReco
pluginsMonName['LocalReco'] = cms.string ('LocalReco')
modulesLabel ['LocalReco'] = cms.vstring('siPixelDigis', 'siStripDigis', 'siPixelClusters', 'siStripClusters' ) # siPixelDigis : SiPixelRawToDigi, siStripDigis : SiStripRawToDigi (SiStripRawToDigiUnpacker), siPixelClusters : SiPixelClusterProducer, siStripClusters : SiStripClusterizer
categories ['LocalReco'] = cms.vstring('SiPixelRawToDigi', 'TooManyErrors', 'TooManyClusters' )
# apparently there are no LogError messages in RecoLocalTracker/SubCollectionProducers/src/TrackClusterRemover.cc
pluginsMonName['Clusterizer'] = cms.string ( 'TrackClusterRemover' )
modulesLabel ['Clusterizer'] = cms.vstring( 'lowPtTripletStepClusters', 'pixelPairStepClusters', 'detachedTripletStepClusters', 'mixedTripletStepClusters', 'pixelLessStepClusters', 'tobTecStepClusters', 'displacedGeneralStepClusters' ) # TrackClusterRemover
categories ['Clusterizer'] = cms.vstring( )
# initialStepSeeds,lowPtTripletStepSeeds, pixelPairStepSeeds, detachedTripletStepSeeds, : TooManyClusters (SeedGeneratorFromRegionHitsEDProducer),
# photonConvTrajSeedFromSingleLeg : (PhotonConversionTrajectorySeedProducerFromSingleLeg)
pluginsMonName['Seeding'] = cms.string ( 'Seeding' )
modulesLabel ['Seeding'] = cms.vstring( 'initialStepSeeds', 'lowPtTripletStepSeeds', 'pixelPairStepSeeds', 'detachedTripletStepSeeds', 'mixedTripletStepSeedsA', 'mixedTripletStepSeedsB', 'mixedTripletStepSeeds', 'pixelLessStepSeeds', 'tobTecStepSeeds', 'displacedGeneralStepSeeds','photonConvTrajSeedFromSingleLeg')
categories ['Seeding'] = cms.vstring( 'TooManyClusters', 'TooManyPairs', 'TooManyTriplets', 'TooManySeeds' )
# RecoTracker/CkfPattern/src/CkfTrackCandidateMakerBase.cc
pluginsMonName['TrackCandidate'] = cms.string ( 'TrackCandidate' )
modulesLabel ['TrackCandidate'] = cms.vstring( 'initialStepTrackCandidates', 'lowPtTripletStepTrackCandidates', 'pixelPairStepTrackCandidates', 'detachedTripletStepTrackCandidates', 'mixedTripletStepTrackCandidates', 'pixelLessStepTrackCandidates', 'tobTecStepTrackCandidates', 'displacedGeneralStepTrackCandidates', 'convTrackCandidates' )
categories ['TrackCandidate'] = cms.vstring( 'TooManySeeds', 'CkfPattern', 'BaseCkfTrajectoryBuilder_InfiniteLoop' )
# TrackProducer:FailedPropagation
pluginsMonName['TrackFinder'] = cms.string ( 'TrackFinder' )
modulesLabel ['TrackFinder'] = cms.vstring( 'pixelTracks', 'initialStepTracks', 'lowPtTripletStepTracks', 'pixelPairStepTracks', 'detachedTripletStepTracks', 'mixedTripletStepTracks', 'pixelLessStepTracks', 'tobTecStepTracks', 'displacedGeneralStepTracks', 'generalTracks' )
categories ['TrackFinder'] = cms.vstring( 'FailedPropagation' )
pluginsMonName['FullIterTracking'] = cms.string ( 'FullIterTracking' )
modulesLabel ['FullIterTracking'] = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
'displacedGeneralStepSeeds_iter7',
'displacedGeneralStepTrackCandidates_iter7',
'displacedGeneralStepTracks_iter7',
'photonConvTrajSeedFromSingleLeg',
'convTrackCandidates',
'convStepTracks',
)
categories['FullIterTracking'] = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
pluginsMonName['IterTracking'] = cms.string ( 'IterTracking' )
modulesLabel ['IterTracking'] = cms.vstring(
'initialStepSeeds_iter0',
'initialStepTrackCandidates_iter0',
'initialStepTracks_iter0',
'lowPtTripletStepSeeds_iter1',
'lowPtTripletStepTrackCandidates_iter1',
'lowPtTripletStepTracks_iter1',
'pixelPairStepSeeds_iter2',
'pixelPairStepTrackCandidates_iter2',
'pixelPairStepTracks_iter2',
'detachedTripletStepSeeds_iter3',
'detachedTripletStepTrackCandidates_iter3',
'detachedTripletStepTracks_iter3',
'mixedTripletStepSeedsA_iter4',
'mixedTripletStepSeedsB_iter4',
'mixedTripletStepTrackCandidates_iter4',
'mixedTripletStepTracks_iter4',
'pixelLessStepSeeds_iter5',
'pixelLessStepTrackCandidates_iter5',
'pixelLessStepTracks_iter5',
'tobTecStepSeeds_iter6',
'tobTecStepTrackCandidates_iter6',
'tobTecStepTracks_iter6',
'displacedGeneralStepSeeds_iter7',
'displacedGeneralStepTrackCandidates_iter7',
'displacedGeneralStepTracks_iter7'
)
categories['IterTracking'] = cms.vstring(
'TooManyClusters',
'TooManyPairs',
'TooManyTriplets',
'TooManySeeds',
)
pluginsMonName['Conversion'] = cms.string ( 'Conversion' )
modulesLabel ['Conversion'] = cms.vstring( 'photonConvTrajSeedFromSingleLeg', 'convTrackCandidates', 'convStepTracks' )
categories ['Conversion'] = cms.vstring( 'TooManyClusters', 'TooManyPairs', 'TooManyTriplets', 'TooManySeeds' )
selectedModules.extend( ['LocalReco'] )
selectedModules.extend( ['Clusterizer'] )
selectedModules.extend( ['Seeding'] )
selectedModules.extend( ['TrackCandidate'] )
selectedModules.extend( ['TrackFinder'] )
# cosmicseedfinderP5 (CosmicSeedGenerator): TooManyClusters; combinatorialcosmicseedfinderP5 (SeedGeneratorFromRegionHitsEDProducer):TooManyClusters; regionalCosmicTrackerSeeds (CtfSpecialSeedGenerator): TooManyClusters, CtfSpecialSeedGenerator
pluginsMonName['cosmicsSeeding'] = cms.string ( 'Seeding' )
modulesLabel ['cosmicsSeeding'] = cms.vstring( 'cosmicseedfinderP5', 'combinatorialcosmicseedfinderP5', 'regionalCosmicTrackerSeeds', 'CtfSpecialSeedGenerator' )
categories ['cosmicsSeeding'] = cms.vstring( 'TooManyClusters' )
# ckfTrackCandidatesP5 (CkfTrackCandidateMaker), regionalCosmicCkfTrackCandidates (CkfTrackCandidateMaker):
# RecoTracker/CkfPattern/src/BaseCkfTrajectoryBuilder.cc
# - CkfPattern
# - BaseCkfTrajectoryBuilder_InfiniteLoop
# RecoTracker/CkfPattern/src/CkfTrajectoryBuilder.cc
# - CkfPattern
# RecoTracker/CkfPattern/src/CkfTrackCandidateMakerBase.cc
# - TooManySeeds
pluginsMonName['cosmicsTrackCandidate'] = cms.string ( 'TrackCandidate' )
modulesLabel ['cosmicsTrackCandidate'] = cms.vstring( 'ckfTrackCandidatesP5', 'regionalCosmicCkfTrackCandidates' )
categories ['cosmicsTrackCandidate'] = cms.vstring( 'CkfPattern', 'BaseCkfTrajectoryBuilder_InfiniteLoop', 'TooManySeeds' )
pluginsMonName['cosmicsTrack'] = cms.string ( 'TrackFinder' )
modulesLabel ['cosmicsTrack'] = cms.vstring( 'ctfWithMaterialTracksCosmics', 'regionalCosmicTracks' )
categories ['cosmicsTrack'] = cms.vstring( 'FailedPropagation', 'RKPropagatorInS' )
selectedModules4cosmics.extend( ['LocalReco'] )
selectedModules4cosmics.extend( ['cosmicsSeeding'] )
selectedModules4cosmics.extend( ['cosmicsTrackCandidate'] )
selectedModules4cosmics.extend( ['cosmicsTrack'] )
|
"""helper functions for benchmarks."""
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def create_json(csvs, trials, seeds, xs, ys, factors, names):
"""Convert multiple algorithms csvs outputs to json format.
Args:
csvs (list[list]): A list of list of csvs which contains all csv files
for each algorithms in the task.
trials (int): Number of trials in the task.
seeds (list[int]): A list of positive integers which is used in the
algorithms
xs (list[string]): A list of X column names of algorithms csv.
ys (list[string]): A list of Y column names of algorithms csv.
factors (list[int]): A list of factor value for each algorithms
names (list[string]): A list of name of each algorithms
Returns:
dict: a dictionary(json) whose values should contain time_steps
(x-value) and return values (y-value) for each algorithms for each
trials
"""
task_result = {}
for trial in range(trials):
trial_seed = 'trial_%d' % (trial + 1)
task_result['seed'] = seeds[trial]
task_result[trial_seed] = {}
dfs = (json.loads(pd.read_csv(csv[trial]).to_json()) for csv in csvs)
task_result[trial_seed] = {
name: {
'time_steps': [float(val) * factor for val in df[x].values()],
'return': df[y]
}
for df, x, y, factor, name in zip(dfs, xs, ys, factors, names)
}
return task_result
def plot_average_over_trials(csvs, ys, xs, plt_file, env_id, x_label, y_label,
names):
"""Plot mean and confidence area of benchmark from csv files of algorithms.
x-value is step and y-value depends on the parameter ys.
Calculate mean and std for the y values and draw a line using mean and
show confidence area using std.
Step length of every csv data ans ys should be same.
Args:
csvs (list[list]): A list of list of csvs which contains all csv files
for each algorithms in the task.
ys (list[int]): A list of Y column names of algorithms csv.
plt_file (string): Path of the plot png file.
env_id (string): String contains the id of the environment. (for title)
x_label (string): label for x axis of the plot
y_label (string): label for y axis of the plot
names (list[string]): labels for each line in the graph
"""
assert all(len(x) == len(csvs[0]) for x in csvs)
for trials, y, x, name in zip(csvs, ys, xs, names):
y_vals = np.array([np.array(pd.read_csv(t)[y]) for t in trials])
x_vals = [np.array(pd.read_csv(t)[x]) for t in trials]
for x_val in x_vals:
assert np.array_equal(x_val, x_vals[0])
y_mean, y_std = y_vals.mean(axis=0), y_vals.std(axis=0)
# pylint: disable=unsubscriptable-object
plt.plot(x_vals[0], y_mean, label=name)
# pylint: disable=unsubscriptable-object
plt.fill_between(x_vals[0], (y_mean - y_std), (y_mean + y_std),
alpha=.1)
plt.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(env_id)
plt.savefig(plt_file)
plt.close()
def plot_average_over_trials_with_x(csvs, ys, xs, plt_file, env_id, x_label,
y_label, names):
"""Plot mean and confidence area of benchmark from csv files of algorithms.
x-value is step and y-value depends on the parameter ys.
Calculate mean and std for the y values and draw a line using mean and
show confidence area using std.
Step length of every csv data ans ys should be same.
Args:
csvs (list[list]): A list of list of csvs which contains all csv files
for each algorithms in the task.
ys (list[int]): A list of Y column names of algorithms csv.
plt_file (string): Path of the plot png file.
env_id (string): String contains the id of the environment. (for title)
x_label (string): label for x axis of the plot
y_label (string): label for y axis of the plot
names (list[string]): labels for each line in the graph
"""
assert all(len(x) == len(csvs[0]) for x in csvs)
for trials, y, x, name in zip(csvs, ys, xs, names):
        y_vals = np.array([np.array(pd.read_csv(t)[y]) for t in trials])
        x_vals = np.array([np.array(pd.read_csv(t)[x]) for t in trials])
        x_mean = x_vals.mean(axis=0)
        y_mean, y_std = y_vals.mean(axis=0), y_vals.std(axis=0)
        # pylint: disable=unsubscriptable-object
        plt.plot(x_mean, y_mean, label=name)
        # pylint: disable=unsubscriptable-object
        plt.fill_between(x_mean, (y_mean - y_std), (y_mean + y_std),
                         alpha=.1)
plt.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(env_id)
plt.savefig(plt_file)
plt.close()
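# A minimal usage sketch (not part of the original module): it writes two tiny
# CSV files per hypothetical algorithm and plots their averaged return curves.
if __name__ == '__main__':
    import os
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    csvs = []
    for algo in ('algo_a', 'algo_b'):
        trial_files = []
        for trial in range(2):
            path = os.path.join(tmp_dir, '%s_%d.csv' % (algo, trial))
            pd.DataFrame({'TotalEnvSteps': [0, 1, 2],
                          'AverageReturn': np.random.rand(3)}).to_csv(path, index=False)
            trial_files.append(path)
        csvs.append(trial_files)
    plot_average_over_trials(csvs,
                             ys=['AverageReturn', 'AverageReturn'],
                             xs=['TotalEnvSteps', 'TotalEnvSteps'],
                             plt_file=os.path.join(tmp_dir, 'benchmark.png'),
                             env_id='DemoEnv-v0',
                             x_label='Total environment steps',
                             y_label='Average return',
                             names=['algo_a', 'algo_b'])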
|
"""Testing file for homography (not sorted yet)
"""
import numpy as np
import tensorflow as tf
import torch
# corner_img = np.array([(0, 0), (img_w, 0), (0, img_h), (img_w, img_h)])
import cv2
from utils.utils import warp_points_np
def sample_homography(inv_scale=3):
corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)])
offset_r = 1 - 1/inv_scale
img_offset = np.array([(-1, -1), (-1, offset_r), (offset_r, -1), (offset_r, offset_r)])
corner_map = np.random.rand(4,2)/inv_scale + img_offset
matrix = cv2.getPerspectiveTransform(np.float32(corner_img), np.float32(corner_map))
return matrix
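# Quick illustrative note (not from the original file): sample_homography returns
# a 3x3 perspective matrix mapping the canonical square corners to randomly
# perturbed ones, e.g. sample_homography(inv_scale=3).shape == (3, 3).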
import matplotlib.pyplot as plt
def plot_points(matrix, ls='--', lw=1.2, colors=None):
x_points, y_points = matrix[:,0], matrix[:,1]
size = len(x_points)
    colors = colors if colors is not None else ['red', 'blue', 'orange', 'green']
for i in range(size):
plt.plot(x_points[i], y_points[i], color=colors[i], marker='o')
# plt.plot(x_points[i], x_points[(i+1) % size],color=colors[i],linestyle=ls, linewidth=lw)
# plt.plot(x_points[i], x_points[(i + 1) % size], linestyle=ls, linewidth=lw)
# [y_points[i], y_points[(i+1) % size]],
# color=colors[i],
# linestyle=ls, linewidth=lw)
def printCorners(corner_img, mat_homographies):
points = warp_points_np(corner_img, mat_homographies)
# plot
plot_points(corner_img)
for i in range(points.shape[0]):
plot_points(points[i,:,:])
plt.show()
def test_sample_homography():
batch_size = 30
filename = '../configs/superpoint_coco_train.yaml'
import yaml
with open(filename, 'r') as f:
        config = yaml.safe_load(f)
test_tf = False
test_corner_def = True
if test_tf == True:
from utils.homographies import sample_homography as sample_homography
boundary = 1
# from utils.homographies import sample_homography_np as sample_homography
# mat_homographies = matrix[np.newaxis, :,:]
# mat_homographies = [sample_homography(tf.constant([boundary,boundary]),
mat_homographies = [sample_homography(np.array([boundary,boundary]),
**config['data']['warped_pair']['params']) for i in range(batch_size)]
mat_homographies = np.stack(mat_homographies, axis=0)
corner_img = np.array([[0., 0.], [0., boundary], [boundary, boundary], [boundary, 0.]])
printCorners(corner_img, mat_homographies)
if test_corner_def:
corner_img = np.array([(-1, -1), (-1, 1), (1, 1), (1, -1)])
from utils.homographies import sample_homography_np as sample_homography
boundary = 2
mat_homographies = [sample_homography(np.array([boundary,boundary]), shift=-1,
**config['data']['warped_pair']['params']) for i in range(batch_size)]
mat_homographies = np.stack(mat_homographies, axis=0)
printCorners(corner_img, mat_homographies)
else:
from utils.utils import sample_homography
mat_homographies = [sample_homography(1) for i in range(batch_size)]
# sess = tf.Session()
# with sess.as_default():
# m = mat_homographies[0].eval()
print("end")
def test_valid_mask():
from utils.utils import pltImshow
batch_size = 1
mat_homographies = [sample_homography(3) for i in range(batch_size)]
mat_H = np.stack(mat_homographies, axis=0)
corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)])
# printCorners(corner_img, mat_H)
# points = warp_points_np(corner_img, mat_homographies)
mat_H = torch.tensor(mat_H, dtype=torch.float32)
mat_H_inv = torch.stack([torch.inverse(mat_H[i, :, :]) for i in range(batch_size)])
from utils.utils import compute_valid_mask, labels2Dto3D
device = 'cpu'
shape = torch.tensor([240, 320])
for i in range(1):
r = 3
mask_valid = compute_valid_mask(shape, inv_homography=mat_H_inv, device=device, erosion_radius=r)
pltImshow(mask_valid[0,:,:])
cell_size = 8
mask_valid = labels2Dto3D(mask_valid.view(batch_size, 1, mask_valid.shape[1], mask_valid.shape[2]), cell_size=cell_size)
mask_valid = torch.prod(mask_valid[:,:cell_size*cell_size,:,:], dim=1)
pltImshow(mask_valid[0,:,:].cpu().numpy())
mask = {}
mask.update({'homographies': mat_H, 'masks': mask_valid})
np.savez_compressed('h2.npz', **mask)
print("finish testing valid mask")
if __name__ == '__main__':
# test_sample_homography()
test_valid_mask()
'''
x_points = np.array([0, 0, 20, 20])
y_points = np.array([0, 20, 20, 0])
matrix = np.array([x_points, y_points])
# colors = ['red', 'blue', 'magenta', 'green']
colors = ['r', 'b', 'm', 'g']
size = len(x_points)
plot_points(matrix, colors)
plt.ylim([-5,25])
plt.xlim([-5,25])
plt.axes().set_aspect('equal')
plt.show()
'''
|
import argparse
import os
import math
import time
import shutil
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import pandas as pd
import numpy as np
from torch.nn.utils import clip_grad_norm_
from ssn_opts import parser
from load_binary_score import BinaryDataSet
from binary_model import BinaryClassifier
from transforms import *
from ops.utils import get_actionness_configs, ScheduledOptim
from ops.anet_db import ANetDB
from torch.utils import model_zoo
from attention.utils import Rank_Criterion, CE_Criterion_multi
from ops.AdamW import AdamW
from ops.eval_utils import area_under_curve, grd_activity
best_loss = 100
def convert_categorical(x_in, n_classes=2):
shp = x_in.shape
x = (x_in.ravel().astype('int'))
x_mask = (x >= 0).reshape(-1, 1)
x = x.clip(0)
y = np.diag(np.ones((n_classes,)))
y = y[x] * x_mask
y = y.reshape(shp + (n_classes,)).astype('float32')
return y
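# Example (illustrative): convert_categorical(np.array([1, -1, 0])) returns
#   [[0., 1.],
#    [0., 0.],   <- negative labels are masked out to an all-zero row
#    [1., 0.]]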
def main():
global args, best_loss
args = parser.parse_args()
dataset_configs = get_actionness_configs(args.dataset)
sampling_configs = dataset_configs['sampling']
num_class = dataset_configs['num_class']
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
db = ANetDB.get_db("1.3")
# set the directory for the rgb features
if args.feat_model == 'i3d_rgb' or args.feat_model == 'i3d_rgb_trained':
args.input_dim = 1024
elif args.feat_model == 'inception_resnet_v2' or args.feat_model == 'inception_resnet_v2_trained':
args.input_dim = 1536
if args.use_flow:
if not args.only_flow:
args.input_dim += 1024
else:
args.input_dim = 1024
print(("=> the input features are extracted from '{}' and the dim is '{}'").format(
args.feat_model, args.input_dim))
# if reduce the dimension of input feature first
if args.reduce_dim > 0:
assert args.reduce_dim % args.n_head == 0, "reduce_dim {} % n_head {} != 0".format(
args.reduce_dim, args.n_head)
args.d_k = int(args.reduce_dim // args.n_head)
args.d_v = args.d_k
else:
assert args.input_dim % args.n_head == 0, "input_dim {} % n_head {} != 0".format(
args.input_dim, args.n_head)
args.d_k = int(args.input_dim // args.n_head)
args.d_v = args.d_k
args.d_model = args.n_head * args.d_k
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
if args.pos_enc:
save_path = os.path.join(args.result_path, '_'.join(
(args.att_kernel_type, 'N'+str(args.n_layers))))
else:
save_path = os.path.join(args.result_path, '_'.join(
(args.att_kernel_type, 'N'+str(args.n_layers)))) + '_nopos'
if args.num_local > 0:
save_path = save_path + '_loc' + str(args.num_local) + args.local_type
if args.dilated_mask:
save_path += '_dilated'
if args.groupwise_heads > 0:
save_path = save_path + '_G' + str(args.groupwise_heads)
if len(args.roi_poolsize) > 0:
save_path = save_path + '_roi' + str(args.roi_poolsize)
model_name = os.path.split(save_path)[1]
# logger = Logger('./logs/{}'.format(model_name))
logger = None
model = BinaryClassifier(
num_class, args.num_body_segments, args, dropout=args.dropout)
model = torch.nn.DataParallel(model, device_ids=None).cuda()
cudnn.enabled = False
# cudnn.benchmark = True
pin_memory = True
train_prop_file = 'data/{}_proposal_list.txt'.format(
dataset_configs['train_list'])
val_prop_file = 'data/{}_proposal_list.txt'.format(
dataset_configs['test_list'])
train_videos = db.get_subset_videos('training')
val_videos = db.get_subset_videos('validation')
train_loader = torch.utils.data.DataLoader(
BinaryDataSet(args.feat_root, args.feat_model, train_prop_file, train_videos,
exclude_empty=True, body_seg=args.num_body_segments,
input_dim=args.d_model, prop_per_video=args.prop_per_video,
fg_ratio=6, bg_ratio=6, num_local=args.num_local,
use_flow=args.use_flow, only_flow=args.only_flow),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=pin_memory,
drop_last=True)
# val_loader = torch.utils.data.DataLoader(
# BinaryDataSet(args.feat_root, args.feat_model, val_prop_file, val_videos,
# exclude_empty=True, body_seg=args.num_body_segments,
# input_dim=args.d_model, prop_per_video=args.prop_per_video,
# fg_ratio=6, bg_ratio=6, num_local=args.num_local,
# use_flow=args.use_flow, only_flow=args.only_flow),
# batch_size=args.batch_size//2, shuffle=False,
# num_workers=args.workers*2, pin_memory=pin_memory)
val_loader = torch.utils.data.DataLoader(
BinaryDataSet(args.feat_root, args.feat_model, val_prop_file, subset_videos=val_videos,
exclude_empty=True, body_seg=args.num_body_segments,
input_dim=args.d_model, test_mode=True, use_flow=args.use_flow,
verbose=False, num_local=args.num_local, only_flow=args.only_flow),
batch_size=1, shuffle=False,
num_workers=10, pin_memory=True)
ground_truth, cls_to_idx = grd_activity('data/activity_net.v1-3.min_save.json', subset='validation')
del cls_to_idx['background']
# optimizer = torch.optim.Adam(
# model.parameters(),
# args.lr, weight_decay=args.weight_decay)
optimizer = AdamW(
model.parameters(),
args.lr, weight_decay=args.weight_decay)
# optimizer = torch.optim.SGD(model.parameters(),
# args.lr,
# momentum=args.momentum,
# weight_decay=args.weight_decay, nesterov=False)
if args.resume is not None and len(args.resume) > 0:
model.load_state_dict(torch.load(args.resume)[
'state_dict'], strict=False)
criterion_stage1 = CE_Criterion_multi(use_weight=True)
criterion_stage2 = Rank_Criterion(epsilon=0.02)
patience = 0
for epoch in range(args.start_epoch, args.epochs):
# adjust_learning_rate(optimizer, epoch, args.lr_steps)
# train for one epoch
if patience > 5:
break
train(train_loader, model, optimizer, criterion_stage1,
criterion_stage2, epoch, logger)
# evaluate on validation list
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
loss = validate(
val_loader, model, ground_truth, (epoch + 1) * len(train_loader), epoch)
# remember best prec@1 and save checkpoint
is_best = 1.0001 * loss < best_loss
if is_best:
patience = 0
else:
patience += 1
best_loss = min(loss, best_loss)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.model,
'state_dict': model.state_dict(),
'best_loss': best_loss,
}, is_best, save_path)
def train(train_loader, model, optimizer, criterion_stage1, criterion_stage2, epoch, logger):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
score_losses = AverageMeter()
start_losses = AverageMeter()
end_losses = AverageMeter()
roi_losses = AverageMeter()
# switch to train model
model.train()
end_time = time.time()
optimizer.zero_grad()
for i, (feature, feature_mask, target, start, end, pos_ind, gts) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end_time)
# feature_mask = feature.abs().mean(2).ne(0).float()
feature = feature.cuda().requires_grad_(False)
feature_mask = feature_mask.cuda().requires_grad_(False)
pos_ind = pos_ind.cuda().requires_grad_(False)
# compute output
score_output, enc_slf_attn, roi_scores, labels, rois_mask = model(
feature, pos_ind, target, gts=gts, feature_mask=feature_mask, epoch_id=epoch)
score_loss, start_loss, end_loss, attn_loss = criterion_stage1(
score_output, target, start, end, attn=enc_slf_attn, mask=feature_mask)
roi_loss = criterion_stage2(roi_scores, labels, rois_mask)
loss = score_loss + 20. * roi_loss + 0.5 * start_loss + 0.5 * end_loss
score_losses.update(score_loss.item(), feature.size(0))
start_losses.update(start_loss.item(), feature.size(0))
end_losses.update(end_loss.item(), feature.size(0))
roi_losses.update(roi_loss.item(), feature.size(0))
losses.update(loss.item(), feature.size(0))
# compute gradient and do SGD step
loss.backward()
if args.clip_gradient is not None:
total_norm = clip_grad_norm_(
model.module.get_trainable_parameters(), args.clip_gradient)
if total_norm > args.clip_gradient:
print('Clipping gradient: {} with coef {}'.format(
total_norm, args.clip_gradient / total_norm))
else:
total_norm = 0
optimizer.step()
# # 1. Log scalar values (scalar summary)
# info = {'train_loss': loss.item(),
# 'train_score_loss': score_loss.item(),
# 'train_start_loss': start_loss.item(),
# 'train_end_loss': end_loss.item(),
# 'train_roi_loss': roi_loss.item()}
# for tag, value in info.items():
# logger.scalar_summary(tag, value, i+epoch*len(train_loader)+1)
# # 2. Log values and gradients of the parameters (histogram summary)
# for tag, value in model.named_parameters():
# tag_ = tag.replace('.', '/')
# if np.isnan(value.grad.data.cpu().numpy()).any() or np.isnan(value.data.cpu().numpy()).any():
# import pdb; pdb.set_trace()
# logger.histo_summary(tag_, value.data.cpu().numpy(), i+epoch*len(train_loader)+1)
# logger.histo_summary(tag_+'/grad', value.grad.data.cpu().numpy(), i+epoch*len(train_loader)+1)
del loss, score_loss, roi_loss, score_output, enc_slf_attn, roi_scores, labels, rois_mask
# optimizer.update_learning_rate()
optimizer.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end_time)
end_time = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score_Loss {score_loss.val:.4f} ({score_loss.avg:.4f})\t'
'Start_Loss {start_loss.val:.4f} ({start_loss.avg:.4f})\t'
'End_Loss {end_loss.val:.4f} ({end_loss.avg:.4f})\t'
'ROI_Loss {roi_loss.val:.4f} ({roi_loss.avg:.4f})\t'
.format(
epoch, i, len(train_loader), lr=optimizer.param_groups[0]['lr'],
batch_time=batch_time, data_time=data_time, loss=losses,
score_loss=score_losses, start_loss=start_losses,
end_loss=end_losses, roi_loss=roi_losses)
)
def validate(val_loader, model, ground_truth, iter, epoch):
batch_time = AverageMeter()
model.eval()
end_time = time.time()
video_lst, t_start_lst, t_end_lst, score_lst = [], [], [], []
for i, (feature, feature_mask, num_feat, pos_ind, video_id, video_duration) in enumerate(val_loader):
feature = feature[0].cuda()
feature_mask = feature_mask[0].cuda()
pos_ind = pos_ind[0].cuda()
video_id = video_id[0]
video_duration = float(video_duration[0].cpu().numpy())
with torch.no_grad():
rois, actness, roi_scores = model(
feature, pos_ind, feature_mask=feature_mask, test_mode=True)
rois, actness, roi_scores = rois[0].cpu().numpy(
), actness[0].cpu().numpy(), roi_scores[0].cpu().numpy()[:, 1]
# import pdb; pdb.set_trace()
rois = list(filter(lambda b: b[1]-b[0] > 0, rois))
actness = list(filter(lambda b: b > 0, actness))
roi_scores = list(filter(lambda b: b > 0, roi_scores))
# save results
video_lst.extend([video_id] * len(rois))
t_start_lst.extend([x[0] / float(num_feat) * video_duration for x in rois])
t_end_lst.extend([x[1] / float(num_feat) * video_duration for x in rois])
score_lst.extend([roi_score*act_score for (act_score, roi_score) in zip(actness, roi_scores)])
batch_time.update(time.time() - end_time)
end_time = time.time()
if i % (1000) == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
.format(i, len(val_loader), batch_time=batch_time))
prediction = pd.DataFrame({'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'score': score_lst})
auc, ar_at_prop, nr_proposals_lst = area_under_curve(prediction, ground_truth, max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10))
nr_proposals_lst = np.around(nr_proposals_lst)
print('AR@1 is {:.6f}, AR@10 is {:.6f}, AR@20 is {:.6f}'.format(ar_at_prop[0], ar_at_prop[9], ar_at_prop[19]))
print('AR@50 is {:.6f}, AR@100 is {:.6f}, AUC is {:.6f}'.format(ar_at_prop[49], ar_at_prop[99], auc))
return -1. * auc
def save_checkpoint(state, is_best, save_path, filename='/checkpoint.pth.tar'):
if not os.path.exists(save_path):
os.makedirs(save_path)
filename = save_path + filename
torch.save(state, filename)
if is_best:
best_name = save_path + '/model_best.pth.tar'
shutil.copyfile(filename, best_name)
def adjust_learning_rate(optimizer, epoch, lr_steps):
    # Set the learning rate to the initial LR decayed by a factor of 10 at each milestone in lr_steps
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
decay = args.weight_decay
for param_group in optimizer.param_groups:
param_group['lr'] = lr
param_group['weight_decay'] = decay
class AverageMeter(object):
# Computes and stores the average and current value
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
target = target.view(-1)
# computes the precision@k for the specific values of k
maxk = max(topk)
batch_size = target.size(0)
output = output.view(batch_size, -1)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
# print(output.size(), pred.size(), target.size())
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 22:26:25 2021
@author: zrowl
"""
import types
from geographiclib.geodesic import Geodesic
from paths import linear
def coords2path(coords=[], geo=Geodesic.WGS84, interp_func_name='linear'):
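    """Build latitude/longitude interpolation functions for a piecewise geodesic path.
    For each consecutive pair of (lat, lon) coordinates the geodesic azimuth and distance
    are computed with geo.Inverse; a generated if/elif chain then dispatches on cumulative
    distance d to the named interpolation function. Returns the two generated functions,
    their source strings, and the total path length in metres.
    """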
lat_funcstr = "def lat_path(d, _):"
lon_funcstr = "def lon_path(d, _):"
d = 0.
for i in range(len(coords)-1):
g = geo.Inverse(coords[i][0], coords[i][1],
coords[i+1][0], coords[i+1][1])
azi = g['azi1']
d = d + g['s12']
if i==0:
ifstate = "if"
elif i>0:
ifstate = "elif"
lat_line = ifstate + " d < "+str(d)+": \
return " + interp_func_name + "(d, {'ordinate': 'lat', 'aziDeg': "+str(azi)+"})"
lon_line = ifstate + " d < "+str(d)+": \
return " + interp_func_name + "(d, {'ordinate': 'lon', 'aziDeg': "+str(azi)+"})"
lat_funcstr = lat_funcstr + "\n\t" + lat_line
lon_funcstr = lon_funcstr + "\n\t" + lon_line
lat_funcstr = lat_funcstr + "\n\t" + "else: return 0"
lon_funcstr = lon_funcstr + "\n\t" + "else: return 0"
lat_funcobj = compile(lat_funcstr, '<string>', 'exec')
lon_funcobj = compile(lon_funcstr, '<string>', 'exec')
lat_functype = types.FunctionType(lat_funcobj.co_consts[0], globals())
lon_functype = types.FunctionType(lon_funcobj.co_consts[0], globals())
return lat_functype, lon_functype, lat_funcstr, lon_funcstr, d
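if __name__ == "__main__":
    # Minimal usage sketch (hypothetical coordinates; assumes paths.linear accepts the
    # (d, {'ordinate': ..., 'aziDeg': ...}) call signature generated above).
    demo_coords = [(43.6532, -79.3832), (45.4215, -75.6972)]  # Toronto -> Ottawa
    lat_f, lon_f, lat_src, lon_src, total_d = coords2path(demo_coords)
    print(lat_src)                          # inspect the generated source
    print("total distance (m):", total_d)
    print("lat_path at mid-path:", lat_f(total_d / 2, None))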
|
"""
Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
The Universal Permissive License (UPL), Version 1.0
"""
from java.io import File
from java.lang import IllegalArgumentException
from oracle.weblogic.deploy.util import PyOrderedDict as OrderedDict
from oracle.weblogic.deploy.util import StringUtils
from oracle.weblogic.deploy.util import WLSDeployArchiveIOException
from wlsdeploy.aliases import model_constants
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.discover import discoverer
from wlsdeploy.tool.discover.discoverer import Discoverer
from wlsdeploy.util import path_utils
_class_name = 'DeploymentsDiscoverer'
_logger = PlatformLogger(discoverer.get_discover_logger_name())
class DeploymentsDiscoverer(Discoverer):
"""
    Discover the application and shared library deployments from the WebLogic domain. Collect all deployment
binaries and plans into the discovery archive file.
"""
def __init__(self, model_context, deployments_dictionary, base_location, wlst_mode=WlstModes.OFFLINE, aliases=None):
Discoverer.__init__(self, model_context, base_location, wlst_mode, aliases)
self._dictionary = deployments_dictionary
def discover(self):
"""
Discover the deployment information from the domain. This is the API method to discover all
deployment information from the domain.
:return: dictionary containing the deployment information
"""
_method_name = 'discover'
_logger.entering(class_name=_class_name, method_name=_method_name)
_logger.info('WLSDPLY-06380', class_name=_class_name, method_name=_method_name)
model_top_folder_name, libraries = self.get_shared_libraries()
discoverer.add_to_model_if_not_empty(self._dictionary, model_top_folder_name, libraries)
model_top_folder_name, applications = self.get_applications()
discoverer.add_to_model_if_not_empty(self._dictionary, model_top_folder_name, applications)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return self._dictionary
def get_shared_libraries(self):
"""
Discover the shared library deployment information from the domain. Collect any shared library binaries into
the discovered archive file. If the shared library cannot be collected into the archive, the shared library
source path will be removed from the model and the shared library un-targeted.
:return: model name for the dictionary and the dictionary containing the shared library information
"""
_method_name = 'get_shared_libraries'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.LIBRARY
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
libraries = self._find_names_in_folder(location)
if libraries:
_logger.info('WLSDPLY-06381', len(libraries), class_name=_class_name, method_name=_method_name)
typedef = self._model_context.get_domain_typedef()
name_token = self._alias_helper.get_name_token(location)
for library in libraries:
if typedef.is_system_shared_library(library):
_logger.info('WLSDPLY-06401', typedef.get_domain_type(), library, class_name=_class_name,
method_name=_method_name)
else:
_logger.info('WLSDPLY-06382', library, class_name=_class_name, method_name=_method_name)
location.add_name_token(name_token, library)
result[library] = OrderedDict()
self._populate_model_parameters(result[library], location)
self._add_shared_libraries_to_archive(library, result[library])
self._discover_subfolders(result[library], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=model_top_folder_name)
return model_top_folder_name, result
def _add_shared_libraries_to_archive(self, library_name, library_dict):
"""
Add the binary or directory referenced by the shared library to the archive file.
If the binary can not be located and added to the archive file, un-target the library and log the problem.
:param library_name: name of the shared library
:param library_dict: containing the shared library information
:raise DiscoverException: An unexpected exception occurred trying to write the library to the archive
"""
_method_name = 'add_shared_library_to_archive'
_logger.entering(library_name, class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
if model_constants.SOURCE_PATH in library_dict:
file_name = library_dict[model_constants.SOURCE_PATH]
if file_name:
file_name_path = self._convert_path(file_name)
if self._is_oracle_home_file(file_name_path):
_logger.info('WLSDPLY-06383', library_name, class_name=_class_name, method_name=_method_name)
else:
_logger.info('WLSDPLY-06384', library_name, file_name_path, class_name=_class_name,
method_name=_method_name)
new_source_name = None
try:
new_source_name = archive_file.addSharedLibrary(File(file_name_path))
except IllegalArgumentException, iae:
if model_constants.TARGET in library_dict:
target = library_dict[model_constants.TARGET]
del library_dict[model_constants.TARGET]
_logger.warning('WLSDPLY-06385', library_name, target, iae.getLocalizedMessage(),
file_name_path, class_name=_class_name, method_name=_method_name)
else:
_logger.warning('WLSDPLY-06386', library_name, iae.getLocalizedMessage(), file_name_path,
class_name=_class_name, method_name=_method_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06387', library_name, file_name_path,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_source_name is not None:
library_dict[model_constants.SOURCE_PATH] = new_source_name
_logger.finer('WLSDPLY-06388', library_name, new_source_name, class_name=_class_name,
method_name=_method_name)
        self._add_shared_library_plan_to_archive(library_name, library_dict)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
    def _add_shared_library_plan_to_archive(self, library_name, library_dict):
"""
Add the shared library deployment plan to the archive file. Create a unique name for the deployment plan from
the library binary name and the plan name. If the plan cannot be added to the archive file, the plan
information will remain in the model library entry, but a warning will be generated.
:param library_name: shared library name in the model
:param library_dict: containing the library information
:raise: DiscoverException: An unexpected exception occurred trying to write the plan to the archive file
"""
        _method_name = 'add_shared_library_plan_to_archive'
_logger.entering(library_name, class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
if model_constants.PLAN_PATH in library_dict:
library_source_name = library_dict[model_constants.SOURCE_PATH]
plan_path = library_dict[model_constants.PLAN_PATH]
if plan_path:
new_plan_name = None
_logger.info('WLSDPLY-06389', library_name, plan_path, class_name=_class_name, method_name=_method_name)
plan_dir = None
if model_constants.PLAN_DIR in library_dict:
plan_dir = library_dict[model_constants.PLAN_DIR]
del library_dict[model_constants.PLAN_DIR]
plan_file_name = self._resolve_deployment_plan_path(plan_dir, plan_path)
try:
new_plan_name = archive_file.addApplicationDeploymentPlan(File(plan_file_name),
_generate_new_plan_name(
library_source_name,
plan_file_name))
except IllegalArgumentException, iae:
_logger.warning('WLSDPLY-06385', library_name, plan_file_name,
iae.getLocalizedMessage(), class_name=_class_name,
method_name=_method_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06387', library_name,
plan_file_name,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_plan_name is not None:
_logger.finer('WLSDPLY-06390', library_name, new_plan_name,
class_name=_class_name, method_name=_method_name)
library_dict[model_constants.PLAN_PATH] = new_plan_name
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
def get_applications(self):
"""
Discover application deployment information from the domain. Collect the application deployment binaries into
the discovery archive file. If an application binary cannot be collected into the archive file, remove
the application source path from the model and un-target the application.
:return: model name for the dictionary and the dictionary containing the applications information
"""
_method_name = 'get_applications'
_logger.entering(class_name=_class_name, method_name=_method_name)
result = OrderedDict()
model_top_folder_name = model_constants.APPLICATION
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
applications = self._find_names_in_folder(location)
if applications:
_logger.info('WLSDPLY-06391', len(applications), class_name=_class_name, method_name=_method_name)
typedef = self._model_context.get_domain_typedef()
name_token = self._alias_helper.get_name_token(location)
for application in applications:
if typedef.is_system_app(application):
_logger.info('WLSDPLY-06400', typedef.get_domain_type(), application, class_name=_class_name,
method_name=_method_name)
else:
_logger.info('WLSDPLY-06392', application, class_name=_class_name, method_name=_method_name)
location.add_name_token(name_token, application)
result[application] = OrderedDict()
self._populate_model_parameters(result[application], location)
self._add_application_to_archive(application, result[application])
self._discover_subfolders(result[application], location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return model_top_folder_name, result
def _add_application_to_archive(self, application_name, application_dict):
"""
Add the binary or directory referenced by the application to the archive file.
If the binary can not be located and added to the archive file, un-target the application and log the problem.
:param application_name: name of the application in the model
:param application_dict: containing the application information
:raise DiscoverException: An unexpected exception occurred trying to write the application to the archive
"""
_method_name = 'add_application_to_archive'
_logger.entering(application_name, class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
if model_constants.SOURCE_PATH in application_dict:
file_name = application_dict[model_constants.SOURCE_PATH]
if file_name:
file_name_path = self._convert_path(file_name)
if self._is_oracle_home_file(file_name_path):
_logger.info('WLSDPLY-06393', application_name, class_name=_class_name, method_name=_method_name)
else:
_logger.info('WLSDPLY-06394', application_name, file_name_path, class_name=_class_name,
method_name=_method_name)
new_source_name = None
try:
new_source_name = archive_file.addApplication(File(file_name_path))
except IllegalArgumentException, iae:
if model_constants.TARGET in application_dict:
target = application_dict[model_constants.TARGET]
del application_dict[model_constants.TARGET]
_logger.warning('WLSDPLY-06395', application_name, target, iae.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
else:
_logger.warning('WLSDPLY-06396', application_name, iae.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06397', application_name,
file_name_path, wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_source_name is not None:
_logger.finer('WLSDPLY-06398', application_name, new_source_name, class_name=_class_name,
method_name=_method_name)
application_dict[model_constants.SOURCE_PATH] = new_source_name
self.add_application_plan_to_archive(application_name, application_dict)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
def add_application_plan_to_archive(self, application_name, application_dict):
"""
Add the application deployment plan to the archive file. Create a unique name for the deployment plan from
the application name and the plan name. If the plan cannot be located and added to the archive file, the
plan will remain in the model, but a warning message will be generated about the problem.
:param application_name: name of the application in the model
:param application_dict: containing the application information
:raise: DiscoverException: An unexpected exception occurred trying to write the plan to the archive file
"""
_method_name = 'add_application_plan_to_archive'
_logger.entering(application_name, class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
if model_constants.PLAN_PATH in application_dict:
app_source_name = application_dict[model_constants.SOURCE_PATH]
plan_path = application_dict[model_constants.PLAN_PATH]
if plan_path:
_logger.info('WLSDPLY-06402', application_name, plan_path, class_name=_class_name,
method_name=_method_name)
plan_dir = None
if model_constants.PLAN_DIR in application_dict:
plan_dir = application_dict[model_constants.PLAN_DIR]
del application_dict[model_constants.PLAN_DIR]
plan_file_name = self._resolve_deployment_plan_path(plan_dir, plan_path)
new_plan_name = None
try:
new_plan_name = archive_file.addApplicationDeploymentPlan(File(plan_file_name),
_generate_new_plan_name(
app_source_name,
plan_file_name))
except IllegalArgumentException, iae:
_logger.warning('WLSDPLY-06395', application_name, plan_file_name,
iae.getLocalizedMessage(), class_name=_class_name,
method_name=_method_name)
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06397', application_dict, plan_file_name,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
if new_plan_name is not None:
_logger.finer('WLSDPLY-06399', application_name, new_plan_name,
class_name=_class_name, method_name=_method_name)
application_dict[model_constants.PLAN_PATH] = new_plan_name
_logger.exiting(class_name=_class_name, method_name=_method_name)
return
def _resolve_deployment_plan_path(self, plan_dir, plan_path):
"""
Find the deployment plan absolute file path.
This is a private helper method.
:param plan_dir: directory discovered from the domain, which is concatenated to the plan path
:param plan_path: plan path discovered from the domain
        :return: absolute file path for the plan from the plan_dir and plan_path
"""
if not StringUtils.isEmpty(plan_path):
if not StringUtils.isEmpty(plan_dir):
relative_to = plan_dir
else:
relative_to = self._model_context.get_domain_home()
return discoverer.convert_to_absolute_path(relative_to, plan_path)
return plan_path
def _generate_new_plan_name(binary_path, plan_path):
"""
Generate a new plan name from the plan path and binary path.
This is a private helper method.
:param binary_path: source path of the deployment file
:param plan_path: path of the plan from the domain
:return: newly generated plan name for the archive file
"""
new_name = path_utils.get_filename_from_path(plan_path)
if binary_path is not None:
prefix = path_utils.get_filename_no_ext_from_path(binary_path)
new_name = prefix + '-' + new_name
return new_name
|
"""
Django settings for postfixadmin project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from os.path import abspath, join, normpath
import dj_database_url
PROJECT_ROOT = normpath(join(abspath(__file__), "..", "..", "..",))
REPO_ROOT = normpath(join(PROJECT_ROOT, "..",))
MEDIA_ROOT = normpath(join(PROJECT_ROOT, "media"))
MEDIA_URL = '/media/'
STATIC_ROOT = normpath(join(PROJECT_ROOT, "static"))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
normpath(join(REPO_ROOT, "assets")),
)
TEMPLATE_DIRS = (
normpath(join(PROJECT_ROOT, "templates")),
)
DATABASES = {
'default': dj_database_url.config(
default="sqlite:///{0}".format(
normpath(join(REPO_ROOT, "db", "db.sqlite3")))
)
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['_SECRET_KEY']
SITE_ID = 1
# SECURITY WARNING: don't run with debug turned on in production!
# bool() of a non-empty string is always True, so compare the raw value instead.
DEBUG = os.environ.get('_DEBUG', 'False').lower() in ('1', 'true', 'yes')
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'pfa.User'
PFA_DEFAULT_MAILDIR = '/var/mail/vmail/'
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'braces',
'crispy_forms',
'rest_framework',
'debug_toolbar.apps.DebugToolbarConfig',
)
LOCAL_APPS = (
'pfa',
'api',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions'
]
}
ROOT_URLCONF = 'postfixadmin.urls'
WSGI_APPLICATION = 'postfixadmin.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
FIXTURE_DIRS = ('fixtures', )
# Celery
BROKER_URL = 'redis://127.0.0.1:6379/0'
BROKER_TRANSPORT = 'redis'
LOG_FILENAME = normpath(join(REPO_ROOT, "logs", "debug.log"))
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s',
'datefmt': '[%d/%b/%Y %H:%M:%S]'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': LOG_FILENAME,
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'DEBUG',
},
'django.request': {
'handlers': ['console', 'file'],
'propagate': False,
'level': 'ERROR',
},
'pfa.views': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
},
},
}
|
import pandas as pd
from transfer_classifier.augmentor.autoencoder_augmentor import AutoEncoderAugmentor
from transfer_classifier.dataset_preprocessor.amazon_review import AmazonReview
from transformers import AutoModelForMaskedLM, AutoTokenizer
class TestAutoEncoderAugmentor:
def test_replace_words(self) -> None:
model_name = "cl-tohoku/bert-base-japanese-whole-word-masking"
model = AutoModelForMaskedLM.from_pretrained(model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(model_name)
augmentor = AutoEncoderAugmentor(model=model, tokenizer=tokenizer, replace_rate=0.2)
text = "今日もいい天気で、花がきれいに咲いています"
num_replaced, replaced = augmentor.replace_words(text, lang="ja")
assert num_replaced > 0
assert text != replaced
def test_generate(self) -> None:
model_name = "cl-tohoku/bert-base-japanese-whole-word-masking"
model = AutoModelForMaskedLM.from_pretrained(model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(model_name)
augmentor = AutoEncoderAugmentor(model=model, tokenizer=tokenizer, replace_rate=0.3)
review = AmazonReview(input_column="review_title", label_column="stars")
samples = review.load("validation").select(range(10))
augmenteds = augmentor.generate(samples, review)
result = []
for original, augmented in zip(samples, augmenteds):
result.append(
{
"original": original[review.input_column],
"augmented": augmented[review.input_column],
"stars": original[review.label_column],
}
)
df = pd.DataFrame(result)
df.to_csv("autoencoder.csv", index=False)
assert df is not None
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AuditLogConfigLogType',
'EndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteria',
'EndpointPolicyType',
]
class AuditLogConfigLogType(str, Enum):
"""
The log type that this config enables.
"""
LOG_TYPE_UNSPECIFIED = "LOG_TYPE_UNSPECIFIED"
"""
Default case. Should never be this.
"""
ADMIN_READ = "ADMIN_READ"
"""
Admin reads. Example: CloudIAM getIamPolicy
"""
DATA_WRITE = "DATA_WRITE"
"""
Data writes. Example: CloudSQL Users create
"""
DATA_READ = "DATA_READ"
"""
Data reads. Example: CloudSQL Users list
"""
class EndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteria(str, Enum):
"""
Specifies how matching should be done. Supported values are: MATCH_ANY: At least one of the Labels specified in the matcher should match the metadata presented by xDS client. MATCH_ALL: The metadata presented by the xDS client should contain all of the labels specified here. The selection is determined based on the best match. For example, suppose there are three EndpointPolicy resources P1, P2 and P3 and if P1 has a the matcher as MATCH_ANY , P2 has MATCH_ALL , and P3 has MATCH_ALL . If a client with label connects, the config from P1 will be selected. If a client with label connects, the config from P2 will be selected. If a client with label connects, the config from P3 will be selected. If there is more than one best match, (for example, if a config P4 with selector exists and if a client with label connects), an error will be thrown.
"""
METADATA_LABEL_MATCH_CRITERIA_UNSPECIFIED = "METADATA_LABEL_MATCH_CRITERIA_UNSPECIFIED"
"""
Default value. Should not be used.
"""
MATCH_ANY = "MATCH_ANY"
"""
At least one of the Labels specified in the matcher should match the metadata presented by xDS client.
"""
MATCH_ALL = "MATCH_ALL"
"""
The metadata presented by the xDS client should contain all of the labels specified here.
"""
class EndpointPolicyType(str, Enum):
"""
Required. The type of endpoint policy. This is primarily used to validate the configuration.
"""
ENDPOINT_POLICY_TYPE_UNSPECIFIED = "ENDPOINT_POLICY_TYPE_UNSPECIFIED"
"""
Default value. Must not be used.
"""
SIDECAR_PROXY = "SIDECAR_PROXY"
"""
Represents a proxy deployed as a sidecar.
"""
GRPC_SERVER = "GRPC_SERVER"
"""
Represents a proxyless gRPC backend.
"""
|
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.block import sign_block, transition_unsigned_block
def get_balance(state, index):
return state.balances[index]
def next_slot(spec, state):
"""
Transition to the next slot.
"""
spec.process_slots(state, state.slot + 1)
def next_slots(spec, state, slots):
"""
Transition given slots forward.
"""
spec.process_slots(state, state.slot + slots)
def transition_to(spec, state, slot):
"""
Transition to ``slot``.
"""
assert state.slot <= slot
for _ in range(slot - state.slot):
next_slot(spec, state)
assert state.slot == slot
def next_epoch(spec, state):
"""
Transition to the start slot of the next epoch
"""
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
spec.process_slots(state, slot)
def get_state_root(spec, state, slot) -> bytes:
"""
Return the state root at a recent ``slot``.
"""
assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
return state.state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
def state_transition_and_sign_block(spec, state, block, expect_fail=False):
"""
State transition via the provided ``block``
then package the block with the correct state root and signature.
"""
if expect_fail:
expect_assertion_error(lambda: transition_unsigned_block(spec, state, block))
else:
transition_unsigned_block(spec, state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)
|
import re
from datetime import datetime, timedelta
import pytz
class UnknownTimeException(Exception):
pass
RELATIVE_DATE_PATTERN = re.compile('^([0-9]+)([d])$')
RELATIVE_DATE_UNITS = {
'd': 'days',
}
def today():
now = datetime.now()
return now.replace(hour=0, minute=0, second=0, microsecond=0)
def parse_relative_date(date):
match = RELATIVE_DATE_PATTERN.fullmatch(date)
if match is None:
raise ValueError(f'Invalid relative date: {date}')
count, unit = match.groups()
count = int(count)
unit = RELATIVE_DATE_UNITS[unit]
return today() - timedelta(**{unit: count})
def parse_datetime(datetime_str, timezone=pytz.timezone('America/Toronto')):
    # pytz zones must be attached with localize(); replace(tzinfo=...) would silently
    # use the zone's historical LMT offset instead of the correct UTC offset.
    return timezone.localize(datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S'))
def parse_datetime_or_default(datetime_str, default=None, timezone=pytz.timezone('America/Toronto')):
try:
return parse_datetime(datetime_str, timezone=timezone)
    except Exception:
return default
def parse_duration(duration):
parts = duration.split(':')
if len(parts) != 2:
raise UnknownTimeException(
f'Expect time to contain exactly one `:` but found {len(parts) - 1}: {duration}'
)
minutes, seconds = parts
try:
minutes = int(minutes)
seconds = int(seconds)
        if seconds >= 60:
            # Out-of-range seconds should fail the same way as unparsable input.
            raise ValueError(f'seconds out of range: {duration}')
except ValueError:
raise UnknownTimeException(
f'Expected time to be of form XX:XX but found: {duration}'
)
return timedelta(minutes=minutes, seconds=seconds)
def parse_duration_or_default(duration, default=timedelta(seconds=0)):
try:
return parse_duration(duration)
    except Exception:
return default
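if __name__ == '__main__':
    # Illustrative sanity checks for the helpers above.
    print(parse_relative_date('7d'))           # midnight seven days ago
    print(parse_duration('03:45'))             # 0:03:45
    print(parse_duration_or_default('bogus'))  # falls back to 0:00:00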
|
import numpy as np
class LR:
def _add_ones(self, X_train):
ones = np.ones(shape=(X_train.shape[0], 1))
return np.concatenate((ones, X_train), axis=1)
def train(self, X_train, y_train):
X = self._add_ones(X_train)
X_t = np.transpose(X)
inv = np.linalg.pinv(np.dot(X_t, X))
self.W = np.dot(np.dot(inv, X_t), y_train)
def predict(self, X):
X = self._add_ones(X)
return np.dot(X, self.W)
if __name__ == "__main__":
data_n = 200000000
mean = 0
sigma = 2
X = np.random.lognormal(mean, sigma, size=(data_n,1))
X.sort(axis=0) # in-place sort
X = X / np.max(X) * 1e9 # scale to 1B as the paper says
y = np.arange(X.shape[0], dtype=np.int64)
lr = LR()
lr.train(X, y)
sec_stg_model_n = 10000
dispatch = lr.predict(X) * sec_stg_model_n / data_n
    dispatch = dispatch.astype(np.int64).reshape((-1,))  # np.int is removed in recent NumPy versions
dispatch[dispatch < 0] = 0
dispatch[dispatch >= sec_stg_model_n] = sec_stg_model_n - 1
# sec_stage_data_n = np.zeros((sec_stg_model_n,))
# for model_i in dispatch:
# sec_stage_data_n += 1
sec_stage_data_n = []
dispatch = dispatch.reshape((-1,))
dispatch.sort()
cur_target_i = dispatch[0]
cur_start_i = 0
for cur_i, target_i in enumerate(dispatch):
if target_i != cur_target_i:
sec_stage_data_n.append(cur_i - cur_start_i)
cur_target_i = target_i
cur_start_i = cur_i
sec_stage_data_n.append(len(dispatch) - cur_start_i)
sec_stage_data_n = np.asarray(sec_stage_data_n)
sec_stage_data_n.sort()
np.save("dispatch", sec_stage_data_n)
|
from .exceptions import DisallowedURlInput, NotAValidServer, FailedLogin, TooMuchRequests, ReasonNotFound, TemplateNotFound
|
n = float(input())
for i in range(0,100):
print("N[%d] = %.4f" %(i,n))
n=n/2
|
from .args import Args
import paramiko as pk
import os
from scp import SCPClient
import socket
from .utils import locked
from threading import Lock
import logging
logger = logging.getLogger(__name__)
class SSHClient(pk.SSHClient):
class SCPClientContext(SCPClient):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return exc_val is None
def __init__(self, usr: str = Args().user, ip_addr: str = Args().ip, key_file: str = Args().ssh_key, pwd: str = Args().pwd, connect_kwargs: dict = {}):
super(SSHClient, self).__init__()
self._usr = usr
self._ip_addr = ip_addr
self._key_file = key_file
self._pwd = pwd
        self.set_missing_host_key_policy(pk.AutoAddPolicy())  # Automatically accept host keys from devices that are not yet in known_hosts
self._connect_kwargs = connect_kwargs
def __enter__(self):
self.connect(self._ip_addr, username=self._usr, key_filename=self._key_file, password=self._pwd, **self._connect_kwargs)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return exc_val is None
def scp_client(self) -> SCPClientContext:
return self.SCPClientContext(self.get_transport())
_try_ssh_cache = None
_try_ssh_lock = Lock()
@locked(_try_ssh_lock)
def try_ssh(timeout: float = 0.5, force: bool = False) -> bool:
global _try_ssh_cache
if _try_ssh_cache is None or force:
try:
with SSHClient(connect_kwargs=dict(timeout=timeout)):
_try_ssh_cache = True
except Exception:
_try_ssh_cache = False
return _try_ssh_cache
def copy_file_ssh(remote_filepath: str, local_file: str):
local_abs_path = os.path.abspath(os.path.normpath(local_file))
os.makedirs(os.path.dirname(local_abs_path), exist_ok=True)
with SSHClient() as client:
with client.scp_client() as scp_client:
scp_client.get(remote_filepath, local_abs_path)
# Copyright (c) 2020 Covmatic.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
import json
def lambda_handler(event, context):
result = ""
controlledMajors = ["Architectural Engineering",
"Biomedical Engineering",
"Chemical Engineering",
"Civil Engineering",
"Computer Engineering",
"Computer Science",
"Industrial Engineering",
"Mechanical Engineering",
"Nuclear Engineering",
"Accounting",
"Corporate Innovation and Entrepreneurship",
"Finance",
"Management",
"Management Information Systems",
"Marketing",
"Risk Management",
"Supply Chain and Information Systems",
"Cybersecurity Analytics and Operations",
"Information Sciences and Technology",
"Security and Risk Analysis"]
engMajors = ["Agricultural Engineering",
"Architectural Engineering",
"Aerospace Engineering",
"Biological Engineering",
"Biomedical Engineering",
"Chemical Engineering",
"Civil Engineering",
"Computer Science",
"Electrical Engineering",
"Engineering Science",
"Environmental Engineering",
"Industrial Engineering",
"Industrial and Management Systems",
"Mechanical Engineering",
"Nuclear Engineering"]
mechanicalAndNuclear = ["Mechanical Engineering", "Nuclear Engineering"]
doubleMajor = ["Spanish", "French", "German"]
surveyingAndCivil = ["Surveying Engineering", "Civil Engineering"]
mechanicalAndBiomedical = ["Biomedical Engineering", "Mechanical Engineering"]
notAllowedOne = ["Electrical Engineering", "General Science"]
notAllowedTwo = ["Computer Engineering", "Computer Science", "Mathematics Computational Option"]
slotDict = event["currentIntent"]["slots"]
if event["currentIntent"]["slots"]["MajorOne"] == event["currentIntent"]["slots"]["MajorTwo"]:
result = "You have entered the same major twice."
elif event["currentIntent"]["slots"]["MajorOne"] in mechanicalAndNuclear and event["currentIntent"]["slots"]["MajorTwo"] in mechanicalAndNuclear:
result = "You are able to double major in these subjects. You must first be admitted to the Mechanical Engineering program and add Nuclear Engineering as a second major."
elif event["currentIntent"]["slots"]["MajorOne"] in mechanicalAndBiomedical and event["currentIntent"]["slots"]["MajorTwo"] in mechanicalAndBiomedical:
result = "You are able to double major in these subjects. You must first be admitted to the Biomedical Engineering program and add Mechanical Engineering as a second major."
elif event["currentIntent"]["slots"]["MajorOne"] in surveyingAndCivil and event["currentIntent"]["slots"]["MajorTwo"] in surveyingAndCivil:
result = "You are able to double major in these subjects. However, you must be enrolled in the Surveying Engineering at the Wilkes-Barre campus."
elif event["currentIntent"]["slots"]["MajorOne"] in controlledMajors and event["currentIntent"]["slots"]["MajorTwo"] in controlledMajors:
result = "You are unable to double major in these subjects."
elif event["currentIntent"]["slots"]["MajorOne"] in notAllowedOne and event["currentIntent"]["slots"]["MajorTwo"] in notAllowedOne:
result = "You are unable to double major in these subjects."
elif event["currentIntent"]["slots"]["MajorOne"] in notAllowedTwo and event["currentIntent"]["slots"]["MajorTwo"] in notAllowedTwo:
result = "You are unable to double major in these subjects."
elif (event["currentIntent"]["slots"]["MajorOne"] in engMajors and event["currentIntent"]["slots"]["MajorTwo"] == "Liberal Arts") or (event["currentIntent"]["slots"]["MajorOne"] == "Liberal Arts" and event["currentIntent"]["slots"]["MajorTwo"] in engMajors):
result = "You are able double major in these subjects, but some of the engineering majors may not be available."
elif (event["currentIntent"]["slots"]["MajorOne"] in engMajors or event["currentIntent"]["slots"]["MajorTwo"] in engMajors) and (event["currentIntent"]["slots"]["MajorOne"] in doubleMajor or event["currentIntent"]["slots"]["MajorTwo"] in doubleMajor):
result = "You are able double major in these subjects."
else:
result = "You may or may not be able to double major in these subjects. You should make an appointment with your advisor to discuss this further."
return {
"dialogAction": {
"type": "Close",
"fulfillmentState": "Fulfilled",
"message": {
"contentType": "PlainText",
"content": result
}
}
}
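if __name__ == "__main__":
    # Local smoke test with a hypothetical Lex event payload (the structure is
    # assumed from the fields accessed above).
    sample_event = {
        "currentIntent": {
            "slots": {
                "MajorOne": "Mechanical Engineering",
                "MajorTwo": "Nuclear Engineering",
            }
        }
    }
    print(lambda_handler(sample_event, None))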
|
import fnmatch
import os
pattern = 'fnmatch_*.py'
print('pattern', pattern)
print()
files = os.listdir('.')
for name in files:
print('Filename: {:<25} {}'.format(
name, fnmatch.fnmatch(name, pattern)))
|
# Script for Adding Images Stored on Computer Into Google Slides
# By Ken Burchfiel
# Released under the MIT License
# (This code makes extensive use of various Google code excerpts. I believe these snippets all use the Apache 2.0 license.)
# Github repository link: https://github.com/kburchfiel/google_slides_image_uploader
# For additional background on this program, see the slide_update_script.ipynb file, which applies this code to add images to a sample presentation.
# This file contains various functions that allow images to be inserted into Google Slides presentations. These functions include:
#
# 1. upload_blob(), which uploads an image to Google Cloud Storage
# 2. generate_download_signed_url_v4(), which creates a signed URL that can be utilized for the image import
# 3. add_image_to_slide(), which uses the signed URL created by generate_download_signed_url_v4() to import the image from Google Cloud Storage into the Google Slides presentation
# 4. delete_blob(), which removes the image from the Cloud Storage bucket
# 5. upload_image_and_add_to_slide(), which applies the above four functions to complete the image import process. This function is then called by slide_update_script.ipynb for each image/slide pair specified in that script.
# First, I'll import a number of libraries and modules that will be relevant for the code.
import time
# The following two import lines come from the "Creating a Signed URL to upload an object" code sample available at https://cloud.google.com/storage/docs/access-control/signing-urls-with-helpers#code-samples_1 . This sample uses the Apache 2.0 license.
import datetime
from google.cloud import storage
# The following import statements come from the Slides API Python quickstart (https://developers.google.com/slides/api/quickstart/python), which uses the Apache 2.0 license.
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google.oauth2 import service_account # From https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests , which uses the Apache 2.0 license.
def upload_blob(credentials, bucket_name, source_file_path, destination_blob_name): # The original source of this code, comments, and docstring was https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-code-sample ; the code was made available under the Apache 2.0 license. I made some minor modifications to the code.
"""Uploads a file to the bucket."""
# The ID of your GCS bucket
# bucket_name = "your-bucket-name"
# The path to your file to upload
# source_file_name = "local/path/to/file"
# The ID of your GCS object
# destination_blob_name = "storage-object-name"
storage_client = storage.Client(credentials=credentials) # The default argument for credentials is None--see https://googleapis.dev/python/storage/latest/client.html . I edited this line to use the credentials passed in from upload_image_and_add_to_slide().
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_path)
# To avoid a 403 error with the above line, I also needed to add the storage admin role to my service account within the IAM settings in the Google Cloud Console. See https://stackoverflow.com/a/56305946/13097194
print("File uploaded to cloud.")
def generate_download_signed_url_v4(bucket_name, blob_name, credentials):
# The original source of this code, comments, and docstring was https://cloud.google.com/storage/docs/access-control/signing-urls-with-helpers#storage-signed-url-object-python ; the code was made available under the Apache 2.0 license. I made some minor modifications to the code.
"""Generates a v4 signed URL for downloading a blob.
Note that this method requires a service account key file. You can not use
this if you are using Application Default Credentials from Google Compute
Engine or from the Google Cloud SDK.
"""
# bucket_name = 'your-bucket-name'
# blob_name = 'your-object-name'
storage_client = storage.Client(credentials=credentials)
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
url = blob.generate_signed_url(
version="v4",
expiration=datetime.timedelta(minutes=1),
# I made this URL valid for 1 minute only (see below) since it will be used immediately and will only be needed once.
# Allow GET requests using this URL.
method="GET",
)
print("Signed URL generated.")
return url
# The following function (add_image_to_slide()) uses the image URL returned from generate_download_signed_url_v4 to attach an image to a slide within a Google Slides presentation.
def add_image_to_slide(presentation_id, credentials, image_url, image_name, page_object_id, scaleX = 1.8, scaleY = 1.8, translateX = 1100000, translateY = -500000):
# The scaleX, scaleY, translateX, and translateY values align each image with its corresponding slide. The translateX and translateY values are in EMU (English Metric Units), so you can use the 914,400 EMUs per inch conversion as a reference when appending slides. I recommend testing the scale and translation values on one image, then expanding the for loop to include all images once you have the positioning right.
    # To find the presentation ID, look for the string of characters that follows the /d/ section of the URL for a Google Slides document. For instance, if a presentation's URL is https://docs.google.com/presentation/d/1xJfItB6w7hH0Nq2va-B1nUJFKGto_sFKxdMRvrMRvsI/ , the last part of that URL (1xJfItB6w7hH0Nq2va-B1nUJFKGto_sFKxdMRvrMRvsI), excluding the forward slash, is the presentation's ID.
service = build('slides', 'v1', credentials=credentials)
# Call the Slides API. From the Slides API Python Quickstart (https://developers.google.com/slides/api/quickstart/python); the Quickstart code sample uses the Apache 2.0 license.
# The following code derives from the 'Adding Images to a Slide' Python code snippet available at https://developers.google.com/slides/api/guides/add-image . This code uses the Apache 2.0 license.
emu4M = {
'magnitude': 4000000,
'unit': 'EMU'
}
requests = []
# The following set of code checks whether the image is already present on the slide. It does so by generating a list of all object IDs and checking whether the name of the image is present within those IDs.
page_info = service.presentations().pages().get(presentationId = presentation_id, pageObjectId = page_object_id).execute() # Retrieves a wealth of information about the current slide. This line is adapted from https://googleapis.github.io/google-api-python-client/docs/dyn/slides_v1.presentations.pages.html#get ; I believe the code on that page also uses the Apache 2.0 license.
objects_on_page = []
# print(page_info) # For debugging
# The documentation for the get() function at https://googleapis.github.io/google-api-python-client/docs/dyn/slides_v1.presentations.pages.html#get will serve as a helpful reference for understanding and modifying this code.
if 'pageElements' in page_info.keys(): # This check was included so that the following code would be skipped if there weren't any page elements on the slide. Without this check, the program would return a KeyError for blank slides due to the missing pageElements key.
#print('pageElements found in keys')
for k in range(len(page_info['pageElements'])): # i.e. for each separate element on the page. Each member of the pageElements list is a dictionary, and objectId (accessed below) is a key that retrieves the object id within that dictionary.
objects_on_page.append(page_info['pageElements'][k]['objectId']) #
if image_name in objects_on_page: # Checks whether the image planned for addition to the slide is already present. If so, the following line files a request to delete that image. This assumes that both the image on the slide and the image planned to be added to the slide have the same name.
requests.append({'deleteObject':{'objectId':image_name}}) # This code is based on the batchUpdate() function available at https://developers.google.com/resources/api-libraries/documentation/slides/v1/python/latest/slides_v1.presentations.html. The code uses the Apache 2.0 license unless I'm mistaken. See https://developers.google.com/open-source/devplat
print("Image already present on slide. Request added to delete the pre-existing copy so that a new one can be added.")
#else:
# print('pageElements not found in keys') # For debugging
# The following code, which provides information about a new image to be added to the presentation, also comes from the 'Adding Images to a Slide' code sample, though I made some minor modifications.
requests.append({
'createImage': {
'objectId': image_name,
'url': image_url,
'elementProperties': {
'pageObjectId': page_object_id,
'size': {
'height': emu4M,
'width': emu4M
},
'transform': {
'scaleX': scaleX,
'scaleY': scaleY,
'translateX': translateX,
'translateY': translateY,
'unit': 'EMU'
}
}
}
})
body = {
'requests': requests
}
response = service.presentations() \
        .batchUpdate(presentationId=presentation_id, body=body).execute() # Based on both the batchUpdate function and the Slides API Python Quickstart. I originally started with response = slides_service but this Stack Overflow answer by Shubham Kushwaha (https://stackoverflow.com/a/59096196/13097194) indicated that I could just use service instead of slides_service.
print("Image added to slide. Pre-existing image deleted if requested.")
def delete_blob(bucket_name, blob_name, credentials): # Source of original code: https://cloud.google.com/storage/docs/deleting-objects#code-samples Code uses the Apache 2.0 License .
"""Deletes a blob from the bucket."""
# bucket_name = "your-bucket-name"
# blob_name = "your-object-name"
storage_client = storage.Client(credentials=credentials) # Updated this line to use credentials from the upload_image_and_add_to_slide function
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.delete()
print("Blob {} deleted.".format(blob_name))
def upload_image_and_add_to_slide(image_folder_path, image_file_name, image_file_extension, service_account_path, scopes, presentation_id, page_object_id, bucket_name, scaleX, scaleY, translateX,translateY):
'''
This function performs 5 steps:
Step 1: Uploads an image to Google Cloud Storage (via the upload_blob function)
Step 2: Creates a signed URL that can be used to copy the uploaded image into Google Slides (via the generate_download_signed_url_v4 function)
Step 3: Deletes the previous copy of the image from Google Slides (if one exists) using the add_image_to_slide() function
Step 4: Copies the image into Google Slides via the add_image_to_slide() function
Step 5: Deletes the image from Google Cloud Storage using the delete_blob() function
Variable descriptions:
image_folder_path: The path to the folder containing the image, which shouldn't be confused with the path to the image itself
image_file_name: the name of the image within that folder. If you're adding images to a slide within a for loop, consider storing the image file names within a list and iterating through that list. Alternately, you could store both the image file names and page object ids as values within a list of lists.
image_file_extension: .png, .jpg, etc. Separated from image_file_name because I don't think Google allows object IDs to have periods
service_account_path: the path to the credentials.json account stored on your computer
page_object_id: the id of the slide being updated by the function. Needs to be retrieved within Google
bucket_name: the name of the Google Cloud Storage bucket containing the image file
    scaleX, scaleY, translateX, and translateY can be adjusted to set the correct position and size of the image on the slide.
'''
# The following two lines of code derive from the 'Preparing to make an authorized API call' code samples available at https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests . These code samples use the Apache 2.0 license.
SERVICE_ACCOUNT_FILE = service_account_path
credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=scopes)
image_file_path = image_folder_path + image_file_name + image_file_extension # i.e. ..\\pictures\image_1.png'
# Step 1
upload_blob(credentials,bucket_name,image_file_path, destination_blob_name = image_file_name)
# Step 2
url = generate_download_signed_url_v4(bucket_name, blob_name = image_file_name, credentials = credentials)
time_image_becomes_accessible = time.time()
# Steps 3 and 4
add_image_to_slide(presentation_id = presentation_id, credentials = credentials, image_url = url, page_object_id=page_object_id, image_name = image_file_name, scaleX=scaleX, scaleY=scaleY, translateX=translateX,translateY=translateY)
# Step 5
delete_blob(bucket_name = bucket_name, blob_name = image_file_name, credentials = credentials)
time_image_is_no_longer_accessible = time.time()
time_image_was_accessible = time_image_is_no_longer_accessible - time_image_becomes_accessible
print("Image was accessible for",'{:.4f}'.format(time_image_was_accessible),"second(s).")
# On my computer, the image was accessible for 2.10-2.37 seconds (based on my first four tests); however, the image can be accessible for longer periods in some cases (but not longer than 1 minute, the length that the signed URL is valid).
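# A minimal, hypothetical usage sketch of the function above. Every value below is a
# placeholder (paths, bucket, presentation and slide IDs) and must be replaced with your
# own before running; the scopes shown are the ones the Storage and Slides calls above
# typically need, but confirm them for your project.
if __name__ == '__main__':
    upload_image_and_add_to_slide(
        image_folder_path='images\\',        # folder containing the image (note the trailing separator)
        image_file_name='chart_1',           # file name without extension; also used as the Slides object ID
        image_file_extension='.png',
        service_account_path='credentials.json',
        scopes=['https://www.googleapis.com/auth/presentations',
                'https://www.googleapis.com/auth/devstorage.read_write'],
        presentation_id='YOUR_PRESENTATION_ID',
        page_object_id='YOUR_SLIDE_OBJECT_ID',
        bucket_name='your-bucket-name',
        scaleX=1.8, scaleY=1.8, translateX=1100000, translateY=-500000)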
|
#-*- coding: utf-8 -*-
""" Artist playback module """
import random
random.seed()
import re
from voiceplay.webapp.baseresource import APIV1Resource
from voiceplay.utils.helpers import SingleQueueDispatcher
from .basetask import BasePlayerTask
class Artist(APIV1Resource):
"""
Artist API endpoint
"""
route_base = '/api/v1/play/artist/<artist>/<query>'
queue = None
def post(self, artist, query):
"""
HTTP POST handler
"""
result = {'status': 'timeout', 'message': ''}
if self.queue and artist and query:
dispatcher = SingleQueueDispatcher(queue=self.queue)
message = dispatcher.send_and_wait('play' + ' %s ' % query + ' by ' + artist)
result = {'status': 'ok', 'message': message}
return result
class SingleArtistTask(BasePlayerTask):
"""
Single artist playback class
"""
__group__ = ['play']
    __regexp__ = [r'^play some (?!fresh|new)\s?(?:music|tracks?|songs?) by (.+)$']
__priority__ = 20
@classmethod
def run_shuffle_artist(cls, artist):
"""
Shuffle artist tracks
"""
if cls.lfm().get_query_type(artist) == 'artist':
tracks = cls.lfm().get_top_tracks(cls.lfm().get_corrected_artist(artist))
random.shuffle(tracks)
for track in cls.tracks_with_prefetch(tracks):
if cls.get_exit(): # pylint:disable=no-member
break
cls.play_full_track(track)
@classmethod
def process(cls, regexp, message):
"""
Run task
"""
cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
artist = re.match(regexp, message, re.I).groups()[0]
artist = cls.lfm().get_corrected_artist(artist)
cls.say('Shuffling songs by %s' % artist)
cls.run_shuffle_artist(artist)
class SingleTrackArtistTask(BasePlayerTask):
"""
Single track playback class.
The simplest form of player task, can be used as a reference for writing new tasks.
"""
__group__ = ['play']
    __regexp__ = [r'^play (?!fresh|new)\s?(?!tracks|songs)(.+) bu?(?:t|y) (.+)$']
__priority__ = 70
@classmethod
def process(cls, regexp, message):
"""
Run task
"""
cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
track, artist = re.match(regexp, message, re.I).groups()
# TODO add track correction here
artist = cls.lfm().get_corrected_artist(artist)
cls.say('%s by %s' % (track, artist))
cls.play_full_track('%s - %s' % (artist, track))
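# A hypothetical task sketch, following the pattern that SingleTrackArtistTask is meant
# to illustrate. The regexp, priority and spoken phrase are invented for illustration;
# only helpers already exercised above (lfm(), get_corrected_artist, get_top_tracks,
# tracks_with_prefetch, play_full_track, say, get_exit) are assumed to exist.
class TopTracksArtistTask(BasePlayerTask):
    """
    Example-only playback class: plays an artist's top tracks in chart order.
    """
    __group__ = ['play']
    __regexp__ = [r'^play top (?:tracks|songs) by (.+)$']
    __priority__ = 30
    @classmethod
    def process(cls, regexp, message):
        """
        Run task
        """
        cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
        artist = cls.lfm().get_corrected_artist(re.match(regexp, message, re.I).groups()[0])
        cls.say('Playing top tracks by %s' % artist)
        for track in cls.tracks_with_prefetch(cls.lfm().get_top_tracks(artist)):
            if cls.get_exit():  # pylint:disable=no-member
                break
            cls.play_full_track(track)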
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/ContentItem.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.mrp.protobuf import ContentItemMetadata_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ContentItemMetadata__pb2
from pyatv.mrp.protobuf import LanguageOption_pb2 as pyatv_dot_mrp_dot_protobuf_dot_LanguageOption__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/ContentItem.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=b'\n$pyatv/mrp/protobuf/ContentItem.proto\x1a,pyatv/mrp/protobuf/ContentItemMetadata.proto\x1a\'pyatv/mrp/protobuf/LanguageOption.proto\"\x8c\x01\n\x13LanguageOptionGroup\x12\x1b\n\x13\x61llowEmptySelection\x18\x01 \x01(\x08\x12.\n\x15\x64\x65\x66\x61ultLanguageOption\x18\x02 \x01(\x0b\x32\x0f.LanguageOption\x12(\n\x0flanguageOptions\x18\x03 \x03(\x0b\x32\x0f.LanguageOption\"\xf4\x02\n\x0b\x43ontentItem\x12\x12\n\nidentifier\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.ContentItemMetadata\x12\x13\n\x0b\x61rtworkData\x18\x03 \x01(\x0c\x12\x0c\n\x04info\x18\x04 \x01(\t\x12\x36\n\x18\x61vailableLanguageOptions\x18\x05 \x03(\x0b\x32\x14.LanguageOptionGroup\x12/\n\x16\x63urrentLanguageOptions\x18\x06 \x03(\x0b\x32\x0f.LanguageOption\x12\x18\n\x10parentIdentifier\x18\t \x01(\t\x12\x1a\n\x12\x61ncestorIdentifier\x18\n \x01(\t\x12\x17\n\x0fqueueIdentifier\x18\x0b \x01(\t\x12\x19\n\x11requestIdentifier\x18\x0c \x01(\t\x12\x18\n\x10\x61rtworkDataWidth\x18\r \x01(\x05\x12\x19\n\x11\x61rtworkDataHeight\x18\x0e \x01(\x05'
,
dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ContentItemMetadata__pb2.DESCRIPTOR,pyatv_dot_mrp_dot_protobuf_dot_LanguageOption__pb2.DESCRIPTOR,])
_LANGUAGEOPTIONGROUP = _descriptor.Descriptor(
name='LanguageOptionGroup',
full_name='LanguageOptionGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='allowEmptySelection', full_name='LanguageOptionGroup.allowEmptySelection', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='defaultLanguageOption', full_name='LanguageOptionGroup.defaultLanguageOption', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='languageOptions', full_name='LanguageOptionGroup.languageOptions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=128,
serialized_end=268,
)
_CONTENTITEM = _descriptor.Descriptor(
name='ContentItem',
full_name='ContentItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='ContentItem.identifier', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ContentItem.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artworkData', full_name='ContentItem.artworkData', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='info', full_name='ContentItem.info', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='availableLanguageOptions', full_name='ContentItem.availableLanguageOptions', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='currentLanguageOptions', full_name='ContentItem.currentLanguageOptions', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parentIdentifier', full_name='ContentItem.parentIdentifier', index=6,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ancestorIdentifier', full_name='ContentItem.ancestorIdentifier', index=7,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='queueIdentifier', full_name='ContentItem.queueIdentifier', index=8,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requestIdentifier', full_name='ContentItem.requestIdentifier', index=9,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artworkDataWidth', full_name='ContentItem.artworkDataWidth', index=10,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artworkDataHeight', full_name='ContentItem.artworkDataHeight', index=11,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=271,
serialized_end=643,
)
_LANGUAGEOPTIONGROUP.fields_by_name['defaultLanguageOption'].message_type = pyatv_dot_mrp_dot_protobuf_dot_LanguageOption__pb2._LANGUAGEOPTION
_LANGUAGEOPTIONGROUP.fields_by_name['languageOptions'].message_type = pyatv_dot_mrp_dot_protobuf_dot_LanguageOption__pb2._LANGUAGEOPTION
_CONTENTITEM.fields_by_name['metadata'].message_type = pyatv_dot_mrp_dot_protobuf_dot_ContentItemMetadata__pb2._CONTENTITEMMETADATA
_CONTENTITEM.fields_by_name['availableLanguageOptions'].message_type = _LANGUAGEOPTIONGROUP
_CONTENTITEM.fields_by_name['currentLanguageOptions'].message_type = pyatv_dot_mrp_dot_protobuf_dot_LanguageOption__pb2._LANGUAGEOPTION
DESCRIPTOR.message_types_by_name['LanguageOptionGroup'] = _LANGUAGEOPTIONGROUP
DESCRIPTOR.message_types_by_name['ContentItem'] = _CONTENTITEM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LanguageOptionGroup = _reflection.GeneratedProtocolMessageType('LanguageOptionGroup', (_message.Message,), {
'DESCRIPTOR' : _LANGUAGEOPTIONGROUP,
'__module__' : 'pyatv.mrp.protobuf.ContentItem_pb2'
# @@protoc_insertion_point(class_scope:LanguageOptionGroup)
})
_sym_db.RegisterMessage(LanguageOptionGroup)
ContentItem = _reflection.GeneratedProtocolMessageType('ContentItem', (_message.Message,), {
'DESCRIPTOR' : _CONTENTITEM,
'__module__' : 'pyatv.mrp.protobuf.ContentItem_pb2'
# @@protoc_insertion_point(class_scope:ContentItem)
})
_sym_db.RegisterMessage(ContentItem)
# @@protoc_insertion_point(module_scope)
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'fake-key'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'canvas_oauth.apps.CanvasOAuthConfig'
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
ROOT_URLCONF = 'canvas_oauth.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CANVAS_OAUTH_CLIENT_ID = 101
CANVAS_OAUTH_CLIENT_SECRET = "fake-secret"
CANVAS_OAUTH_CANVAS_DOMAIN = "canvas.localhost"
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import Logger
from unittest import TestCase
from pybuilder.core import Project
from pybuilder.plugins.python.snakefood_plugin import (
check_snakefood_available,
check_graphviz_available,
generate_graph,
generate_pdf
)
from test_utils import Mock
class CheckSnakeFoodAvailableTests(TestCase):
def setUp(self):
self.project = Project("basedir")
self.reactor = Mock()
self.reactor.python_env_registry = {}
self.reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
pyb_env.environ = {}
self.reactor.pybuilder_venv = pyb_env
def test_should_check_that_snakefood_is_available(self):
mock_logger = Mock(Logger)
check_snakefood_available(self.project, mock_logger, self.reactor)
self.reactor.pybuilder_venv.verify_can_execute.assert_called_with(
["sfood", "-h"], "sfood", "plugin python.snakefood")
def test_should_check_that_graphviz_is_available(self):
mock_logger = Mock(Logger)
check_graphviz_available(self.project, mock_logger, self.reactor)
self.reactor.pybuilder_venv.verify_can_execute.assert_called_with(
["dot", "-V"], "graphviz", "plugin python.snakefood")
def test_should_call_generate_graph(self):
report_file = "foo"
graph_file = "bar.dot"
generate_graph(self.reactor.pybuilder_venv, report_file, graph_file)
self.reactor.pybuilder_venv.execute_command.assert_called_with(
["sfood-graph", report_file], graph_file)
def test_should_call_generate_pdf(self):
pdf_file = "foo.pdf"
graph_file = "bar.dot"
generate_pdf(self.reactor.pybuilder_venv, graph_file, pdf_file)
self.reactor.pybuilder_venv.execute_command.assert_called_with(
["dot", "-Tpdf", graph_file], pdf_file)
|
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    class Meta:
model = User
fields = ("id", "discord_id", "email", "user_tag", "created_at")
|
# -*- coding: utf-8 -*-
"""Contains Base and helper classes for image processing algorithms.
Processors can be stacked by passing other processors or capturing adaptors as first argument.
Can also be used to build processor stacks with the help of Processor Stack Builder.
"""
from EasyVision.vision.base import *
import cv2
import numpy as np
try:
from future_builtins import zip
except ImportError:
pass
try:
import cPickle as pickle
except ImportError:
import pickle
class KeyPoint(namedtuple('KeyPoint', 'pt size angle response octave class_id')):
"""KeyPoint struct that mirrors cv2.KeyPoint. Mainly used for serialization as pickle does not understand cv2.KeyPoint.
"""
__slots__ = ()
def todict(self):
"""Converts KeyPoint into a dictionary"""
return self._asdict()
@staticmethod
def fromdict(d):
"""Creates KeyPoint object from a dictionary"""
return KeyPoint(**d)
class Features(namedtuple('Features', 'points descriptors points3d')):
"""Image Features structure.
Contains feature points either as 2d points or KeyPoint, descriptors and associated 3d points.
Basically points can be anything.
"""
__slots__ = ()
def __new__(cls, points, descriptors, points3d=None):
if len(points) and hasattr(points[0], 'pt'):
points = tuple(KeyPoint(pt.pt, pt.size, pt.angle, pt.response, pt.octave, pt.class_id) for pt in points)
elif not isinstance(points, np.ndarray):
points = np.float32(points)
if not isinstance(points3d, np.ndarray) and points3d is not None:
points3d = np.float32(points3d)
return super(Features, cls).__new__(cls, points, descriptors, points3d)
@property
def keypoints(self):
"""Returns a list of cv2.KeyPoint items for displaying purposes"""
return [cv2.KeyPoint(x=pt.pt[0], y=pt.pt[1], _size=pt.size, _angle=pt.angle,
_response=pt.response, _octave=pt.octave, _class_id=pt.class_id) for pt in self.points]
def todict(self):
"""Converts Features into a dictionary"""
d = {
            'points': [pt.todict() for pt in self.points] if len(self.points) and isinstance(self.points[0], KeyPoint) else self.points.tolist(),
            'points3d': self.points3d.tolist() if self.points3d is not None else None,
'descriptors': self.descriptors.tolist(),
'dtype': self.descriptors.dtype.name
}
return d
@staticmethod
def fromdict(d):
"""Creates Features object from a dictionary"""
pts = d['points']
if len(pts) and isinstance(pts[0], dict):
points = [KeyPoint.fromdict(pt) for pt in pts]
else:
points = pts
descriptors = np.array(d['descriptors'], dtype=np.dtype(d['dtype']))
return Features(points, descriptors, d['points3d'])
def tobytes(self):
"""Uses pickle to serialize Features into bytes"""
return pickle.dumps(self, protocol=-1)
@staticmethod
def frombytes(data):
"""Uses pickle to deserialize Features from bytes"""
return pickle.loads(data)
def tobuffer(self, buf):
"""Uses pickle to serialize Features to buffer-like object"""
pickle.dump(self, buf, protocol=-1)
@staticmethod
def frombuffer(buf):
"""Uses pickle to deserialize Features from buffer-like object"""
return pickle.load(buf)
def __reduce__(self):
"""Used for pickle serialization in order to deal with UMat descriptors"""
return self.__class__, (self.points, self.descriptors.get() if isinstance(self.descriptors, cv2.UMat) else self.descriptors)
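# Illustrative sketch (not part of the original module): a round trip through the
# serialization helpers above; the point and descriptor values are made up.
def _example_features_roundtrip():
    """Build a Features struct from plain 2d points, serialize it and restore it."""
    pts = [(10.0, 20.0), (30.5, 42.0)]
    descriptors = np.zeros((2, 32), dtype=np.uint8)
    features = Features(pts, descriptors)
    restored = Features.frombytes(features.tobytes())
    assert np.allclose(restored.points, features.points)
    return restored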
class ProcessorBase(VisionBase):
"""Abstract Base class for image processor algorithms
Capture will call process on each image in the frame if associated mask is set to '1'.
Mask is a string of '1' and '0', where index of the mask is the same as the index of frame image.
Processor mask will override frame processor mask.
Abstract methods:
process
.. note::
This base class overrides ``__getattr__`` and ``__setattr__``. That means, that if there is no attribute in self,
it will try to get it from the source.
.. warning::
        Overriding ``__setattr__`` incurs a limitation on how internal attributes must be named when initialized
        in the ``__init__`` method. All internal attributes must start with "_", e.g. ``self._my_internal_var = 0``.
"""
    def __init__(self, vision, processor_mask=None, append=False, null_image=False, enabled=True, *args, **kwargs):
        """Instance initialization. Must be called using super().__init__(*args, **kwargs)
:param vision: capturing source object.
:param processor_mask: a mask specifying which images in a frame should be processed
:param append: indicates whether to replace images or append to the frame
:param null_image: indicates whether to set image.image to None for processed image
:param enabled: indicates whether to run processing
"""
if not isinstance(vision, VisionBase) and vision is not None:
raise TypeError("Vision object must be of type VisionBase")
self._vision = vision
self._processor_mask = Frame.tidy_processor_mask(processor_mask)
self._enabled = True
self._append = append
self._null_image = null_image
self.enabled = enabled
super(ProcessorBase, self).__init__(*args, **kwargs)
@abstractmethod
def process(self, image):
"""Processes frame image. Must return a valid Image instance
Note, that this method will only be called if self.enabled is True and associated processor mask is '1'
:param image: instance of Image struct - input image
:return: an instance of Image struct.
"""
pass
def capture(self):
super(ProcessorBase, self).capture()
frame = self._vision.capture()
if not self.enabled:
return frame
elif frame:
if self._null_image:
postprocess = lambda x: x._replace(image=None, original=None, mask=None, source=self)
else:
postprocess = lambda x: x._replace(source=self)
processor_mask = self._processor_mask if self._processor_mask is not None else frame.processor_mask
if processor_mask is None:
processor_mask = "1" * len(frame.images)
if not self._append:
                images = tuple(img if m == "0" else postprocess(self.process(img)) for m, img in zip(processor_mask, frame.images))
else:
images = tuple(postprocess(self.process(img)) for m, img in zip(processor_mask, frame.images) if m != "0")
images = frame.images + images
return frame._replace(images=images)
def setup(self):
if self._vision is not None:
self._vision.setup()
super(ProcessorBase, self).setup()
def release(self):
if self._vision is not None:
self._vision.release()
super(ProcessorBase, self).release()
@property
def source(self):
"""Returns a source for this processor"""
return self._vision
def get_source(self, name):
"""Recursively searches for a source class by class name"""
if self.__class__.__name__ == name:
return self
elif isinstance(self._vision, ProcessorBase) or self._vision.__class__.__name__ == 'CameraPairProxy':
return self._vision.get_source(name)
elif self._vision.__class__.__name__ == name:
return self._vision
def __getattr__(self, name):
"""Allows to access attributes of deeper sources"""
# this line is required for pickling/unpickling after fork
if '_vision' not in self.__dict__:
raise AttributeError("Source was not set")
try:
return super(ProcessorBase, self).__getattr__(name)
except AttributeError:
return getattr(self._vision, name)
#def __setattr__(self, name, value):
# """Allows to set attributes of deeper sources"""
# print(name, self.__class__.__dict__)
# if name.startswith('_') or name in self.__dict__ or name in self.__class__.__dict__:
# super(ProcessorBase, self).__setattr__(name, value)
# else:
# setattr(self._vision, name, value)
@property
    def enabled(self):
        """Gets/sets a flag indicating whether the process method should be called"""
return self._enabled
@enabled.setter
def enabled(self, value):
lastenabled, self._enabled = self._enabled, value
if lastenabled != value and hasattr(self, 'enabled_changed'):
self.enabled_changed(lastenabled, value)
@property
def is_open(self):
return self._vision.is_open
@property
def frame_size(self):
return self._vision.frame_size
@property
def fps(self):
return self._vision.fps
@property
def name(self):
return "{} <- {}".format(super(ProcessorBase, self).name, self._vision.name)
@property
def frame_count(self):
return self._vision.frame_count
@property
def path(self):
return self._vision.path
@property
def devices(self):
return self._vision.devices
@property
def autoexposure(self):
return self._vision.autoexposure
@property
def autofocus(self):
return self._vision.autofocus
@property
def autowhitebalance(self):
return self._vision.autowhitebalance
@property
def autogain(self):
return self._vision.autogain
@property
def exposure(self):
return self._vision.exposure
@property
def focus(self):
return self._vision.focus
@property
def whitebalance(self):
return self._vision.whitebalance
@property
def gain(self):
return self._vision.gain
@autoexposure.setter
def autoexposure(self, value):
self._vision.autoexposure = value
@autofocus.setter
def autofocus(self, value):
self._vision.autofocus = value
@autowhitebalance.setter
def autowhitebalance(self, value):
self._vision.autowhitebalance = value
@autogain.setter
def autogain(self, value):
self._vision.autogain = value
@exposure.setter
def exposure(self, value):
self._vision.exposure = value
@focus.setter
def focus(self, value):
self._vision.focus = value
@whitebalance.setter
def whitebalance(self, value):
self._vision.whitebalance = value
@gain.setter
def gain(self, value):
self._vision.gain = value
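# Illustrative sketch (not part of the original module): a minimal ProcessorBase
# subclass. Only ``process`` is shown; any other abstract members inherited from
# VisionBase are omitted, and the Image field ``image`` is assumed from the usage
# in ``capture`` above. It would be stacked as _ExampleGrayscaleProcessor(source).
class _ExampleGrayscaleProcessor(ProcessorBase):
    """Converts every processed frame image to grayscale using OpenCV."""
    def process(self, image):
        gray = cv2.cvtColor(image.image, cv2.COLOR_BGR2GRAY)
        return image._replace(image=gray)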
|
"""Routines to solve for circumstances like sunrise, sunset, and moon phase."""
from numpy import cos, diff, flatnonzero, linspace, multiply, sign
from .constants import DAY_S, tau
from .nutationlib import iau2000b
EPSILON = 0.001 / DAY_S
# Simple facts.
def phase_angle(ephemeris, body, t):
"""Compute the phase angle of a body viewed from Earth.
The ``body`` should be an integer or string that can be looked up in
the given ``ephemeris``, which will also be asked to provide
positions for the Earth and Sun. The return value will be an
:class:`~skyfield.units.Angle` object.
"""
earth = ephemeris['earth']
sun = ephemeris['sun']
body = ephemeris[body]
pe = earth.at(t).observe(body)
pe.position.au *= -1 # rotate 180 degrees to point back at Earth
t2 = t.ts.tt_jd(t.tt - pe.light_time)
ps = body.at(t2).observe(sun)
return pe.separation_from(ps)
def fraction_illuminated(ephemeris, body, t):
"""Compute the illuminated fraction of a body viewed from Earth.
The ``body`` should be an integer or string that can be looked up in
the given ``ephemeris``, which will also be asked to provide
positions for the Earth and Sun. The return value will be a
floating point number between zero and one. This simple routine
assumes that the body is a perfectly uniform sphere.
"""
a = phase_angle(ephemeris, body, t).radians
return 0.5 * (1.0 + cos(a))
# Search routines.
def find_discrete(start_time, end_time, f, epsilon=EPSILON, num=12):
"""Find the times when a function changes value.
Searches between ``start_time`` and ``end_time``, which should both
be :class:`~skyfield.timelib.Time` objects, for the occasions where
the function ``f`` changes from one value to another. Use this to
search for events like sunrise or moon phases.
A tuple of two arrays is returned. The first array gives the times
at which the input function changes, and the second array specifies
the new value of the function at each corresponding time.
This is an expensive operation as it needs to repeatedly call the
function to narrow down the times that it changes. It continues
searching until it knows each time to at least an accuracy of
``epsilon`` Julian days. At each step, it creates an array of
``num`` new points between the lower and upper bound that it has
established for each transition. These two values can be changed to
tune the behavior of the search.
"""
ts = start_time.ts
jd0 = start_time.tt
jd1 = end_time.tt
if jd0 >= jd1:
raise ValueError('your start_time {0} is later than your end_time {1}'
.format(start_time, end_time))
periods = (jd1 - jd0) / f.rough_period
if periods < 1.0:
periods = 1.0
    jd = linspace(jd0, jd1, int(periods * num))
end_mask = linspace(0.0, 1.0, num)
start_mask = end_mask[::-1]
o = multiply.outer
while True:
t = ts.tt_jd(jd)
y = f(t)
indices = flatnonzero(diff(y))
if not len(indices):
return indices, y[0:0]
starts = jd.take(indices)
ends = jd.take(indices + 1)
# Since we start with equal intervals, they all should fall
# below epsilon at around the same time; so for efficiency we
# only test the first pair.
if ends[0] - starts[0] <= epsilon:
break
jd = o(starts, start_mask).flatten() + o(ends, end_mask).flatten()
return ts.tt_jd(ends), y.take(indices + 1)
def _find_maxima(start_time, end_time, f, epsilon=EPSILON, num=12):
ts = start_time.ts
jd0 = start_time.tt
jd1 = end_time.tt
if jd0 >= jd1:
raise ValueError('your start_time {0} is later than your end_time {1}'
.format(start_time, end_time))
    jd = linspace(jd0, jd1, int((jd1 - jd0) / f.rough_period * num))
end_mask = linspace(0.0, 1.0, num)
start_mask = end_mask[::-1]
o = multiply.outer
while True:
t = ts.tt_jd(jd)
y = f(t)
indices = flatnonzero(diff(sign(diff(y))) == -2)
if not len(indices):
raise ValueError('cannot find a maximum in that range')
starts = jd.take(indices)
ends = jd.take(indices + 2)
# Since we start with equal intervals, they all should fall
# below epsilon at around the same time; so for efficiency we
# only test the first pair.
if ends[0] - starts[0] <= epsilon:
break
jd = o(starts, start_mask).flatten() + o(ends, end_mask).flatten()
return ts.tt_jd(ends), y.take(indices)
# Discrete circumstances to search.
SEASONS = [
'Spring',
'Summer',
'Autumn',
'Winter',
]
SEASON_EVENTS = [
'Vernal Equinox',
'Summer Solstice',
'Autumnal Equinox',
'Winter Solstice',
]
SEASON_EVENTS_NEUTRAL = [
'March Equinox',
'June Solstice',
'September Equinox',
'December Solstice',
]
def seasons(ephemeris):
"""Build a function of time that returns the quarter of the year.
The function that this returns will expect a single argument that is
a :class:`~skyfield.timelib.Time` and will return 0 through 3 for
the seasons Spring, Summer, Autumn, and Winter.
"""
earth = ephemeris['earth']
sun = ephemeris['sun']
def season_at(t):
"""Return season 0 (Spring) through 3 (Winter) at time `t`."""
t._nutation_angles = iau2000b(t.tt)
e = earth.at(t)
_, slon, _ = e.observe(sun).apparent().ecliptic_latlon('date')
return (slon.radians // (tau / 4) % 4).astype(int)
season_at.rough_period = 90.0
return season_at
def sunrise_sunset(ephemeris, topos):
"""Build a function of time that returns whether the sun is up.
The function that this returns will expect a single argument that is
a :class:`~skyfield.timelib.Time` and will return ``True`` if the
sun is up, else ``False``.
"""
sun = ephemeris['sun']
topos_at = (ephemeris['earth'] + topos).at
def is_sun_up_at(t):
"""Return `True` if the sun has risen by time `t`."""
t._nutation_angles = iau2000b(t.tt)
return topos_at(t).observe(sun).apparent().altaz()[0].degrees > -0.8333
is_sun_up_at.rough_period = 0.5 # twice a day
return is_sun_up_at
MOON_PHASES = [
'New Moon',
'First Quarter',
'Full Moon',
'Last Quarter',
]
def moon_phases(ephemeris):
"""Build a function of time that returns the moon phase 0 through 3.
The function that this returns will expect a single argument that is
a :class:`~skyfield.timelib.Time` and will return the phase of the
moon as an integer. See the accompanying array ``MOON_PHASES`` if
you want to give string names to each phase.
"""
earth = ephemeris['earth']
moon = ephemeris['moon']
sun = ephemeris['sun']
def moon_phase_at(t):
"""Return the phase of the moon 0 through 3 at time `t`."""
t._nutation_angles = iau2000b(t.tt)
e = earth.at(t)
_, mlon, _ = e.observe(moon).apparent().ecliptic_latlon('date')
_, slon, _ = e.observe(sun).apparent().ecliptic_latlon('date')
return ((mlon.radians - slon.radians) // (tau / 4) % 4).astype(int)
moon_phase_at.rough_period = 7.0 # one lunar phase per week
return moon_phase_at
def _distance_to(center, target):
def distance_at(t):
t._nutation_angles = iau2000b(t.tt)
distance = center.at(t).observe(target).distance().au
return distance
return distance_at
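# Illustrative usage sketch (not part of the original module). The ephemeris file
# and observer coordinates below are assumptions; any Skyfield ephemeris that
# provides 'earth' and 'sun' will do.
def _example_sunrise_sunset():
    from skyfield import api
    ts = api.load.timescale()
    eph = api.load('de421.bsp')
    observer = api.Topos('40.8939 N', '83.8917 W')
    t0, t1 = ts.utc(2019, 1, 1), ts.utc(2019, 1, 2)
    t, y = find_discrete(t0, t1, sunrise_sunset(eph, observer))
    # y[i] is True where the sun rises at t[i] and False where it sets.
    return t, y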
|
from plenum.common.messages.internal_messages import ReOrderedInNewView
from plenum.test.testing_utils import FakeSomething
def test_reset_monitor_after_view_change_events(create_node_and_not_start):
node = create_node_and_not_start
node.view_changer = FakeSomething(propagate_primary=False,
view_no=1)
node.monitor.throughputs[0].throughput = 1
node.monitor.throughputs[0].first_ts = 0
node.monitor.throughputs[1].throughput = 100
node.monitor.throughputs[1].first_ts = 0
node.replicas._replicas[0].primaryName = "Alpha:0"
node.replicas._replicas[1].primaryName = "Beta:1"
# TODO: Actually it would be nice to check that NewViewAccepted also resets monitor,
# however this requires either much more mocking (which is fragile) or rewriting test
# to use actual Replicas
node._process_re_ordered_in_new_view(ReOrderedInNewView())
# After reset throughput must be 0 or None
# depending on the throughput measurement strategy
assert not node.monitor.getThroughput(0)
assert not node.monitor.getThroughput(1)
assert node.monitor.isMasterDegraded() is False
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import argparse
import base64
import csv
import json
import math
import os
import random
import sys
from multiprocessing import Pool
import cv2
import MatterSim
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.models
import torchvision.transforms.functional as F
from PIL import Image
from timer import Timer
# sys.path.insert(0, "/root/mount/Matterport3DSimulator/models/")
# import bit_pytorch.models as bit_models
csv.field_size_limit(sys.maxsize)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
default="ResNet-152",
    choices=[
        "ResNet-152",
        "BiT-M-R50x1",
        "BiT-M-R50x3",
        "BiT-M-R101x1",
        "BiT-M-R101x3",
        "BiT-M-R152x4",
        "BiT-S-R50x1",
        "BiT-S-R50x3",
        "BiT-S-R101x1",
        "BiT-S-R101x3",
        "BiT-S-R152x4",
    ],
)
parser.add_argument("--img-features-dir", type=str, default="srv/img_features")
parser.add_argument("--models-dir", type=str, default="models/")
parser.add_argument("--output-feature-file", type=str, default="")
parser.add_argument("--seed", type=int, default=1, help="")
parser.add_argument("--batch-size", type=int, default=12, help="")
args = parser.parse_args()
FEATURE_SIZES = {
"ResNet-152": 2048,
"BiT-M-R50x1": 2048,
"BiT-M-R50x3": 6144,
"BiT-M-R101x1": 2048,
"BiT-M-R101x3": 6144,
"BiT-M-R152x4": 8192,
"BiT-S-R50x1": 2048,
"BiT-S-R50x3": 6144,
"BiT-S-R101x1": 2048,
"BiT-S-R101x3": 6144,
"BiT-S-R152x4": 8192,
}
NUM_GPUS = 1
MODEL_NAME = args.model
FEATURE_SIZE = FEATURE_SIZES[MODEL_NAME]
BATCH_SIZE = (
args.batch_size
) # Some fraction of viewpoint size - batch size 4 equals 11GB memory
if args.output_feature_file == "":
OUTFILE = "%s-imagenet-pytorch.tsv" % MODEL_NAME
else:
OUTFILE = args.output_feature_file
OUTFILE = os.path.join(args.img_features_dir, OUTFILE)
MERGED = OUTFILE
if NUM_GPUS != 1:
OUTFILE = OUTFILE + ".%d"
MODELS = args.models_dir
GRAPHS = "connectivity/"
SEED = args.seed
print("SEED: %d" % SEED)
# --------------------------------------------
# --------------------------------------------
TSV_FIELDNAMES = ["scanId", "viewpointId", "image_w", "image_h", "vfov", "features"]
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
GPU_ID = 0
# Simulator image parameters
WIDTH = 640
HEIGHT = 480
VFOV = 60
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
def load_model(model_name):
if model_name == "ResNet-152":
resnet_full = torchvision.models.resnet152(pretrained=True)
resnet = nn.Sequential(*list(resnet_full.children())[:-1])
return resnet
elif "BiT" in model_name: # BiT-M-R50x1
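        # NOTE: this branch relies on ``bit_models`` (bit_pytorch.models), whose import is
        # commented out near the top of this file and must be restored before using BiT models.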
model = bit_models.KNOWN_MODELS[model_name](head_size=1000, zero_head=True)
model.load_from(np.load(MODELS + model_name + ".npz"))
all_layers = list(model.children())
main_stack = all_layers[:-1]
        last_layer_without_fc = all_layers[-1][:-1]
        model_without_fc = main_stack + [last_layer_without_fc]
bit = nn.Sequential(*model_without_fc)
return bit
def load_viewpointids(job_id=0):
viewpointIds = []
with open(GRAPHS + "scans.txt") as f:
scans = [scan.strip() for scan in f.readlines()]
for scan in scans:
with open(GRAPHS + scan + "_connectivity.json") as j:
data = json.load(j)
for item in data:
if item["included"]:
viewpointIds.append((scan, item["image_id"]))
random.seed(SEED)
random.shuffle(viewpointIds)
if NUM_GPUS != 1:
viewpointIds = viewpointIds[job_id::NUM_GPUS]
print("%d: Loaded %d viewpoints" % (job_id, len(viewpointIds)))
return viewpointIds
def transform_img_resnet(im):
""" Prep opencv 3 channel image for the network """
np_im = np.array(im, copy=True).astype(np.float32)
np_im = np_im[..., ::-1]
np_im = np_im.transpose((2, 0, 1)) # (3, H, W)
np_im = np.ascontiguousarray(np_im, dtype=np.float32)
im = torch.from_numpy(np_im)
im /= 255.0
return F.normalize(im, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def transform_img_bit(im):
np_im = np.array(im, copy=True).astype(np.float32)
np_im = np_im[..., ::-1]
np_im = np_im.transpose((2, 0, 1)) # (3, H, W)
np_im = np.ascontiguousarray(np_im, dtype=np.float32)
im = torch.from_numpy(np_im)
im /= 255.0
return F.normalize(im, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
def build_tsv(ids):
job_id = ids[0]
gpu_id = ids[1]
print("JOB ID %d GPU ID %d: build_tsv" % (job_id, gpu_id))
# Set up the simulator
sim = MatterSim.Simulator()
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.setBatchSize(1)
sim.initialize()
with torch.no_grad():
device = torch.device("cuda:%d" % gpu_id)
model = load_model(MODEL_NAME).to(device)
model.eval()
count = 0
t_render = Timer()
t_net = Timer()
if NUM_GPUS == 1:
output_file = OUTFILE
else:
output_file = OUTFILE % job_id
with open(output_file, "wt") as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES)
# Loop all the viewpoints in the simulator
viewpointIds = load_viewpointids(job_id)
for scanId, viewpointId in viewpointIds:
t_render.tic()
# Loop all discretized views from this location
blobs = []
features = np.empty([VIEWPOINT_SIZE, FEATURE_SIZE], dtype=np.float32)
for ix in range(VIEWPOINT_SIZE):
if ix == 0:
sim.newEpisode(
[scanId], [viewpointId], [0], [math.radians(-30)]
)
elif ix % 12 == 0:
sim.makeAction([0], [1.0], [1.0])
else:
sim.makeAction([0], [1.0], [0])
state = sim.getState()[0]
assert state.viewIndex == ix
# Transform and save generated image
if "ResNet" in MODEL_NAME:
transformed_im = transform_img_resnet(state.rgb)
elif "BiT" in MODEL_NAME:
transformed_im = transform_img_bit(state.rgb)
blobs.append(transformed_im)
t_render.toc()
t_net.tic()
# Run as many forward passes as necessary
assert VIEWPOINT_SIZE % BATCH_SIZE == 0
forward_passes = VIEWPOINT_SIZE // BATCH_SIZE
ix = 0
data = torch.empty(
(BATCH_SIZE, 3, HEIGHT, WIDTH), dtype=torch.float32, device=device
)
for f in range(forward_passes):
for n in range(BATCH_SIZE):
# Copy image blob to the net
data[n, :, :, :] = blobs[ix]
ix += 1
# Forward pass
features[f * BATCH_SIZE : (f + 1) * BATCH_SIZE, :] = (
model(data).squeeze().cpu().detach().numpy()
)
writer.writerow(
{
"scanId": scanId,
"viewpointId": viewpointId,
"image_w": WIDTH,
"image_h": HEIGHT,
"vfov": VFOV,
"features": str(base64.b64encode(features), "utf-8"),
}
)
count += 1
t_net.toc()
if count % 100 == 0:
print(
"Processed %d / %d viewpoints, %.1fs avg render time, %.1fs avg net time, projected %.1f hours"
% (
count,
len(viewpointIds),
t_render.average_time,
t_net.average_time,
(t_render.average_time + t_net.average_time)
* len(viewpointIds)
/ 3600,
)
)
def merge_tsvs():
test = [OUTFILE % i for i in range(NUM_GPUS)]
with open(MERGED, "wt") as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES)
for infile in test:
print(infile)
with open(infile, "rt") as tsv_in_files:
reader = csv.DictReader(
tsv_in_files, delimiter="\t", fieldnames=TSV_FIELDNAMES
)
for item in reader:
try:
writer.writerow(item)
except Exception as e:
print(e)
                        print(item["viewpointId"])
def read_tsv(infile):
# Verify we can read a tsv
in_data = []
with open(infile, "rt") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter="\t", fieldnames=TSV_FIELDNAMES)
for item in reader:
item["scanId"] = item["scanId"]
item["image_h"] = int(item["image_h"])
item["image_w"] = int(item["image_w"])
item["vfov"] = int(item["vfov"])
item["features"] = np.frombuffer(
base64.b64decode(item["features"]), dtype=np.float32
).reshape((VIEWPOINT_SIZE, FEATURE_SIZE))
in_data.append(item)
return in_data
if __name__ == "__main__":
if NUM_GPUS == 1:
        build_tsv([0, GPU_ID])
data = read_tsv(OUTFILE)
print("Completed %d viewpoints" % len(data))
else:
# JOB, GPU
ids = [[0, 0], [1, 1], [2, 2], [3, 3]]
p = Pool(4)
p.map(build_tsv, ids)
merge_tsvs()
data = read_tsv(MERGED)
print("Completed %d viewpoints" % len(data))
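# Illustrative invocation sketch (the script filename below is an assumption, not
# something documented in this file; the flags match the argparse options above):
#   python precompute_img_features.py --model ResNet-152 --batch-size 12 --img-features-dir srv/img_features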
|
import matplotlib
import numpy as np
import numpy.testing as npt
import pytest
import freud
matplotlib.use("agg")
class TestMSD:
def test_attribute_access(self):
positions = np.array([[[1, 0, 0]]])
msd = freud.msd.MSD()
with pytest.raises(AttributeError):
msd.msd
with pytest.raises(AttributeError):
msd.plot()
assert msd._repr_png_() is None
msd.compute(positions)
msd.msd
msd.box
msd._repr_png_()
def test_MSD(self):
"""Test correct behavior for various constructor signatures"""
positions = np.array([[[1, 0, 0]]])
msd = freud.msd.MSD()
msd_direct = freud.msd.MSD(mode="direct")
assert msd.compute(positions).msd == [0]
assert msd_direct.compute(positions).msd == [0]
positions = positions.repeat(10, axis=0)
npt.assert_allclose(msd.compute(positions).msd, 0, atol=1e-4)
npt.assert_allclose(msd_direct.compute(positions).msd, 0, atol=1e-4)
positions[:, 0, 0] = np.arange(10)
npt.assert_allclose(msd.compute(positions).msd, np.arange(10) ** 2, atol=1e-4)
npt.assert_allclose(msd_direct.compute(positions).msd, np.arange(10) ** 2)
positions = positions.repeat(2, axis=1)
positions[:, 1, :] = 0
npt.assert_allclose(
msd.compute(positions).msd, np.arange(10) ** 2 / 2, atol=1e-4
)
npt.assert_allclose(msd_direct.compute(positions).msd, np.arange(10) ** 2 / 2)
# Test accumulation
positions.flags["WRITEABLE"] = False
msd.compute(positions[:, [0], :])
msd.compute(positions[:, [1], :], reset=False)
msd_accumulated = msd.msd.copy()
npt.assert_allclose(msd_accumulated, msd.compute(positions).msd)
# Test on a lot of random data against a more naive MSD calculation.
def simple_msd(positions):
"""A naive MSD calculation, used to test."""
msds = []
for m in np.arange(positions.shape[0]):
if m:
diffs = positions[:-m, :, :] - positions[m:, :, :]
else:
diffs = np.zeros_like(positions)
sqdist = np.square(diffs).sum(axis=2)
msds.append(sqdist.mean(axis=0))
return np.array(msds).mean(axis=1), np.array(msds)
num_tests = 5
np.random.seed(10)
for _ in range(num_tests):
positions = np.random.rand(10, 10, 3)
simple, simple_particle = simple_msd(positions)
solution = msd.compute(positions).msd
solution_particle = msd.compute(positions).particle_msd
npt.assert_allclose(solution, simple, atol=1e-6)
npt.assert_allclose(solution_particle, simple_particle, atol=1e-5)
def test_repr(self):
msd = freud.msd.MSD()
assert str(msd) == str(eval(repr(msd)))
msd2 = freud.msd.MSD(box=freud.box.Box(1, 2, 3, 4, 5, 6), mode="direct")
assert str(msd2) == str(eval(repr(msd2)))
|
import theano
theano.config.floatX = 'float32'
import theano.tensor as T
import numpy as np
import lasagne
import matplotlib
matplotlib.use('Agg')
import time
import datasets
train_weights = True
num_epochs = 50
batch_size = 128
X_train, y_train, X_test, y_test, X_test_all, y_test_all = datasets.load_CIFAR10([0,1,2,3])
dropout_p = 0.5
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = lasagne.layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
# Convolution + pooling + dropout
network = lasagne.layers.Conv2DLayer(network, num_filters=192, filter_size=5, nonlinearity=lasagne.nonlinearities.elu)
network = lasagne.layers.DropoutLayer(network, p=dropout_p)
network = lasagne.layers.Pool2DLayer(network, pool_size=2)
network = lasagne.layers.Conv2DLayer(network, num_filters=192, filter_size=5, nonlinearity=lasagne.nonlinearities.elu)
network = lasagne.layers.DropoutLayer(network, p=dropout_p)
network = lasagne.layers.Pool2DLayer(network, pool_size=2)
# Fully-connected + dropout
network = lasagne.layers.DenseLayer(network, num_units=1000, nonlinearity=lasagne.nonlinearities.elu)
network = lasagne.layers.DropoutLayer(network, p=dropout_p)
network = lasagne.layers.DenseLayer(
network, num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
if train_weights:
# Softmax output
prediction = lasagne.layers.get_output(network, deterministic=False)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# L2 regularization (weight decay)
weightsl2 = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
loss += 1e-5*weightsl2
# ADAM training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
train = theano.function([input_var, target_var], loss, updates=updates)
# Test functions
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var).mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var), dtype=theano.config.floatX)
test = theano.function([input_var, target_var], [test_loss, test_acc])
bayesian_test_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var), dtype=theano.config.floatX)
bayesian_test = theano.function([input_var, target_var], bayesian_test_acc)
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=True):
inputs, targets = batch
err = train(inputs, targets)
train_err += err
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
inputs, targets = batch
err, acc = test(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
test_err = 0
test_bayes_acc = 0
test_acc = 0
test_batches = 0
bayes_repeat = 32
for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
inputs, targets = batch
# Bayesian accuracy (multiple dropout samples)
bayes_acc = 0.0
for i, t in zip(inputs, targets):
bayes_acc += bayesian_test(np.repeat(i[np.newaxis], bayes_repeat, 0), np.repeat(t[np.newaxis], bayes_repeat, 0))
bayes_acc /= batch_size
test_bayes_acc += bayes_acc
# Standard accuracy (no dropout)
err, acc = test(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
print(" bayes accuracy:\t\t{:.2f} %".format(test_bayes_acc / test_batches * 100))
np.savez('cifar_outsiders_pred.npz', *lasagne.layers.get_all_param_values(network))
else:
with np.load('cifar_outsiders_pred.npz') as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
|
import asyncio
from datetime import datetime
import email.utils
import logging
from types import MappingProxyType
from typing import Union
from pathlib import PosixPath
import json
# Third Party
import aiohttp.helpers
import aiobotocore.session
import aiobotocore.config
import yarl
import backoff
_empty_dict = MappingProxyType({})
_empty_list = tuple()
_DEFAULT_BATCH_SIZE = 100
def _parse_rfc822(dt: str) -> datetime:
ts = email.utils.mktime_tz(email.utils.parsedate_tz(dt))
dt = datetime.utcfromtimestamp(ts)
return dt
class _Pager:
def __init__(self, session: aiohttp.ClientSession, url: yarl.URL, batch_size: int, response_key: str):
self._session = session
self._url = url
self._batch_size = batch_size
self._batch = None
self._response_key = response_key
self._next = None
def __aiter__(self):
self._batch = None
self._next = None
return self
@backoff.on_exception(backoff.expo, (aiohttp.ClientError, asyncio.TimeoutError), max_tries=2)
async def _get_next_batch(self, url: Union[yarl.URL, str]):
with aiohttp.helpers.CeilTimeout(180):
async with self._session.get(url) as response:
self._next = response.links.get('next', _empty_dict).get('url')
self._batch = await response.json()
async def __anext__(self) -> str:
if self._batch is None:
url = self._url.with_query(dict(n=self._batch_size))
await self._get_next_batch(url)
elif self._next and not self._batch[self._response_key]:
await self._get_next_batch(self._next)
try:
errors = self._batch.get('errors')
if errors and errors[0].get('code') == 'NAME_UNKNOWN' and self._url.path.endswith('/tags/list'):
raise StopAsyncIteration
return self._batch[self._response_key].pop(0)
except IndexError:
raise StopAsyncIteration
class RegistryClient:
def __init__(self, url: str, max_connections: int=100):
"""
Creates docker registry client instance based on V2 docker registry REST API
:param url: base url to docker registry endpoint
:param max_connections: maximum number of connections to use
"""
self._url = yarl.URL(url)
self._session: aiohttp.ClientSession = None
self._logger = logging.getLogger('RegistryClient')
boto_session = aiobotocore.session.get_session()
config = aiobotocore.config.AioConfig(connect_timeout=10, read_timeout=10, max_pool_connections=max_connections)
self._s3_client = boto_session.create_client('s3', config=config)
async def __aenter__(self):
timeout = aiohttp.ClientTimeout(sock_read=15, sock_connect=15)
self._session = await aiohttp.ClientSession(timeout=timeout).__aenter__()
self._s3_client = await self._s3_client.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._session.__aexit__(exc_type, exc_val, exc_tb)
await self._s3_client.__aexit__(exc_type, exc_val, exc_tb)
def catalog_pager(self, batch_size: int=_DEFAULT_BATCH_SIZE):
return _Pager(self._session, self._url / 'v2/_catalog', batch_size, 'repositories')
def image_tag_pager(self, image_name: str, batch_size: int=_DEFAULT_BATCH_SIZE):
return _Pager(self._session, self._url / f'v2/{image_name}/tags/list', batch_size, 'tags')
@backoff.on_exception(backoff.expo, (aiohttp.ClientError, asyncio.TimeoutError), max_tries=2)
async def get_image_manifest(self, image_name: str, tag: str):
with aiohttp.helpers.CeilTimeout(180):
async with self._session.get(self._url / 'v2' / image_name / 'manifests' / tag, headers=dict(Accept='application/vnd.docker.distribution.manifest.v2+json')) as response:
manifest = await response.json(content_type=None)
for history in manifest.get('history', _empty_list):
history["v1Compatibility"] = json.loads(history["v1Compatibility"])
return manifest
@backoff.on_exception(backoff.expo, (aiohttp.ClientError, asyncio.TimeoutError), max_tries=2)
async def get_blob_info(self, image_name: str, blob_sum: str):
info = dict()
with aiohttp.helpers.CeilTimeout(180):
async with self._session.head(self._url / 'v2' / image_name / 'blobs' / blob_sum) as response:
location = response.headers.get("Location")
if location:
location = yarl.URL(location)
if location and location.host.startswith("s3-") and location.host.endswith(".amazonaws.com"):
region = location.host[3:].split(".", 1)[0]
bucket, key = yarl.URL(location).path[1:].split("/", 1)
info["s3location"] = dict(region=region, bucket=bucket, key=key)
response = await self._s3_client.head_object(Bucket=bucket, Key=key)
info["size"] = response['ContentLength']
info['modified'] = response['LastModified']
if 's3location' not in info:
with aiohttp.helpers.CeilTimeout(180):
async with self._session.get(self._url / 'v2' / image_name / 'blobs' / blob_sum, read_until_eof=False) as response:
info["size"] = int(response.headers["Content-Length"])
info["modified"] = _parse_rfc822(response.headers["Last-Modified"])
response.close()
return info
class _S3Pager:
def __init__(self, async_iter):
self._async_iter = async_iter
self._prefix_len = None
self._next = None
async def _get_next_batch(self):
with aiohttp.helpers.CeilTimeout(180):
response = await self._async_iter.__anext__()
self._prefix_len = len(response['Prefix'])
self._next = response.get('CommonPrefixes', [])
def __aiter__(self):
self._next = None
self._prefix_len = None
return self
async def __anext__(self):
if not self._next:
await self._get_next_batch()
try:
item = self._next.pop(0)
return item['Prefix'][self._prefix_len:-1]
except IndexError:
raise StopAsyncIteration
class S3RegistryClient:
def __init__(self, bucket: str, prefix: str, max_connections: int=100):
"""
Creates docker registry client instance based on S3 registry backend
:param bucket: S3 bucket of registry
:param prefix: S3 bucket prefix, ex: docker/v2/dev/docker/registry/v2, where "blobs" and "repositories" folders exist
:param max_connections: maximum number of connections to use
"""
self._bucket = bucket
self._prefix = PosixPath(prefix)
boto_session = aiobotocore.session.get_session()
config = aiobotocore.config.AioConfig(connect_timeout=15, read_timeout=15, max_pool_connections=max_connections)
self._s3_client = boto_session.create_client('s3', config=config)
async def __aenter__(self):
self._s3_client = await self._s3_client.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._s3_client.__aexit__(exc_type, exc_val, exc_tb)
def catalog_pager(self):
paginator = self._s3_client.get_paginator('list_objects')
prefix = str(self._prefix / "repositories") + '/'
return _S3Pager(paginator.paginate(Bucket=self._bucket, Prefix=prefix, Delimiter='/'))
def image_tag_pager(self, image_name: str):
# TODO: validate against repository API
paginator = self._s3_client.get_paginator('list_objects')
prefix = str(self._prefix / "repositories" / image_name / "_manifests" / "tags") + '/'
return _S3Pager(paginator.paginate(Bucket=self._bucket, Prefix=prefix, Delimiter='/'))
async def get_image_manifest(self, image_name: str, tag: str):
# TODO: this is not correct, not matching repository API
# get pointer to current version of this tag
with aiohttp.helpers.CeilTimeout(180):
response = await self._s3_client.get_object(Bucket=self._bucket, Key=str(self._prefix / "repositories" / image_name / "_manifests" / "tags" / tag / "current" / "link"))
async with response["Body"] as stream:
data = await stream.read()
sha_prefix, sha256 = data.decode('utf-8').split(':', 1)
# now get the manifest
with aiohttp.helpers.CeilTimeout(180):
response = await self._s3_client.get_object(Bucket=self._bucket, Key=str(self._prefix / "blobs" / "sha256" / sha256[:2] / sha256 / "data"))
async with response["Body"] as stream:
manifest = await stream.read()
manifest = json.loads(manifest)
for history in manifest.get('history', _empty_list):
history["v1Compatibility"] = json.loads(history["v1Compatibility"])
return manifest
async def get_blob_info(self, image_name: str, blob_sum: str):
prefix, shasum = blob_sum.split(':', 1)
key = str(self._prefix / "blobs" / "sha256" / shasum[:2] / shasum / "data")
with aiohttp.helpers.CeilTimeout(180):
response = await self._s3_client.head_object(Bucket=self._bucket, Key=key)
return {'size': response['ContentLength'], 'bucket': self._bucket, 'key': key}
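# Illustrative sketch (not part of the original module): walking a registry catalog
# with RegistryClient. The registry URL is an assumption; drive the coroutine with
# asyncio.get_event_loop().run_until_complete(...).
async def _example_list_repositories():
    async with RegistryClient('https://registry.example.com') as client:
        async for repo in client.catalog_pager(batch_size=50):
            async for tag in client.image_tag_pager(repo):
                print(repo, tag)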
|
# coding: utf-8
# In[4]:
from keras.layers import Activation, Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU, PReLU, ThresholdedReLU, ELU
from keras import regularizers
# In[5]:
def get_activation_layer(activation):
"""
Returns the activation layer given its name
"""
if activation == 'ELU':
return ELU()
if activation == 'LeakyReLU':
return LeakyReLU()
if activation == 'ThresholdedReLU':
return ThresholdedReLU()
if activation == 'PReLU':
return PReLU()
return Activation(activation)
# In[4]:
class Layer(object):
"""
Layer object for adding different types of layers to the model
"""
def __init__(self, layer_type):
self.layer_type = layer_type
if self.layer_type in ["hidden", "input", "output"]:
self.kernel_initializer='normal'
self.kernel_regularizer=regularizers.l2(0.01)
def add_to_model(self, model, params, count, input_dim=None, output_layer_units=None, mode=None, layers=None):
"""
Add layer to model
"""
## Input Layer
if self.layer_type == "input":
units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
if input_dim is not None:
model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
else:
model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
return model
## Hidden Layer
if self.layer_type == "hidden":
units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
if input_dim is not None:
model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
else:
model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
return model
## Activation Layer
if self.layer_type == "activation":
model.add(get_activation_layer(params["activation_function"]))
return model
## Dropout Layer
if self.layer_type == "dropout":
dropout_rate = params["dropout_rate"]
if dropout_rate > 0:
model.add(Dropout(dropout_rate))
return model
## Output Layer
if self.layer_type == "output":
if mode == "classifier":
model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
try:
                    if params["output_activation_function"] is not None:
model.add(get_activation_layer(params["output_activation_function"]))
except KeyError:
pass
elif mode == "regressor":
model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
else:
raise ValueError("mode has to be 'regressor' or 'classifier'")
return model
## LSTM Layer
# if self.layer_type == "LSTM":
# units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
# count_LSTM = layers.count("LSTM")
# if count < count_LSTM:
# return_sequences = True
# else:
# return_sequences = False
# if input_dim is not None:
# model.add(LSTM(units, input_dim=input_dim, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# else:
# model.add(LSTM(units, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# return model
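# In[6]:
# Illustrative sketch (not part of the original notebook export): assembling a small
# Sequential model with the Layer helper above. The parameter keys follow the naming
# convention used in ``add_to_model``; the unit counts and the Sequential import are
# assumptions for the example.
def _example_build_model():
    from keras.models import Sequential
    params = {
        "input_layer_1_units": 64,
        "hidden_layer_1_units": 32,
        "activation_function": "LeakyReLU",
        "dropout_rate": 0.2,
        "output_activation_function": "softmax",
    }
    model = Sequential()
    model = Layer("input").add_to_model(model, params, 1, input_dim=10)
    model = Layer("activation").add_to_model(model, params, 1)
    model = Layer("hidden").add_to_model(model, params, 1)
    model = Layer("dropout").add_to_model(model, params, 1)
    model = Layer("output").add_to_model(model, params, 1, output_layer_units=3, mode="classifier")
    return model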
|
#######################################################
#
# ClientInformationController.py
# Python implementation of the Class ClientInformationController
# Generated by Enterprise Architect
# Created on: 19-May-2020 6:56:00 PM
# Original author: Natha Paquette
#
#######################################################
from lxml import etree
from model.Event import Event
from model.ClientInformation import ClientInformation
from BasicModelInstantiate import BasicModelInstantiate
import uuid
from logging.handlers import RotatingFileHandler
import logging
from configuration.LoggingConstants import LoggingConstants
import sys
from CreateLoggerController import CreateLoggerController
logger = CreateLoggerController("ClientInformationController").getLogger()
loggingConstants = LoggingConstants()
class ClientInformationController(BasicModelInstantiate):
def __init__(self):
pass
'''
    connectionSetup below is obsolete; it has been replaced by intstantiateClientInformationModelFromConnection
'''
def intstantiateClientInformationModelFromConnection(self, rawClientInformation, queue):
try:
self.m_clientInformation = ClientInformation()
argument = "initialConnection"
self.m_clientInformation.dataQueue = queue
self.modelObject = Event(argument)
self.m_clientInformation.socket = rawClientInformation[0]
self.m_clientInformation.IP = rawClientInformation[1]
self.m_clientInformation.idData = rawClientInformation[2]
self.m_clientInformation.alive = 1
self.m_clientInformation.ID = uuid.uuid1().int
super().__init__(self.m_clientInformation.idData, self.modelObject)
self.m_clientInformation.modelObject = self.modelObject
return self.m_clientInformation
except Exception as e:
logger.error('error in client information controller '+str(e))
def connectionSetup(self, client, address):
pass
'''
try:
sqliteServer = sqlite3.connect(const.DATABASE)
cursor = sqliteServer.cursor()
first_run = 1
#create client dictionary within main dictionary containing arrays for data and chat also other stuff for client enitial connection
current_id = 0
total_clients_connected = 0
total_clients_connected += 1
id_data = client.recv(const.STARTBUFFER)
print(id_data)
print('\n'+str(id_data))
print('\n \n')
tree = ET.fromstring(id_data)
uid = tree.get('uid')
if uid == self.bandaidUID:
return 'Bandaid'
callsign = tree[1][1].attrib['callsign']
current_id = uuid.uuid1().int
#add identifying information
self.client_dict[current_id] = {'id_data': '', 'main_data': [], 'alive': 1, 'uid': '', 'client':client, 'callsign':callsign}
self.client_dict[current_id]['id_data'] = id_data
self.client_dict[current_id]['uid'] = uid
cursor.execute(sql.INSERTNEWUSER,(str(current_id), str(uid), str(callsign)))
sqliteServer.commit()
cursor.close()
sqliteServer.close()
#print(self.client_dict)
logger.info('client connected, information is as follows initial'+ '\n'+ 'connection data:'+str(id_data)+'\n'+'current id:'+ str(current_id))
return str(first_run)+' ? '+str(total_clients_connected)+' ? '+str(id_data)+' ? '+str(current_id)
except Exception as e:
logger.warning('error in connection setup: ' + str(e))
logger.warning(id_data)
return "error"
'''
#rawClientInformation = ['abc', 'def', b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<event version="2.0" uid="ANDROID-359975090666199" type="a-f-G-U-C" time="2020-05-25T12:23:13.288Z" start="2020-05-25T12:23:13.288Z" stale="2020-05-25T12:29:28.288Z" how="h-e"><point lat="43.855596" lon="-66.10805" hae="20.395709421887993" ce="62.1" le="9999999.0"/><detail><takv os="28" version="3.12.0-45691.45691-CIV" device="SAMSUNG SM-G950W" platform="ATAK-CIV"/><contact endpoint="*:-1:stcp" callsign="SUMMER"/><uid Droid="SUMMER"/><precisionlocation altsrc="GPS" geopointsrc="GPS"/><__group role="Sniper" name="Cyan"/><status battery="4"/><track course="191.76600028243948" speed="0.0"/></detail></event>']
#ClientInformationController().intstantiateClientInformationModelFromConnection(rawClientInformation)
|
'''
Runtime: 44 ms, faster than 98.75% of Python3 online submissions for Search in Rotated Sorted Array II.
Memory Usage: 14.5 MB, less than 19.90% of Python3 online submissions for Search in Rotated Sorted Array II.
'''
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> bool:
return self.helper(nums, target, 0, len(nums) - 1)
def helper(self, nums: list, target: int, start: int, end: int) -> bool:
if start > end:
return False
else:
midpoint: int = start + (end - start) // 2
if nums[midpoint] == target:
return True
elif nums[midpoint] > target and target > nums[start]:
return self.helper(nums, target, start, midpoint - 1)
elif nums[midpoint] < target and target < nums[end]:
return self.helper(nums, target, midpoint + 1, end)
else:
f: bool = self.helper(nums, target, start, midpoint - 1)
if not f:
f = self.helper(nums, target, midpoint + 1, end)
return f
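# Illustrative check (not part of the submission); the values exercise both the
# direct match and the "search both halves" fallback:
#   Solution().search([2, 5, 6, 0, 0, 1, 2], 0)  # -> True
#   Solution().search([2, 5, 6, 0, 0, 1, 2], 3)  # -> False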
|
from discord.ext import commands, tasks
from discord import File
from datetime import date, datetime
import numpy as np
from functions import cal
class Exercise(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.reminder.start()
def cog_unload(self):
self.reminder.cancel()
@commands.command(name="checkin", help="Check in for the day")
async def check_in(self, ctx):
response = str(ctx.author)[:-5] + " has checked in for the day!"
await ctx.send(response)
log = "./data/log.dat"
with open(log, "a") as myfile:
item = str(ctx.author)[:-5] + "," + str(date.today()) + "\n"
myfile.write(item)
@commands.command(name="zrobione", help="Check in for the day")
async def zrobione(self, ctx):
if str(ctx.author) == "SaiDuc#4556" or str(ctx.author) == "zztop66#0863":
response = str(ctx.author)[:-5] + " ukończył ćwiczenia na dzisiaj!"
else:
response = str(ctx.author)[:-5] + " ukończyła ćwiczenia na dzisiaj!"
await ctx.send(response)
log = "./data/log.dat"
with open(log, "a") as myfile:
item = str(ctx.author)[:-5] + "," + str(date.today()) + "\n"
myfile.write(item)
@commands.command(name="graph", help="Makes calendar of exercise")
async def make_graph(self, ctx, name):
if str(name) == "Kasia" or str(name) == "kasia":
name = "Kasiakoo"
elif str(name) == "Zuzia" or str(name) == "zuzia":
name = "zuziek424"
elif str(name) == "Sai" or str(name) == "sai":
name = "SaiDuc"
elif str(name) == "Zbyszek" or str(name) == "zbyszek":
name = "zztop66"
elif str(name) == "Anna" or str(name) == "anna":
name = "AnetaK"
else:
await ctx.send("User not found")
return
days, months = cal.get_data(name)
cal.plot_calendar(days, months)
with open(r"./tmp.jpg", "rb") as image:
await ctx.send(name+"'s graph:", file=File(image))
@tasks.loop(seconds=60.0)
async def reminder(self):
message_channel = self.bot.get_channel(725329702985662505)
current_time = datetime.now().strftime("%H:%M:%S")
if current_time[:2] == "19" and current_time[3:5] == "00" and datetime.today().weekday() < 5:
log = "./data/log.dat"
names, times = np.loadtxt(log, dtype="str", unpack=True, delimiter=",")
members = ["Kasiakoo", "SaiDuc", "zuziek424", "zztop66", "AnetaK"]
people = []
for i in range(len(times)):
if times[i] == str(date.today()):
people.append(names[i])
for i in members:
if i not in people:
response = i + " has not checked in today!"
await message_channel.send(response)
else:
return
def setup(bot):
bot.add_cog(Exercise(bot))
|
import sqlite3, argparse, os, pytz
from datetime import timedelta as td
from datetime import datetime as dt
from dateutil import parser as dt_parser
from configuration import *
import matplotlib as mpl
from PyQt4.QtCore import QSettings
import matplotlib.pyplot as plt
import numpy as np
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
    return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))
def connection_and_cursor(path_to_db):
"""Connect to the db and pass back the connection and cursor. Create db
and directories if needed."""
if not os.path.exists(path_to_db):
p, db_name = os.path.split(path_to_db)
if p:
plist = p.split(os.path.sep)
for i in range(len(plist)):
dirpath = os.path.sep.join(plist[:i+1])
if dirpath and not os.path.exists( dirpath ):
os.mkdir(dirpath)
conn = sqlite3.connect(path_to_db, detect_types=sqlite3.PARSE_DECLTYPES)
cur = conn.cursor()
return conn,cur
def make_aware_of_local_tz(unaware):
"""Make an unaware time object aware of the local time zone. This tries to
get the timezone from settings."""
settings = QSettings(CONF_QSETTINGS_DEVELOPER,CONF_QSETTINGS_APPLICATION)
try:
local_zone_str = str( settings.value('timezone',LOCAL_TIME_ZONE).toString() )
except AttributeError:
local_zone_str = str( settings.value('timezone',LOCAL_TIME_ZONE) )
# LOCAL_TIME_ZONE is from configuration.py
local_zone = pytz.timezone(local_zone_str)
return local_zone.localize(unaware)
def utc_from_local(localtime):
"""Given a local datetime, return the UTC datetime equivalent. If the time
given is not aware, assume it is suppose to be local and make it aware."""
if not localtime.tzinfo: # make sure it really is aware
localtime = make_aware_of_local_tz(localtime)
return localtime.astimezone(pytz.UTC)
def local_from_utc(utc_datetime):
"""Given a utc datetime, return the local equivalent."""
if not utc_datetime.tzinfo: # make sure it is aware
utc_datetime = pytz.utc.localize(utc_datetime)
return utc_datetime.astimezone( pytz.timezone(LOCAL_TIME_ZONE) ) # LOCAL_TIME_ZONE from configuration.py
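# A minimal usage sketch, kept commented out because it relies on configuration.py
# (LOCAL_TIME_ZONE) and a Qt settings store being available:
#   naive = dt(2020, 1, 1, 12, 0)
#   aware_local = make_aware_of_local_tz(naive)
#   as_utc = utc_from_local(aware_local)
#   back_to_local = local_from_utc(as_utc)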
|
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
from products.models import Product
from .cart import Cart
from .forms import CartAddProductForm
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(
product=product, quantity=cd["quantity"], override_quantity=cd["override"]
)
return redirect("cart:detail")
@require_POST
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect("cart:detail")
def cart_detail(request):
cart = Cart(request)
return render(request, "cart/cart_detail.html", {'cart': cart})
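# A possible urls.py wiring for these views, sketched as a comment; the URL names are
# taken from the redirects above, while the path patterns are assumptions:
# from django.urls import path
# from . import views
# app_name = "cart"
# urlpatterns = [
#     path("", views.cart_detail, name="detail"),
#     path("add/<int:product_id>/", views.cart_add, name="add"),
#     path("remove/<int:product_id>/", views.cart_remove, name="remove"),
# ]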
|
import timeit
from cProfile import Profile
Environment = r'''
from datetime import date, datetime, time
from decimal import Decimal as decimal
from uuid import uuid4
from scheme import *
Schema = Structure({
'id': UUID(nonempty=True),
'binary': Binary(),
'boolean': Boolean(),
'date': Date(),
'datetime': DateTime(),
'decimal': Decimal(),
'definition': Definition(),
'email': Email(),
'enumeration': Enumeration('one two'),
'float': Float(),
'integer': Integer(),
'map': Map(Sequence(Text())),
'object': Object(),
'sequence': Sequence(Map(Text())),
'structure': Structure({
'alpha': {
'numbers': Sequence(Integer()),
},
'beta': {
'letters': Sequence(Text()),
},
'*': {
'always': Boolean(),
},
}, polymorphic_on='type'),
'surrogate': Surrogate(),
'text': Text(minlength=5),
'time': Time(),
'token': Token(),
'tuple': Tuple((Integer(), Text())),
})
UnprocessedData = {
'id': 'fec5eadb-1de1-4102-86fb-e88e131086e3',
#'binary': '\x00' * 1024,
'boolean': True,
'date': date(2000, 1, 1),
'datetime': datetime(2000, 1, 1, 12, 0, 0, tzinfo=UTC),
'decimal': decimal('1.0'),
'email': 'test@test.com',
'enumeration': 'one',
'float': 1.0,
'integer': 1,
'map': {'a': ['a', 'a'], 'b': ['b', 'b']},
'sequence': [{'a': 'a'}, {'b': 'b'}],
'structure': {'type': 'alpha', 'always': True, 'numbers': [1, 2, 3]},
'text': 'x' * 4096,
'time': time(12, 0, 0),
'token': 'token',
'tuple': (12, '12'),
}
SerializedData = {
'id': 'fec5eadb-1de1-4102-86fb-e88e131086e3',
#'binary': ('A' * 1366) + '==',
'boolean': True,
'date': '2000-01-01',
'datetime': '2000-01-01T12:00:00Z',
'decimal': '1.0',
'email': 'test@test.com',
'enumeration': 'one',
'float': 1.0,
'integer': 1,
'map': {'a': ['a', 'a'], 'b': ['b', 'b']},
'sequence': [{'a': 'a'}, {'b': 'b'}],
'structure': {'type': 'alpha', 'always': True, 'numbers': [1, 2, 3]},
'text': 'x' * 4096,
'time': '12:00:00',
'token': 'token',
'tuple': (12, '12'),
}
JsonData = """{
"id": "fec5eadb-1de1-4102-86fb-e88e131086e3",
"boolean": true,
"date": "2000-01-01",
"datetime": "2000-01-01T12:00:00Z",
"decimal": "1.0",
"email": "test@test.com",
"enumeration": "one",
"float": 1.0,
"integer": 1,
"map": {"a": ["a", "a"], "b": ["b", "b"]},
"sequence": [{"a": "a"}, {"b": "b"}],
"structure": {"type": "alpha", "always": true, "numbers": [1, 2, 3]},
"text": "%s",
"time": "12:00:00",
"token": "token",
"tuple": [12, "12"]
}""" % ('x' * 4096)
'''
def profile_while_timing(statement, setup=Environment, number=5000):
profile = Profile()
profile.enable()
timeit.timeit(statement, setup, number=number)
profile.disable()
profile.print_stats('cumtime')
def profile_serialization():
profile_while_timing("Schema.process(UnprocessedData, OUTBOUND, True)")
def profile_json_serialization():
profile_while_timing("Schema.serialize(UnprocessedData, 'json')")
def profile_json_unserialization():
profile_while_timing("Schema.unserialize(JsonData, 'json')")
|
__VERSION__ = '1.4'
from pymls.solver import Solver
from pymls.layers import Layer, StochasticLayer
from pymls.media import from_yaml
import pymls.backing as backing
|
def solution():
data = open(r'inputs\day13.in').readlines()
print('Part 1 result: ' + str(part1(data)))
print('Part 2 result: ' + str(part2(data)))
def part1(data):
# build out the grid and instructions from our data
grid, fold_instructions = build_grid_and_instructions(data)
# run the first fold only
grid = fold(grid, fold_instructions[0])
# the length of the grid is our answer
return len(grid)
def part2(data):
# build out the grid and instructions from our data
grid, fold_instructions = build_grid_and_instructions(data)
# loop through every fold instruction, running it
for i in range(len(fold_instructions)):
instr = fold_instructions[i]
grid = fold(grid, instr)
# get the max x and y values
X = max([x for (x,y) in grid.keys()]) + 1
Y = max([y for (x,y) in grid.keys()]) + 1
# print out the word by looping through the grid printing one row at a time
ans = ''
for y in range(Y):
for x in range(X):
ans += ('x' if (x,y) in grid else ' ')
print(ans)
ans = ''
return 'Read above'
def build_grid_and_instructions(data):
grid = {}
fold_instructions = []
for line in data:
line = line.strip()
# ignore the blank lines
if line == '':
continue
if line.startswith('fold'):
# split on the equals sign
a, b = line.split('=')
# now, split the first piece on the comma and take the third element, this is our axis
a = a.split(' ')[2]
# add the instruction as a tuple to our fold_instructions list, for example fold along y=3 would be ('y', 3)
fold_instructions.append((a, b))
else:
# split on the comma and cast both to ints
x, y = [int(x) for x in line.strip().split(',')]
# set that spot in our grid to be true
grid[(x, y)] = True
# return the grid & instructions
return grid, fold_instructions
def fold(grid, instruction):
grid2 = {}
# the line we want to fold along
fold_line = int(instruction[1])
if instruction[0] == 'x':
for (x, y) in grid:
# if our x value is less than the fold line, we leave it alone, and copy the same location to the new grid
if x < fold_line:
grid2[(x, y)] = True
else:
# otherwise, we need the new location to be flipped across the fold line,
# so its new x value is the fold line minus the distance between the fold line and the current x value
# i.e. an x of 7 folded over 5 would get moved to x = 3
grid2[((fold_line - (x - fold_line), y))] = True
else:
# if it isn't x, it must be y
assert instruction[0] == 'y'
for (x, y) in grid:
# if our y value is less than the fold line, we leave it alone, and copy the same location to the new grid
if y < fold_line:
grid2[(x, y)] = True
else:
# otherwise, we need the new location to be flipped across the fold line,
# same logic as above, but with y instead
grid2[((x, fold_line - (y - fold_line)))] = True
# return the new grid
return grid2
solution()
|
# The main entrance of the optimal power flow in the universal energy management system
import threading # Thread management (timeout and return value)
from solvers.mixed_integer_solvers_gurobi import mixed_integer_linear_programming
# from solvers.mixed_integer_solvers_mosek import mixed_integer_linear_programming
class SolvingThread(threading.Thread):
# Thread operation with time control and return value
def __init__(self, parameter):
threading.Thread.__init__(self)
self.parameter = parameter
self.value = 0
def run(self):
self.value = solving_procedure(self.parameter)
def solving_procedure(*args):
# By using linear programming to solve the optimal power flow problem
# The input is dictionary
c = args[0]["c"]
A = args[0]["A"]
b = args[0]["b"]
Aeq = args[0]["Aeq"]
beq = args[0]["beq"]
lb = args[0]["lb"]
ub = args[0]["ub"]
option = {"disp": False}
# Convert list into tuple
# boundary = tuple()
# for i in range(len(lb)):
# boundary += ((lb[i], ub[i]),)
vtypes = ["c"] * len(lb)
(solution, obj, success) = mixed_integer_linear_programming(c, Aeq=Aeq, beq=beq, A=A, b=b, xmin=lb, xmax=ub, vtypes=vtypes)
# res = optimize.linprog(c, A_ub=A, b_ub=b, A_eq=Aeq, b_eq=beq, bounds=boundary, options=option)
res = {"x": solution,
"obj": obj,
"success": success > 0}
return res
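# A minimal usage sketch (the problem matrices/vectors are assumed to come from the
# surrounding energy management pipeline, not from this module):
# problem = {"c": c, "A": A, "b": b, "Aeq": Aeq, "beq": beq, "lb": lb, "ub": ub}
# worker = SolvingThread(problem)
# worker.start()
# worker.join(timeout=10)  # the time control mentioned in the class comment
# result = worker.value    # {"x": ..., "obj": ..., "success": ...}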
|
"""
This module contains a new abstract type of faust record to process some data from the CW api
"""
# We start importing some typing stuff
from typing import Generic, TypeVar
# and the faust related objects and types
from faust import Record
from faust.serializers import codecs
# then we import our custom codec serializer
from .._codec import digest
# and we register that codec (so it is loaded for every package, script or app that uses this)
codecs.register("digest", digest())
## The digest record
# We use a generic var for generic use in digests
T = TypeVar("T")
# and then we build a digest that represents any 'response in list shape' returned by the api
class Digest(Record, Generic[T], serializer="digest", abstract=True):
digest: list[T]
def __iter__(self):
for d in self.digest:
yield d
def __str__(self) -> str:
return str(self.digest)
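# A concrete subclass sketch (the payload type is illustrative; real digests would
# parameterize Digest with a record type from the CW api models):
# class IntDigest(Digest[int], serializer="digest"):
#     pass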
|
from homeassistant.core import ServiceCall
from requests import put
import logging
import requests
_LOGGER = logging.getLogger(__name__)
def create_or_update(call: ServiceCall) -> bool:
host = call.data.get("host", None)
protocol = call.data.get("protocol", "http")
host_port = call.data.get("host_port", 8500)
token = call.data.get("token", None)
key = call.data.get("key", None)
value = call.data.get("value", None)
datacenter = call.data.get("datacenter", None)
flags = call.data.get("flags", None)
cas = call.data.get("cas", None)
acquire = call.data.get("acquire", None)
release = call.data.get("release", None)
ns = call.data.get("ns", None)
params = {}
data = None
headers = {}
if datacenter:
params['dc'] = datacenter
if value:
data = value
if flags:
params['flags'] = flags
if cas:
params['cas'] = cas
if acquire:
params['acquire'] = acquire
if release:
params['release'] = release
if ns:
params['ns'] = ns
if token:
headers['X-Consul-Token'] = token
try:
response = put("{protocol}://{host}:{host_port}/v1/kv/{key}".format(protocol=protocol, host=host, host_port=host_port, key=key),
data=data,
headers=headers,
params=params
)
return response.__bool__()
except IOError as e:
_LOGGER.exception(e)
return False
def delete(call: ServiceCall) -> bool:
host = call.data.get("host", None)
protocol = call.data.get("protocol", "http")
host_port = call.data.get("host_port", 8500)
token = call.data.get("token", None)
key = call.data.get("key", None)
datacenter = call.data.get("datacenter", None)
cas = call.data.get("cas", None)
recurse = call.data.get("recurse", None)
ns = call.data.get("ns", None)
params = {}
data = None
headers = {}
if datacenter:
params['dc'] = datacenter
if recurse:
params['recurse'] = recurse
if cas:
params['cas'] = cas
if ns:
params['ns'] = ns
if token:
headers['X-Consul-Token'] = token
try:
response = requests.delete("{protocol}://{host}:{host_port}/v1/kv/{key}".format(protocol=protocol, host=host, host_port=host_port, key=key),
data=data,
headers=headers,
params=params
)
return response.__bool__()
except IOError as e:
_LOGGER.exception(e)
return False
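# Registration sketch for a Home Assistant custom component (the domain name is an
# assumption; only the standard hass.services.register call is used):
# def setup(hass, config):
#     hass.services.register("consul_kv", "create_or_update", create_or_update)
#     hass.services.register("consul_kv", "delete", delete)
#     return True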
|
from random import randint
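# Dice game ("Jogo De Dados"): each player rolls two dice and the higher total wins.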
print("Jogo De Dados")
dados1 = randint(1, 6)
dados2 = randint(1, 6)
jogador1 = dados1 + dados2
dados3 = randint(1, 6)
dados4 = randint(1, 6)
jogador2 = dados3 + dados4
print("Dado 1:", dados1)
print("Dado 2:", dados2)
print("Dado 3:", dados3)
print("Dado 4:", dados4)
print("Resultado do jogador 1:", jogador1)
print("Resultado do jogador 2:", jogador2)
if jogador1 > jogador2:
print("Jogador 1 Venceu")
elif jogador2 > jogador1:
print("Jogador 2 Venceu")
else:
print("Jogadores Empataram")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Newick format validator
# imported in CLI.py
# imports
from re import split
# returns true if the given input is a number
def is_number(string):
try:
float(string)
return True
except ValueError:
return False
# returns the index of the first comma outside parenthesis
# comma separates branches
def find_branch(parsed_tokens):
open = 0
closed = 0
first_comma_index = None
for char_index, char in enumerate(parsed_tokens):
if char == "(":
open += 1
elif char == ")":
closed += 1
if open - closed == 0:
if char == "," \
and first_comma_index is None:
first_comma_index = char_index
return first_comma_index
# tree -> subtree [label] [: length] ";"
# tree -> subtree ";" | branch ";"
def parse_tree(parsed_tokens):
if is_number(parsed_tokens[-1]):
# found a branch with length
return parse_branch(parsed_tokens)
# subtree without label and length
return parse_subtree(parsed_tokens)
# subtree -> leaf | internal
def parse_subtree(parsed_tokens):
try:
if parsed_tokens[0] == '(':
# found an internal node
if parsed_tokens[-1] == ')':
return parse_internal(parsed_tokens)
if ')' not in parsed_tokens:
print("Unbalanced parentheses in %s!" % ''.join(parsed_tokens))
return False
else:
if parse_name(parsed_tokens[-1]):
# found a labelled internal node
return parse_internal(parsed_tokens[:-1])
else:
return False
else:
if ')' in parsed_tokens:
print("Unbalanced parentheses in %s!" % ''.join(parsed_tokens))
return False
# found a leaf
return parse_name(parsed_tokens[0])
except IndexError:
pass
# leaf --> name
# name --> empty | string
def parse_name(name):
# checking whether a string contains a space
if ' ' in name:
print("Error: space in %s." % name)
return False
# checking whether a string contains :
if ':' in name:
print("Error: colon in %s." % name)
return False
# checking whether a string contains (
if '(' in name or ')' in name:
print("Error: unbalanced parentheses in %s." % name)
return False
# checking whether a string contains ;
if ';' in name:
print("Error: semicolon in %s." % name)
return False
return True
# branchset --> branch | branch "," branchset
def parse_branchset(parsed_tokens):
comma = find_branch(parsed_tokens)
if comma is None:
# found a single branch
return parse_branch(parsed_tokens)
# found a branch and a branchset
else:
if parse_branch(parsed_tokens[0:comma]):
# successful parsing
return parse_branchset(parsed_tokens[comma + 1:])
else:
return False
# branch --> subtree length
def parse_branch(parsed_tokens):
# empty branch
if not parsed_tokens:
return True
# length is not empty
try:
if parsed_tokens[-2] == ':':
length_ok = parse_length(parsed_tokens[-1])
# label or subtree are not empty
if parsed_tokens[:-2]:
subtree_ok = parse_subtree(parsed_tokens[:-2])
return length_ok and subtree_ok
else:
return length_ok
except IndexError:
pass
# there is only a subtree
return parse_subtree(parsed_tokens)
# length --> empty | ":" number
def parse_length(number):
if is_number(number):
return True
print("%s is not a number." % number)
return False
# internal --> "(" branchset ")" name
def parse_internal(parsed_tokens):
if parsed_tokens[-1] != ')':
# name is not empty
name_ok = parse_name(parsed_tokens[-1])
if name_ok:
return parse_branchset(parsed_tokens[1:-2])
else:
return False
# controls on balanced parentheses already made
return parse_branchset(parsed_tokens[1:-1])
# first function performing the initial controls
def is_newick(tree):
# dividing the string into tokens, to check them singularly
    tokens = split(r'([A-Za-z]+[^A-Za-z,)]+[A-Za-z]+|[0-9.]*[A-Za-z]+[0-9.]+|[0-9.]+\s+[0-9.]+|[0-9.]+|[A-Za-z]+|\(|\)|;|:|,)', tree)
# removing spaces and empty strings (spaces within labels are still present)
parsed_tokens = list(filter(lambda x: not (x.isspace() or not x), tokens))
# checking whether the tree ends with ;
if parsed_tokens[-1] != ';':
print("Tree without ; at the end.")
return False
# first controls passed, calling the recursive function
else:
del parsed_tokens[-1]
return parse_tree(parsed_tokens)
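# A small self-check sketch (this module is normally imported by CLI.py; the example
# trees are illustrative):
# if __name__ == "__main__":
#     print(is_newick("(A,B,(C,D));"))  # a plain topology-only tree -> True
#     print(is_newick("(A,B,(C,D))"))   # missing the trailing ';'   -> False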
|
#!/usr/bin/python
import sys
import gzip
import xlsxwriter
if len(sys.argv) != 3:
print ("input file required")
exit()
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
enum_req_r = [
'Control',
'Data',
'Request_Control',
'Reissue_Control',
'Response_Data',
'ResponseL2hit_Data',
'ResponseLocal_Data',
'Response_Control',
'Writeback_Data',
'Writeback_Control',
'Broadcast_Control',
'Multicast_Control',
'Forwarded_Control',
'Invalidate_Control',
'Unblock_Control',
'Persistent_Control',
'Completion_Control',
'SPECLD_Control',
'SPECLD_Request_Control',
'SPECLD_Data',
'EXPOSE_Control',
'EXPOSE_Request_Control',
'EXPOSE_Data',
]
req_rows = {
'Control' : 1,
'Data' : 2,
'Request_Control' : 3,
'Reissue_Control' : 4,
'Response_Data' : 5,
'ResponseL2hit_Data' : 6,
'ResponseLocal_Data' : 7,
'Response_Control' : 8,
'Writeback_Data' : 9,
'Writeback_Control' : 10,
'Broadcast_Control' : 11,
'Multicast_Control' : 12,
'Forwarded_Control' : 13,
'Invalidate_Control' : 14,
'Unblock_Control' : 15,
'Persistent_Control' : 16,
'Completion_Control' : 17,
'SPECLD_Control' : 18,
'SPECLD_Request_Control' : 19,
'SPECLD_Data' : 20,
'EXPOSE_Control' : 21,
'EXPOSE_Request_Control' : 22,
'EXPOSE_Data' : 23,
}
enum_req_c = [
"L1Cache",
"L2Cache",
"L3Cache",
"Directory",
"DMA",
"Collector",
"L1Cache_wCC",
"L2Cache_wCC",
"CorePair",
"TCP",
"TCC",
"TCCdir",
"SQC",
"RegionDir",
"RegionBuffer",
"NULL",
]
req_cols = {
"L1Cache": 1,
"L2Cache": 2,
"L3Cache": 3,
"Directory": 4,
"DMA": 5,
"Collector": 6,
"L1Cache_wCC": 7,
"L2Cache_wCC": 8,
"CorePair": 9,
"TCP": 10,
"TCC": 11,
"TCCdir": 12,
"SQC": 13,
"RegionDir": 14,
"RegionBuffer": 15,
"NULL": 16,
}
def process_line(msg):
print 'process msg: ' + msg
msg = " ".join(msg.split())
print 'msg: ' + msg
requestor = (msg.split(" ")[0]).split("::")[1]
data = int(msg.split(" ")[1])
idx_req = msg.find("message_size_type")
idx_del = msg.find("::")
type = int(msg[idx_req+22:idx_del])
print ("type: %s, fr: %s, data: %d" %(type, requestor, data))
return type, requestor, data
def update_dictionary(dic, enum, type, fr, data):
    if enum[type] not in dic:
dic[enum[type]] = {}
print("update %s, %s" % (enum[type], fr))
dic[enum[type]][fr] = data
requests = {}
with open(input_file_name, 'rb') as f:
stat_cnt = 0
for line in f:
if line.find("sim_ticks") != -1:
stat_cnt += 1
if stat_cnt > 1:
break
if line.find("system.ruby.network.message_size_type_req") != -1:
if line.find('total') != -1:
continue
type, requestor, data = process_line(line)
update_dictionary(requests, enum_req_r, type, requestor, data)
print requests
response = {}
with open(input_file_name, 'rb') as f:
stat_cnt = 0
for line in f:
if line.find("sim_ticks") != -1:
stat_cnt += 1
if stat_cnt > 1:
break
if line.find("system.ruby.network.message_size_type_res") != -1:
if line.find('total') != -1:
continue
type, requestor, data = process_line(line)
update_dictionary(response, enum_req_r, type, requestor, data)
print response
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook(output_file_name)
ws = respons = workbook.add_worksheet()
ws.write(0,0, 'REQUEST')
row = 1
for req in enum_req_r:
ws.write(row, 0, req)
row += 1
col = 1
for requestor in enum_req_c:
ws.write(0, col, requestor)
col += 1
for event, req_cnt in requests.iteritems():
for req, num_req in req_cnt.iteritems():
#print event
#print req
#print num_req
ws.write(req_rows[event], req_cols[req], num_req)
ws.write(30,0, 'RESPONSE')
row = 1
for res in enum_req_r:
ws.write(row+30, 0, res)
row += 1
col = 1
for responsor in enum_req_c:
ws.write(0, col, responsor)
col += 1
for event, res_cnt in response.iteritems():
for res, num_res in res_cnt.iteritems():
#print event
#print req
#print num_req
ws.write(req_rows[event]+30, req_cols[res], num_res)
workbook.close()
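# Usage sketch (the script name is hypothetical; the two positional arguments follow
# sys.argv as read at the top of this script):
#   python message_size_stats.py <gem5 stats file> <output .xlsx>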
|
# -*- coding: utf-8 -*-
import unittest
from roman_numerals import RomanNumerals
class TestClass(unittest.TestCase):
def test_case_3(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman('3')
self.assertEqual(roman_number, 'III')
def test_case_4(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman('4')
self.assertEqual(roman_number, 'IV')
def test_case_5(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman('5')
self.assertEqual(roman_number, 'V')
def test_case_1999(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman('1999')
self.assertEqual(roman_number, 'MCMXCIX')
def test_case_unit_4(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman_factor('4', 0)
self.assertEqual(roman_number, 'IV')
def test_case_unit_7(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman_factor('7', 0)
self.assertEqual(roman_number, 'VII')
def test_case_tens_7(self):
roman_numerals = RomanNumerals()
roman_number = roman_numerals.to_roman_factor('7', 1)
self.assertEqual(roman_number, 'LXX')
if __name__ == "__main__":
unittest.main()
|
"""
API MAPPING FOR Slack API V1
"""
mapping_table = {
'content_type': 'application/x-www-form-urlencoded',
'path_prefix': '/api',
'api_test': {
'path': '/api.test',
'valid_params': ['format', 'auth_token']
},
'channels_history': {
'path': '/channels.history',
'valid_params': ['channel','latest','oldest','inclusive','count']
},
'channels_info': {
'path': '/channels.info',
'valid_params': ['channel']
},
'channels_list': {
'path': '/channels.list',
'valid_params': ['exclude_archived']
},
'channels_set_topic': {
'path': '/channels.setTopic',
'valid_params': ['channel','topic']
},
'groups_history': {
'path': '/groups.history',
'valid_params': ['channel','latest','oldest','inclusive','count']
},
'groups_list': {
'path': '/groups.list',
'valid_params': ['exclude_archived']
},
'chat_post_message': {
'path': '/chat.postMessage',
'valid_params': ['channel','text','username','as_user','parse','link_names','attachments','unfurl_links','unfurl_media','username','as_user','icon_url','icon_emoji','thread_ts','reply_broadcast']
},
'chat_delete': {
'path': '/chat.delete',
'valid_params': ['channel','ts']
},
'conversations_history': {
'path': '/conversations.history',
'valid_params': ['channel','latest','oldest','inclusive','limit','cursor']
},
'conversations_info': {
'path': '/conversations.info',
'valid_params': ['channel','include_locale','include_num_members']
},
'conversations_list': {
'path': '/conversations.list',
'valid_params': ['exclude_archived','types','limit','cursor']
},
'conversations_members': {
'path': '/conversations.members',
'valid_params': ['channel','limit','cursor']
},
'conversations_open': {
'path': '/conversations.open',
'valid_params': ['channel','return_im','users']
},
'conversations_set_topic': {
'path': '/conversations.setTopic',
'valid_params': ['channel','topic']
},
'files_delete': {
'path': '/files.delete',
'valid_params': ['file']
},
'files_info': {
'path': '/files.info',
'valid_params': ['file','count','page']
},
'files_list': {
'path': '/files.list',
'valid_params': ['user','channel','ts_from','ts_to','types','count','page']
},
'files_revoke_public_url': {
'path': '/files.revokePublicURL',
'valid_params': ['file']
},
'files_shared_public_url': {
'path': '/files.sharedPublicURL',
'valid_params': ['file']
},
'files_upload': {
'path': '/files.upload',
'valid_params': ['file','content','filetype','filename','title','initial_comment','channels']
},
'emoji_list': {
'path': '/emoji.list'
},
'im_list': {
'path': '/im.list'
},
'im_open': {
'path': '/im.open',
'valid_params': ['user']
},
'mpim_list': {
'path': '/mpim.list'
},
'mpim_open': {
'path': '/mpim.open',
'valid_params': ['users']
},
'pins_add': {
'path': '/pins.add',
'valid_params': ['channel','file','file_comment','timestamp']
},
'pins_list': {
'path': '/pins.list',
'valid_params': ['channel']
},
'pins_remove': {
'path': '/pins.remove',
'valid_params': ['channel','file','file_comment','timestamp']
},
'reactions_add': {
'path': '/reactions.add',
'valid_params': ['name','file','file_comment','channel','timestamp']
},
'reactions_get': {
'path': '/reactions.get',
'valid_params': ['file','file_comment','channel','timestamp','full']
},
'reactions_list': {
'path': '/reactions.list',
'valid_params': ['user','full','count','page']
},
'reactions_remove': {
'path': '/reactions.remove',
'valid_params': ['name','file','file_comment','channel','timestamp']
},
'search_messages': {
'path': '/search.messages',
'valid_params': ['query','sort','sort_dir','highlight','count','page']
},
'users_list': {
'path': '/users.list',
'valid_params': ['presence']
},
}
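# Sketch of how a generic client would typically consume an entry in this table
# (the client itself is not part of this module; 'params' is illustrative):
# entry = mapping_table['chat_post_message']
# url = 'https://slack.com' + mapping_table['path_prefix'] + entry['path']
# payload = {k: v for k, v in params.items() if k in entry['valid_params']}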
|
# -*- coding: utf-8 -*-
import collections
# PyTorch
import torch.nn as nn
###############################################################################
_ACTIVATIONS = {
"leakyrelu": nn.LeakyReLU,
"relu": nn.ReLU,
"sigmoid": nn.Sigmoid,
"tanh": nn.Tanh
}
def get_activation_layer(name):
"""Get an activation layer given its name.
:param name: Name of the activation layer, valid values are: leakyrelu,
relu, sigmoid and tanh.
"""
try:
return _ACTIVATIONS[name]()
except KeyError:
msg = "invalid layer '{}', valid options are: {}"
raise ValueError(
msg.format(name, ", ".join(sorted(_ACTIVATIONS.keys()))))
def soft_update(source, target, tau):
"""Moves the target network values slightly to the values of source."""
for param, target_param in zip(source.parameters(), target.parameters()):
target_param.data.mul_(1.0 - tau).add_(param.data * tau)
def create_mlp(input_size, output_size, hidden_layers,
layer_norm=False, activation="relu", last_activation=None):
"""Creates a multi-layer perceptron network."""
layers_sizes = [input_size]
layers_sizes.extend(hidden_layers)
if hidden_layers:
layers_sizes.append(hidden_layers[-1])
layers_sizes.append(output_size)
layers = []
for i in range(len(layers_sizes) - 1):
layers.append(("linear{}".format(i),
nn.Linear(layers_sizes[i],
layers_sizes[i + 1])))
if i < len(layers_sizes) - 2:
if layer_norm:
layers.append(("layer_norm{}".format(i),
nn.LayerNorm(layers_sizes[i])))
layers.append(("{}{}".format(activation, i),
get_activation_layer(activation)))
elif last_activation is not None:
layers.append(("{}{}".format(last_activation, i),
get_activation_layer(last_activation)))
return nn.Sequential(collections.OrderedDict(layers))
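# A minimal usage sketch (sizes are arbitrary; kept commented so importing this
# module stays side-effect free):
# net = create_mlp(input_size=4, output_size=2, hidden_layers=[64, 64],
#                  layer_norm=True, activation="relu", last_activation="tanh")
# print(net)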
|
#!/usr/bin/env python
# pip install --user tmdbsimple
import argparse
import os
import tmdbsimple as tmdb
APIKEY_FILE = os.path.join(os.getenv("HOME"), "Dropbox", "env", "tmdb", "APIKEY")
with open(APIKEY_FILE,'r') as infile:
    tmdb.API_KEY = infile.read().strip()  # drop the trailing newline from the key file
print(tmdb.API_KEY)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--search", help="search for movie")
#parser.add_argument()
#parser.add_argument()
args = parser.parse_args()
search = tmdb.Search()
response = search.movie(query=args.search)
for movie in search.results:
print(dir(movie))
print(movie)
#import rtsimple as rt
#import argparse
#import os
##tmdb_api_file = os.path.join(os.getenv("HOME"), "Dropbox", "env", "tmdb", "APIKEY")
#rt_api_file = os.path.join(os.getenv("HOME"), "Dropbox", "env", "rotten_tomatoes", "APIKEY")
#with open(rt_api_file,'r') as infile:
#rt.API_KEY = infile.read()
##print(rt.API_KEY)
#parser = argparse.ArgumentParser()
#parser.add_argument("-s", "--search", help="search for movie")
##parser.add_argument()
##parser.add_argument()
#args = parser.parse_args()
#movie = rt.Movies()
#response = movie.search(q=args.search)
#for movie in movie.movies:
#print(dir(movie))
#print(movie)
|
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
import os
import requests
import json
from datetime import datetime
import math
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='czqo?')
vatsimData = []
@bot.event
async def on_ready():
print("Starting up")
activity = discord.Activity(
name="the HF radio", type=discord.ActivityType.listening)
await bot.change_presence(activity=activity)
channel = bot.get_channel(482860026831175690)
await channel.send('Hello! Starting...')
notify_atc.start()
@bot.command(hidden=True)
@commands.has_permissions(administrator=True)
async def send_rules_resources(ctx):
embed1 = discord.Embed(title="**Rules**", colour=discord.Colour(0x80c9))
embed1.add_field(inline=False, name="Discord Terms of Service & Community Guidelines",
value="All members must follow Discord's Community Guidelines and Terms of Service at all times.\nToS — https://discordapp.com/terms\nGuidelines — https://discordapp.com/guidelines")
embed1.add_field(inline=False, name="VATSIM Code of Conduct Applies",
value="The VATSIM CoC applies at all times on all Gander Oceanic communication platforms.\nhttps://www.vatsim.net/documents/code-of-conduct")
embed1.add_field(inline=False, name="Always show respect and common decency to fellow members",
value="We aim to be a community where all people can feel they belong. Therefore, we ask that you show respect to each other at all times. This ties into Article A1 of the Code of Conduct.")
embed1.add_field(inline=False, name="Server invites", value="We ask that you not send invites to Discord servers unrelated to VATSIM with the permission of a staff member. Invites to servers related to the VATSIM community are permitted. Do not send **ANY** invites via DMs unless the person requests it.")
embed1.add_field(inline=False, name="Spam, including images, text, or emotes.",
value="Do not send spam in the server, including images, text, or emotes. ")
embed1.add_field(inline=False, name="Enforcement",
value="These rules are enforced by the <@&482816721280040964> of Gander Oceanic. If required, we may refer behaviour to a Network Supervisor.")
await ctx.send(embed=embed1)
embed2 = discord.Embed(title="**Resources**",
colour=discord.Colour(0x80c9))
embed2.add_field(inline=False, name="Pilot Tools",
value="Check out pilot tools including oceanic clearance/position report assists, natTRAK, and a map of current NAT tracks here:\nhttps://ganderoceanic.com/pilots/")
embed2.add_field(inline=False, name="Policies",
value="Find all Gander Oceanic policies here, including currency policies for controllers:\nhttps://ganderoceanic.com/policies")
embed2.add_field(inline=False, name="Feedback",
value="Have some feedback on how we're doing? Let us know here:\nhttps://vats.im/czqofeedback")
await ctx.send(embed=embed2)
embed3 = discord.Embed(
title="**Controller ATIS Template**", colour=discord.Colour(0x80c9))
embed3.add_field(inline=False, name="2.1.5 of the Controller Policy", value="```\nLINE 2: Gander (or Shanwick) Radio\nLINE 3: Pilot resources: vats.im/czqotools\nLINE 4: Have some feedback? We’d love to hear it! vats.im/czqofeedback\n\nA controller may choose to add additional information on line 2, to the right of the callsign.\nExample: “Gander Radio | Welcome to the OCA!”\n```")
await ctx.send(embed=embed3)
embed4 = discord.Embed(
title="**Approved Frequencies and Logon Positions**", colour=discord.Colour(0x80c9))
embed4.set_image(
url="https://cdn.discordapp.com/attachments/681071305394749458/752813169470341140/unknown.png")
await ctx.send(embed=embed4)
@bot.command(hidden=True)
@commands.has_permissions(administrator=True)
async def send_tim_hortons_msg(ctx):
await ctx.send("Hey stranger! If you can see this message, then you haven't yet linked your discord account with Gander Oceanic. You can do so on your myCZQO :grin:: https://ganderoceanic.ca/my")
@bot.command(hidden=True)
@commands.has_permissions(administrator=True)
async def admin_commands(ctx):
embed = discord.Embed(title="**Admin Commands**",
colour=discord.Colour(0x80c9))
embed.add_field(inline=False, name="czqo?send_rules_resources",
value="Posts rules+resources embeds")
embed.add_field(inline=False, name="czqo?send_tim_hortons_msg",
value="Sends #tim-hortons explanation message")
await ctx.send(embed=embed)
@bot.command()
async def solocerts(ctx):
waiting = await ctx.send("Working on it...")
endpoint = os.getenv("SOLO_CERTS_ENDPOINT")
response = requests.get(endpoint)
if len(response.json()) == 0:
await waiting.delete()
await ctx.send("No solo certifications active")
return
embed = discord.Embed(title="**Solo Certifications**",
colour=discord.Colour(0x80c9))
for cert in response.json():
embed.add_field(inline=False, name="**{0}**".format(
cert['roster_member']['cid']), value="Expires {0}".format(cert['expires']))
await waiting.delete()
await ctx.send(embed=embed)
@bot.command()
async def ping(ctx):
now = datetime.utcnow()
nowTime = datetime.timestamp(now)
reqTime = datetime.timestamp(ctx.message.created_at)
    ping = math.floor((nowTime - reqTime) * 1000)
await ctx.send("🏓Ping, pong! {}ms".format(ping))
@tasks.loop(seconds=300)
async def notify_atc():
channel_id = os.getenv("ATC_ONLINE_CHANNEL")
dataFeedUrl = os.getenv("DATA_FEED_URL")
channel = bot.get_channel(int(channel_id))
newVatsimData = requests.get(dataFeedUrl)
newControllerList = []
for controller in newVatsimData.json()['controllers']:
if (controller['callsign'].startswith('CZQO_') or controller['callsign'].startswith('EGGX_') or controller['callsign'].startswith('NAT_')) and (controller['callsign'].endswith('_CTR') or controller['callsign'].endswith('_DEL') or controller['callsign'].endswith('_FSS')):
newControllerList.append(controller['callsign'])
for controller in newControllerList:
if controller not in vatsimData:
full_controller_data = [c for c in newVatsimData.json(
)['controllers'] if c['callsign'] == controller]
embed = discord.Embed(title="An oceanic controller went online!",
colour=discord.Colour(0x80c9))
embed.add_field(inline=False, name="{} is now online!".format(controller),
value="{0} came online at **{1}** and will be providing service on **{2}**".format(full_controller_data[0]['name'], datetime.utcnow().strftime('%d.%m.%Y %H:%M z'), full_controller_data[0]['frequency']))
await channel.send(embed=embed)
for controller in vatsimData:
if controller not in newControllerList:
embed = discord.Embed(title="An oceanic controller went offline!",
colour=discord.Colour(0x80c9))
embed.add_field(inline=False, name="{} is now offline!".format(controller),
value="They went offline at **{0}**".format(datetime.utcnow().strftime('%d.%m.%Y %H:%M z')))
await channel.send(embed=embed)
vatsimData.clear()
for controller in newControllerList:
vatsimData.append(controller)
bot.run(TOKEN)
|
from typing import TYPE_CHECKING
from . import Base
from .user import HasUser
if TYPE_CHECKING:
from sqlalchemy import Column # noqa
from sqlalchemy import Integer # noqa
from sqlalchemy.orm import RelationshipProperty # noqa
from .user import User # noqa
class Address(Base, HasUser):
pass
|
"""The configuration object
"""
import logging
import os
from copy import deepcopy
from typing import List
from typing import Tuple
from typing import Union
from ..utils.functions import ExitMessage
from ..utils.functions import ExitPrefix
from ..utils.functions import LogMessage
from ..utils.functions import oxfordcomma
from ..utils.functions import shlex_join
from ..utils.serialize import SafeLoader
from ..utils.serialize import yaml
from .definitions import ApplicationConfiguration
from .definitions import Constants as C
from .parser import Parser
class Configurator:
"""the configuration class"""
def __init__(
self,
params: List[str],
application_configuration: ApplicationConfiguration,
apply_previous_cli_entries: Union[List, C] = C.NONE,
initial: bool = False,
):
"""
:param params: A list of parameters e.g. ['-x', 'value']
:param application_configuration: An application specific Config object
:param apply_previous_cli_entries: Apply previous USER_CLI values where the current value
is not a USER_CLI sourced value, a list of entry names
['all'] will apply all previous
:param initial: Save the resulting configuration as the 'initial' configuration
The 'initial' will be used as a source for apply_previous_cli
"""
self._apply_previous_cli_entries = apply_previous_cli_entries
self._config = application_configuration
self._exit_messages: List[ExitMessage] = []
self._messages: List[LogMessage] = []
self._params = params
self._initial = initial
self._sanity_check()
self._unaltered_entries = deepcopy(self._config.entries)
def _sanity_check(self) -> None:
if self._apply_previous_cli_entries is not C.NONE:
if self._initial is True:
raise ValueError("'apply_previous_cli' cannot be used with 'initial'")
if self._config.initial is None:
raise ValueError("'apply_previous_cli' enabled prior to 'initial'")
def _roll_back(self) -> None:
"""In the case of a rollback, log the configuration state
prior to roll back
"""
message = "Configuration errors encountered, rolling back to previous configuration."
self._messages.append(LogMessage(level=logging.WARNING, message=message))
for entry in self._config.entries:
message = f"Prior to rollback: {entry.name} = '{entry.value.current}'"
message += f" ({type(entry.value.current).__name__}/{entry.value.source.value})"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
self._config.entries = self._unaltered_entries
for entry in self._config.entries:
message = f"After rollback: {entry.name} = '{entry.value.current}'"
message += f" ({type(entry.value.current).__name__}/{entry.value.source.value})"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
message = "Configuration rollback complete."
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
def configure(self) -> Tuple[List[LogMessage], List[ExitMessage]]:
"""Perform the configuration
save the original entries, if an error is encountered
restore them
"""
self._config.original_command = self._params
shlex_joined = shlex_join(self._config.original_command)
cmd_message = f"Command provided: '{shlex_joined}'"
self._messages.append(LogMessage(level=logging.DEBUG, message=cmd_message))
self._restore_original()
self._apply_defaults()
self._apply_settings_file()
self._apply_environment_variables()
self._apply_cli_params()
if self._exit_messages:
self._exit_messages.insert(0, ExitMessage(message=cmd_message))
self._roll_back()
return self._messages, self._exit_messages
self._apply_previous_cli_to_current()
self._post_process()
self._check_choices()
if self._exit_messages:
self._exit_messages.insert(0, ExitMessage(message=cmd_message))
self._roll_back()
return self._messages, self._exit_messages
if self._initial:
self._config.initial = deepcopy(self._config)
return self._messages, self._exit_messages
def _argparse_error_handler(self, message: str):
"""callback for argparse error handling to prevent sys.exit
:param message: A message from the parser
:type message: str
"""
self._exit_messages.append(ExitMessage(message=message))
def _restore_original(self) -> None:
"""Since we always operate on the same object
restore the current values back to NOT_SET
"""
for entry in self._config.entries:
if self._initial or entry.change_after_initial:
entry.value.current = C.NOT_SET
entry.value.source = C.NOT_SET
else:
message = f"'{entry.name}' cannot be reconfigured. (restore original)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_defaults(self) -> None:
for entry in self._config.entries:
if self._initial or entry.change_after_initial:
if entry.value.default is not C.NOT_SET:
entry.value.current = entry.value.default
entry.value.source = C.DEFAULT_CFG
else:
message = f"'{entry.name}' cannot be reconfigured. (apply defaults)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_settings_file(self) -> None:
settings_filesystem_path = self._config.internals.settings_file_path
if isinstance(settings_filesystem_path, str):
with open(settings_filesystem_path, "r", encoding="utf-8") as fh:
try:
config = yaml.load(fh, Loader=SafeLoader)
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as exc:
exit_msg = (
f"Settings file found {settings_filesystem_path}, but failed to load it."
)
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = f" error was: '{' '.join(str(exc).splitlines())}'"
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = (
f"Try checking the settings file '{settings_filesystem_path}'"
"and ensure it is properly formatted"
)
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
return
for entry in self._config.entries:
settings_file_path = entry.settings_file_path(self._config.application_name)
path_parts = settings_file_path.split(".")
data = config
try:
for key in path_parts:
data = data[key]
if self._initial or entry.change_after_initial:
entry.value.current = data
entry.value.source = C.USER_CFG
else:
message = f"'{entry.name}' cannot be reconfigured. (settings file)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
except TypeError as exc:
exit_msg = (
"Errors encountered when loading settings file:"
f" {settings_filesystem_path}"
f" while loading entry {entry.name}, attempted: {settings_file_path}."
f"The resulting error was {str(exc)}"
)
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = (
f"Try checking the settings file '{settings_filesystem_path}'"
"and ensure it is properly formatted"
)
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
return
except KeyError:
message = f"{settings_file_path} not found in settings file"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
def _apply_environment_variables(self) -> None:
for entry in self._config.entries:
set_env_var = os.environ.get(entry.environment_variable(self._config.application_name))
if set_env_var is not None:
if self._initial or entry.change_after_initial:
if entry.cli_parameters is not None and entry.cli_parameters.nargs == "+":
entry.value.current = set_env_var.split(",")
else:
entry.value.current = set_env_var
entry.value.source = C.ENVIRONMENT_VARIABLE
else:
message = f"'{entry.name}' cannot be reconfigured. (environment variables)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_cli_params(self) -> None:
parser = Parser(self._config).parser
setattr(parser, "error", self._argparse_error_handler)
parser_response = parser.parse_known_args(self._params)
if parser_response is None:
return
args, cmdline = parser_response
if cmdline:
self._config.entry("cmdline").value.current = cmdline
self._config.entry("cmdline").value.source = C.USER_CLI
for param, value in vars(args).items():
if self._config.entry(param).subcommand_value is True and value is None:
continue
entry = self._config.entry(param)
if self._initial or entry.change_after_initial:
entry.value.current = value
entry.value.source = C.USER_CLI
else:
message = f"'{entry.name}' cannot be reconfigured. (cli params)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _post_process(self) -> None:
delayed = []
normal = []
# Separate normal and delayed entries so they can be processed in that order.
for entry in self._config.entries:
if entry.delay_post_process:
delayed.append(entry)
else:
normal.append(entry)
for entry in normal + delayed:
if self._initial or entry.change_after_initial:
processor = getattr(self._config.post_processor, entry.name, None)
if callable(processor):
messages, errors = processor(entry=entry, config=self._config)
self._messages.extend(messages)
self._exit_messages.extend(errors)
else:
message = f"'{entry.name}' cannot be reconfigured. (post process)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _check_choices(self) -> None:
for entry in self._config.entries:
if entry.cli_parameters and entry.choices:
if entry.value.current not in entry.choices:
self._exit_messages.append(ExitMessage(message=entry.invalid_choice))
choices = [
f"{entry.cli_parameters.short} {str(choice).lower()}"
for choice in entry.choices
]
exit_msg = f"Try again with {oxfordcomma(choices, 'or')}"
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
def _apply_previous_cli_to_current(self) -> None:
"""Apply eligible previous CLI values to current not set by the CLI"""
# _apply_previous_cli_entries must be ALL or a list of entries
if self._apply_previous_cli_entries is not C.ALL and not isinstance(
self._apply_previous_cli_entries,
list,
):
return
current_subcommand = [
entry.value.current for entry in self._config.entries if entry.subcommand_value is True
][0]
previous_subcommand = [
entry.value.current
for entry in self._config.initial.entries
if entry.subcommand_value is True
][0]
for current_entry in self._config.entries:
# retrieve the corresponding previous entry
previous_entry = self._config.initial.entry(current_entry.name)
# skip if not initial and not able to be changed
if not any((self._initial, current_entry.change_after_initial)):
message = f"'{current_entry.name}' cannot be reconfigured (apply previous cli)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
continue
# skip if currently set from the CLI
if current_entry.value.source is C.USER_CLI:
continue
# skip if _apply_previous_cli_entries is a list and the entry isn't in it
if (
isinstance(self._apply_previous_cli_entries, list)
and current_entry.name not in self._apply_previous_cli_entries
):
continue
# skip if the previous entry not eligible for reapplication
if previous_entry.apply_to_subsequent_cli not in [C.ALL, C.SAME_SUBCOMMAND]:
continue
# skip if the same subcommand is required for reapplication
if current_entry.apply_to_subsequent_cli is C.SAME_SUBCOMMAND:
if current_subcommand != previous_subcommand:
continue
# skip if the previous entry was not set by the CLI
if previous_entry.value.source is not C.USER_CLI:
continue
current_entry.value.current = previous_entry.value.current
current_entry.value.source = C.PREVIOUS_CLI
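# A minimal usage sketch (an ApplicationConfiguration instance, here `app_config`,
# is assumed to be built elsewhere in the application):
# configurator = Configurator(params=["--help"], application_configuration=app_config, initial=True)
# messages, exit_messages = configurator.configure()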
|
# Generated by Django 2.0.13 on 2020-04-14 16:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
]
|
from sqlalchemy.orm import sessionmaker
from models import Forecasts, db_connect, create_forecast_table
import logging
class PollenScraperPipeline(object):
def __init__(self):
engine = db_connect()
create_forecast_table(engine)
self.Session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.Session()
forecast = Forecasts(**item)
try:
session.add(forecast)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
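# Scrapy wiring sketch (the dotted path is an assumption about the project layout):
# ITEM_PIPELINES = {"pollen_scraper.pipelines.PollenScraperPipeline": 300}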
|
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
frame = cv2.flip(frame,0)
# write the flipped frame
out.write(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# Release everything once the capture loop is finished
cap.release()
out.release()
cv2.destroyAllWindows()
quit()  # NOTE: everything below this call is unreachable as written
mtcnn = MTCNN(image_size=160, keep_all=True)
resnet = InceptionResnetV1(pretrained='vggface2').eval()
aligned = []
ids = []
link = []
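# NOTE: the remainder of this script is a sketch: `device`, `norm`, `pd`, `id` and
# `current_link` are assumed to be defined elsewhere, and "<photo>" is a placeholder
# for the actual input image(s).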
x_aligned, prob = mtcnn("<photo>", save_path=None, return_prob=True)
if len(prob) == 1 and prob[0] is not None:
    # Confidence of 99% or higher
if prob[0] > 0.99:
aligned.append(x_aligned[0])
ids.append(id)
link.append(current_link)
elif x_aligned is not None:
for k in range(len(prob)):
        # Confidence of 99% or higher
if prob[k] >= 0.99:
aligned.append(x_aligned[k])
ids.append(id)
link.append(current_link)
aligned = torch.stack(aligned).to(device)
embeddings = resnet(aligned).detach().cpu().tolist() # numpy()
dists = [[norm(e1 - e2) for e2 in embeddings] for e1 in embeddings]
print(pd.DataFrame(dists, columns=ids, index=ids))
|
#!/usr/local/bin/python3
import re, sys
HEADER_FILE = 'dss_capi/dss_capi.h'
_last_comment = ''
_is_in_comment = False
def decode_func(line):
global _last_comment, _is_in_comment
if '/*' in line:
_is_in_comment = True
_last_comment = line.replace('/*', '') + ' '
elif '*/' in line:
_is_in_comment = False
_last_comment += line.replace('*/', '') + ' '
elif _is_in_comment:
_last_comment += line + ' '
r = re.findall('^(\S+) (\S+)\((.+)\);$', line)
if len(r) > 0:
r = r[0]
# Get return argument
c_return = r[0]
# Get the function name
c_function = r[1]
# Get the input arguments
c_arguments = r[2]
c_num_arguments = len(c_arguments.split(','))
# Make it a CamelCase function name for Swift
swift_function = list(filter(lambda x: len(x) > 0, c_function.split('_')))
swift_function = ''.join([l[0].upper() + l[1:] for l in swift_function])
swift_function = swift_function.replace('DSS', '')
func_def = '\n/**\n'
# First add documentation (if there is any)
if len(_last_comment) > 0:
func_def += _last_comment.rstrip().lstrip() + '.\n'
_last_comment = ''
func_def += 'The C function called is: ```' + c_return + ' ' + c_function + '(' + c_arguments + ');```\n*/\n'
if c_return == 'void': # When not returning anything
if c_num_arguments == 1: # When only accepting 1 argument
if c_arguments == 'void': # When there is no argument
func_def += 'func ' + swift_function + '() {\n' + \
c_function + '()\n}\n'
else:
r = re.findall('^(\S+) (\S+)$', c_arguments)
if len(r) > 0:
r = r[0]
ptr_depth = len(list(filter(lambda x: x == '*', r[1])))
if ptr_depth == 0:
if r[0] == 'char*': # When there is a String
func_def += 'func ' + swift_function + '(_ value: String) {\n' + \
c_function + '(DSS.getPointer(to: value))\n}\n'
elif r[0] == 'double': # When there is a Double
func_def += 'func ' + swift_function + '(_ value: Double) {\n' + \
c_function + '(value)\n}\n'
elif r[0] == 'uint16_t': # When there is a UInt16
func_def += 'func ' + swift_function + '(_ value: Int) {\n' + \
c_function + '(UInt16(value))\n}\n'
elif r[0] == 'int32_t': # When there is a Int32
func_def += 'func ' + swift_function + '(_ value: Int) {\n' + \
c_function + '(Int32(value))\n}\n'
elif c_num_arguments == 2: # When accepting 2 arguments
r = re.findall('^(\S+).+, (\S+).+$', c_arguments)
if len(r) > 0:
r = r[0]
if r[0] == 'char***' and r[1] == 'int32_t*':
func_def += 'func ' + swift_function + '() -> [String] {\n' + \
'return DSS.getStringArray(' + c_function + ')\n}\n'
elif r[0] == 'double**' and r[1] == 'int32_t*':
func_def += 'func ' + swift_function + '() -> [Double] {\n' + \
'return DSS.getDoubleArray(' + c_function + ')\n}\n'
elif r[0] == 'int32_t**'and r[1] == 'int32_t*':
func_def += 'func ' + swift_function + '() -> [Int] {\n' + \
'return DSS.getIntArray(' + c_function + ')\n}\n'
elif r[0] == 'int8_t**' and r[1] == 'int32_t*':
func_def += 'func ' + swift_function + '() -> [Int] {\n' + \
'return DSS.getByteArray(' + c_function + ')\n}\n'
elif c_num_arguments == 3: # When accepting 3 c_arguments
r = re.findall('^(\S+).+, (\S+).+, (\S+).+$', c_arguments)
if len(r) > 0:
r = r[0]
if r[0] == 'double**' and r[1] == 'int32_t*' and r[2] == 'int32_t':
func_def += 'func ' + swift_function + '(_ value: Int) -> [Double] {\n' + \
'return DSS.getDoublePhaseArray(' + c_function + ', Int32(value))\n}\n'
elif c_return == 'int32_t' or c_return == 'uint16_t': # When returning an Int
if c_num_arguments == 1: # When only accepting 1 argument
if c_arguments == 'void': # When there is no argument
func_def += 'func ' + swift_function + '() -> Int {\n' + \
'return Int(' + c_function + '())\n}\n'
else:
r = re.findall('^(\S+) (\S+)$', c_arguments)
if len(r) > 0:
r = r[0]
ptr_depth = len(list(filter(lambda x: x == '*', r[1])))
if ptr_depth == 0:
if r[0] == 'int32_t': # When there is an Int
func_def += 'func ' + swift_function + '(_ value: Int) -> Int {\n' + \
'return Int(' + c_function + '(Int32(value)))\n}\n'
elif c_return == 'char*': # When returning a String?
if c_num_arguments == 1: # When only accepting 1 argument
if c_arguments == 'void': # When there is no argument
func_def += 'func ' + swift_function + '() -> String? {\n' + \
'return DSS.getString(from: ' + c_function + ')\n}\n'
else:
r = re.findall('^(\S+) (\S+)$', c_arguments)
if len(r) > 0:
r = r[0]
ptr_depth = len(list(filter(lambda x: x == '*', r[1])))
if ptr_depth == 0:
if r[0] == 'int32_t': # When there is an Int
func_def += 'func ' + swift_function + '(_ value: Int) -> String? {\n' + \
'return DSS.getString(from: ' + c_function + ', for: value)\n}\n'
elif c_return == 'double': # When returning a Double
if c_num_arguments == 1: # When only accepting 1 argument
if c_arguments == 'void': # When there is no argument
func_def += 'func ' + swift_function + '() -> Double {\n' + \
'return ' + c_function + '()\n}\n'
func_def = func_def.split('\n')
is_inside = False
for i in range(len(func_def)):
if '}' in func_def[i]:
is_inside = False
if is_inside:
func_def[i] = ' ' + func_def[i]
continue
elif '{' in func_def[i]:
is_inside = True
func_def[i] = ' ' + func_def[i]
func_def = '\n'.join(func_def)
# Return function
return func_def
return ''
def main():
if len(sys.argv) != 3:
print('python3 convert.py path/to/c_header.h path/to/c_class.swift')
exit()
HEADER_FILE = sys.argv[1]
SWIFT_FILE = sys.argv[2]
print('Converting the dss c-header')
# Opens the header file
dss_file = open(HEADER_FILE, 'r')
# Loads content and breaks it into lines
dss_header = dss_file.read()
# Cleanup immediately -> close header file
dss_file.close()
# Remove DSS_CAPI_V7_DLL wherever it exists
dss_header = dss_header.replace('DSS_CAPI_V7_DLL', '')
# Make a list
dss_header_list = dss_header.split('\n')
# Remove all leading white spaces on all lines
dss_header_list = [l.lstrip() for l in dss_header_list]
# Remove all empty lines
    dss_header_list = list(filter(lambda x: len(x) != 0, dss_header_list))
# Remove all '#define', '#include' etc. and 'extern C {' thingies
dss_header_list = list(filter(lambda x: '#' not in x and 'extern' not in x, dss_header_list))
# Merge multi-line functions into one lines
i_func = 0
i_in_comment = False
i_in_func = False
for i in range(len(dss_header_list)):
if '*/' in dss_header_list[i]:
i_in_comment = False
continue
elif i_in_comment or '/*' in dss_header_list[i]:
i_in_comment = True
continue
if i_in_func:
if dss_header_list[i][-1] == ',':
dss_header_list[i_func] += dss_header_list[i] + ' '
else:
dss_header_list[i_func] += dss_header_list[i]
if ')' in dss_header_list[i]:
i_in_func = False
if i_in_func:
dss_header_list[i] = ''
if '(' in dss_header_list[i] and ')' not in dss_header_list[i]:
i_in_func = True
i_func = i
# Remove all empty lines after combining multi-line functions
    dss_header_list = list(filter(lambda x: len(x) != 0, dss_header_list))
# Open template file
swift_template_file = open('dss_template.txt', 'r')
# Load in the template
swift_template = swift_template_file.read()
# Cleanup immediately -> close template file
swift_template_file.close()
# Make template list -> [head, foot]
swift_template_list = swift_template.split('##########')
# Start writing to the output file
fout = open(SWIFT_FILE, 'w')
# Write the head first
fout.write(swift_template_list[0])
try:
[fout.write(decode_func(l)) for l in dss_header_list]
except Exception as e:
        print(e)
# Write the foot last
fout.write(swift_template_list[1])
fout.close()
if __name__ == '__main__':
    # Run main after loading the whole script
main()
|
from __future__ import absolute_import
from __future__ import print_function
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.losses import categorical_crossentropy
from src.keras.dataset import DataGenerator
from src.keras.model import _3d_cnn_model
if __name__ == "__main__":
num_classes = 10
input_shape = (20, 80, 40, 1)
epochs = 1
train_iter = DataGenerator('dummy_dir', 10)
model = _3d_cnn_model(input_shape, num_classes)
opt = Adam()
loss = categorical_crossentropy
model.compile(loss=loss,
optimizer=opt,
metrics=['accuracy'])
model.fit_generator(train_iter,
epochs=epochs,
callbacks=None,
)
|
import copy
from urllib import parse
import requests
from affilitest import endpoints
API_RESP_ERROR = 'API response error. Status code {} '
class AffiliTest(object):
def __init__(self, api_key=None):
self.api_key = api_key
def login(self, email, password):
return self._post(endpoints.LOGIN, {'email' : email, 'password' : password})
def logout(self):
return self._get(endpoints.LOGOUT)
def app_info(self, url = None, package = None, country = None):
if url is None and package is None:
raise APIException('No parameters were passed to appInfo', endpoints.APPINFO)
if url is not None and package is not None:
raise APIException('Only one parameter should be passed', endpoints.APPINFO)
if url is not None:
return self._app_info_fetch(url, 'url')
return self._app_info_fetch(package, 'package', country)
def _app_info_fetch(self, data, reqType, country = None):
payload = {}
payload[reqType] = data
if country:
payload['country'] = country
return self._get(endpoints.APPINFO, payload)['data']
def test(self, url, country, device, meta=False):
data = self._post(endpoints.TEST, {
'url' : url,
'country' : country,
'device' : device
})
if meta:
return data
return data['data']
def compare_to_preview(self, url, preview_url, country, device, meta=False):
data = self._post(endpoints.COMPARE, {
'url' : url,
'previewURL' : preview_url,
'country' : country,
'device' : device
})
if meta:
return data
return data['data']
def calls_left(self):
return self._get(endpoints.CALLS_LEFT)['data']
def clone(self):
api_clone = copy.deepcopy(self)
api_clone._request_session = requests.Session()
api_clone._request_session.cookies = self._request_session.cookies
return api_clone
def _post(self, endpoint, payload):
self._last_response = self.requests_session().post(
endpoint,
data = payload,
headers = self._auth_headers()
)
try:
res_data = self._last_response.json()
except Exception as e:
raise APIException(API_RESP_ERROR.format(self._last_response.status_code), endpoint)
if res_data['error']:
raise APIException(res_data['error'], endpoint)
return res_data
def _get(self, endpoint, payload=None):
url = endpoint
if payload is not None:
url = endpoint + '?' + parse.urlencode(payload)
self._last_response = self.requests_session().get(url, headers=self._auth_headers())
res_data = self._last_response.json()
if res_data['error']:
raise APIException(res_data['error'], endpoint)
return res_data
def _auth_headers(self):
if self.api_key:
return {'Authorization': 'AT-API ' + self.api_key}
return {}
def last_response(self):
return self._last_response
def requests_session(self):
if hasattr(self, '_request_session'):
return self._request_session
self._request_session = requests.Session()
return self._request_session
class APIException(Exception):
def __init__(self, error, endpoint):
super(APIException, self).__init__(error, endpoint)
self.endpoint = endpoint
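# Illustrative usage sketch (not part of the original client): the API key,
# URL and output handling below are placeholders assumed purely for demonstration.
if __name__ == '__main__':
    client = AffiliTest(api_key='YOUR-API-KEY')  # hypothetical key
    try:
        info = client.app_info(url='https://play.google.com/store/apps/details?id=com.example.app')
        print(info)
        print('Calls left:', client.calls_left())
    except APIException as e:
        print('API error at {0}: {1}'.format(e.endpoint, e))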
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torchdiffeq import odeint_adjoint as odeint
from exact_solver import solver
class MLP(nn.Module):
def __init__(self, hidden_size, y1=1.0):
super(MLP, self).__init__()
self.fc1 = nn.Linear(1, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 1)
self.y1 = y1
def _f(self, x):
out = F.softplus(self.fc1(x))
out = F.softplus(self.fc2(out))
out = self.fc3(out)
return out.sum()
def forward(self, x):
'''
y(0) = 0
y(1) = y1
'''
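        # The return expression below is a boundary-matching ansatz:
        #   at x = 0: f(0) - (f(0) + y1)*1 - f(1)*0 + y1 = 0
        #   at x = 1: f(1) - (f(0) + y1)*0 - f(1)*1 + y1 = y1
        # so y(0) = 0 and y(1) = y1 hold exactly, regardless of the network weights.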
f0 = self._f(torch.tensor([0.0]))
f1 = self._f(torch.tensor([1.0]))
return self._f(x) - (f0 + self.y1)*(1.0-x) - f1*x + self.y1
def value_and_grad(self, x):
y = self.forward(x)
return y, torch.autograd.grad(y, x, grad_outputs=torch.ones(x.shape[0]), create_graph=True)[0]
class Brachistochrone(nn.Module):
def __init__(self, g, v0, net):
super(Brachistochrone, self).__init__()
self.v0 = v0
self.g = g
self.net = net
def forward(self, x, t):
with torch.enable_grad():
y, dydx = self.net.value_and_grad(x.view(-1).detach().requires_grad_())
return torch.sqrt((1+dydx**2)/(2*self.g*y+ self.v0**2))
def plot(model,para):
plt.cla()
xlist = torch.linspace(0.0, 1.0, 21)
ylist = [model.net(torch.tensor([x])) for x in xlist]
plt.plot(xlist.numpy(), ylist, lw=2,label='learned curve')
plt.plot([0.0, 1.0], [0.0, model.net.y1], 'r*', ms=20)
plt.gca().invert_yaxis()
tlist = np.linspace(para[2],para[3],21)
xlist = para[0]*(tlist- np.sin(tlist)) - para[1]
ylist = para[0]*(1 - np.cos(tlist)) -para[4]
plt.plot(xlist,ylist,lw=2,label='exact')
plt.legend(loc='upper right')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.draw()
plt.pause(0.01)
if __name__ == '__main__':
g = 10.0 #gravity
v0 = 1.0 #initial velocity
nh = 32 #number of hidden neurons
    y1 = 1.0  #final y coordinate
para = np.append(solver(v0,g,y1),v0**2/(2*g)) # exact solution as a reference
tbest = (para[3]-para[2])*np.sqrt(para[0]/g)
model = Brachistochrone(g, v0, MLP(nh, y1))
optimizer = optim.Adam(model.parameters(), lr=1E-2)
import matplotlib.pyplot as plt
# Set up figure.
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
for epoch in range(100):
optimizer.zero_grad()
t = odeint(model, torch.tensor([0.0]), torch.tensor([0.0, 1.0]))
loss = t[1] - t[0]
loss.backward()
optimizer.step()
print ("step = %d"%epoch, "time = %.5f"%loss.item(), " (exact = %.5f)"%tbest)
plot(model,para)
|
#!/usr/bin/env python
"""
This script performs the first step of initializing the global ocean. This
includes:
1. combining Natural Earth land coverage north of 60S with Antarctic
ice coverage or grounded ice coverage from Bedmap2.
2. combining transects defining critical passages.*
3. combining points used to seed a flood fill of the global ocean.
4. create masks from land coverage.
5. create masks from transects.*
6. cull cells based on land coverage but with transects present
7. create flood-fill mask based on seeds
8. cull cells based on flood-fill mask
9. create masks from transects on the final culled mesh*
* skipped if flag --with_critical_passages not present
Optionally, the -p flag provides the path to the geometric_features
repository, which is assumed to be the current directory by default.
Also, the optional --with_cavities flag indicates that ice-shelf cavities
are present and the grounded-ice mask from Bedmap2 should be used.
The optional --with_critical_passages flag indicates that critical
passages are to be opened. Otherwise, steps 2, 5 and 9 are skipped
"""
import os
import os.path
import subprocess
from optparse import OptionParser
def removeFile(fileName):
try:
os.remove(fileName)
except OSError:
pass
parser = OptionParser()
parser.add_option("--with_cavities", action="store_true", dest="with_cavities")
parser.add_option("--with_critical_passages", action="store_true",
dest="with_critical_passages")
parser.add_option("-p", "--geom_feat_path", type="string", dest="path",
default="geometric_features",
help="Path to the geometric_features repository.")
options, args = parser.parse_args()
path = options.path
landCoverage = '{}/natural_earth/region/Land_Coverage/' \
'region.geojson'.format(path)
landCoverageMask = '{}/ocean/region/Global_Ocean_90S_to_60S/' \
'region.geojson'.format(path)
removeFile('land_coverage.geojson')
# mask the land coverage to exclude the region below 60S
args = ['{}/difference_features.py'.format(path),
'-f', landCoverage,
'-m', landCoverageMask,
'-o', 'land_coverage.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# add the appropriate land coverage below 60S (either all ice or grounded ice)
if options.with_cavities:
antarcticLandCoverage = '{}/bedmap2/region/AntarcticGroundedIceCoverage/' \
'region.geojson'.format(path)
else:
antarcticLandCoverage = '{}/bedmap2/region/AntarcticIceCoverage/' \
'region.geojson'.format(path)
args = ['{}/merge_features.py'.format(path), '-f', antarcticLandCoverage,
'-o', 'land_coverage.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# create the land mask based on the land coverage
# Run command is:
# ./MpasMaskCreator.x base_mesh.nc land_mask.nc -f land_coverage.geojson
args = ['./MpasMaskCreator.x', 'base_mesh.nc', 'land_mask.nc',
'-f', 'land_coverage.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# create seed points for a flood fill of the ocean
# use all points in the ocean directory, on the assumption that they are, in
# fact, in the ocean
removeFile('seed_points.geojson')
args = ['{}/merge_features.py'.format(path),
'-d', '{}/ocean/point'.format(path),
'-t', 'seed_point',
'-o', 'seed_points.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
if options.with_critical_passages:
# merge transects for critical passages into critical_passages.geojson
removeFile('critical_passages.geojson')
args = ['{}/merge_features.py'.format(path),
'-d', '{}/ocean/transect'.format(path),
'-t', 'Critical_Passage',
'-o', 'critical_passages.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# create masks from the transects
# Run command is:
# ./MpasMaskCreator.x base_mesh.nc critical_passages_mask.nc
# -f critical_passages.geojson
args = ['./MpasMaskCreator.x', 'base_mesh.nc', 'critical_passages_mask.nc',
'-f', 'critical_passages.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# cull the mesh based on the land mask and keeping critical passages open
# Run command is:
# ./MpasCellCuller.x base_mesh.nc culled_mesh.nc -m land_mask.nc
# -p critical_passages_mask.nc
args = ['./MpasCellCuller.x', 'base_mesh.nc', 'culled_mesh.nc',
'-m', 'land_mask.nc', '-p', 'critical_passages_mask.nc']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
else:
# cull the mesh based on the land mask
# Run command is:
# ./MpasCellCuller.x base_mesh.nc culled_mesh.nc -m land_mask.nc
args = ['./MpasCellCuller.x', 'base_mesh.nc', 'culled_mesh.nc',
'-m', 'land_mask.nc']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# create a mask for the flood fill seed points
# Run command is:
# ./MpasMaskCreator.x culled_mesh.nc seed_mask.nc -s seed_points.geojson
args = ['./MpasMaskCreator.x', 'culled_mesh.nc', 'seed_mask.nc',
'-s', 'seed_points.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
# cull the mesh a second time using a flood fill from the seed points
# Run command is:
# ./MpasCellCuller.x culled_mesh.nc culled_mesh_final.nc -i seed_mask.nc
args = ['./MpasCellCuller.x', 'culled_mesh.nc', 'culled_mesh_final.nc',
'-i', 'seed_mask.nc']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
if options.with_critical_passages:
# make a new version of the critical passages mask on the culled mesh
# Run command is:
# ./MpasMaskCreator.x culled_mesh_final.nc critical_passages_mask_final.nc
# -f critical_passages.geojson
args = ['./MpasMaskCreator.x', 'culled_mesh_final.nc',
'critical_passages_mask_final.nc',
'-f', 'critical_passages.geojson']
print "running", ' '.join(args)
subprocess.check_call(args, env=os.environ.copy())
|
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
        num1 = l1
        num2 = l2
        carry = 0
        result = None
        resultList = None
        # Walk both lists until every digit and any final carry has been consumed.
        while num1 is not None or num2 is not None or carry:
            val1 = num1.val if num1 is not None else 0
            val2 = num2.val if num2 is not None else 0
            tempSum = val1 + val2 + carry
            carry = tempSum // 10
            if resultList is None:
                resultList = ListNode(tempSum % 10)
                result = resultList
            else:
                resultList.next = ListNode(tempSum % 10)
                resultList = resultList.next
            num1 = num1.next if num1 is not None else None
            num2 = num2.next if num2 is not None else None
        return result
l = l1 = ListNode(2)
l1.next = ListNode(4)
l1 = l1.next
l1.next = ListNode(3)
l1 = l1.next
m = l2 = ListNode(7)
l2.next = ListNode(0)
l2 = l2.next
l2.next = ListNode(8)
l2 = l2.next
s = Solution()
s.addTwoNumbers(l, m)
|
__title__ = "betconnect"
__description__ = "A betconnect API client"
__url__ = "https://github.com/betcode-org/betconnect"
__version__ = "0.1.2"
__author__ = "Oliver Varney"
__license__ = "MIT"
|
from pyansys.examples.examples import *
|
# -*- coding: utf-8 -*-
"""
Test Redgrease Config parameters.
"""
__author__ = "Anders Åström"
__contact__ = "anders@lyngon.com"
__copyright__ = "2021, Lyngon Pte. Ltd."
__licence__ = """The MIT License
Copyright © 2021 Lyngon Pte. Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the “Software”), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import redis.exceptions
from redgrease import RedisGears
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [1000, 1001])
def get_set_by_name(
rg: RedisGears, name: str, orig_val, new_val, case: str, read_only: bool = False
):
"""Helper test function for repetitive testing of getters and setters
based on config name strings.
Note: it is assumed that the config has been set to the 'orig_val'
prior to calling this function.
Args:
rg (redgrease.client.RedisGears): redis instance with gears
name (str): Name of config option
orig_val (Any): One config value to try
new_val (Any): Another config value to write
case (str): Either "upper", "lower" or anything Falsy.
Indicates if the config name should be transformed
                to uppercase, lowercase or not at all
read_only (bool): Indicates that write operations should
be expected not to go through
Raises:
ValueError: [description]
"""
# Get and Set by name
if not case:
n = name
elif case == "upper":
n = name.upper()
elif case == "lower":
n = name.lower()
else:
raise ValueError(f"Unknown string case {case}")
dict_val = rg.gears.config.get(n)
assert isinstance(dict_val, dict), f"Get dict, by '{n}' "
assert dict_val[n] == orig_val
if read_only:
assert (
rg.gears.config.set({n: new_val}) is False
), f"Set dict, by '{n}' not allowed"
final_val = orig_val
else:
assert rg.gears.config.set({n: new_val}) is True, f"Set dict, by '{n}'"
final_val = new_val
assert (
rg.gears.config.get_single(n) == final_val
), f"Get single value, by string '{n}'"
def raw(value):
"""Returns the expected raw (bytes) version of a value"""
# Bools are for some reason not supported by the Redis client,
# but the logical translation would be as if it was int, so
# that is how redgrease.client.Redis/RedisGears will handle bools
if isinstance(value, bool):
value = int(value)
if isinstance(value, (int, float)):
value = str(value)
if isinstance(value, str):
value = value.encode()
if isinstance(value, bytes):
return value
raise TypeError(
f"Value {value} of type {type(value)} does not have a raw representation. AFAIK"
)
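# For reference, the conversion rules above imply, for example:
#   raw(True)  -> b'1'
#   raw(42)    -> b'42'
#   raw(3.14)  -> b'3.14'
#   raw('abc') -> b'abc'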
# ########################################## #
# Test of Read- and Writeable Configurations #
# ########################################## #
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [1000, 1001])
def test_MaxExecutions(rg: RedisGears, new_val, case):
name = "MaxExecutions"
read_response_type = int
orig_val = rg.gears.config.MaxExecutions
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.MaxExecutions = new_val
assert rg.gears.config.MaxExecutions == new_val, f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(MaxExecutions=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [100, 101])
def test_MaxExecutionsPerRegistration(rg: RedisGears, new_val, case):
name = "MaxExecutionsPerRegistration"
read_response_type = int
orig_val = rg.gears.config.MaxExecutionsPerRegistration
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.MaxExecutionsPerRegistration = new_val
assert (
rg.gears.config.MaxExecutionsPerRegistration == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(MaxExecutionsPerRegistration=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [True, False])
def test_ProfileExecutions(rg: RedisGears, new_val, case):
name = "ProfileExecutions"
read_response_type = bool
orig_val = rg.gears.config.ProfileExecutions
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.ProfileExecutions = new_val
assert (
rg.gears.config.ProfileExecutions == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(ProfileExecutions=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [True, False])
def test_PythonAttemptTraceback(rg: RedisGears, new_val, case):
name = "PythonAttemptTraceback"
read_response_type = bool
orig_val = rg.gears.config.PythonAttemptTraceback
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.PythonAttemptTraceback = new_val
assert (
rg.gears.config.PythonAttemptTraceback == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(PythonAttemptTraceback=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [5, 6])
def test_ExecutionMaxIdleTime(rg: RedisGears, new_val, case):
name = "ExecutionMaxIdleTime"
read_response_type = int
orig_val = rg.gears.config.ExecutionMaxIdleTime
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.ExecutionMaxIdleTime = new_val
assert (
rg.gears.config.ExecutionMaxIdleTime == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(ExecutionMaxIdleTime=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [30000, 30001])
def test_PythonInstallReqMaxIdleTime(rg: RedisGears, new_val, case):
name = "PythonInstallReqMaxIdleTime"
read_response_type = int
orig_val = rg.gears.config.PythonInstallReqMaxIdleTime
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.PythonInstallReqMaxIdleTime = new_val
assert (
rg.gears.config.PythonInstallReqMaxIdleTime == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(PythonInstallReqMaxIdleTime=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [3, 4])
def test_SendMsgRetries(rg: RedisGears, new_val, case):
name = "SendMsgRetries"
read_response_type = int
orig_val = rg.gears.config.SendMsgRetries
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
rg.gears.config.SendMsgRetries = new_val
assert (
rg.gears.config.SendMsgRetries == new_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(SendMsgRetries=orig_val) is True
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case)
# ################################ #
# Test of Read-Only Configurations #
# ################################ #
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [True, False])
def test_DownloadDeps(rg: RedisGears, new_val, case):
name = "DownloadDeps"
read_response_type = bool
orig_val = rg.gears.config.DownloadDeps
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.DownloadDeps = new_val
assert (
rg.gears.config.DownloadDeps == orig_val
), f"Set '{name}' by Property not updated"
assert (
rg.gears.config.set(DownloadDeps=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", ["www.lyngon.com/gears.tgz"])
def test_DependenciesUrl(rg: RedisGears, new_val, case):
name = "DependenciesUrl"
read_response_type = str # URL like actually
orig_val = rg.gears.config.DependenciesUrl
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.DependenciesUrl = new_val
assert (
rg.gears.config.DependenciesUrl == orig_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(DependenciesUrl=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", ["Hashibashi"])
def test_DependenciesSha256(rg: RedisGears, new_val, case):
name = "DependenciesSha256"
read_response_type = str
orig_val = rg.gears.config.DependenciesSha256
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.DependenciesSha256 = new_val
assert (
rg.gears.config.DependenciesSha256 == orig_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(DependenciesSha256=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", ["/home/bob/pictures"])
def test_PythonInstallationDir(rg: RedisGears, new_val, case):
name = "PythonInstallationDir"
read_response_type = str # Path
orig_val = rg.gears.config.PythonInstallationDir
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.PythonInstallationDir = new_val
assert (
rg.gears.config.PythonInstallationDir == orig_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(PythonInstallationDir=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [1000, 1001])
def test_CreateVenv(rg: RedisGears, new_val, case):
name = "CreateVenv"
read_response_type = bool
orig_val = rg.gears.config.CreateVenv
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.CreateVenv = new_val
assert rg.gears.config.CreateVenv == orig_val, f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(CreateVenv=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
@pytest.mark.parametrize("case", ["", "upper", "lower"])
@pytest.mark.parametrize("new_val", [1000, 1001])
def test_ExecutionThreads(rg: RedisGears, new_val, case):
name = "ExecutionThreads"
read_response_type = int
orig_val = rg.gears.config.ExecutionThreads
assert isinstance(
orig_val, read_response_type
), f"Get '{name}' by Property of type {read_response_type}"
with pytest.raises(AttributeError):
rg.gears.config.ExecutionThreads = new_val
assert (
rg.gears.config.ExecutionThreads == orig_val
), f"Set '{name}' by Property updated"
assert (
rg.gears.config.set(ExecutionThreads=orig_val) is False
), f"Set '{name}' by Argument"
get_set_by_name(rg, name, orig_val, new_val, case, read_only=True)
# ################################## #
# Test of Erroneous Usage and Other #
# ################################## #
@pytest.mark.parametrize("new_val", ["Meaning", 42, True, 3.14])
def test_UnknownConfigName(rg: RedisGears, var, new_val):
name = "ThisConfigDoesNotExist"
with pytest.raises(AttributeError):
rg.gears.config.ThisConfigDoesNotExist
with pytest.raises(AttributeError):
rg.gears.config.ThisConfigDoesNotExist = new_val
with pytest.raises(AttributeError):
rg.gears.config.ThisConfigDoesNotExist
    attr_name = var(name)  # For setting/getting by string, we'll use a unique name
assert rg.gears.config.set({attr_name: new_val})
assert rg.gears.config.get_single(attr_name) == raw(new_val)
# Interestingly, Gears settings are allowed to be empty strings,
# so Redgrease will allow that too.
@pytest.mark.parametrize("new_val", [1])
@pytest.mark.parametrize("config_name", [None, ..., True, object(), int])
def test_InvalidConfigName(rg: RedisGears, config_name, new_val):
with pytest.raises(redis.exceptions.DataError):
rg.gears.config.set({config_name: new_val})
with pytest.raises(KeyError):
assert rg.gears.config.get_single(config_name) == raw(new_val)
|
#############################################################################
##
## Copyright (C) 2021 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide6.QtCore import Qt, QPoint, QRect, QSize, Property, Slot
from PySide6.QtGui import QMouseEvent, QPainter, QPen
from PySide6.QtWidgets import QWidget
EMPTY = '-'
CROSS = 'X'
NOUGHT = 'O'
DEFAULT_STATE = "---------"
class TicTacToe(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self._state = DEFAULT_STATE
self._turn_number = 0
def minimumSizeHint(self):
return QSize(200, 200)
def sizeHint(self):
return QSize(200, 200)
def setState(self, new_state):
self._turn_number = 0
self._state = DEFAULT_STATE
for position in range(min(9, len(new_state))):
mark = new_state[position]
if mark == CROSS or mark == NOUGHT:
self._turn_number += 1
self._change_state_at(position, mark)
position += 1
self.update()
def state(self):
return self._state
@Slot()
def clear_board(self):
self._state = DEFAULT_STATE
self._turn_number = 0
self.update()
def _change_state_at(self, pos, new_state):
self._state = (self._state[:pos] + new_state
+ self._state[pos + 1:])
def mousePressEvent(self, event):
if self._turn_number == 9:
self.clear_board()
return
for position in range(9):
cell = self._cell_rect(position)
if cell.contains(event.position().toPoint()):
if self._state[position] == EMPTY:
new_state = CROSS if self._turn_number % 2 == 0 else NOUGHT
self._change_state_at(position, new_state)
self._turn_number += 1
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.setPen(QPen(Qt.darkGreen, 1))
painter.drawLine(self._cell_width(), 0,
self._cell_width(), self.height())
painter.drawLine(2 * self._cell_width(), 0,
2 * self._cell_width(), self.height())
painter.drawLine(0, self._cell_height(),
self.width(), self._cell_height())
painter.drawLine(0, 2 * self._cell_height(),
self.width(), 2 * self._cell_height())
painter.setPen(QPen(Qt.darkBlue, 2))
for position in range(9):
cell = self._cell_rect(position)
if self._state[position] == CROSS:
painter.drawLine(cell.topLeft(), cell.bottomRight())
painter.drawLine(cell.topRight(), cell.bottomLeft())
elif self._state[position] == NOUGHT:
painter.drawEllipse(cell)
painter.setPen(QPen(Qt.yellow, 3))
        for position in range(0, 9, 3):  # check the three rows: 0-2, 3-5, 6-8
if (self._state[position] != EMPTY
and self._state[position + 1] == self._state[position]
and self._state[position + 2] == self._state[position]):
y = self._cell_rect(position).center().y()
painter.drawLine(0, y, self.width(), y)
self._turn_number = 9
for position in range(3):
if (self._state[position] != EMPTY
and self._state[position + 3] == self._state[position]
and self._state[position + 6] == self._state[position]):
x = self._cell_rect(position).center().x()
                painter.drawLine(x, 0, x, self.height())
self._turn_number = 9
if (self._state[0] != EMPTY and self._state[4] == self._state[0]
and self._state[8] == self._state[0]):
painter.drawLine(0, 0, self.width(), self.height())
self._turn_number = 9
if (self._state[2] != EMPTY and self._state[4] == self._state[2]
and self._state[6] == self._state[2]):
painter.drawLine(0, self.height(), self.width(), 0)
self._turn_number = 9
def _cell_rect(self, position):
h_margin = self.width() / 30
v_margin = self.height() / 30
row = int(position / 3)
column = position - 3 * row
pos = QPoint(column * self._cell_width() + h_margin,
row * self._cell_height() + v_margin)
size = QSize(self._cell_width() - 2 * h_margin,
self._cell_height() - 2 * v_margin)
return QRect(pos, size)
def _cell_width(self):
return self.width() / 3
def _cell_height(self):
return self.height() / 3
state = Property(str, state, setState)
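# Minimal standalone usage sketch (not part of the original example), assuming
# only the standard PySide6 application boilerplate.
if __name__ == '__main__':
    import sys
    from PySide6.QtWidgets import QApplication
    app = QApplication(sys.argv)
    board = TicTacToe()
    board.setWindowTitle("Tic Tac Toe")
    board.show()
    sys.exit(app.exec())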
|
# Copyright 2015 Idan Moyal. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import socket
import struct
import threading
import time
from six import advance_iterator
class Client(object):
class MemcachedServerNotRespondingError(Exception):
pass
def __init__(self, servers, debug=False, response_timeout=10):
self.servers = servers
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.settimeout(5)
self._results_handler_thread = threading.Thread(
target=self._get_results_handler)
self._results_handler_thread.setDaemon(True)
self._results_handler_thread.start()
self._results = {}
self._response_timeout = response_timeout
self._debug = debug
self._request_id_generator = itertools.count()
def _get_results_handler(self):
while True:
try:
data, server = self.socket.recvfrom(4096)
udp_header = struct.unpack('!Hhhh', data[:8])
if self._debug:
print(
'memcached_udp: results_handler [server]: {0}'.format(
server))
print('memcached_udp: results_handler [data]: {0}'.format(
data))
print('memcached_udp: id={0}, packet_number={1}, '
'total_packets={2}, misc={3}'.format(*udp_header))
request_id = udp_header[0]
if request_id in self._results:
self._results[request_id] = data[8:]
elif self._debug:
print('memcached_udp: request id not found in results - '
'ignoring... [request_id={0}]'.format(request_id))
except socket.timeout:
pass
@staticmethod
def _get_udp_header(request_id):
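        # Memcached UDP frame header: request id, sequence number (0),
        # total number of datagrams in this message (1) and a reserved
        # field (0), each packed as a big-endian 16-bit integer.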
return struct.pack('!Hhhh', request_id, 0, 1, 0)
def _pick_server(self, key):
return self.servers[hash(key) % len(self.servers)]
def _get_request_id(self, server):
request_id = advance_iterator(self._request_id_generator)
if request_id in self._results:
raise RuntimeError(
'Request id already exists for server [server={0}, '
'request_id={1}]'.format(server, request_id))
if request_id > 60000:
self._request_id_generator = itertools.count()
self._results[request_id] = None
return request_id
def _wait_for_result(self, server, request_id):
deadline = time.time() + self._response_timeout
try:
while not self._results[request_id]:
if time.time() >= deadline:
raise self.MemcachedServerNotRespondingError(
'Memcached server is not responding: {0}'.format(
server))
time.sleep(0.1)
return self._results[request_id]
finally:
del self._results[request_id]
def set(self, key, value):
server = self._pick_server(key)
request_id = self._get_request_id(server)
cmd = b''.join([
self._get_udp_header(request_id),
b'set ',
key.encode(),
b' 0 0 ',
str(len(value)).encode(),
b'\r\n',
value.encode(), b'\r\n',
])
self.socket.sendto(cmd, server)
r = self._wait_for_result(server, request_id)
if r.split(b'\r\n')[0] != b'STORED':
raise RuntimeError(
'Error storing "{0}" in {1}'.format(key, server))
def get(self, key):
server = self._pick_server(key)
request_id = self._get_request_id(server)
cmd = b''.join([
self._get_udp_header(request_id),
b'get ',
key.encode(),
b'\r\n',
])
self.socket.sendto(cmd, server)
r = self._wait_for_result(server, request_id)
if r.startswith(b'VALUE'):
arr = r.split(b'\r\n')
return b'\r\n'.join(arr[1:len(arr)-2]).decode()
return None
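# Illustrative usage sketch (not part of the original module): assumes a local
# memcached instance listening for UDP traffic (e.g. started with `memcached -U 11211`).
if __name__ == '__main__':
    client = Client([('127.0.0.1', 11211)])
    client.set('greeting', 'hello')
    print(client.get('greeting'))  # expected: 'hello'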
|
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
df = pd.read_csv('./data/sigite2014-difficulty-data.csv', sep=';')
spent = df.filter(regex='SECONDS_SPENT_ON_*', axis=1)
studentSpentW1 = []
studentEduW1 = []
studentSpentW2 = []
studentEduW2 = []
studentSpentW3 = []
studentEduW3 = []
studentSpentW4 = []
studentEduW4 = []
studentSpentW5 = []
studentEduW5 = []
studentSpentW6 = []
studentEduW6 = []
for week in range(1, 7):
prefix = 'SECONDS_SPENT_ON_viikko0' + str(week) + '_'
prefix2 = 'EDUCATIONAL_VALUE_viikko0' + str(week) + '_'
for col in df.filter(regex=prefix + '.*', axis=1).columns:
name = col.replace(prefix, '')
if week == 1:
studentSpentW1 = df[col]
if week == 2:
studentSpentW2 = df[col]
if week == 3:
studentSpentW3 = df[col]
if week == 4:
studentSpentW4 = df[col]
if week == 5:
studentSpentW5 = df[col]
if week == 6:
studentSpentW6 = df[col]
#print(df[col])
for col in df.filter(regex=prefix2 + '.*', axis=1).columns:
name = col.replace(prefix2, '')
if week == 1:
studentEduW1 = df[col]
if week == 2:
studentEduW2 = df[col]
if week == 3:
studentEduW3 = df[col]
if week == 4:
studentEduW4 = df[col]
if week == 5:
studentEduW5 = df[col]
if week == 6:
studentEduW6 = df[col]
students = []
for i in range(len(studentEduW1)):
if(studentEduW1[i] > 2):
students.append(i)
timeUsed =[]
for i in range(len(students)):
timeUsed.append(studentSpentW1[students[i]])
#print(timeUsed)
#plt.plot(timeUsed)
plt.plot(studentSpentW1, studentEduW1, 'o')
plt.show()
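# A more idiomatic pandas alternative to the index-collecting loops above
# (a sketch under the same column layout): a boolean mask selects the week-1
# rows whose educational value exceeds 2 in a single step.
mask = studentEduW1 > 2
timeUsedVectorized = studentSpentW1[mask]
print(timeUsedVectorized.describe())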
|
import matplotlib.pyplot as plt
import numpy as np
from mpi4py import MPI
from .utils import *
from .boundary import HorizontalInletOutletBoundary
class LatticeBoltzmann():
def __init__(self, density, velocity_field):
comm = MPI.COMM_WORLD
self.n_workers = comm.Get_size()
self.rank = comm.Get_rank()
self._parallel_vars = []
assert len(density.shape) == 2
assert len(velocity_field.shape) == 3
assert density.shape == velocity_field.shape[:2]
assert velocity_field.shape[2] == 2
self.h, self.w = density.shape
density, velocity_field = density.astype(np.float32), velocity_field.astype(np.float32)
f = calculate_equilibrium_distribution(density, velocity_field)
# Create parallel variables
self._parallel_var('velocity_field', velocity_field)
self._parallel_var('density', density)
self._parallel_var('f', f)
self._parallel_var('feq', f)
self.boundaries = []
def stream(self):
for i in range(9):
self._s_f[:, :, i] = np.roll(self._s_f[:, :, i], C[i], axis=(0, 1))
def collide(self, tau=0.6):
self._s_density = calculate_density(self._s_f)
self._s_velocity_field = calculate_velocity_field(self._s_f, self._s_density)
self._s_feq = calculate_equilibrium_distribution(self._s_density, self._s_velocity_field)
self._s_f += 1/tau * (self._s_feq - self._s_f)
def stream_and_collide(self, tau=0.6):
"""A full LBM step with boundary handling."""
for boundary in self.boundaries:
if isinstance(boundary, HorizontalInletOutletBoundary):
boundary.forward(self.f, self.feq, self.velocity_field)
else:
boundary.forward(self.f)
self._partial_update_f()
self.stream()
self.collide(tau)
for parallel_var in self._parallel_vars:
setattr(self, f'_{parallel_var}_calculated', False)
for boundary in self.boundaries:
boundary.backward(self.f)
def add_boundary(self, boundary):
self.boundaries.append(boundary)
def _gather(self, name):
"""Gather split arrays for `name` to shared array if it is not updated.
See: `_parallel_var` function for definitions."""
if not getattr(self, f'_{name}_calculated'):
array = getattr(self, f'_s_{name}')[1:-1]
array = np.ascontiguousarray(array, dtype=np.float32)
x = getattr(self, f'_{name}')
# Handle edge where height is not divisible by number of workers.
if x.shape[0] % self.n_workers:
height = int(np.ceil(x.shape[0] / self.n_workers) * self.n_workers)
x = np.ascontiguousarray(np.zeros((height, *x.shape[1:])), dtype=np.float32)
MPI.COMM_WORLD.Allgatherv([array, MPI.FLOAT], [x, MPI.FLOAT])
array = getattr(self, f'_{name}')
array[:] = x[:array.shape[0]]
else:
MPI.COMM_WORLD.Allgatherv([array, MPI.FLOAT], [x, MPI.FLOAT])
setattr(self, f'_{name}_calculated', True)
array = getattr(self, f'_{name}')
def _split(self, array):
"""Split given array for MPI processes
but keeping the last row and first row of the previous and the next processes'
split array respectively for streaming operation."""
arrays = np.array_split(array, self.n_workers, axis=0)
array = np.concatenate([arrays[self.rank-1][-1:],
arrays[self.rank],
arrays[(self.rank+1) % self.n_workers][:1]])
return array
def _parallel_var(self, name, value):
"""Create a parallel variable for `name` with the given initial `value`.
Creates:
_name: Shared value for name.
name: Property, when it is accessed, it updates the shared value and returns it.
_s_name: Split value for name. Every MPI processes hold a different part of the shared value.
_name_calculated: Indicates whether the current shared value is updated or not.
gather_name: Function that updates the shared value.
"""
setattr(self, f'_s_{name}', self._split(value))
setattr(self, f'_{name}', np.zeros_like(value, dtype=np.float32))
setattr(self, f'_{name}_calculated', False)
setattr(LatticeBoltzmann, name, self._parallel_property(name))
setattr(LatticeBoltzmann, f'gather_{name}', lambda self: self._gather(name))
self._parallel_vars.append(name)
@staticmethod
def _parallel_property(name):
"""Create a property for `name` that gathers the value
from every MPI processes if not calculated."""
def func(self):
if not getattr(self, f'_{name}_calculated'):
self._gather(name)
return getattr(self, f'_{name}')
func.__name__ = name
return property(func)
def _partial_update_f(self):
"""Update split value of self._f"""
self._s_f = self._split(self.f)
def plot(self, ax=plt):
"""Plot velocity field."""
for boundary in self.boundaries:
boundary.update_velocity(self.velocity_field)
v = np.sqrt(self.velocity_field[:, :, 0]**2 +
self.velocity_field[:, :, 1]**2)
ax.imshow(v, cmap='RdBu_r', vmin=0, interpolation='spline16')
def streamplot(self, ax=plt):
"""Plot streamplot of the velocity field."""
for boundary in self.boundaries:
boundary.update_velocity(self.velocity_field)
x, y = np.meshgrid(np.arange(self.w), np.arange(self.h))
ax.streamplot(x, y, self.velocity_field[:, :, 1], self.velocity_field[:, :, 0])
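# Minimal driver sketch (not part of the original module): grid size, velocity
# and step count are illustrative, and the code is assumed to be launched as a
# module of its package under MPI (e.g. `mpirun -n 4 python -m <package>.<module>`).
if __name__ == '__main__':
    h, w = 100, 100
    density = np.ones((h, w))
    velocity_field = np.zeros((h, w, 2))
    velocity_field[:, :, 1] = 0.05  # small uniform flow along the x axis
    lbm = LatticeBoltzmann(density, velocity_field)
    for step in range(100):
        lbm.stream_and_collide(tau=0.6)
    lbm.gather_velocity_field()  # collective call: every rank must participate
    if lbm.rank == 0:
        lbm.plot()
        plt.show()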
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_container.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topology_sdk.model.topology import container_pb2 as topology__sdk_dot_model_dot_topology_dot_container__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_container.proto',
package='container',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x14list_container.proto\x12\tcontainer\x1a+topology_sdk/model/topology/container.proto\"6\n\x14ListContainerRequest\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x10\n\x08pageSize\x18\x02 \x01(\x05\"j\n\x15ListContainerResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12!\n\x04list\x18\x04 \x03(\x0b\x32\x13.topology.Container\"\x80\x01\n\x1cListContainerResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12.\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32 .container.ListContainerResponseb\x06proto3')
,
dependencies=[topology__sdk_dot_model_dot_topology_dot_container__pb2.DESCRIPTOR,])
_LISTCONTAINERREQUEST = _descriptor.Descriptor(
name='ListContainerRequest',
full_name='container.ListContainerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='container.ListContainerRequest.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pageSize', full_name='container.ListContainerRequest.pageSize', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=134,
)
_LISTCONTAINERRESPONSE = _descriptor.Descriptor(
name='ListContainerResponse',
full_name='container.ListContainerResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='container.ListContainerResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='container.ListContainerResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='container.ListContainerResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='container.ListContainerResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=242,
)
_LISTCONTAINERRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListContainerResponseWrapper',
full_name='container.ListContainerResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='container.ListContainerResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='container.ListContainerResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='container.ListContainerResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='container.ListContainerResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=245,
serialized_end=373,
)
_LISTCONTAINERRESPONSE.fields_by_name['list'].message_type = topology__sdk_dot_model_dot_topology_dot_container__pb2._CONTAINER
_LISTCONTAINERRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTCONTAINERRESPONSE
DESCRIPTOR.message_types_by_name['ListContainerRequest'] = _LISTCONTAINERREQUEST
DESCRIPTOR.message_types_by_name['ListContainerResponse'] = _LISTCONTAINERRESPONSE
DESCRIPTOR.message_types_by_name['ListContainerResponseWrapper'] = _LISTCONTAINERRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListContainerRequest = _reflection.GeneratedProtocolMessageType('ListContainerRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTCONTAINERREQUEST,
'__module__' : 'list_container_pb2'
# @@protoc_insertion_point(class_scope:container.ListContainerRequest)
})
_sym_db.RegisterMessage(ListContainerRequest)
ListContainerResponse = _reflection.GeneratedProtocolMessageType('ListContainerResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTCONTAINERRESPONSE,
'__module__' : 'list_container_pb2'
# @@protoc_insertion_point(class_scope:container.ListContainerResponse)
})
_sym_db.RegisterMessage(ListContainerResponse)
ListContainerResponseWrapper = _reflection.GeneratedProtocolMessageType('ListContainerResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTCONTAINERRESPONSEWRAPPER,
'__module__' : 'list_container_pb2'
# @@protoc_insertion_point(class_scope:container.ListContainerResponseWrapper)
})
_sym_db.RegisterMessage(ListContainerResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
import ConfigParser
import os
import random
import re
import string
from getpass import getpass
from argyle import postgres, nginx, system
from argyle.supervisor import supervisor_command, upload_supervisor_app_conf
from argyle.system import service_command
from fabric import utils
from fabric.api import cd, env, get, hide, local, put, require, run, settings, sudo, task
from fabric.contrib import files, console
# Directory structure
PROJECT_ROOT = os.path.dirname(__file__)
CONF_ROOT = os.path.join(PROJECT_ROOT, 'conf')
SERVER_ROLES = ['app', 'lb', 'db']
env.project = 'raspberryio'
env.project_user = 'raspberryio'
env.repo = u'git@github.com:python/raspberryio.git'
env.shell = '/bin/bash -c'
env.disable_known_hosts = True
env.ssh_port = 2222
env.forward_agent = True
env.password_names = ['newrelic_license_key']
# Additional settings for argyle
env.ARGYLE_TEMPLATE_DIRS = (
os.path.join(CONF_ROOT, 'templates')
)
@task
def vagrant():
env.environment = 'staging'
env.vagrant = True
env.hosts = ['33.33.33.10', ]
env.branch = 'develop'
env.server_name = 'vagrant.raspberry.io'
setup_path()
@task
def staging():
env.environment = 'staging'
env.vagrant = False
env.hosts = ['raspberryio-staging.caktusgroup.com', ]
env.branch = 'develop'
env.server_name = 'raspberryio-staging.caktusgroup.com'
env.port = 2222
setup_path()
@task
def production():
env.environment = 'production'
env.vagrant = False
env.hosts = ['raspberry.int.python.org', ]
env.branch = 'master'
env.server_name = 'raspberry.io'
# Provided machine uses default port
env.ssh_port = 22
setup_path()
def setup_path():
env.home = '/home/%(project_user)s/' % env
env.root = os.path.join(env.home, 'www', env.environment)
env.code_root = os.path.join(env.root, env.project)
env.project_root = os.path.join(env.code_root, env.project)
env.virtualenv_root = os.path.join(env.root, 'env')
env.log_dir = os.path.join(env.root, 'log')
env.db = '%s_%s' % (env.project, env.environment)
env.vhost = '%s_%s' % (env.project, env.environment)
env.settings = '%(project)s.settings.%(environment)s' % env
@task
def create_users():
"""Create project user and developer users."""
ssh_dir = u"/home/%s/.ssh" % env.project_user
system.create_user(env.project_user, groups=['www-data', 'login', ])
sudo('mkdir -p %s' % ssh_dir)
user_dir = os.path.join(CONF_ROOT, "users")
for username in os.listdir(user_dir):
key_file = os.path.normpath(os.path.join(user_dir, username))
system.create_user(username, groups=['dev', 'login', 'admin', ], key_file=key_file)
with open(key_file, 'rt') as f:
ssh_key = f.read()
# Add ssh key for project user
files.append('%s/authorized_keys' % ssh_dir, ssh_key, use_sudo=True)
sudo('chown -R %s:%s %s' % (env.project_user, env.project_user, ssh_dir))
@task
def configure_ssh():
"""
Change sshd_config defaults:
Change default port
Disable root login
Disable password login
Restrict to only login group
"""
ssh_config = u'/etc/ssh/sshd_config'
files.sed(ssh_config, u"Port 22$", u"Port %s" % env.ssh_port, use_sudo=True)
files.sed(ssh_config, u"PermitRootLogin yes", u"PermitRootLogin no", use_sudo=True)
files.append(ssh_config, u"AllowGroups login", use_sudo=True)
files.append(ssh_config, u"PasswordAuthentication no", use_sudo=True)
service_command(u'ssh', u'reload')
@task
def install_packages(*roles):
"""Install packages for the given roles."""
config_file = os.path.join(CONF_ROOT, u'packages.conf')
config = ConfigParser.SafeConfigParser()
config.read(config_file)
for role in roles:
if config.has_section(role):
# Get ppas
if config.has_option(role, 'ppas'):
for ppa in config.get(role, 'ppas').split(' '):
system.add_ppa(ppa, update=False)
# Get sources
if config.has_option(role, 'sources'):
for section in config.get(role, 'sources').split(' '):
source = config.get(section, 'source')
key = config.get(section, 'key')
system.add_apt_source(source=source, key=key, update=False)
sudo(u"apt-get update")
sudo(u"apt-get install -y %s" % config.get(role, 'packages'))
sudo(u"apt-get upgrade -y")
@task
def setup_server(*roles):
"""Install packages and add configurations for server given roles."""
require('environment')
# Set server locale
sudo('/usr/sbin/update-locale LANG=en_US.UTF-8')
roles = list(roles)
if roles == ['all', ]:
roles = SERVER_ROLES
if 'base' not in roles:
roles.insert(0, 'base')
install_packages(*roles)
if 'db' in roles:
if console.confirm(u"Do you want to reset the Postgres cluster?.", default=False):
# Ensure the cluster is using UTF-8
pg_version = postgres.detect_version()
sudo('pg_dropcluster --stop %s main' % pg_version, user='postgres')
sudo('pg_createcluster --start -e UTF-8 --locale en_US.UTF-8 %s main' % pg_version,
user='postgres')
postgres.create_db_user(username=env.project_user)
postgres.create_db(name=env.db, owner=env.project_user)
if 'app' in roles:
# Create project directories and install Python requirements
project_run('mkdir -p %(root)s' % env)
project_run('mkdir -p %(log_dir)s' % env)
# FIXME: update to SSH as normal user and use sudo
# we ssh as the project_user here to maintain ssh agent
# forwarding, because it doesn't work with sudo. read:
# http://serverfault.com/questions/107187/sudo-su-username-while-keeping-ssh-key-forwarding
with settings(user=env.project_user):
# TODO: Add known hosts prior to clone.
# i.e. ssh -o StrictHostKeyChecking=no git@github.com
run('git clone %(repo)s %(code_root)s' % env)
with cd(env.code_root):
run('git checkout %(branch)s' % env)
# Install and create virtualenv
with settings(hide('everything'), warn_only=True):
test_for_pip = run('which pip')
if not test_for_pip:
sudo("easy_install -U pip")
with settings(hide('everything'), warn_only=True):
test_for_virtualenv = run('which virtualenv')
if not test_for_virtualenv:
sudo("pip install -U virtualenv")
project_run('virtualenv -p python2.7 --clear --distribute %s' % env.virtualenv_root)
path_file = os.path.join(env.virtualenv_root, 'lib', 'python2.7', 'site-packages', 'project.pth')
files.append(path_file, env.code_root, use_sudo=True)
sudo('chown %s:%s %s' % (env.project_user, env.project_user, path_file))
sudo('npm install less@1.3 -g')
update_requirements()
upload_supervisor_app_conf(app_name=u'gunicorn')
upload_supervisor_app_conf(app_name=u'group')
# Restart services to pickup changes
supervisor_command('reload')
supervisor_command('restart %(environment)s:*' % env)
if 'lb' in roles:
nginx.remove_default_site()
nginx.upload_nginx_site_conf(site_name=u'%(project)s-%(environment)s.conf' % env)
def project_run(cmd):
""" Uses sudo to allow developer to run commands as project user."""
sudo(cmd, user=env.project_user)
def _random_password(length=8, chars=string.letters + string.digits):
"""Generates a random password with the specificed length and chars."""
return ''.join([random.choice(chars) for i in range(length)])
def _load_passwords(names, length=20, generate=False):
"""Retrieve password from the user's home directory, or generate a new random one if none exists"""
for name in names:
filename = ''.join([env.home, name])
if generate:
passwd = _random_password(length=length)
sudo('touch %s' % filename, user=env.project_user)
sudo('chmod 600 %s' % filename, user=env.project_user)
with hide('running'):
sudo('echo "%s">%s' % (passwd, filename), user=env.project_user)
if env.host_string and files.exists(filename):
with hide('stdout'):
passwd = sudo('cat %s' % filename).strip()
else:
passwd = getpass('Please enter %s: ' % name)
setattr(env, name, passwd)
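# Illustrative flow (names are assumptions, not defined here): a provisioning task
# would set env.password_names = ['db_password', 'newrelic_license_key'], call
#     _load_passwords(env.password_names, generate=True)
# once to create and store the secrets, and later tasks call
#     _load_passwords(env.password_names)
# to read them back and expose them as env.db_password etc. for templates.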
@task
def update_service_confs():
"""Update supervisor configuration."""
require('environment')
if not env.vagrant:
upload_newrelic_conf()
upload_supervisor_app_conf(app_name=u'gunicorn')
upload_supervisor_app_conf(app_name=u'group')
nginx.upload_nginx_site_conf(site_name=u'%(project)s-%(environment)s.conf' % env)
# Restart services to pickup changes
supervisor_command('reload')
@task
def upload_newrelic_conf():
"""Upload New Relic configuration from the template."""
require('environment')
_load_passwords(env.password_names)
template = os.path.join(CONF_ROOT, 'templates', 'newrelic.ini')
destination = os.path.join(env.root, 'newrelic-%(environment)s.ini' % env)
files.upload_template(template, destination, context=env, use_sudo=True)
sudo('chown %s:%s %s' % (env.project_user, env.project_user, destination))
@task
def update_requirements():
"""Update required Python libraries."""
require('environment')
project_run(u'HOME=%(home)s %(virtualenv)s/bin/pip install --use-mirrors -r %(requirements)s' % {
'virtualenv': env.virtualenv_root,
'requirements': os.path.join(env.code_root, 'requirements', 'production.txt'),
'home': env.home,
})
@task
def manage_run(command):
"""Run a Django management command on the remote server."""
require('environment')
manage_base = u"%(virtualenv_root)s/bin/django-admin.py " % env
if '--settings' not in command:
command = u"%s --settings=%s" % (command, env.settings)
project_run(u'%s %s' % (manage_base, command))
@task
def manage_shell():
"""Drop into the remote Django shell."""
manage_run("shell")
@task
def syncdb():
"""Run syncdb and South migrations."""
manage_run('syncdb')
manage_run('migrate --noinput')
@task
def collectstatic():
"""Collect static files."""
manage_run('compress')
manage_run('collectstatic --noinput')
def match_changes(changes, match):
    """Return True if the regex ``match`` is found in ``changes`` (git diff output)."""
    pattern = re.compile(match)
    return pattern.search(changes) is not None
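# For reference: `git diff origin/<branch> --stat` output contains lines such as
#     requirements/production.txt              |  2 +-
#     raspberryio/project/migrations/0004_x.py | 18 +++++
# (file names illustrative), so match_changes(changes, r"requirements/") and
# match_changes(changes, r"/migrations/") in deploy() below detect when a pip
# install or a migration run is needed.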
@task
def deploy(branch=None, full=False):
"""Deploy to a given environment."""
require('environment')
if branch is not None:
env.branch = branch
requirements = False if not full else True
migrations = False if not full else True
# Fetch latest changes
with cd(env.code_root):
with settings(user=env.project_user):
run('git fetch origin')
# Look for new requirements or migrations
changes = run("git diff origin/%(branch)s --stat-name-width=9999" % env)
requirements = match_changes(changes, r"requirements/")
migrations = match_changes(changes, r"/migrations/")
if requirements or migrations:
supervisor_command('stop %(environment)s:*' % env)
with settings(user=env.project_user):
run("git reset --hard origin/%(branch)s" % env)
if requirements:
update_requirements()
# New requirements might need new tables/migrations
syncdb()
elif migrations:
syncdb()
collectstatic()
supervisor_command('restart %(environment)s:*' % env)
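# Typical invocations (assuming the staging/production environment tasks defined
# elsewhere in this fabfile):
#     fab staging deploy                  # quick deploy; pip/migrations only if diffs require them
#     fab staging deploy:branch=develop   # deploy a specific branch
#     fab production deploy:full=True     # force requirements install and migrations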
@task
def upload_secrets(secrets_filepath):
"""Upload a settings.ini file to the server"""
require('environment')
destination_file = os.path.join(env.root, 'settings.ini')
put(secrets_filepath, destination_file, use_sudo=True)
sudo('chown %s:%s %s' % (env.project_user, env.project_user, destination_file))
@task
def get_db_dump(clean=True):
"""Get db dump of remote enviroment."""
require('environment')
dump_file = '%(environment)s.sql' % env
temp_file = os.path.join(env.home, dump_file)
flags = '-Ox'
if clean:
flags += 'c'
sudo('pg_dump %s %s > %s' % (flags, env.db, temp_file), user=env.project_user)
get(temp_file, dump_file)
@task
def load_db_dump(dump_file):
"""Load db dump on a remote environment."""
require('environment')
temp_file = os.path.join(env.home, '%(environment)s.sql' % env)
put(dump_file, temp_file, use_sudo=True)
sudo('psql -d %s -f %s' % (env.db, temp_file), user=env.project_user)
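# A possible round trip between environments (illustrative; relies on the same
# staging/production tasks as above):
#     fab production get_db_dump              # writes production.sql locally
#     fab staging load_db_dump:production.sql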
@task
def reset_local_media():
""" Reset local media from remote host """
require('environment', provided_by=('staging', 'production'))
media = os.path.join(env.code_root, 'public/media')
local("rsync -rvaze 'ssh -p %s' %s@%s:%s %s/public" %
(env.ssh_port, env.user, env.hosts[0], media, PROJECT_ROOT))
@task
def reset_local_db():
""" Reset local database from remote host """
require('code_root', provided_by=('production', 'staging'))
question = 'Are you sure you want to reset your local ' \
'database with the %(environment)s database?' % env
if not console.confirm(question, default=False):
utils.abort('Local database reset aborted.')
if env.environment == 'staging':
from raspberryio.settings.staging import DATABASES as remote
else:
from raspberryio.settings.production import DATABASES as remote
from raspberryio.settings.local import DATABASES as loc
local_db = loc['default']['NAME']
remote_db = remote['default']['NAME']
with settings(warn_only=True):
local('dropdb %s' % local_db)
local('createdb %s' % local_db)
host = '%s@%s' % (env.project_user, env.hosts[0])
local('ssh -p %s -C %s pg_dump -Ox %s | psql %s' % (env.ssh_port, host, remote_db, local_db))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from math import log
# x-axis values (input sizes)
x = [50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,950,1000]
# y-axis timing values for each method
y_gauss = []
y_LU = []
y_cholesky = []
# values of the first function (Gauss timings)
f = open('../resultadosTiempos/tiemposGaussSinVariar.txt', 'r')
for i in range(0,20):
y_gauss.append(float(f.readline()[:-1]))
# values of the second function (LU timings)
f = open('../resultadosTiempos/tiemposLUSinVariar.txt', 'r')
for i in range(0,20):
y_LU.append(float(f.readline()[:-1]))
# values of the third function (Cholesky timings)
f = open('../resultadosTiempos/tiemposCholeskySinVariar.txt', 'r')
for i in range(0,20):
y_cholesky.append(float(f.readline()[:-1]))
print len(y_gauss)
print len(y_LU)
print len(y_cholesky)
print len(x)
plt.plot(x,y_gauss,'ro', color='green', label="Gauss")
plt.plot(x,y_LU,'ro', color='red', label="LU")
plt.plot(x,y_cholesky,'ro', color='blue', label="Cholesky")
yfunction = []
a = 50
for m in range(0,20):
if m == 0:
yfunction.append(0)
else:
yfunction.append(0.00000001721*a*a*a)
a += 50
# without 'ro' it is plotted as a regular continuous line
plt.plot(x,yfunction, color='purple', label='T(n)=k*(n^3)',linewidth=3)
plt.legend(bbox_to_anchor=(0.35,1))
# x-axis label
plt.xlabel(u"Tamaño de la entrada (cantidad de equipos)")
# y-axis label
plt.ylabel("Tiempo(segundos)")
plt.show()
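# Optional sanity check (an assumed extension, not part of the original script):
# instead of hand-tuning the constant 0.00000001721 above, estimate k for
# T(n) = k*n^3 with a least-squares fit of the Gauss timings against n^3.
k_fit = np.polyfit(np.array(x, dtype=float) ** 3, y_gauss, 1)[0]
print('fitted k for T(n) = k*n^3: %.3e' % k_fit)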
|
__author__ = 'Madison'
import sqlite3
from app import config
from app.generic_error import GenericError
def create_table():
create_profile_table = '''CREATE TABLE IF NOT EXISTS user_profile
(profile_id INTEGER PRIMARY KEY asc, profilename text unique not NULL)'''
create_events_table = '''CREATE TABLE IF NOT EXISTS events
(profile_id INTEGER, request_time date, ipr text, ufield text, pfield text, usertype text,
FOREIGN KEY(profile_id) REFERENCES user_profile(profile_id))'''
conn = sqlite3.connect(config.DATABASE_LOC) # when this is :memory:, dies immediately on execution finish.
# conn.execute(create_statement)
with conn:
curr = conn.cursor()
curr.execute(create_profile_table)
curr.execute(create_events_table)
conn.commit()
def get_profile_list():
get_all_profiles = '''
select user_profile.profilename, CASE WHEN events.profile_id is null THEN 0 ELSE count(*) END
from user_profile
left outer join events on user_profile.profile_id = events.profile_id
group by 1;
'''
conn = sqlite3.connect(config.DATABASE_LOC) # when this is :memory:, dies immediately on execution finish.
with conn:
curr = conn.cursor()
curr.execute(get_all_profiles)
results = curr.fetchall()
#print results
return results
def profile_exists(name):
cmd = """select count(*) from user_profile where profilename=?"""
conn = sqlite3.connect(config.DATABASE_LOC) # when this is :memory:, dies immediately on execution finish.
with conn:
curr = conn.cursor()
curr.execute(cmd, [name])
db_results = curr.fetchall()
return db_results[0][0] > 0
def get_profile_id(name):
cmd = """select profile_id from user_profile where profilename=?"""
conn = sqlite3.connect(config.DATABASE_LOC) # when this is :memory:, dies immediately on execution finish.
with conn:
curr = conn.cursor()
curr.execute(cmd, [name])
db_results = curr.fetchall()
return db_results[0][0]
def create_new_profile(name):
# run get user list first, and make sure it's not in there
if profile_exists(name):
raise GenericError('Profile already exists')
insert_profile = '''insert into user_profile (profilename) VALUES (?)'''
conn = sqlite3.connect(config.DATABASE_LOC) # when this is :memory:, dies immediately on execution finish.
with conn:
curr = conn.cursor()
curr.execute(insert_profile, [name])
conn.commit()
def insert_event(name, time, ipr, ufield, pfield, usertype):
current_profiles = get_profile_list()
if not profile_exists(name):
raise GenericError('Profile does not exist')
profile_id = get_profile_id(name)
insert_event_statement = '''insert into events (profile_id, request_time, ipr, ufield, pfield, usertype)
VALUES (?,?,?,?,?,?);'''
conn = sqlite3.connect(config.DATABASE_LOC)
with conn:
curr = conn.cursor()
curr.execute(insert_event_statement, [profile_id, time, ipr, ufield, pfield, usertype])
conn.commit()
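# Typical call sequence (all values illustrative):
#     create_table()
#     create_new_profile('alice')
#     insert_event('alice', '2016-01-01 12:00:00', '127.0.0.1', 'user', 'pass', 'admin')
#     get_all_events_for_profile('alice')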
def get_all_events():
cmd = '''select u.profilename, e.request_time, e.ipr, e.ufield, e.pfield, e.usertype
from user_profile u join events e on e.profile_id = u.profile_id
order by request_time asc'''
conn = sqlite3.connect(config.DATABASE_LOC)
conn.row_factory= sqlite3.Row
with conn:
curr = conn.cursor()
curr.execute(cmd)
db_results = curr.fetchall()
return db_results
def get_all_events_for_profile(name):
get_events = '''select u.profilename, e.request_time, e.ipr, e.ufield, e.pfield
from user_profile u join events e on e.profile_id = u.profile_id
where u.profilename = ?'''
conn = sqlite3.connect(config.DATABASE_LOC)
with conn:
curr = conn.cursor()
curr.execute(get_events, [name])
db_results = curr.fetchall()
return db_results
if __name__ == '__main__':
# create_table()
res = get_all_events_for_profile('test2')
for r in res:
print r
# print create_new_user('test_user3')
# print insert_event('test_user2', 'time3', 'ipr dat1')
|
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
from .aspp_head import ASPPHead
@HEADS.register_module()
class ASPPHeadExt(ASPPHead):
def __init__(self, dilations=(1, 6, 12, 18), **kwargs):
self.glove_dim = kwargs.pop('glove_dim')
super().__init__(dilations, **kwargs)
self.glove_conv = nn.Conv2d(self.channels, self.glove_dim, kernel_size=1)
self.glove_conv_seg = nn.Conv2d(self.glove_dim, self.num_classes, kernel_size=1)
def cls_seg(self, feat):
"""Classify each pixel."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.glove_conv(feat)
output = self.glove_conv_seg(output)
return output
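# A minimal, assumed mmseg config snippet selecting this head. Only `type` and the
# extra `glove_dim` kwarg are specific to this class; the remaining field names
# follow the usual BaseDecodeHead/ASPPHead arguments and the values are illustrative:
#     decode_head=dict(
#         type='ASPPHeadExt',
#         in_channels=2048,
#         in_index=3,
#         channels=512,
#         dilations=(1, 6, 12, 18),
#         glove_dim=300,
#         dropout_ratio=0.1,
#         num_classes=21,
#         norm_cfg=dict(type='SyncBN', requires_grad=True),
#         align_corners=False,
#         loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))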
|
#!/usr/bin/env python
import os
import zmq
import time
import json
import platform
import subprocess
import multiprocessing
import numpy as np
import joblib
import requests
from selfdrive.kegman_conf import kegman_conf
from selfdrive.services import service_list
from enum import Enum
from cereal import log
from cffi import FFI
from setproctitle import setproctitle
from common.params import Params
try:
params = Params()
user_id = str(params.get("DongleId"))
lateral_params = params.get("LateralParams")
lateral_params = json.loads(lateral_params)
lateral_offset = float(lateral_params['lateral_offset'])
angle_offset = float(lateral_params['angle_offset'])
except:
user_id = "unidentified"
lateral_offset = 0.0
angle_offset = 0.0
url_string = 'http://127.0.0.1:8086/write?db=carDB&u=liveOP&p=liveOP&precision=ms'
ffi = FFI()
ffi.cdef("long syscall(long number, ...);")
libc = ffi.dlopen(None)
def set_realtime_priority(level):
if platform.machine() == "x86_64":
NR_gettid = 186
elif platform.machine() == "aarch64":
NR_gettid = 178
else:
raise NotImplementedError
tid = libc.syscall(NR_gettid)
print("/n/n realtime priority = %d %s %s/n" %(level, NR_gettid, str(tid)))
return subprocess.call(['chrt', '-f', '-p', str(level), str(tid)])
def dump_sock(sock):
while 1:
try:
sock.recv(zmq.NOBLOCK)
except zmq.error.Again:
break
def pub_sock(port, addr="*"):
context = zmq.Context.instance()
sock = context.socket(zmq.PUB)
sock.bind("tcp://%s:%d" % (addr, port))
return sock
def sub_sock(port, poller=None, addr="127.0.0.1", conflate=False, timeout=None):
context = zmq.Context.instance()
sock = context.socket(zmq.SUB)
if conflate:
sock.setsockopt(zmq.CONFLATE, 1)
sock.connect("tcp://%s:%d" % (addr, port))
sock.setsockopt(zmq.SUBSCRIBE, b"")
if timeout is not None:
sock.RCVTIMEO = timeout
if poller is not None:
poller.register(sock, zmq.POLLIN)
return sock
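# Illustrative usage of the helpers above (the port numbers are placeholders; the
# real ones come from service_list below):
#     poller = zmq.Poller()
#     out_sock = pub_sock(8605)                      # bind a PUB socket on *:8605
#     in_sock = sub_sock(8022, poller=poller,        # connect a SUB socket and
#                        conflate=True, timeout=100) # register it with the poller
#     dump_sock(in_sock)                             # drop any stale queued messages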
pathFormatString1 = 'pathPlan,user=' + user_id + ' l0=%0.3f,l1=%0.3f,l2=%0.3f,l3=%0.3f,l4=%0.3f,l5=%0.3f,l6=%0.3f,l7=%0.3f,l8=%0.3f,l9=%0.3f,l10=%0.3f,l11=%0.3f,l12=%0.3f,l13=%0.3f,l14=%0.3f,'
pathFormatString2 = "r0=%0.3f,r1=%0.3f,r2=%0.3f,r3=%0.3f,r4=%0.3f,r5=%0.3f,r6=%0.3f,r7=%0.3f,r8=%0.3f,r9=%0.3f,r10=%0.3f,r11=%0.3f,r12=%0.3f,r13=%0.3f,r14=%0.3f,"
pathFormatString3 = "c0=%0.3f,c1=%0.3f,c2=%0.3f,c3=%0.3f,c4=%0.3f,c5=%0.3f,c6=%0.3f,c7=%0.3f,c8=%0.3f,c9=%0.3f,c10=%0.3f,c11=%0.3f,c12=%0.3f,c13=%0.3f,c14=%0.3f,"
pathFormatString4 = "a3=%0.3f,a4=%0.3f,a5=%0.3f,a6=%0.3f,a10=%0.3f,lprob=%0.3f,rprob=%0.3f,cprob=%0.3f,lane_width=%0.3f,angle=%0.3f,rate=%0.3f,angle_offset=%0.2f,lateral_offset=%0.2f,plan_age=%0.3f %d\n"
carStateFormatString2 = "carState,user=" + user_id + " angle_offset=%0.2f,lateral_offset=%0.2f,angle_steers=%0.4f,angle_rate=%0.4f,driver_torque=%0.4f,request=%0.4f,angle_rate_eps=%0.4f,yaw_rate_can=%0.4f,angle_steers_eps=%0.4f,long_accel=%0.4f,p2=%0.4f,p=%0.4f,i=%0.4f,f=%0.4f,damp_angle_steers=%0.4f,damp_angle_steers_des=%0.4f,ff_rate=%0.4f,ff_angle=%0.4f,left_frame=%d,far_right_frame=%d,v_ego=%0.4f,wheel_speed_fl=%0.4f,wheel_speed_fr=%0.4f,wheel_speed_rl=%0.4f,wheel_speed_rr=%0.4f,l_blinker=%d,r_blinker=%d,lk_mode=%d,enabled=%d,left_frame=%d,left_1=%d,left_2=%d,left_3=%d,left_4=%d,left_5=%d,left_6=%d,left_7=%d,left_8=%d,left_9=%d,left_10=%d,left_solid=%d,left_dashed=%d,right_frame=%d,right_1=%d,right_2=%d,right_3=%d,right_4=%d,right_5=%d,right_6=%d,right_7=%d,right_8=%d,right_9=%d,right_10=%d,right_solid=%d,right_dashed=%d,far_left_frame=%d,far_left_1=%d,far_left_2=%d,far_left_3=%d,far_left_4=%d,far_left_5=%d,far_left_6=%d,far_left_7=%d,far_left_8=%d,far_left_9=%d,far_left_10=%d,far_left_solid=%d,far_left_dashed=%d,far_right_frame=%d,far_right_1=%d,far_right_2=%d,far_right_3=%d,far_right_4=%d,far_right_5=%d,far_right_6=%d,far_right_7=%d,far_right_8=%d,far_right_9=%d,far_right_10=%d,far_right_solid=%d,far_right_dashed=%d %d\n"
carStateFormatString1 = "carState,user=" + user_id + " angle_steers=%0.4f,angle_rate=%0.4f,driver_torque=%0.4f,request=%0.4f,angle_rate_eps=%0.4f,yaw_rate_can=%0.4f,angle_steers_eps=%0.4f,long_accel=%0.4f,p2=%0.4f,p=%0.4f,i=%0.4f,f=%0.4f,damp_angle_steers=%0.4f,damp_angle_steers_des=%0.4f,ff_rate=%0.4f,ff_angle=%0.4f,left_frame=%d,far_right_frame=%d %d\n"
pathDataString = ""
kegmanDataString = ""
carStateDataString1 = ""
carStateDataString2 = ""
insertString = ""
canInsertString = ""
Inputs = 51
Outputs = 5
scaler_type = 'MinMax_tanh'
history_rows = 5
setproctitle('laterald')
set_realtime_priority(1)
poller = zmq.Poller()
carState = sub_sock(service_list['carState'].port, conflate=False, poller=poller)
gernPath = pub_sock(service_list['pathPlan'].port)
gernModelInputs = pub_sock(service_list['model'].port)
gernModelOutputs = sub_sock(8605, poller=poller)
recv_frames = 1
sent_frames = 1
frame_count = 1
dashboard_count = 0
try:
input_scaler = joblib.load(os.path.expanduser('./models/GRU_%s_%d_inputs_A.scaler' % (scaler_type, Inputs)))
output_scaler = joblib.load(os.path.expanduser('./models/GRU_%s_%d_outputs_A.scaler' % (scaler_type, Outputs)))
except:
input_scaler = joblib.load(os.path.expanduser('./models/GRU_%s_%d_inputs_001.scaler' % (scaler_type, Inputs)))
output_scaler = joblib.load(os.path.expanduser('./models/GRU_%s_%d_outputs_001.scaler' % (scaler_type, Outputs)))
scaler_padding = None
scaled_camera_array = []
scaled_vehicle_array = []
stock_cam_frame_prev = -1
lane_width = 0
half_width = 0
l_probs = {}
r_probs = {}
l_offset = {}
r_offset = {}
angle_steers = {}
l_prob_smooth = 0.
r_prob_smooth = 0.
path_send = log.Event.new_message()
path_send.init('pathPlan')
advanceSteer = 1
back_log = 0
dump_sock(carState)
r = requests.post('http://localhost:8086/query?q=CREATE DATABASE carDB')
row_count = 0
column_count = 0
while 1:
for socket, event in poller.poll():
if socket is carState:
back_log += 1
_cs = log.Event.from_bytes(socket.recv())
cs = _cs.carState
frame_count += 1
#lateral_offset = 0
steer_angle = round(cs.steeringAngle - angle_offset, 1)
#left_parm1 = min(cs.camLeft.parm1 + cs.camLeft.parm4, max(cs.camLeft.parm1 - cs.camLeft.parm4, cs.camLeft.parm1 - int(lateral_offset)))
#right_parm1 = min(cs.camRight.parm1 + cs.camRight.parm4, max(cs.camRight.parm1 - cs.camRight.parm4, cs.camRight.parm1 - int(lateral_offset)))
#far_left_parm1 = min(cs.camFarLeft.parm1 + cs.camFarLeft.parm4, max(cs.camFarLeft.parm1 - cs.camFarLeft.parm4, cs.camFarLeft.parm1 - int(lateral_offset)))
#far_right_parm1 = min(cs.camFarRight.parm1 + cs.camFarRight.parm4, max(cs.camFarRight.parm1 - cs.camFarRight.parm4, cs.camFarRight.parm1 - int(lateral_offset)))
unscaled_input_array = [[cs.vEgo, steer_angle, cs.lateralAccel, cs.steeringTorqueEps, cs.yawRateCAN, cs.longAccel, 0 , 0 , cs.steeringRate, cs.steeringTorque, cs.torqueRequest,
cs.camLeft.parm1, cs.camLeft.parm2, cs.camLeft.parm3, cs.camLeft.parm4, cs.camLeft.parm5, cs.camLeft.parm6, cs.camLeft.parm7, cs.camLeft.parm8, cs.camLeft.parm9, cs.camLeft.parm10,
cs.camFarLeft.parm1, cs.camFarLeft.parm2, cs.camFarLeft.parm3, cs.camFarLeft.parm4, cs.camFarLeft.parm5, cs.camFarLeft.parm6, cs.camFarLeft.parm7, cs.camFarLeft.parm8, cs.camFarLeft.parm9, cs.camFarLeft.parm10,
cs.camRight.parm1, cs.camRight.parm2, cs.camRight.parm3, cs.camRight.parm4, cs.camRight.parm5, cs.camRight.parm6, cs.camRight.parm7, cs.camRight.parm8, cs.camRight.parm9, cs.camRight.parm10,
cs.camFarRight.parm1, cs.camFarRight.parm2, cs.camFarRight.parm3, cs.camFarRight.parm4, cs.camFarRight.parm5, cs.camFarRight.parm6, cs.camFarRight.parm7, cs.camFarRight.parm8, cs.camFarRight.parm9, cs.camFarRight.parm10]]
scaled_data = input_scaler.transform(unscaled_input_array)
scaled_vehicle_array.append(scaled_data[:,:11])
if cs.vEgo > 10:
if cs.yawRateCAN < 0 and cs.steeringAngle > angle_offset:
angle_offset += (0.0001 * cs.vEgo)
elif cs.yawRateCAN > 0 and cs.steeringAngle < angle_offset:
angle_offset -= (0.0001 * cs.vEgo)
elif abs(cs.lateralControlState.pidState.p2 - cs.torqueRequest) > 1.1 * abs(cs.lateralControlState.pidState.p2) and abs(cs.torqueRequest) < 1 and abs(cs.steeringRate) < 5:
if cs.lateralControlState.pidState.p2 < 0:
lateral_offset -= (0.0001 * cs.vEgo)
else:
lateral_offset += (0.0001 * cs.vEgo)
#elif cs.camLeft.parm4 > 60 and cs.camRight.parm4 > 60 and abs(cs.torqueRequest) < 0.4 and abs(cs.torqueRequest) > 0 and abs(cs.steeringRate) < 5:
# if cs.camLeft.parm2 + cs.camRight.parm2 < -2 or (cs.camLeft.parm2 + cs.camRight.parm2 < 0 and cs.yawRateCAN < 0):
# lateral_offset -= (0.0001 * cs.vEgo)
# elif cs.camLeft.parm2 + cs.camRight.parm2 > 2 or (cs.camLeft.parm2 + cs.camRight.parm2 > 0 and cs.yawRateCAN > 0):
# lateral_offset += (0.0001 * cs.vEgo)
#if cs.camLeft.parm2 + cs.camRight.parm2 < 0 and (cs.camLeft.parm1 + cs.camRight.parm1) > 2 * lateral_offset:
# lateral_offset += (0.0001 * cs.vEgo)
#elif cs.camLeft.parm2 + cs.camRight.parm2 > 0 and (cs.camLeft.parm1 + cs.camRight.parm1) < 2 * lateral_offset:
# lateral_offset -= (0.0001 * cs.vEgo)
if cs.camLeft.frame != stock_cam_frame_prev and cs.camLeft.frame == cs.camFarRight.frame:
back_log = 0
scaled_camera_array.append(scaled_data[:,11:])
if len(scaled_camera_array) > history_rows:
scaled_array = np.concatenate((scaled_vehicle_array[-history_rows:], scaled_camera_array[-history_rows:]), axis = 2)
scaled_camera_array.pop(0)
scaled_vehicle_array.pop(0)
stock_cam_frame_prev = cs.camLeft.frame
if recv_frames > 0: sent_frames += 1
l_prob = cs.camLeft.parm4/127
r_prob = cs.camRight.parm4/127
if cs.camLeft.solid and cs.camRight.dashed:
l_prob *= -1
elif cs.camRight.solid and cs.camLeft.dashed:
r_prob *= -1
l_probs[cs.canTime] = l_prob
r_probs[cs.canTime] = r_prob
l_offset[cs.canTime] = cs.camLeft.parm2
r_offset[cs.canTime] = cs.camRight.parm2
angle_steers[cs.canTime] = cs.steeringAngle
input_array = list(np.asarray(scaled_array).reshape(history_rows * len(scaled_array[0][0])).astype('float'))
input_array.append(cs.canTime)
if recv_frames > 5 or sent_frames % 5 == 0:
gernModelInputs.send_json(list(input_array))
carStateDataString2 += (carStateFormatString2 % (round(angle_offset, 1), round(lateral_offset,3), cs.steeringAngle, cs.steeringRate, cs.steeringTorque, cs.torqueRequest, cs.steeringTorqueEps, cs.yawRateCAN, cs.lateralAccel, cs.longAccel, \
cs.lateralControlState.pidState.p2, cs.lateralControlState.pidState.p, cs.lateralControlState.pidState.i, cs.lateralControlState.pidState.f, \
cs.lateralControlState.pidState.steerAngle, cs.lateralControlState.pidState.steerAngleDes, 1.0 - cs.lateralControlState.pidState.angleFFRatio, cs.lateralControlState.pidState.angleFFRatio, cs.camLeft.frame, cs.camFarRight.frame, \
cs.vEgo, cs.wheelSpeeds.fl, cs.wheelSpeeds.fr, cs.wheelSpeeds.rl, cs.wheelSpeeds.rr, cs.leftBlinker, cs.rightBlinker, cs.lkMode, cs.cruiseState.enabled, \
cs.camLeft.frame, cs.camLeft.parm1, cs.camLeft.parm2, cs.camLeft.parm3, cs.camLeft.parm4, cs.camLeft.parm5, cs.camLeft.parm6, cs.camLeft.parm7, cs.camLeft.parm8, cs.camLeft.parm9, cs.camLeft.parm10, cs.camLeft.solid, cs.camLeft.dashed, \
cs.camRight.frame, cs.camRight.parm1, cs.camRight.parm2, cs.camRight.parm3, cs.camRight.parm4, cs.camRight.parm5, cs.camRight.parm6, cs.camRight.parm7, cs.camRight.parm8, cs.camRight.parm9, cs.camRight.parm10, cs.camRight.solid, cs.camRight.dashed, \
cs.camFarLeft.frame, cs.camFarLeft.parm1, cs.camFarLeft.parm2, cs.camFarLeft.parm3, cs.camFarLeft.parm4, cs.camFarLeft.parm5, cs.camFarLeft.parm6, cs.camFarLeft.parm7, cs.camFarLeft.parm8, cs.camFarLeft.parm9, cs.camFarLeft.parm10, cs.camFarLeft.solid, cs.camFarLeft.dashed, \
cs.camFarRight.frame, cs.camFarRight.parm1, cs.camFarRight.parm2, cs.camFarRight.parm3, cs.camFarRight.parm4, cs.camFarRight.parm5, cs.camFarRight.parm6, cs.camFarRight.parm7, cs.camFarRight.parm8, cs.camFarRight.parm9, cs.camFarRight.parm10, cs.camFarRight.solid, cs.camFarRight.dashed, cs.canTime))
elif cs.vEgo > 0:
carStateDataString1 += (carStateFormatString1 % (cs.steeringAngle, cs.steeringRate, cs.steeringTorque, cs.torqueRequest, cs.steeringTorqueEps, cs.yawRateCAN, cs.lateralAccel, cs.longAccel, \
cs.lateralControlState.pidState.p2, cs.lateralControlState.pidState.p, cs.lateralControlState.pidState.i, cs.lateralControlState.pidState.f, \
cs.lateralControlState.pidState.steerAngle, cs.lateralControlState.pidState.steerAngleDes, 1.0 - cs.lateralControlState.pidState.angleFFRatio, cs.lateralControlState.pidState.angleFFRatio, cs.camLeft.frame, cs.camFarRight.frame, cs.canTime))
if socket is gernModelOutputs:
recv_frames += 1
if recv_frames <= 5: sent_frames = recv_frames
output_list = list(socket.recv_json())
model_output = np.asarray(output_list[:-1])
if scaler_padding is None:
column_count = Outputs
row_count = len(model_output)//column_count
scaler_padding = [np.zeros((row_count,Outputs)), np.zeros((row_count,Outputs))]
left_center = np.zeros((row_count,1))
right_center = np.zeros((row_count,1))
calc_center = np.zeros((row_count,1))
left_probs = np.zeros((row_count,1))
right_probs = np.zeros((row_count,1))
angle = np.zeros((row_count,1))
model_output = model_output.reshape(row_count,column_count)
scaler_padding[0] = np.asarray(model_output)
descaled_output = [output_scaler.inverse_transform(scaler_padding[0]), output_scaler.inverse_transform(scaler_padding[1])]
l_prob = l_probs.pop(output_list[-1])
r_prob = r_probs.pop(output_list[-1])
if l_prob < 0 and r_prob > 0 and descaled_output[0][-1:, 1:2] > -descaled_output[0][-1:, 2:3] * 1.2:
l_prob *= 0.2
#print(" Diverging Left", l_prob)
elif r_prob < 0 and l_prob > 0 and descaled_output[0][-1:, 1:2] * 1.2 < -descaled_output[0][-1:,2:3]:
r_prob *= 0.2
#print(" Diverging Right", r_prob)
elif abs(l_prob) > 0 and abs(r_prob) > 0:
if lane_width > 0:
lane_width += 0.01 * (min(700, max(570, l_offset[output_list[-1]] - r_offset[output_list[-1]]) - lane_width))
else:
lane_width = min(700, max(570, l_offset[output_list[-1]] - r_offset[output_list[-1]]) - lane_width)
half_width = lane_width / 2
half_width = min(half_width + 1, max(half_width - 1, lane_width * 0.48))
else:
half_width = min(half_width + 1, max(half_width - 1, lane_width * 0.47))
l_prob = abs(l_prob)
r_prob = abs(r_prob)
l_prob_smooth = l_prob #max(0.05, l_prob_smooth - 0.1, min(l_prob_smooth + 0.1, l_prob))
r_prob_smooth = r_prob #max(0.05, r_prob_smooth - 0.1, min(r_prob_smooth + 0.1, r_prob))
lr_prob = (l_prob_smooth + r_prob_smooth) - l_prob_smooth * r_prob_smooth
a_prob = 1
left_probs[:,0] = l_prob * np.clip(model_output[:,3], 0, 1) + (1 - l_prob) * left_probs[:,0]
right_probs[:,0] = r_prob * np.clip(model_output[:,4], 0, 1) + (1 - r_prob) * right_probs[:,0]
left_center[:,0:] = l_prob * (descaled_output[0][:,1:2] - half_width) + (1 - l_prob) * left_center[:, 0:]
right_center[:,0:] = r_prob * (descaled_output[0][:,2:3] + half_width) + (1 - r_prob) * right_center[:, 0:]
left_center = l_prob_smooth * left_center + (1 - l_prob_smooth) * calc_center
right_center = r_prob_smooth * right_center + (1 - r_prob_smooth) * calc_center
angle = np.clip((descaled_output[0][:,0:1] - descaled_output[0][0,0:1]) * (1 + advanceSteer), angle - 0.25 * cs.vEgo, angle + 0.025 * cs.vEgo)
#angle = np.clip((descaled_output[0][:,0:1] - descaled_output[0][0,0:1]) * (1 + advanceSteer) + descaled_output[0][0,0:1], angle - 1.0, angle + 1.0)
#angle = np.add(descaled_output[0][1:,0], np.multiply(np.diff(descaled_output[0][:,0]), advanceSteer))
calc_center = (l_prob_smooth * left_center + r_prob_smooth * right_center) / (l_prob_smooth + r_prob_smooth + 0.05)
path_send.pathPlan.angleSteers = float(angle[5] + cs.steeringAngle)
path_send.pathPlan.mpcAngles = [float(x) for x in (angle[:] + cs.steeringAngle + lateral_offset)] #angle_steers.pop(output_list[-1]))]
path_send.pathPlan.laneWidth = float(lane_width)
path_send.pathPlan.angleOffset = float(round(angle_offset,1))
path_send.pathPlan.lateralOffset = float(lateral_offset)
path_send.pathPlan.lPoly = [float(x) for x in (left_center[:,0] + half_width)]
path_send.pathPlan.rPoly = [float(x) for x in (right_center[:,0] - half_width)]
path_send.pathPlan.cPoly = [float(x) for x in (calc_center[:,0])]
path_send.pathPlan.lProb = float(l_prob)
path_send.pathPlan.rProb = float(r_prob)
path_send.pathPlan.cProb = float(lr_prob)
path_send.pathPlan.canTime = output_list[-1]
gernPath.send(path_send.to_bytes())
if cs.vEgo >= 0:
pathDataString += pathFormatString1 % tuple([float(x) for x in (left_center[:,0] + half_width)])
pathDataString += pathFormatString2 % tuple([float(x) for x in (right_center[:,0] - half_width)])
pathDataString += pathFormatString3 % tuple([float(x) for x in calc_center[:,0]])
pathDataString += pathFormatString4 % (path_send.pathPlan.mpcAngles[3], path_send.pathPlan.mpcAngles[4], path_send.pathPlan.mpcAngles[5], path_send.pathPlan.mpcAngles[6],
path_send.pathPlan.mpcAngles[10], path_send.pathPlan.lProb, path_send.pathPlan.rProb, path_send.pathPlan.cProb, path_send.pathPlan.laneWidth,
                                      path_send.pathPlan.angleSteers, path_send.pathPlan.rateSteers, angle_offset, lateral_offset, cs.canTime - path_send.pathPlan.canTime, cs.canTime)  # plan_age relative to the current CAN time
path_send = log.Event.new_message()
path_send.init('pathPlan')
if recv_frames % 30 == 0:
#try:
print(' sent: %d dropped: %d backlog: %d half_width: %0.1f center: %0.1f l_prob: %0.2f r_prob: %0.2f advance_steer: %0.2f angle_offset: %0.2f lateral_offset: %0.2f' % (sent_frames, sent_frames - recv_frames, back_log, half_width, calc_center[-1], l_prob, r_prob, advanceSteer, angle_offset, lateral_offset))
if frame_count >= 100 and back_log == 1:
try:
r = requests.post(url_string, data=pathDataString + carStateDataString1 + carStateDataString2)
#print(influxLineString)
if dashboard_count % 3 == 0: print('%d %s' % (frame_count, r))
dashboard_count += 1
except:
r = requests.post(url_string, data='create database carDB')
print(r)
# Send data to influxdb (after converting to Python3.7)
carStateDataString2 = ''
carStateDataString1 = ''
pathDataString = ''
frame_count = 0
# TODO: replace kegman_conf with params!
if recv_frames % 100 == 0 and back_log == 2:
try:
kegman = kegman_conf()
advanceSteer = max(0, float(kegman.conf['advanceSteer']))
#print("advanceSteer = ", advanceSteer)
except:
pass
if recv_frames % 1000 == 2 and back_log == 2:
params.put("LateralParams", json.dumps({'angle_offset': angle_offset, 'lateral_offset': lateral_offset}))
|
import warnings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import F, Q, Sum
from simple_history.models import HistoricalRecords
from usaspending_api.accounts.models import TreasuryAppropriationAccount
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.references.models import (
Agency, CFDAProgram, LegalEntity, Location, ObjectClass, RefProgramActivity)
from usaspending_api.common.models import DataSourceTrackedModel
from django.core.cache import caches, CacheKeyWarning
warnings.simplefilter("ignore", CacheKeyWarning)
class FinancialAccountsByAwards(DataSourceTrackedModel):
financial_accounts_by_awards_id = models.AutoField(primary_key=True)
treasury_account = models.ForeignKey(TreasuryAppropriationAccount, models.CASCADE, null=True)
submission = models.ForeignKey(SubmissionAttributes, models.CASCADE)
award = models.ForeignKey('awards.Award', models.CASCADE, null=True, related_name="financial_set")
program_activity = models.ForeignKey(RefProgramActivity, models.DO_NOTHING, null=True, db_index=True)
object_class = models.ForeignKey(ObjectClass, models.DO_NOTHING, null=True, db_index=True)
piid = models.CharField(max_length=50, blank=True, null=True)
parent_award_id = models.CharField(max_length=50, blank=True, null=True)
fain = models.CharField(max_length=30, blank=True, null=True)
uri = models.CharField(max_length=70, blank=True, null=True)
award_type = models.CharField(max_length=30, blank=True, null=True)
ussgl480100_undelivered_orders_obligations_unpaid_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl480100_undelivered_orders_obligations_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl483100_undelivered_orders_oblig_transferred_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl488100_upward_adjust_pri_undeliv_order_oblig_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl490100_delivered_orders_obligations_unpaid_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl490100_delivered_orders_obligations_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl493100_delivered_orders_oblig_transferred_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl498100_upward_adjust_pri_deliv_orders_oblig_unpaid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl480200_undelivered_orders_oblig_prepaid_advanced_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl480200_undelivered_orders_oblig_prepaid_advanced_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl483200_undeliv_orders_oblig_transferred_prepaid_adv_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl488200_up_adjust_pri_undeliv_order_oblig_ppaid_adv_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl490200_delivered_orders_obligations_paid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl490800_authority_outlayed_not_yet_disbursed_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl490800_authority_outlayed_not_yet_disbursed_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl498200_upward_adjust_pri_deliv_orders_oblig_paid_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
obligations_undelivered_orders_unpaid_total_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
obligations_delivered_orders_unpaid_total_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
obligations_delivered_orders_unpaid_total_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlays_undelivered_orders_prepaid_total_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlays_undelivered_orders_prepaid_total_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlays_delivered_orders_paid_total_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlay_amount_by_award_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlay_amount_by_award_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
obligations_incurred_total_by_award_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl487100_down_adj_pri_unpaid_undel_orders_oblig_recov_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl497100_down_adj_pri_unpaid_deliv_orders_oblig_recov_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
deobligations_recoveries_refunds_of_prior_year_by_award_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
obligations_undelivered_orders_unpaid_total_fyb = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
gross_outlays_delivered_orders_paid_total_cpe = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
drv_award_id_field_type = models.CharField(max_length=10, blank=True, null=True)
drv_obligations_incurred_total_by_award = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
transaction_obligated_amount = models.DecimalField(max_digits=21, decimal_places=2, blank=True, null=True)
reporting_period_start = models.DateField(blank=True, null=True)
reporting_period_end = models.DateField(blank=True, null=True)
last_modified_date = models.DateField(blank=True, null=True)
certified_date = models.DateField(blank=True, null=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
update_date = models.DateTimeField(auto_now=True, null=True)
@staticmethod
def get_default_fields(path=None):
return [
"financial_accounts_by_awards_id",
"award",
"treasury_account",
"transaction_obligated_amount",
"object_class",
"program_activity",
"piid",
"fain",
"uri",
"gross_outlay_amount_by_award_cpe",
"gross_outlay_amount_by_award_fyb",
"certified_date",
"last_modified_date"
]
class Meta:
managed = True
db_table = 'financial_accounts_by_awards'
class AwardManager(models.Manager):
def get_queryset(self):
'''
        An award generated as a placeholder has latest_transaction, date_signed and
        total_obligation set to null and receives no transactions, so those fields
        stay null. This queryset finds such empty awards and throws them out. As soon
        as one of them gets a transaction (i.e. it is no longer empty), the fields are
        updated via update_from_transaction and the award no longer matches these
        criteria.
'''
q_kwargs = {
"latest_transaction__isnull": True,
"date_signed__isnull": True,
"total_obligation__isnull": True
}
return super(AwardManager, self).get_queryset().filter(~Q(**q_kwargs))
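# Usage note (illustrative): `Award.nonempty.all()` goes through this manager and so
# excludes placeholder awards with no transactions, while `Award.objects` returns
# every award.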
awards_cache = caches['awards']
class Award(DataSourceTrackedModel):
"""
Model that provides a high-level award that individual transaction
data can be mapped to. Transactions (i.e., contract and financial assistance
data from the old USAspending site and D1/D2 files from the broker) represent
specific actions against an award, though the award records themselves aren't
assigned on the incoming data. To rectify that and to make the transactional
data easier for people to understand, we create Award objects and map
transactions to them.
Much of the award record data (for example, awarding_agency, funding_agency,
type) is automatically populated from info in the award's child transactions.
These updates happen in our USAspending and data act broker load processes:
see ETL\award_helpers.py for details.
"""
type = models.CharField(max_length=5, db_index=True, verbose_name="Award Type", null=True, help_text=" The mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments.")
type_description = models.TextField(verbose_name="Award Type Description", blank=True, null=True, help_text="The plain text description of the type of the award")
piid = models.CharField(max_length=50, db_index=True, blank=True, null=True, help_text="Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers.")
parent_award = models.ForeignKey('awards.Award', related_name='child_award', null=True, help_text="The parent award, if applicable")
    fain = models.CharField(max_length=30, db_index=True, blank=True, null=True, help_text="An identification code assigned to each financial assistance award for tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.")
uri = models.CharField(max_length=70, db_index=True, blank=True, null=True, help_text="The uri of the award")
total_obligation = models.DecimalField(max_digits=15, db_index=True, decimal_places=2, null=True, verbose_name="Total Obligated", help_text="The amount of money the government is obligated to pay for the award")
total_outlay = models.DecimalField(max_digits=15, db_index=True, decimal_places=2, null=True, help_text="The total amount of money paid out for this award")
awarding_agency = models.ForeignKey(Agency, related_name='+', null=True, help_text="The awarding agency for the award")
funding_agency = models.ForeignKey(Agency, related_name='+', null=True, help_text="The funding agency for the award")
date_signed = models.DateField(null=True, db_index=True, verbose_name="Award Date", help_text="The date the award was signed")
recipient = models.ForeignKey(LegalEntity, null=True, help_text="The recipient of the award")
description = models.CharField(max_length=4000, null=True, verbose_name="Award Description", help_text="A description of the award")
period_of_performance_start_date = models.DateField(null=True, db_index=True, verbose_name="Start Date", help_text="The start date for the period of performance")
period_of_performance_current_end_date = models.DateField(null=True, db_index=True, verbose_name="End Date", help_text="The current, not original, period of performance end date")
place_of_performance = models.ForeignKey(Location, null=True, help_text="The principal place of business, where the majority of the work is performed. For example, in a manufacturing contract, this would be the main plant where items are produced.")
potential_total_value_of_award = models.DecimalField(max_digits=20, db_index=True, decimal_places=2, blank=True, null=True, verbose_name="Potential Total Value of Award", help_text="The sum of the potential_value_of_award from associated transactions")
last_modified_date = models.DateField(blank=True, null=True, help_text="The date this award was last modified")
certified_date = models.DateField(blank=True, null=True, help_text="The date this record was certified")
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, help_text="The date this record was created in the API")
update_date = models.DateTimeField(auto_now=True, null=True, help_text="The last time this record was updated in the API")
latest_transaction = models.ForeignKey("awards.Transaction", related_name="latest_for_award", null=True, help_text="The latest transaction by action_date associated with this award")
# Subaward aggregates
total_subaward_amount = models.DecimalField(max_digits=20, decimal_places=2, null=True)
subaward_count = models.IntegerField(default=0)
objects = models.Manager()
nonempty = AwardManager()
def manual_hash(self):
"""Used to manually establish equality between instances.
Useful for unsaved records where `.id` is not yet set.
Possibly this could be converted to __hash__"""
return hash((self.piid, self.fain, self.uri,
(self.parent_award and
(self.parent_award.piid,
self.parent_award.fain,
self.parent_award.uri))))
@staticmethod
def get_default_fields(path=None):
return [
"id",
"type",
"type_description",
"total_obligation",
"total_outlay",
"date_signed",
"description",
"piid",
"fain",
"uri",
"period_of_performance_start_date",
"period_of_performance_current_end_date",
"potential_total_value_of_award",
"place_of_performance",
"awarding_agency",
"funding_agency",
"recipient",
"date_signed__fy",
"subaward_count",
"total_subaward_amount"
]
def __str__(self):
return '%s piid: %s fain: %s uri: %s' % (self.type_description, self.piid, self.fain, self.uri)
@staticmethod
def get_or_create_summary_award(awarding_agency=None, piid=None, fain=None,
uri=None, parent_award_id=None, use_cache=False):
"""
Given a set of award identifiers and awarding agency information,
find a corresponding Award record. If we can't find one, create it.
Returns:
created: a list of new awards created (or that need to be created
if using cache), used to enable bulk insert
summary_award: the summary award that the calling process can map to
"""
# If an award transaction's ID is a piid, it's contract data
# If the ID is fain or a uri, it's financial assistance. If the award transaction
# has both a fain and a uri, fain takes precedence.
q_kwargs = {}
for i in [(piid, "piid"), (fain, "fain"), (uri, "uri")]:
if i[0]:
q_kwargs[i[1]] = i[0]
if parent_award_id:
q_kwargs["parent_award__" + i[1]] = parent_award_id
# parent_award__piid, parent_award__fain, parent_award__uri
else:
q_kwargs["parent_award"] = None
# Now search for it
        # Do we want to log something if the query below turns up
# more than one award record?
if use_cache:
q_kwargs_fixed = list(q_kwargs.items()) + [('awarding_agency', awarding_agency), ]
q_kwargs_fixed.sort()
summary_award = awards_cache.get(q_kwargs_fixed)
if summary_award:
return [], summary_award
# Look for an existing award record
summary_award = Award.objects \
.filter(Q(**q_kwargs)) \
.filter(awarding_agency=awarding_agency) \
.first()
if (summary_award is None and
awarding_agency is not None and
awarding_agency.toptier_agency.name != awarding_agency.subtier_agency.name):
# No award match found when searching by award id info +
# awarding subtier agency. Relax the awarding agency
            # criteria to just the toptier agency instead of the subtier
# agency and try the search again.
awarding_agency_toptier = Agency.get_by_toptier(
awarding_agency.toptier_agency.cgac_code)
summary_award = Award.objects \
.filter(Q(**q_kwargs)) \
.filter(awarding_agency=awarding_agency_toptier) \
.first()
if summary_award:
if use_cache:
awards_cache.set(q_kwargs_fixed, summary_award)
return [], summary_award
# We weren't able to match, so create a new award record.
if parent_award_id:
# If parent award id was supplied, recursively get/create
# an award record for it
parent_created, parent_award = Award.get_or_create_summary_award(
use_cache=use_cache,
**{i[1]: parent_award_id, 'awarding_agency': awarding_agency})
else:
parent_created, parent_award = [], None
# Now create the award record for this award transaction
summary_award = Award(**{
i[1]: i[0],
"parent_award": parent_award,
"awarding_agency": awarding_agency})
created = [summary_award, ]
created.extend(parent_created)
if use_cache:
awards_cache.set(q_kwargs_fixed, summary_award)
else:
summary_award.save()
return created, summary_award
raise ValueError(
'Unable to find or create an award with the provided information: '
'piid={}, fain={}, uri={}, parent_id={}, awarding_agency={}'.format(
piid, fain, uri, parent_award_id, awarding_agency))
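    # Illustrative call (identifiers made up): a bulk loader might run
    #     created, award = Award.get_or_create_summary_award(
    #         awarding_agency=agency, piid='ABC123', parent_award_id='IDV0001',
    #         use_cache=True)
    # accumulate `created` across rows and bulk-insert the new Award records
    # afterwards, as the docstring above describes.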
class Meta:
db_table = 'awards'
class TransactionAgeComparisonMixin:
def newer_than(self, dct):
"""Compares age of this instance to a Python dictionary
Determines the age of each by last_modified_date, if set,
        otherwise by the submission's certified_date.
Returns `False` if either side lacks a date completely.
"""
my_date = self.last_modified_date or self.submission.certified_date
their_date = dct.get('last_modified_date') or dct.get('submission').certified_date
if my_date and their_date:
return my_date > their_date
else:
return False
class Transaction(DataSourceTrackedModel, TransactionAgeComparisonMixin):
award = models.ForeignKey(Award, models.CASCADE, help_text="The award which this transaction is contained in")
usaspending_unique_transaction_id = models.CharField(max_length=256, blank=True, null=True, help_text="If this record is legacy USASpending data, this is the unique transaction identifier from that system")
submission = models.ForeignKey(SubmissionAttributes, models.CASCADE, help_text="The submission which created this record")
type = models.CharField(max_length=5, verbose_name="Action Type", null=True, help_text="The type for this transaction. For example, A, B, C, D")
type_description = models.TextField(blank=True, verbose_name="Action Type Description", null=True, help_text="The plain text description of the transaction type")
period_of_performance_start_date = models.DateField(max_length=10, verbose_name="Period of Performance Start Date", null=True, help_text="The period of performance start date")
period_of_performance_current_end_date = models.DateField(max_length=10, verbose_name="Period of Performance Current End Date", null=True, help_text="The current end date of the period of performance")
action_date = models.DateField(max_length=10, verbose_name="Transaction Date", help_text="The date this transaction was actioned")
action_type = models.CharField(max_length=1, blank=True, null=True, help_text="The type of transaction. For example, A, B, C, D")
action_type_description = models.TextField(blank=True, null=True)
federal_action_obligation = models.DecimalField(max_digits=20, db_index=True, decimal_places=2, blank=True, null=True, help_text="The obligation of the federal government for this transaction")
modification_number = models.CharField(max_length=50, blank=True, null=True, verbose_name="Modification Number", help_text="The modification number for this transaction")
awarding_agency = models.ForeignKey(Agency, related_name='%(app_label)s_%(class)s_awarding_agency', null=True, help_text="The agency which awarded this transaction")
funding_agency = models.ForeignKey(Agency, related_name='%(app_label)s_%(class)s_funding_agency', null=True, help_text="The agency which is funding this transaction")
recipient = models.ForeignKey(LegalEntity, null=True, help_text="The recipient for this transaction")
description = models.CharField(max_length=4000, null=True, help_text="The description of this transaction")
place_of_performance = models.ForeignKey(Location, null=True, help_text="The location where the work on this transaction was performed")
drv_award_transaction_usaspend = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_current_total_award_value_amount_adjustment = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_potential_total_award_value_amount_adjustment = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
last_modified_date = models.DateField(blank=True, null=True, help_text="The date this transaction was last modified")
certified_date = models.DateField(blank=True, null=True, help_text="The date this transaction was certified")
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, help_text="The date this transaction was created in the API")
update_date = models.DateTimeField(auto_now=True, null=True, help_text="The last time this transaction was updated in the API")
history = HistoricalRecords()
def __str__(self):
return '%s award: %s' % (self.type_description, self.award)
@staticmethod
def get_default_fields(path=None):
return [
"id",
"type",
"type_description",
"period_of_performance_start_date",
"period_of_performance_current_end_date",
"action_date",
"action_type",
"action_type_description",
"action_date__fy",
"federal_action_obligation",
"modification_number",
"awarding_agency",
"funding_agency",
"recipient",
"description",
"place_of_performance",
"contract_data", # must match related_name in TransactionContract
"assistance_data" # must match related_name in TransactionAssistance
]
@classmethod
def get_or_create_transaction(cls, **kwargs):
"""Gets and updates, or creates, a Transaction
Transactions must be unique on Award, Awarding Agency, and Mod Number
"""
transaction = cls.objects.filter(
award=kwargs.get('award'),
modification_number=kwargs.get('modification_number')
).order_by('-update_date').first()
if transaction:
if not transaction.newer_than(kwargs):
for (k, v) in kwargs.items():
setattr(transaction, k, v)
return transaction
return cls(**kwargs)
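    # Illustrative use during a load (field values made up): callers pass the full
    # set of transaction attributes and save the result themselves, e.g.
    #     txn = Transaction.get_or_create_transaction(
    #         award=award, submission=submission, modification_number='P00003',
    #         action_date='2016-09-30', awarding_agency=agency)
    #     txn.save()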
class Meta:
db_table = 'transaction'
index_together = ['award', 'action_date']
class TransactionContract(DataSourceTrackedModel):
transaction = models.OneToOneField(
Transaction, on_delete=models.CASCADE,
primary_key=True, related_name='contract_data', help_text="Non-specific transaction data, fields shared among both assistance and contract transactions")
submission = models.ForeignKey(SubmissionAttributes, models.CASCADE)
piid = models.CharField(max_length=50, blank=True, help_text="The PIID of this transaction")
parent_award_id = models.CharField(max_length=50, blank=True, null=True, verbose_name="Parent Award ID", help_text="The parent award id for this transaction. This is generally the piid of an IDV")
cost_or_pricing_data = models.CharField(max_length=1, blank=True, null=True, help_text="")
cost_or_pricing_data_description = models.TextField(blank=True, null=True)
type_of_contract_pricing = models.CharField(max_length=2, default="UN", blank=True, null=True, verbose_name="Type of Contract Pricing", help_text="The type of contract pricing data, as a code")
type_of_contract_pricing_description = models.TextField(blank=True, null=True, verbose_name="Type of Contract Pricing Description", help_text="A plain text description of the type of contract pricing data")
    naics = models.CharField(max_length=6, blank=True, null=True, verbose_name="NAICS", help_text="Specifies which industry the work for this transaction falls into. A 6-digit code")
naics_description = models.CharField(max_length=150, blank=True, null=True, verbose_name="NAICS Description", help_text="A plain text description of the NAICS code")
period_of_performance_potential_end_date = models.DateField(max_length=10, verbose_name="Period of Performance Potential End Date", null=True, help_text="The potential end date of the period of performance")
ordering_period_end_date = models.CharField(max_length=8, blank=True, null=True, help_text="The end date for the ordering period")
current_total_value_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True, help_text="The current value of the award")
potential_total_value_of_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True, verbose_name="Potential Total Value of Award", help_text="The potential total value of the award")
referenced_idv_agency_identifier = models.CharField(max_length=4, blank=True, null=True, help_text="The agency identifier of the agency on the IDV")
idv_type = models.CharField(max_length=1, blank=True, null=True, verbose_name="IDV Type", help_text="The IDV type code")
idv_type_description = models.TextField(null=True, blank=True)
    multiple_or_single_award_idv = models.CharField(max_length=1, blank=True, null=True, help_text="Specifies whether the IDV is a single or multiple award vehicle")
multiple_or_single_award_idv_description = models.TextField(null=True, blank=True)
type_of_idc = models.CharField(max_length=1, blank=True, null=True, verbose_name="Type of IDC", help_text="Code representing the type of IDC")
type_of_idc_description = models.TextField(null=True, blank=True)
a76_fair_act_action = models.CharField(max_length=1, blank=True, null=True, verbose_name="A-76 FAIR Act Action", help_text="A-76 FAIR act action")
dod_claimant_program_code = models.CharField(max_length=3, blank=True, null=True)
clinger_cohen_act_planning = models.CharField(max_length=1, blank=True, null=True)
commercial_item_acquisition_procedures = models.CharField(max_length=1, blank=True, null=True)
commercial_item_acquisition_procedures_description = models.TextField(blank=True, null=True)
commercial_item_test_program = models.CharField(max_length=1, blank=True, null=True)
consolidated_contract = models.CharField(max_length=1, blank=True, null=True)
contingency_humanitarian_or_peacekeeping_operation = models.CharField(max_length=1, blank=True, null=True)
contingency_humanitarian_or_peacekeeping_operation_description = models.TextField(blank=True, null=True)
contract_bundling = models.CharField(max_length=1, blank=True, null=True)
contract_bundling_description = models.TextField(blank=True, null=True)
contract_financing = models.CharField(max_length=1, blank=True, null=True)
contract_financing_description = models.TextField(blank=True, null=True)
contracting_officers_determination_of_business_size = models.CharField(max_length=1, blank=True, null=True)
cost_accounting_standards = models.CharField(max_length=1, blank=True, null=True)
cost_accounting_standards_description = models.TextField(blank=True, null=True)
country_of_product_or_service_origin = models.CharField(max_length=3, blank=True, null=True)
davis_bacon_act = models.CharField(max_length=1, blank=True, null=True)
davis_bacon_act_description = models.TextField(null=True, blank=True)
evaluated_preference = models.CharField(max_length=6, blank=True, null=True)
evaluated_preference_description = models.TextField(null=True, blank=True)
extent_competed = models.CharField(max_length=3, blank=True, null=True)
extent_competed_description = models.TextField(null=True, blank=True)
fed_biz_opps = models.CharField(max_length=1, blank=True, null=True)
fed_biz_opps_description = models.TextField(null=True, blank=True)
foreign_funding = models.CharField(max_length=1, blank=True, null=True)
foreign_funding_description = models.TextField(null=True, blank=True)
gfe_gfp = models.CharField(max_length=1, blank=True, null=True)
information_technology_commercial_item_category = models.CharField(max_length=1, blank=True, null=True)
information_technology_commercial_item_category_description = models.TextField(null=True, blank=True)
interagency_contracting_authority = models.CharField(max_length=1, blank=True, null=True)
interagency_contracting_authority_description = models.TextField(null=True, blank=True)
local_area_set_aside = models.CharField(max_length=1, blank=True, null=True)
major_program = models.CharField(max_length=100, blank=True, null=True)
purchase_card_as_payment_method = models.CharField(max_length=1, blank=True, null=True)
multi_year_contract = models.CharField(max_length=1, blank=True, null=True)
national_interest_action = models.CharField(max_length=20, blank=True, null=True)
national_interest_action_description = models.TextField(null=True, blank=True)
number_of_actions = models.CharField(max_length=6, blank=True, null=True)
number_of_offers_received = models.CharField(max_length=3, blank=True, null=True)
other_statutory_authority = models.CharField(max_length=1000, blank=True, null=True)
performance_based_service_acquisition = models.CharField(max_length=1, blank=True, null=True)
performance_based_service_acquisition_description = models.TextField(null=True, blank=True)
place_of_manufacture = models.CharField(max_length=1, blank=True, null=True)
place_of_manufacture_description = models.TextField(null=True, blank=True)
price_evaluation_adjustment_preference_percent_difference = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)
product_or_service_code = models.CharField(max_length=4, blank=True, null=True)
program_acronym = models.CharField(max_length=25, blank=True, null=True)
other_than_full_and_open_competition = models.CharField(max_length=3, blank=True, null=True)
recovered_materials_sustainability = models.CharField(max_length=1, blank=True, null=True)
recovered_materials_sustainability_description = models.TextField(null=True, blank=True)
research = models.CharField(max_length=3, blank=True, null=True)
research_description = models.TextField(null=True, blank=True)
sea_transportation = models.CharField(max_length=1, blank=True, null=True)
sea_transportation_description = models.TextField(null=True, blank=True)
service_contract_act = models.CharField(max_length=1, blank=True, null=True)
service_contract_act_description = models.TextField(null=True, blank=True)
small_business_competitiveness_demonstration_program = models.CharField(max_length=1, blank=True, null=True)
solicitation_identifier = models.CharField(max_length=25, blank=True, null=True, verbose_name="Solicitation ID")
solicitation_procedures = models.CharField(max_length=5, blank=True, null=True)
solicitation_procedures_description = models.TextField(null=True, blank=True)
fair_opportunity_limited_sources = models.CharField(max_length=50, blank=True, null=True)
fair_opportunity_limited_sources_description = models.TextField(null=True, blank=True)
subcontracting_plan = models.CharField(max_length=1, blank=True, null=True)
subcontracting_plan_description = models.TextField(null=True, blank=True)
program_system_or_equipment_code = models.CharField(max_length=4, blank=True, null=True)
type_set_aside = models.CharField(max_length=10, blank=True, null=True, verbose_name="Type Set Aside")
type_set_aside_description = models.TextField(null=True, blank=True)
epa_designated_product = models.CharField(max_length=1, blank=True, null=True)
epa_designated_product_description = models.TextField(null=True, blank=True)
walsh_healey_act = models.CharField(max_length=1, blank=True, null=True, help_text="Denotes whether this transaction is subject to the Walsh-Healey act")
transaction_number = models.CharField(max_length=6, blank=True, null=True, help_text="The transaction number for this transaction")
referenced_idv_modification_number = models.CharField(max_length=25, blank=True, null=True, help_text="The modification number for the referenced IDV")
rec_flag = models.CharField(max_length=1, blank=True, null=True, help_text="The rec flag")
drv_parent_award_awarding_agency_code = models.CharField(max_length=4, blank=True, null=True)
drv_current_aggregated_total_value_of_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_current_total_value_of_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_potential_award_idv_amount_total_estimate = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_potential_aggregated_award_idv_amount_total_estimate = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_potential_aggregated_total_value_of_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_potential_total_value_of_award = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, help_text="The date this record was created in the API")
update_date = models.DateTimeField(auto_now=True, null=True, help_text="The last time this record was updated in the API")
last_modified_date = models.DateField(blank=True, null=True, help_text="The last time this transaction was modified")
certified_date = models.DateField(blank=True, null=True, help_text="The date this record was certified")
reporting_period_start = models.DateField(blank=True, null=True, help_text="The date marking the start of the reporting period")
reporting_period_end = models.DateField(blank=True, null=True, help_text="The date marking the end of the reporting period")
history = HistoricalRecords()
@staticmethod
def get_default_fields(path=None):
return [
"piid",
"parent_award_id",
"type",
"type_description",
"cost_or_pricing_data",
"type_of_contract_pricing",
"type_of_contract_pricing_description",
"naics",
"naics_description",
"product_or_service_code"
]
@classmethod
def get_or_create(cls, transaction, **kwargs):
try:
if not transaction.newer_than(kwargs):
for (k, v) in kwargs.items():
setattr(transaction.contract_data, k, v)
except ObjectDoesNotExist:
transaction.contract_data = cls(**kwargs)
return transaction.contract_data
class Meta:
db_table = 'transaction_contract'
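# Usage sketch (illustrative; not part of the original module). A broker-submitted
# contract record can be merged into its transaction via get_or_create, which only
# overwrites the existing contract_data when the incoming record is not older than
# what is already stored. Field values below are hypothetical:
#
#     contract_data = TransactionContract.get_or_create(
#         transaction, piid='HYPOTHETICAL-PIID', type='A')
#     contract_data.save()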
class TransactionAssistance(DataSourceTrackedModel):
transaction = models.OneToOneField(
Transaction, on_delete=models.CASCADE,
primary_key=True, related_name='assistance_data')
submission = models.ForeignKey(SubmissionAttributes, models.CASCADE)
fain = models.CharField(max_length=30, blank=True, null=True)
uri = models.CharField(max_length=70, blank=True, null=True)
cfda_number = models.CharField(max_length=7, blank=True, null=True, verbose_name="CFDA Number")
cfda_title = models.CharField(max_length=250, blank=True, null=True, verbose_name="CFDA Title")
cfda = models.ForeignKey(CFDAProgram, models.DO_NOTHING, null=True)
business_funds_indicator = models.CharField(max_length=3)
business_funds_indicator_description = models.TextField(blank=True, null=True)
non_federal_funding_amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
total_funding_amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
face_value_loan_guarantee = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
original_loan_subsidy_cost = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
record_type = models.IntegerField()
record_type_description = models.TextField(null=True, blank=True)
correction_late_delete_indicator = models.CharField(max_length=1, blank=True, null=True)
correction_late_delete_indicator_description = models.TextField(blank=True, null=True)
fiscal_year_and_quarter_correction = models.CharField(max_length=5, blank=True, null=True)
sai_number = models.CharField(max_length=50, blank=True, null=True, verbose_name="SAI Number")
drv_federal_funding_amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
drv_award_finance_assistance_type_label = models.CharField(max_length=50, blank=True, null=True)
reporting_period_start = models.DateField(blank=True, null=True)
reporting_period_end = models.DateField(blank=True, null=True)
last_modified_date = models.DateField(blank=True, null=True)
submitted_type = models.CharField(max_length=1, blank=True, null=True, verbose_name="Submitted Type")
certified_date = models.DateField(blank=True, null=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
update_date = models.DateTimeField(auto_now=True, null=True)
period_of_performance_start_date = models.DateField(blank=True, null=True)
period_of_performance_current_end_date = models.DateField(blank=True, null=True)
history = HistoricalRecords()
@staticmethod
def get_default_fields(path=None):
return [
"fain",
"uri",
"cfda",
"cfda_number",
"cfda_title",
"face_value_loan_guarantee",
"original_loan_subsidy_cost",
"type"
]
@classmethod
def get_or_create(cls, transaction, **kwargs):
try:
if not transaction.newer_than(kwargs):
for (k, v) in kwargs.items():
setattr(transaction.assistance_data, k, v)
except ObjectDoesNotExist:
transaction.assistance_data = cls(**kwargs)
return transaction.assistance_data
class Meta:
db_table = 'transaction_assistance'
class Subaward(DataSourceTrackedModel):
# Foreign keys
award = models.ForeignKey(Award, models.CASCADE, related_name="subawards")
recipient = models.ForeignKey(LegalEntity, models.DO_NOTHING)
submission = models.ForeignKey(SubmissionAttributes, models.CASCADE)
cfda = models.ForeignKey(CFDAProgram, models.DO_NOTHING, null=True)
awarding_agency = models.ForeignKey(Agency, models.DO_NOTHING, related_name="awarding_subawards", null=True)
funding_agency = models.ForeignKey(Agency, models.DO_NOTHING, related_name="funding_subawards", null=True)
place_of_performance = models.ForeignKey(Location, models.DO_NOTHING, null=True)
subaward_number = models.TextField(db_index=True)
amount = models.DecimalField(max_digits=20, decimal_places=2)
description = models.TextField(null=True, blank=True)
recovery_model_question1 = models.TextField(null=True, blank=True)
recovery_model_question2 = models.TextField(null=True, blank=True)
action_date = models.DateField(blank=True, null=True)
award_report_fy_month = models.IntegerField()
award_report_fy_year = models.IntegerField()
    naics = models.TextField(blank=True, null=True, verbose_name="NAICS", help_text="Specifies which industry the work for this transaction falls into. A 6-digit code")
naics_description = models.TextField(blank=True, null=True, verbose_name="NAICS Description", help_text="A plain text description of the NAICS code")
class Meta:
managed = True
        unique_together = (('subaward_number', 'award'),)
|
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""An interface for handling natural language descriptions for TF-Coder."""
import abc
import re
from typing import Dict, List, Optional, Text
import six
from tf_coder.value_search import all_operations
from tf_coder.value_search import operation_base
from tf_coder.value_search import value_search_settings as settings_module
@six.add_metaclass(abc.ABCMeta)
class DescriptionHandler(object):
"""Handles a natural language description of a task.
Attributes:
operations: A list of operations that the handler knows about.
all_names: A list of operation names, in the same order as the `operations`
list.
"""
def __init__(self,
operations: Optional[List[operation_base.Operation]] = None):
"""Initializes the handler.
Args:
operations: A list of operations that the scorer should handle. Exposed
for testing.
Raises:
ValueError: If there are duplicate operation names.
"""
self.operations = (
operations if operations
else all_operations.get_operations(include_sparse_operations=True))
self.all_names = [operation.name for operation in self.operations]
if len(set(self.all_names)) != len(self.operations):
raise ValueError('Duplicate operation name.')
@abc.abstractmethod
def get_operation_multipliers(
self,
description: Text,
settings: settings_module.Settings) -> Dict[Text, float]:
"""Returns a map from operation names to their weight multiplier.
The weight multiplier should be between 0 and 1 if the operation should be
prioritized, or greater than 1 if it should be deprioritized.
Args:
description: The natural language description of a TF-Coder task, provided
by the user.
settings: A Settings object storing settings for this search.
Returns:
A map from operation name to weight multiplier, such that the operation
with that name should have its weight modified by that multiplier. If the
dict does not contain a key, it means the weight should not be modified
(equivalent to a multiplier of 1).
"""
def __repr__(self) -> Text:
"""Returns a string containing details about this handler and parameters."""
return self.__class__.__name__
class NoChangeDescriptionHandler(DescriptionHandler):
"""A description handler that does not change any operation weights."""
def get_operation_multipliers(
self,
description: Text,
settings: settings_module.Settings) -> Dict[Text, float]:
"""See base class."""
return {}
class FunctionNameDescriptionHandler(DescriptionHandler):
"""Prioritizes functions with names that appear in the docstring."""
def __init__(self,
operations: Optional[List[operation_base.Operation]] = None,
multiplier: float = 0.75):
"""Creates a FunctionNameDescriptionHandler.
Args:
operations: A list of operations that the scorer should handle. Exposed
for testing.
multiplier: The multiplier applied to an operation's weight if it is
prioritized.
"""
super(FunctionNameDescriptionHandler, self).__init__(operations)
self.multiplier = multiplier
def get_operation_multipliers(
self,
description: Text,
settings: settings_module.Settings) -> Dict[Text, float]:
"""See base class."""
description = description.lower()
multipliers = {}
for name in self.all_names:
if name.startswith('tf.') and '(' in name:
function_name = name[len('tf.') : name.index('(')].lower()
function_name_parts = re.split(r'[._]', function_name)
if all(part in description for part in function_name_parts):
if settings.printing.prioritized_operations:
print('FunctionNameDescriptionHandler prioritized {}'.format(name))
multipliers[name] = self.multiplier
return multipliers
def __repr__(self) -> Text:
"""See base class."""
return '{}(multiplier={})'.format(self.__class__.__name__, self.multiplier)
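# --- Usage sketch (illustrative; not part of the original module) ---
# Shows how a handler's multipliers might be folded into operation weights during
# value search. `base_weights` and `settings` are hypothetical stand-ins for what
# the surrounding search loop would supply.
def _example_apply_multipliers(handler, description, settings, base_weights):
  """Returns base_weights scaled by the handler's multipliers (default 1.0)."""
  multipliers = handler.get_operation_multipliers(description, settings)
  return {name: weight * multipliers.get(name, 1.0)
          for name, weight in base_weights.items()}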
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
from torch import Tensor, vmap
import functools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import TEST_WITH_ROCM
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as the return"):
output = vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> for return 1"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs_error_cases(self):
# This is the same thing as
# def returns_tuple_of_tensors(x):
# return x, x
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
# should not throw
vmap(returns_tuple_of_tensors)(x)
# jax supports these, but we don't yet
msg = "must only return Tensors, got type <class 'list'>"
with self.assertRaisesRegex(ValueError, msg):
vmap(returns_list_of_two_tensors)(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
# Unsupported view op
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(RuntimeError, "doesn't work on in-place or view ops"):
vmap(torch.as_strided, (0, None, None))(tensor, [2, 3], [0, 0])
# The fallback doesn't support TensorList
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(lambda t: torch.stack([t]))(tensor)
# Don't support non-tensor returns. This is a limitation of vmap;
# functions that don't return tensors must be special cased
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.Tensor.item)(tensor)
def test_unsupported_inplace_op_err_msg(self):
def foo(x):
return x.cos_()
x = torch.randn(3)
with self.assertRaisesRegex(
RuntimeError, 'Batching rule not implemented'):
vmap(foo)(x)
def test_nonzero_out_dims(self):
# Basic test
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# Test that the batch dimension gets permuted to dim 2
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# negative out_dim
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# check that out_dims works on ALL outputs
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
# use out_dims with the maximum vmap-able tensor dims (64 dims)
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
# test something that is not the identity function
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
# Inner vmap has non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
# all vmaps have non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
# throwing in some negative out_dims
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
# testing fn that isn't the identity
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
# Test that we accept out_dims=(1,) for a function with one output.
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_out_dims_must_be_int_or_tuple_of_int_err_msg(self):
msg = '`out_dims` must be an int or a tuple of int'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = '`out_dims` must have one dim per output'
x = torch.randn(2, 3, 5)
# Too many out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
# Too few out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
# TODO(rzou): This error message isn't that great. It comes straight
# from maybe_wrap_dim. Consider doing a try-catch-(add some context) to
# the error message in the future in C++
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
# Implicit out_dims = 0; vmap will move the batch dim to the front.
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# None in_dim for a Tensor means we don't map over it
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
# None in_dim for non-tensor arguments
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
# Same in_dim as out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
# Different in_dim from out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
# Same in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
# Different in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
# Basic nested test.
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = 'expected `in_dims` to be int or tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected one `in_dim` per input \(got \w+ inputs\)'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_in_dims_must_be_flat_tuple_err_msg(self):
msg = 'in_dims must be a flat tuple containing ints and/or Nones'
x = torch.randn(3)
y = torch.randn(3)
z = torch.randn(3)
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
# NB: jax supports all of the following, we don't yet.
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, ((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, msg):
vmap(bar, (0, (0, 0)))(x, (y, z))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, ({0: 0, 1: 0},))({0: x, 1: y})
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
y = torch.randn(2, 3)
        # jax supports these; we may support them in the future.
msg = 'Got in_dim=0 for input 0, but input 0 is not a Tensor'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)((x, y))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, (0,))((x, y))
        # jax also supports these; we may support them in the future.
msg = 'Got in_dim=0 for input 1, but input 1 is not a Tensor'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(x, (x, y))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, (0, 0))(x, (x, y))
# the following are errors in jax (and will always be errors)
msg = 'Got in_dim=0 for input 1, but input 1 is not a Tensor'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
# The following should not throw
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
msg = r'Got in_dim=-?\w for input 0, but input 0 is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-1,))(torch.randn(2, 3))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(torch.randn(2, 3))
# the following should not throw
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
with warnings.catch_warnings(record=True) as wa:
result = vmap(*vmap_args)(*inputs)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message),
r'falling back to slow \(for loop and stack\) implementation')
def test_fallback_atan2(self):
# NB: One day we will implement a batching rule for torch.atan2.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
        # fallback correctness on torch.atan2
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
        # fallback on torch.atan2, nested vmap
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
# big batch size (total 10000)
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
    def test_fallback_index_add(self):
        # NB: One day we will implement a batching rule for torch.index_add,
        # which this test exercises. If/when we do, this test should be replaced
        # to test the fallback path on another operator to avoid bitrot.
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
# NB: One day we will implement a batching rule for torch.var_mean
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
# fallback correctness on torch.var_mean
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
# nested vmap
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
# big batch size, nested vmap
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside torch.vmap'
def backward_on_vmapped_tensor(x):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def slice_inputs(inputs, bdims, i):
result = []
for inp, bdim in zip(inputs, bdims):
if bdim is None:
result.append(inp)
else:
result.append(inp.select(bdim, i))
return tuple(result)
def reference_vmap(op, inputs, in_dims=0, out_dims=0):
if isinstance(in_dims, int):
in_dims = (in_dims,) * len(inputs)
bdim_sizes = [inp.size(dim) for inp, dim in zip(inputs, in_dims) if dim is not None]
assert all(bdim_size == bdim_sizes[0] for bdim_size in bdim_sizes)
bdim_size = bdim_sizes[0]
results = tuple(op(*slice_inputs(inputs, in_dims, i)) for i in range(bdim_size))
assert len(results) > 0
op_has_single_return = not isinstance(results[0], tuple)
if op_has_single_return:
assert all(isinstance(result, torch.Tensor) for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * 1
return torch.stack(results, dim=out_dims[0])
assert all(isinstance(result, tuple) for result in results)
num_returns = len(results[0])
assert all(len(result) == num_returns for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * num_returns
return tuple(torch.stack(result_shards, out_dim)
for result_shards, out_dim in zip(zip(*results), out_dims))
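# Illustrative sanity sketch (not part of the original test file): for a simple
# elementwise op, `reference_vmap` should agree with `torch.vmap`.
def _reference_vmap_example():
    xs, ys = torch.randn(4, 3), torch.randn(4, 3)
    assert torch.allclose(vmap(torch.mul)(xs, ys),
                          reference_vmap(torch.mul, (xs, ys)))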
class TensorFactory:
@staticmethod
def rand(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype)
@staticmethod
def randn(size, device='cpu', dtype=torch.float):
return torch.randn(size, device=device, dtype=dtype)
@staticmethod
def randp1(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype) + 1
# Tests vmap(op, in_dims, out_dims)(*inputs) by comparing the output to a
# (slow) sequential map+stack fallback.
#
# check_view: Test if the first returned output is a view of the first input
# check_propagates_grad: Test if the operation propagates gradients.
def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
check_view=False, check_propagates_grad=True):
result = vmap(op, in_dims, out_dims)(*inputs)
reference_result = reference_vmap(op, inputs, in_dims, out_dims)
self.assertEqual(result, reference_result)
op_has_single_return = not isinstance(result, tuple)
if check_view:
result_as_tuple = (result,) if op_has_single_return else result
for output in result_as_tuple:
input0_base = inputs[0] if inputs[0]._base is None else inputs[0]._base
self.assertTrue(output._base is input0_base,
msg="result was not a view of the first input!")
if not check_propagates_grad:
return
# Assuming input[0] is a floating-point tensor. Check if the vmap
# operation propagates the requires_grad flag to the zeroth output.
# Some vmap operators are implemented in a way that assumes that
# they are composite with respect to autograd. If the operator ever is
# changed to not be composite with respect to autograd, then the
# following check should fail.
inputs_clone = list(inputs)
inputs_clone[0] = inputs[0].clone().requires_grad_()
result = vmap(op, in_dims, out_dims)(*inputs_clone)
result_as_tuple = (result,) if op_has_single_return else result
self.assertTrue(result[0].requires_grad)
class TestVmapOperators(TestCase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _assert_doesnt_use_vmap_fallback(self, vmap_args, inputs):
regex = r'falling back to slow \(for loop and stack\) implementation'
with warnings.catch_warnings(record=True) as wa:
result = vmap(*vmap_args)(*inputs)
for captured_warning in wa:
self.assertNotRegex(str(captured_warning.message), regex)
def test_assert_doesnt_use_vmap_fallback(self):
with self.assertRaises(AssertionError):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
self._assert_doesnt_use_vmap_fallback([torch.var_mean], [torch.rand(3)])
def _test_unary(self, op, getter, device):
test = self._vmap_test
B0, B1 = 7, 11
self._assert_doesnt_use_vmap_fallback([op], [getter([B0], device)])
# Single vmap, various in_dims / out_dims
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
def test_unary_pointwise_ops(self):
cases = [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
]
for op, getter in cases:
self._test_unary(op, getter, 'cpu')
def test_binary_pointwise_ops(self):
def get_number(getter):
return getter([]).item()
def make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
cases = [
# Basic arithmetic
make_case(torch.add),
make_case(lambda x, y: x + y),
make_case(torch.sub),
make_case(lambda x, y: x - y),
make_case(torch.mul),
make_case(lambda x, y: x * y),
make_case(torch.div, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1),
make_case(torch.pow, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1),
]
test = self._vmap_test
for op, getter in cases:
device = 'cpu'
B0, B1 = 7, 11
self._assert_doesnt_use_vmap_fallback(
[op], (getter([B0], device), getter([B0], device)))
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
# Type promotion: op(Logical Scalar Tensor, Logical Scalar Tensor)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
# Type promotion: op(Tensor, Logical Scalar Tensor) (and vice-versa)
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
continue
# Test cross-device scalars
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device='cuda')
self._test_unary(lambda t: op(number, t), getter, device='cuda')
self._test_unary(lambda t: op(t, torch.tensor(number)), getter, device='cuda')
def test_chunk(self):
test = self._vmap_view_test
op = torch.chunk
B0, B1, B2 = 7, 11, 13
        # tests for torch.chunk(self, chunks: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 4, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
test = self._vmap_view_test
op = torch.diagonal
test(op, (tensor, 1, 0, 1), in_dims=(0, None, None, None))
test(op, (tensor, 0, 2, -1), in_dims=(0, None, None, None))
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
test(vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
(tensor,), in_dims=1, out_dims=1)
def test_expand_as(self):
op = torch.Tensor.expand_as
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 1, 5), torch.rand(B0, 2, 3, 5)))
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)), in_dims=(0, 1))
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
def test_movedim(self):
op = torch.movedim
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 2, B0, 5), 0, 1), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), 0, 1), in_dims=(2, None, None))
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]), in_dims=(2, None, None))
def test_narrow(self):
op = torch.narrow
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5), 1, 0, 0), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5, B2), -1, 2, 3), in_dims=(2, None, None, None))
def test_select(self):
op = torch.select
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
test(vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2)
test(vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
(torch.rand(3, 5, B0, B1, B2),), in_dims=2)
def test_reshape(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False)
test(vmap(lambda t: t.reshape([-1])), (torch.rand(B0, B1, 2, 5),), check_view=True)
test(vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
(torch.rand(3, B1, 2, B2, 5, B0),), in_dims=5, check_view=False)
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0), check_view=True)
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), torch.rand(1, 1, 10)), in_dims=(1, None), check_view=False)
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)), check_view=True)
test(vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
(torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
in_dims=(5, 0), check_view=False)
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
return wrapped
test = self._vmap_test
op = scalar_tensor_with_dtype(torch.result_type)
B0 = 2
test(op, (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0, 2),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0, 2),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
def test_split(self):
test = self._vmap_view_test
op = torch.split
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_t(self):
op = torch.t
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_T_numpy(self):
def op(t):
return t.T
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 3, 5),))
test(op, (torch.rand(B0),))
test(op, (torch.rand(2, B0, 3, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(op), (torch.rand(B1, 2, B0, 3, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 3, B2, 5),), in_dims=2)
def test_to(self):
test = self._vmap_test
B0, B1 = 7, 11
test(lambda t: t.to('cpu'), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
test(lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64)))
test(lambda t, o: t.to(o),
(torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
in_dims=(0, None))
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
test(lambda t: t.double(), (torch.rand(B0),))
test(lambda t: t.float(), (torch.rand(B0),))
test(lambda t: t.int(), (torch.rand(B0),), check_propagates_grad=False)
test(lambda t: t.long(), (torch.rand(B0),), check_propagates_grad=False)
def test_unfold(self):
op = torch.Tensor.unfold
test = self._vmap_view_test
B0, B1, B2 = 3, 2, 5
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11), 1, 5, 1), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11, B2), -1, 2, 4), in_dims=(2, None, None, None))
def test_unbind(self):
test = self._vmap_view_test
op = torch.unbind
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, 1023, B0, 5), 1),
in_dims=(2, None))
test(vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
(torch.rand(B1, 2, B0, 32, B2),), in_dims=2)
def test_view(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, None))(torch.rand(2, B0, 5), [10])
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
test(vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
(torch.rand(B2, B0, B1, 3, 2, 5),), in_dims=1)
def test_view_as(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view_as
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, 0))(torch.rand(2, B0, 5), torch.rand(B0, 10))
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)))
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0))
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
test(vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
(torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
in_dims=(2, 0))
def test_no_random_op_support(self):
B0 = 2
captured = torch.rand(3)
random_ops = [
# out-of-place on BatchedTensor
(torch.bernoulli, (torch.rand(B0, 1),)),
(lambda t: torch.bernoulli(t, p=0.5), (torch.rand(B0, 1),)),
(lambda t: torch.multinomial(t, 2), (torch.rand(B0, 3),)),
(torch.normal, (torch.randn(B0, 1), torch.randn(B0, 1))),
(lambda t: torch.normal(t, 1.), (torch.randn(B0, 1),)),
(lambda t: torch.normal(0., t), (torch.randn(B0, 1),)),
(torch.poisson, (torch.rand(B0, 1),)),
(torch.rand_like, (torch.rand(B0, 1),)),
(torch.randn_like, (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 2), (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 0, 2), (torch.rand(B0, 1),)),
# out-of-place on captured tensor
(lambda t: torch.bernoulli(captured), (torch.rand(B0),)),
(lambda t: torch.bernoulli(captured, p=0.5), (torch.rand(B0),)),
(lambda t: torch.multinomial(captured, 2), (torch.rand(B0),)),
(lambda t: torch.normal(captured, captured), (torch.randn(B0),)),
(lambda t: torch.normal(captured, 1.), (torch.randn(B0),)),
(lambda t: torch.normal(0., captured), (torch.randn(B0),)),
(lambda t: torch.poisson(captured), (torch.rand(B0),)),
(lambda t: torch.rand_like(captured), (torch.rand(B0),)),
(lambda t: torch.randn_like(captured) , (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 2), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 0, 2), (torch.rand(B0),)),
# in-place on BatchedTensor
(lambda t: t.bernoulli_(), (torch.randn(B0, 1),)),
(lambda t: t.cauchy_(), (torch.randn(B0, 1),)),
(lambda t: t.exponential_(), (torch.randn(B0, 1),)),
(lambda t: t.geometric_(0.5), (torch.randn(B0, 1),)),
(lambda t: t.log_normal_(), (torch.randn(B0, 1),)),
(lambda t: t.normal_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(0, 2), (torch.randn(B0, 1),)),
(lambda t: t.random_(2), (torch.randn(B0, 1),)),
(lambda t: t.uniform_(), (torch.randn(B0, 1),)),
# in-place on captured tensor
(lambda t: captured.bernoulli_(), (torch.randn(B0),)),
(lambda t: captured.cauchy_(), (torch.randn(B0),)),
(lambda t: captured.exponential_(), (torch.randn(B0),)),
(lambda t: captured.geometric_(0.5), (torch.randn(B0),)),
(lambda t: captured.log_normal_(), (torch.randn(B0),)),
(lambda t: captured.normal_(), (torch.randn(B0),)),
(lambda t: captured.random_(), (torch.randn(B0),)),
(lambda t: captured.random_(0, 2), (torch.randn(B0),)),
(lambda t: captured.random_(2), (torch.randn(B0),)),
(lambda t: captured.uniform_(), (torch.randn(B0),)),
# factory functions
(lambda t: torch.rand(1), (torch.randn(B0),)),
(lambda t: torch.randn(1), (torch.randn(B0),)),
(lambda t: torch.randint(5, [1]), (torch.randn(B0),)),
(lambda t: torch.randperm(5), (torch.randn(B0),)),
]
for op, args in random_ops:
with self.assertRaisesRegex(RuntimeError,
'vmap: We do not yet support calling random operations'):
vmap(op)(*args)
def construct_v(output, batch_size):
return torch.randn(batch_size, *output.shape,
dtype=output.dtype, device=output.device)
def as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def differentiable(args):
return tuple(arg for arg in as_tuple(args)
if isinstance(arg, torch.Tensor) and arg.requires_grad)
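# Illustrative sketch (not part of the original test file): a full Jacobian can be
# computed by vmap-ing a vector-Jacobian product over the rows of an identity
# matrix, mirroring TestVmapAPI.test_batched_gradient_basic above.
def _jacobian_via_vmap_example():
    x = torch.randn(3, requires_grad=True)
    y = torch.randn(3)
    def vjp(v):
        return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
    jacobian = vmap(vjp)(torch.eye(3))
    assert torch.allclose(jacobian, torch.diagflat(y))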
class TestVmapBatchedGradient(TestCase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
# Tests batched gradient computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
def _batched_grad_test(self, op, args, kwargs, output_process_fn=lambda x: x, batch_size=3):
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
batched_vectors = tuple(construct_v(out, batch_size) for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(outputs, differentiable(args), vectors,
retain_graph=True)
self._vmap_test(vector_jacobian_product, batched_vectors,
check_propagates_grad=False)
# Tests batched second grad computation of outputs = op(*args, **kwargs).
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
#
# NB: we only test computing batched gradients in the second gradient
# computation. One specific use case that does this is computing the hessian
# matrix of a scalar-valued function; this is useful in Bayesian Logistic
# Regression.
# It might be useful to have a test that computes batched first gradients and
# then uses those to compute batched second gradients in the future.
def _batched_grad_grad_test(self, op, args, kwargs, output_process_fn=lambda x: x, batch_size=3):
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
create_graph=True)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!")
batched_vectors = tuple(construct_v(grad, batch_size) for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
retain_graph=True, allow_unused=True)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(vector_hessian_product, batched_vectors,
check_propagates_grad=False)
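    # Illustrative sketch (comments only, not part of the test suite) of the
    # property _batched_grad_test checks, assuming a single differentiable
    # input and output: vmap-ing the VJP over a batch of cotangent vectors
    # should equal a per-vector loop followed by a stack:
    #
    #   expected = torch.stack([
    #       torch.autograd.grad(outputs, inputs, (v,), retain_graph=True)[0]
    #       for v in batched_vectors[0]
    #   ])
    #   actual = vmap(vector_jacobian_product)(*batched_vectors)[0]
    #   assert torch.allclose(actual, expected)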
def test_sigmoid(self, device):
# Maybe we can make the "check that the slow fallback was not invoked"
# into a context manager, because it's used a lot. I'll leave that for
# future work.
regex = r'falling back to slow \(for loop and stack\) implementation'
with warnings.catch_warnings(record=True) as wa:
warnings.simplefilter('always')
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.sigmoid, (x,), {})
self._batched_grad_grad_test(Tensor.sigmoid, (x,), {})
for captured_warning in wa:
self.assertNotRegex(str(captured_warning.message), regex)
instantiate_device_type_tests(
TestVmapBatchedGradient,
globals(),
# Excluding ROCM
except_for='cuda' if TEST_WITH_ROCM else None,
only_for=['cuda', 'cpu'],
)
if __name__ == '__main__':
run_tests()
|
"""
*Variable State*
"""
from abc import ABCMeta
from dataclasses import dataclass
from language.type import Expression
from language.type import State
from language.type import Type  # assumed: Type is exported by language.type alongside Expression and State
@dataclass
class Variable(
    State,
    metaclass=ABCMeta,  # Python 3 metaclass syntax; a bare __metaclass__ attribute would be a no-op here
):
expression: Expression
type: Type
|
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l1_l2
def regression(X, Y, epochs, reg_mode):
x, y = np.array(X),np.array(Y)
model = Sequential()
if reg_mode == 'linear':
model.add(Dense(1, input_dim=x.shape[1]))
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='mse')
elif reg_mode == 'logistic':
model.add(Dense(1, activation='sigmoid', input_dim=x.shape[1]))
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')
elif reg_mode == 'regularized':
reg = l1_l2(l1=0.01, l2=0.01)
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=reg, input_dim=x.shape[1]))  # kernel_regularizer replaces the old W_regularizer argument
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')
    out = model.fit(x, y, epochs=epochs, verbose=0, validation_split=.33)  # 'epochs' replaces the deprecated nb_epoch argument
return model, out
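# Minimal usage sketch, assuming numpy and Keras are installed; the synthetic
# data below is illustrative only and is not part of the original module.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 3)
    # a linear target with a little noise
    Y_demo = X_demo.dot(np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(200)
    demo_model, history = regression(X_demo, Y_demo, epochs=5, reg_mode='linear')
    print('final training loss: %.4f' % history.history['loss'][-1])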
|
# -*- coding: utf-8 -*-
import numpy as np
# from numpy import testing
from sktime.classification.dictionary_based import MUSE
from sktime.datasets.base import load_japanese_vowels
def test_muse_on_japanese_vowels():
# load japanese vowels data
X_train, y_train = load_japanese_vowels(split="train", return_X_y=True)
X_test, y_test = load_japanese_vowels(split="test", return_X_y=True)
indices = np.random.RandomState(0).permutation(50)
# train WEASEL+MUSE on multivariate data
muse = MUSE(random_state=1379, window_inc=4, use_first_order_differences=False)
muse.fit(X_train.iloc[indices], y_train[indices])
score = muse.score(X_test.iloc[indices], y_test[indices])
# print(score)
assert score >= 0.99
|
import click
import os
import sys
#
# Flask restarts itself when a file changes, but this restart
# does not have PYTHONPATH set properly if you start the
# app with python -m microraiden.
#
from microraiden.crypto import privkey_to_addr
import logging
import requests
log = logging.getLogger(__name__)
if __package__ is None:
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
sys.path.insert(0, path + "/../")
from web3 import HTTPProvider, Web3
from microraiden.make_helpers import make_paywalled_proxy
from microraiden import utils, config
from microraiden.exceptions import StateFileLocked, InsecureStateFile, NetworkIdMismatch
from microraiden.proxy.paywalled_proxy import PaywalledProxy
pass_app = click.make_pass_decorator(PaywalledProxy)
@click.group()
@click.option(
'--channel-manager-address',
default=None,
help='Ethereum address of the channel manager contract.'
)
@click.option(
'--state-file',
default=None,
help='State file of the proxy'
)
@click.option(
'--private-key',
required=True,
help='Path to private key file of the proxy',
type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
@click.option(
'--private-key-password-file',
default=None,
help='Path to file containing password for the JSON-encoded private key',
type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
@click.option(
'--ssl-cert',
default=None,
    help='Certificate of the server (cert.pem or similar)'
)
@click.option(
'--rpc-provider',
default='http://localhost:8545',
help='Address of the Ethereum RPC provider'
)
@click.option(
'--ssl-key',
default=None,
help='SSL key of the server (key.pem or similar)'
)
@click.option(
'--paywall-info',
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../webui')),
help='Directory where the paywall info is stored. '
         'The directory should contain an index.html file with the payment info/webapp. '
         'Content of the directory (js files, images, ...) is available at the "js/" endpoint.'
)
@click.pass_context
def main(
ctx,
channel_manager_address,
ssl_key,
ssl_cert,
state_file,
private_key,
private_key_password_file,
paywall_info,
rpc_provider,
):
private_key = utils.get_private_key(private_key, private_key_password_file)
if private_key is None:
sys.exit(1)
receiver_address = privkey_to_addr(private_key)
channel_manager_address = channel_manager_address or config.CHANNEL_MANAGER_ADDRESS
if not state_file:
state_file_name = "%s_%s.json" % (channel_manager_address[:10], receiver_address[:10])
app_dir = click.get_app_dir('microraiden')
if not os.path.exists(app_dir):
os.makedirs(app_dir)
state_file = os.path.join(app_dir, state_file_name)
config.paywall_html_dir = paywall_info
web3 = Web3(HTTPProvider(rpc_provider, request_kwargs={'timeout': 60}))
try:
app = make_paywalled_proxy(private_key, state_file, web3=web3)
except StateFileLocked as ex:
log.fatal('Another uRaiden process is already running (%s)!' % str(ex))
sys.exit(1)
except InsecureStateFile as ex:
msg = ('The permission bits of the state file (%s) are set incorrectly (others can '
'read or write) or you are not the owner. For reasons of security, '
'startup is aborted.' % state_file)
log.fatal(msg)
sys.exit(1)
except NetworkIdMismatch as ex:
log.fatal(str(ex))
sys.exit(1)
except requests.exceptions.ConnectionError as ex:
log.fatal("Ethereum node refused connection: %s" % str(ex))
sys.exit(1)
ctx.obj = app
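# Example invocation (illustrative only; the key paths and the subcommand name
# are placeholders -- subcommands are registered elsewhere in the package, not
# in this file):
#
#   python -m microraiden \
#       --private-key /path/to/keystore.json \
#       --private-key-password-file /path/to/password.txt \
#       --rpc-provider http://localhost:8545 \
#       <subcommand>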
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
SECRET_KEY = env('DJANGO_SECRET_KEY', default='!!!SET DJANGO_SECRET_KEY!!!')
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
from django.conf.urls import url, include
from . import views
urlpatterns = [
url('^ponto/new', views.add_new_point_view, name="add_new_point"),
]
|
import sys
from setuptools import find_packages, setup
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 7)
# Check python version
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of modnet requires Python {}.{}, but you're trying to
install it on Python {}.{}.
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="mod-net",
version="v2.0.0-beta5",
description="Make SET and SEU fault injections in hierarchical verilog netlists",
long_description=long_description,
long_description_content_type='text/x-rst',
packages=find_packages(exclude=("tests",)),
install_requires=[
'Click==7.*',
],
entry_points={
'console_scripts': ['modnet=modnet.cli:main'],
}
)
|
import __builtin__
import compileall
import os
import py_compile
import shutil
import subprocess
import sys
import textwrap
import unittest
from test.test_support import TESTFN, is_jython, run_unittest, temp_cwd
class TestMtime(unittest.TestCase):
def test_mtime_compile(self):
"""
This test exercises the mtime annotation that is now stored in Jython
compiled files. CPython already stores an mtime in its pyc files. To
        exercise this functionality, the test writes a py file, compiles it,
        changes the py file after a short sleep, and then sets the source
        file's OS modification time to a very low value. On CPython, this
        still causes a re-compile because the stored mtime no longer matches
        the source's. In Jython, before this fix, it would not.
See http://bugs.jython.org/issue1024
"""
import time
os.mkdir(TESTFN)
try:
mod = "mod1"
source_path = os.path.join(TESTFN, "%s.py" % mod)
if is_jython:
compiled_path = os.path.join(TESTFN, "%s$py.class" % mod)
else:
compiled_path = os.path.join(TESTFN, "%s.pyc" % mod)
fp = open(source_path, "w")
fp.write("def foo(): return 'first'\n")
fp.close()
py_compile.compile(source_path)
            # sleep so that the internal mtime is older for the next source write.
time.sleep(1)
fp = open(source_path, "w")
fp.write("def foo(): return 'second'\n")
fp.close()
            # make sure the source file's mtime is artificially older than
            # the compiled file's mtime, so that only the stored mtime
            # annotation can trigger the recompile.
os.utime(source_path, (1,1))
sys.path.append(TESTFN)
import mod1
self.assertEquals(mod1.foo(), 'second')
finally:
shutil.rmtree(TESTFN)
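    # Sketch of the staleness check the annotation enables (the helper names
    # below are hypothetical, shown only to illustrate the mechanism exercised
    # above): the compiled file records the source's mtime at compile time, so
    # a changed source is detected even when its on-disk mtime is made
    # artificially old:
    #
    #   stored_mtime = read_mtime_annotation(compiled_path)   # hypothetical
    #   if stored_mtime != os.path.getmtime(source_path):
    #       recompile(source_path)                            # hypothetical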
class TestCompileall(unittest.TestCase):
def write_code(self, package, name, code):
with open(os.path.join(package, name), "w") as f:
f.write(textwrap.dedent(code))
def test_compileall(self):
with temp_cwd():
PACKAGE = os.path.realpath("./greetings")
PYC_GREETER = os.path.join(PACKAGE, "greeter.pyc")
PYCLASS_GREETER = os.path.join(PACKAGE, "greeter$py.class")
PYCLASS_TEST = os.path.join(PACKAGE, "test$py.class")
os.mkdir(PACKAGE)
self.write_code(
PACKAGE, "greeter.py",
"""
def greet():
print 'Hello world!'
""")
self.write_code(
PACKAGE, "test.py",
"""
from greeter import greet
greet()
""")
# pretend we have a Python bytecode compiler by touching this file
open(PYC_GREETER, "a").close()
compileall.compile_dir(PACKAGE, quiet=True)
self.assertTrue(os.path.exists(PYC_GREETER)) # still exists
self.assertTrue(os.path.exists(PYCLASS_TEST)) # along with these new compiled files
self.assertTrue(os.path.exists(PYCLASS_GREETER))
# verify we can work with just compiled files
os.unlink(os.path.join(PACKAGE, "greeter.py"))
self.assertEqual(
subprocess.check_output([sys.executable, os.path.join(PACKAGE, "test.py")]).rstrip(),
"Hello world!")
def test_main():
run_unittest(TestMtime, TestCompileall)
if __name__ == "__main__":
test_main()
|
#
# Project: retdec-python
# Copyright: (c) 2015 by Petr Zemek <s3rvac@gmail.com> and contributors
# License: MIT, see the LICENSE file for more details
#
"""A representation of fileinfo analyses."""
from retdec.exceptions import AnalysisFailedError
from retdec.resource import Resource
class Analysis(Resource):
"""A representation of a fileinfo analysis."""
def wait_until_finished(self, on_failure=AnalysisFailedError):
"""Waits until the analysis is finished.
:param callable on_failure: What should be done when the analysis
fails?
If `on_failure` is ``None``, nothing is done when the analysis fails.
Otherwise, it is called with the error message. If the returned value
is an exception, it is raised.
"""
# Currently, the retdec.com API does not support push notifications, so
# we have to do polling.
while not self.has_finished():
self._wait_until_state_can_be_updated()
# The analysis has finished.
if self._failed:
self._handle_failure(on_failure, self._error)
def get_output(self):
"""Obtains and returns the output from the analysis (`str`)."""
file_path = '/{}/output'.format(self.id)
return self._get_file_contents(file_path, is_text_file=True)
def __repr__(self):
return '<{} id={!r}>'.format(
__name__ + '.' + self.__class__.__name__,
self.id
)
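# Hedged usage sketch (comments only): the client class and method names below
# (Fileinfo, start_analysis) are assumptions about the rest of the retdec-python
# package, not definitions from this module.
#
#   from retdec.fileinfo import Fileinfo
#
#   fileinfo = Fileinfo(api_key='YOUR-API-KEY')
#   analysis = fileinfo.start_analysis(input_file='prog.exe')
#   analysis.wait_until_finished()   # raises AnalysisFailedError on failure
#   print(analysis.get_output())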
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'wid.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(900, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.Chara_setting = QtWidgets.QGroupBox(self.centralwidget)
self.Chara_setting.setEnabled(True)
self.Chara_setting.setGeometry(QtCore.QRect(30, 50, 491, 191))
self.Chara_setting.setObjectName("Chara_setting")
self.pushButton_shootPhoto = QtWidgets.QPushButton(self.Chara_setting)
self.pushButton_shootPhoto.setGeometry(QtCore.QRect(40, 120, 93, 28))
self.pushButton_shootPhoto.setObjectName("pushButton_shootPhoto")
self.checkBox_finish = QtWidgets.QCheckBox(self.Chara_setting)
self.checkBox_finish.setEnabled(False)
self.checkBox_finish.setGeometry(QtCore.QRect(160, 130, 91, 19))
self.checkBox_finish.setObjectName("checkBox_finish")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.Chara_setting)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 30, 441, 81))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_Chara = QtWidgets.QLabel(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_Chara.sizePolicy().hasHeightForWidth())
self.label_Chara.setSizePolicy(sizePolicy)
self.label_Chara.setAlignment(QtCore.Qt.AlignCenter)
self.label_Chara.setObjectName("label_Chara")
self.horizontalLayout.addWidget(self.label_Chara)
self.comboBox_chooseChara = QtWidgets.QComboBox(self.horizontalLayoutWidget)
self.comboBox_chooseChara.setObjectName("comboBox_chooseChara")
self.horizontalLayout.addWidget(self.comboBox_chooseChara)
self.pushButton_choosePsd = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_choosePsd.sizePolicy().hasHeightForWidth())
self.pushButton_choosePsd.setSizePolicy(sizePolicy)
self.pushButton_choosePsd.setObjectName("pushButton_choosePsd")
self.horizontalLayout.addWidget(self.pushButton_choosePsd)
self.Vocie_setting = QtWidgets.QGroupBox(self.centralwidget)
self.Vocie_setting.setEnabled(True)
self.Vocie_setting.setGeometry(QtCore.QRect(30, 260, 491, 191))
self.Vocie_setting.setObjectName("Vocie_setting")
self.label_voiceKind = QtWidgets.QLabel(self.Vocie_setting)
self.label_voiceKind.setGeometry(QtCore.QRect(60, 90, 72, 15))
self.label_voiceKind.setObjectName("label_voiceKind")
self.pushButton_testMic = QtWidgets.QPushButton(self.Vocie_setting)
self.pushButton_testMic.setGeometry(QtCore.QRect(60, 140, 93, 28))
self.pushButton_testMic.setObjectName("pushButton_testMic")
self.comboBox_vc = QtWidgets.QComboBox(self.Vocie_setting)
self.comboBox_vc.setGeometry(QtCore.QRect(140, 81, 121, 31))
self.comboBox_vc.setEditable(False)
self.comboBox_vc.setCurrentText("")
self.comboBox_vc.setObjectName("comboBox_vc")
self.checkBox_enableVC = QtWidgets.QCheckBox(self.Vocie_setting)
self.checkBox_enableVC.setGeometry(QtCore.QRect(40, 40, 281, 19))
self.checkBox_enableVC.setObjectName("checkBox_enableVC")
self.label_charaPreview = QtWidgets.QLabel(self.centralwidget)
self.label_charaPreview.setGeometry(QtCore.QRect(560, 50, 72, 15))
self.label_charaPreview.setObjectName("label_charaPreview")
self.Image_preview = QtWidgets.QLabel(self.centralwidget)
self.Image_preview.setGeometry(QtCore.QRect(560, 70, 301, 371))
self.Image_preview.setText("")
self.Image_preview.setObjectName("Image_preview")
self.pushButton_start = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_start.setGeometry(QtCore.QRect(620, 500, 181, 51))
self.pushButton_start.setObjectName("pushButton_start")
self.Other_setting = QtWidgets.QGroupBox(self.centralwidget)
self.Other_setting.setGeometry(QtCore.QRect(30, 480, 381, 80))
self.Other_setting.setObjectName("Other_setting")
self.checkBox_debugOn = QtWidgets.QCheckBox(self.Other_setting)
self.checkBox_debugOn.setGeometry(QtCore.QRect(40, 40, 281, 19))
self.checkBox_debugOn.setObjectName("checkBox_debugOn")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.comboBox_vc.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.Chara_setting.setTitle(_translate("MainWindow", "虚拟形象设置"))
self.pushButton_shootPhoto.setText(_translate("MainWindow", "动作采集"))
self.checkBox_finish.setText(_translate("MainWindow", "未完成"))
self.label_Chara.setText(_translate("MainWindow", "形象选择"))
self.pushButton_choosePsd.setText(_translate("MainWindow", "扫描"))
self.Vocie_setting.setTitle(_translate("MainWindow", "变声器设置"))
self.label_voiceKind.setText(_translate("MainWindow", "音色"))
self.pushButton_testMic.setText(_translate("MainWindow", "麦克风测试"))
self.checkBox_enableVC.setText(_translate("MainWindow", "启用变声器"))
self.label_charaPreview.setText(_translate("MainWindow", "形象预览"))
self.pushButton_start.setText(_translate("MainWindow", "链接形象!"))
self.Other_setting.setTitle(_translate("MainWindow", "其他设置"))
self.checkBox_debugOn.setText(_translate("MainWindow", "(Debug模式)渲染同时打开摄像头"))
|
import glyphsLib
import importlib
import argparse
import sys
from glob import glob
parser = argparse.ArgumentParser(description='Filter a font file')
parser.add_argument('input', metavar='GLYPHS',
help='the Glyphs file')
parser.add_argument('filter',metavar='FILTER',
help='the filter to use')
args = parser.parse_args()
base_path = "NaNGlyphFilters"
sys.path.append(base_path)
glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input)
filter_script = args.filter
sys.modules['GlyphsApp'] = glyphsLib
try:
i = importlib.import_module(filter_script)
except ModuleNotFoundError as e:
modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+"/*.py")) if "/NaN" not in x]
print("Couldn't find filter '%s'.\nTry one of: %s" % (filter_script, ", ".join(modules)))
sys.exit(1)
save_file = args.input.replace(".glyphs", "-"+filter_script+".glyphs")
glyphsLib.Glyphs.font.save(save_file)
print("Saved on %s" % save_file)
|
"""
Pydantic schema models for the Books table
"""
from typing import Optional
from pydantic import BaseModel
class BooksBase(BaseModel):
"""
A schema class used to represent Books table column values
"""
Title: Optional[str] = None
AuthorId: Optional[int] = None
class Config:
"""
        Enable ORM mode so attribute access (data.Title) works in addition
        to dict-style access (data["Title"])
"""
orm_mode = True
class BooksCreate(BooksBase):
"""
    A schema class representing the columns required to create a new book
"""
Title: str
AuthorId: int
class Config:
"""enable orm mode"""
orm_mode = True
class BooksUpdate(BooksBase):
"""
    A schema class representing the columns required to update a book
"""
Title: str
AuthorId: int
class Config:
"""enable orm mode"""
orm_mode = True
class BooksInDBBase(BooksBase):
"""
A schema class used to represent book data based on its ID
"""
BookId: Optional[int] = None
class Config:
"""enable orm mode"""
orm_mode = True
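# Minimal usage sketch (comments only): BookModel below is a hypothetical ORM
# model, not defined in this module; .dict() and from_orm() are standard
# pydantic v1 API enabled here by orm_mode = True.
#
#   book_in = BooksCreate(Title="Dune", AuthorId=1)
#   db_book = BookModel(**book_in.dict())          # create the ORM row
#   book_out = BooksInDBBase.from_orm(db_book)     # read it back as a schema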
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
'''
***************2015-10-21*******************
1. Add bill to the access configuration;
2. Add compilation of monitor to the access configuration;
3. * Push is not handled for now; iterate on it later
4. * There is no rpc yet; that should be moved into the worker
********************************************
'''
from monitor.monitor_api_pb2 import ReportBillReq, MsgStats
import socket
import struct
import traceback
from log import log
from bill import bill
import pb_helper
import time
from pb_rpc_agent import PbRpcAgent
__all__ = ['ProxyMonitor', ]
###########################################
class LimitMap(object):
def __init__(self, max_key):
self.o = dict()
self.max_key_cnt = max_key
self.key_cnt = 0
self.is_drop = False
def has_drop(self):
return self.is_drop
def set(self, k, v):
if self.key_cnt >= self.max_key_cnt:
self.is_drop = True
return False
        self.o[k] = v
        self.key_cnt += 1
        return True  # mirror the explicit False returned when the entry is dropped
def get(self, k, default):
if self.has(k):
return self.o[k]
return default
def get_key_ls(self):
return self.o.keys()
def has(self, k):
if k in self.o:
return True
return False
def length(self):
return len(self.o)
def get_statistic(self):
MAX_SHOW_CNT = 50
return '[max: %d, length: %d, %s]' % \
(self.max_key_cnt,
self.length(),
str(self.o.keys()[:MAX_SHOW_CNT]))
def clear(self):
self.o.clear()
self.key_cnt = 0
def del_one_key(self, key):
if not self.has(key):
return
self.key_cnt -= 1
del self.o[key]
def del_keys(self, keys):
valid_keys = set(keys) & set(self.o.keys())
for key in valid_keys:
self.key_cnt -= 1
del self.o[key]
###########################################
class TimeableLimitMap(LimitMap):
def set(self, k,
v):
key = k
val = (time.time(), v)
LimitMap.set(self, key, val)
def get_time(self, k,
default):
if not LimitMap.has(self, k):
return default
info = LimitMap.get(self, k, default)
return info[0]
def get_val(self, k,
default):
if not LimitMap.has(self, k):
return default
info = LimitMap.get(self, k, default)
return info[1]
###########################################
class ErrCollection(TimeableLimitMap):
def __init__(self, max_key):
TimeableLimitMap.__init__(self, max_key)
self.set_errinfo()
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['SUCCESS']
def get_err_id(self):
return self.errinfo[0]
def get_err_info(self):
return self.errinfo[1]
def get_err_msg(self):
return self.errinfo[2]
class ErrPushSessionKeyMiss(ErrCollection):
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['PROXY_MONITOR_PUSH_SESSION_KEY_MISS']
def set(self, seq,
session_key):
ErrCollection.set(self, seq, session_key)
class ErrPipeSend(ErrCollection):
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['PROXY_MONITOR_PIPE_SEND']
def set(self, seq, fd):
ErrCollection.set(self, seq, fd)
class ErrSockSend(ErrCollection):
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['PROXY_MONITOR_SOCK_SEND']
def set(self, seq,
fd):
ErrCollection.set(self, seq, fd)
class ErrSockSeqMiss(ErrCollection):
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['PROXY_MONITOR_SOCK_SEQ_MISS']
def set(self, seq):
ErrCollection.set(self, seq, None)
class ErrRspMiss(ErrCollection):
def set_errinfo(self):
from context import context
self.errinfo = context.errcode['PROXY_MONITOR_SOCK_RSP_MISS']
def set(self, seq):
ErrCollection.set(self, seq, None)
###########################################
class Data(object):
def __init__(self, max_key):
self.req_o = TimeableLimitMap(max_key)
self.rsp_o = TimeableLimitMap(max_key)
def record_req(self, seq,
header):
self.req_o.set(seq, header)
def record_rsp(self, seq,
header):
self.rsp_o.set(seq, header)
def show_statistic(self):
log.debug('data->req:%s, data->rsq:%s',
self.req_o.get_statistic(),
self.rsp_o.get_statistic())
def in_req(self, seq):
return self.req_o.has(seq)
def clear(self):
self.req_o.clear()
self.rsp_o.clear()
def _del_lost_rsp(self):
req_keys = self.req_o.get_key_ls()
rsp_keys = self.rsp_o.get_key_ls()
del_keys = set(rsp_keys) - set(req_keys)
self.rsp_o.del_keys(del_keys)
def del_keys(self, keys):
self.req_o.del_keys(keys)
self.rsp_o.del_keys(keys)
        # it can happen that the req has already been deleted while its rsp has not arrived yet
self._del_lost_rsp()
class SockData(Data):
def __init__(self, max_key,
rsq_err_o):
Data.__init__(self, max_key)
self.rsp_miss_o = rsq_err_o
def record_req(self, seq,
header):
Data.record_req(self, seq, header)
self.rsp_miss_o.set(seq)
def record_rsp(self, seq,
header):
Data.record_rsp(self, seq, header)
self.rsp_miss_o.del_one_key(seq)
class PushData(Data):
pass
###########################################
class Rpc(object):
'''
    notes: data obtained on the worker side
'''
pass
class Bill(object):
'''
    notes: data obtained on the proxy side
'''
def __init__(self, data_o,
seq_map,
fd_map,
err_o_ls,
mid):
self.data_o = data_o
self.seq_map = seq_map
self.fd_map = fd_map
self.err_o_ls = err_o_ls
self.my_mid = mid
def get_uid(self, seq):
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
return header.str_uid
def get_cmd(self, seq):
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
return header.str_cmd
def get_aid(self, seq):
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
return header.str_aid
def get_skey(self, seq):
return self._get_conn_info_by_seq(seq, 'session_key', '')
def get_tid(self, seq):
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
return header.str_tid
def get_client_addr(self, seq):
addr = self._get_conn_info_by_seq(seq, 'addr', ('', 0))
return '{}:{}'.format(addr[0], addr[1])
def get_seq(self, seq):
key = 'client_seqno'
return self._get_info_from_seq(seq, key, 0)
def _find_err_o(self, seq):
for err in self.err_o_ls:
if err.has(seq):
return err
return None
def get_client_code(self, seq):
'''
        # Return 0 if the response packet cannot be found; otherwise return the code returned by the called service
'''
header = self.data_o.rsp_o.get_val(seq, None)
if not header:
return 0
return header.i_code
def get_code(self, seq):
code = 0
err_o = self._find_err_o(seq)
if err_o:
code = err_o.get_err_id()
return code
def get_errinfo(self, seq):
errinfo = 'succ'
err_o = self._find_err_o(seq)
if err_o:
errinfo = err_o.get_err_info()
return errinfo
def get_result(self, seq):
return self.get_errinfo(seq)
def get_cost(self, seq):
code = self.get_code(seq)
        # 1. unknown error: skip the cost calculation
if code == -1:
return 0
        # 2. no error: use the timestamp of the response packet
if code == 0:
end_o = self.data_o.rsp_o
else:
            # 3. an error occurred: use the timestamp of the error
end_o = self._find_err_o(seq)
end_time = end_o.get_time(seq, 0)
start_time = self.data_o.req_o.get_time(seq, 0)
cost = int((end_time - start_time) * 1000)
return cost
def get_duration(self, seq):
return self.get_cost(seq)
def get_caller_mid(self, seq):
        # caller -> app
return self.get_client_mid(seq)
def get_callee_mid(self, seq):
        # callee
return self.my_mid
def get_client_mid(self, seq):
        # client -> app
header = self.data_o.req_o.get_val(seq, None)
if not header:
return 0
return header.msg_client_info.ui_mid
def _get_info_from_seq(self, seq, key, default):
if not self.seq_map:
return default
if seq not in self.seq_map:
return default
if key not in self.seq_map[seq]:
return default
return self.seq_map[seq][key]
def _get_conn_info_by_seq(self, seq, key, default):
client_fd = self._get_info_from_seq(seq, "client_fd", None)
if client_fd is None:
log.error("client_fd is absetn for seq: %d", seq)
return default
conn_info = self.fd_map.get(client_fd)
if not conn_info:
log.error("conn info is absent for client fd: %d", client_fd)
return default
return conn_info.get(key, default)
def get_req_len(self, seq):
return self._get_info_from_seq(seq, 'req_len', 0)
def _get_bt_str(self, buf):
'''
        notes: parsing needs the req's cls first, which we do not have yet, so this
        is intentionally left unimplemented (the early return below makes the try block unreachable)
'''
return ''
try:
return str(pb_helper.to_string(buf))
except:
log.error(traceback.format_exc())
return buf
def _get_proto_str(self, o):
        return ''  # intentionally disabled for now; the code below is unreachable
if not o:
return ''
try:
return str(pb_helper.to_short_string(o))
except:
log.error(traceback.format_exc())
return ''
def get_proto_req_header(self, seq):
header = self.data_o.req_o.get_val(seq, None)
return self._get_proto_str(header)
def get_proto_rsp_header(self, seq):
header = self.data_o.rsp_o.get_val(seq, None)
return self._get_proto_str(header)
def get_bt_req_header(self, seq):
raw_header = self._get_info_from_seq(seq, 'req_header', '')
return self._get_bt_str(raw_header)
def get_bt_req_body(self, seq):
raw_body = self._get_info_from_seq(seq, 'req_body', '')
return self._get_bt_str(raw_body)
def get_caller_ip(self, seq):
        # caller IP
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
ui_ip = header.msg_client_info.msg_ip_info.ui_ip_v4
try:
str_ip = socket.inet_ntoa(struct.pack("!I", ui_ip))
except:
log.error(traceback.format_exc())
str_ip = ''
return str_ip
def get_client_ip(self, seq):
        # for access, caller_ip and client_ip are the same
return self.get_caller_ip(seq)
def get_client_version(self, seq):
header = self.data_o.req_o.get_val(seq, None)
if not header:
return ''
try:
big_ver = header.msg_client_info.msg_version.ui_major_version
sma_ver = header.msg_client_info.msg_version.ui_minor_version
str_ver = '{b}.{s}'.format(b=big_ver, s=sma_ver)
except:
log.error(traceback.format_exc())
str_ver = ''
return str_ver
def get_rsp_len(self, seq):
return self._get_info_from_seq(seq, 'rsp_len', 0)
def get_bt_rsp_header(self, seq):
raw_header = self._get_info_from_seq(seq, 'rsp_header', '')
return self._get_bt_str(raw_header)
def get_bill_info(self, seq):
bill_info_map = \
{
'uid': self.get_uid(seq),
'cmd': self.get_cmd(seq),
'ret_code': self.get_client_code(seq),
'result': self.get_result(seq),
'cost': self.get_cost(seq),
'duration': self.get_duration(seq),
'mid': self.get_caller_mid(seq),
'client_mid': self.get_client_mid(seq),
'seq': self.get_seq(seq),
'skey': self.get_skey(seq),
'tid': self.get_tid(seq),
'reqlen': self.get_req_len(seq),
'rsplen': self.get_rsp_len(seq),
'caller': self.get_caller_ip(seq),
'ver': self.get_client_version(seq),
'aid': self.get_aid(seq),
'client_addr': self.get_client_addr(seq),
'backend_seq': seq,
'header': self.get_proto_req_header(seq),
'body': self.get_bt_req_body(seq),
}
k_v_ls = ['{k}:{v}'.format(k=key, v=val)
for key, val in bill_info_map.iteritems()]
str_bill_info = '|'.join(k_v_ls)
return str_bill_info
def get_bt_bill(self, seq):
stats = MsgStats()
stats.d_bill_time = time.time()
stats.str_uid = self.get_uid(seq)
stats.str_cmd = self.get_cmd(seq)
stats.ui_seq = self.get_seq(seq)
stats.i_code = self.get_code(seq)
stats.str_info = self.get_errinfo(seq)
stats.ui_cost = self.get_cost(seq)
stats.ui_duration = self.get_duration(seq)
stats.ui_req_len = self.get_req_len(seq)
stats.ui_rsp_len = self.get_rsp_len(seq)
stats.str_biz_info = self.get_bill_info(seq)
stats.ui_caller_mid = self.get_caller_mid(seq)
stats.str_caller_ip = self.get_caller_ip(seq)
stats.ui_callee_mid = self.get_callee_mid(seq)
stats.ui_client_mid = self.get_client_mid(seq)
stats.str_client_version = self.get_client_version(seq)
stats.str_client_ip = self.get_client_ip(seq)
stats.bt_req_header = self.get_proto_req_header(seq)
stats.bt_req_body = self.get_bt_req_body(seq)
stats.bt_rsp_header = self.get_proto_rsp_header(seq)
return stats
class LogicProxy(object):
def __init__(self, id):
self.logic_id = id
@classmethod
def send_to_monitor(cls, data):
try:
report_bill_cmd = "monitor.ReportBill"
report_bill_req = ReportBillReq()
stats = report_bill_req.msg_stats
stats.CopyFrom(data)
rpc_report_bill = PbRpcAgent(
report_bill_cmd,
None,
body=report_bill_req
)
log.debug("rpc_report_bill invoking...")
rpc_report_bill.invoke()
except:
log.error(traceback.format_exc())
@classmethod
def find_exclude_err_keys(cls, keys, err_o_ls):
exclude_keys = list()
for key in keys:
for err in err_o_ls:
if err.has(key):
exclude_keys.append(key)
break
return exclude_keys
@classmethod
def find_req_rsp_pair_ls(cls, data_o):
if not data_o:
return []
req_seq_ls = data_o.req_o.get_key_ls()
rsp_seq_ls = data_o.rsp_o.get_key_ls()
common_seq_ls = set(req_seq_ls) & set(rsp_seq_ls)
return list(common_seq_ls)
class AppLogicProxy(LogicProxy):
APP_ID = 8888
def __init__(self):
LogicProxy.__init__(self, self.APP_ID)
def report(self, seq_ls, seq_map,
fd_map, data_o, err_o_ls,
mid):
try:
bill_o = Bill(data_o, seq_map, fd_map, err_o_ls, mid)
for req_seq in seq_ls:
log.debug('app_logic_proxy -> proxy_seq[%s] -> start!' % req_seq)
if not data_o.in_req(req_seq):
                    # otherwise we could end up with an empty req header
                    log.warning('seq:%s is not in req, continue!' % str(req_seq))
continue
try:
                    # 1. report to the monitor
self.send_to_monitor(bill_o.get_bt_bill(req_seq))
except:
log.error(traceback.format_exc())
log.debug('app_logic_proxy -> send_monitor finish!')
try:
                    # 2. write the bill log
bill.log(bill_o.get_bill_info(req_seq))
except:
log.error(traceback.format_exc())
log.debug('app_logic_proxy -> record_bill finish!')
except:
log.error(traceback.format_exc())
###########################################
class ProxyMonitor(object):
'''
    notes:
    # Business-level monitoring (network problems are not monitored; the proxy
    # side is kept simple for now, with errors processed in batch in
    # handle_close_fd):
    # Start: a complete request packet has been received and can be parsed;
    #
    # 1. Errors:
    # a. socket
    #    I)   sending to the worker over the pipe failed (no rpc);
    #    II)  client_seq not found;
    #    III) sending the response packet to the app over the socket failed;
    # b. push
    #    I)   session_key not found;
    #    II)  sending to the socket failed;
'''
TYPE_SOCKE = 1
TYPE_PIPE = 2
def __init__(self, mid):
self.init_collections()
self.my_mid = mid
###################################################
def init_collections(self):
self.err_push_miss_sessionkey_o = ErrPushSessionKeyMiss(1000)
self.err_pipe_send_o = ErrPipeSend(1000)
self.err_sock_send_o = ErrSockSend(1000)
self.err_sock_miss_seq_o = ErrSockSeqMiss(1000)
self.err_rsp_miss_seq_o = ErrRspMiss(1000)
self.sock_data_o = SockData(1000, self.err_rsp_miss_seq_o)
self.push_data_o = PushData(1000)
self.data_o_ls = [self.sock_data_o,
self.push_data_o]
self.err_o_ls = [self.err_push_miss_sessionkey_o,
self.err_pipe_send_o,
self.err_sock_miss_seq_o,
self.err_sock_send_o,
self.err_rsp_miss_seq_o]
self.clearable_o_ls = self.data_o_ls + self.err_o_ls
###################################################
def report(self, seq_ls,
seq_map,
fd_map):
'''
        notes: only app-level monitoring for now
'''
self.sock_data_o.show_statistic()
report_o = AppLogicProxy()
report_o.report(seq_ls,
seq_map,
fd_map,
self.sock_data_o,
self.err_o_ls,
self.my_mid)
[o.del_keys(seq_ls) for o in self.clearable_o_ls]
###################################################
def record_sock_req(self, seq,
header):
self.sock_data_o.record_req(seq, header)
def record_sock_rsp(self, seq,
header):
self.sock_data_o.record_rsp(seq, header)
def record_push_req(self, seq,
header):
self.push_data_o.record_req(seq, header)
def record_push_rsp(self, seq,
header):
self.push_data_o.record_rsp(seq, header)
###################################################
def push_session_key_miss(self, proxy_seq,
session_key):
self.err_push_miss_sessionkey_o.set(proxy_seq, session_key)
def sock_send_err(self, proxy_seq,
fd):
self.err_sock_send_o.set(proxy_seq, fd)
def pipe_send_err(self, proxy_seq,
fd):
self.err_pipe_send_o.set(proxy_seq, fd)
def sock_seq_miss(self, proxy_seq):
self.err_sock_miss_seq_o.set(proxy_seq)
###################################################
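# Hedged wiring sketch (comments only) of how the proxy side might drive this
# monitor, using only methods defined above; the surrounding objects (headers,
# seq/fd maps) are assumptions about the caller, not defined in this file:
#
#   monitor = ProxyMonitor(mid=my_mid)
#   monitor.record_sock_req(seq, req_header)      # on receiving a full request
#   monitor.record_sock_rsp(seq, rsp_header)      # on sending the response back
#   monitor.pipe_send_err(seq, fd)                # on a pipe-send failure
#   monitor.report(seq_ls, seq_map, fd_map)       # e.g. batched in handle_close_fd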
|