content stringlengths 5 1.05M |
|---|
import xml.etree.ElementTree as ET
import numpy as np
from gensim.models import Word2Vec
from utils.data_helper import clean_sentence

# Fixed seed so out-of-vocabulary vectors are reproducible across runs.
np.random.seed(26061997)

word_vector = Word2Vec.load('helper_files/word2vec/word2vec-dim300')
data = ET.parse('data/full.xml')

embedding_file_path = 'helper_files/embedding/generated_embedding.txt'
indexed_embedding_file_path = 'helper_files/embedding/indexed_embedding.txt'
embedding_indexes_file_path = 'helper_files/embedding/embedding_indexes.txt'

# Value range of the trained vectors; OOV vectors are sampled uniformly
# from the same interval so they are on a comparable scale.
min_val = min(map(min, word_vector.wv.vectors))
max_val = max(map(max, word_vector.wv.vectors))
vector_size = word_vector.wv.vector_size

embedding = {}
root = data.getroot()
idx = 1

# Context managers guarantee the three output files are flushed and
# closed even if an exception is raised mid-loop (the originals were
# opened bare and only closed at the end).
with open(embedding_file_path, 'w') as embedding_file, \
        open(indexed_embedding_file_path, 'w') as indexed_embedding_file, \
        open(embedding_indexes_file_path, 'w') as embedding_indexes_file:
    for sentence in root:
        for phrase in sentence:
            cleaned_phrase = clean_sentence(phrase.text, word_vector.wv)
            for word in cleaned_phrase.split():
                # Each distinct word is embedded and written exactly once;
                # `idx` is its 1-based line index in the indexed file.
                if word not in embedding:
                    if word in word_vector.wv:
                        embedding[word] = word_vector.wv[word]
                    else:
                        # OOV word: uniform random vector in [min_val, max_val).
                        embedding[word] = np.random.rand(
                            vector_size) * (max_val - min_val) + min_val
                    vector_str = ' '.join(map(str, embedding[word]))
                    embedding_file.write(word + ' ' + vector_str + '\n')
                    indexed_embedding_file.write(vector_str + '\n')
                    embedding_indexes_file.write(word + ' ' + str(idx) + '\n')
                    idx += 1
|
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from vaccine_scraper import VACCINE_COUNTRY_LIST
# Dash layout for the vaccination-data page. The component ids declared
# here (store-vaccine-data, button-plot-vd, vaccine-plot, loading-icon-vd,
# vd-normalise-check, ...) are presumably targeted by callbacks registered
# elsewhere in the app — verify against the callback module.
layout_vaccine = html.Div(className="data-page-container",
# id='output-layout',
children=[
html.Div(
html.H1('Coronavirus vaccinations by country')
,className="data-page-title", id="v-data-title-container"),
# Browser-side store that holds the fetched vaccination data.
dcc.Store(id='store-vaccine-data'),
html.Div([
dbc.Card([
html.Div(html.I("Select countries of interest, then click the Plot button."), className="select-text"),
html.Div([
# One single-option checklist per country so each country can be toggled
# independently; United Kingdom and Germany start pre-selected.
dbc.Checklist(
id=f"{c_name}-v-data",
options=[{'label': c_name.title(),
'value': c_name}],
value=[c_name] if c_name in ['United Kingdom', 'Germany'] else [],
)
# NOTE(review): `i` is unused — plain iteration over the list would do.
for i, c_name in enumerate(VACCINE_COUNTRY_LIST)]),
],
className="inter-card country-picker",
),
],className="data-country-picker"),
html.Div([
html.Div([
# Plot trigger: SVG icon plus a text label inside one button.
dbc.Button([
html.Div([
html.Img(src='/assets/images/plot.svg')
],className="plot-button-logo"),
html.Div('Plot',className='plot-button-text')
],
color='primary',
className='plot-button data',
id="button-plot-vd"),
]),
html.Div(["This section enables you to compare different countries' vaccination data in real-time. Use the checkboxes on the left to select the countries to plot. Data will automatically update as they are published. Data source: ",
html.A('OWID', href='https://github.com/owid/covid-19-data/blob/master/public/data/vaccinations/vaccinations.csv '),
"."],
className="this-section"),
# Spinner shown while plot data is being fetched.
dcc.Loading(id="loading-icon-vd", children=[html.Div(id="loading-output-1-vd")], type="default"),
html.Hr(),
html.H3(children='People vaccinated', className="plot-title"),
html.Div(
# Toggle between absolute counts and percentage-of-population view.
dcc.Checklist(
id='vd-normalise-check',
options=[{'label': "Plot as percentage of population?", 'value': 'normalise'}],
value=[],
style={'textAlign': 'center', "marginBottom": "20px"},
inputStyle={"marginRight": "5px"}
),
),
# Main graph; strip rarely-used modebar tools to declutter the toolbar.
html.Div(dcc.Graph(id='vaccine-plot',
config = {'modeBarButtonsToRemove': [
'pan2d',
'toImage',
'select2d',
'toggleSpikelines',
'hoverCompareCartesian',
'hoverClosestCartesian',
'lasso2d',
]}
),className='data-fig'),
])
])
|
from django.shortcuts import redirect, render,HttpResponse
from django.contrib.auth import authenticate
from django.contrib.auth import logout,login
from django.contrib.auth.models import User
from django.contrib import messages
from .models import Message,Friend,Fileupload
from django.http import JsonResponse
# Create your views here.
def home(request):
    """Render the home page; anonymous or deactivated users go to login."""
    # `not is_active` is the idiomatic form of `is_active == False`.
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    return render(request, 'home.html')
def room(request, friendusername):
    """Render the chat room with ``friendusername``.

    Anonymous/inactive users are sent to login; a user cannot open a
    room with themselves.
    """
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    if request.user.username == friendusername:
        return redirect('/')
    return render(request, 'room.html', {'friend': friendusername})
def checkview(request):
    """Validate a friend's username from POST and route to their room.

    Falls through to the home page for non-POST requests, self-chat
    attempts, and unknown usernames.
    """
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    if request.method == 'POST':
        friendusername = request.POST.get("friendusername")
        # Valid target: an existing user other than the requester.
        if (friendusername != request.user.username
                and User.objects.filter(username=friendusername).exists()):
            return redirect('/room/' + friendusername)
    return redirect('/')
def send(request):
    """Persist a chat message posted from the room page.

    Rejects empty messages, spoofed sender names, and self-messages.
    """
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    if request.method == 'POST':
        sender = request.POST.get("username")
        receiver = request.POST.get("friend")
        # Default to "" so a missing field cannot crash on .strip()
        # (the original called .strip() on a possibly-None value).
        message = request.POST.get("message", "").strip()
        # Empty message, or the posted sender does not match the
        # authenticated user: bounce back to the room.
        if message == "" or request.user.username != sender:
            return redirect('/room/' + receiver)
        if sender == receiver:
            return redirect('/')
        Message.objects.create(sender=sender, receiver=receiver, message=message)
        return HttpResponse("message sent")
    return redirect('/')
def getmessages(request, friend):
    """Return the two-way message history with ``friend`` as JSON."""
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    # Only allow fetching history with a real, distinct user.
    if not User.objects.filter(username=friend).exists():
        return redirect('/')
    if request.user.username == friend:
        return redirect('/')
    # Union of both directions of the conversation.
    all_messages = (
        Message.objects.filter(sender=request.user, receiver=friend)
        | Message.objects.filter(sender=friend, receiver=request.user)
    )
    return JsonResponse({"messages": list(all_messages.values())})
def friends(request):
    """List the user's friends; on POST, first add a new friend entry.

    A new entry is rejected (silently, via redirect) when it targets the
    user themselves, has a blank name/nickname, already exists, or names
    an unknown user.
    """
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    user = request.user.username
    if request.method == 'POST':
        friend = request.POST.get('friendusername')
        nickname = request.POST.get('friendnickname')
        if (friend == user
                or friend == "" or nickname == ""
                or Friend.objects.filter(friend=friend, user=user).exists()
                or not User.objects.filter(username=friend).exists()):
            return redirect('/friends')
        # create() already saves; the original's extra .save() issued a
        # redundant second UPDATE.
        Friend.objects.create(user=user, nickname=nickname, friend=friend)
    unsorted_friends = Friend.objects.filter(user=user)
    # Sort case-insensitively by nickname for display.
    user_friends = sorted(list(unsorted_friends.values()),
                          key=lambda k: k['nickname'].lower())
    return render(request, 'friends.html', {"user_friends": user_friends})
def removefriend(request):
    """Delete one Friend entry of the current user (POST only)."""
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    if request.method == 'POST':
        friend = request.POST.get('friendusername')
        user = request.user.username
        # Single query instead of the original exists()-then-fetch pair;
        # like the original, only the first matching row is deleted.
        entry = Friend.objects.filter(friend=friend, user=user).first()
        if entry is not None:
            entry.delete()
    return redirect('/friends')
def uploadfiles(request, friend):
    """Store an uploaded file and record it as a file message to ``friend``."""
    if request.user.is_anonymous or not request.user.is_active:
        return redirect('/accounts/login')
    if request.method == 'POST':
        sender = request.user.username
        receiver = friend
        # Nothing uploaded: bounce back to the room.
        if 'file' not in request.FILES:
            return redirect('/room/' + friend)
        new_file = Fileupload(file=request.FILES.get('file'))
        new_file.save()
        file_name = new_file.file.name
        new_message = Message(sender=sender, receiver=receiver,
                              message=new_file.file.url,
                              file_status=True, file_name=file_name)
        new_message.save()
        return HttpResponse('File uploaded successfully!')
    # BUG FIX: non-POST requests previously fell off the end and returned
    # None, which makes Django raise a ValueError; redirect instead.
    return redirect('/room/' + friend)
|
import csv
import json

# csv docs recommend newline='' so the reader handles embedded newlines.
with open("res/uiuc_demographics_2005_15.csv", newline="") as f:
    reader = csv.DictReader(f)
    data = list(reader)

# majors: major name -> year -> {"female", "male", "total", "pct_f"}
majors = {}
for row in data:
    majorName = row["Major Name"].strip()
    year = row["Fall"].strip()
    # Skip empty rows
    if majorName == "":
        continue
    # Ensure the per-major and per-year dictionaries exist.
    year_data = majors.setdefault(majorName, {}).setdefault(year, {
        "female": 0,
        "male": 0,
        "total": 0
    })
    # Tally the male and female numbers (summed across degree rows).
    year_data["female"] += int(row["Female"])
    year_data["male"] += int(row["Male"])

# Compute the totals and the %_female
for major in majors.values():
    for d in major.values():
        d["total"] = d["male"] + d["female"]
        # Guard against a zero-enrollment year; the original divided
        # unconditionally and could raise ZeroDivisionError.
        d["pct_f"] = d["female"] / d["total"] if d["total"] else 0.0

genderDiversityList = []
# Compile the max/min/current female percentage for each major
for majorName, major in majors.items():
    # Skip majors without a 2015 entry
    if "2015" not in major:
        continue
    pcts = [d["pct_f"] for d in major.values()]
    genderDiversityList.append({
        "major": majorName,
        "max_female_pct": max(pcts),
        "min_female_pct": min(pcts),
        "current_female_pct": major["2015"]["pct_f"]
    })

# Write the list as a JSON:
with open("res/genderDiversity.json", "w") as f:
    json.dump(genderDiversityList, f, indent=2)
|
from setuptools import setup

# Packaging metadata for the `teapot` library.
setup(
    name="teapot",
    version="0.0.1",
    description="",
    url="https://github.com/admiralobvious/teapot",
    author="Alexandre Ferland",
    author_email="aferlandqc@gmail.com",
    license="MIT",
    packages=["teapot"],
    # Install as a real directory rather than a zipped egg.
    zip_safe=False,
    install_requires=[
        "certifi>=2019.3.9",
        "urllib3>=1.25.3",
    ],
    # NOTE(review): pytest-runner and the setup_requires/tests_require
    # mechanism are deprecated upstream; confirm whether
    # `python setup.py test` is still a supported entry point here.
    setup_requires=["pytest-runner>=5.1"],
    tests_require=[
        "pytest>=5.0.1",
        "python-rapidjson>=0.7.2",
        "urllib3-mock>=0.3.3"
    ],
    platforms="any",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ]
)
|
#Author: Gentry Atkinson
#Organization: Texas University
#Data: 10 December, 2020
#Just a toy file to figure out bokeh
from bokeh.plotting import figure, output_file, show, ColumnDataSource
import numpy as np
from bokeh.models import Button, CustomJS
from bokeh.layouts import column
from bokeh.io import curdoc
def button_handler():
    """Server-side click handler; not attached to any widget in this file."""
    print("That's a click")
if __name__ == "__main__":
    # Sample data: one period of a sine wave.
    X = np.arange(0, 6.28, 0.1)
    y = np.sin(X)
    output_file("bokeh_test.html")
    # (A bare CustomJS(...) statement whose result was discarded has been
    # removed — constructing a CustomJS without attaching it has no effect.)
    p = figure(
        tools="pan,box_zoom,reset,save",
        y_axis_type="linear", y_range=[-1, 1], title="Only A Test",
        x_axis_label='X', y_axis_label='Sin(X)'
    )
    p.line(X, y, legend_label="y=sin(x)")
    button = Button(label="Button", button_type="success")
    # Browser-side callback: logs to the JS console on each click, so the
    # exported HTML works without a Bokeh server.
    button.js_on_click(CustomJS(code="console.log('button: click!', this.toString())"))
    show(column(p, button))
|
"""
Six namespaced tasks
"""
import os
from invoke import task
def get_six_path():
    """Return the path of the ``six`` directory next to this file's parent."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, '..', 'six')
@task
def build(ctx, install_prefix=None, cmake_options=''):
    """Configure the six project with CMake, then build it with make."""
    six_path = get_six_path()
    here = os.path.abspath(os.path.dirname(__file__))
    dev_path = os.path.join(here, '..', 'dev')
    # Default the install prefix to the sibling dev/ directory.
    prefix = install_prefix or dev_path
    cmake_args = "{} -DBUILD_DEMO:BOOL=OFF -DCMAKE_INSTALL_PREFIX:PATH={}".format(
        cmake_options, prefix)
    ctx.run("cd {} && cmake {} .".format(six_path, cmake_args))
    ctx.run("make -C {}".format(six_path))
@task
def install(ctx):
    """Run ``make install`` in the six build tree."""
    six_path = get_six_path()
    ctx.run("make -C " + six_path + " install")
@task
def test(ctx):
    """Build and run the six test suite (the ``run`` make target)."""
    six_path = get_six_path()
    ctx.run("make -C " + six_path + "/test run")
@task
def format(ctx):
    """Apply clang-format via the project's make target."""
    six_path = get_six_path()
    ctx.run("make -C " + six_path + " clang-format")
|
from decimal import Decimal
from typing import NamedTuple, Optional, Tuple, Union

# Base unit: every constant below is expressed as a number of bytes.
BYTE = 1
# Binary (IEC) units: successive powers of 1024.
KIBIBYTE = BYTE * 1024
MEBIBYTE = KIBIBYTE * 1024
GIBIBYTE = MEBIBYTE * 1024
TEBIBYTE = GIBIBYTE * 1024
PEBIBYTE = TEBIBYTE * 1024
EXBIBYTE = PEBIBYTE * 1024
ZEBIBYTE = EXBIBYTE * 1024
YOBIBYTE = ZEBIBYTE * 1024
# SI (decimal) units: successive powers of 1000.
KILOBYTE = BYTE * 1000
MEGABYTE = KILOBYTE * 1000
GIGABYTE = MEGABYTE * 1000
TERABYTE = GIGABYTE * 1000
PETABYTE = TERABYTE * 1000
EXABYTE = PETABYTE * 1000
ZETTABYTE = EXABYTE * 1000
YOTTABYTE = ZETTABYTE * 1000
# Maps a unit's byte count to its abbreviation string.
BINARY_PREFIXES = {
    BYTE: 'B',
    KIBIBYTE: 'KiB',
    MEBIBYTE: 'MiB',
    GIBIBYTE: 'GiB',
    TEBIBYTE: 'TiB',
    PEBIBYTE: 'PiB',
    EXBIBYTE: 'EiB',
    ZEBIBYTE: 'ZiB',
    YOBIBYTE: 'YiB',
}
DECIMAL_PREFIXES = {
    BYTE: 'B',
    KILOBYTE: 'KB',
    MEGABYTE: 'MB',
    GIGABYTE: 'GB',
    TERABYTE: 'TB',
    PETABYTE: 'PB',
    EXABYTE: 'EB',
    ZETTABYTE: 'ZB',
    YOTTABYTE: 'YB',
}
# Combined lookup across both systems (BYTE maps to 'B' in each).
PREFIXES = BINARY_PREFIXES.copy()
PREFIXES.update(DECIMAL_PREFIXES)
class _BinaryUnits(NamedTuple):
    """Binary (IEC) unit constants, each paired with a two-letter alias.

    Note: the short aliases ``KB``/``MB``/... on this type refer to the
    *binary* (1024-based) sizes, mirroring the long IEC names — see the
    ``BinaryUnits`` instance below.
    """
    BYTE: int
    B: int
    KIBIBYTE: int
    KB: int
    MEBIBYTE: int
    MB: int
    GIBIBYTE: int
    GB: int
    TEBIBYTE: int
    TB: int
    PEBIBYTE: int
    PB: int
    EXBIBYTE: int
    EB: int
    ZEBIBYTE: int
    ZB: int
    YOBIBYTE: int
    YB: int
# Singleton giving dotted access to the binary units; every size is bound
# under both its long name and its short alias (each value repeated once
# per name pair), e.g. ``BinaryUnits.KB == BinaryUnits.KIBIBYTE``.
BinaryUnits = _BinaryUnits(
    BYTE, BYTE,
    KIBIBYTE, KIBIBYTE,
    MEBIBYTE, MEBIBYTE,
    GIBIBYTE, GIBIBYTE,
    TEBIBYTE, TEBIBYTE,
    PEBIBYTE, PEBIBYTE,
    EXBIBYTE, EXBIBYTE,
    ZEBIBYTE, ZEBIBYTE,
    YOBIBYTE, YOBIBYTE,
)
class _DecimalUnits(NamedTuple):
    """Decimal (SI) unit constants, each paired with a two-letter alias.

    Here the ``KB``/``MB``/... aliases carry the SI (1000-based) sizes —
    see the ``DecimalUnits`` instance below.
    """
    BYTE: int
    B: int
    KILOBYTE: int
    KB: int
    MEGABYTE: int
    MB: int
    GIGABYTE: int
    GB: int
    TERABYTE: int
    TB: int
    PETABYTE: int
    PB: int
    EXABYTE: int
    EB: int
    ZETTABYTE: int
    ZB: int
    YOTTABYTE: int
    YB: int
# Singleton giving dotted access to the SI units; every size is bound
# under both its long name and its short alias.
DecimalUnits = _DecimalUnits(
    BYTE, BYTE,
    KILOBYTE, KILOBYTE,
    MEGABYTE, MEGABYTE,
    GIGABYTE, GIGABYTE,
    TERABYTE, TERABYTE,
    PETABYTE, PETABYTE,
    EXABYTE, EXABYTE,
    ZETTABYTE, ZETTABYTE,
    YOTTABYTE, YOTTABYTE,
)
def convert_units(
    n: float,
    unit: int = BYTE,
    to: Optional[int] = None,
    si: bool = False,
    exact: bool = False
) -> Tuple[Union[float, Decimal], str]:
    r"""Converts between and within binary and decimal units. If no ``unit``
    is specified, ``n`` is assumed to already be in bytes. If no ``to`` is
    specified, ``n`` will be converted to the highest unit possible. If
    no ``unit`` nor ``to`` is specified, the output will be binary units
    unless ``si`` is ``True``. If ``exact`` is ``True``, the calculations
    will use decimal.Decimal.

    Binary units conform to IEC standards, see:
    https://en.wikipedia.org/wiki/Binary_prefix
    https://en.wikipedia.org/wiki/IEC_80000-13
    https://www.iso.org/standard/31898.html (paywalled)

    Decimal units conform to SI standards, see:
    https://en.wikipedia.org/wiki/International_System_of_Units

    :param n: The number of ``unit``\ s.
    :type n: ``int`` or ``float``
    :param unit: The unit ``n`` represents.
    :type unit: one of the global constants
    :param to: The unit to convert to.
    :type to: one of the global constants
    :param si: Assume SI units when no ``unit`` nor ``to`` is specified.
    :type si: ``bool``
    :param exact: Use decimal.Decimal for calculations.
    :type exact: ``bool``
    :returns: The unit pair: a numeric quantity and the unit's string.
    :rtype: tuple(quantity, string)
    :raises ValueError: If ``unit`` or ``to`` is not a recognized unit.
    """
    # PREFIXES holds both IEC (binary) and SI (decimal) units, so the error
    # must not claim "binary" only (the original message did).
    if unit not in PREFIXES:
        raise ValueError(f'{unit} is not a valid unit.')
    # Always work with bytes to simplify logic.
    b: Union[float, Decimal]
    if exact:
        # Decimal(str(n)) preserves the literal value of floats exactly.
        b = Decimal(str(n)) * unit
    else:
        b = n * unit
    # Explicit None check: the original `if to:` silently ignored to=0 and
    # fell through to auto-scaling instead of rejecting the bad unit.
    # Validating before dividing also avoids raising from inside an
    # except-clause with a confusing chained KeyError.
    if to is not None:
        if to not in PREFIXES:
            raise ValueError(f'{to} is not a valid unit.')
        return b / to, PREFIXES[to]
    if unit in BINARY_PREFIXES and not si:
        # Auto-scale within binary (IEC) units.
        if b < KIBIBYTE:
            return b, 'B'
        elif b < MEBIBYTE:
            return b / KIBIBYTE, 'KiB'
        elif b < GIBIBYTE:
            return b / MEBIBYTE, 'MiB'
        elif b < TEBIBYTE:
            return b / GIBIBYTE, 'GiB'
        elif b < PEBIBYTE:
            return b / TEBIBYTE, 'TiB'
        elif b < EXBIBYTE:
            return b / PEBIBYTE, 'PiB'
        elif b < ZEBIBYTE:
            return b / EXBIBYTE, 'EiB'
        elif b < YOBIBYTE:
            return b / ZEBIBYTE, 'ZiB'
        else:
            return b / YOBIBYTE, 'YiB'
    else:
        # Auto-scale within decimal (SI) units.
        if b < KILOBYTE:
            return b, 'B'
        elif b < MEGABYTE:
            return b / KILOBYTE, 'KB'
        elif b < GIGABYTE:
            return b / MEGABYTE, 'MB'
        elif b < TERABYTE:
            return b / GIGABYTE, 'GB'
        elif b < PETABYTE:
            return b / TERABYTE, 'TB'
        elif b < EXABYTE:
            return b / PETABYTE, 'PB'
        elif b < ZETTABYTE:
            return b / EXABYTE, 'EB'
        elif b < YOTTABYTE:
            return b / ZETTABYTE, 'ZB'
        else:
            return b / YOTTABYTE, 'YB'
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: gtfs-realtime.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
class FeedHeaderIncrementality(betterproto.Enum):
    """Values for ``FeedHeader.incrementality``."""
    FULL_DATASET = 0
    DIFFERENTIAL = 1
class TripUpdateStopTimeUpdateScheduleRelationship(betterproto.Enum):
    """Values for ``TripUpdateStopTimeUpdate.schedule_relationship``."""
    SCHEDULED = 0
    SKIPPED = 1
    NO_DATA = 2
    UNSCHEDULED = 3
class VehiclePositionVehicleStopStatus(betterproto.Enum):
    """Values for ``VehiclePosition.current_status``."""
    INCOMING_AT = 0
    STOPPED_AT = 1
    IN_TRANSIT_TO = 2
class VehiclePositionCongestionLevel(betterproto.Enum):
    """Values for ``VehiclePosition.congestion_level``."""
    UNKNOWN_CONGESTION_LEVEL = 0
    RUNNING_SMOOTHLY = 1
    STOP_AND_GO = 2
    CONGESTION = 3
    SEVERE_CONGESTION = 4
class VehiclePositionOccupancyStatus(betterproto.Enum):
    """Values for ``VehiclePosition.occupancy_status`` and
    ``TripUpdateStopTimeUpdate.departure_occupancy_status``."""
    EMPTY = 0
    MANY_SEATS_AVAILABLE = 1
    FEW_SEATS_AVAILABLE = 2
    STANDING_ROOM_ONLY = 3
    CRUSHED_STANDING_ROOM_ONLY = 4
    FULL = 5
    NOT_ACCEPTING_PASSENGERS = 6
    NO_DATA_AVAILABLE = 7
    NOT_BOARDABLE = 8
class AlertCause(betterproto.Enum):
    """Alert cause codes; presumably used by an ``Alert`` message not
    visible in this chunk. Note: values start at 1, not 0."""
    UNKNOWN_CAUSE = 1
    OTHER_CAUSE = 2
    TECHNICAL_PROBLEM = 3
    STRIKE = 4
    DEMONSTRATION = 5
    ACCIDENT = 6
    HOLIDAY = 7
    WEATHER = 8
    MAINTENANCE = 9
    CONSTRUCTION = 10
    POLICE_ACTIVITY = 11
    MEDICAL_EMERGENCY = 12
class AlertEffect(betterproto.Enum):
    """Alert effect codes; presumably used by an ``Alert`` message not
    visible in this chunk. Note: values start at 1, not 0."""
    NO_SERVICE = 1
    REDUCED_SERVICE = 2
    SIGNIFICANT_DELAYS = 3
    DETOUR = 4
    ADDITIONAL_SERVICE = 5
    MODIFIED_SERVICE = 6
    OTHER_EFFECT = 7
    UNKNOWN_EFFECT = 8
    STOP_MOVED = 9
    NO_EFFECT = 10
    ACCESSIBILITY_ISSUE = 11
class AlertSeverityLevel(betterproto.Enum):
    """Alert severity codes; presumably used by an ``Alert`` message not
    visible in this chunk. Note: values start at 1, not 0."""
    UNKNOWN_SEVERITY = 1
    INFO = 2
    WARNING = 3
    SEVERE = 4
class TripDescriptorScheduleRelationship(betterproto.Enum):
    """Schedule-relationship codes; presumably used by a ``TripDescriptor``
    message not visible in this chunk. Note the gap: value 4 is unused."""
    SCHEDULED = 0
    ADDED = 1
    UNSCHEDULED = 2
    CANCELED = 3
    REPLACEMENT = 5
    DUPLICATED = 6
@dataclass
class FeedMessage(betterproto.Message):
"""
The contents of a feed message. A feed is a continuous stream of feed
messages. Each message in the stream is obtained as a response to an
appropriate HTTP GET request. A realtime feed is always defined with
relation to an existing GTFS feed. All the entity ids are resolved with
respect to the GTFS feed. Note that "required" and "optional" as stated in
this file refer to Protocol Buffer cardinality, not semantic cardinality.
See reference.md at https://github.com/google/transit/tree/master/gtfs-
realtime for field semantic cardinality.
"""
# Metadata about this feed and feed message.
header: "FeedHeader" = betterproto.message_field(1)
# Contents of the feed.
entity: List["FeedEntity"] = betterproto.message_field(2)
@dataclass
class FeedHeader(betterproto.Message):
"""Metadata about a feed, included in feed messages."""
# Version of the feed specification. The current version is 2.0. Valid
# versions are "2.0", "1.0".
gtfs_realtime_version: str = betterproto.string_field(1)
incrementality: "FeedHeaderIncrementality" = betterproto.enum_field(2)
# This timestamp identifies the moment when the content of this feed has been
# created (in server time). In POSIX time (i.e., number of seconds since
# January 1st 1970 00:00:00 UTC).
timestamp: int = betterproto.uint64_field(3)
@dataclass
class FeedEntity(betterproto.Message):
"""A definition (or update) of an entity in the transit feed."""
# The ids are used only to provide incrementality support. The id should be
# unique within a FeedMessage. Consequent FeedMessages may contain
# FeedEntities with the same id. In case of a DIFFERENTIAL update the new
# FeedEntity with some id will replace the old FeedEntity with the same id
# (or delete it - see is_deleted below). The actual GTFS entities (e.g.
# stations, routes, trips) referenced by the feed must be specified by
# explicit selectors (see EntitySelector below for more info).
id: str = betterproto.string_field(1)
# Whether this entity is to be deleted. Relevant only for incremental
# fetches.
is_deleted: bool = betterproto.bool_field(2)
# Data about the entity itself. Exactly one of the following fields must be
# present (unless the entity is being deleted).
trip_update: "TripUpdate" = betterproto.message_field(3)
vehicle: "VehiclePosition" = betterproto.message_field(4)
alert: "Alert" = betterproto.message_field(5)
# NOTE: This field is still experimental, and subject to change. It may be
# formally adopted in the future.
shape: "Shape" = betterproto.message_field(6)
@dataclass
class TripUpdate(betterproto.Message):
"""
Realtime update of the progress of a vehicle along a trip. Depending on the
value of ScheduleRelationship, a TripUpdate can specify: - A trip that
proceeds along the schedule. - A trip that proceeds along a route but has
no fixed schedule. - A trip that have been added or removed with regard to
schedule. The updates can be for future, predicted arrival/departure
events, or for past events that already occurred. Normally, updates should
get more precise and more certain (see uncertainty below) as the events
gets closer to current time. Even if that is not possible, the information
for past events should be precise and certain. In particular, if an update
points to time in the past but its update's uncertainty is not 0, the
client should conclude that the update is a (wrong) prediction and that the
trip has not completed yet. Note that the update can describe a trip that
is already completed. To this end, it is enough to provide an update for
the last stop of the trip. If the time of that is in the past, the client
will conclude from that that the whole trip is in the past (it is possible,
although inconsequential, to also provide updates for preceding stops).
This option is most relevant for a trip that has completed ahead of
schedule, but according to the schedule, the trip is still proceeding at
the current time. Removing the updates for this trip could make the client
assume that the trip is still proceeding. Note that the feed provider is
allowed, but not required, to purge past updates - this is one case where
this would be practically useful.
"""
# The Trip that this message applies to. There can be at most one TripUpdate
# entity for each actual trip instance. If there is none, that means there is
# no prediction information available. It does *not* mean that the trip is
# progressing according to schedule.
trip: "TripDescriptor" = betterproto.message_field(1)
# Additional information on the vehicle that is serving this trip.
vehicle: "VehicleDescriptor" = betterproto.message_field(3)
# Updates to StopTimes for the trip (both future, i.e., predictions, and in
# some cases, past ones, i.e., those that already happened). The updates must
# be sorted by stop_sequence, and apply for all the following stops of the
# trip up to the next specified one. Example 1: For a trip with 20 stops, a
# StopTimeUpdate with arrival delay and departure delay of 0 for
# stop_sequence of the current stop means that the trip is exactly on time.
# Example 2: For the same trip instance, 3 StopTimeUpdates are provided: -
# delay of 5 min for stop_sequence 3 - delay of 1 min for stop_sequence 8 -
# delay of unspecified duration for stop_sequence 10 This will be interpreted
# as: - stop_sequences 3,4,5,6,7 have delay of 5 min. - stop_sequences 8,9
# have delay of 1 min. - stop_sequences 10,... have unknown delay.
stop_time_update: List["TripUpdateStopTimeUpdate"] = betterproto.message_field(2)
# The most recent moment at which the vehicle's real-time progress was
# measured to estimate StopTimes in the future. When StopTimes in the past
# are provided, arrival/departure times may be earlier than this value. In
# POSIX time (i.e., the number of seconds since January 1st 1970 00:00:00
# UTC).
timestamp: int = betterproto.uint64_field(4)
# The current schedule deviation for the trip. Delay should only be
# specified when the prediction is given relative to some existing schedule
# in GTFS. Delay (in seconds) can be positive (meaning that the vehicle is
# late) or negative (meaning that the vehicle is ahead of schedule). Delay of
# 0 means that the vehicle is exactly on time. Delay information in
# StopTimeUpdates take precedent of trip-level delay information, such that
# trip-level delay is only propagated until the next stop along the trip with
# a StopTimeUpdate delay value specified. Feed providers are strongly
# encouraged to provide a TripUpdate.timestamp value indicating when the
# delay value was last updated, in order to evaluate the freshness of the
# data. NOTE: This field is still experimental, and subject to change. It may
# be formally adopted in the future.
delay: int = betterproto.int32_field(5)
trip_properties: "TripUpdateTripProperties" = betterproto.message_field(6)
@dataclass
class TripUpdateStopTimeEvent(betterproto.Message):
"""
Timing information for a single predicted event (either arrival or
departure). Timing consists of delay and/or estimated time, and
uncertainty. - delay should be used when the prediction is given relative
to some existing schedule in GTFS. - time should be given whether there
is a predicted schedule or not. If both time and delay are specified,
time will take precedence (although normally, time, if given for a
scheduled trip, should be equal to scheduled time in GTFS + delay).
Uncertainty applies equally to both time and delay. The uncertainty roughly
specifies the expected error in true delay (but note, we don't yet define
its precise statistical meaning). It's possible for the uncertainty to be
0, for example for trains that are driven under computer timing control.
"""
# Delay (in seconds) can be positive (meaning that the vehicle is late) or
# negative (meaning that the vehicle is ahead of schedule). Delay of 0 means
# that the vehicle is exactly on time.
delay: int = betterproto.int32_field(1)
# Event as absolute time. In Unix time (i.e., number of seconds since January
# 1st 1970 00:00:00 UTC).
time: int = betterproto.int64_field(2)
# If uncertainty is omitted, it is interpreted as unknown. If the prediction
# is unknown or too uncertain, the delay (or time) field should be empty. In
# such case, the uncertainty field is ignored. To specify a completely
# certain prediction, set its uncertainty to 0.
uncertainty: int = betterproto.int32_field(3)
@dataclass
class TripUpdateStopTimeUpdate(betterproto.Message):
"""
Realtime update for arrival and/or departure events for a given stop on a
trip. Updates can be supplied for both past and future events. The producer
is allowed, although not required, to drop past events.
"""
# Must be the same as in stop_times.txt in the corresponding GTFS feed.
stop_sequence: int = betterproto.uint32_field(1)
# Must be the same as in stops.txt in the corresponding GTFS feed.
stop_id: str = betterproto.string_field(4)
arrival: "TripUpdateStopTimeEvent" = betterproto.message_field(2)
departure: "TripUpdateStopTimeEvent" = betterproto.message_field(3)
# Expected occupancy after departure from the given stop. Should be provided
# only for future stops. In order to provide departure_occupancy_status
# without either arrival or departure StopTimeEvents, ScheduleRelationship
# should be set to NO_DATA.
departure_occupancy_status: "VehiclePositionOccupancyStatus" = (
betterproto.enum_field(7)
)
schedule_relationship: "TripUpdateStopTimeUpdateScheduleRelationship" = (
betterproto.enum_field(5)
)
# Realtime updates for certain properties defined within GTFS stop_times.txt
# NOTE: This field is still experimental, and subject to change. It may be
# formally adopted in the future.
stop_time_properties: "TripUpdateStopTimeUpdateStopTimeProperties" = (
betterproto.message_field(6)
)
@dataclass
class TripUpdateStopTimeUpdateStopTimeProperties(betterproto.Message):
"""
Provides the updated values for the stop time. NOTE: This message is still
experimental, and subject to change. It may be formally adopted in the
future.
"""
# Supports real-time stop assignments. Refers to a stop_id defined in the
# GTFS stops.txt. The new assigned_stop_id should not result in a
# significantly different trip experience for the end user than the stop_id
# defined in GTFS stop_times.txt. In other words, the end user should not
# view this new stop_id as an "unusual change" if the new stop was presented
# within an app without any additional context. For example, this field is
# intended to be used for platform assignments by using a stop_id that
# belongs to the same station as the stop originally defined in GTFS
# stop_times.txt. To assign a stop without providing any real-time arrival or
# departure predictions, populate this field and set
# StopTimeUpdate.schedule_relationship = NO_DATA. If this field is populated,
# it is preferred to omit `StopTimeUpdate.stop_id` and use only
# `StopTimeUpdate.stop_sequence`. If `StopTimeProperties.assigned_stop_id`
# and `StopTimeUpdate.stop_id` are populated, `StopTimeUpdate.stop_id` must
# match `assigned_stop_id`. Platform assignments should be reflected in other
# GTFS-realtime fields as well (e.g., `VehiclePosition.stop_id`). NOTE: This
# field is still experimental, and subject to change. It may be formally
# adopted in the future.
assigned_stop_id: str = betterproto.string_field(1)
@dataclass
class TripUpdateTripProperties(betterproto.Message):
"""
Defines updated properties of the trip, such as a new shape_id when there
is a detour. Or defines the trip_id, start_date, and start_time of a
DUPLICATED trip. NOTE: This message is still experimental, and subject to
change. It may be formally adopted in the future.
"""
# Defines the identifier of a new trip that is a duplicate of an existing
# trip defined in (CSV) GTFS trips.txt but will start at a different service
# date and/or time (defined using the TripProperties.start_date and
# TripProperties.start_time fields). See definition of trips.trip_id in (CSV)
# GTFS. Its value must be different than the ones used in the (CSV) GTFS.
# Required if schedule_relationship=DUPLICATED, otherwise this field must not
# be populated and will be ignored by consumers. NOTE: This field is still
# experimental, and subject to change. It may be formally adopted in the
# future.
trip_id: str = betterproto.string_field(1)
# Service date on which the DUPLICATED trip will be run, in YYYYMMDD format.
# Required if schedule_relationship=DUPLICATED, otherwise this field must not
# be populated and will be ignored by consumers. NOTE: This field is still
# experimental, and subject to change. It may be formally adopted in the
# future.
start_date: str = betterproto.string_field(2)
# Defines the departure start time of the trip when it’s duplicated. See
# definition of stop_times.departure_time in (CSV) GTFS. Scheduled arrival
# and departure times for the duplicated trip are calculated based on the
# offset between the original trip departure_time and this field. For
# example, if a GTFS trip has stop A with a departure_time of 10:00:00 and
# stop B with departure_time of 10:01:00, and this field is populated with
# the value of 10:30:00, stop B on the duplicated trip will have a scheduled
# departure_time of 10:31:00. Real-time prediction delay values are applied
# to this calculated schedule time to determine the predicted time. For
# example, if a departure delay of 30 is provided for stop B, then the
# predicted departure time is 10:31:30. Real-time prediction time values do
# not have any offset applied to them and indicate the predicted time as
# provided. For example, if a departure time representing 10:31:30 is
# provided for stop B, then the predicted departure time is 10:31:30. This
# field is required if schedule_relationship is DUPLICATED, otherwise this
# field must not be populated and will be ignored by consumers. NOTE: This
# field is still experimental, and subject to change. It may be formally
# adopted in the future.
start_time: str = betterproto.string_field(3)
# Specifies the shape of the vehicle travel path when the trip shape differs
# from the shape specified in (CSV) GTFS or to specify it in real-time when
# it's not provided by (CSV) GTFS, such as a vehicle that takes differing
# paths based on rider demand. See definition of trips.shape_id in (CSV)
# GTFS. If a shape is neither defined in (CSV) GTFS nor in real-time, the
# shape is considered unknown. This field can refer to a shape defined in the
# (CSV) GTFS in shapes.txt or a Shape in the (protobuf) real-time feed. The
# order of stops (stop sequences) for this trip must remain the same as (CSV)
# GTFS. Stops that are a part of the original trip but will no longer be
# made, such as when a detour occurs, should be marked as
# schedule_relationship=SKIPPED. NOTE: This field is still experimental, and
# subject to change. It may be formally adopted in the future.
shape_id: str = betterproto.string_field(4)
@dataclass
class VehiclePosition(betterproto.Message):
    """Realtime positioning information for a given vehicle."""

    # The Trip that this vehicle is serving. Can be empty or partial if the
    # vehicle can not be identified with a given trip instance.
    trip: "TripDescriptor" = betterproto.message_field(1)
    # Additional information on the vehicle that is serving this trip.
    vehicle: "VehicleDescriptor" = betterproto.message_field(8)
    # Current position of this vehicle.
    position: "Position" = betterproto.message_field(2)
    # The stop sequence index of the current stop. The meaning of
    # current_stop_sequence (i.e., the stop that it refers to) is determined by
    # current_status. If current_status is missing IN_TRANSIT_TO is assumed.
    current_stop_sequence: int = betterproto.uint32_field(3)
    # Identifies the current stop. The value must be the same as in stops.txt in
    # the corresponding GTFS feed.
    stop_id: str = betterproto.string_field(7)
    # The exact status of the vehicle with respect to the current stop. Ignored
    # if current_stop_sequence is missing.
    current_status: "VehiclePositionVehicleStopStatus" = betterproto.enum_field(4)
    # Moment at which the vehicle's position was measured. In POSIX time (i.e.,
    # number of seconds since January 1st 1970 00:00:00 UTC).
    timestamp: int = betterproto.uint64_field(5)
    # Congestion level reported for this vehicle (see
    # VehiclePositionCongestionLevel for the possible values).
    congestion_level: "VehiclePositionCongestionLevel" = betterproto.enum_field(6)
    # If multi_carriage_status is populated with per-carriage OccupancyStatus,
    # then this field should describe the entire vehicle with all carriages
    # accepting passengers considered.
    occupancy_status: "VehiclePositionOccupancyStatus" = betterproto.enum_field(9)
    # A percentage value indicating the degree of passenger occupancy in the
    # vehicle. The values are represented as an integer without decimals. 0 means
    # 0% and 100 means 100%. The value 100 should represent the total maximum
    # occupancy the vehicle was designed for, including both seated and standing
    # capacity, and current operating regulations allow. The value may exceed 100
    # if there are more passengers than the maximum designed capacity. The
    # precision of occupancy_percentage should be low enough that individual
    # passengers cannot be tracked boarding or alighting the vehicle. If
    # multi_carriage_status is populated with per-carriage occupancy_percentage,
    # then this field should describe the entire vehicle with all carriages
    # accepting passengers considered. This field is still experimental, and
    # subject to change. It may be formally adopted in the future.
    occupancy_percentage: int = betterproto.uint32_field(10)
    # Details of the multiple carriages of this given vehicle. The first
    # occurrence represents the first carriage of the vehicle, given the current
    # direction of travel. The number of occurrences of the
    # multi_carriage_details field represents the number of carriages of the
    # vehicle. It also includes non boardable carriages, like engines,
    # maintenance carriages, etc… as they provide valuable information to
    # passengers about where to stand on a platform. This message/field is still
    # experimental, and subject to change. It may be formally adopted in the
    # future.
    multi_carriage_details: List[
        "VehiclePositionCarriageDetails"
    ] = betterproto.message_field(11)
@dataclass
class VehiclePositionCarriageDetails(betterproto.Message):
    """
    Carriage specific details, used for vehicles composed of several carriages
    This message/field is still experimental, and subject to change. It may be
    formally adopted in the future.
    """

    # Identification of the carriage. Should be unique per vehicle.
    id: str = betterproto.string_field(1)
    # User visible label that may be shown to the passenger to help identify the
    # carriage. Example: "7712", "Car ABC-32", etc... This message/field is still
    # experimental, and subject to change. It may be formally adopted in the
    # future.
    label: str = betterproto.string_field(2)
    # Occupancy status for this given carriage, in this vehicle This
    # message/field is still experimental, and subject to change. It may be
    # formally adopted in the future.
    occupancy_status: "VehiclePositionOccupancyStatus" = betterproto.enum_field(3)
    # Occupancy percentage for this given carriage, in this vehicle. Follows the
    # same rules as "VehiclePosition.occupancy_percentage" -1 in case data is not
    # available for this given carriage (as protobuf defaults to 0 otherwise)
    # This message/field is still experimental, and subject to change. It may be
    # formally adopted in the future.
    # Note: declared as int32 (not uint32) precisely so -1 can encode "no data".
    occupancy_percentage: int = betterproto.int32_field(4)
    # Identifies the order of this carriage with respect to the other carriages
    # in the vehicle's list of CarriageDetails. The first carriage in the
    # direction of travel must have a value of 1. The second value corresponds to
    # the second carriage in the direction of travel and must have a value of 2,
    # and so forth. For example, the first carriage in the direction of travel
    # has a value of 1. If the second carriage in the direction of travel has a
    # value of 3, consumers will discard data for all carriages (i.e., the
    # multi_carriage_details field). Carriages without data must be represented
    # with a valid carriage_sequence number and the fields without data should be
    # omitted (alternately, those fields could also be included and set to the
    # "no data" values). This message/field is still experimental, and subject to
    # change. It may be formally adopted in the future.
    carriage_sequence: int = betterproto.uint32_field(5)
@dataclass
class Alert(betterproto.Message):
    """
    An alert, indicating some sort of incident in the public transit network.
    """

    # Time when the alert should be shown to the user. If missing, the alert will
    # be shown as long as it appears in the feed. If multiple ranges are given,
    # the alert will be shown during all of them.
    active_period: List["TimeRange"] = betterproto.message_field(1)
    # Entities whose users we should notify of this alert.
    informed_entity: List["EntitySelector"] = betterproto.message_field(5)
    # Cause of the incident (see AlertCause for the possible values).
    cause: "AlertCause" = betterproto.enum_field(6)
    # Effect of the incident on the service (see AlertEffect).
    effect: "AlertEffect" = betterproto.enum_field(7)
    # The URL which provides additional information about the alert.
    url: "TranslatedString" = betterproto.message_field(8)
    # Alert header. Contains a short summary of the alert text as plain-text.
    header_text: "TranslatedString" = betterproto.message_field(10)
    # Full description for the alert as plain-text. The information in the
    # description should add to the information of the header.
    description_text: "TranslatedString" = betterproto.message_field(11)
    # Text for alert header to be used in text-to-speech implementations. This
    # field is the text-to-speech version of header_text.
    tts_header_text: "TranslatedString" = betterproto.message_field(12)
    # Text for full description for the alert to be used in text-to-speech
    # implementations. This field is the text-to-speech version of
    # description_text.
    tts_description_text: "TranslatedString" = betterproto.message_field(13)
    # Severity of the alert (see AlertSeverityLevel).
    severity_level: "AlertSeverityLevel" = betterproto.enum_field(14)
    # TranslatedImage to be displayed along the alert text. Used to explain
    # visually the alert effect of a detour, station closure, etc. The image must
    # enhance the understanding of the alert. Any essential information
    # communicated within the image must also be contained in the alert text. The
    # following types of images are discouraged : image containing mainly text,
    # marketing or branded images that add no additional information. NOTE: This
    # field is still experimental, and subject to change. It may be formally
    # adopted in the future.
    image: "TranslatedImage" = betterproto.message_field(15)
    # Text describing the appearance of the linked image in the `image` field
    # (e.g., in case the image can't be displayed or the user can't see the image
    # for accessibility reasons). See the HTML spec for alt image text -
    # https://html.spec.whatwg.org/#alt. NOTE: This field is still experimental,
    # and subject to change. It may be formally adopted in the future.
    image_alternative_text: "TranslatedString" = betterproto.message_field(16)
@dataclass
class TimeRange(betterproto.Message):
    """
    A time interval. The interval is considered active at time 't' if 't' is
    greater than or equal to the start time and less than the end time.
    """

    # The interval is half-open: [start, end).
    # Start time, in POSIX time (i.e., number of seconds since January 1st 1970
    # 00:00:00 UTC). If missing, the interval starts at minus infinity.
    start: int = betterproto.uint64_field(1)
    # End time, in POSIX time (i.e., number of seconds since January 1st 1970
    # 00:00:00 UTC). If missing, the interval ends at plus infinity.
    end: int = betterproto.uint64_field(2)
@dataclass
class Position(betterproto.Message):
    """A position."""

    # Degrees North, in the WGS-84 coordinate system.
    latitude: float = betterproto.float_field(1)
    # Degrees East, in the WGS-84 coordinate system.
    longitude: float = betterproto.float_field(2)
    # Bearing, in degrees, clockwise from North, i.e., 0 is North and 90 is East.
    # This can be the compass bearing, or the direction towards the next stop or
    # intermediate location. This should not be direction deduced from the
    # sequence of previous positions, which can be computed from previous data.
    bearing: float = betterproto.float_field(3)
    # Odometer value, in meters.
    # Note: this is the only double-precision field here; the others are 32-bit.
    odometer: float = betterproto.double_field(4)
    # Momentary speed measured by the vehicle, in meters per second.
    speed: float = betterproto.float_field(5)
@dataclass
class TripDescriptor(betterproto.Message):
    """
    A descriptor that identifies an instance of a GTFS trip, or all instances
    of a trip along a route. - To specify a single trip instance, the trip_id
    (and if necessary, start_time) is set. If route_id is also set, then it
    should be same as one that the given trip corresponds to. - To specify
    all the trips along a given route, only the route_id should be set. Note
    that if the trip_id is not known, then stop sequence ids in TripUpdate
    are not sufficient, and stop_ids must be provided as well. In addition,
    absolute arrival/departure times must be provided.
    """

    # The trip_id from the GTFS feed that this selector refers to. For non
    # frequency-based trips, this field is enough to uniquely identify the trip.
    # For frequency-based trip, start_time and start_date might also be
    # necessary. When schedule_relationship is DUPLICATED within a TripUpdate,
    # the trip_id identifies the trip from static GTFS to be duplicated. When
    # schedule_relationship is DUPLICATED within a VehiclePosition, the trip_id
    # identifies the new duplicate trip and must contain the value for the
    # corresponding TripUpdate.TripProperties.trip_id.
    trip_id: str = betterproto.string_field(1)
    # The route_id from the GTFS that this selector refers to.
    route_id: str = betterproto.string_field(5)
    # The direction_id from the GTFS feed trips.txt file, indicating the
    # direction of travel for trips this selector refers to.
    direction_id: int = betterproto.uint32_field(6)
    # The initially scheduled start time of this trip instance. When the trip_id
    # corresponds to a non-frequency-based trip, this field should either be
    # omitted or be equal to the value in the GTFS feed. When the trip_id
    # correponds to a frequency-based trip, the start_time must be specified for
    # trip updates and vehicle positions. If the trip corresponds to
    # exact_times=1 GTFS record, then start_time must be some multiple (including
    # zero) of headway_secs later than frequencies.txt start_time for the
    # corresponding time period. If the trip corresponds to exact_times=0, then
    # its start_time may be arbitrary, and is initially expected to be the first
    # departure of the trip. Once established, the start_time of this frequency-
    # based trip should be considered immutable, even if the first departure time
    # changes -- that time change may instead be reflected in a StopTimeUpdate.
    # Format and semantics of the field is same as that of
    # GTFS/frequencies.txt/start_time, e.g., 11:15:35 or 25:15:35.
    start_time: str = betterproto.string_field(2)
    # The scheduled start date of this trip instance. Must be provided to
    # disambiguate trips that are so late as to collide with a scheduled trip on
    # a next day. For example, for a train that departs 8:00 and 20:00 every day,
    # and is 12 hours late, there would be two distinct trips on the same time.
    # This field can be provided but is not mandatory for schedules in which such
    # collisions are impossible - for example, a service running on hourly
    # schedule where a vehicle that is one hour late is not considered to be
    # related to schedule anymore. In YYYYMMDD format.
    start_date: str = betterproto.string_field(3)
    # Relationship between this trip instance and the static schedule (see
    # TripDescriptorScheduleRelationship for the possible values).
    schedule_relationship: "TripDescriptorScheduleRelationship" = (
        betterproto.enum_field(4)
    )
@dataclass
class VehicleDescriptor(betterproto.Message):
    """Identification information for the vehicle performing the trip."""

    # All three identifiers are plain strings; proto3 semantics make each
    # optional (empty string when unset).
    # Internal system identification of the vehicle. Should be unique per
    # vehicle, and can be used for tracking the vehicle as it proceeds through
    # the system.
    id: str = betterproto.string_field(1)
    # User visible label, i.e., something that must be shown to the passenger to
    # help identify the correct vehicle.
    label: str = betterproto.string_field(2)
    # The license plate of the vehicle.
    license_plate: str = betterproto.string_field(3)
@dataclass
class EntitySelector(betterproto.Message):
    """A selector for an entity in a GTFS feed."""

    # The values of the fields should correspond to the appropriate fields in the
    # GTFS feed. At least one specifier must be given. If several are given, then
    # the matching has to apply to all the given specifiers.
    agency_id: str = betterproto.string_field(1)
    route_id: str = betterproto.string_field(2)
    # corresponds to route_type in GTFS.
    route_type: int = betterproto.int32_field(3)
    # Matches a specific trip instance (see TripDescriptor).
    trip: "TripDescriptor" = betterproto.message_field(4)
    # Matches a specific stop by its GTFS stop_id.
    stop_id: str = betterproto.string_field(5)
    # Corresponds to trip direction_id in GTFS trips.txt. If provided the
    # route_id must also be provided.
    direction_id: int = betterproto.uint32_field(6)
@dataclass
class TranslatedString(betterproto.Message):
    """
    An internationalized message containing per-language versions of a snippet
    of text or a URL. One of the strings from a message will be picked up. The
    resolution proceeds as follows: 1. If the UI language matches the language
    code of a translation, the first matching translation is picked. 2. If a
    default UI language (e.g., English) matches the language code of a
    translation, the first matching translation is picked. 3. If some
    translation has an unspecified language code, that translation is
    picked.
    """

    # At least one translation must be provided.
    # The list is ordered: resolution picks the first match per the rules above.
    translation: List["TranslatedStringTranslation"] = betterproto.message_field(1)
@dataclass
class TranslatedStringTranslation(betterproto.Message):
    """A single language-tagged variant within a TranslatedString."""

    # A UTF-8 string containing the message.
    text: str = betterproto.string_field(1)
    # BCP-47 language code. Can be omitted if the language is unknown or if no
    # i18n is done at all for the feed. At most one translation is allowed to
    # have an unspecified language tag.
    language: str = betterproto.string_field(2)
@dataclass
class TranslatedImage(betterproto.Message):
    """
    An internationalized image containing per-language versions of a URL
    linking to an image along with meta information Only one of the images from
    a message will be retained by consumers. The resolution proceeds as
    follows: 1. If the UI language matches the language code of a translation,
    the first matching translation is picked. 2. If a default UI language
    (e.g., English) matches the language code of a translation, the first
    matching translation is picked. 3. If some translation has an unspecified
    language code, that translation is picked. NOTE: This field is still
    experimental, and subject to change. It may be formally adopted in the
    future.
    """

    # At least one localized image must be provided.
    # The list is ordered: resolution picks the first match per the rules above.
    localized_image: List["TranslatedImageLocalizedImage"] = betterproto.message_field(
        1
    )
@dataclass
class TranslatedImageLocalizedImage(betterproto.Message):
    """A single language-tagged image variant within a TranslatedImage."""

    # String containing an URL linking to an image The image linked must be less
    # than 2MB. If an image changes in a significant enough way that an update is
    # required on the consumer side, the producer must update the URL to a new
    # one. The URL should be a fully qualified URL that includes http:// or
    # https://, and any special characters in the URL must be correctly escaped.
    # See the following
    # http://www.w3.org/Addressing/URL/4_URI_Recommentations.html for a
    # description of how to create fully qualified URL values.
    url: str = betterproto.string_field(1)
    # IANA media type as to specify the type of image to be displayed. The type
    # must start with "image/"
    media_type: str = betterproto.string_field(2)
    # BCP-47 language code. Can be omitted if the language is unknown or if no
    # i18n is done at all for the feed. At most one translation is allowed to
    # have an unspecified language tag.
    language: str = betterproto.string_field(3)
@dataclass
class Shape(betterproto.Message):
    """
    Describes the physical path that a vehicle takes when it's not part of the
    (CSV) GTFS, such as for a detour. Shapes belong to Trips, and consist of a
    sequence of shape points. Tracing the points in order provides the path of
    the vehicle. Shapes do not need to intercept the location of Stops
    exactly, but all Stops on a trip should lie within a small distance of the
    shape for that trip, i.e. close to straight line segments connecting the
    shape points NOTE: This message is still experimental, and subject to
    change. It may be formally adopted in the future.
    """

    # Identifier of the shape. Must be different than any shape_id defined in the
    # (CSV) GTFS. This field is required as per reference.md, but needs to be
    # specified here optional because "Required is Forever" See
    # https://developers.google.com/protocol-
    # buffers/docs/proto#specifying_field_rules NOTE: This field is still
    # experimental, and subject to change. It may be formally adopted in the
    # future.
    shape_id: str = betterproto.string_field(1)
    # Encoded polyline representation of the shape. This polyline must contain at
    # least two points. For more information about encoded polylines, see https:/
    # /developers.google.com/maps/documentation/utilities/polylinealgorithm This
    # field is required as per reference.md, but needs to be specified here
    # optional because "Required is Forever" See
    # https://developers.google.com/protocol-
    # buffers/docs/proto#specifying_field_rules NOTE: This field is still
    # experimental, and subject to change. It may be formally adopted in the
    # future.
    encoded_polyline: str = betterproto.string_field(2)
|
from marshmallow import fields, Schema
class RunSchema(Schema):
    """Marshmallow schema serializing a single run record (id, session,
    acquisition, subject, run number, duration, owning dataset and task)."""

    # NOTE(review): passing description= directly to fields is deprecated in
    # marshmallow 3.10+ (use metadata={'description': ...}) — verify the pinned
    # marshmallow version before upgrading.
    id = fields.Int()
    session = fields.Str(description='Session number')
    acquisition = fields.Str(description='Acquisition')
    subject = fields.Str(description='Subject id')
    number = fields.Int(description='Run id')
    duration = fields.Number(description='Total run duration in seconds.')
    dataset_id = fields.Int(description='Dataset run belongs to.')
    # Pulls only the 'id' field out of the related TaskSchema; the schema is
    # referenced lazily by name (resolved via the marshmallow class registry).
    task = fields.Pluck(
        'TaskSchema', 'id', description="Task id and name")
|
import os
from datetime import timedelta
from pathlib import Path
# Project base directory (two levels above this settings file).
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# May be None if the SECRET_KEY env var is unset — Django will then refuse to
# start, which is the desired fail-fast behavior in production.
SECRET_KEY = os.getenv("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get("DEBUG", default=1)))
# Whitespace-separated host list from the environment. Fix: the previous
# os.environ.get("ALLOWED_HOST").split(" ") raised AttributeError when the
# variable was unset (get() returned None), and split(" ") produced empty
# entries for repeated spaces; .split() with a default handles both.
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOST", "").split()
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Local
    "posts.apps.PostsConfig",
    "core.apps.CoreConfig",
    # Rest_FW
    "rest_framework",
    "corsheaders",
    # Celery
    # NOTE(review): "celery" is not a Django app config; listing it here is
    # usually unnecessary (django-celery-* packages are what register apps) —
    # verify it is intentional before removing.
    "celery",
]
# Order matters: middleware runs top-down on requests, bottom-up on responses.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    # CorsMiddleware is placed above CommonMiddleware, as recommended by
    # django-cors-headers, so CORS headers are added before any redirects.
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "news_board.urls"
# Standard Django template config: no extra template dirs, app templates only.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "news_board.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        # `or` fallbacks also replace empty-string env values, not just unset ones.
        "NAME": os.getenv("DB_NAME") or "news_board",
        "USER": os.getenv("DB_USER") or "postgres",
        "PASSWORD": os.getenv("DB_PASS"),
        "HOST": os.getenv("DB_HOST") or "localhost",
        # Keep connections open for up to 500s to avoid per-request reconnects.
        "CONN_MAX_AGE": 500,
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# DRF: require authentication everywhere by default; authenticate via JWT.
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework_simplejwt.authentication.JWTAuthentication",
    ),
}
# Cors headers white list
CORS_ORIGIN_WHITELIST = (
    "http://localhost:3000",
    "http://localhost:8000",
)
# JWT settings
SIMPLE_JWT = {
    "ACCESS_TOKEN_LIFETIME": timedelta(minutes=30),
    "REFRESH_TOKEN_LIFETIME": timedelta(days=1),
    "ROTATE_REFRESH_TOKENS": False,
    "BLACKLIST_AFTER_ROTATION": True,
    "ALGORITHM": "HS256",
    # Reuses Django's SECRET_KEY; NOTE(review): SECRET_KEY is None when the env
    # var is unset, which would make token signing fail — ensure it is set.
    "SIGNING_KEY": SECRET_KEY,
    "VERIFYING_KEY": None,
    "AUDIENCE": None,
    "ISSUER": None,
    "AUTH_HEADER_TYPES": ("Bearer",),
    "USER_ID_FIELD": "id",
    "USER_ID_CLAIM": "user_id",
    "AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
    "TOKEN_TYPE_CLAIM": "token_type",
    "JTI_CLAIM": "jti",
    "SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
    "SLIDING_TOKEN_LIFETIME": timedelta(days=10),
    "SLIDING_TOKEN_REFRESH_LIFETIME": timedelta(days=30),
}
# Redis/Celery related settings
REDIS_HOST = os.environ.get("REDIS_HOST") or "0.0.0.0"
REDIS_PORT = os.environ.get("REDIS_PORT") or "6379"
# Redis DB 0 serves as both the Celery broker and the result backend.
CELERY_BROKER_URL = "redis://" + REDIS_HOST + ":" + REDIS_PORT + "/0"
# NOTE(review): the conventional setting name is CELERY_BROKER_TRANSPORT_OPTIONS
# (plural) — confirm this singular name is actually read anywhere.
CELERY_BROKER_TRANSPORT_OPTION = {"visibility_timeout": 3600}
CELERY_RESULT_BACKEND = "redis://" + REDIS_HOST + ":" + REDIS_PORT + "/0"
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
|
from xml.dom.minidom import Element
class Properties:
    """Sequential accessor over the <property> child elements of a JUnit-XML
    testcase element.

    Usage: call next() to advance; it returns True while a property is
    available, after which get_current() yields that DOM element. Once the
    iterator is exhausted, next() returns False and get_current() is None.
    """

    def __init__(self, testcase: Element):
        # Tag name used by JUnit XML reports for per-testcase properties.
        self.__properties_tag_name = 'property'
        self.__properties_iter = iter(testcase.getElementsByTagName(self.__properties_tag_name))
        # Most recently consumed property element; None before the first
        # next() call and again once the iterator is exhausted.
        self.__current = None
        # Plain string: the original used an f-string with no placeholders.
        print("INFO: Successfully extracted properties from test case")

    def next(self):
        """Advance to the next property; return True if one was available."""
        next_value, self.__current = self.__next_iter_value(self.__properties_iter)
        return next_value

    def get_current(self):
        """Return the element fetched by the most recent next() call (or None)."""
        return self.__current

    @staticmethod
    def __next_iter_value(iter_list):
        # Fix: use the builtin next() rather than calling __next__() directly.
        try:
            return True, next(iter_list)
        except StopIteration:
            print("INFO: No more properties found")
            return False, None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-20 11:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.4) migration making Evangelista.foto_perfil
    an optional FileField."""

    dependencies = [
        ('cadastro', '0006_evangelista_data_batismo_no_espirito_santo'),
    ]
    operations = [
        migrations.AlterField(
            model_name='evangelista',
            name='foto_perfil',
            # NOTE(review): upload_to=b'' is a bytes literal — a Python 2
            # autogeneration artifact; on Python 3 a plain '' is expected.
            # Do not edit a historical migration without checking state impact.
            field=models.FileField(blank=True, null=True, upload_to=b''),
        ),
    ]
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logbook
import pandas as pd
from pandas.tslib import normalize_date
from six import string_types
from sqlalchemy import create_engine
from zipline.assets import AssetDBWriter, AssetFinder
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.loader import load_market_data
from zipline.utils.calendars import get_calendar
from zipline.utils.memoize import remember_last
log = logbook.Logger('Trading')
DEFAULT_CAPITAL_BASE = 1e5
class TradingEnvironment(object):
    """
    The financial simulations in zipline depend on information
    about the benchmark index and the risk free rates of return.
    The benchmark index defines the benchmark returns used in
    the calculation of performance metrics such as alpha/beta. Many
    components, including risk, performance, transforms, and
    batch_transforms, need access to a calendar of trading days and
    market hours. The TradingEnvironment maintains two time keeping
    facilities:
    - a DatetimeIndex of trading days for calendar calculations
    - a timezone name, which should be local to the exchange
    hosting the benchmark index. All dates are normalized to UTC
    for serialization and storage, and the timezone is used to
    ensure proper rollover through daylight savings and so on.
    User code will not normally need to use TradingEnvironment
    directly. If you are extending zipline's core financial
    components and need to use the environment, you must import the module and
    build a new TradingEnvironment object, then pass that TradingEnvironment as
    the 'env' arg to your TradingAlgorithm.
    Parameters
    ----------
    load : callable, optional
        The function that returns benchmark returns and treasury curves.
        The treasury curves are expected to be a DataFrame with an index of
        dates and columns of the curve names, e.g. '10year', '1month', etc.
    bm_symbol : str, optional
        The benchmark symbol
    exchange_tz : tz-coercable, optional
        The timezone of the exchange.
    trading_calendar : TradingCalendar, optional
        The trading calendar to work with in this environment.
    asset_db_path : str or sa.engine.Engine, optional
        The path to the assets db or sqlalchemy Engine object to use to
        construct an AssetFinder.
    """
    # Token used as a substitute for pickling objects that contain a
    # reference to a TradingEnvironment
    PERSISTENT_TOKEN = "<TradingEnvironment>"
    def __init__(
        self,
        load=None,
        bm_symbol='SPY',
        exchange_tz="US/Eastern",
        trading_calendar=None,
        asset_db_path=':memory:',
        future_chain_predicates=CHAIN_PREDICATES,
        local_benchmark=None,
        environ=None,
    ):
        self.bm_symbol = bm_symbol
        self.local_benchmark = local_benchmark
        self.environ = environ
        # Default loader: fetch benchmark/treasury data, honoring any local
        # benchmark file supplied by the caller.
        if not load:
            load = partial(load_market_data, local_benchmark=self.local_benchmark, environ=self.environ)
        # Fall back to the NYSE calendar when none is given.
        self.trading_calendar = trading_calendar
        if not self.trading_calendar:
            self.trading_calendar = get_calendar("NYSE")
        self.benchmark_returns, self.treasury_curves = load(
            self.trading_calendar.day,
            self.trading_calendar.schedule.index,
            self.bm_symbol,
        )
        self.exchange_tz = exchange_tz
        # asset_db_path may be either a path/URL string or an already-built
        # sqlalchemy Engine; normalize to an engine either way.
        if isinstance(asset_db_path, string_types):
            asset_db_path = 'sqlite:///' + asset_db_path
            self.engine = engine = create_engine(asset_db_path)
        else:
            self.engine = engine = asset_db_path
        # Initialize the asset DB schema and build a finder over it; a None
        # engine disables asset lookups entirely.
        if engine is not None:
            AssetDBWriter(engine).init_db()
            self.asset_finder = AssetFinder(
                engine,
                future_chain_predicates=future_chain_predicates)
        else:
            self.asset_finder = None
    def update_local_bench(self, local_benchmark):
        """Reload benchmark returns and treasury curves from a new local
        benchmark source, replacing the data loaded at construction time."""
        load = partial(load_market_data, local_benchmark=local_benchmark, environ=self.environ)
        self.benchmark_returns, self.treasury_curves = load(
            self.trading_calendar.day,
            self.trading_calendar.schedule.index,
            self.bm_symbol,
        )
    def write_data(self, **kwargs):
        """Write data into the asset_db.
        Parameters
        ----------
        **kwargs
            Forwarded to AssetDBWriter.write
        """
        AssetDBWriter(self.engine).write(**kwargs)
class SimulationParameters(object):
    """Validated bundle of parameters for one simulation run: the session
    range (snapped to valid calendar sessions), capital base, emission rate,
    data frequency and arena."""
    def __init__(self, start_session, end_session,
                 trading_calendar,
                 capital_base=DEFAULT_CAPITAL_BASE,
                 emission_rate='daily',
                 data_frequency='daily',
                 arena='backtest'):
        assert type(start_session) == pd.Timestamp
        assert type(end_session) == pd.Timestamp
        assert trading_calendar is not None, \
            "Must pass in trading calendar!"
        assert start_session <= end_session, \
            "Period start falls after period end."
        assert start_session <= trading_calendar.last_trading_session, \
            "Period start falls after the last known trading day."
        assert end_session >= trading_calendar.first_trading_session, \
            "Period end falls before the first known trading day."
        # chop off any minutes or hours on the given start and end dates,
        # as we only support session labels here (and we represent session
        # labels as midnight UTC).
        self._start_session = normalize_date(start_session)
        self._end_session = normalize_date(end_session)
        self._capital_base = capital_base
        self._emission_rate = emission_rate
        self._data_frequency = data_frequency
        # copied to algorithm's environment for runtime access
        self._arena = arena
        self._trading_calendar = trading_calendar
        if not trading_calendar.is_session(self._start_session):
            # if the start date is not a valid session in this calendar,
            # push it forward to the first valid session
            self._start_session = trading_calendar.minute_to_session_label(
                self._start_session
            )
        if not trading_calendar.is_session(self._end_session):
            # if the end date is not a valid session in this calendar,
            # pull it backward to the last valid session before the given
            # end date.
            self._end_session = trading_calendar.minute_to_session_label(
                self._end_session, direction="previous"
            )
        # Cache the exact market open of the first session and market close
        # of the last session for this range.
        self._first_open = trading_calendar.open_and_close_for_session(
            self._start_session
        )[0]
        self._last_close = trading_calendar.open_and_close_for_session(
            self._end_session
        )[1]
    @property
    def capital_base(self):
        return self._capital_base
    @property
    def emission_rate(self):
        return self._emission_rate
    @property
    def data_frequency(self):
        return self._data_frequency
    @data_frequency.setter
    def data_frequency(self, val):
        self._data_frequency = val
    @property
    def arena(self):
        return self._arena
    @arena.setter
    def arena(self, val):
        self._arena = val
    @property
    def start_session(self):
        return self._start_session
    @property
    def end_session(self):
        return self._end_session
    @property
    def first_open(self):
        return self._first_open
    @property
    def last_close(self):
        return self._last_close
    @property
    @remember_last
    def sessions(self):
        # Memoized: the calendar range query is computed once per instance.
        return self._trading_calendar.sessions_in_range(
            self.start_session,
            self.end_session
        )
    def create_new(self, start_session, end_session):
        """Return a copy of these parameters with a different session range,
        keeping calendar, capital base, rates and arena unchanged."""
        return SimulationParameters(
            start_session,
            end_session,
            self._trading_calendar,
            capital_base=self.capital_base,
            emission_rate=self.emission_rate,
            data_frequency=self.data_frequency,
            arena=self.arena
        )
    def __repr__(self):
        return """
{class_name}(
    start_session={start_session},
    end_session={end_session},
    capital_base={capital_base},
    data_frequency={data_frequency},
    emission_rate={emission_rate},
    first_open={first_open},
    last_close={last_close})\
""".format(class_name=self.__class__.__name__,
           start_session=self.start_session,
           end_session=self.end_session,
           capital_base=self.capital_base,
           data_frequency=self.data_frequency,
           emission_rate=self.emission_rate,
           first_open=self.first_open,
           last_close=self.last_close)
def noop_load(*args, **kwargs):
    """Stand-in load method for a TradingEnvironment that skips loading
    benchmarks.

    Any positional and keyword arguments are accepted and ignored; the
    return value is always a pair of Nones.
    """
    return (None, None)
|
from datetime import date
from layeredconfig import LayeredConfig, Defaults, Environment
from pypki.core.openssl_ca import run_cmd, run_cmd_pexpect, generate_password, opensslconfigfileparser, generate_certificate
from pypki.core.forms import config_form, usercert_form, servercert_form, bulkcert_form, revoke_form, report_form
import os
import re
import sys
import web
import time
import ruamel.yaml as yaml
import six
import base64
import pkg_resources
import pypki.core.users
#===============================================================================
# Init, things we cannot live without
#===============================================================================
# Declare URLs we will serve files for
# URL-pattern -> handler-class routing table for web.py.
urls = ('/', 'Home',
        '/home', 'Home',
        '/config', 'Config',
        '/generatecertificate', 'GenerateCertificate',
        '/clientcertificate', 'ClientCertificate',
        '/servercertificate', 'ServerCertificate',
        '/bulk', 'Bulk',
        '/revoke', 'Revoke',
        '/crl', 'Crl',
        '/report', 'Report',
        '/login', 'Login',
        '/progress', 'Progress')
# Templates live next to this module.
template_root = os.path.join(os.path.dirname(__file__), 'templates/')
render = web.template.render(template_root)
# NOTE(review): .wsgifunc() returns a bare WSGI callable, not the
# web.application object; main() later calls app.add_processor on it,
# which a plain function does not provide -- confirm intended usage.
app = web.application(urls, globals()).wsgifunc()
# Load configuration: environment variables prefixed PYPKI_ override
# these built-in defaults.
cfg_defaults = {
    'pkiroot': '/pkiroot',
    'opensslconfigfile': '/pkiroot/openssl.cnf',
    'canames': ['RootCA', 'IntermCA'],
    'cwdir': os.getcwd(),
    'download_dir': './static'
}
config = LayeredConfig(Defaults(cfg_defaults), Environment(prefix='PYPKI_'))
# CSR column/default mapping shipped with the package.
csr_defaults_path = pkg_resources.resource_filename('pypki', 'config/csr_defaults.yaml')
with open(csr_defaults_path) as stream:
    try:
        # NOTE(review): yaml.Loader allows arbitrary object construction;
        # fine for a bundled file, unsafe for untrusted input.
        csr_defaults = yaml.load(stream, Loader=yaml.Loader)
    except yaml.YAMLError as exc:
        raise
print("Loaded the following configuration:")
print(config)
# Parse the OpenSSL config into CA objects plus the default CSR subject.
ca_list, defaultcsr = opensslconfigfileparser(config.opensslconfigfile, config.canames)
# Shared progress counter for the /bulk -> /progress polling.
bulk_progress = 0
version = '1.0.1'
#===============================================================================
# Functions required for the web interface to work
#===============================================================================
def create_zip(sources, destination, encrypt=False, password=''):
    # Bundle *sources* (a list of file paths) into the zip archive at
    # *destination* using the external `zip` binary (-j: junk paths,
    # -r: recurse).  With encrypt=True, `zip -e` is driven via pexpect to
    # answer its two password prompts.
    # NOTE(review): the command line is built by plain string formatting,
    # so paths containing spaces or shell metacharacters will break (or be
    # unsafe if run_cmd uses a shell) -- confirm and consider quoting.
    sources = ' '.join(sources)
    if encrypt:
        cmd = 'zip -ejr {destination} {sources}'.format(sources=sources, destination=destination)
        run_cmd_pexpect(cmd, (('Enter password:', password), ('Verify password:', password)))
    else:
        cmd = 'zip -jr {destination} {sources}'.format(sources=sources, destination=destination)
        run_cmd(cmd)
def prepare_crt_for_download(crt_list):
    """Bundle each certificate's p12, p12 password file, and crt into an
    encrypted, timestamped zip under the download directory.

    Returns the ``/static/`` URL path of the archive and the generated
    zip password.
    """
    # Collect every file that belongs in the encrypted container.
    zip_contents = []
    for crt in crt_list:
        zip_contents.extend((crt.p12file, crt.p12pwdfile, crt.crtfile))
    # Timestamped archive name, e.g. crt_18_04_2020-162926.zip
    filename = 'crt_{date_time}.zip'.format(date_time=time.strftime("%d_%m_%Y-%H%M%S"))
    zipfile = os.path.join(config.download_dir, filename)
    password = generate_password(12)
    create_zip(zip_contents, zipfile, encrypt=True, password=password)
    return os.path.join('/static/', filename), password
def prepare_files_for_download(file_list):
    """Pack *file_list* into an unencrypted, timestamped zip in the
    download directory and return its ``/static/`` URL path."""
    # Materialize the iterable for create_zip.
    zip_contents = list(file_list)
    # Timestamped archive name, e.g. crl_18_04_2020-162926.zip
    filename = 'crl_{date_time}.zip'.format(date_time=time.strftime("%d_%m_%Y-%H%M%S"))
    zipfile = os.path.join(config.download_dir, filename)
    create_zip(zip_contents, zipfile)
    return os.path.join('/static/', filename)
def report_certificates_to_expire(calist, caname, period):
    """Return the still-valid certificates of CA *caname* that expire
    within *period* days.

    Only database entries with status 'V' whose expiration_date is at most
    *period* days from today are included, in database order.
    """
    ca = [c for c in calist if c.name == caname][0]
    today = date.today()
    return [
        info
        for info in ca.list_db()
        if info['status'] == 'V'
        and (info['expiration_date'] - today).days <= int(period)
    ]
def csv_to_csr_data(csv, cert_type='Server'):
    """Parse newline-separated CSV text into a list of csr_data dicts.

    For ``cert_type == 'Server'`` the columns are ``commonname,validity``
    and the remaining subject fields come from ``defaultcsr``.  For any
    other type each field is taken from ``csr_defaults``: a string value
    is used literally, anything else is treated as a column index into
    the CSV row.

    :param csv: raw CSV text, one certificate request per line.
    :param cert_type: certificate type label, default 'Server'.
    :return: list of dicts consumable by generate_certificate().
    """
    def _field(name, values):
        # String defaults are literal; other values index into the row.
        default = csr_defaults[name]
        if isinstance(default, six.string_types):
            return default
        return values[default]

    csr_data_list = []
    for line in csv.split('\n'):
        # Skip blank lines (e.g. a trailing newline from a textarea).
        if not line.strip():
            continue
        values = line.split(',')
        if cert_type == 'Server':
            csr_data = {'certtype': cert_type,
                        'commonname': values[0],
                        'validity': values[1],
                        'country': defaultcsr.country,
                        'state': defaultcsr.state,
                        'locality': defaultcsr.locality,
                        'organisation': defaultcsr.organisation,
                        'organisationalunit': defaultcsr.organisationalunit}
        else:
            csr_data = {
                'certtype': cert_type,
                'country': _field('country', values),
                'state': _field('state', values),
                # BUG FIX: was csr_default['locality'] (NameError typo).
                'locality': _field('locality', values),
                'organisation': _field('organisation', values),
                'organisationalunit': _field('organisationalunit', values),
                'commonname': _field('commonname', values),
                'email': _field('email', values),
                'validity': _field('validity', values)
            }
        if csr_defaults['request_id']:
            # BUG FIX: was csr_defaults.request_id -- the yaml-loaded dict
            # has no attribute access; use item access like everywhere else.
            csr_data['request_id'] = values[csr_defaults['request_id']]
        csr_data_list.append(csr_data)
    return csr_data_list
def authentication():
    """Request loadhook: bounce unauthenticated requests to /login.

    The /login path itself is exempt so the browser can be challenged.
    """
    if web.ctx.path == '/login':
        return
    if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
        raise web.seeother('/login')
#===============================================================================
# Web interface URI's
#===============================================================================
class Login(object):
    """HTTP Basic-auth login endpoint.

    Sends a 401 challenge until credentials matching an entry of
    ``pypki.core.users.allowed`` are presented, then redirects to /home.
    """
    def GET(self):
        auth = web.ctx.env.get('HTTP_AUTHORIZATION')
        authreq = False
        if auth is None:
            authreq = True
        else:
            auth = re.sub('^Basic ', '', auth)
            # BUG FIX: base64.decodestring was deprecated and removed in
            # Python 3.9; b64decode is the portable replacement.  It
            # returns bytes on Python 3, so decode before splitting.
            credentials = base64.b64decode(auth)
            if isinstance(credentials, bytes):
                credentials = credentials.decode('utf-8')
            # Split only on the first ':' so passwords that themselves
            # contain colons still work (RFC 7617 allows them).
            username, password = credentials.split(':', 1)
            if (username, password) in pypki.core.users.allowed:
                raise web.seeother('/home')
            else:
                authreq = True
        if authreq:
            web.header('WWW-Authenticate', 'Basic realm="PKIweb authentication"')
            web.ctx.status = '401 Unauthorized'
            return
class Home(object):
    """Application home page."""
    def GET(self):
        # Guard clause: unauthenticated users go to the login challenge.
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        return render.home(version)
class Config(object):
    """Read-only view of the running configuration."""
    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        cfg_form = config_form()
        # Mirror the live configuration values into the form fields.
        cfg_form.pkiroot.value = config.pkiroot
        cfg_form.opensslconfigfile.value = config.opensslconfigfile
        cfg_form.canames.value = ','.join(config.canames)
        return render.configuration(cfg_form, version)
class GenerateCertificate(object):
    """Landing page for choosing the kind of certificate to generate."""
    def GET(self):
        # Guard clause: unauthenticated users go to the login challenge.
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        return render.generatecertificate(version)
class ClientCertificate(object):
    """Issue a single client (user) certificate."""

    @staticmethod
    def _apply_defaults(form):
        """Pre-fill *form* with the configured CAs and the default CSR
        subject values (shared by GET and the POST error path)."""
        form.selected_ca.args = [ca.name for ca in ca_list]
        form.country.value = defaultcsr.country
        form.state.value = defaultcsr.state
        form.locality.value = defaultcsr.locality
        form.organisation.value = defaultcsr.organisation
        form.organisationalunit.value = defaultcsr.organisationalunit
        form.validity.value = 365
        return form

    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        return render.form(self._apply_defaults(usercert_form()))

    def POST(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        form = usercert_form()
        data = web.input()
        if not form.validates():
            # Re-present the submitted form with the defaults restored.
            return render.generatecertificate_err(self._apply_defaults(form), version)
        # Build the CSR from the submitted fields.
        csr_data = {'certtype': data['certtype'],
                    'keylength': 2048,
                    'validity': data['validity'],
                    'country': data['country'],
                    'state': data['state'],
                    'locality': data['locality'],
                    'organisation': data['organisation'],
                    'organisationalunit': data['organisationalunit'],
                    'commonname': data['commonname'],
                    'email': data['email']}
        try:
            # Generate certificate based on CSR
            crt = generate_certificate(csr_data, ca_list, data['selected_ca'], data['password'])
        except Exception as e:
            return render.error(e, version)
        # Prepare certificate for download
        crt_list = [crt, ]
        zipfile, password = prepare_crt_for_download(crt_list)
        return render.download(crt_list, zipfile, password, version)
class ServerCertificate(object):
    """Issue a single server certificate."""

    @staticmethod
    def _apply_defaults(form):
        """Pre-fill *form* with the configured CAs and the default CSR
        subject values (shared by GET and the POST error path)."""
        form.selected_ca.args = [ca.name for ca in ca_list]
        form.country.value = defaultcsr.country
        form.state.value = defaultcsr.state
        form.locality.value = defaultcsr.locality
        form.organisation.value = defaultcsr.organisation
        form.organisationalunit.value = defaultcsr.organisationalunit
        form.validity.value = 365
        return form

    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        return render.form(self._apply_defaults(servercert_form()))

    def POST(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        form = servercert_form()
        data = web.input()
        if not form.validates():
            # Re-present the submitted form with the defaults restored.
            return render.generatecertificate_err(self._apply_defaults(form), version)
        # Build the CSR from the submitted fields (no email for servers).
        csr_data = {'certtype': data['certtype'],
                    'keylength': 2048,
                    'validity': data['validity'],
                    'country': data['country'],
                    'state': data['state'],
                    'locality': data['locality'],
                    'organisation': data['organisation'],
                    'organisationalunit': data['organisationalunit'],
                    'commonname': data['commonname']}
        try:
            # Generate certificate based on CSR
            crt = generate_certificate(csr_data, ca_list, data['selected_ca'], data['password'])
        except Exception as e:
            return render.error(e, version)
        # Prepare certificate for download
        crt_list = [crt, ]
        zipfile, password = prepare_crt_for_download(crt_list)
        return render.download(crt_list, zipfile, password, version)
class Bulk(object):
    """Generate many certificates from CSV input in one request."""

    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        form = bulkcert_form()
        # Offer all configured CAs.
        form.selected_ca.args = [ca.name for ca in ca_list]
        return render.form(form)

    def POST(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        global bulk_progress
        form = bulkcert_form()
        data = web.input()
        if not form.validates():
            form.selected_ca.args = [ca.name for ca in ca_list]
            return render.generatecertificate_err(form, version)
        csr_data_list = csv_to_csr_data(data['req_list'], cert_type=data['certtype'])
        crt_list = []
        for csr_data in csr_data_list:
            try:
                crt = generate_certificate(csr_data, ca_list, data['selected_ca'], data['password'])
                # Float division so progress also advances on Python 2,
                # where 100/len(...) is integer division.
                bulk_progress += 100.0 / len(csr_data_list)
            except Exception as e:
                # BUG FIX: reset the shared counter on failure so the
                # /progress endpoint does not keep reporting a stale,
                # partially-complete value.
                bulk_progress = 0
                return render.error(e, version)
            crt_list.append(crt)
        zipfile, password = prepare_crt_for_download(crt_list)
        bulk_progress = 0
        return render.download(crt_list, zipfile, password, version)
class Revoke(object):
    """List valid certificates of a CA and revoke the selected ones."""

    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        params = web.input()
        if not params:
            # Initial request: show the CA selection form.
            form = revoke_form()
            form.selected_ca.args = ['', ] + [ca.name for ca in ca_list]
            return render.revoke(form, version)
        if params['request'] == 'getlist':
            ca = [c for c in ca_list if c.name == params['ca']][0]
            cert_list = ca.list_db()
            # Only still-valid ('V') certificates can be revoked.
            rev_list = [cert for cert in cert_list if cert['status'] == 'V']
            return render.revoke_list(rev_list)

    def POST(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        data = web.input()
        form = revoke_form()
        if not form.validates():
            form.selected_ca.args = ['', ] + [ca.name for ca in ca_list]
            return render.revoke(form, version)
        # Decide on CA.
        ca = [c for c in ca_list if c.name == data['selected_ca']][0]
        # BUG FIX: dict.iteritems() is Python 2-only; items() works on
        # both 2 and 3.  Every posted field whose value is 'R' is passed
        # to revoke_cert.
        for key, value in data.items():
            if value == 'R':
                try:
                    ca.revoke_cert(key, data['password'])
                except Exception as e:
                    return render.error(e, version)
        form = revoke_form()
        form.selected_ca.args = ['', ] + [ca.name for ca in ca_list]
        return render.revoke(form, version)
class Crl(object):
    """Generate a CRL for the selected CA and serve it as a zip."""

    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        # Decide on CA to generate CRL for.
        ca = [c for c in ca_list if c.name == web.input()['ca']][0]
        # Generate CRL and get output.
        try:
            crl_pem, crl_txt = ca.generate_crl(web.input()['password'])
        except Exception as e:
            return render.error(e, version)
        # Bundle the PEM and text forms into a zip served from /static.
        file_list = (crl_pem, crl_txt)
        crl = prepare_files_for_download(file_list)
        # BUG FIX: web.redirect must be raised for web.py to emit the
        # redirect response (consistent with every other redirect here);
        # the bare call left the handler returning None.
        raise web.redirect(crl)
class Report(object):
    """Report certificates that will expire within a chosen period."""
    def GET(self):
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            raise web.seeother('/login')
        params = web.input()
        if not params:
            # Initial request: show the CA / period selection form.
            form = report_form()
            form.selected_ca.args = ['', ] + [ca.name for ca in ca_list]
            return render.report(form, version)
        if params['request'] == 'getlist':
            report = report_certificates_to_expire(ca_list, params['ca'], params['period'])
            return render.report_list(report)
class Progress(object):
    """Expose the module-level bulk generation progress counter."""
    def GET(self):
        # Only respond when query parameters were supplied.
        if not web.input():
            return None
        return bulk_progress
#===============================================================================
# Main
#===============================================================================
def main():
    """Configure and start the web.py application."""
    web.config.debug = False
    # Start the web application
    web.internalerror = web.debugerror
    # Every request passes through the authentication loadhook first.
    # NOTE(review): app was built with .wsgifunc() above, which yields a
    # plain WSGI callable without add_processor -- confirm this entry
    # point actually works.
    app.add_processor(web.loadhook(authentication))
    app.run()
if __name__ == '__main__':
    main()
|
from flask import url_for
from app.db import Session
from app.models import User, ApiKey
def test_apple_process_payment(flask_client):
    """POST a receipt to the apple_process_payment endpoint.

    The endpoint is expected to answer 400 because the Apple shared
    secret is not valid in the test environment.
    """
    user = User.create(
        email="a@b.c", password="password", name="Test User", activated=True
    )
    Session.commit()
    # create api_key used to authenticate the API call below
    api_key = ApiKey.create(user.id, "for test")
    Session.commit()
    # Opaque base64 receipt blob (presumably captured from a sandbox
    # purchase); keep byte-for-byte -- the endpoint parses it as-is.
    receipt_data = """MIIUHgYJKoZIhvcNAQcCoIIUDzCCFAsCAQExCzAJBgUrDgMCGgUAMIIDvwYJKoZIhvcNAQcBoIIDsASCA6wxggOoMAoCAQgCAQEEAhYAMAoCARQCAQEEAgwAMAsCAQECAQEEAwIBADALAgEDAgEBBAMMATIwCwIBCwIBAQQDAgEAMAsCAQ8CAQEEAwIBADALAgEQAgEBBAMCAQAwCwIBGQIBAQQDAgEDMAwCAQoCAQEEBBYCNCswDAIBDgIBAQQEAgIAjjANAgENAgEBBAUCAwH8/TANAgETAgEBBAUMAzEuMDAOAgEJAgEBBAYCBFAyNTMwGAIBBAIBAgQQS28CkyUrKkayzHXyZEQ8/zAbAgEAAgEBBBMMEVByb2R1Y3Rpb25TYW5kYm94MBwCAQUCAQEEFCvruJwvAhV9s7ODIiM3KShyPW3kMB4CAQwCAQEEFhYUMjAyMC0wNC0xOFQxNjoyOToyNlowHgIBEgIBAQQWFhQyMDEzLTA4LTAxVDA3OjAwOjAwWjAgAgECAgEBBBgMFmlvLnNpbXBsZWxvZ2luLmlvcy1hcHAwSAIBBwIBAQRAHWlCA6fQTbOn0QFDAOH79MzMxIwODI0g6I8LZ6OyThRArQ6krRg6M8UPQgF4Jq6lIrz0owFG+xn0IV2Rq8ejFzBRAgEGAgEBBEkx7BUjdVQv+PiguvEl7Wd4pd+3QIrNt+oSRwl05KQdBeoBKU78eBFp48fUNkCFA/xaibj0U4EF/iq0Lgx345M2RSNqqWvRbzsIMIIBoAIBEQIBAQSCAZYxggGSMAsCAgatAgEBBAIMADALAgIGsAIBAQQCFgAwCwICBrICAQEEAgwAMAsCAgazAgEBBAIMADALAgIGtAIBAQQCDAAwCwICBrUCAQEEAgwAMAsCAga2AgEBBAIMADAMAgIGpQIBAQQDAgEBMAwCAgarAgEBBAMCAQMwDAICBq4CAQEEAwIBADAMAgIGsQIBAQQDAgEAMAwCAga3AgEBBAMCAQAwEgICBq8CAQEECQIHA41+p92hIzAbAgIGpwIBAQQSDBAxMDAwMDAwNjUzNTg0NDc0MBsCAgapAgEBBBIMEDEwMDAwMDA2NTM1ODQ0NzQwHwICBqgCAQEEFhYUMjAyMC0wNC0xOFQxNjoyNzo0MlowHwICBqoCAQEEFhYUMjAyMC0wNC0xOFQxNjoyNzo0NFowHwICBqwCAQEEFhYUMjAyMC0wNC0xOFQxNjozMjo0MlowPgICBqYCAQEENQwzaW8uc2ltcGxlbG9naW4uaW9zX2FwcC5zdWJzY3JpcHRpb24ucHJlbWl1bS5tb250aGx5oIIOZTCCBXwwggRkoAMCAQICCA7rV4fnngmNMA0GCSqGSIb3DQEBBQUAMIGWMQswCQYDVQQGEwJVUzETMBEGA1UECgwKQXBwbGUgSW5jLjEsMCoGA1UECwwjQXBwbGUgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMxRDBCBgNVBAMMO0FwcGxlIFdvcmxkd2lkZSBEZXZlbG9wZXIgUmVsYXRpb25zIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTE1MTExMzAyMTUwOVoXDTIzMDIwNzIxNDg0N1owgYkxNzA1BgNVBAMMLk1hYyBBcHAgU3RvcmUgYW5kIGlUdW5lcyBTdG9yZSBSZWNlaXB0IFNpZ25pbmcxLDAqBgNVBAsMI0FwcGxlIFdvcmxkd2lkZSBEZXZlbG9wZXIgUmVsYXRpb25zMRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKXPgf0looFb1oftI9ozHI7iI8ClxCbLPcaf7EoNVYb/pALXl8o5VG19f7JUGJ3ELFJxjmR7gs6JuknWCOW0iHHPP1tGLsbEHbgDqViiBD4heNXbt9
COEo2DTFsqaDeTwvK9HsTSoQxKWFKrEuPt3R+YFZA1LcLMEsqNSIH3WHhUa+iMMTYfSgYMR1TzN5C4spKJfV+khUrhwJzguqS7gpdj9CuTwf0+b8rB9Typj1IawCUKdg7e/pn+/8Jr9VterHNRSQhWicxDkMyOgQLQoJe2XLGhaWmHkBBoJiY5uB0Qc7AKXcVz0N92O9gt2Yge4+wHz+KO0NP6JlWB7+IDSSMCAwEAAaOCAdcwggHTMD8GCCsGAQUFBwEBBDMwMTAvBggrBgEFBQcwAYYjaHR0cDovL29jc3AuYXBwbGUuY29tL29jc3AwMy13d2RyMDQwHQYDVR0OBBYEFJGknPzEdrefoIr0TfWPNl3tKwSFMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUiCcXCam2GGCL7Ou69kdZxVJUo7cwggEeBgNVHSAEggEVMIIBETCCAQ0GCiqGSIb3Y2QFBgEwgf4wgcMGCCsGAQUFBwICMIG2DIGzUmVsaWFuY2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2NlcHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5kIGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRpZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wNgYIKwYBBQUHAgEWKmh0dHA6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5LzAOBgNVHQ8BAf8EBAMCB4AwEAYKKoZIhvdjZAYLAQQCBQAwDQYJKoZIhvcNAQEFBQADggEBAA2mG9MuPeNbKwduQpZs0+iMQzCCX+Bc0Y2+vQ+9GvwlktuMhcOAWd/j4tcuBRSsDdu2uP78NS58y60Xa45/H+R3ubFnlbQTXqYZhnb4WiCV52OMD3P86O3GH66Z+GVIXKDgKDrAEDctuaAEOR9zucgF/fLefxoqKm4rAfygIFzZ630npjP49ZjgvkTbsUxn/G4KT8niBqjSl/OnjmtRolqEdWXRFgRi48Ff9Qipz2jZkgDJwYyz+I0AZLpYYMB8r491ymm5WyrWHWhumEL1TKc3GZvMOxx6GUPzo22/SGAGDDaSK+zeGLUR2i0j0I78oGmcFxuegHs5R0UwYS/HE6gwggQiMIIDCqADAgECAggB3rzEOW2gEDANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMTMwMjA3MjE0ODQ3WhcNMjMwMjA3MjE0ODQ3WjCBljELMAkGA1UEBhMCVVMxEzARBgNVBAoMCkFwcGxlIEluYy4xLDAqBgNVBAsMI0FwcGxlIFdvcmxkd2lkZSBEZXZlbG9wZXIgUmVsYXRpb25zMUQwQgYDVQQDDDtBcHBsZSBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9ucyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMo4VKbLVqrIJDlI6Yzu7F+4fyaRvDRTes58Y4Bhd2RepQcjtjn+UC0VVlhwLX7EbsFKhT4v8N6EGqFXya97GP9q+hUSSRUIGayq2yoy7ZZjaFIVPYyK7L9rGJXgA6wBfZcFZ84OhZU3au0Jtq5nzVFkn8Zc0bxXbmc1gHY2pIeBbjiP2CsVTnsl2Fq/ToPBjdKT1RpxtWCcnTNOVfkSWAyGuBYNweV3RY1QSLorLeSUheHoxJ3GaKWwo/xnfnC6AllLd0KRObn1zeFM78A7SIym5SFd/Wpqu6
cWNWDS5q3zRinJ6MOL6XnAamFnFbLw/eVovGJfbs+Z3e8bY/6SZasCAwEAAaOBpjCBozAdBgNVHQ4EFgQUiCcXCam2GGCL7Ou69kdZxVJUo7cwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAuBgNVHR8EJzAlMCOgIaAfhh1odHRwOi8vY3JsLmFwcGxlLmNvbS9yb290LmNybDAOBgNVHQ8BAf8EBAMCAYYwEAYKKoZIhvdjZAYCAQQCBQAwDQYJKoZIhvcNAQEFBQADggEBAE/P71m+LPWybC+P7hOHMugFNahui33JaQy52Re8dyzUZ+L9mm06WVzfgwG9sq4qYXKxr83DRTCPo4MNzh1HtPGTiqN0m6TDmHKHOz6vRQuSVLkyu5AYU2sKThC22R1QbCGAColOV4xrWzw9pv3e9w0jHQtKJoc/upGSTKQZEhltV/V6WId7aIrkhoxK6+JJFKql3VUAqa67SzCu4aCxvCmA5gl35b40ogHKf9ziCuY7uLvsumKV8wVjQYLNDzsdTJWk26v5yZXpT+RN5yaZgem8+bQp0gF6ZuEujPYhisX4eOGBrr/TkJ2prfOv/TgalmcwHFGlXOxxioK0bA8MFR8wggS7MIIDo6ADAgECAgECMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBDQTAeFw0wNjA0MjUyMTQwMzZaFw0zNTAyMDkyMTQwMzZaMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1eeYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsqwx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsVWR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeUyS0CAwEAAaOCAXowggF2MA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjCCAREGA1UdIASCAQgwggEEMIIBAAYJKoZIhvdjZAUBMIHyMCoGCCsGAQUFBwIBFh5odHRwczovL3d3dy5hcHBsZS5jb20vYXBwbGVjYS8wgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFuY2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2NlcHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5kIGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRpZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wDQYJKoZIhvcNAQEFBQADggEBAFw2mUwteLftjJvc83eb8nbSdzBPwR+Fg4UbmT1HN/Kpm0COLNSxkBLYvvRzm+7SZA/LeU
802KI++Xj/a8gH7H05g4tTINM4xLG/mk8Ka/8r/FmnBQl8F0BWER5007eLIztHo9VvJOLr0bdw3w9F4SfK8W147ee1Fxeo3H4iNcol1dkP1mvUoiQjEfehrI9zgWDGG1sJL5Ky+ERI8GA4nhX1PSZnIIozavcNgs/e66Mv+VNqW2TAYzN39zoHLFbr2g8hDtq6cxlPtdk2f8GHVdmnmbkyQvvY1XGefqFStxu9k0IkEirHDx22TZxeY8hLgBdQqorV2uT80AkHN7B1dSExggHLMIIBxwIBATCBozCBljELMAkGA1UEBhMCVVMxEzARBgNVBAoMCkFwcGxlIEluYy4xLDAqBgNVBAsMI0FwcGxlIFdvcmxkd2lkZSBEZXZlbG9wZXIgUmVsYXRpb25zMUQwQgYDVQQDDDtBcHBsZSBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9ucyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eQIIDutXh+eeCY0wCQYFKw4DAhoFADANBgkqhkiG9w0BAQEFAASCAQCjIWg69JwLxrmuZL7R0isYWjNGR0wvs3YKtWSwHZG/gDaxPWlgZI0oszcMOI07leGl73vQRVFO89ngbDkNp1Mmo9Mmbc/m8EJtvaVkJp0gYICKpWyMMJPNL5CT+MinMj9gBkRrd5rwFlfRkNBSmD6bt/I23B1AKcmmMwklAuF/mxGzOF4PFiPukEtaQAOe7j4w+QLzEeEAi57DIQppp+uRupKQpZRnn/Q9MyGxXA30ei6C1suxPCoRqCKrRXfWp73UsGP5jH6tOLigkVoO4CtJs3fLWpkLi9by6/K6eoGbP5MOklsBJWYGVZbRRDiNROxqPOgWnS1+p+/KGIdIC4+u"""
    r = flask_client.post(
        url_for("api.apple_process_payment"),
        headers={"Authentication": api_key.code},
        json={"receipt_data": receipt_data},
    )
    # Will fail anyway, as the Apple secret is not valid in tests.
    assert r.status_code == 400
    assert r.json == {"error": "Processing failed"}
def test_apple_update_notification(flask_client):
    """POST an Apple server-to-server notification payload.

    The endpoint is expected to answer 400 because no matching AppleSub
    record exists in the test database.
    """
    user = User.create(
        email="a@b.c", password="password", name="Test User", activated=True
    )
    Session.commit()
    # create api_key used to authenticate the API call below
    api_key = ApiKey.create(user.id, "for test")
    Session.commit()
    # Sandbox DID_CHANGE_RENEWAL_STATUS notification payload; keep the
    # literal values byte-for-byte -- the endpoint parses them as-is.
    payload = {
        "unified_receipt": {
            "latest_receipt": "long string",
            "pending_renewal_info": [
                {
                    "is_in_billing_retry_period": "0",
                    "auto_renew_status": "0",
                    "original_transaction_id": "1000000654277043",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "expiration_intent": "1",
                    "auto_renew_product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                }
            ],
            "environment": "Sandbox",
            "status": 0,
            # Renewal history: all transactions share one
            # original_transaction_id.
            "latest_receipt_info": [
                {
                    "expires_date_pst": "2020-04-20 21:11:57 America/Los_Angeles",
                    "purchase_date": "2020-04-21 03:11:57 Etc/GMT",
                    "purchase_date_ms": "1587438717000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654329911",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587442317000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051891577",
                    "expires_date": "2020-04-21 04:11:57 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 20:11:57 America/Los_Angeles",
                    "is_trial_period": "false",
                },
                {
                    "expires_date_pst": "2020-04-20 20:11:57 America/Los_Angeles",
                    "purchase_date": "2020-04-21 02:11:57 Etc/GMT",
                    "purchase_date_ms": "1587435117000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654313889",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587438717000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051890729",
                    "expires_date": "2020-04-21 03:11:57 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 19:11:57 America/Los_Angeles",
                    "is_trial_period": "false",
                },
                {
                    "expires_date_pst": "2020-04-20 19:11:54 America/Los_Angeles",
                    "purchase_date": "2020-04-21 01:11:54 Etc/GMT",
                    "purchase_date_ms": "1587431514000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654300800",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587435114000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051890161",
                    "expires_date": "2020-04-21 02:11:54 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 18:11:54 America/Los_Angeles",
                    "is_trial_period": "false",
                },
                {
                    "expires_date_pst": "2020-04-20 18:11:54 America/Los_Angeles",
                    "purchase_date": "2020-04-21 00:11:54 Etc/GMT",
                    "purchase_date_ms": "1587427914000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654293615",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587431514000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051889539",
                    "expires_date": "2020-04-21 01:11:54 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 17:11:54 America/Los_Angeles",
                    "is_trial_period": "false",
                },
                {
                    "expires_date_pst": "2020-04-20 17:11:54 America/Los_Angeles",
                    "purchase_date": "2020-04-20 23:11:54 Etc/GMT",
                    "purchase_date_ms": "1587424314000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654285464",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587427914000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051888827",
                    "expires_date": "2020-04-21 00:11:54 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 16:11:54 America/Los_Angeles",
                    "is_trial_period": "false",
                },
                {
                    "expires_date_pst": "2020-04-20 16:11:54 America/Los_Angeles",
                    "purchase_date": "2020-04-20 22:11:54 Etc/GMT",
                    "purchase_date_ms": "1587420714000",
                    "original_purchase_date_ms": "1587420715000",
                    "transaction_id": "1000000654277043",
                    "original_transaction_id": "1000000654277043",
                    "quantity": "1",
                    "expires_date_ms": "1587424314000",
                    "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
                    "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
                    "subscription_group_identifier": "20624274",
                    "web_order_line_item_id": "1000000051888825",
                    "expires_date": "2020-04-20 23:11:54 Etc/GMT",
                    "is_in_intro_offer_period": "false",
                    "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
                    "purchase_date_pst": "2020-04-20 15:11:54 America/Los_Angeles",
                    "is_trial_period": "false",
                },
            ],
        },
        "auto_renew_status_change_date": "2020-04-21 04:11:33 Etc/GMT",
        "environment": "Sandbox",
        "auto_renew_status": "false",
        "auto_renew_status_change_date_pst": "2020-04-20 21:11:33 America/Los_Angeles",
        "latest_expired_receipt": "long string",
        "latest_expired_receipt_info": {
            "original_purchase_date_pst": "2020-04-20 15:11:55 America/Los_Angeles",
            "quantity": "1",
            "subscription_group_identifier": "20624274",
            "unique_vendor_identifier": "4C4DF6BA-DE2A-4737-9A68-5992338886DC",
            "original_purchase_date_ms": "1587420715000",
            "expires_date_formatted": "2020-04-21 04:11:57 Etc/GMT",
            "is_in_intro_offer_period": "false",
            "purchase_date_ms": "1587438717000",
            "expires_date_formatted_pst": "2020-04-20 21:11:57 America/Los_Angeles",
            "is_trial_period": "false",
            "item_id": "1508744966",
            "unique_identifier": "b55fc3dcc688e979115af0697a0195be78be7cbd",
            "original_transaction_id": "1000000654277043",
            "expires_date": "1587442317000",
            "transaction_id": "1000000654329911",
            "bvrs": "3",
            "web_order_line_item_id": "1000000051891577",
            "version_external_identifier": "834289833",
            "bid": "io.simplelogin.ios-app",
            "product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
            "purchase_date": "2020-04-21 03:11:57 Etc/GMT",
            "purchase_date_pst": "2020-04-20 20:11:57 America/Los_Angeles",
            "original_purchase_date": "2020-04-20 22:11:55 Etc/GMT",
        },
        "password": "22b9d5a110dd4344a1681631f1f95f55",
        "auto_renew_status_change_date_ms": "1587442293000",
        "auto_renew_product_id": "io.simplelogin.ios_app.subscription.premium.yearly",
        "notification_type": "DID_CHANGE_RENEWAL_STATUS",
    }
    r = flask_client.post(
        url_for("api.apple_update_notification"),
        headers={"Authentication": api_key.code},
        json=payload,
    )
    # will fail anyway as there's no such AppleSub in Test DB
    assert r.status_code == 400
    assert r.json == {"error": "Processing failed"}
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.assignment_per_target_lang_dto import AssignmentPerTargetLangDto # noqa: F401,E501
from memsource_cli.models.business_unit_reference import BusinessUnitReference # noqa: F401,E501
from memsource_cli.models.client_reference import ClientReference # noqa: F401,E501
from memsource_cli.models.domain_reference import DomainReference # noqa: F401,E501
from memsource_cli.models.project_template_notify_provider_dto import ProjectTemplateNotifyProviderDto # noqa: F401,E501
from memsource_cli.models.sub_domain_reference import SubDomainReference # noqa: F401,E501
from memsource_cli.models.user_reference import UserReference # noqa: F401,E501
from memsource_cli.models.workflow_step_dto import WorkflowStepDto # noqa: F401,E501
from memsource_cli.models.workflow_step_settings_dto import WorkflowStepSettingsDto # noqa: F401,E501
class ProjectTemplateDto(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger type string (presumably consumed by
    # the generated serialization helpers defined later in this class).
    swagger_types = {
        'id': 'str',
        'template_name': 'str',
        'name': 'str',
        'source_lang': 'str',
        'target_langs': 'list[str]',
        'note': 'str',
        'owner': 'UserReference',
        'client': 'ClientReference',
        'domain': 'DomainReference',
        'sub_domain': 'SubDomainReference',
        'created_by': 'UserReference',
        'date_created': 'datetime',
        'workflow_steps': 'list[WorkflowStepDto]',
        'workflow_settings': 'list[WorkflowStepSettingsDto]',
        'business_unit': 'BusinessUnitReference',
        'notify_providers': 'ProjectTemplateNotifyProviderDto',
        'assigned_to': 'list[AssignmentPerTargetLangDto]'
    }
    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'id': 'id',
        'template_name': 'templateName',
        'name': 'name',
        'source_lang': 'sourceLang',
        'target_langs': 'targetLangs',
        'note': 'note',
        'owner': 'owner',
        'client': 'client',
        'domain': 'domain',
        'sub_domain': 'subDomain',
        'created_by': 'createdBy',
        'date_created': 'dateCreated',
        'workflow_steps': 'workflowSteps',
        'workflow_settings': 'workflowSettings',
        'business_unit': 'businessUnit',
        'notify_providers': 'notifyProviders',
        'assigned_to': 'assignedTo'
    }
def __init__(self, id=None, template_name=None, name=None, source_lang=None, target_langs=None, note=None, owner=None, client=None, domain=None, sub_domain=None, created_by=None, date_created=None, workflow_steps=None, workflow_settings=None, business_unit=None, notify_providers=None, assigned_to=None): # noqa: E501
"""ProjectTemplateDto - a model defined in Swagger""" # noqa: E501
self._id = None
self._template_name = None
self._name = None
self._source_lang = None
self._target_langs = None
self._note = None
self._owner = None
self._client = None
self._domain = None
self._sub_domain = None
self._created_by = None
self._date_created = None
self._workflow_steps = None
self._workflow_settings = None
self._business_unit = None
self._notify_providers = None
self._assigned_to = None
self.discriminator = None
if id is not None:
self.id = id
if template_name is not None:
self.template_name = template_name
if name is not None:
self.name = name
if source_lang is not None:
self.source_lang = source_lang
if target_langs is not None:
self.target_langs = target_langs
if note is not None:
self.note = note
if owner is not None:
self.owner = owner
if client is not None:
self.client = client
if domain is not None:
self.domain = domain
if sub_domain is not None:
self.sub_domain = sub_domain
if created_by is not None:
self.created_by = created_by
if date_created is not None:
self.date_created = date_created
if workflow_steps is not None:
self.workflow_steps = workflow_steps
if workflow_settings is not None:
self.workflow_settings = workflow_settings
if business_unit is not None:
self.business_unit = business_unit
if notify_providers is not None:
self.notify_providers = notify_providers
if assigned_to is not None:
self.assigned_to = assigned_to
@property
def id(self):
"""Gets the id of this ProjectTemplateDto. # noqa: E501
:return: The id of this ProjectTemplateDto. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProjectTemplateDto.
:param id: The id of this ProjectTemplateDto. # noqa: E501
:type: str
"""
self._id = id
@property
def template_name(self):
"""Gets the template_name of this ProjectTemplateDto. # noqa: E501
:return: The template_name of this ProjectTemplateDto. # noqa: E501
:rtype: str
"""
return self._template_name
@template_name.setter
def template_name(self, template_name):
"""Sets the template_name of this ProjectTemplateDto.
:param template_name: The template_name of this ProjectTemplateDto. # noqa: E501
:type: str
"""
self._template_name = template_name
@property
def name(self):
"""Gets the name of this ProjectTemplateDto. # noqa: E501
:return: The name of this ProjectTemplateDto. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProjectTemplateDto.
:param name: The name of this ProjectTemplateDto. # noqa: E501
:type: str
"""
self._name = name
@property
def source_lang(self):
"""Gets the source_lang of this ProjectTemplateDto. # noqa: E501
:return: The source_lang of this ProjectTemplateDto. # noqa: E501
:rtype: str
"""
return self._source_lang
@source_lang.setter
def source_lang(self, source_lang):
"""Sets the source_lang of this ProjectTemplateDto.
:param source_lang: The source_lang of this ProjectTemplateDto. # noqa: E501
:type: str
"""
self._source_lang = source_lang
@property
def target_langs(self):
"""Gets the target_langs of this ProjectTemplateDto. # noqa: E501
:return: The target_langs of this ProjectTemplateDto. # noqa: E501
:rtype: list[str]
"""
return self._target_langs
@target_langs.setter
def target_langs(self, target_langs):
"""Sets the target_langs of this ProjectTemplateDto.
:param target_langs: The target_langs of this ProjectTemplateDto. # noqa: E501
:type: list[str]
"""
self._target_langs = target_langs
@property
def note(self):
"""Gets the note of this ProjectTemplateDto. # noqa: E501
:return: The note of this ProjectTemplateDto. # noqa: E501
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""Sets the note of this ProjectTemplateDto.
:param note: The note of this ProjectTemplateDto. # noqa: E501
:type: str
"""
self._note = note
@property
def owner(self):
"""Gets the owner of this ProjectTemplateDto. # noqa: E501
:return: The owner of this ProjectTemplateDto. # noqa: E501
:rtype: UserReference
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this ProjectTemplateDto.
:param owner: The owner of this ProjectTemplateDto. # noqa: E501
:type: UserReference
"""
self._owner = owner
@property
def client(self):
"""Gets the client of this ProjectTemplateDto. # noqa: E501
:return: The client of this ProjectTemplateDto. # noqa: E501
:rtype: ClientReference
"""
return self._client
@client.setter
def client(self, client):
"""Sets the client of this ProjectTemplateDto.
:param client: The client of this ProjectTemplateDto. # noqa: E501
:type: ClientReference
"""
self._client = client
@property
def domain(self):
"""Gets the domain of this ProjectTemplateDto. # noqa: E501
:return: The domain of this ProjectTemplateDto. # noqa: E501
:rtype: DomainReference
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this ProjectTemplateDto.
:param domain: The domain of this ProjectTemplateDto. # noqa: E501
:type: DomainReference
"""
self._domain = domain
@property
def sub_domain(self):
"""Gets the sub_domain of this ProjectTemplateDto. # noqa: E501
:return: The sub_domain of this ProjectTemplateDto. # noqa: E501
:rtype: SubDomainReference
"""
return self._sub_domain
@sub_domain.setter
def sub_domain(self, sub_domain):
"""Sets the sub_domain of this ProjectTemplateDto.
:param sub_domain: The sub_domain of this ProjectTemplateDto. # noqa: E501
:type: SubDomainReference
"""
self._sub_domain = sub_domain
@property
def created_by(self):
"""Gets the created_by of this ProjectTemplateDto. # noqa: E501
:return: The created_by of this ProjectTemplateDto. # noqa: E501
:rtype: UserReference
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this ProjectTemplateDto.
:param created_by: The created_by of this ProjectTemplateDto. # noqa: E501
:type: UserReference
"""
self._created_by = created_by
@property
def date_created(self):
"""Gets the date_created of this ProjectTemplateDto. # noqa: E501
:return: The date_created of this ProjectTemplateDto. # noqa: E501
:rtype: datetime
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this ProjectTemplateDto.
:param date_created: The date_created of this ProjectTemplateDto. # noqa: E501
:type: datetime
"""
self._date_created = date_created
@property
def workflow_steps(self):
"""Gets the workflow_steps of this ProjectTemplateDto. # noqa: E501
:return: The workflow_steps of this ProjectTemplateDto. # noqa: E501
:rtype: list[WorkflowStepDto]
"""
return self._workflow_steps
@workflow_steps.setter
def workflow_steps(self, workflow_steps):
"""Sets the workflow_steps of this ProjectTemplateDto.
:param workflow_steps: The workflow_steps of this ProjectTemplateDto. # noqa: E501
:type: list[WorkflowStepDto]
"""
self._workflow_steps = workflow_steps
@property
def workflow_settings(self):
"""Gets the workflow_settings of this ProjectTemplateDto. # noqa: E501
:return: The workflow_settings of this ProjectTemplateDto. # noqa: E501
:rtype: list[WorkflowStepSettingsDto]
"""
return self._workflow_settings
@workflow_settings.setter
def workflow_settings(self, workflow_settings):
"""Sets the workflow_settings of this ProjectTemplateDto.
:param workflow_settings: The workflow_settings of this ProjectTemplateDto. # noqa: E501
:type: list[WorkflowStepSettingsDto]
"""
self._workflow_settings = workflow_settings
@property
def business_unit(self):
"""Gets the business_unit of this ProjectTemplateDto. # noqa: E501
:return: The business_unit of this ProjectTemplateDto. # noqa: E501
:rtype: BusinessUnitReference
"""
return self._business_unit
@business_unit.setter
def business_unit(self, business_unit):
"""Sets the business_unit of this ProjectTemplateDto.
:param business_unit: The business_unit of this ProjectTemplateDto. # noqa: E501
:type: BusinessUnitReference
"""
self._business_unit = business_unit
@property
def notify_providers(self):
"""Gets the notify_providers of this ProjectTemplateDto. # noqa: E501
:return: The notify_providers of this ProjectTemplateDto. # noqa: E501
:rtype: ProjectTemplateNotifyProviderDto
"""
return self._notify_providers
@notify_providers.setter
def notify_providers(self, notify_providers):
"""Sets the notify_providers of this ProjectTemplateDto.
:param notify_providers: The notify_providers of this ProjectTemplateDto. # noqa: E501
:type: ProjectTemplateNotifyProviderDto
"""
self._notify_providers = notify_providers
@property
def assigned_to(self):
"""Gets the assigned_to of this ProjectTemplateDto. # noqa: E501
:return: The assigned_to of this ProjectTemplateDto. # noqa: E501
:rtype: list[AssignmentPerTargetLangDto]
"""
return self._assigned_to
@assigned_to.setter
def assigned_to(self, assigned_to):
"""Sets the assigned_to of this ProjectTemplateDto.
:param assigned_to: The assigned_to of this ProjectTemplateDto. # noqa: E501
:type: list[AssignmentPerTargetLangDto]
"""
self._assigned_to = assigned_to
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProjectTemplateDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProjectTemplateDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import json
from argparse import Namespace
class Config(object):
    """Lightweight container exposing configuration values as attributes.

    The parsed configuration dictionary is wrapped in an
    ``argparse.Namespace`` and exposed via ``self.args``, so callers can
    write ``config.args.some_option`` instead of dictionary lookups.
    """

    def __init__(self, config_dict):
        """Wrap *config_dict* (a dict parsed from a config file) as
        an attribute namespace on ``self.args``.
        """
        self.args = Namespace(**config_dict)

    @classmethod
    def from_config_json(cls, json_file_path):
        """Build a :class:`Config` from a JSON-formatted config file."""
        with open(json_file_path, 'r', encoding='utf-8') as fp:
            return cls(json.load(fp))
|
# Generated by Django 2.1.5 on 2019-02-01 12:18
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated initial schema migration (Django 2.1.5) creating the
    # Productlar table; edit with care -- applied migrations must not change.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Productlar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('kod', models.IntegerField()),
                ('adi', models.CharField(max_length=250)),
                # NOTE(review): the default is the frozen timestamp captured
                # when makemigrations ran, not a callable -- every row created
                # via this default gets the same datetime. The model probably
                # intended default=django.utils.timezone.now; confirm and fix
                # on the model (with a follow-up migration), not here.
                ('date_created', models.DateTimeField(default=datetime.datetime(2019, 2, 1, 12, 18, 8, 639363, tzinfo=utc), verbose_name='date_created')),
                ('status', models.BooleanField(default=True)),
            ],
        ),
    ]
|
"""A module for testing validator classes."""
import yaml
from tests import PROJECT_ROOT
from variation.tokenizers import Tokenize, GeneSymbol
from variation.tokenizers.caches import AminoAcidCache
from gene.query import QueryHandler as GeneQueryHandler
class ValidatorBase:
    """The validator base class.

    Subclasses supply the concrete hooks -- classifier_instance(),
    validator_instance() and fixture_name() -- and inherit the two
    fixture-driven test methods. Intended to be mixed into a
    unittest.TestCase subclass (it calls self.assertTrue/assertFalse).
    """

    def setUp(self):
        """Set up the test cases: load fixtures and build the pipeline."""
        with open(f'{PROJECT_ROOT}/tests/fixtures/validators.yml') as stream:
            self.all_fixtures = yaml.safe_load(stream)
        # Fall back to empty match lists so the tests still run (and pass
        # vacuously) when no fixture section exists for this validator.
        self.fixtures = self.all_fixtures.get(
            self.fixture_name(),
            {'should_match': [], 'should_not_match': []}
        )
        self.tokenizer = Tokenize(AminoAcidCache(),
                                  GeneSymbol(GeneQueryHandler()))
        self.classifier = self.classifier_instance()
        self.validator = self.validator_instance()

    def classifier_instance(self):
        """Return the classifier under test (must be overridden)."""
        raise NotImplementedError()

    def validator_instance(self):
        """Return the validator under test (must be overridden)."""
        raise NotImplementedError()

    def fixture_name(self):
        """Return this validator's fixture section name (must be overridden)."""
        raise NotImplementedError()

    def test_matches(self):
        """Queries in 'should_match' must produce at least one valid result."""
        for x in self.fixtures['should_match']:
            tokens = self.tokenizer.perform(x['query'], [])
            classification = self.classifier.match(tokens)
            validation_results = self.validator.validate(
                classification, normalize_endpoint=True,
                hgvs_dup_del_mode="default"
            )
            is_valid = any(vr.is_valid for vr in validation_results)
            self.assertTrue(is_valid, msg=x)
            self.assertIsNotNone(validation_results, msg=x)

    def test_not_matches(self):
        """Queries in 'should_not_match' must produce no valid result."""
        for x in self.fixtures['should_not_match']:
            tokens = self.tokenizer.perform(x['query'], [])
            classification = self.classifier.match(tokens)
            validation_results = self.validator.validate(
                classification, normalize_endpoint=True
            )
            is_valid = any(vr.is_valid for vr in validation_results)
            self.assertFalse(is_valid, msg=x)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import torch
from torchknickknacks import metrics
# Downloads pretrained AlexNet weights on first run (network access required)
# and pushes a random batch through it to get example logits.
model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet', pretrained = True)
data = torch.rand(64, 3, 224, 224)  # synthetic batch: 64 RGB 224x224 images
output = model(data)

# This is just an example where class coded by 999 has more occurences
# No train test splits are applied to lead to the overrepresentation of class 999
# Classes 0..998 each get probability (1 - 0.05) / 1000 = 0.00095; class 999
# absorbs the remainder (~0.051), i.e. roughly 54x more likely than any other.
p = [(1-0.05)/1000]*999
p.append(1-sum(p))
labels = np.random.choice(list(range(1000)),
                          size = (10000,),
                          p = p)  # imbalanced 1000-class labels
labels = torch.Tensor(labels).long()
# class_weights presumably derives per-class loss weights from the label
# frequencies -- confirm against the torchknickknacks documentation.
weight, label_weight = metrics.class_weights(labels)
loss = torch.nn.CrossEntropyLoss(weight = weight)
# Only the first 64 labels are used because the synthetic batch size is 64.
l = loss(output, labels[:64])
|
import sys
from lib import is_palindrome
largest = 0
lx = 0
ly = 0
# Find the largest palindromic product of two 3-digit numbers.
# 3-digit factors are 100..999 inclusive, so the exclusive range stop must be
# 1000; the original range(100, 999) silently skipped 999 in both loops.
# (The known answer 913 * 993 = 906609 happens to be unaffected, but the
# search domain was wrong.)
for x in range(100, 1000):
    # start y at x: multiplication is commutative, so y < x repeats work
    for y in range(x, 1000):
        product = x * y
        if is_palindrome(product) and product > largest:
            largest = product
            lx = x
            ly = y
print(largest)
from PIL import Image
try:
from io import BytesIO
except ImportError:
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
def resize_image(image, size, content_type, quality=98, file_path=None):
    """Shrink *image* to fit within *size* and save it.

    Args:
        image: PIL Image; mutated in place by ``thumbnail()``.
        size: ``(max_width, max_height)`` bounding box; aspect ratio is kept.
        content_type: format name passed to PIL (e.g. ``'JPEG'``, ``'PNG'``).
        quality: encoder quality (1-100) for formats that support it.
        file_path: optional destination path or file object; when omitted the
            image is written to a new in-memory ``BytesIO`` buffer.

    Returns:
        The ``BytesIO`` buffer, or *file_path* when one was given.
    """
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10;
    # Image.LANCZOS exists since Pillow 2.7, so this works on both old and
    # current Pillow versions.
    image.thumbnail(size, Image.LANCZOS)
    if not file_path:
        save_to = BytesIO()
    else:
        save_to = file_path
    image.save(
        save_to,
        quality=quality,
        format=content_type
    )
    return save_to
|
import os
import glob
import argparse
import configparser as confp
import ruamel.yaml as yaml
def parse_file(path):
    """Parse the INI file provided at the command line.

    Interpolation is disabled and option names keep their original case
    (``optionxform`` is the identity) because keys in the sim configs are
    case-sensitive.

    Args:
        path: filesystem path to the INI config file.

    Returns:
        A populated ``ConfigParser`` instance.
    """
    parser = confp.ConfigParser(interpolation=None)
    parser.optionxform = str
    with open(path, 'r') as config_file:
        # readfp() was deprecated since Python 3.2 and removed in 3.12;
        # read_file() is the drop-in replacement.
        parser.read_file(config_file)
    return parser
def fix_ini(confs):
    """Rewrite each INI sim config in *confs* with a value derived from its
    array_period.

    NOTE(review): ``args`` is not defined in this scope -- it is local to
    main(), and the --section/--key options it refers to are commented out
    there -- so ``conf.set(args.section, args.key, val)`` raises NameError
    when this actually runs. Confirm the intended target section/key and
    restore the CLI options before using this path.
    """
    for confpath in confs:
        print('Modifying config at %s' % confpath)
        conf = parse_file(confpath)
        try:
            period = conf.getfloat('Parameters', 'array_period')
        except confp.NoSectionError:
            # not every file under the node is a sim config; skip the rest
            print("Path is not a sim config")
            continue
        # new value scales with the array period (factor 500; units not
        # stated here -- confirm against the simulation's conventions)
        val = str(int(period*500))
        conf.set(args.section, args.key, val)
        with open(confpath, 'w') as configfile:
            conf.write(configfile)
def parse_yaml(path):
    """Read the YAML file at *path* and return the parsed config object."""
    with open(path, 'r') as config_file:
        contents = config_file.read()
    return yaml.load(contents, Loader=yaml.Loader)
def fix_yaml(confs):
    """Set General.ignore_h = True in every YAML sim config in *confs*."""
    for conf_path in confs:
        print('Modifying config at %s' % conf_path)
        config = parse_yaml(conf_path)
        config['General']['ignore_h'] = True
        with open(conf_path, 'w') as out_file:
            out_file.write(yaml.dump(config))
def main():
    """CLI entry point: walk a node directory and patch every config under it."""
    parser = argparse.ArgumentParser(description="""Add a missing option to all the config files
    belows a node""")
    parser.add_argument('node', type=str, help="Path to node")
    # parser.add_argument('--key',required=True,type=str,help="Key name to be added to config file")
    # #parser.add_argument('--val',required=True,type=str,help="Value of key")
    # parser.add_argument('--section',required=True,type=str,help="""Section in which to add key value
    # pair""")
    parser.add_argument('--type', required=True, choices=['ini', 'yaml'], type=str,
                        help="""Type of config file""")
    args = parser.parse_args()

    if not os.path.isdir(args.node):
        # quit() is an interactive helper injected by the site module and is
        # not guaranteed in scripts; SystemExit prints the message to stderr
        # and returns a nonzero exit status for this error path.
        raise SystemExit("Node doesn't exist")

    if args.type == 'ini':
        confs = glob.glob(os.path.join(args.node, '**/*.ini'), recursive=True)
        fix_ini(confs)
    else:
        confs = glob.glob(os.path.join(args.node, '**/sim_conf.yml'), recursive=True)
        fix_yaml(confs)


if __name__ == '__main__':
    main()
|
from __future__ import division
import numpy as np
# Demo of numpy elementwise arithmetic.
# Original file used Python-2-only `print expr` statements; single-argument
# print() calls produce identical output on both Python 2 and 3, so this
# block is now version-portable.
array1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(array1)
# multiplication
array2 = array1 * array1
print(array2)
# exp multiplication
array3 = array1 ** 3
print(array3)
# subtraction
array4 = array1 - array1
print(array4)
array5 = array2 - array1
print(array5)
# reciprocal (true division thanks to `from __future__ import division` above)
print(1 / array1)
array6 = 1 / array1
print(array6)
|
import cv2
from more_itertools import unique_everseen
import numpy as np
'''
##폴더 경로 설정
from tkinter import filedialog
from tkinter import *
root = Tk()
root.dirName = filedialog.askdirectory();
data_path = root.dirName
print(data_path)
### 폴더 열기
import os
path="D:\workspace\AceVision\Samsung\AAA"
path=os.path.realpath(path)
os.startfile(path)
'''
'''
from more_itertools import unique_everseen # pip install more_itertools==4.3.0
import copy
list1 = [[10, 20], [30, 40], [50, 60], [423, 34], [234, 876], [5, 5]]
list2 = [[10, 20], [30, 40], [50, 60], [423, 34], [587, 333]]
list3 = []
list3 = copy.deepcopy(list1)
for i in list2:
for j in list1:
if i == j:
list3.remove(i)
print(list3)
list4 = [1,2,3,4,5]
del list4[0]
print(list4)
'''
'''
import cv2
import numpy as np
img = cv2.imread('C:/AceVision/abcde.png', cv2.IMREAD_COLOR)
print(img[745, 652])
'''
'''
list1 = [[10,20], [30, 40], [50, 60]]
list2 = [[70, 80], [90, 100]]
list3 = list1+list2
print(list3)
'''
'''
def Rotate(src, num):
if num == 0:
dst = src
elif num == 1:
dst = cv2.transpose(src)
dst = cv2.flip(dst, 1)
elif num == 2:
dst = cv2.transpose(src)
dst = cv2.flip(src, -1)
elif num == 3:
dst = cv2.transpose(src)
dst = cv2.flip(dst, 0)
elif num == -1:
dst = cv2.transpose(src)
dst = cv2.flip(src, -1)
dst = Rotate(dst, 1)
elif num == -2:
dst = cv2.transpose(src)
dst = cv2.flip(src, 1)
dst = Rotate(dst, 2)
dst = cv2.flip(src, -1)
elif num == -3:
dst = cv2.transpose(src)
dst = cv2.flip(src, -1)
dst = Rotate(dst, 3)
return dst
img = cv2.imread('C:/AceVision/2019-03-21/rivet\pass/61000163006321.png')
img = Rotate(img, 0)
img_1 = Rotate(img, -1)
img_2 = Rotate(img, -2)
img_3 = Rotate(img, -3)
#cv2.imshow("img", img)
#cv2.imshow("img_1", img_1)
cv2.imshow("img_2", img_2)
cv2.imshow("img_3", img_3)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
import numpy as np
import cv2
'''
cap1 = cv2.VideoCapture(1)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH, 4608) # Width 4608
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, 3288) # Height 3288
while True:
ret, img = cap1.read()
img = cv2.resize(img, (1280, 960), interpolation=cv2.INTER_LINEAR)
img1 = img.copy()
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap1.release()
cv2.destroyAllWindows()
'''
import numpy as np # pip install numpy==1.15.4
import cv2 # pip install opencv-python==3.4.4.19
from PIL import Image as Img # pip install image==1.5.27
import pyzbar.pyzbar as pyzbar # pip install pyzbar==0.1.7
from more_itertools import unique_everseen # pip install more_itertools==4.3.0
import serial # pip install pyserial==3.4
from PIL import ImageTk
from math import *
import datetime
import time
import os
from tkinter import filedialog
from tkinter import *
import socket
import copy
from pylibdmtx.pylibdmtx import decode
def decode(im):
    """Decode a serial number from a barcode image; return the length found.

    NOTE(review): this definition shadows the ``decode`` imported from
    pylibdmtx just above, making the library function unreachable from here
    on -- confirm that is intended.
    """
    global Serial_No, pre_Serial_No
    # global RV_SN, RV_TIME, RV_ACC, RV_PASS, RV_NG, RV_TACT
    im = Reformat_Image(im, 2, 2)  # upscale 2x before decoding
    decodedObjects = str(pyzbar.decode(im))  # locate barcodes / QR codes
    print("decodedObjects", decodedObjects)
    # NOTE(review): slicing the repr() of the decode result at fixed offsets
    # is brittle -- it assumes exactly one result with a 13-character payload
    # at a fixed position in the string form.
    Serial_No = decodedObjects[16:29]
    print(Serial_No, len(Serial_No))
    return len(Serial_No)
def Reformat_Image(image, ratio_w, ratio_h):
    """Return *image* scaled by the given width/height ratios.

    Uses linear interpolation; the ratios may be fractional (the new
    dimensions are truncated to int).
    """
    src_h, src_w = image.shape[:2]
    new_size = (int(src_w * ratio_w), int(src_h * ratio_h))
    #res = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
    return cv2.resize(image, new_size, interpolation=cv2.INTER_LINEAR)
# Repeatedly upscale the image until a serial number is decoded.
# NOTE(review): this loop never terminates if no barcode is ever decoded,
# and each pass resizes the already-resized image, so the effective scale
# grows multiplicatively (3x, then 3.1x of that, ...) -- confirm intended.
a = 3
img = cv2.imread('C:/AceVision/barcode13.png')
while True:
    img = Reformat_Image(img, a, a)
    lens = decode(img)  # sets the global Serial_No as a side effect
    a += 0.1
    if lens != 0:
        print(Serial_No)
        break
|
class DataGridViewBindingCompleteEventArgs(EventArgs):
    """
    Provides data for the System.Windows.Forms.DataGridView.DataBindingComplete event.

    DataGridViewBindingCompleteEventArgs(listChangedType: ListChangedType)
    """
    # Auto-generated .NET interop stub (IronPython/Python.NET style): bodies
    # are placeholders for IDE/introspection only; the real implementation
    # lives in the CLR assembly.
    @staticmethod
    def __new__(self, listChangedType):
        """ __new__(cls: type,listChangedType: ListChangedType) """
        pass

    # Stub property: getter/setter/deleter are placeholder lambdas.
    ListChangedType = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Gets a value specifying how the list changed.

    Get: ListChangedType(self: DataGridViewBindingCompleteEventArgs) -> ListChangedType
    """
|
"""defines the MainWindow class"""
# coding: utf-8
# pylint: disable=C0111
# standard library
import sys
import os.path
import importlib
#import traceback
import webbrowser
#webbrowser.open("http://xkcd.com/353/")
from typing import Tuple, List, Optional
from pyNastran.gui.qt_version import qt_version
from qtpy import QtCore
from qtpy.QtWidgets import QMessageBox, QApplication
import urllib
# 3rd party
import vtk # if this crashes, make sure you ran setup.py
# pyNastran
import pyNastran
from pyNastran.gui import SCRIPT_PATH, ICON_PATH
from pyNastran.gui.utils.version import check_for_newer_version
from pyNastran.gui.plugins import plugin_name_to_path
from pyNastran.gui.formats import NastranIO
from pyNastran.gui.gui_common import GuiCommon
from pyNastran.gui.menus.download import DownloadWindow
from pyNastran.gui.menus.about.about import AboutWindow
# tcolorpick.png and tabout.png trefresh.png icons on LGPL license, see
# http://openiconlibrary.sourceforge.net/gallery2/?./Icons/actions/color-picker-grey.png
# http://openiconlibrary.sourceforge.net/gallery2/?./Icons/actions/help-hint.png
# http://openiconlibrary.sourceforge.net/gallery2/?./Icons/actions/view-refresh-8.png
try:
import qdarkstyle
IS_DARK = True
except ImportError:
IS_DARK = False
def get_stylesheet():
    """Return the Qt stylesheet to apply, or ``None`` for the default look.

    Dark-mode styling via qdarkstyle is currently disabled; the commented
    code below is retained for when it is re-enabled.
    """
    #if IS_DARK:
        #mapper = {
            #'pyside2' : qdarkstyle.load_stylesheet_pyside2,
            #'pyqt5' : qdarkstyle.load_stylesheet_pyqt5,
        #}
        #return mapper[qt_version]()
    return None
class MainWindow(GuiCommon, NastranIO):
    """
    The MainWindow class combines the base GuiCommon class with all the functionality
    with the remaining format holdout (Nastran). It also defines which formats
    will be supported in the exe.

    MainWindow -> GuiCommon -> GuiQtCommon
    gui.py -> gui_common -> gui_qt_common

    warp vector might finally work
    http://vtk.1045678.n5.nabble.com/How-to-get-grid-result-from-vtkWarpVector-td5727100.html

    glyphs
    http://www.itk.org/Wiki/VTK/Examples/Python/Visualization/ElevationBandsWithGlyphs

    list of VTK6 classes
    http://www.vtk.org/doc/nightly/html/annotated.html

    background grid
    http://www.vtk.org/Wiki/VTK/Examples/Python/Visualization/CubeAxesActor

    pick visible
    http://www.vtk.org/Wiki/VTK/Examples/Cxx/Filtering/ExtractVisibleCells

    plane projection
    http://www.igstk.org/Wiki/VTK/Examples/Cxx/SimpleOperations/ProjectPointPlane

    warping
    http://engronline.ee.memphis.edu/eece4731/djr_lec16.pdf

    banded filter
    http://www.igstk.org/Wiki/VTK/Examples/Cxx/VisualizationAlgorithms/BandedPolyDataContourFilter

    speeding up vtk cell loading in unstructured grids
    http://vtk.1045678.n5.nabble.com/Speed-up-cell-allocation-td5733208.html#a5733214
    """
    def __init__(self, inputs, **kwds):
        """
        inputs=None
        """
        html_logging = True
        self.stylesheet = get_stylesheet()

        # these are in alphabetical order except for Nastran
        # this includes the bedge, surf, ugrid line (listed as AFLR in the gui)
        fmt_order = [
            # no results unless specified
            'nastran',  # results
            'h5nastran',
            'abaqus',
            'avus',
            'bedge', 'surf', 'ugrid', 'ugrid3d',  # aflr
            'cart3d',  # results
            'degen_geom',
            'fast',
            'lawgs',
            'obj',
            'openfoam_hex', 'openfoam_shell', 'openfoam_faces',  # openfoam - results
            'panair',  # results
            'shabp',  # results
            'stl',
            'su2',
            'tecplot',  # results
            'tetgen',
            'usm3d',  # results
            'avl',  # no results
            'vrml',  # no results
        ]
        #GuiCommon2.__init__(self, fmt_order, html_logging, inputs, parent)
        kwds['inputs'] = inputs
        kwds['fmt_order'] = fmt_order
        kwds['html_logging'] = html_logging
        super(MainWindow, self).__init__(**kwds)
        #fmt_order=fmt_order, inputs=inputs,
        #html_logging=html_logging,
        if qt_version in ['pyqt5', 'pyside2']:
            NastranIO.__init__(self)
        else:  # pragma: no cover
            raise NotImplementedError('qt_version=%r is not supported' % qt_version)

        self.build_fmts(fmt_order, stop_on_failure=False)

        self.logo = os.path.join(ICON_PATH, 'logo.png')
        self.set_script_path(SCRIPT_PATH)
        self.set_icon_path(ICON_PATH)

        is_gui = True
        if 'is_gui' in inputs:
            is_gui = inputs['is_gui']
            assert isinstance(is_gui, bool), is_gui
        self.start_logging()
        self._load_plugins()
        self.setup_gui(is_gui)
        self.setup_post(inputs)
        self._check_for_latest_version()

    def _load_plugins(self, plugin_name_to_path: Optional[List[Tuple[str, str, str]]]=None):
        """loads the plugins from pyNastran/gui/plugins.py

        plugin_name_to_path = [
            ('auto_wireframe', os.path.join(PLUGIN_DIR, 'auto_wireframe.py'), 'AutoWireframe'),
            ('rfs_viewer', os.path.join(PLUGIN_DIR, 'rfs', 'rfs_viewer.py'), 'RFSViewer'),
        ]

        .. see:: https://stackoverflow.com/questions/19009932/import-arbitrary-python-source-file-python-3-3
        """
        if plugin_name_to_path is None:
            return
        for module_name, plugin_file, class_name in plugin_name_to_path:  # list
            if module_name in self.modules:
                raise RuntimeError('module_name=%r is already defined' % module_name)
            if not os.path.exists(plugin_file):
                # auto_wireframe is a test module and is not intended to
                # actually load unless you're testing
                #print('Failed to load plugin %r because %s doesnt exist' % (
                    #module_name, plugin_file))
                if module_name != 'auto_wireframe':
                    self.log_warning('Failed to load plugin %r because %s doesnt exist' % (
                        module_name, plugin_file))
                continue
            loader = importlib.machinery.SourceFileLoader(module_name, plugin_file)
            module = loader.load_module()
            try:
                my_class = getattr(module, class_name)
            except AttributeError:
                self.log_warning('Failed to load plugin %r because class %s doesnt exist' % (
                    module_name, class_name))
                # bug fix: this was `return`, which silently skipped every
                # plugin after the first broken one; keep loading the rest
                continue
            class_obj = my_class(self)
            self.modules[module_name] = class_obj

            # tools/checkables
            tools, checkables = class_obj.get_tools_checkables()
            self.tools += tools
            for key, is_active in checkables.items():
                self.checkables[key] = is_active

    def _check_for_latest_version(self, check: bool=True) -> bool:
        """
        checks the website for information regarding the latest gui version

        Looks for:
            ## pyNastran v0.7.2 has been Released (4/25/2015)
        """
        #import time
        #time0 = time.time()
        version_latest, unused_version_current, is_newer = check_for_newer_version()
        if is_newer and check:
            url = pyNastran.__website__
            win = DownloadWindow(url, version_latest, win_parent=self)
            win.show()
            return True
        #dt = time.time() - time0
        #print('dt_version_check = %.2f' % dt)
        return False

    def mousePressEvent(self, event):
        """Forward mouse-press events to VTK unless we're in pick mode."""
        if not self.run_vtk:
            return
        #print('press x,y = (%s, %s)' % (ev.x(), ev.y()))
        if self.is_pick:
            #self.___saveX = ev.x()
            #self.___saveY = ev.y()
            pass
        else:
            self.vtk_interactor.mousePressEvent(event)

    #def LeftButtonPressEvent(self, ev):

    def mouseReleaseEvent(self, event):
        """Forward mouse-release events to VTK unless we're in pick mode."""
        #print('release x,y = (%s, %s)' % (ev.x(), ev.y()))
        if self.is_pick:
            pass
        else:
            # bug fix: this previously forwarded releases to mousePressEvent,
            # so VTK saw two presses and never a release for each click
            self.vtk_interactor.mouseReleaseEvent(event)

    def open_website(self):
        """loads the pyNastran main website"""
        self._urlopen(pyNastran.__website__)

    def open_docs(self):
        """loads the pyNastran docs website"""
        # `import urllib` at module scope does not guarantee that the
        # urllib.request / urllib.error submodules are loaded; import them
        # explicitly so this method doesn't depend on another module having
        # pulled them in first.
        import urllib.request
        import urllib.error
        url = pyNastran.__docs__
        try:
            urllib.request.urlopen(url)
        except (urllib.error.HTTPError, urllib.error.URLError):
            # versioned docs are unreachable; fall back to Read the Docs
            url = pyNastran.__docs_rtd__
        self._urlopen(url)

    def open_issue(self):
        """loads the pyNastran issue tracker"""
        self._urlopen(pyNastran.__issue__)

    def open_discussion_forum(self):
        """loads the pyNastran discussion forum website"""
        self._urlopen(pyNastran.__discussion_forum__)

    def _urlopen(self, url):
        """opens a URL in the default browser (no-op in non-GUI mode)"""
        if self.is_gui:
            webbrowser.open(url)

    def about_dialog(self):
        """Display about dialog"""
        data = {
            'font_size': self.settings.font_size,
        }
        win = AboutWindow(data, win_parent=self, show_tol=True)
        win.show()

    def on_reload(self):
        """
        Runs the reload button.

        Reload allows you to edit the input model and "reload" the data
        without having to go to the pulldown menu. If you don't like
        this behavior, implement the self.on_reload_nastran() or similar
        method for a given format.
        """
        camera = self.get_camera_data()
        unused_title = self.title
        case = self.icase
        on_reload_name = 'on_reload_%s' % self.format
        if hasattr(self, on_reload_name):
            getattr(self, on_reload_name)()  # on_reload_nastran
        else:
            self.on_load_geometry(self.infile_name, self.format, raise_error=False)

        if self.out_filename is None:
            msg = '%s - %s' % (self.format, self.infile_name)
        else:
            msg = '%s - %s - %s' % (self.format, self.infile_name, self.out_filename)
        self.window_title = msg
        self.log_command('on_reload()')
        # restore the case and camera so the reload is visually seamless
        self.cycle_results(case)
        self.on_set_camera_data(camera, show_log=False)

    def closeEvent(self, *args):
        """
        Handling saving state before application when application is
        being closed.
        """
        settings = QtCore.QSettings()
        settings.clear()
        self.settings.save(settings)

        q_app = QApplication.instance()
        if q_app is None:
            sys.exit()
        q_app.quit()
|
#Copyright 2010 Brian E. Chapman
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes of
thegraphs are the targets and modifiers identified in the text; edges of the
graphs are relationships between the targets. This provides for much simpler
code than what exists in the other version of pyConText where each object has a
dictionary of __modifies and __modifiedby that must be kept in sync with each
other.
Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items"""
import os

# Load the package version from version.py without importing the package
# itself (avoids import-time side effects and circular imports).
version = {}
with open(os.path.join(os.path.dirname(__file__), "version.py")) as f0:
    exec(f0.read(), version)
__version__ = version['__version__']
|
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Standard implementation of Bungie activity and entities."""
from __future__ import annotations
# Explicit public API of this module.
__all__: tuple[str, ...] = (
    "Activity",
    "PostActivity",
    "ActivityValues",
    "ExtendedValues",
    "ExtendedWeaponValues",
    "PostActivityPlayer",
    "PostActivityTeam",
    "AvailableActivity",
    "Rewards",
    "Challenges",
    "Matchmaking",
    "GuidedGame",
    "Location",
    "CharacterActivity",
)
import typing
import attrs
from aiobungie.internal import enums
from aiobungie.internal import helpers
if typing.TYPE_CHECKING:
import collections.abc as collections
import datetime
from aiobungie import traits
from aiobungie import typedefs
from aiobungie.crate import entity
from aiobungie.crate import user
@typing.final
class Difficulty(int, enums.Enum):
    """An enum for activities difficulties."""
    # Tiers are ordered from easiest (0) to hardest (7).
    TRIVIAL = 0
    EASY = 1
    NORMAL = 2
    CHALLENGING = 3
    HARD = 4
    BRAVE = 5
    ALMOST_IMPOSSIBLE = 6
    IMPOSSIBLE = 7
@attrs.define(kw_only=True)
class Rewards:
    """Represents rewards achieved from activities."""
    # Network state used only to perform API requests; excluded from repr/eq/hash.
    net: traits.Netrunner = attrs.field(repr=False, hash=False, eq=False)
    hash: int
    """Reward's hash."""
    instance_id: typing.Optional[int]
    """An optional instance id for this reward. `None` if not found."""
    quantity: int
    """Reward's quantity."""
    has_conditional_visibility: bool
    """Semantics unconfirmed upstream (was `???`); presumably mirrors Bungie's `hasConditionalVisibility` — TODO confirm."""
    async def fetch_self(self) -> entity.InventoryEntity:
        """Fetch the definition of this reward.
        Returns
        -------
        `aiobungie.crate.InventoryEntity`
            An inventory item entity of the associated hash.
        """
        return await self.net.request.fetch_inventory_item(self.hash)
@attrs.define(kw_only=True)
class Challenges:
    """Represents challenges found in activities."""
    # Network state used only to perform API requests; excluded from repr/eq/hash.
    net: traits.Netrunner = attrs.field(repr=False, hash=False, eq=False)
    objective_hash: int
    """The challenge's objective hash."""
    dummy_rewards: collections.Sequence[Rewards]
    """A sequence of the challenge rewards as they're represented in the UI."""
    async def fetch_objective(self) -> entity.ObjectiveEntity:
        """Fetch the objective of this challenge."""
        return await self.net.request.fetch_objective_entity(self.objective_hash)
@attrs.define(kw_only=True)
class Matchmaking:
    """Represents activity's matchmaking information."""
    is_matchmaking: bool
    """Whether the activity is matchmaking or not."""
    min_party: int
    """The minimum number of how many player can join this activity as a party."""
    max_party: int
    """The maximum number of how many player can join this activity as a party."""
    max_player: int
    """The maximum number of how many player can join this activity."""
    requires_guardian_oath: bool
    """If true, you have to Solemnly Swear to be up to Nothing But Good(tm) to play."""
@attrs.define(kw_only=True)
class GuidedGame:
    """Represents information about a guided game activity."""
    max_lobby_size: int
    """The max amount of people that can be in the lobby."""
    min_lobby_size: int
    """The minimum amount of people that can be in the lobby."""
    disband_count: int
    """If 1, the guided group cannot be disbanded.
    Otherwise, take the total number of players in the activity and subtract this number
    That is the total number of votes needed for the guided group to disband.
    """
@attrs.define(kw_only=True)
class Location:
    """Represents information about an activity location."""
    hash: typing.Union[typedefs.IntAnd[enums.Place], typedefs.IntAnd[enums.Planet]]
    """Location hash."""
    # NOTE(review): name likely intends "activation_source" (cf. the docstring
    # below) — it is a public field, so confirm callers before renaming.
    activision_source: str
    """A hint that the UI uses to figure out how this location is activated by the player."""
    item_hash: typing.Optional[int]
    """The items hash if populated."""
    objective_hash: typing.Optional[int]
    """The objective hash if populated."""
    activity_hash: typing.Optional[int]
    """The activity hash if populated."""
@attrs.define(kw_only=True)
class CharacterActivity:
    """Represents a character activity profile component."""
    date_started: datetime.datetime
    """The start datetime of the activity."""
    current_hash: int
    """The current activity hash that the player is now playing."""
    current_mode_hash: int
    """The current activity mode hash that the player is now playing."""
    current_mode: typing.Optional[typedefs.IntAnd[enums.GameMode]]
    """The current activity mode presented as an enum."""
    current_mode_types: typing.Optional[
        collections.Sequence[typedefs.IntAnd[enums.GameMode]]
    ]
    """A sequence of the current activity game-mode types presented as an enum."""
    current_mode_hashes: typing.Optional[collections.Sequence[int]]
    """A sequence of the current activity's mode hashes."""
    current_playlist_hash: typing.Optional[int]
    """The current activity playlist hash."""
    last_story_hash: int
    """The last completed story hash."""
    available_activities: collections.Sequence[AvailableActivity]
    """A sequence of the available activities associated with this character."""
@attrs.define(kw_only=True)
class AvailableActivity:
    """Represents an available activity that can be found in character activities profile component."""
    hash: int
    """Activity's hash."""
    is_new: bool
    """Whether the activity is new or not."""
    can_lead: bool
    """Whether the character can lead this activity or not."""
    can_join: bool
    """Whether the character can join this activity or not."""
    is_completed: bool
    """Whether the character completed this activity before or not."""
    is_visible: bool
    """Whether the activity is visible to this character or not."""
    display_level: typing.Optional[int]
    """The activity's display level."""
    recommended_light: typing.Optional[int]
    """The recommended light power to enter this activity."""
    difficulty: typedefs.IntAnd[Difficulty]
    """Activity's difficulty tier."""
    # Declared but not implemented yet; calling it raises via the decorator.
    @helpers.unimplemented(available_in="0.2.7")
    async def fetch_self(self) -> entity.ActivityEntity:
        """Fetch the definition of this activity."""
        ...
@attrs.define(kw_only=True)
class ActivityValues:
    """Information about values found in an activity.
    fields here include kills, deaths, K/D, assists, completion time, etc.
    """
    assists: int
    """Activity's assists"""
    is_completed: bool
    """Whether the activity was completed or no."""
    kills: int
    """Activity's kills."""
    deaths: int
    """Activity's deaths."""
    opponents_defeated: int
    """The amount of opponents killed in this activity."""
    efficiency: float
    """Activity's efficiency."""
    kd_ratio: float
    """Activity's kill/death ratio."""
    kd_assists: float
    """Activity's Kill/Death/Assists."""
    score: int
    """If the activity has a score, This will be available otherwise 0."""
    played_time: tuple[int, str]
    """The total time the player was in this activity represented as a tuple of int, str."""
    team: typing.Optional[int]
    """Semantics unconfirmed upstream (was `???`); presumably the player's team id — TODO confirm."""
    completion_reason: str
    """The reason why the activity was completed. usually its Unknown."""
    fireteam_id: int
    """The fireteam id associated with this activity."""
    player_count: int
    """Activity's player count."""
    start_seconds: tuple[int, str]
    """A tuple of int and str of when did the player start the activity in seconds."""
    duration: tuple[int, str]
    """A tuple of int, string of The activity's duration, Example int, string format `1845`, `30m 45s`"""
    # activity_id: typing.Optional[int]
    # """When a stat represents the best, most, longest, fastest or some other personal best,
    # the actual activity ID where that personal best was established is available on this property.
    # """
    team_score: int
    """Semantics unconfirmed upstream (was `???`); presumably the player's team score — TODO confirm."""
@attrs.define(kw_only=True)
class ExtendedWeaponValues:
    """Information about post activity extended player's weapon values data."""
    reference_id: int
    """Weapon's hash or reference id."""
    kills: int
    """Weapon's total kills."""
    precision_kills: int
    """Weapon's total precision kills."""
    assists: typing.Optional[int]
    """Optional weapon assists number."""
    assists_damage: typing.Optional[int]
    """Optional weapon assists damage number."""
    precision_kills_percentage: tuple[int, str]
    """A tuple of weapon's precision kills percentage as an int and a str.
    A string version will be formatted as: `100%`
    and the int version will be formatted as: `1`
    """
@attrs.define(kw_only=True)
class ExtendedValues:
    """Information about post activity extended player values data."""
    precision_kills: int
    """Player precision kills."""
    grenade_kills: int
    """Player grenade kills."""
    melee_kills: int
    """Player melee kills."""
    super_kills: int
    """Player super kills."""
    ability_kills: int
    """Player ability kills."""
    weapons: typing.Optional[collections.Collection[ExtendedWeaponValues]]
    """Collection of unique player weapons used in this activity. if no weapons found None will be returned."""
@attrs.define(kw_only=True)
class PostActivityTeam:
    """Represents a post activity team information.
    Teams will be available in PvP gamemodes, e.g., Gambit, Crucible, Iron Banner. etc.
    """
    id: int
    """Team id."""
    name: str
    """Team name."""
    is_defeated: bool
    """Whether the team has been defeated or won."""
    score: int
    """Team score"""
@attrs.define(kw_only=True)
class PostActivityPlayer:
    """Represents a post activity Destiny 2 player."""
    standing: int
    """Standing of the player."""
    destiny_user: user.DestinyMembership
    """An object of the destiny membership bound to this player."""
    score: int
    """Score of the player."""
    character_id: int
    """The id of the character the player finished this activity with."""
    character_class: str
    """A string of the character class the player finished this activity with."""
    class_hash: int
    """The hash of the player's character class."""
    race_hash: int
    """The hash of the player's character race."""
    gender_hash: int
    """The hash of the player's character gender."""
    character_level: int
    """The player's character's level."""
    light_level: int
    """The light level of the player's character."""
    emblem_hash: int
    """The emblem hash of the player's character."""
    values: ActivityValues
    """Player's information that occurred in this activity."""
    extended_values: ExtendedValues
    """Extended player information occurred in this activity.
    This include weapon, super, grenade kills and more.
    """
@attrs.define(kw_only=True)
class PostActivity:
    """Represents a Destiny 2 post activity details."""
    net: traits.Netrunner = attrs.field(repr=False, hash=False, eq=False)
    """A network state used for making external requests."""
    starting_phase: int
    """If this activity has "phases", this is the phase at which the activity was started."""
    hash: int
    """The activity's reference id or hash."""
    membership_type: enums.MembershipType
    """The activity player's membership type."""
    instance_id: int
    """The activity's instance id."""
    mode: enums.GameMode
    """The activity mode or type."""
    modes: collections.Sequence[enums.GameMode]
    """A sequence of the activity's gamemodes."""
    is_private: bool
    """Whether this activity is private or not."""
    occurred_at: datetime.datetime
    """A datetime of when did this activity occurred."""
    players: collections.Collection[PostActivityPlayer]
    """Collection of players that were in the activity."""
    teams: typing.Optional[collections.Collection[PostActivityTeam]]
    """Collections the teams that were playing against each other.
    This field is optional and will be `None` if the activity don't have teams.
    """
    @property
    def is_flawless(self) -> bool:
        """Whether this activity was a flawless run or not."""
        return all(player.values.deaths == 0 for player in self.players)
    @property
    def is_solo(self) -> bool:
        """Whether this activity was completed solo or not."""
        return len(self.players) == 1
    @property
    def is_solo_flawless(self) -> bool:
        """Whether this activity was completed solo and flawless."""
        # Logical `and` instead of bitwise `&`: short-circuits and is the
        # idiomatic operator for combining two booleans.
        return self.is_solo and self.is_flawless
    @property
    def reference_id(self) -> int:
        """An alias to the activity's hash"""
        return self.hash
    def __int__(self) -> int:
        return self.hash
@attrs.define(kw_only=True)
class Activity:
    """Represents a Bungie Activity."""
    net: traits.Netrunner = attrs.field(repr=False, hash=False, eq=False)
    """A network state used for making external requests."""
    hash: int
    """The activity's reference id or hash."""
    membership_type: enums.MembershipType
    """The activity player's membership type."""
    instance_id: int
    """The activity's instance id."""
    mode: enums.GameMode
    """The activity mode or type."""
    modes: collections.Sequence[enums.GameMode]
    """Sequence of the activity's gamemodes."""
    is_private: bool
    """Whether this activity is private or not."""
    occurred_at: datetime.datetime
    """A datetime of when did this activity occurred."""
    values: ActivityValues
    """Information occurred in this activity."""
    @property
    def is_flawless(self) -> bool:
        """Whether this activity was a flawless run or not."""
        # is_completed is already a bool; the redundant `is True` is dropped.
        return self.values.deaths == 0 and self.values.is_completed
    @property
    def is_solo(self) -> bool:
        """Whether this activity was completed solo or not."""
        return self.values.player_count == 1 and self.values.is_completed
    @property
    def is_solo_flawless(self) -> bool:
        """Whether this activity was completed solo and flawless."""
        # Logical `and` instead of bitwise `&` for boolean combination.
        return self.is_solo and self.is_flawless
    @property
    def reference_id(self) -> int:
        """An alias to the activity's hash"""
        return self.hash
    async def fetch_post(self) -> PostActivity:
        """Fetch this activity's data after it was finished.
        Returns
        -------
        `PostActivity`
            A post activity object.
        """
        return await self.net.request.fetch_post_activity(self.instance_id)
    def __int__(self) -> int:
        return self.instance_id
|
# Default TCP ports for the supported URI schemes.
_SERVICES = {
    "http": 80,
    # Bug fix: "https" was missing, so the embedded test_https (and any
    # caller asking for https) raised KeyError.
    "https": 443,
}


def get_port(scheme):
    """Return the default port for *scheme*.

    Raises KeyError for unknown schemes.
    """
    return _SERVICES[scheme]


if __name__ == '__main__':
    import unittest

    class Test(unittest.TestCase):
        def test_http(self):
            self.assertEqual(get_port("http"), 80)

        def test_https(self):
            self.assertEqual(get_port("https"), 443)

    unittest.main()
|
"""."""
import numpy as np
from ..optimization import PSO, SimulAnneal, GA
class SHBPSO(PSO):
    """Particle-swarm optimization of the SHB phase-drift model below."""

    C = 299792458
    E0 = 0.51099895e6
    EMIN = E0 + 90e3
    DRIFT = 615e-3
    FREQUENCY = 499.658e6
    WAVELEN = C/FREQUENCY
    NPOINT = 51
    BUN_LEN = np.pi

    def initialization(self):
        """Define the search-space limits and the swarm size."""
        self._upper_limits = np.array([np.pi, 40e3])
        self._lower_limits = np.array([-np.pi, 10e3])
        self.beta0 = self.calc_beta(self.EMIN)
        self._nswarm = 10 + 2 * int(np.sqrt(len(self._upper_limits)))

    def phase_drift(self, phi_c, vg):
        """Propagate bunch phases through the drift following the buncher."""
        half_len = self.BUN_LEN / 2
        phi0 = np.linspace(phi_c - half_len, phi_c + half_len, self.NPOINT)
        energy = self.EMIN - vg * np.sin(phi0)
        beta = self.calc_beta(energy)
        dphi = (2 * np.pi / self.WAVELEN) * (1/self.beta0 - 1/beta)
        phif = phi0 + self.DRIFT * dphi
        return phi0, phif

    def calc_beta(self, E):
        """Relativistic beta for total energy *E* (same units as E0)."""
        gamma = E/self.E0
        return np.sqrt(1 - 1/gamma**2)

    def calc_merit_function(self):
        """Negated phase-compression ratio std(phi0)/std(phif) per particle."""
        ratios = np.zeros(self._nswarm)
        for idx in range(self._nswarm):
            phase, voltage = self._position[idx]
            initial, final = self.phase_drift(phase, voltage)
            ratios[idx] = np.std(initial) / np.std(final)
        return -ratios
class SHBSimulAnneal(SimulAnneal):
    """Simulated annealing applied to the SHB phase-drift model below."""

    C = 299792458
    E0 = 0.51099895e6
    EMIN = E0 + 90e3
    DRIFT = 615e-3
    FREQUENCY = 499.658e6
    WAVELEN = C/FREQUENCY
    NPOINT = 51
    BUN_LEN = np.pi

    def initialization(self):
        """Define search limits, maximum step sizes and the reference beta."""
        self._upper_limits = np.array([np.pi, 40e3])
        self._lower_limits = np.array([-np.pi, 10e3])
        self._max_delta = np.array([2*np.pi, 30e3])
        self.beta0 = self.calc_beta(self.EMIN)
        self._temperature = 0

    def phase_drift(self, phi_c, vg):
        """Propagate bunch phases through the drift following the buncher."""
        half_len = self.BUN_LEN / 2
        phi0 = np.linspace(phi_c - half_len, phi_c + half_len, self.NPOINT)
        energy = self.EMIN - vg * np.sin(phi0)
        beta = self.calc_beta(energy)
        dphi = (2 * np.pi / self.WAVELEN) * (1/self.beta0 - 1/beta)
        phif = phi0 + self.DRIFT * dphi
        return phi0, phif

    def calc_beta(self, E):
        """Relativistic beta for total energy *E* (same units as E0)."""
        gamma = E/self.E0
        return np.sqrt(1 - 1/gamma**2)

    def calc_merit_function(self):
        """Negated phase-compression ratio for the current position."""
        phase, voltage = self._position[0], self._position[1]
        initial, final = self.phase_drift(phase, voltage)
        return -(np.std(initial) / np.std(final))
class SHBGA(GA):
    """Genetic-algorithm variant of the SHB phase-drift optimization."""
    # Same physics constants as SHBPSO/SHBSimulAnneal above.
    C = 299792458
    E0 = 0.51099895e6
    EMIN = E0 + 90e3
    DRIFT = 615e-3
    FREQUENCY = 499.658e6
    WAVELEN = C/FREQUENCY
    NPOINT = 51
    BUN_LEN = np.pi
    def __init__(self, npop, nparents, mutrate):
        """Forward the GA hyper-parameters to the base class unchanged."""
        super().__init__(npop=npop, nparents=nparents, mutrate=mutrate)
    def initialization(self):
        """Define search-space limits and the reference beta."""
        self._upper_limits = np.array([np.pi, 40e3])
        self._lower_limits = np.array([-np.pi, 10e3])
        self.beta0 = self.calc_beta(self.EMIN)
        # NOTE(review): _temperature looks copied from the SimulAnneal variant;
        # confirm the GA base class actually reads it.
        self._temperature = 0
    def phase_drift(self, phi_c, vg):
        """Propagate bunch phases through the drift following the buncher."""
        phi_min = phi_c - self.BUN_LEN / 2
        phi_max = phi_c + self.BUN_LEN / 2
        phi0 = np.linspace(phi_min, phi_max, self.NPOINT)
        E = self.EMIN - vg * np.sin(phi0)
        beta = self.calc_beta(E)
        dphi = (2 * np.pi / self.WAVELEN) * (1/self.beta0 - 1/beta)
        phif = phi0 + self.DRIFT * dphi
        return phi0, phif
    def calc_beta(self, E):
        """Relativistic beta for total energy *E* (same units as E0)."""
        gamma = E/self.E0
        return np.sqrt(1 - 1/gamma**2)
    def calc_merit_function(self):
        """Negated phase-compression ratio per individual in the population."""
        f_out = np.zeros(self._npop)
        for i in range(self._npop):
            phi_c, vg = self._indiv[i, 0], self._indiv[i, 1]
            phi_init, phi_final = self.phase_drift(phi_c, vg)
            t0 = np.std(phi_init)
            tf = np.std(phi_final)
            f_out[i] = t0/tf
        return - f_out
|
import os
import shutil
import json
import pytest
from archive import Archive
def test_archive_rar():
    """
    Test extractall for rar archives
    """
    file_path = 'tests/assets/rar.rar'
    destination_path = 'tests/assets/extracted'
    # exist_ok replaces the previous "check then create" race-prone pattern.
    os.makedirs(destination_path, exist_ok=True)
    rarfile = Archive(file_path)
    rarfile.extractall(destination_path)
    # Extraction must have produced at least one entry.
    assert len(os.listdir(destination_path)) != 0
    shutil.rmtree(destination_path)
def test_archive_zip():
    """
    Test extractall for zip archives
    """
    file_path = 'tests/assets/zip.zip'
    destination_path = 'tests/assets/extracted'
    # exist_ok replaces the previous "check then create" race-prone pattern.
    os.makedirs(destination_path, exist_ok=True)
    zipfile = Archive(file_path)
    zipfile.extractall(destination_path)
    # Extraction must have produced at least one entry.
    assert len(os.listdir(destination_path)) != 0
    shutil.rmtree(destination_path)
def test_archive_tar():
    """
    Test extractall for tar archives
    """
    file_path = 'tests/assets/tar.tar.xz'
    destination_path = 'tests/assets/extracted'
    # exist_ok replaces the previous "check then create" race-prone pattern.
    os.makedirs(destination_path, exist_ok=True)
    tarfile = Archive(file_path)
    tarfile.extractall(destination_path)
    # Extraction must have produced at least one entry.
    assert len(os.listdir(destination_path)) != 0
    shutil.rmtree(destination_path)
|
from django.core.management.base import BaseCommand
class BaseMonitorCommand(BaseCommand):
    help = 'Base events, resource, and errors monitor.'
    def add_arguments(self, parser):
        """Register the CLI options shared by all monitor commands."""
        # Seconds to wait between polling iterations.
        parser.add_argument('--sleep_interval',
                            type=int,
                            default=1)
        # NOTE(review): without action='store_true' this option consumes a
        # value, and any non-empty string (even "false") is truthy. Confirm
        # whether a boolean flag was intended before changing the CLI contract.
        parser.add_argument('--persist',
                            default=False,
                            help='Persist collected events.', )
|
#!/usr/bin/env python
#
# Code for dumping the bitcoin Berkeley db files in a human-readable format
#
from bsddb3.db import *
import logging
import sys
from address import dump_addresses
from wallet import dump_wallet, dump_accounts
from blkindex import dump_blkindex_summary
from transaction import dump_transaction
from block import dump_block, dump_block_n, search_blocks, check_block_chain
from util import determine_db_dir, create_env
def main():
    """Parse CLI options and dump the requested parts of a bitcoin
    Berkeley-DB data directory in human-readable form."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--datadir", dest="datadir", default=None,
                      help="Look for files here (defaults to bitcoin default)")
    parser.add_option("--wallet", action="store_true", dest="dump_wallet", default=False,
                      help="Print out contents of the wallet.dat file")
    parser.add_option("--wallet-tx", action="store_true", dest="dump_wallet_tx", default=False,
                      help="Print transactions in the wallet.dat file")
    parser.add_option("--wallet-tx-filter", action="store", dest="wallet_tx_filter", default="",
                      help="Only print transactions that match given string/regular expression")
    # NOTE(review): default="" here while the other store_true flags use
    # default=False — works (empty string is falsy) but is inconsistent.
    parser.add_option("--accounts", action="store_true", dest="dump_accounts", default="",
                      help="Print out account names, one per line")
    parser.add_option("--blkindex", action="store_true", dest="dump_blkindex", default=False,
                      help="Print out summary of blkindex.dat file")
    parser.add_option("--check-block-chain", action="store_true", dest="check_chain", default=False,
                      help="Scan back and forward through the block chain, looking for inconsistencies")
    parser.add_option("--address", action="store_true", dest="dump_addr", default=False,
                      help="Print addresses in the addr.dat file")
    parser.add_option("--transaction", action="store", dest="dump_transaction", default=None,
                      help="Dump a single transaction, given hex transaction id (or abbreviated id)")
    parser.add_option("--block", action="store", dest="dump_block", default=None,
                      help="Dump a single block, given its hex hash (or abbreviated hex hash) OR block height")
    parser.add_option("--search-blocks", action="store", dest="search_blocks", default=None,
                      help="Search the block chain for blocks containing given regex pattern")
    (options, args) = parser.parse_args()
    # Fall back to the platform-default data dir when --datadir is not given.
    if options.datadir is None:
        db_dir = determine_db_dir()
    else:
        db_dir = options.datadir
    try:
        db_env = create_env(db_dir)
    except DBNoSuchFileError:
        logging.error("Couldn't open " + db_dir)
        sys.exit(1)
    # A non-empty transaction filter implies dumping wallet transactions.
    dump_tx = options.dump_wallet_tx
    if len(options.wallet_tx_filter) > 0:
        dump_tx = True
    if options.dump_wallet or dump_tx:
        dump_wallet(db_env, options.dump_wallet, dump_tx, options.wallet_tx_filter)
    if options.dump_accounts:
        dump_accounts(db_env)
    if options.dump_addr:
        dump_addresses(db_env)
    if options.check_chain:
        check_block_chain(db_env)
    if options.dump_blkindex:
        dump_blkindex_summary(db_env)
    if options.dump_transaction is not None:
        dump_transaction(db_dir, db_env, options.dump_transaction)
    if options.dump_block is not None:
        # Short arguments are tried as block heights first, then as hex hashes.
        if len(options.dump_block) < 7: # Probably an integer...
            try:
                dump_block_n(db_dir, db_env, int(options.dump_block))
            except ValueError:
                dump_block(db_dir, db_env, options.dump_block)
        else:
            dump_block(db_dir, db_env, options.dump_block)
    if options.search_blocks is not None:
        search_blocks(db_dir, db_env, options.search_blocks)
    db_env.close()
if __name__ == '__main__':
    main()
|
# Stdlib imports
import os
import json
# Django imports
from django.conf import settings
from django.core.files import File
# Pip imports
from web3 import Web3
# App imports
from .contract import Contract
class VotingManagerContract(Contract):
    """Typed wrapper around the on-chain VotingManager contract."""

    def __init__(self, client, abi=None, address=None):
        """
        :param client: (EthClient)
        :param abi: contract abi (defaults to the bundled build artifact)
        :param address: contract address (defaults to Django settings)
        """
        if not abi:
            abi = self.load_default_abi()
        if not address:
            address = settings.VOTING_MANAGER_CONTRACT_ADDRESS
        super().__init__(client, abi, address)

    @classmethod
    def voting_details_log_parser(cls, log):
        """Flatten a VotingDetails event log into a plain dict."""
        args = log.get('args')
        return {
            "proposal_id": args["proposalId"],
            "is_voting_open": args["isVotingOpen"],
            "block_number": log["blockNumber"]
        }

    @classmethod
    def votes_log_parser(cls, log):
        """Flatten a Vote event log into a plain dict."""
        args = log.get('args')
        return {
            "proposal_id": args["proposalId"],
            "voter": Web3.toChecksumAddress(args["voter"]),
            "selected_option": args["selectedOption"],
            "block_number": log["blockNumber"]
        }

    def load_voting_details_logs(self, from_block):
        """Return an iterator of parsed VotingDetails events since *from_block*."""
        logs = super().fetch_events('VotingDetails', from_block)
        # Pass the bound classmethod directly instead of wrapping in a lambda.
        return map(self.voting_details_log_parser, logs)

    def load_votes_logs(self, from_block):
        """Return an iterator of parsed Vote events since *from_block*."""
        logs = super().fetch_events('Vote', from_block)
        return map(self.votes_log_parser, logs)

    def load_default_abi(self):
        """Load the contract ABI from the bundled build artifact."""
        artifacts_path = os.path.join(settings.STATIC_ROOT, 'contracts/VotingManager.json')
        # Bug fix: json.load(open(...)) leaked the file handle; use a context
        # manager so it is closed deterministically.
        with open(artifacts_path, 'rb') as artifacts_file:
            artifacts = json.load(artifacts_file)
        return artifacts.get('abi')
from unittest import TestCase
import pytest
from todo_sample.entities.todo import Todo
from todo_sample.use_cases.exceptions import TodoInvalidIdFormatException, TodoNotFoundException
from todo_sample.use_cases.get_todo import GetTodo
MODULE = "todo_sample.use_cases.get_todo"
class TestGetTodo(TestCase):
    """Unit tests for the GetTodo use case."""

    @pytest.fixture(autouse=True)
    def _fake_todo_fixture(self, mocker):
        self.fake_todo = Todo(title="title", description="description")

    @pytest.fixture(autouse=True)
    def _fake_todo_repo_fixture(self, mocker):
        self.fake_todo_repository = mocker.Mock()
        self.get_todo_uc = GetTodo(todo_repo=self.fake_todo_repository)

    def test_get_todo(self):
        self.fake_todo_repository.find_by_id.return_value = self.fake_todo
        todo = self.get_todo_uc.call(id=str(self.fake_todo.id))
        self.fake_todo_repository.find_by_id.assert_called_once_with(id=self.fake_todo.id)
        assert self.fake_todo == todo

    def test_get_todo_wrong_id_format(self):
        # Bug fix: the old try/except pattern passed silently when the use case
        # raised nothing; pytest.raises fails the test in that case.
        fake_id = "some-wrong-format-id"
        with pytest.raises(TodoInvalidIdFormatException) as exc_info:
            self.get_todo_uc.call(id=fake_id)
        assert fake_id in str(exc_info.value)

    def test_get_todo_not_found(self):
        self.fake_todo_repository.find_by_id.return_value = None
        # Same fix as above: assert the exception is actually raised.
        with pytest.raises(TodoNotFoundException) as exc_info:
            self.get_todo_uc.call(id=str(self.fake_todo.id))
        assert exc_info.value.id == self.fake_todo.id
        self.fake_todo_repository.find_by_id.assert_called_once_with(id=self.fake_todo.id)
|
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
import module
class Filters(commands.Cog):
    """Profanity filter: deletes messages containing blacklisted words and
    records an infraction for the author."""

    def __init__(self, client):
        self.client = client
        self.bad_words = []

    @commands.Cog.listener()
    async def on_message(self, msg):
        # Re-read the file on every message so blacklist/whitelist edits take
        # effect immediately without a restart (original behavior preserved).
        with open("src/bad-words.txt", "r") as f:
            self.bad_words = f.read().splitlines()
        for word in self.bad_words:
            if word.lower() in msg.content.lower():
                module.user(str(msg.author.id)).addInfraction()
                await msg.delete()
                await msg.channel.send(f"{msg.author.mention}, please don't use inappropriate language")
                # Bug fix: without this break a message matching two entries
                # called msg.delete() twice, raising NotFound on the second.
                break

    @commands.command()
    @has_permissions(administrator=True)
    async def blacklist(self, ctx, word : str):
        """Append *word* to the blacklist file."""
        with open("src/bad-words.txt", "a+") as f:
            f.write(word + "\n")

    @commands.command()
    @has_permissions(administrator=True)
    async def whitelist(self, ctx, word : str):
        """Remove *word* from the blacklist file (case-insensitive)."""
        with open("src/bad-words.txt", "r") as f:
            words = f.read().splitlines()
        # Bug fix: the old code checked `word.lower() in words` but then popped
        # `words.index(word)` with the caller's original casing, which raised
        # ValueError whenever the casings differed. Filter case-insensitively.
        kept = [w for w in words if w.lower() != word.lower()]
        if len(kept) != len(words):
            with open("src/bad-words.txt", "w") as f:
                f.write("\n".join(kept))
def setup(client):
    # discord.py extension entry point: registers the cog on the bot.
    client.add_cog(Filters(client))
from udp_server import UdpServer
def main():
    """Entry point: start a UDP server listening on port 5000."""
    udp_server = UdpServer(5000)
    udp_server.startWork()


if __name__ == '__main__':
    main()
|
from libs.config import alias, gget, gset, color
@alias(True, func_alias="sw")
def run():
    """
    switch
    (for input Non-ascii) Switch input between raw input and better input.
    """
    # Toggle the stored flag; absent key defaults to raw input being off.
    new_state = not gget("raw_input", default=False)
    status = color.green('On') if new_state else color.red('Off')
    print(f"\nRaw input: {status}\n")
    gset("raw_input", new_state, True)
|
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import SOFTWARECONFIGURATION_TYPE_NAME, SOFTWARECONFIGURATION_TYPE_URI
from openapi_server.models.software_configuration import SoftwareConfiguration # noqa: E501
from openapi_server import util
def softwareconfigurations_get(username=None, label=None, page=None, per_page=None):  # noqa: E501
    """List all instances of SoftwareConfiguration
    Gets a list of all instances of SoftwareConfiguration (more information in https://w3id.org/okn/o/sd#SoftwareConfiguration) # noqa: E501
    :param username: Name of the user graph to query
    :type username: str
    :param label: Filter by label
    :type label: str
    :param page: Page number
    :type page: int
    :param per_page: Items per page
    :type per_page: int
    :rtype: List[SoftwareConfiguration]
    """
    query_args = dict(
        username=username,
        label=label,
        page=page,
        per_page=per_page,
        rdf_type_uri=SOFTWARECONFIGURATION_TYPE_URI,
        rdf_type_name=SOFTWARECONFIGURATION_TYPE_NAME,
        kls=SoftwareConfiguration,
    )
    return query_manager.get_resource(**query_args)
def softwareconfigurations_id_delete(id, user=None):  # noqa: E501
    """Delete an existing SoftwareConfiguration
    Delete an existing SoftwareConfiguration (more information in https://w3id.org/okn/o/sd#SoftwareConfiguration) # noqa: E501
    :param id: The ID of the SoftwareConfiguration to be retrieved
    :type id: str
    :param user: Username
    :type user: str
    :rtype: None
    """
    delete_args = dict(
        id=id,
        user=user,
        rdf_type_uri=SOFTWARECONFIGURATION_TYPE_URI,
        rdf_type_name=SOFTWARECONFIGURATION_TYPE_NAME,
        kls=SoftwareConfiguration,
    )
    return query_manager.delete_resource(**delete_args)
def softwareconfigurations_id_get(id, username=None):  # noqa: E501
    """Get a single SoftwareConfiguration by its id
    Gets the details of a given SoftwareConfiguration (more information in https://w3id.org/okn/o/sd#SoftwareConfiguration) # noqa: E501
    :param id: The ID of the SoftwareConfiguration to be retrieved
    :type id: str
    :param username: Name of the user graph to query
    :type username: str
    :rtype: SoftwareConfiguration
    """
    query_args = dict(
        id=id,
        username=username,
        rdf_type_uri=SOFTWARECONFIGURATION_TYPE_URI,
        rdf_type_name=SOFTWARECONFIGURATION_TYPE_NAME,
        kls=SoftwareConfiguration,
    )
    return query_manager.get_resource(**query_args)
def softwareconfigurations_id_put(id, user=None, software_configuration=None):  # noqa: E501
    """Update an existing SoftwareConfiguration
    Updates an existing SoftwareConfiguration (more information in https://w3id.org/okn/o/sd#SoftwareConfiguration) # noqa: E501
    :param id: The ID of the SoftwareConfiguration to be retrieved
    :type id: str
    :param user: Username
    :type user: str
    :param software_configuration: An old SoftwareConfigurationto be updated
    :type software_configuration: dict | bytes
    :rtype: SoftwareConfiguration
    """
    if connexion.request.is_json:
        software_configuration = SoftwareConfiguration.from_dict(connexion.request.get_json())  # noqa: E501
    put_args = dict(
        id=id,
        user=user,
        body=software_configuration,
        rdf_type_uri=SOFTWARECONFIGURATION_TYPE_URI,
        rdf_type_name=SOFTWARECONFIGURATION_TYPE_NAME,
        kls=SoftwareConfiguration,
    )
    return query_manager.put_resource(**put_args)
def softwareconfigurations_post(user=None, software_configuration=None):  # noqa: E501
    """Create one SoftwareConfiguration
    Create a new instance of SoftwareConfiguration (more information in https://w3id.org/okn/o/sd#SoftwareConfiguration) # noqa: E501
    :param user: Username
    :type user: str
    :param software_configuration: Information about the SoftwareConfigurationto be created
    :type software_configuration: dict | bytes
    :rtype: SoftwareConfiguration
    """
    if connexion.request.is_json:
        software_configuration = SoftwareConfiguration.from_dict(connexion.request.get_json())  # noqa: E501
    post_args = dict(
        user=user,
        body=software_configuration,
        rdf_type_uri=SOFTWARECONFIGURATION_TYPE_URI,
        rdf_type_name=SOFTWARECONFIGURATION_TYPE_NAME,
        kls=SoftwareConfiguration,
    )
    return query_manager.post_resource(**post_args)
|
import sqlite3
import os
# Repository root (two levels above the current working directory).
FILE_PATH = os.path.abspath(os.path.join(os.getcwd(), "../.."))
# NOTE(review): neither constant is referenced in the code below — confirm
# they are used by other modules before removing.
DATA_PATH = os.path.join(FILE_PATH,'config')
async def connectDB(userid, uid=None, mys=None):
    """Create or refresh the UIDDATA row for *userid* in ID_DATA.db.

    Inserts the row if missing, then overwrites UID and/or MYSID with any
    non-None values supplied.
    """
    conn = sqlite3.connect('ID_DATA.db')
    # try/finally so the connection is closed even if an execute raises
    # (the original leaked the connection on error).
    try:
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS UIDDATA
       (USERID INT PRIMARY KEY NOT NULL,
       UID TEXT,
       MYSID TEXT);''')
        c.execute("INSERT OR IGNORE INTO UIDDATA (USERID,UID,MYSID) \
               VALUES (?, ?,?)",(userid,uid,mys))
        if uid:
            c.execute("UPDATE UIDDATA SET UID = ? WHERE USERID=?",(uid,userid))
        if mys:
            c.execute("UPDATE UIDDATA SET MYSID = ? WHERE USERID=?",(mys,userid))
        conn.commit()
    finally:
        conn.close()
async def selectDB(userid):
    """Look up the stored ids for *userid*.

    Returns [mys_id, 3] when a MYSID is stored, [uid, 2] when only a UID is
    stored, and None when the user is unknown or has neither id.
    """
    conn = sqlite3.connect('ID_DATA.db')
    # Bug fixes vs. the original: the connection is now closed (it leaked),
    # and row presence is tested with fetchone()/None instead of the
    # truthiness of the USERID column (which misfired for userid 0).
    try:
        cursor = conn.execute("SELECT * FROM UIDDATA WHERE USERID = ?",(userid,))
        row = cursor.fetchone()
    finally:
        conn.close()
    if row is None:
        return None
    if row[2]:
        return [row[2], 3]
    if row[1]:
        return [row[1], 2]
    return None
import numpy as np
from dataset._shared import Dataset
def count(items, T=8, classes=5):
    """Build a counting dataset: each target counts upward (mod *classes*)
    from the floored initial value, terminated by an <EOS> token (0)."""
    # Create initial value
    X = np.random.uniform(0, classes, size=(items, 1))
    # Create target by incrementing
    inc = np.tile(np.arange(0, T), (items, 1))
    t = np.mod(X + inc, classes)
    t = np.floor(t)
    # add <EOS>; labels are shifted up by one so 0 can serve as <EOS>
    t = t + 1
    t = np.hstack([t, np.zeros((items, 1))])
    return Dataset(
        (X / classes).astype('float32'),
        t.astype('int32'),
        classes + 1
    )
def floor(items, classes=9):
    """Dataset whose target is floor(x) followed by a zero (<EOS>) column."""
    # Uniform reals in [1, classes + 1) — one scalar input per item.
    inputs = np.random.uniform(1, classes + 1, size=(items, 1))
    targets = np.zeros((items, 2))
    targets[:, 0] = np.floor(inputs[:, 0])
    return Dataset(
        inputs.astype('float32'),
        targets.astype('int32'),
        classes + 1
    )
def memorize(items, classes=9):
    """Dataset where input x yields the target sequence 1..x padded with
    zeros to length 10."""
    # Create initial value
    X = np.random.randint(1, classes + 1, size=(items, 1))
    # Target row i is [1, 2, ..., X[i]] followed by zero padding.
    t = np.zeros((items, 10))
    for i, x_i in enumerate(X[:, 0]):
        t[i, 0:x_i] = np.arange(1, x_i + 1)
    return Dataset(
        X.astype('float32'),
        t.astype('int32'),
        classes + 1
    )
def copy(items, T=9, classes=10):
    """Copy task: the target reproduces the input sequence, terminated by 0."""
    source = np.random.randint(1, classes, size=(items, T - 1)).astype('int32')
    target = np.zeros((items, T), dtype='int32')
    target[:, :T - 1] = source
    return Dataset(
        (source / (classes - 1)).astype('float32'), target, classes
    )
|
class Solution:
    def toLowerCase(self, str: str) -> str:
        """Lower-case every uppercase character; leave other characters as-is."""
        return "".join(ch.lower() if ch.isupper() else ch for ch in str)
class Solution:
    def toLowerCase(self, str: str) -> str:
        """ASCII-only lower-casing: shift codes 65-90 ('A'-'Z') up by 32."""
        chars = []
        for ch in str:
            code = ord(ch)
            if 65 <= code <= 90:
                chars.append(chr(code + 32))
            else:
                chars.append(ch)
        return "".join(chars)
class Solution:
    def toLowerCase(self, str):
        """Shift ASCII codes 65-90 ('A'-'Z') up by 32 to reach 'a'-'z'."""
        result = []
        for character in str:
            code = ord(character)
            if 65 <= code <= 90:
                code += 32
            result.append(chr(code))
        return "".join(result)
|
import os
import yaml
from env_alias.utils import logger
from env_alias.exceptions.EnvAliasException import EnvAliasException
class EnvAliasConfig:
    """Loads and validates an env-alias YAML configuration file.

    Any string of the form ``env:NAME`` anywhere in the parsed YAML is
    replaced with the value of the environment variable ``NAME``.
    """

    # debug flag passed at construction
    debug = None
    # parsed configuration (contents under config_root) after load_config()
    config = None
    # top-level YAML key that must contain the configuration
    config_root = None

    def __init__(self, config_root, debug=False):
        self.debug = debug
        self.config_root = config_root

    def load_config(self, configuration_file, return_config=False):
        """Load ``configuration_file`` into ``self.config``.

        :param configuration_file: path to the YAML configuration file
        :param return_config: if True, also return the parsed config
        :raises EnvAliasException: if the file cannot be located or parsed
        """
        if configuration_file is None or not os.path.isfile(configuration_file):
            raise EnvAliasException('Unable to locate configuration file', configuration_file)
        logger.debug('Loading config: {}'.format(configuration_file))
        self.config = self.__load_config(configuration_file)
        if return_config:
            return self.config
        return

    def __load_config(self, config_filename):
        """Parse the YAML file, expand env: references, return the config root."""
        with open(config_filename, 'r') as f:
            try:
                loaded_config = yaml.safe_load(f.read())
            except yaml.YAMLError as e:
                raise EnvAliasException(e)

        def replace_env_values(node):
            # Recursively replace 'env:NAME' strings with the env var's value.
            if node is None:
                return node
            elif type(node) in (int, bool):
                return node
            elif type(node) is str:
                if node.lower()[0:4] == 'env:':
                    # BUGFIX: strip exactly the 4-character prefix. The
                    # previous node.replace('env:', '') was case-sensitive
                    # (missed 'ENV:NAME' despite the case-insensitive check
                    # above) and removed 'env:' occurrences anywhere in the
                    # string, not just the prefix.
                    env_name = node[4:]
                    logger.debug('Config element set via env value {}'.format(env_name))
                    value = os.getenv(env_name, None)
                    if value is None or len(value) < 1:
                        raise EnvAliasException('Config requested env value not set', env_name)
                    return value
                return node
            elif type(node) is list:
                return [replace_env_values(item) for item in node]
            elif type(node) is dict:
                return {item_k: replace_env_values(item_v) for item_k, item_v in node.items()}
            else:
                raise EnvAliasException('Unsupported type in replace_env_values()', node)

        loaded_config = replace_env_values(loaded_config)
        if type(loaded_config) is not dict or self.config_root not in loaded_config.keys():
            raise EnvAliasException('Unable to locate config root', self.config_root)
        logger.debug('Config successfully loaded: {}'.format(config_filename))
        return loaded_config[self.config_root]
|
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import json
import logging
import zlib
# non-stdlib imports
import azure.common.credentials
import azure.keyvault
# local imports
from . import settings
from . import util
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
# tag key/value marking a keyvault secret as zlib-compressed + base64-encoded
_SECRET_ENCODED_FORMAT_KEY = 'format'
_SECRET_ENCODED_FORMAT_VALUE = 'zlib+base64'
def _explode_secret_id(uri):
# type: (str) -> Tuple[str, str, str]
"""Explode Secret Id URI into parts
:param str uri: secret id uri
:rtype: tuple
:return: base url, secret name, version
"""
tmp = uri.split('/')
base_url = '/'.join(tmp[:3])
nparam = len(tmp[4:])
if nparam == 1:
return base_url, tmp[4], ''
elif nparam == 2:
return base_url, tmp[4], tmp[5]
else:
raise ValueError(
'cannot handle keyvault secret id uri: {}'.format(uri))
def fetch_credentials_json(
        client, keyvault_uri, keyvault_credentials_secret_id):
    # type: (azure.keyvault.KeyVaultClient, str, str) -> dict
    """Fetch the credentials json stored as a KeyVault secret, transparently
    decoding zlib+base64 secrets marked via their tags.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param str keyvault_uri: keyvault uri
    :param str keyvault_credentials_secret_id: secret id for creds json
    :rtype: dict
    :return: credentials dict
    """
    if client is None:
        raise RuntimeError(
            'KeyVault client not initialized, please ensure proper AAD '
            'credentials and KeyVault parameters have been provided')
    logger.debug('fetching credentials json from keyvault')
    if util.is_none_or_empty(keyvault_credentials_secret_id):
        raise RuntimeError(
            'cannot fetch credentials json from keyvault without a valid '
            'keyvault credentials secret id')
    cred = client.get_secret(
        *_explode_secret_id(keyvault_credentials_secret_id))
    if util.is_none_or_empty(cred.value):
        raise ValueError(
            'credential json from secret id {} is invalid'.format(
                keyvault_credentials_secret_id))
    # decode/decompress if the secret is tagged with an encoding format
    if cred.tags is not None:
        try:
            encoding = cred.tags[_SECRET_ENCODED_FORMAT_KEY]
        except KeyError:
            # untagged secret: assume plain json
            encoding = None
        if encoding is not None:
            if encoding == _SECRET_ENCODED_FORMAT_VALUE:
                cred.value = util.decode_string(
                    zlib.decompress(util.base64_decode_string(cred.value)))
            else:
                raise RuntimeError(
                    '{} encoding format is invalid'.format(encoding))
    return json.loads(cred.value)
def store_credentials_json(client, config, keyvault_uri, secret_name):
    # type: (azure.keyvault.KeyVaultClient, dict, str, str) -> None
    """Compress and store the credentials json as a KeyVault secret.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param dict config: configuration dict
    :param str keyvault_uri: keyvault uri
    :param str secret_name: secret name for creds json
    """
    if client is None:
        raise RuntimeError(
            'KeyVault client not initialized, please ensure proper AAD '
            'credentials and KeyVault parameters have been provided')
    payload = json.dumps(
        {'credentials': settings.raw_credentials(config, True)}
    ).encode('utf8')
    # zlib compress then base64-encode for compact secret storage
    encoded = util.base64_encode_string(zlib.compress(payload))
    logger.debug('storing secret in keyvault {} with name {}'.format(
        keyvault_uri, secret_name))
    # tag the secret so fetch_credentials_json knows to decompress it
    bundle = client.set_secret(
        keyvault_uri, secret_name, encoded,
        tags={_SECRET_ENCODED_FORMAT_KEY: _SECRET_ENCODED_FORMAT_VALUE}
    )
    logger.info('keyvault secret id for name {}: {}'.format(
        secret_name,
        azure.keyvault.key_vault_id.parse_secret_id(bundle.id).base_id))
def delete_secret(client, keyvault_uri, secret_name):
    # type: (azure.keyvault.KeyVaultClient, str, str) -> None
    """Remove a named secret from a KeyVault.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param str keyvault_uri: keyvault uri
    :param str secret_name: secret name for creds json
    """
    if client is None:
        raise RuntimeError(
            'KeyVault client not initialized, please ensure proper AAD '
            'credentials and KeyVault parameters have been provided')
    logger.info(
        'deleting secret in keyvault {} with name {}'.format(
            keyvault_uri, secret_name))
    client.delete_secret(keyvault_uri, secret_name)
def list_secrets(client, keyvault_uri):
    # type: (azure.keyvault.KeyVaultClient, str) -> None
    """Log the id and metadata of every secret in a KeyVault.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param str keyvault_uri: keyvault uri
    """
    if client is None:
        raise RuntimeError(
            'KeyVault client not initialized, please ensure proper AAD '
            'credentials and KeyVault parameters have been provided')
    logger.debug('listing secret ids in keyvault {}'.format(keyvault_uri))
    for secret in client.get_secrets(keyvault_uri):
        logger.info('id={} enabled={} tags={} content_type={}'.format(
            secret.id, secret.attributes.enabled, secret.tags,
            secret.content_type))
def get_secret(client, secret_id, value_is_json=False):
    # type: (azure.keyvault.KeyVaultClient, str, bool) -> str
    """Retrieve a secret value from KeyVault, optionally parsed as json.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param str secret_id: secret id to retrieve
    :param bool value_is_json: expected value is json
    :rtype: str
    :return: secret value (dict when value_is_json and value is non-empty)
    """
    if client is None:
        raise RuntimeError(
            'cannot retrieve secret {} with invalid KeyVault client'.format(
                secret_id))
    base_url, name, version = _explode_secret_id(secret_id)
    value = client.get_secret(base_url, name, version).value
    if value_is_json and util.is_not_empty(value):
        return json.loads(value)
    return value
def parse_secret_ids(client, config):
    # type: (azure.keyvault.KeyVaultClient, dict) -> None
    """Resolve every secret id referenced in the credentials config by
    fetching its value from KeyVault and writing it back into the config.
    :param azure.keyvault.KeyVaultClient client: keyvault client
    :param dict config: configuration dict
    """
    # resolve the batch account key, if referenced by secret id
    secret_id = settings.credentials_batch_account_key_secret_id(config)
    if secret_id is not None:
        logger.debug('fetching batch account key from keyvault')
        account_key = get_secret(client, secret_id)
        if util.is_none_or_empty(account_key):
            raise ValueError(
                'batch account key retrieved for secret id {} is '
                'invalid'.format(secret_id))
        settings.set_credentials_batch_account_key(config, account_key)
    # resolve storage account keys, one per storage credential link
    for link in settings.iterate_storage_credentials(config):
        secret_id = settings.credentials_storage_account_key_secret_id(
            config, link)
        if secret_id is None:
            continue
        logger.debug(
            'fetching storage account key for link {} from keyvault'.format(
                link))
        storage_key = get_secret(client, secret_id)
        if util.is_none_or_empty(storage_key):
            raise ValueError(
                'storage account key retrieved for secret id {} is '
                'invalid'.format(secret_id))
        settings.set_credentials_storage_account_key(
            config, link, storage_key)
    # resolve docker registry passwords
    for registry in settings.iterate_docker_registry_servers(config):
        secret_id = settings.credentials_docker_registry_password_secret_id(
            config, registry)
        if secret_id is None:
            continue
        logger.debug(
            ('fetching docker registry password for registry {} '
             'from keyvault').format(registry))
        password = get_secret(client, secret_id)
        if util.is_none_or_empty(password):
            raise ValueError(
                'docker registry password retrieved for secret id {} is '
                'invalid'.format(secret_id))
        settings.set_credentials_docker_registry_password(
            config, registry, password)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import typing
from .descriptors import Descriptor
class ServicesMap:
    """Index of service Descriptors keyed by service type.

    Registration order is preserved per type; the most recently registered
    descriptor wins for single lookups.
    """

    def __init__(self, services: typing.List[Descriptor]):
        self._type_map: typing.Dict[type, typing.List[Descriptor]] = {}
        for descriptor in services:
            self._type_map.setdefault(descriptor.service_type, []).append(descriptor)

    def get(self, service_type: type) -> Descriptor:
        '''Return the last-registered descriptor, or None if not found.'''
        candidates = self._type_map.get(service_type)
        if candidates:
            # non-empty by construction: the latest registration wins
            return candidates[-1]

    def getall(self, service_type: type) -> typing.List[Descriptor]:
        '''Return all descriptors for the type, or None if not found.'''
        return self._type_map.get(service_type)
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
import matplotlib.pyplot as plt
from nn.module import Linear
from nn.loss import MSELoss
from tools.basic import *
# ---- Demo 1: fit y = a*x + b with a single Linear layer via gradient descent ----
a = 50
b = 10
# 50 scalar inputs drawn uniformly in [-5, 5], as a column vector
x = np.random.uniform(-5, 5, 50).reshape((-1, 1))
# noisy linear targets: a*x + b plus uniform noise in [-100, 100]
y = a * x + b + (np.random.uniform(-100, 100, 50).reshape((-1, 1)))
n = x.shape[1]
d = 1
iteration = 100  # NOTE(review): unused here; nb_itr below drives the loop
gradient_step = 1e-4
loss_mse = MSELoss()
lin_layer = Linear(n, d, type=1)
nb_itr = 10000
loss = []
# full-batch gradient descent
for _ in range(nb_itr):
    hidden_layer = lin_layer.forward(x)
    # scalar MSE for monitoring
    l = loss_mse.forward(y, hidden_layer).mean()
    loss.append(l)
    # backprop: loss gradient -> layer delta -> accumulate -> step -> reset
    loss_back = loss_mse.backward(y, hidden_layer)
    delta_linear = lin_layer.backward_delta(x, loss_back)
    lin_layer.backward_update_gradient(x, loss_back)
    lin_layer.update_parameters(gradient_step=gradient_step)
    lin_layer.zero_grad()
pred = lin_layer.forward(x)
# figure 1: data vs fitted line
plt.figure()
plt.scatter(x, y, label="data", color='black')
plt.plot(x, pred, color='red', label='predection')
# figure 2: same plot plus residual segments between targets and predictions
plt.figure()
plt.scatter(x, y, label="data", color='black')
plt.plot(x, pred, color='red', label='predection')
for i in range(len(x)):
    plt.plot([x[i], x[i]], [y[i], pred[i]], c="blue", linewidth=1)
plt.legend()
plt.xlabel("datax")
plt.ylabel("datay")
plt.title("prediction ligne for ax+b")
plt.show()
# training loss curve
plt.figure()
plt.plot(np.arange(nb_itr), loss)
plt.title("loss on each iteration for ax+b")
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()
plt.show()
# ---- Demo 2: binary classification with a linear layer + MSE on 2D data ----
batchsize = 1000
datax, datay = gen_arti(centerx=1, centery=1, sigma=0.4, nbex=batchsize, data_type=0, epsilon=0.1)
testx, testy = gen_arti(centerx=1, centery=1, sigma=0.4, nbex=batchsize, data_type=0, epsilon=0.1)
# remap labels from {-1, 1} to {0, 1} column vectors
datay = np.where(datay == -1, 0, 1).reshape((-1, 1))
testy = np.where(testy == -1, 0, 1).reshape((-1, 1))
n = datax.shape[1]
d = 1
type = 2  # NOTE(review): shadows the builtin `type`; passed to Linear below
iteration = 150000
gradient_step = 1e-5
loss_mse = MSELoss()
lin_layer = Linear(n, d, type=type)
for _ in range(iteration):
    # forward
    hidden_l = lin_layer.forward(datax)
    # backward
    loss_back = loss_mse.backward(datay, hidden_l)
    delta_linear = lin_layer.backward_delta(datax, loss_back)
    lin_layer.backward_update_gradient(datax, loss_back)
    lin_layer.update_parameters(gradient_step=gradient_step)
    lin_layer.zero_grad()
def predict(x):
    # threshold the linear output at 0.5 to produce a {0, 1} class label
    hidden_l = lin_layer.forward(x)
    return np.where(hidden_l >= 0.5, 1, 0)
# accuracy on the held-out test set
acc = np.where(testy == predict(testx), 1, 0).mean()
print("accuracy : ", acc)
plt.figure()
plot_frontiere(testx, predict, step=100)
plot_data(testx, testy.reshape(-1))
plt.title("accuracy = " + str(acc))
plt.show()
import pytest
from dagster import seven
@pytest.mark.skipif(seven.IS_WINDOWS, reason="docker doesn't work on windows tests")
def test_build_container(dagster_docker_image):
    # the dagster_docker_image fixture supplies the built test image tag,
    # which should embed the buildkite core test image name
    assert "buildkite-test-image-core" in dagster_docker_image
@pytest.mark.skipif(seven.IS_WINDOWS, reason="docker doesn't work on windows tests")
def test_run_grpc_server_in_container(grpc_port, grpc_host):
    # fixtures start a gRPC server in the container; host must be non-empty
    # and the server must listen on the expected fixed port
    assert grpc_host
    assert grpc_port == 8090
|
from django.http import HttpResponse
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from .models import Book, Review
from .utils import average_rating
def book_list(request):
    """Render every book with its average rating and review count."""
    summaries = []
    for book in Book.objects.all():
        reviews = book.review_set.all()
        if reviews:
            rating = average_rating([review.rating for review in reviews])
            review_count = len(reviews)
        else:
            # no reviews yet: no rating to show
            rating = None
            review_count = 0
        summaries.append({
            "book": book,
            "book_rating": rating,
            "number_of_reviews": review_count,
        })
    return render(request, "reviews/books_list.html", {"book_list": summaries})
def book_details(request, pkid):
    """Render a single book (404 if missing) with its reviews and avg rating."""
    book = get_object_or_404(Book, pk=pkid)
    reviews = book.review_set.all()
    ratings = [review.rating for review in reviews]
    # attach the computed average to the instance for template access
    book.avg = average_rating(ratings)
    context = {"book": book, "reviews": reviews}
    return render(request, "reviews/book_details.html", context)
def fake_db_query_with_many(*args):
    """Stand-in for a database query; ignores its arguments and returns no rows."""
    return list()
def index(request):
    """Render the base template, greeting ?name=... (empty/missing -> 'world')."""
    name = request.GET.get("name")
    if not name:
        # falls back for both a missing param and an empty string
        name = "world"
    return render(request, "base.html", {"name": name})
def index_bad_or(request):
    """Deliberately buggy variant kept for demonstration: ``dict.get`` with a
    default does NOT replace an empty-string query param, so ``?name=``
    renders as just "Hello"."""
    # Breaks front end with empty string. Just becomes hello
    name = request.GET.get("name", "world")
    # the commented out one is the correct one
    # name = request.GET.get("name") or "world"
    return HttpResponse(f"Hello {name}")
def search(request):
    """Render search results; an empty or missing query is normalized to None."""
    query = request.GET.get("search") or None
    results = fake_db_query_with_many(query)
    context = {"search": query, "book_list": results}
    return render(request, "book-search.html", context)
def welcome_view(request):
    """Render the static landing page with no extra context."""
    return render(request, "base.html")
|
import requests
import pandas as pd
import deepdish as dd
import os
import pickle
import warnings
from .analyze import analyze
from ..datageometry import DataGeometry
# Google Drive direct-download endpoint; the file id is passed as a query param
BASE_URL = 'https://docs.google.com/uc?export=download'
homedir = os.path.expanduser('~/')
# local on-disk cache for downloaded example datasets
datadir = os.path.join(homedir, 'hypertools_data')
# maps example dataset name -> Google Drive file id
datadict = {
    'weights' : '1-zzaUMHuXHSzFcGT4vNlqcV8tMY4q7jS',
    'weights_avg' : '1v_IrU6n72nTOHwD3AnT2LKgtKfHINyXt',
    'weights_sample' : '1CiVSP-8sjdQN_cdn3uCrBH5lNOkvgJp1',
    'spiral' : '1JB4RIgNfzGaTFWRBCzi8CQ2syTE-BnWg',
    'mushrooms' : '1wRXObmwLjSHPAUWC8QvUl37iY2qRObg8',
    'wiki' : '1e5lCi17bLbOXuRjiGO2eqkEWVpeCuRvM',
    'sotus' : '1D2dsrLAXkC3eUUaw2VV_mldzxX5ufmkm',
    'nips' : '1Vva4Xcc5kUX78R0BKkLtdCWQx9GI-FG2',
    'wiki_model' : '1OrN1F39GkMPjrB2bOTgNRT1pNBmsCQsN',
    'nips_model' : '1orgxWJdWYzBlU3EF2u7EDsZrp3jTNNLG',
    'sotus_model' : '1g2F18WLxfFosIqhiLs79G0MpiG72mWQr'
}
def load(dataset, reduce=None, ndims=None, align=None, normalize=None):
    """
    Load a .geo file or example data

    Parameters
    ----------
    dataset : string
        The name of the example dataset. Can be a `.geo` file, or one of a
        number of example datasets listed below.

        `weights` is list of 2 numpy arrays, each containing average brain
        activity (fMRI) from 18 subjects listening to the same story, fit using
        Hierarchical Topographic Factor Analysis (HTFA) with 100 nodes. The rows
        are fMRI measurements and the columns are parameters of the model.

        `weights_sample` is a sample of 3 subjects from that dataset.

        `weights_avg` is the dataset split in half and averaged into two groups.

        `spiral` is numpy array containing data for a 3D spiral, used to
        highlight the `procrustes` function.

        `mushrooms` is a numpy array comprised of features (columns) of a
        collection of 8,124 mushroomm samples (rows).

        `sotus` is a collection of State of the Union speeches from 1989-2018.

        `wiki` is a collection of wikipedia pages used to fit wiki_model.

        `wiki_model` is a sklearn Pipeline (CountVectorizer->LatentDirichletAllocation)
        trained on a sample of wikipedia articles. It can be used to transform
        text to topic vectors.

    normalize : str or False or None
        If set to 'across', the columns of the input data will be z-scored
        across lists (default). That is, the z-scores will be computed with
        with respect to column n across all arrays passed in the list. If set
        to 'within', the columns will be z-scored within each list that is
        passed. If set to 'row', each row of the input data will be z-scored.
        If set to False, the input data will be returned with no z-scoring.

    reduce : str or dict
        Decomposition/manifold learning model to use. Models supported: PCA,
        IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
        FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
        TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
        passed as a string, but for finer control of the model parameters, pass
        as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
        See scikit-learn specific model docs for details on parameters supported
        for each model.

    ndims : int
        Number of dimensions to reduce

    align : str or dict
        If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
        hyperalignment. If 'SRM', alignment algorithm will be shared response
        model. You can also pass a dictionary for finer control, where the 'model'
        key is a string that specifies the model and the params key is a dictionary
        of parameter values (default : 'hyper').

    Returns
    ----------
    data : Numpy Array
        Example data
    """
    # IDIOM: use endswith/membership rather than slicing and .keys()
    if dataset.endswith('.geo'):
        # saved DataGeometry file: deserialize and rehydrate container types
        geo = dd.io.load(dataset)
        if 'dtype' in geo:
            if 'list' in geo['dtype']:
                geo['data'] = list(geo['data'])
            elif 'df' in geo['dtype']:
                geo['data'] = pd.DataFrame(geo['data'])
            geo['xform_data'] = list(geo['xform_data'])
        data = DataGeometry(**geo)
    elif dataset in datadict:
        # known example dataset: fetch from cache (downloading if needed)
        data = _load_data(dataset, datadict[dataset])
    else:
        raise RuntimeError('No data loaded. Please specify a .geo file or '
                           'one of the following sample files: weights, '
                           'weights_avg, weights_sample, spiral, mushrooms, '
                           'wiki, nips or sotus.')
    if data is not None:
        if dataset in ('wiki_model', 'nips_model', 'sotus_model'):
            # pre-trained sklearn pipelines are returned as-is
            return data
        if isinstance(data, DataGeometry):
            if any([reduce, ndims, align, normalize]):
                # re-analyze and re-plot with the requested transforms
                from ..plot.plot import plot
                if ndims:
                    if reduce is None:
                        reduce = 'IncrementalPCA'
                d = analyze(data.get_data(), reduce=reduce, ndims=ndims,
                            align=align, normalize=normalize)
                return plot(d, show=False)
            else:
                return data
        else:
            return analyze(data, reduce=reduce, ndims=ndims, align=align,
                           normalize=normalize)
def _load_data(dataset, fileid):
    """Return a sample dataset from the local cache, downloading on miss.

    :param dataset: dataset name (also the cache filename)
    :param fileid: Google Drive file id used for the download
    :raises ValueError: if downloading or reading the cached file fails
    """
    fullpath = os.path.join(homedir, 'hypertools_data', dataset)
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    if not os.path.exists(fullpath):
        # cache miss: download then read back
        try:
            _download(dataset, _load_stream(fileid))
            data = _load_from_disk(dataset)
        # BUGFIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch Exception and chain the cause for debuggability
        except Exception as e:
            raise ValueError('Download failed.') from e
    else:
        # cache hit: fall back to a fresh download if the cached file is bad
        try:
            data = _load_from_disk(dataset)
        except Exception:
            try:
                _download(dataset, _load_stream(fileid))
                data = _load_from_disk(dataset)
            except Exception as e:
                raise ValueError('Download failed. Try deleting cache data in'
                                 ' /Users/homedir/hypertools_data.') from e
    return data
def _load_stream(fileid):
    """Open a streaming download for a Google Drive file, confirming the
    large-file warning interstitial when Drive issues one.

    :param fileid: Google Drive file id
    :return: the (possibly re-requested) requests.Response stream
    """
    def _get_confirm_token(response):
        # Drive sets a 'download_warning*' cookie when it wants an extra
        # confirmation round-trip for large files
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None

    # BUGFIX: removed dead `url = BASE_URL + fileid` — it was never used and
    # naive concatenation onto a URL that already has a query string would be
    # malformed anyway; requests builds the URL from `params` below.
    session = requests.Session()
    response = session.get(BASE_URL, params={'id': fileid}, stream=True)
    token = _get_confirm_token(response)
    if token:
        params = {'id': fileid, 'confirm': token}
        response = session.get(BASE_URL, params=params, stream=True)
    return response
def _download(dataset, data):
    """Write a downloaded response body into the local data cache."""
    target = os.path.join(homedir, 'hypertools_data', dataset)
    with open(target, 'wb') as fh:
        fh.write(data.content)
def _load_from_disk(dataset):
    """Read a cached dataset from ~/hypertools_data and return it.

    Models are pickled sklearn objects; everything else is a deepdish/hdf5
    dict rehydrated into a DataGeometry.
    """
    fullpath = os.path.join(homedir, 'hypertools_data', dataset)
    if dataset in ('wiki_model', 'nips_model', 'sotus_model',):
        try:
            with open(fullpath, 'rb') as f:
                return pickle.load(f)
        except ValueError as e:
            # NOTE(review): the error is printed and None is returned
            # silently — confirm callers expect this best-effort behavior
            print(e)
    else:
        with warnings.catch_warnings():
            # deepdish can emit noisy warnings on load; suppress them
            warnings.filterwarnings("ignore")
            geo = dd.io.load(fullpath)
            # rehydrate container types recorded in the 'dtype' field
            if 'dtype' in geo:
                if 'list' in geo['dtype']:
                    geo['data'] = list(geo['data'])
                elif 'df' in geo['dtype']:
                    geo['data'] = pd.DataFrame(geo['data'])
                geo['xform_data'] = list(geo['xform_data'])
            return DataGeometry(**geo)
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Aggregate simple PRM contribution from the project level to the PRM zone level
for each period.
"""
from __future__ import print_function
from builtins import next
from builtins import str
import csv
import os.path
from pyomo.environ import Expression, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.db_interface import setup_results_import
from gridpath.auxiliary.dynamic_components import prm_balance_provision_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    Aggregate project-level simple PRM contributions to the PRM-zone level
    for each period and register the total as a PRM balance provision
    component.

    :param m: Pyomo model
    :param d: dynamic components container
    :param scenario_directory:
    :param subproblem:
    :param stage:
    :return:
    """
    def total_prm_provision_rule(mod, z, p):
        """
        Sum the simple PRM contribution of all projects in PRM zone z that
        are operational in period p.

        :param mod:
        :param z: PRM zone
        :param p: period
        :return:
        """
        return sum(
            mod.PRM_Simple_Contribution_MW[g, p]
            for g in mod.PRM_PROJECTS_BY_PRM_ZONE[z]
            if (g, p) in mod.PRM_PRJ_OPR_PRDS
        )

    m.Total_PRM_Simple_Contribution_MW = Expression(
        m.PRM_ZONE_PERIODS_WITH_REQUIREMENT, rule=total_prm_provision_rule
    )

    # Register the zonal total as a PRM balance provision component
    # (the previous comment mentioning "emission imports to carbon balance"
    # was a copy-paste from another module)
    getattr(d, prm_balance_provision_components).append(
        "Total_PRM_Simple_Contribution_MW"
    )
def export_results(scenario_directory, subproblem, stage, m, d):
    """
    Write the zonal simple-PRM (ELCC) totals to
    <scenario>/<subproblem>/<stage>/results/prm_elcc_simple.csv.

    :param scenario_directory:
    :param subproblem:
    :param stage:
    :param m: solved Pyomo model
    :param d:
    :return:
    """
    results_path = os.path.join(
        scenario_directory,
        str(subproblem),
        str(stage),
        "results",
        "prm_elcc_simple.csv",
    )
    with open(results_path, "w", newline="") as results_file:
        writer = csv.writer(results_file)
        writer.writerow(["prm_zone", "period", "elcc_mw"])
        for zone, period in m.PRM_ZONE_PERIODS_WITH_REQUIREMENT:
            writer.writerow(
                [zone, period,
                 value(m.Total_PRM_Simple_Contribution_MW[zone, period])]
            )
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Read prm_elcc_simple.csv and import it into results_system_prm via a
    temporary table (for deterministic ordering).

    :param scenario_id:
    :param c: database cursor
    :param db: database connection
    :param results_directory:
    :param quiet: suppress progress printing when True
    :return:
    """
    if not quiet:
        print("system prm simple elcc")
    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db,
        cursor=c,
        table="results_system_prm",
        scenario_id=scenario_id,
        subproblem=subproblem,
        stage=stage,
    )

    # Load results into the temporary table
    results = []
    with open(
        os.path.join(results_directory, "prm_elcc_simple.csv"), "r"
    ) as emissions_file:
        reader = csv.reader(emissions_file)

        next(reader)  # skip header
        for row in reader:
            prm_zone = row[0]
            period = row[1]
            elcc = row[2]

            results.append((scenario_id, prm_zone, period, subproblem, stage, elcc))

    insert_temp_sql = """
        INSERT INTO
        temp_results_system_prm{}
        (scenario_id, prm_zone, period, subproblem_id, stage_id,
        elcc_simple_mw)
        VALUES (?, ?, ?, ?, ?, ?);""".format(
        scenario_id
    )
    # retry-on-lock insert of all rows into the temp table
    spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)

    # Insert sorted results into permanent results table
    insert_sql = """
        INSERT INTO results_system_prm
        (scenario_id, prm_zone, period, subproblem_id, stage_id, elcc_simple_mw)
        SELECT scenario_id, prm_zone, period, subproblem_id, stage_id, elcc_simple_mw
        FROM temp_results_system_prm{}
        ORDER BY scenario_id, prm_zone, period, subproblem_id, stage_id;
        """.format(
        scenario_id
    )
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(), many=False)
|
import copy
from datetime import datetime
from typing import Any, Dict, List, Optional

import pandas as pd

from . import api
class SearchAdsReporter:
    """Pulls daily-granularity reports from the Apple Search Ads API as
    pandas DataFrames.

    Entity-level reports (search terms, keywords, creative sets, ad groups)
    are fetched one campaign at a time and annotated with the parent
    campaign's metadata columns.
    """

    # API client used for all report requests
    api: api.SearchAdsAPI
    # when True, the API client logs request/response details
    verbose: bool = False
    # Apple Search Ads expects ISO dates for startTime/endTime
    _date_format = '%Y-%m-%d'
    # campaign metadata columns copied onto every per-campaign report
    _CAMPAIGN_METADATA_COLUMNS = ('campaignName', 'campaignStatus', 'adamId', 'appName')

    def __init__(self, api):
        self.api = api

    def get_searchterms_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]] = None) -> pd.DataFrame:
        """Search-terms report across all campaigns in the date range."""
        # search terms require returnRecordsWithNoMetrics=False and are
        # ordered by keywordId rather than modificationTime
        overrides = {
            'returnRecordsWithNoMetrics': False,
            'selector': {
                'orderBy': [{
                    'field': 'keywordId',
                    'sortOrder': 'DESCENDING'
                }],
                'pagination': {
                    'limit': 5000,
                }
            }
        }
        return self._per_campaign_report(
            'searchterms', start_date, end_date, columns, body_overrides=overrides)

    def get_keywords_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]] = None) -> pd.DataFrame:
        """Keyword-level report across all campaigns in the date range."""
        return self._per_campaign_report('keywords', start_date, end_date, columns)

    def get_creative_sets_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]] = None) -> pd.DataFrame:
        """Creative-set report across all campaigns in the date range."""
        return self._per_campaign_report('creativesets', start_date, end_date, columns)

    def get_adgroups_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]] = None) -> pd.DataFrame:
        """Ad-group report across all campaigns in the date range."""
        return self._per_campaign_report('adgroups', start_date, end_date, columns)

    def get_campaigns_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]] = None) -> pd.DataFrame:
        """Campaign-level report for the account."""
        body = self.report_request_body(start_date, end_date)
        response = self.api.post(endpoint='reports/campaigns', data=body, verbose=self.verbose)
        return self._convert_response_to_data_frame(response=response, columns=columns)

    def _per_campaign_report(self, entity: str, start_date: datetime, end_date: datetime,
                             columns: Optional[List[str]],
                             body_overrides: Optional[Dict[str, Any]] = None) -> pd.DataFrame:
        """Fetch reports/campaigns/{id}/{entity} for every campaign and stack
        the results, annotated with campaign metadata.

        (Consolidates the four formerly copy-pasted get_*_report bodies.)
        """
        campaigns_report = self.get_campaigns_report(
            start_date=start_date, end_date=end_date, columns=columns)
        if campaigns_report.empty:
            return pd.DataFrame()
        frames = []
        for campaign_id in campaigns_report.campaignId.unique():
            body = self.report_request_body(
                start_date, end_date, body_overrides=body_overrides or {})
            response = self.api.post(
                endpoint=f'reports/campaigns/{campaign_id}/{entity}',
                data=body, verbose=self.verbose)
            report = self._convert_response_to_data_frame(response=response, columns=columns)
            campaign_rows = campaigns_report.loc[campaigns_report.campaignId == campaign_id]
            report['campaignId'] = campaign_id
            for column in self._CAMPAIGN_METADATA_COLUMNS:
                report[column] = campaign_rows[column].unique()[0]
            frames.append(report)
        # DataFrame.append was removed in pandas 2.0; concat is the supported path
        df = pd.concat(frames, sort=False)
        df.reset_index(drop=True, inplace=True)
        return df

    def report_request_body(self, start_date: datetime, end_date: datetime,
                            body_overrides: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Default report request body; top-level keys may be overridden.

        (body_overrides previously used a mutable `{}` default — replaced
        with a None sentinel; passing None now also works.)
        """
        body = {
            'startTime': start_date.strftime(self._date_format),
            'endTime': end_date.strftime(self._date_format),
            'timeZone': 'UTC',
            'granularity': 'DAILY',
            'returnRowTotals': False,
            'returnRecordsWithNoMetrics': True,
            'selector': {
                'orderBy': [{
                    'field': 'modificationTime',
                    'sortOrder': 'DESCENDING'
                }],
                'pagination': {
                    'limit': 5000,
                }
            }
        }
        body.update(body_overrides or {})
        return body

    def _convert_response_to_data_frame(self, response: Dict[str, Any],
                                        columns: Optional[List[str]]) -> pd.DataFrame:
        """Flatten a reporting API response into a DataFrame, one row per
        (entity, day), restricted to `columns` when given."""
        pagination = response['pagination']
        if pagination is not None:
            total_results = pagination['totalResults']
            items_per_page = pagination['itemsPerPage']
            # this client fetches a single page only
            if total_results > items_per_page:
                error_string = f'totalResults ({total_results}) is greater than itemsPerPage ({items_per_page})'
                raise ValueError('Apple Search Ads reporting pagination is not supported', error_string)
        try:
            reporting_data = response['data']['reportingDataResponse']['row']
        except (KeyError, TypeError) as e:
            # previously a bare `except:`; narrowed and cause-chained
            raise ValueError('Unexpected response', response) from e
        output = []
        for row in reporting_data:
            base = {}
            if 'metadata' in row:
                base.update(row['metadata'])
            if 'app' in base:
                # flatten the nested app object into scalar columns
                base['adamId'] = base['app']['adamId']
                base['appName'] = base['app']['appName']
                del base['app']
            if 'servingStateReasons' in base and type(base['servingStateReasons']) is list:
                base['servingStateReasons'] = ','.join(base['servingStateReasons'])
            if columns is not None:
                base = {key: base[key] for key in base if key in columns}
            for granularity in row['granularity']:
                final_row = copy.copy(base)
                final_row.update(granularity)
                if columns is not None:
                    final_row = {key: final_row[key] for key in final_row if key in columns}
                final_row = _convert_to_float_all_amounts_in_row(final_row)
                if columns is not None and 'original_currency' not in columns:
                    # presumably added by _convert_to_float_all_amounts_in_row
                    # — TODO confirm; dropped unless explicitly requested
                    del final_row['original_currency']
                output.append(final_row)
        df = pd.DataFrame(output)
        if columns is not None:
            df = df[columns]
        return df
def _amount_to_float(amount):
return float(amount['amount'])
def _convert_to_float_all_amounts_in_row(row):
_row = copy.copy(row)
currencies = {}
for field_name, value in _row.items():
if isinstance(value, dict) and 'currency' in value:
if value['currency'] not in currencies.values():
currencies[field_name] = value['currency']
_row[field_name] = _amount_to_float(value)
if len(currencies) > 1:
raise ValueError('Report row includes different currencies', currencies)
else:
_row['original_currency'] = list(currencies.values())[0] if currencies else None
return _row |
# Package version string (major.minor.patch).
__VERSION__ = '0.4.1'
|
{# Logit estimation template: overrides the estimation_method block of
   estimation.py with a statsmodels logistic regression fitted from the
   injected `formula` variable. #}
{% extends 'estimation.py' %}
{% block estimation_method %}
model = smf.logit(formula="{{ formula }}", data=df).fit()
{% endblock %}
if __name__ == '__main__':
    import keras as ks
    import numpy as np
    from agents.deep_sarsa import DeepSarsa
    from environments.flappybird import FlappyBird
    from q_network_sarsa_lambda import QNetworkSL
    # Q-network: 8 state features in, one linear Q-value per action (2 actions) out.
    neural_network = ks.models.Sequential()
    neural_network.add(ks.layers.Dense(150, activation='relu', input_shape=(8,)))
    neural_network.add(ks.layers.Dense(50, activation='relu'))
    neural_network.add(ks.layers.Dense(2, activation='linear'))
    # NOTE(review): `lr` is the legacy Keras optimizer argument name; current
    # Keras expects `learning_rate` — confirm against the pinned Keras version.
    neural_network.compile(optimizer=ks.optimizers.Adam(lr=0.001),
                           loss='mse')
    width, height = size = (288, 512)
    env = FlappyBird(size)
    actions = env.valid_actions()
    def normalize_state(s):
        # Scale the raw FlappyBird state dict into a (1, 8) feature row:
        # positions are divided by the screen dimensions so most features are
        # roughly unit-range (velocity is passed through unscaled).
        o = np.zeros(shape=(1, 8))
        o[0, 0] = s.state['player_y'] / height
        o[0, 1] = s.state['player_vel']
        o[0, 2] = s.state['next_pipe_dist_to_player'] / width
        o[0, 3] = s.state['next_pipe_top_y'] / (height / 2)
        o[0, 4] = s.state['next_pipe_bottom_y'] / (height / 2)
        o[0, 5] = s.state['next_next_pipe_dist_to_player'] / width
        o[0, 6] = s.state['next_next_pipe_top_y'] / (height / 2)
        o[0, 7] = s.state['next_next_pipe_bottom_y'] / (height / 2)
        return o
    # SARSA(lambda) Q-network wrapper around the Keras model.
    dqn = QNetworkSL(neural_network, actions, normalize_state,
                     lambd=0.9,
                     gamma=0.9,
                     reward_factor=0.1,
                     fixed_length=100,
                     lambda_min=1e-4
                     )
    # Epsilon-greedy deep SARSA learner with a small replay memory.
    dql = DeepSarsa(env, dqn,
                    epsilon=0.05,
                    epsilon_step_factor=0.99995,
                    epsilon_min=0.05,
                    replay_memory_size=1000
                    )
    q = dql.learn()
|
import numpy as np
import cv2
import Person
import time
# Entry/exit counters
cnt_up = 0
cnt_down = 0
# Video source (use 0 for a live camera)
#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('peopleCounter.avi')
# Video properties
##cap.set(3, 160) #Width
##cap.set(4, 120) #Height
# Print the capture properties to the console
for i in range(19):
    print(i, cap.get(i))
w = cap.get(3)
h = cap.get(4)
frameArea = h*w
#areaTH = frameArea/250 # area threshold above which a blob is treated as a person
areaTH = 1500
print('Area Threshold', areaTH)
# Entry/exit decision lines (fractions of the frame height)
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
print("Red line y:", str(line_down))
print("Blue line y:", str(line_up))
line_down_color = (255, 0, 0)
line_up_color = (0, 0, 255)
pt1 = [0, line_down];
pt2 = [w, line_down];
pts_L1 = np.array([pt1, pt2], np.int32) # lower decision line
pts_L1 = pts_L1.reshape((-1, 1, 2))
pt3 = [0, line_up];
pt4 = [w, line_up];
pts_L2 = np.array([pt3, pt4], np.int32) # upper decision line
pts_L2 = pts_L2.reshape((-1, 1, 2))
pt5 = [0, up_limit];
pt6 = [w, up_limit];
pts_L3 = np.array([pt5, pt6], np.int32)
pts_L3 = pts_L3.reshape((-1, 1, 2))
pt7 = [0, down_limit];
pt8 = [w, down_limit];
pts_L4 = np.array([pt7,pt8], np.int32)
pts_L4 = pts_L4.reshape((-1, 1, 2))
# Background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)
# Structuring elements for the morphological filters
kernelOp = np.ones((3, 3), np.uint8)
kernelOp2 = np.ones((5, 5), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)
# Variables
font = cv2.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
while(cap.isOpened()):
##for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # Read one frame from the video source
    ret, frame = cap.read()
##    frame = image.array
    for i in persons:
        i.age_one() #age every person one frame
    #########################
    #   PRE-PROCESSING      #
    #########################
    # Apply background subtraction
    fgmask = fgbg.apply(frame)
    fgmask2 = fgbg.apply(frame)
    try:
        # Binarize to drop shadows (gray pixels)
        ret, imBin= cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
        ret, imBin2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)
        # Opening (erode->dilate) to remove noise
        mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
        mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions
        mask = cv2.morphologyEx(mask , cv2.MORPH_CLOSE, kernelCl)
        mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernelCl)
    except:
        # cap.read() returned no frame (end of stream): report totals and stop.
        print('EOF')
        print('UP:', cnt_up)
        print('DOWN:', cnt_down)
        break
    #################
    #   CONTOURS    #
    #################
    # RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
    # NOTE(review): the 3-tuple unpacking matches OpenCV 3.x; OpenCV 4.x
    # returns (contours, hierarchy) — confirm the installed version.
    _, contours0, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours0:
        area = cv2.contourArea(cnt)
        if area > areaTH:
            #################
            #   TRACKING    #
            #################
            # TODO: still missing handling for multiple people and for
            # objects entering/leaving the screen.
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            x, y, w, h = cv2.boundingRect(cnt)
            new = True
            if cy in range(up_limit, down_limit):
                for i in persons:
                    if abs(cx-i.getX()) <= w and abs(cy-i.getY()) <= h:
                        # The blob is close to an already-tracked person: same one
                        new = False
                        i.updateCoords(cx, cy) # update the object's coordinates and reset its age
                        if i.going_UP(line_down, line_up) == True:
                            cnt_up += 1;
                            print("ID:", i.getId(), 'crossed going up at', time.strftime("%c"))
                        elif i.going_DOWN(line_down, line_up) == True:
                            cnt_down += 1;
                            print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
                        break
                    if i.getState() == '1':
                        if i.getDir() == 'down' and i.getY() > down_limit:
                            i.setDone()
                        elif i.getDir() == 'up' and i.getY() < up_limit:
                            i.setDone()
                    if i.timedOut():
                        # Remove i from the persons list
                        # NOTE(review): popping from the list being iterated can
                        # skip the following element — confirm this is acceptable.
                        index = persons.index(i)
                        persons.pop(index)
                        del i # free i
                if new == True:
                    p = Person.MyPerson(pid, cx, cy, max_p_age)
                    persons.append(p)
                    pid += 1
            #################
            #   DRAWING     #
            #################
            cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
            img = cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            #cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
    #END for cnt in contours0
    #########################
    #  DRAW TRAJECTORIES    #
    #########################
    for i in persons:
##        if len(i.getTracks()) >= 2:
##            pts = np.array(i.getTracks(), np.int32)
##            pts = pts.reshape((-1,1,2))
##            frame = cv2.polylines(frame,[pts],False,i.getRGB())
##        if i.getId() == 9:
##            print str(i.getX()), ',', str(i.getY())
        cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)
    #################
    #   IMAGES      #
    #################
    str_up = 'UP: ' + str(cnt_up)
    str_down = 'DOWN: ' + str(cnt_down)
    frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
    frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
    frame = cv2.polylines(frame, [pts_L3], False, (255,255,255), thickness=1)
    frame = cv2.polylines(frame, [pts_L4], False, (255,255,255), thickness=1)
    cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
    cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow('Frame', frame)
    #cv2.imshow('Mask',mask)
    # press ESC to exit
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
#END while(cap.isOpened())
#################
#   CLEANUP     #
#################
cap.release()
cv2.destroyAllWindows()
import warnings
import locale
import re
import calendar
from contextlib import contextmanager, suppress
from datetime import datetime, timedelta
# from pipeline.util import Dimension
import urllib.parse
# Fuzziness applied to "circa"/"before"/"after" dates.
CIRCA = 5 # years
CIRCA_D = timedelta(days=365*CIRCA)
# Matches fractional ownership shares like "1/2".
share_re = re.compile("([0-9]+)/([0-9]+)")
# Maps vernacular/variant country spellings — including misspellings that
# actually occur in the data (e.g. 'Deutschalnd', 'Céska republika') — to a
# canonical English country name.
_COUNTRY_NAMES = {
	'Algeria': 'Algeria',
	'Argentina': 'Argentina',
	'Armenia': 'Armenia',
	'Australia': 'Australia',
	'Austria': 'Austria',
	'Oesterreich': 'Austria',
	'Österreich': 'Austria',
	'Belgium': 'Belgium',
	'België': 'Belgium',
	'Belgique': 'Belgium',
	'Brazil': 'Brazil',
	'Brasil': 'Brazil',
	'Canada': 'Canada',
	'Czech Republic': 'Czech Republic',
	'Ceska Republika': 'Czech Republic',
	'Ceská Republika': 'Czech Republic',
	'Céska republika': 'Czech Republic',
	'Céska Republika': 'Czech Republic',
	'Cuba': 'Cuba',
	'Denmark': 'Denmark',
	'Danmark': 'Denmark',
	'Germany': 'Germany',
	'Deutschalnd': 'Germany',
	'Deutschland': 'Germany',
	'Duetschland': 'Germany',
	'Ireland': 'Ireland',
	'Eire': 'Ireland',
	'England': 'England',
	'Spain': 'Spain',
	'España': 'Spain',
	'Espana': 'Spain',
	'Espagne': 'Spain',
	'France': 'France',
	'Great Britain': 'Great Britain',
	'Hungary': 'Hungary',
	'Magyarorszag': 'Hungary',
	'Magyarország': 'Hungary',
	'India': 'India',
	'Israel': 'Israel',
	'Italy': 'Italy',
	'Italia': 'Italy',
	'Japan': 'Japan',
	'Latvija': 'Latvia',
	'Liechtenstein': 'Liechtenstein',
	'Luxembourg': 'Luxembourg',
	'México': 'Mexico',
	'Mexico': 'Mexico',
	'Netherlands': 'Netherlands',
	'Nederland': 'Netherlands',
	'New Zealand': 'New Zealand',
	'Norway': 'Norway',
	'Norge': 'Norway',
	'Poland': 'Poland',
	'Polska': 'Poland',
	'Portugal': 'Portugal',
	'Romania': 'Romania',
	'Russia': 'Russia',
	'Rossiya': 'Russia',
	'Switzerland': 'Switzerland',
	'Schweiz': 'Switzerland',
	'Suisse': 'Switzerland',
	'Scotland': 'Scotland',
	'Slovakia': 'Slovakia',
	'South Africa': 'South Africa',
	'Finland': 'Finland',
	'Suomen': 'Finland',
	'Sweden': 'Sweden',
	'Sverige': 'Sweden',
	'United Kingdom': 'United Kingdom',
	'UK': 'United Kingdom',
	'Ukraine': 'Ukraine',
	'Ukraïna': 'Ukraine',
	'United States of America': 'USA',
	'USA': 'USA',
	'US': 'USA',
	'Wales': 'Wales',
}
# These are the current countries found in the PIR data
_COUNTRIES = set(_COUNTRY_NAMES.keys())
# US state/territory postal abbreviations mapped to full names (used to
# recognise and expand the middle component of "City, ST, USA" strings).
_US_STATES = {
	'AK': 'Alaska',
	'AL': 'Alabama',
	'AR': 'Arkansas',
	'AS': 'American Samoa',
	'AZ': 'Arizona',
	'CA': 'California',
	'CO': 'Colorado',
	'CT': 'Connecticut',
	'DC': 'District of Columbia',
	'D.C.': 'District of Columbia', # Some of the data uses syntax with dots for DC
	'DE': 'Delaware',
	'FL': 'Florida',
	'GA': 'Georgia',
	'GU': 'Guam',
	'HI': 'Hawaii',
	'IA': 'Iowa',
	'ID': 'Idaho',
	'IL': 'Illinois',
	'IN': 'Indiana',
	'KS': 'Kansas',
	'KY': 'Kentucky',
	'LA': 'Louisiana',
	'MA': 'Massachusetts',
	'MD': 'Maryland',
	'ME': 'Maine',
	'MI': 'Michigan',
	'MN': 'Minnesota',
	'MO': 'Missouri',
	'MP': 'Northern Mariana Islands',
	'MS': 'Mississippi',
	'MT': 'Montana',
	'NA': 'National',
	'NC': 'North Carolina',
	'ND': 'North Dakota',
	'NE': 'Nebraska',
	'NH': 'New Hampshire',
	'NJ': 'New Jersey',
	'NM': 'New Mexico',
	'NV': 'Nevada',
	'NY': 'New York',
	'OH': 'Ohio',
	'OK': 'Oklahoma',
	'OR': 'Oregon',
	'PA': 'Pennsylvania',
	'PR': 'Puerto Rico',
	'RI': 'Rhode Island',
	'SC': 'South Carolina',
	'SD': 'South Dakota',
	'TN': 'Tennessee',
	'TX': 'Texas',
	'UT': 'Utah',
	'VA': 'Virginia',
	'VI': 'Virgin Islands',
	'VT': 'Vermont',
	'WA': 'Washington',
	'WI': 'Wisconsin',
	'WV': 'West Virginia',
	'WY': 'Wyoming',
}
def _parse_us_location(parts, *, uri_base):
    """Build a city/state/country place hierarchy from a 3-part US location.

    Returns the city-level dict (with nested 'part_of' links), or None if
    *parts* does not have exactly three components. State and city get typed
    nodes and URIs only when the state is a recognised US state; otherwise
    both levels fall back to generic 'Place' nodes without URIs.
    """
    try:
        city_name, state_name, country_name = parts
    except ValueError:
        return None
    recognized = state_name in _US_STATES or state_name in _US_STATES.values()
    if recognized:
        state_name = _US_STATES.get(state_name, state_name)
        state_uri = (f'{uri_base}PLACE,COUNTRY-'
                     + urllib.parse.quote(country_name)
                     + ',' + urllib.parse.quote(state_name))
        city_uri = state_uri + ',' + urllib.parse.quote(city_name)
        state_type, city_type = 'State', 'City'
    else:
        # Not a recognized state, so fall back to just a general place
        state_uri = city_uri = None
        state_type = city_type = 'Place'
    country = {
        'type': 'Country',
        'name': country_name,
        'uri': f'{uri_base}PLACE,COUNTRY-' + urllib.parse.quote(country_name),
    }
    state = {
        'type': state_type,
        'name': state_name,
        'uri': state_uri,
        'part_of': country
    }
    city = {
        'type': city_type,
        'name': city_name,
        'uri': city_uri,
        'part_of': state,
    }
    # Drop empty uri/part_of keys so consumers see only populated links.
    for node in (city, state, country):
        for key in ('part_of', 'uri'):
            if key in node and not node[key]:
                del node[key]
    return city
def _parse_uk_location(parts, *, uri_base):
country_name = 'United Kingdom'
if len(parts) == 3 and parts[-2] == 'England':
place_name = parts[0]
return {
# The first component of the triple isn't always a city in UK data
# (e.g. "Burton Constable, England, UK" or "Castle Howard, England, UK")
# so do not assert a type for this level of the place hierarchy.
'name': place_name,
'part_of': {
'type': 'Country',
'name': country_name,
'uri': f'{uri_base}PLACE,COUNTRY-' + urllib.parse.quote(country_name),
}
}
return None
# Country-specific location parsers keyed by canonical country name.
# (Currently only referenced from commented-out code in parse_location.)
_COUNTRY_HANDLERS = {
	'USA': _parse_us_location,
	'United Kingdom': _parse_uk_location,
}
def parse_location_name(value, uri_base=None):
    '''
    Parses a string like 'Los Angeles, CA, USA' or 'Genève, Schweiz'
    and returns a structure that can be passed to `pipeline.linkedart.make_la_place`, or
    `None` if the string cannot be parsed.
    '''
    components = value.split(', ')
    return parse_location(*components, uri_base=uri_base)
def parse_location(*parts, uri_base=None, types=None):
	'''
	Takes a list of hierarchical place names, and returns a structure that can be passed
	to `pipeline.linkedart.make_la_place`.
	If the iterable `types` is given, it supplies the type names of the associated names
	(e.g. `('City', 'Country')`). Otherwise, heuristics are used to guide the parsing.
	'''
	value = ', '.join(parts)
	if uri_base is None:
		uri_base = 'tag:getty.edu,2019:digital:REPLACE-WITH-UUID:pipeline#'
	if types:
		# Explicitly-typed path: walk names and types from the outermost
		# (country) inward, nesting each level under the previous one.
		current = None
		uri_parts = []
		for t, name in zip(reversed(types), reversed(parts)):
			uri = None
			# Only administrative levels get URIs built from the path so far.
			if t.upper() in ('COUNTRY', 'STATE', 'PROVINCE'):
				uri_parts.append(t.upper())
				uri_parts.append(urllib.parse.quote(name))
				uri = f'{uri_base}PLACE,' + '-'.join(uri_parts)
			current = {
				'type': t,
				'name': name,
				'uri': uri,
				'part_of': current
			}
			# Drop empty uri/part_of keys from this level.
			for p in ('part_of', 'uri'):
				if not current[p]:
					del current[p]
		return current
	# Heuristic path: the last component should be a recognised country name
	# (trailing '.'-suffixed text is stripped first, e.g. 'USA.').
	current = None
	country_name = re.sub(r'[.].*$', '', parts[-1])
	country_type = None
	if country_name in _COUNTRIES:
		country_type = 'Country'
		country_name = _COUNTRY_NAMES.get(country_name, country_name)
	else:
		warnings.warn(f'*** Expecting country name, but found unexpected value: {country_name!r}')
		# not a recognized place name format; assert a generic Place with the associated value as a name
		return {'name': value}
	# 	if country_name in _COUNTRY_HANDLERS:
	# 		_parts = list(parts[:-1]) + [country_name]
	# 		loc = _COUNTRY_HANDLERS[country_name](_parts, uri_base=uri_base)
	# 		if loc:
	# 			return loc
	current = {
		'type': country_type,
		'name': country_name,
		'uri': f'{uri_base}PLACE,COUNTRY-' + urllib.parse.quote(country_name),
	}
	if len(parts) == 2:
		# Two components: treat the first as a City directly under the country.
		city_name = parts[0]
		current = {
			'type': 'City',
			'name': city_name,
			'part_of': current
		}
	else:
		# Longer chains: nest each remaining component as an untyped Place.
		for v in reversed(parts[:-1]):
			current = {
				'type': 'Place',
				'name': v,
				'part_of': current
			}
	return current
def share_parse(value):
    """Parse an ownership share like '1/2' into a float, or None on failure."""
    if value is None:
        return None
    match = re.match(r"([0-9]+)/([0-9]+)", value)
    if not match:
        print("Could not parse raw share: %s" % value)
        return None
    numerator, denominator = match.groups()
    return float(numerator) / float(denominator)
def ymd_to_datetime(year, month, day, which="begin"):
    """Format possibly-partial year/month/day values as an ISO datetime string.

    Missing or out-of-range month/day values are clamped to the start of the
    range when which == "begin" and to the end otherwise. Returns None when
    the year itself cannot be coerced to an int. Negative years are rendered
    with a leading '-'.
    """
    def _coerce(value):
        # Best-effort int conversion; None signals "unusable".
        try:
            return int(value)
        except:
            return None

    if not isinstance(year, int):
        year = _coerce(year)
        if year is None:
            # print("DATE CLEAN: year is %r; returning None" % year)
            return None
    if not isinstance(month, int):
        month = _coerce(month)
    if not isinstance(day, int):
        day = _coerce(day)

    begin = (which == "begin")
    if not month or not (1 <= month <= 12):
        month = 1 if begin else 12
    last_day = calendar.monthrange(year, month)[1]
    if not day or not (1 <= day <= last_day):
        day = 1 if begin else last_day

    year_str = "%04d" % abs(year)
    if year < 0:
        year_str = "-" + year_str
    time_part = "00:00:00" if begin else "23:59:59"
    return "%s-%02d-%02dT%s" % (year_str, month, day, time_part)
def date_parse(value, delim):
    """Parse a delimited date or year range into a [begin, end) datetime pair.

    Two components are treated as a year range (a short second year is
    expanded from the first, e.g. '1885/90' -> 1885..1890); three components
    as a full date in either YYYY/MM/DD or DD/MM/YYYY order. Returns None
    (after printing a diagnostic) when the value cannot be parsed.
    """
    bits = value.split(delim)
    if len(bits) == 2:
        start, end = (b.strip() for b in bits)
        if len(end) < 3:
            # Expand an abbreviated second year using the first year's prefix.
            end = "%s%s" % (start[:len(start)-len(end)], end)
        elif len(end) > 4:
            print("Bad range: %s" % value)
            return None
        try:
            return [datetime(int(start), 1, 1), datetime(int(end) + 1, 1, 1)]
        except:
            print("Broken delim: %s" % value)
            return None
    if len(bits) == 3:
        m = int(bits[1])
        if len(bits[0]) == 4:
            y, d = int(bits[0]), int(bits[2])
        else:
            y, d = int(bits[2]), int(bits[0])
        # Zero placeholders mean "unknown": default to 1.
        m = m or 1
        d = d or 1
        if m > 12:
            # Month/day were the other way around: swap them.
            d, m = m, d
        try:
            day_start = datetime(y, m, d)
            return [day_start, day_start + timedelta(days=1)]
        except:
            print("Bad // value: %s" % value)
            return None
    print("broken / date: %s" % value)
    return None
def date_cleaner(value):
	"""Normalize a free-text date string into a [begin, end) datetime pair.

	Returns a two-element list [start, end] of datetimes (end exclusive),
	[start, None] for open-ended 'YYYY-' values, or None when the value
	cannot be parsed. 'circa'/'before'/'after' values are widened by the
	module-level CIRCA fuzz.
	"""
	# FORMATS:
	# CCth
	# YYYY[?]
	# YYYY/MM/DD
	# DD/MM/YYYY
	# ca. YYYY
	# aft[er|.] YYYY
	# bef[ore|.] YYYY
	# YYYY.MM.DD
	# YYYY/(Y|YY|YYYY)
	# YYYY-YY
	# YYY0s
	# YYYY-
	# YYYY Mon
	# YYYY Month DD
	if value:
		# Strip qualifiers and normalise prefix spellings before matching.
		value = value.replace("?",'')
		value = value.replace('est', '')
		value = value.replace("()", '')
		value = value.replace(' or ', '/')
		value = value.strip()
		value = value.replace('by ', 'bef.')
		value = value.replace('c.', 'ca.')
		value = value.replace('CA.', 'ca.')
		value = value.replace('af.', 'aft.')
	if not value:
		return None
	elif value.startswith("|"):
		# Broken? null it out
		return None
	elif len(value) == 4 and value.isdigit():
		# year only
		return [datetime(int(value),1,1), datetime(int(value)+1,1,1)]
	elif value.startswith('v.'):
		# NOTE(review): 'v.' values are stripped but never parsed — this
		# branch always returns None; confirm whether that is intentional.
		value = value[2:].strip()
		return None
	elif value.endswith('s'):
		# 1950s
		if len(value) == 5 and value[:4].isdigit():
			y = int(value[:4])
			return [datetime(y,1,1), datetime(y+10,1,1)]
		else:
			warnings.warn("Bad YYYYs date: %s" % value)
			return None
	elif len(value) == 5 and value[:4].isdigit() and value.endswith('-'):
		# Open-ended 'YYYY-': no end datetime.
		y = int(value[:4])
		return [datetime(y,1,1), None]
	elif value.startswith("ca"):
		# circa x
		value = value[3:].strip()
		if len(value) == 4 and value.isdigit():
			y = int(value)
			return [datetime(y-CIRCA,1,1), datetime(y+CIRCA,1,1)]
		else:
			# Try and parse it
			if value.find('/') > -1:
				val = date_parse(value, '/')
			elif value.find('-') > -1:
				val = date_parse(value, '-')
			else:
				val = None
			if not val:
				warnings.warn("bad circa: %s" % value)
				return None
			# Widen the parsed range by the circa fuzz on both ends.
			val[0] -= CIRCA_D
			val[1] += CIRCA_D
			return val
	elif value.startswith('aft'):
		# after x
		value = value.replace('aft.', '')
		value = value.replace('after ', '')
		value = value.strip()
		try:
			y = int(value)
		except:
			warnings.warn("Bad aft value: %s" % value)
			return None
		return [datetime(y,1,1), datetime(y+CIRCA+1,1,1)] # GRI guideline says that 'after 1900' really means (1900 or later)
	elif value.startswith('bef'):
		value = value.replace('bef.', '')
		value = value.replace('before ', '')
		value = value.strip()
		y = int(value)
		return [datetime(y-CIRCA,1,1), datetime(y+1,1,1)] # GRI guideline says that 'before 1900' really means (up to and including 1900)
	elif len(value) <= 4 and (value.endswith('st') or value.endswith('nd') or value.endswith('rd') or value.endswith('th')):
		# Ordinal century, e.g. '19th' -> 1801..1900 (rendered as 1800..1900 here).
		century = value[:len(value)-2]
		try:
			c = int(century)
		except:
			warnings.warn("Bad century value: %s" % century)
			print(f'{value!r}')
			return None
		year = (c-1) * 100
		start, end = year, year + 100
		if start == 0:
			# datetime cannot represent year 0.
			start = 1
		return [datetime(start,1,1), datetime(end,1,1)]
	elif value.find('/') > -1:
		# year/year or year/month/date
		# 1885/90
		# 07/02/1897
		return date_parse(value, '/')
	elif value.find('.') > -1:
		return date_parse(value, '.')
	elif value.find('-') > -1:
		return date_parse(value, '-')
	elif value.find(';') > -1:
		return date_parse(value, ';')
	else:
		# Month-name formats need the C locale so %B/%b match English names.
		with c_locale(), suppress(ValueError):
			yearmonthday = datetime.strptime(value, '%Y %B %d')
			if yearmonthday:
				return [yearmonthday, yearmonthday+timedelta(days=1)]
		with c_locale(), suppress(ValueError):
			yearmonth = datetime.strptime(value, '%Y %b')
			if yearmonth:
				year = yearmonth.year
				month = yearmonth.month
				maxday = calendar.monthrange(year, month)[1]
				d = datetime(year, month, 1)
				r = [d, d+timedelta(days=maxday)]
				return r
	warnings.warn(f'fell through to: {value!r}')
	return None
@contextmanager
def c_locale():
	"""Temporarily switch the process locale to 'C' (e.g. for English month names).

	The previous locale is saved as the string returned by querying
	``locale.setlocale(locale.LC_ALL)``. The original code saved
	``locale.getlocale()``, whose (lang, encoding) tuple does not capture
	per-category settings and can fail to restore composite locales; the
	query-string form round-trips exactly.
	"""
	saved = locale.setlocale(locale.LC_ALL)
	locale.setlocale(locale.LC_ALL, 'C')
	try:
		yield
	finally:
		locale.setlocale(locale.LC_ALL, saved)
def test_date_cleaner():
	"""Ad-hoc smoke test: run date_cleaner over every distinct birth/death date
	in a local GPI sqlite database and report how many values were tried.

	NOTE(review): depends on a developer-specific absolute path; this is a
	manual harness, not an automated unit test.
	"""
	import sqlite3
	c = sqlite3.connect('/Users/rsanderson/Development/getty/provenance/matt/gpi.sqlite')
	res = c.execute("SELECT DISTINCT person_birth_date from gpi_people")
	x = 0
	for d in res:
		date_cleaner(d[0])
		x += 1
	res = c.execute("SELECT DISTINCT person_death_date from gpi_people")
	for d in res:
		date_cleaner(d[0])
		x += 1
	print("Tried %s dates" % x)
def test_share_parser():
	"""Ad-hoc smoke test: count the distinct joint-ownership share values in a
	local Knoedler sqlite database (the actual parse calls are commented out).

	NOTE(review): depends on a developer-specific absolute path; this is a
	manual harness, not an automated unit test.
	"""
	import sqlite3
	c = sqlite3.connect('/Users/rsanderson/Development/getty/pipeline/data/raw_gpi.sqlite')
	res = c.execute("SELECT DISTINCT joint_own_sh_1 FROM raw_knoedler")
	x = 0
	for s in res:
		x += 1
		# print(share_parse(s[0]))
	res = c.execute("SELECT DISTINCT joint_own_sh_2 FROM raw_knoedler")
	for s in res:
		x += 1
		# print(share_parse(s[0]))
	print("Tried %s shares" % x)
if __name__ == "__main__":
	# Manual invocation: only the share-parser harness is currently enabled.
	# test_date_cleaner()
	test_share_parser()
|
import operator
import copy
from django.shortcuts import render, redirect
from django.views.generic.base import TemplateView
from .forms import SearchForm
from django.contrib.auth import get_user_model
from users.models import Speaker
from django.contrib.auth.decorators import login_required, user_passes_test
from proposals.models import Proposal
from users.models import Speaker
from proposals.forms import BulkSubmit
# Resolve the configured user model once at import time (supports a custom
# AUTH_USER_MODEL).
User = get_user_model()
# Create your views here.
def HomePageView(request):
    """Render the static home page.

    NOTE(review): despite the CamelCase name this is a function-based view,
    not a TemplateView subclass; renaming it would break URLconf references,
    so the name is left as-is.
    """
    return render(request, "home/home.html")
def check_for_organizer(user):
    """Gate helper for ``user_passes_test``: allow only organizer accounts."""
    return user.is_organizer
@login_required
def user_search(request):
    """Search non-organizer users by username substring.

    Renders home/search.html with the bound form, the query string, and the
    matching users. BUG FIX: the original removed organizers from ``results``
    while iterating that same list, which skips the element after each
    removal and can leave organizers in the output; filtering into a new
    list is correct.
    """
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            results = [
                user for user in User.objects.filter(username__icontains=query)
                if not user.is_organizer
            ]
    context = {
        'form': form,
        'query': query,
        'results': results
    }
    return render(request, 'home/search.html',context)
@login_required
def leaderboard(request):
    """Render speakers ranked by total score (published submissions + accepted).

    One fresh dict is built per speaker, replacing the original's shared
    mutable accumulator (which was manually re-created and had inconsistent
    keys between the initial and reset versions). The unused deepcopy-based
    per-column sorts were dead code and are removed; only the total-score
    ordering was ever rendered.
    """
    points = []
    for speaker in Speaker.objects.all():
        published = speaker.proposal_set.all().filter(status='published')
        submitted = published.count()
        accepted = sum(
            1 for proposal in published
            if proposal.proposalstatus.proposal_status == 'accepted'
        )
        points.append({
            'speaker': speaker,
            'score_for_accepted': accepted,
            'score_for_submission': submitted,
            'total': accepted + submitted,
        })
    points.sort(key=operator.itemgetter('total'), reverse=True)
    return render(request, 'home/leaderboard.html', context={ 'user_list': points })
|
from django.conf.urls.defaults import *
# Serve static assets under /statica/ via assetsy's static_serve view.
# NOTE(review): django.conf.urls.defaults and string-based patterns() were
# removed in Django 1.6/1.10 — this URLconf only runs on very old Django.
local_media_url = 'statica'
urlpatterns = patterns('assetsy.django.views',
    url(r'^%s/(?P<path>.*)$' % local_media_url, 'static_serve'),
)
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from collections import defaultdict
from data import data_utils
# Command-line interface for the paradigm-counting script.
parser = argparse.ArgumentParser(description='Reading and processing a large gzip file')
parser.add_argument('--input', type=str, required=True,
                    help='Input path (in a column CONLL UD format)')
parser.add_argument('--output', type=str, required=True, help="Output file name")
# Defaults are real ints: argparse only applies type= to *string* defaults,
# so integer defaults are the unambiguous, self-documenting spelling.
parser.add_argument('--nwords', type=int, default=100000000, required=False,
                    help='How many words to process')
parser.add_argument('--min_freq', type=int, default=5, required=False,
                    help='Minimal frequency of paradigm to be included in the dictionary')
args = parser.parse_args()
# Count occurrences of (form, lemma, upos, feats) paradigms over the corpus,
# stopping after --nwords counted tokens.
nwords = 0
paradigms = defaultdict(int)
for line in data_utils.read(args.input):
    # Skip lines without at least two tab-separated CONLL columns.
    if line.strip() == "" or len(line.split("\t")) < 2:
        continue
    else:
        fields = line.split("\t")
        # Only purely alphabetic word forms contribute paradigms.
        if fields[1].isalpha():
            paradigms[(fields[1], fields[2], fields[3], fields[5])] += 1
        nwords += 1
        if nwords > args.nwords:
            break
# Write paradigms above the frequency threshold, one tab-separated line each.
# The `with` statement closes the file on exit, so the original trailing
# f.close() was redundant and has been removed.
with open(args.output, 'w') as f:
    for p in paradigms:
        if paradigms[p] > args.min_freq:
            f.write("\t".join(el for el in p) + "\t" + str(paradigms[p]) + "\n")
|
# Generated by Django 3.2 on 2021-04-15 22:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``name`` CharField to NewsSource.

    ``default='derp'`` only backfills existing rows during this migration;
    ``preserve_default=False`` means the default is not kept on the model.
    """
    dependencies = [
        ('api', '0002_weathersource_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='newssource',
            name='name',
            field=models.CharField(default='derp', max_length=30),
            preserve_default=False,
        ),
    ]
|
from cupy import zeros, dot, outer, array
from cupy.linalg import cholesky
def calculate_sigmas(x, dim_x, P, kappa):
    """Build the 2*dim_x+1 sigma points for state x with covariance P.

    Row 0 is the mean; rows 1..dim_x are x + rows of the scaled Cholesky
    factor; rows dim_x+1..2*dim_x are x - those rows (Julier kappa scaling).
    """
    sp = zeros((2*dim_x+1, dim_x))
    sp[0] = array(x)
    root = cholesky((dim_x+kappa)*P).T
    sp[1:dim_x+1] = array(x) + root
    # Slice end fixed from 2*dim_x+2 to 2*dim_x+1: the original overran the
    # array and only worked because slice assignment clamps to its length.
    sp[dim_x+1:2*dim_x+1] = array(x) - root
    return sp
def unscented_transform(sp, weights, noise):
    """Recover the weighted mean and covariance of a set of sigma points.

    sp: (n_sigma, n_dim) sigma points; weights: length-n_sigma weights;
    noise: optional additive covariance (or None). Returns (mean, cov).
    """
    x_bar = dot(weights, sp)
    n_sigma, n_dim = sp.shape
    P = zeros((n_dim, n_dim))
    for idx in range(n_sigma):
        deviation = sp[idx] - x_bar
        P += weights[idx] * outer(deviation, deviation)
    if noise is not None:
        P += array(noise)
    return x_bar, P
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 14:46:37 2019
Vetor de cargas equivalentes a distrubuída da placa OK!
Carga de vento OK!!!
@author: markinho
"""
import sympy as sp
import numpy as np
from matplotlib import rcParams
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
import matplotlib.pyplot as plt
def rigidez_portico(E, A, I_z, scL):
    '''
    Build the 6x6 global-axes stiffness matrix of a 2D frame (portico) element.

    E: Young's modulus; A: cross-section area; I_z: second moment of area;
    scL: sequence [sin, cos, length] for the element (see angulos_comprimentos).
    DOF order is (u1, v1, theta1, u2, v2, theta2) in global coordinates.
    '''
    s = scL[0]
    c = scL[1]
    L = scL[2]
    return np.array([[A * E * c**2/L + 12 * E * I_z * s**2/L**3, A * E * s * c/L - 12 * E * I_z * s * c/L**3, - 6 * E * I_z * s/L**2, - A * E * c**2/L - 12 * E * I_z * s**2/L**3, - A * E * s * c/L + 12 * E * I_z * s * c/L**3, - 6 * E * I_z * s/L**2 ],
                     [A * E * s * c/L - 12 * E * I_z * s * c/L**3, A * E * s**2/L + 12 * E * I_z * c**2/L**3, 6 * E * I_z * c/L**2, - A * E * s * c/L + 12 * E * I_z * s * c/L**3, - A * E * s**2/L - 12 * E * I_z * c**2/L**3, 6 * E * I_z * c/L**2 ],
                     [ - 6 * E * I_z * s/L**2, 6 * E * I_z * c/L**2, 4 * E * I_z/L , 6 * E * I_z * s/L**2, - 6 * E * I_z * c/L**2, 2 * E * I_z/L ],
                     [ - A * E * c**2/L - 12 * E * I_z * s**2/L**3, - A * E * s * c/L + 12 * E * I_z * s * c/L**3, 6 * E * I_z * s/L**2, A * E * c**2/L + 12 * E * I_z * s**2/L**3, A * E * s * c/L - 12 * E * I_z * s * c/L**3, 6 * E * I_z * s/L**2],
                     [ - A * E * s * c/L + 12 * E * I_z * s * c/L**3, - A * E * s**2/L - 12 * E * I_z * c**2/L**3, - 6 * E * I_z * c/L**2, A * E * s * c/L - 12 * E * I_z * s * c/L**3, A * E * s**2/L + 12 * E * I_z * c**2/L**3, - 6 * E * I_z * c/L**2],
                     [ - 6 * E * I_z * s/L**2, 6 * E * I_z * c/L**2, 2 * E * I_z/L, 6 * E * I_z * s/L**2, - 6 * E * I_z * c/L**2, 4 * E * I_z/L ]])
def angulos_comprimentos(nos, elementos):
    '''
    Compute the sine, cosine and length of every bar.
    nos: node coordinates, array of [x, y] rows
    elementos: element incidences, array of [start node, end node] rows
    Returns an array with one row per element and [sin, cos, length] columns.
    '''
    geometria = np.zeros((elementos.shape[0], 3))
    inicio = nos[elementos[:, 0]]  # start nodes
    fim = nos[elementos[:, 1]]     # end nodes
    delta = fim - inicio
    geometria[:, 2] = np.sqrt(delta[:, 0]**2 + delta[:, 1]**2)  # length
    geometria[:, 0] = delta[:, 1] / geometria[:, 2]             # sine
    geometria[:, 1] = delta[:, 0] / geometria[:, 2]             # cosine
    return geometria
# Degree-of-freedom numbering per node (free DOFs 0-5, restrained DOFs 6-11).
GL = np.array([[6, 7, 8], [0, 1, 2], [3, 4, 5], [9, 10, 11]])
# Node coordinates (cm).
nos = np.array([ [-470, 0], [-470, 470], [470, 470], [470, 0] ], dtype=float)
# Element incidences (start node, end node).
IE = np.array([ [0, 1], [1, 2], [3, 2] ], dtype=int)
# angles and lengths of each bar
scL = angulos_comprimentos(nos, IE)
# Welded I-section dimensions.
d = 20. #cm
t_w = 1.25 #cm
b_f = 40. #cm
t_f = 1.25 #cm
h = d - 2 * t_f
I_z = b_f*d**3/12 - (b_f-2*t_w)*h**3/12 #cm4
Ar = d*b_f - h*(b_f-2*t_w)
# element stiffness matrices
Ke1 = rigidez_portico(20000, Ar, I_z, scL[0]) #kN/cm2, cm2 and cm4
Ke2 = rigidez_portico(20000, Ar, I_z, scL[1])
Ke3 = rigidez_portico(20000, Ar, I_z, scL[2])
# assemble the collector (element DOF connectivity) table
C = np.zeros((IE.shape[0], GL.size), dtype=int) + (-1) #-1 distinguishes DOF 0 from an empty collector slot
for i in range(2): #2 nodes per element
    for j in range(3): #3 degrees of freedom per node
        for b in range(3): #3 elements in the structure
            # NOTE(review): (i+1)**2 + j - 1 happens to equal 3*i + j for
            # i in {0, 1} (the local DOF index) — confirm this was intended.
            C[b, GL[IE[b,i], j] ] = (i+1)**2 + j - 1 #add 1 to i since python starts at 0
#detrminação de Ku
Ku = np.zeros((6,6)) #6 graus de liberdade livres
for i in range(6):
for j in range(6):
if C[0, i] != -1 and C[0, j] != -1:
Ku[i, j] += Ke1[ C[0, i], C[0, j] ]
if C[1, i] != -1 and C[1, j] != -1:
Ku[i, j] += Ke2[ C[1, i], C[1, j] ]
if C[2, i] != -1 and C[2, j] != -1:
Ku[i, j] += Ke3[ C[2, i], C[2, j] ]
#detrminação de Kr
Kr = np.zeros((6,6)) #6 graus de liberdade livres e 6 graus de liberdade restringidos
for i in range(6):
for j in range(6):
if C[0, i+6] != -1 and C[0, j] != -1:
Kr[i, j] += Ke1[ C[0, i+6], C[0, j] ]
if C[1, i+6] != -1 and C[1, j] != -1:
Kr[i, j] += Ke2[ C[1, i+6], C[1, j] ]
if C[2, i+6] != -1 and C[2, j] != -1:
Kr[i, j] += Ke3[ C[2, i+6], C[2, j] ]
# equivalent nodal forces
# for a beam element
r = sp.Symbol('r')
s = sp.Symbol('s')
l = sp.Symbol('l')
x1 = -l/2
x2 = l/2
u1 = sp.Symbol('u1')
u2 = sp.Symbol('u2')
u3 = sp.Symbol('u3')
u4 = sp.Symbol('u4')
# Coefficient matrix of the cubic (Hermite) displacement field evaluated at
# both element ends (displacement and slope at x1 and x2).
Mat_Coef = sp.Matrix([[1, x1, x1**2, x1**3],
                      [0, 1, 2*x1, 3*x1**2],
                      [1, x2, x2**2, x2**3],
                      [0, 1, 2*x2, 3*x2**2]])
U = sp.Matrix([u1, u2, u3, u4])
Coefs = Mat_Coef.inv() * U
Acte = Coefs[0]
Bcte = Coefs[1]
Ccte = Coefs[2]
Dcte = Coefs[3]
Ns = sp.expand(Acte + Bcte*r + Ccte*r**2 + Dcte*r**3)
# Extract the shape function multiplying each nodal displacement.
N1 = sp.Add(*[argi for argi in Ns.args if argi.has(u1)]).subs(u1, 1)
N2 = sp.Add(*[argi for argi in Ns.args if argi.has(u2)]).subs(u2, 1)
N3 = sp.Add(*[argi for argi in Ns.args if argi.has(u3)]).subs(u3, 1)
N4 = sp.Add(*[argi for argi in Ns.args if argi.has(u4)]).subs(u4, 1)
Nn = sp.Matrix([N1, N2, N3, N4])
## analytic equivalent force for a discontinuous load
#g = sp.Symbol('g')
#r1 = sp.Symbol('r1') # distance from the load to the edge
#r2 = sp.Symbol('r2') # width of the load
#Feg = - g * sp.integrate( Nn, (x, r1, r2) )
# numeric equivalent force for a discontinuous load
g = 300./400 * 9.81/1000 #sp.Symbol('g') # in kN
de = 420. #sp.Symbol('de') # distance from the load to the edge
dp = 400. #sp.Symbol('dp') # width of the load
A = scL[1,2] - de - dp - scL[0,2]
B = scL[1,2]/2 - de
Nnn = Nn.subs({l: scL[1,2]})
Feg = - g * sp.integrate( Nnn, (r, A, B) )
# Distributed load on the upper beam ----------------------------------------------------------------
Lvs = 940 #cm
q = 0.02 #kN/cm
Feq = -q * sp.integrate( Nnn, (r, -Lvs/2, Lvs/2) )
#Feq = np.zeros(6)
##teste com viga em balanço usando ke2
#Kvb = Ke2[:3, :3]
#Fvb = np.array([0, Feg[0], Feg[1]], dtype=float)
#Uvb = np.linalg.solve(Kvb, Fvb)
#xA = -235
#xB = 235
#Lv = xB - xA
##funções de forma com origem no nó inicial -----------------------------------------------------------------
#x1i = 0
#x2i = l
#Mat_Coefi = sp.Matrix([[1, x1i, x1i**2, x1i**3],
# [0, 1, 2*x1i, 3*x1i**2],
# [1, x2i, x2i**2, x2i**3],
# [0, 1, 2*x2i, 3*x2i**2]])
#
#Coefsi = Mat_Coefi.inv() * U
#
#Ai = Coefsi[0]
#Bi = Coefsi[1]
#Ci = Coefsi[2]
#Di = Coefsi[3]
#
#Nsi = sp.expand(Ai + Bi*r + Ci*r**2 + Di*r**3)
#
#N1i = sp.Add(*[argi for argi in Nsi.args if argi.has(u1)]).subs(u1, 1)
#N2i = sp.Add(*[argi for argi in Nsi.args if argi.has(u2)]).subs(u2, 1)
#N3i = sp.Add(*[argi for argi in Nsi.args if argi.has(u3)]).subs(u3, 1)
#N4i = sp.Add(*[argi for argi in Nsi.args if argi.has(u4)]).subs(u4, 1)
#Nni = sp.Matrix([N1i, N2i, N3i, N4i])
##------------------------------------------------------------------------------
#xA = -235
#xB = 235
#Lv = xB - xA
##determinação da força não-linear analítica com as funções de forma no início do elemento
#xA = 0.
#xB = 300.
#Lv = xB - xA
#
#vi = 0.0046587 * x**0.2
#Nvi = sp.expand(Nni * vi)
#Fevi = np.array( sp.integrate(Nvi, (r, xA, xB)).subs({l: Lv}) , dtype=float).flatten()
##com a origem no centro do elemento
#xA = -235
#xB = 235
#lv = xB - xA
#vi = 0.0046587 * (r + sp.Rational(Lv, 2) )**sp.Rational(1, 5)
#Nvi = sp.expand(sp.Matrix([N1.subs({l: lv}), N2.subs({l: lv}), N3.subs({l: lv}), N4.subs({l: lv})]) * vi)
#Fevi = sp.integrate(Nvi, (r, xA, xB)).evalf()
# Result of the commented-out symbolic wind-load integration above,
# pre-computed and hard-coded as the equivalent nodal force vector.
Fevi = -np.array([ 2.78838610441379, 238.280267104451, 3.4575987694731, -262.108293814896])
#Fevi = np.zeros(6)
#TESTANDO:
##Viga analítica com a origem da extremidade do elemento para comparação: em balanço com carga do vento vi com comprimento de 300 cm
#Ev = 20000.
#Av = 10.*40.
#Iv = 10.*40.**3/12.
##resultante equivalente Rvi e centróide xvi
#xA = 0
#xB = 470
#vi = 0.0046587 * r**(0.2) #reescrevendo para considerar a origem na extremidade do elemento!
#Rvi = sp.integrate(vi, (r, xA, xB))
#xvi = sp.integrate(vi*r, (r, xA, xB))/Rvi
##reações de apoio
#RA = Rvi
#MRA = Rvi*xvi
#
##força resultante da carga de vento na seção e centroíde
#Rvix = sp.integrate(vi, (r, xA, x))
#xvix = sp.integrate(vi*r, (r, xA, x))/Rvix
##momento na seção do vão
#Ms = sp.expand(RA*r - MRA - Rvix*(r - xvix))
##rotações da viga
#dMsdx = sp.integrate(Ms, r)
##deflexões
#w = sp.integrate(dMsdx, r)/(Ev*Iv) !!!!!!!!!!!!!!!!!!!!!!!!!ERRADO!!
#dWdx = sp.diff(w, r)
#
#wEX = w.subs({r: Lv}).evalf()
#dWdxEX = dWdx.subs({r: Lv}).evalf()
#
##matriz de rigidez dos elementos 2 nós
#Kevb = rigidez_portico(Ev, Av, Iv, [0, 1, Lv]) #kN/cm2, cm2 e cm4
#Kuvb = Kevb[3:, 3:]
#FeviP = np.array([0, Fevi[0], Fevi[1], 0, Fevi[2], Fevi[3]], dtype=float)
#Fuvb = -FeviP[3:]
#Uvb = np.linalg.solve(Kuvb, Fuvb)
#
##comparativo
#print('w', wEX, 'Uv', Uvb[1])
#print('dWdx', dWdxEX, 'Rv', Uvb[2])
# Determination of the remaining loads as frame loads (element 1 already rotated) ---------------------------------------------------------------------------------------
Fe3 = np.zeros(6)  # element 3 carries no applied load (not referenced below)
# Expand the 4-entry beam force vectors to 6-entry frame vectors (zero axial terms).
Feq = np.array([0, Feq[0], Feq[1], 0, Feq[2], Feq[3]], dtype=float)
Feg = np.array([0, Feg[0], Feg[1], 0, Feg[2], Feg[3]], dtype=float)
Fe2 = Feq + Feg
# Rotation matrix applied to the wind-load vector of element 1.
RFv = np.array([[0, -1, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0],
                [0 , 0 ,0 ,0, -1, 0],
                [0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 1]])
Fevi = np.array([0, Fevi[0], Fevi[1], 0, Fevi[2], Fevi[3]], dtype=float)
Fe1 = np.matmul( RFv, Fevi )
# Determination of the equivalent nodal load vector used to compute the displacements
FU = np.array([Fe1[3] + Fe2[0], Fe1[4] + Fe2[1], Fe1[5] + Fe2[2], Fe2[3], Fe2[4], Fe2[5]], dtype=float)
FR = np.array([Fe1[0], Fe1[1], Fe1[2], 0, 0, 0], dtype=float)
# Determination of the displacements (free DOFs) and support reactions
Un = np.linalg.solve(Ku, FU)
R = np.dot(Kr, Un) - FR
# NOTE: this rebinds U (previously the sympy DOF vector) to the global
# displacement vector: first 6 entries free DOFs, last 6 restrained (zero).
U = np.zeros(12)
U[:6] = Un
# Rewriting the displacements in each element's local coordinate system
ug1 = np.zeros(6)
ug2 = np.zeros(6)
ug3 = np.zeros(6)
for i in range(12):
    # Collector entries >= 0 map global DOF i to a local element DOF.
    if C[0, i] >= 0:
        ug1[C[0, i]] = U[i]
    if C[1, i] >= 0:
        ug2[C[1, i]] = U[i]
    if C[2, i] >= 0:
        ug3[C[2, i]] = U[i]
# Rotation matrix applied to elements 1 and 3 (global -> local axes).
R13 = np.array([[ 0, 1, 0, 0, 0, 0],
                [-1, 0, 0, 0, 0, 0],
                [ 0, 0, 1, 0, 0, 0],
                [ 0, 0, 0, 0, 1, 0],
                [ 0, 0, 0, -1, 0, 0],
                [ 0, 0, 0, 0, 0, 1]])
# NOTE: u1/u2/u3 here rebind the sympy symbols defined earlier to the
# local element displacement vectors.
u1 = np.dot(R13, ug1)
u2 = ug2  # element 2 needs no rotation
u3 = np.dot(R13, ug3)
# Matrix of derivatives of the frame interpolation functions
# (axial part constant, bending part from the beam curvature).
Bv = -s * sp.diff(sp.diff(Nn, r), r)
Bp = sp.Matrix([[-1/l, 0, 0, 1/l, 0, 0], [0, Bv[0], Bv[1], 0, Bv[2], Bv[3]]])
#Nnp = sp.Matrix([ r - 1/l, Nn[0], Nn[1], r + 1/l, Nn[2], Nn[3] ])
#Bv1 = sp.diff(Nn, r)
#Bp1 = sp.Matrix([ -1/l, Bv1[0], Bv1[1], 1/l, Bv1[2], Bv1[3] ])
#Bv2 = sp.diff(Bv1, r)
#Bp2 = sp.Matrix([ -1/l, Bv2[0], Bv2[1], 1/l, Bv2[2], Bv2[3] ])
#Bv3 = sp.diff(Bv2, r)
#Usym = sp.MatrixSymbol('U', 6, 1)
#UMsym = sp.Matrix(Usym)
#UMsymV = UMsym[[1,2,4,5],:]
#
#deslocamentosS = Nnp.T * UMsym
#deformacoesS = Bp1.T * UMsym
##tensoesS = deformacoesS * E
#rotacoesS = Bv1.T * UMsymV
#momentoS = Bv2.T * UMsymV
#cortanteS = Bv3.T * UMsymV
##calculo das deformações, tensões, momento, corte e normal em cada elemento no eixo local ------------------------------------------------------------
#def esP(U, l, E, A, h, I, pontos=100):
# r = np.linspace(-l/2, l/2, pontos)
# U = U[:, np.newaxis]
# deslocamentos = (r - 1/l)*U[0, 0] + (r + 1/l)*U[3, 0] + (1/2 - 3*r/(2*l) + 2*r**3/l**3)*U[1, 0] + (1/2 + 3*r/(2*l) - 2*r**3/l**3)*U[4, 0] + (-l/8 - r/4 + r**2/(2*l) + r**3/l**2)*U[5, 0] + (l/8 - r/4 - r**2/(2*l) + r**3/l**2)*U[2, 0]
# rotacoes = (-3/(2*l) + 6*r**2/l**3)*U[1, 0] + (3/(2*l) - 6*r**2/l**3)*U[4, 0] + (-1/4 - r/l + 3*r**2/l**2)*U[2, 0] + (-1/4 + r/l + 3*r**2/l**2)*U[5, 0]
# momento = (E * I) * ( (-1/l + 6*r/l**2)*U[2, 0] + (1/l + 6*r/l**2)*U[5, 0] + 12*r*U[1, 0]/l**3 - 12*r*U[4, 0]/l**3 )
# cortante = (E * I) * ( 6*U[2, 0]/l**2 + 6*U[5, 0]/l**2 + 12*U[1, 0]/l**3 - 12*U[4, 0]/l**3 )*np.ones(pontos)
# normal = (E * A) * ( U[0,0]*(- 1/l) + U[3, 0]*(1/l) )*np.ones(pontos)
#
# #aborgadem reversa
# tensoes = normal/A + momento/I * h/2
# deformacoes = tensoes/E
#
# return deslocamentos, rotacoes, deformacoes, tensoes, momento, cortante, normal, r
#
#E = 20000. #kN/cm2
#deslocamentos1, rotacoes1, deformacoes1, tensoes1, momentos1, corte1, normal1, varElem1 = esP(u1, scL[0, 2], E, Ar, d, I_z)
#deslocamentos2, rotacoes2, deformacoes2, tensoes2, momentos2, corte2, normal2, varElem2 = esP(u2, scL[1, 2], E, Ar, d, I_z)
#deslocamentos3, rotacoes3, deformacoes3, tensoes3, momentos3, corte3, normal3, varElem3 = esP(u3, scL[2, 2], E, Ar, d, I_z)
# Strains in the elements (element lengths: 470 cm columns, 940 cm beam)
epsilon_1 = Bp.subs({l: 470}) * u1[:, np.newaxis]
epsilonA_1 = epsilon_1[0]  # axial strain
epsilonF_1 = epsilon_1[1]  # bending strain (depends on s)
epsilon_2 = Bp.subs({l: 940}) * u2[:, np.newaxis]
epsilonA_2 = epsilon_2[0]
epsilonF_2 = epsilon_2[1]
epsilon_3 = Bp.subs({l: 470}) * u3[:, np.newaxis]
epsilonA_3 = epsilon_3[0]
epsilonF_3 = epsilon_3[1]
# Stresses in the elements (linear elasticity, sigma = E * epsilon)
E = 20000. #kN/cm2
sigmaA_1 = E*epsilonA_1
sigmaF_1 = E*epsilonF_1
sigmaA_2 = E*epsilonA_2
sigmaF_2 = E*epsilonF_2
sigmaA_3 = E*epsilonA_3
sigmaF_3 = E*epsilonF_3
# Axial (normal) forces from the axial stresses, N = A * sigma
Ap = 143.75 #cm2
N_1 = Ap * sigmaA_1
N_2 = Ap * sigmaA_2
N_3 = Ap * sigmaA_3
# Bending moments in the bars: integral of s*sigma over the cross-section
# (web from -h/2 to h/2 with thickness t_w, flanges from h/2 to h/2+t_f with width b_f)
M1 = 2 * t_w * sp.integrate( s * sigmaF_1, (s, -h/2, h/2 ) ) + 2 * b_f * sp.integrate( s * sigmaF_1, (s, h/2, h/2 + t_f ) )
M2 = 2 * t_w * sp.integrate( s * sigmaF_2, (s, -h/2, h/2 ) ) + 2 * b_f * sp.integrate( s * sigmaF_2, (s, h/2, h/2 + t_f ) )
M3 = 2 * t_w * sp.integrate( s * sigmaF_3, (s, -h/2, h/2 ) ) + 2 * b_f * sp.integrate( s * sigmaF_3, (s, h/2, h/2 + t_f ) )
# Shear force: derivative of the bending moment along the element ----------------------------------------------------
V1 = sp.diff(M1, r)
V2 = sp.diff(M2, r)
V3 = sp.diff(M3, r)
# Plots of the displacements, normal forces, moment and shear
# Truss and beam shape functions
Nt = sp.Matrix([1/2 - r/l, r/l])
Np = Nn
# Split each element's local DOF vector into truss (axial) and beam (bending) parts.
u1t = np.array([u1[0], u1[3]])
u1p = np.array([u1[1], u1[2], u1[4], u1[5]])
u2t = np.array([u2[0], u2[3]])
u2p = np.array([u2[1], u2[2], u2[4], u2[5]])
u3t = np.array([u3[0], u3[3]])
u3p = np.array([u3[1], u3[2], u3[4], u3[5]])
# Interpolated displacement fields along each element.
u1Nt = Nt.T*u1t[:, np.newaxis]
u1Np = Np.T*u1p[:, np.newaxis]
u2Nt = Nt.T*u2t[:, np.newaxis]
u2Np = Np.T*u2p[:, np.newaxis]
u3Nt = Nt.T*u3t[:, np.newaxis]
u3Np = Np.T*u3p[:, np.newaxis]
# Converting the symbolic expressions to numpy-callable python functions
u1Nt = sp.utilities.lambdify([r, l], u1Nt[0], "numpy")
u1Np = sp.utilities.lambdify([r, l], u1Np[0], "numpy")
u2Nt = sp.utilities.lambdify([r, l], u2Nt[0], "numpy")
u2Np = sp.utilities.lambdify([r, l], u2Np[0], "numpy")
u3Nt = sp.utilities.lambdify([r, l], u3Nt[0], "numpy")
u3Np = sp.utilities.lambdify([r, l], u3Np[0], "numpy")
# Evaluation coordinates along the columns (Y) and the beam (X).
Y = np.linspace(-235, 235, 100)
X = np.linspace(-470, 470, 100)
##gráfico dos deslocamentos !!!!!!!!!!!!!!!!!!! MUITO MAL FEITO!!!!!!!!!!!!!!!!!
#escala = 1000
#plt.plot([0, 0, 920, 920], [0, 470, 470, 0], color="gray") #elementos
#plt.scatter([0, 0, 920, 920], [0, 470, 470, 0], s=15, color="gray") #nós
#plt.plot(-u1Np(Y, 470)*escala, u1Nt(Y, 470)*escala + Y + 235, '--', color='blue')
#plt.plot(u2Nt(X, 920)*escala + X - u1Np(Y, 470)[-1]*escala/2 + 470, u2Np(X, 920)*escala + 470, '--', color='blue')
#plt.plot(-u3Np(Y, 470)*escala + 920, u3Nt(Y, 470) + Y + 235, '--', color='blue')
#plt.yticks(np.arange(0, 520, step=20))
#plt.show()
# Normal force diagram (escala_* are plot scale factors)
escala_n = 7
plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") # elements
plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") # nodes
plt.plot(np.ones(100)*N_1*escala_n - 470, Y)
plt.plot(X, np.ones(100)*N_2*escala_n + 235)
plt.plot(np.ones(100)*N_3*escala_n + 470, Y)
plt.show()
# Shear force diagram
escala_v = 30
plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") # elements
plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") # nodes
plt.plot(np.ones(100)*V1*escala_v - 470, Y)
plt.plot(X, np.ones(100)*V2*escala_v + 235)
plt.plot(np.ones(100)*V3*escala_v + 470, Y)
plt.show()
# Bending moment diagram (moments vary along r, so lambdify first)
M1f = sp.utilities.lambdify([r], M1, "numpy")
M2f = sp.utilities.lambdify([r], M2, "numpy")
M3f = sp.utilities.lambdify([r], M3, "numpy")
escala_m = 0.1
plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") # elements
plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") # nodes
plt.plot(-M1f(Y)*escala_m - 470, Y)
plt.plot(X, M2f(X)*escala_m + 235)
plt.plot(-M3f(Y)*escala_m + 470, Y)
plt.show()
###com as funções de forma ----------------------------------------------------------------------------------
#escala_v = 20.
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-normal1*escala_v - 470, varElem1)
#plt.plot(varElem2, normal2*escala_v + 235)
#plt.plot(-normal3*escala_v + 470, varElem3)
#plt.show()
#
#escala_v = 20.
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-corte1*escala_v - 470, varElem1)
#plt.plot(varElem2, corte2*escala_v + 235)
#plt.plot(-corte3*escala_v + 470, varElem3)
#plt.show()
#
#escala_v = 0.1
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-momentos1*escala_v - 470, varElem1)
#plt.plot(varElem2, momentos2*escala_v + 235)
#plt.plot(-momentos3*escala_v + 470, varElem3)
#plt.show() |
def attach_callback(func, callback_name):
    """Wrap *func* so that the matching observer callback fires after every call.

    Args:
        func: The unbound list method to wrap (e.g. ``list.append``).
        callback_name: ``'on_size_change'`` (callback receives the instance)
            or ``'on_item_change'`` (callback receives the instance and the
            first positional argument, i.e. the index being set).

    Returns:
        The wrapped method.

    Raises:
        ValueError: If *callback_name* is not a known callback.
    """
    # Validate eagerly so a typo fails at class-definition time instead of
    # raising on every call of the wrapped method (the original deferred
    # this check into the per-call `finally` block).
    if callback_name not in ('on_size_change', 'on_item_change'):
        raise ValueError("Unknown callback name: {0}".format(callback_name))

    def notify(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        finally:
            # `finally` guarantees observers are notified even when the
            # wrapped operation raises (e.g. remove() on a missing item).
            if callback_name == 'on_size_change':
                self.on_size_change(self)
            else:
                self.on_item_change(self, args[0])
    return notify
class CallbackList(list):
    """A list that notifies per-instance observers after mutating operations.

    Observers are plain callables assigned on the instance:

    * ``on_size_change(lst)`` fires after append/extend/remove/insert/pop/del.
    * ``on_item_change(lst, index)`` fires after ``lst[index] = value``.
    """
    def __init__(self, *args, **kwargs):
        super(CallbackList, self).__init__(*args, **kwargs)
        # Default observers are no-ops; callers overwrite them per instance.
        self.on_size_change = lambda x: None
        self.on_item_change = lambda x, y: None
    # Wrap the built-in list mutators so every mutation fires the
    # corresponding observer callback.
    extend = attach_callback(list.extend, 'on_size_change')
    append = attach_callback(list.append, 'on_size_change')
    remove = attach_callback(list.remove, 'on_size_change')
    insert = attach_callback(list.insert, 'on_size_change')
    pop = attach_callback(list.pop, 'on_size_change')
    __delitem__ = attach_callback(list.__delitem__, 'on_size_change')
    __setitem__ = attach_callback(list.__setitem__, 'on_item_change')
|
#
# This file is part of JKQ QMAP library which is released under the MIT license.
# See file README.md or go to http://iic.jku.at/eda/research/quantum_verification/ for more information.
#
import pickle
from pathlib import Path
from typing import Any, Dict, Union
from .pyqmap import map, Method, InitialLayoutStrategy, LayeringStrategy, Arch
def compile(circ, arch: Union[str, Arch],
            calibration: str = "",
            method: Method = Method.heuristic,
            initial_layout: InitialLayoutStrategy = InitialLayoutStrategy.dynamic,
            layering: LayeringStrategy = LayeringStrategy.individual_gates,
            use_teleportation: bool = False,
            teleportation_fake: bool = False,
            teleportation_seed: int = 0,
            save_mapped_circuit: bool = True,
            csv: bool = False,
            statistics: bool = False,
            verbose: bool = False
            ) -> Dict[str, Any]:
    """Interface to the JKQ QMAP tool for mapping quantum circuits

    :param circ: Path to first circuit file, path to Qiskit QuantumCircuit pickle, or Qiskit QuantumCircuit object
    :param arch: Path to architecture file or one of the available architectures (Arch)
    :type arch: Union[str, Arch]
    :param calibration: Path to file containing calibration information
    :param method: Mapping technique to use (*heuristic* | exact)
    :type method: Method
    :param initial_layout: Strategy to use for determining initial layout (only relevant for heuristic mapper)
    :type initial_layout: InitialLayoutStrategy
    :param layering: Circuit layering strategy to use (*individual_gates* | disjoint_qubits | odd_qubits | qubit_triangle)
    :type layering: LayeringStrategy
    :param use_teleportation: Use teleportation in addition to swaps
    :param teleportation_fake: Assign qubits as ancillary for teleportation in the initial placement but don't actually use them (used for comparisons)
    :param teleportation_seed: Fix a seed for the RNG in the initial ancilla placement (0 means the RNG will be seeded from /dev/urandom/ or similar)
    :param save_mapped_circuit: Include .qasm string of the mapped circuit in result
    :type save_mapped_circuit: bool
    :param csv: Create CSV string for result
    :type csv: bool
    :param statistics: Print statistics
    :type statistics: bool
    :param verbose: Print more detailed information during the mapping process
    :type verbose: bool
    :return: JSON object containing results
    :rtype: Dict[str, Any]
    """
    # isinstance() (rather than `type(...) == str`) also accepts str
    # subclasses; the context manager closes the file handle, which the
    # original bare open() leaked.
    if isinstance(circ, str) and Path(circ).suffix == '.pickle':
        with open(circ, "rb") as pickle_file:
            circ = pickle.load(pickle_file)

    result = map(circ, arch, {
        "calibration": calibration,
        "method": method.name,
        "initialLayout": initial_layout.name,
        "layering": layering.name,
        "use_teleportation": use_teleportation,
        "teleportation_fake": teleportation_fake,
        "teleportation_seed": teleportation_seed,
        "saveMappedCircuit": save_mapped_circuit,
        "csv": csv,
        "statistics": statistics,
        "verbose": verbose
    })

    if "error" in result:
        print(result["error"])

    return result
|
# -*- coding: utf-8 -*-
"""Plugin for chaining Chrome downloads to filesystem and execution events."""
from timesketch.lib.analyzers.chain_plugins import interface
from timesketch.lib.analyzers.chain_plugins import manager
class ChromeDownloadFilesystemChainPlugin(interface.BaseChainPlugin):
    """A plugin to chain Chrome downloads to filesystem events."""

    NAME = 'chromefilesystem'
    DESCRIPTION = (
        'Plugin to chain Chrome download records to corresponding filesystem '
        'events and execution events.')

    # Query selecting the base events (Chrome download records) and the
    # fields the plugin needs from them.
    SEARCH_QUERY = 'data_type:"chrome:history:file_downloaded"'
    EVENT_FIELDS = ['full_path']

    def get_chained_events(self, base_event):
        """Yields an event that is chained or linked to the base event.

        Args:
            base_event: the base event of the chain, used to construct further
                queries (instance of Event).

        Yields:
            An event (instance of Event) object that is linked or chained to
            the base event, according to the plugin.
        """
        target = base_event.source.get('full_path', '')
        if not target:
            return
            # The unreachable `yield` below keeps this function a generator,
            # so an empty 'full_path' produces an empty iterator instead of
            # the function returning None.
            yield  # pylint: disable=W0101

        # Reduce the full path to the bare file name (Windows or POSIX).
        if '\\' in target:
            separator = '\\'
        else:
            separator = '/'
        target = target.split(separator)[-1]

        # TODO: Add more checks here, eg; USB, generic execution, etc.
        # Filesystem events referencing the downloaded file name.
        search_query = (
            '(data_type:"fs:stat" AND filename:"*{0:s}") OR '
            '(data_type:"fs:stat:ntfs" AND name:"{0:s}")').format(
                target)
        return_fields = ['filename', 'path_hints']

        events = self.analyzer_object.event_stream(
            search_query, return_fields=return_fields, scroll=False)
        for event in events:
            yield event

        # Execution events referencing the same file name.
        exec_query = 'executable:"*{0:s}"'.format(target)
        return_fields = ['executable', 'chains']

        events = self.analyzer_object.event_stream(
            exec_query, return_fields=return_fields, scroll=False)
        for event in events:
            yield event
# Make the plugin discoverable by the chain plugins manager.
manager.ChainPluginsManager.register_plugin(ChromeDownloadFilesystemChainPlugin)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core import signals
from indico.core.config import config
from indico.core.db import db
from indico.modules.users import User
def create_user(email, data, identity=None, settings=None, other_emails=None, from_moderation=True):
    """Create a new user.

    This may also convert a pending user to a proper user in case the
    email address matches such a user.

    :param email: The primary email address of the user.
    :param data: The data used to populate the user.
    :param identity: An `Identity` to associate with the user.
    :param settings: A dict containing user settings.
    :param other_emails: A set of email addresses that are also used
                         to check for a pending user. They will also
                         be added as secondary emails to the user.
    :param from_moderation: Whether the user was created through the
                            moderation process or manually by an admin.
    """
    if other_emails is None:
        other_emails = set()
    if settings is None:
        settings = {}
    settings.setdefault('timezone', config.DEFAULT_TIMEZONE)
    settings.setdefault('lang', config.DEFAULT_LOCALE)
    settings.setdefault('suggest_categories', False)
    # Get a pending user if there is one
    user = User.query.filter(~User.is_deleted, User.is_pending,
                             User.all_emails.in_({email} | set(other_emails))).first()
    if not user:
        user = User()
    if email in user.secondary_emails:
        # This can happen if there's a pending user who has a secondary email
        # for some weird reason which should now become the primary email...
        user.make_email_primary(email)
    else:
        user.email = email
    user.populate_from_dict(data, skip={'synced_fields'})
    user.is_pending = False
    user.secondary_emails |= other_emails
    user.favorite_users.add(user)  # every user has themselves as a favorite
    if identity is not None:
        user.identities.add(identity)
    db.session.add(user)
    db.session.flush()  # gives the user an ID, needed below
    user.populate_from_dict(data, keys={'synced_fields'})  # this is a setting, so the user must have an ID
    user.settings.set_multi(settings)
    db.session.flush()
    signals.users.registered.send(user, from_moderation=from_moderation, identity=identity)
    # NOTE(review): final flush presumably writes out changes made by
    # signal receivers before returning -- confirm against the signal users.
    db.session.flush()
    return user
|
import numpy as np
from jina.executors.crafters import BaseCrafter
from jina.flow import Flow
from jina.proto import jina_pb2
class DummyCrafter(BaseCrafter):
    """Crafter whose craft() always raises ZeroDivisionError (for error-path tests)."""
    def craft(self, *args, **kwargs):
        # Deliberate division by zero to simulate a failing executor.
        return 1 / 0
def test_bad_flow(mocker):
    """An abstract crafter in the first pod must report the error on route 'r1'."""
    def validate(req):
        # Routes whose status carries an error code.
        bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
        assert req.status.code == jina_pb2.StatusProto.ERROR
        assert bad_routes[0].pod == 'r1'
    f = (Flow().add(name='r1', uses='!BaseCrafter')
         .add(name='r2', uses='!BaseEncoder')
         .add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wrap=validate)
    on_error_mock_2 = mocker.Mock(wrap=validate)
    # always test two times, make sure the flow still works after it fails on the first
    with f:
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
    on_error_mock.assert_called()
    on_error_mock_2.assert_called()
def test_bad_flow_customized(mocker):
    """A raising custom crafter must surface the exception name on route 'r2'."""
    def validate(req):
        bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
        assert req.status.code == jina_pb2.StatusProto.ERROR
        assert bad_routes[0].pod == 'r2'
        assert bad_routes[0].status.exception.name == 'ZeroDivisionError'
    f = (Flow().add(name='r1')
         .add(name='r2', uses='!DummyCrafter')
         .add(name='r3', uses='!BaseEncoder'))
    # NOTE(review): empty open/close presumably smoke-checks that the flow
    # can start and stop cleanly before the real run -- confirm.
    with f:
        pass
    on_error_mock = mocker.Mock(wrap=validate)
    on_error_mock_2 = mocker.Mock(wrap=validate)
    # always test two times, make sure the flow still works after it fails on the first
    with f:
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
    on_error_mock.assert_called()
    on_error_mock_2.assert_called()
def test_except_with_parallel(mocker):
    """Errors must propagate through a parallel pod and the downstream pod."""
    def validate(req):
        assert req.status.code == jina_pb2.StatusProto.ERROR
        # Two failing routes are expected: the crafter and the encoder after it.
        err_routes = [r.status for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
        assert len(err_routes) == 2
        assert err_routes[0].exception.executor == 'DummyCrafter'
        assert err_routes[1].exception.executor == 'BaseEncoder'
        assert err_routes[0].exception.name == 'ZeroDivisionError'
        assert err_routes[1].exception.name == 'NotImplementedError'
    f = (Flow().add(name='r1')
         .add(name='r2', uses='!DummyCrafter', parallel=3)
         .add(name='r3', uses='!BaseEncoder'))
    # NOTE(review): empty open/close presumably smoke-checks flow startup -- confirm.
    with f:
        pass
    on_error_mock = mocker.Mock(wrap=validate)
    on_error_mock_2 = mocker.Mock(wrap=validate)
    # always test two times, make sure the flow still works after it fails on the first
    with f:
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
        f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
    on_error_mock.assert_called()
    on_error_mock_2.assert_called()
def test_on_error_callback(mocker):
    """on_error (not on_done) must fire when a pod fails mid-flow."""
    def validate1():
        # Would fail loudly if on_done were (wrongly) invoked.
        raise NotImplementedError
    def validate2(x, *args):
        x = x.routes
        assert len(x) == 4  # gateway, r1, r3, gateway
        badones = [r for r in x if r.status.code == jina_pb2.StatusProto.ERROR]
        assert badones[0].pod == 'r3'
    f = (Flow().add(name='r1')
         .add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wrap=validate2)
    with f:
        f.index_lines(lines=['abbcs', 'efgh'], on_done=validate1, on_error=on_error_mock)
    on_error_mock.assert_called()
def test_no_error_callback(mocker):
    """on_error must stay silent when the flow succeeds."""
    def validate2():
        # Would fail loudly if on_error were (wrongly) invoked.
        raise NotImplementedError
    def validate1(x, *args):
        pass
    f = (Flow().add(name='r1')
         .add(name='r3'))
    response_mock = mocker.Mock(wrap=validate1)
    on_error_mock = mocker.Mock(wrap=validate2)
    with f:
        f.index_lines(lines=['abbcs', 'efgh'], on_done=response_mock, on_error=on_error_mock)
    response_mock.assert_called()
    on_error_mock.assert_not_called()
def test_flow_on_callback():
    """A successful index run fires on_done then on_always (never on_error)."""
    f = Flow().add()
    hit = []
    def f1(*args):
        hit.append('done')
    def f2(*args):
        hit.append('error')
    def f3(*args):
        hit.append('always')
    with f:
        f.index(np.random.random([10, 10]),
                on_done=f1, on_error=f2, on_always=f3)
    assert hit == ['done', 'always']
    hit.clear()
def test_flow_on_error_callback():
    """A failing executor fires on_error then on_always (never on_done)."""
    class DummyCrafter(BaseCrafter):
        # Shadows the module-level DummyCrafter; raises instead of dividing by zero.
        def craft(self, *args, **kwargs):
            raise NotImplementedError
    f = Flow().add(uses='DummyCrafter')
    hit = []
    def f1(*args):
        hit.append('done')
    def f2(*args):
        hit.append('error')
    def f3(*args):
        hit.append('always')
    with f:
        f.index(np.random.random([10, 10]),
                on_done=f1, on_error=f2, on_always=f3)
    assert hit == ['error', 'always']
    hit.clear()
|
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from operators.load_dimension import LoadDimensionOperator
from helpers.sql_queries import SqlQueries
def loader_subdag(
        parent_dag_name,
        task_id,
        redshift_conn_id,
        table,
        create_sql_stmt,
        replace,
        *args, **kwargs):
    """Build a sub-DAG wrapping a single LoadDimensionOperator task.

    Args:
        parent_dag_name: Name of the parent DAG; Airflow requires the
            sub-DAG to be named ``<parent_dag_name>.<task_id>``.
        task_id: Id used for both the sub-DAG and its single task.
        redshift_conn_id: Airflow connection id for Redshift.
        table: Target dimension table name.
        create_sql_stmt: SQL statement used to create/load the table.
        replace: Whether to replace the table contents instead of appending.

    Returns:
        The configured sub-DAG.
    """
    subdag = DAG(f"{parent_dag_name}.{task_id}", **kwargs)
    # The operator registers itself on the DAG via dag=...; no further
    # wiring is needed for a single-task sub-DAG.
    LoadDimensionOperator(
        task_id=task_id,
        dag=subdag,
        redshift_conn_id=redshift_conn_id,
        table=table,
        create_sql_stmt=create_sql_stmt,
        replace=replace,
    )
    return subdag
|
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
def nudge_outward(x):
    """
    Avoid numerical issue in grid data by slightly expanding input x.

    Entries equal to the minimum of *x* are moved 0.001 below it and
    entries equal to the maximum are moved 0.001 above it; all other
    entries are left untouched.

    Args:
        x (np.array): Vector of values.

    Returns:
        np.array: Expanded vector.
    """
    eps = 0.001
    lo, hi = np.min(x), np.max(x)
    nudged = np.where(x == lo, lo - eps, x)
    return np.where(nudged == hi, hi + eps, nudged)
def get_plane_from_flow_data(flow_data, normal_vector="z", x3_value=100):
    """
    Get a plane of data, in form of DataFrame, from a :py:class:`~.FlowData`
    object. This is used to get planes from SOWFA results and FLORIS
    simulations with fixed grids, i.e. curl.

    Args:
        flow_data: Object with 3D coordinate arrays x, y, z and velocity
            components u, v, w.
        normal_vector (string, optional): Vector normal to plane.
            Defaults to z.
        x3_value (float, optional): Value of normal vector to slice through.
            Defaults to 100.

    Returns:
        pandas.DataFrame: Extracted data with columns x1, x2, x3, u, v, w.
    """
    order = "f"
    # Map the requested normal axis onto (in-plane x1, x2, normal x3).
    if normal_vector == "z":
        x1_array = flow_data.x.flatten(order=order)
        x2_array = flow_data.y.flatten(order=order)
        x3_array = flow_data.z.flatten(order=order)
    elif normal_vector == "x":
        x3_array = flow_data.x.flatten(order=order)
        x1_array = flow_data.y.flatten(order=order)
        x2_array = flow_data.z.flatten(order=order)
    elif normal_vector == "y":
        x3_array = flow_data.y.flatten(order=order)
        x1_array = flow_data.x.flatten(order=order)
        x2_array = flow_data.z.flatten(order=order)

    velocity = {c: getattr(flow_data, c).flatten(order=order) for c in ("u", "v", "w")}

    # Snap to the grid level closest to the requested slice location.
    levels = np.array(sorted(np.unique(x3_array)))
    nearest_value = levels[np.argmin(np.abs(levels - x3_value))]
    print("Nearest value to %.2f is %.2f" % (x3_value, nearest_value))

    # Select down the data at that level.
    mask = x3_array == nearest_value
    x1 = x1_array[mask]
    x2 = x2_array[mask]
    return pd.DataFrame(
        {
            "x1": x1,
            "x2": x2,
            # The x3 column reports the *requested* value, not the snapped one.
            "x3": np.ones_like(x1) * x3_value,
            "u": velocity["u"][mask],
            "v": velocity["v"][mask],
            "w": velocity["w"][mask],
        }
    )
class CutPlane:
    """
    A 2D slice through the flow of a FLORIS simulation or another source,
    such as a SOWFA result.
    """

    def __init__(self, df):
        """
        Store the slice data and derive its grid resolution.

        Args:
            df (pandas.DataFrame): Pandas DataFrame of data with
                columns x1, x2, u, v, w.
        """
        self.df = df
        # Resolution = number of distinct coordinates along each in-plane axis.
        self.resolution = (df.x1.nunique(), df.x2.nunique())
# Modification functions
def set_origin(cut_plane, center_x1=0.0, center_x2=0.0):
    """
    Shift a CutPlane so that (center_x1, center_x2) becomes its origin.

    Args:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data (modified in place).
        center_x1 (float, optional): x1-coordinate of origin. Defaults to 0.0.
        center_x2 (float, optional): x2-coordinate of origin. Defaults to 0.0.

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Updated plane of data.
    """
    # Translate both in-plane coordinate columns.
    cut_plane.df.x1 -= center_x1
    cut_plane.df.x2 -= center_x2
    return cut_plane
def change_resolution(cut_plane, resolution=(100, 100)):
    """
    Modify default resolution of a CutPlane object by resampling it onto a
    regular (resolution[0] x resolution[1]) grid spanning the data extent.

    Args:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data (modified in place).
        resolution (tuple, optional): Desired resolution in x1 and x2.
            Defaults to (100, 100).

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Updated plane of data.
    """
    # Linearize the data
    x1_lin = np.linspace(min(cut_plane.df.x1), max(cut_plane.df.x1), resolution[0])
    x2_lin = np.linspace(min(cut_plane.df.x2), max(cut_plane.df.x2), resolution[1])

    # Mesh the data
    x1_mesh, x2_mesh = np.meshgrid(x1_lin, x2_lin)
    # .iloc[0] (not [0]) so a non-default index cannot raise a KeyError;
    # consistent with interpolate_onto_array().
    x3_mesh = np.ones_like(x1_mesh) * cut_plane.df.x3.iloc[0]

    # Scattered source points, nudged outward so mesh points on the boundary
    # stay inside the convex hull of the input data.
    points = np.column_stack(
        [nudge_outward(cut_plane.df.x1), nudge_outward(cut_plane.df.x2)]
    )
    targets = (x1_mesh.flatten(), x2_mesh.flatten())

    def _interp(values):
        # One cubic interpolation per velocity component (deduplicates the
        # previously triplicated griddata call).
        return griddata(points, values, targets, method="cubic")

    # Assign back to df; u/v/w read the original df before it is replaced.
    cut_plane.df = pd.DataFrame(
        {
            "x1": x1_mesh.flatten(),
            "x2": x2_mesh.flatten(),
            "x3": x3_mesh.flatten(),
            "u": _interp(cut_plane.df.u.values),
            "v": _interp(cut_plane.df.v.values),
            "w": _interp(cut_plane.df.w.values),
        }
    )

    # Save the resolution
    cut_plane.resolution = resolution

    # Return the cutplane
    return cut_plane
def interpolate_onto_array(cut_plane_in, x1_array, x2_array):
    """
    Interpolate a CutPlane object onto specified coordinate arrays.

    Args:
        cut_plane_in (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data (left unmodified; a deep copy is returned).
        x1_array (np.array): Specified x1-coordinates.
        x2_array (np.array): Specified x2-coordinates.

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Updated plane of data.
    """
    cut_plane = copy.deepcopy(cut_plane_in)

    # Save the new resolution
    cut_plane.resolution = (len(np.unique(x1_array)), len(np.unique(x2_array)))

    # Mesh the data
    x1_mesh, x2_mesh = np.meshgrid(x1_array, x2_array)
    x3_mesh = np.ones_like(x1_mesh) * cut_plane.df.x3.iloc[0]

    # Scattered source points, nudged outward so mesh points on the boundary
    # stay inside the convex hull of the input data.
    points = np.column_stack(
        [nudge_outward(cut_plane.df.x1), nudge_outward(cut_plane.df.x2)]
    )
    targets = (x1_mesh.flatten(), x2_mesh.flatten())

    def _interp(values):
        # One cubic interpolation per velocity component (deduplicates the
        # previously triplicated griddata call).
        return griddata(points, values, targets, method="cubic")

    # Assign back to df; u/v/w read the copied original df before replacement.
    cut_plane.df = pd.DataFrame(
        {
            "x1": x1_mesh.flatten(),
            "x2": x2_mesh.flatten(),
            "x3": x3_mesh.flatten(),
            "u": _interp(cut_plane.df.u.values),
            "v": _interp(cut_plane.df.v.values),
            "w": _interp(cut_plane.df.w.values),
        }
    )

    # Return the cutplane
    return cut_plane
def rescale_axis(cut_plane, x1_factor=1.0, x2_factor=1.0):
    """
    Stretch or compress CutPlane coordinates in place.

    Args:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data; its df.x1/df.x2 columns are divided by the factors
            (a factor > 1 compresses the axis).
        x1_factor (float): Scaling divisor for the x1-coordinate.
        x2_factor (float): Scaling divisor for the x2-coordinate.

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            The same plane object, mutated.
    """
    cut_plane.df["x1"] = cut_plane.df["x1"] / x1_factor
    cut_plane.df["x2"] = cut_plane.df["x2"] / x2_factor
    return cut_plane
def project_onto(cut_plane_a, cut_plane_b):
    """
    Project cut_plane_a onto the x1, x2 coordinates of cut_plane_b.

    Args:
        cut_plane_a (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data to project from.
        cut_plane_b (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data to project onto.

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            cut_plane_a re-interpolated onto cut_plane_b's coordinate axes.
    """
    target_x1 = cut_plane_b.df.x1.unique()
    target_x2 = cut_plane_b.df.x2.unique()
    return interpolate_onto_array(cut_plane_a, target_x1, target_x2)
def subtract(cut_plane_a_in, cut_plane_b_in):
    """
    Subtract the data columns of cut_plane_b_in from cut_plane_a_in.

    Rows are aligned on the (x1, x2) coordinate pair; non-matching rows
    yield NaN, matching pandas alignment semantics.

    Args:
        cut_plane_a_in (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data to subtract from (not modified).
        cut_plane_b_in (:py:class:`~.tools.cut_plane.CutPlane`):
            Plane of data to subtract (not modified).

    Returns:
        cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
            Difference of cut_plane_a_in minus cut_plane_b_in.
    """
    # Work on copies so neither input plane is mutated.
    minuend = copy.deepcopy(cut_plane_a_in)
    subtrahend = copy.deepcopy(cut_plane_b_in)
    # Index both frames by coordinates so subtraction aligns rows.
    lhs = minuend.df.set_index(["x1", "x2"])
    rhs = subtrahend.df.set_index(["x1", "x2"])
    minuend.df = (lhs - rhs).reset_index()
    return minuend
# def calculate_wind_speed(cross_plane, x1_loc, x2_loc, R):
# """
# Calculate effective wind speed within specified range of a point.
# Args:
# cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
# plane of data.
# x1_loc (float): x1-coordinate of point of interst.
# x2_loc (float): x2-coordinate of point of interst.
# R (float): radius from point of interst to consider
# Returns:
# (float): effective wind speed
# """
# # Make a distance column
# distance = np.sqrt((cross_plane.x1_flat - x1_loc)**2 +
# (cross_plane.x2_flat - x2_loc)**2)
# # Return the mean wind speed
# return np.cbrt(np.mean(cross_plane.u_cubed[distance < R]))
# def wind_speed_profile(cross_plane,
# R,
# x2_loc,
# resolution=100,
# x1_locs=None):
# if x1_locs is None:
# x1_locs = np.linspace(
# min(cross_plane.x1_flat), max(cross_plane.x1_flat), resolution)
# v_array = np.array([calculate_wind_speed(cross_plane,x1_loc, x2_loc, R) for x1_loc in x1_locs])
# return x1_locs, v_array
# def calculate_power(cross_plane,
# x1_loc,
# x2_loc,
# R,
# ws_array,
# cp_array,
# air_density=1.225):
# """
# Calculate maximum power available in a given cross plane.
# Args:
# cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
# plane of data.
# x1_loc (float): x1-coordinate of point of interst.
# x2_loc (float): x2-coordinate of point of interst.
# R (float): Radius of wind turbine rotor.
# ws_array (np.array): reference wind speed for cp curve.
# cp_array (np.array): cp curve at reference wind speeds.
# air_density (float, optional): air density. Defaults to 1.225.
# Returns:
# float: Power!
# """
# # Compute the ws
# ws = calculate_wind_speed(cross_plane, x1_loc, x2_loc, R)
# # Compute the cp
# cp_value = np.interp(ws, ws_array, cp_array)
# #Return the power
# return 0.5 * air_density * (np.pi * R**2) * cp_value * ws**3
# # def get_power_profile(self, ws_array, cp_array, rotor_radius, air_density=1.225, resolution=100, x1_locs=None):
# # # Get the wind speed profile
# # x1_locs, v_array = self.get_profile(resolution=resolution, x1_locs=x1_locs)
# # # Get Cp
# # cp_array = np.interp(v_array,ws_array,cp_array)
# # # Return power array
# # return x1_locs, 0.5 * air_density * (np.pi * rotor_radius**2) * cp_array * v_array**3
# # Define horizontal subclass
# class HorPlane(_CutPlane):
# """
# Subclass of _CutPlane. Shortcut to extracting a horizontal plane.
# """
# def __init__(self, df):
# """
# Initialize horizontal CutPlane
# Args:
# flow_data (np.array): 3D vector field of velocity data
# z_value (float): vertical position through which to slice
# """
# # Set up call super
# super().__init__(df)
# # Define cross plane subclass
# class CrossPlane(_CutPlane):
# """
# Subclass of _CutPlane. Shortcut to extracting a cross-stream plane.
# """
# def __init__(self, df):
# """
# Initialize cross-stream CutPlane
# Args:
# flow_data (np.array): 3D vector field of velocity data
# x_value (float): streamwise position through which to slice
# """
# # Set up call super
# super().__init__(df)
# # Define cross plane subclass
# class VertPlane(_CutPlane):
# """
# Subclass of _CutPlane. Shortcut to extracting a streamwise-vertical plane.
# """
# def __init__(self, df):
# """
# Initialize streamwise-vertical CutPlane
# Args:
# flow_data (np.array): 3D vector field of velocity data
# y_value (float): spanwise position through which to slice
# """
# # Set up call super
# super().__init__(df)
|
'''
Interpreters package contains classes of which instances are created for every
node in the document hierarchy.
@author: Teodor G Nistor
@copyright: 2018 Teodor G Nistor
@license: MIT License
'''
# Import interpreters from subpackages using specific names
from beamr.interpreters.textual import *
from beamr.interpreters.config import Config
from beamr.interpreters.hierarchical import *
|
import torch.nn as nn
import torch
import math
from torch.nn import init
import torch.nn.functional as F
# 2D Conv
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 2D convolution, no padding, no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, padding=0, bias=False)
def conv2x2(in_planes, out_planes, stride=2):
    """2x2 2D convolution (default stride 2 for downsampling), no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=2,
                     stride=stride, padding=0, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 2D convolution, padding 1 (shape-preserving at stride 1), no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
def conv4x4(in_planes, out_planes, stride=2):
    """4x4 2D convolution, padding 1 (default stride 2 halves size), no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=4,
                     stride=stride, padding=1, bias=False)
# 3D Conv
def conv1x1x1(in_planes, out_planes, stride=1):
    """1x1x1 3D convolution, no padding, no bias."""
    return nn.Conv3d(in_planes, out_planes, kernel_size=1,
                     stride=stride, padding=0, bias=False)
def conv3x3x3(in_planes, out_planes, stride=1):
    """3x3x3 3D convolution, padding 1 (shape-preserving at stride 1), no bias."""
    return nn.Conv3d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
def conv4x4x4(in_planes, out_planes, stride=2):
    """4x4x4 3D convolution, padding 1 (default stride 2 halves size), no bias."""
    return nn.Conv3d(in_planes, out_planes, kernel_size=4,
                     stride=stride, padding=1, bias=False)
# 2D Deconv
def deconv1x1(in_planes, out_planes, stride):
    """1x1 2D transposed convolution, no padding, no bias."""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=1,
                              stride=stride, padding=0, output_padding=0,
                              bias=False)
def deconv2x2(in_planes, out_planes, stride):
    """2x2 2D transposed convolution, no padding, no bias."""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=2,
                              stride=stride, padding=0, output_padding=0,
                              bias=False)
def deconv3x3(in_planes, out_planes, stride):
    """3x3 2D transposed convolution, padding 1, no bias."""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3,
                              stride=stride, padding=1, output_padding=0,
                              bias=False)
def deconv4x4(in_planes, out_planes, stride):
    """4x4 2D transposed convolution, padding 1 (stride 2 doubles size), no bias."""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4,
                              stride=stride, padding=1, output_padding=0,
                              bias=False)
# 3D Deconv
def deconv1x1x1(in_planes, out_planes, stride):
    """1x1x1 3D transposed convolution, no padding, no bias."""
    return nn.ConvTranspose3d(in_planes, out_planes, kernel_size=1,
                              stride=stride, padding=0, output_padding=0,
                              bias=False)
def deconv3x3x3(in_planes, out_planes, stride):
    """3x3x3 3D transposed convolution, padding 1, no bias."""
    return nn.ConvTranspose3d(in_planes, out_planes, kernel_size=3,
                              stride=stride, padding=1, output_padding=0,
                              bias=False)
def deconv4x4x4(in_planes, out_planes, stride):
    """4x4x4 3D transposed convolution, padding 1 (stride 2 doubles size), no bias."""
    return nn.ConvTranspose3d(in_planes, out_planes, kernel_size=4,
                              stride=stride, padding=1, output_padding=0,
                              bias=False)
def _make_layers(in_channels, output_channels, type, batch_norm=False, activation=None):
layers = []
if type == 'conv1_s1':
layers.append(conv1x1(in_channels, output_channels, stride=1))
elif type == 'conv2_s2':
layers.append(conv2x2(in_channels, output_channels, stride=2))
elif type == 'conv3_s1':
layers.append(conv3x3(in_channels, output_channels, stride=1))
elif type == 'conv4_s2':
layers.append(conv4x4(in_channels, output_channels, stride=2))
elif type == 'deconv1_s1':
layers.append(deconv1x1(in_channels, output_channels, stride=1))
elif type == 'deconv2_s2':
layers.append(deconv2x2(in_channels, output_channels, stride=2))
elif type == 'deconv3_s1':
layers.append(deconv3x3(in_channels, output_channels, stride=1))
elif type == 'deconv4_s2':
layers.append(deconv4x4(in_channels, output_channels, stride=2))
elif type == 'conv1x1_s1':
layers.append(conv1x1x1(in_channels, output_channels, stride=1))
elif type == 'deconv1x1_s1':
layers.append(deconv1x1x1(in_channels, output_channels, stride=1))
elif type == 'deconv3x3_s1':
layers.append(deconv3x3x3(in_channels, output_channels, stride=1))
elif type == 'deconv4x4_s2':
layers.append(deconv4x4x4(in_channels, output_channels, stride=2))
else:
raise NotImplementedError('layer type [{}] is not implemented'.format(type))
if batch_norm == '2d':
layers.append(nn.BatchNorm2d(output_channels))
elif batch_norm == '3d':
layers.append(nn.BatchNorm3d(output_channels))
if activation == 'relu':
layers.append(nn.ReLU(inplace=True))
elif activation == 'sigm':
layers.append(nn.Sigmoid())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU(0.2, True))
else:
if activation is not None:
raise NotImplementedError('activation function [{}] is not implemented'.format(activation))
return nn.Sequential(*layers)
def _init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=1.0)
elif init_stype == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
print('Initializing Weights: {}...'.format(classname))
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
elif classname.find('Sequential') == -1 and classname.find('Conv5_Deconv5_Local') == -1:
raise NotImplementedError('initialization of [{}] is not implemented'.format(classname))
print('initialize network with {}'.format(init_type))
net.apply(init_func)
def _initialize_weights(net):
for m in net.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose3d):
n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class ReconNet(nn.Module):
    """2D-to-3D reconstruction network.

    A 2D convolutional encoder with residual pairs, a transform module that
    reshapes encoder features into a 5D (3D-conv) tensor, and a 3D
    deconvolutional decoder whose output is squeezed back to 2D and upsampled.
    """
    def __init__(self, in_planes=1, out_planes=1, gain=0.02, init_type='standard'):
        """Build all layers; 'standard' uses He-style init, any other
        init_type is forwarded to _init_weights."""
        super(ReconNet, self).__init__()
        ######### representation network - convolution layers
        # 'conv4_s2' stages halve spatial size; each following 'conv3_s1'
        # stage is summed with its input in forward() (residual pair).
        self.conv_layer1 = _make_layers(in_planes, 256, 'conv4_s2', False)
        self.conv_layer2 = _make_layers(256, 256, 'conv3_s1', '2d')
        self.relu2 = nn.ReLU(inplace=True)
        self.conv_layer3 = _make_layers(256, 512, 'conv4_s2', '2d', 'relu')
        self.conv_layer4 = _make_layers(512, 512, 'conv3_s1', '2d')
        self.relu4 = nn.ReLU(inplace=True)
        self.conv_layer5 = _make_layers(512, 1024, 'conv4_s2', '2d', 'relu')
        self.conv_layer6 = _make_layers(1024, 1024, 'conv3_s1', '2d')
        self.relu6 = nn.ReLU(inplace=True)
        self.conv_layer7 = _make_layers(1024, 2048, 'conv4_s2', '2d', 'relu')
        self.conv_layer8 = _make_layers(2048, 2048, 'conv3_s1', '2d')
        self.relu8 = nn.ReLU(inplace=True)
        self.conv_layer9 = _make_layers(2048, 4096, 'conv4_s2', '2d', 'relu')
        self.conv_layer10 = _make_layers(4096, 4096, 'conv3_s1', '2d')
        self.relu10 = nn.ReLU(inplace=True)
        ######### transform module
        self.trans_layer1 = _make_layers(512, 512, 'conv1_s1', False, 'relu')
        self.trans_layer2 = _make_layers(256, 256, 'deconv1x1_s1', False, 'relu')
        ######### generation network - deconvolution layers
        # NOTE(review): deconv_layer10..5 are built but unused by the current
        # forward() — the calls that would use them are commented out there.
        self.deconv_layer10 = _make_layers(2048, 1024, 'deconv4x4_s2', '3d', 'relu')
        self.deconv_layer8 = _make_layers(1024, 512, 'deconv4x4_s2', '3d', 'relu')
        self.deconv_layer7 = _make_layers(512, 512, 'deconv3x3_s1', '3d', 'relu')
        self.deconv_layer6 = _make_layers(512, 256, 'deconv4x4_s2', '3d', 'relu')
        self.deconv_layer5 = _make_layers(256, 256, 'deconv3x3_s1', '3d', 'relu')
        self.deconv_layer4 = _make_layers(256, 128, 'deconv4x4_s2', '3d', 'relu')
        self.deconv_layer3 = _make_layers(128, 128, 'deconv3x3_s1', '3d', 'relu')
        self.deconv_layer2 = _make_layers(128, 64, 'deconv4x4_s2', '3d', 'relu')
        self.deconv_layer1 = _make_layers(64, 64, 'deconv3x3_s1', '3d', 'relu')
        self.deconv_layer0 = _make_layers(64, 1, 'conv1x1_s1', False, 'relu')
        self.output_layer = _make_layers(512, out_planes, 'conv1_s1', False)
        if init_type == 'standard':
            _initialize_weights(self)
        else:
            _init_weights(self, gain=gain, init_type=init_type)
    def forward(self, x):
        """Encode x, reshape to a 3D feature volume, decode, and upsample 8x.

        NOTE(review): the view() below hardcodes batch size 2 and spatial
        dims (4, 8) — confirm the expected input shape before reuse.
        """
        ### representation network
        conv1 = self.conv_layer1(x)
        conv2 = self.conv_layer2(conv1)
        relu2 = self.relu2(conv1 + conv2)
        conv3 = self.conv_layer3(relu2)
        conv4 = self.conv_layer4(conv3)
        relu4 = self.relu4(conv3 + conv4)
        # NOTE(review): conv5..relu10 are computed but never consumed below —
        # dead compute unless the commented-out decoder path is restored.
        conv5 = self.conv_layer5(relu4)
        conv6 = self.conv_layer6(conv5)
        relu6 = self.relu6(conv5 + conv6)
        conv7 = self.conv_layer7(relu6)
        conv8 = self.conv_layer8(conv7)
        relu8 = self.relu8(conv7 + conv8)
        conv9 = self.conv_layer9(relu8)
        conv10 = self.conv_layer10(conv9)
        relu10 = self.relu10(conv9 + conv10)
        ### transform module
        features = self.trans_layer1(relu4)
        trans_features = features.view(2,256,-1,4,8)
        trans_features = self.trans_layer2(trans_features)
        ### generation network
        # deconv10 = self.deconv_layer10(trans_features)
        # deconv8 = self.deconv_layer8(deconv10)
        #deconv7 = self.deconv_layer7(deconv8)
        #deconv6 = self.deconv_layer6(deconv7)
        #deconv5 = self.deconv_layer5(deconv6)
        deconv4 = self.deconv_layer4(trans_features)
        deconv3 = self.deconv_layer3(deconv4)
        deconv2 = self.deconv_layer2(deconv3)
        deconv1 = self.deconv_layer1(deconv2)
        ### output
        out = self.deconv_layer0(deconv1)
        # Drop the singleton channel dim so depth becomes the 2D channel axis.
        out = torch.squeeze(out, 1)
        out = self.output_layer(out)
        out = F.interpolate(out, scale_factor=8, mode='bilinear', align_corners=True)
        return out
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import user_reco_pb2 as user__reco__pb2
class UserRecommendStub(object):
    # missing associated documentation comment in .proto file
    # NOTE: generated gRPC client stub — regenerate from the .proto file
    # rather than editing by hand.
    pass
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: serializes a User request, deserializes a Track reply.
        self.user_recommend = channel.unary_unary(
            '/UserRecommend/user_recommend',
            request_serializer=user__reco__pb2.User.SerializeToString,
            response_deserializer=user__reco__pb2.Track.FromString,
        )
        # Unary-unary RPC: serializes an Article request, deserializes a Similar reply.
        self.article_recommend = channel.unary_unary(
            '/UserRecommend/article_recommend',
            request_serializer=user__reco__pb2.Article.SerializeToString,
            response_deserializer=user__reco__pb2.Similar.FromString,
        )
class UserRecommendServicer(object):
    # missing associated documentation comment in .proto file
    # NOTE: generated gRPC service base class — subclass and override the
    # methods below; each default answers UNIMPLEMENTED.
    pass
    def user_recommend(self, request, context):
        """feed recommend
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def article_recommend(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_UserRecommendServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on the given grpc *server*.

    Generated helper: maps each method name of the UserRecommend service to
    a unary-unary handler that (de)serializes the matching protobuf types.
    """
    rpc_method_handlers = {
        'user_recommend': grpc.unary_unary_rpc_method_handler(
            servicer.user_recommend,
            request_deserializer=user__reco__pb2.User.FromString,
            response_serializer=user__reco__pb2.Track.SerializeToString,
        ),
        'article_recommend': grpc.unary_unary_rpc_method_handler(
            servicer.article_recommend,
            request_deserializer=user__reco__pb2.Article.FromString,
            response_serializer=user__reco__pb2.Similar.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'UserRecommend', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
# settings.py
# Settings
# Author: Tim Schlottmann
from typing import Any
from .dicts import JsonDict
class Settings(JsonDict):
    """ Settings of the app """
    # NOTE(review): both overrides below are currently plain passthroughs to
    # JsonDict; kept as explicit hook points for future validation of
    # setting reads/writes.
    def __getitem__(self, setting_name) -> Any:
        # Return the stored value for *setting_name* (delegates to JsonDict).
        return super().__getitem__(setting_name)
    def __setitem__(self, setting_name, setting):
        # Store *setting* under *setting_name* (delegates to JsonDict).
        super().__setitem__(setting_name, setting)
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 08:54:59 2020
@author: Lucas
"""
# Print the factorial of a user-supplied number, showing the expansion,
# e.g. "5x4x3x2x1=120".
print("Programa que calcula fatorial")
n = int(input("Digite o numero que quer saber o fatorial: "))
fatorial = 1
for termo in range(n, 0, -1):
    # Each factor is followed by "x", except the last which gets "=".
    print(f"{termo}", end="")
    print("x" if termo > 1 else '=', end="")
    fatorial *= termo
print(f"{fatorial}")
import collections
import itertools
import functools
import math
import re
import bisect
import random
# Competitive-programming I/O helpers. Converted from `name = lambda ...`
# assignments to proper defs (PEP 8 E731) — same names, same call signatures.
def rint():
    """Read one line from stdin as an int."""
    return int(input())
def rstr():
    """Read one raw line from stdin."""
    return input()
def rints():
    """Read one line from stdin as a list of whitespace-separated ints."""
    return list(map(int, input().split()))
def rstrs():
    """Read one line from stdin as a list of whitespace-separated tokens."""
    return input().split()
def wmat(n, mat, sep):
    """Format header *n* then matrix *mat*, one row per line, cells joined by *sep*."""
    return '{}\n{}'.format(n, '\n'.join(sep.join(map(str, row)) for row in mat))
def warr(n, arr, sep):
    """Format header *n* then *arr* on one line, elements joined by *sep*."""
    return '{}\n{}'.format(n, sep.join(map(str, arr)))
def wl(sep, *arr):
    """Join all positional arguments with *sep*."""
    return sep.join(map(str, arr))
def main():
    """Read T then S from stdin and print 'yes' iff T contains some cyclic
    rotation of S as a substring.

    Doubling S means every rotation of S appears in S+S as a window of
    length len(S), so each length-len(S) window of T is checked against it.
    """
    T, S = rstr(), rstr()
    window = len(S)
    doubled = S + S
    found = any(T[start:start + window] in doubled
                for start in range(len(T) - window + 1))
    print('yes' if found else 'no')
if __name__ == '__main__':
    main()
|
from datetime import datetime
from tkinter.constants import *
from PIL import Image, ImageTk
from pyzbar.pyzbar import decode
from playsound import playsound
import sqlite3
import tkinter as tk
import argparse
import cv2
import os
import threading, time
class Application:
    """QR-code guest check-in GUI: shows a live webcam feed in Tkinter,
    decodes QR codes, marks matching guests present in the database, and
    saves a snapshot per check-in."""
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
        a video stream in a Tkinter window and stores current snapshot on disk """
        # NOTE(review): camera index 2 is hardcoded — confirm the target
        # machine exposes the intended camera at that index.
        self.vs = cv2.VideoCapture(2)
        self.output_path = output_path
        self.current_image = None
        self.root = tk.Tk()
        self.root.title("QR Guests")
        # NOTE(review): non-raw backslash path; works only because "\q" is
        # not an escape sequence — a raw string would be safer.
        self.root.wm_iconbitmap("resources\qr_icon.ico")
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.panel = tk.Label(self.root)
        self.panel.pack(expand=1, padx=10, pady=30)
        self.label = tk.Label(self.root, text="Welcome!")
        self.label.config(font=("helvetica", 36, "bold"))
        self.label.pack(pady=80)
        btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        btn.pack(side=BOTTOM, fill=X)
        self.database = Database()
        # Hashes already scanned this session (prevents double check-in).
        self.attended = []
        # NOTE(review): creates a second Database connection just for this
        # snapshot of hashes; self.database.hashes() would avoid it.
        self.hashes = Database().hashes()
        self.filename = dict(name='', date='')
        self.video_loop()
    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read()
        if ok:
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
            self.current_image = Image.fromarray(cv2image)
            imgtk = ImageTk.PhotoImage(image=self.current_image)
            self.panel.imgtk = imgtk  # keep a reference so the image is not garbage-collected
            self.panel.config(image=imgtk)
            # Decode any QR codes visible in the raw frame.
            for barcode in decode(frame):
                decoded = barcode.data.decode('utf-8')
                if decoded in self.database.hashes():
                    if not decoded in self.attended:
                        self.attended.append(decoded)
                        self.database.update_presence(decoded)
                        self.database.update_time(decoded)
                        name = self.database.hash_name(decoded)
                        self.filename['name'] = name
                        self.filename['date'] = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
                        self.label.config(text=f"Selamat datang, {name}")
                        # Snapshot in a thread so the blocking sound playback
                        # does not freeze the GUI loop.
                        threading.Thread(target=self.take_snapshot).start()
        # Re-schedule after 30 ms (~33 fps upper bound).
        self.root.after(30, self.video_loop)
    def take_snapshot(self):
        """ Take snapshot and save it to the file """
        # NOTE(review): absolute, user-specific sound path — breaks on any
        # other machine; should come from configuration.
        playsound(r"C:/Users/Omen/Downloads/mantap.wav", block=True)
        ts = datetime.now()  # NOTE(review): unused; filename uses self.filename['date']
        filename = f"{self.filename['date']} {self.filename['name']}.png"
        path = os.path.join(self.output_path, filename)
        self.current_image.save(path, "PNG")
        print("[INFO] saved {}".format(filename))
    def destructor(self):
        """ Destroy the root object and release all resources """
        print("[INFO] closing...")
        self.root.destroy()
        self.vs.release()  # release web camera
        cv2.destroyAllWindows()
class Database:
    """SQLite access layer for the guest list table.

    SECURITY FIX: values (the scanned QR `hash`) are now passed as bound
    parameters instead of being interpolated into the SQL text, closing a
    SQL-injection hole on attacker-controlled QR content. The table name is
    still interpolated, but it comes from the trusted constant set below.
    """
    def __init__(self):
        self.connection = sqlite3.connect("database.db")
        self.cursor = self.connection.cursor()
        self.table = "guest_list"
    def update_presence(self, hash):
        """Mark the guest identified by *hash* as present."""
        self.cursor.execute(
            f"UPDATE {self.table} SET presence=1 WHERE hash=?;", (hash,))
        self.connection.commit()
    def update_time(self, hash):
        """Record the current local time as the guest's check-in time."""
        now = datetime.now().strftime('%I:%M:%S %p')
        self.cursor.execute(
            f"UPDATE {self.table} SET time=? WHERE hash=?;", (now, hash))
        self.connection.commit()
    def hashes(self):
        """Return every registered guest hash as a list of strings."""
        self.cursor.execute(f"SELECT hash FROM {self.table};")
        return [row[0] for row in self.cursor.fetchall()]
    def hash_name(self, hash):
        """Return the full name stored for *hash*.

        Raises TypeError if the hash is unknown (fetchone() returns None);
        callers are expected to check membership via hashes() first.
        """
        self.cursor.execute(
            f"SELECT fullname FROM {self.table} WHERE hash=?;", (hash,))
        return self.cursor.fetchone()[0]
# construct the argument parse and parse the arguments
# NOTE(review): this runs at import time — consider wrapping in an
# `if __name__ == "__main__":` guard so importing the module doesn't
# launch the GUI. Also note the help text below has an unclosed paren.
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", default="./resources/SnapShots",
    help="path to output directory to store snapshots (default: current folder")
args = vars(ap.parse_args())
# start the app (blocks in the Tk main loop until the window is closed)
print("[INFO] starting...")
pba = Application(args["output"])
pba.root.mainloop()
"""Unit test helpers."""
# pylint: disable=too-few-public-methods
from functools import wraps
import gc
import os.path
from stenographer import CassetteAgent
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, maybeDeferred
from twisted.internet.task import Clock
from twisted.python.failure import Failure
from twisted.web.client import (Agent, ContentDecoderAgent,
RedirectAgent, GzipDecoder)
from twisted.web.test.test_agent import AbortableStringTransport
from twisted.words.protocols.irc import CHANNEL_PREFIXES
from ..connection import Connection
from ..hostmask import Hostmask
from ..message import Message
from ..message.buffering import DEFAULT_ENCODING, ReplyBuffer
from ..plugin import EventPlugin, UserVisibleError
from ..web.http import IdentifyingAgent
CASSETTE_LIBRARY = os.path.join(os.path.dirname(__file__),
'fixtures', 'cassettes')
#
# Helper objects
#
class DummyConnection(object):
    """A class that simulates the behavior of a live connection."""
    def is_channel(self, venue):
        # A venue names a channel iff its leading sigil is a channel prefix.
        sigil = venue[0]
        return sigil in CHANNEL_PREFIXES
#
# Basic event plugins
#
class NoticingPlugin(EventPlugin):
    """An event plugin that caches incoming events."""
    def __init__(self):
        # Chronological record of every event message delivered to us.
        self.seen = []
    def on_privmsg(self, msg):
        self.seen.append(msg)
    # All of these callbacks share the same "record the message" behavior;
    # aliasing them keeps one implementation.
    on_connected = on_disconnected = on_privmsg
    on_command = on_notice = on_join = on_quit = on_privmsg
    @property
    def last_seen(self):
        # Most recently recorded message; raises IndexError if none seen yet.
        return self.seen[-1]
class OutgoingPlugin(NoticingPlugin):
    """An event plugin that caches incoming and outgoing events."""
    def on_privmsg(self, msg):
        super(OutgoingPlugin, self).on_privmsg(msg)
    # Function attribute presumably read by the plugin framework to also
    # deliver outgoing (echoed) events here — TODO confirm against EventPlugin.
    on_privmsg.outgoing = True
    on_mode = on_kick = on_privmsg
    on_command = on_notice = on_join = on_quit = on_privmsg
#
# Test case mixins
#
class ConnectionTestMixin(object):
    """A test case mixin that sets up a `Connection` object before each
    test, and provides constants for mock users and channels."""
    #: A sequence of `Hostmask` objects representing mock users.
    other_users = (
        Hostmask('alice', 'athena', 'ankara.test'),
        Hostmask('bob', 'bellerophon', 'berlin.test'),
        Hostmask('charlie', 'cronus', 'chongqing.test'))
    #: A sequence of mock channel names, as strings.
    channels = ('#foo', '#bar', '&baz')
    #: Whether this test case's `Connection` should receive a sign-on
    #: event during setup.
    sign_on = True
    def setUp(self):
        """Wire a fresh Connection to an in-memory transport and fake clock."""
        super(ConnectionTestMixin, self).setUp()
        self.transport = AbortableStringTransport()
        self.connection = Connection()
        self.connection.settings.set('command_prefixes', ['!'])
        # Deterministic fake reactor so tests can advance time manually.
        self.connection.reactor = Clock()
        self.connection.makeConnection(self.transport)
        if self.sign_on:
            # The heartbeat is started here, not in signedOn().
            self.connection.irc_RPL_WELCOME('irc.server.test', [])
    def receive(self, line):
        """Simulate receiving a line from the IRC server."""
        # The first mock user is used as the message's source hostmask.
        return self.connection._lineReceived(':{!s} {}'.format(
            self.other_users[0], line))
    def echo(self, line):
        """Simulate receiving an echoed action from the IRC server."""
        return self.connection._lineReceived(':{}!user@host {}'.format(
            self.connection.nickname, line))
    def assertLoggedErrors(self, number):
        """Assert that *number* errors have been logged."""
        # Force a collection so garbage Deferreds flush their errors first.
        # <http://stackoverflow.com/a/3252306>
        gc.collect()
        self.assertEqual(len(self.flushLoggedErrors()), number)
class CommandTestMixin(ConnectionTestMixin):
    """A subclass of `ConnectionTestMixin` that also sets up a command
    plugin in addition to a connection and transport."""
    #: The command plugin class to test.
    command_class = None
    #: Any additional help arguments to test in `test_help`.
    help_arguments = tuple()
    def setUp(self):
        """Enable the plugin under test and reset reply/failure state."""
        super(CommandTestMixin, self).setUp()
        self.default_venue = self.connection.nickname
        name = self.command_class.name
        # Derive the command keyword from the last path/module component.
        self.keyword = name.rsplit('/', 1)[-1].rsplit('.', 1)[-1].lower()
        self.command = self.connection.settings.enable(name, [self.keyword])
        self.reply_buffer = iter([])
        self.failure = None
    def command_message(self, content, **kwargs):
        """Build a Message aimed at the plugin, with sensible defaults."""
        action = kwargs.pop('action', 'command')
        kwargs.setdefault('actor', self.other_users[0])
        kwargs.setdefault('venue', self.default_venue)
        kwargs.setdefault('subaction', self.keyword)
        return Message(self.connection, False, action,
                       content=content, **kwargs)
    @inlineCallbacks
    def send_command(self, content, **kwargs):
        """Dispatch *content* to the plugin, capturing replies or the failure."""
        # NOTE(review): `unicode` means this module targets Python 2.
        if isinstance(content, unicode):
            content = content.encode(DEFAULT_ENCODING)
        request = self.command_message(content, **kwargs)
        try:
            response = yield self.command.respond_to(request)
        except UserVisibleError:
            self.failure = Failure()
        else:
            if response is not None:
                self.reply_buffer = ReplyBuffer(response, request)
    def assert_reply(self, expected):
        """Assert that the next buffered reply equals *expected*."""
        finished = maybeDeferred(next, self.reply_buffer, None)
        finished.addCallback(self.assertEqual, expected)
        return finished
    def assert_no_replies(self):
        """Assert that the reply buffer is exhausted."""
        finished = maybeDeferred(next, self.reply_buffer, None)
        finished.addCallback(self.assertIsNone)
        return finished
    def assert_error(self, expected):
        """Assert the last command failed with UserVisibleError(*expected*)."""
        self.assertIsNotNone(self.failure)
        self.assertIsNotNone(self.failure.check(UserVisibleError))
        self.assertEqual(self.failure.getErrorMessage(), expected)
        self.failure = None
    @staticmethod
    def use_cassette(cassette_name):
        """Decorator: replay recorded HTTP interactions from the named cassette."""
        cassette_path = os.path.join(CASSETTE_LIBRARY, cassette_name + '.json')
        cassette_agent = CassetteAgent(Agent(reactor), cassette_path)
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                self.command.agent = IdentifyingAgent(ContentDecoderAgent(
                    RedirectAgent(cassette_agent), [('gzip', GzipDecoder)]))
                finished = maybeDeferred(func, self, *args, **kwargs)
                # Persist newly recorded interactions after the test runs.
                finished.addCallback(cassette_agent.save)
                return finished
            return wrapper
        return decorator
    def test_help(self):
        """Ensure that command help doesn't cause errors."""
        for content in ('',) + self.help_arguments:
            self.send_command(content, action='cmdhelp')
|
import requests
from bs4 import BeautifulSoup
YOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'
# requests does not execute JavaScript, so dynamically-loaded page data is
# absent from the fetched HTML.
response = requests.get(YOUTUBE_TRENDING_URL)
print('Status Code:', response.status_code)
#print('Output:', response.text[:1000])
# Save the raw HTML for offline inspection.
with open('trending.html', 'w') as f:
    f.write(response.text)
doc = BeautifulSoup(response.text, 'html.parser')
print('Page title:', doc.title)
print('Page title:', doc.title.text)
video_divs = doc.find_all('div', class_ = 'style-scope ytd-video-renderer')
# Find all the video divs
# NOTE(review): this is expected to find 0 matches — the renderer divs are
# created client-side by JavaScript, which is what motivates the Selenium
# discussion below.
print(f'Found {len(video_divs)} videos')
'''
We need to create a fake browser or a headless browser and hence simulate an entire browser
A browser which doesn't have the UI ,i.e doesn't display the page, but still runs all the Javascript on the page. Then we need to pick info from the page that is loaded in the fake browser. This is where Selenium comes into picture. Selenium automates browser.
We will use it for extracting info from the trending page on youtube.
Selenium is a python API which uses the drivers to interact with the web browsers.
'''
'''
video = videos[0]
print('Title:', title)
print('URL:', url)
print('Thumbnail URL:', thumbnail_url)
print('Channel Name:', channel_name)
print('Description:', description)
print('Views:',views)
print('Uploaded Duration:',uploaded)
'''
#!/usr/bin/env python3
import pandas as pd
import openpyxl
import re
# Load the merged variant TSV plus gene panels and pedigree supplied by the
# snakemake rule. Everything is read as str to avoid dtype surprises.
df = pd.read_csv(snakemake.input.tsv,sep="\t",dtype=str)
# Core and extended gene panels: one gene symbol per line, first column.
c = pd.read_csv(snakemake.input.core_panel, header=None).iloc[:,0].to_list()
e = pd.read_csv(snakemake.input.extend_panel, header=None).iloc[:,0].to_list()
ped = pd.read_csv(snakemake.input.ped, sep='\t', header=None, names=['family_id','id','parental_id','maternal_id','sex','phenotype'])
#aggregate sample-level fields per family ('|'-joined across samples)
sample_cols=['sample_id','genotype(sample,dad,mom)','depths(sample,dad,mom)', 'allele_balance(sample,dad,mom)']
sample_df=df.groupby(['#mode','chr:pos:ref:alt','family_id'])[sample_cols].agg('|'.join).reset_index()
#Extract the annotation fields (one row per mode/variant/family)
anno=df.drop(sample_cols,axis=1).drop_duplicates(subset=['#mode','chr:pos:ref:alt', 'family_id'],keep='last').sort_values(['#mode', 'chr:pos:ref:alt','family_id'])
#merge back aggregate and anno fields
out=pd.merge(sample_df,anno,on=['#mode','chr:pos:ref:alt','family_id'], how='inner')
def get_max_str(lst):
    """Return the longest element of *lst* (the earliest one on ties)."""
    longest = max(lst, key=len)
    return longest
# The longest-named column is the ';'-separated VEP/CSQ annotation header;
# split its values into one temporary column per CSQ field ('ann0', 'ann1'...).
tmp=out.join(out.loc[:,get_max_str(out.columns)].str.split(';', expand=True).add_prefix('ann'))
csq_column=get_max_str(out.columns).split(';')
other_column=[ c for c in tmp.columns if not(c.startswith('ann'))]
# Rename the 'ann*' columns to the real CSQ field names, then drop duplicates.
tmp.columns=other_column+csq_column
tmp=tmp.loc[:,~tmp.columns.duplicated()]
def make_OMIMlink(value):
    """Return an Excel HYPERLINK formula pointing at the OMIM entry *value*."""
    target = "https://www.omim.org/entry/{}".format(value)
    return '=HYPERLINK("{}", "{}")'.format(target, value)
def make_Clinvarlink(value):
    """Return an Excel HYPERLINK formula pointing at the ClinVar variation *value*."""
    target = "https://www.ncbi.nlm.nih.gov/clinvar/variation/{}".format(value)
    return '=HYPERLINK("{}", "{}")'.format(target, value)
##split VAR_SYNONYMS (keep only the ClinVar accession and the OMIM link)
tmp['OMIM_link'] = tmp['OMIM_link'].apply(lambda x: make_OMIMlink(x))
# Pull the ClinVar VCV accession out of the VAR_SYNONYMS blob.
tmp['ClinVar_link']=tmp["VAR_SYNONYMS"].str.extract(r'(VCV\d*)')
tmp['ClinVar_link'] = tmp['ClinVar_link'].apply(lambda x: make_Clinvarlink(x))
tmp=tmp.astype(str)
# Blank out links whose source value was missing ('nan' after astype(str)).
tmp[['OMIM_link','ClinVar_link']]=tmp[['OMIM_link','ClinVar_link']].applymap(lambda x: re.sub('.*nan.*','',x ))
#columns to drop (raw synonyms blob and the unsplit CSQ column)
allvars= tmp.drop(['VAR_SYNONYMS', get_max_str(out.columns)],axis=1)
##need to remove amp-pd once it's fixed
allvars.rename(columns = {'Gene':'Ensembl_geneID', '#mode':'mode',
    'NEAREST':'NEAREST_gene'}, inplace = True)
# Numeric conversion for score/frequency columns so Excel sorts correctly.
allvars[['SpliceAI_pred_DS_AG','SpliceAI_pred_DS_AL', 'SpliceAI_pred_DS_DG', 'SpliceAI_pred_DS_DL','CADD_PHRED','gnomad_AF','gnomad_nhomalt']] = allvars[['SpliceAI_pred_DS_AG','SpliceAI_pred_DS_AL', 'SpliceAI_pred_DS_DG', 'SpliceAI_pred_DS_DL','CADD_PHRED','gnomad_AF','gnomad_nhomalt']].apply(pd.to_numeric)
## rearrange columns into the reviewer-facing order
column_names = ['mode','chr:pos:ref:alt','family_id','gene','gene_fullname','Ensembl_geneID','BIOTYPE', 'transcript','STRAND','CANONICAL','MANE_SELECT','MANE_PLUS_CLINICAL','EXON','Codons','Amino_acids','HGVSc','HGVSp','highest_impact','ClinVar_CLNSIG','ClinVar_CLNDN','Existing_variation','ClinVar_link','OMIM_link','CNCR','CADD_PHRED','gnomAD_pLI', 'gnomAD_oe_lof_CI90','gnomAD_oe_mis_CI90', 'gnomAD_oe_syn_CI90', 'clinvar_gene_description','MOI','gnomad_AF', 'gnomad_popmax_af', 'gnomad_nhomalt', 'gnomad_AC','TOPMed8_AF','impact','NEAREST_gene', 'MAX_AF_POPS','LoF','LoF_filter','LoF_flags','LoF_info','SpliceAI_pred_DS_AG', 'SpliceAI_pred_DS_AL', 'SpliceAI_pred_DS_DG', 'SpliceAI_pred_DS_DL', 'SpliceAI_pred_SYMBOL','SpliceRegion', 'existing_InFrame_oORFs', 'existing_OutOfFrame_oORFs', 'existing_uORFs', 'five_prime_UTR_variant_annotation','five_prime_UTR_variant_consequence', 'MetaRNN_score', 'Ensembl_transcriptid', 'sample_id', 'genotype(sample,dad,mom)', 'depths(sample,dad,mom)', 'allele_balance(sample,dad,mom)','IMPACT']
allvars = allvars.reindex(columns=column_names)
# Keep protein-coding variants only, ordered for review.
allvars = allvars.loc[allvars['BIOTYPE']=='protein_coding'].sort_values(['mode','chr:pos:ref:alt','highest_impact'], ascending = (True, True,True))
fams=list(ped['family_id'].unique())
for family, dat in allvars.groupby('family_id'):
with pd.ExcelWriter(f'panel/{family}.xlsx') as writer:
dat.loc[allvars.gene.isin(c)].to_excel(writer, sheet_name='core',index = False, header=True)
dat.loc[allvars.gene.isin(e)].to_excel(writer, sheet_name='extend',index = False, header=True)
# write out empty report if a family doesn't have variants
fam_list=list(allvars.family_id.unique())
empty_fam=[ x for x in fams if x not in fam_list]
for family in empty_fam:
print(family)
c=pd.DataFrame()
c.to_excel(f'panel/{family}.xlsx', index = False)
|
from django.conf.urls import url
from cobra.core.application import Application
from cobra.core.loading import get_class
class SummaryApplication(Application):
    """Cobra application for the summary section; it currently exposes no URLs."""

    name = 'summary'

    def get_urls(self):
        """Return the (empty) URL pattern list, post-processed by the framework."""
        patterns = []
        return self.post_process_urls(patterns)


application = SummaryApplication()
|
#!/usr/bin/env python
import pyami.fft.calc_fftw3
import fftw3
import numpy
import time
import os
import sys
# Per-phase wall-clock timings (seconds), appended to by the helpers below.
timing = {'create':[], 'plan':[], 'init':[], 'run':[]}
def create(shape):
    """Allocate an uninitialized float array of *shape*, recording elapsed time.

    Appends the allocation time to the global timing['create'] bucket.
    """
    global timing
    t0 = time.time()
    # Builtin float dtype: numpy.float was only a deprecated alias for it and
    # has been removed in NumPy >= 1.24; behavior is identical.
    a = numpy.empty(shape, float)
    timing['create'].append(time.time() - t0)
    return a
def make_plan(image_array, rigor):
    """Create a forward real-to-complex fftw3 plan sized for *image_array*.

    The plan's input/output arrays are attached to the returned plan object;
    plan-creation time is appended to the global timing['plan'] bucket.
    """
    global timing
    t0 = time.time()
    input_array = numpy.empty(image_array.shape, float)
    # Integer division: under true division shape[1]/2 yields a float and
    # numpy.empty would reject the shape; '//' is correct in both Py2 and Py3.
    fftshape = image_array.shape[0], image_array.shape[1] // 2 + 1
    fft_array = numpy.empty(fftshape, dtype=complex)
    plan_kwargs = dict(pyami.fft.calc_fftw3.global_plan_kwargs)
    plan_kwargs['flags'] = [rigor]
    p = fftw3.Plan(input_array, fft_array, direction='forward', **plan_kwargs)
    p.input_array = input_array
    p.fft_array = fft_array
    timing['plan'].append(time.time() - t0)
    return p
def init(image_array, plan):
    """Copy *image_array* into the plan's input buffer, recording elapsed time."""
    global timing
    t0 = time.time()
    plan.input_array[:] = image_array
    timing['init'].append(time.time() - t0)
def run(plan):
    """Execute the FFT plan, recording elapsed time in timing['run']."""
    global timing
    t0 = time.time()
    plan()
    timing['run'].append(time.time() - t0)
def run_timing():
    """Parse argv, then time N create/plan/init/run FFT cycles and print results.

    argv: N shape0 shape1 [rigor]; rigor defaults to 'measure'.
    (Python 2 module: print statements are intentional.)
    """
    try:
        n = int(sys.argv[1])
        shape = int(sys.argv[2]), int(sys.argv[3])
        try:
            rigor = sys.argv[4]
        except:
            rigor = 'measure'
    except:
        print '''
usage:   %s N shape0 shape1
     N - number of iterations to test
     shape0,shape1 - the shape of the array to test
''' % (sys.argv[0],)
        sys.exit()
    # Reuse previously accumulated FFTW wisdom, then persist any new wisdom.
    pyami.fft.calc_fftw3.load_wisdom()
    for i in range(n):
        print i
        a = create(shape)
        plan = make_plan(a, rigor)
        init(a, plan)
        run(plan)
    pyami.fft.calc_fftw3.store_wisdom()
    for key in ['create','plan','init','run']:
        print key, timing[key]
def wisdom_test():
    """Round-trip FFTW wisdom: load it, then immediately store it back."""
    pyami.fft.calc_fftw3.load_wisdom()
    pyami.fft.calc_fftw3.store_wisdom()
if __name__ == '__main__':
    run_timing()
|
"""
Name: job_base.py
Author: Charles Zhang <694556046@qq.com>
Propose: The main process of a grading.
Coding: UTF-8
"""
import json
import types
from pygrading.testcase import TestCases
from pygrading.exception import FunctionsTypeError, FieldMissingError, DataTypeError
class JobBase(object):
    """A Job is a work flow, using run() function to handle each testcase.

    Holds the user-supplied prework/run/postwork callbacks together with the
    testcases, configuration, and the result fields (verdict, score, rank,
    comment, detail, secret, HTML) that are serialized into the result JSON.
    """

    def __init__(
            self,
            prework: types.FunctionType = None,
            run: types.FunctionType = None,
            postwork: types.FunctionType = None,
            testcases: "TestCases" = None,
            config: dict = None
    ):
        """Init Job instance.

        Args:
            prework: Optional callable executed before the testcases run.
            run: Optional callable executed for each testcase.
            postwork: Optional callable executed after all testcases.
            testcases: TestCases container; a fresh TestCases() when omitted.
            config: Job configuration dict; defaults to {"debug": False}.
        """
        self.set_prework(prework)
        self.set_run(run)
        self.set_postwork(postwork)
        # Defaults are created per call: using TestCases()/{...} as default
        # argument values would share one mutable object across every Job.
        self.__testcases = TestCases() if testcases is None else testcases
        self.__config = {"debug": False} if config is None else config
        self.is_terminate = False
        self.__verdict = "Unknown"
        self.__score = 0
        self.__rank = {"rank": "-1.0"}
        self.__comment = ""
        self.__detail = ""
        self.__secret = ""
        self.__HTML = "enable"
        self.__json = {}
        self.__summary = []

    def set_prework(self, prework: types.FunctionType):
        """Set prework function for job; raises FunctionsTypeError if not callable."""
        if not prework:
            self.prework = None
            return
        if not callable(prework):
            raise FunctionsTypeError("The prework object passed in is not of function type!")
        self.prework = prework

    def set_run(self, run: types.FunctionType):
        """Set run function for job; raises FunctionsTypeError if not callable."""
        if not run:
            self.run = None
            return
        if not callable(run):
            raise FunctionsTypeError("The run object passed in is not of function type!")
        self.run = run

    def set_postwork(self, postwork: types.FunctionType):
        """Set postwork function for job; raises FunctionsTypeError if not callable."""
        if not postwork:
            self.postwork = None
            return
        if not callable(postwork):
            raise FunctionsTypeError("The postwork object passed in is not of function type!")
        self.postwork = postwork

    def verdict(self, src: str):
        """Set the verdict string and refresh the result JSON."""
        self.__verdict = src
        self.update_json()

    def score(self, src: int):
        """Set the score (stored as str; validated as int on serialization)."""
        self.__score = str(src)
        self.update_json()

    def rank(self, src: dict):
        """Set the rank dict (must contain a 'rank' key) and refresh the JSON."""
        self.__rank = src
        self.update_json()

    def comment(self, src: str):
        """Set the comment text and refresh the result JSON."""
        self.__comment = src
        self.update_json()

    def detail(self, src: str):
        """Set the detail text and refresh the result JSON."""
        self.__detail = src
        self.update_json()

    def secret(self, src: str):
        """Set the secret text and refresh the result JSON."""
        self.__secret = src
        self.update_json()

    def HTML(self, src: str):
        """Set the HTML flag ('enable'/'disable') and refresh the result JSON."""
        self.__HTML = src
        self.update_json()

    def set_summary(self, summary: list):
        """Replace the per-testcase summary list."""
        self.__summary = summary

    def get_summary(self):
        """Return the per-testcase summary list."""
        return self.__summary

    def set_config(self, config):
        """Replace the job configuration."""
        self.__config = config

    def get_config(self):
        """Return the job configuration dict."""
        return self.__config

    def set_testcases(self, testcases):
        """Replace the TestCases container."""
        self.__testcases = testcases

    def get_testcases(self):
        """Return the list of testcases held by the container."""
        return self.__testcases.get_testcases()

    def get_total_score(self):
        """Sum the integer 'score' fields of dict entries in the summary."""
        ret = 0
        for i in self.__summary:
            if type(i) == dict and "score" in i:
                ret += int(i["score"])
        return ret

    def update_json(self):
        """Rebuild the result JSON from the current fields.

        Raises:
            DataTypeError: If score is not an integer or a rank value is not a float.
            FieldMissingError: If the rank dict lacks a 'rank' key.
        """
        self.__json["verdict"] = str(self.__verdict)
        try:
            self.__score = int(self.__score)
        except ValueError:
            raise DataTypeError("Score field must be integer!")
        self.__json["score"] = str(self.__score)
        if "rank" not in self.__rank:
            raise FieldMissingError("No 'rank' detected in super 'rank' field!")
        for k, v in self.__rank.items():
            try:
                num = float(v)
                self.__rank[k] = str(num)
            except ValueError:
                raise DataTypeError("Fields in 'rank' must be single-float!")
        self.__json["rank"] = self.__rank
        self.__json["HTML"] = str(self.__HTML)
        # Optional fields are included only when non-empty.
        if self.__comment:
            self.__json["comment"] = str(self.__comment)
        elif "comment" in self.__json:
            del self.__json["comment"]
        if self.__detail:
            self.__json["detail"] = str(self.__detail)
        elif "detail" in self.__json:
            del self.__json["detail"]
        if self.__secret:
            self.__json["secret"] = str(self.__secret)
        elif "secret" in self.__json:
            del self.__json["secret"]

    def get_json(self):
        """Return the (refreshed) result JSON dict."""
        self.update_json()
        return self.__json

    def print(self, return_str=False):
        """ Print result json to stdout or return a json string """
        self.update_json()
        str_json = json.dumps(self.__json)
        if return_str is True:
            return str_json
        print(str_json)
|
import os
from typing import Callable, List
import pickle
import multiprocessing as mp
from functools import partial
from .._logger import progress_bar
__all__ = [
'load_pickle',
'save_pickle',
]
def load_pickle(path: str):
    """Read and return the object stored in the pickle file at *path*.

    Args:
        path (str): Path to the pickle file.

    Raises:
        IOError: Path does not exist.

    Returns:
        [any]: The unpickled object.
    """
    if not os.path.exists(path):
        raise IOError(f'{path} does not exist!')
    with open(path, "rb") as handle:
        return pickle.load(handle)
def save_pickle(data, path: str):
    """
    Save data to pickle, creating parent directories as needed.

    Args:
        data (any): Data to be saved.
        path (str): Filepath.
    """
    directory = os.path.dirname(path)
    # A bare filename has no directory component; os.makedirs('') would raise.
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(data, f)
def remove_extension(path: str) -> str:
    """Return *path* with its final extension stripped (no-op when there is no dot)."""
    if '.' not in path:
        return path
    return path.rsplit('.', 1)[0]
def remove_images(image_dir: str) -> None:
    """
    Remove all images in the image folder (in parallel via multiprocess_map).

    Args:
        image_dir (str): Directory whose entries are removed.
    """
    # NOTE(review): removes every entry found by scandir, not only image
    # files — confirm the directory holds images exclusively.
    paths = [x.path for x in os.scandir(image_dir)]
    if len(paths) > 0:
        multiprocess_map(func=remove, lst=paths, total=len(paths),
                         desc='Removing images')
def remove(path: str) -> None:
    """Delete *path* if it exists; a missing file is silently ignored.

    Uses EAFP so there is no window between the existence check and the
    deletion — the original check-then-remove could race with other workers.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def flatten(l):
    """Flatten a list of lists; return *l* unchanged unless every element is a list."""
    for element in l:
        if not isinstance(element, list):
            return l
    return [item for sub in l for item in sub]
def format_seconds(n: int) -> str:
    """Format a duration in seconds as a compact 'Dd Hh:Mm:Ss' style string."""
    minutes, seconds = divmod(int(n), 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    if days > 0:
        return f'{days}d{hours}h:{minutes}m:{seconds}s'
    if hours > 0:
        return f'{hours}h:{minutes}m:{seconds}s'
    return f'{minutes}m:{seconds}s'
def multiprocess_map(
    func: Callable,
    lst: list,
    processes: int = None,
    func_args: dict = None,
    **kwargs
):
    """Map *func* over *lst* with a multiprocessing pool.

    Args:
        func: Picklable callable applied to each element.
        lst: Iterable of items to process.
        processes: Worker count; defaults to cpu_count() - 1 (at least 1).
        func_args: Extra keyword arguments bound to *func* via functools.partial.
            A fresh dict per call — a mutable default would be shared.
        **kwargs: Forwarded to the progress bar ('total', 'desc', ...).

    Returns:
        list: Results in input order.
    """
    results = []
    if processes is None:
        # Leave one core free, but never request a zero-sized pool.
        processes = max(1, os.cpu_count() - 1)
    if kwargs.get('total') is None:
        try:
            kwargs['total'] = len(lst)
        except TypeError:
            # Generators have no len(); the progress bar runs without a total.
            pass
    if func_args is None:
        func_args = {}
    func = partial(func, **func_args)
    with mp.Pool(processes=processes) as p:
        if kwargs.get('desc') is None or kwargs.get('desc') == "":
            loop = p.imap(func, lst)
        else:
            loop = progress_bar(p.imap(func, lst), **kwargs)
        for result in loop:
            results.append(result)
    return results
|
#
# 1679. Max Number of K-Sum Pairs
#
# Q: https://leetcode.com/problems/max-number-of-k-sum-pairs/
# A: https://leetcode.com/problems/max-number-of-k-sum-pairs/discuss/962118/Kt-Js-Py3-Cpp-Map
#
from typing import List
class Solution:
    """LC 1679: count disjoint pairs in A summing to T, greedily with a counter."""

    def maxOperations(self, A: List[int], T: int, cnt = 0) -> int:
        """Return the maximum number of removable pairs whose sum equals T."""
        seen = {}
        for x in A:
            need = T - x
            if seen.get(need, 0) > 0:
                # A complement is available: consume it and record the pair.
                seen[need] -= 1
                cnt += 1
            else:
                seen[x] = seen.get(x, 0) + 1
        return cnt
|
import random
from card import Card
class Deck:
    """A 52-card deck (ranks 1-13 x four suits), shuffled on creation."""

    def __init__(self):
        """Populate the deck with every (number, suit) combination, then shuffle."""
        self.cards = [
            Card(number=number, suit=suit)
            for number in range(1, 14)
            for suit in '♣♦♥♠'
        ]
        self.shuffle()

    def shuffle(self):
        """Randomize the order of the remaining cards in place."""
        random.shuffle(self.cards)

    def deal_to_player(self, player):
        """Deal six cards from the top of the deck into *player*'s hand."""
        for _ in range(6):
            player.add_to_hand(self.cards.pop(0))
|
from setuptools import setup
import sys
# Refuse to install on unsupported interpreters before setup() runs.
if sys.version_info < (3, 6):
    print("Python 3.6 or higher required, please upgrade.")
    sys.exit(1)
# Package metadata and runtime dependencies for the JAX-FEniCS adjoint bridge.
setup(
    name="jaxfenics_adjoint",
    version="1.0.0",
    description="JAX-FEniCS interface using dolfin-adjoint",
    url="https://github.com/IvanYashchuk/jax-fenics",
    author="Ivan Yashchuk",
    license="MIT",
    packages=["jaxfenics_adjoint"],
    install_requires=["jax", "fdm", "fecr"],
)
|
# -*- coding: utf-8 -*-
import pytest
from spectron import parse_date
@pytest.mark.parametrize("s, expected", [("", 0), ("a", 0), ("1", 1), ("a1b2", 2)])
def test_num_digits(s, expected):
    """num_digits counts the decimal digit characters in a string."""
    assert parse_date.num_digits(s) == expected
@pytest.mark.parametrize(
    "s, expected",
    [
        ("test1digit", None),
        ("2020", None),
        ("12345678", None),
        ("2020-05-01", "DATE"),
        ("20200501", "DATE"),
        ("2020-05-01 00:00:00", "TIMESTAMP"),
        ("2020-05-01T12:34:56", "TIMESTAMP"),
        ("2020-05-01T12:34:56.123", "TIMESTAMP"),
        ("2020-05-01 12:34:56.123+0000", "TIMESTAMP"),
        ("2020-05-01T12:34:56.123+0000", "TIMESTAMP"),
        ("2020-05-01T12:34:56+0000", "TIMESTAMP"),
        ("2020-05-01T12:34:56.123+8000", "TIMESTAMP"),
    ],
)
def test_guess_type(s, expected):
    """guess_type classifies strings as DATE, TIMESTAMP, or None (not temporal)."""
    assert parse_date.guess_type(s) == expected
|
"""
Django settings for kboard project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.core.mail import send_mail
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'fbk#a_$7&@566onvmd1xfxyszz)npb+d5gq#y9q(n0wg_k)v0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Custom user model lives in the accounts app.
AUTH_USER_MODEL = 'accounts.Account'
# Application definition
INSTALLED_APPS = [
    'accounts',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
    'board',
    'django_summernote',
    'djangobower',
    'pipeline',
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kboard.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'core.context_processors.navbar'
            ],
        },
    },
]
WSGI_APPLICATION = 'kboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite for development; MySQL when DEBUG is off.
if DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
        }
    }
else:
    # NOTE(review): no 'HOST'/'PORT' given, so Django falls back to the local
    # MySQL socket, and the root password is hard-coded — confirm and move to
    # environment configuration.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'kboard',
            'USER': 'root',
            'PASSWORD': 'root'
        }
    }
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Bower settings
BOWER_INSTALLED_APPS = [
    'jquery#3.1.1',
    'bootstrap#3.3.7'
]
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '../')
# Summernote settings
SUMMERNOTE_CONFIG = {}
# pipeline settings
PIPELINE = {
    'PIPELINE_ENABLED': False,
    'COMPILERS': {
        'libsasscompiler.LibSassCompiler',
    },
    'JAVASCRIPT': {
        'main': {
            'source_filenames': [
                'js/*.js'
            ],
            'output_filename': 'js/vendor.js'
        },
    },
    'STYLESHEETS': {
        'main': {
            'source_filenames': [
                'style/*.scss'
            ],
            'output_filename': 'style/main.css'
        },
    },
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
    os.path.join(BOWER_COMPONENTS_ROOT, 'bower_components'),
]
MEDIA_URL = '/file/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'file')
# Registration
# https://django-registration.readthedocs.io/en/2.1.2/index.html
ACCOUNT_ACTIVATION_DAYS = 7
# Email Activation — credentials come from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('KBOARD_EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('KBOARD_PASSWORD')
SERVER_EMAIL = os.environ.get('KBOARD_EMAIL')
# NOTE(review): Django's setting is DEFAULT_FROM_EMAIL; this name is only
# meaningful if project code reads DEFAULT_FROM_MAIL directly — confirm.
DEFAULT_FROM_MAIL = 'KBoard_Developer'
# When Login success, go to main page.
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
|
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
# Author: longanyang
# Usage
# 1. Install python3 and the pandas library.
# 2. Name the downloaded CSV 'grafana_data_export.csv', place it next to this
#    script and run 'python3 script.py'.
# 3. Null values are already handled (converted to 0 and filtered out).
df = pd.read_csv('./grafana_data_export.csv', skiprows=[0])  # skip the first (title) row
rows = df.shape[0]
# Each raw cell is a ';'-separated record; keep only its third field.
# NOTE(review): the index runs shape-2 .. 0 and finally -1 (which wraps to the
# last row), so every row is visited exactly once, just in an odd order.
while rows > 0:
    rows -= 1
    a = df.values[rows-1]
    a = a[0].split(';')  # split the semicolon-packed record into fields
    df.values[rows-1] = a[2]  # replace the row with the extracted value
df.columns = ['A']  # name the single column 'A'
a = df['A']
rows = df.shape[0]
while rows > 0:  # convert strings to numbers; "null" becomes 0
    rows -= 1
    if df.values[rows-1] != "null":
        df.values[rows-1] = float(df.values[rows-1])
    else:
        df.values[rows-1] = 0
df = df[df.A > 0]
arr = df['A']
arr = arr.values
arr = arr.tolist()
# mean latency (ms)
print('平均毫秒:%s' % round(np.mean(arr), 2))
# median
print('中位数:%s' % round(np.median(arr), 2))
# share of requests completing within 1 second
rows = df.shape[0]  # total row count
greater_df = df[df.A >= 1000]  # rows at or above 1000 ms
greater_rows = greater_df.shape[0]  # their count
percents = 1 - round((greater_rows/rows), 4)
print('1s内占比:%s%%' % (str(percents*100)))
# variance
print('方差:%s' % np.var(arr))
# standard deviation
print('标准差:%s' % np.std(arr))
# range (max - min)
print('极差:%s' % np.ptp(arr))
# coefficient of variation
print('变异系数:%s%%' % round((np.std(arr)/np.mean(arr)*100), 2))
# plt.plot(arr)
# modified file from
# https://github.com/nioinnovation/python-xbee/blob/master/xbee/ieee.py
import struct
from xbee.base import XBeeBase
from xbee.python2to3 import *
class XBeeWiFi(XBeeBase):
    """
    Provides an implementation of the XBee API for IEEE 802.15.4 modules
    with recent firmware.
    Commands may be sent to a device by instantiating this class with
    a serial port object (see PySerial) and then calling the send
    method with the proper information specified by the API. Data may
    be read from a device synchronously by calling wait_read_frame. For
    asynchronous reads, see the definition of XBeeBase.
    """
    # Packets which can be sent to an XBee
    # Format:
    #        {name of command:
    #           [{name:field name, len:field length, default: default value sent}
    #            ...
    #            ]
    #         ...
    #         }
    api_commands = {"at":
                    [{'name': 'id',        'len': 1,      'default': b'\x08'},
                     {'name': 'frame_id',  'len': 1,      'default': b'\x00'},
                     {'name': 'command',   'len': 2,      'default': None},
                     {'name': 'parameter', 'len': None,   'default': None}],
                    "queued_at":
                    [{'name': 'id',        'len': 1,      'default': b'\x09'},
                     {'name': 'frame_id',  'len': 1,      'default': b'\x00'},
                     {'name': 'command',   'len': 2,      'default': None},
                     {'name': 'parameter', 'len': None,   'default': None}],
                    "remote_at":
                    [{'name': 'id',              'len': 1,        'default': b'\x07'},
                     {'name': 'frame_id',        'len': 1,        'default': b'\x00'},
                     # dest_addr_long is 8 bytes (64 bits), so use an unsigned long long
                     {'name': 'dest_addr_long',  'len': 8,        'default': struct.pack('>Q', 0)},
                     #{'name': 'dest_addr',       'len': 2,        'default': b'\xFF\xFE'},
                     {'name': 'options',         'len': 1,        'default': b'\x02'},
                     {'name': 'command',         'len': 2,        'default': None},
                     {'name': 'parameter',       'len': None,     'default': None}],
                    "tx_long_addr":
                    [{'name': 'id',        'len': 1,      'default': b'\x00'},
                     {'name': 'frame_id',  'len': 1,      'default': b'\x00'},
                     {'name': 'dest_addr', 'len': 8,      'default': None},
                     {'name': 'options',   'len': 1,      'default': b'\x00'},
                     {'name': 'data',      'len': None,   'default': None}],
                    "tx_64":
                    [{'name': 'id',        'len': 1,      'default': b'\x00'},
                     {'name': 'frame_id',  'len': 1,      'default': b'\x00'},
                     {'name': 'dest_addr', 'len': 8,      'default': None},
                     {'name': 'options',   'len': 1,      'default': b'\x00'},
                     {'name': 'data',      'len': None,   'default': None}],
                    "tx":
                    [{'name': 'id',        'len': 1,      'default': b'\x01'},
                     {'name': 'frame_id',  'len': 1,      'default': b'\x00'},
                     {'name': 'dest_addr', 'len': 2,      'default': None},
                     {'name': 'options',   'len': 1,      'default': b'\x00'},
                     {'name': 'data',      'len': None,   'default': None}]
                    }
    # Packets which can be received from an XBee
    # Format:
    #        {id byte received from XBee:
    #           {name: name of response
    #            structure:
    #                [ {'name': name of field, 'len':length of field}
    #                  ...
    #                  ]
    #            parsing: [(name of field to parse,
    #                        function which accepts an xbee object and the
    #                        partially-parsed dictionary of data received
    #                        and returns bytes to replace the
    #                        field to parse's data with
    #                        )]},
    #           }
    #           ...
    #        }
    #
    api_responses = {b"\x80":
                     {'name': 'wifi_rx_64',
                      'structure':
                      [{'name': 'source_addr', 'len': 8},
                       {'name': 'rssi',        'len': 1},
                       {'name': 'options',     'len': 1},
                       {'name': 'rf_data',     'len': None}]},
                     b"\x81":
                     {'name': 'rx',
                      'structure':
                      [{'name': 'source_addr', 'len': 2},
                       {'name': 'rssi',        'len': 1},
                       {'name': 'options',     'len': 1},
                       {'name': 'rf_data',     'len': None}]},
                     b"\x82":
                     {'name': 'rx_io_data_long_addr',
                      'structure':
                      [{'name': 'source_addr_long', 'len': 8},
                       {'name': 'rssi',             'len': 1},
                       {'name': 'options',          'len': 1},
                       {'name': 'samples',          'len': None}],
                      'parsing': [('samples',
                                   lambda xbee, original: xbee._parse_samples(original['samples'])
                                   )]},
                     b"\xb0":
                     {'name': 'wifi_rx_ipv4',
                      'structure':
                      [{'name': 'src_ip',    'len': 4},
                       {'name': 'dest_port', 'len': 4},
                       {'name': 'src_port',  'len': 4},
                       {'name': 'protocol',  'len': 1},
                       {'name': 'rf_data',   'len': None}]},
                     b"\x83":
                     {'name': 'rx_io_data',
                      'structure':
                      [{'name': 'source_addr', 'len': 2},
                       {'name': 'rssi',        'len': 1},
                       {'name': 'options',     'len': 1},
                       {'name': 'samples',     'len': None}],
                      'parsing': [('samples',
                                   lambda xbee, original: xbee._parse_samples(original['samples'])
                                   )]},
                     b"\x8f":
                     {'name': 'wifi_rx_io_data',
                      'structure':
                      [{'name': 'source_addr_long', 'len': 8},
                       {'name': 'rssi',             'len': 1},
                       {'name': 'options',          'len': 1},
                       {'name': 'samples',          'len': None}],
                      'parsing': [('samples',
                                   lambda xbee, original: xbee._wifi_parse_samples(original['samples'])
                                   )]},
                     b"\x89":
                     {'name': 'tx_status',
                      'structure':
                      [{'name': 'frame_id', 'len': 1},
                       {'name': 'status',   'len': 1}]},
                     b"\x8a":
                     {'name': 'status',
                      'structure':
                      [{'name': 'status', 'len': 1}]},
                     b"\x88":
                     {'name': 'at_response',
                      'structure':
                      [{'name': 'frame_id',  'len': 1},
                       {'name': 'command',   'len': 2},
                       {'name': 'status',    'len': 1},
                       {'name': 'parameter', 'len': None}],
                      'parsing': [('parameter',
                                   lambda xbee, original: xbee._parse_IS_at_response(original))]
                      },
                     b"\x87":
                     {'name': 'wifi_remote_at_response',
                      'structure':
                      [{'name': 'frame_id',         'len': 1},
                       {'name': 'source_addr_long', 'len': 8},
                       {'name': 'command',          'len': 2},
                       {'name': 'status',           'len': 1},
                       {'name': 'parameter',        'len': None}],
                      'parsing': [('parameter',
                                   lambda xbee, original: xbee._parse_IS_at_response(original))]
                      },
                     b"\x97":
                     {'name': 'remote_at_response',
                      'structure':
                      [{'name': 'frame_id',         'len': 1},
                       {'name': 'source_addr_long', 'len': 8},
                       {'name': 'source_addr',      'len': 2},
                       {'name': 'command',          'len': 2},
                       {'name': 'status',           'len': 1},
                       {'name': 'parameter',        'len': None}],
                      'parsing': [('parameter',
                                   lambda xbee, original: xbee._parse_IS_at_response(original))]
                      },
                     }

    def _parse_IS_at_response(self, packet_info):
        """
        If the given packet is a successful remote AT response for an IS
        command, parse the parameter field as IO data.
        """
        if packet_info['id'] in ('at_response', 'remote_at_response', 'wifi_remote_at_response') and packet_info['command'].lower() == b'is' and \
                packet_info['status'] == b'\x00':
            return self._parse_samples(packet_info['parameter'])
        else:
            return packet_info['parameter']

    def _wifi_parse_samples_header(self, io_bytes):
        """
        _parse_samples_header: binary data in XBee IO data format ->
            (int, [int ...], [int ...], int, int)
        _parse_samples_header will read the first four bytes of the
        binary data given and will return the number of samples which
        follow, a list of enabled digital inputs, a list of enabled
        analog inputs, the dio_mask, and the size of the header in bytes
        """
        header_size = 4
        # number of samples (always 1?) is the first byte
        sample_count = byteToInt(io_bytes[0])
        # bytes 1 and 2 form the 16-bit DIO mask (big-endian)
        dio_mask = (byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2]))
        # byte 3 is the AIO mask
        aio_mask = byteToInt(io_bytes[3])
        # sorted lists of enabled channels; value is position of bit in mask
        # NOTE(review): only bits 0-8 (DIO) and 0-6 (AIO) are scanned even
        # though the DIO mask is 16 bits wide — confirm against the firmware
        # channel count.
        dio_chans = []
        aio_chans = []
        for i in range(0,9):
            if dio_mask & (1 << i):
                dio_chans.append(i)
        dio_chans.sort()
        for i in range(0,7):
            if aio_mask & (1 << i):
                aio_chans.append(i)
        aio_chans.sort()
        return (sample_count, dio_chans, aio_chans, dio_mask, header_size)

    def _wifi_parse_samples(self, io_bytes):
        """
        _parse_samples: binary data in XBee IO data format ->
                        [ {"dio-0":True,
                           "dio-1":False,
                           "adc-0":100"}, ...]
        _parse_samples reads binary data from an XBee device in the IO
        data format specified by the API. It will then return a
        dictionary indicating the status of each enabled IO port.
        """
        sample_count, dio_chans, aio_chans, dio_mask, header_size = \
            self._wifi_parse_samples_header(io_bytes)
        samples = []
        # split the sample data into a list, so it can be pop()'d
        sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]]
        # repeat for every sample provided
        for sample_ind in range(0, sample_count):
            tmp_samples = {}
            if dio_chans:
                # we have digital data: a 16-bit big-endian word per sample
                digital_data_set = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                digital_values = dio_mask & digital_data_set
                for i in dio_chans:
                    tmp_samples['dio-{0}'.format(i)] = True if (digital_values >> i) & 1 else False
            # each enabled analog channel contributes one 16-bit reading
            for i in aio_chans:
                analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
                tmp_samples['adc-{0}'.format(i)] = analog_sample
            samples.append(tmp_samples)
        return samples

    def __init__(self, *args, **kwargs):
        """Forward all arguments (serial port, callbacks, ...) to XBeeBase."""
        # Call the super class constructor to save the serial port
        super(XBeeWiFi, self).__init__(*args, **kwargs)
|
import unittest
import numpy as np
from scipy.integrate import cumtrapz
from skfda.datasets import make_gaussian, make_gaussian_process
from skfda.misc.covariances import Gaussian
from skfda.misc.operators import LinearDifferentialOperator
from skfda.misc.regularization import TikhonovRegularization
from skfda.ml.regression import HistoricalLinearRegression, LinearRegression
from skfda.representation.basis import BSpline, FDataBasis, Fourier, Monomial
from skfda.representation.grid import FDataGrid
class TestScalarLinearRegression(unittest.TestCase):
    def test_regression_single_explanatory(self):
        """Recover known Fourier beta coefficients from one functional covariate,
        with and without an intercept."""
        x_basis = Monomial(n_basis=7)
        x_fd = FDataBasis(x_basis, np.identity(7))
        beta_basis = Fourier(n_basis=5)
        beta_fd = FDataBasis(beta_basis, [1, 1, 1, 1, 1])
        # Responses generated from beta_fd, so the fit should recover it exactly.
        y = [0.9999999999999993,
             0.162381381441085,
             0.08527083481359901,
             0.08519946930844623,
             0.09532291032042489,
             0.10550022969639987,
             0.11382675064746171]
        scalar = LinearRegression(coef_basis=[beta_basis])
        scalar.fit(x_fd, y)
        np.testing.assert_allclose(scalar.coef_[0].coefficients,
                                   beta_fd.coefficients)
        np.testing.assert_allclose(scalar.intercept_,
                                   0.0, atol=1e-6)
        y_pred = scalar.predict(x_fd)
        np.testing.assert_allclose(y_pred, y)
        # Same fit with the intercept disabled; intercept_ must be exactly 0.
        scalar = LinearRegression(coef_basis=[beta_basis],
                                  fit_intercept=False)
        scalar.fit(x_fd, y)
        np.testing.assert_allclose(scalar.coef_[0].coefficients,
                                   beta_fd.coefficients)
        np.testing.assert_equal(scalar.intercept_,
                                0.0)
        y_pred = scalar.predict(x_fd)
        np.testing.assert_allclose(y_pred, y)
    def test_regression_multiple_explanatory(self):
        """Fit a BSpline coefficient function and check it reproduces y to 0.01."""
        y = [1, 2, 3, 4, 5, 6, 7]
        X = FDataBasis(Monomial(n_basis=7), np.identity(7))
        beta1 = BSpline(domain_range=(0, 1), n_basis=5)
        scalar = LinearRegression(coef_basis=[beta1])
        scalar.fit(X, y)
        np.testing.assert_allclose(scalar.intercept_.round(4),
                                   np.array([32.65]), rtol=1e-3)
        np.testing.assert_allclose(
            scalar.coef_[0].coefficients.round(4),
            np.array([[-28.6443,
                       80.3996,
                       -188.587,
                       236.5832,
                       -481.3449]]), rtol=1e-3)
        y_pred = scalar.predict(X)
        np.testing.assert_allclose(y_pred, y, atol=0.01)
    def test_regression_mixed(self):
        """Mix multivariate and functional covariates and recover the known
        coefficients of both."""
        multivariate = np.array([[0, 0], [2, 7], [1, 7], [3, 9],
                                 [4, 16], [2, 14], [3, 5]])
        X = [multivariate,
             FDataBasis(Monomial(n_basis=3), [[1, 0, 0], [0, 1, 0], [0, 0, 1],
                                              [1, 0, 1], [1, 0, 0], [0, 1, 0],
                                              [0, 0, 1]])]
        # y = 2 + sum([3, 1] * array) + int(3 * function)
        intercept = 2
        coefs_multivariate = np.array([3, 1])
        coefs_functions = FDataBasis(
            Monomial(n_basis=3), [[3, 0, 0]])
        y_integral = np.array([3, 3 / 2, 1, 4, 3, 3 / 2, 1])
        y_sum = multivariate @ coefs_multivariate
        y = 2 + y_sum + y_integral
        scalar = LinearRegression()
        scalar.fit(X, y)
        np.testing.assert_allclose(scalar.intercept_,
                                   intercept, atol=0.01)
        np.testing.assert_allclose(
            scalar.coef_[0],
            coefs_multivariate, atol=0.01)
        np.testing.assert_allclose(
            scalar.coef_[1].coefficients,
            coefs_functions.coefficients, atol=0.01)
        y_pred = scalar.predict(X)
        np.testing.assert_allclose(y_pred, y, atol=0.01)
    def test_regression_mixed_regularization(self):
        """Mixed covariates with per-covariate Tikhonov regularization; the
        expected (shrunken) coefficients are precomputed reference values."""
        multivariate = np.array([[0, 0], [2, 7], [1, 7], [3, 9],
                                 [4, 16], [2, 14], [3, 5]])
        X = [multivariate,
             FDataBasis(Monomial(n_basis=3), [[1, 0, 0], [0, 1, 0], [0, 0, 1],
                                              [1, 0, 1], [1, 0, 0], [0, 1, 0],
                                              [0, 0, 1]])]
        # y = 2 + sum([3, 1] * array) + int(3 * function)
        intercept = 2
        coefs_multivariate = np.array([3, 1])
        y_integral = np.array([3, 3 / 2, 1, 4, 3, 3 / 2, 1])
        y_sum = multivariate @ coefs_multivariate
        y = 2 + y_sum + y_integral
        scalar = LinearRegression(
            regularization=[TikhonovRegularization(lambda x: x),
                            TikhonovRegularization(
                                LinearDifferentialOperator(2))])
        scalar.fit(X, y)
        np.testing.assert_allclose(scalar.intercept_,
                                   intercept, atol=0.01)
        np.testing.assert_allclose(
            scalar.coef_[0],
            [2.536739, 1.072186], atol=0.01)
        np.testing.assert_allclose(
            scalar.coef_[1].coefficients,
            [[2.125676, 2.450782, 5.808745e-4]], atol=0.01)
        y_pred = scalar.predict(X)
        np.testing.assert_allclose(
            y_pred,
            [5.349035, 16.456464, 13.361185, 23.930295,
             32.650965, 23.961766, 16.29029],
            atol=0.01)
    def test_regression_regularization(self):
        """Compare fits with and without second-derivative Tikhonov
        regularization against precomputed reference coefficients."""
        x_basis = Monomial(n_basis=7)
        x_fd = FDataBasis(x_basis, np.identity(7))
        beta_basis = Fourier(n_basis=5)
        beta_fd = FDataBasis(beta_basis, [1.0403, 0, 0, 0, 0])
        y = [1.0000684777229512,
             0.1623672257830915,
             0.08521053851548224,
             0.08514200869281137,
             0.09529138749665378,
             0.10549625973303875,
             0.11384314859153018]
        y_pred_compare = [0.890341,
                          0.370162,
                          0.196773,
                          0.110079,
                          0.058063,
                          0.023385,
                          -0.001384]
        scalar = LinearRegression(
            coef_basis=[beta_basis],
            regularization=TikhonovRegularization(
                LinearDifferentialOperator(2)))
        scalar.fit(x_fd, y)
        np.testing.assert_allclose(scalar.coef_[0].coefficients,
                                   beta_fd.coefficients, atol=1e-3)
        np.testing.assert_allclose(scalar.intercept_,
                                   -0.15, atol=1e-4)
        y_pred = scalar.predict(x_fd)
        np.testing.assert_allclose(y_pred, y_pred_compare, atol=1e-4)
        # Second scenario: exact polynomial responses for a Monomial basis.
        x_basis = Monomial(n_basis=3)
        x_fd = FDataBasis(x_basis, [[1, 0, 0],
                                    [0, 1, 0],
                                    [0, 0, 1],
                                    [2, 0, 1]])
        beta_fd = FDataBasis(x_basis, [3, 2, 1])
        y = [1 + 13 / 3, 1 + 29 / 12, 1 + 17 / 10, 1 + 311 / 30]
        # Non regularized
        scalar = LinearRegression()
        scalar.fit(x_fd, y)
        np.testing.assert_allclose(scalar.coef_[0].coefficients,
                                   beta_fd.coefficients)
        np.testing.assert_allclose(scalar.intercept_,
                                   1)
        y_pred = scalar.predict(x_fd)
        np.testing.assert_allclose(y_pred, y)
        # Regularized
        beta_fd_reg = FDataBasis(x_basis, [2.812, 3.043, 0])
        y_reg = [5.333, 3.419, 2.697, 11.366]
        scalar_reg = LinearRegression(
            regularization=TikhonovRegularization(
                LinearDifferentialOperator(2)))
        scalar_reg.fit(x_fd, y)
        np.testing.assert_allclose(scalar_reg.coef_[0].coefficients,
                                   beta_fd_reg.coefficients, atol=0.001)
        np.testing.assert_allclose(scalar_reg.intercept_,
                                   0.998, atol=0.001)
        y_pred = scalar_reg.predict(x_fd)
        np.testing.assert_allclose(y_pred, y_reg, atol=0.001)
def test_error_X_not_FData(self):
    """Fitting with a non-FData covariate must emit a UserWarning."""
    covariates = [np.identity(7)]
    response = np.zeros(7)
    model = LinearRegression(coef_basis=[Fourier(n_basis=5)])
    with np.testing.assert_warns(UserWarning):
        model.fit(covariates, response)
def test_error_y_is_FData(self):
    """Fitting must fail when the response itself is functional data."""
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    functional_response = list(FDataBasis(Monomial(n_basis=7), np.identity(7)))
    model = LinearRegression(coef_basis=[Fourier(n_basis=5)])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], functional_response)
def test_error_X_beta_len_distinct(self):
    """The number of coefficient bases must match the number of covariates."""
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    response = [1] * 7
    basis = Fourier(n_basis=5)

    # More covariates than bases.
    model = LinearRegression(coef_basis=[basis])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate, covariate], response)

    # More bases than covariates.
    model = LinearRegression(coef_basis=[basis, basis])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], response)
def test_error_y_X_samples_different(self):
    """Sample counts of the response and the covariates must agree."""
    basis = Fourier(n_basis=5)

    # 7 functional samples but 8 responses.
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    model = LinearRegression(coef_basis=[basis])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], [1] * 8)

    # 8 functional samples but 7 responses.
    covariate = FDataBasis(Monomial(n_basis=8), np.identity(8))
    model = LinearRegression(coef_basis=[basis])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], [1] * 7)
def test_error_beta_not_basis(self):
    """Coefficient bases must be Basis instances, not FDataBasis objects."""
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    response = [1] * 7
    not_a_basis = FDataBasis(Monomial(n_basis=7), np.identity(7))
    model = LinearRegression(coef_basis=[not_a_basis])
    with np.testing.assert_raises(TypeError):
        model.fit([covariate], response)
def test_error_weights_lenght(self):
    """The weight vector length must equal the number of samples.

    NOTE(review): the method name keeps the historical "lenght" typo so
    that any selection of tests by name keeps matching.
    """
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    response = [1] * 7
    weights = [1] * 8  # one weight too many
    model = LinearRegression(coef_basis=[Monomial(n_basis=7)])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], response, weights)
def test_error_weights_negative(self):
    """Sample weights must be non-negative."""
    covariate = FDataBasis(Monomial(n_basis=7), np.identity(7))
    response = [1] * 7
    weights = [-1] * 7
    model = LinearRegression(coef_basis=[Monomial(n_basis=7)])
    with np.testing.assert_raises(ValueError):
        model.fit([covariate], response, weights)
class TestHistoricalLinearRegression(unittest.TestCase):
    """Tests for historical linear regression."""

    def setUp(self) -> None:
        """Generate data according to the model."""
        # Fixed seed keeps the synthetic processes (and the fitted models
        # checked below) reproducible across runs.
        self.random = np.random.RandomState(1)
        self.n_samples = 50
        self.n_features = 20
        # Functional intercept: a single Gaussian-process trajectory.
        self.intercept = make_gaussian_process(
            n_samples=1,
            n_features=self.n_features,
            cov=Gaussian(length_scale=0.4),
            random_state=self.random,
        )
        # First functional covariate, 50 sample curves.
        self.X = make_gaussian_process(
            n_samples=self.n_samples,
            n_features=self.n_features,
            cov=Gaussian(length_scale=0.4),
            random_state=self.random,
        )
        # Bivariate coefficient surface b(s, t) sampled on the unit square.
        self.coefficients = make_gaussian(
            n_samples=1,
            grid_points=[np.linspace(0, 1, self.n_features)] * 2,
            cov=Gaussian(length_scale=1),
            random_state=self.random,
        )
        # Second covariate / coefficient pair for the vector-valued model.
        self.X2 = make_gaussian_process(
            n_samples=self.n_samples,
            n_features=self.n_features,
            cov=Gaussian(length_scale=0.4),
            random_state=self.random,
        )
        self.coefficients2 = make_gaussian(
            n_samples=1,
            grid_points=[np.linspace(0, 1, self.n_features)] * 2,
            cov=Gaussian(length_scale=1),
            random_state=self.random,
        )
        self.create_model()
        self.create_vectorial_model()

    def create_model_no_intercept(
        self,
        X: FDataGrid,
        coefficients: FDataGrid,
    ) -> FDataGrid:
        """Create a functional response according to historical model."""
        # Integrand X(s) * b(s, t) evaluated on the (s, t) grid.
        integral_body = (
            X.data_matrix[..., 0, np.newaxis]
            * coefficients.data_matrix[..., 0]
        )
        # Cumulative trapezoidal integral over s, for every t ...
        integral_matrix = cumtrapz(
            integral_body,
            x=X.grid_points[0],
            initial=0,
            axis=1,
        )
        # ... taken at s = t (the diagonal), i.e. integration over [0, t].
        integral = np.diagonal(integral_matrix, axis1=1, axis2=2)
        return X.copy(data_matrix=integral)

    def create_model(self) -> None:
        """Create a functional response according to historical model."""
        model_no_intercept = self.create_model_no_intercept(
            X=self.X,
            coefficients=self.coefficients,
        )
        self.y = model_no_intercept + self.intercept

    def create_vectorial_model(self) -> None:
        """Create a functional response according to historical model."""
        model_no_intercept = self.create_model_no_intercept(
            X=self.X,
            coefficients=self.coefficients,
        )
        model_no_intercept2 = self.create_model_no_intercept(
            X=self.X2,
            coefficients=self.coefficients2,
        )
        # The response depends on both covariates plus the shared intercept.
        self.y2 = model_no_intercept + model_no_intercept2 + self.intercept

    def test_historical(self) -> None:
        """Test historical regression with data following the model."""
        regression = HistoricalLinearRegression(n_intervals=6)
        fit_predict_result = regression.fit_predict(self.X, self.y)
        predict_result = regression.predict(self.X)
        # fit_predict and a subsequent predict must agree exactly.
        np.testing.assert_allclose(
            predict_result.data_matrix,
            fit_predict_result.data_matrix,
        )
        np.testing.assert_allclose(
            predict_result.data_matrix,
            self.y.data_matrix,
            rtol=1e-1,
        )
        np.testing.assert_allclose(
            regression.intercept_.data_matrix,
            self.intercept.data_matrix,
            rtol=1e-3,
        )
        # Compared against the upper triangle of the true surface —
        # presumably only the s <= t region enters the historical model.
        np.testing.assert_allclose(
            regression.coef_.data_matrix[0, ..., 0],
            np.triu(self.coefficients.data_matrix[0, ..., 0]),
            atol=0.3,
            rtol=0,
        )

    def test_historical_vectorial(self) -> None:
        """Test historical regression with data following the vector model."""
        X = self.X.concatenate(self.X2, as_coordinates=True)
        regression = HistoricalLinearRegression(n_intervals=10)
        fit_predict_result = regression.fit_predict(X, self.y2)
        predict_result = regression.predict(X)
        np.testing.assert_allclose(
            predict_result.data_matrix,
            fit_predict_result.data_matrix,
        )
        np.testing.assert_allclose(
            predict_result.data_matrix,
            self.y2.data_matrix,
            atol=1e-1,
            rtol=0,
        )
        np.testing.assert_allclose(
            regression.intercept_.data_matrix,
            self.intercept.data_matrix,
            rtol=1e-2,
        )
        # Coefficient matrix not tested as it is probably
        # an ill-posed problem
if __name__ == '__main__':
    # Blank line before the unittest output (presumably cosmetic).
    print()
    unittest.main()
|
#!/usr/bin/env python3
# This script generates radio/radio_446x_conf.h
#
# Silabs' WDS software allows to export a header file containing register values
# for a specific configuration of the radio. For our use we want to be able to
# switch between couple of those configurations. That's where this script comes in.
# It compares a set of headers generated from WDS and compiles all the register
# values into a somewhat compressed format. As part of this format it singles out
# registers whose value does not change between all the considered configurations.
import glob
import re
# Property bytes collected across all configuration headers:
# {property_key: {filename: byte_value}}, where property_key packs
# (group, offset) as group*256 + offset.
propbytes = dict()
# Properties excluded from the comparison outright.
skip = ['RF_MODEM_RSSI_CONTROL_1', 'RF_MODEM_RSSI_COMP_1',
        'RF_MODEM_CLKGEN_BAND_1']
for fn in glob.glob('*.h'):
    with open(fn, 'r') as f:
        # Each WDS-exported macro looks like:
        #   #define RF_XXX 0x11, <group>, <len>, <offset>, <bytes...>
        for d in re.finditer('\n#define (RF_[^ ]+) (.+)\n', f.read()):
            if d.group(1) in skip:
                continue
            # NOTE(review): eval() on header text — fine for trusted,
            # locally generated WDS headers; never run this script on
            # untrusted input.
            bytes_ = eval('[' + d.group(2) + ']')
            # Presumably 0x11 is the SET_PROPERTY command id; macros with
            # a different leading byte are ignored.
            if bytes_[0] != 0x11:
                continue
            group, off = bytes_[1], bytes_[3]
            # keep MODEM, MODEM_CHFLT, PA, SYNTH
            if group < 0x20 or group >= 0x30:
                continue
            # Record every payload byte under its own (group, offset) key.
            for i, v in enumerate(bytes_[4:]):
                k = group*256 + (off + i)
                if k not in propbytes:
                    propbytes[k] = dict()
                propbytes[k][fn] = v
def print_bytes(bytes_):
    """Print *bytes_* as comma-separated 0x-prefixed hex literals, no newline."""
    formatted = ("0x%02x" % value for value in bytes_)
    print(", ".join(formatted), end="")
def ids_list(indices):
    """Run-length encode a collection of property keys.

    Each maximal run of consecutive keys is emitted as one or more
    (count, group, offset) triples, where count is capped at 12 (the
    maximum payload of a single SI446x SET_PROPERTY command) and the key
    packs group*256 + offset. The list is terminated by a single 0.

    Keys are assumed unique (they come from dict keys).
    """
    indices = sorted(indices)
    index_set = set(indices)  # O(1) membership instead of list scans
    ret = []
    for k in indices:
        if k - 1 in index_set:
            continue  # not the start of a run
        # Length of the maximal consecutive run starting at k.
        run_len = 1
        while k + run_len in index_set:
            run_len += 1
        # Emit the run in chunks of at most 12 bytes.
        while run_len > 0:
            chunk = min(run_len, 12)
            ret += [chunk, k // 256, k % 256]
            run_len -= chunk
            k += chunk
    return ret + [0]
# Properties whose value is identical in every configuration file.
shared = sorted([k for k in propbytes if len(set(propbytes[k].values())) == 1])
print("/* see tools/build_si446x_table.py */\n")
print("__code static const uint8_t shared_prop_ids[] = {")
print("\t", end="")
print_bytes(ids_list(shared))
print("\n};\n")
print("__code static const uint8_t shared_prop_vals[] = {")
print("\t", end="")
# Any file's value will do: they are all equal by construction.
print_bytes([list(propbytes[k].values())[0] for k in shared])
print("\n};\n")
# Properties that differ between configurations: the id list is emitted
# once, followed by a per-band table with one row of values per data rate.
variable = sorted([k for k in propbytes if len(set(propbytes[k].values())) != 1])
print("__code static const uint8_t variable_prop_ids[] = {")
print("\t", end="")
print_bytes(ids_list(variable))
print("\n};\n")
bands = ["433", "868", "915"]
rates = ["2", "4", "8", "16", "19", "24", "32",
         "48", "64", "96", "128", "192", "250"]
for band in bands:
    print("__code static const uint8_t band_%s_prop_vals[%d][%d] = {" \
          % (band, len(rates), len(variable)))
    for rate in rates:
        # Input headers are expected to be named like "868_48kbps.h".
        fn = "%s_%skbps.h" % (band, rate)
        print("\t{", end="")
        print_bytes([propbytes[k][fn] for k in variable])
        # No trailing comma after the last row.
        print("}%s" % ("" if rate=="250" else ","), end="\n")
    print("};\n")
|
"""
OpenVINO DL Workbench
Tool to reshape model to a new shape
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
from pathlib import Path
from typing import Dict, List
from defusedxml import ElementTree
from openvino.runtime import Core, PartialShape, Model
from openvino.runtime.passes import Manager
def parse_arguments():
    """Parse command-line arguments; --config points to the JSON job config."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('--config', required=True, type=Path)
    return argument_parser.parse_args()
def load_config(config_path: Path) -> dict:
    """Read and deserialize the JSON tool configuration."""
    return json.loads(config_path.read_text())
def construct_shape_configuration(inputs_configuration: List[Dict]) -> Dict[str, PartialShape]:
    """Map each configured input's index to its requested PartialShape."""
    shape_per_input = {}
    for input_configuration in inputs_configuration:
        shape_per_input[input_configuration['index']] = PartialShape(
            input_configuration['shape'])
    return shape_per_input
def reshape_model(xml_path: Path, bin_path: Path, shape_configuration: Dict[str, List[int]]) -> Model:
    """Load the IR from disk and reshape the selected inputs."""
    model: Model = Core().read_model(
        model=str(xml_path),
        weights=str(bin_path),
    )
    # model.reshape expects a mapping keyed by the input nodes' outputs.
    reshape_request = {
        model.inputs[input_index].node.output(0): new_shape
        for input_index, new_shape in shape_configuration.items()
    }
    model.reshape(reshape_request)
    return model
def save_model_as_ir(model: Model, network_file_name: str, output_dir: Path):
    """Serialize *model* as OpenVINO IR (<name>.xml / <name>.bin) into *output_dir*."""
    output_dir.mkdir(exist_ok=True)
    base_path = output_dir / network_file_name
    serialize_manager = Manager()
    # The Serialize pass is registered with byte-string paths.
    serialize_manager.register_pass(
        'Serialize',
        f'{base_path}.xml'.encode('UTF-8'),
        f'{base_path}.bin'.encode('UTF-8'),
    )
    serialize_manager.run_passes(model)
# TODO: Looks like the class to report progress can be reused in another tools, we need to generalize it and make shared
class _ProgressReporter:
    """Prints percentage progress lines of the form '<header>: <NN>%'.

    Progress is only reported when it has advanced by at least
    ``progress_step`` percent since the last report.
    """

    def __init__(self, log_header: str, total_steps: int, progress_step: int = 1):
        self._log_header = log_header
        self._prev_progress = 0  # last percentage actually reported
        self._total_steps = total_steps
        self._current_step = 0
        self._progress_step = progress_step  # minimum percent delta to log

    def _log_progress(self):
        # BUG FIX: this class previously mixed ``prev_progress`` and
        # ``_prev_progress``: next_step() compared against _prev_progress
        # but wrote prev_progress, so the baseline never advanced and
        # every step after the first was logged regardless of
        # progress_step. Use the single attribute consistently.
        print(f'{self._log_header}: {self._prev_progress}%')

    def next_step(self):
        """Advance one step and log if the percentage moved enough."""
        self._current_step += 1
        progress = int(self._current_step * (100 / self._total_steps))
        if progress - self._prev_progress >= self._progress_step:
            self._prev_progress = progress
            self._log_progress()
def main(config: dict):
    """Run the reshape job described by *config*, reporting progress.

    Expected keys: ``xml_path``, ``bin_path``, ``dump_reshaped_model``,
    ``inputs_shape_configuration`` and, when dumping is requested,
    ``output_dir``.
    """
    progress_reporter = _ProgressReporter(log_header='[RESHAPE TOOL]',
                                          total_steps=4)
    progress_reporter.next_step()
    xml_path = Path(config['xml_path'])
    bin_path = Path(config['bin_path'])
    dump_reshaped_model = config['dump_reshaped_model']
    inputs_shape_configuration = config['inputs_shape_configuration']
    shape_configuration = construct_shape_configuration(inputs_shape_configuration)
    progress_reporter.next_step()
    reshaped_function = reshape_model(xml_path, bin_path, shape_configuration)
    progress_reporter.next_step()
    if dump_reshaped_model:
        output_dir = Path(config['output_dir'])
        # Preserve the original IR's <meta_data> section, which the
        # serialization of the reshaped model does not carry over.
        old_model_content = ElementTree.parse(xml_path)
        metadata = old_model_content.find('./meta_data')
        save_model_as_ir(reshaped_function, xml_path.stem, output_dir)
        # BUG FIX: was ``if metadata:`` — an Element with no children is
        # falsy (Element truthiness follows len()), silently dropping an
        # empty-but-present meta_data section. Test for None explicitly.
        if metadata is not None:
            new_model_content = ElementTree.parse(output_dir / xml_path.name)
            new_model_content.getroot().append(metadata)
            new_model_content.write(output_dir / xml_path.name)
    progress_reporter.next_step()
if __name__ == '__main__':
    ARGUMENTS = parse_arguments()
    CONFIGURATION = load_config(ARGUMENTS.config)
    try:
        main(CONFIGURATION)
    except RuntimeError as e:
        # OpenVINO reports invalid/unsupported reshapes as RuntimeError;
        # print a short message and signal failure via the exit code.
        print(f'\n During the model reshape process, OpenVINO runtime error occurred:\n {str(e)}')
        exit(1)
|
## adopted from Aaron Sharp's code. Modified to work with tab delimited file.
import sys

# Complement lookup: only A/C/G/T (either case) are recognized; any other
# character is silently dropped, matching the original behaviour.
COMPLEMENT = {
    'a': 'T', 'A': 'T',
    'c': 'G', 'C': 'G',
    'g': 'C', 'G': 'C',
    't': 'A', 'T': 'A',
}

if len(sys.argv) != 2:
    print("USAGE: python revcomp_rdp_format.py RPD_MAPPING_FILE.txt > RPD_MAPPING_FILE_REV.txt")
    sys.exit()

# Input: tab-delimited lines of "<tag>\t<sample>". Output: the reverse
# complement of each tag, tab, the sample name, and a trailing tab
# (trailing tab kept for compatibility with the original output format).
with open(sys.argv[1]) as fh:  # was left unclosed before
    for line in fh:
        fields = line.strip().split('\t')
        tag, sample = fields[0], fields[1]
        # Complement first, then reverse, to get the reverse complement.
        complemented = ''.join(COMPLEMENT[base] for base in tag if base in COMPLEMENT)
        print('%s\t%s\t' % (complemented[::-1], sample))
|
import time
import model
from sqlalchemy import orm
from TrackedFile import *
class FilesDb(object):
    """Persistence layer for TrackedFile rows stored in a local SQLite file.

    NOTE(review): the session factory is stored on the *class*
    (FilesDb.Session), so constructing a second FilesDb rebinds it for all
    instances — confirm only one FilesDb is created per process.
    """

    def __init__(self, **kwargs):
        # dbfile: SQLite database path (defaults to a hidden file in cwd).
        self.dbfile = kwargs.get('dbfile', '.zeng.sqlite')
        from sqlalchemy import create_engine
        engine = 'sqlite:///' + self.dbfile
        # check_same_thread=False lets connections be used from threads
        # other than the creating one (sessions are scoped below).
        self.engine = create_engine(engine,
                                    connect_args={'check_same_thread': False})
        from sqlalchemy.orm import sessionmaker
        FilesDb.Session = orm.scoping.scoped_session(sessionmaker())
        FilesDb.Session.configure(bind=self.engine)

    def create(self):
        """Create all mapped tables if they do not exist yet."""
        model.Model.metadata.create_all(self.engine)

    def makeSession(self, dbSession=None):
        """Return *dbSession* if given, else a new scoped session."""
        return dbSession if dbSession is not None else FilesDb.Session()

    def get(self, dbSession=None, **kwargs):
        """Fetch one TrackedFile by its ``filename`` kwarg, or None."""
        s = self.makeSession(dbSession)
        filename = kwargs.get('filename')
        return s.query(TrackedFile).filter(
            TrackedFile.filename == filename).one_or_none()

    def list(self, dbSession=None):
        """Return every TrackedFile row."""
        s = self.makeSession(dbSession)
        return s.query(TrackedFile).all()

    def listByName(self):
        """Return all tracked files indexed by name (see TrackedFile)."""
        return TrackedFile.index_by_name(self.list())

    def save(self, file, dbSession=None, autocommit=True):
        """Insert-or-update *file*; commits unless autocommit is False."""
        s = self.makeSession(dbSession)
        s.merge(file)
        if autocommit:
            s.commit()

    def saveAll(self, files, dbSession=None):
        """Persist *files* with a single commit at the end."""
        s = self.makeSession(dbSession)
        for f in files:
            self.save(f, s, False)
        s.commit()
def main():
    """Smoke test: create the schema, persist two files, and dump the table."""
    files_db = FilesDb()
    files_db.create()
    print("files: ", files_db.list())
    tracked = [
        TrackedFile('zeng-peer/db.py', status=FileStatus.Synced),
        TrackedFile('README.md', status=FileStatus.Removed),
    ]
    files_db.saveAll(tracked)
    print("files: ", files_db.list())
    print("files by name: ", files_db.listByName())


if __name__ == "__main__":
    main()
|
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Timeline(models.Model):
    """Status timeline of a submission.

    ``state`` stores the ordered list of two-letter status codes the item
    has passed through; every new row starts in the "Received" state.
    """

    # Two-letter status codes stored in the database.
    RECEIVED = "RE"
    TURNED_DOWN = "TU"
    IN_EVALUATION = "EV"
    REJECTED = "RJ"
    IN_DEVELOPMENT = "DE"
    DISCONTINUED = "DI"
    COMPLETED = "CO"
    STATUS_CHOICES = (
        (RECEIVED, "Received"),
        (TURNED_DOWN, "Turned down"),
        (IN_EVALUATION, "In evaluation"),
        (REJECTED, "Rejected"),
        (IN_DEVELOPMENT, "In development"),
        (DISCONTINUED, "Discontinued"),
        (COMPLETED, "Completed"),
    )

    def get_default_state():
        # Deliberately a plain zero-argument function: Django calls the
        # field default with no arguments. A callable default guarantees
        # each new row gets its own fresh list (a shared literal default
        # would be mutated across instances).
        # Simplified from the redundant list(["RE"]) and tied to the
        # named constant for consistency.
        return [Timeline.RECEIVED]

    state = ArrayField(
        models.CharField(
            max_length=2,
            blank=True,
            choices=STATUS_CHOICES,
        ),
        default=get_default_state,
        blank=False,
    )
|
def filereader(filename):
    """Parse a HashCode photo-slideshow input file.

    File format: first line is the photo count N; each of the next N lines
    is "<H|V> <tag_count> <tag>...".

    Returns:
        hashes: dict mapping tag string -> unique integer id, assigned in
            first-seen order.
        imgs: dict with keys 'H' and 'V'; each value is a list of photos,
            each of the form [sorted tag ids..., photo index], and the
            photo lists are sorted by length (i.e. by tag count).
    """
    hashes = {}
    next_id = 0
    imgs = {
        'H': [],
        'V': []
    }
    # 'with' guarantees the file is closed (it was leaked before).
    with open(filename) as f:
        photo_count = int(f.readline())
        for index in range(photo_count):
            tokens = f.readline().split(" ")
            orientation = tokens[0]
            tag_count = int(tokens[1])
            tag_ids = []
            for raw_tag in tokens[2:2 + tag_count]:
                tag = raw_tag.strip()
                # Explicit membership test instead of the previous bare
                # ``except:``, which silently swallowed every exception.
                if tag not in hashes:
                    hashes[tag] = next_id
                    next_id += 1
                tag_ids.append(hashes[tag])
            # Tag ids sorted; the photo's original index goes last.
            tag_ids.sort()
            imgs[orientation].append(tag_ids + [index])
    imgs['H'].sort(key=len)
    imgs['V'].sort(key=len)
    return hashes, imgs
if __name__ == "__main__":
    # Smoke test: parse the example dataset and dump the parsed structures.
    parsed = filereader("testcases/a_example.txt")
    print(parsed)
from ursina.application import pause, resume
from ursina.ursinastuff import destroy
from game import app
from ursina import *
from game import hand
from game.entities.menu import Menu, Exit, PlayGame
#from game.__init__ import voxel
# Module-level UI state: the pause menu and its buttons, or None while closed.
its_menuopen = False
menu = None
exit = None  # NOTE(review): shadows the builtin ``exit``; rename if feasible.
play = None

def update():
    """Per-frame hook: pause-menu toggling and hand animation.

    Presumably called automatically by the Ursina engine every frame
    (global ``update`` convention) — confirm against the engine docs.
    """
    global menu, its_menuopen,exit,play
    if held_keys['escape'] and not its_menuopen:
        # Open the pause menu: free the cursor and stop the simulation.
        mouse.visible = True
        mouse.locked = False
        window.exit_button.visible = True
        pause()
        menu = Menu()
        exit = Exit()
        play = PlayGame()
        exit.on_click = application.quit
        def restart():
            # Resume gameplay: recapture the mouse and tear down the menu.
            mouse.visible = False
            mouse.locked = True
            window.exit_button.visible = False
            destroy(menu)
            destroy(exit)
            destroy(play)
            resume()
        play.on_click = restart
        print("Menu Abierto")
        its_menuopen= True
    elif held_keys['escape'] and its_menuopen:
        # NOTE(review): this branch only drops the references; the menu
        # entities are destroyed (and the game resumed) only via the Play
        # button's restart(). Pressing escape while the menu is open
        # appears to leave the entities alive and the game paused —
        # confirm this is intended.
        print("Menu Cerrado")
        its_menuopen= False
        menu = None
        exit = None
        play = None
    # Hand animation tracks the mouse buttons every frame.
    if held_keys['left mouse'] or held_keys['right mouse']:
        hand.active()
    else:
        hand.passive()
if __name__ == "__main__":
    # Hand control to the Ursina application loop.
    app.run()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.