content stringlengths 5 1.05M |
|---|
from .models import ProductModel, ProductCategory, CommentModel, Cart
from .serializers import (ProductSerializer, UserSerializer, CategorySerializer,
CommentSerializer, CartSerializer)
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework import permissions
from .permissions import IsOwnerOrReadOnly
class CartView(generics.ListCreateAPIView):
    """List all carts, or create a new cart."""
    queryset = Cart.objects.all()
    serializer_class = CartSerializer
    # Anonymous users get read-only access; writes require authentication.
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
class CartDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single cart."""
    queryset = Cart.objects.all()
    serializer_class = CartSerializer
    # Anonymous users get read-only access; writes require authentication.
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
class CommentView(generics.ListCreateAPIView):
    """List the comments for one product, or create a new comment."""

    serializer_class = CommentSerializer

    def get_queryset(self):
        # Restrict the listing to the product captured from the URL kwargs.
        return CommentModel.objects.filter(product=self.kwargs['product'])

    def perform_create(self, serializer):
        # Stamp the new comment with the requesting user as its author.
        serializer.save(author=self.request.user)
class CategoryView(generics.ListAPIView):
    """Read-only list of all product categories."""
    queryset = ProductCategory.objects.all()
    serializer_class = CategorySerializer
class CategoryDetailView(generics.RetrieveAPIView):
    """Read-only detail view for a single product category."""
    queryset = ProductCategory.objects.all()
    serializer_class = CategorySerializer
class ProductView(generics.ListCreateAPIView):
    """List all products, or create a new product."""

    serializer_class = ProductSerializer
    queryset = ProductModel.objects.all()
    # Anonymous users get read-only access; writes require authentication.
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        # Record the requesting user as the product's author on creation.
        serializer.save(author=self.request.user)
class ProductDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single product."""
    queryset = ProductModel.objects.all()
    serializer_class = ProductSerializer
    # Writes additionally require the requester to be the product's owner.
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
class UserList(generics.ListAPIView):
    """Read-only list of all users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
# legacy code
# class ProductDetailView(APIView):
# def get_product(self, pk):
# try:
# return ProductModel.objects.get(pk=pk)
# except ProductModel.DoesNotExist:
# raise Http404
#
# def get(self, request, pk, format=None):
# product = self.get_product(pk=pk)
# serializer = ProductSerializer(product)
# return Response(serializer.data)
#
# def put(self, request, pk, format=None):
# product = self.get_product(pk)
# serializer = ProductSerializer(product, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
# def delete(self, request, pk, format=None):
# product = self.get_product(pk)
# product.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at:
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
"""Provides utilities for determining whether two objects are equivalent under the Ion data model."""
# Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
from datetime import datetime
from decimal import Decimal
from math import isnan
import six
from amazon.ion.core import IonType, Timestamp, TimestampPrecision, MICROSECOND_PRECISION, OffsetTZInfo, Multimap
from amazon.ion.simple_types import _IonNature, IonPyList, IonPyDict, IonPyTimestamp, IonPyNull, IonPySymbol, \
IonPyText, IonPyDecimal, IonPyFloat
from amazon.ion.symbols import SymbolToken
def ion_equals(a, b, timestamps_instants_only=False):
    """Tests two objects for equivalence under the Ion data model.

    When neither operand carries `ion_type`/`annotations`, plain data-model
    equivalence of the values is required. When exactly one operand carries
    them, it must have no annotations and a value equivalent to the other
    operand. When both do, the ion_type and annotations must also match.
    Operand order never matters.

    Args:
        a (object): The first operand.
        b (object): The second operand.
        timestamps_instants_only (Optional[bool]): When True, timestamps are
            equivalent if they simply represent the same instant; when False
            (the default), instant, precision, and offset must all be equal
            per the Ion data model.
    """
    comparison = (_ion_equals_timestamps_instants if timestamps_instants_only
                  else _ion_equals_timestamps_data_model)
    return comparison(a, b)
def _ion_equals_timestamps_instants(a, b):
    # Equivalence with instant-only timestamp comparison; passes itself as the
    # recursion hook so nested timestamps use the same semantics.
    return _ion_equals(a, b, _timestamp_instants_eq, _ion_equals_timestamps_instants)
def _ion_equals_timestamps_data_model(a, b):
    # Equivalence with full Ion data-model timestamp comparison (instant,
    # precision, and offset); passes itself as the recursion hook.
    return _ion_equals(a, b, _timestamps_eq, _ion_equals_timestamps_data_model)
def _ion_equals(a, b, timestamp_comparison_func, recursive_comparison_func):
    """Compares a and b according to the description of the ion_equals method.

    Args:
        a (object): The first operand.
        b (object): The second operand.
        timestamp_comparison_func: Function used to compare two timestamps.
        recursive_comparison_func: Function used to compare nested values of
            containers (keeps the chosen timestamp semantics on recursion).
    """
    for a, b in ((a, b), (b, a)):  # Ensures that operand order does not matter.
        if isinstance(a, _IonNature):
            if isinstance(b, _IonNature):
                # Both operands have _IonNature. Their IonTypes and annotations must be equivalent.
                eq = a.ion_type is b.ion_type and _annotations_eq(a, b)
            else:
                # Only one operand has _IonNature. It cannot be equivalent to the other operand if it has annotations.
                eq = not a.ion_annotations
            if eq:
                # Dispatch on the wrapped Ion type to the matching comparison.
                if isinstance(a, IonPyList):
                    return _sequences_eq(a, b, recursive_comparison_func)
                elif isinstance(a, IonPyDict):
                    return _structs_eq(a, b, recursive_comparison_func)
                elif isinstance(a, IonPyTimestamp):
                    return timestamp_comparison_func(a, b)
                elif isinstance(a, IonPyNull):
                    # An IonPyNull is equivalent to any IonPyNull, or to a raw
                    # None when it is the untyped null (null.null).
                    return isinstance(b, IonPyNull) or (b is None and a.ion_type is IonType.NULL)
                elif isinstance(a, IonPySymbol) or (isinstance(a, IonPyText) and a.ion_type is IonType.SYMBOL):
                    return _symbols_eq(a, b)
                elif isinstance(a, IonPyDecimal):
                    return _decimals_eq(a, b)
                elif isinstance(a, IonPyFloat):
                    return _floats_eq(a, b)
                else:
                    return a == b
            return False
    # Reaching this point means that neither operand has _IonNature.
    for a, b in ((a, b), (b, a)):  # Ensures that operand order does not matter.
        if isinstance(a, list):
            return _sequences_eq(a, b, recursive_comparison_func)
        elif isinstance(a, dict):
            return _structs_eq(a, b, recursive_comparison_func)
        elif isinstance(a, datetime):
            return timestamp_comparison_func(a, b)
        elif isinstance(a, SymbolToken):
            return _symbols_eq(a, b)
        elif isinstance(a, Decimal):
            return _decimals_eq(a, b)
        elif isinstance(a, float):
            return _floats_eq(a, b)
    # Neither operand has a specialized Ion representation; fall back to ==.
    return a == b
def _annotations_eq(a, b):
    # Annotations are ordered sequences of symbols; compare them pairwise.
    return _sequences_eq(a.ion_annotations, b.ion_annotations, _symbols_eq)
def _sequences_eq(a, b, comparison_func):
assert isinstance(a, (list, tuple))
if not isinstance(b, (list, tuple)):
return False
sequence_len = len(a)
if sequence_len != len(b):
return False
for i in range(sequence_len):
if not comparison_func(a[i], b[i]):
return False
return True
def _structs_eq(a, b, comparison_func):
    """Returns True if the two struct operands hold equivalent fields.

    Handles both plain dicts and Multimaps (which may repeat field names);
    element equivalence is delegated to ``comparison_func``.
    """
    assert isinstance(a, (dict, Multimap))
    if not isinstance(b, (dict, Multimap)):
        return False
    dict_len = len(a)
    if dict_len != len(b):
        return False
    # Verify containment in both directions so operand order does not matter.
    for a, b in ((a, b), (b, a)):
        key_iter = six.iterkeys(a)
        while True:
            try:
                key = next(key_iter)
            except StopIteration:
                break
            if key not in b:
                return False
            if isinstance(a, Multimap) and isinstance(b, Multimap):
                # Multimaps can map one key to several values: every value on
                # this side must have some equivalent value on the other side.
                values_a = a.get_all_values(key)
                values_b = b.get_all_values(key)
                if len(values_a) != len(values_b):
                    return False
                for value_a in values_a:
                    if not any(comparison_func(value_a, value_b) for value_b in values_b):
                        return False
            else:
                if not comparison_func(a[key], b[key]):
                    return False
    return True
def _timestamps_eq(a, b):
    """Compares two timestamp operands for equivalence under the Ion data model."""
    assert isinstance(a, datetime)
    if not isinstance(b, datetime):
        return False
    # Local offsets must be equivalent.
    if (a.tzinfo is None) ^ (b.tzinfo is None):
        return False
    if a.utcoffset() != b.utcoffset():
        return False
    for a, b in ((a, b), (b, a)):
        if isinstance(a, Timestamp):
            if isinstance(b, Timestamp):
                # Both operands declare their precisions. They are only equivalent if their precisions are the same.
                if a.precision is b.precision and a.fractional_precision is b.fractional_precision \
                        and a.fractional_seconds == b.fractional_seconds:
                    break
                return False
            elif a.precision is not TimestampPrecision.SECOND or a.fractional_precision != MICROSECOND_PRECISION:
                # Only one of the operands declares its precision. It is only equivalent to the other (a naive datetime)
                # if it has full microseconds precision.
                return False
    # Precisions are compatible; finally compare the instants themselves.
    return a == b
def _timestamp_instants_eq(a, b):
    """Compares two timestamp operands for point-in-time equivalence only."""
    assert isinstance(a, datetime)
    if not isinstance(b, datetime):
        return False
    # datetime's __eq__ can't compare a None offset and a non-None offset. For these equivalence semantics, a None
    # offset (unknown local offset) is treated equivalently to a +00:00.
    if a.tzinfo is None:
        a = a.replace(tzinfo=OffsetTZInfo())
    if b.tzinfo is None:
        b = b.replace(tzinfo=OffsetTZInfo())
    # datetime's __eq__ implementation compares instants; offsets and precision need not be equal.
    return a == b
def _symbols_eq(a, b):
    """Compares two symbol operands (text or SymbolToken) for equivalence.

    Raises:
        ValueError: If two symbols with unknown text are compared and either
            lacks a SID (malformed symbol).
    """
    assert isinstance(a, (six.text_type, SymbolToken))
    if not isinstance(b, (six.text_type, SymbolToken)):
        return False
    # A plain text value compares by text alone; SymbolTokens expose .text.
    a_text = getattr(a, 'text', a)
    b_text = getattr(b, 'text', b)
    if a_text == b_text:
        if a_text is None:
            # Both have unknown text. If they come from a local context, they are equivalent.
            a_location = getattr(a, 'location', None)
            b_location = getattr(b, 'location', None)
            if (a_location is None) ^ (b_location is None):
                return False
            if a_location is not None:
                # Both were imported from shared symbol tables. In this case, they are only equivalent if they were
                # imported from the same position in the same shared symbol table.
                if (a_location.name != b_location.name) or (a_location.position != b_location.position):
                    return False
            a_sid = getattr(a, 'sid', None)
            b_sid = getattr(b, 'sid', None)
            if a_sid is None or b_sid is None:
                raise ValueError('Attempted to compare malformed symbols %s, %s.' % (a, b))
            if (a_sid == 0) ^ (b_sid == 0):
                # SID 0 is only equal to SID 0.
                return False
        return True
    return False
def _decimals_eq(a, b):
assert isinstance(a, Decimal)
if not isinstance(b, Decimal):
return False
if a.is_zero() and b.is_zero():
if a.is_signed() ^ b.is_signed():
# Negative-zero is not equivalent to positive-zero.
return False
# This ensures that both have equal precision.
return a.canonical().compare_total(b.canonical()) == 0
def _is_float_negative_zero(x):
return struct.pack('>d', x) == b'\x80\x00\x00\x00\x00\x00\x00\x00'
def _floats_eq(a, b):
assert isinstance(a, float)
if not isinstance(b, float):
return False
if a == 0 and b == 0:
# Negative-zero is not equivalent to positive-zero.
return not (_is_float_negative_zero(a) ^ _is_float_negative_zero(b))
# nan is always equivalent to nan.
return a == b or (isnan(a) and isnan(b))
|
class UnsupportedRuntimeError(RuntimeError):
    """Signals a runtime that is not supported."""
    pass
class UnsupportedFormatError(ValueError):
    """Signals a format value that is not supported."""
    pass
class UnsupportedArchError(ValueError):
    """Signals an architecture value that is not supported."""
    pass
class UnsupportedOSError(ValueError):
    """Signals an operating-system value that is not supported."""
    pass
|
from django.contrib import admin
from placeholdr.models import Place, Trip, PlaceReview, TripReview, TripNode, RepRecord
from placeholdr.models import UserProfile
class PlaceAdmin(admin.ModelAdmin):
    # Columns shown on the Place changelist page.
    list_display = ('userId', 'lat', 'long', 'desc', 'name')
class TripAdmin(admin.ModelAdmin):
    # Columns shown on the Trip changelist page.
    list_display = ('userId', 'desc', 'name')
class PlaceReviewAdmin(admin.ModelAdmin):
    # Columns shown on the PlaceReview changelist page.
    list_display = ('userId', 'placeId', 'stars', 'review')
class TripReviewAdmin(admin.ModelAdmin):
    # Columns shown on the TripReview changelist page.
    list_display = ('userId', 'tripId', 'stars', 'review')
class TripNodeAdmin(admin.ModelAdmin):
    # Columns shown on the TripNode changelist page.
    list_display = ('id', 'placeId', 'tripId', 'tripPoint')
class RepRecordAdmin(admin.ModelAdmin):
    # Columns shown on the RepRecord changelist page.
    list_display = ('id', 'userId', 'rep', 'tpSlug')
# Register your models here.
# Each model is paired with the ModelAdmin defined above so the customized
# changelists apply; UserProfile falls back to the default ModelAdmin.
admin.site.register(Place, PlaceAdmin)
admin.site.register(Trip, TripAdmin)
admin.site.register(PlaceReview, PlaceReviewAdmin)
admin.site.register(TripReview, TripReviewAdmin)
admin.site.register(TripNode, TripNodeAdmin)
admin.site.register(UserProfile)
admin.site.register(RepRecord,RepRecordAdmin)
|
import sys

# Bug fix: the path was previously wrapped in literal square brackets
# ("[/home/...]"), so a non-existent directory was appended and the custom
# Airflow plugins (e.g. TwitterOperator) were never importable from here.
sys.path.append("/home/lucasquemelli/datapipeline/airflow/plugins")
from datetime import datetime
from os.path import join
from airflow.models import DAG
from airflow.operators.alura import TwitterOperator
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator
# Two-task pipeline: extract tweets to the datalake, then transform them
# with Spark from the bronze to the silver layer.
# NOTE(review): start_date=datetime.now() makes the start date move on every
# scheduler parse — confirm a fixed start_date isn't intended.
with DAG(dag_id="twitter_dag", start_date=datetime.now()) as dag:
    # Extraction: one JSON file per execution date (Jinja-templated {{ ds }}).
    twitter_operator = TwitterOperator(
        task_id = "twitter_aluraonline",
        query = "AluraOnline",
        file_path =join(
            "/home/lucasquemelli/datapipeline/datalake",
            "twitter_aluraonline",
            "extract_date={{ ds }}",
            "AluraOnline_{{ ds_nodash }}.json"
        )
    )
    # Transformation: Spark job promoting bronze -> silver.
    # NOTE(review): --src hard-codes extract_date=2021-12-05 while
    # --process_date is templated — confirm the source partition should not
    # also use {{ ds }}.
    twitter_transform = SparkSubmitOperator(
        task_id = "transformation_twitter_aluraonline",
        application = "/home/lucasquemelli/datapipeline/spark/transformation_final_version.py",
        name = "twitter_transformation",
        application_args = [
            "--src",
            "/home/lucasquemelli/datapipeline/datalake/bronze/twitter_aluraonline/extract_date=2021-12-05",
            "--dest",
            "/home/lucasquemelli/datapipeline/datalake/silver/twitter_aluraonline",
            "--process_date",
            "{{ ds }}",
        ]
    )
print("\nArduboy Flashcart image builder v1.07 by Mr.Blinky Jun 2018 - Jun 2020\n")

# requires PILlow. Use 'python -m pip install pillow' to install

import sys
import time
import os
import csv
import math
try:
    from PIL import Image
except ImportError:
    # Catch only the import failure: a bare `except:` would also hide
    # unrelated errors (even KeyboardInterrupt) behind the "install pillow"
    # message.
    print("The PILlow module is required but not installed!")
    print("Use 'python -m pip install pillow' from the commandline to install.")
    sys.exit()
#CSV indices
ID_LIST = 0         # list/menu number column
ID_TITLE = 1        # title text column
ID_TITLESCREEN = 2  # title screen image filename column
ID_HEXFILE = 3      # sketch .hex filename column
ID_DATAFILE = 4     # external flash data filename column
ID_SAVEFILE = 5     # save data filename column (not referenced in this build loop)
#Menu patcher data
# AVR machine code written over the sketch's timer0 ISR by PatchMenuButton().
# The MBP_* values below are byte offsets into this blob that get rewritten
# with the sketch's own timer variable addresses / register constants.
MenuButtonPatch = b'\x0f\x92\x0f\xb6\x8f\x93\x9f\x93\xef\x93\xff\x93\x80\x91\xcc\x01'+ \
                  b'\x8d\x5f\x8d\x37\x08\xf0\x8d\x57\x80\x93\xcc\x01\xe2\xe4\xf3\xe0'+ \
                  b'\x80\x81\x8e\x4f\x80\x83\x91\x81\x9f\x4f\x91\x83\x82\x81\x8f\x4f'+ \
                  b'\x82\x83\x83\x81\x8f\x4f\x83\x83\xed\xec\xf1\xe0\x80\x81\x8f\x5f'+ \
                  b'\x80\x83\x81\x81\x8f\x4f\x81\x83\x82\x81\x8f\x4f\x82\x83\x83\x81'+ \
                  b'\x8f\x4f\x83\x83\x8f\xb1\x8f\x60\x66\x99\x1c\x9b\x88\x27\x8f\x36'+ \
                  b'\x81\xf4\x80\x91\xFF\x0A\x98\x1b\x96\x30\x68\xf0\xe0\xe0\xf8\xe0'+ \
                  b'\x87\xe7\x80\x83\x81\x83\x88\xe1\x80\x93\x60\x00\xf0\x93\x60\x00'+ \
                  b'\xff\xcf\x90\x93\xFF\x0A\xff\x91\xef\x91\x9f\x91\x8f\x91\x0f\xbe'+ \
                  b'\x0f\x90\x18\x95'
# Offsets of the lds/sts operands and register-load immediates patched with
# the sketch's timer0_fract / timer0_millis / timer0_overflow_count addresses.
MBP_fract_lds = 14
MBP_fract_sts = 26
MBP_millis_r30 = 28
MBP_millis_r31 = 30
MBP_overflow_r30 = 56
MBP_overflow_r31 = 58
def fixPath(filename):
    """Normalize the path separators in *filename* to the host convention."""
    wrong, right = ("/", "\\") if os.sep == "\\" else ("\\", "/")
    return filename.replace(wrong, right)
def DelayedExit():
    # Give the user time to read the final console message before exiting
    # (useful when launched by double-click and the window closes on exit).
    time.sleep(3)
    sys.exit()
def DefaultHeader():
    """Return a fresh 256-byte slot header: 'ARDUBOY' magic plus 0xFF filler."""
    return bytearray(b"ARDUBOY" + b"\xFF" * 249)
def LoadTitleScreenData(filename):
    """Load a title screen image and pack it into the display's byte format.

    The image must be 128x64 pixels, or a multiple of that (it is then
    downscaled to 128x64). Pixels are packed column-wise, 8 vertical pixels
    per byte, LSB = topmost pixel of the band. Exits via DelayedExit() when
    the file is missing or has unusable dimensions.
    """
    if not os.path.isabs(filename):
        filename = path + filename  # resolve relative to the CSV's folder
    if not os.path.isfile(filename) :
        print("Error: Title screen '{}' not found.".format(filename))
        DelayedExit()
    img = Image.open(filename).convert("1")  # force 1-bit monochrome
    width, height = img.size
    if (width != 128) or (height != 64) :
        # Accept exact multiples of 128x64 and downscale; reject anything else.
        if height // (width // 128) != 64:
            print("Error: Title screen '{}' is not 128 x 64 pixels or a multiple of that.".format(filename))
            DelayedExit()
        else:
            img = img.resize((128,64), Image.NEAREST)
            width, height = img.size
    pixels = list(img.getdata())
    # NOTE: local name shadows the builtin `bytes`.
    bytes = bytearray(int((height // 8) * width))
    i = 0
    b = 0
    for y in range (0,height,8):
        for x in range (0,width):
            for p in range (0,8):
                # Shift in LSB-first so row y+0 lands in bit 0 of the byte.
                b = b >> 1
                if pixels[(y + p) * width + x] > 0:
                    b |= 0x80
            bytes[i] = b
            i += 1
    return bytes
def LoadHexFileData(filename):
    """Parse an Intel HEX file into a page-aligned flash image.

    A relative *filename* is resolved against the global ``path``. Returns an
    empty bytearray when the file does not exist; otherwise returns a
    0xFF-filled buffer trimmed to the last used 256-byte page boundary.
    Calls DelayedExit() when a data record's checksum is invalid.
    """
    if not os.path.isabs(filename):
        filename = path + filename
    if not os.path.isfile(filename):
        return bytearray()
    with open(filename, "r") as f:
        records = f.readlines()
    flash = bytearray(b'\xFF' * 32768)
    flash_end = 0
    for rcd in records:
        # Bug fix: readlines() keeps the trailing newline, so the original
        # exact comparison against ":00000001FF" never matched and the EOF
        # record was only skipped by luck of the type check below.
        if rcd.strip() == ":00000001FF":
            break
        if rcd.startswith(":"):
            rcd_len = int(rcd[1:3], 16)   # data byte count
            rcd_addr = int(rcd[3:7], 16)  # load address
            rcd_typ = int(rcd[7:9], 16)   # record type (0 = data)
            rcd_sum = int(rcd[9 + rcd_len * 2:11 + rcd_len * 2], 16)
            if (rcd_typ == 0) and (rcd_len > 0):
                flash_addr = rcd_addr
                checksum = rcd_sum
                # Sum every byte of the record; a valid record sums to 0 mod 256.
                for i in range(1, 9 + rcd_len * 2, 2):
                    byte = int(rcd[i:i + 2], 16)
                    checksum = (checksum + byte) & 0xFF
                    if i >= 9:  # payload bytes start at offset 9
                        flash[flash_addr] = byte
                        flash_addr += 1
                        if flash_addr > flash_end:
                            flash_end = flash_addr
                if checksum != 0:
                    print("Error: Hex file '{}' contains errors.".format(filename))
                    DelayedExit()
    # Round up to a whole 256-byte page (integer math instead of float division).
    flash_end = (flash_end + 255) // 256 * 256
    return flash[0:flash_end]
def LoadDataFile(filename):
    """Load a binary data file and pad it with 0xFF to a 256-byte boundary.

    A relative *filename* is resolved against the global ``path``. Returns an
    empty bytearray when the file does not exist.
    """
    if not os.path.isabs(filename):
        filename = path + filename
    if not os.path.isfile(filename):
        return bytearray()
    with open(filename, "rb") as file:
        data = bytearray(file.read())
    # Bug fix: the original always appended `256 - len % 256` filler bytes,
    # which added a whole wasted page of 0xFF when the file length was
    # already an exact multiple of 256. `(-len) % 256` pads only when needed.
    data.extend(b'\xFF' * ((-len(data)) % 256))
    return data
def PatchMenuButton():
    """Patch the loaded sketch's timer0 ISR with the menu-button feature.

    Scans the ISR addressed by interrupt vector 23 to find the sketch's
    timer0_millis / timer0_fract / timer0_overflow_count variable addresses,
    then overwrites the ISR with MenuButtonPatch ('hold UP + DOWN for 2
    seconds to start bootloader menu'). Mutates the global ``program``
    bytearray in place and returns a human-readable result string.
    """
    global program
    if len(program) < 256: return ''
    vector_23 = (program[0x5E] << 1) | (program[0x5F] << 9) #ISR timer0 vector addr
    p = vector_23
    l = 0        # measured ISR length; -1 flags an embedded subroutine return
    lds = 0      # count of lds instructions seen (1st/5th/6th carry the addresses)
    branch = 0
    timer0_millis = 0
    timer0_fract = 0
    timer0_overflow_count = 0
    while p < (len(program) - 2):
        p += 2 #handle 2 byte instructions
        if program[p-2:p] == b'\x08\x95': #ret instruction
            l = -1
            break
        if (program[p-1] & 0xFC == 0xF4) & (program[p-2] & 0x07 == 0x00): # brcc instruction may jump beyond reti
            # Decode the signed 7-bit branch displacement into a byte address.
            branch = ((program[p-1] & 0x03) << 6) + ((program[p-2] & 0xf8) >> 2)
            if branch < 128:
                branch = p + branch
            else:
                branch = p -256 + branch
        if program[p-2:p] == b'\x18\x95': #reti instruction
            l = p - vector_23
            if p > branch: # there was no branch beyond reti instruction
                break
        if l != 0: #branced beyond reti, look for rjmp instruction
            if program[p-1] & 0xF0 == 0xC0:
                l = p - vector_23
                break
        #handle 4 byte instructions
        if (program[p-1] & 0xFE == 0x90) & (program[p-2] & 0x0F == 0x00): # lds instruction
            lds +=1
            if lds == 1:
                timer0_millis = program[p] | ( program[p+1] << 8)
            elif lds == 5:
                timer0_fract = program[p] | ( program[p+1] << 8)
            elif lds == 6:
                timer0_overflow_count = program[p] | ( program[p+1] << 8)
            p +=2
        if (program[p-1] & 0xFE == 0x92) & (program[p-2] & 0x0F == 0x00): # sts instruction
            p +=2
    if l == -1:
        return 'No menu patch applied. ISR contains subroutine.'
    elif l < len(MenuButtonPatch):
        return 'No menu patch applied. ISR size too small ({} bytes)'.format(l)
    elif (timer0_millis == 0) | (timer0_fract == 0) | (timer0_overflow_count == 0):
        return 'No menu patch applied. Custom ISR in use.'
    else:
        #patch the new ISR code with 'hold UP + DOWN for 2 seconds to start bootloader menu' feature
        program[vector_23 : vector_23+len(MenuButtonPatch)] = MenuButtonPatch
        #fix timer variables: splice the sketch-specific addresses into the
        #patch's lds/sts operands and ldi immediates (see MBP_* offsets).
        program[vector_23 + MBP_fract_lds + 0] = timer0_fract & 0xFF
        program[vector_23 + MBP_fract_lds + 1] = timer0_fract >> 8
        program[vector_23 + MBP_fract_sts + 0] = timer0_fract & 0xFF
        program[vector_23 + MBP_fract_sts + 1] = timer0_fract >> 8
        program[vector_23 + MBP_millis_r30 + 0] = 0xE0 | (timer0_millis >> 0) & 0x0F
        program[vector_23 + MBP_millis_r30 + 1] = 0xE0 | (timer0_millis >> 4) & 0x0F
        program[vector_23 + MBP_millis_r31 + 0] = 0xF0 | (timer0_millis >> 8) & 0x0F
        program[vector_23 + MBP_millis_r31 + 1] = 0xE0 | (timer0_millis >>12) & 0x0F
        program[vector_23 + MBP_overflow_r30 +0] = 0xE0 | (timer0_overflow_count >> 0) & 0x0F
        program[vector_23 + MBP_overflow_r30 +1] = 0xE0 | (timer0_overflow_count >> 4) & 0x0F
        program[vector_23 + MBP_overflow_r31 +0] = 0xF0 | (timer0_overflow_count >> 8) & 0x0F
        program[vector_23 + MBP_overflow_r31 +1] = 0xE0 | (timer0_overflow_count >>12) & 0x0F
        return 'Menu patch applied'
################################################################################
# Main: build a flashcart image from a CSV index of title screens and sketches.
if len(sys.argv) != 2 :
    print("\nUsage: {} flashcart-index.csv\n".format(os.path.basename(sys.argv[0])))
    DelayedExit()
# Slot linkage is tracked in 256-byte pages.
previouspage = 0xFFFF
currentpage = 0
nextpage = 0
csvfile = os.path.abspath(sys.argv[1])
path = os.path.dirname(csvfile)+os.sep  # base dir for relative CSV entries
if not os.path.isfile(csvfile) :
    print("Error: CSV-file '{}' not found.".format(csvfile))
    DelayedExit()
TitleScreens = 0
Sketches = 0
# Output image name is derived from the CSV name: *-index.csv -> *-image.bin
filename = path + os.path.basename(csvfile).lower().replace("-index","").replace(".csv","-image.bin")
with open(filename,"wb") as binfile:
    with open(csvfile,"r") as file:
        data = csv.reader(file, quotechar='"', delimiter = ";")
        next(data,None)  # skip the CSV header row
        print("Building: {}\n".format(filename))
        print("List Title Curr. Prev. Next ProgSize DataSize SaveSize")
        print("---- ------------------------- ----- ----- ----- -------- -------- --------")
        for row in data:
            while len(row) < 7: row.append('') #add missing cells
            header = DefaultHeader()
            title = LoadTitleScreenData(fixPath(row[ID_TITLESCREEN]))
            program = LoadHexFileData(fixPath(row[ID_HEXFILE]))
            programsize = len(program)
            datafile = LoadDataFile(fixPath(row[ID_DATAFILE]))
            datasize = len(datafile)
            # Slot layout: header+title occupy 5 pages, then program, then data.
            slotsize = ((programsize + datasize) >> 8) + 5
            programpage = currentpage + 5
            datapage = programpage + (programsize >> 8)
            nextpage += slotsize
            # Fill in the slot header (big-endian page numbers).
            header[7] = int(row[ID_LIST]) #list number
            header[8] = previouspage >> 8
            header[9] = previouspage & 0xFF
            header[10] = nextpage >> 8
            header[11] = nextpage & 0xFF
            header[12] = slotsize >> 8
            header[13] = slotsize & 0xFF
            header[14] = programsize >> 7 #program size in 128 byte pages
            if programsize > 0:
                header[15] = programpage >> 8
                header[16] = programpage & 0xFF
                if datasize > 0:
                    # Patch the sketch so it knows its external data page.
                    program[0x14] = 0x18
                    program[0x15] = 0x95
                    program[0x16] = datapage >> 8
                    program[0x17] = datapage & 0xFF
            if datasize > 0:
                header[17] = datapage >> 8
                header[18] = datapage & 0xFF
            binfile.write(header)
            binfile.write(title)
            # PatchMenuButton() mutates the global `program` before it is written.
            patchresult = PatchMenuButton()
            binfile.write(program)
            binfile.write(datafile)
            if programsize == 0:
                print("{:4} {:25} {:5} {:5} {:5}".format(row[ID_LIST],row[ID_TITLE],currentpage,previouspage,nextpage))
            else:
                print("{:4} {:24} {:5} {:5} {:5} {:8} {:8} {:8} {}".format(row[ID_LIST],row[ID_TITLE][:24],currentpage,previouspage,nextpage,programsize,datasize,0,patchresult))
            previouspage = currentpage
            currentpage = nextpage
            if programsize > 0:
                Sketches += 1
            else:
                TitleScreens += 1
print("---- ------------------------- ----- ----- ----- -------- -------- --------")
print(" Page Page Page Bytes Bytes Bytes")
# nextpage counts 256-byte pages; /4 converts to KB, +3 rounds up.
print("\nImage build complete with {} Title screens, {} Sketches, {} Kbyte used.".format(TitleScreens,Sketches,(nextpage+3) / 4))
# Bug fix: the call parentheses were missing, so the bare name `DelayedExit`
# was a no-op expression and the script closed immediately instead of
# pausing 3 seconds for the user to read the summary.
DelayedExit()
|
#!/usr/bin/env python3
# Copyright (c) 2022 Idiap Research Institute, http://www.idiap.ch/
# Written by Srikanth Madikeri <srikanth.madikeri@idiap.ch>
"""implementation of NG-SGD from Kaldi"""
from dataclasses import dataclass
from typing import Sequence
import torch
from math import exp
import logging
@torch.no_grad()
def constrain_orthonormal(M, scale, update_speed=0.125):
    """Take one in-place step moving M toward M M^T == scale^2 * I.

    Mirrors Kaldi's ConstrainOrthonormal. When ``scale`` is negative, a
    suitable scale is derived from M itself (sqrt(tr(P P^T)/tr(P)) with
    P = M M^T). Mutates ``M.data`` in place; returns nothing.
    """
    rows, cols = M.shape
    d = rows
    if rows < cols:
        # Work in the orientation whose Gram matrix P is the smaller square.
        M = M.T
        d = cols
    # we don't update it. we just compute the gradient
    P = M.mm(M.T)
    if scale < 0.0:
        trace_P_Pt = P.pow(2.0).sum()
        trace_P = P.trace()
        ratio = trace_P_Pt / trace_P
        scale = ratio.sqrt()
        # The further P is from a multiple of the identity, the smaller the
        # update step taken.
        ratio = ratio * d / trace_P
        if ratio > 1.1:
            update_speed *= 0.25
        elif ratio > 1.02:
            update_speed *= 0.5
    scale2 = scale**2
    # Subtract scale^2 from P's diagonal (P - scale^2 I) and step M against
    # the orthonormality residual.
    P[range(d), range(d)] -= scale2
    M.data.add_(P.mm(M), alpha=-4 * update_speed / scale2)
@dataclass
class NGState:
    """NGState value container"""
    alpha: float = 4.0                     # smoothing constant (see OnlineNaturalGradient.alpha)
    num_samples_history: float = 2000.0    # controls the eta forgetting factor
    update_period: int = 4                 # update the preconditioner every N minibatches
# keeping this implementation in python for now. even if it is run, I don't
# expect it to be run multiple times.
# TOOD: shift it to C++ biniding
def OrthogonalizeRows(M):
    """Implementation of Gram-Schmidt: orthonormalize the rows of M in place.

    Degenerate rows (nan/inf/zero norm, or rows that nearly vanish after
    projection) are re-randomized and retried; more than 100 retries for a
    single row raises.

    Raises:
        Exception: If re-randomization loops without converging.
    """
    num_rows, num_cols = M.shape
    for i in range(num_rows):
        counter = 0
        while True:
            start_prod = M[i, :].pow(2.0).sum()
            if torch.isnan(start_prod) or torch.isinf(start_prod) or start_prod == 0.0:
                # Unusable row: replace with random normal values and retry.
                M[i, :].normal_()
                counter += 1
                if counter > 100:
                    raise Exception("Loop detected while orthogonalizing matrix")
                continue
            # TODO: vectorize this loop
            for j in range(0, i):
                # Project row i against each earlier (already orthonormal)
                # row j and subtract that component.
                prod = (M[j, :] * M[i, :]).sum()
                M[i, :].add_(M[j, :], alpha=-prod)
            end_prod = M[i, :].pow(2.0).sum()
            if end_prod <= 0.01 * start_prod:
                # Row was (nearly) in the span of the earlier rows; it carries
                # no usable direction, so re-randomize (if fully zero) and retry.
                if end_prod == 0.0:
                    M[i, :].normal_()
                counter += 1
                if counter > 100:
                    raise Exception("Loop detected while orthogonalizing matrix")
            else:
                # Normalize the surviving component and move to the next row.
                M[i, :].mul_(1.0 / end_prod.sqrt())
                break
@dataclass
class OnlineNaturalGradient:
    """Online natural-gradient (NG-SGD) preconditioner state, after Kaldi."""
    alpha: float = 4.0                   # identity-smoothing strength (see get_beta)
    num_samples_history: float = 2000.0  # controls eta when num_minibatches_history == 0
    update_period: int = 4               # full update every N minibatches after warm-up
    num_minibatches_history: int = 0     # if > 0, eta = 1/num_minibatches_history
    epsilon: float = 1.0e-10             # initial rho / d_t value; lower bound in validate()
    delta: float = 5.0e-4                # relative flooring constant for d_t / rho
    frozen: bool = False                 # declared but not consulted in this file — TODO confirm intent
    t: int = 0                           # minibatch counter (0 means uninitialized)
    rho_t: see init_default/_precondition_directions_internal
    rank: int = 40                       # rank R of the low-rank factor W_t
    num_initial_updates: int = 10        # always do full updates for the first N steps
def __post_init__(self):
    # Lazily-allocated state: the diagonal d_t and low-rank basis W_t are
    # created on first use by init_default().
    self.d_t = None
    self.W_t = None
def init_orthonormal_special(self):
    """Fill W_t with a special sparse pattern with orthonormal rows.

    Each row r places a slightly larger value (1.1, pre-normalization) at
    column r and equal values at every num_rows-th column after it, so
    distinct rows touch disjoint column sets and are orthogonal by
    construction. Mutates self.W_t in place.
    """
    R = self.W_t
    num_rows, num_cols = R.shape
    R.zero_()
    first_elem = 1.1
    first_elem2 = 1.1**2
    for r in range(num_rows):
        # Columns r, r+num_rows, r+2*num_rows, ... belong to row r only.
        cols = [c for c in range(r, num_cols, num_rows)]
        normalizer = 1.0 / (first_elem2 + len(cols) - 1) ** (0.5)
        R[r, cols[0]] = first_elem * normalizer
        R[r, cols[1:]] = normalizer
def init_default(self, D, device=None):
    """Allocate and initialize d_t and W_t for feature dimension D.

    The rank is clipped to D-1; a resulting rank of 0 disables the
    preconditioner (state left unallocated).
    """
    if self.rank >= D:
        self.rank = D - 1
    if self.rank == 0:
        return
    self.validate()
    self.rho = self.epsilon
    # TODO: decide the device
    if device is None:
        device = "cpu"
    self.d_t = torch.zeros(self.rank, device=device).add_(self.epsilon)
    self.W_t = torch.zeros(self.rank, D, device=device)
    self.init_orthonormal_special()
    # Scale so rows have the magnitude implied by e_tii with d_t == 0
    # (cf. _compute_et with beta derived from rho = epsilon).
    E_tii = (1.0 / (2.0 + (D + self.rank) * self.alpha / D)) ** (0.5)
    self.W_t.mul_(E_tii)
    self.t = 0
def init(self, X):
    """Initialize state from a first minibatch X and run warm-up iterations.

    Runs up to 3 preconditioning passes on disposable copies of X so the
    low-rank state adapts before real data is preconditioned.
    """
    D = X.shape[-1]
    self.init_default(D, device=X.device)
    self.t = 1
    num_init_iters = 3
    if X.shape[0] <= self.rank:
        # Too few rows to inform a rank-R update; one pass is enough.
        num_init_iters = 1
    for _ in range(num_init_iters):
        # Work on a copy: precondition_directions mutates its argument.
        Xcopy = torch.zeros_like(X, requires_grad=False).copy_(X)
        self.precondition_directions(Xcopy)
@torch.no_grad()
def _precondition_directions_internal(self, X, initial_product):
    """Precondition X in place and (periodically) update the low-rank state.

    X is N x D; initial_product is sum(X**2) before modification. On
    non-update steps only X <- X - H_t W_t is applied; on update steps the
    factors W_t, d_t and rho are refreshed following Kaldi's OnlineNaturalGradient.

    NOTE(review): Tensor.eig() was deprecated/removed in newer PyTorch in
    favor of torch.linalg.eig, and the .cuda() calls hard-code a CUDA
    device — this code path assumes an older torch and GPU execution.
    """
    N, D = X.shape
    R = self.rank
    eta = self._compute_eta(N)
    W_t = self.W_t
    # H_t = X W_t^T
    H_t = X.mm(W_t.T)
    if (
        self.t > self.num_initial_updates
        and (self.t - self.num_initial_updates) % self.update_period != 0
    ):
        # Off-period step: precondition with the existing factors only.
        # X <- X - H_t W_t
        X.add_(H_t.mm(W_t), alpha=-1.0)
        return
    J_t = H_t.T.mm(X)
    # TODO: compute LK together because in GPUs that would mean only one call
    L_t = W_t.mm(J_t.T)
    K_t = J_t.mm(J_t.T)
    alpha = self.alpha
    d_t = self.d_t
    rho_t = self.rho
    beta_t = OnlineNaturalGradient.get_beta(rho_t, alpha, d_t, D)
    inv_sqrt_e_t = self._compute_et(d_t, beta_t)[-1]
    # TODO: check if doing this on CPU is faster. Kaldi does that
    Z_t = self.compute_zt(N, inv_sqrt_e_t, K_t, L_t)
    # Normalize by the trace for numerical stability of the eigendecomposition.
    z_t_scale = Z_t.trace().clamp_min(1.0)
    Z_t = Z_t.mul(1.0 / z_t_scale)
    Z_t = Z_t.to(dtype=torch.float)
    eigvalues, U = Z_t.eig(eigenvectors=True)
    eigvalues_sorted = eigvalues[:, 0].sort(descending=True)
    # TODO: remove sorting. not really required
    eigvalues = eigvalues_sorted.values
    U = U[:, eigvalues_sorted.indices].cuda()
    eigvalues.mul_(z_t_scale)  # undo the trace normalization
    # Badly conditioned or floored eigenvalues force a reorthogonalization.
    condition_threshold = 1.0e06
    must_reorthogonalize = eigvalues.max() > condition_threshold * eigvalues.min()
    c_t_floor = torch.tensor(
        (rho_t * (1 - eta)) ** 2, device=eigvalues.device, requires_grad=False
    )
    if any(eigvalues < c_t_floor):
        must_reorthogonalize = True
    eigvalues.clamp_min_(c_t_floor)
    sqrt_c_t = eigvalues.pow(0.5).cuda()
    # rho_{t+1}: spreads the "leftover" variance over the D-R minor dimensions.
    rho_t1 = (
        (1.0)
        / (D - R)
        * (
            eta / N * initial_product
            + (1 - eta) * (D * rho_t + d_t.sum())
            - sqrt_c_t.sum()
        )
    )
    d_t1 = sqrt_c_t - rho_t1
    # Floor both rho and d_t to keep the factorization positive definite.
    floor_val = torch.max(torch.tensor((self.delta * sqrt_c_t.max(), self.epsilon)))
    if rho_t1 < floor_val:
        rho_t1 = floor_val
    d_t1.clamp_min_(floor_val)
    # Apply the preconditioning to X before the factors are replaced.
    X.add_(H_t.mm(W_t), alpha=-1.0)
    W_t1 = self._compute_Wt1(N, d_t1, rho_t1, U, sqrt_c_t, inv_sqrt_e_t, J_t)
    if must_reorthogonalize:
        self._reorthogonalize_Rt1(d_t1, rho_t1, W_t1, L_t)
    self.W_t.copy_(W_t1.to(self.W_t.device))
    self.d_t.copy_(d_t1.to(self.d_t.device))
    self.rho = rho_t1
    return
def _compute_Wt1(self, N, d_t1, rho_t1, U, sqrt_c_t, inv_sqrt_et, J_t):
    """Compute the next low-rank factor W_{t+1} = A_t J_t.

    Mutates J_t in place (adds the weighted previous-factor term) and
    returns the new R x D factor. NOTE(review): .cuda() calls hard-code
    GPU execution — confirm against the intended deployment.
    """
    d_t = self.d_t
    rho_t = self.rho
    W_t = self.W_t
    # TODO: do we really need to create another copy?
    R, D = W_t.shape
    eta = self._compute_eta(N)
    beta_t1 = OnlineNaturalGradient.get_beta(rho_t1, self.alpha, d_t1, D)
    sqrt_e_t1 = self._compute_et(d_t1, beta_t1)[1]
    inv_sqrt_c_t = sqrt_c_t.pow(-1.0)
    # Coefficient carrying the previous factor's contribution into J_t.
    w_t_coeff = ((1.0 - eta) / (eta / N) * (d_t + rho_t)).cuda()
    # this is in CPU
    A_t = (
        U.T * ((eta / N) * sqrt_e_t1 * inv_sqrt_c_t)[:, None] * inv_sqrt_et[None, :]
    )
    A_t = A_t.cuda()
    J_t.add_(w_t_coeff[:, None] * W_t)
    W_t1 = A_t.mm(J_t)
    return W_t1
def _reorthogonalize_Rt1(self, d_t1, rho_t1, W_t1, temp_O):
    """Restore orthogonality of the rows of W_t1, in place.

    First tries a Cholesky-based correction; if the factorization fails or
    yields out-of-range values, falls back to Gram-Schmidt
    (OrthogonalizeRows). ``temp_O`` is used as scratch space and is
    overwritten. NOTE(review): .cuda() calls hard-code GPU execution.
    """
    R, D = W_t1.shape
    beta_t1 = OnlineNaturalGradient.get_beta(rho_t1, self.alpha, d_t1, D)
    e_t1, sqrt_e_t1, inv_sqrt_e_t1 = self._compute_et(d_t1, beta_t1)
    # a trick to re-use memory would be to re-use temp_O
    temp_O.copy_(W_t1.mm(W_t1.T) * inv_sqrt_e_t1[:, None] * inv_sqrt_e_t1[None, :])
    if _is_unit(temp_O):
        # Already orthonormal within tolerance; nothing to do.
        return
    Omat = temp_O.cpu()
    cholesky_ok = True
    try:
        Omat_inv = Omat.cholesky().cholesky_inverse()
        if Omat_inv.max() > 100.0:
            logging.warning(
                "Cholesky out of range. Using Gram-Schmidt t={} {}".format(
                    self.t, Omat_inv.max()
                )
            )
            raise Exception("Cholesky out of range. Using Gram-Schmidt")
        Omat_inv = Omat_inv.cuda()
        Omat_inv.mul_(sqrt_e_t1[:, None]).mul_(inv_sqrt_e_t1[None, :])
        # TODO: check if we really need this copy_. I don't think temp_O is used anymore
        temp_O.copy_(Omat_inv)
        W_t1.copy_(temp_O.mm(W_t1))
        return
    except Exception:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit. `except Exception` still catches both the torch
        # Cholesky failure and the deliberate out-of-range raise above,
        # triggering the Gram-Schmidt fallback.
        cholesky_ok = False
    if not cholesky_ok:
        logging.info("Running gram schmidt t={}".format(self.t))
        W_t1_cpu = W_t1.cpu()
        OrthogonalizeRows(W_t1_cpu)
        W_t1.copy_(W_t1_cpu.cuda())
        W_t1.mul_(sqrt_e_t1.cuda()[:, None])
        return
def _compute_et(self, d_t, beta_t):
D = d_t.shape[0]
e_t = 1.0 / (beta_t / d_t + 1.0)
sqrt_e_t = e_t.pow(0.5)
inv_sqrt_e_t = sqrt_e_t.pow(-1.0)
return e_t, sqrt_e_t, inv_sqrt_e_t
@staticmethod
def get_beta(rho_t, alpha, d_t, D):
return rho_t * (1 + alpha) + alpha * d_t.sum() / D
# keeping this public so that I can test it
def compute_zt(self, N, inv_sqrt_e_t, K_t, L_t):
    """Assemble the R x R matrix Z_t whose eigendecomposition drives the update.

    Computed in double precision on CPU. K_t and L_t are symmetrized before
    use, and the assembled Z_t is re-symmetrized to absorb rounding error.
    Returns a CPU double tensor.
    """
    eta = self._compute_eta(N)
    d_t = self.d_t
    rho = self.rho
    d_t_rho_t = d_t + rho
    etaN = eta / N
    eta1 = 1.0 - eta
    etaN_sq = etaN * etaN
    eta1_sq = eta1 * eta1
    etaN_eta1 = etaN * eta1
    R = d_t.shape[0]
    # Work on CPU in double precision for numerical accuracy.
    L_t_factor = L_t.cpu().to(torch.double)
    K_t_factor = K_t.cpu().to(torch.double)
    # we need to make sure L_t and K_t are symmetric!
    L_t_factor = L_t_factor + L_t_factor.T
    K_t_factor = K_t_factor + K_t_factor.T
    L_t_factor.mul_(0.5)
    K_t_factor.mul_(0.5)
    inv_sqrt_e_t_cpu = inv_sqrt_e_t.cpu()
    d_t_rho_t_cpu = d_t_rho_t.cpu()
    factor1 = (
        (inv_sqrt_e_t_cpu * etaN_sq)[:, None] * K_t_factor
    ) * inv_sqrt_e_t_cpu[None, :]
    factor2 = ((inv_sqrt_e_t_cpu * etaN_eta1)[:, None] * L_t_factor) * (
        inv_sqrt_e_t_cpu * d_t_rho_t_cpu
    )[None, :]
    factor3 = (
        (inv_sqrt_e_t_cpu * d_t_rho_t_cpu * etaN_eta1)[:, None] * L_t_factor
    ) * (inv_sqrt_e_t_cpu)[None, :]
    # TODO: factor 2 and factor 3 can be simplied in one expression;
    # TODO: factor4 can be simplified, but need to check if it is benificial computationally
    factor4 = (eta1_sq * d_t_rho_t_cpu.pow(2.0)).diag()
    Z_t = factor1 + factor2 + factor3 + factor4
    # TODO: avoid this by making sure factor2+3 is symmetric
    Z_t = (Z_t + Z_t.T).mul(0.5)
    try:
        assert torch.allclose(Z_t, Z_t.T)
    except AssertionError:
        # Diagnostic only: the symmetrization above should make this rare.
        print("Z_t is not symmetric in ", self.t)
    return Z_t
@torch.no_grad()
def precondition_directions(self, X):
    """Precondition the directions X in place and return the scale factor
    that restores X's original Euclidean norm.

    On the very first call (t == 0) the internal state is initialized from
    X. Advances the step counter before returning.
    """
    if self.t == 0:
        self.init(X)
        self.t = 0  # NOTE(review): redundant — t is already 0 here
    initial_product = X.pow(2.0).sum()
    # Mutates X; implemented elsewhere in this class.
    self._precondition_directions_internal(X, initial_product)
    if initial_product <= 0.0:
        # X was all zeros: nothing to rescale.
        scale = 1.0
    else:
        final_product = X.pow(2.0).sum()
        scale = (initial_product / final_product).pow(0.5)
    self.step()
    return scale
def step(self):
    """Advance the internal iteration counter by one."""
    self.t = self.t + 1
def validate(self):
    """Sanity-check the hyperparameters; raises AssertionError on the
    first violated condition (same order as before)."""
    conditions = (
        0.0 < self.num_samples_history <= 1e06,
        self.num_minibatches_history == 0 or self.num_minibatches_history > 1.0,
        self.num_minibatches_history < 1e06,
        self.alpha >= 0.0,
        self.rank > 0,
        0.0 < self.epsilon <= 1e-05,
        0.0 < self.delta <= 1e-02,
    )
    for ok in conditions:
        assert ok
# TODO: implement caching
def _compute_eta(self, N):
if self.num_minibatches_history > 0.0:
return 1.0 / self.num_minibatches_history
else:
# TODO: check if num_samples_history > 0.0
return min(0.9, 1.0 - exp(-N / self.num_samples_history))
def _is_unit(symmetric_matrix):
r = symmetric_matrix.shape[0]
# TODO: much simpler implementation that doesn't require so much memory
return torch.allclose(
symmetric_matrix,
torch.eye(r, r, dtype=symmetric_matrix.dtype, device=symmetric_matrix.device),
)
# IDEA: check if diag is all 1s then set the diag to 0, temporarily to check if all elements are 0s
# the min and max should be close to 0
|
import logging
from functools import update_wrapper
from django.contrib import admin
from django.db.models import Count, DateTimeField, Min, Max
from django.db.models.functions import Trunc
from .models import Provider, ProviderLog, ReadonlyProviderLog
# Module-level logger; FieldsMixin uses it to report swallowed attribute errors.
logger = logging.getLogger(__name__)
# noinspection PyAttributeOutsideInit,PyUnresolvedReferences
class FieldsMixin(object):
    """ModelAdmin mixin that selects ``fields``/``readonly_fields`` per view.

    The add view prefers ``add_fields`` / ``add_readonly_fields``, the change
    view ``change_fields`` / ``change_readonly_fields``; both fall back to
    ``default_*`` and, failing that, delete the attribute so Django computes
    its own default. Any error is logged and ignored so a misconfigured admin
    still renders.
    """

    def _select_fields(self, prefix):
        # Shared resolution logic the original duplicated in both views:
        # <prefix>_<attr>  ->  default_<attr>  ->  delete <attr>.
        for attr in ('fields', 'readonly_fields'):
            try:
                override = '%s_%s' % (prefix, attr)
                fallback = 'default_%s' % attr
                if hasattr(self, override):
                    setattr(self, attr, getattr(self, override))
                elif hasattr(self, fallback):
                    setattr(self, attr, getattr(self, fallback))
                elif hasattr(self, attr):
                    # NOTE: delattr on the instance fails when the attribute
                    # only exists on the class; the except below swallows
                    # that, matching the original behavior.
                    delattr(self, attr)
            except Exception as e:
                logger.debug(e, exc_info=True)

    def add_view(self, request, form_url='', extra_context=None):
        self._select_fields('add')
        return super(FieldsMixin, self).add_view(request, form_url, extra_context)

    def change_view(self, request, object_id, form_url='', extra_context=None):
        self._select_fields('change')
        return super(FieldsMixin, self).change_view(request, object_id, form_url, extra_context)
class ProviderAdmin(FieldsMixin, admin.ModelAdmin):
    # Columns shown on the changelist.
    list_display = ['name', 'code']
    # Editable fields; token/url are generated elsewhere, hence read-only.
    fields = ['name', 'code']
    readonly_fields = ['token', 'url']
    # FieldsMixin hooks: the add view edits only name/code; the change view
    # also displays the read-only token/url.
    add_fields = fields
    change_fields = fields + readonly_fields
class ProviderLogAdmin(FieldsMixin, admin.ModelAdmin):
    # Changelist columns and sidebar filters for incoming provider payloads.
    list_display = ['provider', 'content_type', 'received_time', 'is_valid', ]
    list_filter = ['provider__name', 'content_type', 'received_time', 'is_valid', ]
    # Form layout: file_name/ext share one row; uuid4 is server-generated.
    fields = ['provider', 'content_type', 'body', ('file_name', 'ext'), 'file_path', 'received_time', 'is_valid', 'uuid4', ]
    readonly_fields = ['uuid4', ]
# https://medium.com/@hakibenita/how-to-turn-django-admin-into-a-lightweight-dashboard-a0e0bbf609ad
# Wonderful way to add some graphs :)
# noinspection PyProtectedMember
class ReadonlyProviderLogAdmin(FieldsMixin, admin.ModelAdmin):
    """Read-only provider-log admin with an extra /statistics/ dashboard.

    ``stats_view`` reuses the changelist queryset — so every changelist
    filter also applies to the dashboard — and renders per-provider totals
    plus a per-period histogram.
    """
    change_list_template = 'admin/readonly_provider_change_list.html'
    date_hierarchy = 'received_time'
    list_display = ['provider', 'content_type', 'received_time', 'is_valid', ]
    list_filter = ['received_time', 'provider__name', 'content_type', 'is_valid', ]
    default_fields = ['provider', 'content_type', 'file_name', 'file_path', 'ext', 'received_time', 'is_valid', 'uuid4', 'parsed_body']
    default_readonly_fields = []
    add_fields = ['provider', 'content_type', 'body', 'file_name', 'ext', 'received_time', 'is_valid', ]
    change_fields = ['provider', 'content_type', ('file_name', 'ext'), 'file_path', 'received_time', 'is_valid', 'uuid4', 'parsed_body']
    change_readonly_fields = ['provider', 'content_type', 'file_name', 'ext', 'file_path', 'received_time', 'is_valid', 'uuid4', 'parsed_body']

    def stats_view(self, request, extra_context=None):
        """Render the statistics dashboard on top of the filtered changelist."""
        def get_next_in_date_hierarchy(req, date_hierarchy):
            # One level finer than the date-hierarchy level currently selected.
            if date_hierarchy + '__day' in req.GET:
                return 'hour'
            if date_hierarchy + '__month' in req.GET:
                return 'day'
            if date_hierarchy + '__year' in req.GET:
                return 'month'
            return 'month'

        response = self.changelist_view(request, extra_context)
        response.template_name = 'admin/readonly_provider_log_summary_change_list.html'
        try:
            qs = response.context_data['cl'].queryset
        except (AttributeError, KeyError):
            # Non-template responses (e.g. redirects) carry no context_data.
            return response
        metrics = {
            'total': Count('id'),
        }
        summary = list(
            qs.values('provider__name')
            .annotate(**metrics)
            .order_by('-total')
        )
        response.context_data['summary'] = summary
        summary_total = dict(
            qs.aggregate(**metrics)
        )
        response.context_data['summary_total'] = summary_total
        period = get_next_in_date_hierarchy(
            request,
            self.date_hierarchy,
        )
        response.context_data['period'] = period
        summary_over_time = qs.annotate(
            period=Trunc(
                'received_time',
                period,
                output_field=DateTimeField(),
            ),
        ) \
            .values('period') \
            .annotate(total=Count('id')) \
            .order_by('period')
        summary_range = summary_over_time.aggregate(
            low=Min('total'),
            high=Max('total'),
        )
        # FIX: on an empty queryset aggregate() returns {'high': None,
        # 'low': None} (the keys exist, so a .get() default never applied)
        # and `None > None` raises TypeError on Python 3.
        high = summary_range.get('high') or 0
        low = summary_range.get('low') or 0
        response.context_data['summary_over_time'] = [
            {
                'period': x['period'],
                'total': x['total'] or 0,
                # float() guards against integer division on Python 2.
                'pct':
                    (float(x['total'] or 0) / high) * 100
                    if high > low else 100,
            } for x in summary_over_time]
        return response

    def get_urls(self):
        """Prepend the custom statistics URL to the default admin URLs."""
        urlpatterns = super(ReadonlyProviderLogAdmin, self).get_urls()
        from django.conf.urls import url

        def wrap(view):
            # Same permission wrapping admin_site.admin_view applies to
            # the built-in admin views.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            wrapper.model_admin = self
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.model_name
        urlpatterns = [
            url(r'^statistics/$', wrap(self.stats_view), name='%s_%s_statistics' % info),
        ] + urlpatterns
        return urlpatterns
# Wire each model to its admin on the default admin site.
for model, model_admin in (
        (Provider, ProviderAdmin),
        (ProviderLog, ProviderLogAdmin),
        (ReadonlyProviderLog, ReadonlyProviderLogAdmin),
):
    admin.site.register(model, model_admin)
|
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Default settings module; honored only when not already set in the env.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
    from django.core.management import execute_from_command_line

    # Make the bundled "server" directory importable before dispatching.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(base_dir, "server"))
    execute_from_command_line(sys.argv)
|
class Solution:
    def strStr(self, haystack, needle):
        """Return the suffix of `haystack` starting at the first occurrence
        of `needle`, or None when `needle` does not occur.

        Fixes an off-by-one in the original: it scanned ``range(m - n)`` and
        therefore missed a match starting at the last valid index ``m - n``
        (e.g. strStr("abcd", "cd") returned None).
        """
        m = len(haystack)
        n = len(needle)
        if m < n:
            return None
        # Check every valid starting index, including m - n.
        for start in range(m - n + 1):
            if haystack[start:start + n] == needle:
                return haystack[start:]
        return None
# Ad-hoc manual check: expect "bcd" on stdout.
demo_haystack = 'abcd'
demo_needle = 'bc'
print(Solution().strStr(demo_haystack, demo_needle))
|
# Importando as libraries
import csv
import rows
import datetime
import requests
import http.client
from pathlib import Path
from time import sleep
import settings
from collections import namedtuple
from divulga import checar_timelines, google_sshet
from autenticadores import google_api_auth
from gspread.exceptions import APIError
from utils import cria_frase
http.client._MAXHEADERS = 1000
# Request parameters used for every URL check (desktop browser UA).
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/39.0.2171.95 Safari/537.36"
    )
}
TOTAL_TENTATIVAS = 5
STATUS_SUCESSO = 200
# Capture the machine's date ONCE so day/month/year cannot disagree when the
# process starts right before midnight (the original called now() 3 times).
_AGORA = datetime.datetime.now()
DIA = _AGORA.day
MES = _AGORA.month
ANO = _AGORA.year
data = "{:02d}/{:02d}/{:02d}".format(DIA, MES, ANO)  # e.g. 11/04/2019
def plan_gs(dia, mes, ano):
    """Create (or reopen) the day's Google Sheet of offline sites.

    Creates "colaborabot-sites-offline-DDMMYYYY" when it does not exist yet,
    writes the header row (local time, UTC time, affected url, responsible
    agency, response code) and registers the new sheet in the index
    spreadsheet (second worksheet of
    docs.google.com/spreadsheets/d/1kIwjn2K0XKAOWZLVRBx9lOU5D4TTUanvmhzmdx7bh0w).
    The sheet is made world-readable, but only the bot's API account (not the
    team account) can edit it. Returns the spreadsheet object.

    NOTE(review): `google_drive_creds` is not defined in this view of the
    module — presumably a module-level gspread client built via
    google_api_auth; confirm before refactoring.
    """
    todas_planilhas = google_drive_creds.list_spreadsheet_files()
    lista_planilhas = [item["name"] for item in todas_planilhas]
    offline_titulo = f"colaborabot-sites-offline-{dia:02d}{mes:02d}{ano:04d}"
    if offline_titulo not in lista_planilhas:
        # Example final name: colaborabot-sites-offline-27022019
        planilha = google_drive_creds.create(offline_titulo)
        cabecalho = planilha.get_worksheet(index=0)
        cabecalho.insert_row(values=["data_bsb", "data_utc", "url", "orgao", "cod_resposta"])
        # Append the new log sheet to the index spreadsheet (second tab).
        plan_indice = google_drive_creds.open_by_key("1kIwjn2K0XKAOWZLVRBx9lOU5D4TTUanvmhzmdx7bh0w")
        tab_indice = plan_indice.get_worksheet(index=1)
        endereco = f"docs.google.com/spreadsheets/d/{planilha.id}/"
        tab_indice.append_row(values=[data, endereco])
    else:
        planilha = google_drive_creds.open(title=offline_titulo)
    sleep(5)  # give the API a moment before changing permissions
    planilha.share(None, perm_type="anyone", role="reader")
    print(f"https://docs.google.com/spreadsheets/d/{planilha.id}\n")
    return planilha
def preenche_csv(resultados):
    """Persist the results of the last bot run to a per-day CSV log file.

    Args:
        resultados: iterable of rows matching the header
            (data_bsb, data_utc, url, orgao, cod_resposta).
    """
    pasta_logs = Path("logs")
    # exist_ok avoids the racy exists()/mkdir() pair of the original.
    pasta_logs.mkdir(exist_ok=True)
    arq_log = pasta_logs / f"colaborabot-log-{ANO}-{MES}-{DIA}.csv"
    cabecalho = ["data_bsb", "data_utc", "url", "orgao", "cod_resposta"]
    # newline="" is required by the csv module (prevents blank lines on
    # Windows); utf-8 keeps accented agency names portable.
    with open(arq_log, "w", newline="", encoding="utf-8") as csvfile:
        csv_writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        csv_writer.writerow(cabecalho)
        csv_writer.writerows(resultados)
def preenche_tab_gs(planilha, dados):
    """Append one row of results to the first worksheet of `planilha`.

    Returns True on success and False on a gspread APIError (e.g. a quota
    hit), so callers can retry.

    NOTE(review): `google_drive_creds` is not defined in this view of the
    module; confirm it is the module-level gspread client.
    """
    try:
        # Re-open by title and take the first worksheet before appending.
        tabela = google_drive_creds.open(planilha.title)
        planilha = tabela.get_worksheet(index=0)
        planilha.append_row(values=dados)
        return True
    except APIError:
        return False
def carregar_dados_site():
    """Load the list of transparency portals from dados/lista_portais.csv.

    Returns a `rows` table whose records expose (at least) `url` and
    `orgao` fields.

    NOTE(review): the original docstring mentioned pandas/NaN handling, but
    this function only delegates to rows.import_from_csv.
    """
    return rows.import_from_csv("dados/lista_portais.csv")
def busca_disponibilidade_sites(sites):
    """Walk the portal list and record every site that is not answering
    with HTTP 200 (spreadsheet row, bot announcement, CSV log).

    NOTE(review): `cria_dados`, `planilha_google` and `bots_ativos` are not
    defined in this view of the module — presumably module globals or
    imports defined elsewhere; confirm before refactoring.
    """
    resultados = []
    last_exception = ""
    for row in sites:
        url, orgao = row.url, row.orgao
        for tentativa in range(TOTAL_TENTATIVAS):
            try:
                if last_exception == "SSLError":
                    # After an SSL failure, retry without certificate
                    # verification (accepted risk: availability check only).
                    resposta = requests.get(url, headers=headers, timeout=60, verify=False)
                    status_code = resposta.status_code
                else:
                    resposta = requests.get(url, headers=headers, timeout=60)
                    status_code = resposta.status_code
                print("{} - {} - {}".format(orgao, url, status_code))
                last_exception = ""
                if status_code != STATUS_SUCESSO:
                    dados = cria_dados(url=url, portal=orgao, resposta=status_code)
                    if not settings.debug:
                        # Retry the spreadsheet append until it succeeds.
                        planilha_preenchida = False
                        while not planilha_preenchida:
                            planilha_preenchida = preenche_tab_gs(planilha=planilha_google, dados=dados)
                    resultados.append(dados)
                    global bots_ativos
                    for bot in bots_ativos:
                        bot.update(checa_timeline=True, mensagem=cria_frase(url=url, orgao=orgao))
                # NOTE(review): there is no break after a completed request,
                # so every URL is fetched TOTAL_TENTATIVAS times; the newer
                # filtra_inativos() below stops after the first completed
                # request — confirm which behavior is intended here.
            except requests.exceptions.RequestException as e:
                print("Tentativa {}:".format(tentativa + 1))
                print(e)
                if e.__class__.__name__ == "SSLError":
                    last_exception = e.__class__.__name__
                    with open("bases-sem-certificados.txt", "a", encoding="utf-8") as no_certification:
                        no_certification.write("{} - {} - {}\n".format(orgao, url, e))
                    continue
                elif tentativa < TOTAL_TENTATIVAS - 1:
                    continue
                else:
                    with open("bases-com-excecoes.txt", "a", encoding="utf-8") as excecoes:
                        excecoes.write("{} - {} - {}\n".format(orgao, url, e))
                    break
    preenche_csv(resultados)
def filtra_inativos(sites):
    """Yield a Site(orgao, url, resposta) namedtuple for every portal that
    does not answer with HTTP 200.

    Each URL is attempted up to TOTAL_TENTATIVAS times; retries happen only
    on request exceptions. After an SSLError, the next attempt for the same
    URL disables certificate verification (availability check only).
    Persistent failures are appended to bases-sem-certificados.txt /
    bases-com-excecoes.txt.
    """
    # Hoisted: the original re-declared this namedtuple on every bad row.
    Site = namedtuple("Site", "orgao url resposta")
    last_exception = None
    for row in sites:
        url, orgao = row.url, row.orgao
        for tentativa in range(TOTAL_TENTATIVAS):
            try:
                resposta = requests.get(url,
                                        headers=headers,
                                        timeout=60,
                                        verify=not(last_exception == "SSLError"))
                status_code = resposta.status_code
                # TODO ver esse print
                print("{} - {} - {}".format(orgao, url, status_code))
                last_exception = ""
                if status_code != STATUS_SUCESSO:
                    yield Site(row.orgao, row.url, status_code)
                # Any completed request (success or not) ends the retries.
                break
            except requests.exceptions.RequestException as e:
                # TODO rever isso
                print("Tentativa {}:".format(tentativa + 1))
                print(e)
                if e.__class__.__name__ == "SSLError":
                    last_exception = e.__class__.__name__
                    with open("bases-sem-certificados.txt", "a", encoding="utf-8") as no_certification:
                        no_certification.write("{} - {} - {}\n".format(orgao, url, e))
                    continue
                elif tentativa < TOTAL_TENTATIVAS - 1:
                    continue
                else:  # TODO rever esses excecoes e colocar alguns como sites inativos
                    with open("bases-com-excecoes.txt", "a", encoding="utf-8") as excecoes:
                        excecoes.write("{} - {} - {}\n".format(orgao, url, e))
                    break
if __name__ == "__main__":
    # If the "arms" (bracos) are now enabled via settings, this debug guard
    # may no longer be necessary. (Currently a no-op.)
    if not settings.debug:
        pass
    sites = carregar_dados_site()
    # Instantiate one bot per configured "arm" (e.g. social-media clients).
    bots_ativos = tuple(bot() for bot in settings.bracos)
    # Run forever: each sweep announces every portal currently offline.
    while True:
        for site in filtra_inativos(sites):
            for bot in bots_ativos:
                bot.update(checa_timeline=True, dados=site)
|
# -*- coding: UTF-8 -*-
import argparse
import configparser
import os
# Default INI file name; override with --config-file.
CONFIG_FILE = 'sfpy.conf'
# Project root: parent of the directory containing this module.
ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
# NOTE(review): arguments are parsed at import time; importing this module
# with unrelated argv will consume flags or exit with a usage error.
parser = argparse.ArgumentParser()
parser.add_argument('--config-file', default=CONFIG_FILE,
                    type=argparse.FileType('r'))
args = parser.parse_args()
def get_config():
    """Read and return the parsed INI configuration from --config-file."""
    parsed = configparser.ConfigParser()
    parsed.read_file(args.config_file)
    return parsed
config = get_config()
# [Auth] section: portal connection data; the required ones are checked below.
PAGE = config.get('Auth', 'Page')
USERNAME = config.get('Auth', 'Login')
DOMAIN = config.get('Auth', 'Domain')
PASSWORD = config.get('Auth', 'Password')
USER_AGENT = config.get('Auth', 'UserAgent', fallback=None)
LOGGER_NAME = 'sf-logger'
# Optional [Admin] health-check endpoint.
CHECK_URL = config.get('Admin', 'Check_URL', fallback=None)
NEXT_REQUEST_DELAY_MINUTES = 6
AUTH_RETRY_DELAY_SECONDS = 60
HERO_BAG_URL = 'https://portal.sf.mail.ru/cult/HeroBag:loadData'
# Fail fast when any mandatory Auth setting is missing.
# (Russian message: "Connection data not specified".)
if not all([USERNAME, DOMAIN, PASSWORD, PAGE]):
    raise RuntimeError(u"Не указаны данные для подключения")
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
from . import neuron
from . import axon
from . import dendrite
from . import spike
from . import synapse
from . import block
from . import classifier
from . import loss
from . import io
from . import auto
from . import utils
# Names exported by `from <package> import *`: the submodules imported above.
__all__ = [
    'neuron',
    'axon',
    'dendrite',
    'spike',
    'synapse',
    'block',
    'classifier',
    'loss',
    'io',
    'auto',
    'utils'
]
|
######################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# SPDX-License-Identifier: MIT-0 #
######################################################################
import os
import json
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
if __name__ == "__main__":
    # Streamlit entry point: the sidebar drives which collection/ID is shown.
    st.set_page_config(layout='wide')
    st.title('PM Dashboard')
    # NOTE(review): collect_data_model_id_dict, pm_root_path and
    # load_content_schema are not defined in this view of the file —
    # presumably defined earlier in the module; confirm.
    data_model_id_dict = collect_data_model_id_dict()
    # Reference imp
    dict_keys = list(data_model_id_dict.keys())
    sel_dict_key = st.sidebar.selectbox('Select Collection Type', dict_keys)
    print('Selected Collection: %s'%(sel_dict_key))
    sel_id = st.sidebar.selectbox('Select ID', data_model_id_dict[sel_dict_key])
    print('Selected ID: %s'%(sel_id))
    # Read the full path of collection + selected_id
    id_path_rel = '%s/%s'%(sel_dict_key, sel_id)
    id_path_full = '%s/%s'%(pm_root_path, id_path_rel)
    content_dict = load_content_schema(id_path_full)
    # Show the metadata block verbatim as JSON.
    st.markdown('### Metadata')
    st.json(content_dict['metadata'])
    # Offer any PNG files of the selected ID in the sidebar and render one.
    if (len(content_dict['png_files']) > 0):
        sel_png_file = st.sidebar.selectbox('Select PNG File', content_dict['png_files'])
        st.image('%s/%s'%(id_path_full, sel_png_file))
    # If the metadata names a data file, load it for interactive plots.
    if 'filename' in content_dict['metadata']:
        with open('%s/%s'%(id_path_full, content_dict['metadata']['filename'])) as fp:
            inp_out_dict = json.load(fp)
|
#! /usr/bin/env python
''' ------------------------| Python SOURCE FILE |------------------------
The Description of this file.
@copyright: Copyright (c) by Kodiak Data, Inc. All rights reserved.
'''
import socket
import time
import os
import random
from kd.util.logger import getLogger
from kd.util.url import Url
from kd.util.rc_msg import RC
from kd.tkcd.hdr_msg import HdrMsg
from kd.tkcd.dev_msg import DevMsg
from kd.tkcd.tile_msg import TileMsg
from kd.tkcd.io_msg import IoMsg
logger = getLogger(__name__)
class TkcdSess(object):
    ''' TCP session to a tkcd server with optional data-file mirroring.

    Python 2 code (see xrange in _randbytes). Sends command messages
    (hton-converted when the signature marks an IO message), receives
    responses, and — when a data file name (dFN) is given — mirrors written
    payloads to that file so subsequent reads can be verified byte-for-byte.

    Attributes:
        url: parsed tcp://host:port of the server.
        conn (socket): the TCP connection.
        dFN (str or None): mirror-file name; None disables verification.
        dFMSz (int): mirror-file size in MiB; byte addresses wrap at it.
    '''
    def __init__(self, url, dFN, dFMSz=16):
        self.url = url
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.hdrMsg = HdrMsg()
        self.dFN = dFN
        self.dFMSz = dFMSz
        # Mask used to wrap byte addresses inside the dFMSz-MiB mirror file.
        self.dFSzMask = (self.dFMSz << 20) - 1
        if dFN is None:
            # NOTE(review): 1025 * 1024 — presumably 1 MiB payload plus one
            # extra KiB of slack; confirm it is not a typo for 1024 * 1024.
            self.inBuf = bytearray( 1025 * 1024 )
            self.dFile = None
        else:
            if not os.path.isfile(self.dFN):
                # Pre-fill a fresh mirror file with random data.
                self.dFile = open(self.dFN, "wb")
                # write extra mByte
                for idx in range( dFMSz + 1 ):
                    byteArray = bytearray(self._randbytes(1024*1024))
                    self.dFile.write( byteArray )
                self.dFile.flush()
                self.dFile.close()
            self.dFile = open(self.dFN, "r+b")
            self.inBuf = bytearray( 1025 * 1024 )

    def connect(self):
        '''Open the TCP connection to url.hostname:url.port.'''
        self.conn.connect( (self.url.hostname, self.url.port) )

    def close(self):
        '''Close the TCP connection.'''
        self.conn.close()

    def send(self, cmdMsg, timeout=-1):
        '''Send one command message; for IO messages (msgSig 0xF1F1) also
        generate, stamp, send and mirror the data payload.

        Every 4 KiB block of the payload is stamped with a 6-byte signature:
        lba as 4 big-endian bytes followed by blocks as 2 big-endian bytes,
        so a later read can identify which write last touched the block.

        NOTE(review): `timeout` is accepted but unused here.
        '''
        self.cmdMsg = cmdMsg
        if self.cmdMsg.msgSig != 0xF1F1:
            self.conn.sendall(cmdMsg)
        else:
            # IO message: convert to network byte order first.
            htonMsg = self.cmdMsg.htonCopy()
            self.conn.sendall(htonMsg)
            # send data
            dataSize = cmdMsg.payloadLen - (IoMsg.msgSize() - HdrMsg.size())
            if dataSize > 0:
                dBuf = bytearray( self._randbytes(dataSize) )
                dBufSig = bytearray( self._randbytes(6) )
                dBufSig[0] = (cmdMsg.lba >> 24) & 0xff
                dBufSig[1] = (cmdMsg.lba >> 16) & 0xff
                dBufSig[2] = (cmdMsg.lba >> 8) & 0xff
                dBufSig[3] = (cmdMsg.lba >> 0) & 0xff
                dBufSig[4] = (cmdMsg.blocks >> 8) & 0xff
                dBufSig[5] = (cmdMsg.blocks >> 0) & 0xff
                #print "DataSig %02x%02x %02x%02x %02x%02x" % (
                #        dBufSig[0], dBufSig[1], dBufSig[2],
                #        dBufSig[3], dBufSig[4], dBufSig[5])
                # Stamp the signature at the start of every 4 KiB block.
                idx = 0
                while idx < dataSize:
                    dBuf[idx+0] = dBufSig[0]
                    dBuf[idx+1] = dBufSig[1]
                    dBuf[idx+2] = dBufSig[2]
                    dBuf[idx+3] = dBufSig[3]
                    dBuf[idx+4] = dBufSig[4]
                    dBuf[idx+5] = dBufSig[5]
                    idx += 4096
                self.conn.sendall(dBuf)
                if self.dFile is not None:
                    # Mirror the payload at the (wrapped) byte address.
                    addr = (cmdMsg.lba * 4096) & self.dFSzMask
                    self.dFile.seek( addr )
                    self.dFile.write( dBuf )
                    self.dFile.flush()

    # pexpect-style alias so callers can treat the session like a child proc.
    sendline = send

    def expect(self, seps=-1, timeout=-1):
        '''Receive one response message.

        Returns (rc, rspMsg). rc is RC.ERROR when the server reported a
        failure or when returned read data does not match the mirror file;
        RC.NOT_YET is returned for an unknown message signature.

        NOTE(review): `seps` is accepted but unused.
        '''
        # @todo need to handle timeout case
        rc = RC.OK
        view = memoryview(self.inBuf)
        # First read the fixed-size header...
        toread = HdrMsg.size() ;
        while toread > 0:
            nbytes = self.conn.recv_into(view, toread)
            view = view[nbytes:]
            toread -= nbytes
        hdrMsg = HdrMsg.from_buffer_copy(self.inBuf)
        if hdrMsg.msgSig == 0xF1F1:
            hdrMsg = hdrMsg.ntohCopy()
        # ...then the payload it announces.
        toread = hdrMsg.payloadLen
        while toread:
            nbytes = self.conn.recv_into(view, toread)
            view = view[nbytes:]
            toread -= nbytes
        if self.cmdMsg.msgSig == 0xF1F1:
            rspMsg = IoMsg.from_buffer_copy(self.inBuf)
            rspMsg = rspMsg.ntohCopy()
            if rspMsg.rc != 1:
                rc = RC.ERROR
            elif rspMsg.cmdType == 1 and self.dFile is not None:
                # Read command: verify returned bytes against the mirror.
                addr = (rspMsg.lba * 4096) & self.dFSzMask
                byteCnt = rspMsg.blocks * 4096
                self.dFile.seek( addr )
                expected = bytearray(self.dFile.read( byteCnt ))
                returned = self.inBuf[ IoMsg.msgSize():IoMsg.msgSize() + byteCnt]
                for idx in range( byteCnt ):
                    if expected[idx] != returned[idx]:
                        logger.error("data corruption: offset %x expected 0x%02x return 0x%02x",
                                     idx, expected[idx], returned[idx])
                        # Decode the 6-byte write signature at the start of
                        # the containing 4 KiB block to report which write
                        # last touched it (see send()).
                        bIdx = idx & 0xfffff000
                        lba = (returned[bIdx ] << 24) + (returned[bIdx+1] << 16) + (returned[bIdx+2] << 8) + (returned[bIdx+3]) ;
                        blocks = (returned[bIdx+4] << 8) + (returned[bIdx+5])
                        logger.error("returned: last tx lba 0x%x blocks %d", lba, blocks)
                        lba = (expected[bIdx ] << 24) + (expected[bIdx+1] << 16) + (expected[bIdx+2] << 8) + (expected[bIdx+3]) ;
                        blocks = (expected[bIdx+4] << 8) + (expected[bIdx+5])
                        logger.error("expected: last tx lba 0x%x blocks %d", lba, blocks)
                        rc = RC.ERROR
                        break
        elif self.cmdMsg.msgSig == 0xF2F2:
            rspMsg = DevMsg.from_buffer_copy(self.inBuf)
            if rspMsg.rc != 1:
                rc = RC.ERROR
        elif self.cmdMsg.msgSig == 0xF3F3:
            rspMsg = TileMsg.from_buffer_copy(self.inBuf)
            if rspMsg.rc != 1:
                rc = RC.ERROR
        else:
            return RC.NOT_YET, "unknown message type"
        logger.debug("rsp Msg %s", rspMsg)
        return rc, rspMsg

    def _randbytes(self, size):
        # Python 2 generator (xrange) yielding `size` random byte values.
        for _ in xrange( size ):
            yield random.getrandbits(8)

    def __str__(self):
        return self.__class__.__name__
    __repr__ = __str__
if __name__ == '__main__':
    ''' Test this module here '''
    '''
    sess = TkcdSess(Url.fromStr("tcp://127.0.0.1:5017"))
    sess.connect()
    sess.close()
    '''
    # Use a context manager so the file is closed even when a write raises
    # (the original leaked the handle on error and only closed on success).
    # NOTE: chr() can be written to a binary file only on Python 2; this
    # module is Python 2 code (see xrange in TkcdSess._randbytes).
    with open("/tmp/testit.txt", "wb") as test_file:
        for code in range(50, 70):
            test_file.write(chr(code))
|
"""
========================================================
Spatial Transformations (:mod:`scipy.spatial.transform`)
========================================================
.. currentmodule:: scipy.spatial.transform
This package implements various spatial transformations. For now,
only rotations are supported.
Rotations in 3 dimensions
=========================
.. autosummary::
:toctree: generated/
Rotation
Slerp
"""
from __future__ import division, print_function, absolute_import
from .rotation import Rotation, Slerp
__all__ = ['Rotation', 'Slerp']
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
# exercise 5.2.6
from matplotlib.pylab import figure, plot, xlabel, ylabel, legend, ylim, show
import sklearn.linear_model as lm
# requires data from exercise 5.1.4
from ex5_1_5 import *
# Fit logistic regression model.
# FIX: use the public sklearn.linear_model.LogisticRegression — the private
# `lm.logistic` module path was deprecated and removed in scikit-learn 0.24.
model = lm.LogisticRegression()
model = model.fit(X, y)

# Classify wine as White/Red (0/1) and assess probabilities.
y_est = model.predict(X)
y_est_white_prob = model.predict_proba(X)[:, 0]

# Define a new data object (new type of wine), as in exercise 5.1.7.
x = np.array([6.9, 1.09, .06, 2.1, .0061, 12, 31, .99, 3.5, .44, 12]).reshape(1,-1)

# Evaluate the probability of x being a white wine (class=0).
x_class = model.predict_proba(x)[0,0]

# Evaluate classifier's misclassification rate over entire training data.
misclass_rate = np.sum(y_est != y) / float(len(y_est))

# Display classification results.
print('\nProbability of given sample being a white wine: {0:.4f}'.format(x_class))
print('\nOverall misclassification rate: {0:.3f}'.format(misclass_rate))

# Plot predicted probability of class White per sample, colored by true class.
f = figure()
class0_ids = np.nonzero(y==0)[0].tolist()
plot(class0_ids, y_est_white_prob[class0_ids], '.y')
class1_ids = np.nonzero(y==1)[0].tolist()
plot(class1_ids, y_est_white_prob[class1_ids], '.r')
xlabel('Data object (wine sample)')
ylabel('Predicted prob. of class White')
legend(['White', 'Red'])
ylim(-0.01, 1.5)
show()

print('Ran Exercise 5.2.6')
'''
Date: 2020-07-25 13:23:41
LastEditors: Jecosine
LastEditTime: 2020-08-22 02:18:40
'''
from mysql.connector import connect
class DBConnection:
    """Thin wrapper around a mysql-connector connection and cursor.

    Credentials can now be injected; the defaults keep the original
    behavior, so existing ``DBConnection()`` callers are unaffected.
    """

    def __init__(self, user="root", password="123456", database="bananadb"):
        # SECURITY NOTE(review): the default credentials are hard-coded;
        # move them to environment variables / config for production use.
        self.con = None
        self.cursor = None
        self.init_database(user, password, database)

    def init_database(self, user="root", password="123456", database="bananadb"):
        """Open the connection and create a cursor."""
        self.con = connect(user=user, password=password, database=database)
        self.cursor = self.con.cursor()

    def save_database(self):
        """Commit pending changes, keeping the connection open."""
        self.con.commit()

    def close_save(self):
        """Commit, then release the cursor AND the connection.

        The original closed only the cursor and leaked the connection.
        """
        self.con.commit()
        self.cursor.close()
        self.con.close()
|
# -*- coding: utf-8 -*-
import os
import pytest
import ftplib
from pytest_sftpserver.sftp.server import SFTPServer
from pytest_localftpserver import plugin
from addons.ftp import utils
class TestSFTPList():
    """Tests for utils.sftp_list against an in-memory pytest-sftpserver."""

    def test_sftp_list_root(self, sftpserver):
        # One empty directory and one empty file at the server root.
        with sftpserver.serve_content({'foo_dir' : {}, 'bar.txt' : ''}):
            args = {
                'host' : sftpserver.host,
                'port' : sftpserver.port,
                'username' : 'user',
                'password' : 'pw!',
                'key' : None}
            f_list = utils.sftp_list(**args)
            # Both entries listed; the directory is reported first.
            assert len(f_list) == 2
            assert f_list[0]['filename'] == 'foo_dir'
            assert f_list[0]['is_directory'] == True
            assert f_list[1]['filename'] == 'bar.txt'
            assert f_list[1]['is_directory'] == False
            assert f_list[1]['size'] == 0

    def test_sftp_list_specific_dir(self, sftpserver):
        # Listing 'foo_dir' must return only its children, not root entries.
        with sftpserver.serve_content({'foo_dir' : {
                                           'inner_dir' : {},
                                           'inner_txt.txt' : 'test'},
                                       'bar.txt' : ''}):
            args = {
                'host' : sftpserver.host,
                'port' : sftpserver.port,
                'username' : 'user',
                'password' : 'pw!',
                'key' : None,
                'path' : 'foo_dir'}
            f_list = utils.sftp_list(**args)
            assert len(f_list) == 2
            assert f_list[0]['filename'] == 'inner_dir'
            assert f_list[0]['is_directory'] == True
            assert f_list[1]['filename'] == 'inner_txt.txt'
            assert f_list[1]['is_directory'] == False
            # 'test' is 4 bytes.
            assert f_list[1]['size'] == 4
class TestFTPList:
    """Tests for utils.ftp_list against a local pytest-localftpserver."""

    def _create_test_files(self, path):
        # Seed the FTP home directory; ignore "already exists" on reruns
        # because the server home persists across tests.
        try:
            os.mkdir(path + '/foo_dir')
            os.mkdir(path + '/foo_dir/inner_dir')
            t01 = open(path + '/test01.txt', 'a')
            t01.write('test')
            t01.close()
            open(path + '/bar.txt', 'a').close()
            open(path + '/foo_dir/inner_txt.txt', 'a').close()
        except OSError:
            pass

    def test_ftp_list_root(self, ftpserver):
        self._create_test_files(ftpserver.server_home)
        args = {
            'host' : 'localhost',
            'port' : ftpserver.server_port,
            'username' : 'fakeusername',
            'password' : 'qweqwe',
            'protocol' : 'ftp',
            'key' : None}
        f_list = utils.ftp_list(**args)
        # Root listing is alphabetical: bar.txt, foo_dir, test01.txt.
        assert len(f_list) == 3
        assert f_list[0]['filename'] == 'bar.txt'
        assert f_list[0]['is_directory'] == False
        assert f_list[1]['filename'] == 'foo_dir'
        assert f_list[1]['is_directory'] == True
        assert f_list[2]['filename'] == 'test01.txt'
        assert f_list[2]['is_directory'] == False
        # 'test' is 4 bytes.
        assert f_list[2]['size'] == 4

    def test_ftp_list_specific_dir(self, ftpserver):
        self._create_test_files(ftpserver.server_home)
        args = {
            'host' : 'localhost',
            'port' : ftpserver.server_port,
            'username' : 'fakeusername',
            'password' : 'qweqwe',
            'protocol' : 'ftp',
            'path' : 'foo_dir',
            'key' : None}
        f_list = utils.ftp_list(**args)
        # Only the children of foo_dir are returned.
        assert len(f_list) == 2
        assert f_list[0]['filename'] == 'inner_dir'
        assert f_list[0]['is_directory'] == True
        assert f_list[1]['filename'] == 'inner_txt.txt'
        assert f_list[1]['is_directory'] == False
|
def search_grid(grid: list[list[str]], word: str) -> list[tuple[int, int]]:
    """Find every placement of `word` in `grid` across all 8 directions,
    with wrap-around, and return the (x, y) == (col, row) cells it covers.

    Matching wraps modulo the grid dimensions, but the reported coordinates
    are NOT reduced modulo the grid, so they may be negative or out of
    range (NOTE(review): possibly unintended — confirm with the caller).
    Overlapping matches contribute duplicate cells, in direction order.
    """
    directions = [(0, 1), (1, 0), (0, -1), (-1, 0),
                  (1, 1), (-1, -1), (1, -1), (-1, 1)]

    def matches(r, c, step):
        # Iterative equivalent of the original recursive check().
        for k in range(len(word)):
            rr = (r + step[0] * k) % len(grid)
            cc = (c + step[1] * k) % len(grid[0])
            if grid[rr][cc] != word[k]:
                return False
        return True

    found = []
    for r, row in enumerate(grid):
        for c, _ in enumerate(row):
            for step in directions:
                if matches(r, c, step):
                    found.extend(
                        (c + step[1] * k, r + step[0] * k)
                        for k in range(len(word))
                    )
    return found
import torch
from torch.nn import Linear, Module
from torch.nn.init import xavier_uniform_, constant_, xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn import functional as F
class MultiheadAttention(Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces.
    See reference: Attention Is All You Need

    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)

    Args:
        embed_dim: total dimension of the model
        num_heads: parallel attention layers, or heads

    Examples::

        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """

    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        # Scale queries by 1/sqrt(head_dim) (scaled dot-product attention).
        self.scaling = self.head_dim ** -0.5
        # Single packed projection for q, k and v (3 * embed_dim rows).
        self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            # Optional learned bias vectors appended to keys/values.
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()

    def _reset_parameters(self):
        """Initialize weights: Xavier-uniform per q/k/v slice, zero biases,
        Xavier-normal for the optional k/v bias vectors."""
        xavier_uniform_(self.in_proj_weight[:self.embed_dim, :])
        xavier_uniform_(self.in_proj_weight[self.embed_dim:(self.embed_dim * 2), :])
        xavier_uniform_(self.in_proj_weight[(self.embed_dim * 2):, :])
        xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False, attn_mask=None):
        """
        Inputs of forward function
            query: [target length, batch size, embed dim]
            key: [sequence length, batch size, embed dim]
            value: [sequence length, batch size, embed dim]
            key_padding_mask: if True, mask padding based on batch size
            incremental_state: if provided, previous time steps are cached
            need_weights: output attn_output_weights
            static_kv: key and value are static
        Outputs of forward function
            attn_output: [target length, batch size, embed dim]
            attn_output_weights: [batch size, target length, sequence length]
        """
        # Detect self-attention / encoder-decoder attention by comparing the
        # underlying storage pointers of the input tensors.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None
        if qkv_same:
            # self-attention: one fused projection, then split into thirds.
            q, k, v = self._in_proj_qkv(query)
        elif kv_same:
            # encoder-decoder attention
            q = self._in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k, v = self._in_proj_kv(key)
        else:
            q = self._in_proj_q(query)
            k = self._in_proj_k(key)
            v = self._in_proj_v(value)
        q *= self.scaling
        if self.bias_k is not None:
            # Append the learned bias key/value as one extra source position
            # and extend the masks accordingly.
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        # Fold heads into the batch dimension: (bsz*num_heads, len, head_dim).
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(1)
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # Append one all-zero source position (lets attention "attend to
            # nothing") and extend the masks to cover it.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
        attn_output_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            # Additive mask (e.g. -inf on disallowed positions), broadcast
            # over the folded batch*heads dimension.
            attn_mask = attn_mask.unsqueeze(0)
            attn_output_weights += attn_mask
        if key_padding_mask is not None:
            attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
            # NOTE(review): uint8 masks for masked_fill are deprecated in
            # newer PyTorch in favor of bool masks — confirm target version.
            key_padding_mask = key_padding_mask.type(torch.uint8).to(query.device)
            attn_output_weights = attn_output_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            )
            attn_output_weights = attn_output_weights.view(bsz * self.num_heads, tgt_len, src_len)
        # Softmax in float32 when the inputs are half precision (stability).
        attn_output_weights = F.softmax(
            attn_output_weights.float(), dim=-1,
            dtype=torch.float32 if attn_output_weights.dtype == torch.float16 else attn_output_weights.dtype)
        attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = self.out_proj(attn_output)
        if need_weights:
            # average attention weights over heads
            attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_output_weights = attn_output_weights.sum(dim=1) / self.num_heads
        else:
            attn_output_weights = None
        return attn_output, attn_output_weights

    def _in_proj_qkv(self, query):
        # Fused q/k/v projection, split into three equal chunks.
        return self._in_proj(query).chunk(3, dim=-1)

    def _in_proj_kv(self, key):
        # Rows [embed_dim:] of the packed weight hold k then v.
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def _in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)

    def _in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def _in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

    def _in_proj(self, input, start=0, end=None):
        # Linear projection using a row-slice of the packed weight/bias.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
"""MovieShow Model."""
from config.database import Model
class MovieShow(Model):
"""MovieShow Model."""
__table__ = 'movie_shows'
__casts__ = {'show_time': 'string', 'show_date': 'string'}
|
# Prompt the user, then echo the value and its (always str) runtime type.
user_text = input("Enter something: ")
print(user_text)
print(type(user_text))
# Generated by Django 3.0.8 on 2020-09-02 05:01
from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter UserProfile.image: allow blank, default to 'dummy.png'."""

    dependencies = [
        ('accounts', '0009_auto_20200828_1240'),
    ]

    operations = [
        # Redefine the 'image' field: optional (blank=True), falls back
        # to the 'dummy.png' placeholder, uploads stored under
        # 'profile_pictures'.
        migrations.AlterField(
            model_name='userprofile',
            name='image',
            field=models.ImageField(blank=True, default='dummy.png', upload_to='profile_pictures'),
        ),
    ]
|
#!/usr/bin/env python
#
# ********* Gen Write Example *********
#
#
# Available SCServo model on this example : All models using Protocol SCS
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
# Be sure that SCServo(STS/SMS/SCS) properties are already set as %% ID : 1 / Baudnum : 6 (Baudrate : 1000000)
#
import os

if os.name == 'nt':
    # Windows: msvcrt gives unbuffered single-key reads.
    import msvcrt

    def getch():
        # getch() returns bytes; decode to a one-character str.
        return msvcrt.getch().decode()
else:
    # POSIX: put the terminal into raw mode for a single keypress,
    # then restore the settings captured at import time.
    import sys, tty, termios
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)

    def getch():
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
from scservo_sdk import * # Uses SCServo SDK library
# Control table address
# Register addresses in the servo's control table (1-, 2- or 4-byte
# registers; see the matching write1/2ByteTxRx / read4ByteTxRx calls).
ADDR_SCS_TORQUE_ENABLE = 40
ADDR_SCS_GOAL_ACC = 41
ADDR_SCS_GOAL_POSITION = 42
ADDR_SCS_GOAL_SPEED = 46
ADDR_SCS_PRESENT_POSITION = 56

# Default setting
SCS_ID = 1                          # SCServo ID : 1
BAUDRATE = 1000000                  # SCServo default baudrate : 1000000
DEVICENAME = '/dev/ttyUSB0'         # Check which port is being used on your controller
                                    # ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
SCS_MINIMUM_POSITION_VALUE = 0      # SCServo will rotate between this value
SCS_MAXIMUM_POSITION_VALUE = 4095   # and this value (note that the SCServo would not move when the position value is out of movable range. Check e-manual about the range of the SCServo you use.)
SCS_MOVING_STATUS_THRESHOLD = 20    # SCServo moving status threshold
SCS_MOVING_SPEED = 0                # SCServo moving speed
SCS_MOVING_ACC = 0                  # SCServo moving acc
protocol_end = 0                    # SCServo bit end(STS/SMS=0, SCS=1)

index = 0                           # Index into scs_goal_position.
scs_goal_position = [2047]          # Goal position

# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)

# Initialize PacketHandler instance
# Get methods and members of Protocol
packetHandler = PacketHandler(protocol_end)
# Open port
if portHandler.openPort():
    print("Succeeded to open the port")
else:
    # Nothing else can run without the serial port; wait for a
    # keypress so the message is visible, then exit.
    print("Failed to open the port")
    print("Press any key to terminate...")
    getch()
    quit()

# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
    print("Succeeded to change the baudrate")
else:
    print("Failed to change the baudrate")
    print("Press any key to terminate...")
    getch()
    quit()
# Write SCServo acc
# scs_comm_result is the transport-level status (COMM_SUCCESS on a good
# round trip); scs_error is the status byte reported by the servo itself.
scs_comm_result, scs_error = packetHandler.write1ByteTxRx(portHandler, SCS_ID, ADDR_SCS_GOAL_ACC, SCS_MOVING_ACC)
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print("%s" % packetHandler.getRxPacketError(scs_error))

# Write SCServo speed
scs_comm_result, scs_error = packetHandler.write2ByteTxRx(portHandler, SCS_ID, ADDR_SCS_GOAL_SPEED, SCS_MOVING_SPEED)
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print("%s" % packetHandler.getRxPacketError(scs_error))
#while 1:
for i in range(0, 1):
    print("Press any key to continue! (or press ESC to quit!)")
    #if getch() == chr(0x1b):
        #break

    # Write SCServo goal position
    scs_comm_result, scs_error = packetHandler.write2ByteTxRx(portHandler, SCS_ID, ADDR_SCS_GOAL_POSITION, scs_goal_position[index])
    if scs_comm_result != COMM_SUCCESS:
        print("%s" % packetHandler.getTxRxResult(scs_comm_result))
    elif scs_error != 0:
        print("%s" % packetHandler.getRxPacketError(scs_error))

    # Poll until the servo has settled near the goal position.
    while 1:
        # Read SCServo present position: one 4-byte read returning
        # position in the low word and speed in the high word.
        scs_present_position_speed, scs_comm_result, scs_error = packetHandler.read4ByteTxRx(portHandler, SCS_ID, ADDR_SCS_PRESENT_POSITION)
        if scs_comm_result != COMM_SUCCESS:
            print(packetHandler.getTxRxResult(scs_comm_result))
        elif scs_error != 0:
            print(packetHandler.getRxPacketError(scs_error))

        scs_present_position = SCS_LOWORD(scs_present_position_speed)
        scs_present_speed = SCS_HIWORD(scs_present_position_speed)
        print("[ID:%03d] GoalPos:%03d PresPos:%03d PresSpd:%03d"
              % (SCS_ID, scs_goal_position[index], scs_present_position, SCS_TOHOST(scs_present_speed, 15)))

        # BUG FIX: compare the goal against the decoded present POSITION,
        # not the raw combined position+speed word -- with a nonzero speed
        # in the high word the old condition was essentially never true.
        if not (abs(scs_goal_position[index] - scs_present_position) > SCS_MOVING_STATUS_THRESHOLD):
            break

    # Change goal position
    # NOTE(review): scs_goal_position has a single entry, so index 1 is
    # only safe while the outer loop runs once; extend the list before
    # re-enabling 'while 1' above.
    if index == 0:
        index = 1
    else:
        index = 0
# Write 0 to the torque-enable register before closing the port.
scs_comm_result, scs_error = packetHandler.write1ByteTxRx(portHandler, SCS_ID, ADDR_SCS_TORQUE_ENABLE, 0)
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print("%s" % packetHandler.getRxPacketError(scs_error))

# Close port
portHandler.closePort()
|
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, \
ReplyKeyboardMarkup, CallbackQuery
from telegram.ext import Filters
import translations as tr
from translations import gettext as _
# context.user_data keys under which the paginated keyboard pages and
# the currently shown page index are stored.
LIST_KEYBOARD = 'LIST_KEYBOARD'
LIST_KEYBOARD_INDEX = 'LIST_KEYBOARD_INDEX'

# Inline-keyboard layout limits: max button label length, buttons per
# row, and rows per page (one row per page is used for navigation).
MAX_INLINE_CHARACTERS = 50
MAX_INLINE_COLUMNS = 1
MAX_INLINE_ROWS = 5

# Accept plain text messages that are not bot commands.
MESSAGE_FILTER = Filters.text & ~Filters.command

# callback_data sentinels used by the paginated keyboards.
ANSWER_FORWARD = 'FORWARD'
ANSWER_BACK = 'BACK'
ANSWER_NOT_IN_LIST = 'NOT_IN_LIST'
ANSWER_TYPE_AGAIN = 'TYPE_AGAIN'
def trunc(s, length):
    """Return `s`, cut down to `length` characters with '...' appended
    when it was too long.

    NOTE(review): a truncated result is length + 3 characters (the
    ellipsis is added after the cut) — confirm that still fits the
    Telegram button-text limit.
    """
    if len(s) <= length:
        return s
    return s[:length] + '...'
def make_inline_keyboard(rows):
    """Build an InlineKeyboardMarkup from rows of (text, data) pairs,
    truncating each button label to MAX_INLINE_CHARACTERS."""
    keyboard = []
    for row in rows:
        buttons = []
        for text, data in row:
            label = trunc(text, MAX_INLINE_CHARACTERS)
            buttons.append(InlineKeyboardButton(label, callback_data=data))
        keyboard.append(buttons)
    return InlineKeyboardMarkup(keyboard)
def get_main_keyboard(context):
    """Return the two-row main-menu reply keyboard, localized for the
    user's language in `context`."""
    first_row = [_(tr.MENU_HELLO, context), _(tr.MENU_SELECT_LANG, context)]
    second_row = [_(tr.MENU_SEND_FEEDBACK, context), _(tr.MENU_REVIEW, context)]
    return ReplyKeyboardMarkup([first_row, second_row],
                               one_time_keyboard=True)
def main_reply(reply, context):
    """Send the localized 'back to main menu' message together with the
    main-menu keyboard via the given `reply` callable."""
    markup = get_main_keyboard(context)
    reply(_(tr.GOT_TO_MAIN_MENU, context), reply_markup=markup)
def answer_query(update, context):
    """Acknowledge the pending callback query and return its payload
    together with the message-editing callable."""
    query: CallbackQuery = update.callback_query
    query.answer()
    return query.data, query.edit_message_text
def save_list_keyboard(options, context,
                       add_additional_buttons=lambda: []):
    """
    Split `options` into paginated inline-keyboard pages and store them
    in context.user_data[LIST_KEYBOARD].

    Each option becomes a [text, callback_data] pair (text doubles as
    the data). A new row starts when the current row is full
    (MAX_INLINE_COLUMNS) or adding the option would exceed
    MAX_INLINE_CHARACTERS. Pages hold MAX_INLINE_ROWS - 1 option rows
    plus one navigation row (back / forward buttons and whatever
    `add_additional_buttons` returns).
    """
    rows = [[]]
    last_len = 0
    for option in options:
        if (last_len + len(option) > MAX_INLINE_CHARACTERS
                or len(rows[-1]) + 1 > MAX_INLINE_COLUMNS) \
                and len(rows[-1]) != 0:
            rows.append([])
            last_len = 0  # BUG FIX: fresh row starts with zero width.
        rows[-1].append([option] * 2)
        # BUG FIX: accumulate the current row's width. It was never
        # updated, so the MAX_INLINE_CHARACTERS wrap could not trigger
        # (masked so far because MAX_INLINE_COLUMNS == 1 forces a wrap
        # on every option anyway).
        last_len += len(option)
    keyboards = []
    for i in range(0, len(rows), MAX_INLINE_ROWS - 1):
        keyboards.append(rows[i:i + MAX_INLINE_ROWS - 1])
        row = [
            *add_additional_buttons()
        ]
        if i > 0:
            row.append([_(tr.LAST_PAGE, context), ANSWER_BACK])
        if i + MAX_INLINE_ROWS - 1 < len(rows):
            row.append([_(tr.NEXT_PAGE, context), ANSWER_FORWARD])
        keyboards[-1].append(row)
    context.user_data[LIST_KEYBOARD] = keyboards
def show_list_keyboard(set_message, context, title_phrase):
    """Render the current page of the saved paginated keyboard,
    defaulting the page index to 0 on first display."""
    page_index = context.user_data.setdefault(LIST_KEYBOARD_INDEX, 0)
    page = context.user_data[LIST_KEYBOARD][page_index]
    set_message(text=_(title_phrase, context),
                reply_markup=make_inline_keyboard(page))
def handle_list_keyboard_query(update, context, show, choose_option):
    """Handle a press on a paginated keyboard: page navigation re-shows
    the keyboard; any other payload tears the keyboard state down and is
    delegated to `choose_option`."""
    data, reply = answer_query(update, context)
    if data in (ANSWER_FORWARD, ANSWER_BACK):
        step = 1 if data == ANSWER_FORWARD else -1
        context.user_data[LIST_KEYBOARD_INDEX] += step
        show(reply, context)
        return None
    context.user_data.pop(LIST_KEYBOARD)
    context.user_data.pop(LIST_KEYBOARD_INDEX)
    return choose_option(update, context, data, reply)
def save_teachers_read_keyboards(teachers, context):
    """Save a paginated teacher keyboard whose navigation row also
    offers a 'write one more' button."""
    extra_buttons = lambda: [[_(tr.WRITE_ONE_MORE, context), ANSWER_TYPE_AGAIN]]
    save_list_keyboard(teachers, context, extra_buttons)
def save_teachers_add_keyboards(teachers, context):
    """Save a paginated teacher keyboard whose navigation row also
    offers a 'not in list' escape button."""
    extra_buttons = lambda: [[_(tr.NOT_IN_LIST, context), ANSWER_NOT_IN_LIST]]
    save_list_keyboard(teachers, context, extra_buttons)
def show_teachers(set_message, context):
    """Display the current page of the teacher-selection keyboard."""
    title = tr.TEACHER_LIST_TITLE
    return show_list_keyboard(set_message, context, title)
def save_subject_keyboards(subjects, context):
    """Save a paginated subject keyboard whose navigation row also
    offers a 'not in list' escape button."""
    extra_buttons = lambda: [[_(tr.NOT_IN_LIST, context), ANSWER_NOT_IN_LIST]]
    save_list_keyboard(subjects, context, extra_buttons)
def save_subject_read_keyboards(subjects, context):
    """Save a paginated subject keyboard with no extra navigation
    buttons (default `add_additional_buttons`)."""
    save_list_keyboard(subjects, context)
def show_subjects(set_message, context):
    """Display the current page of the subject-selection keyboard."""
    title = tr.SUBJECT_LIST_TITLE
    return show_list_keyboard(set_message, context, title)
|
import os, sys, multiprocessing, hashlib, ast
from fractions import Fraction
from typing import Union, Callable
import numpy as np
from .kshell_exceptions import KshellDataStructureError
from .general_utilities import level_plot, level_density, gamma_strength_function_average
from .parameters import atomic_numbers
def parity_string_to_integer(parity: str):
    """Map the parity symbol '+' to 1 and '-' to -1.

    Raises
    ------
    KshellDataStructureError
        If `parity` is any other string.
    """
    if parity == "+":
        return 1
    if parity == "-":
        return -1
    msg = f"Invalid parity read from file. Got: '{parity}'."
    raise KshellDataStructureError(msg)
def generate_states(
    start: int = 0,
    stop: int = 14,
    n_states: int = 100,
    parity: Union[str, int] = "both"
):
    """
    Print the state-specification string which `kshell_ui.py` expects
    when it asks which states to calculate. Copy the printed text and
    paste it into the `kshell_ui.py` prompt.

    Parameters
    ----------
    start : int
        The lowest spin value.
    stop : int
        The largest spin value.
    n_states : int
        The number of states per spin value.
    parity : Union[str, int]
        The parity of the states. Allowed values are: 1, -1, 'both',
        'positive', 'negative', 'pos', 'neg', '+', '-'.

    Examples
    --------
    ``` python
    >>> import kshell_utilities as ksutil
    >>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")
    0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,
    ```
    """
    positive_inputs = ["positive", "pos", "+", "1", "+1", 1, "both"]
    negative_inputs = ["negative", "neg", "-", "-1", -1, "both"]
    spins = np.arange(start, stop + 0.5, 0.5)

    def _print_tokens(tokens):
        # Each token is followed by ", ", matching the kshell_ui format.
        for token in tokens:
            print(token, end=", ")

    if parity in positive_inputs:
        _print_tokens([f"{spin:g}+{n_states}" for spin in spins])
    if parity in negative_inputs:
        _print_tokens([f"{spin:g}-{n_states}" for spin in spins])
def _generate_unique_identifier(path: str) -> str:
"""
Generate a unique identifier based on the shell script and the
save_input file from KSHELL.
Parameters
----------
path : str
The path to a summary file or a directory with a summary file.
"""
shell_file_content = ""
save_input_content = ""
msg = "Not able to generate unique identifier!"
if os.path.isfile(path):
"""
If a file is specified, extract the directory from the path.
"""
directory = path.rsplit("/", 1)[0]
if directory == path:
"""
Example: path is 'summary.txt'
"""
directory = "."
for elem in os.listdir(directory):
"""
Loop over all elements in the directory and find the shell
script and save_input file.
"""
if elem.endswith(".sh"):
with open(f"{directory}/{elem}", "r") as infile:
shell_file_content = infile.read()
elif elem.endswith(".input"):
with open(f"{directory}/{elem}", "r") as infile:
save_input_content = infile.read()
else:
print(msg)
if (shell_file_content == "") and (save_input_content == ""):
print(msg)
return hashlib.sha1((shell_file_content + save_input_content).encode()).hexdigest()
def _load_energy_levels(infile):
"""
Load excitation energy, spin and parity into a list of structure:
levels = [[energy, spin, parity], ...].
Example
-------
Energy levels
N J prty N_Jp T E(MeV) Ex(MeV) log-file
1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt
2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt
3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt
4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt
5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt
6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt
"""
levels = []
negative_spin_counts = 0
for _ in range(3): infile.readline()
for line in infile:
try:
tmp = line.split()
if tmp[1] == "-1":
"""
-1 spin states in the KSHELL data file indicates
bad states which should not be included.
"""
negative_spin_counts += 1 # Debug.
continue
parity = 1 if tmp[2] == "+" else -1
levels.append([float(tmp[5]), 2*float(Fraction(tmp[1])), parity])
except IndexError:
"""
End of energies.
"""
break
return levels, negative_spin_counts
def _load_transition_probabilities_old(infile):
    """
    For summary files with old syntax (pre 2021-11-24).

    The old format is positional and irregular: depending on whether
    the level indices in columns 0/2 contain internal spaces, a row
    splits into 8-11 whitespace tokens. Each branch below handles one
    of those layouts (case_ 0-4).

    Parameters
    ----------
    infile:
        The KSHELL summary file.

    Returns
    -------
    tuple
        (reduced_transition_prob_decay_list, negative_spin_counts):
        rows of [2*J_i, pi_i, Ex_i, 2*J_f, pi_f, Ex_f, E_gamma,
        B(decay), B(excite)] and the number of skipped -1 spin states.
    """
    reduced_transition_prob_decay_list = []
    negative_spin_counts = 0
    # Skip the two header lines above the data rows.
    for _ in range(2): infile.readline()
    for line in infile:
        try:
            """
            Example of possible lines in file:
            J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
            2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)
            3/2+( 1) 0.072 5/2+( 1) 0.000 0.071 0.127( 0.07) 0.084( 0.05)
            2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)
            3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)
            1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)
            5.0+(60) 32.170  4.0+(100) 31.734  0.436    0.198( 0.11)    0.242( 0.14)
            4.0-( 3)  3.191  3.0+(10)  3.137  0.054    0.0( 0.0)    0.0( 0.0)
            0.0+(46)', '47.248', '1.0+(97)', '45.384', '1.864', '23.973(13.39)', '7.991(', '4.46)
            """
            tmp = line.split()
            len_tmp = len(tmp)
            case_ = None # Used for identifying which if-else case reads wrong.

            # Location of initial parity is common for all cases.
            parity_idx = tmp[0].index("(") - 1 # Find index of initial parity.
            parity_initial = 1 if tmp[0][parity_idx] == "+" else -1
            parity_initial_symbol = tmp[0][parity_idx]

            # Location of initial spin is common for all cases.
            spin_initial = float(Fraction(tmp[0][:parity_idx]))

            if (tmp[1][-1] != ")") and (tmp[3][-1] != ")") and (len_tmp == 9):
                """
                Example:
                J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
                2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)
                5.0+(60) 32.170  4.0+(100) 31.734  0.436    0.198( 0.11)    0.242( 0.14)
                """
                case_ = 0
                E_gamma = float(tmp[4])
                Ex_initial = float(tmp[1])
                reduced_transition_prob_decay = float(tmp[5][:-1])
                reduced_transition_prob_excite = float(tmp[7][:-1])
                parity_final_symbol = tmp[2].split("(")[0][-1]
                spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
                Ex_final = float(tmp[3])
            elif (tmp[1][-1] != ")") and (tmp[3][-1] == ")") and (len_tmp == 10):
                """
                Example:
                J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
                2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)
                """
                case_ = 1
                E_gamma = float(tmp[5])
                Ex_initial = float(tmp[1])
                reduced_transition_prob_decay = float(tmp[6][:-1])
                reduced_transition_prob_excite = float(tmp[8][:-1])
                parity_final_symbol = tmp[2].split("(")[0][-1]
                # spin_final = float(Fraction(tmp[2][:-2]))
                spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
                Ex_final = float(tmp[4])
            elif (tmp[1][-1] == ")") and (tmp[4][-1] != ")") and (len_tmp == 10):
                """
                Example:
                J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
                3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)
                1.0+( 1) 5.357  0.0+(103) 0.000  5.357    0.002( 0.00)    0.007( 0.00)
                4.0-( 3)  3.191  3.0+(10)  3.137  0.054    0.0( 0.0)    0.0( 0.0)
                """
                case_ = 2
                E_gamma = float(tmp[5])
                Ex_initial = float(tmp[2])
                reduced_transition_prob_decay = float(tmp[6][:-1])
                reduced_transition_prob_excite = float(tmp[8][:-1])
                parity_final_symbol = tmp[3].split("(")[0][-1]
                spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
                Ex_final = float(tmp[4])
            elif (tmp[1][-1] == ")") and (tmp[4][-1] == ")") and (len_tmp == 11):
                """
                Example:
                J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
                1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)
                """
                case_ = 3
                E_gamma = float(tmp[6])
                Ex_initial = float(tmp[2])
                reduced_transition_prob_decay = float(tmp[7][:-1])
                reduced_transition_prob_excite = float(tmp[9][:-1])
                parity_final_symbol = tmp[3].split("(")[0][-1]
                # spin_final = float(Fraction(tmp[3][:-2]))
                spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
                Ex_final = float(tmp[5])
            elif (tmp[5][-1] == ")") and (tmp[2][-1] == ")") and (len_tmp == 8):
                """
                Example:
                J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
                0.0+(46) 47.248  1.0+(97) 45.384  1.864   23.973(13.39)    7.991( 4.46)
                """
                case_ = 4
                E_gamma = float(tmp[4])
                Ex_initial = float(tmp[1])
                reduced_transition_prob_decay = float(tmp[5].split("(")[0])
                reduced_transition_prob_excite = float(tmp[6][:-1])
                parity_final_symbol = tmp[2].split("(")[0][-1]
                spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
                Ex_final = float(tmp[3])
            else:
                msg = "ERROR: Structure not accounted for!"
                msg += f"\n{line=}"
                raise KshellDataStructureError(msg)

            if parity_final_symbol == "+":
                parity_final = 1
            elif parity_final_symbol == "-":
                parity_final = -1
            else:
                msg = f"Could not properly read the final parity! {case_=}"
                raise KshellDataStructureError(msg)

            if (spin_final == -1) or (spin_initial == -1):
                """
                -1 spin states in the KSHELL data file indicates
                bad states which should not be included.
                """
                negative_spin_counts += 1 # Debug.
                continue

            reduced_transition_prob_decay_list.append([
                2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
                parity_final, Ex_final, E_gamma, reduced_transition_prob_decay,
                reduced_transition_prob_excite
            ])
        except ValueError as err:
            """
            One of the float conversions failed indicating that
            the structure of the line is not accounted for.
            """
            msg = "\n" + err.__str__() + f"\n{case_=}" + f"\n{line=}"
            raise KshellDataStructureError(msg)
        except IndexError:
            """
            End of probabilities.
            """
            break
    return reduced_transition_prob_decay_list, negative_spin_counts
def _load_transition_probabilities(infile):
    """
    Parse a new-syntax (post 2021-11-24) transition table.

    Example structure:
    B(E2)  ( > -0.0 W.u.)  mass = 50    1 W.u. = 10.9 e^2 fm^4
    e^2 fm^4 (W.u.)
    J_i    pi_i idx_i Ex_i    J_f    pi_f idx_f Ex_f      dE        B(E2)->         B(E2)->[wu]     B(E2)<-         B(E2)<-[wu]
    5     +    1     0.036     6     +    1     0.000     0.036    70.43477980      6.43689168    59.59865983      5.44660066

    Parameters
    ----------
    infile:
        The KSHELL summary file.

    Returns
    -------
    tuple
        (transitions, negative_spin_counts): rows of [2*J_i, pi_i,
        Ex_i, 2*J_f, pi_f, Ex_f, E_gamma, B(decay), B(excite)] and the
        number of skipped -1 spin states.
    """
    transitions = []
    negative_spin_counts = 0
    # Skip the two header lines above the data rows.
    infile.readline()
    infile.readline()
    for line in infile:
        fields = line.split()
        if not fields:
            break  # Blank line ends the table.
        spin_initial = float(Fraction(fields[0]))
        parity_initial = parity_string_to_integer(fields[1])
        Ex_initial = float(fields[3])
        spin_final = float(Fraction(fields[4]))
        parity_final = parity_string_to_integer(fields[5])
        Ex_final = float(fields[7])
        E_gamma = float(fields[8])
        B_decay = float(fields[9])
        B_excite = float(fields[11])
        if (spin_final < 0) or (spin_initial < 0):
            # -1 spin marks a bad state in the KSHELL output; skip.
            negative_spin_counts += 1
            continue
        transitions.append([
            2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
            parity_final, Ex_final, E_gamma, B_decay, B_excite
        ])
    return transitions, negative_spin_counts
def _load_parallel(arg_list):
"""
For parallel data loads.
[self.fname_summary, "Energy", self._load_energy_levels, None]
"""
fname, condition, loader = arg_list
with open(fname, "r") as infile:
for line in infile:
if condition in line:
return loader(infile)
class ReadKshellOutput:
"""
Read `KSHELL` data files and store the values as instance
attributes.
Attributes
----------
levels : np.ndarray
Array containing energy, spin, and parity for each excited
state. [[E, 2*spin, parity], ...].
transitions_BE1 : np.ndarray
Transition data for BE1 transitions. Structure:
NEW:
[2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
parity_final, Ex_final, E_gamma, B(.., i->f), B(.., f<-i)]
OLD:
Mx8 array containing [2*spin_final, parity_initial, Ex_final,
2*spin_initial, parity_initial, Ex_initial, E_gamma, B(.., i->f)].
transitions_BM1 : np.ndarray
Transition data for BM1 transitions. Same structure as BE1.
transitions_BE2 : np.ndarray
Transition data for BE2 transitions. Same structure as BE1.
"""
def __init__(self, path: str, load_and_save_to_file: bool, old_or_new: str):
    """
    Set up all attributes, then read the summary and/or .ptn files
    found at `path`.

    Parameters
    ----------
    path : string
        Path of `KSHELL` output file directory, or path to a
        specific `KSHELL` data file.

    load_and_save_to_file : bool
        Toggle saving data as `.npy` files on / off. If `overwrite`,
        saved `.npy` files are overwritten.

    old_or_new : str
        Choose between old and new summary file syntax. All summary
        files generated pre 2021-11-24 use old style.
        New:
        J_i    pi_i idx_i Ex_i    J_f    pi_f idx_f Ex_f      dE        B(E2)->         B(E2)->[wu]     B(E2)<-         B(E2)<-[wu]
        5     +    1     0.036     6     +    1     0.000     0.036    70.43477980      6.43689168    59.59865983      5.44660066
        Old:
        J_i    Ex_i     J_f    Ex_f   dE        B(M1)->         B(M1)<-
        2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)

    Raises
    ------
    ValueError
        If `load_and_save_to_file` is a string other than 'overwrite'.
    KshellDataStructureError
        If `path` is a file which is neither a summary nor a .ptn file.
    """
    self.path = path
    self.load_and_save_to_file = load_and_save_to_file
    self.old_or_new = old_or_new
    # Some attributes might not be altered, depending on the input file.
    self.fname_summary = None
    self.fname_ptn = None
    self.nucleus = None
    self.model_space = None
    self.proton_partition = None
    self.neutron_partition = None
    self.levels = None
    self.transitions_BM1 = [None]
    self.transitions_BE2 = [None]
    self.transitions_BE1 = [None]
    self.truncation = None
    # Debug.
    self.negative_spin_counts = np.array([0, 0, 0, 0])  # The number of skipped -1 spin states for [levels, BM1, BE2, BE1].

    if isinstance(self.load_and_save_to_file, str) and (self.load_and_save_to_file != "overwrite"):
        msg = "Allowed values for 'load_and_save_to_file' are: 'True', 'False', 'overwrite'."
        msg += f" Got '{self.load_and_save_to_file}'."
        raise ValueError(msg)

    if os.path.isdir(path):
        """
        If input 'path' is a directory containing KSHELL files,
        extract info from both summary and .ptn file.
        """
        for elem in os.listdir(path):
            if elem.startswith("summary"):
                self.fname_summary = f"{path}/{elem}"
                self._extract_info_from_summary_fname()
                self._read_summary()
            elif elem.endswith(".ptn"):
                self.fname_ptn = f"{path}/{elem}"
                self._extract_info_from_ptn_fname()
                # BUG FIX: was 'self.read_ptn()', a method which does
                # not exist (the class defines '_read_ptn'), so this
                # branch raised AttributeError whenever a .ptn file
                # was present in the directory.
                self._read_ptn()
    else:
        """
        'path' is a single file, not a directory.
        """
        fname = path.split("/")[-1]

        if fname.startswith("summary"):
            self.fname_summary = path
            self._extract_info_from_summary_fname()
            self._read_summary()
        elif fname.endswith(".ptn"):
            self.fname_ptn = path
            self._extract_info_from_ptn_fname()
            self._read_ptn()
        else:
            msg = f"Handling for file {fname} is not implemented."
            raise KshellDataStructureError(msg)
def _extract_info_from_ptn_fname(self):
"""
Extract nucleus and model space name.
"""
fname_split = self.fname_ptn.split("/")[-1]
fname_split = fname_split.split("_")
self.nucleus = fname_split[0]
self.model_space = fname_split[1]
def _read_ptn(self):
    """
    Read `KSHELL` partition file (.ptn) and extract proton
    partition, neutron partition, and particle-hole truncation data.
    Save as instance attributes.

    The file is scanned linearly while `line_number` /
    `line_number_inner` track how many lines each section spans, so
    that `np.loadtxt` can re-read just that section with
    skiprows/max_rows.
    """
    line_number = 0
    line_number_inner = 0
    self.truncation = []

    with open(self.fname_ptn, "r") as infile:
        for line in infile:
            line_number += 1

            if line.startswith("# proton partition"):
                # Count the partition rows, stopping at the next '#'
                # header (which becomes the current 'line' so the
                # following if-blocks can match it).
                for line_inner in infile:
                    """
                    Read until next '#'.
                    """
                    line_number_inner += 1
                    if line_inner.startswith("#"):
                        line = line_inner
                        break
                # Re-read exactly the counted section as an array.
                self.proton_partition = np.loadtxt(
                    fname = self.fname_ptn,
                    skiprows = line_number,
                    max_rows = line_number_inner
                )
                line_number += line_number_inner
                line_number_inner = 0

            if line.startswith("# neutron partition"):
                for line_inner in infile:
                    """
                    Read until next '#'.
                    """
                    line_number_inner += 1
                    if line_inner.startswith("#"):
                        line = line_inner
                        break
                self.neutron_partition = np.loadtxt(
                    fname = self.fname_ptn,
                    skiprows = line_number,
                    max_rows = line_number_inner
                )
                line_number += line_number_inner
                line_number_inner = 0

            if line.startswith("# particle-hole truncation"):
                for line_inner in infile:
                    """
                    Loop over all particle-hole truncation lines.
                    """
                    line_number += 1
                    line_inner_split = line_inner.split()

                    if (len(line_inner_split) < 2):
                        """
                        Condition will probably not get fulfilled.
                        Safety precaution due to indexing in this
                        loop.
                        """
                        break

                    if (line_inner_split[1]).startswith("["):
                        """
                        '[' indicates that 'line_inner' is still
                        containing truncation information.
                        """
                        for colon_index, elem in enumerate(line_inner_split):
                            """
                            Find the index of the colon ':' to
                            decide the orbit numbers and occupation
                            numbers.
                            """
                            if (elem == ":"): break

                        # Tokens after the colon are the occupation
                        # bounds; tokens before it (minus token 0) are
                        # the bracketed orbit list.
                        occupation = [int(occ) for occ in line_inner_split[colon_index + 1:]] # [min, max].
                        orbit_numbers = "".join(line_inner_split[1:colon_index])
                        orbit_numbers = orbit_numbers.replace("[", "")
                        orbit_numbers = orbit_numbers.replace("]", "")
                        orbit_numbers = orbit_numbers.replace(" ", "")  # This can prob. be removed because of the earlier split.
                        orbit_numbers = orbit_numbers.split(",")
                        orbit_numbers = [int(orbit) for orbit in orbit_numbers]

                        for orbit in orbit_numbers:
                            self.truncation.append((orbit, occupation))
                    else:
                        """
                        Line does not contain '[' and thus does not
                        contain truncation information.
                        """
                        break
def _extract_info_from_summary_fname(self):
"""
Extract nucleus and model space name.
"""
fname_split = self.fname_summary.split("/")[-1] # Remove path.
fname_split = fname_split.split("_")
self.nucleus = fname_split[1]
self.model_space = fname_split[2][:-4] # Remove .txt and keep model space name.
def _read_summary(self):
    """
    Read energy level data, transition probabilities and transition
    strengths from `KSHELL` output files.

    Results are cached as .npy files under ./tmp/, keyed by a hash of
    the run's shell script and save_input file (see
    `_generate_unique_identifier`); the cache is used or rebuilt
    according to `self.load_and_save_to_file`. The four tables are
    parsed in parallel worker processes via `_load_parallel`.

    Raises
    ------
    KshellDataStructureError
        If the `KSHELL` file has unexpected structure / syntax.
    """
    npy_path = "tmp"
    base_fname = self.path.split("/")[-1][:-4]

    try:
        os.mkdir(npy_path)
    except FileExistsError:
        pass

    unique_id = _generate_unique_identifier(self.path)
    levels_fname = f"{npy_path}/{base_fname}_levels_{unique_id}.npy"
    transitions_BM1_fname = f"{npy_path}/{base_fname}_transitions_BM1_{unique_id}.npy"
    transitions_BE2_fname = f"{npy_path}/{base_fname}_transitions_BE2_{unique_id}.npy"
    transitions_BE1_fname = f"{npy_path}/{base_fname}_transitions_BE1_{unique_id}.npy"
    debug_fname = f"{npy_path}/{base_fname}_debug_{unique_id}.npy"

    fnames = [
        levels_fname, transitions_BE2_fname, transitions_BM1_fname,
        transitions_BE1_fname, debug_fname
    ]

    if self.load_and_save_to_file != "overwrite":
        """
        Do not load files if overwrite parameter has been passed.
        """
        if all([os.path.isfile(fname) for fname in fnames]) and self.load_and_save_to_file:
            """
            If all files exist, load them. If any of the files do
            not exist, all will be generated.
            """
            self.levels = np.load(file=levels_fname, allow_pickle=True)
            self.transitions_BM1 = np.load(file=transitions_BM1_fname, allow_pickle=True)
            self.transitions_BE2 = np.load(file=transitions_BE2_fname, allow_pickle=True)
            self.transitions_BE1 = np.load(file=transitions_BE1_fname, allow_pickle=True)
            self.debug = np.load(file=debug_fname, allow_pickle=True)
            msg = "Summary data loaded from .npy!"
            msg += " Use loadtxt parameter load_and_save_to_file = 'overwrite'"
            msg += " to re-read data from the summary file."
            print(msg)
            return

    # One worker argument per table: [file, trigger string, parser].
    if self.old_or_new == "new":
        parallel_args = [
            [self.fname_summary, "Energy", _load_energy_levels],
            [self.fname_summary, "B(M1)", _load_transition_probabilities],
            [self.fname_summary, "B(E2)", _load_transition_probabilities],
            [self.fname_summary, "B(E1)", _load_transition_probabilities],
        ]
    elif self.old_or_new == "old":
        parallel_args = [
            [self.fname_summary, "Energy", _load_energy_levels],
            [self.fname_summary, "B(M1)", _load_transition_probabilities_old],
            [self.fname_summary, "B(E2)", _load_transition_probabilities_old],
            [self.fname_summary, "B(E1)", _load_transition_probabilities_old],
        ]

    pool = multiprocessing.Pool()
    pool_res = pool.map(_load_parallel, parallel_args)

    try:
        self.levels, self.negative_spin_counts[0] = pool_res[0]
    except TypeError:
        """
        If no energy values are found in the file:
        TypeError: cannot unpack non-iterable NoneType object
        """
        pass

    try:
        self.transitions_BM1, self.negative_spin_counts[1] = pool_res[1]
    except (TypeError, IndexError):
        """
        If no BM1 values are found in the file:
        TypeError: cannot unpack non-iterable NoneType object
        """
        pass

    try:
        self.transitions_BE2, self.negative_spin_counts[2] = pool_res[2]
    except (TypeError, IndexError):
        """
        If no BE2 values are found in the file:
        TypeError: cannot unpack non-iterable NoneType object
        """
        pass

    try:
        self.transitions_BE1, self.negative_spin_counts[3] = pool_res[3]
    except (TypeError, IndexError):
        """
        If no BE1 values are found in the file:
        TypeError: cannot unpack non-iterable NoneType object
        """
        pass

    self.levels = np.array(self.levels)
    self.transitions_BM1 = np.array(self.transitions_BM1)
    self.transitions_BE2 = np.array(self.transitions_BE2)
    self.transitions_BE1 = np.array(self.transitions_BE1)

    self.debug = "DEBUG\n"
    self.debug += f"skipped -1 states in levels: {self.negative_spin_counts[0]}\n"
    self.debug += f"skipped -1 states in BM1: {self.negative_spin_counts[1]}\n"
    self.debug += f"skipped -1 states in BE2: {self.negative_spin_counts[2]}\n"
    self.debug += f"skipped -1 states in BE1: {self.negative_spin_counts[3]}\n"
    self.debug = np.array(self.debug)

    if self.load_and_save_to_file:
        np.save(file=levels_fname, arr=self.levels, allow_pickle=True)
        np.save(file=transitions_BM1_fname, arr=self.transitions_BM1, allow_pickle=True)
        np.save(file=transitions_BE2_fname, arr=self.transitions_BE2, allow_pickle=True)
        np.save(file=transitions_BE1_fname, arr=self.transitions_BE1, allow_pickle=True)
        np.save(file=debug_fname, arr=self.debug, allow_pickle=True)
def level_plot(self,
               max_spin_states: int = 1000,
               filter_spins: Union[None, list] = None
               ):
    """
    Plot this isotope's energy levels (spin on the x axis, energy on
    the y axis). Thin wrapper around the module-level `level_plot`.

    Parameters
    ----------
    max_spin_states : int
        Upper bound on the number of plotted states per spin; the
        large default effectively means no limit.

    filter_spins : Union[None, list]
        Spins to include in the plot; `None` plots all spins.
        Defaults to `None`.
    """
    kwargs = dict(
        levels=self.levels,
        max_spin_states=max_spin_states,
        filter_spins=filter_spins,
    )
    level_plot(**kwargs)
def level_density_plot(self,
                       bin_size: Union[int, float] = 0.2,
                       plot: bool = True,
                       save_plot: bool = False
                       ):
    """
    Compute (and optionally plot) the level density with the given
    bin size. Thin wrapper around the module-level `level_density`.

    Parameters
    ----------
    bin_size : Union[int, float]
        Energy interval of which to calculate the density.

    plot : bool
        Toggle plotting on / off.

    save_plot : bool
        Toggle saving of plot (as .png with dpi=300) on / off.

    Returns
    -------
    bins : np.ndarray
        The corresponding bins (x value for plotting).

    density : np.ndarray
        The level density.
    """
    bins, density = level_density(
        energy_levels=self.levels[:, 0],
        bin_size=bin_size,
        plot=plot,
        save_plot=save_plot,
    )
    return bins, density
def gamma_strength_function_average_plot(self,
bin_width: Union[float, int] = 0.2,
Ex_min: Union[float, int] = 5,
Ex_max: Union[float, int] = 50,
multipole_type: str = "M1",
plot: bool = True,
save_plot: bool = False
):
"""
Wrapper method to include gamma ray strength function
calculations as an attribute to this class.
Parameters
----------
bin_width : Union[float, int]
The width of the energy bins. A bin width of 0.2 contains 20
states of uniform spacing of 0.01. Usually in MeV.
Ex_min : Union[float, int]
Lower limit for initial level excitation energy, usually in
MeV. Defaults to (somewhat arbitrary) 5 MeV. This value
shoud be set to the beginning of the (quasi?) continuum.
Ex_max : Union[float, int]
Upper limit for initial level excitation energy, usually in
MeV. Defaults to 50 MeV, which is probably way higher than
what any shell model calculation might be able to reproduce,
but 50 MeV is chosen as ≈ infinity which makes the GSF
calculations adjust from 50 to the largest value that the
dataset allows.
multipole_type : str
Choose whether to calculate for 'E1', 'M1' or 'E2'. NOTE:
Currently only M1 and E1 are implemented.
plot : bool
Toogle plotting on / off.
save_plot : bool
Toogle saving of plot (as .png with dpi=300) on / off.
"""
transitions_dict = {
"M1": self.transitions_BM1,
"E2": self.transitions_BE2,
"E1": self.transitions_BE1
}
bins, gsf = gamma_strength_function_average(
levels = self.levels,
transitions = transitions_dict[multipole_type],
bin_width = bin_width,
Ex_min = Ex_min,
Ex_max = Ex_max,
multipole_type = multipole_type,
plot = plot,
save_plot = save_plot
)
return bins, gsf
def gsf(self,
bin_width: Union[float, int] = 0.2,
Ex_min: Union[float, int] = 5,
Ex_max: Union[float, int] = 50,
multipole_type: str = "M1",
plot: bool = True,
save_plot: bool = False
):
"""
Alias for gamma_strength_function_average_plot. See that
docstring for details.
"""
return self.gamma_strength_function_average_plot(
bin_width = bin_width,
Ex_min = Ex_min,
Ex_max = Ex_max,
multipole_type = multipole_type,
plot = plot,
save_plot = save_plot
)
@property
def help(self):
"""
Generate a list of instance attributes without magic and private
methods.
Returns
-------
help_list : list
A list of non-magic instance attributes.
"""
help_list = []
for elem in dir(self):
if not elem.startswith("_"): # Omit magic and private methods.
help_list.append(elem)
return help_list
@property
def parameters(self) -> dict:
"""
Get the KSHELL parameters from the shell file.
Returns
-------
: dict
A dictionary of KSHELL parameters.
"""
path = self.path
if os.path.isfile(path):
path = path.rsplit("/", 1)[0]
return get_parameters(path)
def _process_kshell_output_in_parallel(args):
    """
    Simple wrapper for parallelizing loading of KSHELL files.

    Parameters
    ----------
    args : tuple
        (filepath, load_and_save_to_file, old_or_new) — packed into one
        tuple because multiprocessing.Pool.map passes a single argument.

    Returns
    -------
    : ReadKshellOutput
        The loaded KSHELL data for a single file.
    """
    filepath, load_and_save_to_file, old_or_new = args
    # Progress indicator: shows which file each worker is processing.
    print(filepath)
    return ReadKshellOutput(filepath, load_and_save_to_file, old_or_new)
def loadtxt(
    path: str,
    is_directory: bool = False,
    filter_: Union[None, str] = None,
    load_and_save_to_file: Union[bool, str] = True,
    old_or_new = "new"
    ) -> list:
    """
    Wrapper for using ReadKshellOutput class as a function.
    TODO: Consider changing 'path' to 'fname' to be the same as
    np.loadtxt.

    Parameters
    ----------
    path : str
        Filename (and path) of `KSHELL` output data file, or path to
        directory containing sub-directories with `KSHELL` output data.

    is_directory : bool
        If True, and 'path' is a directory containing sub-directories
        with `KSHELL` data files, the contents of 'path' will be scanned
        for `KSHELL` data files. Currently supports only summary files.

    filter_ : Union[None, str]
        NOTE: Shouldn't the type be list, not str?

    load_and_save_to_file : Union[bool, str]
        Toggle saving data as `.npy` files on / off. If 'overwrite',
        saved `.npy` files are overwritten.

    old_or_new : str
        Choose between old and new summary file syntax. All summary
        files generated pre 2021-11-24 use old style.
        New:
            J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
            5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
        Old:
            J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<-
            2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)

    Returns
    -------
    data : list
        List of instances with data from `KSHELL` data file as
        attributes.

    Raises
    ------
    ValueError
        If 'old_or_new' is not one of 'old', 'new'.
    NotADirectoryError, FileNotFoundError
        If 'path' does not match what 'is_directory' promises.
    RuntimeError
        If no KSHELL data could be loaded.
    """
    all_fnames = None
    data = []
    if old_or_new not in (old_or_new_allowed := ["old", "new"]):
        msg = f"'old_or_new' argument must be in {old_or_new_allowed}!"
        msg += f" Got '{old_or_new}'."
        raise ValueError(msg)

    if (is_directory) and (not os.path.isdir(path)):
        msg = f"{path} is not a directory"
        raise NotADirectoryError(msg)

    elif (not is_directory) and (not os.path.isfile(path)):
        msg = f"{path} is not a file"
        raise FileNotFoundError(msg)

    elif (is_directory) and (os.path.isdir(path)):
        all_fnames = {}

        for element in sorted(os.listdir(path)):
            """
            List all content in path.
            """
            if os.path.isdir(path + element):
                """
                If element is a directory, enter it to find data files.
                """
                all_fnames[element] = []    # Create blank list entry in dict for current element.
                for isotope in os.listdir(path + element):
                    """
                    List all content in the element directory.
                    """
                    if isotope.startswith("summary") and isotope.endswith(".txt"):
                        """
                        Extract summary data files.
                        """
                        try:
                            """
                            Example: O16.
                            """
                            n_neutrons = int(isotope[9:11])
                        except ValueError:
                            """
                            Example: Ne20.
                            """
                            n_neutrons = int(isotope[10:12])

                        n_neutrons -= atomic_numbers[element.split("_")[1]]
                        all_fnames[element].append([element + "/" + isotope, n_neutrons])

        with multiprocessing.Pool() as pool:
            """
            Context manager guarantees the worker processes are
            terminated and joined, also if an error occurs mid-loop
            (the pool was previously never closed — a resource leak).
            """
            for key in all_fnames:
                """
                Sort each list in the dict by the number of neutrons. Loop
                over all directories in 'all_fnames' and extract KSHELL data
                and append to a list.
                """
                if filter_ is not None:
                    if key.split("_")[1] not in filter_:
                        """
                        Skip elements not in filter_.
                        """
                        continue

                all_fnames[key].sort(key=lambda tup: tup[1])    # Why not do this when directory is listed?
                sub_fnames = all_fnames[key]
                arg_list = [(path + i[0], load_and_save_to_file, old_or_new) for i in sub_fnames]
                data += pool.map(_process_kshell_output_in_parallel, arg_list)
    else:
        """
        Only a single KSHELL data file.
        """
        data.append(ReadKshellOutput(path, load_and_save_to_file, old_or_new))

    if not data:
        msg = "No KSHELL data loaded. Most likely error is that the given"
        msg += f" directory has no KSHELL data files. {path=}"
        raise RuntimeError(msg)

    return data
def _get_timing_data(path: str):
"""
Get timing data from KSHELL log files.
Parameters
----------
path : str
Path to log file.
Examples
--------
Last 10 lines of log_Ar30_usda_m0p.txt:
```
total 20.899 2 10.44928 1.0000
pre-process 0.029 1 0.02866 0.0014
operate 3.202 1007 0.00318 0.1532
re-orthog. 11.354 707 0.01606 0.5433
thick-restart 0.214 12 0.01781 0.0102
diag tri-mat 3.880 707 0.00549 0.1857
misc 2.220 0.1062
tmp 0.002 101 0.00002 0.0001
```
"""
if "log" not in path:
msg = f"Unknown log file name! Got '{path}'"
raise KshellDataStructureError(msg)
if not os.path.isfile(path):
raise FileNotFoundError(path)
res = os.popen(f'tail -n 20 {path}').read() # Get the final 10 lines.
res = res.split("\n")
total = None
if "tr" not in path:
"""
KSHELL log.
"""
for elem in res:
tmp = elem.split()
try:
if tmp[0] == "total":
total = float(tmp[1])
break
except IndexError:
continue
elif "tr" in path:
"""
Transit log.
"""
for elem in res:
tmp = elem.split()
try:
if tmp[0] == "total":
total = float(tmp[3])
break
except IndexError:
continue
if total is None:
msg = f"Not able to extract timing data from '{path}'!"
raise KshellDataStructureError(msg)
return total
def _get_memory_usage(path: str) -> Union[float, None]:
"""
Get memory usage from KSHELL log files.
Parameters
----------
path : str
Path to a single log file.
Returns
-------
total : float, None
Memory usage in GB or None if memory usage could not be read.
"""
total = None
if "tr" not in path:
"""
KSHELL log.
"""
with open(path, "r") as infile:
for line in infile:
if line.startswith("Total Memory for Lanczos vectors:"):
try:
total = float(line.split()[-2])
except ValueError:
msg = f"Error reading memory usage from '{path}'."
msg += f" Got '{line.split()[-2]}'."
raise KshellDataStructureError(msg)
break
elif "tr" in path:
"""
Transit log. NOTE: Not yet implemented.
"""
return 0
if total is None:
msg = f"Not able to extract memory data from '{path.split('/')[-1]}'!"
raise KshellDataStructureError(msg)
return total
def _get_data_general(path: str, func: Callable):
"""
General input handling for timing data and memory data.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
func : Callable
_get_timing_data or _get_memory_usage.
"""
if os.path.isfile(path):
return func(path)
elif os.path.isdir(path):
total = 0
for elem in os.listdir(path):
if elem.startswith("log_") and elem.endswith(".txt"):
total += func(f"{path}/{elem}")
return total
else:
msg = f"'{path}' is neither a file nor a directory!"
raise NotADirectoryError(msg)
def get_timing_data(path: str) -> float:
    """
    Wrapper for _get_timing_data. Input a single log filename and get
    the timing data. Input a path to a directory of several log files
    and get the summed timing data. In units of seconds.

    Parameters
    ----------
    path : str
        Path to a single log file or path to a directory of log files.

    Returns
    -------
    : float
        The summed times for all input log files.
    """
    return _get_data_general(path, _get_timing_data)
def get_memory_usage(path: str) -> float:
    """
    Wrapper for _get_memory_usage. Input a single log filename and get
    the memory data. Input a path to a directory of several log files
    and get the summed memory data. In units of GB.

    Parameters
    ----------
    path : str
        Path to a single log file or path to a directory of log files.

    Returns
    -------
    : float
        The summed memory usage for all input log files.
    """
    return _get_data_general(path, _get_memory_usage)
def get_parameters(path: str, verbose: bool = True) -> dict:
    """
    Extract the parameters which are fed to KSHELL through the shell
    script.

    Parameters
    ----------
    path : str
        Path to a KSHELL work directory.

    verbose : bool
        If True, print a message when no .sh file can be found.

    Returns
    -------
    res : dict
        A dictionary where the keys are the parameter names and the
        values are the corresponding values.
    """
    res = {}
    shell_filename = None
    if os.path.isdir(path):
        # Use the first .sh file found in the work directory.
        for elem in os.listdir(path):
            if elem.endswith(".sh"):
                shell_filename = f"{path}/{elem}"
                break
    else:
        print("Directly specifying path to .sh file not yet implemented!")

    if shell_filename is None:
        if verbose:
            msg = f"No .sh file found in path '{path}'!"
            print(msg)
        return res

    with open(shell_filename, "r") as infile:
        for line in infile:
            # Skip ahead to the start of the parameter section.
            if line.startswith(r"&input"):
                break

        for line in infile:
            if line.startswith(r"&end"):
                """
                End of parameters.
                """
                break

            # Split on the first '=' only so that values which
            # themselves contain '=' are kept intact.
            tmp = line.split("=", 1)
            if len(tmp) < 2:
                """
                Skip lines with no '=' (e.g. blank lines) instead of
                crashing with IndexError.
                """
                continue
            key = tmp[0].strip()
            value = tmp[1].strip()
            try:
                value = ast.literal_eval(value)
            except ValueError:
                """
                Cant convert strings. Keep them as strings.
                """
                pass
            except SyntaxError:
                """
                Cant convert Fortran booleans (.true., .false.). Keep
                them as strings.
                """
                pass
            res[key] = value

    return res
|
from django.contrib import admin
from .models import UserInteractionData

# Register your models here.
# Expose UserInteractionData in the Django admin site with the default
# ModelAdmin options.
admin.site.register(UserInteractionData)
# -*- coding: utf-8 -*-
"""Image model"""
from bson import DBRef
from flask import current_app
from flask_mongoengine import Document
from mongoengine import BooleanField, StringField, CASCADE
from .helpers import ReferenceField, DocumentMixin, URLField
from .user import User
class Image(DocumentMixin, Document):
    """MongoDB document describing a single uploaded image."""

    meta = {'collection': 'image',
            'indexes': [
                {'fields': ['filename'], 'unique': True},
                {'fields': ['owner']},
                {'fields': ['yo']}],
            'auto_create_index': False}

    # Image filename.
    filename = StringField()
    # Image bitly link.
    short_link = URLField()
    # Public access.
    is_public = BooleanField()
    # The image owner.
    owner = ReferenceField(User, reverse_delete_rule=CASCADE)
    # Optional reference to a Yo
    yo = ReferenceField('Yo')

    def make_full_url(self):
        """Build the public S3 URL for this image's file."""
        bucket = current_app.config.get('YO_PHOTO_BUCKET')
        return 'https://s3.amazonaws.com/%s/%s' % (bucket, self.filename)

    def has_dbrefs(self):
        """Checks if there are any users that could not be
        dereferenced."""
        # A raw DBRef (instead of a User) means the owner document is
        # missing / could not be dereferenced.
        return isinstance(self.owner, DBRef)
|
"""This module contains classes for analyzing error patterns in alignments
Its in pretty rough shape as its an early, but working, form. It works with alignqc. But it really needs some love to be a good module.
Error Analysis
I am to describe errors at several levels
Errors in the query sequence
1. Is a query base an error or not?
* Probability - Sometimes it can be ambiguous which base is in error
2. What is the basic type of error?
* Mismatch
* Insertion
* Total insertion
* Homopolymer insertion
* Deletion
* Total deletion
* Before
* After
* Homopolymer deletion
* sum of probabilities should add up to 1.)
3. What is the more specific error?
* Mismatch type
* insertion/deletion - Base, Length
"""
from seqtools.sequence import rc
import sys

# The recognized error categories used throughout this module.
valid_types = set(['match','mismatch','total_insertion','total_deletion','homopolymer_insertion','homopolymer_deletion'])
class ErrorProfileFactory:
    """This class is used to create an error profile.

    It doesn't require any special input to create a new instance of it.
    You add to it with the add_alignment() function."""
    def __init__(self):
        # AlignmentErrors objects accumulated so far, one per alignment.
        self._alignment_errors = []
        # Combined context-error dictionaries; cached lazily and
        # invalidated (set to None) whenever a new alignment is added.
        self._target_context_errors = None
        self._query_context_errors = None
        self._general_errors = GeneralErrorStats()
        return
    def close(self):
        """Set some objects to None to hopefully free up some memory."""
        self._target_context_errors = None
        self._query_context_errors = None
        self._general_errors = None
        for ae in self._alignment_errors:
            ae.close()
        self._alignment_errors = None
    def add_alignment_errors(self,ae):
        """If you already have the alignment errors, add them for profile construction."""
        # Invalidate the cached combined reports; they are rebuilt on
        # the next get_*_context_errors() call.
        self._target_context_errors = None
        self._query_context_errors = None
        self._alignment_errors.append(ae)
        self._general_errors.add_alignment_errors(ae)
    def add_alignment(self,align):
        """Calculate alignment errors from the alignment and add it to the profile."""
        self._target_context_errors = None
        self._query_context_errors = None
        ae = AlignmentErrors(align)
        self._alignment_errors.append(ae)
        self._general_errors.add_alignment_errors(ae)
    def get_alignment_errors(self):
        """Return an object that describes the errors

        :returns: Alignment Errors
        :rtype: GeneralErrorStats
        """
        return self._general_errors
    def get_target_context_error_report(self):
        """Get a report on context-specific errors relative to what is expected on the target strand.

        :returns: Object with a 'header' and a 'data' where data describes context: before, after, reference, query. A total is kept for each reference base, and individual errors are finally checked
        :rtype: dict()
        """
        report = {}
        report['header'] = ['before','after','reference','query','fraction']
        report['data'] = []
        r = self.get_target_context_errors()
        # b = base before the context, a = base after the context.
        for b in sorted(r.keys()):
            for a in sorted(r[b].keys()):
                # NOTE(review): both 't' (reference base) and 'q'
                # (queried type) iterate over the same key set
                # r[b][a]; this assumes every entry's 'types' dict has
                # a count for every such key — confirm against how the
                # per-alignment context reports are built, otherwise
                # the ['types'][q] lookup can raise KeyError.
                for t in sorted(r[b][a]):
                    for q in sorted(r[b][a]):
                        v = 0
                        if r[b][a][t]['total'] > 0:
                            v = float(r[b][a][t]['types'][q])/float(r[b][a][t]['total'])
                        report['data'].append([b,a,t,q,v])
        return report
    def get_min_context_count(self,context_type):
        """Calculate which context has the minimum coverage thus far.

        :param context_type: 'target' or 'query'
        :type context_type: string
        :returns: Minimum Coverage
        :rtype: int
        """
        # Start from a sentinel larger than any realistic count.
        cnt = 10000000000
        bases = ['A','C','G','T']
        basesplus = ['A','C','G','T','-']
        r = None
        if context_type == 'target':
            r = self.get_target_context_errors()
        elif context_type == 'query':
            r = self.get_query_context_errors()
        else:
            sys.stderr.write("ERROR incorrect context type\n")
            sys.exit()
        # Scan every before/after/base combination for the lowest total.
        for b1 in bases:
            for b2 in bases:
                for b3 in basesplus:
                    if r[b1][b2][b3]['total'] < cnt: cnt = r[b1][b2][b3]['total']
        return cnt
    def write_context_error_report(self,file,context_type):
        """Write a context error report relative to the target or query into the specified filename

        :param file: The name of a file to write the report to
        :param context_type: The type of profile, target or query based
        :type file: string
        :type context_type: string
        """
        if context_type == 'target':
            r = self.get_target_context_error_report()
        elif context_type == 'query':
            r = self.get_query_context_error_report()
        else:
            sys.stderr.write("ERROR invalid type must be target or query\n")
            sys.exit()
        # NOTE(review): 'of' is never explicitly closed; consider a
        # 'with' block so the handle is released deterministically.
        of = open(file,'w')
        of.write("\t".join(r['header'])+"\n")
        for row in r['data']:
            of.write("\t".join([str(x) for x in row])+"\n")
        return
    def get_query_context_error_report(self):
        """Get a report on context-specific errors relative to what is expected on the query strand.

        :returns: Object with a 'header' and a 'data' where data describes context: before, after, reference, query. A total is kept for each reference base, and individual errors are finally checked
        :rtype: dict()
        """
        report = {}
        report['header'] = ['before','after','reference','query','fraction']
        report['data'] = []
        r = self.get_query_context_errors()
        # Mirror image of get_target_context_error_report: here the
        # totals are indexed by the query base 'q' instead of 't'.
        for b in sorted(r.keys()):
            for a in sorted(r[b].keys()):
                for t in sorted(r[b][a]):
                    for q in sorted(r[b][a]):
                        v = 0
                        if r[b][a][q]['total'] > 0:
                            v = float(r[b][a][q]['types'][t])/float(r[b][a][q]['total'])
                        report['data'].append([b,a,t,q,v])
        return report
    def get_target_context_errors(self):
        """Return the target context errors

        :returns: Dictionary containing the error counts on context base
        :rtype: dict()
        """
        # Rebuild the combined report lazily if the cache was cleared.
        if not self._target_context_errors:
            self.combine_context_errors()
        return self._target_context_errors
    def get_query_context_errors(self):
        """Return the query context errors

        :returns: Dictionary containing the error counts on context base
        :rtype: dict()
        """
        if not self._query_context_errors:
            self.combine_context_errors()
        return self._query_context_errors
    def combine_context_errors(self):
        """Each alignment contributes some information to the error report. These reports for each alignment need to be gone through and combined into one report.

        :returns: Dictionary containing the error counts on context base
        :rtype: dict()
        """
        # Merged structure: r[before][base][after]['total'] and
        # r[before][base][after]['types'][observed] -> count.
        r = {}
        if self._target_context_errors: r = self._target_context_errors
        for k in [x.get_context_target_errors() for x in self._alignment_errors]:
            for b in k:
                if b not in r: r[b] = {}
                for c in k[b]:
                    if c not in r[b]: r[b][c] = {}
                    for a in k[b][c]:
                        if a not in r[b][c]:
                            r[b][c][a] = {}
                            r[b][c][a]['total'] = 0
                            r[b][c][a]['types'] = {}
                        r[b][c][a]['total'] += k[b][c][a]['total']
                        for type in k[b][c][a]['types']:
                            if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0
                            r[b][c][a]['types'][type] += k[b][c][a]['types'][type]
        self._target_context_errors = r
        # Same merge, now from the query perspective.
        r = {}
        if self._query_context_errors: r = self._query_context_errors
        for k in [x.get_context_query_errors() for x in self._alignment_errors]:
            for b in k:
                if b not in r: r[b] = {}
                for c in k[b]:
                    if c not in r[b]: r[b][c] = {}
                    for a in k[b][c]:
                        if a not in r[b][c]:
                            r[b][c][a] = {}
                            r[b][c][a]['total'] = 0
                            r[b][c][a]['types'] = {}
                        r[b][c][a]['total'] += k[b][c][a]['total']
                        for type in k[b][c][a]['types']:
                            if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0
                            r[b][c][a]['types'][type] += k[b][c][a]['types'][type]
        self._query_context_errors = r
    def __str__(self):
        return self.get_string()
    def get_string(self):
        """Make a string representation of the error stats.

        :returns: error profile
        :rtype: string
        """
        ostr = ''
        ostr += str(len(self._alignment_errors))+" Alignments\n"
        ostr += 'Target: '+"\n"
        totbases = sum([len(x.get_target_sequence()) for x in self._alignment_errors])
        ostr += ' '+str(totbases)+" Target Bases\n"
        # Sum of per-base error probabilities approximates a count.
        adjerror = sum([sum([y.get_error_probability() for y in x.get_target_errors()]) for x in self._alignment_errors])
        ostr += ' '+str(adjerror)+" Approximate error count\n"
        ostr += ' '+str(float(adjerror)/float(totbases))+" Error rate\n"
        ostr += 'Query: '+"\n"
        totbases = sum([len(x.get_query_sequence()) for x in self._alignment_errors])
        ostr += ' '+str(totbases)+" Query Bases\n"
        adjerror = sum([sum([y.get_error_probability() for y in x.get_query_errors()]) for x in self._alignment_errors])
        ostr += ' '+str(adjerror)+" Approximate error count\n"
        ostr += ' '+str(float(adjerror)/float(totbases))+" Error rate\n"
        return ostr
class BaseError():
    """Class for describing an error at a single base relative to the target or query."""
    def __init__(self,type):
        # 'type' is the perspective of the error: 'query' or 'target'.
        self._type = type
        if type != 'query' and type != 'target':
            sys.stderr.write("ERROR specify type as query or target\n")
            sys.exit()
        # Errors are split into two helpers: unobservable errors (not
        # visible from this perspective, e.g. an insertion is invisible
        # on the target) and observable errors (mismatch / homopolymer
        # length changes).
        self._unobservable = BaseError.UnobservableError(self._type)
        self._observable = BaseError.ObservableError(self._type)
        return
    def get_homopolymer(self):
        """Return the homopolymer on target and the homopolymer on query associated with this base

        :returns: homopolymer dict {tseq:sequence,qseq:sequence}
        :rtype: dict()
        """
        return self._observable.get_homopolymer()

    # Is there any possible way to attribute this call to an error?
    def is_any_error(self):
        """If there's any reason to attribute this base to an error return True otherwise False

        :returns: there_is_error
        :rtype: bool
        """
        if self.get_observable_error_probability() > 0:
            return True
        if self.get_unobservable_error_probability() > 0:
            return True
        return False
    def get_observable(self):
        """Get error information that can be seen

        :returns: Observable error object
        :rtype: ObservableError
        """
        return self._observable
    def get_unobservable(self):
        """Unobservable errors inferred, like if it's relative to the target and an insertion, then it is not observed in the target, we just know it was inserted between two bases in the target.

        :returns: Unobservable error object
        :rtype: UnobservableError
        """
        return self._unobservable
    def get_error_probability(self):
        """This means for the base we are talking about how many errors between 0 and 1 do we attribute to it?
        For the 'unobserved' errors, these can only count when one is adjacent to base

        :returns: error probability p(error_observed)+(1-p_error_observed)*error_unobserved
        :rtype: float
        """
        # Combine observable and unobservable as P(A or B) assuming
        # independence.
        a = self._observable.get_error_probability()
        b = self._unobservable.get_error_probability()
        return a+(1-a)*b
    def get_observable_error_probability(self):
        """ get the probability of an observable error occuring at a base

        :returns: error probability
        :rtype: float
        """
        return self._observable.get_error_probability()
    def get_unobservable_error_probability(self):
        """ get the probability of an unobservable error occuring at a base

        :returns: error probability
        :rtype: float
        """
        return self._unobservable.get_error_probability()
    def set_observable(self,tseq,qseq):
        """Set the observable sequence data

        :param tseq: target sequence (from the homopolymer)
        :param qseq: query sequence (from the homopolymer)
        :type tseq: string
        :type qseq: string
        """
        # Homopolymers consist of one repeated nucleotide; the first
        # character (if any) identifies it.
        tnt = None
        qnt = None
        if len(tseq) > 0: tnt = tseq[0]
        if len(qseq) > 0: qnt = qseq[0]
        self._observable.set(len(tseq),len(qseq),tnt,qnt)
    def set_unobserved_before(self,tlen,qlen,nt,p):
        """Set the unobservable sequence data before this base

        :param tlen: target homopolymer length
        :param qlen: query homopolymer length
        :param nt: nucleotide
        :param p: p is the probability of attributing this base to the unobserved error
        :type tlen: int
        :type qlen: int
        :type nt: char
        :type p: float
        """
        self._unobservable.set_before(tlen,qlen,nt,p)
    def set_unobserved_after(self,tlen,qlen,nt,p):
        """Set the unobservable sequence data after this base

        :param tlen: target homopolymer length
        :param qlen: query homopolymer length
        :param nt: nucleotide
        :param p: p is the probability of attributing this base to the unobserved error
        :type tlen: int
        :type qlen: int
        :type nt: char
        :type p: float
        """
        self._unobservable.set_after(tlen,qlen,nt,p)
    def get_adjusted_error_count(self):
        """ Get the total error count associated with this single base.
        This would typically be one but sometimes it may be larger for insertions.

        :returns: error_count
        :rtype: float
        """
        p1 = self._observable.get_attributable_length()
        p1 += self._unobservable.get_attributable_length()
        return p1
    def get_base(self):
        """ Get the single base at this position.

        :returns: base
        :rtype: char
        """
        if self._type == 'query':
            return self._observable.get_query_base()
        return self._observable.get_target_base()
    #def get_type(self):
    #  otype = self._observable.get_type()
    #  if otype[0] != 'match': return otype
    #  before = self._unobservable.get_before_type()
    #  after = self._unobservable.get_after_type()
    #  if before: return before
    #  if after: return after
    #  return otype
    def __str__(self):
        return self.get_string()
    def get_string(self):
        """ Get a string representation of this single base error.

        :returns: report
        :rtype: string
        """
        ostr = ''
        ostr += 'BaseError for ['+self._type+'] base: '+self.get_base()+"\n"
        if self._observable.get_error_probability() > 0:
            ostr += ' Homopolymer set:'+"\n"
            ostr += ' '+str(self.get_homopolymer())+"\n"
            ostr += ' Observable:'+"\n"
            # NOTE(review): get_observable_type is not defined anywhere
            # on BaseError in this module — this line would raise
            # AttributeError if reached; possibly it should be
            # self._observable.get_type(). Confirm before relying on
            # get_string() for observable errors.
            ostr += ' type is: '+str(self.get_observable_type())+"\n"
            ostr += ' P(error): '+str(self._observable.get_error_probability())+"\n"
            ostr += ' Elength: '+str(self._observable.get_attributable_length())+"\n"
        before = self._unobservable.get_before_type()
        after = self._unobservable.get_after_type()
        if before or after:
            ostr += ' Unobservable '
            if self._type == 'query': ostr += 'deletion:'+"\n"
            else: ostr += 'insertion:'+"\n"
            ostr += ' P(error): '+str(self._unobservable.get_error_probability())+"\n"
            ostr += ' Elength: '+str(self._unobservable.get_attributable_length())+"\n"
            if before:
                ostr += ' before: '+"\n"
                ostr += ' P(error): '+str(self._unobservable.get_before_probability())+"\n"
                ostr += ' '+str(before)+"\n"
            if after:
                ostr += ' after:'+"\n"
                ostr += ' P(error): '+str(self._unobservable.get_after_probability())+"\n"
                ostr += ' '+str(after)+"\n"
        return ostr
    class UnobservableError:
        """Unobservable error is a deletion for a query base
        an insertion for a target base
        A non base error has a probability of occuring before a base
        and a probability of occuring after

        :param type: Either 'query' or target
        :type type: string
        """
        def __init__(self,type):
            # Type is the perspective
            self._type = type # Type is 'query' or 'target'
            if type != 'query' and type != 'target':
                sys.stderr.write("ERROR specify type as query or target\n")
                sys.exit()
            self._before_prob = 0 # probability specific base should have some missing call before it
            self._after_prob = 0 # probability specific base should have some missing call after it
            # Homopolymer lengths and nucleotide on each side.
            self._before = {'qlen':0,'tlen':0,'nt':None}
            self._after = {'qlen':0,'tlen':0,'nt':None}
        def get_error_probability(self):
            # P(before or after)
            return self._before_prob+(1-self._before_prob)*self._after_prob
        def set_after(self,tlen,qlen,nt,p):
            # Record the unobserved event adjacent (after) this base.
            self._after = {'qlen':qlen,'tlen':tlen,'nt':nt}
            self._after_prob = float(p)
        def set_before(self,tlen,qlen,nt,p):
            # Record the unobserved event adjacent (before) this base.
            self._before = {'qlen':qlen,'tlen':tlen,'nt':nt}
            self._before_prob = float(p)
        def get_before_type(self):
            # From the query perspective an unobserved event is a
            # deletion; from the target perspective it is an insertion.
            # Returns None when no event was recorded before this base.
            if self._before_prob > 0 and self._type == 'query':
                return ['deletion','total_deletion',[\
                            [self._before['nt'],self._before['tlen']],\
                            [self._before['nt'],0]\
                       ]\
                       ]
            if self._before_prob > 0 and self._type == 'target':
                return ['insertion','total_insertion',[\
                            [self._before['nt'],0],\
                            [self._before['nt'],self._before['qlen']]\
                       ]\
                       ]
            return None
        def get_after_type(self):
            # Same encoding as get_before_type, for the trailing side.
            if self._after_prob > 0 and self._type == 'query':
                return ['deletion','total_deletion',[\
                            [self._after['nt'],self._after['tlen']],\
                            [self._after['nt'],0]\
                       ]\
                       ]
            if self._after_prob > 0 and self._type == 'target':
                return ['insertion','total_insertion',[\
                            [self._after['nt'],0],\
                            [self._after['nt'],self._after['qlen']]\
                       ]\
                       ]
            return None
        def get_after_probability(self):
            return self._after_prob
        def get_before_probability(self):
            return self._before_prob
        def get_attributable_length(self):
            # Expected number of error bases attributed to this base:
            # probability times the length difference, on each side.
            bef = self._before_prob*abs(self._before['qlen']-self._before['tlen'])
            af = self._after_prob*abs(self._after['qlen']-self._after['tlen'])
            return bef+af
    class ObservableError:
        """Class to describe a homopolymer error or an observable
        insertion or deletion. Future versions of this should probably avoid using
        a nested class for this

        :param type: Either 'query' or target
        :type type: string
        """
        def __init__(self,type):
            self._type = type # Type is 'query' or 'target'
            if type != 'query' and type != 'target':
                sys.stderr.write("ERROR specify type as query or target\n")
                sys.exit()
            self._prob = 0 # proportion of error we will attribute to this base
            # Homopolymer lengths and nucleotides on both strands.
            self._details = {'qlen':0,'tlen':0,'qnt':None,'tnt':None}
        def set(self,tlen,qlen,tnt,qnt):
            """ Set the error we are observing for the homopolymer block

            :param tlen: target homopolymer length
            :param qlen: query homopolymer length
            :param tnt: target nucleotide
            :param qnt: query nucleotide
            :type tlen: int
            :type qlen: int
            :type tnt: char
            :type qnt: char
            """
            self._details = {'qlen':qlen,'tlen':tlen,'qnt':qnt,'tnt':tnt}
            # can figure out probability now
            if qlen == tlen and qnt == tnt:
                # Perfect match: no error.
                self._prob = float(0)
            elif qnt != tnt:
                # Different nucleotides: always an error (mismatch).
                self._prob = float(1)
            else:
                # Same nucleotide, different homopolymer length: spread
                # the length difference evenly over this side's bases.
                delta = self.get_changed_length()
                if self._type == 'query':
                    if delta > qlen:
                        self._prob = float(1) # we could ascribe one or more insertions to each base so call them all an error
                    else:
                        self._prob = float(delta)/float(qlen)
                elif self._type == 'target':
                    if delta > tlen:
                        self._prob = float(1)
                    else:
                        self._prob = float(delta)/float(tlen)
                else:
                    sys.stderr.write("unknown perspective type\n")
                    sys.exit()
        def get_homopolymer(self):
            """Return a class to describe the homopolymer

            :returns: homopolymer details
            :rtype: dict() return {'tseq':string,'qseq':string}
            """
            tnt = ''
            qnt = ''
            if self._details['tnt']: tnt = self._details['tnt']
            if self._details['qnt']: qnt = self._details['qnt']
            # Reconstruct the homopolymer strings by repetition.
            return {'tseq':self._details['tlen']*tnt,'qseq':self._details['qlen']*qnt}

        # Return the basic type of observable error
        def get_type(self):
            """get the type of the observable error

            :returns: error details
            :rtype: list with 1. main type, 2. subtype, 3. details [target [nucleotide, length],query [nucleotide, length]]
            """
            if self._details['tlen'] == self._details['qlen'] and\
               self._details['tnt'] == self._details['qnt']:
                return ['match','match',[[self._details['tnt'],1],[self._details['qnt'],1]]]
            if self._details['tlen'] == self._details['qlen'] and\
               self._details['tnt'] != self._details['qnt']:
                return ['mismatch','mismatch',[[self._details['tnt'],1],[self._details['qnt'],1]]]
            if self._details['tlen'] > self._details['qlen']:
                # Target is longer: a deletion (total if the query side
                # is empty, otherwise a homopolymer shortening).
                if self._details['qlen'] == 0:
                    return ['deletion','total_deletion',[\
                                [self._details['tnt'],self._details['tlen']],\
                                [self._details['tnt'],0]\
                           ]\
                           ]
                return ['deletion','homopolymer_deletion',[\
                            [self._details['tnt'],self._details['tlen']],\
                            [self._details['qnt'],self._details['qlen']]\
                       ]\
                       ]
            if self._details['qlen'] > self._details['tlen']:
                # Query is longer: an insertion.
                if self._details['tlen'] == 0:
                    return ['insertion','total_insertion',[\
                                [self._details['qnt'],0],\
                                [self._details['qnt'],self._details['qlen']]\
                           ]\
                           ]
                return ['insertion','homopolymer_insertion',[\
                            [self._details['tnt'],self._details['tlen']],\
                            [self._details['qnt'],self._details['qlen']]\
                       ]\
                       ]
            return 'UNKNOWN'
        def get_query_base(self):
            """Just the query base"""
            return self._details['qnt']
        def get_target_base(self):
            """Just the target base"""
            return self._details['tnt']
        def get_error_probability(self):
            """Probability that this base is the product of an error

            :returns: probability
            :rtype: float
            """
            return self._prob
        def get_attributable_length(self):
            """ For calculating total error counts """
            delta = self.get_changed_length()
            # calculate extra
            extra = 0
            # NOTE(review): both branches divide by the perspective's
            # homopolymer length — if that length is 0 (and delta > 0)
            # this raises ZeroDivisionError. Presumably such blocks do
            # not occur from this perspective; confirm upstream.
            if self._type == 'query' and self._details['qlen'] < delta:
                remainder = delta - self._details['qlen']
                extra = float(remainder)/float(self._details['qlen'])
            elif self._type == 'target' and self._details['tlen'] < delta:
                remainder = delta - self._details['tlen']
                extra = float(remainder)/float(self._details['tlen'])
            return self._prob+extra
        def get_changed_length(self): #if we evenly distribute the length of the change in size, how much goes with this
            """How much the homopolymer length differs between target and query

            :returns: abs(qlen-tlen)
            :rtype: int
            """
            return abs(self._details['qlen']-self._details['tlen'])
class AlignmentErrors:
    """Take an alignment between a target and query.
    Uses get_strand from alignment to orient the query.
    All results are on the positive strand of the query
    (meaning may be the reverse complement of target if negative)
    :param alignment: alignment to be used in error calculation
    :param min_intron_size: minimum length for an intron
    :type alignment: Alignment
    :type min_intron_size: int
    """
    def __init__(self,alignment,min_intron_size=68):
        self._min_intron_size=min_intron_size
        self._aligned_query = None
        self._hpas = []
        self._has_quality = False # can be changed when add_alignment uses one that has quality
        self._alignment = alignment
        self._quality_distro = None # gets set by analyze_quality
        self._deletion_type = None
        self._query_errors = None
        self._target_errors = None
        self._context_query_errors = None
        self._context_target_errors = None
        astrings = self._alignment.get_alignment_strings(min_intron_size=self._min_intron_size)
        if self._alignment.query_quality: self._has_quality = True
        # NOTE(review): returning here leaves _query_hpas/_target_hpas unset, so
        # later method calls on such an object raise AttributeError — confirm intended.
        if len(astrings) == 0: return None
        alns = []
        for i in range(len(astrings[0])):
            if self._alignment.strand == '+':
                alns.append({'query':astrings[0][i],'target':astrings[1][i],'query_quality':astrings[2][i]})
            else:
                # negative strand: reverse-complement the sequences, reverse the
                # quality string, and prepend so exon order flips to the query's + strand
                alns.insert(0,{'query':rc(astrings[0][i]),'target':rc(astrings[1][i]),'query_quality':astrings[2][i][::-1]})
        self._hpas = self._misalign_split(alns) # split alignment into homopolymer groups
        # Per-base indexes: one entry per query (resp. target) base, each pointing
        # at its homopolymer group, its in-group position, and the neighbor groups.
        self._query_hpas = []
        self._target_hpas = []
        qi = 0
        for i in range(len(self._hpas)):
            prev = None
            if i > 0: prev = self._hpas[i-1]
            foll = None
            if i + 1 < len(self._hpas): foll = self._hpas[i+1]
            qlen = len(self._hpas[i].get_query())
            for j in range(0,qlen):
                self._query_hpas.append({'hpa':self._hpas[i],'pos':j,'prev-hpa':prev,'next-hpa':foll})
            qi+=qlen
        ti = 0
        for i in range(len(self._hpas)):
            prev = None
            if i > 0: prev = self._hpas[i-1]
            foll = None
            if i + 1 < len(self._hpas): foll = self._hpas[i+1]
            tlen = len(self._hpas[i].get_target())
            for j in range(0,tlen):
                self._target_hpas.append({'hpa':self._hpas[i],'pos':j,'prev-hpa':prev,'next-hpa':foll})
            ti+=tlen
        # Pre-compute and cache the expensive error lists.
        self._target_errors = self.get_target_errors()
        self._query_errors = self.get_query_errors()
        self._context_target_errors = self.get_context_target_errors()
    def close(self):
        # Drop every cached structure so the (potentially large) alignment data
        # can be garbage collected.
        # NOTE(review): several attributes are reset twice below; harmless but redundant.
        self._min_intron_size= None
        self._aligned_query = None
        self._hpas = None
        self._has_quality = None # can be changed when add_alignment uses one that has quality
        self._alignment = None
        self._quality_distro = None # gets set by analyze_quality
        self._deletion_type = None
        self._query_errors = None
        self._target_errors = None
        self._context_query_errors = None
        self._context_target_errors = None
        self._hpas = None # split alignment into homopolymer groups
        self._query_hpas = None
        self._target_hpas = None
        self._target_errors = None
        self._query_errors = None
        self._context_target_errors = None
        return
    def get_HPAGroups(self):
        """ get a list of the HPA groups
        :returns: list of HPA groups
        :rtype: HPAGroup
        """
        return self._hpas
    def get_general_errors(self):
        """Accumulate totals of error types into a GeneralErrorStats.
        The general error report is relative to the total alignment length:
        error rate = (mismatches + insertions + deletions) / alignment length
        """
        r = GeneralErrorStats()
        r.add_alignment_errors(self)
        # NOTE(review): ``r`` is never returned, so callers always receive None.
        # This looks like a missing ``return r`` — confirm before relying on it.
    def get_context_target_errors(self):
        """A more straightforward calculation of the context-specific errors
        relative to the target
        :returns: matrix of observed contexts and values
        :rtype: matrix of [before][after][reference]{types} with types being any base or a deletion.
        """
        if self._context_target_errors: return self._context_target_errors
        # NOTE(review): this guards on the *query* error list even though the
        # loop below walks the target errors — looks like a copy/paste slip; confirm.
        if len(self._query_errors) < 3: return {}
        nts = ['A','C','G','T']
        poss = ['A','C','G','T','-']
        r = {}
        # initialize r[before][after][reference] = {'types': {base or '-': 0}, 'total': 0}
        for i in nts:
            if i not in r: r[i] = {}
            for j in nts:
                if j not in r[i]: r[i][j] = {}
                for k in poss:
                    if k not in r[i][j]:
                        r[i][j][k] = {}
                        r[i][j][k]['types'] = {}
                        r[i][j][k]['total'] = 0
                    for l in poss:
                        if l not in r[i][j][k]['types']: r[i][j][k]['types'][l] = 0
        # now r is initialized
        for i in range(1,len(self._target_errors)-1):
            tobs = self._target_errors[i].get_observable()
            tunobs = self._target_errors[i].get_unobservable()
            otype = tobs.get_type()
            op = tobs.get_error_probability()
            before = tunobs.get_before_type()
            bp = tunobs.get_before_probability()
            after = tunobs.get_after_type()
            ap = tunobs.get_after_probability()
            # skip any observation touching an ambiguous (N) base
            if otype[2][0][0] == 'N': continue
            if otype[2][1][0] == 'N': continue
            if before:
                if before[2][0][0] == 'N': continue
                if before[2][1][0] == 'N': continue
            if after:
                if after[2][0][0] == 'N': continue
                if after[2][1][0] == 'N': continue
            tbefore = self._target_errors[i-1].get_base()
            t = self._target_errors[i].get_base()
            tafter = self._target_errors[i+1].get_base()
            if tbefore == 'N' or tafter == 'N' or t == 'N': continue
            # each flanking gap context receives half an observation; the full
            # before/after context receives one
            r[tbefore][t]['-']['total'] += 0.5
            r[t][tafter]['-']['total'] += 0.5
            r[tbefore][tafter][t]['total'] += 1
            # We know we made an observation
            if otype[0] == 'mismatch':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types'][qb] += op
            elif otype[0] == 'match':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types'][qb] += float(1)
            elif otype[0] == 'deletion':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types']['-'] += op
                r[tbefore][tafter][t]['types'][qb] += (1-op)
            # make sure our insertion can't be bigger than 1
            hp_insert_before = 0
            hp_insert_after = 0
            if otype[0] == 'insertion':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                # split the homopolymer insertion evenly over the two flanking gaps
                r[tbefore][tb]['-']['types'][qb] += op/2
                r[tb][tafter]['-']['types'][qb] += op/2
                #homopolymer ... so we do have the correct base
                r[tbefore][tafter][t]['types'][qb] += 1
            # now take care of total insertions
            total_bp = 0
            total_ap = 0
            if before:
                qb = before[2][1][0]
                r[tbefore][t]['-']['types'][qb] += bp
            if after:
                qb = after[2][1][0]
                r[t][tafter]['-']['types'][qb] += ap
        # whatever weight was not assigned to an inserted base in a gap context
        # counts as 'no insertion' ('-')
        for b in r:
            for a in r:
                val = sum([r[b][a]['-']['types'][q] for q in nts])
                r[b][a]['-']['types']['-'] = r[b][a]['-']['total'] - val
        return r
    def get_context_query_errors(self):
        """A more straightforward calculation of the context-specific errors
        relative to the query
        :returns: matrix of observed contexts and values
        :rtype: matrix of [before][after][query]{types} with types being any base or a deletion.
        """
        # NOTE(review): unlike the target variant, this result is never cached
        # (self._context_query_errors stays None), so each call recomputes.
        if self._context_query_errors: return self._context_query_errors
        if len(self._query_errors) < 3: return {}
        nts = ['A','C','G','T']
        poss = ['A','C','G','T','-']
        r = {}
        # initialize r[before][after][query] = {'types': {base or '-': 0}, 'total': 0}
        for i in nts:
            if i not in r: r[i] = {}
            for j in nts:
                if j not in r[i]: r[i][j] = {}
                for k in poss:
                    if k not in r[i][j]:
                        r[i][j][k] = {}
                        r[i][j][k]['types'] = {}
                        r[i][j][k]['total'] = 0
                    for l in poss:
                        if l not in r[i][j][k]['types']: r[i][j][k]['types'][l] = 0
        # now r is initialized
        for i in range(1,len(self._query_errors)-1):
            tobs = self._query_errors[i].get_observable()
            tunobs = self._query_errors[i].get_unobservable()
            otype = tobs.get_type()
            op = tobs.get_error_probability()
            before = tunobs.get_before_type()
            bp = tunobs.get_before_probability()
            after = tunobs.get_after_type()
            ap = tunobs.get_after_probability()
            # skip any observation touching an ambiguous (N) base
            if otype[2][0][0] == 'N': continue
            if otype[2][1][0] == 'N': continue
            if before:
                if before[2][0][0] == 'N': continue
                if before[2][1][0] == 'N': continue
            if after:
                if after[2][0][0] == 'N': continue
                if after[2][1][0] == 'N': continue
            tbefore = self._query_errors[i-1].get_base()
            t = self._query_errors[i].get_base()
            tafter = self._query_errors[i+1].get_base()
            if tbefore == 'N' or tafter == 'N' or t == 'N': continue
            r[tbefore][t]['-']['total'] += 0.5
            r[t][tafter]['-']['total'] += 0.5
            r[tbefore][tafter][t]['total'] += 1
            # We know we made an observation
            if otype[0] == 'mismatch':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types'][tb] += op
            elif otype[0] == 'match':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types'][tb] += float(1)
            elif otype[0] == 'insertion':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                r[tbefore][tafter][t]['types']['-'] += op
                r[tbefore][tafter][t]['types'][tb] += (1-op)
            # make sure our deletion can't be bigger than 1
            hp_deletion_before = 0
            hp_deletion_after = 0
            if otype[0] == 'deletion':
                tb = otype[2][0][0]
                qb = otype[2][1][0]
                # split the homopolymer deletion evenly over the two flanking gaps
                r[tbefore][tb]['-']['types'][tb] += op/2
                r[tb][tafter]['-']['types'][tb] += op/2
                #homopolymer ... so we do have the correct base
                r[tbefore][tafter][t]['types'][tb] += 1
            # now take care of total deletions
            if before:
                tb = before[2][0][0]
                r[tbefore][t]['-']['types'][tb] += bp
            if after:
                tb = after[2][0][0]
                r[t][tafter]['-']['types'][tb] += ap
        # remaining weight in each gap context counts as 'no deletion' ('-')
        for b in r:
            for a in r:
                val = sum([r[b][a]['-']['types'][q] for q in nts])
                r[b][a]['-']['types']['-'] = r[b][a]['-']['total'] - val
        return r
    def get_query_errors(self):
        """ Return a list of base-wise error observations for the query
        :returns: list of base-wise errors
        :rtype: list of HPA groups
        """
        if self._query_errors: return self._query_errors
        v = []
        for i in range(len(self._query_hpas)):
            v.append(self.get_query_error(i))
        # NOTE(review): result is not stored into self._query_errors here;
        # __init__ caches it, but direct calls recompute every time.
        return v
    # Pre: given an index in the aligned query
    # Post: return the error description for that base
    def get_query_error(self,i):
        """Just get a single error characterization based on the index
        :param i: list index
        :type i: int
        :returns: base-wise error
        :rtype: HPA group description
        """
        x = self._query_hpas[i]
        h = x['hpa']
        pos = x['pos']
        prob = 0
        be = BaseError('query')
        be.set_observable(h.get_target(),h.get_query())
        if i != 0 and pos == 0: # check for a total deletion before
            prev = x['prev-hpa']
            if len(prev.get_query()) == 0: # total deletion
                be.set_unobserved_before(len(prev.get_target()),0,prev.get_target()[0],0.5)
        if i != len(self._query_hpas)-1 and pos == len(h.get_query())-1: # check for a total deletion after
            if x['next-hpa']:
                foll = x['next-hpa']
                if len(foll.get_query()) == 0: # total deletion
                    be.set_unobserved_after(len(foll.get_target()),0,foll.get_target()[0],0.5)
        return be
    def get_target_errors(self):
        """Return a list of base-wise error observations relative to the target
        :returns: list of base-wise errors
        :rtype: list of HPA groups
        """
        if self._target_errors: return self._target_errors
        v = []
        for i in range(len(self._target_hpas)):
            v.append(self.get_target_error(i))
        return v
    # Pre: given an index in the aligned target
    # Post: return the error description for that base
    def get_target_error(self,i):
        """Just get a single error characterization based on the index relative to the target
        :param i: list index
        :type i: int
        :returns: base-wise error
        :rtype: HPA group description
        """
        x = self._target_hpas[i]
        h = x['hpa']
        pos = x['pos']
        prob = 0
        be = BaseError('target')
        be.set_observable(h.get_target(),h.get_query())
        if i != 0 and pos == 0: # check for a total insertion before
            prev = x['prev-hpa']
            if len(prev.get_target()) == 0: # total insertion
                ilen = len(prev.get_query()) # NOTE(review): assigned but never used
                be.set_unobserved_before(0,len(prev.get_query()),prev.get_query()[0],0.5)
        if i != len(self._target_hpas)-1 and pos == len(h.get_target())-1: # check for a total insertion after
            if x['next-hpa']:
                foll = x['next-hpa']
                if len(foll.get_target()) == 0: # total insertion
                    be.set_unobserved_after(0,len(foll.get_query()),foll.get_query()[0],0.5)
        return be
    def get_query_sequence(self):
        """ return the query sequence reconstructed from the descriptions"""
        # one per-base entry exists for every query base, so the first character
        # of each entry's group reproduces the sequence
        return ''.join([x['hpa'].get_query()[0] for x in self._query_hpas])
    def get_target_sequence(self):
        """ return the target sequence reconstructed from the descriptions"""
        return ''.join([x['hpa'].get_target()[0] for x in self._target_hpas])
    def analyze_quality(self):
        """Go through HPAGroups and store the distro of ordinal values of
        quality scores"""
        # NOTE(review): h.get_quality() returns False when quality is absent;
        # iterating that would raise — presumably only called when _has_quality.
        res = {}
        for h in self._hpas:
            if h.type() not in res: res[h.type()]={}
            for c in h.get_quality():
                if c not in res[h.type()]: res[h.type()][c] = 0
                res[h.type()][c]+=1
        self._quality_distro = res
    def get_quality_report_string(self):
        """get a report on quality score distribution. currently prints to stdout"""
        if not self._quality_distro:
            self.analyze_quality()
        ostr = ""
        # NOTE(review): Python 2 print statement below; also ostr is never
        # appended to, so this prints the report and returns an empty string.
        for type in sorted(self._quality_distro.keys()):
            total = sum([ord(x)*self._quality_distro[type][x] for x in self._quality_distro[type]])
            cnt = sum([self._quality_distro[type][x] for x in self._quality_distro[type]])
            if cnt == 0: continue
            print 'type: '+type+' '+str(cnt)+' '+str(float(total)/float(cnt))
        return ostr
    def has_quality(self):
        """ Does the current data have quality information?"""
        return self._has_quality
    def _misalign_split(self,alns):
        """Requires alignment strings have been set so for each exon we have
        query, target and query_quality
        _has_quality will specify whether or not the quality is meaningful
        """
        total = []
        z = 0
        for x in alns:
            z += 1
            exon_num = z
            # on the negative strand the exon numbering is reversed
            if self._alignment.strand == '-':
                exon_num = (len(alns)-z)+1
            # seed the running group with the first aligned column of this exon
            buffer = {'query':x['query'][0],'target':x['target'][0],'query_quality':x['query_quality'][0],'exon':exon_num}
            if buffer['query'] == '-': buffer['nt'] = buffer['target']
            elif buffer['target'] == '-': buffer['nt'] = buffer['query']
            elif buffer['query'] == buffer['target']: buffer['nt'] = buffer['query']
            elif buffer['query'] != buffer['target']: buffer['nt'] = '*'
            else:
                sys.stderr.write("WARNING unkonwn case\n")
            for i in range(1,len(x['query'])):
                qchar = x['query'][i]
                tchar = x['target'][i]
                qualchar = x['query_quality'][i]
                if qchar != tchar and (qchar != '-' and tchar != '-'):
                    #classic mismatch: flush the group and start a mismatch group
                    total.append(buffer)
                    buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
                    buffer['nt'] = '*'
                elif qchar == buffer['nt'] or tchar == buffer['nt']:
                    # its a homopolymer match: extend the running group
                    buffer['query'] += qchar
                    buffer['target'] += tchar
                    buffer['query_quality'] += qualchar
                else:
                    # a new group starts
                    total.append(buffer)
                    buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
                    if qchar == '-': buffer['nt'] = tchar
                    else: buffer['nt'] = qchar
            # flush the trailing group of this exon
            total.append(buffer)
        result = [AlignmentErrors.HPAGroup(self,y) for y in total]
        return result
    class HPAGroup:
        """Homopolymer alignment group
        takes a chunk of homopolymer alignment
        as a dictionary with 'query' and 'target' sequences set
        query should always be positive strand
        :param mydict: dictionary with target sequences and a parent object
        :type mydict: dict() {'query':query sequence,'target':target sequence}
        """
        def __init__(self,parent,mydict):
            self._error_profile = parent
            self._data = mydict
            self._qseq = self._data['query'].replace('-','')
            self._tseq = self._data['target'].replace('-','')
            self._nt = self._data['nt'] # the nucleotide or * for mismatch
            self._qquality = self._data['query_quality'].replace('\0','')
            self._exon_number = self._data['exon']
            # classify the group by comparing gap-free query and target
            self._type = None
            if self._qseq == self._tseq:
                self._type = 'match'
            ### handle mismatches
            elif self._nt == '*':
                self._type = 'mismatch'
                self._code = self._tseq+'>'+self._qseq
            # Total deletion
            elif len(self._qseq) == 0:
                self._type = 'total_deletion'
            # Total insert
            elif len(self._tseq) == 0:
                self._type = 'total_insertion'
            elif len(self._qseq) < len(self._tseq):
                self._type = 'homopolymer_deletion'
            elif len(self._qseq) > len(self._tseq):
                self._type = 'homopolymer_insertion'
            else:
                sys.stderr.write("ERROR unsupported type\n")
                sys.exit()
        def get_nt(self):
            # the group's nucleotide, or '*' for a mismatch group
            return self._data['nt']
        def get_query(self):
            """ always + strand """
            return self._qseq
        def get_target(self):
            """could be + or - strand"""
            return self._tseq
        def get_exon(self):
            """ return the exon number"""
            return self._exon_number
        def get_length(self):
            """return the lengths of the query and the target
            :returns: lengths object
            :rtype: dict() with {'query':query length,'target': target length}
            """
            return {'query':len(self._qseq),'target':len(self._tseq)}
        def __str__(self):
            return self.get_string()
        def get_string(self):
            """ Describe the group as a string"""
            ostr = ''
            ostr += 'Target: '+self._tseq+"\n"
            ostr += 'Query: '+self._qseq+"\n"
            if self._error_profile.has_quality(): ostr += 'Quality: '+self._qquality+"\n"
            ostr += 'Type: '+str(self._type)+"\n"
            return ostr
        def has_quality(self):
            """ Do we have quality score info?"""
            return self._error_profile.has_quality()
        def get_quality(self):
            """ get the quality score info or false if we cannot"""
            if not self.has_quality(): return False
            return self._qquality
        def type(self):
            # one of: match, mismatch, total_deletion, total_insertion,
            # homopolymer_deletion, homopolymer_insertion
            return self._type
class GeneralErrorStats:
    """Keep track of general errors across the length of an alignment"""
    def __init__(self):
        self.alignment_count = 0 #number of alignments
        self.alignment_length = 0 #total bp
        self.mismatches = 0
        self.matches = 0
        # deletion counts: 'total' = any deletion, 'specific' = complete
        # deletions, 'homopolymer' = length shrink inside a homopolymer run
        self.deletions = {}
        self.deletions['total'] = 0
        self.deletions['specific'] = 0
        self.deletions['homopolymer'] = 0
        self.insertions = {}
        self.insertions['total'] = 0
        self.insertions['specific'] = 0
        self.insertions['homopolymer'] = 0
        possible = ['-','A','C','G','T']
        nts = ['A','C','G','T'] # NOTE(review): unused in this constructor
        #matrix holds the ref -> query changes, and the rates they occur
        self.matrix = {}
        for p1 in possible:
            self.matrix[p1] = {}
            for p2 in possible:
                self.matrix[p1][p2] = 0
    def __str__(self):
        return self.get_string()
    def get_string(self):
        """make a string representation of the general error report"""
        # NOTE(review): divides by alignment_length throughout, so this raises
        # ZeroDivisionError until at least one alignment has been added.
        ostr = ''
        errtotal = self.deletions['total']+self.insertions['total']+self.mismatches
        ostr += 'from '+str(self.alignment_length)+' bp of alignment'+"\n"
        ostr += ' '+str(float(errtotal)/float(self.alignment_length))+" error rate\n"
        ostr += ' '+str(float(self.mismatches)/float(self.alignment_length))+ " mismatches\n"
        ostr += ' '+str(float(self.deletions['total'])/float(self.alignment_length))+ " deletions\n"
        ostr += ' '+str(float(self.deletions['specific'])/float(self.alignment_length))+ " total deletions\n"
        ostr += ' '+str(float(self.deletions['homopolymer'])/float(self.alignment_length))+ " homopolymer deletions\n"
        ostr += ' '+str(float(self.insertions['total'])/float(self.alignment_length))+ " insertions\n"
        ostr += ' '+str(float(self.insertions['specific'])/float(self.alignment_length))+ " total insertions\n"
        ostr += ' '+str(float(self.insertions['homopolymer'])/float(self.alignment_length))+ " homopolymer insertions\n"
        ostr += ' More specific errors'+"\n"
        poss = ['-','A','C','G','T']
        ostr += ' - A C G T'+"\n"
        t = 0 # NOTE(review): accumulated but never used afterwards
        for p1 in poss:
            ostr += p1
            for p2 in poss:
                # per-base rate of target p1 being observed as query p2
                val = float(self.matrix[p1][p2])/float(self.alignment_length)
                ostr += " "+str(round(val,3))
                t += val
            ostr += "\n"
        ostr += "\n"
        return ostr
    def get_stats(self):
        """Return a string describing the stats"""
        ostr = ''
        errtotal = self.deletions['total']+self.insertions['total']+self.mismatches
        ostr += "ALIGNMENT_COUNT\t"+str(self.alignment_count)+"\n"
        ostr += "ALIGNMENT_BASES\t"+str(self.alignment_length)+"\n"
        ostr += "ANY_ERROR\t"+str(errtotal)+"\n"
        ostr += "MISMATCHES\t"+str(self.mismatches)+"\n"
        ostr += "ANY_DELETION\t"+str(self.deletions['total'])+"\n"
        ostr += "COMPLETE_DELETION\t"+str(self.deletions['specific'])+"\n"
        ostr += "HOMOPOLYMER_DELETION\t"+str(self.deletions['homopolymer'])+"\n"
        ostr += "ANY_INSERTION\t"+str(self.insertions['total'])+"\n"
        ostr += "COMPLETE_INSERTION\t"+str(self.insertions['specific'])+"\n"
        ostr += "HOMOPOLYMER_INSERTION\t"+str(self.insertions['homopolymer'])+"\n"
        return ostr
    def get_report(self):
        """Another report, but not context based"""
        ostr = ''
        ostr += "target\tquery\tcnt\ttotal\n"
        poss = ['-','A','C','G','T']
        for target in poss:
            for query in poss:
                ostr += target+ "\t"+query+"\t"+str(self.matrix[target][query])+"\t"+str(self.alignment_length)+"\n"
        return ostr
    def add_alignment_errors(self,ae):
        """Add alignment errors to the group
        :param ae: one set of alignment errors
        :param type: AlignmentErrors
        """
        self.alignment_count += 1
        for v in ae.get_HPAGroups():
            self._add_HPAGroup(v)
    def _add_HPAGroup(self,v):
        # Fold one homopolymer group into the counters and the base matrix.
        # Skip over N stuff
        if v.get_target():
            if v.get_target()[0] == 'N': return
        if v.get_query():
            if v.get_query()[0] == 'N': return
        # contribution length is the longer side of the group
        l = max(v.get_length().values())
        self.alignment_length += l
        if v.type() == 'match':
            self.matches += l
            self.matrix[v.get_target()[0]][v.get_query()[0]]+=l
        elif v.type() == 'mismatch':
            self.mismatches += l
            self.matrix[v.get_target()[0]][v.get_query()[0]]+=l
        elif v.type() == 'total_deletion':
            self.deletions['total'] += v.get_length()['target']
            self.deletions['specific'] += v.get_length()['target']
            self.matrix[v.get_target()[0]]['-'] += v.get_length()['target']
        elif v.type() == 'homopolymer_deletion':
            # only the missing portion counts as deleted; the rest matched
            self.deletions['total'] += v.get_length()['target']-v.get_length()['query']
            self.deletions['homopolymer'] += v.get_length()['target']-v.get_length()['query']
            self.matches += v.get_length()['query']
            self.matrix[v.get_target()[0]]['-'] += v.get_length()['target']-v.get_length()['query']
            self.matrix[v.get_target()[0]][v.get_query()[0]] += v.get_length()['query']
        elif v.type() == 'total_insertion':
            self.insertions['total'] += v.get_length()['query']
            self.insertions['specific'] += v.get_length()['query']
            self.matrix['-'][v.get_query()[0]] += v.get_length()['query']
        elif v.type() == 'homopolymer_insertion':
            # only the extra portion counts as inserted; the rest matched
            self.insertions['total'] += v.get_length()['query']-v.get_length()['target']
            self.insertions['homopolymer'] += v.get_length()['query']-v.get_length()['target']
            self.matches += v.get_length()['target']
            self.matrix['-'][v.get_query()[0]] += v.get_length()['query']-v.get_length()['target']
            self.matrix[v.get_target()[0]][v.get_query()[0]] += v.get_length()['target']
        else:
            sys.stderr.write("ERROR unexpected error type: "+str(v.type())+"\n")
            sys.exit()
        return
|
import pytest
from constance import config
from django.contrib.auth.models import User
from django.core.files import File
import tram.models as db_models
from tram.ml import base
@pytest.fixture
def dummy_model():
    """Provide a fresh DummyModel instance for each test."""
    model = base.DummyModel()
    return model
@pytest.fixture
def user():
    """Create a superuser for the duration of a test, then remove it."""
    account = User.objects.create_superuser(username="testuser")
    account.set_password("12345")
    account.save()
    yield account
    # teardown: delete the account after the test finishes
    account.delete()
class TestSentence:
    def test_sentence_stores_no_mapping(self):
        # Arrange: a sentence constructed without any attack mappings
        body = "this is text"
        position = 0
        attack_mappings = None
        # Act
        sentence = base.Sentence(body, position, attack_mappings)
        # Assert: the constructor stores each property unchanged
        assert sentence.text == body
        assert sentence.order == position
        assert sentence.mappings == attack_mappings
class TestMapping:
    def test_mapping_repr_is_correct(self):
        # Arrange
        score = 95.342000
        technique_id = "T1327"
        want = "Confidence=95.342000; Attack ID=T1327"
        # Act
        mapping = base.Mapping(score, technique_id)
        # Assert
        assert str(mapping) == want
class TestReport:
    def test_report_stores_properties(self):
        # Arrange
        report_name = "Test report"
        report_text = "Test report text"
        report_sentences = [base.Sentence("test sentence text", 0, None)]
        # Act
        rpt = base.Report(name=report_name, text=report_text, sentences=report_sentences)
        # Assert: constructor keeps every property as passed
        assert rpt.name == report_name
        assert rpt.text == report_text
        assert rpt.sentences == report_sentences
@pytest.mark.django_db
class TestSkLearnModel:
    """Tests ml.base.SKLearnModel via DummyModel"""

    def test__sentence_tokenize_works_for_paragraph(self, dummy_model):
        # Arrange: a paragraph with assorted sentence-ending punctuation
        paragraph = """Hello. My name is test. I write sentences. Tokenize, tokenize, tokenize!
        When will this entralling text stop, praytell? Nobody knows; the author can't stop.
        """
        want = [
            "Hello.",
            "My name is test.",
            "I write sentences.",
            "Tokenize, tokenize, tokenize!",
            "When will this entralling text stop, praytell?",
            "Nobody knows; the author can't stop.",
        ]
        # Act
        got = dummy_model._sentence_tokenize(paragraph)
        # Assert
        assert want == got

    @pytest.mark.parametrize(
        "filepath,expected",
        [
            ("tests/data/AA20-302A.pdf", "GLEMALT With a Ransomware Chaser"),
            (
                "tests/data/AA20-302A.docx",
                "Page 22 of 22 | Product ID: AA20-302A TLP:WHITE",
            ),
            (
                "tests/data/AA20-302A.html",
                "CISA is part of the Department of Homeland Security",
            ),
        ],
    )
    def test__extract_text_succeeds(self, dummy_model, filepath, expected):
        # Arrange: wrap the fixture file in a Document record
        with open(filepath, "rb") as fh:
            document = db_models.Document(docfile=File(fh))
            document.save()
        # Act
        extracted = dummy_model._extract_text(document)
        # Cleanup
        document.delete()
        # Assert
        assert expected in extracted

    def test__extract_text_unknown_extension_raises_value_error(self, dummy_model):
        # Arrange
        with open("tests/data/unknown-extension.fizzbuzz", "rb") as fh:
            document = db_models.Document(docfile=File(fh))
            document.save()
        # Act / Assert
        with pytest.raises(ValueError):
            dummy_model._extract_text(document)
        # Cleanup
        document.delete()

    def test_get_report_name_succeeds(self, dummy_model):
        # Arrange
        prefix = "Report for AA20-302A"
        with open("tests/data/AA20-302A.docx", "rb") as fh:
            document = db_models.Document(docfile=File(fh))
            document.save()
        processing_job = db_models.DocumentProcessingJob(document=document)
        processing_job.save()
        # Act
        report_name = dummy_model._get_report_name(processing_job)
        # Cleanup
        processing_job.delete()
        document.delete()
        # Assert
        assert report_name.startswith(prefix)

    def test_get_attack_objects_succeeds_after_initialization(self, dummy_model):
        # Act
        ids = dummy_model.get_attack_object_ids()
        # Assert
        assert "T1327" in ids  # Ensures mitre-pre-attack is available
        assert "T1497.003" in ids  # Ensures mitre-attack is available
        assert "T1579" in ids  # Ensures mitre-mobile-attack is available

    def test_disk_round_trip_succeeds(self, dummy_model, tmpdir):
        # Arrange
        target_path = (tmpdir + "dummy_model.pkl").strpath
        # Act
        dummy_model.get_attack_object_ids()  # Change the state of the DummyModel
        dummy_model.save_to_file(target_path)
        reloaded = base.DummyModel.load_from_file(target_path)
        # Assert: same class and same state after the round trip
        assert dummy_model.__class__ == reloaded.__class__
        assert dummy_model.get_attack_object_ids() == reloaded.get_attack_object_ids()

    def test_get_training_data(self, dummy_model):
        # Act
        X, y = dummy_model.get_training_data()
        # Assert that the fixtures have 163 accepted mappings.
        assert len(X) == 163
        assert len(y) == 163

    def test_non_sklearn_pipeline_raises(self):
        # Arrange: a model whose get_model() is not an sklearn Pipeline
        class NonSKLearnPipeline(base.SKLearnModel):
            def get_model(self):
                return "This is not an sklearn.pipeline.Pipeline instance"
        # Act / Assert
        with pytest.raises(TypeError):
            NonSKLearnPipeline()
@pytest.mark.django_db
class TestsThatNeedTrainingData:
    """
    ----- Begin ModelManager Tests -----
    """

    def test_modelmanager__init__loads_dummy_model(self):
        # Act
        manager = base.ModelManager("dummy")
        # Assert
        assert manager.model.__class__ == base.DummyModel

    def test_modelmanager__init__raises_value_error_on_unknown_model(self):
        # Act / Assert
        with pytest.raises(ValueError):
            base.ModelManager("this-should-raise")

    def test_modelmanager_train_model_doesnt_raise(self):
        # Arrange
        manager = base.ModelManager("dummy")
        # Act
        manager.train_model()
        # Assert
        # TODO: Something meaningful
    """
    ----- End ModelManager Tests -----
    """

    def test_get_mappings_returns_mappings(self):
        # Arrange: a trained model with the confidence cutoff disabled
        model = base.DummyModel()
        model.train()
        model.test()
        config.ML_CONFIDENCE_THRESHOLD = 0
        # Act
        found = model.get_mappings("test sentence")
        # Assert
        for mapping in found:
            assert isinstance(mapping, base.Mapping)

    def test_process_job_produces_valid_report(self):
        # Arrange
        with open("tests/data/AA20-302A.docx", "rb") as fh:
            document = db_models.Document(docfile=File(fh))
            document.save()
        processing_job = db_models.DocumentProcessingJob(document=document)
        processing_job.save()
        model = base.DummyModel()
        model.train()
        model.test()
        # Act
        report = model.process_job(processing_job)
        # Cleanup
        processing_job.delete()
        document.delete()
        # Assert
        assert report.name is not None
        assert report.text is not None
        assert len(report.sentences) > 0

    def test_process_job_handles_image_based_pdf(self, user):
        """
        Some PDFs can be saved such that the text is stored as images and therefore
        cannot be extracted from the PDF. Windows PDF Printer behaves this way.
        Image-based PDFs cause the processing pipeline to fail. The expected behavior
        is that the job is logged as "status: error".
        """
        # Arrange
        image_pdf = "tests/data/GroupIB_Big_Airline_Heist_APT41.pdf"
        with open(image_pdf, "rb") as fh:
            queued_job = db_models.DocumentProcessingJob.create_from_file(
                File(fh), user
            )
        job_id = queued_job.id
        manager = base.ModelManager("dummy")
        # Act
        manager.run_model()
        job_result = db_models.DocumentProcessingJob.objects.get(id=job_id)
        # Assert
        assert job_result.status == "error"
        assert len(job_result.message) > 0
    """
    ----- Begin DummyModel Tests -----
    """

    def test_dummymodel_train_and_test_passes(self, dummy_model):
        # Act
        dummy_model.train()  # Has no effect
        dummy_model.test()  # Has no effect
    """
    ----- End DummyModel Tests -----
    """
|
from django.urls import include, path # importing module include and path
from . import views # importing views
from django.contrib import admin
# Route table for the auth pages. Each route is named so templates and
# reverse() can refer to it; order is the order Django tries the patterns.
urlpatterns = [
    path('register', views.register, name='register'),  # account sign-up view
    path('login', views.login, name='login'),           # login view
    path('logout', views.logout, name='logout'),        # logout view
]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/apply.ipynb (unless otherwise specified).
__all__ = ['preprocess_db_intra_image', 'aggregate_fold_stats']
# Cell
from ifcimglib import imglmdb, utils, preprocessing, cif2lmdb
import numpy
import matplotlib.pyplot as plt
from tqdm import trange
import pickle
import logging
import lmdb
from pathlib import Path
from tqdm import tqdm
import os
import seaborn
from sklearn.model_selection import PredefinedSplit
# Cell
def preprocess_db_intra_image(db, preprocessed_output_path):
    """Apply per-image preprocessing to every image in ``db`` and write the
    result to a new lmdb database at ``preprocessed_output_path``.

    Pipeline per image: cast to float32, log-transform (channel list [1]),
    min-max normalize with clipping, crop/pad image and mask to a 70x70
    square, then store as float16 in the cif2lmdb instance layout.
    """
    logger = logging.getLogger(__name__)
    # start from a clean file: an existing lmdb would otherwise be reused
    if Path(preprocessed_output_path).exists():
        Path(preprocessed_output_path).unlink()
    env = lmdb.open(preprocessed_output_path, lock=False, sync=False, map_size=cif2lmdb.map_size, subdir=False)
    logger.info("Opened lmdb database %s" % preprocessed_output_path)
    with env.begin(write=True) as txn:
        # metadata keys mirror the cif2lmdb database layout
        txn.put(b'__targets__', pickle.dumps(db.targets))
        txn.put(b'__len__', len(db).to_bytes(db.idx_byte_length, "big"))
        txn.put(b'__names__', " ".join(db.names).encode("utf-8"))
        for i in trange(len(db)):
            x, m, _ = db.get_image(i)
            x = x.astype(numpy.float32)
            x = preprocessing.log_transform(x, m, [1])
            x = preprocessing.min_max_normalize(x, m, "clip")
            x = preprocessing.crop_and_pad_to_square(x, 70)
            m = preprocessing.crop_and_pad_to_square(m.astype(numpy.uint8), 70).astype(bool)
            instance = cif2lmdb.get_instance(x.shape[1:], x.shape[0])
            instance = cif2lmdb.set_instance_data(instance, x.astype(numpy.float16), m)
            txn.put(i.to_bytes(db.idx_byte_length, byteorder='big'), pickle.dumps(instance))
    # sync=False above defers flushing; force it before closing
    env.sync()
    env.close()
# Cell
def aggregate_fold_stats(db_paths, cv_pkl_file):
    """Compute per-pixel mean/std statistics for every outer CV fold and every
    nested train/val split, pickle them next to ``cv_pkl_file`` and return them.

    :param db_paths: paths of preprocessed lmdb databases (sorted before wrapping)
    :param cv_pkl_file: pickle containing ``(test_fold, nested_test_folds)``
    :returns: list with one dict per outer split; each dict has an ``outer``
              (mean, std) tuple and a ``nested`` list of per-split
              ``train``/``val`` (mean, std) tuples
    """
    preprocessed_db = imglmdb.multidbwrapper(sorted(db_paths))
    with open(cv_pkl_file, "rb") as pkl:
        test_fold, nested_test_folds = pickle.load(pkl)
    splitter = PredefinedSplit(test_fold)
    # BUG FIX: the original ``[{}] * n`` repeats ONE shared dict n times, so
    # every fold's results aliased and overwrote each other; build independent
    # dicts with a comprehension instead.
    data = [{} for _ in range(splitter.get_n_splits())]
    for i, (nested_test_fold, (_, test_idx)) in enumerate(zip(nested_test_folds, splitter.split())):
        per_pixel_stats = preprocessing.compute_per_pixel_stats(preprocessed_db, None, idx=test_idx)
        # replace zero std (constant pixels) with 1 so it is safe as a divisor
        std_per_pixel = numpy.where(per_pixel_stats[1] == 0.0, 1, per_pixel_stats[1])
        data[i]["outer"] = (per_pixel_stats[0], std_per_pixel)
        nested_splitter = PredefinedSplit(nested_test_fold)
        # same aliasing fix for the nested per-split dicts
        data[i]["nested"] = [{} for _ in range(nested_splitter.get_n_splits())]
        for j, (train_idx, val_idx) in enumerate(nested_splitter.split()):
            per_pixel_stats = preprocessing.compute_per_pixel_stats(preprocessed_db, None, idx=train_idx)
            std_per_pixel = numpy.where(per_pixel_stats[1] == 0.0, 1, per_pixel_stats[1])
            data[i]["nested"][j]["train"] = (per_pixel_stats[0], std_per_pixel)
            per_pixel_stats = preprocessing.compute_per_pixel_stats(preprocessed_db, None, idx=val_idx)
            std_per_pixel = numpy.where(per_pixel_stats[1] == 0.0, 1, per_pixel_stats[1])
            data[i]["nested"][j]["val"] = (per_pixel_stats[0], std_per_pixel)
    with open(os.path.splitext(cv_pkl_file)[0] + "_stats.pkl", "wb") as pkl:
        pickle.dump(data, pkl)
    return data
import random
def FCFS(procesos):
    """First-Come-First-Served scheduling.

    :param procesos: list of (arrival time, burst length, single-char name)
                     tuples, assumed ordered by arrival time
    Side effect: calls mostrar() with the turnaround (t), waiting (e) and
    penalty T/t (p) lists plus the execution timeline string.
    """
    tiempo = 0
    ejecucion = ''
    t = []  # turnaround time per process
    e = []  # waiting time per process
    p = []  # penalty ratio T/t per process
    for pro in procesos:
        espera = tiempo - pro[0]
        if espera >= 0:
            e.append(espera)
        else:
            # CPU is idle until this process arrives: draw the gap and advance
            for j in range(0, espera, -1):
                ejecucion += '_'
            tiempo = pro[0]
            # BUG FIX: after an idle gap the waiting time is zero; the old code
            # kept the negative 'espera', understating tiempoPro (turnaround)
            # and the penalty ratio below.
            espera = 0
            e.append(0)
        tiempoPro = espera + pro[1]  # turnaround = waiting + burst
        t.append(tiempoPro)
        p.append(t[-1] / pro[1])
        # draw one timeline character per unit of burst
        for i in range(0, pro[1]):
            ejecucion += pro[2]
        tiempo += pro[1]
    mostrar('FCFS:', t, e, p, ejecucion)
def RR1(procesos):
    """Round-Robin scheduling simulation with quantum = 1.

    procesos: list of [arrival, burst, name] sorted by arrival; names must be
    single letters A..O (mapped to accumulator indices through ``aux``).
    Prints average metrics and the execution trace via mostrar().
    """
    # letter -> index into the per-process accumulators
    aux = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10, 'L':11, 'M':12, 'N':13, 'O':14}
    contador=0  # number of processes admitted so far (breaks if there are no processes)
    tiempo=0  # simulation clock
    cola=[]  # ready queue
    ejecucion=''  # execution trace, one character per time unit
    t=[0]*len(procesos)  # turnaround time per process
    e=[0]*len(procesos)  # waiting time per process
    p=[0]*len(procesos)  # normalized turnaround T/t per process
    i=0  # index of the process currently holding the CPU
    x=0  # scratch index for the bookkeeping sweep
    eT=[]  # remaining burst time per process (backup of execution times)
    for s in procesos:
        eT.append(s[1])
    while contador < len(procesos) or len(cola) > 0:
        contador += queu(procesos, tiempo, cola, contador)
        if len(cola) > 0:
            while i < len(cola):
                # run the current process for exactly one time unit
                ejecucion += cola[i][2]
                eT[aux[cola[i][2]]] -= 1
                # every queued process accrues turnaround; everyone except the
                # running one also accrues waiting time
                while x < len(cola):
                    indice = aux[cola[x][2]]
                    if x != i:
                        e[indice] += 1
                    t[indice] += 1
                    x += 1
                x=0
                if eT[aux[cola[i][2]]] == 0:
                    # finished: remove it; wrap the index if it fell off the end
                    cola.pop(i)
                    if i >= len(cola):
                        i=0
                else:
                    i+=1
                tiempo += 1
                # admit anything that arrived during this unit
                contador += queu(procesos, tiempo, cola, contador)
            i=0
        else:
            # nothing ready yet: idle one unit
            ejecucion += '_'
            tiempo += 1
    for k in range(0,len(p)):
        p[k]= t[k]/procesos[k][1]
    mostrar('RR1:', t, e, p, ejecucion)
def RR4(procesos):
    """Round-Robin scheduling simulation with quantum = 4.

    Identical bookkeeping to RR1, but a process keeps the CPU for up to four
    consecutive units (tracked in ``tam``) before the scheduler rotates.
    """
    # letter -> index into the per-process accumulators
    aux = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10, 'L':11, 'M':12, 'N':13, 'O':14}
    contador=0  # number of processes admitted so far (breaks if there are no processes)
    tiempo=0  # simulation clock
    cola=[]  # ready queue
    ejecucion=''  # execution trace, one character per time unit
    t=[0]*len(procesos)  # turnaround time per process
    e=[0]*len(procesos)  # waiting time per process
    p=[0]*len(procesos)  # normalized turnaround T/t per process
    i=0  # index of the process currently holding the CPU
    x=0  # scratch index for the bookkeeping sweep
    tam=0  # units consumed of the current quantum (0..4)
    eT=[]  # remaining burst time per process
    for s in procesos:
        eT.append(s[1])
    while contador < len(procesos) or len(cola) > 0:
        contador += queu(procesos, tiempo, cola, contador)
        if len(cola) > 0:
            while i < len(cola):
                # run the current process for one time unit
                ejecucion += cola[i][2]
                eT[aux[cola[i][2]]] -= 1
                # every queued process accrues turnaround; all but the running
                # one also accrue waiting time
                while x < len(cola):
                    indice = aux[cola[x][2]]
                    if x != i:
                        e[indice] += 1
                    t[indice] += 1
                    x += 1
                x=0
                tiempo += 1
                if eT[aux[cola[i][2]]] == 0:
                    # finished before exhausting its quantum
                    cola.pop(i)
                    if i >= len(cola):
                        i = 0
                    tam = 0
                else:
                    tam += 1
                    if tam == 4:
                        # quantum exhausted: pass the CPU to the next process
                        i += 1
                        tam = 0
                contador += queu(procesos, tiempo, cola, contador)
            i=0
        else:
            # nothing ready yet: idle one unit
            ejecucion += '_'
            tiempo += 1
    for k in range(0,len(p)):
        p[k]= t[k]/procesos[k][1]
    mostrar('RR4:', t, e, p, ejecucion)
def queu(procesos, tiempo, cola, contador):
    """Append to *cola* every not-yet-admitted process that has arrived.

    Scans procesos[contador:] and queues each entry whose arrival time is
    <= *tiempo*, preserving order.  Returns how many were appended so the
    caller can advance its admission counter.
    """
    llegados = [proc for proc in procesos[contador:] if proc[0] <= tiempo]
    cola.extend(llegados)
    return len(llegados)
def mostrar(w, t, e, p, ex):
    """Print averaged scheduling metrics followed by the execution trace.

    w: label prefix (e.g. 'FCFS:'); t/e/p: per-process turnaround, waiting
    and normalized-turnaround lists; ex: execution trace string.
    """
    lon = len(t)
    T = sum(t)
    E = sum(e)
    P = sum(p)
    print(w + ' T=' + str(T/lon) + ', E=' + str(E/lon) + ', P=' + str(P/lon))
    print(ex)
def SPN(A):
    """Shortest Process Next (non-preemptive shortest-job-first) simulation.

    A: list of [arrival, burst, name] entries sorted by arrival time.
    Reads the module-level ``numProcesos`` via ``global``.

    NOTE(review): unlike FCFS/RR1/RR4 this function never calls mostrar(),
    so the t/e/p accumulators are computed but not reported — confirm
    whether that is intentional.
    """
    global numProcesos
    tiempo=A[0][0]  # clock starts at the first arrival
    t=[]  # turnaround time accumulator
    e=[]  # waiting time accumulator
    p=[]  # normalized turnaround T/t accumulator
    # NOTE(review): []*len(A) is simply [] — the multiplier has no effect.
    E=[]*len(A)  # ready queue: processes waiting to run
    ejecucion=""  # execution-order trace string
    espera=0
    cont=0
    Basura=[]  # processes already executed
    # idle ticks before the first arrival
    for j in range(0,tiempo):
        ejecucion += '_ '
    for i in range(numProcesos):
        if A[i][0] <= tiempo:
            # pull everything that has arrived by now into the ready queue.
            # NOTE(review): only Basura is checked, not E itself, so a process
            # can be appended to E more than once — confirm.
            for k in range (i,numProcesos):
                if A[k][0]<= tiempo:
                    if (A[k] not in Basura):
                        E.append(A[k])
                        cont+=1
                    else:
                        print("YA ESTA")
                    # NOTE(review): reassigning the for-loop variable has no
                    # effect on the iteration — looks like dead code.
                    i+=1
            # shortest remaining burst first
            E.sort(key=lambda x:x[1])
            espera=tiempo-E[0][0]
            tiempo=tiempo+E[0][1]
        else:
            # CPU idle until the next arrival
            E.append(A[i])
            libre=E[0][0]-tiempo
            for m in range (libre):
                ejecucion += '_ '
            E.sort(key=lambda x:x[1])
            espera=0
            tiempo+=libre
            tiempo=tiempo+E[0][1]
        # run the selected process to completion
        for l in range (E[0][1]):
            ejecucion += E[0][2]
        tiempoPro=espera+E[0][1]
        e.append(espera)
        t.append(tiempoPro)
        p.append(t[-1]/E[0][1])
        Basura.append(E[0])
        E.pop(0)
def prueba(ronda):
    """Generate three random processes and run every scheduler on them.

    ronda: round number, used only for the printed banner.
    """
    # BUG FIX: SPN() declares ``global numProcesos`` and reads it, but this
    # variable used to be local to prueba(), so SPN raised NameError.  Declare
    # it global here so the value is actually shared.
    global numProcesos
    numProcesos=3
    cad=''
    j=0
    # Names for the processes
    diccionario={0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',
    7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O'}
    procesos=[]  # randomly generated [arrival, burst] pairs
    # Build the process list
    for i in range (numProcesos):
        procesos.append([random.randint(0,10),random.randint(1,10)])
    # sort by arrival time
    ordenProcesos=sorted(procesos)
    # Attach a name to each process and build the banner string
    for i in ordenProcesos:
        i.append(diccionario[j])
        cad += diccionario[j] + ': ' + str(i[0]) + ', t=' + str(i[1]) + ';'
        j+=1
    print('-Ronda', ronda, ':')
    print(cad)
    FCFS(ordenProcesos)
    RR1(ordenProcesos)
    RR4(ordenProcesos)
    SPN(ordenProcesos)
# Fixed example kept for manual testing:
#A=[[0,3,'A'],[1,5,'B'],[3,2,'C'],[9,5,'D'],[12,5,'E']]
#FCFS(A)
#RR1(A)
#RR4(A)
# Run five rounds of the scheduler comparison with random processes.
for x in range(1,6):
    prueba(x)
import os, csv
from collections import defaultdict
from dotenv import load_dotenv
from pymongo import MongoClient
#from pymongo.server_api import ServerApi
# use with python3
#TODO - change data file from env variable to commandline parameter (required)
#TODO - make this an upsert so it updates existing ids if they exist in the collection
#TODO - make the database have the proper unique keys idempotent so duplicate entries are not made
#TODO - add timing, so we know order (e.g. someone reads a beginner article then an advanced one),
# we want to recommend the advanced article after the beginner but not vice versa.
#TODO - Is there any way to create the documents to insert at the same time as reading the file to make the defaultdict?
# Currently we loop through the file to make the defaultdict, then loop through the dict to make the insert docs.
# Load config from a .env file:
load_dotenv()
MONGODB = os.environ['MONGODB_URI']
GA_DATA_FILE = os.environ['GA_DATA_FILE'] + '.csv'
DB = os.environ['DB']
PAGES_BY_ID = os.environ['PAGES_BY_ID']
### make a dictionary with a key of userID and array of pages
# Maps userID -> ordered list of distinct pages viewed by that user.
# NOTE(review): assumes each CSV row is (page, userID) with no header row —
# a header line would be ingested as data; confirm the export format.
this_dict = defaultdict(list)
with open(GA_DATA_FILE, 'r+') as f:
    read_me = csv.reader(f, delimiter=",")
    for row in read_me:
        if row[0] not in this_dict[row[1]]:
            this_dict[row[1]].append(row[0])
### save to MongoDB:
# Connect to your MongoDB cluster:
client = MongoClient(MONGODB)
# server_api is only needed for MongoDB version 4.9+
#server_api = ServerApi('1')
#client = MongoClient(MONGODB,server_api=server_api)
# database
db = client[DB]
# collection
pageviews = db[PAGES_BY_ID]
# Merge pages already stored in the collection so the rebuild below keeps them.
res = pageviews.find({})
for rec in res:
    for page in rec['pages']:
        if page not in this_dict[rec['_id']]:
            this_dict[rec['_id']].append(page)
insert_list=[]
# Insert a document for each key:
for key in this_dict:
    doc={}
    doc['_id']=key
    doc['pages']=this_dict[key]
    insert_list+=[doc]
# NOTE(review): drop-then-insert is not atomic — a crash between the two
# calls loses the whole collection; the TODOs above already call for an
# upsert-based rewrite.
pageviews.drop()
pageviews.insert_many(insert_list, ordered=False)
|
# Generated by Django 3.2.9 on 2021-11-10 17:02
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds the ``description`` field to the Shop app's Category model.

    NOTE(review): the one-off default for existing rows is
    ``django.utils.timezone.now`` even though the field is a CharField, so
    pre-existing categories get a stringified datetime as their description.
    Presumably an accidental answer to the makemigrations prompt; harmless
    for new rows (preserve_default=False) but worth confirming.
    """

    dependencies = [
        ('Shop', '0004_auto_20211110_1733'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='description',
            field=models.CharField(default=django.utils.timezone.now, max_length=500, verbose_name='Descripción'),
            preserve_default=False,
        ),
    ]
|
import http.server
import socketserver
from sys import version as python_version
from cgi import parse_header, parse_multipart
# https://stackoverflow.com/questions/4233218/python-how-do-i-get-key-value-pairs-from-the-basehttprequesthandler-http-post-h/13330449
# Pick the parsing helpers that match the running interpreter so the same
# script works under both Python 2 and Python 3.
if python_version.startswith('3'):
    from urllib.parse import parse_qs
    from http.server import BaseHTTPRequestHandler
else:
    from urlparse import parse_qs
    from BaseHTTPServer import BaseHTTPRequestHandler

# TCP port the HTTP server listens on.
PORT = 8080
class MyHandler(http.server.SimpleHTTPRequestHandler):
    """Static-file server whose POST handler runs the license-plate
    image pipeline for the submitted plate text, then redirects back
    to the form page."""

    def parse_POST(self):
        """Parse the POST body into a dict of form fields.

        Supports multipart and urlencoded bodies; anything else yields {}.
        """
        ctype, pdict = parse_header(self.headers['content-type'])
        if ctype == 'multipart/form-data':
            # NOTE(review): on Python 3, cgi.parse_multipart expects the
            # boundary in *pdict* as bytes and may need CONTENT-LENGTH —
            # confirm this path is exercised.
            postvars = parse_multipart(self.rfile, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers['content-length'])
            postvars = parse_qs(
                self.rfile.read(length),
                keep_blank_values=1)
        else:
            postvars = {}
        return postvars

    def do_POST(self):
        """Handle a plate submission: run the external tools, reply with a
        small auto-refreshing page that redirects back to form.html."""
        postvars = self.parse_POST()
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        print(postvars)
        lp = postvars[b'field1'][0].decode('UTF-8').strip().upper()
        print(lp) # execute the external program, the result will be saved in result.jpg
        import os
        import shlex
        # SECURITY FIX: `lp` comes straight from the request body; quote it so
        # a crafted plate string cannot inject arbitrary shell commands.
        os.system("cd training-d ; python3 random_image.py /home/wave/Lukas_LP/ " + shlex.quote(lp))
        os.system("rm /home/wave/.keras/datasets/license/test/* ; cp training-d/rnd.jpg /home/wave/.keras/datasets/license/test/1.jpg ; python3 pix2pix.py single ~/.keras/datasets/license/; ")
        redirect = """
<html>
<head>
<meta http-equiv="refresh" content="2; url='form.html'" />
</head>
</html>
Loading ...
"""
        self.wfile.write(bytes(str(redirect) , encoding='utf8' ))
        return
# Allow quick restarts: rebind the port even if it is still in TIME_WAIT.
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), MyHandler)
print("serving at port", PORT)
# Blocks forever serving requests.
httpd.serve_forever()
|
"""Do some things"""
def add_two_numbers(first, second):
    """Return the sum of *first* and *second*."""
    total = first + second
    return total
# Demo entry point.
if __name__ == "__main__":
    print(add_two_numbers(2, 5))
|
# DicomAligner.py by Francois Malan - 2011-06-23
# Revised as version 2.0 on 2011-07-07
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
from module_kits.misc_kit import misc_utils
import wx
import os
import vtk
import itk
import math
import numpy
class DICOMAligner(
    NoConfigModuleMixin, ModuleBase):
    """DeVIDE module that resamples a DICOM volume so its voxel axes line
    up with the patient LPH world coordinate system.

    Input 0 is the vtkImageData from DICOMReader port 0; input 1 is the
    matching medical metadata (direction cosines) from port 1.  The output
    is the resampled, isotropically spaced vtkImageData with a corrected
    origin.
    """

    def __init__(self, module_manager):
        # initialise our base class
        ModuleBase.__init__(self, module_manager)
        NoConfigModuleMixin.__init__(
            self, {'Module (self)' : self})
        self.sync_module_logic_with_config()
        # reslice filter performs the actual axis realignment; the
        # change-information filter lets us edit metadata afterwards
        self._ir = vtk.vtkImageReslice()
        self._ici = vtk.vtkImageChangeInformation()

    def close(self):
        """Disconnect all inputs and tear down the GUI."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of GUI
        NoConfigModuleMixin.close(self)

    def set_input(self, idx, input_stream):
        """Store the image data (idx 0) or the medical metadata (idx 1)."""
        if idx == 0:
            self._imagedata = input_stream
        else:
            self._metadata = input_stream
        self._input = input_stream

    def get_input_descriptions(self):
        return ('vtkImageData (from DICOMReader port 0)', 'Medical metadata (from DICOMReader port 1)')

    def get_output_descriptions(self):
        return ('vtkImageData', )

    def get_output(self, idx):
        # NOTE(review): self._output only exists after execute_module() has
        # run; calling this earlier raises AttributeError — confirm intended.
        return self._output

    def _convert_input(self):
        '''
        Performs the required transformation to match the image to the world coordinate system defined by medmeta
        '''
        # the first two columns of the direction cosines matrix represent
        # the x,y axes of the DICOM slices in the patient's LPH space
        # if we want to resample the images so that x,y are always LP
        # the inverse should do the trick (transpose should also work as long as boths sets of axes
        # is right-handed but let's stick to inverse for safety)
        dcmatrix = vtk.vtkMatrix4x4()
        dcmatrix.DeepCopy(self._metadata.direction_cosines)
        dcmatrix.Invert()
        origin = self._imagedata.GetOrigin()
        spacing = self._imagedata.GetSpacing()
        extent = self._imagedata.GetExtent()
        # convert our new cosines to something we can give the ImageReslice
        dcm = [[0,0,0] for _ in range(3)]
        for col in range(3):
            for row in range(3):
                dcm[col][row] = dcmatrix.GetElement(row, col)
        # do it.
        self._ir.SetResliceAxesDirectionCosines(dcm[0], dcm[1], dcm[2])
        self._ir.SetInput(self._imagedata)
        self._ir.SetAutoCropOutput(1)
        self._ir.SetInterpolationModeToCubic()
        # resample to the smallest of the three spacings (isotropic voxels)
        isotropic_sp = min(min(spacing[0],spacing[1]),spacing[2])
        self._ir.SetOutputSpacing(isotropic_sp, isotropic_sp, isotropic_sp)
        self._ir.Update()
        output = self._ir.GetOutput()
        #We now have to check whether the origin needs to be moved from its prior position
        #Yes folks - the reslice operation screws up the origin and we must fix it.
        #(Since the IPP is INDEPENDENT of the IOP, a reslice operation to fix the axes' orientation
        # should not rotate the origin)
        #
        #The origin's coordinates (as provided by the DICOMreader) are expressed in PATIENT-LPH
        #We are transforming the voxels (i.e. image coordiante axes)
        # FROM IMAGE TO LPH coordinates. We must not transform the origin in this
        # sense- only the image axes (and therefore voxels). However, vtkImageReslice
        # (for some strange reason) transforms the origin according to the
        # transformation matrix (?). So we need to reset this.
        #Once the image is aligned to the LPH coordinate axes, a voxel(centre)'s LPH coordinates
        # = origin + image_coordinates * spacing.
        #But, there is a caveat.
        # Since both image coordinates and spacing are positive, the origin must be at
        # the "most negative" corner (in LPH terms). Even worse, if the LPH axes are not
        # perpendicular relative to the original image axes, this "most negative" corner will
        # lie outside of the original image volume (in a zero-padded region) - see AutoCropOutput.
        # But the original origin is defined at the "most negative" corner in IMAGE
        # coordinates(!). This means that the origin should, in most cases, be
        # translated from its original position, depending on the relative LPH and
        # image axes' orientations.
        #
        #The (x,y,z) components of the new origin are, independently, the most negative x,
        #most negative y and most negative z LPH coordinates of the eight ORIGINAL IMAGE corners.
        #To determine this we compute the eight corner coordinates and do a minimization.
        #
        #Remember that (in matlab syntax)
        # p_world = dcm_matrix * diag(spacing)*p_image + origin
        #for example: for a 90 degree rotation around the x axis this is
        # [p_x]   [ 1 0 0][nx*dx]   [ox]
        # [p_y] = [ 0 0 1][ny*dy] + [oy]
        # [p_z]   [ 0 -1 0][nz*dz]   [oz]
        #, where p is the LPH coordinates, d is the spacing, n is the image
        # coordinates and o is the origin (IPP of the slice with the most negative IMAGE z coordinate).
        originn = numpy.array(origin)
        dcmn = numpy.array(dcm)
        corners = numpy.zeros((3,8))
        #first column of the DCM is a unit LPH-space vector in the direction of the first IMAGE axis, etc.
        #From this it follows that the displacements along the full IMAGE's x, y and z extents are:
        sx = spacing[0]*extent[1]*dcmn[:,0]
        sy = spacing[1]*extent[3]*dcmn[:,1]
        sz = spacing[2]*extent[5]*dcmn[:,2]
        corners[:,0] = originn
        corners[:,1] = originn + sx
        corners[:,2] = originn + sy
        corners[:,3] = originn + sx + sy
        corners[:,4] = originn + sz
        corners[:,5] = originn + sx + sz
        corners[:,6] = originn + sy + sz
        corners[:,7] = originn + sx + sy + sz
        # component-wise minimum over the eight transformed corners
        newOriginX = min(corners[0,:]);
        newOriginY = min(corners[1,:]);
        newOriginZ = min(corners[2,:]);
        #Since we set the direction cosine matrix to unity we have to reset the
        #axis labels array as well.
        self._ici.SetInput(output)
        self._ici.Update()
        fd = self._ici.GetOutput().GetFieldData()
        fd.RemoveArray('axis_labels_array')
        lut = {'L' : 0, 'R' : 1, 'P' : 2, 'A' : 3, 'F' : 4, 'H' : 5}
        # NOTE(review): this RemoveArray call duplicates the one just above —
        # harmless, but presumably unintentional.
        fd.RemoveArray('axis_labels_array')
        axis_labels_array = vtk.vtkIntArray()
        axis_labels_array.SetName('axis_labels_array')
        axis_labels_array.InsertNextValue(lut['R'])
        axis_labels_array.InsertNextValue(lut['L'])
        axis_labels_array.InsertNextValue(lut['A'])
        axis_labels_array.InsertNextValue(lut['P'])
        axis_labels_array.InsertNextValue(lut['F'])
        axis_labels_array.InsertNextValue(lut['H'])
        fd.AddArray(axis_labels_array)
        self._ici.Update()
        output = self._ici.GetOutput()
        output.SetOrigin(newOriginX, newOriginY, newOriginZ)
        self._output = output

    def execute_module(self):
        """Run the alignment; the result is available via get_output()."""
        self._convert_input()
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from django.views.generic import ListView
from projects.models import Project
from python_recipes import get_week_of_month_from_datetime, \
get_week_start_datetime_end_datetime_tuple
from tracking.models import Tracking
class WeekTracking(LoginRequiredMixin, ListView):
    """Week view of tracking data for the logged-in user.

    The queryset is not a Tracking queryset but a list of five dicts, one
    per weekday (Mon-Fri) of the selected week, each carrying the user's
    project names.
    """
    model = Tracking
    login_url = '/login/'

    def get_queryset(self):
        """Build the Monday-Friday week structure for the requested date.

        Accepts an optional ``?today=`` GET parameter (ISO-8601 date/time);
        defaults to the current date when absent or unparseable.
        """
        today = self.request.GET.get('today', None)
        if today is None:
            today = datetime.datetime.today()
        else:
            # BUG FIX: the query parameter arrives as a *string*, but the
            # helpers below need a datetime (they access .year/.month).
            # Parse it; fall back to now on bad input instead of crashing.
            try:
                today = datetime.datetime.fromisoformat(today)
            except ValueError:
                today = datetime.datetime.today()
        this_week = get_week_of_month_from_datetime(today)
        week_start = get_week_start_datetime_end_datetime_tuple(
            today.year, today.month, this_week)[0]
        week = []
        projects = Project.objects.filter(
            proj_participants__team_member=self.request.user).values('name')
        projects = list(projects)
        # One entry per weekday, each paired with the user's project list.
        for i in range(0, 5):
            week.append(dict(
                day=week_start + datetime.timedelta(days=i),
                projects=projects
            ))
        return week
import io, sys, time
from datetime import datetime
sys.path.append('/home/pi')
from serial import Serial
import matplotlib.pyplot as plt
# Live plot of integer readings streamed over the serial port.
plt.ion()
ser = Serial('/dev/ttyS0', 9600, timeout=0.2)
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser), newline='\r')
x = []  # sample timestamps
y = []  # decoded sensor values
while True:
    try:
        ser.flushInput()
        line = sio.readline()
        # Frames look like 'Rxxxx': four digits after the leading marker.
        d = int(line[1:5])
        ts = time.time()
        #dt = datetime.now()
        #print(ts, dt, d)
        x.append(ts)
        y.append(d)
        plt.clf()
        plt.plot(x, y, 'r')
        plt.grid(True)
        plt.pause(0.05)
        # Keep only the most recent 100 samples on screen.
        while len(x) > 100:
            x.pop(0)
        while len(y) > 100:
            y.pop(0)
    except KeyboardInterrupt:
        print('user interrupted')
        break
    # BUG FIX: was a bare ``except:``; narrowed so SystemExit & friends
    # still propagate.  Malformed frames/plot glitches stay non-fatal.
    except Exception:
        print('weird stuff happened')
# BUG FIX: removed ``f.close()`` — no ``f`` was ever defined (NameError);
# the serial port is the only resource that needs closing.
ser.close()
plt.ioff()
|
import os
import torch
import torch.cuda.nvtx as nvtx
import torch.nn.functional as F
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from model import DQN
class Agent():
    """Rainbow-style DQN agent.

    Wraps an online and a target DQN, with optional categorical (C51)
    distributional head, double-Q action selection, noisy linear layers and
    DistributedDataParallel support.
    """

    def __init__(self, args, action_space):
        self.action_space = action_space
        self.n = args.multi_step  # n of the n-step return
        self.discount = args.discount
        self.target_update = args.target_update
        self.categorical = args.categorical
        self.noisy_linear = args.noisy_linear
        self.double_q = args.double_q
        self.max_grad_norm = args.max_grad_norm
        self.device = torch.device('cuda', args.gpu)
        self.num_param_updates = 0
        if args.categorical:
            self.atoms = args.atoms
            self.v_min = args.v_min
            self.v_max = args.v_max
            self.support = torch.linspace(self.v_min, args.v_max, self.atoms).to(device=self.device) # Support (range) of z
            self.delta_z = (args.v_max - self.v_min) / (self.atoms - 1)
        self.online_net = DQN(args, self.action_space.n).to(device=self.device)
        if args.model and os.path.isfile(args.model):
            # Always load tensors onto CPU by default, will shift to GPU if necessary
            self.online_net.load_state_dict(torch.load(args.model, map_location='cpu'))
        self.online_net.train()
        self.target_net = DQN(args, self.action_space.n).to(device=self.device)
        self.update_target_net()
        self.target_net.eval()
        # target net is never trained directly; freeze its parameters
        for param in self.target_net.parameters():
            param.requires_grad = False
        self.optimizer = optim.Adam(self.online_net.parameters(), lr=args.lr, eps=args.adam_eps, amsgrad=True)
        if args.distributed:
            self.online_net = DDP(self.online_net)

    # Resets noisy weights in all linear layers (of online net only)
    def reset_noise(self):
        if isinstance(self.online_net, DQN):
            self.online_net.reset_noise()
        else:
            # DDP-wrapped: the real network lives under .module
            self.online_net.module.reset_noise()

    # Acts based on single state (no batch)
    def act(self, state):
        with torch.no_grad():
            probs = self.online_net(state.to(self.device))
            if self.categorical:
                # expected value per action: sum over support of z * p(z)
                probs = self.support.expand_as(probs) * probs
            actions = probs.sum(-1).argmax(-1).to(state.device)
            return actions

    # Acts with an ε-greedy policy (used for evaluation only)
    def act_e_greedy(self, state, epsilon=0.001):  # High ε can reduce evaluation scores drastically
        actions = self.act(state)
        # replace a random ε-fraction of greedy actions with uniform ones
        mask = torch.rand(state.size(0), device=state.device, dtype=torch.float32) < epsilon
        masked = mask.sum().item()
        if masked > 0:
            actions[mask] = torch.randint(0, self.action_space.n, (masked,), device=state.device, dtype=torch.long)
        return actions

    def learn(self, states, actions, returns, next_states, nonterminals, weights):
        """One gradient step on a replay batch; returns the per-sample loss.

        Uses the C51 projected cross-entropy loss when categorical, plain
        MSE on Q-values otherwise.  ``weights`` are importance weights
        (e.g. from prioritized replay).
        """
        tactions = actions.unsqueeze(-1).unsqueeze(-1)
        if self.categorical:
            tactions = tactions.expand(-1, -1, self.atoms)
        # Calculate current state probabilities (online network noise already sampled)
        nvtx.range_push('agent:online (state) probs')
        ps = self.online_net(states, log=True)  # Log probabilities log p(s_t, ·; θonline)
        ps_a = ps.gather(1, tactions)  # log p(s_t, a_t; θonline)
        nvtx.range_pop()
        with torch.no_grad():
            if isinstance(self.target_net, DQN):
                self.target_net.reset_noise()
            else:
                self.target_net.module.reset_noise()  # Sample new target net noise
            nvtx.range_push('agent:target (next state) probs')
            tns = self.target_net(next_states)  # Probabilities p(s_t+n, ·; θtarget)
            nvtx.range_pop()
            if self.double_q:
                # Calculate nth next state probabilities
                nvtx.range_push('agent:online (next state) probs')
                pns = self.online_net(next_states)  # Probabilities p(s_t+n, ·; θonline)
                nvtx.range_pop()
            else:
                pns = tns
            if self.categorical:
                pns = self.support.expand_as(pns) * pns  # Distribution d_t+n = (z, p(s_t+n, ·; θonline))
            # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            argmax_indices_ns = pns.sum(-1).argmax(-1).unsqueeze(-1).unsqueeze(-1)
            if self.categorical:
                argmax_indices_ns = argmax_indices_ns.expand(-1, -1, self.atoms)
            pns_a = tns.gather(1, argmax_indices_ns)  # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)
            if self.categorical:
                # Compute Tz (Bellman operator T applied to z)
                # Tz = R^n + (γ^n)z (accounting for terminal states)
                Tz = returns.unsqueeze(-1) + nonterminals.float().unsqueeze(-1) * (self.discount ** self.n) * self.support.unsqueeze(0)
                Tz = Tz.clamp(min=self.v_min, max=self.v_max)  # Clamp between supported values
                # Compute L2 projection of Tz onto fixed support z
                b = (Tz - self.v_min) / self.delta_z  # b = (Tz - Vmin) / Δz
                l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
                # Fix disappearing probability mass when l = b = u (b is int)
                l[(u > 0) * (l == u)] -= 1
                u[(l < (self.atoms - 1)) * (l == u)] += 1
                # Distribute probability of Tz
                batch_size = states.size(0)
                m = states.new_zeros(batch_size, self.atoms)
                offset = torch.linspace(0, ((batch_size - 1) * self.atoms), batch_size).unsqueeze(1).expand(batch_size, self.atoms).to(actions)
                m.view(-1).index_add_(0, (l + offset).view(-1), (pns_a.squeeze(1) * (u.float() - b)).view(-1))  # m_l = m_l + p(s_t+n, a*)(u - b)
                m.view(-1).index_add_(0, (u + offset).view(-1), (pns_a.squeeze(1) * (b - l.float())).view(-1))  # m_u = m_u + p(s_t+n, a*)(b - l)
            else:
                Tz = returns + nonterminals.float() * (self.discount ** self.n) * pns_a.squeeze(-1).squeeze(-1)
        if self.categorical:
            loss = -torch.sum(m * ps_a.squeeze(1), 1)  # Cross-entropy loss (minimises DKL(m||p(s_t, a_t)))
            weights = weights.unsqueeze(-1)
        else:
            loss = F.mse_loss(ps_a.squeeze(-1).squeeze(-1), Tz, reduction='none')
        nvtx.range_push('agent:loss + step')
        self.optimizer.zero_grad()
        weighted_loss = (weights * loss).mean()
        weighted_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.max_grad_norm)
        self.optimizer.step()
        nvtx.range_pop()
        return loss.detach()

    def update_target_net(self):
        """Hard-copy online weights into the target network."""
        self.target_net.load_state_dict(self.online_net.state_dict())

    # Save model parameters on current device (don't move model between devices)
    def save(self, path):
        torch.save(self.online_net.state_dict(), os.path.join(path, 'model.pth'))

    # Evaluates Q-value based on single state (no batch)
    def evaluate_q(self, state):
        with torch.no_grad():
            q = self.online_net(state.unsqueeze(0).to(self.device))
            if self.categorical:
                q *= self.support
            return q.sum(-1).max(-1)[0].item()

    def train(self):
        """Put the online network in training mode."""
        self.online_net.train()

    def eval(self):
        """Put the online network in evaluation mode."""
        self.online_net.eval()

    def __str__(self):
        return self.online_net.__str__()
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
# Installer configuration / package metadata for the nginx-log-monitor tool.
setup(
    name='nginx-log-monitor',
    version='0.0.1',
    description='Nginx Log Monitor',
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    # ship every package except documentation and tests
    packages=find_packages(exclude=['doc', 'tests*']),
    install_requires=[
        'aiohttp',
        'pyyaml',
        'pytz',
    ],
    # installs the `nginx-log-monitor` console command
    entry_points={
        'console_scripts': [
            'nginx-log-monitor=nginx_log_monitor:nginx_log_monitor_main'
        ],
    })
|
# coding=utf-8
'''
@ Summary: 最终的效果是用脚本分离出1s和超过1s的,然后超过1s的手动进行剪辑,
音频文件名不改,在同一个音频文件夹路径下会生成两个音频文件夹,分别存放
超过1s和1s的音频
@ file: rm_aug_silence.py
@ version: 1.0.0
@ Update: 增加pathlib.Path() 这个库,可以无视平台差异
@ Version: 1.0.1
@ Author: Lebhoryi@gmail.com
@ Date: 2020/3/26 下午4:41
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import shutil
from pathlib import Path
from pydub import AudioSegment
def detect_leading_silence(sound, silence_threshold=-35.0, chunk_size=20):
    """Return the length (in ms) of leading silence in *sound*.

    sound: a pydub.AudioSegment
    silence_threshold: loudness threshold in dB
    chunk_size: scanning step in ms

    Scans chunk by chunk from the start until the first chunk whose
    loudness reaches the threshold.
    """
    assert chunk_size > 0  # avoid an infinite loop
    offset = 0  # ms
    while True:
        chunk = sound[offset:offset + chunk_size]
        if chunk.dBFS >= silence_threshold or offset >= len(sound):
            return offset
        offset += chunk_size
def remove_aug_silence2(dir_path):
    """Legacy (os.path-based) silence trimmer.

    Splits every .wav in *dir_path* into two sub-folders: ``1s`` for clips
    that fit in one second after trimming, and ``long`` for clips that need
    manual editing.  Output files are renamed sequentially as 001xxx.wav.
    """
    if not os.path.exists(dir_path):
        raise Exception("No " + dir_path + "!")
    # x, y, z, w = 0, 0, 0, 0  # counters for clips longer than 1s
    x = 1  # running sequence number used to build output file names
    # folder for clips trimmed to exactly 1 s
    new_path = os.path.join(dir_path, "1s")
    if not os.path.isdir(new_path):
        os.mkdir(new_path)
    # folder for clips longer than 1 s that need manual editing
    long_path = os.path.join(dir_path, "long")
    if not os.path.isdir(long_path):
        os.mkdir(long_path)
    # collect all .wav file paths
    # format: ['0.wav', '1.wav', ...]
    wav_files = glob.glob(os.path.join(dir_path, "*.wav"))
    for i in range(len(wav_files)):
        # read the file
        sound = AudioSegment.from_file(wav_files[i], format="wav")
        # the two subtractions below keep a small silence margin before/after
        start_trim = detect_leading_silence(sound, -40)
        # start_trim must not go negative, otherwise an empty clip is produced
        start_trim = start_trim - 50 if start_trim >= 50 else start_trim
        end_trim = detect_leading_silence(sound.reverse(), -40)
        end_trim = end_trim - 100 if end_trim >= 100 else end_trim
        # duration in ms, 1s = 1000ms
        duration = len(sound) - end_trim - start_trim
        # name for the saved wav file (zero-padded sequence number)
        # file_name = os.path.basename(wav_files[i])
        if int(x) < 10:
            x = "00" + str(x)
        elif int(x) < 100:
            x = "0" + str(x)
        else:
            x = str(x)
        file_name = "001" + x + ".wav"
        x = int(x) + 1
        # if trimming leaves less than 1 s, force the clip to exactly 1 s
        if duration <= 1000:
            new_sound = sound[start_trim: start_trim+1000]
            new_sound.export(os.path.join(new_path, file_name), format="wav")
        elif duration <= 1050:
            # slightly long: shift the window back a little and still cut 1 s
            start_trim2 = start_trim - 25 if start_trim >= 25 else start_trim
            new_sound2 = sound[start_trim2: start_trim2+1000]
            new_sound2.export(os.path.join(new_path, file_name), format="wav")
        else:  # longer than 1 s: needs manual editing
            newsound = sound[start_trim: len(sound)-end_trim]
            newsound.export(os.path.join(long_path, file_name), format="wav")
            print("{} 的时长为: {}s...".format(file_name, duration/1000))
        # print("processing clip {}...".format(i))
    # print("{} clips shorter than 1050ms...".format(x))  # 20
    # print("{} clips shorter than 1100ms...".format(y))  # 23
    # print("{} clips shorter than 1150ms...".format(z))  # 9
    # print("{} clips longer than 1150ms...".format(w))  # 25
def remove_wav(wav, wav_1s, wav_long):
    """Trim the silence from a single audio file.

    wav: Path to the source .wav file.
    wav_1s: directory receiving clips trimmed to exactly 1 s.
    wav_long: directory receiving clips longer than 1 s (manual editing needed).
    """
    assert wav, print("No audio file exists!")
    if not wav_1s.exists(): wav_1s.mkdir()
    # read the file
    sound = AudioSegment.from_file(wav, format="wav")
    # the two subtractions below keep a small silence margin before/after (-35)
    start_trim = detect_leading_silence(sound, -30)
    # start_trim must not go negative, otherwise an empty clip is produced
    start_trim = start_trim - 50 if start_trim >= 50 else start_trim
    end_trim = detect_leading_silence(sound.reverse(), -30)
    end_trim = end_trim - 100 if end_trim >= 100 else end_trim
    # duration in ms, 1s = 1000ms
    duration = len(sound) - end_trim - start_trim
    # if trimming leaves less than 1 s, force the clip to exactly 1 s
    start_trim2 = len(sound) - end_trim - 1000
    if start_trim2 < 0:
        start_trim2 = 0
    if start_trim > 400:
        start_trim2 = start_trim
    if duration <= 1050:
        new_sound = sound[start_trim2: start_trim2+1000]
        new_sound.export(wav_1s/wav.name, format="wav")
        print(f"{wav.name} 1s 音频剪辑成功...")
    # elif duration <= 1050:
    #     start_trim2 = start_trim - 25 if start_trim >= 25 else start_trim
    #     new_sound2 = sound[start_trim2: start_trim2+1000]
    #     new_sound2.export(wav_1s/wav.name, format="wav")
    #     print(f"{wav.name} 1s 音频剪辑成功...")
    else:  # longer than 1 s: needs manual editing
        newsound = sound[start_trim: len(sound)-end_trim]
        # newsound = sound[start_trim: start_trim+1000]
        newsound.export(wav_long/wav.name, format="wav")
        print("{} 的时长为: {}s...".format(wav.name, duration/1000))
def remove_aug_silence(dir_path):
    """Trim silence from every .wav directly under *dir_path* (pathlib version).

    Results land in two sub-folders: ``1s`` for clips trimmed to exactly one
    second and ``long`` for clips that still need manual editing.
    """
    assert dir_path.exists(), Exception("No " + str(dir_path) + "!")
    one_sec_dir = dir_path / "1s"
    manual_dir = dir_path / "long"
    # create both output folders on demand
    for target in (one_sec_dir, manual_dir):
        if not target.exists():
            target.mkdir()
    # process every .wav found directly in dir_path
    for wav in dir_path.glob('*.wav'):
        remove_wav(wav, one_sec_dir, manual_dir)
    print("剪辑完成, 剩下的需要手工剪辑啦...")
def merge_wavs(root, new_path):
    """Collect every .wav from *root*'s sub-directories into *new_path*.

    Files are renamed sequentially (1.wav, 2.wav, ...) in iteration order.
    Returns *new_path*.
    """
    assert root.exists(), Exception("No files path exists!")
    if not new_path.exists():
        new_path.mkdir()
    copied = 0
    for sub_dir in root.iterdir():
        print(sub_dir)
        for wav in sub_dir.glob('*.wav'):
            copied += 1
            shutil.copy(wav, new_path / (str(copied) + '.wav'))
        print(copied)
    print(f"共有{len(list(new_path.iterdir()))}条音频文件...")
    return new_path
if __name__ == "__main__":
    # batch-trim the clips previously sorted into the "long" folder
    root_path = "../../local_data/web_data_train/20200722/long"
    root_path = Path(root_path)
    wavs_path = root_path.parent / 'aug_xrxr'
    remove_aug_silence(root_path)
    # merge all audio files into one folder
    # wavs_path = merge_wavs(root_path, wavs_path)
    # single audio file
    # file_path = '/home/lebhoryi/RT-Thread/WakeUp-Xiaorui/local_data/' \
    #             '328_data/audio2/60.wav'
    # tmp = Path('/home/lebhoryi/RT-Thread/WakeUp-Xiaorui/local_data/'
    #            '328_data/tmp')
    # file_path = Path(file_path)
    # remove_wav(file_path, tmp, tmp)
|
# Add parent to the search path so we can reference the modules(craft, pix2pix) here without throwing and exception
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import copy
import cv2
import numpy as np
from PIL import Image
from craft_text_detector import Craft
# import craft functions
from craft_text_detector import (
read_image,
load_craftnet_model,
load_refinenet_model,
get_prediction,
export_detected_regions,
export_extra_results,
empty_cuda_cache
)
def crop_poly_low(img, poly):
    """Crop *img* to polygon *poly*, returning the crop on a white background.

    find region using the poly points
    create mask using the poly points
    do mask op to crop
    add white bg
    """
    # points should have 1*x*2 shape
    if len(poly.shape) == 2:
        poly = np.array([np.array(poly).astype(np.int32)])
    pts=poly
    ## (1) Crop the bounding rect
    rect = cv2.boundingRect(pts)
    x,y,w,h = rect
    croped = img[y:y+h, x:x+w].copy()
    ## (2) make mask
    # shift polygon into the cropped patch's local coordinate frame
    pts = pts - pts.min(axis=0)
    mask = np.zeros(croped.shape[:2], np.uint8)
    cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)
    ## (3) do bit-op
    dst = cv2.bitwise_and(croped, croped, mask=mask)
    ## (4) add the white background
    bg = np.ones_like(croped, np.uint8)*255
    cv2.bitwise_not(bg,bg, mask=mask)
    dst2 = bg + dst
    return dst2
print('Eval')
# set image path and export folder directory
# NOTE(review): the successive reassignments below are debugging leftovers;
# only the LAST assignment of `image` takes effect.
image = 'figures/padded_snippet-HCFA24.jpg' # can be filepath, PIL image or numpy array
image = 'figures/PID_10_5_0_3108.original.tif' # can be filepath, PIL image or numpy array
image = 'figures/PID_10_5_0_3110.original.tif' # can be filepath, PIL image or numpy array
image = 'figures/PID_10_5_0_3111.original.tif' # can be filepath, PIL image or numpy array
image = 'figures/PID_10_5_0_3112.original.tif' # can be filepath, PIL image or numpy array
image = 'figures//PID_10_5_0_3108.original.tif' # can be filepath, PIL image or numpy array
image = '/tmp/hicfa/PID_10_5_0_3101.original.tif'
output_dir = 'outputs/'
# create a craft instance
# craft = Craft(output_dir=output_dir, crop_type="poly", cuda=False)
# read image
image = read_image(image)
# load models (GPU)
refine_net = load_refinenet_model(cuda=True)
craft_net = load_craftnet_model(cuda=True)
# perform prediction
prediction_result = get_prediction(
    image=image,
    craft_net=craft_net,
    refine_net=refine_net,
    text_threshold=0.7,
    link_threshold=0.4,
    low_text=0.4,
    cuda=True,
    long_size=2550 #1280
    # long_size=1280
)
# export detected text regions (deep copies keep the source image untouched)
image_paths = copy.deepcopy(image)
exported_file_paths = export_detected_regions(
    image=image_paths,
    regions=prediction_result["boxes"],
    output_dir=output_dir,
    rectify=True
)
# export heatmap, detection points, box visualization
image_results = copy.deepcopy(image)
export_extra_results(
    image=image_results,
    regions=prediction_result["boxes"],
    heatmaps=prediction_result["heatmaps"],
    output_dir=output_dir
)
def imwrite(path, img):
    """Best-effort cv2.imwrite wrapper: logs the error instead of raising."""
    try:
        cv2.imwrite(path, img)
    except Exception as err:
        # deliberately non-fatal: a failed debug dump must not stop processing
        print(err)
def paste_fragment(overlay, fragment, pos=(0,0)):
    """Paste an OpenCV (BGR) fragment onto a PIL overlay image at *pos*."""
    # PIL expects RGB channel ordering, so convert before wrapping.
    rgb = cv2.cvtColor(fragment, cv2.COLOR_BGR2RGB)
    overlay.paste(Image.fromarray(rgb), pos)
# output text only blocks
# deepcopy image so that original is not altered
image = copy.deepcopy(image)
regions=prediction_result["boxes"]
# convert image to BGR color (OpenCV convention)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
file_path = os.path.join(output_dir, "image_cv.png")
cv2.imwrite(file_path, image)
# White canvas with the source image's dimensions; text snippets get
# pasted back onto it at their original positions.
pil_image = Image.new('RGB', (image.shape[1], image.shape[0]), color=(255,255,255,0))
for i, region in enumerate(regions):
    # Normalize the polygon into the (N, 1, 2) shape OpenCV expects.
    region = np.array(region).astype(np.int32).reshape((-1))
    region = region.reshape(-1, 2)
    poly = region.reshape((-1, 1, 2))
    rect = cv2.boundingRect(poly)
    x = rect[0]
    y = rect[1]
    w = rect[2]
    h = rect[3]
    # Skip boxes shorter than 15 px — presumably detection noise; confirm threshold.
    if h < 15:
        continue
    rect = np.array(rect, dtype=np.int32)
    snippet = crop_poly_low(image, poly)
    x = rect[0]
    y = rect[1]
    # export cropped region
    file_path = os.path.join(output_dir, 'crops', "%s.jpg" % (i))
    cv2.imwrite(file_path, snippet)
    paste_fragment(pil_image, snippet, (x, y))
savepath = os.path.join(output_dir, "%s.jpg" % ('txt_overlay'))
pil_image.save(savepath, format='JPEG', subsampling=0, quality=100)
# unload models from gpu
empty_cuda_cache()
# pil_padded = Image.new('RGB', (shape[1] + pad, shape[0] + pad), color=(255,255,255,0))
# paste_fragment(pil_padded, snippet, (pad//2, pad//2))
# savepath = os.path.join(debug_dir, "%s-%s.jpg" % ('padded_snippet' , key))
# pil_padded.save(savepath, format='JPEG', subsampling=0, quality=100)
# cv_snip = np.array(pil_padded)
# snippet = cv2.cvtColor(cv_snip, cv2.COLOR_RGB2BGR)# convert RGB to BGR
|
import decimal
import json
from datetime import date
from typing import Dict, List
from funds.models import Fund, Portfolio, Position
from funds.models import PortfolioSnapshot
from funds.services import prices
from funds.services.constants import CRYPTO_USD
# public
def get_market_value_on_date(portfolio, day: date) -> decimal.Decimal:
    """Compute the total USD market value of *portfolio* on *day*.

    :param portfolio: mapping of symbol -> position (objects with
        ``symbol`` and ``units`` attributes)
    :param day: valuation date used for the close-price lookup
    :return: total value rounded to 2 decimal places, as Decimal

    FIX: the accumulator is now Decimal throughout. The original mixed an
    int accumulator with raw ``position.units`` in the crypto branch and
    Decimal products in the other branch, yielding inconsistent result
    types (and a TypeError for float units mixed with Decimal).
    """
    market_value = decimal.Decimal(0)
    for _, position in portfolio.items():
        if position.symbol in CRYPTO_USD:
            # USD-pegged assets are valued 1:1 per unit.
            market_value += decimal.Decimal(position.units)
            continue
        usd_close_price = prices.get_usd_close_price_on_date(day, position.symbol)
        market_value += decimal.Decimal(usd_close_price) * decimal.Decimal(position.units)
    return round(market_value, 2)
def save_historical_portfolios(fund_name: str, historical_portfolios: Dict[date, Portfolio]):
    """
    Given a fund name and a dictionary of historical portfolios, save the
    portfolios in the database.
    :param fund_name: name of the fund who owns the given historical portfolios
    :param historical_portfolios: Historical snapshots of a portfolio of the same fund
    :return: None
    """
    fund = Fund.objects.get(name=fund_name)
    snapshots = PortfolioSnapshot.objects.filter(fund=fund)
    day_to_snapshot = {snapshot.date: snapshot for snapshot in snapshots}
    for day, portfolio in historical_portfolios.items():
        json_positions = __to_json_positions(portfolio)
        market_price = get_market_value_on_date(portfolio, day)
        snapshot = day_to_snapshot.get(day)
        if snapshot is None:
            # No snapshot stored for this day yet: create one.
            snapshot = PortfolioSnapshot(
                date=day,
                positions=json_positions,
                fund=fund,
                market_price=market_price
            )
        else:
            # BUG FIX: the original re-saved the existing snapshot
            # unchanged, silently discarding the recomputed data.
            snapshot.positions = json_positions
            snapshot.market_price = market_price
        snapshot.save()
def get_portfolio_list(fund: Fund) -> List[Position]:
    """Return the fund's positions ordered alphabetically by symbol."""
    # NOTE(review): despite the List[Position] annotation this returns a lazy
    # Django QuerySet (evaluated on iteration) — confirm callers don't rely
    # on eager list semantics.
    return Position.objects.filter(fund=fund).order_by('symbol')
# private
def __to_json_positions(portfolio: Portfolio):
    """Serialize every position in *portfolio* to a JSON array string."""
    # default=str stringifies non-JSON-native values (dates, Decimals).
    return json.dumps([pos.to_dict() for pos in portfolio.values()], default=str)
def __to_prices_dict(last_prices_list):
    """Convert a list of {'symbol', 'price'} records into a symbol -> price dict."""
    return {record['symbol']: record['price'] for record in last_prices_list}
|
from django.shortcuts import render
def index(request):
    """Render the site landing page."""
    template_name = 'inicial/index.html'
    return render(request, template_name)
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from math import floor, ceil
# Extension module
import quadrotorsim
NO_DISPLAY = False
try:
from rlschool.quadrotor.render import RenderWindow
except Exception as e:
NO_DISPLAY = True
class Quadrotor(object):
    """
    Quadrotor environment.
    Args:
        dt (float): duration of single step (in seconds).
        nt (int): number of steps of single episode if no collision
            occurs.
        seed (int): seed to generate target velocity trajectory.
        task (str): name of the task setting. Currently, support
            `no_collision` and `velocity_control`.
        map_file (None|str): path to txt map config file, default
            map is a 100x100 flatten floor.
        simulator_conf (None|str): path to simulator config xml file.
    """
    def __init__(self,
                 dt=0.01,
                 nt=1000,
                 seed=0,
                 task='no_collision',
                 map_file=None,
                 simulator_conf=None,
                 **kwargs):
        assert task in ['velocity_control', 'no_collision'], \
            'Invalid task setting'
        if simulator_conf is None:
            simulator_conf = os.path.join(os.path.dirname(__file__),
                                          'quadrotorsim', 'config.xml')
        assert os.path.exists(simulator_conf), \
            'Simulator config xml does not exist'
        self.dt = dt
        self.nt = nt
        self.ct = 0  # steps taken in the current episode
        self.task = task
        self.simulator = quadrotorsim.Simulator()
        self.simulator.get_config(simulator_conf)
        self.state = {}
        self.viewer = None
        # World-frame offsets added onto the simulator's local coordinates.
        self.x_offset = self.y_offset = self.z_offset = 0
        if self.task == 'velocity_control':
            self.velocity_targets = \
                self.simulator.define_velocity_control_task(
                    dt, nt, seed)
        elif self.task == 'no_collision':
            self.map_matrix = Quadrotor.load_map(map_file)
            # Only for single quadrotor, also mark its start position
            y_offsets, x_offsets = np.where(self.map_matrix == -1)
            assert len(y_offsets) == 1
            self.y_offset = y_offsets[0]
            self.x_offset = x_offsets[0]
            self.z_offset = 5.  # TODO: setup a better init height
            self.map_matrix[self.y_offset, self.x_offset] = 0

    def reset(self):
        """Reset the simulator and return the initial state dict."""
        self.simulator.reset()
        sensor_dict = self.simulator.get_sensor()
        state_dict = self.simulator.get_state()
        self._update_state(sensor_dict, state_dict)
        return self.state

    def step(self, action):
        """Advance one simulator step.

        Args:
            action: motor command sequence, cast to float32.
        Returns:
            (state, reward, reset) — *reset* is True when the episode ends
            (collision, or the step budget is exhausted).
        """
        self.ct += 1
        cmd = np.asarray(action, np.float32)
        self.simulator.step(cmd.tolist(), self.dt)
        sensor_dict = self.simulator.get_sensor()
        state_dict = self.simulator.get_state()
        old_pos = [self.state['x'], self.state['y'], self.state['z']]
        self._update_state(sensor_dict, state_dict)
        new_pos = [self.state['x'], self.state['y'], self.state['z']]
        if self.task == 'no_collision':
            is_collision = self._check_collision(old_pos, new_pos)
            reward = self._get_reward(collision=is_collision)
            reset = False
            if is_collision:
                reset = True
                self.ct = 0
        elif self.task == 'velocity_control':
            reset = False
            velocity_target = self.velocity_targets[self.ct - 1]
            reward = self._get_reward(velocity_target=velocity_target)
            if self.ct == self.nt:
                reset = True
                self.ct = 0
        return self.state, reward, reset

    def render(self):
        """Open (lazily) the render window and draw the current state."""
        if self.viewer is None:
            if NO_DISPLAY:
                raise RuntimeError('[Error] Cannot connect to display screen.')
            self.viewer = RenderWindow(task=self.task)
        if 'x' not in self.state:
            # It's null state
            raise Exception('You are trying to render before calling reset()')
        if self.task == 'velocity_control':
            self.viewer.view(
                self.state, self.dt,
                expected_velocity=self.velocity_targets[self.ct-1])
        else:
            self.viewer.view(self.state, self.dt)

    def close(self):
        # NOTE: deletes the simulator; the environment is unusable afterwards.
        del self.simulator

    def _get_reward(self, collision=False, velocity_target=(0.0, 0.0, 0.0)):
        """
        Reward function setting for different tasks.
        The default penalty is the cost of energy. In addition,
        for `no_collision` task, a strong penalty is added for
        collision, otherwise get +1 reward; for `velocity_control`
        task, an extra penalty for velocity difference is added.
        """
        reward = - self.dt * self.state['power']
        if self.task == 'no_collision':
            if collision:
                reward -= 10.0
            else:
                reward += 1.
        elif self.task == 'velocity_control':
            diff = self._get_velocity_diff(velocity_target)
            reward -= diff
        return reward

    def _check_collision(self, old_pos, new_pos):
        """Return True if the swept cell range collides with the map.

        Cell values of map_matrix are treated as obstacle heights
        (0 == flat floor) — presumably the map format; confirm with the
        map config docs.
        """
        # TODO: update to consider the body size of the quadrotor
        min_max = lambda x, y, i: \
            (int(floor(min(x[i], y[i]))), int(ceil(max(x[i], y[i]))))
        x_min, x_max = min_max(old_pos, new_pos, 0)
        y_min, y_max = min_max(old_pos, new_pos, 1)
        z_min, z_max = min_max(old_pos, new_pos, 2)
        taken_pos = self.map_matrix[y_min:y_max+1, x_min:x_max+1]
        # BUG FIX: the original tested `z_min < np.any(taken_pos)`, comparing
        # a height against a boolean (0/1). Compare the altitude against the
        # obstacle heights elementwise instead; z_min <= z_max so checking
        # z_min covers the whole swept segment.
        if np.any(taken_pos > z_min):
            return True
        else:
            return False

    def _update_state(self, sensor, state):
        """Merge sensor + simulator state into self.state (world coordinates)."""
        state['x'] = state['x'] + self.x_offset
        state['y'] = state['y'] + self.y_offset
        state['z'] = state['z'] + self.z_offset
        for k, v in sensor.items():
            self.state[k] = v
        for k, v in state.items():
            self.state[k] = v
        if self.task == 'velocity_control':
            # Expose the next velocity target to the agent as part of the state.
            t = min(self.ct, self.nt-1)
            next_velocity_target = self.velocity_targets[t]
            self.state['next_target_g_v_x'] = next_velocity_target[0]
            self.state['next_target_g_v_y'] = next_velocity_target[1]
            self.state['next_target_g_v_z'] = next_velocity_target[2]

    def _get_velocity_diff(self, velocity_target):
        """L1 distance between the current global velocity and the target."""
        vt_x, vt_y, vt_z = velocity_target
        diff = abs(vt_x - self.state['g_v_x']) + \
            abs(vt_y - self.state['g_v_y']) + \
            abs(vt_z - self.state['g_v_z'])
        return diff

    @staticmethod
    def load_map(map_file):
        """Load a whitespace-separated int map; default is a flat 100x100
        floor with the start position (-1) at the center."""
        if map_file is None:
            flatten_map = np.zeros([100, 100], dtype=np.int32)
            flatten_map[50, 50] = -1
            return flatten_map
        map_lists = []
        with open(map_file, 'r') as f:
            for line in f.readlines():
                map_lists.append([int(i) for i in line.split(' ')])
        return np.array(map_lists)
if __name__ == '__main__':
    # Manual smoke test: fly with a constant command until the episode ends.
    import sys
    if len(sys.argv) == 1:
        task = 'no_collision'
    else:
        task = sys.argv[1]
    env = Quadrotor(task=task, nt=1000)
    env.reset()
    env.render()
    reset = False
    step = 1
    while not reset:
        # Constant motor command; tweak to explore other behaviors.
        action = np.array([2., 2., 1., 1.], dtype=np.float32)
        # action = np.array([1., 0., 0., 0.], dtype=np.float32)
        state, reward, reset = env.step(action)
        env.render()
        print('---------- step %s ----------' % step)
        print('state:', state)
        print('reward:', reward)
        step += 1
    env.close()
|
import math
from itertools import combinations
from itertools import product
from scipy.special import comb
import numpy
from collections import defaultdict
from consts import CLUSTERED_SUBSTITUTIONS_MIN_SIZE
from consts import WTS_MIN_PERCENT
def compress_snd_window(window):
    """Summarize a snd occupancy window into counts between cluster starts.

    NOTE(review): summary inferred from the code — each candidate start i
    is kept only if its length-n sub-window holds at least
    CLUSTERED_SUBSTITUTIONS_MIN_SIZE snds and selects a not-yet-seen set of
    occupied slots; confirm intent with the author.
    """
    # n = window "radius": windows are laid out with width 2*n - 1.
    n = len(window) // 2 + 1
    # Collapse the slot array to 0/1 occupancy flags.
    window = [ int(snd is not None) for snd in window]
    starts = []
    indexes = set()
    for i in range(n):
        if sum(window[i:i + n]) >= CLUSTERED_SUBSTITUTIONS_MIN_SIZE:
            # Deduplicate starts whose sub-window covers the same occupied slots.
            ind = tuple( i + j for j, v in enumerate(window[i:i + n]) if v == 1)
            if ind not in indexes:
                indexes.add(ind)
                starts.append(i)
    # Mirror the starts into the second half so zip() below yields
    # consecutive [start, end) segments across the whole window.
    starts = starts + [ start + n for start in starts ]
    return [ sum(window[start: end]) for start, end in zip(starts, starts[1:])]
def get_snds_in_window(index, snds, WINDOW_SIZE):
    """Return the slice of *snds* whose coords lie strictly within
    WINDOW_SIZE of snds[index].coord (snds assumed sorted by coord)."""
    center = snds[index].coord
    # Walk left until the coordinate gap reaches WINDOW_SIZE.
    lo = index
    while lo >= 0 and center - snds[lo].coord < WINDOW_SIZE:
        lo -= 1
    # Walk right the same way.
    hi = index
    while hi < len(snds) and snds[hi].coord - center < WINDOW_SIZE:
        hi += 1
    return snds[lo + 1: hi]
def get_snd_window(index, snds, WINDOW_SIZE):
    """Lay the snds near snds[index] into a fixed slot array of width
    2*WINDOW_SIZE - 1, centered on snds[index]; empty slots stay None."""
    slots = [None] * (2 * WINDOW_SIZE - 1)
    center = snds[index].coord
    for snd in get_snds_in_window(index, snds, WINDOW_SIZE):
        slots[snd.coord - center + WINDOW_SIZE - 1] = snd
    return slots
def contains_cluster(window):
    """True if any length-W sub-window holds at least
    CLUSTERED_SUBSTITUTIONS_MIN_SIZE occupied slots."""
    half = len(window) // 2 + 1
    for start in range(half):
        occupied = sum(1 for snd in window[start:start + half] if snd)
        if occupied >= CLUSTERED_SUBSTITUTIONS_MIN_SIZE:
            return True
    return False
def is_biased_clustered(window):
    """True if some length-W sub-window is both clustered (enough snds)
    and sufficiently biased (>= WTS_MIN_PERCENT of them biased)."""
    half = len(window) // 2 + 1
    for start in range(half):
        present = [snd for snd in window[start:start + half] if snd]
        total = len(present)
        biased = len([snd for snd in present if snd.biased])
        # Short-circuit guards the division when total is below the minimum.
        if total >= CLUSTERED_SUBSTITUTIONS_MIN_SIZE and biased / total >= WTS_MIN_PERCENT:
            return True
    return False
def binom(n, k, p):
    """Binomial pmf: P[X == k] for X ~ Binomial(n, p)."""
    success_part = p ** k
    failure_part = (1 - p) ** (n - k)
    return comb(n, k, exact = True) * success_part * failure_part
def generate_freqs_with_prob(counts, p):
    """Enumerate every frequency vector f with 0 <= f[i] <= counts[i].

    Returns (tuple(f), prob) pairs where prob is the product of independent
    Binomial(counts[i], p) pmf values — i.e. P[observing exactly f].
    """
    def expand(remaining, p):
        # Base case: the empty vector with probability 1.
        if len(remaining) == 0:
            return [[[], 1]]
        out = []
        for head in range(remaining[0] + 1):
            # Recurse on the tail for each choice of the first frequency.
            tail_pairs = expand(remaining[1:], p)
            for tail, tail_prob in tail_pairs:
                out.append(([head] + tail, tail_prob * binom(remaining[0], head, p)))
        return out
    return [(tuple(freqs), prob) for freqs, prob in expand(counts, p)]
def binom_from(size, freq, p):
    """Upper-tail binomial probability: P[X >= freq] for X ~ Binomial(size, p)."""
    # Loop variable renamed: the original shadowed the `freq` parameter.
    return sum(binom(size, f, p) for f in range(freq, size + 1))
def UBCS_fast(shifts, p):
    """Probability that some window in *shifts* forms a biased cluster.

    NOTE(review): dynamic program over per-position substitution counts;
    each position's biased count is Binomial(count, p). Windows have width
    n = len(shifts)//2 + 1 and the recurrence conditions on the overlap
    between consecutive windows — verify against the accompanying derivation.
    """
    n = len(shifts) // 2 + 1
    # The center position must hold a substitution, otherwise nothing to test.
    if shifts[n - 1] == 0:
        return 0
    prob = 0
    first_cluster_size = sum(shifts[:n])
    if first_cluster_size >= CLUSTERED_SUBSTITUTIONS_MIN_SIZE:
        # P[the first window alone reaches the bias threshold].
        prob = binom_from(first_cluster_size, math.ceil(WTS_MIN_PERCENT * first_cluster_size), p)
    # mem[freqs]: probability mass where no earlier window fired, given the
    # overlap frequencies `freqs` with the next window.
    mem = defaultdict(lambda: 1)
    for k in range(1, n):
        conditional_counts = shifts[k : k + n - 1]
        cluster_size = sum(shifts[k : k + n])
        prev_cluster_size = sum(shifts[k - 1 : k + n - 1])
        prev_mem = mem
        mem = defaultdict(int)
        for conditional_freqs, conditional_freqs_prob in generate_freqs_with_prob(conditional_counts, p):
            conditional_freqs_size = sum(conditional_freqs)
            n_a = 0
            a = 0
            # P[current window reaches the threshold given the overlap count].
            a = binom_from(shifts[k + n - 1], math.ceil(WTS_MIN_PERCENT * cluster_size) - conditional_freqs_size, p)
            # Range of biased counts in the dropped column that keep the
            # PREVIOUS window below its firing threshold.
            upper_bound = min( math.ceil(WTS_MIN_PERCENT * prev_cluster_size) - conditional_freqs_size , shifts[k - 1] + 1)
            if prev_cluster_size < CLUSTERED_SUBSTITUTIONS_MIN_SIZE:
                upper_bound = shifts[k - 1] + 1
            for freq in range(0 , upper_bound):
                n_a += binom(shifts[k - 1], freq, p) * prev_mem[ (freq, ) + conditional_freqs[:-1] ]
            if cluster_size >= CLUSTERED_SUBSTITUTIONS_MIN_SIZE:
                prob += a * n_a * conditional_freqs_prob
            mem[conditional_freqs] = n_a
    return prob
|
import pickle
def atm():
    """Single-transaction ATM simulation backed by a pickle file.

    Loads the saved balance (creating the account with an initial balance
    of 50000 only when the file does not exist yet), performs one
    deposit/withdrawal/inquiry chosen by the user, and persists the result.

    FIXES vs. the original:
    - the balance read back by pickle.load() was discarded, and 50000 was
      re-dumped on every call, so the account never actually persisted;
    - file handles were opened repeatedly and only the last one closed —
      all file access now uses context managers.
    """
    file = "Account_details.pkl"
    try:
        with open(file, "rb") as file_obj:
            savings = pickle.load(file_obj)
    except FileNotFoundError:
        # First run: create the account with the initial balance.
        savings = 50000
        with open(file, "wb") as file_obj:
            pickle.dump(savings, file_obj)
    user_choice = input("D: DEPOSIT "
                        "W: WITHDRAWAL "
                        "A: ACCOUNT DETAILS ")
    if user_choice == 'D':
        d_amt = int(input("ENTER AMOUNT TO BE DEPOSITED "))
        savings = d_amt + savings
        print("TOTAL AMOUNT : ", savings)
        with open(file, "wb") as file_obj:
            pickle.dump(savings, file_obj)
    elif user_choice == 'W':
        w_amt = int(input("ENTER AMOUNT TO BE WITHDRAWN "))
        savings = savings - w_amt
        print("TOTAL AMOUNT :", savings)
        with open(file, "wb") as file_obj:
            pickle.dump(savings, file_obj)
    elif user_choice == 'A':
        print("TOTAL AMOUNT :", savings)
    else:
        print("INVALID INPUT")
|
from ftw import logchecker
import pytest
def test_logchecker():
    """LogChecker is abstract: direct instantiation must raise TypeError."""
    # Dropped the unused `excinfo`/`checker` bindings from the original.
    with pytest.raises(TypeError):
        logchecker.LogChecker()
|
import random
import copy
from vertexInfo import VertexInfo
from edgeInfo import EdgeInfo
from topology.graph import GraphAsMatrix
class TopologyModel(object):
    """Builds network topologies by adding vertices/edges to a wrapped graph.

    Vertex and edge info payloads come from pluggable generator callables.
    """
    def __init__(self, G):
        self._G = G
        # Factories producing the info payload for each new vertex/edge.
        self._vertexInfoGen = VertexInfo
        self._edgeInfoGen = EdgeInfo

    def generateClique(self, k):
        """Append a k-clique of fresh vertices; return their ids."""
        created = []
        base = self._G.getNumberOfVertices()
        for v in range(base, int(base + k)):
            self._G.addVertex(v, self._vertexInfoGen())
            created.append(v)
            for u in range(base, v):
                self._G.addEdge(v, u, self._edgeInfoGen())
        return created

    def setVertexInfoGenerator(self, viGen):
        self._vertexInfoGen = viGen

    def setEdgeInfoGenerator(self, eiGen):
        self._edgeInfoGen = eiGen

    def addVertex(self, n):
        self._G.addVertex(n, self._vertexInfoGen())

    def addEdge(self, s, t):
        self._G.addEdge(s, t, self._edgeInfoGen())

    def generateRandomLinks(self, sources, targets, k):
        """
        generates k random links.
        good for sparce graphs.
        NOTE: retries until each link is placed — may spin forever on a
        (nearly) saturated graph.
        """
        added = []
        for _ in range(k):
            placed = False
            while not placed:
                s = random.choice(sources)
                t = random.choice(targets)
                if s != t and not self._G.isEdge(s, t):
                    self.addEdge(s, t)
                    added.append((s, t))
                    placed = True
        return added

    def generateLinksWithProbability(self, sources, targets, p):
        """
        connects s to t with probability p.
        good for dense graphs.
        """
        added = []
        for s in sources:
            for t in targets:
                if s != t and not self._G.isEdge(s, t):
                    if random.random() < p:
                        self.addEdge(s, t)
                        added.append((s, t))
        return added

    def generatePath(self, s, t, l):
        """Connect s to t through l-1 fresh intermediate vertices; return them."""
        created = []
        nxt = self._G.getNumberOfVertices()
        for _ in range(l - 1):
            self.addVertex(nxt)
            created.append(nxt)
            self.addEdge(s, nxt)
            s = nxt
            nxt += 1
        self.addEdge(s, t)
        return created
class BAModel(TopologyModel):
    """Barabasi-Albert style generator: new vertices attach preferentially."""
    def __init__(self, G):
        TopologyModel.__init__(self, G)
        self._carry = 0  # edges still owed to newly added vertices
        self._d = 2      # attachment degree per new vertex

    def setDegree(self, d):
        self._d = d

    def generateVertex(self):
        """Add one vertex, attach ~d edges preferentially; return its id."""
        newv = self._G.getNumberOfVertices()
        self.addVertex(newv)
        self._carry += self._d
        self.connect(newv)
        return newv

    def connect(self, source):
        """Attach *source* to endpoints of uniformly sampled existing edges
        (endpoint sampling == degree-proportional preferential attachment).
        NOTE: loops until all owed edges are placed."""
        m = self._G.getNumberOfEdges()  # kept from original (unused)
        edges = list(self._G.getEdges())
        while(self._carry >= 1):
            side = random.randint(0, 1)
            e = random.choice(edges)
            if side == 0:
                target = e.getV0().getNumber()
            else:
                target = e.getV1().getNumber()
            if source != target and not(self._G.isEdge(source, target)):
                self.addEdge(source, target)
                self._carry -= 1
class LayeredModel(TopologyModel):
    """Topology builder that grows the graph layer by layer."""
    def __init__(self,G):
        TopologyModel.__init__(self,G)
    def addLayerWithUniformConnectivity(self,prevLayer,layerSize,degreeDist):
        """
        (LayeredModel,sequence,int,int())->list of vertices
        adds layerSize vertices to the graph
        each new vertex will be randomly connected to vertices from prevLayer;
        degreeDist is a zero-argument callable yielding each vertex's degree
        """
        vertices = []
        n = self._G.getNumberOfVertices()
        for i in range(n,n+layerSize):
            self.addVertex(i)
            # copy.copy: connectUniform consumes the target list destructively
            self.connectUniform(i,degreeDist(),copy.copy(prevLayer))
            vertices.append(i)
        return vertices
    def addChildrenLayer(self,prevLayer,minChildren,maxChildren):
        """Give every vertex of prevLayer a random number of fresh children."""
        vertices = []
        n = self._G.getNumberOfVertices()
        for i in prevLayer:
            for j in range(random.randint(minChildren,maxChildren)):
                self.addVertex(n)
                self.addEdge(i,n)
                vertices.append(n)
                n+=1
        return vertices
    def duplicateLayerWithSourceLinkOnly(self,prevLayer):
        """Clone each vertex of prevLayer, linking each clone only to its original."""
        vertices = []
        n = self._G.getNumberOfVertices()
        for i in range(len(prevLayer)):
            v=n+i
            self.addVertex(v)
            self.addEdge(v,prevLayer[i])
            vertices.append(v)
        return vertices
    def duplicateLayerWithHighInterconnection(self,prevLayer):
        """Clone each vertex of prevLayer together with (deep copies of) all
        its outgoing edges, plus an edge from each clone to its original."""
        vertices = []
        n = self._G.getNumberOfVertices()
        v2 = n
        #duplicate vertices in prevLayer
        for v1 in prevLayer:
            self.addVertex(v2)
            #duplicate edges of v2
            neighbours = self._G.getVertex(v1).getSuccessors()
            neighbours = map(lambda x:x.getNumber(),neighbours)
            #save current edge info generator to copy info of duplicated edges
            tmpeigen = self._edgeInfoGen
            for u in neighbours:
                # Late-binding lambda over u is safe here: addEdge invokes the
                # generator immediately, before u advances.
                self._edgeInfoGen = lambda : copy.deepcopy(self._G.getEdge(v1,u).getWeight())
                self.addEdge(v2,u)
            self._edgeInfoGen = tmpeigen
            #edge from vertex to its' clone will use default info generator
            if not self._G.isEdge(v2,v1):
                self.addEdge(v2,v1)
            vertices.append(v2)
            v2=v2+1
        return vertices
    def connectUniform(self,source,d,targets):
        """
        (LayeredModel,int,int,sequence)->[(source,v1),(source,v2),...]
        Uniformly connects source with targets by d links.
        NOTE(review): *targets* is consumed destructively; if it runs out
        before d links are placed, random.choice raises IndexError.
        """
        edges=[]
        i=0
        while(i<d):
            t = random.choice(targets)
            targets.remove(t)
            if(not self._G.isEdge(source,t)):
                self.addEdge(source,t)
                edges.append((source,t))
                i+=1
        return edges
def createRandomBAGraph(n,d,graphType=GraphAsMatrix):
    """Build an n-vertex preferential-attachment graph with degree d."""
    graph = graphType()
    model = BAModel(graph)
    # Seed clique guarantees edges exist for preferential sampling.
    model.generateClique(int(d+1))
    model.setDegree(d)
    for _ in range(n-int(d+1)): #add n vertices
        model.generateVertex()
    return graph
def createRandomGraph(n,d,graphType=GraphAsMatrix):
    """Build a random graph with n vertices and ~n*d random edges.

    :param n: number of vertices
    :param d: average degree factor; int(n*d) random links are created
    :param graphType: graph class to instantiate
    """
    G = graphType()
    tm = TopologyModel(G)
    for i in range(n):
        tm.addVertex(i)
    # BUG FIX: map() returns a one-shot iterator in Python 3, but
    # generateRandomLinks needs an indexable sequence for random.choice();
    # materialize it into a list.
    vertices = list(map(int, G.getVertices()))
    tm.generateRandomLinks(vertices,vertices,int(n*d))
    return G
def createATwoRouteGraph(l,k1,k2,graphType=GraphAsMatrix):
    """
    (l,k1,k2)->a graph with 2+l*k1+l*k2 vertices
    Generates a graph in which s' connected to t' with
    k1 vertex disjoint paths with low redundancy and
    k2 vertex disjoint paths with high redundancy
    """
    graph = graphType()
    model = LayeredModel(graph)
    source = graph.getNumberOfVertices()
    sink = source + 1
    model.addVertex(source)
    model.addVertex(sink)
    # k1 plain disjoint paths of length l.
    for _ in range(k1):
        model.generatePath(source, sink, l)
    # k2 highly interconnected paths: duplicate the first one k2-1 times.
    if k2 > 0:
        base_path = model.generatePath(source, sink, l)
        for _ in range(k2 - 1):
            model.duplicateLayerWithHighInterconnection(base_path)
    return graph
def createRangedSmallWorld(n, k, p, l, graphType=GraphAsMatrix):
    """Build a small-world graph: a ring lattice plus random long-range
    shortcut *paths* of length l (added with probability p per vertex).

    :param n: number of vertices
    :param k: ring-neighborhood size (each vertex links to the k-1 vertices
        following it on the ring, matching the original's effective count)
    :param p: probability of adding a shortcut path from each vertex
    :param l: length of each shortcut path
    """
    G = graphType()
    tm = TopologyModel(G)
    nlist = range(n)
    for u in nlist:
        tm.addVertex(u)
    for u in nlist:
        # BUG FIX: start at offset 1 — offset 0 created a self-loop (u, u).
        for v in range(1, k):
            tm.addEdge(u, (u + v) % n)
    for u in nlist:
        if random.random() < p:
            # Pick a distinct, not-yet-connected endpoint for the shortcut.
            w = random.choice(nlist)
            while w == u or G.isEdge(u, w):
                w = random.choice(nlist)
            tm.generatePath(u, w, l)
    return G
|
"""
For reconfig V1
"""
import pandas
from collections import namedtuple
from typing import Tuple
from db_credentials import db_credentials
import mysql.connector
import datetime
import time
import paho.mqtt.publish as publish
from nr_funcs import float2arr
import json
RACK_ID = 1
UPDATE_RATE_S = 5
# relevant tag names
TAG_CUT_IN = "recipe_volume_cutin"
TAG_CUT_OUT = "recipe_volume_cutout"
TAG_CF = "recipe_EC"
Time_Unit = namedtuple("Time_Unit", ["keyword", "column_name", "second_mult"])
SECOND = Time_Unit("second", "Seconds Elapsed", 1)
MINUTE = Time_Unit("minute", "Minutes Elapsed", 60)
HOUR = Time_Unit("hour", "Hours Elapsed", 3600)
DAY = Time_Unit("day", "Days Elapsed", 86_400)
TIME_UNITS = [SECOND, MINUTE, HOUR, DAY]
class DatabaseError(Exception):
    """Raised when expected rack/recipe data cannot be retrieved from the database."""
    pass
class ColumnError(Exception):
    """Raised when no known elapsed-time column is found in a recipe CSV."""
    pass
def get_recipe(file_path: str) -> pandas.DataFrame:
    """Load a recipe CSV (named by its file stem) from the local recipes dir."""
    csv_path = f"~/local-server-dilution-reconfig/recipes/{file_path}.csv"
    return pandas.read_csv(csv_path)
def determine_time_unit(df: pandas.DataFrame) -> Time_Unit:
    """Return the first known Time_Unit whose column is present in *df*.

    :raises ColumnError: when none of the known time columns exist
    """
    for candidate in TIME_UNITS:
        if candidate.column_name in df:
            return candidate
    raise ColumnError("unable to find recipe time column")
def get_rack_info(rack_id: int, db) -> Tuple[datetime.datetime, int, str]:
    """Fetch (start_timestamp, node_red_id, recipe_csv_file_path) for a rack.

    :param rack_id: primary key of the rack row
    :param db: open mysql.connector connection
    :raises DatabaseError: if the rack is missing or any field is NULL
    """
    cursor = db.cursor()
    # SECURITY FIX: parameterized query instead of f-string interpolation.
    cursor.execute(
        """
        SELECT ra.start_timestamp, p.node_red_id, re.recipe_csv_file_path
        FROM rack ra
        JOIN recipe re ON ra.recipe_id = re.recipe_id
        JOIN plc p ON ra.plc_id = p.plc_id
        WHERE ra.rack_id = %s
        LIMIT 1;
        """,
        (rack_id,)
    )
    fetch = cursor.fetchall()
    # Commit ends the implicit transaction so later reads see fresh data.
    db.commit()
    if not fetch:
        raise DatabaseError(f"no rack with id {rack_id}")
    if None in fetch[0]:
        raise DatabaseError(f"missing information related to rack {fetch[0]}")
    return fetch[0]
def find_soonest_row(recipe: pandas.DataFrame, t_delta: datetime.timedelta, time_unit: "Time_Unit") -> "int | None":
    """Return the index of the first recipe row scheduled strictly after
    *t_delta*, or None when the recipe is exhausted.

    :param recipe: recipe table with the time column named by *time_unit*
    :param t_delta: elapsed time since the rack's start timestamp
    :param time_unit: Time_Unit whose column_name / second_mult describe
        how the recipe expresses elapsed time
    """
    # FIX: use timedelta.total_seconds() instead of the manual
    # `.seconds + .days * 86400` arithmetic (which also silently coupled
    # this function to the DAY constant).
    elapsed_s = t_delta.total_seconds()
    for row in range(recipe.shape[0]):
        row_time_s = float(recipe[time_unit.column_name][row]) * time_unit.second_mult
        if row_time_s > elapsed_s:
            return row
    return None
def calculate_c2(cf, vf, c1, v1, v2):
    """Solve the mixing mass balance cf*vf = c1*v1 + c2*v2 for c2.

    Returns 0.0 when v2 is non-positive (nothing to dissolve into).
    """
    if v2 <= 0.0:
        return 0.0
    needed_mass = (cf * vf) - (c1 * v1)
    return needed_mass / v2
def main():
    """Service loop: push recipe setpoints for RACK_ID to its PLC over MQTT.

    Reads rack/recipe metadata from MySQL, then repeatedly publishes the
    recipe row scheduled next (relative to the rack's start timestamp)
    every UPDATE_RATE_S seconds. Runs forever; every failure retries.
    """
    db = mysql.connector.connect(**db_credentials)
    while True:
        print("getting rack info...")
        try:
            start_timestamp, plc_id, csv_file_path = get_rack_info(rack_id=RACK_ID, db=db)
        except DatabaseError as e:
            print("database error")
            print(e)
            print(f"trying again in {UPDATE_RATE_S} seconds")
            time.sleep(UPDATE_RATE_S)
            continue
        print(start_timestamp)
        print("reading recipe...")
        try:
            recipe: pandas.DataFrame = get_recipe(csv_file_path)
        except FileNotFoundError as e:
            print("csv not found where expected")
            print(e)
            print(f"trying again in {UPDATE_RATE_S} seconds")
            time.sleep(UPDATE_RATE_S)
            continue
        print("finding time units...")
        try:
            time_unit: Time_Unit = determine_time_unit(recipe)
        except ColumnError as e:
            print(e)
            print(f"trying again in {UPDATE_RATE_S} seconds")
            time.sleep(UPDATE_RATE_S)
            continue
        print("looping through recipe")
        # Inner loop: follow the recipe until its end, then re-read rack info.
        while True:
            time_since = datetime.datetime.now() - start_timestamp
            soonest_row = find_soonest_row(recipe, time_since, time_unit)
            if soonest_row is None:
                print("reached end of recipe")
                time.sleep(UPDATE_RATE_S)
                break
            print(f'at {recipe[time_unit.column_name][soonest_row]} {time_unit.keyword}s elapsed')
            print(recipe.iloc[soonest_row])
            # One retained (QoS 0) MQTT message per setpoint tag.
            messages = [
                (f"nodered/plc/write/{plc_id}/{TAG_CUT_IN}", json.dumps(float2arr(float(recipe["Volume Cut-in (L)"][soonest_row]))), 0, True),
                (f"nodered/plc/write/{plc_id}/{TAG_CUT_OUT}", json.dumps(float2arr(float(recipe["Volume Cut-out (L)"][soonest_row]))), 0, True),
                (f"nodered/plc/write/{plc_id}/{TAG_CF}", json.dumps(float2arr(float(recipe["EC"][soonest_row]))), 0, True)
            ]
            print("publishing vals...")
            publish.multiple(messages)
            print(f"done. waiting {UPDATE_RATE_S} seconds\n")
            time.sleep(UPDATE_RATE_S)
if __name__ == "__main__":
main()
|
from classical_distinguisher import *
import time
# define cipher
CIPHER = "TEA"
# CIPHER = "XTEA"
# CIPHER = "RAIDEN"
if (CIPHER == "XTEA"):
    # TODO: XTEA trail/configuration not implemented yet.
    print(CIPHER)
if (CIPHER == "TEA"):
    # Differential trail for TEA: per-round input (alpha) and output (beta)
    # word differences. NOTE(review): values presumably come from a published
    # TEA differential trail — verify against the source they were taken from.
    alpha = [0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x0000000F, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0xFFFFFFF1, 0x00000001, 0x00000000, 0x00000001, 0xFFFFFFF1, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000011, 0xFFFFFFFF, 0x00000000]
    beta = [0x0000000F, 0x00000000, 0x0000000F, 0x00000000, 0xFFFFFFF1, 0x00000000, 0xFFFFFFF1, 0x00000002, 0x0000000F, 0x00000000, 0xFFFFFFF1, 0xFFFFFFFE, 0x0000000F, 0x00000000, 0x00000011, 0x00000000, 0xFFFFFFEF, 0x00000000]
    # -log_2(p)
    expected_probability = [3.62, 0.0, 2.87, 7.90, 3.60, 0.0, 2.78, 8.66, 3.57, 0.0, 2.87, 7.90, 3.59, 0.0, 2.79, 8.83, 3.61, 0.0]
    cipher = {
        "name":"TEA",
        "cipher_encrypt_block":tea_encrypt_block,
        "number_of_attacked_rounds":4,
        "cipher_add64": add64,
        "cipher_sub64": sub64,
        "round_differences": compute_round_differences(alpha, beta),
        # Cumulative -log2 probability of the trail up to each round.
        "accumulated_expected_probability":[sum(expected_probability[0:i]) for i in range(0,len(expected_probability))],
        "key": [0x11CAD84E, 0x96168E6B, 0x704A8B1C, 0x57BBE5D3]
    }
if (CIPHER == "RAIDEN"):
    # cipher_encrypt_block = raiden_encrypt_block
    # number_of_attacked_rounds = 7
    # cipher_add64 = add64
    # cipher_sub64 = sub64
    # Iterative 3-round differential pattern repeated along the whole trail.
    trail_length = 32+1
    alpha = [0 for i in range(trail_length)]
    beta = [0 for i in range(trail_length)]
    expected_probability = [0.0 for i in range(trail_length)]
    for i in range(0, trail_length, 3):
        alpha[i ] = 0x00000000
        alpha[i+1] = 0x7FFFFF00
        alpha[i+2] = 0x7FFFFF00
        beta[i ] = 0x00000000
        beta[i+1] = 0x7FFFFF00
        beta[i+2] = 0x80000100
        # -log_2(p)
        expected_probability[i ] = 0.0
        expected_probability[i+1] = 2.0
        expected_probability[i+2] = 2.0
    # round_differences = compute_round_differences(alpha, beta)
    trail_len = len(expected_probability)
    # NOTE(review): `key` is assigned here but the dict below calls
    # random_key() AGAIN, so this value is never used — likely a bug.
    key = random_key()
    cipher = {
        "name":"RAIDEN",
        "cipher_encrypt_block":raiden_encrypt_block,
        "number_of_attacked_rounds":6,
        "cipher_add64": add64,
        "cipher_sub64": sub64,
        "round_differences": compute_round_differences(alpha, beta),
        "accumulated_expected_probability":[sum(expected_probability[0:i]) for i in range(0,trail_len)],
        "key": random_key()
    }
# define number of sample
max_num_samples = 100000000 # increase/decrease if the testing device can manage more/less
# Pairs needed ~= 1/p where -log2(p) is the accumulated trail probability.
num_samples = int(math.ceil(2**cipher["accumulated_expected_probability"][cipher["number_of_attacked_rounds"]]))
num_samples = min(num_samples, max_num_samples)
print("\nCipher = " + CIPHER + "\n")
print("key = {}".format(cipher["key"]))
print("number of attacked rounds = {}".format(cipher["number_of_attacked_rounds"]))
exp = cipher["accumulated_expected_probability"][cipher["number_of_attacked_rounds"]]
print("distinguisher success probability = 1/2^{} = {}".format(exp, 1 / float(2 ** exp)))
# print("accumulated expected probability = {}".format(accumulated_expected_probability))
print("input difference = {}".format(cipher["round_differences"][0]))
print("round difference = {}".format(cipher["round_differences"][cipher["number_of_attacked_rounds"]]))
exp = cipher["accumulated_expected_probability"][cipher["number_of_attacked_rounds"]]
print("number of samples = 2^{} = {}".format(exp, num_samples))
number_of_experiments = 10
# time estimation
# Time one plaintext pair end-to-end, then extrapolate to the full run.
start = time.time()
m1 = random_message()
m2 = cipher["cipher_add64"](m1,cipher["round_differences"][0])
c1 = cipher["cipher_encrypt_block"](m1, cipher["key"], nrounds=cipher["number_of_attacked_rounds"])
c2 = cipher["cipher_encrypt_block"](m2, cipher["key"], nrounds=cipher["number_of_attacked_rounds"])
end = time.time()
print("\nEstimated time to generate {} cipher output pairs: {:05.2f} [seconds]".format(num_samples, num_samples*(end-start)))
print("\nEstimated time to run {} experiments: {:05.2f} [seconds]".format(number_of_experiments, number_of_experiments*num_samples*(end-start)))
# test differential distinguisher
start = time.time()
results = test_differential_distinguisher(number_of_experiments, cipher, num_samples, verb=False)
end = time.time()
print("\nSUMMARY:")
print("--------\n")
print("ACCURACY when distinguishing real cipher: {}/{}".format(results[0][0], results[0][1]))
print("ACCURACY when distinguishing random permutation: {}/{}".format(results[1][0], results[1][1]))
print("TOTAL ACCURACY: {}/{}".format(results[0][0] + results[1][0], number_of_experiments))
print("\nTime: {:05.2f} [sec]\n".format(end-start))
|
import pytest
from yamlpath.merger.enums.outputdoctypes import (
OutputDocTypes)
class Test_merger_enums_outputdoctypes():
    """Tests for the OutputDocTypes enumeration."""
    def test_get_names(self):
        """Enumeration names are reported in declaration order."""
        assert OutputDocTypes.get_names() == [
            "AUTO",
            "JSON",
            "YAML",
        ]
    def test_get_choices(self):
        """CLI choices are the lower-cased names."""
        assert OutputDocTypes.get_choices() == [
            "auto",
            "json",
            "yaml",
        ]
    # FIX: renamed the parametrize arguments — `input` shadowed the builtin.
    @pytest.mark.parametrize("input_name,expected", [
        ("AUTO", OutputDocTypes.AUTO),
        ("JSON", OutputDocTypes.JSON),
        ("YAML", OutputDocTypes.YAML),
    ])
    def test_from_str(self, input_name, expected):
        """from_str maps each canonical name to its enum member."""
        assert expected == OutputDocTypes.from_str(input_name)
    def test_from_str_nameerror(self):
        """Unknown names raise NameError."""
        with pytest.raises(NameError):
            OutputDocTypes.from_str("NO SUCH NAME")
|
import os
import sys
# Disable Tensorflow warning/info logs.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Disable Tensorflow deprecation warnings
try:
from tensorflow.python.util import module_wrapper as deprecation
except ImportError:
from tensorflow.python.util import deprecation_wrapper as deprecation
deprecation._PER_MODULE_WARNING_LIMIT = 0
import h5py
import numpy as np
from six.moves import xrange
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from google.protobuf import text_format
import data_utils
FLAGS = tf.flags.FLAGS
# General flags.
tf.flags.DEFINE_string('pbtxt', '',
'GraphDef proto text file used to construct model '
'structure.')
tf.flags.DEFINE_string('ckpt', '',
'Checkpoint directory used to fill model values.')
tf.flags.DEFINE_string('vocab_file', '', 'Vocabulary file.')
tf.flags.DEFINE_string('output_file', '',
'File to dump results.')
tf.flags.DEFINE_string('input_file', '',
'file of sentences to be evaluated')
tf.flags.DEFINE_string("mode", '', "One 'of 'surprisal', 'predictions'")
# For saving demo resources, use batch size 1 and step 1.
BATCH_SIZE = 1
NUM_TIMESTEPS = 1
MAX_WORD_LEN = 50
def _LoadModel(gd_file, ckpt_file):
    """Load the model from GraphDef and Checkpoint.
    Args:
        gd_file: GraphDef proto text file.
        ckpt_file: TensorFlow Checkpoint file.
    Returns:
        TensorFlow session and tensors dict.
    """
    with tf.Graph().as_default():
        # Parse the text-format GraphDef describing the LM graph structure.
        with tf.gfile.GFile(gd_file, 'r') as f:
            s = f.read()
            gd = tf.GraphDef()
            text_format.Merge(s, gd)
        tf.logging.info('Recovering Graph %s', gd_file)
        # Import the graph and capture the named tensors/ops in a dict so
        # callers can feed/fetch them by short key. The key list and the
        # tensor-name list below must stay in the same order.
        t = {}
        [t['states_init'], t['lstm/lstm_0/control_dependency'],
         t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'],
         t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'],
         t['targets_in'], t['target_weights_in'], t['char_inputs_in'],
         t['all_embs'], t['softmax_weights'], t['global_step']
        ] = tf.import_graph_def(gd, {}, ['states_init',
                                         'lstm/lstm_0/control_dependency:0',
                                         'lstm/lstm_1/control_dependency:0',
                                         'softmax_out:0',
                                         'class_ids_out:0',
                                         'class_weights_out:0',
                                         'log_perplexity_out:0',
                                         'inputs_in:0',
                                         'targets_in:0',
                                         'target_weights_in:0',
                                         'char_inputs_in:0',
                                         'all_embs_out:0',
                                         'Reshape_3:0',
                                         'global_step:0'], name='')
        # Restore checkpoint values and initialize the recurrent state.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run('save/restore_all', {'save/Const:0': ckpt_file})
        sess.run(t['states_init'])
    return sess, t
def get_predictions(sentences, model, sess, vocab):
    """Yield per-sentence lists of next-token log-probability vectors.

    Args:
        sentences: List of pre-tokenized lists of tokens.
        model: tensors dict returned by `_LoadModel`.
        sess: live TensorFlow session holding the restored model.
        vocab: CharsVocabulary instance.

    Yields:
        One list per sentence; element j is the log-probability
        distribution (numpy array over the vocabulary) for token j given
        tokens 0..j-1, and element 0 is None (no left context).
    """
    inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
    char_ids_inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
    # Dummy inputs needed for the graph to compute
    targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
    target_weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32)
    for i, sentence in enumerate(sentences):
        # Reset the recurrent state before each sentence so predictions
        # do not leak context across sentences.
        sess.run(model["states_init"])
        # Compute token- and character-level vocabulary ID sequences
        sentence_ids = [vocab.word_to_id(w) for w in sentence]
        sentence_char_ids = [vocab.word_to_char_ids(w) for w in sentence]
        prev_word_id, prev_word_char_ids = None, None
        sentence_predictions = []
        for j, (word, word_id, char_ids) in enumerate(zip(sentence, sentence_ids, sentence_char_ids)):
            if j == 0:
                # No preceding token -> no prediction for the first word.
                sentence_predictions.append(None)
            else:
                # Feed the PREVIOUS token; the graph emits the
                # distribution over the current position.
                inputs[0, 0] = prev_word_id
                char_ids_inputs[0, 0, :] = prev_word_char_ids
                softmax = sess.run(model["softmax_out"],
                                   feed_dict={model["inputs_in"]: inputs,
                                              model["char_inputs_in"]: char_ids_inputs,
                                              model["targets_in"]: targets,
                                              model["target_weights_in"]: target_weights})[0]
                # TODO JRNN softmax distribution size is greater than the vocabulary.
                # Why is that .. ?
                # In any case, let's just truncate and renorm to the actual vocab
                softmax = softmax[:vocab.size]
                softmax /= softmax.sum()
                softmax = np.log(softmax)
                sentence_predictions.append(softmax)
            prev_word_id = word_id
            prev_word_char_ids = char_ids
        yield sentence_predictions
def get_surprisals(sentences, model, sess, vocab):
    """Yield, per sentence, a list of (token, surprisal-in-bits) pairs.

    The first token of each sentence has no prediction and is assigned a
    surprisal of 0.
    """
    all_preds = get_predictions(sentences, model, sess, vocab)
    ln2 = np.log(2)
    for sentence, sentence_preds in zip(sentences, all_preds):
        pairs = []
        for token, log_probs in zip(sentence, sentence_preds):
            if log_probs is None:
                pairs.append((token, 0.))
            else:
                # Convert natural-log probability to bits.
                pairs.append((token, -log_probs[vocab.word_to_id(token)] / ln2))
        yield pairs
def main(unused_argv):
    """Score sentences from --input_file with the pre-trained LM.

    Depending on --mode, writes either a TSV of per-token surprisals or an
    HDF5 file of per-token log-probability distributions to --output_file.
    """
    vocab = data_utils.CharsVocabulary(FLAGS.vocab_file, MAX_WORD_LEN)
    sess, model = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)

    # One pre-tokenized sentence per line, tokens separated by spaces.
    with open(FLAGS.input_file) as inf:
        sentences = [line.strip().split(" ") for line in inf]

    if FLAGS.mode == "surprisal":
        # BUG FIX: this previously read `open(output_file, "w")`, which
        # referenced an undefined name and raised NameError.
        outf = sys.stdout if FLAGS.output_file == "-" else open(FLAGS.output_file, "w")
        # Print TSV header
        outf.write("sentence_id\ttoken_id\ttoken\tsurprisal\n")
        surprisals = get_surprisals(sentences, model, sess, vocab)
        for i, (sentence, sentence_surps) in enumerate(zip(sentences, surprisals)):
            for j, (word, word_surp) in enumerate(sentence_surps):
                outf.write("%i\t%i\t%s\t%f\n" % (i + 1, j + 1, word, word_surp))
        # BUG FIX: do not close sys.stdout when writing to "-".
        if outf is not sys.stdout:
            outf.close()
    elif FLAGS.mode == "predictions":
        outf = h5py.File(FLAGS.output_file, "w")
        predictions = get_predictions(sentences, model, sess, vocab)
        for i, (sentence, sentence_preds) in enumerate(zip(sentences, predictions)):
            token_ids = [vocab.word_to_id(word) for word in sentence]
            # The first token has no prediction; substitute a uniform
            # log-distribution so every row has the same shape.
            # NOTE(review): a single-token sentence leaves sentence_preds
            # empty here and would raise IndexError — confirm inputs
            # always have >= 2 tokens.
            sentence_preds = sentence_preds[1:]
            first_word_pred = np.ones_like(sentence_preds[0])
            first_word_pred /= first_word_pred.sum()
            first_word_pred = np.log(first_word_pred)
            sentence_preds = np.vstack([first_word_pred] + sentence_preds)
            group = outf.create_group("/sentence/%i" % i)
            group.create_dataset("predictions", data=sentence_preds)
            group.create_dataset("tokens", data=token_ids)
        # Store the vocabulary, utf-8 encoded for h5py string storage.
        vocab_encoded = np.array(vocab._id_to_word)
        vocab_encoded = np.char.encode(vocab_encoded, "utf-8")
        outf.create_dataset("/vocabulary", data=vocab_encoded)
        outf.close()
    else:
        raise ValueError("Unknown --mode %s" % FLAGS.mode)


if __name__ == '__main__':
    tf.app.run()
|
from flask import Flask
from flask_restx import Api
class Server():
    """Thin wrapper bundling a Flask app with a flask-restx Api."""

    def __init__(self):
        self.app = Flask(__name__)
        self.api = Api(self.app,
                       version='0.0.1',
                       title='flask api'
                       )

    def run(self, debug=True):
        """Start the development server.

        Args:
            debug: enable Flask debug mode. Defaults to True to preserve
                the previous hard-coded behavior; pass False for
                non-development use.
        """
        # NOTE(review): debug=True must never be used in production.
        self.app.run(
            debug=debug
        )


# Module-level singleton used by the application entry point.
server = Server()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qtView.ui'
#
# Created: Tue Mar 10 18:45:33 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 only exists under the PyQt4 API-v1 string model; on
# API-v2 builds (where all strings are native unicode) fall back to the
# identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# QApplication.UnicodeUTF8 was removed in newer PyQt4 releases; pick the
# translate() signature that matches the installed version.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_facewindow(object):
    """Generated UI layout for the SMFS manager main window.

    NOTE: auto-generated by the PyQt4 uic from 'qtView.ui' — edit the .ui
    file and regenerate rather than modifying this class by hand.
    """

    def setupUi(self, facewindow):
        """Create all widgets and lay them out on *facewindow*."""
        facewindow.setObjectName(_fromUtf8("facewindow"))
        facewindow.resize(1187, 673)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../../../git/SiMPlE/smfsmanager/D_mica.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        facewindow.setWindowIcon(icon)
        self.centralwidget = QtGui.QWidget(facewindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # Main curve plot (pyqtgraph PlotWidget, imported at module end).
        self.grafo = PlotWidget(self.centralwidget)
        self.grafo.setGeometry(QtCore.QRect(0, 0, 751, 551))
        self.grafo.setToolTip(_fromUtf8(""))
        self.grafo.setStatusTip(_fromUtf8(""))
        self.grafo.setWhatsThis(_fromUtf8(""))
        self.grafo.setObjectName(_fromUtf8("grafo"))
        # Curve-selection scrollbar / dial / spinbox (min == max == 1 at
        # startup; presumably re-ranged once files are loaded — confirm).
        self.slide1 = QtGui.QScrollBar(self.centralwidget)
        self.slide1.setEnabled(True)
        self.slide1.setGeometry(QtCore.QRect(750, 0, 16, 551))
        self.slide1.setMinimum(1)
        self.slide1.setMaximum(1)
        self.slide1.setTracking(False)
        self.slide1.setOrientation(QtCore.Qt.Vertical)
        self.slide1.setObjectName(_fromUtf8("slide1"))
        self.slide2 = QtGui.QDial(self.centralwidget)
        self.slide2.setEnabled(True)
        self.slide2.setGeometry(QtCore.QRect(790, 555, 71, 71))
        self.slide2.setMinimum(1)
        self.slide2.setMaximum(1)
        self.slide2.setTracking(True)
        self.slide2.setObjectName(_fromUtf8("slide2"))
        self.griglia = QtGui.QGraphicsView(self.centralwidget)
        self.griglia.setEnabled(True)
        self.griglia.setGeometry(QtCore.QRect(770, 10, 121, 511))
        self.griglia.setObjectName(_fromUtf8("griglia"))
        self.labFilename = QtGui.QLabel(self.centralwidget)
        self.labFilename.setGeometry(QtCore.QRect(480, 10, 261, 16))
        self.labFilename.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.labFilename.setObjectName(_fromUtf8("labFilename"))
        self.slide3 = QtGui.QSpinBox(self.centralwidget)
        self.slide3.setGeometry(QtCore.QRect(770, 524, 121, 27))
        self.slide3.setMinimum(1)
        self.slide3.setMaximum(1)
        self.slide3.setObjectName(_fromUtf8("slide3"))
        self.bAddFiles = QtGui.QPushButton(self.centralwidget)
        self.bAddFiles.setGeometry(QtCore.QRect(650, 560, 89, 27))
        self.bAddFiles.setObjectName(_fromUtf8("bAddFiles"))
        self.bAddDir = QtGui.QPushButton(self.centralwidget)
        self.bAddDir.setGeometry(QtCore.QRect(650, 590, 89, 27))
        self.bAddDir.setObjectName(_fromUtf8("bAddDir"))
        # "View" group: derivative order, segmentation toggle and the
        # curve/derivative radio pair.
        self.groupBox = QtGui.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(10, 560, 171, 91))
        self.groupBox.setStyleSheet(_fromUtf8(""))
        self.groupBox.setFlat(False)
        self.groupBox.setCheckable(False)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.derorder = QtGui.QSpinBox(self.groupBox)
        self.derorder.setGeometry(QtCore.QRect(120, 50, 48, 27))
        self.derorder.setMaximum(10)
        self.derorder.setProperty("value", 1)
        self.derorder.setObjectName(_fromUtf8("derorder"))
        self.segment = QtGui.QCheckBox(self.groupBox)
        self.segment.setGeometry(QtCore.QRect(120, 20, 41, 22))
        self.segment.setObjectName(_fromUtf8("segment"))
        self.layoutWidget = QtGui.QWidget(self.groupBox)
        self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 102, 52))
        self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget)
        self.verticalLayout_2.setMargin(0)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.radio_view = QtGui.QRadioButton(self.layoutWidget)
        self.radio_view.setStyleSheet(_fromUtf8("border: none;"))
        self.radio_view.setChecked(True)
        self.radio_view.setObjectName(_fromUtf8("radio_view"))
        self.buttonGroup = QtGui.QButtonGroup(facewindow)
        self.buttonGroup.setObjectName(_fromUtf8("buttonGroup"))
        self.buttonGroup.addButton(self.radio_view)
        self.verticalLayout_2.addWidget(self.radio_view)
        self.radio_deriv = QtGui.QRadioButton(self.layoutWidget)
        self.radio_deriv.setStyleSheet(_fromUtf8("border: none;"))
        self.radio_deriv.setObjectName(_fromUtf8("radio_deriv"))
        self.buttonGroup.addButton(self.radio_deriv)
        self.verticalLayout_2.addWidget(self.radio_deriv)
        # Analysis-parameter grid (thresholds, window, slope, Z distances).
        self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
        self.groupBox_2.setGeometry(QtCore.QRect(190, 560, 451, 91))
        self.groupBox_2.setStyleSheet(_fromUtf8("border: none;"))
        self.groupBox_2.setTitle(_fromUtf8(""))
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.layoutWidget1 = QtGui.QWidget(self.groupBox_2)
        self.layoutWidget1.setGeometry(QtCore.QRect(10, 10, 431, 71))
        self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
        self.gridLayout = QtGui.QGridLayout(self.layoutWidget1)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.sg_mm = QtGui.QSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.sg_mm.setFont(font)
        self.sg_mm.setStyleSheet(_fromUtf8("border: none;"))
        self.sg_mm.setMinimum(-90)
        self.sg_mm.setMaximum(90)
        self.sg_mm.setProperty("value", 45)
        self.sg_mm.setObjectName(_fromUtf8("sg_mm"))
        self.gridLayout.addWidget(self.sg_mm, 1, 3, 1, 1)
        self.label_4 = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.label_4.setFont(font)
        self.label_4.setStyleSheet(_fromUtf8("border: none;"))
        self.label_4.setTextFormat(QtCore.Qt.AutoText)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1)
        self.s_vth = QtGui.QDoubleSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.s_vth.setFont(font)
        self.s_vth.setStyleSheet(_fromUtf8("border: none;"))
        self.s_vth.setMaximum(9999.0)
        self.s_vth.setSingleStep(0.1)
        self.s_vth.setProperty("value", 10.0)
        self.s_vth.setObjectName(_fromUtf8("s_vth"))
        self.gridLayout.addWidget(self.s_vth, 1, 1, 1, 1)
        self.label = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.label.setFont(font)
        self.label.setStyleSheet(_fromUtf8("border: none;"))
        self.label.setTextFormat(QtCore.Qt.AutoText)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.label_2.setFont(font)
        self.label_2.setStyleSheet(_fromUtf8("border: none;"))
        self.label_2.setTextFormat(QtCore.Qt.AutoText)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.label_3.setFont(font)
        self.label_3.setStyleSheet(_fromUtf8("border: none;"))
        self.label_3.setTextFormat(QtCore.Qt.AutoText)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1)
        self.s_mth = QtGui.QDoubleSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.s_mth.setFont(font)
        self.s_mth.setStyleSheet(_fromUtf8("border: none;"))
        self.s_mth.setDecimals(3)
        self.s_mth.setMaximum(1000.0)
        self.s_mth.setSingleStep(0.1)
        self.s_mth.setProperty("value", 1.5)
        self.s_mth.setObjectName(_fromUtf8("s_mth"))
        self.gridLayout.addWidget(self.s_mth, 0, 1, 1, 1)
        self.label_5 = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        font.setKerning(True)
        self.label_5.setFont(font)
        self.label_5.setStyleSheet(_fromUtf8("border: none;"))
        self.label_5.setFrameShape(QtGui.QFrame.NoFrame)
        self.label_5.setLineWidth(1)
        self.label_5.setTextFormat(QtCore.Qt.AutoText)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1)
        self.plath = QtGui.QDoubleSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.plath.setFont(font)
        self.plath.setMinimum(-100000.0)
        self.plath.setMaximum(100000.0)
        self.plath.setProperty("value", 3000.0)
        self.plath.setObjectName(_fromUtf8("plath"))
        self.gridLayout.addWidget(self.plath, 0, 5, 1, 1)
        self.label_6 = QtGui.QLabel(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.label_6.setFont(font)
        self.label_6.setStyleSheet(_fromUtf8("border: none;"))
        self.label_6.setLineWidth(1)
        self.label_6.setTextFormat(QtCore.Qt.AutoText)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.gridLayout.addWidget(self.label_6, 1, 4, 1, 1)
        self.lasth = QtGui.QDoubleSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.lasth.setFont(font)
        self.lasth.setMaximum(1000.0)
        self.lasth.setProperty("value", 5.0)
        self.lasth.setObjectName(_fromUtf8("lasth"))
        self.gridLayout.addWidget(self.lasth, 1, 5, 1, 1)
        self.sg_fw = QtGui.QDoubleSpinBox(self.layoutWidget1)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Verdana"))
        font.setPointSize(8)
        self.sg_fw.setFont(font)
        self.sg_fw.setStyleSheet(_fromUtf8("border: none;"))
        self.sg_fw.setMinimum(0.01)
        self.sg_fw.setMaximum(99.99)
        self.sg_fw.setSingleStep(0.1)
        self.sg_fw.setProperty("value", 0.5)
        self.sg_fw.setObjectName(_fromUtf8("sg_fw"))
        self.gridLayout.addWidget(self.sg_fw, 0, 3, 1, 1)
        # Histogram and scatter plots with their data-selection combos.
        self.isto = PlotWidget(self.centralwidget)
        self.isto.setGeometry(QtCore.QRect(900, 10, 281, 251))
        self.isto.setToolTip(_fromUtf8(""))
        self.isto.setStatusTip(_fromUtf8(""))
        self.isto.setWhatsThis(_fromUtf8(""))
        self.isto.setObjectName(_fromUtf8("isto"))
        self.scatter = PlotWidget(self.centralwidget)
        self.scatter.setGeometry(QtCore.QRect(900, 310, 281, 241))
        self.scatter.setToolTip(_fromUtf8(""))
        self.scatter.setStatusTip(_fromUtf8(""))
        self.scatter.setWhatsThis(_fromUtf8(""))
        self.scatter.setObjectName(_fromUtf8("scatter"))
        self.cIsto = QtGui.QComboBox(self.centralwidget)
        self.cIsto.setGeometry(QtCore.QRect(900, 270, 181, 27))
        self.cIsto.setEditable(False)
        self.cIsto.setObjectName(_fromUtf8("cIsto"))
        self.cIsto.addItem(_fromUtf8(""))
        self.cIsto.addItem(_fromUtf8(""))
        self.cIsto.addItem(_fromUtf8(""))
        self.bStat = QtGui.QPushButton(self.centralwidget)
        self.bStat.setGeometry(QtCore.QRect(900, 590, 89, 27))
        self.bStat.setObjectName(_fromUtf8("bStat"))
        self.cScatter = QtGui.QComboBox(self.centralwidget)
        self.cScatter.setGeometry(QtCore.QRect(900, 560, 281, 27))
        self.cScatter.setEditable(False)
        self.cScatter.setObjectName(_fromUtf8("cScatter"))
        self.cScatter.addItem(_fromUtf8(""))
        self.cScatter.addItem(_fromUtf8(""))
        self.cScatter.addItem(_fromUtf8(""))
        self.nBins = QtGui.QSpinBox(self.centralwidget)
        self.nBins.setGeometry(QtCore.QRect(1107, 270, 71, 27))
        self.nBins.setMinimum(1)
        self.nBins.setMaximum(1000)
        self.nBins.setProperty("value", 20)
        self.nBins.setObjectName(_fromUtf8("nBins"))
        self.bClear = QtGui.QPushButton(self.centralwidget)
        self.bClear.setGeometry(QtCore.QRect(650, 620, 89, 27))
        self.bClear.setObjectName(_fromUtf8("bClear"))
        self.bFreeze = QtGui.QPushButton(self.centralwidget)
        self.bFreeze.setGeometry(QtCore.QRect(1120, 10, 61, 27))
        self.bFreeze.setObjectName(_fromUtf8("bFreeze"))
        self.bSSave = QtGui.QPushButton(self.centralwidget)
        self.bSSave.setGeometry(QtCore.QRect(990, 590, 89, 27))
        self.bSSave.setObjectName(_fromUtf8("bSSave"))
        self.labDetails = QtGui.QLabel(self.centralwidget)
        self.labDetails.setGeometry(QtCore.QRect(0, 20, 741, 20))
        self.labDetails.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.labDetails.setObjectName(_fromUtf8("labDetails"))
        facewindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(facewindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1187, 25))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        facewindow.setMenuBar(self.menubar)
        self.retranslateUi(facewindow)
        self.cIsto.setCurrentIndex(2)
        QtCore.QMetaObject.connectSlotsByName(facewindow)

    def retranslateUi(self, facewindow):
        """Set all user-visible strings (translation hook)."""
        facewindow.setWindowTitle(_translate("facewindow", "MainWindow", None))
        self.labFilename.setText(_translate("facewindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">FILENAME</span></p></body></html>", None))
        self.bAddFiles.setText(_translate("facewindow", "Add Files", None))
        self.bAddDir.setText(_translate("facewindow", "Add DIR", None))
        self.groupBox.setTitle(_translate("facewindow", "View", None))
        self.segment.setText(_translate("facewindow", "S", None))
        self.radio_view.setText(_translate("facewindow", "Curve", None))
        self.radio_deriv.setText(_translate("facewindow", "Derivative", None))
        self.label_4.setText(_translate("facewindow", "Slope (°)", None))
        self.label.setText(_translate("facewindow", "Thresh (std)", None))
        self.label_2.setText(_translate("facewindow", "MinLen [nm]", None))
        self.label_3.setText(_translate("facewindow", "Window", None))
        self.label_5.setText(_translate("facewindow", "Zdist [nm]", None))
        self.label_6.setText(_translate("facewindow", "LastTH [pN]", None))
        self.cIsto.setItemText(0, _translate("facewindow", "Length", None))
        self.cIsto.setItemText(1, _translate("facewindow", "Number", None))
        self.cIsto.setItemText(2, _translate("facewindow", "Step", None))
        self.bStat.setText(_translate("facewindow", "Do stats", None))
        self.cScatter.setItemText(0, _translate("facewindow", "Length vs Position", None))
        self.cScatter.setItemText(1, _translate("facewindow", "Step vs Position", None))
        self.cScatter.setItemText(2, _translate("facewindow", "Step vs Length", None))
        self.bClear.setText(_translate("facewindow", "Clear ALL", None))
        self.bFreeze.setText(_translate("facewindow", "Freeze", None))
        self.bSSave.setText(_translate("facewindow", "Save stats", None))
        self.labDetails.setText(_translate("facewindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">xxx</p></body></html>", None))
from pyqtgraph import PlotWidget
|
#!/usr/bin/env python3
import boto3
def lambda_handler(event, context):
    """Return a mapping of every IAM user name to that user's group names.

    Args:
        event: Lambda event payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        dict with 'statusCode' 200 and 'body' = {user_name: [group_name, ...]}.
    """
    users = {}
    iam = boto3.client('iam')
    # BUG FIX: list_users() returns at most one page (100 users by default);
    # use the paginator so large accounts are fully enumerated.
    for page in iam.get_paginator('list_users').paginate():
        for user in page['Users']:
            user_name = user['UserName']
            # NOTE(review): list_groups_for_user is also paginated; a user
            # in >100 groups would be truncated here — confirm acceptable.
            user_groups = iam.list_groups_for_user(UserName=user_name)
            users[user_name] = [g['GroupName'] for g in user_groups['Groups']]
    return {
        'statusCode': 200,
        'body': users
    }
|
from .imgloader_CT_clss_3D import pytorch_loader_clss3D
from .imgloader_CT_clss_3D_calcium import pytorch_loader_clss3D_calcium
|
import unittest
import time
from common import gpu_test
class TestJAX(unittest.TestCase):
    """Smoke test for JAX autodiff on the GPU image."""

    def tanh(self, x):
        """tanh(x) computed via exp, so grad() has something to trace."""
        import jax.numpy as np
        y = np.exp(-2.0 * x)
        return (1.0 - y) / (1.0 + y)

    @gpu_test
    def test_JAX(self):
        # importing inside the gpu-only test because these packages can't be
        # imported on the CPU image since they are not present there.
        from jax import grad, jit
        grad_tanh = grad(self.tanh)
        ag = grad_tanh(1.0)
        # BUG FIX: assertEqual on a computed float32 is brittle (exact
        # binary equality); compare to 6 decimal places instead.
        self.assertAlmostEqual(0.4199743, ag, places=6)
|
from sfml import sf
# Python 2.* compatibility: alias raw_input to input so the rest of the
# script can call input() uniformly on both major versions.
try: input = raw_input
except NameError: pass
def main():
    """Interactively record audio with SFML, then play or save it."""
    # Nothing to do on systems without a capture device.
    if not sf.SoundRecorder.is_available():
        print("Sorry, audio capture is not supported by your system")
        return

    # Let the user pick the capture sample rate.
    rate = int(input("Please choose the sample rate for sound capture (44100 is CD quality): "))

    input("Press enter to start recording audio")

    # SoundBufferRecorder captures on its own thread into a sf.SoundBuffer,
    # so blocking the main thread on input() while recording is fine.
    buffer_recorder = sf.SoundBufferRecorder()
    buffer_recorder.start(rate)
    input("Recording... press enter to stop")
    buffer_recorder.stop()

    captured = buffer_recorder.buffer

    # Summarize what was captured.
    print("Sound information:")
    print("{0} seconds".format(captured.duration))
    print("{0} samples / seconds".format(captured.sample_rate))
    print("{0} channels".format(captured.channel_count))

    action = input("What do you want to do with captured sound (p = play, s = save) ? ")
    if action != 's':
        # Play the capture back and poll until playback finishes.
        playback = sf.Sound(captured)
        playback.play()
        while playback.status == sf.Sound.PLAYING:
            # Leave some CPU time for other threads.
            sf.sleep(sf.milliseconds(100))
    else:
        target = input("Choose the file to create: ")
        captured.to_file(target)

    print("Done !")
    input("Press enter to exit...")


if __name__ == "__main__":
    main()
|
"""Tests for `declared_register` package."""
import pytest
from utils import formatters
@pytest.fixture
def response():
    """Sample pytest fixture (cookiecutter stub).

    Currently returns None and is unused by the tests below; kept as a
    template. See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_fill_spaces_formatter(response):
    """An empty string is padded out to exactly `length` spaces."""
    for width in range(4):
        assert formatters.fill_spaces("", {"length": width}) == " " * width
def test_fill_spaces_formatter_overflow(response):
    """Overlong input is truncated to the LAST `length` characters."""
    cases = [
        ("aaaaaaa", 0, ""),
        ("aaaaaab", 1, "b"),
        ("aaaaabb", 2, "bb"),
        ("aaaabbb", 3, "bbb"),
    ]
    for value, width, expected in cases:
        assert formatters.fill_spaces(value, {"length": width}) == expected
def test_fill_spaces_formatter_int(response):
    """Integers are stringified, then padded/truncated like strings."""
    cases = [
        (1, 0, ""),
        (1, 1, "1"),
        (11, 2, "11"),
        (1, 3, "  1"),
    ]
    for value, width, expected in cases:
        assert formatters.fill_spaces(value, {"length": width}) == expected
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: Aug 2017
@author: D34D9001@9R0GR4M13
"""
import kerri
import os
import subprocess
import symbo
import sys
import random
import requests
import webbrowser
from bs4 import BeautifulSoup as bs
from termcolor import colored
######################
# FAKE ID GENERATION #
######################
class FakeID(object):
    """ This class gets information from http://www.fakenamegenerator.com
    to generate a completely usable fake id quickly """

    # Human-readable nameset name -> fakenamegenerator.com nameset code.
    # (Declared `global` so module-level helpers can reach them too.)
    global nameset
    nameset = {"American":"us", "Arabic":"ar", "Brazil":"br", "Chechen":"celat",
               "Chinese":"ch", "Chinese-Traditional":"zhtw", "Croation":"hr",
               "Czech":"cs", "Danish":"dk", "Dutch":"nl", "England Wales":"en",
               "Eritrean":"er", "Finnish":"fi", "French":"fr", "German":"gr",
               "Greenland":"gl", "Hispanic":"sp", "Hobbit":"hobbit",
               "Hungarian":"hu", "Icelandic":"is", "Igbo":"ig", "Italian":"it",
               "Japanese":"jpja", "Japanese-Anglicized":"jp", "Ninja":"ninja",
               "Norwegian":"no", "Persian":"fa", "Polish":"pl", "Russian":"ru",
               "Russian-Cyrillic":"rucyr", "Scottish":"gd", "Slovenian":"sl",
               "Swedish":"sw", "Thai":"th", "Vietnamese":"vn"}
    # Country name -> fakenamegenerator.com country code.
    global country
    country = {"Australia":"au", "Austria":"as", "Belgium":"bg", "Brazil":"br",
               "Canada":"ca", "Cyprus Anglicized":"cyen", "Cyprus Greek":"cygk",
               "Czech Republic":"cz", "Denmark":"dk", "Estonia":"ee", "Finland":"fi",
               "France":"fr", "Germany":"gr", "Greenland":"gl", "Hungarian":"hu",
               "Iceland":"is", "Italy":"it", "Netherlands":"nl", "New Zealand":"nz",
               "Norway":"no", "Poland":"pl", "Portugal":"pt", "Slovenia":"sl",
               "South Africa":"za", "Spain":"sp", "Sweden":"sw", "Switzerland":"sz",
               "Tunisia":"tn", "United Kingdom":"uk", "United States":"us",
               "Uruguay":"uy"}

    def __str__(self):
        return """ Controls Kaos' FakeID Generation Operations """

    def id_gen(self, nset=nameset["American"], cntry=country["United States"], sex="random", out=None, *args):
        """ Generate a fake id.

        Args:
            nset: nameset name (key of `nameset`) or a raw site code.
            cntry: country name (key of `country`) or a raw site code.
            sex: "male", "female" or "random" (case-insensitive).
            out: directory to write the profile file into; when None the
                assembled profile is returned as a string instead.

        Returns:
            The profile text (out=None), None after writing a file, or the
            caught exception object on scrape failure.
        """
        if len(args) >= 1:
            raise kerri.ExcessArguments("id_gen()", 4)
        else:
            def srem_temp():
                """ Deletes temporary files created by id_gen() """
                # Securely remove each scratch file if it exists.
                for tmp in ("/tmp/fakeid", "/tmp/fakemail", "/tmp/fakessn", "/tmp/numfile"):
                    if os.path.isfile(tmp):
                        subprocess.call(["srm", "-f", tmp])

            # Progress log, also reused below as the scraped-profile dump.
            fake_init2 = open("/tmp/fakeid", 'w')
            fake_init2.write("Started\n")
            fake_init2.close()
            # Resolve friendly names to site codes, falling back to defaults.
            if nset != nameset["American"]:
                try:
                    nset = nameset[nset]
                except KeyError:
                    sys.stderr.write("%s is not a valid nameset option...\nUsing default option instead...\n" % nset)
                    nset = nameset["American"]
            fake_init2 = open("/tmp/fakeid", 'a')
            fake_init2.write("Nameset Set\n")
            fake_init2.close()
            if cntry != country["United States"]:
                try:
                    cntry = country[cntry]
                except KeyError:
                    sys.stderr.write("%s is not a valid country option...\nUsing default option instead...\n" % cntry)
            fake_init2 = open("/tmp/fakeid", 'a')
            fake_init2.write("Country Set\n")
            fake_init2.close()
            sex = sex.lower()
            if sex != "male" and sex != "female" and sex != "random":
                sys.stderr.write("%s is not a valid sex option...\nUsing default option instead...\n" % sex)
                sex = "random"
            if out is None:
                # Return the profile as a string.
                try:
                    data = []
                    headers = {"User-Agent": "my web scraping program. contact me at admin@domain.com"}
                    page = requests.get("https://www.fakenamegenerator.com/gen-%s-%s-%s.php" % (sex, nset, cntry), headers=headers)
                    soup = bs(page.content, 'html.parser')
                    fake_init2 = open("/tmp/fakeid", 'a')
                    fake_init2.write("Website Found\n")
                    fake_init2.close()
                    # Renamed from `list` — it shadowed the builtin.
                    info_block = soup.find_all('div', class_='info')[0].get_text().rstrip()
                    fake_init2 = open("/tmp/fakeid", 'a')
                    fake_init2.write("List Created\n")
                    fake_init2.close()
                    try:
                        link = soup.find_all('div', class_='adtl')[1]
                        fake_init2 = open("/tmp/fakeid", 'a')
                        fake_init2.write("Links Found\n")
                        fake_init2.close()
                    except IndexError as error:
                        return "THERE WAS A PROBLEM! \n PLEASE SELECT DIFFERENT OPTIONS AND TRY AGAIN"
                    info = info_block.strip()
                    # Rewrite the scratch file with just the profile text so the
                    # grep/sed extraction below has a clean input.
                    fake_init = open("/tmp/fakeid", 'w')
                    fake_init.write("\n")
                    fake_init.close()
                    fakefile = open("/tmp/fakeid", 'a')
                    fakefile.write("%s" % info)
                    fakefile.close()
                    try:
                        with open("/tmp/fakemail", 'w') as fakemail:
                            fakemail.write(str(link))
                    except Exception as error:
                        return error
                    # Pull each profile section back out of the dump.
                    finance = subprocess.check_output(["grep", "-A", "12", "-i", "finance", "/tmp/fakeid"])
                    birthday = subprocess.check_output(["grep", "-A", "9", "-i", "birthday", "/tmp/fakeid"])
                    name = subprocess.check_output(["sed", "-n", "2p", "/tmp/fakeid"])
                    address = subprocess.check_output(["sed", "-n", "4p", "/tmp/fakeid"])
                    adr = address.strip()
                    maiden = subprocess.check_output(["grep", "-A", "1", "-i", "mother", "/tmp/fakeid"])
                    phone = subprocess.check_output(["grep", "-A", "5", "-i", "phone", "/tmp/fakeid"])
                    online = subprocess.check_output(["grep", "-A", "19", "Online", "/tmp/fakeid"])
                    employment = subprocess.check_output(["grep", "-A", "7", "Employment", "/tmp/fakeid"])
                    charics = subprocess.check_output(["grep", "-A", "11", "Physical", "/tmp/fakeid"])
                    tracking = subprocess.check_output(["grep", "-A", "11", "Tracking", "/tmp/fakeid"])
                    color = subprocess.check_output(["grep", "-A", "2", "Favorite", "/tmp/fakeid"])
                    vehicle = subprocess.check_output(["grep", "-A", "2", "Vehicle", "/tmp/fakeid"])
                    guid = subprocess.check_output(["grep", "-A", "2", "GUID", "/tmp/fakeid"])
                    geo = subprocess.check_output(["grep", "-A", "1", "Geo", "/tmp/fakeid"])
                    data.append("%s" % name.decode())
                    # Extract the site's partial SSN and complete it with
                    # four random digits.
                    os.system("grep -o -P '.{0,0}SSN.{0,11}' /tmp/fakeid | cut -f2- -dN | head -1 > /tmp/fakessn")
                    social = subprocess.check_output(["cat", "/tmp/fakessn"])
                    if len(social) > 3:
                        social = social[:7]
                        end = random.randint(1000, 9999)
                        ssn = social.decode() + str(end)
                        data.append("SSN: %s" % ssn)
                    else:
                        data.append("N/A")
                    data.append("%s\n" % adr.decode())
                    data.append("%s" % geo.decode())
                    data.append("%s" % phone.decode())
                    data.append("%s" % birthday.decode())
                    data.append("%s" % charics.decode())
                    data.append("%s" % color.decode())
                    data.append("%s" % maiden.decode())
                    data.append("%s" % vehicle.decode())
                    data.append("%s" % employment.decode())
                    data.append("%s" % finance.decode())
                    data.append("%s" % online.decode())
                    data.append("%s" % tracking.decode())
                    data.append("%s" % guid.decode())
                    complete_data = ""
                    for item in data:
                        complete_data = complete_data + "%s \n" % item
                    return complete_data
                except requests.ConnectionError:
                    raise kerri.INetFailure("id_gen()")
                except Exception as error:
                    return error
            else:
                # Write the profile to a file named after the fake identity,
                # inside the `out` directory.
                try:
                    page = requests.get("https://www.fakenamegenerator.com/gen-%s-%s-%s.php" % (sex, nset, cntry))
                    soup = bs(page.content, 'html.parser')
                    info_block = soup.find_all('div', class_='info')[0].get_text().rstrip()
                    try:
                        link = soup.find_all('div', class_='adtl')[1]
                    except IndexError as error:
                        raise error
                    info = info_block.strip()
                    with open("/tmp/fakeid", 'w') as fakefile:
                        fakefile.write("%s" % info)
                    try:
                        with open("/tmp/fakemail", 'w') as fakemail:
                            fakemail.write(str(link))
                    except Exception as error:
                        raise error
                    # BUG FIX: check_output returns bytes on Python 3; decode
                    # each capture once so %-formatting below doesn't embed
                    # b'...' artifacts and string methods work.
                    finance = subprocess.check_output(["grep", "-A", "12", "-i", "finance", "/tmp/fakeid"]).decode()
                    birthday = subprocess.check_output(["grep", "-A", "9", "-i", "birthday", "/tmp/fakeid"]).decode()
                    name = subprocess.check_output(["sed", "-n", "1p", "/tmp/fakeid"]).decode()
                    address = subprocess.check_output(["sed", "-n", "4p", "/tmp/fakeid"]).decode()
                    adr = address.strip()
                    maiden = subprocess.check_output(["grep", "-A", "1", "-i", "mother", "/tmp/fakeid"]).decode()
                    phone = subprocess.check_output(["grep", "-A", "5", "-i", "phone", "/tmp/fakeid"]).decode()
                    online = subprocess.check_output(["grep", "-A", "19", "Online", "/tmp/fakeid"]).decode()
                    employment = subprocess.check_output(["grep", "-A", "7", "Employment", "/tmp/fakeid"]).decode()
                    charics = subprocess.check_output(["grep", "-A", "11", "Physical", "/tmp/fakeid"]).decode()
                    tracking = subprocess.check_output(["grep", "-A", "11", "Tracking", "/tmp/fakeid"]).decode()
                    color = subprocess.check_output(["grep", "-A", "2", "Favorite", "/tmp/fakeid"]).decode()
                    vehicle = subprocess.check_output(["grep", "-A", "2", "Vehicle", "/tmp/fakeid"]).decode()
                    guid = subprocess.check_output(["grep", "-A", "2", "GUID", "/tmp/fakeid"]).decode()
                    geo = subprocess.check_output(["grep", "-A", "1", "Geo", "/tmp/fakeid"]).decode()
                    with open("%s/%s" % (out, name.rstrip()), 'w') as kfile:
                        kfile.write("%s\n%s\n" % (symbo.SEP, symbo.SEP))
                        kfile.write("Name: %s\n" % name)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("SSN:\n")
                        os.system("grep -o -P '.{0,0}SSN.{0,11}' /tmp/fakeid | cut -f2- -dN | head -1 > /tmp/fakessn")
                        social = subprocess.check_output(["cat", "/tmp/fakessn"]).decode()
                        if len(social) > 3:
                            social = social[:7]
                            last4 = ''.join(random.sample('0123456789', 4))
                            with open("/tmp/numfile", 'w') as outfile:
                                # BUG FIX: write() needs a str; writing
                                # int(last4) raised TypeError.
                                outfile.write(last4)
                            end = subprocess.check_output(["cat", "/tmp/numfile"]).decode()
                            # BUG FIX: str() on bytes produced "b'...'" here.
                            ssn = social + end.strip()
                            kfile.write("%s\n" % ssn)
                        else:
                            kfile.write("N/A\n")
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("Address: %s\n" % adr)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % geo)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % phone)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % birthday)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % charics)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % color)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % maiden)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % vehicle)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % employment)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % finance)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % online)
                        kfile.write("[*] Here Is The Link To Your Email [*]\n")
                        try:
                            # BUG FIX: name is decoded above; also strip the
                            # trailing newline so it can't corrupt the URL.
                            new_name = name.lower().replace(" ", "").replace(".", "").strip()
                            providers = ('dayrep.com', 'armyspy.com', 'cuvox.de', 'einrot.com', 'fleckens.hu', 'gustr.com', 'jourrapide.com', 'rhyta.com', 'supperito.com', 'teleworm.us')
                            email_address = "%s/%s" % (str(random.choice(providers)), new_name)
                            kfile.write("https://fakemailgenerator.com/#/%s" % email_address)
                        except Exception:
                            kfile.write("N/A\n")
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % tracking)
                        kfile.write("%s\n" % symbo.SEP)
                        kfile.write("%s\n" % guid)
                        kfile.write("%s\n" % symbo.SEP)
                except requests.ConnectionError:
                    raise kerri.INetFailure("id_gen()")
                except subprocess.CalledProcessError:
                    raise kerri.ProcessFailure("id_gen()")
                except Exception as error:
                    # NOTE(review): errors in this branch are swallowed
                    # (kept from the original) — confirm this is intended.
                    pass

    def id_gen_opts(self, *args):
        """ Lists options available to use with id_gen() """
        if len(args) >= 1:
            raise kerri.ExcessArguments("id_gen_opts()", 0)
        else:
            avail_opts = "NAMESETS:\n"
            for item in nameset.keys():
                avail_opts = avail_opts + "%s | " % item
            avail_opts = avail_opts + "\n\nCOUNTRIES:\n"
            for item in country.keys():
                avail_opts = avail_opts + "%s | " % item
            return avail_opts

    def chk_fkmail(self, email, *args):
        """ Check Email Created By FakeID """
        if len(args) > 1:
            raise kerri.ExcessArguments("chk_fkmail()", 1)
        else:
            # Split user@domain and open the matching fakemailgenerator inbox.
            addr = str(email)
            un = addr.split('@')[0]
            ext = addr.split('@')[1]
            subprocess.Popen(["/usr/bin/firefox", "http://www.fakemailgenerator.com/#/%s/%s/" % (ext, un)])
#########
# INITs #
#########
# Module-level singleton plus bare-name aliases so callers can invoke
# id_gen(...), id_gen_opts() and check_fmail(...) without instantiating
# FakeID themselves.
fakeid = FakeID()
id_gen = fakeid.id_gen
id_gen_opts = fakeid.id_gen_opts
check_fmail = fakeid.chk_fkmail
|
from timeflux.nodes.window import TimeWindow
import numpy as np
import pandas as pd
class Power(TimeWindow):
    """Average of squared samples on a moving window.

    Attributes:
        i (Port): Default input, expects DataFrame.
        o (Port): Default output, provides DataFrame and meta.

    Args:
        length (float): Window length.
        step (float): Step length.
        average (str): Averaging method, 'mean' or 'median'.
    """
    def __init__(self, length, step, average='median'):
        super().__init__(length=length, step=step)
        # NOTE(review): any value other than 'mean' silently falls back to
        # median — confirm whether invalid values should raise instead.
        if average == 'mean':
            self._average_method = np.mean
        else:
            self._average_method = np.median
    def update(self):
        # Let TimeWindow accumulate samples and decide when a window is ready.
        super().update()
        if not self.o.ready(): return
        # Square every sample, reduce each column with the chosen average,
        # and emit a single row stamped with the window's last timestamp.
        self.o.data = pd.DataFrame((self.o.data ** 2).apply(self._average_method),
                                   columns=[self.i.data.index[-1]]).T
import py
import sys
class AppTestLocaleTrivia:
    """PyPy app-level tests for the `_locale` module (Python 2 era).

    NOTE(review): the `raises` and `skip` names used inside the test methods
    are injected at app level by PyPy's test framework, not module globals.
    Likewise, each interp-level `cls.w_foo` attribute appears app-level as
    `self.foo`.
    """
    spaceconfig = dict(usemodules=['_locale', 'unicodedata'])
    def setup_class(cls):
        # Pick platform-appropriate locale names; these are wrapped objects
        # visible to the app-level test methods.
        if sys.platform != 'win32':
            cls.w_language_en = cls.space.wrap("C")
            cls.w_language_utf8 = cls.space.wrap("en_US.utf8")
            cls.w_language_pl = cls.space.wrap("pl_PL.utf8")
            cls.w_encoding_pl = cls.space.wrap("utf-8")
        else:
            cls.w_language_en = cls.space.wrap("English_US")
            cls.w_language_utf8 = cls.space.wrap("English_US.65001")
            cls.w_language_pl = cls.space.wrap("Polish_Poland.1257")
            cls.w_encoding_pl = cls.space.wrap("cp1257")
        import _locale
        # check whether used locales are installed, otherwise the tests will
        # fail
        current = _locale.setlocale(_locale.LC_ALL)
        try:
            try:
                # some systems are only UTF-8 oriented
                try:
                    _locale.setlocale(_locale.LC_ALL,
                                      cls.space.str_w(cls.w_language_en))
                except _locale.Error:
                    # Fall back to the UTF-8 locale and use it as "en".
                    _locale.setlocale(_locale.LC_ALL,
                                      cls.space.str_w(cls.w_language_utf8))
                    cls.w_language_en = cls.w_language_utf8
                _locale.setlocale(_locale.LC_ALL,
                                  cls.space.str_w(cls.w_language_pl))
            except _locale.Error:
                py.test.skip("necessary locales not installed")
            # Windows forbids the UTF-8 character set since Windows XP.
            try:
                _locale.setlocale(_locale.LC_ALL,
                                  cls.space.str_w(cls.w_language_utf8))
            except _locale.Error:
                # Signal "no UTF-8 locale" by removing the attribute; tests
                # probe for it with hasattr(self, 'language_utf8').
                del cls.w_language_utf8
        finally:
            # Always restore the process-wide locale we started from.
            _locale.setlocale(_locale.LC_ALL, current)
    def test_import(self):
        # Both the built-in module and the pure-Python wrapper must import.
        import _locale
        assert _locale
        import locale
        assert locale
    def test_constants(self):
        import sys
        # Constants every platform must expose.
        _CONSTANTS = (
            'LC_CTYPE',
            'LC_NUMERIC',
            'LC_TIME',
            'LC_COLLATE',
            'LC_MONETARY',
            'LC_ALL',
            'CHAR_MAX',
            # These are optional
            #'LC_MESSAGES',
            #'LC_PAPER',
            #'LC_NAME',
            #'LC_ADDRESS',
            #'LC_TELEPHONE',
            #'LC_MEASUREMENT',
            #'LC_IDENTIFICATION',
        )
        import _locale
        for constant in _CONSTANTS:
            assert hasattr(_locale, constant)
        # HAVE_LANGINFO
        if sys.platform != 'win32':
            _LANGINFO_NAMES = ('RADIXCHAR THOUSEP CRNCYSTR D_T_FMT D_FMT '
                'T_FMT AM_STR PM_STR CODESET T_FMT_AMPM ERA ERA_D_FMT '
                'ERA_D_T_FMT ERA_T_FMT ALT_DIGITS YESEXPR NOEXPR '
                '_DATE_FMT').split()
            # Day/month names come in full and abbreviated variants.
            for i in range(1, 8):
                _LANGINFO_NAMES.append("DAY_%d" % i)
                _LANGINFO_NAMES.append("ABDAY_%d" % i)
            for i in range(1, 13):
                _LANGINFO_NAMES.append("MON_%d" % i)
                _LANGINFO_NAMES.append("ABMON_%d" % i)
            for constant in _LANGINFO_NAMES:
                assert hasattr(_locale, constant)
    def test_setlocale(self):
        import _locale
        # Category must be an int, locale must be a string, and unknown
        # categories raise _locale.Error.
        raises(TypeError, _locale.setlocale, "", self.language_en)
        raises(TypeError, _locale.setlocale, _locale.LC_ALL, 6)
        raises(_locale.Error, _locale.setlocale, 123456, self.language_en)
        # Querying (None or omitted) returns the current locale string.
        assert _locale.setlocale(_locale.LC_ALL, None)
        assert _locale.setlocale(_locale.LC_ALL)
    def test_string_ulcase(self):
        if not hasattr(self, 'language_utf8'):
            skip("No utf8 locale on this platform")
        # Python 2: string.lowercase/uppercase are locale-dependent.
        import _locale, string
        lcase = "abcdefghijklmnopqrstuvwxyz"
        ucase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        _locale.setlocale(_locale.LC_ALL, self.language_utf8)
        assert string.lowercase == lcase
        assert string.uppercase == ucase
        _locale.setlocale(_locale.LC_ALL, self.language_en)
        # the asserts below are just plain wrong
        #    assert string.lowercase != lcase
        #    assert string.uppercase != ucase
    def test_localeconv(self):
        import _locale
        # Expected localeconv() result for the "C" locale.
        lconv_c = {
            "currency_symbol": "",
            "decimal_point": ".",
            "frac_digits": _locale.CHAR_MAX,
            "grouping": [],
            "int_curr_symbol": "",
            "int_frac_digits": _locale.CHAR_MAX,
            "mon_decimal_point": "",
            "mon_grouping": [],
            "mon_thousands_sep": "",
            "n_cs_precedes": _locale.CHAR_MAX,
            "n_sep_by_space": _locale.CHAR_MAX,
            "n_sign_posn": _locale.CHAR_MAX,
            "negative_sign": "",
            "p_cs_precedes": _locale.CHAR_MAX,
            "p_sep_by_space": _locale.CHAR_MAX,
            "p_sign_posn": _locale.CHAR_MAX,
            "positive_sign": "",
            "thousands_sep": "" }
        _locale.setlocale(_locale.LC_ALL, "C")
        lconv = _locale.localeconv()
        for k, v in lconv_c.items():
            assert lconv[k] == v
    def test_strcoll(self):
        import _locale
        _locale.setlocale(_locale.LC_ALL, self.language_pl)
        assert _locale.strcoll("a", "b") < 0
        # Polish collation: a-ogonek sorts before "b", c-acute after it.
        assert _locale.strcoll(
            u"\N{LATIN SMALL LETTER A WITH OGONEK}".encode(self.encoding_pl),
            "b") < 0
        assert _locale.strcoll(
            u"\N{LATIN SMALL LETTER C WITH ACUTE}".encode(self.encoding_pl),
            "b") > 0
        assert _locale.strcoll("c", "b") > 0
        assert _locale.strcoll("b", "b") == 0
        raises(TypeError, _locale.strcoll, 1, "b")
        raises(TypeError, _locale.strcoll, "b", 1)
    def test_strcoll_unicode(self):
        import _locale
        _locale.setlocale(_locale.LC_ALL, self.language_pl)
        # strcoll also accepts unicode operands.
        assert _locale.strcoll(u"b", u"b") == 0
        assert _locale.strcoll(u"a", u"b") < 0
        assert _locale.strcoll(u"b", u"a") > 0
        raises(TypeError, _locale.strcoll, 1, u"b")
        raises(TypeError, _locale.strcoll, u"b", 1)
    def test_strxfrm(self):
        # TODO more tests would be nice
        import _locale
        _locale.setlocale(_locale.LC_ALL, "C")
        a = "1234"
        b = _locale.strxfrm(a)
        # strxfrm returns a new string object; in "C" it compares equal.
        assert a is not b
        assert a == b
        raises(TypeError, _locale.strxfrm, 1)
        _locale.setlocale(_locale.LC_ALL, self.language_pl)
        a = "1234"
        b = _locale.strxfrm(a)
        assert a is not b
    def test_str_float(self):
        import _locale
        import locale
        # Decimal point follows the locale: '.' in en/C, ',' in pl.
        _locale.setlocale(_locale.LC_ALL, self.language_en)
        assert locale.str(1.1) == '1.1'
        _locale.setlocale(_locale.LC_ALL, self.language_pl)
        assert locale.str(1.1) == '1,1'
    def test_text(self):
        import sys
        if sys.platform == 'win32':
            skip("No gettext on Windows")
        # TODO more tests would be nice
        # With no catalog bound, gettext family returns the message itself.
        import _locale
        assert _locale.gettext("1234") == "1234"
        assert _locale.dgettext(None, "1234") == "1234"
        assert _locale.dcgettext(None, "1234", _locale.LC_MESSAGES) == "1234"
        assert _locale.textdomain("1234") == "1234"
    def test_nl_langinfo(self):
        import sys
        if sys.platform == 'win32':
            skip("No langinfo on Windows")
        import _locale
        # Every langinfo constant that must be present on POSIX.
        langinfo_consts = [
            'ABDAY_1',
            'ABDAY_2',
            'ABDAY_3',
            'ABDAY_4',
            'ABDAY_5',
            'ABDAY_6',
            'ABDAY_7',
            'ABMON_1',
            'ABMON_10',
            'ABMON_11',
            'ABMON_12',
            'ABMON_2',
            'ABMON_3',
            'ABMON_4',
            'ABMON_5',
            'ABMON_6',
            'ABMON_7',
            'ABMON_8',
            'ABMON_9',
            'CODESET',
            'CRNCYSTR',
            'DAY_1',
            'DAY_2',
            'DAY_3',
            'DAY_4',
            'DAY_5',
            'DAY_6',
            'DAY_7',
            'D_FMT',
            'D_T_FMT',
            'MON_1',
            'MON_10',
            'MON_11',
            'MON_12',
            'MON_2',
            'MON_3',
            'MON_4',
            'MON_5',
            'MON_6',
            'MON_7',
            'MON_8',
            'MON_9',
            'NOEXPR',
            'RADIXCHAR',
            'THOUSEP',
            'T_FMT',
            'YESEXPR',
            'AM_STR',
            'PM_STR',
        ]
        for constant in langinfo_consts:
            assert hasattr(_locale, constant)
        # Spot-check well-known "C" locale values.
        _locale.setlocale(_locale.LC_ALL, "C")
        assert _locale.nl_langinfo(_locale.ABDAY_1) == "Sun"
        assert _locale.nl_langinfo(_locale.ABMON_1) == "Jan"
        assert _locale.nl_langinfo(_locale.T_FMT) == "%H:%M:%S"
        assert _locale.nl_langinfo(_locale.YESEXPR) == '^[yY]'
        assert _locale.nl_langinfo(_locale.NOEXPR) == "^[nN]"
        assert _locale.nl_langinfo(_locale.THOUSEP) == ''
        raises(ValueError, _locale.nl_langinfo, 12345)
        raises(TypeError, _locale.nl_langinfo, None)
    def test_bindtextdomain(self):
        import sys
        if sys.platform == 'win32':
            skip("No textdomain on Windows")
        # TODO more tests would be nice
        # Empty domain names are rejected.
        import _locale
        raises(OSError, _locale.bindtextdomain, '', '')
        raises(OSError, _locale.bindtextdomain, '', '1')
    def test_bind_textdomain_codeset(self):
        import sys
        if sys.platform == 'win32':
            skip("No textdomain on Windows")
        import _locale
        # None queries; a string sets and sticks for subsequent queries.
        assert _locale.bind_textdomain_codeset('/', None) is None
        assert _locale.bind_textdomain_codeset('/', 'UTF-8') == 'UTF-8'
        assert _locale.bind_textdomain_codeset('/', None) == 'UTF-8'
        assert _locale.bind_textdomain_codeset('', '') is None
    def test_getdefaultlocale(self):
        import sys
        if sys.platform != 'win32':
            skip("No _getdefaultlocale() to test")
        import _locale
        # Windows-only helper: (language, "cpXXX" encoding).
        lang, encoding = _locale._getdefaultlocale()
        assert lang is None or isinstance(lang, str)
        assert encoding.startswith('cp')
|
import pkgutil
import sys, inspect
from typing import List
def list_modules_in_package(pkg_name) -> List[str]:
    """
    :param pkg_name: imported package (a module object with __path__)
    :return: list of module names inside the package (modules will be loaded)
    """
    import importlib
    modules = []
    for _, modname, ispkg in pkgutil.iter_modules(pkg_name.__path__):
        if not ispkg:
            # BUG FIX: importer.find_module(...).load_module(...) is
            # deprecated (removed in Python 3.12) and registered the module
            # under its bare name.  importlib.import_module loads it under
            # the full dotted name, which is what find_class_in_module()
            # looks up in sys.modules.
            importlib.import_module(pkg_name.__name__ + '.' + modname)
            modules.append(modname)
    return modules
def find_class_in_module(module_name: str, class_name: str):
    """
    :param module_name: full module name. F.e. catcher_modules.database.postgres
    :param class_name: lower-cased class name to search for. F.e. postgres
    :return: found class or None
    """
    # getmembers() returns members sorted by name; take the first class whose
    # lower-cased name matches, or None when nothing matches.
    classes = inspect.getmembers(sys.modules[module_name], inspect.isclass)
    return next(
        (cls for _, cls in classes if cls.__name__.lower() == class_name),
        None,
    )
|
#!/usr/bin/env python3
# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import numpy as np
def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
    """Run a reference conv2d through PaddlePaddle's static-graph executor.

    Args:
        inputs_data: [input_tensor_data, filter_data] as nested lists/arrays.
        input_shape: full input shape including the batch dimension.
        filter_size: (c_out, c_in // groups, k_h, k_w) filter shape.
        attrs: CINN attribute store with optional stride / padding /
            dilation / groups / data_format entries.
        is_depthwise: True to rewrite the filter shape for a depthwise conv.

    Returns:
        (output ndarray, [output shape without the batch dimension])

    Raises:
        ValueError: on an unrecognized key in attrs.attr_store.
    """
    main_program = fluid.Program()
    paddle.enable_static()
    with fluid.program_guard(main_program, fluid.Program()):
        # Defaults, possibly overridden from attrs below.
        padding = [0, 0]
        stride = [1, 1]
        dilation = [1, 1]
        data_format = "NCHW"
        groups = 1
        for key in attrs.attr_store:
            if key == "stride":
                stride = attrs.get_attr("stride")
            elif key == "padding":
                padding = attrs.get_attr("padding")
            elif key == "dilation":
                dilation = attrs.get_attr("dilation")
            elif key == "groups":
                groups = attrs.get_attr("groups")
            elif key == "data_format":
                data_format = attrs.get_attr("data_format")
            else:
                raise ValueError("attr_store {} is not supported".format(key))
        img = fluid.layers.data(
            name='img', shape=input_shape[1:], dtype='float32')
        if is_depthwise:
            # Channel axis position depends on the layout.
            cin_index = 1 if data_format == "NCHW" else 3
            filter_size_new = [
                filter_size[1] * input_shape[cin_index],
                filter_size[0] // groups, filter_size[2], filter_size[3]
            ]
        else:
            filter_size_new = filter_size
        param = fluid.initializer.NumpyArrayInitializer(
            np.array(
                inputs_data[1]).reshape(filter_size_new).astype("float32"))
        # filter: (c_out, c_in // group, kernel_h, kernel_w)
        filter_hw = list(filter_size_new[2:4])
        if data_format == "NHWC":
            filter_hw = list(filter_size_new[1:3])
        # BUG FIX: ints have no .copy(); the original `stride.copy()` raised
        # AttributeError whenever a scalar stride/padding/dilation was given.
        if isinstance(stride, int):
            stride = [stride, stride]
        if isinstance(padding, int):
            padding = [padding, padding]
        if isinstance(dilation, int):
            dilation = [dilation, dilation]
        res = fluid.layers.conv2d(
            input=img,
            num_filters=filter_size_new[0],
            filter_size=filter_hw,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            param_attr=param,
            data_format=data_format)
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        x = np.array(inputs_data[0]).reshape(input_shape).astype("float32")
        output = exe.run(feed={"img": x}, fetch_list=[res])
        output = np.array(output)
        print("output's shape is:", output.shape)
    res_shape = output.shape[1:]
    # Both branches of the original if/else returned the same value;
    # collapsed to a single return.
    return output, [res_shape]
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
list
'''
# Demonstrate mutable list operations; the printed transcript below is the
# intended behavior of this tutorial script.
print("==========list=============")
students = ["jack", "tony", "john"]
students.append("kevin")        # -> jack, tony, john, kevin
students.insert(1, "lisa")      # -> jack, lisa, tony, john, kevin
students.pop()                  # removes "kevin" (last)
students.pop(0)                 # removes "jack" (first)
print(students)                 # ['lisa', 'tony', 'john']
print(len(students))
print(students[0])
print(students[-1])
'''
tuple
'''
# Tuples are immutable, but a mutable element (the inner list) can still be
# modified in place.
print("\n==========tuple=============")
t = (1, 2, 3, 4, 5, [6, 7])
print(t[2])
print(t[-1])
t[-1][1] = 8                    # legal: mutates the list inside the tuple
print(t)
'''
练习
'''
# Exercise: index into a nested list.
print("\n==========练习=============")
L = [
    ['Apple', 'Google', 'Microsoft'],
    ['Java', 'Python', 'Ruby', 'PHP'],
    ['Adam', 'Bart', 'Lisa']
]
print(L[0][0])
print(L[1][1])
print(L[-1][-1])
|
from textwrap import dedent
from fjord.base.tests import TestCase
from fjord.mailinglist.tests import MailingListFactory
class TestMailingList(TestCase):
    """Tests for MailingList.recipient_list parsing of the members field."""
    def test_recipients_empty(self):
        # An empty members field yields no recipients.
        ml = MailingListFactory(members=u'')
        assert ml.recipient_list == []
    def test_recipients_whitespace(self):
        # Whitespace-only content is ignored.
        ml = MailingListFactory(members=u' \n ')
        assert ml.recipient_list == []
    def test_recipients_members(self):
        # One address per line; the result comes back sorted.
        ml = MailingListFactory(members=u'foo@example.com\nbar@example.com')
        assert ml.recipient_list == [u'bar@example.com', u'foo@example.com']
    def test_recipients_complex(self):
        # Comment lines, trailing comments and blank lines are all stripped.
        ml = MailingListFactory(
            members=dedent("""
            # foo
            foo@example.com
            # bar
            bar@example.com
            baz@example.com # baz is awesome
            """)
        )
        assert (
            ml.recipient_list ==
            [u'bar@example.com', u'baz@example.com', u'foo@example.com']
        )
|
from __future__ import print_function
from PIL import Image
import PIL.ImageOps as po
import PIL.ImageDraw as pd
import PIL.ImageFont as pf
import PIL.ImageChops as pc
'''
f = Image.open("Inconsolata2.png")
w,h = f.size
sx = 0
sy = 0
dx = 72
dy = 58
nx = 10
ny = 29
fx = 28
fy = 16
fw = 16
fh = 32
imgw = 32
imgh = 12
img = Image.new('RGBA',(fw*imgw,fh*imgh))
fl = []
y = sy
for i in range(ny):
x = sx
for j in range(nx):
fn = i*nx+j+33
box = (x,y,x+dx,y+dy)
x += dx
new = f.crop(box)
new.save("gen/%sbig.png"%fn)
box2 = (fx,fy,fx+fw,fy+fh)
ft = new.crop(box2)
fl.append((fn,ft))
ft.save("gen/%s.png"%fn)
y+=dy
import math
for n,ft in fl:
img.paste(ft,(n%imgw*fw,n/imgw*fh))
#print(n,(n%imgw*fw,n/imgh*fh))
# INV
r,g,b,a = img.split()
rgb = Image.merge('RGB',(r,g,b))
invrgb = PIL.ImageOps.invert(rgb)
r,g,b = invrgb.split()
inv = Image.merge('RGBA',(r,g,b,a))
black = Image.new('RGBA',(img.size),'black')
Image.blend(black,inv,0.01).save("genb.png")
#INV END
inv.save("geninv.png")
img.save("gen.png")
'''
import string
# Build the set of characters to render: ASCII codes 0-127, keeping only
# printable characters and additionally dropping the printable control
# characters (\t, \n, ... all have ord < 20).
alltext = ""
for i in range(128):
    c = chr(i)
    if c not in string.printable:
        c = ""
    elif ord(c) < 20:
        c = ""
    alltext += c
# Load the font used by render(); hard-coded Debian/Ubuntu install path.
path = '/usr/share/fonts/truetype/inconsolata/Inconsolata.otf'
font = pf.truetype(path,18)
def render(text):
    """Render `text` as a white-on-transparent RGBA image.

    The glyph coverage drawn into `te` serves two purposes: it becomes the
    (antialiased) alpha channel, and a hard 0/255 version of it masks the
    covered pixels to pure white.
    """
    # NOTE(review): font.getsize() was removed in Pillow 10 — confirm the
    # pinned Pillow version, or port to getbbox()/getlength().
    w,h = font.getsize(text)
    bg = Image.new('RGB',(w,h),(0,0,0))
    al = Image.new("L",bg.size,'black')
    #pd.Draw(bg).text((0,0),text,font=font,fill=(0,0,0))
    te = Image.new('L',bg.size,0)   # glyph coverage mask
    dr = pd.Draw(te)
    dr.text((0,0),text,font=font,fill="white")
    al = pc.lighter(al,te)          # alpha := max(black, coverage)
    cl = Image.new("RGBA",bg.size,(255,255,255))
    ma = Image.eval(te,lambda p: 255*(int(p!=0)))   # hard 0/255 mask
    bg = Image.composite(cl,bg,ma)  # force covered pixels to white
    bg.putalpha(al)
    #print(repr(bg.tostring()))
    #bg.save("genf.png")
    return bg
def genc(char):
    """Render `char` and emit it as C source for a 9x20 ARGB pixel array.

    Saves the rendered glyph to gen/gen<ord>.png and returns the text of a
    `uint32_t _font_<ord>[9*20]` initializer, one 0xAARRGGBB word per pixel.
    """
    #print(repr(s))
    im = render(char)
    im.save("gen/gen%d.png" % ord(char))
    data = list(im.getdata())
    cs = "uint32_t _font_%d[9*20] =\n{\n" % ord(char)
    for y in range(20):
        for x in range(9):
            i = y * 9 + x
            r, g, b, a = data[i]
            # BUG FIX: `for x in a,r,g,b` (unparenthesized tuple inside a
            # comprehension) is Python-2-only syntax and a SyntaxError on
            # Python 3; parenthesize the tuple.  The loop variable is also
            # renamed so it no longer shadows the pixel index `x`.
            num = ''.join(["%02X" % chan for chan in (a, r, g, b)])
            cs += "0x%s," % num
        cs += '\n'
    cs += '};\n'
    return cs
# Emit the C source: one pixel array per rendered glyph, followed by a
# 128-entry lookup table mapping char code -> glyph array (0 = missing).
# Using a context manager guarantees the file is closed even if genc()
# raises partway through (the original open()/close() pair did not).
with open('a.txt', 'w') as f:
    for ch in alltext:
        f.write(genc(ch))
    glyph = "{"
    for i in range(128):
        if i % 8 == 0:
            glyph += '\n'
        if chr(i) not in alltext:
            glyph += "0, "
        else:
            glyph += '_font_%d, ' % i
    glyph += '\n};'
    f.write(glyph)
# -*- coding: utf-8 -*-
"""
Main pysimu module
Created on Thu Aug 14 20:21:56 2014
/home/jmmauricio/Documents/private/pyWork11/PyPsat/src
@author: jmmauricio-m
"""
import numpy as np
from scipy.integrate import ode
class sim:
    '''
    Class to perform simulations of dx/dt = f(t, x) with scipy's dopri5
    integrator, recording time (T), state (X) and output (Y) trajectories.

    Callers are expected to set `f` (dynamics), `x_0` (initial state) and
    optionally override `h` (output map, signature h(t, x)) before run().
    '''
    def __init__(self):
        self.x = np.array([])   # current state
        self.t = 0.0            # current time
        self.T = np.array([])   # recorded time points
        self.X = np.array([])   # recorded states (one row per point)
        self.Y = np.array([])   # recorded outputs (one row per point)
        self.max_step = 0.1
        self.nsteps = 5000
    def h(self, t, x):
        """Default output map: identity on the state.

        BUG FIX: the original signature was h(self, x), but every call site
        (odeout and run) invokes self.h(t, x), so run() crashed with a
        TypeError unless h was overridden.  The default now matches the
        documented h(t, x) contract.
        """
        return x
    def odefun(self, t, x):
        # Keep a copy of the latest state for odeout(), then delegate to
        # the user-supplied dynamics.
        self.x = x
        return self.f(t, x)
    def odeout(self, t, x):
        # Solver callback on every accepted step: append to trajectories.
        self.T = np.hstack((self.T, t))
        self.X = np.vstack((self.X, x))
        self.Y = np.vstack((self.Y, self.h(t, self.x)))
        return self.h(t, self.x)
    def run(self, t_end):
        """Integrate from the current time up to t_end."""
        r = ode(self.odefun)
        r.set_integrator('dopri5', max_step=self.max_step, nsteps=self.nsteps)
        r.set_solout(self.odeout)
        # First run: seed the trajectories with the initial condition.
        if len(self.X) == 0:
            self.X = self.x_0
            self.T = np.array(self.t)
            self.Y = np.array(self.h(self.t, self.x_0))
        r.set_initial_value(self.x_0, self.t)
        r.integrate(t_end)
        self.t = t_end
        self.r = r
        self.x = r.y
from numba import jit
@jit(cache=True)
def f(x):
    """RL-circuit dynamics: di/dt = (V - R*i) / L for state x = [i]."""
    L = 0.1   # inductance
    R = 0.1   # resistance
    V = 1.0   # source voltage
    dx = 1.0/L*(V - R*x[0])
    return np.array([dx])
@jit(cache=True)
def forward_euler(x,Dt):
    """Advance state x by one explicit-Euler step of size Dt, in place.

    NOTE(review): f(x) returns a length-1 array, so `x[0] = x[0] + Dt*(f(x))`
    assigns a size-1 array into a scalar slot — confirm this behaves as
    intended under both plain numpy and numba's jit.
    """
    x[0] = x[0] + Dt*(f(x))
    return x
@jit
def simulate():
    """Simulate the RL circuit with forward Euler over [0, 1) s.

    Returns:
        (n, 2) ndarray of [t, i] samples, decimated to roughly one sample
        every `decimation` integration steps.
    """
    Dt = 0.0001
    x_0 = np.array([0.0])
    x = x_0
    t_end = 1.0
    decimation = 100
    it_decimation = 0
    t = 0
    out = [[t, x[0]]]
    for t in np.arange(0.0, t_end, Dt):
        x = forward_euler(x, Dt)
        it_decimation += 1
        if it_decimation > decimation:
            # BUG FIX: the original wrote `it_decimation==0` — a no-op
            # comparison instead of an assignment — so the counter never
            # reset and every step after the first 101 was recorded,
            # defeating the decimation.
            it_decimation = 0
            out += [[t, x[0]]]
    return np.array(out)
if __name__ == '__main__':
    import time
    # Time the jitted simulation (the first call includes numba compile time)
    # and plot the resulting current trajectory.
    t_0 = time.time()
    out = simulate()
    print(time.time()-t_0)
    # Legacy example using the ode-based sim class, kept for reference:
    # simu_rl = sim()
    #
    # # parameters
    # R = 1.0
    # L = 50.0e-3
    # v = 1.0
    #
    # # dynamic system
    # def f(t,x):
    #
    #     i = x[0]
    #
    #     di = 1.0/L*(v - R*i)
    #
    #     return [di]
    #
    # # outputs functions
    # def h(t,x):
    #
    #     i = x[0]
    #
    #     p = i*v
    #
    #     return np.array(p)
    #
    # # initialization
    # i_0 = 0
    # x_0 = np.array([i_0])
    #
    # # system definition
    # simu_rl.f = f
    # simu_rl.x_0 = x_0
    # simu_rl.h = h
    # simu_rl.run(1.0)
    #
    # plot results
    import matplotlib.pyplot as plt
    fig = plt.figure( figsize=(6, 4))
    ax = fig.add_subplot(1,1,1)
    ax.plot(out[:,0],out[:,1], linewidth=2)
    fig.show()
|
from files.ListObject import functions, variableExist, functionExist
from files.Operations import add, subtract, multiply, divide
from files.FunctionsForSython import is_number, error
class Show():
    """Built-in `show` function: resolve a literal, a variable name, or a
    binary arithmetic expression to its display value.

    The original implementation repeated the same reduce loop eight times
    (once per operator spelling); it is factored into the _OPS table and
    the _reduce() helper below.  The dispatch order and all behavior are
    preserved.
    """

    # (separator, operation, error tag), tried in this exact order so the
    # spaced spelling ("a + b") wins over the bare symbol ("a+b").
    _OPS = (
        (" + ", add, "ErrorAddition"),
        ("+", add, "ErrorAddition"),
        (" - ", subtract, "ErrorSubstraction"),
        ("-", subtract, "ErrorSubstraction"),
        (" * ", multiply, "ErrorMultiplication"),
        ("*", multiply, "ErrorMultiplication"),
        (" / ", divide, "ErrorDivision"),
        ("/", divide, "ErrorDivision"),
    )

    def __init__(self):
        self.nbParameters = 1    # expects a single expression string
        self.returnValue = False

    def call(self, parameters):
        """Evaluate the single parameter and return its display value."""
        parameters = parameters[0]
        for separator, operation, error_tag in self._OPS:
            if separator in parameters:
                return self._reduce(parameters.split(separator),
                                    operation, error_tag)
        # No operator: a known variable, a quoted string, or a number.
        var = variableExist(parameters)
        if var:
            return var.value
        if len(parameters) > 1 and parameters[0] == '"' and parameters[-1] == '"':
            return parameters[1:-1]
        if is_number(parameters):
            return parameters
        error("ErrorConversion", "Impossible de convertir '"+parameters+"' en string")

    def _reduce(self, values, operation, error_tag):
        """Left-fold `values` with `operation`.

        The operand type comes from the first operand that names a known
        variable, otherwise it is inferred from the literals' shape
        (quotes -> str, decimal point -> float, else int).  On failure,
        `error()` is reported with `error_tag`, matching the original flow.
        """
        while len(values) > 1:
            var1 = variableExist(values[0])
            if not var1:
                var1 = variableExist(values[1])
            if var1:
                result, info = operation(values[0], values[1], var1.type_)
            elif '"' in values[0] or '"' in values[1]:
                result, info = operation(values[0], values[1], "str")
            elif '.' in values[0] or '.' in values[1]:
                result, info = operation(values[0], values[1], "float")
            else:
                result, info = operation(values[0], values[1], "int")
            if result:
                values[0] = info
                del values[1]
            else:
                error(error_tag, info)
        return values[0]
class Pause():
    """Built-in `pause` function: block until the user presses Enter."""
    def __init__(self):
        self.nbParameters = 0    # takes no parameters
        self.returnValue = False # produces no value
    def call(self):
        input()
class Enter():
    """Built-in `enter` function: prompt the user and return their input."""
    def __init__(self):
        self.nbParameters = 1    # the prompt expression
        self.returnValue = True
    def call(self, parameters):
        # Reuse Show to resolve the prompt (variable, literal or expression).
        result = input(Show().call(parameters))
        return result
class CanConvert():
    """Built-in `canConvert(value, type)`: report whether `value` (or the
    variable it names) can be converted to the given Sython type.

    Returns True for "str" always, True/False for "int" and "float"
    depending on convertibility, and False for any unknown type name.
    """
    def __init__(self):
        self.nbParameters = 2
        self.returnValue = True
    def call(self, parameters):
        value, type_ = parameters
        # If the first parameter names a variable, test its current value.
        var = variableExist(value)
        if var:
            value = var.value
        if type_ == "int":
            try:
                int(value)
                return True
            # BUG FIX: narrowed from a bare `except:` that swallowed every
            # exception (including KeyboardInterrupt/SystemExit); only
            # conversion failures should mean "cannot convert".
            except (ValueError, TypeError):
                return False
        elif type_ == "float":
            try:
                float(value)
                return True
            except (ValueError, TypeError):
                return False
        elif type_ == "str":
            # Everything has a string form.
            return True
        else:
            # Unknown target type.
            return False
def initBasicFunctions():
    """Register the built-in Sython functions in the global registry."""
    builtins = (
        ("show", Show()),
        ("pause", Pause()),
        ("enter", Enter()),
        ("canConvert", CanConvert()),
    )
    # Registration order matches the original: show, pause, enter, canConvert.
    for name, handler in builtins:
        functions.append([name, handler])
from collections import namedtuple
import csv
import os
import tweepy
from config import CONSUMER_KEY, CONSUMER_SECRET
from config import ACCESS_TOKEN, ACCESS_SECRET
DEST_DIR = 'data'
EXT = 'csv'
NUM_TWEETS = 100
Tweet = namedtuple('Tweet', 'id_str created_at text')
class UserTweets(object):
    """Fetch and persist the most recent tweets of a Twitter handle.

    NOTE(review): this is an exercise skeleton — __init__ calls
    list(self._get_tweets()) but _get_tweets() currently returns None,
    so construction fails until the stubs below are implemented.
    """
    def __init__(self, handle, max_id=None):
        """Get handle and optional max_id.
        Use tweepy.OAuthHandler, set_access_token and tweepy.API
        to create api interface.
        Use _get_tweets() helper to get a list of tweets.
        Save the tweets as data/<handle>.csv"""
        # ... (api setup to be implemented here)
        self._tweets = list(self._get_tweets())
        self._save_tweets()
    def _get_tweets(self):
        """Hint: use the user_timeline() method on the api you defined in init.
        See tweepy API reference: http://docs.tweepy.org/en/v3.5.0/api.html
        Use a list comprehension / generator to filter out fields
        id_str created_at text (optionally use namedtuple)"""
        pass  # stub: must return an iterable of Tweet-like records
    def _save_tweets(self):
        """Use the csv module (csv.writer) to write out the tweets.
        If you use a namedtuple get the column names with Tweet._fields.
        Otherwise define them as: id_str created_at text
        You can use writerow for the header, writerows for the rows"""
        pass  # stub: should write DEST_DIR/<handle>.EXT
    def __len__(self):
        """See http://pybit.es/python-data-model.html"""
        pass  # stub: should return len(self._tweets)
    def __getitem__(self, pos):
        """See http://pybit.es/python-data-model.html"""
        pass  # stub: should index/slice self._tweets
if __name__ == "__main__":
    # Demo: dump the five most recent tweets for a few known handles.
    for handle in ('pybites', 'techmoneykids', 'bbelderbos'):
        print('--- {} ---'.format(handle))
        user = UserTweets(handle)
        for tw in user[:5]:
            print(tw)
        print()
|
# -*- coding: utf-8 -*-
import pytest
from bravado_core.exception import SwaggerMappingError
from bravado_core.marshal import marshal_object
from bravado_core.spec import Spec
@pytest.fixture
def address_spec():
    """Swagger object spec: number / street_name / street_type (enum)."""
    return {
        'type': 'object',
        'properties': {
            'number': {
                'type': 'number'
            },
            'street_name': {
                'type': 'string'
            },
            'street_type': {
                'type': 'string',
                'enum': [
                    'Street',
                    'Avenue',
                    'Boulevard']
            }
        }
    }
@pytest.fixture
def address():
    """Sample value matching address_spec (non-ASCII street name on purpose)."""
    return {
        'number': 1600,
        'street_name': u'Ümlaut',
        'street_type': 'Avenue'
    }
def test_properties(empty_swagger_spec, address_spec, address):
    # Marshaling a plain object spec is the identity transform.
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert address == result
def test_array(empty_swagger_spec, address_spec):
    # An array-typed property marshals element-wise and round-trips.
    tags_spec = {
        'type': 'array',
        'items': {
            'type': 'string'
        }
    }
    address_spec['properties']['tags'] = tags_spec
    address = {
        'number': 1600,
        'street_name': 'Pennsylvania',
        'street_type': 'Avenue',
        'tags': [
            'home',
            'best place on earth',
            'cul de sac'
        ],
    }
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert result == address
def test_nested_object(empty_swagger_spec, address_spec):
    # A nested object-typed property marshals recursively and round-trips.
    location_spec = {
        'type': 'object',
        'properties': {
            'longitude': {
                'type': 'number'
            },
            'latitude': {
                'type': 'number'
            },
        }
    }
    address_spec['properties']['location'] = location_spec
    address = {
        'number': 1600,
        'street_name': 'Pennsylvania',
        'street_type': 'Avenue',
        'location': {
            'longitude': 100.1,
            'latitude': 99.9,
        },
    }
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert result == address
def test_model(minimal_swagger_dict, address_spec):
    # A model instance (Location) marshals back into its dict representation.
    location_spec = {
        'type': 'object',
        'properties': {
            'longitude': {
                'type': 'number'
            },
            'latitude': {
                'type': 'number'
            },
        }
    }
    minimal_swagger_dict['definitions']['Location'] = location_spec
    # The Location model type won't be built on schema ingestion unless
    # something actually references it. Create a throwaway response for this
    # purpose.
    location_response = {
        'get': {
            'responses': {
                '200': {
                    'description': 'A location',
                    'schema': {
                        '$ref': '#/definitions/Location',
                    }
                }
            }
        }
    }
    minimal_swagger_dict['paths']['/foo'] = location_response
    swagger_spec = Spec.from_dict(minimal_swagger_dict)
    address_spec['properties']['location'] = \
        swagger_spec.spec_dict['definitions']['Location']
    Location = swagger_spec.definitions['Location']
    address = {
        'number': 1600,
        'street_name': 'Pennsylvania',
        'street_type': 'Avenue',
        'location': Location(longitude=100.1, latitude=99.9),
    }
    expected_address = {
        'number': 1600,
        'street_name': 'Pennsylvania',
        'street_type': 'Avenue',
        'location': {
            'longitude': 100.1,
            'latitude': 99.9,
        }
    }
    result = marshal_object(swagger_spec, address_spec, address)
    assert expected_address == result
def test_object_not_dict_like_raises_error(
        empty_swagger_spec, address_spec):
    # Non-dict input for an object spec is a mapping error.
    i_am_not_dict_like = 34
    with pytest.raises(SwaggerMappingError) as excinfo:
        marshal_object(empty_swagger_spec, address_spec, i_am_not_dict_like)
    assert 'Expected dict' in str(excinfo.value)
def test_missing_properties_not_marshaled(
        empty_swagger_spec, address_spec, address):
    # A property absent from the value is simply omitted from the result.
    del address['number']
    expected_address = {
        'street_name': u'Ümlaut',
        'street_type': 'Avenue'
    }
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert expected_address == result
def test_property_set_to_None_not_marshaled(
        empty_swagger_spec, address_spec, address):
    # A non-required property explicitly set to None is dropped.
    address['number'] = None
    expected_address = {
        'street_name': u'Ümlaut',
        'street_type': 'Avenue'
    }
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert expected_address == result
def test_pass_through_additionalProperties_with_no_spec(
        empty_swagger_spec, address_spec, address):
    # additionalProperties: true passes unknown keys through untouched.
    address_spec['additionalProperties'] = True
    address['city'] = 'Swaggerville'
    expected_address = {
        'number': 1600,
        'street_name': u'Ümlaut',
        'street_type': 'Avenue',
        'city': 'Swaggerville',
    }
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert expected_address == result
def test_pass_through_property_with_no_spec(
        empty_swagger_spec, address_spec, address):
    # A property without a 'type' is passed through unchanged.
    del address_spec['properties']['street_name']['type']
    result = marshal_object(empty_swagger_spec, address_spec, address)
    assert address == result
def test_ref(minimal_swagger_dict, address_spec, address):
    # A $ref to a definition marshals like the referenced spec.
    minimal_swagger_dict['definitions']['Address'] = address_spec
    ref_spec = {'$ref': '#/definitions/Address'}
    # NOTE(review): uses Spec(...) directly where the other tests use
    # Spec.from_dict(...) — confirm this is intentional.
    swagger_spec = Spec(minimal_swagger_dict)
    result = marshal_object(swagger_spec, ref_spec, address)
    assert address == result
def test_recursive_ref_with_depth_1(recursive_swagger_spec):
    # A None child in a self-referential spec is dropped from the result.
    result = marshal_object(
        recursive_swagger_spec,
        {'$ref': '#/definitions/Node'},
        {'name': 'foo', 'child': None})
    assert result == {'name': 'foo'}
def test_recursive_ref_with_depth_n(recursive_swagger_spec):
    """Recursive refs marshal at every nesting level; None leaves vanish."""
    node = {
        'name': 'foo',
        'child': {
            'name': 'bar',
            'child': {
                'name': 'baz',
                'child': None,
            }
        }
    }
    marshaled = marshal_object(
        recursive_swagger_spec,
        {'$ref': '#/definitions/Node'},
        node)
    assert marshaled == {
        'name': 'foo',
        'child': {
            'name': 'bar',
            'child': {
                'name': 'baz',
            }
        }
    }
def test_marshal_with_nullable_property(empty_swagger_spec):
    """x-nullable lets an explicit None through for a required property."""
    object_spec = {
        'type': 'object',
        'required': ['x'],
        'properties': {
            'x': {
                'type': 'string',
                'x-nullable': True,
            }
        }
    }
    marshaled = marshal_object(empty_swagger_spec, object_spec, {'x': None})
    assert marshaled == {'x': None}
def test_marshal_with_non_nullable_property(empty_swagger_spec):
    """A required, non-nullable property set to None is a mapping error."""
    object_spec = {
        'type': 'object',
        'required': ['x'],
        'properties': {
            'x': {
                'type': 'string'
            }
        }
    }
    with pytest.raises(SwaggerMappingError) as excinfo:
        marshal_object(empty_swagger_spec, object_spec, {'x': None})
    assert 'is a required value' in str(excinfo.value)
def test_marshal_with_non_required_property(empty_swagger_spec):
    """An optional property set to None is simply dropped."""
    object_spec = {
        'type': 'object',
        'properties': {
            'x': {
                'type': 'string'
            }
        }
    }
    marshaled = marshal_object(empty_swagger_spec, object_spec, {'x': None})
    assert marshaled == {}
def test_marshal_with_required_property(empty_swagger_spec):
    """Omitting a value for a required property raises SwaggerMappingError."""
    object_spec = {
        'type': 'object',
        'required': ['x'],
        'properties': {
            'x': {
                'type': 'string'
            }
        }
    }
    with pytest.raises(SwaggerMappingError) as excinfo:
        marshal_object(empty_swagger_spec, object_spec, {'x': None})
    assert 'is a required value' in str(excinfo.value)
|
'''
Hamed Waezi
AI HW1
A*
Heuristic => Disjoint Patter database 4-4-4-3
'''
import heapq
import copy
import pickle as p
if __name__ == '__main__':
    # Board dimension (4 for the 15-puzzle).  Read eagerly so that `dim`
    # exists at module scope for the second __main__ block further down.
    dim = int(input('Dimensions: '))
    tiles = dim*dim  # total cell count including the blank (unused below)
class Node:  # Node is the state
    """One sliding-puzzle board state / A* search-tree node.

    Class attributes ``dbs`` (the 4-4-4-3 disjoint pattern databases),
    ``nodesFound`` and ``nodesNotFound`` (hit/miss counters) are attached
    externally by the __main__ block before any call to f().
    """

    def __init__(self, n, data, pp, blank, g):
        self.n = n            # board dimension
        self.data = data      # n x n list of lists; 0 marks the blank
        self.pp = pp          # parent node, None for the root
        self.blank = blank    # (row, col) of the blank
        self.hash = None      # full-board hash, computed lazily
        self.heuristic = None  # kept for interface compatibility; unused
        self.g = g            # path cost from the root
        self.subHashes = [0, 0, 0, 0]
        self.subData = [0, 0, 0, 0]
        # Tile groups of the 4-4-4-3 disjoint pattern databases; the blank
        # (0) belongs to every group.
        groups = [{1, 2, 5, 6, 0}, {3, 4, 7, 8, 0},
                  {9, 10, 13, 14, 0}, {11, 12, 15, 0}]
        for idx in range(4):
            # Mask out every tile the current database does not track.
            masked = copy.deepcopy(self.data)
            for i in range(self.n):
                for j in range(self.n):
                    if masked[i][j] not in groups[idx]:
                        masked[i][j] = -1
            self.subData[idx] = masked
            # Rolling polynomial hash of the masked board, used as the
            # lookup key into pattern database `idx`.
            hashBase = 293
            hashMode = 100000000000000000007
            hashh = 1
            for i in range(self.n):
                for j in range(self.n):
                    hashh = (hashh * hashBase + masked[i][j]) % hashMode
            self.subHashes[idx] = hashh

    def __str__(self):
        # Flat list-of-rows representation; the output writer strips the
        # brackets.  (The original also built a row-per-line string and
        # discarded it -- dead code, removed.)
        return self.data.__str__()

    def __hash__(self):
        """Rolling hash of the full board, cached after the first call."""
        if self.hash is not None:
            return self.hash
        hashBase = 67
        hashMode = 10 ** 12 + 7  # fix: int modulus instead of the float 1e12+7
        h = 1
        for i in range(self.n):
            for j in range(self.n):
                h = (h * hashBase + self.data[i][j]) % hashMode
        self.hash = int(h)
        return self.hash

    def __gt__(self, other):
        return self.f() > other.f()

    def __lt__(self, other):
        return self.f() < other.f()

    def __eq__(self, other):
        # Fix: compare the actual boards.  The previous hash-only equality
        # treated colliding states as identical, wrongly pruning them from
        # the search.  Hashes are still compared first as a fast path.
        return self.__hash__() == other.__hash__() and self.data == other.data

    def move(self, pp, direction):
        """Return the child state reached by sliding the blank one step.

        direction: 0=UP, 1=DOWN, 2=RIGHT, 3=LEFT (movement of the blank).
        `pp` supplies the path cost (g = pp.g + 1, or 1 when pp is None);
        the new node's parent pointer is always `self`.  The caller must
        ensure the move stays on the board.
        """
        g = 1 if pp is None else pp.g + 1
        dr, dc = [(-1, 0), (1, 0), (0, 1), (0, -1)][direction]
        r, c = self.blank
        nr, nc = r + dr, c + dc
        newData = copy.deepcopy(self.data)
        newData[r][c] = newData[nr][nc]
        newData[nr][nc] = 0
        return Node(n=self.n, data=newData, pp=self, blank=(nr, nc), g=g)

    def f(self):
        """A* evaluation: g plus the summed pattern-database heuristic."""
        self.res = 0
        for i in range(4):
            key = int(self.subHashes[i])
            if key in Node.dbs[i]:
                Node.nodesFound += 1
                self.res += Node.dbs[i][key]
            else:
                Node.nodesNotFound += 1
        return self.res + self.g
class Puzzle: # it is the current puzzle
    """A* driver: reads a board from stdin, checks solvability, runs search."""

    def countInversions(self,):
        # Flatten the board row-major, then count out-of-order tile pairs,
        # ignoring the blank (0).
        dd = []
        for i in range (0, self.n):
            for j in range(0,self.n):
                dd.append(self.root.data[i][j])
        inversions = 0
        for i in range(0,self.n*self.n-1):
            for j in range(i+1, self.n*self.n):
                if dd[j] != 0 and dd[i] != 0 and dd[i] > dd[j]:
                    inversions = inversions + 1
        print('# Inversions : '+str(inversions))
        return inversions

    def isSolvable(self,):
        # Classic solvability criterion: odd widths need an even inversion
        # count; even widths couple inversion parity with the parity of the
        # blank's row offset from the bottom.
        # NOTE(review): verify the even-width branch against the standard
        # formulation before relying on it for n != 4 -- TODO confirm.
        inversions = self.countInversions()
        if self.n % 2 == 1:
            return inversions % 2 == 0
        else:
            return (inversions % 2 == 0 and ((self.root.blank[0] - self.n) % 2 == 1)) or (inversions % 2 == 1 and ((self.root.blank[0] - self.n) % 2 == 0))

    def __init__(self, n,): # `n` is the dim of puzzle
        self.states = set() # it holds hashes pointing to states
        self.root= []           # input rows; replaced by the root Node below
        blank = None            # (row, col) of the 0 tile in the input
        self.nodes = []         # open list: binary heap ordered by Node.f()
        self.nodesExpanded = 1
        self.nodesDeleted = 0
        self.n = n
        # Build the goal board 1..n*n-1 with the blank in the last cell and
        # remember only its hash, so verify() is a single comparison.
        goal = []
        for i in range(0,self.n):
            temp = []
            for j in range(0,self.n):
                temp.append(i * self.n + j + 1)
            goal.append(temp)
        goal[i][j] = 0  # bottom-right cell becomes the blank
        goal = Node(n=self.n,data=goal,pp=None,blank=(self.n - 1,self.n - 1), g = 0)
        self.goalhash = goal.__hash__()
        print('Input your matrix')
        # Read n whitespace-separated rows; remember where the blank is.
        for i in range(0, self.n):
            temp = input().split()
            temp = list(map(int, temp))
            if len(temp) != self.n:
                raise Exception("Bad Input\n"+"Dimension is: "+str(self.n))
            for j in range(0, self.n):
                if temp[j] == 0:
                    blank = (i,j)
            self.root.append(temp)
        # Hard-coded test boards kept from development:
        # self.root=[[13,2,10,3],[1,12,8,4],[5,0,9,6],[15,14,11,7]]
        # blank=(2,1)
        # self.root=[[1,2,3,4],[0,6,7,8],[5,10,11,12],[9,13,14,15]]
        # blank=(1,0)
        #### DEVIL'S CONFIGURATION
        # self.root=[[0, 15, 8, 3], [12, 11, 7, 4] ,[14, 10, 5, 6], [9, 13, 2, 1]]
        # blank=(0,0)
        #####
        # self.root=[[3, 4, 8, 12], [7, 5, 10, 14], [0, 1, 6, 15], [2, 9, 13, 11]]
        # blank=(2,0)
        self.root = Node(n=self.n,data=self.root, pp=None, blank=blank, g=1)
        self.solvable = self.isSolvable()
        heapq.heappush(self.nodes,self.root)
        self.states.add(self.root)

    def verify(self, node):
        # Goal test by comparing full-board hashes.
        return node.__hash__() == self.goalhash

    def run(self,):
        """Best-first expansion loop; returns the goal Node (follow .pp to
        recover the path) or None when the board is unsolvable."""
        if not self.solvable:
            print ('is not solvable')
            return None
        print('search started ...')
        iteration = 0
        while True:
            iteration += 1
            bestNode = heapq.heappop(self.nodes)
            blank = bestNode.blank
            # Collect the legal blank moves: 0=UP 1=DOWN 2=RIGHT 3=LEFT.
            moves = []
            if blank[0] > 0:
                moves.append(0)
            if blank[0] < self.n-1 :
                moves.append(1)
            if blank[1] > 0 :
                moves.append(3)
            if blank[1] < self.n-1 :
                moves.append(2)
            for i in moves:
                newNode = bestNode.move(pp=bestNode, direction=i)
                if newNode in self.states:
                    # Duplicate state: drop it.
                    self.nodesDeleted = self.nodesDeleted + 1
                    del newNode
                else:
                    self.nodesExpanded = self.nodesExpanded + 1
                    if self.nodesExpanded % 5000 == 0:
                        print(self.nodesExpanded)  # progress heartbeat
                    # NOTE(review): the goal test happens on generation, not
                    # on pop, so optimality depends on heuristic consistency.
                    if self.verify(newNode):
                        print('Done : ' + str(newNode.f()))
                        return newNode
                    self.states.add(newNode)
                    heapq.heappush(self.nodes,newNode)
if __name__ == '__main__':
    fileName = input('output file name : ')
    # Disjoint pattern database initialization (4-4-4-3 partition).
    # Node.dbs survives module re-runs in the same interpreter session.
    try:
        Node.dbs
        print('Databases already loaded')
    except AttributeError:  # fix: only "not yet loaded", not a bare except
        Node.dbs = [{}, {}, {}, {}]
        for i in range(4):
            # fix: close each pickle file deterministically
            with open('db-4443-' + str(i + 1) + '.pkl', 'rb') as dbf:
                Node.dbs[i] = p.load(dbf)
        print('Databases loaded :')
    Node.nodesFound = 0
    Node.nodesNotFound = 0
    puzzle = Puzzle(n=dim,)
    res = puzzle.run()
    if res is not None:
        # Walk parent pointers back to the root, then write the dimension
        # followed by the solution boards root-first, with the list
        # brackets stripped from each board's repr.
        chain = [str(res)]
        step = res
        while step.pp is not None:
            step = step.pp
            chain.append(str(step))
        with open(fileName, 'w+') as out:  # fix: file was never closed
            out.write(str(dim) + '\n')
            for i in range(len(chain) - 1, -1, -1):
                out.write(chain[i].replace('[', '').replace(']', '') + '\n')
        # f.write('NodesExpanded: '+str(puzzle.nodesExpanded))
|
from nltk.corpus import stopwords
from src.helpers.debug import top_keys  # NOTE(review): unused in this chunk
import re

# NOTE(review): this rebinding shadows the imported nltk.corpus.stopwords
# module with a plain set of English stopword strings.
stopwords = set(stopwords.words('english'))
# Keep "don" and "will" as meaningful tokens (both appear in NLTK's
# English stopword list) -- presumably they matter as names/titles here.
stopwords.remove('don')
stopwords.remove('will')
# filter out token
def valid_tkn(tkn, valid_kw, invalid_kw):
    """Decide whether a token should be kept (case-insensitive).

    The explicit whitelist wins over everything, then the blacklist; after
    that stopwords, Twitter noise, non-ASCII tokens and tokens with no
    alphabetic characters are all rejected.
    """
    tkn = tkn.lower()
    if tkn in valid_kw:
        return True
    if tkn in invalid_kw:
        return False
    # English stopwords
    if tkn in stopwords:
        return False
    # Ampersand escapes, retweet markers and t.co short links.
    if '//t.co/' in tkn or tkn in ('&', 'rt', 'http'):
        return False
    # Any character outside plain ASCII disqualifies the token.
    if any(ord(ch) > 128 for ch in tkn):
        return False
    # Keep only tokens that still contain letters after stripping
    # every non-alphabetic character.
    letters_only = re.compile('[^a-zA-Z]').sub('', tkn)
    return len(letters_only) >= 1
def unibigrams(tokens, valid_kw, invalid_kw):
    """Collect unigrams and bigrams over consecutive valid tokens.

    Glue words common inside movie titles are allowed within bigrams but
    are never emitted as standalone unigrams.
    Returns {'uni': set of unigrams, 'bi': set of bigrams}.
    """
    glue_words = {'the', 'in', 'on', 'of', 'this'}
    grams = {
        'uni': set(),
        'bi': set()
    }
    prev = False
    for tkn in tokens:
        if not valid_tkn(tkn, valid_kw | glue_words, invalid_kw):
            continue  # invalid token: prev is left untouched
        if prev:
            grams['bi'].add(prev + ' ' + tkn)
        if tkn not in glue_words:
            grams['uni'].add(tkn)
        prev = tkn
    return grams
def bigrams(tokens, valid_kw, invalid_kw):
    """Return the set of bigrams over consecutive valid tokens.

    Invalid tokens are skipped entirely, so a bigram may bridge the gap
    left by a filtered-out token.
    """
    pairs = set()
    last = False
    for tkn in tokens:
        if valid_tkn(tkn, valid_kw, invalid_kw):
            if last:
                pairs.add(last + ' ' + tkn)
            last = tkn
    return pairs
def trigrams(tokens, valid_kw, invalid_kw):
    """Return the list of trigrams over consecutive valid tokens.

    Bug fix: the original iterated range(2, len(tokens)), so the first two
    tokens could never seed prev1/prev2 and the leading trigrams were
    silently lost.  Iterate over every token instead, mirroring bigrams().
    """
    prev1 = False
    prev2 = False
    result = []
    for tkn in tokens:
        if valid_tkn(tkn, valid_kw, invalid_kw):
            if prev1 and prev2:
                result.append(prev1 + ' ' + prev2 + ' ' + tkn)
            prev1 = prev2
            prev2 = tkn
    return result
def merge_bigrams(lst):
    """Chain overlapping "best X" bigrams into "best X Y" phrase counts.

    `lst` is a sequence of (bigram, count) pairs.  When a bigram starts
    with "best" and its second word is also the first word of some bigram
    in the list, the two are merged, e.g. "best supporting" +
    "supporting actor" -> "best supporting actor".

    Bug fix: the original unconditionally counted `word`, so every
    non-merging entry incremented a bogus empty-string key; now only
    actually merged phrases are counted.

    :return: dict mapping merged phrase -> occurrence count.
    """
    firsts = [pair[0].split(" ")[0] for pair in lst]   # e.g. motion, best
    seconds = [pair[0].split(" ")[1] for pair in lst]  # e.g. picture, actor
    counts = {}
    for i in range(len(lst)):
        if firsts[i] == "best" and seconds[i] in firsts:
            # seconds[i] bridges this bigram to the first one starting with it.
            bridge = firsts.index(seconds[i])
            phrase = firsts[i] + " " + seconds[i] + " " + seconds[bridge]
            counts[phrase] = counts.get(phrase, 0) + 1
    return counts
def join_ngrams(lst, minimum):
    """Greedily chain overlapping bigrams in `lst` into longer n-grams.

    :param lst: list of (phrase, count) tuples, presumably sorted by count
                descending -- TODO confirm against callers.
    :param minimum: entries with a count below this stop the outer scan.
    :return: the (mutated) list re-sorted by count, descending.

    NOTE(review): `updated_lst` is an alias of `lst`, not a copy, so merged
    n-grams are appended to the very list being iterated; together with
    `flag` this is what makes the while-loop terminate (or potentially not).
    The `+ 100` count boost and the assigned-but-unused `check_merge` look
    like leftovers from experimentation -- confirm intent before touching.
    """
    # lst : [("name name"), 23]
    threshold = 0.8  # only consider merging entries within 80% of each other's count
    if not lst:
        return lst
    flag = True
    counter = 0
    updated_lst = lst  # alias, not a copy (see NOTE above)
    while flag:
        #updated_lst = []
        # if counter == 1:
        #     break
        flag = False
        for i in range(len(lst)):
            if i >= len(lst):
                break
            if lst[i][1] < minimum:
                break  # relies on lst being sorted by count descending
            curr = lst[i]
            # ["robert downey", "downey jr"] -> "robert downey jr"
            j = i+1
            while j < len(lst) and curr[1]/lst[j][1] > threshold:
                bigram1 = curr[0].split(" ")
                bigram2 = lst[j][0].split(" ")
                if bigram1[1:] == bigram2[:-1]:
                    # Tail of curr overlaps the head of lst[j]: merge them.
                    ngram = bigram1 + [bigram2[-1]]
                    occurence = lst[j][1] + 100
                    updated_lst.append((" ".join(ngram), occurence))
                    flag = True
                    check_merge = True
                # Disabled alternative overlap rules, kept for reference:
                # elif bigram1[-1] == bigram2[0]:
                #     ngram = bigram1 + bigram2[1:]
                #     occurence = lst[j][1]
                #     lst[j] = (" ".join(ngram), occurence, True)
                #     #updated_lst.append((" ".join(ngram), occurence))
                #     flag = True
                #     check_merge = True
                # elif bigram1[0] == bigram2[-1]:
                #     ngram = [bigram1[-1], bigram1[0], bigram2[0]]
                #     occurence = lst[j][1]
                #     updated_lst.append((" ".join(ngram), occurence))
                j += 1
            # print(lst)
            # print(curr)
            # print('----')
            # if check_merge and len(curr) < 3:
            #     lst.remove(curr)
        # if updated_lst:
        #     lst = updated_lst
        # else: break
    return sorted(lst, key=lambda x: x[1], reverse=True)
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# To be honest, I still haven't fully figured out what this is doing.
def addLayer(inputs, inputSize, outputsSize, activeFunction=None):
    """Add one fully connected layer: activation(inputs @ W + b).

    W is drawn from a normal distribution and b starts at a small positive
    constant; when no activation function is given, the linear output is
    returned unchanged.
    """
    weights = tf.Variable(tf.random_normal([inputSize, outputsSize]))
    biases = tf.Variable(tf.zeros([1, outputsSize]) + 0.1)
    # Multiply the input by the weights and add the bias.
    pre_activation = tf.matmul(inputs, weights) + biases
    if activeFunction is None:
        return pre_activation
    # Apply the activation function.
    return activeFunction(pre_activation)
# Build the training set: y = x^2 - 0.5 plus Gaussian noise.
x_data=np.linspace(-1,1,300)[:,np.newaxis]
nosies=np.random.normal(0,0.1,x_data.shape)
y_data=np.square(x_data)-0.5+nosies
# Training data ready.

# Graph inputs (TF1-style placeholders, fed through feed_dict).
xs=tf.placeholder(tf.float32,[None,1])
ys=tf.placeholder(tf.float32,[None,1])

# Hidden layer (1 -> 10, ReLU) and linear output layer (10 -> 1).
layer1=addLayer(xs,1,10,tf.nn.relu)
prediction=addLayer(layer1,10,1)

# Mean squared error; reduction_indices=[1] is the TF1 spelling of axis=1:
# sum over the feature axis before taking the batch mean.
loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),reduction_indices=[1]))
train=tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Variable initialisation op.
init=tf.global_variables_initializer()

# Interactive scatter plot of the data; the fitted curve is redrawn below.
figure=plt.figure()
ax=figure.add_subplot(1,1,1)
ax.scatter(x_data,y_data)
plt.ion()
plt.show()

with tf.Session() as session:
    session.run(init)
    for _ in range(2000):
        session.run(train,feed_dict={xs:x_data,ys:y_data})
        if _ % 50 ==0:
            print(session.run(loss,feed_dict={xs:x_data,ys:y_data}))
            try:
                # Remove the previously drawn fit line.  On the first pass
                # `lines` does not exist yet and the NameError is swallowed.
                # NOTE(review): narrow this bare except to NameError.
                ax.lines.remove(lines[0])
            except:
                pass
            lines=ax.plot(x_data,session.run(prediction,feed_dict={xs:x_data,ys:y_data}),'r-',lw=5)
            plt.pause(0.3)
plt.pause(5)
|
from fenics import *

# Pressure-driven Stokes-like flow on the unit square: p = 1 on the left
# edge, p = 0 on the right, no-slip on top and bottom walls.
# NOTE(review): written against the legacy FEniCS API (plot/interactive).

# Create mesh
mesh = UnitSquareMesh(30, 30)

# Create function space: Taylor-Hood mixed element
# (quadratic velocity, linear pressure).
Vele = VectorElement("Lagrange", triangle, 2)
Pele = FiniteElement("Lagrange", triangle, 1)
W = FunctionSpace(mesh, MixedElement([Vele, Pele]))

# Create boundary conditions
def noslip_boundary(x):
    # top and bottom walls
    return near(x[1], 0.0) or near(x[1], 1.0)

def inflow_boundary(x):
    # left edge
    return near(x[0], 0.0)

def outflow_boundary(x):
    # right edge
    return near(x[0], 1.0)

bcs = [DirichletBC(W.sub(0), (0, 0), noslip_boundary),
       DirichletBC(W.sub(1), 1, inflow_boundary),
       DirichletBC(W.sub(1), 0, outflow_boundary)]

# Create forms: viscous term + pressure gradient + incompressibility
# constraint, with zero body force.
f = Constant((0, 0))
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
a = inner(grad(u), grad(v))*dx + dot(grad(p), v)*dx + div(u)*q*dx
L = dot(f, v)*dx

# Compute solution of the mixed linear system.
w = Function(W)
solve(a == L, w,bcs)

# Plot velocity and pressure (interactive() blocks in legacy FEniCS).
u, p = w.split()
plot(u, title="u")
plot(p, title="p")
interactive()
|
import sys

# Convert a CSV-like character table into PIC-assembly `retlw` tables.
# Usage: python script.py <input.csv> <output.asm>
# Each input row is echoed to stdout as well as written to the output file.
inputString=""
numberOfDataToBeStored = 0
with open(sys.argv[1], 'r') as inputFile:
    inputString = inputFile.read()
with open(sys.argv[2], 'w') as outputFile:
    byTabNewline = inputString.split("\n")
    # NOTE(review): a trailing blank line in the input yields a row with
    # fewer than 6 comma fields and raises IndexError below -- TODO confirm
    # the input files never end with a newline.
    for line in byTabNewline:
        byComma = line.split(",")
        # Column 5 holds the character name; its first 3 characters are a
        # prefix that gets stripped -- presumably a quote + escape sequence.
        characterName = byComma[5][3:]
        # Special case: an escaped backslash in the source file.
        if(characterName == "\"\\\""):
            characterName = "\\"
        toWrite = "; {0}\n".format(characterName)
        print(toWrite, end="")
        outputFile.write(toWrite)
        # The first five columns are data bytes; each cell's first
        # character is dropped.
        for data in range(0, 5):
            toWrite = "retlw {0}\n".format(byComma[data][1:])
            print(toWrite, end="")
            outputFile.write(toWrite)
            numberOfDataToBeStored = numberOfDataToBeStored + 1
        print()
        outputFile.write("\n")
print("Number of data to be stored is", numberOfDataToBeStored)
#!/usr/bin/env python3
import sys
import csv
from .. import utils
import numpy as np
from asmd.asmd import audioscoredataset
# Select the alignment backend from the first CLI argument.
# Bug fix: the original tested `len(sys.argv) < 1`, which is never true
# because argv[0] (the script path) always exists; test for a missing
# argv[1] instead.
if len(sys.argv) < 2:
    print("Error: missing algorithm, add `ewert` or `amt`")
    sys.exit(2)

if sys.argv[1] == "ewert":
    from .ewert.align import audio_to_score_alignment
    FNAME = "results/ewert.csv"
elif sys.argv[1] == "amt":
    from .align_with_amt import audio_to_score_alignment
    FNAME = "results/amt.csv"
else:
    # Typo fix: "provie" -> "provide".
    print("Error: missing algorithm, provide `ewert` or `amt`")
    sys.exit(3)

# Analysis constants: audio sample rate, alignment resolution (seconds),
# parallel worker count, and an epsilon guarding divisions by zero.
SR = 22050
RES = 0.001
NJOBS = 10
EPS = 1e-15
def path_processing(i, data):
    """Realign song `i` of `data` and measure onset/offset errors.

    Returns (err, ratio): `err` interleaves absolute onset and offset
    errors after alignment, `ratio` is start_errors / end_errors with EPS
    guarding division by zero (values > 1 mean the alignment improved).
    NOTE(review): assumes score columns 1 and 2 hold onsets and offsets --
    confirm against asmd's score matrix layout.
    """
    # print(f" Running Alignment on {data.paths[i][2][0]}")
    aligned = data.get_score(i, score_type='precise_alignment')
    misaligned = data.get_score(i, score_type='non_aligned')
    audio, sr = data.get_mix(i, sr=SR)
    # Error of the artificially misaligned score before realignment.
    start_errors = np.abs(np.vstack(utils.evaluate2d(misaligned, aligned)))
    new_ons, new_offs = audio_to_score_alignment(misaligned,
                                                 audio,
                                                 sr,
                                                 res=RES)
    # Overwrite onsets/offsets in place with the estimated alignment.
    misaligned[:, 1] = new_ons
    misaligned[:, 2] = new_offs
    end_errors = np.abs(np.vstack(utils.evaluate2d(misaligned, aligned)))
    # interleaving lists: [on_0, off_0, on_1, off_1, ...]
    err = np.empty((2 * end_errors.shape[1], ))
    err[::2] = end_errors[0]
    err[1::2] = end_errors[1]
    return err, start_errors / (end_errors + EPS)
if __name__ == "__main__":
data = audioscoredataset.Dataset().filter(datasets=['SMD'])
results = data.parallel(path_processing, n_jobs=NJOBS)
ratios = []
errors = []
for err, ratio in results:
ratios.append(ratio)
errors.append(err)
ratios = np.hstack(ratios)
print(
f"Average ratio error after/before: {np.mean(ratios):.6E}, std: {np.std(ratios):.6E}"
)
with open(FNAME, "a", newline="") as f:
writer = csv.writer(f)
writer.writerows(errors)
|
import threading
from typing import List, Dict
import requests
class User:
    """A Telegram user or bot, built from the raw Bot API `User` JSON."""

    def __init__(self, user_json: dict):
        self.raw = user_json
        self.id: int = user_json['id']
        self.is_bot: bool = user_json['is_bot']
        # Display name: "first last" when a last name is present.
        first = user_json['first_name']
        if 'last_name' in user_json:
            self.name = f"{first} {user_json['last_name']}"
        else:
            self.name = first
        # Public t.me link, empty when the user has no username.
        if 'username' in user_json:
            self.username: str = user_json['username']
            self.link = 't.me/' + self.username
        else:
            self.username = ''
            self.link = ''
class Bot(User):
    """A Telegram bot speaking the HTTP Bot API.

    Fetches its own identity via `getMe` at construction time and keeps
    three task registries (message / callback-query / chat-member) that
    drive the polling loop in start().
    """

    def __init__(self, config: dict):
        self.config = config
        self.token: str = self.config['token']
        self.base_url = 'https://api.telegram.org/bot' + self.token + '/'
        # Optional HTTPS proxy, forwarded to every `requests` call.
        if 'proxy' in self.config and self.config['proxy']['enable']:
            self.proxy_kw = {'proxies': {'https': self.config['proxy']['proxy_url']}}
        else:
            self.proxy_kw = {}
        me: dict = requests.get(self.base_url + 'getMe', **self.proxy_kw).json()
        if not me['ok']:
            raise APIError('Bot initialization failed.' + me['description'])
        super().__init__(me['result'])
        self.can_join_groups: bool = me['result']['can_join_groups']
        self.can_read_all_group_messages: bool = me['result']['can_read_all_group_messages']
        self.supports_inline_queries: bool = me['result']['supports_inline_queries']
        # (criteria, action, kwargs) triples registered via add_*_task().
        self.msg_tasks = []
        self.query_tasks = []
        self.member_status_tasks = []
def api(self, action: str, data: dict):
resp = requests.post(self.base_url + action, json=data, **self.proxy_kw).json()
if not resp['ok']:
raise APIError(f'API request "{action}" failed. {resp["description"]}')
return resp['result']
def get_updates(self, offset: int = 0, timeout: int = 60) -> list:
update_data = {'offset': offset,
'timeout': timeout,
'allowed_updates': [
# Accept all updates, but only part of them are available in catbot
'message', # Available
'edited_message',
'channel_post',
'edited_channel_post',
'inline_query',
'chosen_inline_result',
'callback_query', # Available
'shipping_query',
'pre_checkout_query',
'poll',
'poll_answer',
'my_chat_member',
'chat_member' # Available
]}
updates = self.api('getUpdates', update_data)
print(updates)
return updates
def add_msg_task(self, criteria, action, **action_kw):
"""
Add tasks for the bot to process. For message updates only. Use add_query_task for callback query updates.
:param criteria:
A function that lead flow of program into "action" function. It should take a Message-like object as the
only argument and returns a bool. When it returns True, "action" will be executed. An example is to return
True if the message starts with "/start", which is the standard starting of private chats with users.
:param action:
A function to be executed when criteria returns True. Typically it's the response on users' actions.
It should take a Message-like object as the only positional argument and accept keyword arguments. Arguments
in action_kw will be passed to it.
:param action_kw:
Keyword arguments that will be passed to action when it is called.
:return:
"""
self.msg_tasks.append((criteria, action, action_kw))
def add_query_task(self, criteria, action, **action_kw):
"""
Similar to add_msg_task, which add criteria and action for callback queries, typically clicks from
in-message buttons (I would like to call them in-message instead of inline, which is used by Telegram).
"""
self.query_tasks.append((criteria, action, action_kw))
def add_member_status_task(self, criteria, action, **action_kw):
"""
Similar to add_msg_task, which add criteria and action for chat member updates.
"""
self.member_status_tasks.append((criteria, action, action_kw))
    def start(self):
        """Poll getUpdates forever, dispatching updates to registered tasks.

        Updates that arrived before startup are skipped by advancing the
        offset past them.  Every matching (criteria, action) pair runs its
        action in a new thread; this method never returns.
        """
        old_updates = self.get_updates(timeout=0)
        update_offset = old_updates[-1]['update_id'] + 1 if old_updates else 0
        while True:
            try:
                updates = self.get_updates(update_offset)
            except APIError as e:
                # Transient API failure: log and keep polling.
                print(e.args[0])
                continue
            for item in updates:
                # Acknowledge this update on the next poll.
                update_offset = item['update_id'] + 1
                if 'message' in item.keys():
                    msg = Message(item['message'])
                    for criteria, action, action_kw in self.msg_tasks:
                        if criteria(msg):
                            threading.Thread(target=action, args=(msg,), kwargs=action_kw).start()
                elif 'callback_query' in item.keys():
                    query = CallbackQuery(item['callback_query'])
                    # Queries without an attached message cannot be handled.
                    if not hasattr(query, 'msg'):
                        continue
                    for criteria, action, action_kw in self.query_tasks:
                        if criteria(query):
                            threading.Thread(target=action, args=(query,), kwargs=action_kw).start()
                elif 'chat_member' in item.keys():
                    member_update = ChatMemberUpdate(item['chat_member'])
                    for criteria, action, action_kw in self.member_status_tasks:
                        if criteria(member_update):
                            threading.Thread(target=action, args=(member_update,), kwargs=action_kw).start()
                else:
                    # Update type we do not handle.
                    continue
def send_message(self, chat_id, **kw):
"""
:param chat_id: Unique identifier for the target chat or username of the target channel
:param kw: Keyword arguments defined in Telegram bot api. See https://core.telegram.org/bots/api#sendmessage<br>
General keywords:<br>
- parse_mode: Optional. Should be one of MarkdownV2 or HTML or Markdown.<br>
- disable_web_page_preview: Optional. Should be True or False. Disables link previews for links
in this message.<br>
- disable_notification: Optional. Should be True or False. Sends the message silently. Users will
receive a notification with no sound.<br>
- reply_to_message_id: Optional. If the message is a reply, ID of the original message.<br>
- allow_sending_without_reply: Optional. Pass True, if the message should be sent even if the specified
replied-to message is not found<br>
For plain text messages:<br>
- text: Text of the message to be sent, 1-4096 characters after entities parsing.<br>
- reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
custom reply keyboard, instructions to remove reply keyboard or to force a reply
from the user. A common content of this param is an InlineKeyboard object.<br>
:return:
"""
if 'reply_markup' in kw.keys():
kw['reply_markup'] = kw['reply_markup'].parse()
msg_kw = {'chat_id': chat_id, **kw}
return Message(self.api('sendMessage', msg_kw))
def edit_message(self, chat_id, msg_id, **kw):
if 'reply_markup' in kw.keys():
kw['reply_markup'] = kw['reply_markup'].parse()
msg_kw = {'chat_id': chat_id, 'message_id': msg_id, **kw}
try:
return Message(self.api('editMessageText', msg_kw))
except APIError as e:
if 'message is not modified' in e.args[0]:
pass
else:
raise
def forward_message(self, from_chat_id, to_chat_id, msg_id: int, disable_notification=False):
"""
:param from_chat_id: Unique identifier for the chat where the original message was sent
:param to_chat_id: Unique identifier for the target chat or username of the target channel
:param msg_id: Message identifier in the chat specified in from_chat_id
:param disable_notification: Optional. Sends the message silently. Users will receive a
notification with no sound.
:return: The forwarded message.
"""
return Message(self.api('forwardMessage', {'from_chat_id': from_chat_id,
'chat_id': to_chat_id,
'message_id': msg_id,
'disable_notification': disable_notification}))
def answer_callback_query(self, callback_query_id, **kwargs) -> bool:
"""
:param callback_query_id: callback_query_id you receive in callback_query
:param kwargs: Keyword arguments defined in Telegram bot api. You should always call this method after receiving
a valid callback_query, even if you have nothing to send back to user.
See https://core.telegram.org/bots/api#answercallbackquery
- text: Optional. Text of the notification. If not specified, nothing will be shown to the
user, 0-200 characters.
- show_alert: Optional. If true, an alert will be shown by the client instead of a notification
at the top of the chat screen. Defaults to false.
- cache_time: Optional. The maximum amount of time in seconds that the result of the callback
query may be cached client-side. Telegram apps will support caching starting
in version 3.14. Defaults to 0.
:return:
"""
return self.api('answerCallbackQuery', {'callback_query_id': callback_query_id, **kwargs})
def get_chat(self, chat_id: int):
try:
chat = Chat(self.api('getChat', {'chat_id': chat_id}))
except APIError as e:
if e.args[0] == 'Bad Request: chat not found':
raise ChatNotFoundError
else:
raise
else:
return chat
def get_chat_member(self, chat_id: int, user_id: int):
"""
Typically, use this method to build a ChatMember object.
:param chat_id: ID of the chat that the ChatMember object will belong to.
:param user_id: ID of the target user.
:return: A ChatMember object, including info about permissions granted to the user in a specific chat.
"""
try:
chat_member = ChatMember(self.api('getChatMember', {'chat_id': chat_id, 'user_id': user_id}), chat_id)
except APIError as e:
if 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
else:
raise
else:
return chat_member
def restrict_chat_member(self, chat_id: int, user_id: int, until: int = 5, **permissions) -> bool:
"""
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:param user_id: Unique identifier of the target user
:param until: Optional. Time when restrictions will be lifted for the user, unix time.
If user is restricted for more than 366 days or less than 30 seconds from the current time,
they are considered to be restricted forever.
Default: Forever
:param permissions: Chat permissions defined in Telegram bot api. Left blank to restrict all actions except
reading.
See https://core.telegram.org/bots/api#chatpermissions
- can_send_messages: Optional. True, if the user is allowed to send text messages, contacts, locations and
venues
- can_send_media_messages: Optional. True, if the user is allowed to send audios, documents, photos, videos,
video notes and voice notes, implies can_send_messages
- can_send_polls: Optional. True, if the user is allowed to send polls, implies can_send_messages
- can_send_other_messages: Optional. True, if the user is allowed to send animations, games, stickers and
use inline bots, implies can_send_media_messages
- can_add_web_page_previews: Optional. True, if the user is allowed to add web page previews to their
messages, implies can_send_media_messages
- can_change_info: Optional. True, if the user is allowed to change the chat title, photo and other
settings. Ignored in public supergroups
- can_invite_users: Optional. True, if the user is allowed to invite new users to the chat
- can_pin_messages: Optional. True, if the user is allowed to pin messages. Ignored in public supergroups
:return: Return True on success, otherwise raise exception.
"""
try:
result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until,
'permissions': permissions})
except APIError as e:
if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
raise InsufficientRightError
elif 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
elif 'Bad Request: user is an administrator' in e.args[0] or \
'Bad Request: can\'t remove chat owner' in e.args[0] or \
'Bad Request: not enough rights' in e.args[0]:
raise RestrictAdminError
else:
raise
else:
return result
def silence_chat_member(self, chat_id: int, user_id: int, until: int = 5) -> bool:
"""
Remove can_send_messages permission from specified user.
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:param user_id: Unique identifier of the target user
:param until: Optional. Time when restrictions will be lifted for the user, unix time.
If user is restricted for more than 366 days or less than 30 seconds from the current time,
they are considered to be restricted forever.
Default: Forever
:return: Return True on success, otherwise raise exception.
"""
try:
result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until,
'permissions': {'can_send_messages': False}})
except APIError as e:
if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
raise InsufficientRightError
elif 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
elif 'Bad Request: user is an administrator' in e.args[0] or \
'Bad Request: can\'t remove chat owner' in e.args[0] or \
'Bad Request: not enough rights' in e.args[0]:
raise RestrictAdminError
else:
raise
else:
return result
def lift_restrictions(self, chat_id: int, user_id: int) -> bool:
"""
Lift all restrictions on specified user.
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:param user_id: Unique identifier of the target user
:return: Return True on success, otherwise raise exception.
"""
try:
result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id,
'permissions': {'can_send_messages': True,
'can_send_media_messages': True,
'can_send_polls': True,
'can_send_other_messages': True,
'can_add_web_page_previews': True,
'can_change_info': True,
'can_invite_users': True,
'can_pin_messages': True}})
except APIError as e:
if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
raise InsufficientRightError
elif 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
elif 'Bad Request: user is an administrator' in e.args[0] or \
'Bad Request: can\'t remove chat owner' in e.args[0] or \
'Bad Request: not enough rights' in e.args[0]:
raise RestrictAdminError
else:
raise
else:
return result
def kick_chat_member(self, chat_id: int, user_id: int, until: int = 0, no_ban: bool = False) -> bool:
"""
Kick chat member out. See https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:param user_id: Unique identifier of the target user
:param until: Optional, default 0 (infinite ban). Date when the user will be unbanned, unix time. If user is
banned for more than 366 days or less than 30 seconds from the current time they are considered
to be banned forever
:param no_ban: Kick out and then allow the user to join or send messages (from channel or somewhere else)
:return: Return True on success, otherwise raise exception.
"""
try:
if no_ban:
# That the way Telegram API acts
result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id})
else:
result = self.api('kickChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until})
except APIError as e:
if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
raise InsufficientRightError
elif 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
elif 'Bad Request: user is an administrator' in e.args[0] or \
'Bad Request: can\'t remove chat owner' in e.args[0] or \
'Bad Request: not enough rights' in e.args[0]:
raise RestrictAdminError
else:
raise
else:
return result
def unban_chat_member(self, chat_id: int, user_id: int) -> bool:
"""
Unban a banned user. See https://core.telegram.org/bots/api#unbanchatmember
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:param user_id: Unique identifier of the target user
"""
try:
result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id, 'only_if_banned': True})
except APIError as e:
if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
raise InsufficientRightError
elif 'Bad Request: user not found' in e.args[0]:
raise UserNotFoundError
elif 'Bad Request: user is an administrator' in e.args[0] or \
'Bad Request: can\'t remove chat owner' in e.args[0] or \
'Bad Request: not enough rights' in e.args[0]:
raise RestrictAdminError
else:
raise
else:
return result
def delete_message(self, chat_id: int, msg_id: int) -> bool:
try:
result = self.api('deleteMessage', {'chat_id': chat_id, 'message_id': msg_id})
except APIError as e:
if 'Bad Request: message to delete not found' in e.args[0] or \
'Bad Request: message can\'t be deleted' in e.args[0]:
raise DeleteMessageError
else:
raise
else:
return result
class ChatMember(User):
    """Membership info for one user in one chat (getChatMember result).

    Typically built via Bot.get_chat_member().  Which attributes exist
    depends on `status`, mirroring the Bot API ChatMember object.
    """

    def __init__(self, member_json: dict, chat_id: int):
        """
        :param member_json: raw response of the getChatMember API call.
        :param chat_id: id of the chat this membership belongs to.
        """
        super().__init__(member_json['user'])
        self.raw = f'{{"chat_member": {member_json}, "chat_id": {chat_id}}}'
        self.chat_id: int = chat_id
        # One of: creator, administrator, member, restricted, left, kicked.
        self.status: str = member_json['status']
        if self.status in ('administrator', 'creator'):
            self.is_anonymous: str = member_json['is_anonymous']
        if self.status == 'administrator':
            self.can_be_edited: bool = member_json['can_be_edited']
            self.can_delete_messages: bool = member_json['can_delete_messages']
            self.can_promote_members: bool = member_json['can_promote_members']
        if self.status in ('administrator', 'restricted'):
            self.can_change_info: bool = member_json['can_change_info']
            self.can_invite_users: bool = member_json['can_invite_users']
            self.can_pin_messages: bool = member_json['can_pin_messages']
        if self.status == 'restricted':
            self.until_date: int = member_json['until_date']
            self.is_member: bool = member_json['is_member']
            self.can_send_messages: bool = member_json['can_send_messages']
            self.can_send_media_messages: bool = member_json['can_send_media_messages']
            self.can_send_polls: bool = member_json['can_send_polls']
            # Stickers, GIFs and inline-bot usage.
            self.can_send_other_messages: bool = member_json['can_send_other_messages']
            # "Embed links" in official clients.
            self.can_add_web_page_previews: bool = member_json['can_add_web_page_previews']
        if self.status == 'kicked':
            self.until_date: int = member_json['until_date']
        if 'custom_title' in member_json:
            self.custom_title: str = member_json['custom_title']

    def __str__(self):
        return self.raw
class Message:
    """Wrapper around a Telegram "Message" object.

    See https://core.telegram.org/bots/api#message. Extracts commonly-used
    fields, collects entity lists (mentions, hashtags, links, ...) and builds
    an HTML-formatted copy of the text suitable for re-sending with
    parse_mode=HTML.
    """

    def __init__(self, msg_json: dict):
        self.raw = msg_json
        self.chat = Chat(msg_json['chat'])
        self.id: int = msg_json['message_id']
        # Empty for message in channels
        if 'from' in msg_json.keys():
            self.from_ = User(msg_json['from'])
        if str(self.chat.id).startswith('-100'):
            # Supergroup/channel chat ids carry a "-100" prefix; strip it to
            # build the public t.me deep link to this message.
            self.link = f't.me/c/{str(self.chat.id).replace("-100", "")}/{self.id}'
        else:
            self.link = ''
        # The channel itself for channel messages. The supergroup itself for messages from anonymous group
        # administrators. The linked channel for messages automatically forwarded to the discussion group
        if 'sender_chat' in msg_json.keys():
            self.sender_chat = Chat(msg_json['sender_chat'])
        self.date: int = msg_json['date']
        # Signature of the post author for messages in channels, or the custom title of an anonymous group administrator
        if 'author_signature' in msg_json.keys():
            self.author_signature: str = msg_json['author_signature']
        if 'forward_from' in msg_json.keys():
            # forwarded from users who allowed a link to their account in forwarded message
            self.forward_from = User(msg_json['forward_from'])
            self.forward = True
        elif 'forward_sender_name' in msg_json.keys():
            # forwarded from users who disallowed a link to their account in forwarded message
            self.forward_sender_name: str = msg_json['forward_sender_name']
            self.forward = True
        elif 'forward_from_message_id' in msg_json.keys():
            # forwarded from channels
            self.forward_from_chat = Chat(msg_json['forward_from_chat'])
            self.forward_from_message_id: int = msg_json['forward_from_message_id']
            if 'forward_signature' in msg_json.keys():
                self.forward_signature: str = msg_json['forward_signature']
            else:
                self.forward_signature = ''
            self.forward = True
        elif 'forward_from_chat' in msg_json.keys():
            # forwarded from anonymous admins
            self.forward_from_chat = Chat(msg_json['forward_from_chat'])
            self.forward = True
        else:
            self.forward = False
        if self.forward:
            self.forward_date: int = msg_json['forward_date']
        if 'reply_to_message' in msg_json.keys():
            self.reply_to_message = Message(msg_json['reply_to_message'])
            self.reply = True
        else:
            self.reply = False
        if 'edit_date' in msg_json.keys():
            self.edit_date: int = msg_json['edit_date']
            self.edit = True
        else:
            self.edit = False
        # Media messages carry their text in "caption" instead of "text".
        if 'text' in msg_json.keys():
            self.text: str = msg_json['text']
        elif 'caption' in msg_json.keys():
            self.text: str = msg_json['caption']
        else:
            self.text: str = ''
        if 'new_chat_members' in msg_json.keys():
            self.new_chat_members: List[User] = []
            for user_json in msg_json['new_chat_members']:
                self.new_chat_members.append(User(user_json))
        if 'left_chat_member' in msg_json.keys():
            self.left_chat_member: User = User(msg_json['left_chat_member'])
        self.mentions = []
        self.hashtags = []
        self.cashtags = []
        self.commands = []
        self.links = []
        self.bolds = []
        self.italics = []
        self.underlines = []
        self.strikethroughs = []
        self.codes = []
        self.text_links = []
        self.text_mention = []
        self.html_formatted_text = self.text
        if 'entities' in msg_json.keys() or 'caption_entities' in msg_json.keys():
            entity_type = 'entities' if 'entities' in msg_json.keys() else 'caption_entities'
            entity_to_be_formatted = []
            # NOTE(review): Telegram entity offsets are UTF-16 code units, while
            # Python slices by code points — offsets may drift on non-BMP text
            # (e.g. emoji). Preserved as-is; confirm before relying on it.
            for item in msg_json[entity_type]:
                offset = item['offset']
                length = item['length']
                if item['type'] == 'mention':
                    self.mentions.append(self.text[offset:offset + length])
                elif item['type'] == 'hashtag':
                    self.hashtags.append(self.text[offset:offset + length])
                elif item['type'] == 'cashtag':
                    self.cashtags.append(self.text[offset:offset + length])
                elif item['type'] == 'bot_command':
                    self.commands.append(self.text[offset:offset + length])
                elif item['type'] == 'url':
                    self.links.append(self.text[offset:offset + length])
                elif item['type'] == 'bold':
                    self.bolds.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'italic':
                    self.italics.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'underline':
                    self.underlines.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'strikethrough':
                    self.strikethroughs.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'code':
                    self.codes.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'text_link':
                    self.text_links.append((self.text[offset:offset + length], item['url']))
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'text_mention':
                    self.text_mention.append((self.text[offset:offset + length], User(item['user'])))
                    entity_to_be_formatted.append(item)
            # Apply formatting from the highest offset down, so earlier offsets
            # stay valid; the untouched prefix equals self.text[:offset].
            entity_to_be_formatted = sorted(entity_to_be_formatted, key=lambda x: x['offset'], reverse=True)
            for item in entity_to_be_formatted:
                offset = item['offset']
                length = item['length']
                if item['type'] == 'bold':
                    self.html_formatted_text = self.text[:offset] + f'<b>{self.text[offset:offset + length]}</b>' + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'italic':
                    self.html_formatted_text = self.text[:offset] + f'<i>{self.text[offset:offset + length]}</i>' + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'underline':
                    self.html_formatted_text = self.text[:offset] + f'<u>{self.text[offset:offset + length]}</u>' + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'strikethrough':
                    self.html_formatted_text = self.text[:offset] + f'<s>{self.text[offset:offset + length]}</s>' + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'code':
                    self.html_formatted_text = self.text[:offset] + \
                                               f'<code>{self.text[offset:offset + length]}</code>' + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'text_link':
                    self.html_formatted_text = self.text[:offset] + f"<a href=\"{item['url']}\">" \
                                               f"{self.text[offset:offset + length]}</a>" + \
                                               self.html_formatted_text[offset + length:]
                elif item['type'] == 'text_mention':
                    self.html_formatted_text = self.text[:offset] + f"<a href=\"tg://user?id={item['user']['id']}\">" \
                                               f"{self.text[offset:offset + length]}</a>" + \
                                               self.html_formatted_text[offset + length:]
        if 'dice' in msg_json.keys():
            self.dice = True
            self.dice_emoji = msg_json['dice']['emoji']
            self.dice_value = msg_json['dice']['value']
        else:
            self.dice = False
        if 'reply_markup' in msg_json.keys():
            self.reply_markup: InlineKeyboard = InlineKeyboard.from_json(msg_json['reply_markup'])

    def __str__(self):
        # Bug fix: self.raw is the raw dict; returning it from __str__ raised
        # "TypeError: __str__ returned non-string". Convert explicitly.
        return str(self.raw)
class InlineKeyboardButton:
    """One button of an inline keyboard.

    See https://core.telegram.org/bots/api#inlinekeyboardbutton
    """

    def __init__(self, text: str, **kwargs):
        """
        :param text: Text showed on the button.
        :param kwargs: Other optional params defined in Telegram bot api.
            See https://core.telegram.org/bots/api#inlinekeyboardbutton
            - url: Optional. HTTP or tg:// url to be opened when button is pressed
            - callback_data: Optional. Data to be sent in a callback query to the bot when button is pressed, 1-64 bytes
        """
        self.text = text
        # Telegram requires exactly one action field on each button.
        if len(kwargs) == 0:
            raise APIError('Inline keyboard button must have either url or callback_data.')
        if 'url' in kwargs.keys():
            self.url = kwargs['url']
        if 'callback_data' in kwargs.keys():
            self.callback_data = kwargs['callback_data']

    @classmethod
    def from_json(cls, button_json: dict):
        """Build a button from its raw API JSON representation."""
        return cls(**button_json)

    def parse(self) -> dict:
        """
        :return: self.__dict__ for follow-up usage like json serialization.
        """
        return self.__dict__


class InlineKeyboard:
    """A full inline keyboard: a list of button rows."""

    def __init__(self, key_list: List[List[InlineKeyboardButton]]):
        """
        :param key_list: Use InlineKeyBoardButton to structure the buttons you want and pass it into this
                         initializer. Each sublist represent a row. Buttons in the same sublist will be
                         placed in the same row.
        """
        self.key_list = key_list

    @classmethod
    def from_json(cls, markup_json: dict):
        """Rebuild an InlineKeyboard from a raw "reply_markup" JSON object.

        Bug fix: the old code iterated ``range(len(markup_json))`` — the length
        of the outer dict (always 1) — instead of the rows and columns of the
        button matrix, so any keyboard larger than 1x1 was silently truncated.
        """
        markup_list: List[List[dict]] = markup_json['inline_keyboard']
        key_list: List[List[InlineKeyboardButton]] = [
            [InlineKeyboardButton.from_json(button_json) for button_json in row]
            for row in markup_list
        ]
        return cls(key_list)

    def parse(self) -> Dict[str, List[List[Dict]]]:
        """Serialize back into the API's "reply_markup" dict shape."""
        return {
            'inline_keyboard': [[button.parse() for button in row]
                                for row in self.key_list]
        }
class CallbackQuery:
    """Wrapper around a Telegram "CallbackQuery" object.

    See https://core.telegram.org/bots/api#callbackquery. Missing optional
    fields are normalized to empty strings.
    """

    def __init__(self, query_json: dict):
        self.raw = query_json
        self.id: str = query_json['id']
        self.from_ = User(query_json['from'])
        # "message" is absent when the originating message is too old.
        if 'message' not in query_json.keys():
            self.msg = ''
        else:
            self.msg = Message(query_json['message'])
        self.chat_instance: str = query_json['chat_instance']
        if 'data' in query_json.keys():
            self.data: str = query_json['data']
        else:
            self.data = ''
        if 'inline_message_id' in query_json.keys():
            self.inline_message_id: str = query_json['inline_message_id']
        else:
            self.inline_message_id = ''

    def __str__(self):
        # Bug fix: self.raw is the raw dict; returning it from __str__ raised
        # "TypeError: __str__ returned non-string". Convert explicitly.
        return str(self.raw)
class ChatMemberUpdate:
    """Wrapper around a Telegram "ChatMemberUpdated" object.

    See https://core.telegram.org/bots/api#chatmemberupdated — describes a
    change of a member's status in a chat.
    """

    def __init__(self, update_json: dict):
        self.raw = update_json
        self.chat = Chat(update_json['chat'])
        self.from_ = User(update_json['from'])
        self.date: int = update_json['date']
        self.old_chat_member = ChatMember(update_json['old_chat_member'], self.chat.id)
        self.new_chat_member = ChatMember(update_json['new_chat_member'], self.chat.id)

    def __str__(self):
        # Bug fix: self.raw is the raw dict; returning it from __str__ raised
        # "TypeError: __str__ returned non-string". Convert explicitly.
        return str(self.raw)
class Chat:
    """Wrapper around a Telegram "Chat" object.

    See https://core.telegram.org/bots/api#chat. Normalizes a display name
    and a t.me link; optional fields are set only when present in the JSON.
    """

    def __init__(self, chat_json: dict):
        self.raw = chat_json
        self.id: int = chat_json['id']
        self.type: str = chat_json['type']
        if self.type == 'supergroup' or self.type == 'group' or self.type == 'channel':
            self.name: str = chat_json['title']
        else:
            # Private chat: assemble the display name from first/last name.
            if 'last_name' in chat_json.keys():
                self.name = f'{chat_json["first_name"]} {chat_json["last_name"]}'
            else:
                self.name = chat_json['first_name']
        if 'username' in chat_json.keys():
            self.username: str = chat_json['username']
            self.link = 't.me/' + self.username
        else:
            self.username = ''
            self.link = ''
        # Returned by get_chat
        if 'bio' in chat_json.keys():
            # If the chat is private chat
            self.bio: str = chat_json['bio']
        if 'description' in chat_json.keys():
            # If the chat is group, supergroup or channel
            self.description: str = chat_json['description']
        if 'pinned_message' in chat_json.keys():
            self.pinned_message = Message(chat_json['pinned_message'])
        if 'slow_mode_delay' in chat_json.keys():
            # If the chat is supergroup
            self.slow_mode_delay: int = chat_json['slow_mode_delay']
        if 'linked_chat_id' in chat_json.keys():
            # If the supergroup or channel has a linked channel or supergroup, respectively
            self.linked_chat_id: int = chat_json['linked_chat_id']

    def __str__(self):
        # Bug fix: self.raw is the raw dict; returning it from __str__ raised
        # "TypeError: __str__ returned non-string". Convert explicitly.
        return str(self.raw)
class APIError(Exception):
    """Base class for every error reported by the Telegram Bot API wrapper."""
    pass


class UserNotFoundError(APIError):
    """The target user does not exist ("Bad Request: user not found")."""
    pass


class ChatNotFoundError(APIError):
    """The target chat does not exist or the bot is not in it."""
    pass


class InsufficientRightError(APIError):
    """The bot lacks the rights to restrict/unrestrict a chat member."""
    pass


class RestrictAdminError(APIError):
    """The target is an administrator/owner and cannot be restricted."""
    pass


class DeleteMessageError(APIError):
    """The message could not be deleted (not found or not deletable)."""
    pass
|
"""A python based DSL for InspectorTiger's testing."""
import argparse
import ast
import random
import string
import textwrap
from argparse import ArgumentParser
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum, auto
from itertools import chain
from pathlib import Path
from typing import Any, Dict, List, NewType
import it.plugins
from it.inspector import Inspector
from it.session import Session
from it.utils import Group
# Prefer the stdlib unparser (available on Python 3.9+); fall back to the
# third-party ``astor`` package on older versions.
try:
    get_source = ast.unparse
except AttributeError:
    import astor

    get_source = astor.to_source

# Column widths used to align the pretty-printed results in runner().
AVG = 24
AVG_RESULT = 40
# Directory containing the built-in InspectorTiger plugins.
BASE = Path(it.plugins.__file__).parent
# Applied when an .inspect file declares no configuration dict of its own.
DEFAULT_CONFIG = {"require_function": True}
# An AST node representing a single test case.
Handler = NewType("Handler", ast.AST)
class HandlerFlag(Enum):
    """Whether a group of cases must trigger the inspection (POSITIVE) or not."""

    POSITIVE = auto()
    NEGATIVE = auto()

    def verify_result(self, inspection, result):
        """Dispatch to the private verifier matching this flag's name."""
        verifier = getattr(self, f"_{self.name.lower()}_verifier")
        return verifier(inspection, result)

    def _positive_verifier(self, inspection, result):
        # Exactly one kind of report, and it is the inspection under test.
        return len(result) == 1 and inspection.name.upper() in result

    def _negative_verifier(self, inspection, result):
        # A clean run reports nothing at all.
        return not result
@dataclass
class Result:
    """Result of a test run"""

    flag: HandlerFlag  # expected outcome of the case (POSITIVE / NEGATIVE)
    result: bool       # whether the expectation held
    test_case: Handler # AST of the case that was executed
@dataclass
class InspectFile:
    """Represents metadata for a inspected file"""

    name: str                 # plugin name, taken from the file's stem
    path: Path                # location of the .inspect file
    documentation: str = ""   # first bare string constant in the file
    configuration: Dict[str, Any] = field(default_factory=dict)
    # Test-case bodies grouped by expected outcome (POSITIVE / NEGATIVE).
    inspection_handlers: Dict[HandlerFlag, List[Handler]] = field(
        default_factory=lambda: defaultdict(list)
    )
class InspectFileParser(ast.NodeVisitor):
    """Queries relevant metadata from files with searching constants"""

    def __init__(self, filename):
        # ``filename`` is a pathlib.Path; its stem doubles as the plugin name.
        self.result = InspectFile(filename.stem, filename)

    @classmethod
    def discover(cls, origin):
        """Recursively searches all `inspect` files and starts a
        new InspectFileParser instances whenever it encounters with
        one.
        """
        results = []
        for inspect_file in origin.glob("**/*.inspect"):
            parser = cls(inspect_file)
            parser.visit(ast.parse(inspect_file.read_text()))
            if not parser.result.configuration:
                # No dict literal in the file: fall back to the defaults.
                parser.result.configuration = DEFAULT_CONFIG.copy()
            results.append(parser.result)
        return results

    def visit_Constant(self, node):
        # The first bare string constant becomes the file's documentation.
        if isinstance(node.value, str) and self.result.documentation == "":
            self.result.documentation = node.value
        else:
            self.generic_visit(node)

    def visit_Dict(self, node):
        # The first dict literal is the configuration; later ones are ignored.
        if self.result.configuration:
            self.generic_visit(node)
        else:
            self.result.configuration = ast.literal_eval(node)

    def visit_With(self, node):
        # ``with POSITIVE:`` / ``with NEGATIVE:`` blocks hold the test cases.
        flag = HandlerFlag[node.items[0].context_expr.id.upper()]
        self.result.inspection_handlers[flag].append(node.body)
def _name_faker():
test_id = "".join(random.sample(string.ascii_letters, 8))
return "__inspection_test_" + test_id
def prepare_function(body):
    """Wrap *body* in a zero-argument FunctionDef with a synthetic name.

    Used for inspections configured with ``require_function``.
    """
    empty_signature = ast.arguments(
        posonlyargs=[],
        args=[],
        vararg=None,
        kwonlyargs=[],
        kw_defaults=[],
        kwarg=None,
        defaults=[],
    )
    function = ast.FunctionDef(
        name=_name_faker(),
        args=empty_signature,
        body=body,
        decorator_list=[],
    )
    # Synthetic nodes lack line/column info; fill it in for downstream tools.
    ast.fix_missing_locations(function)
    return function
def prepare_module(body):
    """Wrap *body* statements in an ast.Module node.

    Used for inspections configured with ``require_module``.
    """
    # Bug fix: ast.Module requires ``type_ignores`` on Python 3.8+; without it
    # compile() (and other consumers) reject the node.
    module = ast.Module(body=body, type_ignores=[])
    ast.fix_missing_locations(module)
    return module
def group_cases(cases, config):
    """Wrap each test case according to the inspection's configuration.

    Cases are wrapped in a function and/or a module when the corresponding
    ``require_*`` flags are set; otherwise the input is returned untouched.
    """
    wrapped = cases
    if config.get("require_function"):
        wrapped = [prepare_function(case) for case in wrapped]
    if config.get("require_module"):
        wrapped = [prepare_module(case) for case in wrapped]
    return wrapped
def print_fails_verbose(session, fails):
    """Print each failed case's flag, raw inspection output and source code."""
    for result in fails:
        print("FAIL ==>")
        print(f"  Flag: {result.flag}")
        # Re-run the single inspection to show the raw report dict.
        print(
            f"  Result: {dict(session.single_inspection(result.test_case))}"
        )
        # Unparse the test case back to source, indented for readability.
        print(textwrap.indent(get_source(result.test_case), " " * 4), "\n")
def runner(origin, show_errors=False):
    """Discover and run every ``.inspect`` file under *origin*.

    Prints a one-character (T/F) summary per test case and exits the
    interpreter with status 1 when any case failed.

    :param origin: root directory to search for ``*.inspect`` files
    :param show_errors: when True, print verbose details for each failure
    """
    session = Session()
    session.config.update(load_core=True, plugins={})
    session.start()
    available_handlers = chain.from_iterable(Inspector._hooks.values())
    available_handlers = {
        handler.__name__: handler for handler in available_handlers
    }
    results = defaultdict(list)
    inspections = InspectFileParser.discover(origin)
    print(f"Collected {len(inspections)} inspections...")
    for inspection in inspections:
        if inspection.name not in available_handlers:
            print(
                f"Skipping unknown plugin: {inspection.name} (from {inspection.path!s})"
            )
            continue
        for flag, test_cases in inspection.inspection_handlers.items():
            test_cases = tuple(chain.from_iterable(test_cases))
            inspection.inspection_handlers[
                flag
            ] = new_test_cases = group_cases(
                test_cases, inspection.configuration
            )
            for index, test_case in enumerate(new_test_cases):
                result = session.single_inspection(test_case, strict=True)
                result = dict(session.group_by(result, Group.CODE))
                result = flag.verify_result(inspection, result)
                results[inspection.name].append(
                    Result(flag, result, test_cases[index])
                )
    fail = False
    # Bug fix: the old loop rebound ``results`` to each per-test list (shadowing
    # the dict), and exited with ``bool(fails)`` — i.e. only the LAST
    # inspection's outcome (and a NameError when there were no inspections).
    for test, test_results in results.items():
        fails = [result for result in test_results if not result.result]
        padding = AVG_RESULT - len(test_results)
        print(test, " =>> ", end=" ", sep=abs(AVG - len(test)) * " ")
        for result in test_results:
            print(str(result.result)[0], end="")
        if fails:
            fail = True
            print(padding * " ", "[FAILED]")
            if show_errors:
                print_fails_verbose(session, fails)
        else:
            print(padding * " ", "[SUCCEED]", sep="")
    exit(fail)
def main(argv=None):
    """CLI entry point.

    :param argv: argument list to parse; ``None`` means ``sys.argv[1:]``.
    :return: whatever :func:`runner` returns (it normally calls ``exit``).
    """
    parser = argparse.ArgumentParser(description="inspect file runner")
    parser.add_argument("origin", type=Path)
    parser.add_argument("--show-errors", action="store_true", default=False)
    # Bug fix: honour the explicit ``argv`` parameter instead of always
    # reading sys.argv implicitly.
    configuration = parser.parse_args(argv)
    return runner(**vars(configuration))


if __name__ == "__main__":
    import sys

    # Bug fix: pass the real arguments (argv[1:]); the old code passed
    # argv[:1], i.e. just the program name.
    main(sys.argv[1:])
|
import argparse
import json
import logging
import os.path
import time
from logging.handlers import RotatingFileHandler
from configparser import ConfigParser
from datetime import datetime
from datetime import date
from email.mime.text import MIMEText
import smtplib
from utils import log
from google.auth.transport.requests import AuthorizedSession
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
config = ConfigParser()
config.read("/insta360-auto-converter-data/configs.txt")
def parse_args(arg_input=None):
    """Parse command-line arguments for the uploader.

    :param arg_input: argument list to parse; ``None`` means ``sys.argv[1:]``.
    :return: the populated argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='Upload photos to Google Photos.')
    # Bug fix: the option string was '--auth ' (trailing space), which only
    # matched because argparse abbreviation treated '--auth' as its prefix.
    parser.add_argument('--auth', metavar='auth_file', dest='auth_file',
                        help='file for reading/storing user authentication tokens')
    parser.add_argument('--album', metavar='album_name', dest='album_name',
                        help='name of photo album to create (if it doesn\'t exist). Any uploaded photos will be added to this album.')
    parser.add_argument('--log', metavar='log_file', dest='log_file',
                        help='name of output file for log messages')
    parser.add_argument('photos', metavar='photo', type=str, nargs='*',
                        help='filename of a photo to upload')
    return parser.parse_args(arg_input)
def auth(scopes, cred_file):
    """Run the interactive OAuth installed-app flow and return fresh credentials.

    Opens a browser and waits for the OAuth redirect on localhost:8080.

    :param scopes: list of OAuth scope URLs to request
    :param cred_file: path to the client-secrets JSON file
    :return: google.oauth2 credentials object
    """
    flow = InstalledAppFlow.from_client_secrets_file(
        cred_file,
        scopes=scopes)
    credentials = flow.run_local_server(host='localhost',
                                        port=8080,
                                        authorization_prompt_message="",
                                        success_message='The auth flow is complete; you may close this window.',
                                        open_browser=True)
    return credentials
def get_authorized_session(auth_token_file):
    """Build an AuthorizedSession for the Photos Library API.

    Tries to load cached credentials from *auth_token_file*; falls back to the
    interactive OAuth flow, then saves the (possibly refreshed) tokens back.

    :param auth_token_file: path to the cached-token JSON file (may be falsy)
    :return: google.auth AuthorizedSession ready for API calls
    """
    scopes=['https://www.googleapis.com/auth/photoslibrary',
            'https://www.googleapis.com/auth/photoslibrary.sharing']
    cred = None
    if auth_token_file:
        try:
            cred = Credentials.from_authorized_user_file(auth_token_file, scopes)
        except OSError as err:
            log("Error opening auth token file - {0}".format(err))
        except ValueError:
            log("Error loading auth tokens - Incorrect format")
    if not cred:
        # No usable cached token: run the interactive flow.
        cred = auth(scopes, auth_token_file)
    session = AuthorizedSession(cred)
    if auth_token_file:
        try:
            # Persist tokens so the next run can skip the interactive flow.
            save_cred(cred, auth_token_file)
        except OSError as err:
            log("Could not save auth tokens - {0}".format(err))
    return session
def save_cred(cred, auth_file):
    """Serialize the reusable fields of *cred* to *auth_file* as one JSON line.

    :param cred: google.oauth2 credentials object (any object with the fields)
    :param auth_file: path of the token cache file to (over)write
    """
    fields = ('token', 'refresh_token', 'id_token', 'scopes',
              'token_uri', 'client_id', 'client_secret')
    cred_dict = {name: getattr(cred, name) for name in fields}
    with open(auth_file, 'w') as f:
        # Single JSON document followed by a newline (matches print()'s output).
        f.write(json.dumps(cred_dict) + '\n')
# Loop through all pages of the user's albums and collect them into a list.
def getAlbums(session, appCreatedOnly=False):
    """Return every album JSON object in the user's Photos library.

    :param session: authorized Photos Library API session
    :param appCreatedOnly: when True, list only albums created by this app
    :return: list of album dicts
    """
    rtn = []
    params = {
        'excludeNonAppCreatedData': appCreatedOnly
    }
    while True:
        albums = session.get('https://photoslibrary.googleapis.com/v1/albums', params=params).json()
        if 'albums' in albums:
            for a in albums["albums"]:
                rtn.append(a)
        if 'nextPageToken' in albums:
            # More pages remain: request the next one.
            params["pageToken"] = albums["nextPageToken"]
        else:
            break
        if len(albums) == 0:
            break
        # Small pause between pages to stay well under the API rate limits.
        time.sleep(0.25)
    return rtn
def create_or_retrieve_album(session, album_title):
    """Return the id of the album named *album_title*, creating it if needed.

    The title comparison is case-insensitive. Returns None when the album
    could neither be found nor created.

    :param session: authorized Photos Library API session
    :param album_title: album title to look up or create
    """
    # Find albums created by this app to see if one matches album_title
    log("create_or_retrieve_album: -- \'{0}\'".format(album_title))
    albums = getAlbums(session, False)
    log("got {} albums".format(len(albums)))
    for a in albums:
        if 'title' in a and a["title"].lower() == album_title.lower():
            album_id = a["id"]
            log("Uploading into EXISTING photo album -- \'{0}\'".format(album_title))
            return album_id
    # No matches, create new album
    create_album_body = json.dumps({"album":{"title": album_title}})
    resp = session.post('https://photoslibrary.googleapis.com/v1/albums', create_album_body).json()
    log("Create new album - Server response: {}".format(resp))
    if "id" in resp:
        log("Uploading into NEW photo album -- \'{0}\'".format(album_title))
        return resp['id']
    else:
        log("Could not find or create photo album '\{0}\'. Server Response: {1}".format(album_title, resp), True)
        return None
def upload_photos(session, photo_file_list, album_name):
    """Upload each file via the resumable-upload protocol into *album_name*.

    Each file gets up to TRY_LIMIT attempts. Failures are logged, never raised.

    :param session: authorized Photos Library API session
    :param photo_file_list: iterable of local file paths to upload
    :param album_name: target album name, or falsy to upload without an album
    """
    # 1. get album
    album_id = None
    try:
        album_id = create_or_retrieve_album(session, album_name) if album_name else None
    except Exception as e:
        log('get album error: {}'.format(e), True)
    # interrupt upload if an upload was requested but could not be created
    if album_name and not album_id:
        return
    TRY_LIMIT = 3
    for photo_file_name in photo_file_list:
        # Bug fix: the retry counters used to be initialized once before this
        # loop, so after the first successful upload every later file skipped
        # its upload loop entirely; reset them per file.
        TRIED = 0
        DONE_FLAG = False
        while TRIED < TRY_LIMIT and not DONE_FLAG:
            TRIED += 1
            file_size = os.stat(photo_file_name).st_size
            # Step 1: open a resumable-upload session.
            headers = {
                "Content-Length": "0",
                "X-Goog-Upload-Command": "start",
                "X-Goog-Upload-Content-Type": "image/jpeg" if 'jpg' in photo_file_name else 'video/mp4',
                "X-Goog-Upload-File-Name": os.path.basename(photo_file_name),
                "X-Goog-Upload-Protocol": "resumable",
                "X-Goog-Upload-Raw-Size": str(file_size)
            }
            log("Uploading photo -- \'{}\'".format(photo_file_name))
            init_res = session.post('https://photoslibrary.googleapis.com/v1/uploads', headers=headers)
            log("init_res code: {}".format(init_res.status_code))
            try:
                if (init_res.status_code == 200):
                    init_res_headers = init_res.headers
                    real_upload_url = init_res_headers.get("X-Goog-Upload-URL")
                    # Chunks must be multiples of the server-chosen granularity.
                    upload_granularity = int(init_res_headers.get("X-Goog-Upload-Chunk-Granularity"))
                    number_of_req_s = int(file_size / upload_granularity)
                    log('google photos uploading, number_of_req_s: {}'.format(number_of_req_s))
                    # Step 2: stream full-size chunks, then a finalizing chunk.
                    with open(photo_file_name, mode="rb") as f_d:
                        for i in range(number_of_req_s):
                            current_chunk = f_d.read(upload_granularity)
                            offset = i * upload_granularity
                            part_size = len(current_chunk)
                            headers = {
                                "Content-Length": str(part_size),
                                "X-Goog-Upload-Command": "upload",
                                "X-Goog-Upload-Offset": str(offset),
                            }
                            res = session.post(real_upload_url, headers=headers, data=current_chunk)
                        log('google photos uploading last chunk for {}'.format(photo_file_name))
                        current_chunk = f_d.read(upload_granularity)
                        headers = {
                            "Content-Length": str(len(current_chunk)),
                            "X-Goog-Upload-Command": "upload, finalize",
                            "X-Goog-Upload-Offset": str(number_of_req_s * upload_granularity),
                        }
                        upload_token = session.post(real_upload_url, headers=headers, data=current_chunk)
                        log('google photos uploaded last chunk for {}, response: {}'.format(photo_file_name, upload_token))
                        # Step 3: attach the uploaded bytes to the library/album.
                        create_body = json.dumps({"albumId": album_id, "newMediaItems": [
                            {"description": "", "simpleMediaItem": {"uploadToken": upload_token.content.decode()}}]}, indent=4)
                        resp = session.post('https://photoslibrary.googleapis.com/v1/mediaItems:batchCreate',
                                            create_body).json()
                        log('google photos creating newMediaItems, response: {}'.format(resp))
                        if "newMediaItemResults" in resp:
                            status = resp["newMediaItemResults"][0]["status"]
                            if status.get("code") and (status.get("code") > 0):
                                log("Could not add \'{0}\' to library -- {1}".format(os.path.basename(photo_file_name),
                                                                                     status["message"]), True)
                            else:
                                DONE_FLAG = True
                                log(
                                    "Added \'{}\' to library and album \'{}\' ".format(os.path.basename(photo_file_name),
                                                                                       album_name))
                        else:
                            log("Could not add \'{0}\' to library. Server Response -- {1}".format(
                                os.path.basename(photo_file_name), resp), True)
                else:
                    log("Could not upload \'{0}\'.".format(os.path.basename(photo_file_name)), True)
            except Exception as e:
                log('google photos uploading for file: {}, error: {}'.format(photo_file_name, e), True)
def upload_to_album(file_path, album_name):
    """Upload one local file to the given Google Photos album.

    Builds an authorized session from the on-disk token cache, uploads the
    file, then closes the session.

    :param file_path: local path of the photo/video to upload
    :param album_name: target album name (created if missing)
    """
    session = get_authorized_session('/insta360-auto-converter-data/gphotos_auth.json')
    photos_list = [file_path]
    upload_photos(session, photos_list, album_name)
    session.close()
|
import random
import json
# Digits a sudoku cell may hold.
stevilke = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Marker for a freshly started game ("Z" = zacetek, i.e. start).
zacetek = 'Z'
class Plosca:
    """A 9x9 sudoku board plus the bookkeeping needed to fill it by
    randomized recursive backtracking."""

    def __init__(self):
        # 9x9 grid of None, meaning "empty cell".
        tabela = []
        for _ in range(9):
            vrstica = []
            for _ in range(9):
                vrstica.append(None)
            tabela.append(vrstica)
        self.tabela = tabela
        # Grows by one element per recursive call; its length bounds the
        # recursion depth (see sudoku()).
        self.preveri_ce_je_rekurzija_predolga = []
        # For every cell index (row, col): digits not yet tried there.
        slovar = {}
        for i in range(9):
            for j in range(9):
                indeks = (i, j)
                slovar[indeks] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.seznam_uporabnih_stevil = slovar
        # Cursor: the cell currently being filled.
        self.indeks = (0,0)

    def preglej_vrstico(self, stevilo, vrstica):
        # True when *stevilo* does not yet appear in row *vrstica*.
        if stevilo in self.tabela[vrstica]:
            return False
        else:
            return True

    def preglej_stolpec(self, stevilo, stolpec):
        # True when *stevilo* does not yet appear in column *stolpec*.
        stevila_v_stolpcu = []
        for i in range(9):
            stevila_v_stolpcu.append(self.tabela[i][stolpec])
        if stevilo in stevila_v_stolpcu:
            return False
        else:
            return True

    def v_katerem_kvadratu(self, indeks):
        '''Given the row or column index of the number of interest, return
        the list of row or column indices that belong to the 3x3 box
        containing that number.'''
        vrsta_ali_stolpec_v_kvadratu = []
        indeks_vs = indeks // 3
        for i in range(9):
            if i // 3 == indeks_vs:
                vrsta_ali_stolpec_v_kvadratu.append(i)
            else:
                pass
        return vrsta_ali_stolpec_v_kvadratu

    def preglej_kvadrat(self, stevilo, vrstica, stolpec):
        # True when *stevilo* does not yet appear in the 3x3 box of the cell.
        stevila_v_kvadratu = []
        vrstice_v_kvadratu = self.v_katerem_kvadratu(vrstica)
        stolpci_v_kvadratu = self.v_katerem_kvadratu(stolpec)
        for i in vrstice_v_kvadratu:
            for j in stolpci_v_kvadratu:
                stevila_v_kvadratu.append(self.tabela[i][j])
        if stevilo in stevila_v_kvadratu:
            return False
        else:
            return True

    def naslednji_indeks(self):
        # Next cell in row-major order (wraps to the next row after column 8).
        sez = list(self.indeks)
        if sez[1] == 8:
            sez[1] = 0
            sez[0] += 1
            return tuple(sez)
        else:
            sez[1] += 1
            return tuple(sez)

    def prejsnji_indeks(self):
        # Previous cell in row-major order (wraps to the previous row).
        sez = list(self.indeks)
        if sez[1] == 0:
            sez[1] = 8
            sez[0] -= 1
            return tuple(sez)
        else:
            sez[1] -= 1
            return tuple(sez)

    def sudoku(self, indeks):
        '''Stops when the board is full. For each index it checks whether some
        number can be placed at that position; if so, places it and moves on
        to the next index. Otherwise it tries another number, removing
        already-used options from the candidate list as it goes. If that list
        is empty and no number fits, it steps back one cell, erases the
        number there and places a different one instead.
        Problem: the recursion sometimes grows too deep, so it is capped at
        roughly 950 calls; when the cap is hit, the function returns
        'Predolga rekurzija' ("recursion too long").'''
        self.indeks = indeks
        if len(self.preveri_ce_je_rekurzija_predolga) == 960:
            return 'Predolga rekurzija'
        elif self.tabela[8][8] != None:
            # Bottom-right cell filled: the board is complete.
            return self.tabela
        elif self.seznam_uporabnih_stevil[self.indeks] == []:
            # Dead end: restore candidates here, backtrack one cell and
            # forbid the digit that was placed there.
            self.seznam_uporabnih_stevil[self.indeks] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
            self.indeks = self.prejsnji_indeks()
            sez = list(self.indeks)
            vrsta = sez[0]
            stolpec = sez[1]
            stevilo_na_prejsnjem_indeksu = self.tabela[vrsta][stolpec]
            self.seznam_uporabnih_stevil[self.indeks].remove(stevilo_na_prejsnjem_indeksu)
            self.tabela[vrsta][stolpec] = None
            self.preveri_ce_je_rekurzija_predolga.append(0)
            return self.sudoku(self.indeks)
        else:
            # Try a random remaining candidate for the current cell.
            sez = list(self.indeks)
            vrsta = sez[0]
            stolpec = sez[1]
            stevilo = random.choice(self.seznam_uporabnih_stevil[self.indeks])
            if self.preglej_vrstico(stevilo, vrsta) == True and self.preglej_stolpec(stevilo, stolpec) == True and self.preglej_kvadrat(stevilo, vrsta, stolpec) == True:
                self.tabela[vrsta][stolpec] = stevilo
                indeks = self.naslednji_indeks()
                self.preveri_ce_je_rekurzija_predolga.append(0)
                return self.sudoku(indeks)
            else:
                # Conflict: discard this candidate and retry the same cell.
                self.seznam_uporabnih_stevil[self.indeks].remove(stevilo)
                self.preveri_ce_je_rekurzija_predolga.append(0)
                return self.sudoku(self.indeks)
class PripravljenaMreza:
    """A ready-to-play puzzle: a full solution grid with *tezavnost*
    (difficulty) cells blanked out."""

    def __init__(self, tezavnost):
        # Difficulty = how many cells get erased from the solved board.
        self.tezavnost = tezavnost
        self.polna_plosca = self.pripravi_polno_plosco()
        # Coordinates of the erased cells; these are the cells to solve.
        self.resitve = self.izbrisana_mesta()
        self.pripravljena_plosca = self.pripravi_sudoku()

    def pripravi_polno_plosco(self):
        # Generate a fully solved board, retrying whenever the recursive
        # solver gives up with 'Predolga rekurzija'.
        plosca = Plosca()
        polna_plosca = plosca.sudoku(plosca.indeks)
        if polna_plosca == 'Predolga rekurzija':
            return self.pripravi_polno_plosco()
        else:
            return polna_plosca

    def nakljucna_mesta(self):
        # Pick one random cell as a [row, column] pair.
        izbiram_med = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        stevilo1 = random.choice(izbiram_med)
        stevilo2 = random.choice(izbiram_med)
        mesto = [stevilo1, stevilo2]
        return mesto

    def izbrisana_mesta(self):
        # Collect *tezavnost* distinct random cells to blank out.
        seznam = []
        stevilo = self.tezavnost
        while len(seznam) < stevilo:
            mesto = self.nakljucna_mesta()
            if mesto not in seznam:
                seznam.append(mesto)
            else:
                pass
        return seznam

    def pripravi_sudoku(self):
        # Copy the solved board and replace the chosen cells with '_'.
        prazni_prostori = self.resitve
        mreza = self.kopija_mreze(self.polna_plosca)
        for prostor in prazni_prostori:
            vrsta = prostor[0]
            stolpec = prostor[1]
            mreza[vrsta][stolpec] = '_'
        return mreza

    def kopija_mreze(self, mreza):
        # Row-by-row deep copy, so the puzzle never aliases the solution.
        nova_mreza = []
        for vrsta in mreza:
            nova_mreza.append([stevilka for stevilka in vrsta])
        return nova_mreza
class Igra:
    """One sudoku game: the full solution grid, the puzzle grid being solved,
    and the list of blanked-out cells."""

    def __init__(self, tezavnost, polna = None, sudoku = None, resitve = None):
        self.tezavnost = tezavnost
        if polna is None and sudoku is None and resitve is None:
            # Fresh game: generate a brand-new board for this difficulty.
            self.mreza = PripravljenaMreza(self.tezavnost)
            self.polna = self.mreza.polna_plosca
            self.sudoku = self.mreza.pripravljena_plosca
            self.resitve = self.mreza.resitve
        else:
            # Restored game: reuse the grids loaded from storage.
            self.polna = polna
            self.sudoku = sudoku
            self.resitve = resitve

    def ugibaj(self, stevilka, vrsta, stolpec):
        """Try digit *stevilka* at (vrsta, stolpec) and return a status string:
        'Napaka 1.' (not a digit 1-9), 'Napaka 2.' (cell not playable),
        'Napacno.' (wrong digit), 'Pravilno.' (correct), 'Zmaga.' (solved)."""
        if stevilka not in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
            return 'Napaka 1.'
        if [vrsta, stolpec] not in self.resitve:
            return 'Napaka 2.'
        # The guess is recorded on the board whether right or wrong.
        self.sudoku[vrsta][stolpec] = stevilka
        if self.polna[vrsta][stolpec] != stevilka:
            return 'Napacno.'
        return 'Zmaga.' if self.zmaga() else 'Pravilno.'

    def zmaga(self):
        """True when the puzzle grid matches the full solution."""
        return self.sudoku == self.polna
class Sudoku:
    def __init__(self, datoteka_s_stanjem):
        '''self.igre maps game ids to (game, state) pairs.'''
        self.igre = {}
        # Path of the JSON file that persists all games between runs.
        self.datoteka_s_stanjem = datoteka_s_stanjem

    def nalozi_igre_iz_datoteke(self):
        # Load every stored game; ids come back as strings, so cast to int.
        with open(self.datoteka_s_stanjem, 'r', encoding = 'utf-8') as f:
            igre = json.load(f)
            self.igre = {int(id_igre) : (Igra(igre[id_igre]['tezavnost'], igre[id_igre]['polna_mreza'], igre[id_igre]['resevana_mreza'], igre[id_igre]['resitve']), igre[id_igre]['stanje']) for id_igre in igre}
        return

    def zapisi_igro_v_datoteko(self):
        '''Writes every element of self.igre to the file.'''
        with open(self.datoteka_s_stanjem, 'w', encoding = 'utf-8') as f:
            igre = {}
            for id_igre, (igra, stanje) in self.igre.items():
                igre[id_igre] = {'tezavnost': igra.tezavnost, 'polna_mreza': igra.polna, 'resevana_mreza': igra.sudoku, 'resitve': igra.resitve, 'stanje': stanje}
            json.dump(igre, f)
        return

    def prost_id_igre(self):
        # Smallest unused id: 0 for an empty store, max+1 otherwise.
        if len(self.igre) == 0:
            return 0
        else:
            return max(self.igre.keys()) + 1

    def nova_igra(self, tezavnost):
        # Reload, create a new game in the starting state, persist, return id.
        self.nalozi_igre_iz_datoteke()
        id_igre = self.prost_id_igre()
        igra = Igra(tezavnost)
        self.igre[id_igre] = (igra, zacetek)
        self.zapisi_igro_v_datoteko()
        return id_igre

    def ugibaj(self, id_igre, stevilka, vrsta, stolpec):
        # Reload, apply the guess to the chosen game, persist the new state.
        self.nalozi_igre_iz_datoteke()
        igra = self.igre[id_igre][0]
        novo_stanje = igra.ugibaj(stevilka, vrsta, stolpec)
        self.igre[id_igre] = (igra, novo_stanje)
        self.zapisi_igro_v_datoteko()
        return
#def nova_igra(tezavnost):
# return Igra(tezavnost)
|
import sentencepiece as spm
import torch
class SMPEncoder:
    """Thin wrapper around a SentencePiece model file.

    Loads the processor once at construction and exposes encode/decode
    helpers that work on plain strings.
    """

    def __init__(self, model_file):
        self.model_file = model_file
        self.sp = spm.SentencePieceProcessor(model_file=self.model_file)

    def decode(self, tokens):
        """Turn SentencePiece tokens back into the original text."""
        return self.sp.decode(tokens)

    def encode(self, example):
        """Encode *example* and return its subword pieces joined by spaces."""
        pieces = self.sp.encode(example, out_type=str)
        return " ".join(pieces)
# encoder = SMPEncoder()
# encoder.encode("def main(): pass")
def to_cpu(obj):
    """Recursively move every torch.Tensor inside *obj* to the CPU.

    - dicts are updated in place (and returned), preserving identity;
    - tensors are moved with Tensor.to("cpu");
    - lists and tuples are rebuilt with their container type preserved
      (the original collapsed tuples into lists — fixed here);
    - anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            obj[key] = to_cpu(value)
        return obj
    if isinstance(obj, torch.Tensor):
        return obj.to("cpu")
    if isinstance(obj, tuple):
        return tuple(to_cpu(value) for value in obj)
    if isinstance(obj, list):
        return [to_cpu(value) for value in obj]
    return obj
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import datetime
import logging
from collections import defaultdict
from functools import lru_cache
from typing import (
Any, Callable, FrozenSet, Iterable, List, Mapping, MutableMapping,
NamedTuple, NewType, Optional, Sequence, Tuple, Union, cast
)
from amundsen_common.models.table import Application, Column, Table
from amundsen_common.models.user import User
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.traversal import Column as MapColumn
from gremlin_python.process.traversal import T
from amundsen_gremlin.gremlin_model import (
EdgeType, EdgeTypes, GremlinCardinality, MagicProperties, Property,
VertexType, VertexTypes, WellKnownProperties
)
from amundsen_gremlin.gremlin_shared import ( # noqa: F401
append_traversal, make_cluster_uri, make_column_statistic_uri,
make_column_uri, make_database_uri, make_description_uri, make_schema_uri,
make_table_uri
)
from amundsen_gremlin.utils.streams import chunk
# Module-level logger for this loader.
LOGGER = logging.getLogger(__name__)
# An EXISTING_KEY identifies an entity by its frozen (property name, formatted value) pairs.
EXISTING_KEY = FrozenSet[Tuple[str, str]]
# EXISTING: per vertex/edge type, the entities already present in the graph, keyed by EXISTING_KEY.
EXISTING = NewType('EXISTING',
                   Mapping[Union[VertexType, EdgeType], MutableMapping[EXISTING_KEY, Mapping[str, Any]]])
# ENTITIES: per vertex/edge type, the entities to be written, keyed by their graph id.
ENTITIES = NewType('ENTITIES', Mapping[Union[VertexType, EdgeType], MutableMapping[str, Mapping[str, Any]]])
def new_entities() -> ENTITIES:
    """Return a fresh ENTITIES accumulator (per-type dicts created lazily)."""
    fresh = defaultdict(dict)
    return cast(ENTITIES, fresh)
def new_existing() -> EXISTING:
    """Return a fresh EXISTING accumulator (per-type dicts created lazily)."""
    fresh = defaultdict(dict)
    return cast(EXISTING, fresh)
def _get_existing_key_from_entity(_entity: Mapping[str, Any]) -> EXISTING_KEY:
    """
    Used for testing: derive the EXISTING_KEY of an already-built entity by
    pulling its label out and delegating to _get_existing_key.
    """
    remaining = dict(_entity)
    label = remaining.pop(MagicProperties.LABEL.value.name)
    assert isinstance(label, str)  # appease the types
    return _get_existing_key(_type=label, **remaining)
def _get_existing_key(_type: Union[VertexType, EdgeType, VertexTypes, EdgeTypes, str], **_entity: Any) -> EXISTING_KEY:
    """
    Maybe this should be a part of EdgeType and VertexType. But, this function certainly shouldn't be used
    away from EXISTING (or testing).

    Normalizes _type (label string, enum member, or type value) down to a
    VertexType/EdgeType, then builds the frozen set of (name, formatted value)
    pairs for that type's key properties.
    """
    # A string must match exactly one of the vertex or edge labels.
    if isinstance(_type, str):
        vertex_type = VertexTypes.by_label().get(_type)
        edge_type = EdgeTypes.by_label().get(_type)
        assert bool(vertex_type) != bool(edge_type), \
            f'expected exactly one of VertexTypes or EdgeTypes to match {_type}'
        vertex_or_edge_type = vertex_type or edge_type
        assert vertex_or_edge_type is not None  # appease mypy
        _type = vertex_or_edge_type
        assert isinstance(_type, (VertexTypes, EdgeTypes))
    # Unwrap an enum member to its underlying type value.
    if isinstance(_type, (VertexTypes, EdgeTypes)):
        _type = _type.value
    assert isinstance(_type, (VertexType, EdgeType))
    key_properties = _get_key_properties(_type)
    # this eliding (the created and label fields) feels icky, but makes sense.
    if isinstance(_type, EdgeType):
        key_properties = key_properties.difference({WellKnownProperties.Created.value})
    if isinstance(_type, VertexType):
        key_properties = key_properties.difference({WellKnownProperties.TestShard.value})
    key_properties = key_properties.difference({MagicProperties.LABEL.value})
    assert WellKnownProperties.Created.value not in key_properties
    assert MagicProperties.LABEL.value not in key_properties
    return frozenset([(p.name, p.format(_entity.get(p.name))) for p in key_properties])
# Cached: there is a small fixed set of vertex/edge types (plus headroom).
@lru_cache(maxsize=len(list(VertexTypes)) + len(list(EdgeTypes)) + 100)
def _get_key_properties(_type: Union[VertexType, EdgeType]) -> FrozenSet[Property]:
    """Return the properties referenced by _type's id_format (its key properties)."""
    assert isinstance(_type, (EdgeType, VertexType))
    return frozenset([_type.properties_as_map()[n] for n in _discover_parameters(_type.id_format)])
def _discover_parameters(format_string: str) -> FrozenSet[str]:
"""
use this to discover what the parameters to a format string are
"""
parameters: FrozenSet[str] = frozenset()
while True:
try:
format_string.format(**dict((k, '') for k in parameters))
return parameters
except KeyError as e:
updated = parameters.union(set(e.args))
assert updated != parameters
parameters = updated
def date_string_to_date(a_date: str) -> datetime.date:
    """Parse a YYYY-MM-DD string into a datetime.date."""
    parsed = datetime.datetime.strptime(a_date, '%Y-%m-%d')
    return parsed.date()
class TableUris(NamedTuple):
    """The four URIs that identify a table's place in the hierarchy:
    database -> cluster -> schema -> table."""
    database: str
    cluster: str
    schema: str
    table: str

    @staticmethod
    def get(*, database: str, cluster: str, schema: str, table: str) -> "TableUris":
        """Build all four URIs from the raw names, each nested in its parent's URI."""
        database_uri = make_database_uri(database_name=database)
        cluster_uri = make_cluster_uri(database_uri=database_uri, cluster_name=cluster)
        schema_uri = make_schema_uri(cluster_uri=cluster_uri, schema_name=schema)
        table_uri = make_table_uri(schema_uri=schema_uri, table_name=table)
        return TableUris(database=database_uri, cluster=cluster_uri, schema=schema_uri, table=table_uri)
HISTORICAL_APP_PREFIX = 'app-'
ENVIRONMENT_APP_SUFFIXES = frozenset(['-development', '-devel', '-staging', '-stage', '-production', '-prod'])


def possible_application_names_application_key(app_key: str) -> Iterable[str]:
    """Return the plausible aliases of an application key.

    Pairs the key with its 'app-'-prefixed (or unprefixed) twin, and when the
    key carries a recognized environment suffix, also adds both names with
    that suffix stripped.
    """
    if app_key.startswith(HISTORICAL_APP_PREFIX):
        candidates = [app_key, app_key[len(HISTORICAL_APP_PREFIX):]]
    else:
        candidates = [app_key, HISTORICAL_APP_PREFIX + app_key]
    for suffix in ENVIRONMENT_APP_SUFFIXES:
        if app_key.endswith(suffix):
            stripped = [name[:-len(suffix)] for name in candidates]
            candidates.extend(stripped)
            break
    return tuple(candidates)
def possible_existing_keys_for_application_key(*app_keys: str) -> FrozenSet[EXISTING_KEY]:
    """EXISTING_KEY forms for every alias of every given application key."""
    keys = {
        _get_existing_key(VertexTypes.Application, key=alias)
        for app_key in app_keys
        for alias in possible_application_names_application_key(app_key)
    }
    return frozenset(keys)
def possible_vertex_ids_for_application_key(*app_keys: str) -> FrozenSet[str]:
    """Application vertex ids for every possible existing key of the given app keys."""
    return frozenset(
        VertexTypes.Application.value.id(**dict(key))
        for key in possible_existing_keys_for_application_key(*app_keys)
    )
def ensure_edge_type(edge_type: Union[str, EdgeTypes, EdgeType]) -> EdgeType:
    """Normalize a label string / enum member / EdgeType value to an EdgeType."""
    if isinstance(edge_type, str):
        # label -> enum member; the next branch unwraps it to the value
        edge_type = EdgeTypes.by_label()[edge_type]
    if isinstance(edge_type, EdgeTypes):
        edge_type = edge_type.value
    assert isinstance(edge_type, EdgeType)
    return edge_type
def ensure_vertex_type(vertex_type: Union[str, VertexTypes, VertexType]) -> VertexType:
    """Normalize a label string / enum member / VertexType value to a VertexType."""
    if isinstance(vertex_type, str):
        # label -> enum member; the next branch unwraps it to the value
        vertex_type = VertexTypes.by_label()[vertex_type]
    if isinstance(vertex_type, VertexTypes):
        vertex_type = vertex_type.value
    assert isinstance(vertex_type, VertexType)
    return vertex_type
class _FetchExisting:
    """Populates EXISTING maps by fetching the vertexes/edges currently
    persisted in the graph (plus test helpers that fake such fetches)."""

    @classmethod
    def _fake_into_existing_edges_for_testing(cls, _existing: EXISTING, _type: Union[EdgeType, EdgeTypes], _from: str,
                                              _to: str, **entity: Any) -> Mapping[str, Any]:
        # Test helper: synthesize an edge entity and insert it into _existing.
        _type = ensure_edge_type(_type)
        _entity = _type.create(**entity, **{
            MagicProperties.LABEL.value.name: _type.label,
            MagicProperties.FROM.value.name: _from,
            MagicProperties.TO.value.name: _to,
        })
        _key = _get_existing_key(_type=_type, **_entity)
        assert _key not in _existing[_type]
        _existing[_type][_key] = _entity
        return _entity

    @classmethod
    def _fake_into_existing_vertexes_for_testing(cls, _existing: EXISTING, _type: Union[VertexType, VertexTypes],
                                                 **entity: Any) -> Mapping[str, Any]:
        # Test helper: synthesize a vertex entity and insert it into _existing.
        _type = ensure_vertex_type(_type)
        _entity = _type.create(**entity, **{
            MagicProperties.LABEL.value.name: _type.label,
        })
        _key = _get_existing_key(_type=_type, **_entity)
        assert _key not in _existing[_type]
        _existing[_type][_key] = _entity
        return _entity

    @classmethod  # noqa: C901
    def _honor_cardinality_once(cls, _property: Property, value: Any) -> Any:
        # Coerce one property value into the shape its declared cardinality
        # expects (single -> scalar, list -> tuple, set -> frozenset),
        # validating each element against the property's type.
        # use the types to figure out if we should take the element instead
        if _property.cardinality == GremlinCardinality.single or _property.cardinality is None:
            # is this the most general type?
            if isinstance(value, Sequence):
                assert len(value) <= 1, f'single cardinality property has more than one value! {value}'
                value = value[0] if value else None
            if value is not None:
                _property.type.value.is_allowed(value)
            return value
        elif _property.cardinality == GremlinCardinality.list:
            # is this the most general type?
            if value is None:
                value = ()
            elif isinstance(value, Iterable) and not isinstance(value, tuple):
                value = tuple(value)
            for e in value:
                _property.type.value.is_allowed(e)
            return value
        elif _property.cardinality == GremlinCardinality.set:
            # is this the most general type?
            if value is None:
                value = frozenset()
            elif isinstance(value, Iterable) and not isinstance(value, FrozenSet):
                value = frozenset(value)
            for e in value:
                _property.type.value.is_allowed(e)
            return value
        raise AssertionError('never')

    @classmethod
    def _honor_cardinality(cls, _type: Union[VertexType, EdgeType], **entity: Any) -> Mapping[str, Any]:
        # Apply _honor_cardinality_once to every known property; unknown
        # properties are logged and dropped.
        _properties = _type.properties_as_map()
        result = dict()
        for k, v in entity.items():
            if not _properties.get(k):
                LOGGER.error(f'Trying to honor cardinality for property {k} which isnt allowed for {_type.label}')
                continue
            result[k] = cls._honor_cardinality_once(_properties[k], v)
        return result

    @classmethod
    def _into_existing(cls, value_maps: Sequence[Union[Mapping[Any, Any], Sequence[Any]]], existing: EXISTING) -> None:
        """
        value_map for an edge should be the result of .union(__.outV().id(), __.valueMap(True), __.inV().id()).fold()
        value_map for a vertex should be the result of valueMap(True)
        """
        # Edge results arrive as 3-element sequences, vertex results as maps.
        assert all(isinstance(e, (Mapping, Sequence)) for e in value_maps)
        edge_value_maps = [e for e in value_maps if isinstance(e, Sequence)]
        vertex_value_maps = [e for e in value_maps if isinstance(e, Mapping)]
        assert len(value_maps) == len(edge_value_maps) + len(vertex_value_maps)
        for _from, entity, _to in edge_value_maps:
            entity = dict(entity)
            _type = EdgeTypes.by_label()[entity.pop(T.label)].value
            _id = entity.pop(T.id)
            # clear out the other special values. eventually we'll be able to ask for just the id and label, but that's
            # not supported in Neptune (you can only do valueMap(True))
            for v in iter(T):
                entity.pop(v, None)
            _entity = _type.create(**entity, **{
                MagicProperties.LABEL.value.name: _type.label,
                MagicProperties.ID.value.name: _id,
                MagicProperties.FROM.value.name: _from,
                MagicProperties.TO.value.name: _to,
            })
            _key = _get_existing_key(_type=_type, **_entity)
            # should we expect only one? things like the CLUSTER, and SCHEMA will duplicate
            if _key in existing[_type]:
                if existing[_type][_key] != _entity:
                    LOGGER.info(f'we already have a type: {_type.label}, id={_id} that is different: '
                                f'{existing[_type][_key]} != {_entity}')
            else:
                # should the magic properties go in here too? It might be nicer to not, but is convenient
                existing[_type][_key] = _entity
        for entity in vertex_value_maps:
            entity = dict(entity)
            _type = VertexTypes.by_label()[entity.pop(T.label)].value
            _id = entity.pop(T.id)
            # clear out the other special values. eventually we'll be able to ask for just the id and label, but that's
            # not supported in Neptune (you can only do valueMap(True))
            for v in iter(T):
                entity.pop(v, None)
            _entity = _type.create(**cls._honor_cardinality(_type, **entity), **{
                MagicProperties.LABEL.value.name: _type.label,
                MagicProperties.ID.value.name: _id,
            })
            _key = _get_existing_key(_type=_type, **_entity)
            # should we expect only one? things like the CLUSTER, and SCHEMA will duplicate
            if _key in existing[_type]:
                if existing[_type][_key] != _entity:
                    LOGGER.error(f'we already have a type: {_type.label}, id={_id} that is different: '
                                 f'{existing[_type][_key]} != {_entity}')
            else:
                # should the magic properties go in here too? It might be nicer to not, but is convenient
                existing[_type][_key] = _entity

    @classmethod
    def table_entities(cls, *, _g: GraphTraversalSource, table_data: List[Table], existing: EXISTING) -> None:
        # Fetch into `existing` everything in the graph these tables touch:
        # the database/cluster/schema chain, table-adjacent edges, columns,
        # then the Application/User vertexes referenced by table writers.
        all_tables_ids = list(set([
            VertexTypes.Table.value.id(key=TableUris.get(
                database=t.database, cluster=t.cluster, schema=t.schema, table=t.name).table)
            for t in table_data]))
        all_owner_ids = list(set([VertexTypes.User.value.id(key=key)
                                  for key in [t.table_writer.id for t in table_data if t.table_writer is not None]]))
        all_application_ids = list(set(list(possible_vertex_ids_for_application_key(
            *[t.table_writer.id for t in table_data if t.table_writer is not None]))))
        # chunk these since 100,000s seems to choke
        for tables_ids in chunk(all_tables_ids, 1000):
            LOGGER.info(f'fetching for tables: {tables_ids}')
            # fetch database -> cluster -> schema -> table links
            g = _g.V(tuple(tables_ids)).as_('tables')
            g = g.coalesce(__.inE(EdgeTypes.Table.value.label).dedup().fold()).as_(EdgeTypes.Table.name)
            g = g.coalesce(__.unfold().outV().hasLabel(VertexTypes.Schema.value.label).
                           inE(EdgeTypes.Schema.value.label).dedup().
                           fold()).as_(EdgeTypes.Schema.name)
            g = g.coalesce(__.unfold().outV().hasLabel(VertexTypes.Cluster.value.label).
                           inE(EdgeTypes.Cluster.value.label).dedup().
                           fold()).as_(EdgeTypes.Cluster.name)
            # fetch table <- links
            for t in (EdgeTypes.BelongToTable, EdgeTypes.Generates, EdgeTypes.Tag):
                g = g.coalesce(
                    __.select('tables').inE(t.value.label).fold()).as_(t.name)
            # fetch table -> column et al links
            for t in (EdgeTypes.Column, EdgeTypes.Description, EdgeTypes.LastUpdatedAt,
                      EdgeTypes.Source, EdgeTypes.Stat):
                g = g.coalesce(
                    __.select('tables').outE(t.value.label).fold()).as_(t.name)
            # TODO: add owners, watermarks, last timestamp existing, source
            aliases = set([t.name for t in (
                EdgeTypes.Table, EdgeTypes.Schema, EdgeTypes.Cluster, EdgeTypes.BelongToTable, EdgeTypes.Generates,
                EdgeTypes.Tag, EdgeTypes.Column, EdgeTypes.Description, EdgeTypes.LastUpdatedAt,
                EdgeTypes.Source, EdgeTypes.Stat)])
            g = g.select(*aliases).unfold().select(MapColumn.values).unfold()
            g = g.local(__.union(__.outV().id(), __.valueMap(True), __.inV().id()).fold())
            cls._into_existing(g.toList(), existing)
            cls._column_entities(_g=_g, tables_ids=tables_ids, existing=existing)
        # fetch Application, User
        for ids in chunk(list(set(all_application_ids + all_owner_ids)), 5000):
            LOGGER.info(f'fetching for application/owners: {ids}')
            g = _g.V(ids).valueMap(True)
            cls._into_existing(g.toList(), existing)

    @classmethod
    def _column_entities(cls, *, _g: GraphTraversalSource, tables_ids: Iterable[str], existing: EXISTING) -> None:
        # Fetch the columns of the given tables and their Description edges.
        # fetch database -> cluster -> schema -> table links
        g = _g.V(tuple(tables_ids))
        g = g.outE(EdgeTypes.Column.value.label)
        g = g.inV().hasLabel(VertexTypes.Column.value.label).as_('columns')
        # fetch column -> links (no Stat)
        for t in [EdgeTypes.Description]:
            g = g.coalesce(__.select('columns').outE(t.value.label).fold()).as_(t.name)
        g = g.select(EdgeTypes.Description.name).unfold()
        g = g.local(__.union(__.outV().id(), __.valueMap(True), __.inV().id()).fold())
        cls._into_existing(g.toList(), existing)

    @classmethod
    def expire_connections_for_other(cls, *, _g: GraphTraversalSource, vertex_type: VertexType, keys: FrozenSet[str],
                                     existing: EXISTING) -> None:
        # Collect (into `existing`) the edges of every vertex of this type
        # whose key is NOT in `keys`, so they can later be expired.
        # V().has(label, 'key', P.without(keys)) is more intuitive but doesn't scale, so instead just find all those
        g = _g.V().hasLabel(vertex_type.label).where(__.bothE())
        g = g.values(WellKnownProperties.Key.value.name)
        all_to_expire_keys = set(g.toList()).difference(keys)
        # TODO: when any vertex ids that need something besides key
        all_to_expire = set(vertex_type.id(key=key) for key in all_to_expire_keys)
        for to_expire in chunk(all_to_expire, 1000):
            g = _g.V(tuple(to_expire)).bothE()
            g = g.local(__.union(__.outV().id(), __.valueMap(True), __.inV().id()).fold())
            cls._into_existing(g.toList(), existing)
class _GetGraph:
    """Builds the ENTITIES to be written for tables/users/applications,
    reusing what is already in EXISTING where possible."""

    @classmethod
    def expire_previously_existing(cls, *, edge_types: Sequence[Union[EdgeTypes, EdgeType]], entities: ENTITIES,
                                   existing: EXISTING) -> None:
        # Walk previously-existing edges of the given types and drop those
        # that were not re-created in this run.
        _edge_types = [e.value if isinstance(e, EdgeTypes) else e for e in edge_types]
        assert all(isinstance(e, EdgeType) for e in _edge_types), \
            f'expected all EdgeTypes or EdgeType: {edge_types}'
        for edge_type in _edge_types:
            for entity in existing[edge_type].values():
                entity_id = entity[MagicProperties.ID.value.name]
                if entity_id in entities[edge_type]:
                    continue
                # NOTE(review): at this point entity_id is known NOT to be in
                # entities[edge_type], so this del looks like it would raise
                # KeyError — confirm the intended behavior (perhaps an
                # "expired" entity should be inserted instead).
                del entities[edge_type][entity_id]

    @classmethod
    def _create(cls, _type: Union[VertexTypes, VertexType, EdgeTypes, EdgeType], _entities: ENTITIES,
                _existing: EXISTING, **_kwargs: Any) -> Mapping[str, Any]:
        # Create (or reuse) an entity of _type, registering it in both
        # _entities (to be written) and _existing (for later lookups).
        if isinstance(_type, (VertexTypes, EdgeTypes)):
            _type = _type.value
        assert isinstance(_type, (VertexType, EdgeType))
        # Let's prefer the new properties unless it's part of the id properties (e.g. Created)
        _existing_key = _get_existing_key(_type, **_kwargs)
        if _existing_key in _existing[_type]:
            names = frozenset(p.name for p in _get_key_properties(_type))
            _kwargs.update((k, v) for k, v in _existing[_type][_existing_key].items() if k in names)
            # need to do this after that update, otherwise we'll miss out on crucial properties when generating ~id
            _entity = _type.create(**_kwargs)
        else:
            _entity = _type.create(**_kwargs)
            # also put this in _existing. Say, we're creating a Column or Table and a subsequent Description expects
            # to find it. (TODO: This isn't perfect, it will miss tables_by_app, and neighbors_by_capability)
            _existing[_type][_existing_key] = _entity
        _id = _entity.get(MagicProperties.ID.value.name, None)
        if _id in _entities[_type]:
            # it'd be nice to assert _id not in _entities[_type], but we generate duplicates (e.g. Database, Cluster,
            # Schema, and their links) so let's at least ensure we're not going to be surprised with a different result
            # TODO: reenable this after we figure out why these conflict
            # assert _entities[_type][_id] == _entity, \
            if _entities[_type][_id] != _entity:
                LOGGER.info(f'we already have a type: {_type.label}, id={_id} that is different: '
                            f'{_entities[_type][_id]} != {_entity}')
        else:
            _entities[_type][_id] = _entity
        return _entities[_type][_id]

    @classmethod
    def table_metric(cls, table: Table) -> int:
        """
        :returns a number like the number of vertexes that would be added due to this table
        """
        return sum((2, 1 if table.description is not None else 0,
                    len(table.programmatic_descriptions or ()), len(table.programmatic_descriptions or ()),
                    len(table.tags or ()), sum(map(cls._column_metric, table.columns))))

    @classmethod
    def table_entities(cls, *, table_data: List[Table], entities: ENTITIES, existing: EXISTING,  # noqa: C901
                       created_at: datetime.datetime) -> None:
        """
        existing: must cover exactly the set of data. (previously existing edges will be expired herein, and possibly
        otherwise duplicate edges will be created)
        """
        for table in table_data:
            # database -> cluster -> schema -> table chain, with linking edges
            uris = TableUris.get(database=table.database, cluster=table.cluster, schema=table.schema, table=table.name)
            database = cls._create(
                VertexTypes.Database, entities, existing, name=table.database, key=uris.database)
            cluster = cls._create(VertexTypes.Cluster, entities, existing, name=table.cluster, key=uris.cluster)
            cls._create(EdgeTypes.Cluster, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: database[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: cluster[MagicProperties.ID.value.name]})
            schema = cls._create(VertexTypes.Schema, entities, existing, name=table.schema, key=uris.schema)
            cls._create(EdgeTypes.Schema, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: cluster[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: schema[MagicProperties.ID.value.name]})
            table_vertex = cls._create(VertexTypes.Table, entities, existing, name=table.name, key=uris.table,
                                       is_view=table.is_view)
            cls._create(EdgeTypes.Table, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: schema[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: table_vertex[MagicProperties.ID.value.name]})
            if table.table_writer:
                cls._application_entities(app_key=table.table_writer.id, table=table_vertex, entities=entities,
                                          existing=existing, created_at=created_at)
            if table.description is not None:
                cls._description_entities(
                    subject_uri=table_vertex['key'], to_vertex_id=table_vertex[MagicProperties.ID.value.name],
                    source='user', entities=entities, existing=existing, created_at=created_at,
                    description=table.description)
            for description in table.programmatic_descriptions:
                cls._description_entities(
                    subject_uri=table_vertex['key'], to_vertex_id=table_vertex[MagicProperties.ID.value.name],
                    source=description.source, entities=entities, existing=existing, created_at=created_at,
                    description=description.text)
            # TODO: need to call expire source != 'user' description links after
            # create tags
            for tag in table.tags:
                vertex = cls._create(VertexTypes.Tag, entities, existing, key=tag.tag_name, **vars(tag))
                cls._create(EdgeTypes.Tag, entities, existing, created=created_at, **{
                    MagicProperties.FROM.value.name: vertex[MagicProperties.ID.value.name],
                    MagicProperties.TO.value.name: table_vertex[MagicProperties.ID.value.name]})
                # since users can tag these, we shouldn't expire any of them (unlike Description where source
                # distinguishes)
            # update timestamp
            # Amundsen global timestamp
            cls._create(VertexTypes.Updatedtimestamp, entities, existing, key='amundsen_updated_timestamp',
                        latest_timestamp=created_at)
            # Table-specific timestamp
            vertex = cls._create(VertexTypes.Updatedtimestamp, entities, existing, key=table_vertex['key'],
                                 latest_timestamp=created_at)
            cls._create(EdgeTypes.LastUpdatedAt, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: table_vertex[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: vertex[MagicProperties.ID.value.name]})
            cls._column_entities(table_vertex=table_vertex, column_data=table.columns, entities=entities,
                                 existing=existing, created_at=created_at)

    @classmethod
    def _application_entities(cls, *, app_key: str, table: Mapping[str, Mapping[str, Any]], entities: ENTITIES,
                              existing: EXISTING, created_at: datetime.datetime) -> None:
        # Link the table to a real Application if one exists under any alias;
        # otherwise fall back to treating the writer as a User owner.
        # use existing to find what Application really exists, which is a bit different than how it's used for edges
        actual_keys = dict([
            (VertexTypes.Application.value.id(**dict(v)), v)
            for v in possible_existing_keys_for_application_key(app_key)])
        actual_keys = dict([(k, v) for k, v in actual_keys.items() if v in existing[VertexTypes.Application.value]])
        if actual_keys:
            vertex_id = list(actual_keys.items())[0][0]
            cls._create(EdgeTypes.Generates, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: vertex_id,
                MagicProperties.TO.value.name: table[MagicProperties.ID.value.name]})
            return
        # if app isn't found, the owner may be a user
        actual_keys = dict([(VertexTypes.User.value.id(key=app_key), _get_existing_key(VertexTypes.User, key=app_key))])
        actual_keys = dict([(k, v) for k, v in actual_keys.items() if v in existing[VertexTypes.User.value]])
        if actual_keys:
            vertex_id = list(actual_keys.items())[0][0]
            LOGGER.debug(f'{app_key} is not a real app but it was marked as owner: {table["key"]}')
            cls._create(EdgeTypes.Owner, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: table[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: vertex_id})
            return
        LOGGER.info(f'{app_key} is not a real Application, nor can we find a User to be an Owner for {table["key"]}')

    @classmethod
    def _description_entities(cls, *, description: str, source: str, subject_uri: str,
                              to_vertex_id: str, entities: ENTITIES, existing: EXISTING,
                              created_at: datetime.datetime) -> None:
        # Create a Description vertex and the edge pointing to it.
        vertex = cls._create(VertexTypes.Description, entities, existing,
                             key=make_description_uri(subject_uri=subject_uri, source=source),
                             description=description, description_source=source)
        cls._create(EdgeTypes.Description, entities, existing, created=created_at, **{
            MagicProperties.FROM.value.name: to_vertex_id,
            MagicProperties.TO.value.name: vertex[MagicProperties.ID.value.name]})

    @classmethod
    def _column_metric(cls, column: Column) -> int:
        """
        :returns a number like the number of vertexes that would be added due to this column
        """
        return sum((1, 1 if column.description is not None else 0, len(column.stats or ())))

    @classmethod
    def _column_entities(cls, *, table_vertex: Mapping[str, str], column_data: Sequence[Column], entities: ENTITIES,
                         existing: EXISTING, created_at: datetime.datetime) -> None:
        # Create each column vertex, its edge from the table, and any
        # description/stat satellites.
        for column in column_data:
            column_vertex = cls._create(VertexTypes.Column, entities, existing, name=column.name,
                                        key=make_column_uri(table_uri=table_vertex['key'], column_name=column.name),
                                        col_type=column.col_type, sort_order=column.sort_order)
            cls._create(EdgeTypes.Column.value, entities, existing, created=created_at, **{
                MagicProperties.FROM.value.name: table_vertex[MagicProperties.ID.value.name],
                MagicProperties.TO.value.name: column_vertex[MagicProperties.ID.value.name]})
            # Add the description if present
            if column.description is not None:
                cls._description_entities(
                    subject_uri=column_vertex['key'], to_vertex_id=column_vertex[MagicProperties.ID.value.name],
                    source='user', entities=entities, existing=existing, created_at=created_at,
                    description=column.description)
            # Add stats if present
            if column.stats:
                for stat in column.stats:
                    vertex = cls._create(
                        VertexTypes.Stat, entities, existing,
                        key=make_column_statistic_uri(column_uri=column_vertex['key'], statistic_type=stat.stat_type),
                        # stat.stat_val is a str, but some callers seem to put ints in there
                        stat_val=(None if stat.stat_val is None else str(stat.stat_val)),
                        **dict([(k, v) for k, v in vars(stat).items() if k != 'stat_val']))
                    cls._create(EdgeTypes.Stat, entities, existing, created=created_at, **{
                        MagicProperties.FROM.value.name: column_vertex[MagicProperties.ID.value.name],
                        MagicProperties.TO.value.name: vertex[MagicProperties.ID.value.name]})

    @classmethod
    def user_entities(cls, *, user_data: List[User], entities: ENTITIES, existing: EXISTING,
                      created_at: datetime.datetime) -> None:
        # Create a User vertex per user; other_key_values is deliberately excluded.
        for user in user_data:
            # TODO: handle this properly
            cls._create(VertexTypes.User, entities, existing, key=user.user_id,
                        **dict([(k, v) for k, v in vars(user).items() if k != 'other_key_values']))

    @classmethod
    def app_entities(cls, *, app_data: List[Application], entities: ENTITIES, existing: EXISTING,
                     created_at: datetime.datetime) -> None:
        # Create an Application vertex per app, copying all its attributes.
        for app in app_data:
            cls._create(VertexTypes.Application, entities, existing, key=app.id,
                        **dict((k, v) for k, v in vars(app).items()))

    @classmethod
    def _expire_other_edges(
            cls, *, edge_type: Union[EdgeTypes, EdgeType], vertex_id: str, to_or_from_vertex: MagicProperties,
            entities: ENTITIES, existing: EXISTING, created_at: datetime.datetime) -> None:
        """
        Use this in lieu of expire_previously_existing.
        :param edge_type:
        :param vertex_id:
        :param to_or_from_vertex:
        :param entities:
        :param existing:
        :param created_at:
        :return:
        """
        assert to_or_from_vertex in (MagicProperties.FROM, MagicProperties.TO), \
            f'only FROM or TO allowed for {to_or_from_vertex}'
        edge_type = ensure_edge_type(edge_type)
        # edges of that type....
        edges = tuple(e for e in existing.get(edge_type, {}).values()
                      # to/from the vertex
                      if e[to_or_from_vertex.value.name] == vertex_id
                      # edges that aren't recreated
                      and e[MagicProperties.ID.value.name] not in entities.get(edge_type, {}))
        # expire those:
        # NOTE(review): these ids were just filtered as NOT present in
        # entities[edge_type], so this del appears to raise KeyError —
        # confirm the intended expiry mechanism.
        for entity in edges:
            del entities[edge_type][entity[MagicProperties.ID.value.name]]
class GetGraph:
    """Public builder: fetches EXISTING from the graph, accumulates ENTITIES
    via _GetGraph, and on complete() runs the deferred expiry callables and
    returns the entities to write."""

    def __init__(self, *, g: GraphTraversalSource, created_at: Optional[datetime.datetime] = None) -> None:
        self.g = g
        self.created_at = datetime.datetime.now() if created_at is None else created_at
        self.existing = new_existing()
        self.entities = new_entities()
        # expiry steps queued by the add_* methods, executed in complete()
        self._expire_previously_existing_callables: List[Callable[[], None]] = list()

    @staticmethod
    def table_metric(table: Table) -> int:
        # Delegates to the internal estimate of vertexes added per table.
        return _GetGraph.table_metric(table)

    def add_table_entities(self, table_data: List[Table]) -> "GetGraph":
        # Fetch what already exists for these tables, then build their entities.
        _FetchExisting.table_entities(table_data=table_data, _g=self.g, existing=self.existing)
        _GetGraph.table_entities(
            table_data=table_data, entities=self.entities, existing=self.existing, created_at=self.created_at)
        self._expire_previously_existing_callables.append(self._expire_previously_existing_table_entities)
        return self

    def _expire_previously_existing_table_entities(self) -> None:
        _GetGraph.expire_previously_existing(
            edge_types=(EdgeTypes.Column, EdgeTypes.Generates, EdgeTypes.Owner),
            entities=self.entities, existing=self.existing)

    def add_user_entities(self, user_data: List[User]) -> "GetGraph":
        _GetGraph.user_entities(
            user_data=user_data, entities=self.entities, existing=self.existing, created_at=self.created_at)
        self._expire_previously_existing_callables.append(self._expire_previously_existing_user_entities)
        return self

    def _expire_previously_existing_user_entities(self) -> None:
        # Users are never expired.
        pass

    def add_app_entities(self, app_data: List[Application]) -> "GetGraph":
        _GetGraph.app_entities(
            app_data=app_data, entities=self.entities, existing=self.existing, created_at=self.created_at)
        self._expire_previously_existing_callables.append(self._expire_previously_existing_app_entities)
        return self

    def _expire_previously_existing_app_entities(self) -> None:
        # Applications are never expired.
        pass

    def complete(self) -> ENTITIES:
        # Run the queued expiry steps, then hand back (and drop) the builder state.
        for c in self._expire_previously_existing_callables:
            c()
        entities = self.entities
        del self.entities
        del self.existing
        return entities

    @classmethod
    def default_created_at(cls, created_at: Optional[datetime.datetime]) -> datetime.datetime:
        return datetime.datetime.now() if created_at is None else created_at

    @classmethod
    def table_entities(cls, *, table_data: List[Table], g: GraphTraversalSource,
                       created_at: Optional[datetime.datetime] = None) -> ENTITIES:
        # One-shot convenience wrapper around the builder.
        return GetGraph(g=g, created_at=created_at).add_table_entities(table_data).complete()

    @classmethod
    def user_entities(cls, *, user_data: List[User], g: GraphTraversalSource,
                      created_at: Optional[datetime.datetime] = None) -> ENTITIES:
        return GetGraph(g=g, created_at=created_at).add_user_entities(user_data).complete()

    @classmethod
    def app_entities(cls, *, app_data: List[Application], g: GraphTraversalSource,
                     created_at: Optional[datetime.datetime] = None) -> ENTITIES:
        return GetGraph(g=g, created_at=created_at).add_app_entities(app_data).complete()

    @classmethod
    def expire_connections_for_other(
            cls, *, vertex_type: Union[VertexTypes, VertexType], keys: Iterable[str], g: GraphTraversalSource,
            created_at: Optional[datetime.datetime] = None) -> ENTITIES:
        """
        There's no builder style for this since the expiration implementation is presumptive.
        """
        if created_at is None:
            created_at = datetime.datetime.now()
        assert created_at is not None
        if not isinstance(keys, frozenset):
            keys = frozenset(keys)
        assert isinstance(keys, frozenset)
        vertex_type = ensure_vertex_type(vertex_type)
        existing = new_existing()
        entities = new_entities()
        _FetchExisting.expire_connections_for_other(vertex_type=vertex_type, keys=keys, existing=existing, _g=g)
        _GetGraph.expire_previously_existing(edge_types=tuple(t for t in EdgeTypes), entities=entities,
                                             existing=existing)
        return entities
|
#Dependencies
import os
import shutil
#Main
def dir_files(dir_path):
    """List the file names directly inside *dir_path* (non-recursive).

    Returns (None, names) on success, or (error_message, False) on failure,
    following this module's (error, result) convention. Note: a missing
    directory yields (None, []) because os.walk produces no entries for it.
    """
    try:
        files = []
        for _root, _dirs, filenames in os.walk(dir_path):
            files.extend(filenames)
            break  # only the top level is wanted
        return None, files
    except OSError:
        # narrowed from a bare except, which also swallowed KeyboardInterrupt
        return "Unable to get directory files.", False
def recursive_dir_files(dir_path):
    """Recursively list the full paths of all files under *dir_path*.

    Returns (None, paths) on success, or (error_message, False) on failure.
    """
    try:
        files = []
        for root, _directories, filenames in os.walk(dir_path):
            for filename in filenames:
                files.append(os.path.join(root, filename))
        return None, files
    except OSError:
        # narrowed from a bare except, which also swallowed KeyboardInterrupt
        return "Unable to recursive in the directory.", False
def read_file(file_path):
    """Read *file_path* as text.

    Returns (None, contents) on success, or (error_message, False) on failure.
    """
    try:
        # 'with' guarantees the handle is closed; the original leaked it.
        with open(file_path, "r") as file:
            return None, file.read()
    except (OSError, UnicodeDecodeError):
        return "Unable to find/read the file.", False
def write_file(file_path, content):
    """Write *content* to *file_path*.

    Returns (None, True) on success, (message, False) on failure.
    """
    try:
        # "with" replaces the manual open/write/close and closes on error too.
        with open(file_path, "w") as handle:
            handle.write(content)
    except OSError:  # was a bare except
        return "Unable to write the file.", False
    return None, True
def remove_file(file_path):
    """Delete *file_path*; returns (None, True) or (message, False)."""
    try:
        os.remove(file_path)
        return None, True
    except OSError:  # was a bare except
        return "Unable to remove the file.", False
def remove_dir(dir_path):
    """Recursively delete *dir_path*; returns (None, True) or (message, False)."""
    try:
        shutil.rmtree(dir_path)
        return None, True
    except OSError:  # was a bare except
        return "Unable to remove the directory.", False
def rename_file(file_path, new_name):
    """Rename *file_path* to *new_name*; returns (None, True) or (message, False)."""
    try:
        os.rename(file_path, new_name)
        return None, True
    except OSError:  # was a bare except
        return "Unable to rename the file.", False
def copy_file(file_path, new_path):
    """Copy *file_path* to *new_path*; returns (None, True) or (message, False)."""
    try:
        shutil.copy(file_path, new_path)
        return None, True
    except OSError:  # was a bare except
        return "Unable to copy the file.", False
def file_exists(file_path):
    """Return (exists, True); (message, False) only if the check itself fails.

    Note os.path.isfile() swallows OSErrors internally, so the except branch
    only guards pathological arguments.
    """
    try:
        return os.path.isfile(file_path), True
    except Exception:  # was a bare except
        return "File does not exist.", False
def dir_exists(dir_path):
    """Return (exists, True); (message, False) only if the check itself fails.

    Note os.path.isdir() swallows OSErrors internally, so the except branch
    only guards pathological arguments.
    """
    try:
        return os.path.isdir(dir_path), True
    except Exception:  # was a bare except
        return "Directory does not exist.", False
|
from django.db import models
from smart_bookmarks.scrapers.db import ScrapePageTaskManager
class ScrapePageTask(models.Model):
    """Pending page-scrape task, at most one per bookmark.

    Uniqueness is enforced twice: by the OneToOneField itself and by an
    explicit named UniqueConstraint (the latter gives a stable constraint
    name in the database).
    """
    id = models.AutoField(primary_key=True)
    # Reverse accessor on core.Bookmark is underscore-prefixed, i.e. not
    # meant for direct use outside this app.
    bookmark = models.OneToOneField(
        "core.Bookmark", on_delete=models.CASCADE, related_name="_scrape_page_task"
    )
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    # Custom manager imported from smart_bookmarks.scrapers.db.
    objects = ScrapePageTaskManager()

    class Meta:
        db_table = "scrape_page_task"
        constraints = [
            models.UniqueConstraint(
                fields=["bookmark"], name="uq_scrape_page_task_bookmark_id"
            )
        ]
class ScrapePageError(models.Model):
    """Last scrape failure recorded for a bookmark (one per bookmark)."""
    id = models.AutoField(primary_key=True)
    # Underscore-prefixed reverse accessor, mirroring ScrapePageTask.
    bookmark = models.OneToOneField(
        "core.Bookmark", on_delete=models.CASCADE, related_name="_scrape_page_error"
    )
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    message = models.CharField(max_length=1024)  # human-readable error text

    class Meta:
        db_table = "scrape_page_error"
|
from ctypes import alignment
import discord
from discord.commands import Option, slash_command
from discord.ext import commands
from discord.ext.commands.context import Context
# Deploy-time placeholders.
guild_id_config = [CHANGEME]  # guild (server) ids the slash command registers to
# Fixed typo: was "static_file_locatin", but the cog reads
# "static_file_location" -- the old name caused a NameError at runtime.
static_file_location = 'CHANGEME'  # directory containing the pre-rendered chart images
class CRSDChart1H(commands.Cog):
    """Cog exposing /crsdchart1h, which posts a pre-rendered 1-hour CRSD chart."""

    def __init__(self, bot):
        self.bot = bot

    @slash_command(guild_ids=guild_id_config, name="crsdchart1h", description="Show 1 Hour CRSD Chart on TinyChart (updated every 15 minutes)")
    async def crsdchart1h(self, ctx):
        # Requires a module-level constant named exactly
        # "static_file_location" (beware the *_locatin typo elsewhere).
        await ctx.respond(file=discord.File(static_file_location + 'crsd_chart_1H.png'))

    @crsdchart1h.error
    async def crsdchart1h_error(self, ctx: Context, error):
        # Surface the error privately to the invoking user.
        return await ctx.respond(
            error, ephemeral=True
        )  # ephemeral makes "Only you can see this" message
def setup(bot):
    """Extension entry point used by the discord bot's extension loader."""
    cog = CRSDChart1H(bot)
    bot.add_cog(cog)
from PyQt5.QtWidgets import QLabel, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QGroupBox, QPlainTextEdit
from PyQt5.QtCore import pyqtSignal, Qt
from sys import float_info as fli
import dataDisplay as dd
import numpy as np
class metaInfoWidget(QGroupBox):
    """Group box holding a free-text "Notes" editor plus (de)serialization
    to/from the application's meta-string format: "key=value" fields joined
    by vertical tabs ('\\v'), with newlines escaped as a literal backslash-n.
    """

    def __init__(self):
        # Was QWidget.__init__(self); super() initializes QGroupBox properly.
        super().__init__()
        layout = QHBoxLayout()
        self.__edtNotes = QPlainTextEdit("")
        self.__edtNotes.setFixedHeight(120)
        layout.addWidget(self.__edtNotes)
        self.setLayout(layout)
        self.setTitle("Notes")
        self.setEnabled(False)  # enabled later via reset() once data is loaded

    def reset(self, enabled=True):
        """Clear the notes and (re)enable the widget."""
        self.setEnabled(enabled)
        self.setNotes('')

    def getNotes(self):
        """Return the current notes text."""
        return self.__edtNotes.toPlainText()

    def setNotes(self, value):
        """Replace the notes text with *value*."""
        self.__edtNotes.setPlainText(value)

    def load_from_meta_string(self, view_string):
        """Populate the notes from a meta string.

        Returns an error message on failure, otherwise None (implicitly).
        """
        if view_string is None:
            return
        try:
            for field in view_string.split('\v'):
                item = field.split('=')
                # Only the "nts" (notes) field is meaningful here.
                if len(item) == 2 and item[0] == 'nts':
                    self.setNotes(str(item[1]).replace('\\n', '\n'))
        except Exception:  # was a bare except; keep report-don't-raise contract
            return 'Meta data might be corrupted.'

    def get_meta_string(self):
        """Serialize the notes into the "nts=..." meta field.

        (A stray debug print() was removed here.)
        NOTE(review): '=' inside the notes is not escaped -- confirm the
        format tolerates it.
        """
        nts = self.getNotes().replace('\n', '\\n')
        return 'nts=' + nts
|
"""
OpenTMI client exceptions
"""
class OpentmiException(Exception):
    """Base exception for the OpenTMI client."""

    def __init__(self, message):
        """Create the exception.

        :param message: human-readable description
        """
        super().__init__(message)
        # Kept as an attribute for callers that read it directly.
        self.message = message
class TransportException(OpentmiException):
    """Raised for transport-level failures."""

    def __init__(self, message, code=None):
        """Create the exception.

        :param message: string
        :param code: status_code or None
        """
        super().__init__(message)
        # HTTP-style status code when one is available.
        self.code = code
|
from collections import defaultdict
def num_occupied_adjacent_seats(grid, r, c):
    """Count occupied ('#') cells among the eight neighbours of (r, c).

    *grid* is a defaultdict(str) keyed by (row, col); missing cells read
    as "" and therefore never count.
    """
    neighbours = [(dr, dc)
                  for dr in (-1, 0, 1)
                  for dc in (-1, 0, 1)
                  if (dr, dc) != (0, 0)]
    return sum(grid[r + dr, c + dc] == "#" for dr, dc in neighbours)
def iterate(grid):
    """Apply one round of part-1 rules and return (new_grid, did_change).

    Empty seats ('L') with zero occupied neighbours fill; occupied seats
    ('#') with four or more occupied neighbours empty; everything else is
    carried over unchanged.
    """
    updated = defaultdict(str)
    changed = False
    for (row, col), seat in list(grid.items()):
        if seat == "L" and num_occupied_adjacent_seats(grid, row, col) == 0:
            updated[row, col] = "#"
            changed = True
        elif seat == "#" and num_occupied_adjacent_seats(grid, row, col) >= 4:
            updated[row, col] = "L"
            changed = True
        else:
            updated[row, col] = grid[row, col]
    return updated, changed
def num_occupied_adjacent_seats_v2(grid, r, c):
    """Count occupied seats *visible* from (r, c) along the eight rays.

    Floor cells (".") are looked through; the first non-floor cell in each
    direction decides.  Missing cells come back as "" from the
    defaultdict(str), which terminates the ray and counts as empty.

    (The original unrolled the eight rays into ~50 duplicated lines; this
    collapses them into one loop with identical behaviour.)
    """
    # Same order as the unrolled original: up, down, left, right, then
    # the four diagonals.
    directions = ((-1, 0), (1, 0), (0, -1), (0, 1),
                  (-1, -1), (-1, 1), (1, -1), (1, 1))
    count = 0
    for dr, dc in directions:
        i, j = r + dr, c + dc
        # Walk past floor cells until a seat or the empty default stops us.
        while grid[i, j] == ".":
            i += dr
            j += dc
        count += int(grid[i, j] == "#")
    return count
def iterate_v2(grid):
    """Apply one round of part-2 rules and return (new_grid, did_change).

    Like iterate(), but neighbours are counted by line of sight and the
    crowding threshold is five instead of four.
    """
    updated = defaultdict(str)
    changed = False
    for (row, col), seat in list(grid.items()):
        if seat == "L" and num_occupied_adjacent_seats_v2(grid, row, col) == 0:
            updated[row, col] = "#"
            changed = True
        elif seat == "#" and num_occupied_adjacent_seats_v2(grid, row, col) >= 5:
            updated[row, col] = "L"
            changed = True
        else:
            updated[row, col] = grid[row, col]
    return updated, changed
def main():
    """Load the seat grid from input.txt, run each rule set to a fixed
    point, and print the occupied-seat counts for both parts."""
    original_grid = defaultdict(str)
    with open("input.txt", "r") as fp:
        for row, line in enumerate(fp):
            for col, char in enumerate(line.strip()):
                original_grid[row, col] = char

    # Part I first, then Part II, each restarting from the original grid.
    for label, step in (("Part I:", iterate), ("Part II:", iterate_v2)):
        grid, changed = original_grid, True
        while changed:
            grid, changed = step(grid)
        print(label, len([v for v in grid.values() if v == "#"]))
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
import typing
import sys
def main() -> None:  # annotation fixed: the function returns normally, so NoReturn was wrong
    """Count distinct strings reachable from s by adjacent swaps.

    Reads a string s and an integer k from stdin, performs min(k, 1000)
    BFS rounds where each round extends every known state by one swap of
    neighbouring characters, and prints the number of distinct states seen.
    The 1000 cap presumably saturates the state space for the intended
    constraints -- TODO confirm against the problem limits.
    """
    s = list(input())
    k = int(input())
    n = len(s)
    a = set()  # all states reached so far (tuples, so they are hashable)
    a.add(tuple(s))
    for _ in range(min(k, 1000)):
        b = set()  # states reachable with exactly one more swap
        for t in a:
            t = list(t)
            for i in range(n - 1):
                t[i], t[i + 1] = t[i + 1], t[i]  # apply swap at position i
                b.add(tuple(t))
                t[i], t[i + 1] = t[i + 1], t[i]  # undo the swap (backtrack)
        a |= b
    print(len(a))


main()
"""
Copyright (c) 2021 Ricardo Baylon rbaylon@outlook.com
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from views import app
from flask import request, redirect, url_for, jsonify, make_response
import jwt
import datetime
from functools import wraps
from Utils.variables import AF, IP
from Utils.validators import AccountValidator, IpValidator
import netifaces
from controllers import InterfaceController, OsController, UserController
from ipaddress import IPv4Interface
from werkzeug.security import check_password_hash
# Module-level validator singletons shared by all request handlers below.
av = AccountValidator()
ipv = IpValidator()
def token_required(f):
    """Decorator enforcing a valid JWT bearer token on a Flask view.

    Reads "Authorization: Bearer <token>", verifies it against the app's
    secret (HS256), and passes the token's username to the wrapped view
    as its first positional argument.  Responds 401 on any failure.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            return jsonify({'message' : 'Authorization required!'}), 401
        try:
            token = auth_header.split(" ")[1]
        except IndexError:
            return jsonify({'message' : 'Token is missing!'}), 401
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=["HS256"])
            current_user = data['username']
        except Exception as e:
            # Broad catch is deliberate: any decode problem means 401.
            return jsonify({'message' : 'Token failed {}'.format(str(e))}), 401
        return f(current_user, *args, **kwargs)
    return decorated
@app.route('/api/login')
def apilogin():
    """Issue a JWT (60-minute expiry) for valid HTTP basic-auth credentials.

    Responds 401 with a WWW-Authenticate challenge when the username,
    password, or credential check fails.
    """
    auth = request.authorization
    if not auth or not auth.username:
        return make_response('Username required!', 401,
                             {'WWW-Authenticate' : 'Basic realm="Username required!"'})
    if not auth.password:
        return make_response('Password required!', 401,
                             {'WWW-Authenticate' : 'Basic realm="Password required!"'})
    if av.is_username_valid(auth.username) and av.is_password_valid(auth.password):
        user = UserController().getuser(username=auth.username)
        if user and check_password_hash(user.password, auth.password):
            expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=60)
            token = jwt.encode({'username' : auth.username, 'exp' : expiry},
                               app.config['SECRET_KEY'])
            return jsonify({ 'token' : token })
    return make_response('Invalid user or password!', 401,
                         {'WWW-Authenticate' : 'Basic realm="Invalid user!"'})
@app.route('/api', methods=['GET'])
@token_required
def apihome(current_user):
    """Return the host name of the machine the API runs on."""
    return jsonify({'Hostname' : OsController().gethostname()})
@app.route('/api/interfaces', methods=['GET'])
@token_required
def interfaces(current_user):
    """List the network interfaces known to the interface controller."""
    return jsonify({'interfaces' : InterfaceController().getinterfaces()})
@app.route('/api/interfaces/<iface>', defaults={'af': 'all', 'index': '0'}, methods=['GET'])
@app.route('/api/interfaces/<iface>/<af>', defaults={'index': 'all'}, methods=['GET', 'POST'])
@app.route('/api/interfaces/<iface>/<af>/<string:index>', methods=['GET', 'PUT', 'DELETE'])
@token_required
def iface_addr(current_user, iface, af, index):
    """CRUD endpoint for addresses on a network interface.

    GET returns configured addresses; POST adds an inet address (or VIP);
    PUT updates the address at *index*; DELETE removes it.

    Bug fixes vs. the original:
      * ``type(ret) != 'str'`` compared a type object to the string 'str',
        which is always unequal, so controller error strings were silently
        treated as success at four sites.  Replaced with isinstance checks.
      * the VIP netmask filter used ``or`` where ``and`` was intended, so
        the condition was always true and the first address was picked even
        when it was a /32 host route.
      * DELETE/PUT failure messages said "add".
    """
    if af != 'all':
        oldaf = af  # keep the textual family name for AF lookups below
        if af == 'inet':
            af = netifaces.AF_INET
        elif af == 'inet6':
            af = netifaces.AF_INET6
        elif af == 'mac':
            af = netifaces.AF_LINK
        else:
            return jsonify({
                'Error' : 'Invalid AF {}'.format(af),
                'Valid Options' : 'inet, inet6, mac'
            }), 400
    ic = InterfaceController()
    if request.method == 'GET':
        if af == 'all':
            cfgdata = ic.getifaddresses(iface)
        else:
            if index:
                cfgdata = ic.getifaddresses(iface, af, index)
            else:
                cfgdata = ic.getifaddresses(iface, af)
        return jsonify(cfgdata)
    if request.method == 'POST':
        # JSON input format:
        # {"addr": "192.168.254.100", "netmask": "255.255.255.0"}
        cfgdata = ic.getinterface(iface)
        if 'Failed' in cfgdata:
            return jsonify(cfgdata), 400
        if request.is_json:
            data = request.get_json()
            for key in data:
                if key not in IP['interface_keys']:
                    return jsonify({'Error' : 'invalid paramter {}'.format(key)}), 400
        else:
            return jsonify({'Error' : 'Request must be application/json'}), 400
        # NOTE(review): 'oldaf' is unbound if af == 'all' ever reaches POST;
        # the route definitions appear to prevent that -- confirm.
        if oldaf in AF.values():
            inetaddr = ic.getifaddresses(iface, af)
            if AF[af] == 'inet':
                if 'Failed' in inetaddr:
                    # assume interface has no ip
                    iip = ipv.isIpInterface(data['addr'], data['netmask'])
                    if iip['status']:
                        # NOTE(review): 'addifaf' here vs 'addifaddr' below --
                        # verify against InterfaceController; may be a typo.
                        ret = ic.addifaf(iface, data, af)
                        if not isinstance(ret, str):
                            return redirect(url_for('iface_addr', iface=iface, af=AF[af]))
                        else:
                            return jsonify({'Error' : 'Failed to add address: {}'.format(ret)}), 400
                    else:
                        return jsonify({'Error' : '{}'.format(iip['interface'])}), 400
                else:
                    # assume vip addition: find the primary (non host-route)
                    # address to validate the VIP's subnet against
                    for i in inetaddr:
                        if i['netmask'] != '255.255.255.255' and i['netmask'] != '32':
                            interface = IPv4Interface('{}/{}'.format(i['addr'], i['netmask']))
                            break
                    # NOTE(review): 'interface' stays unbound if every address
                    # is a /32 host route.
                    iip = ipv.isIpInterface(data['addr'], '255.255.255.255')
                    if iip['status']:
                        if ipv.isIpInNetwork(iip['interface'].ip, interface.network):
                            data['netmask'] = '255.255.255.255'
                            ret = ic.addifaddr(iface, data, af)
                            if not isinstance(ret, str):
                                return redirect(url_for('iface_addr', iface=iface, af=AF[af]))
                            else:
                                return jsonify({'Error' : 'Failed to add address: {}'.format(ret)}), 400
                        else:
                            return jsonify({'Error' : 'Failed to add address: {}. Outside of subnet {}'.format(iip['interface'].ip, interface.network)}), 400
                    else:
                        return jsonify({'Error' : '{}'.format(iip['interface'])}), 400
            elif AF[af] == 'inet6':
                return jsonify({'Message' : 'IPv6 not yet supported'}), 200
            elif AF[af] == 'mac':
                return jsonify({'Message' : 'manual mac address entry not yet supported'}), 200
    if request.method == 'DELETE':
        # Valid input { 'confirm_delete': 'yes' }
        if request.is_json:
            data = request.get_json()
        else:
            return jsonify({'Error' : 'Request must be application/json'}), 400
        for key in data:
            if key not in IP['delete_keys']:
                return jsonify({'Error' : 'Invalid delete key: {}'.format(key)}), 400
        ret = ic.delifaddr(iface, data, af, index)
        if not isinstance(ret, str):
            return redirect(url_for('iface_addr', iface=iface, af=AF[af]))
        else:
            # message fixed: this branch deletes, it does not add
            return jsonify({'Error' : 'Failed to delete address: {}'.format(ret)}), 400
    if request.method == 'PUT':
        # Valid input {"addr": "192.168.254.100", "netmask": "255.255.255.0"}
        if request.is_json:
            data = request.get_json()
        else:
            return jsonify({'Error' : 'Request must be application/json'}), 400
        for key in data:
            if key not in IP['interface_keys']:
                return jsonify({'Error' : 'Invalid update key: {}'.format(key)}), 400
        iip = ipv.isIpInterface(data['addr'], data['netmask'])
        if iip['status']:
            if ic.isifipindex(iface, af, index):
                inetaddr = ic.getifaddresses(iface, af, index)
                interface = IPv4Interface('{}/{}'.format(inetaddr['addr'], inetaddr['netmask']))
                if ipv.isIpInNetwork(iip['interface'].ip, interface.network):
                    ret = ic.modifaddr(iface, data, af, index)
                    if not isinstance(ret, str):
                        return redirect(url_for('iface_addr', iface=iface, af=AF[af]))
                    else:
                        # message fixed: this branch updates, it does not add
                        return jsonify({'Error' : 'Failed to update address {}'.format(ret)}), 400
                else:
                    return jsonify({'Error' : 'Failed to update address: {}. Outside of subnet {}'.format(data['addr'], interface.network)}), 400
            else:
                return jsonify({'Error' : 'Invalid index {}'.format(index)}), 400
        else:
            return jsonify({'Error' : '{}'.format(iip['interface'])}), 400
|
"""Embed a list of images in a PDF.
Images are placed top-centered.
"""
from typing import Sequence
from . import analyze, render
def convert(image_fnames: Sequence[str], pdf_name: str) -> None:
    """Converts the given image files into a PDF document.

    Images are embedded as they are and not converted to e.g. JPGs.

    :param image_fnames: Sequence of image file names.
    :param pdf_name: Name of PDF to store images in.
    :raises FileNotFound: if one of the files in `image_fnames` could not be opened.
    """
    # Analyze the inputs, then hand the results straight to the renderer.
    render.images(analyze.images(image_fnames), pdf_name)
|
"""
NAME
tarea_POO.py
VERSION
0.0.1
AUTHOR
Rodrigo Daniel Hernandez Barrera <<rodrigoh@lcg.unam.mx>>
DESCRIPTION
This is the example of a class to create characters in a video game,
use inheritance to create villains and heroes.
CATEGORY
Video game
GITHUB REPOSITORY
https://github.com/rod13-afk/python_class/blob/master/Tareas/tarea_POO.py
"""
# This is a class to create the characters of a video game
class character():
    """Base class for video-game characters with simple movement and
    combat stats.

    Fix: the original defined both an ``attack`` attribute (the stat) and an
    ``attack()`` method; the instance attribute always shadowed the method,
    making it uncallable (TypeError).  The action is therefore exposed as
    ``perform_attack`` while ``attack`` remains the numeric stat, which is
    backward compatible with all working uses.
    """

    def __init__(self, name, gender):
        self.name = name
        self.gender = gender
        self.attack = 10      # attack power stat
        self.defense = 10     # defense stat
        self.max_speed = 8    # speed ceiling used by the move_* methods
        self.speed = 0        # current speed
        self.health = 13
        self.stamina = 9

    def move_up(self):
        self.speed = self.max_speed - 1

    def move_down(self):
        # NOTE(review): exceeds max_speed by one; looks intentional in the
        # original, but worth confirming.
        self.speed = self.max_speed + 1

    def move_right(self):
        self.speed = self.max_speed

    def move_left(self):
        self.speed = self.max_speed

    def perform_attack(self):
        """Attacking costs one stamina point."""
        self.stamina = self.stamina - 1

    def defend(self):
        """Defending costs one health point."""
        self.health = self.health - 1
class villain(character):
    """Character subtype representing a villain."""

    def __init__(self, name, gender):
        super().__init__(name, gender)
        # Flag distinguishing villains; always True on creation.
        self.rob_a_bank = True

    def move(self):
        # Placeholder: no villain-specific movement yet.
        pass
class hero(character):
    """Character subtype representing a hero."""

    def __init__(self, name, gender):
        super().__init__(name, gender)
        # Flag distinguishing heroes; always True on creation.
        self.defeat_the_villain = True

    def move_up(self):
        # Placeholder override: disables the base upward movement.
        pass
# Demo: instantiate one of each and dump their attributes.
# Fixed: the original rebound the class names "villain" and "hero" to the
# instances, shadowing the classes; distinct variable names avoid that.
villain_instance = villain("Chucho", "Hombre")
print(villain_instance.__dict__)
hero_instance = hero("Mateo", "Hombre")
print(hero_instance.__dict__)
|
"""Train Faster-RCNN end to end."""
import argparse
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import logging
import time
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet import autograd
from gluoncv.model_zoo import get_model
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultTrainTransform
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.utils.metrics.accuracy import Accuracy
from dataset import Dataset
from gluoncv import model_zoo
from light_head_rcnn import My_LHRCNN
from faster_rcnn import Faster_rcnn
def parse_args():
    """Parse command-line arguments for end-to-end Faster-RCNN training.

    Several numeric options are declared as strings with '' meaning
    "unset"; they are normalized to concrete defaults at the bottom.
    """
    parser = argparse.ArgumentParser(description='Train Faster-RCNN networks e2e.')
    parser.add_argument('--network', type=str, default='resnet50_v2a',
                        help="Base network name which serves as feature extraction base.")
    parser.add_argument('--dataset', type=str, default='voc',
                        help='Training dataset. Now support voc.')
    parser.add_argument('--short', type=str, default='',
                        help='Resize image to the given short side side, default to 600 for voc.')
    parser.add_argument('--max-size', type=str, default='',
                        help='Max size of either side of image, default to 1000 for voc.')
    parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
                        default=4, help='Number of data workers, you can use larger '
                        'number to accelerate data loading, if you CPU and GPUs are powerful.')
    parser.add_argument('--gpus', type=str, default='0',
                        help='Training with GPUs, you can specify 1,3 for example.')
    parser.add_argument('--epochs', type=str, default='',
                        help='Training epochs.')
    parser.add_argument('--resume', type=str, default='',
                        help='Resume from previously saved parameters if not None. '
                        'For example, you can resume from ./faster_rcnn_xxx_0123.params')
    parser.add_argument('--start-epoch', type=int, default=0,
                        help='Starting epoch for resuming, default is 0 for new training.'
                        'You can specify it to 100 for example to start from 100 epoch.')
    parser.add_argument('--lr', type=str, default='',
                        help='Learning rate, default is 0.001 for voc single gpu training.')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-epoch', type=str, default='',
                        help='epoches at which learning rate decays. default is 14,20 for voc.')
    parser.add_argument('--lr-warmup', type=str, default='',
                        help='warmup iterations to adjust learning rate, default is 0 for voc.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum, default is 0.9')
    parser.add_argument('--wd', type=str, default='',
                        help='Weight decay, default is 5e-4 for voc')
    parser.add_argument('--log-interval', type=int, default=100,
                        help='Logging mini-batch interval. Default is 100.')
    parser.add_argument('--save-prefix', type=str, default='',
                        help='Saving parameter prefix')
    parser.add_argument('--save-interval', type=int, default=1,
                        help='Saving parameters epoch interval, best model will always be saved.')
    parser.add_argument('--val-interval', type=int, default=1,
                        help='Epoch interval for validation, increase the number will reduce the '
                        'training time if validation is slow.')
    parser.add_argument('--seed', type=int, default=233,
                        help='Random seed to be fixed.')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Print helpful debugging info once set.')
    args = parser.parse_args()
    # Normalize ''-defaults to concrete values.
    # NOTE(review): the help text above says 600/1000 for voc, but the
    # effective defaults here are 800/1024 -- confirm which is intended.
    args.short = int(args.short) if args.short else 800
    args.max_size = int(args.max_size) if args.max_size else 1024
    args.epochs = int(args.epochs) if args.epochs else 200
    args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '14,20'
    args.lr = float(args.lr) if args.lr else 0.001
    # -1 disables warmup (train() compares batch index i <= lr_warmup).
    args.lr_warmup = args.lr_warmup if args.lr_warmup else -1
    args.wd = float(args.wd) if args.wd else 5e-4
    return args
class RPNAccMetric(mx.metric.EvalMetric):
    """Binary accuracy of the RPN objectness head, averaged over the
    anchors selected by rpn_weight."""

    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')

    def update(self, labels, preds):
        # label: [rpn_label, rpn_weight]
        # preds: [rpn_cls_logits]
        rpn_label, rpn_weight = labels
        rpn_cls_logits = preds[0]
        # calculate num_inst (average on those fg anchors)
        num_inst = mx.nd.sum(rpn_weight)
        # cls_logits (b, c, h, w) red_label (b, 1, h, w)
        # pred_label = mx.nd.argmax(rpn_cls_logits, axis=1, keepdims=True)
        # sigmoid(logit) >= 0.5 <=> logit >= 0: binary objectness decision
        pred_label = mx.nd.sigmoid(rpn_cls_logits) >= 0.5
        # label (b, 1, h, w); rpn_weight masks out ignored anchors
        num_acc = mx.nd.sum((pred_label == rpn_label) * rpn_weight)
        self.sum_metric += num_acc.asscalar()
        self.num_inst += num_inst.asscalar()
class RPNL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 regression loss of the RPN box head, normalized by the
    number of foreground anchors (weights / 4 coordinates)."""

    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')

    def update(self, labels, preds):
        # label = [rpn_bbox_target, rpn_bbox_weight]
        # pred = [rpn_bbox_reg]
        rpn_bbox_target, rpn_bbox_weight = labels
        rpn_bbox_reg = preds[0]
        # calculate num_inst (average on those fg anchors)
        # divide by 4 because each box contributes 4 weighted coordinates
        num_inst = mx.nd.sum(rpn_bbox_weight) / 4
        # calculate smooth_l1
        loss = mx.nd.sum(rpn_bbox_weight * mx.nd.smooth_l1(rpn_bbox_reg - rpn_bbox_target, scalar=3))
        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()
class RCNNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RCNN head over all sampled ROIs."""

    def __init__(self):
        super(RCNNAccMetric, self).__init__('RCNNAcc')

    def update(self, labels, preds):
        # label = [rcnn_label]
        # pred = [rcnn_cls]
        rcnn_label = labels[0]
        rcnn_cls = preds[0]
        # calculate num_acc: argmax over class scores vs. the target label
        pred_label = mx.nd.argmax(rcnn_cls, axis=1)
        num_acc = mx.nd.sum(pred_label == rcnn_label)
        self.sum_metric += num_acc.asscalar()
        self.num_inst += rcnn_label.size
class RCNNL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 regression loss of the RCNN box head, normalized by the
    number of positive ROIs (weights / 4 coordinates)."""

    def __init__(self):
        super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')

    def update(self, labels, preds):
        # label = [rcnn_bbox_target, rcnn_bbox_weight]
        # pred = [rcnn_reg]
        rcnn_bbox_target, rcnn_bbox_weight = labels
        rcnn_bbox_reg = preds[0]
        # calculate num_inst; divide by 4 as each box has 4 weighted coords
        num_inst = mx.nd.sum(rcnn_bbox_weight) / 4
        # calculate smooth_l1
        loss = mx.nd.sum(rcnn_bbox_weight * mx.nd.smooth_l1(rcnn_bbox_reg - rcnn_bbox_target, scalar=1))
        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()
def get_dataloader(net, train_dataset, batch_size, num_workers, short=600, max_size=1000):
    """Get dataloader.

    NOTE(review): train() calls this with only four arguments, so
    short/max_size fall back to 600/1000 here rather than args.short and
    args.max_size (800/1024) -- confirm which is intended.
    """
    # One Append() per batch field: data, label, rpn_cls_targets,
    # rpn_box_targets, rpn_box_masks (matches the unpack in train()).
    train_bfn = batchify.Tuple(*[batchify.Append() for _ in range(5)])
    train_loader = mx.gluon.data.DataLoader(
        train_dataset.transform(FasterRCNNDefaultTrainTransform(short, max_size, net)),
        batch_size, True, batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
    return train_loader
def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
    """Persist model parameters, keeping a rolling best checkpoint.

    :param best_map: single-element list used as a mutable best-mAP holder,
        updated in place when current_map improves on it
    :param save_interval: also save every N epochs when non-zero
    """
    current_map = float(current_map)
    if current_map > best_map[0]:
        logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format(
            epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
        best_map[0] = current_map
        net.save_parameters('{:s}_best.params'.format(prefix))
        # append the new best to a log so the history is kept on disk
        with open(prefix+'_best_map.log', 'a') as f:
            f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
    if save_interval and (epoch + 1) % save_interval == 0:
        logger.info('[Epoch {}] Saving parameters to {}'.format(
            epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
        net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
def split_and_load(batch, ctx_list):
    """Split data to 1 batch each device.

    :param batch: list of fields, each itself a list with one array per device
    :param ctx_list: target contexts, one per slice
    :return: the same nested structure with each array moved to its context

    (The original used enumerate() without using the index and computed an
    unused num_ctx; both removed.)
    """
    return [[x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
            for data in batch]
def validate(net, val_data, ctx, eval_metric):
    """Test on validation dataset.

    Runs detection on every batch, rescales boxes back to the original
    image resolution, and feeds predictions plus ground truth into
    *eval_metric*.  Returns eval_metric.get().
    """
    eval_metric.reset()
    # set nms threshold and topk constraint
    net.set_nms(nms_thresh=0.3, nms_topk=400)
    net.hybridize(static_alloc=True)
    for batch in val_data:
        batch = split_and_load(batch, ctx_list=ctx)
        det_bboxes = []
        det_ids = []
        det_scores = []
        gt_bboxes = []
        gt_ids = []
        gt_difficults = []
        for x, y, im_scale in zip(*batch):
            # get prediction results
            ids, scores, bboxes = net(x)
            det_ids.append(ids.expand_dims(0))
            det_scores.append(scores.expand_dims(0))
            # clip to image size
            det_bboxes.append(mx.nd.Custom(bboxes, x, op_type='bbox_clip_to_image').expand_dims(0))
            # rescale to original resolution
            im_scale = im_scale.reshape((-1)).asscalar()
            det_bboxes[-1] *= im_scale
            # split ground truths: label layout is [x1, y1, x2, y2, cls, (difficult)]
            gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
            gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
            gt_bboxes[-1] *= im_scale
            gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
        # update metric once per device slice
        for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
            eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
    return eval_metric.get()
def get_lr_at_iter(alpha):
    """Linear warmup factor: interpolates from 1/3 (alpha=0) to 1 (alpha=1)."""
    start_factor = 1. / 3.
    return start_factor * (1 - alpha) + alpha
def train(args):
    """End-to-end training loop for the light-head RCNN.

    Builds the network and dataloader, then runs SGD with warmup and step
    decay, logging RPN/RCNN losses and accuracy metrics each epoch.
    """
    ######################################
    # hyper parmars set
    ######################################
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]  # fall back to CPU when no GPUs given
    print(ctx)
    args.batch_size = len(ctx)  # one sample per device
    #net = Faster_rcnn(pretrained_base=True)
    net = My_LHRCNN()
    #-----init-------------
    # Initialize only parameters that have no data yet, keeping any
    # pretrained weights intact.
    for param in net.collect_params().values():
        if param._data is not None:
            continue
        param.initialize()
    #----------------------
    train_dataset = Dataset()
    # NOTE(review): batch_size/num_workers are forced to 1 here, overriding
    # the values computed/parsed above -- confirm this is intentional.
    args.batch_size = 1
    args.num_workers = 1
    train_data = get_dataloader(net, train_dataset, args.batch_size, args.num_workers)
    #####################################
    """Training pipeline"""
    net.collect_params().reset_ctx(ctx)
    '''only train parms without pretrained!!!'''
    trainer = gluon.Trainer(
        net.collect_train_params(),  # fix batchnorm, fix first stage, etc...
        'sgd',
        {'learning_rate': args.lr,
         'wd': args.wd,
         'momentum': args.momentum,
         'clip_gradient': 5})
    # lr decay policy
    lr_decay = float(args.lr_decay)
    lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
    lr_warmup = int(args.lr_warmup)
    # TODO(zhreshold) losses?
    rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1/9.)  # == smoothl1
    rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    rcnn_box_loss = mx.gluon.loss.HuberLoss()  # == smoothl1
    metrics = [mx.metric.Loss('RPN_Conf'),
               mx.metric.Loss('RPN_SmoothL1'),
               mx.metric.Loss('RCNN_CrossEntropy'),
               mx.metric.Loss('RCNN_SmoothL1'),]
    rpn_acc_metric = RPNAccMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    rcnn_acc_metric = RCNNAccMetric()
    rcnn_bbox_metric = RCNNL1LossMetric()
    metrics2 = [rpn_acc_metric, rpn_bbox_metric, rcnn_acc_metric, rcnn_bbox_metric]
    # set up logger (console + per-run file next to the save prefix)
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = args.save_prefix + '_train.log'
    log_dir = os.path.dirname(log_file_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)
    logger.info(args)
    if args.verbose:
        logger.info('Trainable parameters:')
        logger.info(net.collect_train_params().keys())
    logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
    best_map = [0]
    for epoch in range(args.start_epoch, args.epochs):
        # apply any pending step decays scheduled at or before this epoch
        while lr_steps and epoch >= lr_steps[0]:
            new_lr = trainer.learning_rate * lr_decay
            lr_steps.pop(0)
            trainer.set_learning_rate(new_lr)
            logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
        # NOTE(review): unconditional extra 0.9x decay EVERY epoch, on top
        # of the step policy above -- confirm this is intended.
        trainer.set_learning_rate(trainer.learning_rate*0.9)
        for metric in metrics:
            metric.reset()
        tic = time.time()
        btic = time.time()
        net.hybridize(static_alloc=True)
        base_lr = trainer.learning_rate
        for i, batch in enumerate(train_data):
            # linear warmup during the first epoch, stepped every 500 iters
            if epoch == 0 and i <= lr_warmup:
                new_lr = base_lr * get_lr_at_iter((i // 500) / (lr_warmup / 500.))
                if new_lr != trainer.learning_rate:
                    logger.info('[Epoch 0 Iteration {}] Set learning rate to {}'.format(i, new_lr))
                    trainer.set_learning_rate(new_lr)
            batch = split_and_load(batch, ctx_list=ctx)
            batch_size = len(batch[0])
            losses = []
            metric_losses = [[] for _ in metrics]
            add_losses = [[] for _ in metrics2]
            with autograd.record():
                for data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks in zip(*batch):
                    # label layout: [x1, y1, x2, y2, cls]
                    gt_label = label[:, :, 4:5]
                    gt_box = label[:, :, :4]
                    #print(data.shape)
                    #print(gt_box.shape)
                    #print(gt_label.shape)
                    cls_pred, box_pred, roi, samples, matches, rpn_score, rpn_box, anchors = net(data, gt_box)
                    # losses of rpn
                    rpn_score = rpn_score.squeeze(axis=-1)
                    num_rpn_pos = (rpn_cls_targets >= 0).sum()
                    rpn_loss1 = rpn_cls_loss(rpn_score, rpn_cls_targets, rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
                    rpn_loss2 = rpn_box_loss(rpn_box, rpn_box_targets, rpn_box_masks) * rpn_box.size / num_rpn_pos
                    # rpn overall loss, use sum rather than average
                    rpn_loss = rpn_loss1 + rpn_loss2
                    # generate targets for rcnn
                    cls_targets, box_targets, box_masks = net.target_generator(roi, samples, matches, gt_label, gt_box)
                    # losses of rcnn
                    num_rcnn_pos = (cls_targets >= 0).sum()
                    rcnn_loss1 = rcnn_cls_loss(cls_pred, cls_targets, cls_targets >= 0) * cls_targets.size / cls_targets.shape[0] / num_rcnn_pos
                    rcnn_loss2 = rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / box_pred.shape[0] / num_rcnn_pos
                    rcnn_loss = rcnn_loss1 + rcnn_loss2
                    # overall losses
                    losses.append(rpn_loss.sum() + rcnn_loss.sum())
                    #total_loss = rpn_loss.sum()+rcnn_loss.sum()
                    metric_losses[0].append(rpn_loss1.sum())
                    metric_losses[1].append(rpn_loss2.sum())
                    metric_losses[2].append(rcnn_loss1.sum())
                    metric_losses[3].append(rcnn_loss2.sum())
                    add_losses[0].append([[rpn_cls_targets, rpn_cls_targets>=0], [rpn_score]])
                    add_losses[1].append([[rpn_box_targets, rpn_box_masks], [rpn_box]])
                    add_losses[2].append([[cls_targets], [cls_pred]])
                    add_losses[3].append([[box_targets, box_masks], [box_pred]])
                autograd.backward(losses)
                for metric, record in zip(metrics, metric_losses):
                    metric.update(0, record)
                for metric, records in zip(metrics2, add_losses):
                    for pred in records:
                        metric.update(pred[0], pred[1])
            trainer.step(batch_size)
            # update metrics
            if args.log_interval and not (i + 1) % args.log_interval:
                # msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
                msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
                logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
                    epoch, i, batch_size/(time.time()-btic), msg))
                btic = time.time()
        msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
        logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(epoch, (time.time()-tic), msg))
        # NOTE(review): parameters are overwritten at a fixed path every
        # epoch (and with a .pkl extension) -- confirm this is intended
        # rather than using save_params() like the validation flow.
        net.save_parameters('weights/frcnn_0.pkl')
# Script entry point: parse CLI args, then run the training loop.
if __name__ == '__main__':
    args = parse_args()
    # training
    train(args)
|
from distutils.core import setup
import py2exe
import glob
# py2exe packaging configuration for the Crystal Defense game.
setup(name='Crystal Defense',
      version='1.0',
      author='Taylor Tamblin',
      author_email='opethiantaylor@gmail.com',
      # every game module bundled into the executable
      py_modules=[ 'main', 'graphics', 'enemy', 'tower', 'towermenu',
      'projectile', 'towerradius', 'easymap', 'tower_stats', 'buttons',
      'mainmenu', 'player', 'wave', 'mediummap', 'hardmap'],
      # ship all PNG assets alongside the exe
      data_files=[("Graphics", glob.glob("Graphics/*.png"))],
      # py2exe bundling options -- see the py2exe docs for bundle_files levels
      options={"py2exe": {"optimize": 0,
      "bundle_files": 3,}},
      zipfile = None,
      console=['main.py'],  # build as a console app with main.py as entry
      #windows = [{"script": 'main.py'}],
      )
from humanmark.backends import available_backends
def test_backends():
    """Ensure plugin-registered backends are present and valid."""
    registry = available_backends()
    # The default backend must always be registered.
    assert 'markdown_it' in registry
    # Every registered backend exposes the required metadata field.
    for backend in registry.values():
        assert backend.DESCRIPTION is not None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.