content stringlengths 5 1.05M |
|---|
import re

# Find every run of 2+ consecutive vowels that is enclosed between two
# consonants (case-insensitive). Print each run on its own line, or -1
# when the input contains no such run.
if __name__ == '__main__':
    VOWELS = "aeiou"
    CONSONANTS = "qwrtypsdfghjklzxcvbnm"
    pattern = r"(?<=[%s])([%s]{2,})[%s]" % (CONSONANTS, VOWELS, CONSONANTS)
    runs = re.findall(pattern, input(), flags=re.I)
    print('\n'.join(runs if runs else ['-1']))
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
import datetime as dt
# Database / ORM setup
# SQLite climate database; the relative path means the app must be started
# from the project root for this file to be found.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
# NOTE(review): prepare(engine, reflect=True) is the SQLAlchemy 1.3-era call;
# the reflect flag is deprecated in 1.4+ — confirm the installed version.
Base.prepare(engine, reflect=True)
# Save reference to the mapped tables (classes named after the table names)
Station = Base.classes.station
Measurement = Base.classes.measurement
# Flask setup
app = Flask(__name__)
# Route setup
@app.route('/')
def home():
    """Landing page: return a small HTML fragment listing the API routes.

    BUG FIX: the original first line had no trailing ``<br/>``, so the
    welcome sentence and "Available Routes:" rendered on the same line.
    """
    return (
        "Welcome to the Hawaii vacation API. Note: Dates are formatted YYYY-MM-DD<br/>"
        "Available Routes:<br/>"
        "/api/v1.0/precipitation - Precipitation figures<br/>"
        "/api/v1.0/stations - Measurement stations<br/>"
        "/api/v1.0/tobs - Temperature observations (F)<br/>"
        "/api/v1.0/*start* - Minimum, Average, and Max temps for a date range starting on *start*<br/>"
        "/api/v1.0/*start*/*end* - Minimum, Average, and Max temps for a date range starting on *start* & ending on *end*<br/>"
    )
@app.route('/api/v1.0/precipitation')
def precipitation():
    """Return a JSON object mapping date -> precipitation for the final
    year of data (2016-08-23 through 2017-08-23 inclusive)."""
    session = Session(engine)
    rows = (
        session.query(Measurement.date, Measurement.prcp)
        .filter(Measurement.date <= '2017-08-23')
        .filter(Measurement.date >= '2016-08-23')
        .all()
    )
    session.close()
    # Later rows for the same date overwrite earlier ones, matching the
    # original dict-building loop.
    return jsonify({day: rain for day, rain in rows})
@app.route('/api/v1.0/stations')
def stations():
    """Return a JSON list of every station id in the dataset."""
    session = Session(engine)
    rows = session.query(Station.station).all()
    session.close()
    # Each row is a 1-tuple; unpack to a flat list of station ids.
    return jsonify([station_id for (station_id,) in rows])
@app.route('/api/v1.0/tobs')
def tobs():
    """Return a JSON list of {date, tobs} temperature observations for
    the final year of data (2016-08-23 through 2017-08-23 inclusive)."""
    session = Session(engine)
    rows = (
        session.query(Measurement.date, Measurement.tobs)
        .filter(Measurement.date <= '2017-08-23')
        .filter(Measurement.date >= '2016-08-23')
        .all()
    )
    session.close()
    return jsonify([{'date': day, 'tobs': temp} for day, temp in rows])
# Return per-date TMIN/TAVG/TMAX summaries for an open-ended start date.
@app.route('/api/v1.0/<start_date>')
def date(start_date):
    """Per-date min/avg/max temperature for all dates >= start_date."""
    session = Session(engine)
    rows = (
        session.query(Measurement.date,
                      func.min(Measurement.tobs),
                      func.avg(Measurement.tobs),
                      func.max(Measurement.tobs))
        .filter(Measurement.date >= start_date)
        .group_by(Measurement.date)
        .all()
    )
    session.close()
    summary = [
        {'DATE': day, 'TMIN': lo, 'TAVG': avg, 'TMAX': hi}
        for day, lo, avg, hi in rows
    ]
    return jsonify(summary)
@app.route('/api/v1.0/<start_date>/<end_date>')
def two_date(start_date, end_date):
    """Per-date min/avg/max temperature for dates in
    [start_date, end_date], both endpoints inclusive."""
    session = Session(engine)
    rows = (
        session.query(Measurement.date,
                      func.min(Measurement.tobs),
                      func.avg(Measurement.tobs),
                      func.max(Measurement.tobs))
        .filter(Measurement.date >= start_date)
        .filter(Measurement.date <= end_date)
        .group_by(Measurement.date)
        .all()
    )
    session.close()
    return jsonify([
        {'DATE': day, 'TMIN': lo, 'TAVG': avg, 'TMAX': hi}
        for day, lo, avg, hi in rows
    ])
if __name__ == "__main__":
    # Start Flask's development server (debug mode: auto-reload + tracebacks).
    # Removed a stray trailing '|' artifact that made the line a syntax error.
    app.run(debug=True)
# Copyright (c) 2021 War-Keeper
import discord
from discord.ext import commands
import os
import csv
# -----------------------------------------------------------
# This File contains commands for joining a group, leaving a group,
# and displaying which groups are available
# -----------------------------------------------------------
class Groups(commands.Cog):
    """Cog with commands for joining, leaving, and listing project groups.

    Group membership is persisted in ``data/server_data/groups.csv`` through
    the module-level ``load_groups`` / ``print_groups`` helpers; the CSV on
    disk is the source of truth, reloaded on every command.
    """

    # Class-level caches (the CSV files remain authoritative).
    student_pool = {}
    groups = {}

    # -----------------------------------------------------------
    # initialize
    # -----------------------------------------------------------
    def __init__(self, bot):
        # Bot instance this cog is attached to.
        self.bot = bot

    # -------------------------------------------------------------------------------------------------------
    # Function: join(self, ctx, arg='group', arg2='-1')
    # Description: joins the user to the given group
    # Inputs:
    # - self: used to access parameters passed to the class through the constructor
    # - ctx: used to access the values passed through the current context
    # - arg: the name of the group
    # - arg2: the number of the group
    # Outputs: adds the user to the given group or sends an error if the group is invalid or in case of
    # syntax errors
    # -------------------------------------------------------------------------------------------------------
    @commands.command(name='join', help='To use the join command, do: $join \'Group\' <Num> \n \
    ( For example: $join Group 0 )', pass_context=True)
    async def join(self, ctx, arg='group', arg2='-1'):
        # load the groups from the csv
        groups = load_groups()
        # get the name of the caller (upper-cased to match CSV storage)
        member_name = ctx.message.author.display_name.upper()
        # build the group key, e.g. 'GROUP 0'
        group_num = arg.upper() + ' ' + arg2
        # if the group is a valid option
        if group_num in groups:
            # BUG FIX: use '>=' rather than '==' so a hand-edited CSV that
            # already holds more than 6 members still rejects new joins.
            if len(groups[group_num]) >= 6:
                await ctx.send('A group cannot have more than 6 people!')
                return
            # check if member is already in another group
            for key in groups.keys():
                if member_name in groups[key]:
                    await ctx.send('You are already in ' + key.title())
                    return
            # add the member to the group, confirm, and persist the change
            groups[group_num].append(member_name)
            await ctx.send('You are now in ' + group_num.title() + '!')
            print_groups(groups)
        # error handling
        else:
            await ctx.send('Not a valid group')
            await ctx.send('To use the join command, do: $join \'Group\' <Num> \n ( For example: $join Group 0 )')

    # this handles errors related to the join command
    @join.error
    async def join_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send('To use the join command, do: $join \'Group\' <Num> \n ( For example: $join Group 0 )')

    # -------------------------------------------------------------------------------------------------------
    # Function: remove(self, ctx, arg='group', arg2='-1')
    # Description: removes the user from the given group
    # Inputs:
    # - self: used to access parameters passed to the class through the constructor
    # - ctx: used to access the values passed through the current context
    # - arg: the name of the group
    # - arg2: the number of the group
    # Outputs: removes the user from the given group or sends an error if the group is invalid or in
    # case of syntax errors
    # -------------------------------------------------------------------------------------------------------
    @commands.command(name='remove', help='To use the remove command, do: $remove \'Group\' <Num> \n \
    ( For example: $remove Group 0 )', pass_context=True)
    async def remove(self, ctx, arg='group', arg2='-1'):
        # load groups csv
        groups = load_groups()
        # get the name of the caller
        member_name = ctx.message.author.display_name.upper()
        # build the group key, e.g. 'GROUP 0'
        group_num = arg.upper() + ' ' + arg2
        # if the group is a valid option
        if group_num in groups:
            # if member is in the group, remove them from it
            if member_name in groups[group_num]:
                groups[group_num].remove(member_name)
                await ctx.send('You have been removed from ' + group_num.title() + '!')
            # else error message
            else:
                await ctx.send('You are not in ' + group_num.title())
            print_groups(groups)
        # if no group was given, find whichever group the member is in and remove them
        elif arg2 == '-1':
            for key in groups.keys():
                if member_name in groups[key]:
                    groups[key].remove(member_name)
                    # BUG FIX: message grammar ('You are been' -> 'You have been')
                    await ctx.send('You have been removed from ' + key.title() + '!')
            print_groups(groups)
        # error handling
        else:
            await ctx.send(group_num.title() + ' is not a valid group')
            await ctx.send('To use the remove command, do: $remove \'Group\' <Num> \n \
    ( For example: $remove Group 0 )')

    # this handles errors related to the remove command
    @remove.error
    async def remove_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send('To use the remove command, do: $remove \'Group\' <Num> \n \
    ( For example: $remove Group 0 )')

    # -------------------------------------------------------------------------------------------------------
    # Function: group(self, ctx)
    # Description: DMs the caller an embed listing every group and its size
    # Inputs:
    # - self: used to access parameters passed to the class through the constructor
    # - ctx: used to access the values passed through the current context
    # Outputs: sends one embed (two when there are more than 20 groups, since
    # Discord caps embed fields at 25)
    # -------------------------------------------------------------------------------------------------------
    @commands.command(name='group', help='print amount of groups that are full', pass_context=True)
    @commands.dm_only()
    async def group(self, ctx):
        # load groups csv
        groups = load_groups()
        # create embedded objects; groups 21+ spill into the second embed
        embed = discord.Embed(title='Group List', color=discord.Color.teal())
        embed.set_thumbnail(url="https://i.pinimg.com/474x/e7/e3/bd/e7e3bd1b5628510a4e9d7a9a098b7be8.jpg")
        embed2 = discord.Embed(title='Group List', color=discord.Color.teal())
        embed2.set_thumbnail(url="https://i.pinimg.com/474x/e7/e3/bd/e7e3bd1b5628510a4e9d7a9a098b7be8.jpg")
        # ignoring the CSV header row, add all group member counts
        count = 0
        for key in groups.keys():
            if key != 'GROUP_NUM':
                if count < 20:
                    embed.add_field(name=key, value=str(len(groups[key])), inline=True)
                else:
                    embed2.add_field(name=key, value=str(len(groups[key])), inline=True)
                count += 1
        # print the embedded objects
        embed.set_footer(text="Number Represents the Group Size")
        embed2.set_footer(text="Number Represents the Group Size")
        await ctx.send(embed=embed)
        # BUG FIX: '>' rather than '>=' — with exactly 20 groups the second
        # embed holds no fields and should not be sent.
        if count > 20:
            await ctx.send(embed=embed2)

    # -----------------------------------------------------------
    # This is a testing arg, not really used for anything else but adding to the csv file
    # -----------------------------------------------------------
    # @commands.command(name='test_name', help='add a name to the name_mapping.csv', pass_context=True)
    # async def test_name(self, ctx, arg, arg2):
    #     student_pool = load_pool()
    #     display_name = ctx.message.author.display_name
    #     display_name_upper = display_name.upper()
    #
    #     if student_pool.get(display_name_upper) is None:
    #         student_pool[display_name_upper] = arg.upper() + ' ' + arg2.upper()
    #     else:
    #         member_name = student_pool[display_name_upper]
    #         await ctx.send('You have already registered with the name: ' + member_name.title())
    #
    #     print_pool(student_pool)
# -----------------------------------------------------------
# Used to load the groups from the csv file into a dictionary
# -----------------------------------------------------------
def load_groups() -> dict:
    """Read groups.csv into ``{group name: [member, ...]}``.

    Side effect: chdir's into ``<project root>/data/server_data`` (the
    CSV helpers in this module all assume that working directory).
    Empty member cells are stripped from each list.
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(project_root)
    os.chdir('data')
    os.chdir('server_data')
    with open('groups.csv', mode='r') as infile:
        table = {}
        for row in csv.reader(infile):
            # Columns 1..6 are the (up to) six member slots.
            table[row[0].upper()] = [row[i].upper() for i in range(1, 7)]
    for name in table:
        table[name] = [member for member in table[name] if member]
    return table
# -----------------------------------------------------------
# Used to print the groups to the csv file
# -----------------------------------------------------------
def print_groups(group):
    """Write the group dict back to groups.csv.

    Each row is padded with ``None`` up to six member slots (the padding
    mutates the caller's lists, as in the original implementation).
    Side effect: chdir's into ``<project root>/data/server_data``.
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(project_root)
    os.chdir('data')
    os.chdir('server_data')
    with open('groups.csv', mode='w', newline="") as outfile:
        writer = csv.writer(outfile)
        for name, members in group.items():
            while len(members) < 6:
                members.append(None)
            writer.writerow([name] + members)
# ------------------------------------------------------------
# Used to load the members from the csv file into a dictionary
# ------------------------------------------------------------
def load_pool() -> dict:
    """Read name_mapping.csv into ``{display name: registered name}``,
    both upper-cased. Side effect: chdir's into
    ``<project root>/data/server_data``."""
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(project_root)
    os.chdir('data')
    os.chdir('server_data')
    with open('name_mapping.csv', mode='r') as infile:
        return {row[0].upper(): row[1].upper() for row in csv.reader(infile)}
# -----------------------------------------------------------
# Used to print the members to the csv file
# -----------------------------------------------------------
def print_pool(pools):
    """Write ``{display name: registered name}`` pairs to name_mapping.csv.
    Side effect: chdir's into ``<project root>/data/server_data``."""
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(project_root)
    os.chdir('data')
    os.chdir('server_data')
    with open('name_mapping.csv', mode='w', newline="") as outfile:
        # Each (key, value) pair becomes one two-column row.
        csv.writer(outfile).writerows(pools.items())
# -----------------------------------------------------------
# add the file to the bot's cog system
# -----------------------------------------------------------
def setup(bot):
    # Extension entry point: called by bot.load_extension() to register this cog.
    bot.add_cog(Groups(bot))
|
import scrapeLinks
import pandas as pd

# Previously scraped forums (kept for reference):
# 'https://www.tabletennisdaily.com/forum/forumdisplay.php?102-Rubbers'
# 'https://www.tabletennisdaily.com/forum/forumdisplay.php?164-Butterfly'
base_path = 'https://www.tabletennisdaily.com/forum/forumdisplay.php?177-Tibhar'

# Scrape thread links from pages 1 and 2, accumulating into one DataFrame.
for page_num in range(1, 3):
    page_path = "{base_path}/page{num}".format(base_path=base_path, num=page_num)
    print(page_path)
    page_links = scrapeLinks.scrape_links(page_path, local=False)
    print(page_links.shape)
    if page_num == 1:
        all_links = page_links
    else:
        all_links = pd.concat([all_links, page_links], axis=0)
    print(all_links.shape)

# Drop duplicate links collected across pages, then export.
all_links.drop_duplicates(inplace=True)
print(all_links.shape)
output_path = '../output/tabletennisdaily_rubber_tibhar_threads.csv'
all_links.to_csv(output_path, index=False)
print('export is completed')
|
# Generated by Django 2.0.1 on 2018-02-02 13:16
from django.db import migrations
class Migration(migrations.Migration):
    """Rename accounts.User field 'is_active' to 'active' (column rename only)."""
    # NOTE(review): Django's auth machinery expects an 'is_active' attribute on
    # the user model — confirm the model exposes a compatible property after
    # this rename.

    dependencies = [
        ('accounts', '0004_auto_20180202_1048'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='is_active',
            new_name='active',
        ),
    ]
|
import json
import uuid
from django.utils import timezone
from pathvalidate import sanitize_filename
from ..aws import sign_upload, sign_download, s3_copy, s3_delete, s3_update_and_return_size
from ..models import File, GroupUser
# List folders/files (original comment: "폴더/파일 목록")
def list_item(request):
    """Return the caller's files matching one flag filter
    (is_public / is_starred / is_trashed, checked in that priority order)."""
    # Must be logged in.
    if request.user_id is None:
        return {'result': False, 'error': '로그인을 해주세요.'}
    flags = (request.GET.get('is_public'),
             request.GET.get('is_starred'),
             request.GET.get('is_trashed'))
    # At least one of the three filters must be requested.
    if 'true' not in flags:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Query the caller's non-deleted files for the first matching flag.
    if flags[0] == 'true':
        files = File.objects.filter(owner_user_id=request.user_id, is_public=1, deleted_at__isnull=True)
    elif flags[1] == 'true':
        files = File.objects.filter(owner_user_id=request.user_id, is_starred=1, deleted_at__isnull=True)
    else:
        files = File.objects.filter(owner_user_id=request.user_id, is_trashed=1, deleted_at__isnull=True)
    # Serialize each row to a plain dict.
    payload = [
        {
            'id': item.id,
            'type': item.type,
            'parent_id': item.parent_id,
            'name': item.name,
            'size': item.size,
            'is_public': item.is_public,
            'is_starred': item.is_starred,
            'is_trashed': item.is_trashed,
            'created_at': item.created_at,
        }
        for item in files
    ]
    return {'result': True, 'data': payload}
# Create a folder / upload a file (original comment: "폴더 생성, 파일 업로드")
def create(request):
    """Create a folder row, or a file row plus a signed S3 upload URL.

    Requires login, a JSON body with parent_id/type/name, and that the
    caller owns the parent folder or belongs to its owning group.
    """
    # Check login
    if request.user_id is None:
        return {'result': False, 'error': '로그인을 해주세요.'}
    # Parse the JSON request body
    try:
        received = json.loads(request.body.decode('utf-8'))
    except json.decoder.JSONDecodeError:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Validate required keys
    if 'parent_id' not in received \
            or 'type' not in received \
            or 'name' not in received:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Validate values: type must be folder/file and name must be non-empty
    if (received['type'] != 'folder' and received['type'] != 'file') \
            or received['name'] == '':
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Fetch the parent (must exist, not trashed, not deleted)
    parent = File.objects.filter(id=received['parent_id'], is_trashed=0, deleted_at__isnull=True)
    # Check it exists
    if len(parent) == 0:
        return {'result': False, 'error': '경로가 잘못되었습니다.'}
    # Authorize: caller owns the parent, or is in the parent's owning group
    is_auth = False
    if parent[0].owner_user_id == request.user_id:
        is_auth = True
    is_my_group = GroupUser.objects.filter(group_id=parent[0].owner_group_id, user_id=request.user_id)
    if len(is_my_group) != 0:
        is_auth = True
    if is_auth is False:
        return {'result': False, 'error': '경로가 잘못되었습니다.'}
    # Insert the new row; ownership is inherited from the parent folder,
    # the name is sanitized, and size starts at 0.
    file_id = uuid.uuid4()
    File.objects.create(
        id=file_id,
        parent_id=received['parent_id'],
        owner_user_id=parent[0].owner_user_id,
        owner_group_id=parent[0].owner_group_id,
        uploader_id=request.user_id,
        type=received['type'],
        name=sanitize_filename(received['name']),
        size=0,
        created_at=timezone.now()
    )
    # Folders need no upload URL
    if received['type'] == 'folder':
        return {'result': True, 'file_id': file_id}
    # Files additionally get a pre-signed S3 upload URL
    upload_url = sign_upload(str(file_id))
    return {'result': True, 'file_id': file_id, 'upload_url': upload_url}
# Empty the trash (original comment: "휴지통 비우기")
def empty_trash(request):
    """Permanently delete every trashed file owned by the caller,
    including all descendants of those files."""
    # Must be logged in.
    if request.user_id is None:
        return {'result': False, 'error': '로그인을 해주세요.'}
    # Seed the frontier with the caller's trashed, not-yet-deleted files.
    trashed = File.objects.filter(owner_user_id=request.user_id, is_trashed=1, deleted_at__isnull=True)
    frontier = [item.id for item in trashed]
    deleted_ids = []
    # Breadth-first walk: record the current level, then descend to children.
    while frontier:
        children = File.objects.filter(parent_id__in=frontier)
        deleted_ids.extend(frontier)
        frontier = [child.id for child in children]
    # Remove the blobs from S3, then soft-delete the rows.
    s3_delete(deleted_ids)
    File.objects.filter(id__in=deleted_ids).update(is_trashed=1, deleted_at=timezone.now())
    return {'result': True, 'affected': deleted_ids}
# Look up a folder/file (original comment: "폴더/파일 조회")
def find_item(request, file_id):
    """Return one file (with a signed download URL) or one folder (with
    its direct, non-trashed children).

    Access is granted when the caller owns the item, belongs to its owning
    group, or the item — or any of its ancestors — is public.
    """
    # Fetch the item (must not be deleted)
    file = File.objects.filter(id=file_id, deleted_at__isnull=True)
    # Check it exists
    if len(file) == 0:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Authorize: owner or member of the owning group
    is_auth = False
    if file[0].owner_user_id == request.user_id:
        is_auth = True
    is_my_group = GroupUser.objects.filter(group_id=file[0].owner_group_id, user_id=request.user_id)
    if len(is_my_group) != 0:
        is_auth = True
    # Public items are visible to anyone
    if file[0].is_public == 1:
        is_auth = True
    # Walk up the ancestor chain: a public ancestor also grants access
    parent_id = file[0].parent_id
    while True:
        if parent_id is None or is_auth:
            break
        parent_file = File.objects.filter(id=parent_id)
        if parent_file[0].is_public == 1:
            is_auth = True
        parent_id = parent_file[0].parent_id
    # Reject if no rule granted access
    if is_auth is False:
        return {'result': False, 'error': '권한이 없습니다.'}
    # Files: serialize and attach a pre-signed download URL
    if file[0].type == 'file':
        download_url = sign_download(file[0].id)
        data = {
            'id': file[0].id,
            'type': file[0].type,
            'parent_id': file[0].parent_id,
            'name': file[0].name,
            'size': file[0].size,
            'is_public': file[0].is_public,
            'is_starred': file[0].is_starred,
            'is_trashed': file[0].is_trashed,
            'created_at': file[0].created_at,
        }
        return {'result': True, 'data': data}
    # Folders: fetch direct children that are not trashed or deleted
    files = File.objects.filter(parent_id=file[0].id, is_trashed=0, deleted_at__isnull=True)
    # Serialize the folder itself
    data = {
        'id': file[0].id,
        'type': file[0].type,
        'parent_id': file[0].parent_id,
        'name': file[0].name,
        'size': file[0].size,
        'is_public': file[0].is_public,
        'is_starred': file[0].is_starred,
        'is_trashed': file[0].is_trashed,
        'created_at': file[0].created_at,
    }
    # Serialize the children (NOTE: this loop reuses and shadows 'file')
    file_list = []
    for file in files:
        file_list.append({
            'id': file.id,
            'type': file.type,
            'parent_id': file.parent_id,
            'name': file.name,
            'size': file.size,
            'is_public': file.is_public,
            'is_starred': file.is_starred,
            'is_trashed': file.is_trashed,
            'created_at': file.created_at,
        })
    # Return the folder plus its children
    return {'result': True, 'data': data, 'files': file_list}
# Update a folder/file (original comment: "폴더/파일 수정")
def update_item(request, file_id):
    """Update name / parent / public / starred / trashed flags of an item.

    The owner may change anything; a member of the owning group may only
    rename or move the item (not toggle the is_* flags). Moving requires
    the destination folder to exist, share the same owner/group, and not
    be the item itself.
    """
    # Check login
    if request.user_id is None:
        return {'result': False, 'error': '로그인을 해주세요.'}
    # Parse the JSON request body
    try:
        received = json.loads(request.body.decode('utf-8'))
    except json.decoder.JSONDecodeError:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # At least one updatable field must be present
    if 'name' not in received \
            and 'parent_id' not in received \
            and 'is_public' not in received \
            and 'is_starred' not in received \
            and 'is_trashed' not in received:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Fetch the item (must not be deleted)
    file = File.objects.filter(id=file_id, deleted_at__isnull=True)
    # Check it exists
    if len(file) == 0:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Authorize: owner always; a group member only when no is_* flag is
    # being changed (group members may not toggle public/starred/trashed)
    is_auth = False
    if file[0].owner_user_id == request.user_id:
        is_auth = True
    is_my_group = GroupUser.objects.filter(group_id=file[0].owner_group_id, user_id=request.user_id)
    if len(is_my_group) != 0 \
            and 'is_public' not in received \
            and 'is_starred' not in received \
            and 'is_trashed' not in received:
        is_auth = True
    # When moving, validate the destination folder
    if 'parent_id' in received:
        parent = File.objects.filter(id=received['parent_id'], type='folder', deleted_at__isnull=True)
        if len(parent) == 0:
            return {'result': False, 'error': '잘못된 요청입니다.'}
        # Destination must share owner and group, and must not be the item itself
        if (is_auth is True or len(is_my_group) != 0) \
                and parent[0].owner_user_id == file[0].owner_user_id \
                and parent[0].owner_group_id == file[0].owner_group_id \
                and file_id != received['parent_id']:
            is_auth = True
        else:
            is_auth = False
    # Reject if no rule granted access
    if is_auth is False:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Apply the updates field by field
    if 'name' in received:
        if received['name'] == '':
            return {'result': False, 'error': '이름을 제대로 입력해주세요.'}
        file[0].name = sanitize_filename(received['name'])
        if file[0].type == 'file':
            # Propagate the rename to S3
            # NOTE(review): the returned size is discarded here — confirm
            # whether file[0].size should be refreshed from it.
            s3_update_and_return_size(file_id, file[0].name)
    if 'parent_id' in received:
        file[0].parent_id = received['parent_id']
    if 'is_public' in received:
        file[0].is_public = 1 if received['is_public'] is True else 0
    if 'is_starred' in received:
        file[0].is_starred = 1 if received['is_starred'] is True else 0
    if 'is_trashed' in received:
        # Root items (no parent) cannot be trashed
        if file[0].parent_id is None:
            return {'result': False, 'error': '잘못된 요청입니다.'}
        file[0].is_trashed = 1 if received['is_trashed'] is True else 0
    file[0].save()
    return {'result': True}
# Duplicate a file (original comment: "파일 복제")
def copy(request, file_id):
    """Duplicate a file row and its S3 object.

    Requires login and that the caller owns the file or belongs to its
    owning group. Returns the id of the NEW copy.
    """
    # Check login
    if request.user_id is None:
        return {'result': False, 'error': '로그인을 해주세요.'}
    # Fetch the source file (must be a live, non-trashed file)
    file = File.objects.filter(id=file_id, type='file', is_trashed=0, deleted_at__isnull=True)
    # Check it exists
    if len(file) == 0:
        return {'result': False, 'error': '잘못된 요청입니다.'}
    # Authorize: owner or member of the owning group
    is_auth = False
    if file[0].owner_user_id == request.user_id:
        is_auth = True
    is_my_group = GroupUser.objects.filter(group_id=file[0].owner_group_id, user_id=request.user_id)
    if len(is_my_group) != 0:
        is_auth = True
    if is_auth is False:
        return {'result': False, 'error': '경로가 잘못되었습니다.'}
    # New UUID for the copy
    new_file_id = uuid.uuid4()
    # Copy the blob in S3 first
    s3_copy(file_id, new_file_id)
    # Insert the duplicate row ('%s의 사본' means "copy of %s")
    File.objects.create(
        id=new_file_id,
        parent_id=file[0].parent_id,
        owner_user_id=file[0].owner_user_id,
        owner_group_id=file[0].owner_group_id,
        uploader_id=request.user_id,
        type=file[0].type,
        name='%s의 사본' % file[0].name,
        size=file[0].size,
        created_at=timezone.now()
    )
    # BUG FIX: the original returned the SOURCE file's id; callers need the
    # id of the newly created copy.
    return {'result': True, 'file_id': new_file_id}
|
from peri import states, runner
from peri.comp import ilms, objs, comp, exactpsf
def makestate(im, pos, rad, slab=None, mem_level='hi', zscale=1.0):
    """
    Workhorse for creating & optimizing states with an initial centroid
    guess.

    This is an example function that works for a particular microscope. For
    your own microscope, you'll need to change particulars such as the psf
    type and the orders of the background and illumination.

    Parameters
    ----------
    im : :class:`~peri.util.RawImage`
        A RawImage of the data.
    pos : [N,3] element numpy.ndarray.
        The initial guess for the N particle positions.
    rad : N element numpy.ndarray.
        The initial guess for the N particle radii.
    slab : :class:`peri.comp.objs.Slab` or None, optional
        If not None, a slab corresponding to that in the image. Default
        is None.
    mem_level : {'lo', 'med-lo', 'med', 'med-hi', 'hi'}, optional
        A valid memory level for the state to control the memory overhead
        at the expense of accuracy. Default is `'hi'`
    zscale : float, optional
        Initial z-scale passed to the sphere collection. Default is 1.0.
        (BUG FIX: the original referenced an undefined ``zscale`` name,
        raising NameError; it is now an explicit keyword argument, so
        existing callers are unaffected.)

    Returns
    -------
    :class:`~peri.states.ImageState`
        An ImageState with a linked z-scale, a ConfocalImageModel, and
        all the necessary components with orders at which are useful for
        my particular test case.
    """
    # BUG FIX: ``opt`` was never imported in this module. peri ships its
    # Levenberg-Marquardt helpers in peri.opt.optimize — NOTE(review):
    # confirm the module path against the installed peri version.
    from peri.opt import optimize as opt
    if slab is not None:
        # Combine spheres and slab into a single 'obj' component.
        o = comp.ComponentCollection(
            [
                objs.PlatonicSpheresCollection(pos, rad, zscale=zscale),
                slab
            ],
            category='obj'
        )
    else:
        o = objs.PlatonicSpheresCollection(pos, rad, zscale=zscale)
    p = exactpsf.FixedSSChebLinePSF()
    # Illumination order chosen from the image shape.
    npts, iorder = _calc_ilm_order(im.get_image().shape)
    i = ilms.BarnesStreakLegPoly2P1D(npts=npts, zorder=iorder)
    b = ilms.LegendrePoly2P1D(order=(9, 3, 5), category='bkg')
    c = comp.GlobalScalar('offset', 0.0)
    s = states.ImageState(im, [o, i, b, c, p])
    runner.link_zscale(s)
    if mem_level != 'hi':
        s.set_mem_level(mem_level)
    # Quick pre-optimization of the illumination scale before returning.
    opt.do_levmarq(s, ['ilm-scale'], max_iter=1, run_length=6, max_mem=1e4)
    return s
def _calc_ilm_order(imshape):
"""
Calculates an ilm order based on the shape of an image. This is based on
something that works for our particular images. Your mileage will vary.
Parameters
----------
imshape : 3-element list-like
The shape of the image.
Returns
-------
npts : tuple
The number of points to use for the ilm.
zorder : int
The order of the z-polynomial.
"""
zorder = int(imshape[0] / 6.25) + 1
l_npts = int(imshape[1] / 42.5)+1
npts = ()
for a in range(l_npts):
if a < 5:
npts += (int(imshape[2] * [59, 39, 29, 19, 14][a]/512.) + 1,)
else:
npts += (int(imshape[2] * 11/512.) + 1,)
return npts, zorder
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from rfid_node.msg import TagReading
def tagCallback(lastTag):
    """Re-publish a TagReading as a colon-separated String on tag_pub.

    Fields: '0', tag ID, rssi, phase, frequency, then the millisecond
    timestamp split into its high and low 32-bit halves.
    """
    global tag_pub
    # Milliseconds since epoch. '//' keeps this an int on Python 3 (the
    # original '/' would yield a float and break the shifts below).
    timest = lastTag.timestamp.secs * 1000 + lastTag.timestamp.nsecs // 1000000
    higher = timest >> 32
    # BUG FIX: the original computed timest - ((higher >> 32) << 32), which
    # is a no-op for any realistic timestamp; take the low 32 bits instead.
    lower = timest & 0xFFFFFFFF
    fields = ['0', lastTag.ID, str(lastTag.rssi), str(lastTag.phase), str(lastTag.frequency), str(higher), str(lower)]
    separator = ':'
    tag_pub.publish(separator.join(fields))
if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op; kept from the original.
    global tag_pub
    rospy.init_node("readingParser")
    # Get the ~private namespace parameters from command line or launch file.
    tagTopicName = rospy.get_param('~tagTopicName', 'lastTag')
    stringTopicName = rospy.get_param('~stringTopicName', 'rfid/rfid_detect')
    # Create publisher/subscriber: re-publish each TagReading as a String.
    tag_pub=rospy.Publisher(stringTopicName, String,queue_size=0)
    rospy.Subscriber(tagTopicName, TagReading, tagCallback)
    # and wait
    rospy.spin()
|
# -----------------
# settings
# NOTE(review): this is a DrawBot script — newPage, translate, fill, stroke,
# strokeWidth, lineCap, savedState, rotate, line, and random() are all
# provided by the DrawBot runtime namespace, not imported here.
pw = ph = 500  # square canvas size in points
amount = 36  # grid cells per row
threshold = .98  # a value between 0 and 1; higher -> fewer rotated lines
st_w = 3  # stroke width in points
ele_s = pw / (amount + 2)  # cell size, leaving a margin around the grid
# -----------------
# drawing
newPage(pw, ph)
translate(ele_s*1.5, ele_s*1.5)  # shift the origin inside the margin
fill(None)
stroke(0)
strokeWidth(st_w)
lineCap('round')
for x in range(amount):
    for y in range(amount + 1):
        # savedState() restores the transform after each cell.
        with savedState():
            translate(x * ele_s, y * ele_s - ele_s/2)
            # Rarely (1 - threshold of the time) rotate the line randomly.
            if random() > threshold:
                rotate(random() * 180)
            line( (-ele_s/2, 0), (ele_s/2, 0) )
# saveImage('random_lines_rotation.jpg')
import os
import numpy as np
import json
from PIL import Image
import preprocessing
import utilities
import postprocessing
import time
import matplotlib.pyplot as plt
import ip_algorithms as ipa
def compute_convolution(I, T, stride=(None, None), pixel_group=None, img_name=''):
    '''
    This function takes an image <I> and a template <T> (both numpy arrays)
    and returns a heatmap where each grid represents the output produced by
    normalized cross-correlation at each location.

    Parameters: <stride> (row, col) sampling step, default (1, 1);
    <pixel_group> optional list of (row, col) pairs restricting evaluation
    to those pixels; <img_name> unused, kept for interface compatibility.
    '''
    # Normalize the image to [0, 1] if it looks like 0-255, then remove
    # the per-channel mean.
    if np.max(I) > 1:
        I = I/255
    I = I - np.mean(I, axis=(0, 1))
    if len(I.shape) != 3:
        I = np.expand_dims(I, axis=2)
    (n_rows, n_cols, n_channels) = np.shape(I)
    # Match template channel count to the image.
    if len(T.shape) == 2 and n_channels != 1:
        (Trows, Tcols) = np.shape(T)
        T = np.expand_dims(T, axis=2)
        repT = np.tile(T, reps=3)
    elif len(T.shape) == 3 and n_channels == 1:
        # NOTE(review): repT is 2-D on this path, so the [:, :, 0] indexing
        # below would fail — callers in this project appear to pass RGB.
        T = np.mean(T, axis=2)
        (Trows, Tcols) = np.shape(T)
        repT = T
    else:
        (Trows, Tcols, Tchannels) = np.shape(T)
        repT = T
    # Center and L2-normalize the template per channel.
    avg_repT = np.mean(repT, axis=(0, 1))
    center_repT = repT - avg_repT
    norm_repT = np.linalg.norm(center_repT, axis=(0, 1))
    cnT = center_repT / norm_repT
    # "Hot spot": centroid of the brightest red pixels of the template; the
    # patch is aligned so this point coincides with the heatmap coordinate.
    x, y = np.where(repT[:, :, 0] == np.max(repT[:, :, 0]))
    hot_x = round(np.mean(x).item())
    hot_y = round(np.mean(y).item())
    tr_Trows = hot_x
    br_Trows = Trows - tr_Trows
    lc_Tcols = hot_y
    rc_Tcols = Tcols - lc_Tcols
    heatmap = np.zeros(shape=(n_rows, n_cols))
    # BUG FIX: the original tested ``stride is (None, None)`` — an identity
    # comparison against a fresh tuple literal, which is generally False —
    # so the default stride fell through as (None, None) and range() crashed.
    if stride == (None, None):
        stride = (1, 1)
    xvals = range(0, n_rows, stride[0])
    yvals = range(0, n_cols, stride[1])
    # In targeted mode, only the supplied (row, col) pixels are evaluated.
    targeted = False
    yvals_master = None
    if pixel_group is not None:
        targeted = True
        xvals, yvals_master = zip(*pixel_group)
    xval_count = 0
    for i in xvals:
        if targeted:
            yvals = [yvals_master[xval_count]]
            xval_count += 1
        for j in yvals:
            # Zero-pad patches that extend past the image border.
            padded_patch = np.zeros(shape=(T.shape[0], T.shape[1], n_channels))
            tr = max(0, i - tr_Trows)
            br = min(n_rows, i + br_Trows)
            lc = max(0, j - lc_Tcols)
            rc = min(n_cols, j + rc_Tcols)
            offsetr = abs(i - tr_Trows - tr) + abs(i + br_Trows - br)
            offsetc = abs(j - lc_Tcols - lc) + abs(j + rc_Tcols - rc)
            patch = I[tr:br, lc:rc, :]
            if patch.shape != T.shape:
                padded_patch[offsetr:, offsetc:, :] = patch
                patch = padded_patch
            # Center and normalize the patch, then average the per-channel
            # correlations. NOTE(review): assumes 3 channels here.
            center_patch = patch - np.mean(patch, axis=(0, 1))
            norm_patch = np.linalg.norm(center_patch, axis=(0, 1))
            cnp = center_patch/norm_patch
            for k in range(3):
                heatmap[i, j] += (np.dot(cnT[:, :, k].flatten(), cnp[:, :, k].flatten()))/3
    return heatmap
def predict_boxes(cmc_list, heatmap):
    '''
    Convert a heatmap into bounding boxes with associated confidence
    scores, by grouping hot pixels and fitting boxes around each group.
    '''
    pixel_groups, _, _ = postprocessing.group_pixels(heatmap)
    _, _, boxes = postprocessing.groups_to_bounding_boxes(pixel_groups, cmc_list, heatmap)
    return boxes
def detect_red_light_mf(I, img=None, name=''):
    '''
    This function takes a numpy array <I> and returns a list <output>.
    The length of <output> is the number of bounding boxes predicted for <I>.
    Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>.
    The first four entries are four integers specifying a bounding box
    (the row and column index of the top left corner and the row and column
    index of the bottom right corner).
    <score> is a confidence score ranging from 0 to 1.
    Note that PIL loads images in RGB order, so:
    I[:,:,0] is the red channel
    I[:,:,1] is the green channel
    I[:,:,2] is the blue channel
    '''
    '''
    BEGIN YOUR CODE
    '''
    # You may use multiple stages and combine the results
    st = time.time()
    # Load matched-filter kernels, skipping indices listed in `exclude`
    # (presumably kernels found to perform poorly — TODO confirm).
    kernel_list = []
    kernel_sizes = []
    exclude = [0, 1, 3, 4, 5, 7, 8, 10, 13, 14, 15, 17, 6, 9, 11, 16]
    n_kernels = 6 * 3
    for i in range(n_kernels):
        if i in exclude:
            continue
        kernel_list.append(utilities.load_kernel(str(i), '../data/kernels/'))
        kernel_sizes.append(preprocessing.get_patch_hot_spot_size(kernel_list[-1]))
    # Stage 1: cheap per-pixel color matching to find candidate red-light
    # regions, then threshold, smooth, and cluster into pixel groups.
    rgb_pixel_array = np.load('../data/red_light_pixels/rgb_pixel_array.npy')
    print('preprocessing...')
    map_mih = preprocessing.color_match_red_lights(I, rgb_pixel_array, stride=(1, 2))
    thresholded_mih_map = postprocessing.threshold_convolved_image(map_mih, 0.94, mode='down')
    smoothed_thresholded_mih_map = ipa.neighbor_max_smooth_heatmap(thresholded_mih_map, np.zeros(shape=(5, 5)))
    groups1, group_centers1, pixels = postprocessing.group_pixels(smoothed_thresholded_mih_map)
    # print(time.time() - st)
    # plt.imshow(smoothed_thresholded_mih_map)
    # plt.show()
    # Stage 2: run each kernel's matched filter only over the candidate
    # groups (not the whole image); record the best score per (kernel, group).
    heatmaps = []
    group_kernel_scores = np.zeros(shape=(len(kernel_list), len(groups1)))
    print('match filtering... #pixels : ', np.sum(smoothed_thresholded_mih_map > 0))
    exclude_group = []
    for k, kernel in enumerate(kernel_list):
        kernel_heatmaps = []
        for i, group in enumerate(groups1):
            if i in exclude_group:
                continue
            group = postprocessing.group_center_to_pixel_group(group_centers1[i], group, kernel, img_size=I.shape)
            hmap = compute_convolution(I, kernel, stride=(1, 1), pixel_group=group)
            kernel_heatmaps.append(hmap)
            group_kernel_scores[k, i] = np.max(hmap)
            # vis = False
            # if (282, 478) in group and vis:
            #     plt.subplot(131)
            #     plt.imshow(hmap)
            #     plt.subplot(132)
            #     mask = np.copy(hmap)
            #     mask[mask > 0] = 1
            #     mask[mask < np.mean(mask)] = 0.25
            #     plt.imshow(I/255 * mask[:, :, None])
            #     plt.subplot(133)
            #     plt.imshow(kernel)
            #     plt.show()
        # Pixel-wise max over groups gives this kernel's full-image response.
        kernel_heatmap = np.max(kernel_heatmaps, axis=0)
        heatmaps.append(kernel_heatmap)
        print(time.time() - st)
    # Combine all kernels' responses with a pixel-wise max.
    heatmap = np.max(heatmaps, axis=0)
    # print('postprocessing...')
    output = []
    if len(groups1) > 0:
        # Stage 3: threshold the combined response, match the resulting
        # groups back to the stage-1 candidates, and keep only detections
        # whose best kernel also passes a color-match check (> 0.88).
        thresh_heatmap = postprocessing.threshold_convolved_image(heatmap, 0.83, mode='down')
        groups, group_centers, _ = postprocessing.group_pixels(thresh_heatmap)
        matched_indices = postprocessing.match_group_centers_to_groups(group_centers, groups1)
        bb_heatmap = np.zeros(shape=heatmap.shape)
        cmc_list = []
        for i, ind in enumerate(matched_indices):
            if ind != -1:
                gc = group_centers[i]
                kind = int(np.argmax(group_kernel_scores[:, ind]))
                kernel = kernel_list[kind]
                cmc = postprocessing.color_match_score(gc, kernel, I)
                if cmc > 0.88:
                    postprocessing.add_kernel_patch(gc, kernel, heatmap, bb_heatmap)
                    cmc_list.append(cmc)
        output = predict_boxes(cmc_list, bb_heatmap)
    print(time.time() - st)
    '''
    END YOUR CODE
    '''
    # Sanity-check the output format: 5 entries per box, score in [0, 1].
    for i in range(len(output)):
        assert len(output[i]) == 5
        assert (output[i][4] >= 0.0) and (output[i][4] <= 1.0)
    return output
# Note that you are not allowed to use test data for training.

# Paths to the image data and the train/test split files.
data_path = '../data/RedLights2011_Medium'
split_path = '../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path, 'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path, 'file_names_test.npy'))

# Directory where prediction JSON files are written.
preds_path = '../data/hw02_weakened2_preds'
os.makedirs(preds_path, exist_ok=True)  # create directory if needed

# Set this parameter to True when you're done with algorithm development:
done_tweaking = True

'''
Make predictions on the training set.
'''
preds_train = {}
st = time.time()
for i in range(len(file_names_train)):
    # Every 10 images: report progress and checkpoint predictions so a
    # crash doesn't lose completed work.
    if i % 10 == 0:
        print('Time Elapsed : ', time.time() - st)
        with open(os.path.join(preds_path, 'preds_train.json'), 'w') as f:
            json.dump(preds_train, f)
    print(str(i) + '/' + str(len(file_names_train)) + ' : ' + file_names_train[i])
    # read image using PIL, then convert to numpy array:
    img = Image.open(os.path.join(data_path, file_names_train[i]))
    I = np.asarray(img)
    preds_train[file_names_train[i]] = detect_red_light_mf(I, img, file_names_train[i])
print('Finished train, ' + str(time.time() - st))

# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path, 'preds_train.json'), 'w') as f:
    json.dump(preds_train, f)

if done_tweaking:
    '''
    Make predictions on the test set.
    '''
    preds_test = {}
    for i in range(len(file_names_test)):
        if i % 10 == 0:
            print('Time Elapsed : ', time.time() - st)
            # BUG FIX: this checkpoint previously dumped preds_train into
            # preds_test.json, clobbering the test predictions with
            # training predictions.
            with open(os.path.join(preds_path, 'preds_test.json'), 'w') as f:
                json.dump(preds_test, f)
        print(str(i) + '/' + str(len(file_names_test)) + ' : ' + file_names_test[i])
        # read image using PIL and convert to numpy array:
        I = Image.open(os.path.join(data_path, file_names_test[i]))
        I = np.asarray(I)
        preds_test[file_names_test[i]] = detect_red_light_mf(I)
    # save preds (overwrites any previous predictions!)
    with open(os.path.join(preds_path, 'preds_test.json'), 'w') as f:
        json.dump(preds_test, f)
    print('Finished test, ' + str(time.time() - st))
|
import jsonschema
from base64 import urlsafe_b64encode
from datetime import timedelta
from marshmallow import Schema, fields, post_load
from marshmallow.exceptions import ValidationError
from zeus.models import Hook
from zeus.utils import timezone
from zeus.providers import InvalidProvider, get_provider, VALID_PROVIDER_NAMES
class HookConfigField(fields.Field):
    """Marshmallow field holding a hook's provider-specific config dict.

    Deserialization delegates validation to the provider named in the
    sibling ``provider`` field via its JSON-schema ``validate_config``.
    """

    def _serialize(self, value, attr, obj, **kwargs):
        # Always emit a plain dict; a missing/None config becomes {}.
        return dict(value) if value else {}

    def _deserialize(self, value, attr, data, **kwargs):
        provider_name = data.get("provider")
        if provider_name:
            try:
                provider_cls = get_provider(provider_name)
            except InvalidProvider:
                raise ValidationError("Invalid provider")
            try:
                provider_cls.validate_config(value)
            except jsonschema.ValidationError as e:
                # BUG FIX: `raise ValidationError from e` raised the bare
                # class; marshmallow's ValidationError requires a message,
                # so that produced a TypeError instead of a 4xx validation
                # error. Propagate the schema error's message instead.
                raise ValidationError(e.message) from e
        return value
class HookSchema(Schema):
    """Marshmallow schema (de)serializing Hook records for the API."""

    id = fields.UUID(dump_only=True)
    # Provider must be one of the registered provider names.
    provider = fields.Str(
        validate=[fields.validate.OneOf(choices=VALID_PROVIDER_NAMES)]
    )
    provider_name = fields.Method("get_provider_name", dump_only=True)
    token = fields.Method("get_token", dump_only=True)
    secret_uri = fields.Method("get_secret_uri", dump_only=True)
    public_uri = fields.Method("get_public_uri", dump_only=True)
    is_required = fields.Boolean()
    created_at = fields.DateTime(attribute="date_created", dump_only=True)
    config = HookConfigField()

    @post_load(pass_many=False)
    def make_hook(self, data, **kwargs):
        """Materialize a Hook: update the contextual hook if one was
        supplied (edit flow), otherwise construct a new one (create flow)."""
        if self.context.get("hook"):
            hook = self.context["hook"]
            for key, value in data.items():
                setattr(hook, key, value)
        else:
            hook = Hook(**data)
        return hook

    def get_token(self, obj):
        """Base64 token, exposed only within 24h of creation; None after."""
        # we allow visibility of tokens for 24 hours
        if obj.date_created > timezone.now() - timedelta(days=1):
            return urlsafe_b64encode(obj.token).decode("utf-8")
        return None

    def get_public_uri(self, obj):
        """Unauthenticated ingest URI for this hook."""
        return "/hooks/{}/public".format(str(obj.id))

    def get_secret_uri(self, obj):
        """URI embedding the hook's request signature."""
        return "/hooks/{}/{}".format(str(obj.id), obj.get_signature())

    def get_provider_name(self, obj):
        """Human-readable name from the hook's provider implementation."""
        provider_cls = get_provider(obj.provider)
        return provider_cls.get_name(obj.config or {})
|
from typing import Dict, Union, TYPE_CHECKING

# The heavy array libraries are only needed for static type checking;
# guarding the imports avoids paying their import cost at runtime.
if TYPE_CHECKING:
    import numpy
    import pandas
    import pyarrow

# A batch of data: a single ndarray, a pandas DataFrame, an Arrow Table,
# or a mapping of column name -> ndarray.
DataBatchType = Union[
    "numpy.ndarray", "pandas.DataFrame", "pyarrow.Table", Dict[str, "numpy.ndarray"]
]
|
#!/usr/bin/env python
# coding=utf-8
#
# File: StickyNotes/src/python/setup.py
# Author: Hankso
# Webpage: https://github.com/hankso
# Time: Sat 29 Feb 2020 04:21:01 PM CST
from setuptools import setup
# Package metadata for the StickyNotes distribution.
setup(
    name='stickynotes',
    version='1.0.0',
    url='https://github.com/hankso/StickyNotes',
    author='hankso',
    author_email='hankso1106@gmail.com',
    license='MIT',
    description='Create, edit and manage your cloud notes with QRCode',
    package_data={},
    install_requires=['bottle'],
)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
    """Return a plain greeting for the site root."""
    greeting = 'Alô, mundo'
    return HttpResponse(greeting)
|
#!/usr/bin/env python
import sys
from test_support import verbose
import string_tests
# UserString is a wrapper around the native builtin string type.
# UserString instances should behave similar to builtin string objects.
# The test cases were in part derived from 'test_string.py'.
from UserString import UserString
# NOTE: this is Python 2 code (print statements, backticks, apply()).
if __name__ == "__main__":
    # Run quietly when executed directly.
    verbose = 0

# Records which string methods the suite has exercised.
tested_methods = {}

def test(methodname, input, *args):
    # Compare a method's result across three forms of the same value:
    # the native string, a UserString wrapper, and a doubly-wrapped
    # UserString, and report any disagreement.
    global tested_methods
    tested_methods[methodname] = 1
    if verbose:
        print '%s.%s(%s) ' % (input, methodname, args),
    u = UserString(input)
    objects = [input, u, UserString(u)]
    res = [""] * 3
    for i in range(3):
        object = objects[i]
        try:
            f = getattr(object, methodname)
            # apply() is the Python 2 spelling of f(*args).
            res[i] = apply(f, args)
        except:
            # Record the exception type so mismatched failures are caught too.
            res[i] = sys.exc_type
    # Native string vs. UserString must agree.
    if res[0] != res[1]:
        if verbose:
            print 'no'
        print `input`, f, `res[0]`, "<>", `res[1]`
    else:
        if verbose:
            print 'yes'
    # UserString vs. UserString(UserString) must agree.
    if res[1] != res[2]:
        if verbose:
            print 'no'
        print `input`, f, `res[1]`, "<>", `res[2]`
    else:
        if verbose:
            print 'yes'

string_tests.run_method_tests(test)
|
"""
Authorize actions.
"""
import falcon
from talons.auth import middleware
from dgi_repo.auth.drupal import SiteBasicIdentifier as Identifier
from dgi_repo.auth.drupal import authenticate as drupal_auth
from dgi_repo.auth.system import (authenticate as system_authenticator,
Authorize as SystemAuthorize)
from dgi_repo.auth.utilities import Authenticator, Authorizer
def authorize(identity, action):
    """External authorizer callable, as used by talons.auth.external.Authorizer.

    Args:
        identity: A talons.auth.interfaces.Identity instance.
        action: A talons.auth.interfaces.ResourceAction instance, carrying
            the falcon.Request (``request``) and parameter dict (``params``).

    Returns:
        True if the given agent may perform the action, False otherwise.
        Currently every action is permitted.
    """
    # TODO: Apply "global" and object-level policies.
    return True
class AuthMiddleware(object):
    """Falcon middleware wiring talons identification/authentication/authorization."""

    def __init__(self):
        """Assemble the talons middleware chain and install 401 handlers."""
        mw = middleware.create_middleware(
            identify_with=[Identifier],
            authenticate_with=Authenticator(drupal_auth, system_authenticator),
            authorize_with=Authorizer(authorize, SystemAuthorize().authorize)
        )
        # Replace talons' default 401 responses with our own.
        mw.raise_401_no_identity = self._raise_no_ident
        mw.raise_401_fail_authenticate = self._raise_failed
        self._auth_middleware = mw

    def _raise_no_ident(self):
        """Raise 401 when no identity information was supplied."""
        raise falcon.HTTPUnauthorized('Authentication required',
                                      'No identity information found.',
                                      ['basic'])

    def _raise_failed(self):
        """Raise 401 when authentication was attempted but failed."""
        raise falcon.HTTPUnauthorized('Authentication required',
                                      'Authentication failed.', ['basic'])

    def process_request(self, req, resp):
        """Route the incoming request through the talons middleware chain."""
        return self._auth_middleware(req, resp, req.params)
|
"""
Dechat - Distributed, Encrypted CHAT client
"""
from . import errors, messaging, user
|
from __future__ import absolute_import
from .htchirp import HTChirp, condor_chirp
|
# -*- encoding: utf-8 -*-
from openerp import http
from openerp.http import request
import json
try: import httplib
except ImportError:
import http.client as httplib
import logging
_logger = logging.getLogger(__name__)
class tmalljd_callback(http.Controller):
    """OAuth callback endpoint exchanging a JD.com auth code for an access token."""

    @http.route(['/tmalljd_callback/jd'], type='http', auth="public", website=True)
    def tmalljd_callback(self, **arg):
        header = {
            'Content-type': 'application/x-www-form-urlencoded;charset=UTF-8'
        }
        cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
        # Look up the shop record by its (hard-coded) app key.
        shop_id = pool.get('loewieec.shop').search(cr, uid, [('appkey', '=', '0095ABD0C47C84AE9C393DF018A41A1F')], order="name asc")
        if shop_id:
            shop_obj = pool.get('loewieec.shop').browse(cr, uid, shop_id, context=context)
        # NOTE(review): if no shop matches, shop_obj is unbound and the url
        # construction below will raise — preexisting behavior, left as-is.
        auth_code = ''
        if 'code' in request.params.keys():
            auth_code = request.params['code']
        if shop_id:
            shop_obj.tokenurl = auth_code
        url = '/oauth/token?grant_type=authorization_code&client_id=%s&redirect_uri=%s&scope=read&code=%s&client_secret=%s' % (shop_obj.appkey, shop_obj.authurl, auth_code, shop_obj.appsecret)
        connection = httplib.HTTPSConnection('oauth.jd.com')
        connection.request("POST", url, headers=header)
        response = connection.getresponse()
        # BUG FIX: was `response.status is not 200` — identity comparison
        # with an int literal is implementation-dependent; use !=.
        if response.status != 200:
            raise Exception('invalid http status ' + str(response.status) + ',detail body:' + response.read())
        result = response.read()
        jsonobj = json.loads(result)
        shop_obj.last_log = jsonobj
        # BUG FIX: removed a duplicated access_token assignment.
        shop_obj.access_token = jsonobj.get('access_token')
        return 'OK, Thanks'
|
import pytest
from starlette.testclient import TestClient
from .fastapi_app import app, deps, Inner, ContextLoaded
def test_request_singletons_are_the_same_within_a_request_context():
    """Both resolutions of the request-scoped singleton match within one request."""
    payload = TestClient(app).get("/").json()
    assert payload["outer_one"] == payload["outer_two"]
def test_request_singletons_are_different_for_new_requests():
    """Each new request gets a fresh request-scoped singleton."""
    client = TestClient(app)
    first = client.get("/").json()
    second = client.get("/").json()
    assert first["outer_one"] != second["outer_one"]
def test_deps_can_be_overridden_during_test():
    """Overrides apply inside the context manager and are undone on exit."""
    client = TestClient(app)
    with deps.override_for_test() as container:
        container[Inner] = Inner("test_message")
        during = client.get("/inner").json()
    after = client.get("/inner").json()
    assert during["data"] == "test_message"
    assert after["data"] != "test_message"
@pytest.fixture
def fixture_fake_deps():
    """Yield a container with Inner overridden for the fixture's lifetime."""
    with deps.override_for_test() as container:
        container[Inner] = Inner("fixture_deps")
        yield container
def test_overriding_with_fixtures_works(fixture_fake_deps):
    """A fixture-applied override is visible to requests made in the test."""
    body = TestClient(app).get("/inner").json()
    assert body["data"] == "fixture_deps"
def test_deps_can_be_overridden_during_test_multiple_times():
    # Nested overrides stack: the innermost active override wins, and each
    # exit restores the previous layer. The three requests are made at the
    # three nesting depths (inner, outer, none).
    client = TestClient(app)
    with deps.override_for_test() as c1:
        with deps.override_for_test() as c2:
            c1[Inner] = Inner("first_level")
            c2[Inner] = Inner("second_level")
            second = client.get("/inner").json()
        first = client.get("/inner").json()
    outer = client.get("/inner").json()
    # With no override active, the app's default Inner has no data.
    assert outer["data"] is None
    assert first["data"] == "first_level"
    assert second["data"] == "second_level"
def test_deps_can_use_contexts_for_cleanup_tasks():
    """Context-managed deps run their cleanup after the response is produced."""
    ContextLoaded.cleaned_up = False
    resp = TestClient(app).get("with_some_context")
    # During the request the dependency is still live...
    assert resp.json() == {"cleaned_up": "False"}
    # ...and after the request its cleanup has run.
    assert ContextLoaded.cleaned_up
|
#!/usr/bin/env python3
"""
Pre-processing pipeline for Beehive Dataset
Train/Val Set - Hive 3
Test Set - Hive 1
https://zenodo.org/record/2667806
"""
import copy
import logging
from typing import Any, Dict
import hearpreprocess.pipeline as pipeline
from . import beehive_states_fold0
logger = logging.getLogger("luigi-interface")

# Fold 1 swaps the hives relative to fold 0: train/val on hive 3, test on hive 1.
TRAIN_HIVE = "hive3"
TEST_HIVE = "hive1"

# Start from fold 0's task configuration; only the task name differs.
generic_task_config: Dict[str, Any] = copy.deepcopy(
    beehive_states_fold0.generic_task_config
)
generic_task_config["task_name"] = "beehive_states_fold1"
class ExtractMetadata(beehive_states_fold0.ExtractMetadata):
    """Fold-1 metadata extraction: reuses fold 0's logic, overriding only
    which hive feeds the train/val split vs. the test split."""

    @property
    def train_hive(self):
        """Hive whose recordings form the train/val split."""
        return TRAIN_HIVE

    @property
    def test_hive(self):
        """Hive whose recordings form the test split."""
        return TEST_HIVE
def extract_metadata_task(task_config: Dict[str, Any]) -> pipeline.ExtractMetadata:
    """Build the metadata-extraction task wired to its download dependencies."""
    downloads = pipeline.get_download_and_extract_tasks(task_config)
    return ExtractMetadata(
        outfile="process_metadata.csv", task_config=task_config, **downloads
    )
|
"""
Counting Organizations
This application will read the mailbox data (mbox.txt) and count the
number of email messages per organization (i.e. domain name of the
email address) using a database with the following schema to maintain
the counts.
+-----------------------------------------------+
| CREATE TABLE Counts (org TEXT, count INTEGER) |
+-----------------------------------------------+
When you have run the program on mbox.txt upload the resulting database
file above for grading. If you run the program multiple times in
testing or with different files, make sure to empty out the data before
each run.
You can use this code as a starting point for your application:
http://www.py4e.com/code3/emaildb.py.
The data file for this application is the same as in previous
assignments: http://www.py4e.com/code3/mbox.txt.
Because the sample code is using an UPDATE statement and committing the
results to the database as each record is read in the loop, it might
take as long as few minutes to process all the data. The commit insists
on completely writing all the data to disk every time it is called.
The program can be speeded up greatly by moving the commit operation
outside of the loop. In any database program, there is a balance
between the number of operations you execute between commits and the
importance of not losing the results of operations that have not yet
been committed.
"""
import sqlite3

conn = sqlite3.connect('emaildb.sqlite')
cur = conn.cursor()

# Start from a clean table so repeated runs don't double-count.
cur.execute('DROP TABLE IF EXISTS Counts')
cur.execute('CREATE TABLE Counts (org TEXT, count INTEGER)')

fn = "py4e/data/mbox.txt"
# `with` guarantees the file handle is closed (the original leaked it).
with open(fn) as fh:
    for line in fh:
        if not line.startswith('From: '):
            continue
        pieces = line.split()
        # Guard against malformed "From:" lines (no address, or no domain)
        # that previously raised IndexError.
        if len(pieces) < 2 or '@' not in pieces[1]:
            continue
        org = pieces[1].split('@')[1]
        cur.execute('SELECT count FROM Counts WHERE org = ?', (org,))
        row = cur.fetchone()
        if row is None:
            cur.execute('INSERT INTO Counts (org, count) VALUES (?, 1)', (org,))
        else:
            cur.execute('UPDATE Counts SET count = count + 1 WHERE org = ?', (org,))

# Single commit outside the loop — far faster than committing per record.
conn.commit()
conn.close()
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 13:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give the `loyalto` CharField a 'nil'
    default and max_length=70 on all four account models."""

    dependencies = [
        ('account', '0003_auto_20170221_1632'),
    ]

    operations = [
        migrations.AlterField(
            model_name='accomadation',
            name='loyalto',
            field=models.CharField(default='nil', max_length=70),
        ),
        migrations.AlterField(
            model_name='hospital',
            name='loyalto',
            field=models.CharField(default='nil', max_length=70),
        ),
        migrations.AlterField(
            model_name='pronearea',
            name='loyalto',
            field=models.CharField(default='nil', max_length=70),
        ),
        migrations.AlterField(
            model_name='volunteer',
            name='loyalto',
            field=models.CharField(default='nil', max_length=70),
        ),
    ]
|
"""
==========================================================
Generalized Canonical Correlation Analysis (GCCA) Tutorial
==========================================================
In this tutorial we demonstrate the use of GCCA to uncover latent correlated
information across views when we have more than 2 views. In this case, we
use 3 views from the UCI Multiple Features Dataset.
"""
# License: MIT
from mvlearn.datasets import load_UCImultifeature
from mvlearn.embed import GCCA
from mvlearn.plotting import crossviews_plot
###############################################################################
# Load Data
# ---------
#
# We load three views from the UCI handwritten digits multi-view data set.
# Specifically: the profile correlations, Karhunen-Loève coefficients, and pixel
# averages from 2x3 windows.
# Load the data
# Load three views of the handwritten-digits dataset.
Xs, y = load_UCImultifeature(views=[1, 2, 3])

# Inspect the dataset
print(f'There are {len(Xs)} views.')
print(f'There are {Xs[0].shape[0]} observations')
print(f'The feature sizes are: {[X.shape[1] for X in Xs]}')

###############################################################################
# Embed Views
# ^^^^^^^^^^^
# Create the GCCA object and embed all three views jointly.
gcca = GCCA()
Xs_latents = gcca.fit_transform(Xs)

print(f'The feature sizes are: {[X.shape[1] for X in Xs_latents]}')

###############################################################################
# Plot the first two views against each other
# -------------------------------------------
# The top three dimensions from the latent spaces of the profile correlation
# and pixel average views are plotted against each other. However, their
# latent spaces are influenced by the Karhunen-Loève coefficients, not plotted.
crossviews_plot(Xs_latents[[0, 2]], dimensions=[0, 1, 2], labels=y,
                cmap='Set1', title='Profile correlations vs Pixel Averages',
                scatter_kwargs={'alpha': 0.4, 's': 2.0})
|
import os
import fnmatch
import sqlite3
import json
import csv
from utils import dataset_params_to_search
import logging
from twarc import json2csv
import zipfile
from flask import current_app
from flask_mail import Mail, Message
logger = logging.getLogger(__name__)
def generate_tasks(self, task_defs, dataset_params, total_tweets, dataset_path, generate_update_increment=None,
                   zip_bytes_threshold=1000000000):
    """Run the requested extract-generation tasks over a dataset's tweets.

    For each named task in `task_defs`, instantiates the matching task class,
    streams search hits through every task, zips each task's output files
    (splitting zips above `zip_bytes_threshold` bytes), optionally emails the
    requester, and returns a final progress dict for the celery-style state.
    """
    generate_update_increment = generate_update_increment or 10000
    tasks = []
    task_args = [self, total_tweets, dataset_path, generate_update_increment]
    # Instantiate a task object per recognized task name; unknown keys
    # (e.g. 'requester_email', 'dataset_url') are simply skipped here.
    for task_name, task_kwargs in task_defs.items():
        if task_name in task_class_map:
            tasks.append(task_class_map[task_name](*task_args, **task_kwargs))
    search = dataset_params_to_search(dataset_params)
    source = set()
    for task in tasks:
        # Delete existing files
        if task.file_filter:
            # Unzipped files
            for filename in fnmatch.filter(os.listdir(dataset_path), task.file_filter):
                os.remove(os.path.join(dataset_path, filename))
            # Zipped files
            for filename in fnmatch.filter(os.listdir(dataset_path), '{}.zip'.format(task.file_filter)):
                os.remove(os.path.join(dataset_path, filename))
        task.on_start()
        source.update(task.source)
    # Restrict the returned document fields to what the tasks need.
    if source:
        search.source(list(source))
    tweet_count = 0
    for tweet_count, hit in enumerate(search.scan()):
        # This is to support limiting the number of tweets
        if tweet_count + 1 > total_tweets:
            break
        for task in tasks:
            task.on_hit(hit, tweet_count)
        if (tweet_count + 1) % generate_update_increment == 0:
            self.update_state(state='PROGRESS',
                              meta={'current': tweet_count + 1, 'total': total_tweets,
                                    'status': '{:,d} of {:,d} tweet ids'.format(tweet_count + 1,
                                                                                total_tweets)})
    for task in tasks:
        task.on_end()
        # Zip this task's output files, starting a new zip whenever the
        # current one exceeds zip_bytes_threshold.
        # NOTE(review): assumes every task defines file_filter here — verify.
        z = None
        zip_filepath = None
        file_count = 1
        for filename in sorted(fnmatch.filter(os.listdir(dataset_path), task.file_filter)):
            if z is None or os.path.getsize(zip_filepath) > zip_bytes_threshold:
                if z:
                    z.close()
                zip_filepath = os.path.join(dataset_path,
                                            '{}.zip'.format(task.file_filter.replace('*', str(file_count).zfill(3))))
                z = zipfile.ZipFile(zip_filepath, 'w', compression=zipfile.ZIP_DEFLATED)
                file_count += 1
            filepath = os.path.join(dataset_path, filename)
            z.write(filepath, arcname=filename)
            # Remove the raw file once it's been zipped.
            os.remove(os.path.join(dataset_path, filename))
        if z:
            z.close()
    # Remove the marker file that recorded the pending task definitions.
    generate_task_filepath = os.path.join(dataset_path, 'generate_tasks.json')
    if os.path.exists(generate_task_filepath):
        os.remove(generate_task_filepath)
    # Notify user if email provided
    if task_defs.get('requester_email'):
        send_email(email_address=task_defs['requester_email'],
                   dataset_name=dataset_params['dataset_name'],
                   url_for_extract=task_defs['dataset_url'])
    return {'current': tweet_count + 1, 'total': total_tweets,
            'status': 'Completed.'}
def send_email(email_address, dataset_name, url_for_extract):
    '''Sends an email on task completion to the user requesting the extract.'''
    # The active Flask app supplies the mail configuration (EMAIL_FROM etc.).
    flask_app = current_app._get_current_object()
    mailer = Mail(flask_app)
    message = Message(subject='TweetSets Data Extract Complete',
                      sender=flask_app.config['EMAIL_FROM'],
                      recipients=[email_address])
    message.html = 'Your data extract for dataset <em>{}</em> is ready <a href={}>for downloading</a>.'.format(dataset_name, url_for_extract)
    mailer.send(message)
class BaseGenerateTask:
    """Template for extract-generation tasks.

    Subclasses must implement ``on_hit`` and may override the
    ``on_start``/``on_end`` lifecycle hooks.
    """

    def __init__(self, state, total_tweets, dataset_path, generate_update_increment, file_filter=None, source=None):
        self.state = state
        self.total_tweets = total_tweets
        self.dataset_path = dataset_path
        self.generate_update_increment = generate_update_increment
        # Glob pattern of the files this task produces (None = no files).
        self.file_filter = file_filter
        # Document fields this task needs from the search source.
        self.source = source or []

    def on_start(self):
        """Hook run before the first hit. Default: no-op."""
        pass

    def on_hit(self, hit, tweet_count):
        """Process one search hit; subclasses must implement this."""
        raise NotImplementedError('on_hit must be implemented')

    def on_end(self):
        """Hook run after the last hit. Default: no-op."""
        pass

    def update_state(self, current, total, status, state='PROGRESS'):
        """Relay progress to the underlying celery-style state object."""
        meta = {'current': current, 'total': total, 'status': status}
        self.state.update_state(state=state, meta=meta)
class GenerateTweetIdsTask(BaseGenerateTask):
    """Write tweet ids, one per line, into numbered text files."""

    def __init__(self, *args, max_per_file=None):
        super(GenerateTweetIdsTask, self).__init__(*args, file_filter='tweet-ids-*.txt')
        self.max_per_file = max_per_file or 10000000
        self.file = None
        self.file_count = 1

    def _cycle_file(self):
        # Close the current output file (if any) and open the next one.
        if self.file:
            self.file.close()
        name = 'tweet-ids-{}.txt'.format(str(self.file_count).zfill(3))
        self.file = open(os.path.join(self.dataset_path, name), 'w')
        self.file_count += 1

    def on_hit(self, hit, tweet_count):
        # Roll over to a fresh file every max_per_file tweets.
        if tweet_count % self.max_per_file == 0:
            self._cycle_file()
        self.file.write(hit.meta.id)
        self.file.write('\n')

    def on_end(self):
        if self.file:
            self.file.close()
class GenerateTweetJSONTask(BaseGenerateTask):
    """Dump raw tweet JSON, one document per line, into numbered .jsonl files."""

    def __init__(self, *args, max_per_file=None):
        super(GenerateTweetJSONTask, self).__init__(*args, file_filter='tweets-*.jsonl', source=['tweet'])
        self.max_per_file = max_per_file or 10000000
        self.file = None
        self.file_count = 1

    def _cycle_file(self):
        # Close the current output file (if any) and open the next one.
        if self.file:
            self.file.close()
        name = 'tweets-{}.jsonl'.format(str(self.file_count).zfill(3))
        self.file = open(os.path.join(self.dataset_path, name), 'w')
        self.file_count += 1

    def on_hit(self, hit, tweet_count):
        # Roll over to a fresh file every max_per_file tweets.
        if tweet_count % self.max_per_file == 0:
            self._cycle_file()
        self.file.write(hit.tweet)
        self.file.write('\n')

    def on_end(self):
        if self.file:
            self.file.close()
class GenerateTweetCSVTask(BaseGenerateTask):
    """Flatten tweets into CSV rows (via twarc's json2csv), cycling files."""

    def __init__(self, *args, max_per_file=None):
        super(GenerateTweetCSVTask, self).__init__(*args, file_filter='tweets-*.csv', source=['tweet'])
        self.max_per_file = max_per_file or 250000
        self.file = None
        self.sheet = None
        self.file_count = 1

    def on_hit(self, hit, tweet_count):
        if tweet_count % self.max_per_file == 0:
            # Start a new numbered CSV and write its header row.
            if self.file:
                self.file.close()
            name = 'tweets-{}.csv'.format(str(self.file_count).zfill(3))
            self.file = open(os.path.join(self.dataset_path, name), 'w')
            self.sheet = csv.writer(self.file)
            self.sheet.writerow(json2csv.get_headings())
            self.file_count += 1
        self.sheet.writerow(json2csv.get_row(json.loads(hit.tweet), excel=True))

    def on_end(self):
        if self.file:
            self.file.close()
class GenerateMentionsTask(BaseGenerateTask):
    """Emit a mention graph: edge CSVs (user -> mentioned user) plus a node
    CSV mapping each mentioned user id to a screen name.

    A throwaway sqlite table de-duplicates node rows; it is deleted on_end.
    """

    def __init__(self, *args, max_per_file=None):
        super(GenerateMentionsTask, self).__init__(*args, file_filter='mention-*.csv',
                                                   source=['mention_user_ids', 'user_id'])
        self.max_per_file = max_per_file or 10000000
        self.db_filepath = os.path.join(self.dataset_path, "mentions.db")
        self.edges_file = None
        self.nodes_file = None
        self.mention_count = 0
        self.conn = None
        self.file_count = 1

    def on_start(self):
        # Create db
        if os.path.exists(self.db_filepath):
            os.remove(self.db_filepath)
        self.conn = sqlite3.connect(self.db_filepath)
        with self.conn:
            # The primary key makes duplicate inserts raise IntegrityError,
            # which is how node de-duplication works below.
            self.conn.execute('create table user_ids (user_id primary key);')
        self.nodes_file = open(os.path.join(self.dataset_path, 'mention-nodes.csv'), 'w')

    def on_hit(self, hit, tweet_count):
        # Cycle edges files
        if tweet_count % self.max_per_file == 0:
            if self.edges_file:
                self.edges_file.close()
            self.edges_file = open(
                os.path.join(self.dataset_path, 'mention-edges-{}.csv'.format(str(self.file_count).zfill(3))), 'w')
            self.file_count += 1
        # Write to mentions to file
        if hasattr(hit, 'mention_user_ids'):
            for i, mention_user_id in enumerate(hit.mention_user_ids):
                # Encountered instances where mention_user_id is null.
                if mention_user_id:
                    self.mention_count += 1
                    # Write mention user id (edge)
                    self.edges_file.write(','.join([hit.user_id, mention_user_id]))
                    self.edges_file.write('\n')
                    # Possibly write mention user id to mention screen name (node)
                    try:
                        with self.conn:
                            self.conn.execute('insert into user_ids(user_id) values (?);', (mention_user_id,))
                            self.nodes_file.write(','.join([mention_user_id, hit.mention_screen_names[i]]))
                            self.nodes_file.write('\n')
                    except sqlite3.IntegrityError:
                        # A dupe, so skipping writing to nodes file
                        pass

    def on_end(self):
        if self.edges_file:
            self.edges_file.close()
        self.nodes_file.close()
        self.conn.close()
        # The sqlite db was only for de-duplication; remove it.
        os.remove(self.db_filepath)
class GenerateTopMentionsTask(BaseGenerateTask):
    """Count mentions per user and emit CSVs of users ordered by mention
    count (descending), with their known screen names appended.

    Counts are accumulated in in-memory buffers and periodically flushed to
    a throwaway sqlite db, which is used for the final sort and then removed.
    """

    def __init__(self, *args, max_per_file=None):
        super(GenerateTopMentionsTask, self).__init__(*args, file_filter='top-mentions-*.csv',
                                                      source=['mention_user_ids', 'mention_screen_names'])
        self.max_per_file = max_per_file or 250000
        self.db_filepath = os.path.join(self.dataset_path, "top-mentions.db")
        self.conn = None
        self.mention_count = 0
        self.total_user_count = 0
        # user_id -> pending count increments not yet flushed to sqlite.
        self.count_buf = dict()
        # (user_id, screen_name) pairs not yet flushed to sqlite.
        self.user_buf = set()

    def on_start(self):
        # Create db
        if os.path.exists(self.db_filepath):
            os.remove(self.db_filepath)
        self.conn = sqlite3.connect(self.db_filepath)
        with self.conn:
            self.conn.execute('create table mentions(user_id primary key, mention_count int);')
            self.conn.execute('create table users(user_id int primary key, screen_name text);')
            self.conn.execute('create unique index users_idx on users(user_id, screen_name);')

    def on_hit(self, hit, tweet_count):
        if hasattr(hit, 'mention_user_ids'):
            for i, mention_user_id in enumerate(hit.mention_user_ids):
                # Encountered unexpected blank user ids
                if mention_user_id:
                    self.mention_count += 1
                    mention_screen_name = hit.mention_screen_names[i]
                    if mention_user_id in self.count_buf:
                        # Already buffered; just bump the in-memory count.
                        self.count_buf[mention_user_id] += 1
                    else:
                        # Try to bump the persisted count; if the user isn't
                        # in the db yet, start buffering them at 1.
                        cur = self.conn.cursor()
                        cur.execute('update mentions set mention_count=mention_count+1 where user_id=?',
                                    (mention_user_id,))
                        if not cur.rowcount:
                            self.count_buf[mention_user_id] = 1
                        self.conn.commit()
                    self.user_buf.add((mention_user_id, mention_screen_name))
                    # Flush buffers to sqlite in batches of 1000.
                    if len(self.count_buf) and len(self.count_buf) % 1000 == 0:
                        with self.conn:
                            self.conn.executemany(
                                'insert into mentions(user_id, mention_count) values (?, ?);',
                                _mention_iter(self.count_buf))
                        self.total_user_count += len(self.count_buf)
                        self.count_buf = dict()
                    if len(self.user_buf) and len(self.user_buf) % 1000 == 0:
                        with self.conn:
                            self.conn.executemany(
                                'insert or ignore into users(user_id, screen_name) values (?, ?);',
                                self.user_buf)
                        self.user_buf = set()

    def on_end(self):
        # Final write of buffer
        if len(self.count_buf):
            with self.conn:
                self.conn.executemany('insert into mentions(user_id, mention_count) values (?, ?);',
                                      _mention_iter(self.count_buf))
            self.total_user_count += len(self.count_buf)
        if len(self.user_buf):
            with self.conn:
                self.conn.executemany('insert or ignore into users(user_id, screen_name) values (?, ?);',
                                      self.user_buf)
        # Stream users out of sqlite in descending mention order, cycling
        # output files every max_per_file rows.
        file_count = 1
        file = None
        try:
            cur = self.conn.cursor()
            for user_count, row in enumerate(
                    cur.execute("select user_id, mention_count from mentions order by mention_count desc")):
                user_id = row[0]
                mention_count = row[1]
                # Cycle tweet id files
                if user_count % self.max_per_file == 0:
                    if file:
                        file.close()
                    file = open(
                        os.path.join(self.dataset_path, 'top-mentions-{}.csv'.format(str(file_count).zfill(3))), 'w')
                    file_count += 1
                # Get screen names
                screen_names = []
                for user_row in self.conn.execute("select screen_name from users where user_id=?", (user_id,)):
                    screen_names.append(user_row[0])
                # Write to mentions to file
                line = [user_id, str(mention_count)]
                line.extend(screen_names)
                file.write(','.join(line))
                file.write('\n')
                if (user_count + 1) % self.generate_update_increment == 0:
                    self.update_state(user_count + 1, self.total_user_count,
                                      '{:,d} of {:,d} mentioners in {:,d} files'.format(
                                          user_count + 1, self.total_user_count, file_count))
        finally:
            if file:
                file.close()
            # The sqlite db was only a working area; remove it.
            os.remove(self.db_filepath)
class GenerateTopUsersTask(BaseGenerateTask):
    """Ranks users by tweet count and writes them to numbered CSV files.

    Tweet counts are buffered in memory and periodically flushed to a
    scratch SQLite database (`top-users.db`) so arbitrarily many users can
    be ranked without exhausting memory. `on_end` dumps the ranking into
    `top-users-NNN.csv` files (at most `max_per_file` rows each; row
    format: `user_id,tweet_count[,screen_name...]`) and then deletes the
    scratch database.
    """

    def __init__(self, *args, max_per_file=None):
        super(GenerateTopUsersTask, self).__init__(*args, file_filter='top-users-*.csv',
                                                   source=['user_id', 'user_screen_name'])
        # Maximum number of users written per output CSV file.
        self.max_per_file = max_per_file or 250000
        self.db_filepath = os.path.join(self.dataset_path, "top-users.db")
        self.conn = None
        self.total_user_count = 0
        # In-memory buffers, flushed to SQLite in batches of 1000.
        self.count_buf = dict()
        self.user_buf = set()

    def on_start(self):
        """Create a fresh scratch database with count and screen-name tables."""
        if os.path.exists(self.db_filepath):
            os.remove(self.db_filepath)
        self.conn = sqlite3.connect(self.db_filepath)
        with self.conn:
            self.conn.execute('create table tweets(user_id primary key, tweet_count int);')
            self.conn.execute('create table users(user_id int primary key, screen_name text);')
            self.conn.execute('create unique index users_idx on users(user_id, screen_name);')

    def on_hit(self, hit, tweet_count):
        """Record one tweet for `hit`'s author, flushing buffers as needed.

        The `tweet_count` argument is part of the callback interface and is
        not used by this task.
        """
        screen_name = hit.user_screen_name
        user_id = hit.user_id
        if user_id in self.count_buf:
            self.count_buf[user_id] += 1
        else:
            # Try to bump an already-flushed row; otherwise start buffering.
            cur = self.conn.cursor()
            cur.execute('update tweets set tweet_count=tweet_count+1 where user_id=?',
                        (user_id,))
            if not cur.rowcount:
                self.count_buf[user_id] = 1
            self.conn.commit()
        self.user_buf.add((user_id, screen_name))
        if len(self.count_buf) and len(self.count_buf) % 1000 == 0:
            with self.conn:
                self.conn.executemany(
                    'insert into tweets(user_id, tweet_count) values (?, ?);', self.count_buf.items())
            self.total_user_count += len(self.count_buf)
            self.count_buf = dict()
        if len(self.user_buf) and len(self.user_buf) % 1000 == 0:
            with self.conn:
                self.conn.executemany(
                    'insert or ignore into users(user_id, screen_name) values (?, ?);',
                    self.user_buf)
            self.user_buf = set()

    def on_end(self):
        """Flush buffers, write the ranked users to CSV files, and clean up."""
        # Final write of buffers.
        if len(self.count_buf):
            with self.conn:
                self.conn.executemany('insert into tweets(user_id, tweet_count) values (?, ?);',
                                      self.count_buf.items())
            self.total_user_count += len(self.count_buf)
        if len(self.user_buf):
            with self.conn:
                self.conn.executemany('insert or ignore into users(user_id, screen_name) values (?, ?);',
                                      self.user_buf)
        file_count = 1
        file = None
        try:
            cur = self.conn.cursor()
            for user_count, row in enumerate(
                    cur.execute("select user_id, tweet_count from tweets order by tweet_count desc")):
                user_id = row[0]
                tweet_count = row[1]
                # Cycle output files: open a new one every max_per_file rows.
                if user_count % self.max_per_file == 0:
                    if file:
                        file.close()
                    file = open(
                        os.path.join(self.dataset_path, 'top-users-{}.csv'.format(str(file_count).zfill(3))), 'w')
                    file_count += 1
                # Collect any screen names recorded for this user id.
                screen_names = []
                for user_row in self.conn.execute("select screen_name from users where user_id=?", (user_id,)):
                    screen_names.append(user_row[0])
                # BUG FIX: SQLite may return user_id as an int (the `users`
                # table even declares it `int`); str() it so ','.join()
                # cannot raise TypeError.
                line = [str(user_id), str(tweet_count)]
                line.extend(screen_names)
                file.write(','.join(line))
                file.write('\n')
                # Periodic progress report.
                if (user_count + 1) % self.generate_update_increment == 0:
                    self.update_state(user_count + 1,
                                      self.total_user_count,
                                      '{:,d} of {:,d} users in {:,d} files'.format(user_count + 1,
                                                                                   self.total_user_count,
                                                                                   file_count))
        finally:
            if file:
                file.close()
            # The scratch db is only needed to produce the CSVs.
            os.remove(self.db_filepath)
# Maps the task-type key requested by callers to the implementing class.
# NOTE(review): GenerateTopUsersTask (and the top-mentions task above) are
# not registered here — confirm whether they are dispatched elsewhere.
task_class_map = {
    'tweet_ids': GenerateTweetIdsTask,
    'tweet_json': GenerateTweetJSONTask,
    'tweet_csv': GenerateTweetCSVTask
}
def _mention_iter(buf):
for mention_user_id, count in buf.items():
yield mention_user_id, count
|
import os
def ensure_directory_exists(path):
    """Create directory *path* if it does not already exist.

    Only an already-existing path is tolerated; any other failure
    (permission denied, missing parent directory, ...) propagates
    instead of being silently swallowed as before.
    """
    try:
        os.mkdir(path)
    except FileExistsError:
        # Already there — nothing to do.
        pass
|
#!/usr/bin/env python3
# Module Docstring
"""
REST API EXCEPTION CLASS
This module would account for all possible
exceptions for REST API testing.
"""
# Create a custom exception class
class StringSpaceException(Exception):
    """Raised when a string unexpectedly contains space characters."""
|
"""delete link field
Revision ID: f3a6881426d7
Revises: 20abd4a1b35f
Create Date: 2020-08-27 22:50:48.709969
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'f3a6881426d7'
down_revision = '20abd4a1b35f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the obsolete `link` column from `courses`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('courses', 'link')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore the `link` column on `courses`.

    NOTE(review): the column is re-added as NOT NULL without a server
    default, which fails on a non-empty MySQL table — confirm acceptable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('courses', sa.Column('link', mysql.VARCHAR(length=140), nullable=False))
    # ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
"""
Settings for running app tests when not part of another project.
"""
from __future__ import unicode_literals
# Required by Django, though we don't actually use the database.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'donottrack',
)
# Again, required by Django.
SECRET_KEY = 'super-secret!'
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 10 15:40:38 2021

Environment check: imports each third-party package used by the project
and prints its installed version. Any ImportError indicates a missing
dependency.

@author: E440
"""
import sklearn
print(sklearn.__version__)
import numpy
print(numpy.__version__)
import scipy
print(scipy.__version__)
import matplotlib
print(matplotlib.__version__)
import pandas
print(pandas.__version__)
import torch
print(torch.__version__)
import seaborn
print(seaborn.__version__)
# Install with: pip install wordcloud
import wordcloud
print(wordcloud.__version__)
import bs4
print(bs4.__version__)
import requests
print(requests.__version__)
# Install with: pip install Theano
import theano
print(theano.__version__)
import networkx
print(networkx.__version__)
import cv2
print(cv2.__version__)
# Install with: pip install gym
import gym
print(gym.__version__)
|
from model.net.constrastive.text_encoder_finetune import TextEncoder
from model.net.constrastive.vision_encoder_finetune import VisionEncoder
from model.net.constrastive.audio_encoder_fintune import AudioEncoder
import torch
import config as default_config
from torch import nn
from model.decoder.classifier import BaseClassifier
from util.metrics import cont_NTXentLoss
import numpy as np
from util.common import check_dir
class projector(nn.Module):
    """Projection head: LayerNorm -> Linear -> Tanh -> Dropout."""

    def __init__(self, input_dim, output_dim, dropout=0.5):
        super(projector, self).__init__()
        layers = [
            nn.LayerNorm(input_dim),
            nn.Linear(input_dim, output_dim),
            nn.Tanh(),
            nn.Dropout(dropout),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        """Project `x` (..., input_dim) to (..., output_dim)."""
        return self.fc(x)
class TVA_fusion(nn.Module):
    """Tri-modal (Text / Vision / Audio) fusion model for regression.

    Each modality is encoded, then passed through a "similar" and a
    "dissimilar" projection head. The six concatenated projections feed a
    regression decoder; individual projections feed a smaller mono-modal
    regression head, and (when a second sample batch is given) a
    supervised NT-Xent contrastive loss over hand-built index tensors.
    """

    def __init__(self, name=None, encoder_fea_dim=None, drop_out=None, config=default_config):
        """Build encoders, projection heads, and decoders.

        Args:
            name: passed through to the text encoder (pretrained model name,
                presumably — confirm against TextEncoder).
            encoder_fea_dim: encoder output width; defaults from config.
            drop_out: decoder dropout; defaults from config.
            config: configuration namespace (defaults to the module config).
        """
        super(TVA_fusion, self).__init__()
        self.config = config
        self.text_encoder = TextEncoder(name=name, with_projector=False, config=config)
        self.vision_encoder = VisionEncoder(config=config)
        self.audio_encoder = AudioEncoder(config=config)
        if encoder_fea_dim is None:
            encoder_fea_dim = config.SIMS.downStream.encoder_fea_dim
        if drop_out is None:
            drop_out = config.SIMS.downStream.text_drop_out
        # One "similar" and one "dissimilar" projection head per modality,
        # each halving the encoder feature dimension.
        self.T_simi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        self.V_simi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        self.A_simi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        self.T_dissimi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        self.V_dissimi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        self.A_dissimi_proj = projector(encoder_fea_dim, int(encoder_fea_dim / 2))
        hidden_size = [int(encoder_fea_dim), int(encoder_fea_dim / 2), int(encoder_fea_dim / 4),
                       int(encoder_fea_dim / 8), ]
        # Main regressor over all six projections (6 * dim/2 = dim * 3 inputs).
        self.TVA_decoder = BaseClassifier(input_size=int(encoder_fea_dim * 3),
                                          hidden_size=hidden_size,
                                          output_size=1, drop_out=drop_out,
                                          name='TVARegClassifier', )
        # Auxiliary regressor applied to a single projection (dim/2 inputs).
        self.mono_decoder = BaseClassifier(input_size=int(encoder_fea_dim / 2),
                                           hidden_size=hidden_size[2:],
                                           output_size=1, drop_out=drop_out,
                                           name='TVAMonoRegClassifier', )
        self.device = config.DEVICE
        self.criterion = torch.nn.MSELoss()
        self.model_path = config.SIMS.path.model_path + str(config.seed) + '/'
        check_dir(self.model_path)
        self.batch_size = config.SIMS.downStream.TVAExp_fusion.batch_size
        self.heat = config.SIMS.downStream.const_heat
        self.ntxent_loss = cont_NTXentLoss(temperature=self.heat)
        self.set_train()

    def forward(self, sample1, sample2, return_loss=True, return_emb=False, device=None, return_all_fea=False):
        """Encode, project, and optionally score one or two sample batches.

        Args:
            sample1: dict with 'raw_text', 'vision', 'audio', regression
                labels, and padding masks (schema per the dataset loader —
                confirm against caller).
            sample2: optional second batch; when present, enables the
                supervised contrastive term and concatenates predictions.
            return_loss: when True, run decoders and compute losses.
            return_emb: when True, return the fused projection vector
                instead of the raw encoder embeddings.
            device: target device; defaults to self.device.
            return_all_fea: when True, short-circuit and return the six
                projections of sample1.
        """
        if device is None:
            device = self.device
        text1 = sample1['raw_text']
        vision1 = sample1['vision'].clone().detach().to(device).float()
        audio1 = sample1['audio'].clone().detach().to(device).float()
        label1 = sample1['regression_labels'].clone().detach().to(device).float().squeeze()
        label_T1 = sample1['regression_labels_T'].clone().detach().to(device).float().squeeze()
        label_V1 = sample1['regression_labels_V'].clone().detach().to(device).float().squeeze()
        label_A1 = sample1['regression_labels_A'].clone().detach().to(device).float().squeeze()
        key_padding_mask_V1, key_padding_mask_A1 = (sample1['vision_padding_mask'].clone().detach().to(device),
                                                    sample1['audio_padding_mask'].clone().detach().to(device))
        # Per-modality embeddings, then simi/dissimi projections.
        x_t_embed = self.text_encoder(text1, device=device).squeeze()
        x_v_embed = self.vision_encoder(vision1, key_padding_mask=key_padding_mask_V1, device=device).squeeze()
        x_a_embed = self.audio_encoder(audio1, key_padding_mask=key_padding_mask_A1, device=device).squeeze()
        x_t_simi1 = self.T_simi_proj(x_t_embed)
        x_v_simi1 = self.V_simi_proj(x_v_embed)
        x_a_simi1 = self.A_simi_proj(x_a_embed)
        x_t_dissimi1 = self.T_dissimi_proj(x_t_embed)
        x_v_dissimi1 = self.V_dissimi_proj(x_v_embed)
        x_a_dissimi1 = self.A_dissimi_proj(x_a_embed)
        if return_all_fea:
            return x_t_simi1, x_v_simi1, x_a_simi1, x_t_dissimi1, x_v_dissimi1, x_a_dissimi1
        # Fused feature (last dim) and stacked per-projection batch (dim 0).
        x1_s = torch.cat((x_t_simi1, x_v_simi1, x_a_simi1), dim=-1)
        x1_ds = torch.cat((x_t_dissimi1, x_v_dissimi1, x_a_dissimi1), dim=-1)
        x1_all = torch.cat((x1_s, x1_ds), dim=-1)
        x1_sds = torch.cat((x_t_simi1, x_v_simi1, x_a_simi1, x_t_dissimi1, x_v_dissimi1, x_a_dissimi1,
                            ), dim=0)
        # Labels aligned with x1_sds: fused label for simi rows, per-modality
        # labels for dissimi rows.
        label1_sds = torch.cat((label1, label1, label1, label_T1, label_V1, label_A1,), dim=0)
        x_sds = x1_sds
        label_sds = label1_sds
        x2 = None
        x = x1_all
        label_all = label1
        if sample2 is not None:
            # Same pipeline for the second batch, then concatenate.
            text2 = sample2['raw_text']
            vision2 = sample2['vision'].clone().detach().to(device).float()
            audio2 = sample2['audio'].clone().detach().to(device).float()
            label2 = sample2['regression_labels'].clone().detach().to(device).float().squeeze()
            label_T2 = sample2['regression_labels_T'].clone().detach().to(device).float().squeeze()
            label_V2 = sample2['regression_labels_V'].clone().detach().to(device).float().squeeze()
            label_A2 = sample2['regression_labels_A'].clone().detach().to(device).float().squeeze()
            key_padding_mask_V2, key_padding_mask_A2 = (sample2['vision_padding_mask'].clone().detach().to(device),
                                                        sample2['audio_padding_mask'].clone().detach().to(device))
            x_t_embed2 = self.text_encoder(text2, device=device).squeeze()
            x_v_embed2 = self.vision_encoder(vision2, key_padding_mask=key_padding_mask_V2, device=device).squeeze()
            x_a_embed2 = self.audio_encoder(audio2, key_padding_mask=key_padding_mask_A2, device=device).squeeze()
            x_t_simi2 = self.T_simi_proj(x_t_embed2)
            x_v_simi2 = self.V_simi_proj(x_v_embed2)
            x_a_simi2 = self.A_simi_proj(x_a_embed2)
            x_t_dissimi2 = self.T_dissimi_proj(x_t_embed2)
            x_v_dissimi2 = self.V_dissimi_proj(x_v_embed2)
            x_a_dissimi2 = self.A_dissimi_proj(x_a_embed2)
            x2_s = torch.cat((x_t_simi2, x_v_simi2, x_a_simi2), dim=-1)
            x2_ds = torch.cat((x_t_dissimi2, x_v_dissimi2, x_a_dissimi2), dim=-1)
            x2_all = torch.cat((x2_s, x2_ds), dim=-1)
            x2_sds = torch.cat((x_t_simi2, x_v_simi2, x_a_simi2, x_t_dissimi2, x_v_dissimi2, x_a_dissimi2,
                                ), dim=0)
            label2_sds = torch.cat((label2, label2, label2, label_T2, label_V2, label_A2,), dim=0)
            x = torch.cat((x1_all, x2_all), dim=0)
            label_all = torch.cat((label1.squeeze(), label2.squeeze()), dim=0)
            x_sds = torch.cat((x1_sds, x2_sds), dim=0)
            label_sds = torch.cat((label1_sds, label2_sds), dim=0)
        if return_loss:
            pred = self.TVA_decoder(x)
            pred_mono = self.mono_decoder(x_sds)
            sup_const_loss = 0
            # sds_loss = 0
            if sample2 is not None:
                # Hard-coded (anchor1, positive, anchor2, negative) index
                # tensors for the contrastive loss over the 42-row stack
                # built below (1 sample1 row + 6 sample2 rows per head).
                # NOTE(review): the layout presumably pairs same-modality
                # simi views as positives and dissimi views as negatives —
                # verify against cont_NTXentLoss's indices_tuple contract.
                # [Ts,T1s,T2s,T3s,T4s,T5s,T6s,V1s,V2s,V3s,....]
                t1, p, t2, n = torch.tensor([0, 0, 7, 7, 14, 14,  # TsT1sT2s vsv1sV2s AsA1sA2s
                                             0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6],  # Ts Vs As 0-6
                                            device=device), \
                               torch.tensor([1, 2, 8, 9, 15, 16,
                                             7, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13, 20],
                                            device=device), \
                               torch.tensor([0, 0, 0, 0, 7, 7, 7, 7, 14, 14, 14, 14,
                                             0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
                                            device=device), \
                               torch.tensor([3, 4, 5, 6, 10, 11, 12, 13, 17, 18, 19, 20,
                                             21, 28, 35, 22, 29, 36, 23, 30, 37, 24, 31, 38, 25, 32, 39, 26, 33, 40, 27,
                                             34, 41], device=device)
                indices_tuple = (t1, p, t2, n)
                pre_sample_label = torch.tensor([0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 1, 2, 3, 4,
                                                 5, 5, 5, 6, 7, 8, 9, 5, 5, 5, 6, 7, 8, 9, 5, 5, 5, 6, 7, 8, 9, ])
                # Accumulate the contrastive loss per sample1 row, pairing it
                # with its 6-row slice of sample2's projections.
                for i in range(len(x1_all)):
                    pre_sample_x = []
                    for fea1, fea2 in zip([x_t_simi1, x_v_simi1, x_a_simi1, x_t_dissimi1, x_v_dissimi1, x_a_dissimi1, ],
                                          [x_t_simi2, x_v_simi2, x_a_simi2, x_t_dissimi2, x_v_dissimi2,
                                           x_a_dissimi2, ]):
                        pre_sample_x.append(torch.cat((fea1[i].unsqueeze(0), fea2[6 * i:6 * (i + 1)]), dim=0))
                    sup_const_loss += self.ntxent_loss(torch.cat(pre_sample_x, dim=0), pre_sample_label,
                                                       indices_tuple=indices_tuple)
                sup_const_loss /= len(x1_all)
            pred_loss = self.criterion(pred.squeeze(), label_all)
            mono_task_loss = self.criterion(pred_mono.squeeze(), label_sds)
            # Fixed loss weights: 0.1 contrastive, 0.01 mono-modal auxiliary.
            loss = pred_loss + 0.1 * sup_const_loss + 0.01 * mono_task_loss
            if return_emb:
                return pred, x1_all, loss, pred_loss, sup_const_loss
            else:
                return pred, (x_t_embed, x_v_embed, x_a_embed), loss, pred_loss, sup_const_loss
        else:
            if return_emb:
                return x1_all
            else:
                return (x_t_embed, x_v_embed, x_a_embed)

    def save_model(self, name):
        """Save the full state dict to the seed-specific model directory.

        NOTE(review): the `name` parameter is unused — the checkpoint path
        is always 'TVA_fusion_model.ckpt'; confirm whether that is intended.
        """
        # save all modules
        mode_path = self.model_path + 'TVA_fusion' + '_model.ckpt'
        print('model saved at:')
        print(mode_path)
        torch.save(self.state_dict(), mode_path)

    def load_model(self, name, load_pretrain=False):
        """Load weights: either the three pretrained encoders (by `name`),
        or the full fused checkpoint saved by save_model."""
        if load_pretrain:
            text_encoder_path = self.config.SIMS.path.encoder_path + name + '_text_encoder.ckpt'
            vision_encoder_path = self.config.SIMS.path.encoder_path + name + '_vision_encoder.ckpt'
            audio_encoder_path = self.config.SIMS.path.encoder_path + name + '_audio_encoder.ckpt'
            print('model loaded from:')
            print(text_encoder_path)
            print(vision_encoder_path)
            print(audio_encoder_path)
            self.text_encoder.load_state_dict(torch.load(text_encoder_path, map_location=self.device))
            self.vision_encoder.load_state_dict(torch.load(vision_encoder_path, map_location=self.device))
            self.audio_encoder.load_state_dict(torch.load(audio_encoder_path, map_location=self.device))
        else:
            mode_path = self.model_path + 'TVA_fusion' + '_model.ckpt'
            print('model loaded from:')
            print(mode_path)
            self.load_state_dict(torch.load(mode_path, map_location=self.device))

    def set_train(self, train_module=None):
        """Set requires_grad across submodules from a 4-flag list.

        Flags [0:2] are forwarded to the text encoder, flag [2] to the
        vision and audio encoders, and flag [3] is first applied to every
        parameter of this module (the encoder set_train calls then adjust
        their own parameters — presumably overriding flag [3]; confirm
        against the encoders' set_train implementations).
        """
        if train_module is None:
            train_module = [False, False, True, True]
        for param in self.parameters():
            param.requires_grad = train_module[3]
        self.text_encoder.set_train(train_module=train_module[0:2])
        self.vision_encoder.set_train(train_module=train_module[2])
        self.audio_encoder.set_train(train_module=train_module[2])
|
from setuptools import find_packages, setup
# Third-party runtime dependencies.
requirements = [
    "numpy>=1.16",
    "backends",
    "varz>=0.5.3",
    "stheno",
    "wbml",
    "jax",
    "jaxlib",
]

# Package metadata — name/version are presumably supplied via setup.cfg
# or pyproject.toml, since they are not passed here; verify.
setup(
    packages=find_packages(exclude=["docs"]),
    python_requires=">=3.6",
    install_requires=requirements,
    include_package_data=True,
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from core.alert import write
from core.alert import warn
from core.alert import info
from core.alert import messages
from core.compatible import check
from core.compatible import version
from core.compatible import os_name
from core.load_modules import load_all_graphs
from core.config import _core_config
from core.config_builder import _builder
from core._die import __die_success
from core._die import __die_failure
from core.color import finish
from core.wizard import __wizard
from core.config_builder import _core_default_config
from core.config_builder import default_profiles
from core.config import _profiles
from core.alert import write_to_api_console
from core.update import _update_check
# temporary use fixed version of argparse
if os_name() == "win32" or os_name() == "win64":
if version() == 2:
from lib.argparse.v2 import argparse
else:
from lib.argparse.v3 import argparse
else:
import argparse
def load_all_args(module_names, graph_names):
    """
    Create the argument parser and help menu.

    The UI language is resolved first (from -L/--language on sys.argv)
    so that every help string can be localized via messages().

    Args:
        module_names: all module names
        graph_names: all graph names

    Returns:
        list of [parser, parsed ARGS, startup_check_for_update flag]
    """
    # Language Options
    # import libs
    default_config = _builder(_core_config(), _core_default_config())
    _all_profiles = [key for key in _builder(_profiles(), default_profiles())]
    _all_profiles.append("all")
    language_list = [lang for lang in messages(-1, 0)]
    # Peek at sys.argv for the language flag before argparse runs, since
    # the parser's own help strings depend on it.
    if "-L" in sys.argv or "--language" in sys.argv:
        try:
            index = sys.argv.index("-L") + 1
        except Exception:
            index = sys.argv.index("--language") + 1
    else:
        index = -1
    if index == -1:
        language = "en"
    else:
        _error_flag = False
        try:
            language = sys.argv[index]
        except Exception:
            # -L was the last token with no value following it.
            _error_flag = True
        if _error_flag or language not in language_list:
            __die_failure(
                "Please select one of these languages {0}".format(
                    language_list
                )
            )
    # Check if compatible
    check(language)
    finish()
    # Start Parser
    parser = argparse.ArgumentParser(prog="Nettacker", add_help=False)
    # parser = OptionParser(usage=messages(language,"options"),
    #                       description=messages(language,"help_menu"),
    #                       epilog=messages(language,"license"))
    # Engine Options
    engineOpt = parser.add_argument_group(
        messages(language, "engine"), messages(language, "engine_input")
    )
    engineOpt.add_argument(
        "-L",
        "--language",
        action="store",
        dest="language",
        default=default_config["language"],
        help=messages(language, "select_language").format(language_list),
    )
    engineOpt.add_argument(
        "-v",
        "--verbose",
        action="store",
        type=int,
        dest="verbose_level",
        default=default_config["verbose_level"],
        help=messages(language, "verbose_level"),
    )
    engineOpt.add_argument(
        "-V",
        "--version",
        action="store_true",
        default=default_config["show_version"],
        dest="show_version",
        help=messages(language, "software_version"),
    )
    engineOpt.add_argument(
        "-c",
        "--update",
        action="store_true",
        default=default_config["check_update"],
        dest="check_update",
        help=messages(language, "check_updates"),
    )
    engineOpt.add_argument(
        "-o",
        "--output",
        action="store",
        default=default_config["log_in_file"],
        dest="log_in_file",
        help=messages(language, "save_logs"),
    )
    engineOpt.add_argument(
        "--graph",
        action="store",
        default=default_config["graph_flag"],
        dest="graph_flag",
        help=messages(language, "available_graph").format(graph_names),
    )
    # Custom -h since the parser was built with add_help=False.
    engineOpt.add_argument(
        "-h",
        "--help",
        action="store_true",
        default=default_config["help_menu_flag"],
        dest="help_menu_flag",
        help=messages(language, "help_menu"),
    )
    engineOpt.add_argument(
        "-W",
        "--wizard",
        action="store_true",
        default=default_config["wizard_mode"],
        dest="wizard_mode",
        help=messages(language, "wizard_mode"),
    )
    engineOpt.add_argument(
        "--profile",
        action="store",
        default=default_config["profile"],
        dest="profile",
        help=messages(language, "select_profile").format(_all_profiles),
    )
    # Target Options
    target = parser.add_argument_group(
        messages(language, "target"), messages(language, "target_input")
    )
    target.add_argument(
        "-i",
        "--targets",
        action="store",
        dest="targets",
        default=default_config["targets"],
        help=messages(language, "target_list"),
    )
    target.add_argument(
        "-l",
        "--targets-list",
        action="store",
        dest="targets_list",
        default=default_config["targets_list"],
        help=messages(language, "read_target"),
    )
    # Exclude Module Name
    exclude_names = module_names[:]
    exclude_names.remove("all")
    # Methods Options
    method = parser.add_argument_group(
        messages(language, "Method"), messages(language, "scan_method_options")
    )
    method.add_argument(
        "-m",
        "--method",
        action="store",
        dest="scan_method",
        default=default_config["scan_method"],
        help=messages(language, "choose_scan_method").format(module_names),
    )
    method.add_argument(
        "-x",
        "--exclude",
        action="store",
        dest="exclude_method",
        default=default_config["exclude_method"],
        help=messages(language, "exclude_scan_method").format(exclude_names),
    )
    method.add_argument(
        "-u",
        "--usernames",
        action="store",
        dest="users",
        default=default_config["users"],
        help=messages(language, "username_list"),
    )
    method.add_argument(
        "-U",
        "--users-list",
        action="store",
        dest="users_list",
        default=default_config["users_list"],
        help=messages(language, "username_from_file"),
    )
    method.add_argument(
        "-p",
        "--passwords",
        action="store",
        dest="passwds",
        default=default_config["passwds"],
        help=messages(language, "password_seperator"),
    )
    method.add_argument(
        "-P",
        "--passwords-list",
        action="store",
        dest="passwds_list",
        default=default_config["passwds_list"],
        help=messages(language, "read_passwords"),
    )
    method.add_argument(
        "-g",
        "--ports",
        action="store",
        dest="ports",
        default=default_config["ports"],
        help=messages(language, "port_seperator"),
    )
    # NOTE(review): the help text below reuses the "read_passwords" message
    # key — this looks like a copy/paste slip; a timeout-specific message
    # key probably belongs here (fix in the messages table first).
    method.add_argument(
        "-T",
        "--timeout",
        action="store",
        dest="timeout_sec",
        default=default_config["timeout_sec"],
        type=float,
        help=messages(language, "read_passwords"),
    )
    method.add_argument(
        "-w",
        "--time-sleep",
        action="store",
        dest="time_sleep",
        default=default_config["time_sleep"],
        type=float,
        help=messages(language, "time_to_sleep"),
    )
    method.add_argument(
        "-r",
        "--range",
        action="store_true",
        default=default_config["check_ranges"],
        dest="check_ranges",
        help=messages(language, "range"),
    )
    method.add_argument(
        "-s",
        "--sub-domains",
        action="store_true",
        default=default_config["check_subdomains"],
        dest="check_subdomains",
        help=messages(language, "subdomains"),
    )
    method.add_argument(
        "-t",
        "--thread-connection",
        action="store",
        default=default_config["thread_number"],
        type=int,
        dest="thread_number",
        help=messages(language, "thread_number_connections"),
    )
    method.add_argument(
        "-M",
        "--thread-hostscan",
        action="store",
        default=default_config["thread_number_host"],
        type=int,
        dest="thread_number_host",
        help=messages(language, "thread_number_hosts"),
    )
    method.add_argument(
        "-R",
        "--socks-proxy",
        action="store",
        dest="socks_proxy",
        default=default_config["socks_proxy"],
        help=messages(language, "outgoing_proxy"),
    )
    method.add_argument(
        "--retries",
        action="store",
        dest="retries",
        type=int,
        default=default_config["retries"],
        help=messages(language, "connection_retries"),
    )
    method.add_argument(
        "--ping-before-scan",
        action="store_true",
        dest="ping_flag",
        default=default_config["ping_flag"],
        help=messages(language, "ping_before_scan"),
    )
    method.add_argument(
        "--method-args",
        action="store",
        dest="methods_args",
        default=default_config["methods_args"],
        help=messages(language, "method_inputs"),
    )
    method.add_argument(
        "--method-args-list",
        action="store_true",
        dest="method_args_list",
        default=default_config["method_args_list"],
        help=messages(language, "list_methods"),
    )
    # API Options
    api = parser.add_argument_group(
        messages(language, "API"), messages(language, "API_options"))
    api.add_argument("--start-api", action="store_true",
                     dest="start_api", default=default_config["start_api"],
                     help=messages(language, "start_API"))
    api.add_argument("--api-host", action="store",
                     dest="api_host", default=default_config["api_host"],
                     help=messages(language, "API_host"))
    api.add_argument("--api-port", action="store",
                     dest="api_port", default=default_config["api_port"],
                     help=messages(language, "API_port"))
    api.add_argument("--api-debug-mode", action="store_true",
                     dest="api_debug_mode", default=default_config["api_debug_mode"],
                     help=messages(language, "API_debug"))
    api.add_argument("--api-access-key", action="store",
                     dest="api_access_key", default=default_config["api_access_key"],
                     help=messages(language, "API_access_key"))
    api.add_argument("--api-client-white-list", action="store_true",
                     dest="api_client_white_list", default=default_config["api_client_white_list"],
                     help=messages(language, "white_list_API"))
    # NOTE(review): "define_whie_list" looks misspelled, but it is a message
    # key that must match the messages table — correct it there first.
    api.add_argument("--api-client-white-list-ips", action="store",
                     dest="api_client_white_list_ips", default=default_config["api_client_white_list_ips"],
                     help=messages(language, "define_whie_list"))
    api.add_argument("--api-access-log", action="store_true",
                     dest="api_access_log", default=default_config["api_access_log"],
                     help=messages(language, "gen_API_access_log"))
    api.add_argument("--api-access-log-filename", action="store",
                     dest="api_access_log_filename", default=default_config["api_access_log_filename"],
                     help=messages(language, "API_access_log_file"))
    api.add_argument("--api-cert", action="store", dest="api_cert", help=messages(language, "API_cert"))
    api.add_argument("--api-cert-key", action="store", dest="api_cert_key", help=messages(language, "API_cert_key"))
    # Return Options
    return [
        parser,
        parser.parse_args(),
        default_config["startup_check_for_update"],
    ]
def check_all_required(targets, targets_list, thread_number, thread_number_host,
log_in_file, scan_method, exclude_method, users, users_list,
passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level,
show_version, check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args,
method_args_list, wizard_mode, profile, start_api, api_host, api_port, api_debug_mode,
api_access_key, api_client_white_list, api_client_white_list_ips, api_access_log,
api_access_log_filename, api_cert, api_cert_key):
"""
check all rules and requirements for ARGS
Args:
targets: targets from CLI
targets_list: targets_list from CLI
thread_number: thread numbers from CLI
thread_number_host: thread number for hosts from CLI
log_in_file: output file from CLI
scan_method: modules from CLI
exclude_method: exclude modules from CLI
users: usernames from CLI
users_list: username file from CLI
passwds: passwords from CLI
passwds_list: passwords file from CLI
timeout_sec: timeout seconds from CLI
ports: ports from CLI
parser: parser (argparse)
module_names: all module names
language: language from CLI
verbose_level: verbose level from CLI
show_version: show version flag from CLI
check_update: check for update flag from CLI
socks_proxy: socks proxy from CLI
retries: retries from from CLI
graph_flag: graph name from CLI
help_menu_flag: help menu flag from CLI
methods_args: modules ARGS flag from CLI
method_args_list: modules ARGS from CLI
wizard_mode: wizard mode flag from CLI
profile: profiles from CLI
start_api: start API flag from CLI
api_host: API host from CLI
api_port: API port from CLI
api_debug_mode: API debug mode flag from CLI
api_access_key: API access key from CLI
api_client_white_list: API client white list flag from CLI
api_client_white_list_ips: API client white list IPs from CLI
api_access_log: API access log log flag from CLI
api_access_log_filename: API access log filename from CLI
Returns:
all ARGS with applied rules
"""
# Checking Requirements
# import libs
from core import compatible
# Check Help Menu
if help_menu_flag:
parser.print_help()
write("\n\n")
write(messages(language, "license"))
__die_success()
# Check if method args list called
if method_args_list:
from core.load_modules import load_all_method_args
load_all_method_args(language)
__die_success()
# Check version
if show_version:
from core import color
info(
messages(language, "current_version").format(
color.color("yellow"),
compatible.__version__,
color.color("reset"),
color.color("cyan"),
compatible.__code_name__,
color.color("reset"),
color.color("green"),
)
)
__die_success()
# API mode
if start_api:
from api.engine import _start_api
from core.targets import target_type
from core.ip import _generate_IPRange
try:
api_port = int(api_port)
except Exception:
__die_failure(messages(language, "API_port_int"))
if api_client_white_list:
if type(api_client_white_list_ips) != type([]):
api_client_white_list_ips = list(
set(api_client_white_list_ips.rsplit(","))
)
hosts = []
for data in api_client_white_list_ips:
if target_type(data) == "SINGLE_IPv4":
if data not in hosts:
hosts.append(data)
elif target_type(data) == "RANGE_IPv4":
for cidr in _generate_IPRange(data):
for ip in cidr:
if ip not in hosts:
hosts.append(ip)
elif target_type(data) == "CIDR_IPv4":
for ip in _generate_IPRange(data):
if ip not in hosts:
hosts.append(str(ip))
else:
__die_failure(messages(language, "unknown_ip_input"))
api_client_white_list_ips = hosts[:]
if api_access_log:
try:
open(api_access_log_filename, "a")
except Exception:
write_to_api_console(
" * "
+ messages(language, "file_write_error").format(
api_access_log_filename
)
+ "\n"
)
__die_failure("")
_start_api(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename, api_cert, api_cert_key, language)
# Wizard mode
if wizard_mode:
(
targets,
thread_number,
thread_number_host,
log_in_file,
scan_method,
exclude_method,
users,
passwds,
timeout_sec,
ports,
verbose_level,
socks_proxy,
retries,
graph_flag,
) = __wizard(
targets,
thread_number,
thread_number_host,
log_in_file,
module_names,
exclude_method,
users,
passwds,
timeout_sec,
ports,
verbose_level,
socks_proxy,
retries,
load_all_graphs(),
language,
)
# Check the target(s)
if targets is None and targets_list is None:
parser.print_help()
write("\n")
__die_failure(messages(language, "error_target"))
# Select a Profile
if scan_method is None and profile is None:
__die_failure(messages(language, "scan_method_select"))
if profile is not None:
if scan_method is None:
scan_method = ""
else:
scan_method += ","
_all_profiles = _builder(_profiles(), default_profiles())
if "all" in profile.rsplit(","):
profile = ",".join(_all_profiles)
tmp_sm = scan_method
for pr in profile.rsplit(","):
try:
for sm in _all_profiles[pr]:
if sm not in tmp_sm.rsplit(","):
tmp_sm += sm + ","
except Exception:
__die_failure(messages(language, "profile_404").format(pr))
if tmp_sm[-1] == ",":
tmp_sm = tmp_sm[0:-1]
scan_method = ",".join(list(set(tmp_sm.rsplit(","))))
# Check Socks
if socks_proxy is not None:
e = False
if socks_proxy.startswith("socks://"):
socks_flag = 5
socks_proxy = socks_proxy.replace("socks://", "")
elif socks_proxy.startswith("socks5://"):
socks_flag = 5
socks_proxy = socks_proxy.replace("socks5://", "")
elif socks_proxy.startswith("socks4://"):
socks_flag = 4
socks_proxy = socks_proxy.replace("socks4://", "")
else:
socks_flag = 5
if "://" in socks_proxy:
socks_proxy = socks_proxy.rsplit("://")[1].rsplit("/")[0]
try:
if (
len(socks_proxy.rsplit(":")) < 2
or len(socks_proxy.rsplit(":")) > 3
):
e = True
elif (
len(socks_proxy.rsplit(":")) == 2
and socks_proxy.rsplit(":")[1] == ""
):
e = True
elif (
len(socks_proxy.rsplit(":")) == 3
and socks_proxy.rsplit(":")[2] == ""
):
e = True
except Exception:
e = True
if e:
__die_failure(messages(language, "valid_socks_address"))
if socks_flag == 4:
socks_proxy = "socks4://" + socks_proxy
if socks_flag == 5:
socks_proxy = "socks5://" + socks_proxy
# Check update
if check_update and _update_check(language):
from core.update import _update
_update(
compatible.__version__,
compatible.__code_name__,
language,
socks_proxy,
)
__die_success()
else:
if targets is not None:
targets = list(set(targets.rsplit(",")))
elif targets_list is not None:
try:
targets = list(set(open(targets_list, "rb").read().rsplit()))
except Exception:
__die_failure(
messages(language, "error_target_file").format(
targets_list
)
)
# Check thread number
if thread_number > 101 or thread_number_host > 101:
warn(messages(language, "thread_number_warning"))
# Check timeout number
if timeout_sec is not None and timeout_sec >= 15:
warn(messages(language, "set_timeout").format(timeout_sec))
# Check scanning method
if scan_method is not None and "all" in scan_method.rsplit(","):
scan_method = module_names
scan_method.remove("all")
elif (
scan_method is not None
and len(scan_method.rsplit(",")) == 1
and "*_" not in scan_method
):
if scan_method in module_names:
scan_method = scan_method.rsplit()
else:
__die_failure(
messages(language, "scan_module_not_found").format(scan_method)
)
else:
if scan_method is not None:
if scan_method not in module_names:
if "*_" in scan_method or "," in scan_method:
scan_method = scan_method.rsplit(",")
scan_method_tmp = scan_method[:]
for sm in scan_method_tmp:
scan_method_error = True
if sm.startswith("*_"):
scan_method.remove(sm)
found_flag = False
for mn in module_names:
if mn.endswith("_" + sm.rsplit("*_")[1]):
scan_method.append(mn)
scan_method_error = False
found_flag = True
if found_flag is False:
__die_failure(
messages(
language, "module_pattern_404"
).format(sm)
)
elif sm == "all":
scan_method = module_names
scan_method_error = False
scan_method.remove("all")
break
elif sm in module_names:
scan_method_error = False
elif sm not in module_names:
__die_failure(
messages(
language, "scan_module_not_found"
).format(sm)
)
else:
scan_method_error = True
if scan_method_error:
__die_failure(
messages(language, "scan_module_not_found").format(
scan_method
)
)
else:
__die_failure(messages(language, "scan_method_select"))
scan_method = list(set(scan_method))
# Check for excluding scanning method
if exclude_method is not None:
exclude_method = exclude_method.rsplit(",")
for exm in exclude_method:
if exm in scan_method:
if "all" == exm:
__die_failure(messages(language, "error_exclude_all"))
else:
scan_method.remove(exm)
if len(scan_method) == 0:
__die_failure(messages(language, "error_exclude_all"))
else:
__die_failure(
messages(language, "exclude_module_error").format(exm)
)
# Check port(s)
if type(ports) is not list and ports is not None:
tmp_ports = []
for port in ports.rsplit(","):
try:
if "-" not in port:
if int(port) not in tmp_ports:
tmp_ports.append(int(port))
else:
t_ports = range(
int(port.rsplit("-")[0]), int(port.rsplit("-")[1]) + 1
)
for p in t_ports:
if p not in tmp_ports:
tmp_ports.append(p)
except Exception:
__die_failure(messages(language, "ports_int"))
if len(tmp_ports) == 0:
ports = None
else:
ports = tmp_ports[:]
# Check user list
if users is not None:
users = list(set(users.rsplit(",")))
elif users_list is not None:
try:
# fix later
users = list(set(open(users_list).read().rsplit("\n")))
except Exception:
__die_failure(
messages(language, "error_username").format(targets_list)
)
# Check password list
if passwds is not None:
passwds = list(set(passwds.rsplit(",")))
if passwds_list is not None:
try:
passwds = list(
set(open(passwds_list).read().rsplit("\n"))
) # fix later
except Exception:
__die_failure(
messages(language, "error_password_file").format(targets_list)
)
# Check output file
try:
open(log_in_file, "w")
except Exception:
__die_failure(
messages(language, "file_write_error").format(log_in_file)
)
# Check Graph
if graph_flag is not None:
if graph_flag not in load_all_graphs():
__die_failure(
messages(language, "graph_module_404").format(graph_flag)
)
if not (log_in_file.endswith(".html") or log_in_file.endswith(".htm")):
warn(messages(language, "graph_output"))
graph_flag = None
# Check Methods ARGS
if methods_args is not None:
new_methods_args = {}
methods_args = methods_args.rsplit("&")
for imethod_args in methods_args:
if len(imethod_args.rsplit("=")) == 2:
if imethod_args.rsplit("=")[1].startswith("read_from_file:"):
try:
read_data = list(
set(
open(
imethod_args.rsplit("=read_from_file:")[1]
)
.read()
.rsplit("\n")
)
)
except Exception:
__die_failure(messages(language, "error_reading_file"))
new_methods_args[imethod_args.rsplit("=")[0]] = read_data
else:
new_methods_args[
imethod_args.rsplit("=")[0]
] = imethod_args.rsplit("=")[1].rsplit(",")
else:
new_methods_args[imethod_args] = ["True"]
methods_args = new_methods_args
# Return the values
return [targets, targets_list, thread_number, thread_number_host,
log_in_file, scan_method, exclude_method, users, users_list,
passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level,
show_version, check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args,
method_args_list, wizard_mode, profile, start_api, api_host, api_port, api_debug_mode,
api_access_key, api_client_white_list, api_client_white_list_ips, api_access_log,
api_access_log_filename, api_cert, api_cert_key]
|
from enum import Enum
class TupleEnum(tuple, Enum):
    """Enum base whose members are also tuples (usable anywhere a tuple is)."""
    pass
class IntEnum(int, Enum):
    """Enum base whose members are also ints.

    NOTE(review): this duplicates the stdlib ``enum.IntEnum``; consider
    ``from enum import IntEnum`` instead — TODO confirm nothing relies on
    the identity of this local class.
    """
    pass
class Player(IntEnum):
    """The two players of the game.

    BLACK moves first; WHITE moves second.
    """

    BLACK = 0
    WHITE = 1
class RewardMethod(Enum):
    """How rewards are computed for a game state.

    REAL: 0 while the game is ongoing; +15 when Black wins, -15 when White wins.
    HEURISTIC: while ongoing, the difference in movement steps between Black
        and White; once finished, +15 / -15 as in REAL.
    INVALID: HEURISTIC, plus a -1 reward when the current player's action is
        invalid.
    """

    REAL = 'real'
    HEURISTIC = 'heuristic'
    INVALID = 'invalid'
class Color(TupleEnum):
    """RGB colour constants as (r, g, b) tuples in the 0-255 range."""
    GRAY = (100, 100, 100)
    WHITE = (255, 255, 255)
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    BLUE = (0, 0, 255)
    YELLOW = (255, 255, 0)
    ORANGE = (255, 128, 0)
    PURPLE = (255, 0, 255)  # NOTE(review): (255, 0, 255) is magenta — confirm the name is intentional
    CYAN = (0, 255, 255)
    BLACK = (0, 0, 0)
class WallDir(IntEnum):
    """Orientation of a wall piece."""
    VERTICAL = 1
    HORIZONTAL = 2
class MovementDir(TupleEnum):
    """(dx, dy) board offsets for pawn moves.

    NOTE(review): UP is (0, -1), so the y axis presumably grows downward —
    TODO confirm against the board implementation. Double-step and diagonal
    members cover jumps over an adjacent pawn.
    """
    UP = (0, -1)
    DOWN = (0, 1)
    LEFT = (-1, 0)
    RIGHT = (1, 0)
    UPUP = (0, -2)
    UPLEFT = (-1, -1)
    UPRIGHT = (1, -1)
    DOWNDOWN = (0, 2)
    DOWNLEFT = (-1, 1)
    DOWNRIGHT = (1, 1)
    LEFTLEFT = (-2, 0)
    RIGHTRIGHT = (2, 0)
class MovementIndex(IntEnum):
    """Integer index for each movement direction; order matches MovementDir's member order."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
    UPUP = 4
    UPLEFT = 5
    UPRIGHT = 6
    DOWNDOWN = 7
    DOWNLEFT = 8
    DOWNRIGHT = 9
    LEFTLEFT = 10
    RIGHTRIGHT = 11
class CompassDir(TupleEnum):
    """(dx, dy) offsets for the four compass corners.

    NOTE(review): the values look like the corner offsets of a 2x2 cell
    (SE is the origin) rather than unit compass steps — confirm semantics.
    """
    NW = (-1, -1)
    NE = (0, -1)
    SW = (-1, 0)
    SE = (0, 0)
class CompassIndex(IntEnum):
    """Integer index for each compass corner; order matches CompassDir's member order."""
    NW = 0
    NE = 1
    SW = 2
    SE = 3
|
import os
import sys
#! <------------------------------------------------>
#* Logging script
#*
#* Version: 1.0
#*
#! <------------------------------------------------>
# Module-level output path; rebound from argv by the script block at the bottom.
filepath = ""
# Cached command-line arguments: [script, name, (optional) filepath].
sysargvs = sys.argv
def logFolderCreate(name="", filepath="F:/PythonProjectLogs"):
    """Create a "<name>Logs" folder under *filepath* and return its full path.

    If the folder already exists the user is asked whether a numbered
    sibling ("<name>Logs(1)", "<name>Logs(2)", ...) should be created
    instead; answering "N" reuses the existing folder. Exits the
    interpreter on an empty name or an invalid path.

    Args:
        name: base name for the log folder ("Logs" is appended).
        filepath: directory in which the folder is created.

    Returns:
        str: "<filepath>/<created or reused folder name>".
    """
    # Guard clause: an empty name cannot produce a usable folder name.
    if name == "":
        print("Invalid name. Exiting program...")
        sys.exit()
    name = name + "Logs"
    # Bug fixes vs. the original:
    # - `len(sysargvs) == 0` can never be true (argv always holds the script
    #   name) and the folder logic never depended on argv, so those checks
    #   are dropped;
    # - `prompt` could be read before assignment (masked by a broad except);
    # - a value is now returned on every successful path, not only when
    #   exactly one extra argv entry was present.
    try:
        os.chdir(filepath)
    except Exception:
        print("Invalid Path!")
        print("Shutting down.")
        sys.exit()
    try:
        os.mkdir(name)
    except Exception:
        # Folder exists: offer to create a numbered variant instead.
        print("Folder Already Exists")
        prompt = str(input("Do you want to continue? [Y/N] : "))
        if prompt.lower() == "y":
            name_template = name + "({index})"
            i = 1
            existing = os.listdir()
            while True:
                name = name_template.format(index=i)
                if name in existing:
                    i += 1
                else:
                    os.mkdir(name)
                    break
        # On "N" the existing folder's path is returned (original behavior).
    return filepath + "/" + name
# Script entry: argv is [script, name, (optional) filepath].
if len(sysargvs) == 3:
    name = sysargvs[1]
    filepath = sysargvs[2]
    logFolderCreate(name, filepath)
elif len(sysargvs) == 2:
    # No explicit path given; the function default is used.
    name = sysargvs[1]
    logFolderCreate(name)
|
import torch
from torch import distributed, optim
import torch.nn as nn
from torch.optim import Adam, lr_scheduler
from utils import *
from datetime import datetime
import os
import json
import numpy as np
import cv2
import torch.nn.functional as F
from skimage.measure import compare_psnr, compare_ssim
from DRAN import DRAN
from model.srcnn import Net
# from model.fsrcnn import Net
from model.SRMDNF import SRMD
# from loss import *
import math
import time
# Restrict CUDA to the first two GPUs.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# All visible CUDA device indices (empty range on CPU-only machines).
device_ids = range(torch.cuda.device_count())
# Primary compute device; falls back to CPU when CUDA is unavailable.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class SRNN(object):
    """Training / evaluation / testing harness for the DRAN super-resolution model."""

    def __init__(self, args, trainable):
        """Store the option namespace and build the model.

        Args:
            args: parsed options (lr, epochs, scale, cuda, checkpoint paths, ...).
            trainable: when True, build optimizer/scheduler and restore checkpoints.
        """
        self.p = args
        self.trainable = trainable
        self._compile()

    def _compile(self):
        """Build network, loss, optimizer, LR scheduler and optionally restore a checkpoint."""
        self.model = DRAN(self.p)
        self.content_loss = nn.L1Loss()
        self.start_epoch = 1
        if self.trainable:
            self.content_loss = self.content_loss.to(device)
            self.optim = Adam(self.model.parameters(), lr=self.p.lr)
            # Halve the learning rate every 300 scheduler steps.
            self.scheduler = lr_scheduler.StepLR(optimizer=self.optim, step_size=300, gamma=0.5, last_epoch=-1)
        self.use_cuda = torch.cuda.is_available() and self.p.cuda
        if self.use_cuda:
            self.model = self.model.to(device)
            if len(device_ids) > 1:
                print('dataparallel')
                self.model = nn.DataParallel(self.model, device_ids=device_ids)
        if self.p.load_ckpt and self.trainable:
            print('Loading checkpoint from{}\n'.format(self.p.ckpt_file))
            state_dict = torch.load(self.p.ckpt_file)
            # Re-key the saved weights so they match the (possibly
            # DataParallel-wrapped) model.
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in state_dict['model'].items():
                if 'module.' not in k:
                    k = 'module.' + k
                else:
                    k = k.replace('features.module.', 'module.features.')
                new_state_dict[k] = v
            self.model.load_state_dict(new_state_dict)
            self.start_epoch = state_dict['epoch'] + 1
            self.optim.load_state_dict(state_dict['optimizer'])
            # Move restored optimizer state tensors onto the compute device.
            for state in self.optim.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device)
            self.optim.param_groups[0]['lr'] = self.p.lr
            print("current epoch: {}".format(self.start_epoch))

    def save_model(self, epoch, best_psnr, best_epoch, stats):
        """Save a checkpoint and the stats JSON; keep a separate best-PSNR snapshot.

        Args:
            epoch: epoch being saved.
            best_psnr / best_epoch: running best validation PSNR and its epoch.
            stats: dict of tracked per-epoch statistics.
        """
        if not (self.p.load_ckpt) and epoch < 10:
            # Fresh run: create a timestamped directory pair once, early on.
            localtime = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
            save_dir_name = 'DRAN' + localtime
            self.ckpt_dir = os.path.join(self.p.ckpt_save_dir, save_dir_name)
            self.stats_dir = os.path.join(self.p.stats_save_dir, save_dir_name)
            if not os.path.isdir(self.p.ckpt_save_dir):
                os.mkdir(self.p.ckpt_save_dir)
            if not os.path.isdir(self.ckpt_dir):
                os.mkdir(self.ckpt_dir)
            if not os.path.isdir(self.p.stats_save_dir):
                # Bug fix: the original created self.p.loss_save_dir here,
                # leaving stats_save_dir missing for the mkdir below.
                os.mkdir(self.p.stats_save_dir)
            if not os.path.isdir(self.stats_dir):
                os.mkdir(self.stats_dir)
            save_args(self.p, self.stats_dir)
        if self.p.load_ckpt:
            # Resumed run: reuse the directory the checkpoint came from.
            save_dir_name = self.p.ckpt_file.split('/')[3]
            self.ckpt_dir = os.path.dirname(self.p.ckpt_file)
            self.stats_dir = os.path.join(self.p.stats_save_dir, save_dir_name)
            save_args(self.p, self.stats_dir)
        if self.p.ckpt_overwrite:
            filename = 'latest.pt'
        else:
            psnr = stats['valid_psnr'][epoch - self.start_epoch]
            filename = 'epoch{}-{:.4f}.pt'.format(epoch, psnr)
        filedir = '{}/{}'.format(self.ckpt_dir, filename)
        print('Saving checkpoint to : {}\n'.format(filename))
        # NOTE(review): self.model.module assumes DataParallel wrapping; this
        # fails on single-GPU/CPU runs — confirm intended deployment.
        torch.save({'model': self.model.module.state_dict(), 'optimizer': self.optim.state_dict(), 'epoch': epoch, 'lr_scheduler': self.scheduler.state_dict()}, filedir)
        if epoch == best_epoch:
            torch.save(self.model.state_dict(), '{}/best.pt'.format(self.ckpt_dir))
            with open('{}/best_epoch.txt'.format(self.stats_dir), 'w') as f:
                f.write('Best: {:.2f} @epoch {}'.format(best_psnr, best_epoch))
        # Persist tracked statistics (losses, PSNR/SSIM history) as JSON.
        stats_dict = '{}/stats.json'.format(self.stats_dir)
        with open(stats_dict, 'w') as sd:
            json.dump(stats, sd, indent=2)

    def mixup_data(self, x, y, alpha=1.0, use_cuda=True):
        """Return mixup-augmented batches: Beta(alpha, alpha)-weighted convex
        combinations of shuffled pairs, concatenated with the originals."""
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1
        batch_size = x.size()[0]
        if use_cuda:
            index = torch.randperm(batch_size).to(device)
        else:
            index = torch.randperm(batch_size)
        mixed_x = lam * x + (1 - lam) * x[index, :]
        mixed_y = lam * y + (1 - lam) * y[index, :]
        return torch.cat((mixed_x, x), 0), torch.cat((mixed_y, y), 0)

    def train(self, train_loader, valid_loader):
        """Run the main training loop with per-epoch validation and periodic checkpointing."""
        self.model.train(True)
        num_batches = len(train_loader)
        # Tracked statistics, appended every epoch and dumped by save_model.
        stats = {'learning_rate': [],
                 'train_loss': [],
                 'loss': [],
                 'valid_psnr': [],
                 'valid_ssim': []}
        train_start = datetime.now()
        epoch_save = 5  # checkpoint every 5 epochs
        for _epoch in range(self.start_epoch, self.p.epochs + 1):
            epoch_start = datetime.now()
            print('EPOCH : {}/{}'.format(_epoch, self.p.epochs))
            # Bug fix: eval() switches the network to inference mode; restore
            # training mode at the start of every epoch (the original set it
            # only once, so all epochs after the first validation trained
            # with eval-mode batch-norm/dropout).
            self.model.train(True)
            train_loss_tracker = Tracker()
            for batch_idx, (source, target) in enumerate(train_loader):
                progress_bar(batch_idx, num_batches, train_loss_tracker.avg)
                if self.use_cuda:
                    source = source.to(device)
                    target = target.to(device)
                source_SR = self.model(source)
                loss = self.content_loss(source_SR, target)
                train_loss_tracker.update(loss.item())
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
                # NOTE(review): the scheduler is stepped per *batch* with
                # step_size=300 — confirm a per-epoch step was not intended.
                self.scheduler.step()
            epoch_end = datetime.now()
            stats['learning_rate'].append(self.optim.param_groups[0]['lr'])
            stats['train_loss'].append(train_loss_tracker.avg)
            train_loss_tracker.reset()
            print("Begin Evaluation")
            loss, valid_psnr, valid_ssim = self.eval(valid_loader)
            stats['loss'].append(loss)
            stats['valid_psnr'].append(valid_psnr)
            stats['valid_ssim'].append(valid_ssim)
            best_psnr = max(stats['valid_psnr'])
            best_epoch = stats['valid_psnr'].index(best_psnr) + self.start_epoch
            epoch_time = int((datetime.now() - epoch_start).total_seconds())
            eva_time = int((datetime.now() - epoch_end).total_seconds())
            print(
                'Epoch time : {} s| Evalu time : {} s| Valid Loss : {:.4f}|(Best: {:.2f} @epoch {})'.format(
                    epoch_time, eva_time, loss, best_psnr, best_epoch))
            print('Valid PSNR : {:.2f} dB | Valid SSIM : {:.4f} | lr:{:.8f}'.format(
                valid_psnr, valid_ssim, self.optim.param_groups[0]['lr']))
            if _epoch % epoch_save == 0:
                # Simplification: both branches of the original if/else made
                # exactly this call.
                self.save_model(_epoch, best_psnr, best_epoch, stats)
        train_time = str(datetime.now() - train_start)[:-7]
        print('Training done! Total train time: {}\n'.format(train_time))

    def eval(self, valid_loader):
        """Evaluate on the validation set.

        Returns:
            (avg L1 loss, avg PSNR in dB, avg SSIM).
        """
        self.model.train(False)
        loss_tracker = Tracker()
        psnr_tracker = Tracker()
        ssim_tracker = Tracker()
        # no_grad: inference only — saves memory, results unchanged.
        with torch.no_grad():
            for batch_idx, (source, target) in enumerate(valid_loader):
                if self.use_cuda:
                    source = source.to(device)
                    target = target.to(device)
                source_SR = self.model(source)
                loss = self.content_loss(source_SR, target)
                loss_tracker.update(loss.item())
                # PSNR on tensors; SSIM on HWC numpy images in Y space.
                psnr_tracker.update(calc_psnr(source_SR, target, self.p.scale, 255))
                # NOTE(review): squeeze(0) assumes a validation batch size of 1.
                source_SR = np.transpose(source_SR.cpu().detach().squeeze(0).numpy(), [1, 2, 0])
                target = np.transpose(target.cpu().detach().squeeze(0).numpy(), [1, 2, 0])
                ssim_tracker.update(calculate_ssim(bgr2ycbcr(source_SR), bgr2ycbcr(target), self.p.scale))
        return loss_tracker.avg, psnr_tracker.avg, ssim_tracker.avg

    def test(self, test_loader):
        """Restore weights from self.p.ckpt_file, super-resolve the test set,
        write result PNGs and report average PSNR/SSIM."""
        self.model.eval()
        print('Loading checkpoint from{}\n'.format(self.p.ckpt_file))
        from collections import OrderedDict
        if self.use_cuda:
            state_dict = torch.load(self.p.ckpt_file)
            print('cuda')
            # Re-key weights to match the (possibly DataParallel) model.
            new_state_dict = OrderedDict()
            for k, v in state_dict['model'].items():
                if len(device_ids) > 1 and 'module.' not in k:
                    k = 'module.' + k
                else:
                    k = k.replace('features.module.', 'module.features.')
                new_state_dict[k] = v
            self.model.load_state_dict(new_state_dict)
        else:
            state_dict = torch.load(self.p.ckpt_file, map_location=torch.device('cpu'))
            print('cpu')
            new_state_dict = OrderedDict()
            for k, v in state_dict['model'].items():
                if 'module.' in k:
                    k = k[7:]  # strip the DataParallel 'module.' prefix
                    print('remove `module.`')
                new_state_dict[k] = v
            self.model.load_state_dict(new_state_dict)
        num_batches = len(test_loader)
        psnr_tracker = Tracker()
        ssim_tracker = Tracker()
        for batch_idx, (source, target) in enumerate(test_loader):
            print(' : {}/{}'.format(batch_idx + 1, num_batches))
            print(source.shape)
            if self.use_cuda:
                source = source.to(device)
                target = target.to(device)
            # NOTE(review): casting to torch.DoubleTensor yields CPU float64
            # tensors, which mismatches a CUDA/float32 model — confirm this
            # path was ever exercised on GPU.
            source = source.type(torch.DoubleTensor)
            target = target.type(torch.DoubleTensor)
            print(source.shape)
            print(target.shape)
            source_SR = self.model(source)
            img_name = test_loader.dataset.imgs[batch_idx][:-4]
            img = np.transpose(source_SR.cpu().detach().squeeze(0).numpy(), [1, 2, 0])
            cv2.imwrite('{}/{}.png'.format('./../dataset/microscope_dataset/result', img_name), img)
            psnr_tracker.update(calc_psnr(source_SR, target, self.p.scale, 255))
            source_SR = np.transpose(source_SR.cpu().detach().squeeze(0).numpy(), [1, 2, 0])
            target = np.transpose(target.cpu().detach().squeeze(0).numpy(), [1, 2, 0])
            ssim_tracker.update(calculate_ssim(bgr2ycbcr(source_SR), bgr2ycbcr(target), self.p.scale))
        print('Valid PSNR : {:.4f} dB'.format(psnr_tracker.avg))
        print('Valid SSIM : {:.4f}'.format(ssim_tracker.avg))
def calc_psnr(sr, hr, scale, rgb_range, test=True):
    """Return the PSNR (dB) between super-resolved `sr` and ground truth `hr`.

    `sr`/`hr` are NCHW tensors in [0, rgb_range]. A border is shaved off
    before computing the MSE: `scale` pixels in test mode (after collapsing
    RGB to luminance), `scale + 6` otherwise.
    """
    diff = (sr - hr) / rgb_range
    if test:
        shave = scale
        if diff.shape[1] > 1:
            # Collapse RGB to a single luminance channel (BT.601 weights * 256).
            weights = diff.new_tensor([65.738, 129.057, 25.064]).view(1, 3, 1, 1)
            diff = (diff * weights / 256).sum(dim=1, keepdim=True)
    else:
        shave = scale + 6
    cropped = diff[..., shave:-shave, shave:-shave]
    mse = cropped.pow(2).mean()
    return -10 * math.log10(mse)
def ssim(img1, img2):
    """Single-channel SSIM of two [0, 255] images.

    Uses an 11x11 Gaussian window (sigma 1.5) and the MATLAB-compatible
    stabilizing constants; the 5-pixel filter border is discarded.
    """
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = gauss @ gauss.T  # separable kernel -> full 11x11 window

    def blur(x):
        # Gaussian-filter x and keep only the valid interior.
        return cv2.filter2D(x, -1, window)[5:-5, 5:-5]

    mu1 = blur(a)
    mu2 = blur(b)
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu12 = mu1 * mu2
    var1 = blur(a * a) - mu1_sq
    var2 = blur(b * b) - mu2_sq
    cov = blur(a * b) - mu12
    score = ((2 * mu12 + c1) * (2 * cov + c2)) / ((mu1_sq + mu2_sq + c1) *
                                                  (var1 + var2 + c2))
    return score.mean()
def calculate_ssim(img1, img2, scale):
    """SSIM between two [0, 255] images, matching MATLAB's implementation.

    A border of `scale` pixels is shaved off first to remove boundary
    effects. 2-D inputs are treated as a single (Y) channel; HxWx3 inputs
    are averaged over per-channel SSIM; HxWx1 inputs are squeezed.

    Raises:
        ValueError: on shape mismatch or unsupported dimensionality.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    shave = scale
    img1 = img1[shave:-shave, shave:-shave]
    img2 = img2[shave:-shave, shave:-shave]
    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # Bug fix: the original ignored the loop index and compared the
            # full 3-channel images three times; SSIM is per-channel here.
            ssims = [ssim(img1[..., i], img2[..., i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
def reduce_tensor(tensor: torch.Tensor) -> torch.Tensor:
    """Average `tensor` across all distributed workers.

    Requires an initialized torch.distributed process group; the input
    tensor itself is left untouched.
    """
    summed = tensor.clone()
    distributed.all_reduce(summed, op=distributed.ReduceOp.SUM)
    return summed / distributed.get_world_size()
def bgr2ycbcr(img, only_y=True):
    """BGR -> YCbCr colour conversion (BGR counterpart of MATLAB's rgb2ycbcr).

    Args:
        img: HxWx3 BGR image, either uint8 in [0, 255] or float in [0, 1].
        only_y: when True return only the Y (luma) channel.

    Returns:
        Array with the same dtype and value-range convention as the input.
    """
    in_img_type = img.dtype
    # Bug fix: the original discarded the result of `img.astype(...)` (astype
    # returns a new array) and then, for uint8 input, read the never-assigned
    # name `rlt` (NameError); the float branch of only_y=False also used the
    # unscaled image. All paths now work on the scaled float64 copy.
    img = img.astype(np.float64)
    if in_img_type != np.uint8:
        img *= 255.0  # bring [0, 1] floats to the [0, 255] range
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.0
    return rlt.astype(in_img_type)
|
from collections import defaultdict
def frequency_table(A):
    """Map each element of A to its number of occurrences (as a defaultdict(int))."""
    counts = defaultdict(int)
    for item in A:
        counts[item] += 1
    return counts
def reverse_frequency_table(A):
    """Return (count, element) pairs for every distinct element of A.

    Bug fix: the original called dict.iteritems(), which does not exist on
    Python 3; dict.items() is the equivalent.
    """
    return [(count, element) for element, count in frequency_table(A).items()]
def majority_element(A):
    """Return the element occurring in more than half of A, or -1 if none exists.

    Bug fix: the original truth-tested the result of filter(); on Python 3
    that is a lazy filter object which is always truthy, so -1 was never
    returned. The candidates are materialized into a list first.
    """
    half = len(A) / 2
    candidates = [pair for pair in reverse_frequency_table(A) if pair[0] > half]
    return max(candidates)[1] if candidates else -1
def max_heapify(A, i, l):
    """Sift A[i] down (in place) so the subtree rooted at i within A[:l]
    satisfies the max-heap property; both children are assumed to be heaps."""
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        largest = i
        if left < l and A[left] > A[largest]:
            largest = left
        if right < l and A[right] > A[largest]:
            largest = right
        if largest == i:
            return
        A[i], A[largest] = A[largest], A[i]
        i = largest
def max_heap(A):
    """Rearrange list A into a max-heap in place and return it.

    Bug fix: xrange() does not exist on Python 3; range() is used instead.
    """
    l = len(A)
    for i in range(l // 2, -1, -1):
        max_heapify(A, i, l)
    return A
def min_heapify(A, i, l):
    """Sift A[i] down (in place) so the subtree rooted at i within A[:l]
    satisfies the min-heap property; both children are assumed to be heaps."""
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        smallest = i
        if left < l and A[left] < A[smallest]:
            smallest = left
        if right < l and A[right] < A[smallest]:
            smallest = right
        if smallest == i:
            return
        A[i], A[smallest] = A[smallest], A[i]
        i = smallest
def min_heap(A):
    """Rearrange list A into a min-heap in place and return it.

    Bug fix: xrange() does not exist on Python 3; range() is used instead.
    """
    l = len(A)
    for i in range(l // 2, -1, -1):
        min_heapify(A, i, l)
    return A
def count_inversions(A):
    """Count pairs (i, j) with i < j and A[i] > A[j], by brute force (O(n^2)).

    Bug fix: xrange() does not exist on Python 3; range() is used instead.
    (The original's unused local `l` is removed.)
    """
    inversions = 0
    n = len(A)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if A[i] > A[j]:
                inversions += 1
    return inversions
def indexed_text_array(text):
    """Pair every character with its 1-based occurrence count so far.

    Example: "aba" -> [('a', 1), ('b', 1), ('a', 2)].
    """
    occurrences = defaultdict(int)
    result = []
    for ch in text:
        occurrences[ch] += 1
        result.append((ch, occurrences[ch]))
    return result
def last_to_first(first, last):
    """Map each position in `last` to the index in `first` of the first
    occurrence of that character (the BWT last-to-first mapping).

    Bug fix: xrange() does not exist on Python 3; the loop is replaced with
    an equivalent dict comprehension over range().
    """
    return {index: first.index(last[index]) for index in range(len(last))}
def count_matrix(last):
    """Cumulative character counts of `last`: row k holds the counts of last[:k]."""
    counts = {ch: 0 for ch in last}
    matrix = [counts.copy()]
    for ch in last:
        counts[ch] += 1
        matrix.append(counts.copy())
    return matrix
def checkpoint_matrix(last, k):
    """Character-count checkpoints of `last`, recorded every k positions
    (position 0 is always included)."""
    counts = {ch: 0 for ch in last}
    position = 0
    checkpoints = {position: counts.copy()}
    for ch in last:
        position += 1
        counts[ch] += 1
        if position % k == 0:
            checkpoints[position] = counts.copy()
    return checkpoints
def first_occurrence(first):
    """Map each character of `first` to the index of its first occurrence."""
    occurrences = {}
    for position, ch in enumerate(first):
        if ch not in occurrences:
            occurrences[ch] = position
    return occurrences
def suffix_array(string):
    """Return all (start, suffix) pairs of `string`, sorted by suffix.

    A '$' terminator is appended when not already present.

    Bug fix: xrange() does not exist on Python 3; range() is used instead.
    """
    if string[-1] != '$':
        string += '$'
    suffixes = [(i, string[i:]) for i in range(len(string))]
    return sorted(suffixes, key=lambda pair: pair[1])
def partial_suffix_array(string, k):
    """(rank, position) pairs of the suffix array of `string`, keeping only
    suffix positions divisible by k."""
    return [
        (rank, position)
        for rank, (position, _suffix) in enumerate(suffix_array(string))
        if position % k == 0
    ]
def lcp_array(suffix_array):
    """Return (rank, lcp) pairs for adjacent entries of a (pos, suffix) array.

    Entry 0 is the sentinel (0, -1).

    Bug fix: xrange() does not exist on Python 3; range() is used instead.
    NOTE(review): when one adjacent suffix is a full prefix of the next, no
    mismatch is found and nothing is appended for that rank — this preserves
    the original behavior; confirm it is intended.
    """
    lcp = [(0, -1)]
    for rank in range(1, len(suffix_array)):
        previous = suffix_array[rank - 1][1]
        current = suffix_array[rank][1]
        for j in range(min(len(previous), len(current))):
            if previous[j] != current[j]:
                lcp.append((rank, j))
                break
    return lcp
|
# Definition for a binary tree node.
class TreeNode(object):
    """Node of a binary tree."""

    def __init__(self, x):
        """Store value `x`; both children start out empty."""
        self.val = x
        self.left = self.right = None
class Solution(object):
    """LeetCode 637: average of levels in a binary tree."""

    def averageOfLevels(self, root):
        """Return a list with the average node value of each level (BFS).

        Robustness fix: an empty tree (root is None) now yields [] instead
        of raising AttributeError when dereferencing None.
        """
        if root is None:
            return []
        averages = []
        level = [root]
        while level:
            values = [node.val for node in level]
            averages.append(float(sum(values)) / len(values))
            next_level = []
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return averages
# Demo: level averages of the tree [3, 9, 20, None, None, 15, 7].
root = TreeNode(3)
root.left, root.right = TreeNode(9), TreeNode(20)
root.right.left, root.right.right = TreeNode(15), TreeNode(7)
res = Solution().averageOfLevels(root)
print(res)
"""
CCT 建模优化代码
绘图代码
作者:赵润晓
日期:2021年5月1日
"""
import multiprocessing # since v0.1.1 多线程计算
import time # since v0.1.1 统计计算时长
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import matplotlib.pyplot as plt
import math
import random # since v0.1.1 随机数
import sys
import os # since v0.1.1 查看CPU核心数
import numpy
from scipy.integrate import solve_ivp # since v0.1.1 ODE45
import warnings # since v0.1.1 提醒方法过时
from packages.point import *
from packages.constants import *
from packages.base_utils import BaseUtils
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.line2s import *
from packages.trajectory import Trajectory
from packages.particles import *
from packages.magnets import *
from packages.cct import CCT
from packages.beamline import Beamline
class Plot3:
INIT: bool = False # 是否初始化
ax = None
PLT = plt
@staticmethod
def __init():
"""
初始化 Plot3
自动检查,无需调用
"""
plt.rcParams["font.sans-serif"] = ["SimHei"] # 用来正常显示中文标签
plt.rcParams["axes.unicode_minus"] = False # 用来正常显示负号
fig = plt.figure()
Plot3.ax = fig.gca(projection="3d")
Plot3.ax.grid(False)
Plot3.INIT = True
@staticmethod
def plot_xyz(x: float, y: float, z: float, describe="r.") -> None:
"""
绘制点 (x,y,z)
绘制图象时只有 plot_xyz 和 plot_xyz_array 访问底层,所以需要判断是否初始化
"""
if not Plot3.INIT:
Plot3.__init()
Plot3.ax.plot(x, y, z, describe)
@staticmethod
def plot_xyz_array(
xs: List[float], ys: List[float], zs: List[float], describe="r-"
) -> None:
"""
绘制多个点
按照 x y z 分别给值
绘制图象时只有 plot_xyz 和 plot_xyz_array 访问底层,所以需要判断是否初始化
"""
if not Plot3.INIT:
Plot3.__init()
Plot3.ax.plot(xs, ys, zs, describe)
@staticmethod
def plot_p3(p: P3, describe="r.") -> None:
"""
绘制点 P3
"""
Plot3.plot_xyz(p.x, p.y, p.z, describe)
@staticmethod
def plot_p3s(ps: List[P3], describe="r-") -> None:
"""
绘制点 P3 数组,多个点
"""
Plot3.plot_xyz_array(
[p.x for p in ps], [p.y for p in ps], [p.z for p in ps], describe
)
@staticmethod
def plot_line2(line2: Line2, step: float = 1 * MM, describe="r") -> None:
"""
绘制 line2
"""
Plot3.plot_p3s(line2.disperse3d(step=step), describe)
@staticmethod
def plot_line2s(
line2s: List[Line2], steps: List[float] = [1 * MM], describes: List[str] = ["r"]
) -> None:
"""
绘制多个 line2
"""
length = len(line2s)
for i in range(length):
Plot3.plot_line2(
line2=line2s[i],
step=steps[i] if i < len(steps) else steps[-1],
describe=describes[i] if i < len(describes) else describes[-1],
)
@staticmethod
def plot_line3(line3: Line3, step: float = 1 * MM, describe="r") -> None:
"""
绘制 line3
"""
Plot3.plot_p3s(line3.disperse3d(step=step), describe)
@staticmethod
def plot_beamline(beamline: Beamline, describes=["r-"]) -> None:
"""
绘制 beamline
包括 beamline 上的磁铁和设计轨道
"""
size = len(beamline.magnets)
for i in range(1, size+1):
b = beamline.magnets[i-1]
d = describes[i] if i < len(describes) else describes[-1]
if isinstance(b, QS):
Plot3.plot_qs(b, d)
elif isinstance(b, CCT):
Plot3.plot_cct(b, d)
elif isinstance(b, LocalUniformMagnet):
Plot3.plot_local_uniform_magnet(b, d)
else:
print(f"无法绘制{b}")
Plot3.plot_line2(beamline.trajectory, describe=describes[0])
@staticmethod
def plot_ndarry3ds(narray: numpy.ndarray, describe="r-") -> None:
"""
绘制 numpy 数组
"""
x = narray[:, 0]
y = narray[:, 1]
z = narray[:, 2]
Plot3.plot_xyz_array(x, y, z, describe)
@staticmethod
def plot_cct(cct: CCT, describe="r-") -> None:
"""
绘制 cct
"""
cct_path3d: numpy.ndarray = cct.dispersed_path3
cct_path3d_points: List[P3] = P3.from_numpy_ndarry(cct_path3d)
cct_path3d_points: List[P3] = [
cct.local_coordinate_system.point_to_global_coordinate(p)
for p in cct_path3d_points
]
Plot3.plot_p3s(cct_path3d_points, describe)
@staticmethod
def plot_qs(qs: QS, describe="r-") -> None:
"""
绘制 qs
"""
# 前中后三个圈
front_circle_local = [
P3(
qs.aperture_radius * math.cos(i / 180 * numpy.pi),
qs.aperture_radius * math.sin(i / 180 * numpy.pi),
0.0,
)
for i in range(360)
]
mid_circle_local = [p + P3(0, 0, qs.length / 2)
for p in front_circle_local]
back_circle_local = [p + P3(0, 0, qs.length)
for p in front_circle_local]
# 转到全局坐标系中
front_circle = [
qs.local_coordinate_system.point_to_global_coordinate(p)
for p in front_circle_local
]
mid_circle = [
qs.local_coordinate_system.point_to_global_coordinate(p)
for p in mid_circle_local
]
back_circle = [
qs.local_coordinate_system.point_to_global_coordinate(p)
for p in back_circle_local
]
Plot3.plot_p3s(front_circle, describe)
Plot3.plot_p3s(mid_circle, describe)
Plot3.plot_p3s(back_circle, describe)
# 画轴线
for i in range(0, 360, 10):
Plot3.plot_p3s([front_circle[i], back_circle[i]], describe)
@staticmethod
def plot_q(q: Q, describe="r-") -> None:
"""
绘制 q
"""
# 前中后三个圈
front_circle_local = [
P3(
q.aperture_radius * math.cos(i / 180 * numpy.pi),
q.aperture_radius * math.sin(i / 180 * numpy.pi),
0.0,
)
for i in range(360)
]
mid_circle_local = [p + P3(0, 0, q.length / 2)
for p in front_circle_local]
back_circle_local = [p + P3(0, 0, q.length)
for p in front_circle_local]
# 转到全局坐标系中
front_circle = [
q.local_coordinate_system.point_to_global_coordinate(p)
for p in front_circle_local
]
mid_circle = [
q.local_coordinate_system.point_to_global_coordinate(p)
for p in mid_circle_local
]
back_circle = [
q.local_coordinate_system.point_to_global_coordinate(p)
for p in back_circle_local
]
Plot3.plot_p3s(front_circle, describe)
Plot3.plot_p3s(mid_circle, describe)
Plot3.plot_p3s(back_circle, describe)
# 画轴线
for i in range(0, 360, 10):
Plot3.plot_p3s([front_circle[i], back_circle[i]], describe)
@staticmethod
def plot_local_uniform_magnet(local_uniform_magnet: LocalUniformMagnet, describe="r-") -> None:
"""
绘制 LocalUniformMagnet
"""
# 前中后三个圈
front_circle_local = [
P3(
local_uniform_magnet.aperture_radius *
math.cos(i / 180 * numpy.pi),
local_uniform_magnet.aperture_radius *
math.sin(i / 180 * numpy.pi),
0.0,
)
for i in range(360)
]
mid_circle_local = [p + P3(0, 0, local_uniform_magnet.length / 2)
for p in front_circle_local]
back_circle_local = [p + P3(0, 0, local_uniform_magnet.length)
for p in front_circle_local]
# 转到全局坐标系中
front_circle = [
local_uniform_magnet.local_coordinate_system.point_to_global_coordinate(
p)
for p in front_circle_local
]
mid_circle = [
local_uniform_magnet.local_coordinate_system.point_to_global_coordinate(
p)
for p in mid_circle_local
]
back_circle = [
local_uniform_magnet.local_coordinate_system.point_to_global_coordinate(
p)
for p in back_circle_local
]
Plot3.plot_p3s(front_circle, describe)
Plot3.plot_p3s(mid_circle, describe)
Plot3.plot_p3s(back_circle, describe)
# 画轴线
for i in range(0, 360, 10):
Plot3.plot_p3s([front_circle[i], back_circle[i]], describe)
@staticmethod
def plot_local_coordinate_system(
local_coordinate_syste: LocalCoordinateSystem,
axis_lengths: List[float] = [100 * MM] * 3,
describe="r-",
) -> None:
"""
绘制 local_coordinate_syste
axis_lengths 各个轴的长度
"""
origin = local_coordinate_syste.location
xi = local_coordinate_syste.XI
yi = local_coordinate_syste.YI
zi = local_coordinate_syste.ZI
Plot3.plot_p3s(ps=[origin, origin + xi *
axis_lengths[0]], describe=describe)
Plot3.plot_p3s(ps=[origin, origin + yi *
axis_lengths[1]], describe=describe)
Plot3.plot_p3s(ps=[origin, origin + zi *
axis_lengths[2]], describe=describe)
    @staticmethod
    def plot_running_particle(p: RunningParticle, describe="r.") -> None:
        """
        Plot a single particle; in practice this draws the particle's position.
        """
        Plot3.plot_p3(p.position, describe=describe)
@staticmethod
def plot_running_particles(ps: List[RunningParticle], describe="r.") -> None:
"""
绘制多个粒子,实际上绘制粒子的位置
"""
Plot3.plot_p3s([p.position for p in ps], describe=describe)
@staticmethod
def set_center(center: P3 = P3.origin(), cube_size: float = 1.0) -> None:
"""
设置视界中心和范围
因为范围是一个正方体,所以这个方法类似于 Plot2.equal()
"""
p = P3(cube_size, cube_size, cube_size)
Plot3.plot_p3(center - p, "w")
Plot3.plot_p3(center + p, "w")
    @staticmethod
    def set_box(front_down_left: P3, back_top_right: P3) -> None:
        """
        Set the view extent from two opposite corner points of a box.
        """
        # white points: only influence autoscaling, not visible
        Plot3.plot_p3(front_down_left, "w")
        Plot3.plot_p3(back_top_right, "w")
    @staticmethod
    def off_axis() -> None:
        """
        Hide the coordinate axes.
        """
        Plot3.PLT.axis("off")
    @staticmethod
    def remove_background_color() -> None:
        """
        Make the three axis panes fully transparent (removes the background
        color of the 3-D plot).
        """
        # NOTE(review): w_xaxis / w_yaxis / w_zaxis are deprecated in newer
        # matplotlib releases in favour of xaxis / yaxis / zaxis -- confirm
        # the pinned matplotlib version before changing these.
        Plot3.ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        Plot3.ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        Plot3.ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    @staticmethod
    def show():
        """
        Display the figure.
        Raises RuntimeError if nothing has been plotted yet (Plot3 was never
        initialised).
        """
        if not Plot3.INIT:
            raise RuntimeError("Plot3::请在show前绘制图象")
        plt.show()
    @staticmethod
    def __logo__():
        """
        Draw the project logo and show it.
        """
        LOGO = Trajectory.__cctpy__()
        Plot3.plot_line2s(LOGO, [1 * M], ["r-", "r-", "r-", "b-", "b-"])
        # axis_lengths / location values are presumably tuned so the drawn
        # axes frame the logo nicely -- TODO confirm
        Plot3.plot_local_coordinate_system(
            LocalCoordinateSystem(location=P3(z=-0.5e-6)),
            axis_lengths=[1000, 200, 1e-6],
            describe="k-",
        )
        Plot3.off_axis()
        Plot3.remove_background_color()
        # fixed camera angle chosen for the logo
        Plot3.ax.view_init(elev=20, azim=-79)
        Plot3.show()
class Plot2:
    """
    2-D plotting helpers built on matplotlib.

    All methods are static; matplotlib is initialised lazily on the first
    call that actually draws something (plot_xy / plot_xy_array).
    """
    INIT = False  # whether matplotlib has been initialised
    PLT = plt
    @staticmethod
    def __init():
        """
        Initialise Plot2.
        Checked and called automatically; no need to call it yourself.
        """
        plt.rcParams["font.sans-serif"] = ["SimHei"]  # display CJK labels correctly
        plt.rcParams["axes.unicode_minus"] = False  # display minus signs correctly
        Plot2.INIT = True
    @staticmethod
    def plot(*data, describe="r-") -> None:
        """
        Plot arbitrary data, dispatching on the argument type(s).
        """
        param_length = len(data)
        if param_length == 1:
            param1 = data[0]
            if isinstance(param1, P2):
                Plot2.plot_p2(param1, describe=describe)
            elif isinstance(param1, P3):
                Plot2.plot_p3(param1, describe=describe)
            elif isinstance(param1, List):
                if isinstance(param1[0], P2):
                    Plot2.plot_p2s(param1, describe=describe)
                elif isinstance(param1[0], P3):
                    Plot2.plot_p3s(param1, describe=describe)
                else:
                    print(f"无法绘制{data}")
            elif isinstance(param1, numpy.ndarray):
                Plot2.plot_ndarry2ds(param1, describe=describe)
            elif isinstance(param1, CCT):
                Plot2.plot_cct_outline(param1, describe=describe)
            elif isinstance(param1, QS):
                Plot2.plot_qs(param1, describe=describe)
            elif isinstance(param1, LocalUniformMagnet):
                Plot2.plot_local_uniform_magnet(param1, describe=describe)
            elif isinstance(param1, Beamline):
                Plot2.plot_beamline(param1, describes=["k-", "r-"])
            elif isinstance(param1, Line2):
                Plot2.plot_line2(param1, describe=describe)
            elif isinstance(param1, BaseUtils.Ellipse):
                p2s = param1.uniform_distribution_points_along_edge(64)
                p2s.append(p2s[0])  # close the ellipse outline
                Plot2.plot(p2s, describe=describe)
            else:
                print(f"无法绘制{data}")
        elif param_length == 2:
            param1 = data[0]
            param2 = data[1]
            if (isinstance(param1, int) or isinstance(param1, float)) and (
                isinstance(param2, int) or isinstance(param2, float)
            ):
                Plot2.plot_xy(param1, param2, describe=describe)
            elif isinstance(param1, List) and isinstance(param2, List):
                Plot2.plot_xy_array(param1, param2, describe=describe)
            else:
                Plot2.plot(param1, describe=describe)
                Plot2.plot(param2, describe=describe)
        else:
            for d in data:
                Plot2.plot(d, describe=describe)
    @staticmethod
    def plot_xy(x: float, y: float, describe="r.") -> None:
        """
        Plot the point (x, y).
        Only plot_xy and plot_xy_array touch matplotlib directly, so they
        are the places that check / perform lazy initialisation.
        """
        if not Plot2.INIT:
            Plot2.__init()
        if describe is None:
            plt.plot(x, y)
        else:
            plt.plot(x, y, describe)
    @staticmethod
    def plot_xy_array(xs: List[float], ys: List[float], describe="r-") -> None:
        """
        Plot several points given as separate x and y sequences.
        Only plot_xy and plot_xy_array touch matplotlib directly, so they
        are the places that check / perform lazy initialisation.
        """
        if not Plot2.INIT:
            Plot2.__init()
        if describe is None:
            plt.plot(xs, ys)
        else:
            plt.plot(xs, ys, describe)
    @staticmethod
    def plot_function(
        func: Callable[[float], float], start: float, end: float,
        number: int = 1000, describe="r-"
    ) -> None:
        """
        Plot a function of one variable.
        func     -- the function
        start    -- start of the independent variable
        end      -- end of the independent variable
        number   -- number of sample points
        describe -- plot style string
        """
        xs = BaseUtils.linspace(start, end, number)
        ys = [func(x) for x in xs]
        Plot2.plot_xy_array(xs, ys, describe=describe)
    @staticmethod
    def plot_p2(p: P2, describe="r") -> None:
        """
        Plot a single P2 point.
        """
        Plot2.plot_xy(p.x, p.y, describe)
    @staticmethod
    def plot_p3(
        p: P3, p3_to_p2: Callable = lambda p3: P2(p3.x, p3.y), describe="r"
    ) -> None:
        """
        Plot a single P3 point, converted to P2 by the strategy p3_to_p2.
        """
        Plot2.plot_p2(p3_to_p2(p), describe)
    @staticmethod
    def plot_p2s(ps: List[P2], describe="r-", circle: bool = False) -> None:
        """
        Plot a list of P2 points.
        circle -- if True, close the polyline back to the first point
        """
        ps_c = ps + [ps[0]] if circle else ps
        Plot2.plot_xy_array([p.x for p in ps_c], [p.y for p in ps_c], describe)
    @staticmethod
    def plot_p3s(
        ps: List[P3], p3_to_p2: Callable[[P3], P2] = lambda p3: P2(p3.x, p3.y), describe="r-"
    ) -> None:
        """
        Plot a list of P3 points, each converted to P2 by the strategy p3_to_p2.
        """
        Plot2.plot_p2s([p3_to_p2(p) for p in ps], describe)
    @staticmethod
    def plot_ndarry2ds(narray: numpy.ndarray, describe="r-") -> None:
        """
        Plot a 2-column numpy array (x in column 0, y in column 1).
        """
        x = narray[:, 0]
        y = narray[:, 1]
        Plot2.plot_xy_array(x, y, describe)
    @staticmethod
    def plot_cct_path2d(cct: CCT, describe="r-") -> None:
        """
        Plot the 2-D image of a CCT, i.e. its (ξ, φ) path.
        """
        cct_path2: numpy.ndarray = cct.dispersed_path2
        Plot2.plot_ndarry2ds(cct_path2, describe)
    @staticmethod
    def plot_cct_path3d_in_2d(cct: CCT, describe="r-") -> None:
        """
        Plot a CCT by projecting its 3-D path onto the xy plane.
        """
        cct_path3d: numpy.ndarray = cct.dispersed_path3
        cct_path3d_points: List[P3] = P3.from_numpy_ndarry(cct_path3d)
        cct_path3d_points: List[P3] = [
            cct.local_coordinate_system.point_to_global_coordinate(p)
            for p in cct_path3d_points
        ]
        cct_path2d_points: List[P2] = [p.to_p2() for p in cct_path3d_points]
        Plot2.plot_p2s(cct_path2d_points, describe)
    @staticmethod
    def plot_cct_outline(cct: CCT, describe="r-") -> None:
        """
        Plot the outline of a CCT as two arcs offset by small_r around the
        design arc, with the two ends closed.
        """
        R = cct.big_r
        r = cct.small_r
        lcs = cct.local_coordinate_system
        center = lcs.location
        phi0 = cct.starting_point_in_ksi_phi_coordinate.y
        phi1 = cct.end_point_in_ksi_phi_coordinate.y
        clockwise: bool = phi1 < phi0
        phi_length = abs(phi1 - phi0)
        arc = ArcLine2(
            starting_phi=phi0 + lcs.XI.to_p2().angle_to_x_axis(),  # easy to get confused here
            center=center.to_p2(),
            radius=R,
            total_phi=phi_length,
            clockwise=clockwise,
        )
        left_points = []
        right_points = []
        for t in BaseUtils.linspace(0, arc.get_length(), 100):
            left_points.append(arc.left_hand_side_point(t, r))
            right_points.append(arc.right_hand_side_point(t, r))
        Plot2.plot_p2s(left_points, describe=describe)
        Plot2.plot_p2s(right_points, describe=describe)
        # close the outline at both ends
        Plot2.plot_p2s([left_points[0], right_points[0]], describe=describe)
        Plot2.plot_p2s([left_points[-1], right_points[-1]], describe=describe)
    @staticmethod
    def plot_cct_outline_straight(location: float, cct: CCT, length: float, describe="r-") -> None:
        """
        Straight-track version of plot_cct_outline; used with
        plot_beamline_straight.
        """
        start_point = P2(x=location)
        x = P2.x_direct()
        y = P2.y_direct()
        p1 = start_point + y*cct.small_r
        p4 = start_point - y*cct.small_r
        p2 = p1 + x*length
        p3 = p4 + x*length
        Plot2.plot_p2s([p1, p2, p3, p4, p1], describe=describe)
    @staticmethod
    def plot_qs(qs: QS, describe="r-") -> None:
        """
        Plot the outline of a QS magnet.
        """
        length = qs.length
        aper = qs.aperture_radius
        lsc = qs.local_coordinate_system
        origin = lsc.location
        outline = [
            origin,
            origin + lsc.XI * aper,
            origin + lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper,
            origin,
        ]
        outline_2d = [p.to_p2() for p in outline]
        Plot2.plot_p2s(outline_2d, describe)
    @staticmethod
    def plot_q(q: Q, describe="r-") -> None:
        """
        Plot the outline of a Q magnet.
        """
        length = q.length
        aper = q.aperture_radius
        lsc = q.local_coordinate_system
        origin = lsc.location
        outline = [
            origin,
            origin + lsc.XI * aper,
            origin + lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper,
            origin,
        ]
        outline_2d = [p.to_p2() for p in outline]
        Plot2.plot_p2s(outline_2d, describe)
    @staticmethod
    def plot_local_uniform_magnet(local_uniform_magnet: LocalUniformMagnet, describe="r-") -> None:
        """
        Plot the outline of a LocalUniformMagnet.
        """
        length = local_uniform_magnet.length
        aper = local_uniform_magnet.aperture_radius
        lsc = local_uniform_magnet.local_coordinate_system
        origin = lsc.location
        outline = [
            origin,
            origin + lsc.XI * aper,
            origin + lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper + lsc.ZI * length,
            origin - lsc.XI * aper,
            origin,
        ]
        outline_2d = [p.to_p2() for p in outline]
        Plot2.plot_p2s(outline_2d, describe)
    @staticmethod
    def plot_qs_straight(location: float, qs: QS, length: float, describe="k-") -> None:
        """
        Plot a QS magnet with the track drawn as a straight line; used with
        plot_beamline_straight. Drawn above or below the axis depending on
        the sign of the gradient (focusing / defocusing).
        """
        start_point = P2(x=location)
        x = P2.x_direct()
        y = None
        if qs.gradient >= 0:
            y = P2.y_direct()
        else:
            y = -P2.y_direct()
        p1 = start_point + x*length
        p2 = p1 + y*qs.aperture_radius
        p3 = start_point + y*qs.aperture_radius
        Plot2.plot_p2s([start_point, p1, p2, p3, start_point],
                       describe=describe)
    @staticmethod
    def plot_q_straight(location: float, q: Q, length: float, describe="k-") -> None:
        """
        Plot a Q magnet with the track drawn as a straight line; used with
        plot_beamline_straight.
        Renamed from plot_qs_straight: the duplicate definition used to
        shadow the QS variant above.
        """
        start_point = P2(x=location)
        x = P2.x_direct()
        y = None
        if q.gradient >= 0:
            y = P2.y_direct()
        else:
            y = -P2.y_direct()
        p1 = start_point + x*length
        p2 = p1 + y*q.aperture_radius
        p3 = start_point + y*q.aperture_radius
        Plot2.plot_p2s([start_point, p1, p2, p3, start_point],
                       describe=describe)
    @staticmethod
    def plot_local_uniform_magnet_straight(location: float, local_uniform_magnet: LocalUniformMagnet, length: float, describe="k-") -> None:
        """
        Plot a LocalUniformMagnet with the track drawn as a straight line;
        used with plot_beamline_straight.
        """
        start_point = P2(x=location)
        x = P2.x_direct()
        y = None
        if local_uniform_magnet.gradient >= 0:
            y = P2.y_direct()
        else:
            y = -P2.y_direct()
        p1 = start_point + x*length
        p2 = p1 + y*local_uniform_magnet.aperture_radius
        p3 = start_point + y*local_uniform_magnet.aperture_radius
        Plot2.plot_p2s([start_point, p1, p2, p3, start_point],
                       describe=describe)
    @staticmethod
    def plot_beamline(beamline: Beamline, describes=["r-"]) -> None:
        """
        Plot a beamline: its magnets and the design track, at their actual
        positions. describes[0] styles the track; the rest style the
        magnets in order (last entry repeated).
        """
        size = len(beamline.magnets)
        for i in range(size):
            b = beamline.magnets[i]
            d = describes[i + 1] if i < (len(describes) - 1) else describes[-1]
            if isinstance(b, QS):
                Plot2.plot_qs(b, d)
            elif isinstance(b, CCT):
                Plot2.plot_cct_outline(b, d)
            elif isinstance(b, LocalUniformMagnet):
                Plot2.plot_local_uniform_magnet(b, d)
            else:
                print(f"无法绘制{b}")
        Plot2.plot_line2(beamline.trajectory, describe=describes[0])
    @staticmethod
    def plot_beamline_straight(beamline: Beamline, describes=["k-"]) -> None:
        """
        Plot a beamline as in plot_beamline, but with the track drawn as a
        straight line. CCT magnets are drawn as dipoles; QS magnets are
        drawn as quadrupoles above/below the axis depending on focusing.
        """
        size = len(beamline.elements)
        for i in range(size):
            loc = beamline.elements[i][0]
            b = beamline.elements[i][1]
            length = beamline.elements[i][2]
            d = describes[i + 1] if i < (len(describes) - 1) else describes[-1]
            if b is None:
                # drift space: nothing to draw
                pass
            else:
                if isinstance(b, QS):
                    Plot2.plot_qs_straight(loc, b, length, describe=d)
                elif isinstance(b, CCT):
                    Plot2.plot_cct_outline_straight(loc, b, length, describe=d)
                elif isinstance(b, LocalUniformMagnet):
                    Plot2.plot_local_uniform_magnet_straight(
                        loc, b, length, describe=d)
                else:
                    print(f"无法绘制{b}")
        Plot2.plot_p2s(
            [P2.origin(), P2(x=beamline.trajectory.get_length())], describe=describes[0])
    @staticmethod
    def plot_line2(line: Line2, step: float = 1 * MM, describe="r-") -> None:
        """
        Plot a Line2.
        refactor 0.1.1 Line2 and Trajectory are plotted separately
        """
        if isinstance(line, Trajectory):
            Plot2.plot_trajectory(line, describes=describe)
        else:
            p2s = line.disperse2d(step)
            Plot2.plot_p2s(p2s, describe)
    @staticmethod
    def plot_trajectory(trajectory: Trajectory, describes=['r-', 'b-', 'k-']) -> None:
        """
        Plot a trajectory, with straight lines and arcs in different colors.
        since 0.1.1
        """
        line2_list = trajectory.get_line2_list()
        describe_straight_line = 'r-'
        describe_arc_line = 'b-'
        describe_aperture_objects = 'k-'
        if isinstance(describes, List):
            describe_straight_line = describes[0] if len(
                describes) >= 1 else 'r-'
            describe_arc_line = describes[1] if len(
                describes) >= 2 else describe_straight_line
            describe_aperture_objects = describes[2] if len(
                describes) >= 3 else describe_straight_line
        elif isinstance(describes, str):
            describe_straight_line = describes
            describe_arc_line = describes
            describe_aperture_objects = describes
        else:
            print(f"Plot2.plot_trajectory 参数describes异常{describes}")
        for l2 in line2_list:
            if isinstance(l2, StraightLine2):
                Plot2.plot_line2(l2, describe=describe_straight_line)
            elif isinstance(l2, ArcLine2):
                Plot2.plot_line2(l2, describe=describe_arc_line)
            elif isinstance(l2, Trajectory):
                Plot2.plot_trajectory(l2, describes)
            elif isinstance(l2, Line2):
                Plot2.plot_line2(l2, describe=describe_straight_line)
            else:
                print(f"无法绘制{l2}")
        # draw the aperture outlines
        for a in trajectory.get_aperture_objrcts():
            Plot2.plot_line2(a, describe=describe_aperture_objects)
    @staticmethod
    def equal():
        """
        Set the axes to the same scale.
        """
        if not Plot2.INIT:
            Plot2.__init()
        plt.axis("equal")
    @staticmethod
    def xlim(x_min: float, x_max: float):
        """
        Set the x-axis range.
        since 0.1.4
        """
        if not Plot2.INIT:
            Plot2.__init()
        plt.xlim(x_min, x_max)
    @staticmethod
    def ylim(y_min: float, y_max: float):
        """
        Set the y-axis range.
        since 0.1.4
        """
        if not Plot2.INIT:
            Plot2.__init()
        plt.ylim(y_min, y_max)
    @staticmethod
    def info(
        x_label: str = "",
        y_label: str = "",
        title: str = "",
        font_size: int = 24,
        font_family: str = "Times New Roman",
    ):
        """
        Set axis labels, title and tick fonts.
        """
        if not Plot2.INIT:
            Plot2.__init()
        font_label = {
            "family": font_family,
            "weight": "normal",
            "size": font_size,
        }
        plt.xlabel(xlabel=x_label, fontdict=font_label)
        plt.ylabel(ylabel=y_label, fontdict=font_label)
        plt.title(label=title, fontdict=font_label)
        plt.xticks(fontproperties=font_family, size=font_size)
        plt.yticks(fontproperties=font_family, size=font_size)
    @staticmethod
    def legend(*labels: Tuple, font_size: int = 24, font_family: str = "Times New Roman"):
        """
        Set the legend.
        since v0.1.1
        """
        if not Plot2.INIT:
            Plot2.__init()
        font_label = {
            "family": font_family,
            "weight": "normal",
            "size": font_size,
        }
        plt.legend(labels=list(labels), prop=font_label)
    @staticmethod
    def subplot(info) -> None:
        """
        Select a subplot; info is passed straight to plt.subplot.
        """
        if not Plot2.INIT:
            Plot2.__init()
        plt.subplot(info)
    @staticmethod
    def show():
        """
        Display the figure. Warns (does not raise) if nothing was plotted.
        """
        if not Plot2.INIT:
            print("Plot2::请在show前调用plot")
        plt.show()
|
import sys
# Read one integer n followed by the n heights from stdin.
n, *h = map(int, sys.stdin.read().split())
def main():
    # Greedy right-to-left: each height may be decremented at most once;
    # report whether the sequence can be made non-decreasing.
    ans = 'Yes'
    for i in range(n - 1, 0, -1):
        if h[i-1] > h[i]: h[i-1] -= 1
        if h[i-1] > h[i]: ans = 'No'; break
    print(ans)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from flask import escape
import requests
import re
def get_orders(installation):
    """Fetch all orders of the installation and wrap them as OrderListItems."""
    headers = {"AUTHORIZATION": "Bearer %s" % installation.access_token}
    response = requests.get(installation.api_url + "/orders", headers=headers)
    orders = response.json().get("_embedded").get("orders")
    return [OrderListItem(order, installation.hostname) for order in orders]
def get_order(installation, order_id):
    """Fetch a single order plus shop data and return it as an Order."""
    headers = {"AUTHORIZATION": "Bearer %s" % installation.access_token}
    order_url = installation.api_url + "/orders/%s" % order_id
    order_json = requests.get(order_url, headers=headers).json()
    shop_logo = get_shop_logo_url(installation.api_url)
    # NOTE(review): unlike the other API calls, /shop is requested without
    # the Authorization header -- confirm the endpoint is public.
    shop_json = requests.get(installation.api_url + "/shop").json()
    shop_json["logo_url"] = shop_logo
    return Order(order_json, shop_json, installation.hostname)
def get_shop_logo_url(api_url):
    """Return the URL of the shop logo (128px high), or '' if there is none."""
    images_json = requests.get(api_url + "/shop/images").json()
    logos = [image for image
             in images_json.get('_embedded', {}).get('images', [])
             if image.get('label', '') == 'logo']
    if not logos:
        return ''
    logo_url = logos[0].get('_links', {}).get('data', {}).get('href', '')
    # Hack to remove image link template params
    logo_url = re.sub(r'\{.*\}', '', logo_url)
    # NOTE(review): appends with '&', assuming a query string is already
    # present after stripping the template -- confirm against the API.
    return logo_url + '&height=128'
class OrderListItem(object):
    """Summary of an order as shown in list views."""
    def __init__(self, order, hostname):
        address = order.get('billingAddress')
        total = order.get("grandTotal")
        self.pdf_link = '/api/%s/pdfs/%s.pdf' % (hostname, order.get("_id"))
        self.order_number = order.get("orderNumber")
        full_name = '%s %s' % (address.get('firstName', ''),
                               address.get('lastName', ''))
        self.customer = escape(full_name)
        self.grand_total = "%s %s" % (total.get("amount"), total.get("currency"))
class Order(OrderListItem):
    """Full order details, including shop and billing data."""
    def __init__(self, order, shop, hostname):
        super().__init__(order, hostname)
        # shop information
        self.shop_name = escape(shop.get('name', ''))
        self.shop_email = shop.get('address', {}).get('email', '')
        self.logo_url = shop.get("logo_url", "")
        # billing address
        address = order.get('billingAddress')
        self.billing_name = escape(self.customer)
        street = '%s %s' % (address.get('street', ''),
                            address.get('houseNumber', '') or "")
        self.billing_street = escape(street)
        self.billing_postcode = escape(address.get('postalCode', ''))
        self.billing_town = escape(address.get('city', ''))
        # shipping cost and product line items
        shipping_price = order.get('shippingLineItem', {}).get('lineItemPrice', {})
        self.shipping_total = '%s %s' % (shipping_price.get('amount', ''),
                                         shipping_price.get('currency', ''))
        self.products = [ProductLineItem(item)
                         for item in order.get('productLineItems', [])]
class ProductLineItem(object):
    """A single product row of an order."""
    def __init__(self, product):
        product_info = product.get('product', {})
        self.name = escape(product_info.get('name', ''))
        self.quantity = product.get('quantity')
        tax_rate = float(product.get('lineItemTax', {}).get('taxRate', 0.0))
        self.tax = "%.0f" % (tax_rate * 100.0)
        unit_price = product.get('unitPrice', {})
        self.price_per_item = u'%s %s' % (unit_price.get('amount', ''),
                                          unit_price.get('currency', ''))
        line_item_price = product.get('lineItemPrice', {})
        self.price_total = u'%s %s' % (line_item_price.get('amount', ''),
                                       line_item_price.get('currency', ''))
        self.icon = product_info.get('_links', {}) \
            .get('default-image-data', {}).get('href', None)
        if self.icon:
            # Hack to remove the templated parameters breaking valid HTML hyperlinks
            self.icon = re.sub(r'\{.*\}', '', self.icon)
            self.icon += '&width=32'
        else:
            self.icon = ''
    def __str__(self):
        return 'BydProduct(%s)' % self.name
|
from cipher_ljt2138 import cipher_ljt2138
|
# Arquivo de mapeamento objeto-relacional para matérias.
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String
from db import Base
class Subject(Base):
    """ORM mapping for the 'subjects' table."""
    __tablename__ = 'subjects'
    id = Column(Integer, primary_key=True, autoincrement=True)
    code = Column(String, unique=True)  # short code, e.g. 'BD2' for database II
    fullname = Column(String)  # full name, e.g. 'Banco de Dados II'
    semester = Column(Integer, nullable=False)
    registered_by = relationship('Registered', back_populates='subject', cascade="all, delete-orphan")
    def __str__(self):
        # semester 0 is the special "Elo" term
        sem = "Elo" if int(self.semester) == 0 else str(self.semester)
        return f"Semestre: {sem} | Código: {self.code} | Matéria: {self.fullname}"
|
import os
import sys
import json
from collections import OrderedDict
# Updates header and binary files to local conan cache
def getjson(fn):
    """Load *fn* as JSON preserving key order; print and return None on failure."""
    try:
        with open(fn) as handle:
            return json.load(handle, object_pairs_hook=OrderedDict)
    except FileNotFoundError as err:
        print("<<== "+fn+" not found. " + err.strerror)
    except json.decoder.JSONDecodeError:
        print("<<== json error in "+fn)
def find(path, name):
    """Return the full path of the first file named *name* under *path*, else None."""
    for root, _dirs, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(root, name)
    return None
def find_cache(sourcePath=None):
    """
    Locate the local conan cache directory for the package described by
    ./build/graph_info.json (or ../build relative to sourcePath).

    Shells out to `conan search` and follows the `.conan_link` redirect
    file. Returns the linked cache path, or '' on any failure.
    """
    result = ''
    print(sourcePath)
    current_path = os.getcwd()
    if sourcePath is not None and os.path.isdir(sourcePath):
        current_path = sourcePath
    # conanfile.py may live in the given directory or in its parent
    conanfile_path = current_path
    parent_path = os.path.join(current_path, '..')
    conanfile_py = 'conanfile.py'
    if os.path.isfile(os.path.join(current_path, conanfile_py)):
        conanfile_path = current_path
    if os.path.isfile(os.path.join(parent_path, conanfile_py)):
        conanfile_path = parent_path
    build_path = os.path.join(conanfile_path, 'build')
    if not os.path.isdir(build_path):
        print(f'{build_path} is not found.')
        return result
    graph_info = getjson(os.path.join(build_path, 'graph_info.json'))
    root_name = graph_info['root']['name']
    root_version = graph_info['root']['version']
    name_version = f'{root_name}/{root_version}'
    # Ask conan for matching packages, capturing its output in a file
    conan_search_file = os.path.join(build_path, 'conan_search.txt')
    os.system(f'conan search {name_version} > {conan_search_file}')
    if not os.path.isfile(conan_search_file):
        print(f'{conan_search_file} is not found.')
        return result
    # use `with` so the handle is closed even if reading fails
    with open(conan_search_file, "r") as f:
        conan_search = f.readlines()
    package_name = ''
    for line in conan_search:
        print(line)
        if line.startswith(name_version):
            package_name = line.strip('\n')
    if len(package_name) == 0:
        print(f'{name_version} package not found')
        return result
    # package reference has the form name/version@user/channel
    user, channel = package_name.split('@')[1].split('/')
    user_profile = os.getenv('USERPROFILE')
    if user_profile is None:
        user_profile = '~'
    conan_main = os.path.join(user_profile, '.conan', 'data', root_name, root_version, user, channel)
    if not os.path.isdir(conan_main):
        # NOTE(review): execution deliberately continues here (no return) --
        # find() below will then search a nonexistent path; confirm intent.
        print(f'{conan_main} is not found.')
    conan_main_package = os.path.join(conan_main, 'package')
    print(conan_main_package)
    # short-path installations leave a .conan_link redirect file
    conan_link_file = find(conan_main_package, '.conan_link')
    if conan_link_file is None:
        print('conan_link is not found.')
        return result
    with open(conan_link_file, "r") as f:
        result = f.read().strip('\n')
    return result
if __name__ == "__main__":
    # Default to the directory containing this script; an explicit path may
    # be passed as the first CLI argument.
    current_path = os.path.dirname(os.path.realpath(__file__))
    if (len(sys.argv) >= 2):
        current_path = sys.argv[1]
    print(current_path)
    conan_link = find_cache(current_path)
    print(conan_link)
    # NOTE(review): these two paths are computed but never used or printed
    # here -- dead code? confirm before removing.
    conan_link_bin = os.path.join(conan_link, 'bin')
    conan_link_lib = os.path.join(conan_link, 'lib')
|
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved.
#
# Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Import Libraries
import sqlite3
import shutil
import datetime
import csv
import os
import logging
from Archive_Controller import *
# Setup event logging: log to both UPS_Event.log and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
fh = logging.FileHandler('UPS_Event.log')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# Run the archiver first so the Archive directory is current.
# NOTE(review): the meaning of the empty-string tuple argument is defined
# in Archive_Controller -- confirm.
Archive_Controller(("",))
source = '/home/pi/datalogger/Archive'
# destination = "/home/pi/datalogger/tmp/UPS_Export_%s.csv" % datetime.datetime.now().date()
# Export folder on the USB stick, named after today's date.
destination = "/media/USB20FD/UPS_Export_%s" % datetime.datetime.now().date()
# os.makedir
# if not os.path.exists(destination):
# os.makedirs(destination)
try:
    # Create the destination folder (world-writable) if needed, then copy
    # every regular file from the archive onto the USB stick.
    if not os.path.exists(destination):
        os.makedirs(destination, 0o777)
        os.chmod(destination, 0o777)
    src_files = os.listdir(source)
    for file_name in src_files:
        full_file_name = os.path.join(source, file_name)
        if (os.path.isfile(full_file_name)):
            shutil.copy(full_file_name, destination)
    logger.info('Data archive successfully transferred to USB')
except shutil.Error as e:
    print("Error: %s" % e)
    logger.error('Could not transfer data to USB')
except IOError as e:
    print("Error: %s" % e.strerror)
    logger.error('Could not transfer data to USB')
|
"""OpenTimelineIO MLT XML adapter for use with melt."""
import opentimelineio as otio
from copy import deepcopy
from fractions import Fraction
from xml.dom import minidom
from xml.etree import ElementTree as et
# Time-effect schemas this adapter knows how to translate into MLT producers.
SUPPORTED_TIME_EFFECTS = (
    otio.schema.TimeEffect,
    otio.schema.LinearTimeWarp,
    otio.schema.FreezeFrame
)
class MLTAdapter(object):
def __init__(self, input_otio, **profile_data):
self.input_otio = input_otio
# Check for image producer in adapter args
self.image_producer = 'image2'
if 'image_producer' in profile_data:
self.image_producer = profile_data.pop('image_producer')
if self.image_producer not in ['image2', 'pixbuf']:
raise ValueError(
'Image producer must be "image2" or "pixbuf"'
)
self.profile_data = profile_data
# MLT root tag
self.root = et.Element('mlt')
# Store media references or clips as producers
self.producers = {'audio': {}, 'video': {}}
# Store playlists so they appear in order
self.playlists = []
# Store transitions for indexing
self.transitions = []
    def create_mlt(self):
        """
        Assemble the whole MLT document from self.input_otio and return it
        as a pretty-printed XML string.
        """
        profile_e = self.create_profile_element()
        if self.profile_data:
            self.update_profile_element(profile_e, self.profile_data)
        # Normalize the input to a stack of tracks
        if isinstance(self.input_otio, otio.schema.Timeline):
            tracks = self.input_otio.tracks
            if self.input_otio.global_start_time:
                # NOTE(review): passes a RationalTime where the other call
                # above passes a dict -- confirm update_profile_element
                # handles both.
                self.update_profile_element(
                    profile_e,
                    self.input_otio.global_start_time
                )
        elif isinstance(self.input_otio, otio.schema.Track):
            tracks = otio.schema.Stack()
            tracks.append(self.input_otio)
        elif isinstance(self.input_otio, otio.schema.Clip):
            tmp_track = otio.schema.Track()
            tmp_track.append(self.input_otio)
            tracks = otio.schema.Stack()
            tracks.append(tmp_track)
        else:
            raise ValueError(
                "Passed OTIO item must be Timeline, Track or Clip. "
                "Not {}".format(type(self.input_otio))
            )
        # Main method
        self.assemble_timeline(tracks)
        # Below we add elements in an orderly fashion
        # Add producers to root
        for producer in self.producers['producer_order_']:
            self.root.insert(0, producer)
        # Add transition tractors
        for transition in self.transitions:
            self.root.insert(-1, transition)
        # Add playlists to root
        for playlist in self.playlists:
            self.root.insert(-1, playlist)
        # Add profile to the root of tree
        self.root.insert(0, profile_e)
        # Render the XML
        tree = minidom.parseString(et.tostring(self.root, 'utf-8'))
        return tree.toprettyxml(indent=" ")
def create_property_element(self, name, text=None, attrib=None):
property_e = et.Element('property', name=name)
if text is not None:
property_e.text = str(text)
if attrib:
property_e.attrib.update(attrib)
return property_e
def create_solid(self, color, length):
color_e = et.Element(
'producer',
title='color',
id='solid_{c}'.format(c=color),
attrib={'in': '0', 'out': str(length - 1)}
)
color_e.append(self.create_property_element('length', length))
color_e.append(self.create_property_element('eof', 'pause'))
color_e.append(self.create_property_element('resource', color))
color_e.append(self.create_property_element('mlt_service', 'color'))
return color_e
    def get_producer(self, otio_item, audio_track=False):
        """
        Get or create a producer element. Will prevent duplicates.
        :param otio_item: OTIO object to base producer on
        :param audio_track: If item stems from an audio track or not
        :type audio_track: `bool`
        :return: producer element
        """
        target_url = None
        producer_e = None
        is_sequence = False
        extra_attribs = {}
        if isinstance(otio_item, (otio.schema.Gap, otio.schema.Transition)):
            # Create a solid producer
            producer_e = self.create_solid(
                'black',
                otio_item.duration().value
            )
            id_ = producer_e.attrib['id']
        else:
            id_ = otio_item.name
        id_key = id_
        if hasattr(otio_item, 'media_reference') and otio_item.media_reference:
            id_ = otio_item.media_reference.name or otio_item.name
            if hasattr(otio_item.media_reference, 'target_url'):
                # Plain (single-file) media reference
                target_url = otio_item.media_reference.target_url
                available_range = otio_item.media_reference.available_range
                if available_range:
                    in_ = available_range.start_time.value
                    out_ = available_range.end_time_inclusive().value
                    extra_attribs.update({'in': str(in_), 'out': str(out_)})
            elif hasattr(otio_item.media_reference, 'abstract_target_url'):
                # Image-sequence media reference; the start-frame property
                # name depends on the chosen image producer service
                is_sequence = True
                start_number_prop = 'start_number'
                if self.image_producer == 'pixbuf':
                    start_number_prop = 'begin'
                target_url = otio_item.media_reference.abstract_target_url(
                    '%0{}d'.format(
                        otio_item.media_reference.frame_zero_padding
                    )
                )
                target_url += '?{propname}={startnum}'.format(
                    propname=start_number_prop,
                    startnum=otio_item.media_reference.start_frame
                )
        if target_url:
            # include the URL in the dedup key so clips with the same name
            # but different media stay distinct
            id_key += target_url
        if producer_e is None:
            producer_e = et.Element(
                'producer',
                id=id_,
                attrib=extra_attribs
            )
        sub_key = 'video'
        if audio_track:
            if id_key not in self.producers['video']:
                sub_key = 'audio'
        # We keep track of audio and video producers to avoid duplicates
        producer = self.producers[sub_key].setdefault(
            id_key,
            producer_e
        )
        if not target_url:
            target_url = id_
        property_e = producer.find('./property/[@name="resource"]')
        if property_e is None or property_e.text == 'black':
            if property_e is None:
                resource = self.create_property_element(
                    name='resource',
                    text=target_url
                )
                producer.append(resource)
                if is_sequence:
                    producer.append(
                        self.create_property_element(
                            name='mlt_service',
                            text=self.image_producer
                        )
                    )
        # store producer in order list for insertion later
        order = self.producers.setdefault('producer_order_', [])
        if producer not in order:
            order.append(producer)
        return producer
    def create_transition(self, trans_tuple, name, audio_track=False):
        """
        Build a tractor element mixing two overlapping items.
        :param trans_tuple: (item_a, transition, item_b) as produced by
            otio.algorithms.track_with_expanded_transitions
        :param name: id for the tractor element
        :param audio_track: use the audio 'mix' service instead of 'luma'
        :return: tractor element
        """
        # Expand parts of transition
        item_a, transition, item_b = trans_tuple
        dur = transition.duration().value - 1
        tractor_e = et.Element(
            'tractor',
            id=name,
            attrib={
                'in': '0',
                'out': str(dur)
            }
        )
        producer_a = self.get_producer(item_a)
        if isinstance(item_a, otio.schema.Gap):
            a_in = 0
            # NOTE(review): the A side uses item_b's duration here -- looks
            # suspicious (item_a would be expected); confirm intent.
            a_out = item_b.duration().value - 1
        else:
            a_in = item_a.trimmed_range().start_time.value
            a_out = item_a.trimmed_range().end_time_inclusive().value
        track_a = et.Element(
            'track',
            producer=producer_a.attrib['id'],
            attrib={
                'in': str(a_in),
                'out': str(a_out)
            }
        )
        producer_b = self.get_producer(item_b)
        if isinstance(item_b, otio.schema.Gap):
            b_in = 0
            b_out = item_b.duration().value - 1
        else:
            b_in = item_b.trimmed_range().start_time.value
            b_out = item_b.trimmed_range().end_time_inclusive().value
        track_b = et.Element(
            'track',
            producer=producer_b.attrib['id'],
            attrib={
                'in': str(b_in),
                'out': str(b_out)
            }
        )
        tractor_e.append(track_a)
        tractor_e.append(track_b)
        trans_e = et.Element(
            'transition',
            id='transition_{}'.format(name),
            out=str(dur)
        )
        # Audio and video use different mixer services
        mixer = 'luma'
        if audio_track:
            mixer = 'mix'
        trans_e.append(self.create_property_element('a_track', 0))
        trans_e.append(self.create_property_element('b_track', 1))
        trans_e.append(self.create_property_element('factory'))
        trans_e.append(self.create_property_element('mlt_service', mixer))
        tractor_e.append(trans_e)
        return tractor_e
def create_entry_element(self, producer, in_, out_):
clip_e = et.Element(
'entry',
producer=producer.attrib['id'],
attrib={
'in': str(in_),
'out': str(out_)
}
)
return clip_e
def create_clip(self, item, producer):
in_ = item.trimmed_range().start_time.value
out_ = item.trimmed_range().end_time_inclusive().value
clip_e = self.create_entry_element(producer, in_, out_)
return clip_e
def create_blank_element(self, item):
blank_e = et.Element(
'blank',
length=str(item.source_range.duration.value)
)
return blank_e
    def apply_timewarp(self, item, item_e, effect):
        """
        Apply a time warp effect on a copy of a producer
        :param item: source OTIO item in track
        :param item_e: element tag to apply effect to
        :param effect: OTIO effect object (FreezeFrame or LinearTimeWarp)
        :return:
        """
        # <filter>
        #   <property name="track">0</property>
        #   <property name="mlt_service">greyscale</property>
        # </filter>
        if item_e is None:
            return
        # Create a copy of the producer; the effect is encoded in the copy's
        # id and properties rather than mutating the shared original
        orig_producer_e = self.get_producer(item)
        producer_e = deepcopy(orig_producer_e)
        id_ = None
        if effect.effect_name == 'FreezeFrame':
            # Freeze frame will always use the first frame of the
            # source_range as OTIO doesn't really have any other way of
            # indicating which frame was chosen to freeze
            id_ = '{}_freeze{}'.format(
                producer_e.attrib['id'],
                item.source_range.start_time.value
            )
            producer_e.attrib['id'] = id_
            producer_e.append(
                self.create_property_element('mlt_service', 'hold')
            )
            producer_e.append(self.create_property_element(
                'frame',
                str(item.source_range.start_time.value))
            )
        elif effect.effect_name == 'LinearTimeWarp':
            # MLT's timewarp producer encodes the speed in both the id and
            # the resource, as "<scalar>:<resource>"
            id_ = ':'.join(
                [str(effect.time_scalar), item_e.attrib.get('producer')]
            )
            producer_e.attrib['id'] = id_
            producer_e.append(
                self.create_property_element('mlt_service', 'timewarp')
            )
            resource_e = producer_e.find('./property/[@name="resource"]')
            resource_e.text = ':'.join(
                [str(effect.time_scalar), resource_e.text]
            )
        # Add the new copy to the producers list
        if id_ not in self.producers['video']:
            self.producers['video'][id_] = producer_e
            self.producers['producer_order_'].append(producer_e)
        # Swap the old producer with the new containing the effect
        item_e.attrib['producer'] = id_
def create_background_track(self, tracks, parent):
    """Add a black solid 'background' playlist spanning the full timeline.

    Registers the solid producer (once) and appends a <track> reference
    to *parent*.
    """
    duration = tracks.duration().value
    solid_e = self.create_solid('black', duration)
    # Register the producer, keeping an existing one if already present
    producer_e = self.producers['video'].setdefault(
        solid_e.attrib['id'], solid_e
    )
    # Remember insertion order for serialization later
    self.producers.setdefault('producer_order_', []).append(producer_e)
    playlist_e = et.Element('playlist', id='background')
    self.playlists.append(playlist_e)
    playlist_e.append(self.create_entry_element(solid_e, 0, duration - 1))
    parent.append(et.Element('track', producer=playlist_e.attrib['id']))
def assemble_track(self, track, track_index, parent):
    """
    Convert an OTIO track into an MLT playlist and reference it from *parent*.

    :param track: OTIO Track (or Stack) to convert
    :param track_index: index used to synthesize a playlist id when the
        track has no name
    :param parent: element the playlist reference is attached to; a
        'playlist' parent gets an <entry> child, anything else a <track>
    """
    playlist_e = et.Element(
        'playlist',
        id=track.name or 'playlist{}'.format(track_index)
    )
    self.playlists.append(playlist_e)
    # Transitions use track elements as children
    element_type = 'track'
    # Playlists use entry
    if parent.tag == 'playlist':
        element_type = 'entry'
    # Used to check if we need to add audio elements or not
    is_audio_track = False
    if hasattr(track, 'kind'):
        is_audio_track = track.kind == 'Audio'
    # Insert audio before video
    element = et.Element(element_type, producer=playlist_e.attrib['id'])
    if is_audio_track:
        parent.insert(1, element)
    else:
        parent.append(element)
    # Iterate over items in track, expanding transitions
    expanded_track = otio.algorithms.track_with_expanded_transitions(track)
    for item in expanded_track:
        item_e = None
        if isinstance(item, otio.schema.Clip):
            producer_e = self.get_producer(item, is_audio_track)
            if is_audio_track:
                # Skip "duplicate" audio elmnt for matching video producer
                key_id = producer_e.attrib['id'] + producer_e[0].text
                if key_id in self.producers['video']:
                    continue
            item_e = self.create_clip(item, producer_e)
            playlist_e.append(item_e)
        elif isinstance(item, otio.schema.Gap):
            item_e = self.create_blank_element(item)
            playlist_e.append(item_e)
        elif isinstance(item, tuple):
            # Since we expanded transitions in the track the come as tuples
            # containing (ClipA_t, Transition, ClipB_t)
            transition_e = self.create_transition(
                item,
                'transition_tractor{}'.format(len(self.transitions)),
                is_audio_track
            )
            self.transitions.append(transition_e)
            playlist_e.append(
                et.Element(
                    'entry',
                    producer=transition_e.attrib['id'],
                    attrib={
                        'in': transition_e.attrib['in'],
                        'out': transition_e.attrib['out']
                    }
                )
            )
            # Continue as transitions have no effects, see test below
            continue
        elif isinstance(item, (otio.schema.Track, otio.schema.Stack)):
            # NOTE! This doesn't apply effects to the nested track
            # TODO create new playlist and wrap it in a new tractor
            # then add filter to that tractor and place tractor in
            # place of producer/playlist. See melt docs..
            self.assemble_track(item, track_index, playlist_e)
        # Check for effects on item
        if hasattr(item, 'effects'):
            for effect in item.effects:
                # We only support certain time effects for now
                if isinstance(effect, SUPPORTED_TIME_EFFECTS):
                    self.apply_timewarp(item, item_e, effect)
def assemble_timeline(self, tracks):
    """Assemble all tracks under the main tractor/multitrack pair."""
    # We gather tracks in tractors. This is the "main one"
    tractor_e = et.Element('tractor', id='tractor0')
    multitrack_e = et.SubElement(
        tractor_e, 'multitrack', attrib={'id': 'multitrack0'}
    )
    self.root.append(tractor_e)
    # A solid black background covers any gaps in the tracks
    self.create_background_track(tracks, multitrack_e)
    for index, current_track in enumerate(tracks):
        self.assemble_track(current_track, index, multitrack_e)
def rate_fraction_from_float(self, rate):
    """
    Given a frame rate float, creates a frame rate fraction conforming to
    known good rates where possible. This will do fuzzy matching of
    23.98 to 24000/1001, for instance.
    Thanks! @reinecke
    """
    # Whole-number rates convert directly
    if isinstance(rate, int) or rate.is_integer():
        return Fraction(rate)
    ntsc_rates = (
        Fraction(24000, 1001),
        Fraction(30000, 1001),
        Fraction(60000, 1001),
    )
    # The tolerance of 0.004 comes from 24000/1001 - 23.98
    return next(
        (ntsc for ntsc in ntsc_rates if abs(rate - ntsc) < 0.004),
        Fraction(rate),
    )
def update_profile_element(self, profile_element, profile_data):
    """Merge *profile_data* into the <profile> element's attributes.

    :param profile_element: the <profile> XML element to update
    :param profile_data: either an ``otio.opentime.RationalTime`` (its rate
        becomes frame_rate_num/frame_rate_den) or a dict of profile
        key/value pairs
    :raises ValueError: when *profile_data* is neither of the above
    """
    if isinstance(profile_data, otio.opentime.RationalTime):
        fractional = self.rate_fraction_from_float(profile_data.rate)
        profile_data = dict(
            frame_rate_den=str(fractional.denominator),
            frame_rate_num=str(fractional.numerator)
        )
    elif not isinstance(profile_data, dict):
        # Fixed: the two adjacent literals previously concatenated to
        # "...RationalTime ora dict..." (missing space)
        raise ValueError(
            'Only pass global_start_time as RationalTime or '
            'a dict containing profile related key/value pairs.'
        )
    profile_element.attrib.update(self._stringify_values(profile_data))
def create_profile_element(self):
    """Create the default <profile> element.

    Fixed: the attribute was misspelled 'decsription'; MLT profiles use
    'description'.
    """
    profile_e = et.Element(
        'profile',
        description='automatic'
    )
    return profile_e
def _stringify_values(self, source_dict):
data = deepcopy(source_dict)
for k, v in data.items():
if not isinstance(v, str):
data[k] = str(v)
return data
def write_to_string(input_otio, **profile_data):
    """Serialize an OTIO object to an MLT XML string.

    :param input_otio: Timeline, Track or Clip
    :param profile_data: Properties passed to the profile tag describing
        the format, frame rate, colorspace and so on. If a passed Timeline
        has `global_start_time` set, the frame rate will be set
        automatically. Please note that numeric values must be passed as
        strings. Please check MLT website for more info on profiles.
        You may pass an "image_producer" argument with "pixbuf" to change
        image sequence producer. The default image sequence producer is
        "image2".
    :return: MLT formatted XML
    :rtype: `str`
    """
    return MLTAdapter(input_otio, **profile_data).create_mlt()
|
from qunetsim.backends import EQSNBackend
from qunetsim.components import Host
from qunetsim.components import Network
from qunetsim.objects import Logger
from qunetsim.objects import Qubit
import random
import numpy as np
Logger.DISABLED = True
def expected_value(result_string_alice, result_string_bob, bases_string_alice, bases_string_bob, base_alice, base_bob):
    """Count coincidence outcomes for rounds measured in a given basis pair.

    :param result_string_alice: Alice's measurement outcomes ('0'/'1' strings)
    :param result_string_bob: Bob's measurement outcomes ('0'/'1' strings)
    :param bases_string_alice: basis labels Alice used per round (e.g. 'a1')
    :param bases_string_bob: basis labels Bob used per round (e.g. 'b1')
    :param base_alice: Alice basis to select (e.g. 'a1')
    :param base_bob: Bob basis to select (e.g. 'b1')
    :return: counts [n00, n01, n10, n11] over rounds where both parties
        used the requested bases
    """
    # Renamed from 'list' (shadowed the builtin); zip replaces index loop
    counts = [0, 0, 0, 0]
    for res_a, res_b, basis_a, basis_b in zip(
        result_string_alice, result_string_bob,
        bases_string_alice, bases_string_bob,
    ):
        if basis_a == base_alice and basis_b == base_bob:
            # Only '0'/'1' outcomes are counted, as before
            if res_a in ('0', '1') and res_b in ('0', '1'):
                # Index encodes the outcome pair: 2*a + b -> 00,01,10,11
                counts[2 * int(res_a) + int(res_b)] += 1
    return counts
def chsh(result_string_alice, result_string_bob, bases_string_alice, bases_string_bob):
    """Compute the CHSH value S = E(a1,b1) - E(a1,b3) + E(a3,b1) + E(a3,b3)
    from the published test-round outcomes and bases.
    """
    def correlation(base_a, base_b):
        # E(a,b) = (n00 - n01 - n10 + n11) / total for this basis pair
        counts = expected_value(
            result_string_alice, result_string_bob,
            bases_string_alice, bases_string_bob,
            base_a, base_b,
        )
        return (counts[0] - counts[1] - counts[2] + counts[3]) / sum(counts)

    return (
        correlation('a1', 'b1')
        - correlation('a1', 'b3')
        + correlation('a3', 'b1')
        + correlation('a3', 'b3')
    )
def alice(alice, bob, number_of_entanglement_pairs):
    """Alice's half of the E91 protocol.

    Creates singlet pairs, sends one qubit of each to Bob, measures the
    other in a randomly chosen basis, exchanges basis choices classically,
    sifts the key, and finally publishes the test rounds for the CHSH check.

    :param alice: the local Host acting as Alice (shadows this function's name)
    :param bob: host id of the remote Bob
    :param number_of_entanglement_pairs: number of EPR rounds to run
    """
    # Rotation angles for Alice's bases a1, a2, a3
    angles = [0, np.pi/4, np.pi/2]
    # Pick a random basis (1..3) for every round up front
    bases_choice = [random.randint(1,3) for i in range(number_of_entanglement_pairs)]
    test_results_alice = []
    test_bases_alice = []
    sifted_key_alice = []
    for i in range(number_of_entanglement_pairs):
        qubit_a = Qubit(alice)
        qubit_b = Qubit(alice)
        # preparation of singlet state (1/sqrt(2))*(|01> - |10>)
        qubit_a.X()
        qubit_b.X()
        qubit_a.H()
        qubit_a.cnot(qubit_b)
        print('Sending EPR pair %d' % (i + 1))
        _, ack_arrived = alice.send_qubit(bob, qubit_b, await_ack=True)
        if ack_arrived:
            # rotate qubit and measure
            base_a = bases_choice[i]
            qubit_a.rz(angles[base_a - 1])
            meas_a = qubit_a.measure()
            # Tell Bob which basis was used this round
            ack_arrived = alice.send_classical(bob, base_a, await_ack=True)
            if not ack_arrived:
                print("Send data failed!")
            message = alice.get_next_classical(bob, wait=2)
            if message is not None:
                base_b = message.content
                # Compatible basis pairs contribute a key bit
                if (base_a == 2 and base_b == 1) or (base_a == 3 and base_b == 2):
                    sifted_key_alice.append(meas_a)
                # Test basis pairs are published later for the CHSH check
                elif (base_a == 1 and base_b == 1) or (base_a == 1 and base_b == 3) or (base_a == 3 and base_b == 1) or (base_a == 3 and base_b == 3):
                    test_bases_alice.append('a'+str(base_a))
                    test_results_alice.append(str(meas_a))
            else:
                print("The message did not arrive")
        else:
            print('The EPR pair was not properly established')
    # Publish test outcomes and bases so Bob can evaluate the CHSH value
    ack_arrived = alice.send_classical(bob, (test_results_alice, test_bases_alice), await_ack=True)
    if not ack_arrived:
        print("Send data failed!")
    print("Sifted_key_alice: ", sifted_key_alice)
def bob(bob, alice, number_of_entanglement_pairs):
    """Bob's half of the E91 protocol.

    Receives one qubit of each pair, measures in a randomly chosen basis,
    exchanges basis choices classically, sifts the key, then computes the
    CHSH value from the test rounds Alice publishes at the end.

    :param bob: the local Host acting as Bob (shadows this function's name)
    :param alice: host id of the remote Alice
    :param number_of_entanglement_pairs: number of EPR rounds to run
    """
    # Rotation angles for Bob's bases b1, b2, b3
    angles = [np.pi/4, np.pi/2, 3*(np.pi/4)]
    bob_bases = [random.randint(1,3) for i in range(number_of_entanglement_pairs)]
    test_result_bob = []
    test_bases_bob = []
    sifted_key_bob = []
    for i in range(number_of_entanglement_pairs):
        qubit_b = bob.get_data_qubit(alice, wait=5)
        if qubit_b is not None:
            base_b = bob_bases[i]
            # rotate qubit and measure
            qubit_b.rz(angles[base_b - 1])
            meas_b = qubit_b.measure()
            message = bob.get_next_classical(alice, wait=2)
            if message is not None:
                base_a = message.content
                ack_arrived = bob.send_classical(alice, base_b, await_ack=True)
                if not ack_arrived:
                    print("Send data failed!")
                # Compatible bases: flip the bit (pairs are prepared as
                # singlets in alice(), so outcomes are anti-correlated)
                if (base_a == 2 and base_b == 1) or (base_a == 3 and base_b == 2):
                    sifted_key_bob.append(1 - meas_b)
                # Test basis pairs are kept for the CHSH evaluation
                elif (base_a == 1 and base_b == 1) or (base_a == 1 and base_b == 3) or (base_a == 3 and base_b == 1) or (base_a == 3 and base_b == 3):
                    test_bases_bob.append('b'+str(base_b))
                    test_result_bob.append(str(meas_b))
            else:
                print("Host 2 did not receive the measurement base of alice")
        else:
            print('Host 2 did not receive an EPR pair')
    # Receive Alice's published test data and print the CHSH value
    message = bob.get_next_classical(alice, wait=2)
    if message is not None:
        test_result_alice, test_bases_alice = message.content
        print(chsh(test_result_alice, test_result_bob, test_bases_alice, test_bases_bob))
        print("sifted_key_bob: ", sifted_key_bob)
    else:
        print("Host 2 did not receive the data to compute the chsh value")
def main():
    """Set up a two-node QuNetSim network and run the E91 alice/bob protocols."""
    network = Network.get_instance()
    backend = EQSNBackend()
    number_of_entanglement_pairs = 50
    nodes = ['A', 'B']
    network.start(nodes, backend)
    network.delay = 0.1
    host_A = Host('A', backend)
    host_A.add_connection('B')
    host_A.delay = 0
    host_A.start()
    host_B = Host('B', backend)
    host_B.add_connection('A')
    host_B.delay = 0
    host_B.start()
    network.add_host(host_A)
    network.add_host(host_B)
    # Run both halves of the protocol concurrently, then wait for both
    t1 = host_A.run_protocol(alice, (host_B.host_id, number_of_entanglement_pairs))
    t2 = host_B.run_protocol(bob, (host_A.host_id, number_of_entanglement_pairs))
    t1.join()
    t2.join()
    network.stop(True)


if __name__ == '__main__':
    main()
|
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import ReportTest
from asap3.EMT2013Parameters import PtY_parameters
from asap3.mpi import world
from asap3.Internal.ParallelListOfAtoms import ParallelAtoms
import numpy as np
#DebugOutput("migration%d.log", nomaster=True)
def pot():
    """Return the interatomic potential used by this test (plain EMT)."""
    # Alternative parameterized potential, kept for reference:
    #return EMT2013(PtY_parameters)
    return EMT()
#set_verbose(1)

# NOTE(review): this script uses Python 2 print-chevron syntax and never
# imports 'sys' — presumably it is re-exported by 'from asap3 import *'.
# Confirm before running under a clean interpreter.

# Build the full system on the master rank only; it is distributed below.
master = world.rank == 0
if master:
    atoms0 = FaceCenteredCubic(symbol='Pt', size=(15,15,30))
else:
    atoms0 = None
# Distribute the atoms over a 1x1x2 processor grid
atoms0 = MakeParallelAtoms(atoms0, (1,1,2))
atoms0.set_calculator(pot())
print >>sys.stderr, "*********** FIRST FORCE CALCULATION ************"
print >>sys.stderr, "len(atoms) =", len(atoms0), " no. atoms =", atoms0.get_number_of_atoms()
f0 = atoms0.get_forces()
# Perturb positions to force a second, different force evaluation
perturbation = 0.01 * np.random.standard_normal(atoms0.get_positions().shape)
r = atoms0.get_positions() + perturbation
atoms0.set_positions(r)
print >>sys.stderr, "*********** SECOND FORCE CALCULATION ************"
f1 = atoms0.get_forces()
print >>sys.stderr, "*********** COPYING ATOMS **************"
# Copy into a fresh ParallelAtoms object without redistributing
atoms2 = ParallelAtoms((1,1,2), atoms0.comm, atoms0, distribute=False)
atoms2.set_calculator(pot())
print >>sys.stderr, "*********** THIRD FORCE CALCULATION ************"
f2 = atoms2.get_forces()
# Comparison against f1 is currently disabled:
#maxdev = abs(f2 - f1).max()
#print maxdev
#ReportTest("Max error 1:", maxdev, 0.0, 1e-6)
#ReportTest.Summary()
print >>sys.stderr, "No crashes - success !!"
from __future__ import annotations
import asyncio
import dataclasses
import datetime
import logging
from typing import AsyncGenerator, Optional, Union
from urllib.parse import urljoin
import discord
from discord_slash import SlashContext
from discord_slash.context import InteractionContext
from ElevatorBot.database.database import (
lookupDiscordID,
lookupSystem,
lookupDestinyID,
insertFailToGetPgcrInstanceId,
getLastUpdated,
getPgcrActivity,
updateLastUpdated,
get_info_on_low_man_activity,
getSeals,
getWeaponInfo,
)
from ElevatorBot.backendNetworking.dataLoading import get_pgcr, insertPgcrToDB
from ElevatorBot.backendNetworking.formating import embed_message
from ElevatorBot.networking.bungieAuth import handle_and_return_token
from ElevatorBot.networking.network import (
get_json_from_bungie_with_token,
get_json_from_url,
)
# Lookup tables: Bungie definition hashes -> display names
race_map = {2803282938: "Awoken", 898834093: "Exo", 3887404748: "Human"}
gender_map = {
    2204441813: "Female",
    3111576190: "Male",
}
class_map = {671679327: "Hunter", 2271682572: "Warlock", 3655393761: "Titan"}

# Shown when a user is unknown or their auth token is stale.
# Fixed: removed a stray trailing apostrophe from the message and the
# pointless f-prefixes (the strings contain no placeholders).
dont_know_user_error_message = embed_message(
    "Error",
    "I either possess no information about that user or their authentication is outdated. \nPlease `/registerdesc` to fix this issue",
)
@dataclasses.dataclass(eq=False)
class DestinyPlayer:
    """Lazily-populated view of a Destiny 2 player.

    Identified by destiny_id/system (Bungie) plus an optional discord_id.
    Underscore-prefixed fields are on-demand caches and should not be
    accessed directly.
    """

    destiny_id: int
    system: int
    discord_id: Optional[int]

    # params that shouldn't be accessed directly (cache fields; annotations
    # tightened to Optional[...] for the None defaults)
    _characters: dict = dataclasses.field(default_factory=dict)
    _full_character_list: list[dict] = dataclasses.field(default_factory=list)
    _bungie_name: Optional[str] = None
    _last_played: Optional[datetime.datetime] = None
    _clan_id: Optional[int] = None
    _clan_is_online: Optional[bool] = None
    _triumphs: Optional[dict] = None
    _collectibles: Optional[dict] = None
    _metrics: Optional[dict] = None
    _stats: Optional[dict] = None
    _character_activity_stats: dict = dataclasses.field(default_factory=dict)
    _seasonal_artifact: Optional[dict] = None
    _gear: Optional[list[dict]] = None
    _all_seals: list[int] = dataclasses.field(default_factory=list)
    _completed_seals: list[int] = dataclasses.field(default_factory=list)

    # Bungie API root shared by the request helpers below
    _base_bungie_url: str = "https://stats.bungie.net/Platform/"

    def __eq__(self, other: DestinyPlayer) -> bool:
        # Two players are equal when they refer to the same Destiny account
        return self.destiny_id == other.destiny_id

    def __bool__(self) -> bool:
        # Truthy only when all three identifiers are known and non-zero
        return bool(self.destiny_id and self.system and self.discord_id)
# @classmethod
# async def from_destiny_id(
# cls, destiny_id: int, ctx: Union[SlashContext, InteractionContext] = None
# ) -> DestinyPlayer:
# """Populate with destinyID"""
#
# system = await lookupSystem(destiny_id)
# discord_id = await lookupDiscordID(destiny_id)
#
# if ctx:
# await ctx.send(hidden=True, embed=dont_know_user_error_message)
#
# return cls(destiny_id=destiny_id, system=system, discord_id=discord_id)
#
# @classmethod
# async def from_discord_id(
# cls, discord_id: int, ctx: Union[SlashContext, InteractionContext] = None
# ) -> Optional[DestinyPlayer]:
# """Populate with discordID. Might not work"""
#
# destiny_id = await lookupDestinyID(discord_id)
# if not destiny_id:
# if ctx:
# await ctx.send(hidden=True, embed=dont_know_user_error_message)
# else:
# return None
#
# system = await lookupSystem(destiny_id)
#
# return cls(destiny_id=destiny_id, system=system, discord_id=discord_id)
#
# async def has_token(self) -> bool:
# """Returns if the user has a valid token"""
#
# return bool((await handle_and_return_token(self.discord_id)).token)
#
# async def get_clan_id_and_online_status(
# self,
# ) -> tuple[Optional[int], Optional[bool]]:
# """Get in-game clan or None"""
#
# url = urljoin(
# self._base_bungie_url, f"GroupV2/User/{self.system}/{self.destiny_id}/0/1/"
# )
# response = await get(url=url)
# if response:
# self._clan_id = int(
# response.content["Response"]["results"][0]["member"]["groupId"]
# )
# self._clan_is_online = response.content["Response"]["results"][0]["member"][
# "isOnline"
# ]
#
# return self._clan_id, self._clan_is_online
# def get_discord_user(self, client: discord.Client) -> Optional[discord.User]:
# """Get discord.User or None"""
#
# return client.get_user(self.discord_id) if self.discord_id else None
#
# def get_discord_member(self, guild: discord.Guild) -> Optional[discord.Member]:
# """Get discord.Member for specified guild or None"""
#
# return guild.get_member(self.discord_id) if self.discord_id else None
# async def has_triumph(self, triumph_hash: Union[str, int]) -> bool:
# """Returns if the triumph is gotten"""
#
# triumph_hash = str(triumph_hash)
#
# if not await self.get_triumphs():
# return False
# if triumph_hash not in self._triumphs:
# return False
#
# # calculate if the triumph is gotten
# status = True
# if "objectives" not in self._triumphs[triumph_hash]:
# # make sure it's RewardUnavailable aka legacy
# assert self._triumphs[triumph_hash]["state"] & 2
#
# # https://bungie-net.github.io/multi/schema_Destiny-DestinyRecordState.html#schema_Destiny-DestinyRecordState
# status &= self._triumphs[triumph_hash]["state"] & 1
#
# return status
#
# for part in self._triumphs[triumph_hash]["objectives"]:
# status &= part["complete"]
#
# return status
#
# async def has_collectible(self, collectible_hash: Union[str, int]) -> bool:
# """Returns if the collectible is gotten"""
#
# collectible_hash = str(collectible_hash)
#
# if not await self.get_collectibles():
# return False
#
# # look if its a profile collectible
# if (
# collectible_hash
# in self._collectibles["profileCollectibles"]["data"]["collectibles"]
# ):
# return (
# self._collectibles["profileCollectibles"]["data"]["collectibles"][
# collectible_hash
# ]["state"]
# & 1
# == 0
# )
#
# # look if its a character specific collectible
# for character_collectibles in self._collectibles["characterCollectibles"][
# "data"
# ].values():
# if collectible_hash in character_collectibles["collectibles"]:
# return (
# character_collectibles["collectibles"][collectible_hash]["state"]
# & 1
# == 0
# )
#
# return False
#
# async def get_metric_value(self, metric_hash: Union[str, int]) -> Optional[int]:
# """Returns the value of the given metric hash"""
#
# metric_hash = str(metric_hash)
#
# if not await self.get_collectibles():
# return False
#
# if metric_hash in self._metrics.keys():
# return self._metrics[metric_hash]["objectiveProgress"]["progress"]
# else:
# return None
#
# async def get_stat_value(
# self,
# stat_name: str,
# stat_category: str = "allTime",
# character_id: Union[int, str] = None,
# ) -> Optional[int]:
# """Returns the value of the given stat"""
#
# if not await self.get_stats():
# return None
#
# possible_stat_categories = [
# "allTime",
# "allPvE",
# "allPvP",
# ]
# assert (
# stat_category not in possible_stat_categories
# ), f"Stat must be one of {possible_stat_categories}"
#
# # total stats
# if not character_id:
# try:
# stat = self._stats["mergedAllCharacters"]["merged"][stat_category][
# stat_name
# ]["basic"]["value"]
# return int(stat)
# except KeyError:
# return None
#
# # character stats
# else:
# for char in self._stats["characters"]:
# if char["characterId"] == str(character_id):
# try:
# stat = self._stats["merged"][stat_category][stat_name]["basic"][
# "value"
# ]
# return int(stat)
# except KeyError:
# return None
#
# return None
#
# async def get_artifact(self) -> Optional[dict]:
# """Returns the seasonal artifact data"""
#
# if not self._seasonal_artifact:
# result = await self._get_profile([104], with_token=True)
# if result:
# self._seasonal_artifact = result["profileProgression"]["data"][
# "seasonalArtifact"
# ]
#
# return self._seasonal_artifact
#
# async def get_player_seals(self) -> tuple[list[int], list[int]]:
# """Returns all seals and the seals a player has. Returns two lists: [triumph_hash, ...] and removes wip seals like WF LW"""
#
# if not self._all_seals:
# seals = await getSeals()
# for seal in seals:
# self._all_seals.append(seal[0])
# if await self.has_triumph(seal[0]):
# self._completed_seals.append(seal)
#
# return self._all_seals, self._completed_seals
# async def get_destiny_name_and_last_played(self) -> tuple[str, datetime.datetime]:
# """Returns the current user name"""
#
# if not self._bungie_name:
# result = await self.__get_profile([100])
# if result:
# self._bungie_name = result["profile"]["data"]["userInfo"]["displayName"]
# last_played = result["profile"]["data"]["dateLastPlayed"]
# self._last_played = datetime.datetime.strptime(
# last_played, "%Y-%m-%dT%H:%M:%SZ"
# )
#
# return self._bungie_name, self._last_played
# async def get_character_info(self) -> Optional[dict]:
# """Get character info
#
# Returns existing_chars=
# {
# charID: {
# "class": str,
# "race": str,
# "gender": str,
# },
# ...
# }
# """
#
# if not self._characters:
# result = await self._get_profile([200])
#
# if result:
# self._characters = {}
#
# # loop through each character
# for characterID, character_data in result["characters"]["data"].items():
# characterID = int(characterID)
#
# # format the data correctly and convert the hashes to strings
# self._characters[characterID] = {
# "class": class_map[character_data["classHash"]],
# "race": race_map[character_data["raceHash"]],
# "gender": gender_map[character_data["genderHash"]],
# }
#
# return self._characters
# async def get_character_id_by_class(self, character_class: str) -> Optional[int]:
# """Return the matching character id if exists"""
#
# # make sure the class exists
# class_names = list(class_map.values())
# if character_class not in class_names:
# return None
#
# # loop through the chars and return the matching one
# characters = await self.get_character_info()
# if characters:
# for character_id, character_data in characters.items():
# if character_data["class"] == character_class:
# return character_id
# return None
# async def get_character_activity_stats(self, character_id: int) -> Optional[dict]:
# """Get destiny stats for the specified character"""
#
# if character_id not in self._character_activity_stats:
# url = urljoin(
# self._base_bungie_url,
# f"Destiny2/{self.system}/Account/{self.destiny_id}/Character/{character_id}/Stats/AggregateActivityStats/",
# )
# response = await get(url=url)
# if response:
# self._character_activity_stats[character_id] = response.content[
# "Response"
# ]
#
# return (
# self._character_activity_stats[character_id]
# if character_id in self._character_activity_stats
# else None
# )
# async def get_player_gear(self) -> Optional[list[dict]]:
# """Returns a list of items - equipped and unequipped"""
#
# if not self._gear:
# characters = await self.get_character_info()
#
# # not equipped on characters
# used_items = await self._get_profile([201, 205, 300], with_token=True)
# if used_items:
# item_power = {
# weapon_id: int(
# weapon_data.get("primaryStat", {"value": 0})["value"]
# )
# for weapon_id, weapon_data in used_items["itemComponents"][
# "instances"
# ]["data"].items()
# }
# item_power["none"] = 0
# for character_id in characters.keys():
# character_items = (
# used_items["characterInventories"]["data"][character_id][
# "items"
# ]
# + used_items["characterEquipment"]["data"][character_id][
# "items"
# ]
# )
# character_power_items = map(
# lambda character_item: dict(
# character_item,
# **{
# "lightlevel": item_power[
# character_item.get("itemInstanceId", "none")
# ]
# },
# ),
# character_items,
# )
# self._gear.extend(character_power_items)
#
# return self._gear
async def get_weapon_stats(self, weapon_ids: list[int], character_id: int = None, mode: int = 0) -> tuple[int, int]:
    """Return (kills, precision_kills) summed over *weapon_ids*.

    Looks the rows up in the DB, optionally restricted to one character,
    for the given activity *mode*.
    """
    rows = []
    for weapon_id in weapon_ids:
        if character_id:
            weapon_rows = await getWeaponInfo(
                membershipID=self.destiny_id,
                weaponID=weapon_id,
                characterID=character_id,
                mode=mode,
            )
        else:
            weapon_rows = await getWeaponInfo(
                membershipID=self.destiny_id,
                weaponID=weapon_id,
                mode=mode,
            )
        rows.extend(weapon_rows)

    # Each row is (_, kills, precision_kills)
    total_kills = sum(row[1] for row in rows)
    total_precision_kills = sum(row[2] for row in rows)
    return total_kills, total_precision_kills
# async def has_lowman(
# self,
# max_player_count: int,
# activity_hashes: list[int],
# require_flawless: bool = False,
# no_checkpoints: bool = False,
# disallowed: list[tuple[datetime.datetime, datetime.datetime]] = None,
# score_threshold: bool = False,
# ) -> bool:
# """Returns if player has a lowman in the given hashes. Disallowed is a list of (start_time, end_time) with datetime objects"""
#
# if disallowed is None:
# disallowed = []
#
# low_activity_info = await get_info_on_low_man_activity(
# activity_hashes=activity_hashes,
# player_count=max_player_count,
# destiny_id=self.destiny_id,
# no_checkpoints=no_checkpoints,
# score_threshold=score_threshold,
# )
#
# for (
# instance_id,
# deaths,
# kills,
# time_played_seconds,
# period,
# ) in low_activity_info:
# # check for flawless if asked for
# if not require_flawless or deaths == 0:
# verdict = True
#
# for start_time, end_time in disallowed:
# if start_time < period < end_time:
# verdict = False
# if (
# 910380154 in activity_hashes
# and kills * 60 / time_played_seconds < 1
# ):
# verdict = False
# if verdict:
# return True
# return False
# async def get_lowman_count(
# self, activity_hashes: list[int]
# ) -> tuple[int, int, Optional[datetime.timedelta]]:
# """Returns tuple[solo_count, solo_is_flawless_count, Optional[solo_fastest]]"""
#
# solo_count, solo_is_flawless_count, solo_fastest = 0, 0, None
#
# # get player data
# records = await get_info_on_low_man_activity(
# activity_hashes=activity_hashes,
# player_count=1,
# destiny_id=self.destiny_id,
# no_checkpoints=True,
# )
#
# # prepare player data
# for solo in records:
# solo_count += 1
# if solo["deaths"] == 0:
# solo_is_flawless_count += 1
# if not solo_fastest or (solo["timeplayedseconds"] < solo_fastest):
# solo_fastest = solo["timeplayedseconds"]
#
# return (
# solo_count,
# solo_is_flawless_count,
# datetime.timedelta(seconds=solo_fastest) if solo_fastest else solo_fastest,
# )
# async def get_activity_history(
# self,
# mode: int = 0,
# earliest_allowed_datetime: datetime.datetime = None,
# latest_allowed_datetime: datetime.datetime = None,
# ) -> AsyncGenerator[Optional[dict]]:
# """
# Generator which returns all activities with an extra field < activity['charid'] = character_id >
# For more Info visit https://bungie-net.github.io/multi/schema_Destiny-HistoricalStats-DestinyHistoricalStatsPeriodGroup.html#schema_Destiny-HistoricalStats-DestinyHistoricalStatsPeriodGroup
#
# :mode - Describes the mode, see https://bungie-net.github.io/multi/schema_Destiny-HistoricalStats-Definitions-DestinyActivityModeType.html#schema_Destiny-HistoricalStats-Definitions-DestinyActivityModeType
# Everything 0
# Story 2
# Strike 3
# Raid 4
# AllPvP 5
# Patrol 6
# AllPvE 7
# ...
# :earliest_allowed_time - takes datetime.datetime and describes the lower cutoff
# :latest_allowed_time - takes datetime.datetime and describes the higher cutoff
# """
#
# for character in await self.__get_full_character_list():
# character_id = character["char_id"]
#
# br = False
# page = -1
# while True:
# page += 1
#
# url = urljoin(
# self._base_bungie_url,
# f"Destiny2/{self.system}/Account/{self.destiny_id}/Character/{character_id}/Stats/Activities/",
# )
# params = {
# "mode": mode,
# "count": 250,
# "page": page,
# }
#
# # break once threshold is reached
# if br:
# break
#
# # get activities
# rep = await get(url=url, params=params)
#
# # break process if no web response is gotten and log that
# if not rep:
# logger = logging.getLogger("update_activity_db")
# logger.error(
# "Failed to get web response for destinyID '%s': WebResponse = '%s'",
# self.destiny_id,
# rep,
# )
#
# yield None
#
# # break if empty, fe. when pages are over
# if not rep.content["Response"]:
# break
#
# # loop through all activities
# for activity in rep.content["Response"]["activities"]:
# # check times if wanted
# if earliest_allowed_datetime or latest_allowed_datetime:
# activity_time = datetime.datetime.strptime(
# activity["period"], "%Y-%m-%dT%H:%M:%SZ"
# )
#
# # check if the activity started later than the earliest allowed, else break and continue with next char
# # This works bc Bungie sorts the api with the newest entry on top
# if earliest_allowed_datetime:
# if activity_time <= earliest_allowed_datetime:
# br = True
# break
#
# # check if the time is still in the timeframe, else pass this one and do the next
# if latest_allowed_datetime:
# if activity_time > latest_allowed_datetime:
# pass
#
# # add character info to the activity
# activity["charid"] = character_id
#
# yield activity
# async def update_activity_db(self, entry_time: datetime = None) -> None:
# """Gets this users not-saved history and saves it"""
#
# async def handle(i: int, t) -> Optional[list[int, datetime.datetime, dict]]:
# # get PGCR
# pgcr = await get_pgcr(i)
# if not pgcr:
# await insertFailToGetPgcrInstanceId(i, t)
# logger.warning("Failed getting pgcr <%s>", i)
# return
# return [i, t, pgcr.content["Response"]]
#
# async def input_data(
# gather_instance_ids: list[int],
# gather_activity_times: list[datetime.datetime],
# ) -> None:
# results = await asyncio.gather(*[handle(i, t) for i, t in zip(gather_instance_ids, gather_activity_times)])
#
# for result in results:
# if result:
# i = result[0]
# t = result[1]
# pgcr = result[2]
#
# # _insert information to DB
# await insertPgcrToDB(i, t, pgcr)
#
# logger = logging.getLogger("update_activity_db")
#
# if not entry_time:
# entry_time = await getLastUpdated(self.destiny_id)
# else:
# entry_time = datetime.datetime.min
#
# logger.info("Starting activity DB _update for destinyID <%s>", self.destiny_id)
#
# instance_ids = []
# activity_times = []
# success = True
# async for activity in self.get_activity_history(
# mode=0,
# earliest_allowed_datetime=entry_time,
# ):
# # break if we dont get a result
# if not activity:
# success = False
# break
#
# instance_id = activity["activityDetails"]["instanceId"]
# activity_time = datetime.datetime.strptime(activity["period"], "%Y-%m-%dT%H:%M:%SZ")
#
# # _update with newest entry timestamp
# if activity_time > entry_time:
# entry_time = activity_time
#
# # check if info is already in DB, skip if so
# if await getPgcrActivity(instance_id):
# continue
#
# # add to gather list
# instance_ids.append(instance_id)
# activity_times.append(activity_time)
#
# # gather once list is big enough
# if len(instance_ids) < 50:
# continue
# else:
# # get and input the data
# await input_data(instance_ids, activity_times)
#
# # reset gather list and restart
# instance_ids = []
# activity_times = []
#
# # one last time to clean out the extras after the code is done
# if instance_ids:
# # get and input the data
# await input_data(instance_ids, activity_times)
#
# # _update with newest entry timestamp
# if success:
# await updateLastUpdated(self.destiny_id, entry_time)
#
# logger.info("Done with activity DB _update for destinyID <%s>", self.destiny_id)
# async def __get_full_character_list(self) -> list[dict]:
# """Get character ids including deleted characters"""
#
# if not self._full_character_list:
# result = await self._get_stats()
#
# if result:
# for char in result["characters"]:
# self._full_character_list.append(
# {
# "char_id": int(char["characterId"]),
# "deleted": char["deleted"],
# }
# )
#
# return self._full_character_list
# async def __get_profile(
# self, components: list[Union[int, str]] = None, with_token: bool = False
# ) -> Optional[dict]:
# """Return info from the profile call"""
#
# # https://bungie-net.github.io/multi/schema_Destiny-DestinyComponentType.html#schema_Destiny-DestinyComponentType
#
# if components is None:
# components = []
#
# url = urljoin(
# self._base_bungie_url, f"Destiny2/{self.system}/Profile/{self.destiny_id}/"
# )
# params = {"components": ",".join(map(str, components))}
#
# if with_token:
# response = await get_with_token(
# url=url, params=params, discord_id=self.discord_id
# )
# else:
# response = await get(
# url=url,
# params=params,
# )
#
# return response.content["Response"] if response else None
# async def get_triumphs(self) -> Optional[dict]:
# """Populate the triumphs and then return them"""
#
# if not self._triumphs:
# triumphs = await self._get_profile([900])
# if triumphs:
# # get profile triumphs
# self._triumphs = triumphs["profileRecords"]["data"]["records"]
#
# # get character triumphs
# character_triumphs = [
# character_triumphs["records"]
# for character_id, character_triumphs in triumphs[
# "characterRecords"
# ]["data"].items()
# ]
#
# # combine them
# for triumph in character_triumphs:
# self._triumphs.update(triumph)
#
# return self._triumphs
#
# async def get_collectibles(self) -> Optional[dict]:
# """Populate the collectibles and then return them"""
#
# if not self._collectibles:
# collectibles = await self._get_profile([800])
# if collectibles:
# # get profile collectibles
# self._collectibles = collectibles
#
# return self._collectibles
#
# async def get_metrics(self) -> Optional[dict]:
# """Populate the metrics and then return them"""
#
# if not self._metrics:
# metrics = await self._get_profile([1100])
# if metrics:
# # get profile metrics
# self._metrics = metrics["metrics"]["data"]["metrics"]
#
# return self._metrics
#
# async def get_stats(self) -> Optional[dict]:
# """Get destiny stats"""
#
# if not self._stats:
# url = urljoin(
# self._base_bungie_url,
# f"Destiny2/{self.system}/Account/{self.destiny_id}/Stats/",
# )
# response = await get(url=url)
# if response:
# self._stats = response.content["Response"]
#
# return self._stats
# async def get_inventory_bucket(self, bucket: int = 138197802) -> Optional[list]:
# """Returns all items in bucket. Default is vault hash, for others search "bucket" at https://data.destinysets.com/"""
#
# result = await self._get_profile([102], with_token=True)
# if not result:
# return None
# all_items = result["profileInventory"]["data"]["items"]
# items = []
# for item in all_items:
# if item["bucketHash"] == bucket:
# items.append(item)
#
# return items
|
import os
import psycopg2
from sqlalchemy import create_engine
from scraper import *
from heroku_credentials import *
def push_to_heroku(df: pd.DataFrame) -> None:
    """
    Push scraped data to a Heroku Postgres database.

    Creates the ``categories`` and ``items`` tables (if missing), inserts the
    unique category names, then bulk-inserts the items with a foreign key to
    their category.

    :param df: pandas dataframe with a 'category' column plus the item
        columns (title, price, item_url, img_url).
    :return: None, data sits on Heroku Postgres database.
    """
    # Heroku connection
    sql_connection = psycopg2.connect(
        database=DATABASE,
        user=USER,
        password=PASSWORD,
        host=HOST,
        port=PORT
    )
    cur = sql_connection.cursor()
    # Create the two tables on first run.
    cur.execute('''
        CREATE TABLE IF NOT EXISTS categories (
            id serial PRIMARY KEY,
            category varchar(250)
        );
        CREATE TABLE IF NOT EXISTS items (
            id serial PRIMARY KEY,
            category_id int,
            title varchar(250),
            price float(2),
            item_url varchar(500),
            img_url varchar(500),
            FOREIGN KEY (category_id) REFERENCES categories(id)
        );
    ''')
    # Get array of unique category names.
    unique_categories = df['category'].unique()
    # Insert unique category names into the categories table.
    # Bug fix: use a parameterized query instead of an f-string -- the old
    # code was vulnerable to SQL injection and broke on names containing "'".
    for category in unique_categories:
        cur.execute("INSERT INTO categories (category) VALUES (%s);", (category,))
    # Commit and close connection.
    sql_connection.commit()
    sql_connection.close()
    # Map each category to its serial id.
    # NOTE(review): assumes a freshly-created categories table whose serial
    # ids start at 1 -- same assumption as the original implementation.
    foreign_key_df = pd.DataFrame(unique_categories, columns=['category'])
    foreign_key_df['category_id'] = np.arange(1, len(foreign_key_df) + 1)
    # Replace the textual category with its id on each item row.
    items_df = pd.merge(
        df,
        foreign_key_df,
        on='category'
    ).drop(columns='category')
    # Put category id as first column.
    items_df.insert(0, 'category_id', items_df.pop('category_id'))
    # Connect to Heroku Postgres via SQLAlchemy and push the items.
    conn = create_engine(POSTGRES_URI)
    items_df.to_sql('items', conn, method='multi', if_exists='append', chunksize=10000, index=False)
    conn.dispose()
def get_csv(path: str) -> None:
    """
    Export the joined items/categories data to ``etsy_data.csv``.

    :param path: Directory in which to save the csv file.
    :return: None, csv file saved in the local directory.
    """
    # Heroku connection
    sql_connection = psycopg2.connect(
        database=DATABASE,
        user=USER,
        password=PASSWORD,
        host=HOST,
        port=PORT
    )
    cur = sql_connection.cursor()
    # Join tables on category id.
    # Bug fix: the two literals used to concatenate without a separating
    # space, producing "...items.img_urlFROM items..." -- invalid SQL.
    s = "SELECT items.id, categories.category, items.title, items.price, items.item_url, items.img_url " \
        "FROM items JOIN categories ON categories.id = items.category_id ORDER BY id ASC"
    # Wrap the query in COPY so the server streams csv straight to the file.
    sql_for_file_output = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(s)
    # Full path of the output file.
    t_path_n_file = os.path.join(path, "etsy_data.csv")
    # Create and save csv of etsy data.
    with open(t_path_n_file, 'w') as f_output:
        cur.copy_expert(sql_for_file_output, f_output)
    sql_connection.close()
|
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class VFun(nn.Module):
    """State-value network: a 3-layer MLP mapping a state vector to one scalar."""

    def __init__(self, args):
        """Build the layers from ``args.state_dim`` and ``args.hidden_dim``."""
        super(VFun, self).__init__()
        self.args = args
        self.fc1 = nn.Linear(args.state_dim, args.hidden_dim)
        self.fc2 = nn.Linear(args.hidden_dim, args.hidden_dim)
        self.fc3 = nn.Linear(args.hidden_dim, 1)

    def forward(self, state):
        """Return the value estimate for ``state`` (shape ``(..., 1)``)."""
        hidden = F.relu(self.fc2(F.relu(self.fc1(state))))
        return self.fc3(hidden)
|
def parse_api_version_header(req):
    """Return the value of the request's API-VERSION header, or None when absent."""
    headers = req.headers
    if "API-VERSION" not in headers:
        return None
    return headers["API-VERSION"]
|
import datetime
import csv
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
# Create your views here.
from .models import ObjectViewed
@login_required
def analytics_page(request):
    """Render the analytics dashboard; visible to superusers only (404 otherwise)."""
    if request.user.is_superuser:
        return render(request, "main_page.html", {})
    raise Http404
@login_required
def analytics_download(request):
    """Return a CSV attachment of ObjectViewed rows in the POSTed date range.

    Expects POST parameters ``from`` and ``to`` as YYYY-MM-DD; swaps them if
    given out of order. Superusers only -- anyone else gets a 404, as do
    missing/malformed dates.
    """
    if not request.user.is_superuser:
        raise Http404
    try:
        from_date = datetime.datetime.strptime(request.POST.get("from"), "%Y-%m-%d")
        to_date = datetime.datetime.strptime(request.POST.get("to"), "%Y-%m-%d")
    except (TypeError, ValueError):
        # TypeError: parameter missing (POST.get returned None);
        # ValueError: not a valid YYYY-MM-DD date. The old bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit.
        raise Http404
    if from_date > to_date:
        from_date, to_date = to_date, from_date
    objs = ObjectViewed.objects.filter(timestamp__gte=from_date, timestamp__lte=to_date)
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="{}-{}.csv"'.format(
        from_date.date(), to_date.date()
    )
    writer = csv.writer(response)
    for obj in objs:
        try:
            writer.writerow(
                [
                    obj.user.user.user.username,
                    obj.ip_address,
                    obj.content_type,
                    obj.content_object,
                    obj.timestamp,
                ]
            )
        except Exception:
            # Best-effort export: skip rows that fail to serialize (e.g. a
            # broken obj.user chain) instead of aborting the whole download.
            # Narrowed from a bare `except:` so Ctrl-C still works.
            pass
    return response
|
from host import host
class host_hm(host):
    """Subclass of ``host`` with no overrides yet; behaves exactly like ``host``."""
    pass
|
####################################################################
# Copyright (c) 2019 Nobuyuki Umetani #
# #
# This source code is licensed under the MIT license found in the #
# LICENSE file in the root directory of this source tree. #
####################################################################
import numpy
import PyDelFEM2 as dfm2
import PyDelFEM2.gl.glfw
def make_mesh():
    """Build a tetrahedral mesh from the isosurface of two overlapping sphere SDFs."""
    sphere_left = dfm2.CppSDF3_Sphere(0.55, [-0.5, 0, 0], True)
    sphere_right = dfm2.CppSDF3_Sphere(0.55, [+0.5, 0, 0], True)
    xyz, tet = dfm2.isosurface([sphere_left, sphere_right])
    print(xyz.shape, tet.shape)
    return dfm2.Mesh(xyz, tet, dfm2.TET)
def poission(msh, npIdP0, npIdP1):
    """Solve a scalar Poisson problem on *msh* (Dirichlet values 0/1 at the two
    node sets), print the convergence history, and show the result.

    NOTE(review): the name keeps the original "poission" spelling because
    callers elsewhere use it.
    """
    fem = dfm2.FEM_ScalarPoisson()
    fem.updated_topology(msh)
    # Mark the two boundary-condition groups, then pin their values.
    fem.ls.bc[npIdP0] = 1
    fem.ls.bc[npIdP1] = 2
    fem.value[:] = 0.5
    fem.value[npIdP0] = 0.0
    fem.value[npIdP1] = 1.0
    fem.solve()
    print(fem.ls.conv_hist)
    # Visualization.
    field = dfm2.gl.VisFEM_ColorContour(fem, name_color="value")
    field.set_color_minmax()
    axis = dfm2.gl.AxisXYZ(1.0)
    dfm2.gl.glfw.winDraw3d([field, axis])
def diffuse(msh, npIdP0, npIdP1):
    """Run the scalar diffusion demo with the value pinned to 1 at npIdP1.

    ``npIdP0`` is accepted for signature parity with ``poission`` but unused.
    """
    fem = dfm2.FEM_ScalarDiffuse()
    fem.updated_topology(msh)
    fem.ls.bc[npIdP1] = 1
    fem.value[:] = 0.0
    fem.value[npIdP1] = 1.0
    # Visualization -- fem itself is in the draw list (presumably so the
    # window loop steps the simulation; see winDraw3d docs to confirm).
    field = dfm2.gl.VisFEM_ColorContour(fem, name_color="value")
    field.draw_val_min = 0.0
    field.draw_val_max = 1.0
    axis = dfm2.gl.AxisXYZ(1.0)
    dfm2.gl.glfw.winDraw3d([fem, field, axis])
def linear_solid_static(msh, npIdP):
    """Static linear-elastic solve under x-gravity with nodes npIdP fully fixed;
    shows the displacement field and writes it to ``linearsolid3d.vtk``."""
    fem = dfm2.FEM_SolidLinearStatic()
    fem.param_gravity_x = +0.3
    fem.updated_topology(msh)
    fem.ls.bc[npIdP, :] = 1  # fix all displacement components of these nodes
    fem.solve()
    print(fem.ls.conv_hist)
    # Visualization + VTK export.
    disp = dfm2.gl.VisFEM_ColorContour(fem, name_disp="vec_val")
    axis = dfm2.gl.AxisXYZ(1.0)
    dfm2.gl.glfw.winDraw3d([disp, axis])
    disp.write_vtk("linearsolid3d.vtk")
def linear_solid_dynamic(msh, npIdP):
    """Dynamic linear-elastic demo under x-gravity with nodes npIdP fully fixed."""
    fem = dfm2.FEM_SolidLinearDynamic()
    fem.param_gravity_x = +0.3
    fem.updated_topology(msh)
    fem.ls.bc[npIdP, :] = 1  # fix all displacement components of these nodes
    # Visualization -- fem in the draw list so the window animates the solve.
    disp = dfm2.gl.VisFEM_ColorContour(fem, name_disp="vec_val")
    axis = dfm2.gl.AxisXYZ(1.0)
    dfm2.gl.glfw.winDraw3d([fem, disp, axis])
def main():
    """Demo driver: build the two-sphere mesh and run each FEM example in turn."""
    msh = make_mesh()
    # Node sets beyond x=+1 and x=-1 serve as the boundary-condition groups.
    nodes_right = numpy.where(msh.np_pos[:, 0] > +1)
    nodes_left = numpy.where(msh.np_pos[:, 0] < -1)
    poission(msh, nodes_right, nodes_left)
    diffuse(msh, nodes_right, nodes_left)
    linear_solid_static(msh, nodes_left)
    linear_solid_dynamic(msh, nodes_left)


if __name__ == "__main__":
    main()
# Package-level singletons. Import ``config`` and ``logger`` from here
# instead of constructing your own instances.
from .utils import Config as _Config, get_logger as _get_logger

# creates a global config (shared by the whole package)
config = _Config()

# creates a global logger (shared by the whole package)
logger = _get_logger()
|
from crawlster.helpers.extract import Content
class HttpResponse(object):
    """Class representing a http response"""

    def __init__(self, request, status_code, headers, body):
        """Initializes the http response object

        Args:
            request (HttpRequest):
                The request that produces this response
            status_code (int):
                The status code as a number
            headers (dict):
                The response headers
            body (bytes, str or None):
                The body of the response, if any. A str is encoded to
                bytes; None means no body.

        Raises:
            TypeError: if body is neither bytes, str nor None.
        """
        self.request = request
        self.status_code = status_code
        self.headers = headers
        if isinstance(body, str):
            body = body.encode()
        # Bug fix: the docstring allows body to be None ("if any"), but the
        # old check raised TypeError for None. Accept None explicitly.
        if body is not None and not isinstance(body, bytes):
            raise TypeError(
                'body must be in bytes, not {}'.format(type(body).__name__))
        self.body = body

    @property
    def body_str(self):
        """Returns the decoded content of the request, if possible.

        May raise UnicodeDecodeError if the body does not represent a valid
        unicode encoded sequence.
        """
        return self.body.decode()

    @property
    def body_bytes(self):
        """The raw body as bytes (or None when there is no body)."""
        return self.body

    @property
    def server(self):
        """Returns the server header if available"""
        return self.headers.get('Server')

    @property
    def content_type(self):
        """Returns the response content type if available"""
        return self.headers.get('Content-Type')

    def is_success(self):
        """Return True when the status code is below 400."""
        return self.status_code < 400

    @property
    def extract(self):
        """Content helper wrapping the decoded body for data extraction."""
        return Content(self.body_str)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import os
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.protocol.models import Twin, TwinProperties
iothub_connection_str = os.getenv("IOTHUB_CONNECTION_STRING")
device_id = os.getenv("IOTHUB_DEVICE_ID")
module_id = os.getenv("IOTHUB_MODULE_ID")
def print_module_info(title, iothub_module):
    """Print *title* followed by one 'iothubModule.<field> = <value>' line per
    field of interest, then a blank line."""
    print(title + ":")
    fields = (
        "device_id",
        "module_id",
        "managed_by",
        "generation_id",
        "etag",
        "connection_state",
        "connection_state_updated_time",
        "last_activity_time",
        "cloud_to_device_message_count",
        "authentication",
    )
    for field in fields:
        print("iothubModule.{0} = {1}".format(field, getattr(iothub_module, field)))
    print("")
try:
    # RegistryManager (connection string from the environment, see top of file)
    iothub_registry_manager = IoTHubRegistryManager(iothub_connection_str)

    # Create Module
    primary_key = "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnoo"
    secondary_key = "111222333444555666777888999000aaabbbcccdddee"
    managed_by = ""
    new_module = iothub_registry_manager.create_module_with_sas(
        device_id, module_id, managed_by, primary_key, secondary_key
    )
    print_module_info("Create Module", new_module)

    # Get Module
    iothub_module = iothub_registry_manager.get_module(device_id, module_id)
    print_module_info("Get Module", iothub_module)

    # Update Module (new keys, new managed_by; etag guards against races)
    primary_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    secondary_key = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"
    managed_by = "testManagedBy"
    updated_module = iothub_registry_manager.update_module_with_sas(
        device_id, module_id, managed_by, iothub_module.etag, primary_key, secondary_key
    )
    print_module_info("Update Module", updated_module)

    # Get Module Twin
    module_twin = iothub_registry_manager.get_module_twin(device_id, module_id)
    print(module_twin)

    # Replace Twin. Bug fix: the old code built an unused Twin() and then
    # immediately rebound the name to module_twin -- the dead store is gone.
    new_twin = module_twin
    new_twin.properties = TwinProperties(desired={'telemetryInterval': 9000})
    print(new_twin)
    print("")
    replaced_module_twin = iothub_registry_manager.replace_module_twin(device_id, module_id, new_twin)
    print(replaced_module_twin)
    print("")

    # Update twin with a partial patch (only the desired property changes)
    twin_patch = Twin()
    twin_patch.properties = TwinProperties(desired={'telemetryInterval': 3000})
    updated_module_twin = iothub_registry_manager.update_module_twin(device_id, module_id, twin_patch, module_twin.etag)
    print(updated_module_twin)
    print("")

    # Get all modules on the device
    all_modules = iothub_registry_manager.get_modules(device_id)
    for module in all_modules:
        print_module_info("", module)

    # Delete Module
    iothub_registry_manager.delete_module(device_id, module_id)
    print("Deleted Module {0}".format(module_id))
except Exception as ex:
    print("Unexpected error {0}".format(ex))
except KeyboardInterrupt:
    print("IoTHubRegistryManager sample stopped")
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord

# Threshold constants for the Japanese 2-character context frequency
# analysis (used with the jp2CharContext table below).
NUM_OF_CATEGORY = 6          # number of frequency categories in the table
DONT_KNOW = -1               # sentinel: character order could not be determined
ENOUGH_REL_THRESHOLD = 100   # relation count considered sufficient for a confident score
MAX_REL_THRESHOLD = 1000     # upper bound on counted relations
MINIMUM_DATA_THRESHOLD = 4   # below this many relations, no conclusion is drawn
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
),
(
2,
4,
0,
4,
0,
3,
0,
4,
0,
3,
4,
4,
4,
2,
4,
3,
3,
4,
3,
2,
3,
3,
4,
2,
3,
3,
3,
2,
4,
1,
4,
3,
3,
1,
5,
4,
3,
4,
3,
4,
3,
5,
3,
0,
3,
5,
4,
2,
0,
3,
1,
0,
3,
3,
0,
3,
3,
0,
1,
1,
0,
4,
3,
0,
3,
3,
0,
4,
0,
2,
0,
3,
5,
5,
5,
5,
4,
0,
4,
1,
0,
3,
4,
),
(
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
),
(
0,
4,
0,
5,
0,
5,
0,
4,
0,
4,
5,
4,
4,
3,
5,
3,
5,
1,
5,
3,
4,
3,
4,
4,
3,
4,
3,
3,
4,
3,
5,
4,
4,
3,
5,
5,
3,
5,
5,
5,
3,
5,
5,
3,
4,
5,
5,
3,
1,
3,
2,
0,
3,
4,
0,
4,
2,
0,
4,
2,
1,
5,
3,
2,
3,
5,
0,
4,
0,
2,
0,
5,
4,
4,
5,
4,
5,
0,
4,
0,
0,
4,
4,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
(
0,
3,
0,
4,
0,
3,
0,
3,
0,
4,
5,
4,
3,
3,
3,
3,
4,
3,
5,
4,
4,
3,
5,
4,
4,
3,
4,
3,
4,
4,
4,
4,
5,
3,
4,
4,
3,
4,
5,
5,
4,
5,
5,
1,
4,
5,
4,
3,
0,
3,
3,
1,
3,
3,
0,
4,
4,
0,
3,
3,
1,
5,
3,
3,
3,
5,
0,
4,
0,
3,
0,
4,
4,
3,
4,
3,
3,
0,
4,
1,
1,
3,
4,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
(
0,
4,
0,
3,
0,
3,
0,
4,
0,
3,
4,
4,
3,
2,
2,
1,
2,
1,
3,
1,
3,
3,
3,
3,
3,
4,
3,
1,
3,
3,
5,
3,
3,
0,
4,
3,
0,
5,
4,
3,
3,
5,
4,
4,
3,
4,
4,
5,
0,
1,
2,
0,
1,
2,
0,
2,
2,
0,
1,
0,
0,
5,
2,
2,
1,
4,
0,
3,
0,
1,
0,
4,
4,
3,
5,
4,
3,
0,
2,
1,
0,
4,
3,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
(
0,
3,
0,
5,
0,
4,
0,
2,
1,
4,
4,
2,
4,
1,
4,
2,
4,
2,
4,
3,
3,
3,
4,
3,
3,
3,
3,
1,
4,
2,
3,
3,
3,
1,
4,
4,
1,
1,
1,
4,
3,
3,
2,
0,
2,
4,
3,
2,
0,
3,
3,
0,
3,
1,
1,
0,
0,
0,
3,
3,
0,
4,
2,
2,
3,
4,
0,
4,
0,
3,
0,
4,
4,
5,
3,
4,
4,
0,
3,
0,
0,
1,
4,
),
(
1,
4,
0,
4,
0,
4,
0,
4,
0,
3,
5,
4,
4,
3,
4,
3,
5,
4,
3,
3,
4,
3,
5,
4,
4,
4,
4,
3,
4,
2,
4,
3,
3,
1,
5,
4,
3,
2,
4,
5,
4,
5,
5,
4,
4,
5,
4,
4,
0,
3,
2,
2,
3,
3,
0,
4,
3,
1,
3,
2,
1,
4,
3,
3,
4,
5,
0,
3,
0,
2,
0,
4,
5,
5,
4,
5,
4,
0,
4,
0,
0,
5,
4,
),
(
0,
5,
0,
5,
0,
4,
0,
3,
0,
4,
4,
3,
4,
3,
3,
3,
4,
0,
4,
4,
4,
3,
4,
3,
4,
3,
3,
1,
4,
2,
4,
3,
4,
0,
5,
4,
1,
4,
5,
4,
4,
5,
3,
2,
4,
3,
4,
3,
2,
4,
1,
3,
3,
3,
2,
3,
2,
0,
4,
3,
3,
4,
3,
3,
3,
4,
0,
4,
0,
3,
0,
4,
5,
4,
4,
4,
3,
0,
4,
1,
0,
1,
3,
),
(
0,
3,
1,
4,
0,
3,
0,
2,
0,
3,
4,
4,
3,
1,
4,
2,
3,
3,
4,
3,
4,
3,
4,
3,
4,
4,
3,
2,
3,
1,
5,
4,
4,
1,
4,
4,
3,
5,
4,
4,
3,
5,
5,
4,
3,
4,
4,
3,
1,
2,
3,
1,
2,
2,
0,
3,
2,
0,
3,
1,
0,
5,
3,
3,
3,
4,
3,
3,
3,
3,
4,
4,
4,
4,
5,
4,
2,
0,
3,
3,
2,
4,
3,
),
(
0,
2,
0,
3,
0,
1,
0,
1,
0,
0,
3,
2,
0,
0,
2,
0,
1,
0,
2,
1,
3,
3,
3,
1,
2,
3,
1,
0,
1,
0,
4,
2,
1,
1,
3,
3,
0,
4,
3,
3,
1,
4,
3,
3,
0,
3,
3,
2,
0,
0,
0,
0,
1,
0,
0,
2,
0,
0,
0,
0,
0,
4,
1,
0,
2,
3,
2,
2,
2,
1,
3,
3,
3,
4,
4,
3,
2,
0,
3,
1,
0,
3,
3,
),
(
0,
4,
0,
4,
0,
3,
0,
3,
0,
4,
4,
4,
3,
3,
3,
3,
3,
3,
4,
3,
4,
2,
4,
3,
4,
3,
3,
2,
4,
3,
4,
5,
4,
1,
4,
5,
3,
5,
4,
5,
3,
5,
4,
0,
3,
5,
5,
3,
1,
3,
3,
2,
2,
3,
0,
3,
4,
1,
3,
3,
2,
4,
3,
3,
3,
4,
0,
4,
0,
3,
0,
4,
5,
4,
4,
5,
3,
0,
4,
1,
0,
3,
4,
),
(
0,
2,
0,
3,
0,
3,
0,
0,
0,
2,
2,
2,
1,
0,
1,
0,
0,
0,
3,
0,
3,
0,
3,
0,
1,
3,
1,
0,
3,
1,
3,
3,
3,
1,
3,
3,
3,
0,
1,
3,
1,
3,
4,
0,
0,
3,
1,
1,
0,
3,
2,
0,
0,
0,
0,
1,
3,
0,
1,
0,
0,
3,
3,
2,
0,
3,
0,
0,
0,
0,
0,
3,
4,
3,
4,
3,
3,
0,
3,
0,
0,
2,
3,
),
(
2,
3,
0,
3,
0,
2,
0,
1,
0,
3,
3,
4,
3,
1,
3,
1,
1,
1,
3,
1,
4,
3,
4,
3,
3,
3,
0,
0,
3,
1,
5,
4,
3,
1,
4,
3,
2,
5,
5,
4,
4,
4,
4,
3,
3,
4,
4,
4,
0,
2,
1,
1,
3,
2,
0,
1,
2,
0,
0,
1,
0,
4,
1,
3,
3,
3,
0,
3,
0,
1,
0,
4,
4,
4,
5,
5,
3,
0,
2,
0,
0,
4,
4,
),
(
0,
2,
0,
1,
0,
3,
1,
3,
0,
2,
3,
3,
3,
0,
3,
1,
0,
0,
3,
0,
3,
2,
3,
1,
3,
2,
1,
1,
0,
0,
4,
2,
1,
0,
2,
3,
1,
4,
3,
2,
0,
4,
4,
3,
1,
3,
1,
3,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
4,
1,
1,
1,
2,
0,
3,
0,
0,
0,
3,
4,
2,
4,
3,
2,
0,
1,
0,
0,
3,
3,
),
(
0,
1,
0,
4,
0,
5,
0,
4,
0,
2,
4,
4,
2,
3,
3,
2,
3,
3,
5,
3,
3,
3,
4,
3,
4,
2,
3,
0,
4,
3,
3,
3,
4,
1,
4,
3,
2,
1,
5,
5,
3,
4,
5,
1,
3,
5,
4,
2,
0,
3,
3,
0,
1,
3,
0,
4,
2,
0,
1,
3,
1,
4,
3,
3,
3,
3,
0,
3,
0,
1,
0,
3,
4,
4,
4,
5,
5,
0,
3,
0,
1,
4,
5,
),
(
0,
2,
0,
3,
0,
3,
0,
0,
0,
2,
3,
1,
3,
0,
4,
0,
1,
1,
3,
0,
3,
4,
3,
2,
3,
1,
0,
3,
3,
2,
3,
1,
3,
0,
2,
3,
0,
2,
1,
4,
1,
2,
2,
0,
0,
3,
3,
0,
0,
2,
0,
0,
0,
1,
0,
0,
0,
0,
2,
2,
0,
3,
2,
1,
3,
3,
0,
2,
0,
2,
0,
0,
3,
3,
1,
2,
4,
0,
3,
0,
2,
2,
3,
),
(
2,
4,
0,
5,
0,
4,
0,
4,
0,
2,
4,
4,
4,
3,
4,
3,
3,
3,
1,
2,
4,
3,
4,
3,
4,
4,
5,
0,
3,
3,
3,
3,
2,
0,
4,
3,
1,
4,
3,
4,
1,
4,
4,
3,
3,
4,
4,
3,
1,
2,
3,
0,
4,
2,
0,
4,
1,
0,
3,
3,
0,
4,
3,
3,
3,
4,
0,
4,
0,
2,
0,
3,
5,
3,
4,
5,
2,
0,
3,
0,
0,
4,
5,
),
(
0,
3,
0,
4,
0,
1,
0,
1,
0,
1,
3,
2,
2,
1,
3,
0,
3,
0,
2,
0,
2,
0,
3,
0,
2,
0,
0,
0,
1,
0,
1,
1,
0,
0,
3,
1,
0,
0,
0,
4,
0,
3,
1,
0,
2,
1,
3,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
0,
0,
0,
0,
4,
2,
2,
3,
1,
0,
3,
0,
0,
0,
1,
4,
4,
4,
3,
0,
0,
4,
0,
0,
1,
4,
),
(
1,
4,
1,
5,
0,
3,
0,
3,
0,
4,
5,
4,
4,
3,
5,
3,
3,
4,
4,
3,
4,
1,
3,
3,
3,
3,
2,
1,
4,
1,
5,
4,
3,
1,
4,
4,
3,
5,
4,
4,
3,
5,
4,
3,
3,
4,
4,
4,
0,
3,
3,
1,
2,
3,
0,
3,
1,
0,
3,
3,
0,
5,
4,
4,
4,
4,
4,
4,
3,
3,
5,
4,
4,
3,
3,
5,
4,
0,
3,
2,
0,
4,
4,
),
(
0,
2,
0,
3,
0,
1,
0,
0,
0,
1,
3,
3,
3,
2,
4,
1,
3,
0,
3,
1,
3,
0,
2,
2,
1,
1,
0,
0,
2,
0,
4,
3,
1,
0,
4,
3,
0,
4,
4,
4,
1,
4,
3,
1,
1,
3,
3,
1,
0,
2,
0,
0,
1,
3,
0,
0,
0,
0,
2,
0,
0,
4,
3,
2,
4,
3,
5,
4,
3,
3,
3,
4,
3,
3,
4,
3,
3,
0,
2,
1,
0,
3,
3,
),
(
0,
2,
0,
4,
0,
3,
0,
2,
0,
2,
5,
5,
3,
4,
4,
4,
4,
1,
4,
3,
3,
0,
4,
3,
4,
3,
1,
3,
3,
2,
4,
3,
0,
3,
4,
3,
0,
3,
4,
4,
2,
4,
4,
0,
4,
5,
3,
3,
2,
2,
1,
1,
1,
2,
0,
1,
5,
0,
3,
3,
2,
4,
3,
3,
3,
4,
0,
3,
0,
2,
0,
4,
4,
3,
5,
5,
0,
0,
3,
0,
2,
3,
3,
),
(
0,
3,
0,
4,
0,
3,
0,
1,
0,
3,
4,
3,
3,
1,
3,
3,
3,
0,
3,
1,
3,
0,
4,
3,
3,
1,
1,
0,
3,
0,
3,
3,
0,
0,
4,
4,
0,
1,
5,
4,
3,
3,
5,
0,
3,
3,
4,
3,
0,
2,
0,
1,
1,
1,
0,
1,
3,
0,
1,
2,
1,
3,
3,
2,
3,
3,
0,
3,
0,
1,
0,
1,
3,
3,
4,
4,
1,
0,
1,
2,
2,
1,
3,
),
(
0,
1,
0,
4,
0,
4,
0,
3,
0,
1,
3,
3,
3,
2,
3,
1,
1,
0,
3,
0,
3,
3,
4,
3,
2,
4,
2,
0,
1,
0,
4,
3,
2,
0,
4,
3,
0,
5,
3,
3,
2,
4,
4,
4,
3,
3,
3,
4,
0,
1,
3,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
4,
2,
3,
3,
3,
0,
3,
0,
0,
0,
4,
4,
4,
5,
3,
2,
0,
3,
3,
0,
3,
5,
),
(
0,
2,
0,
3,
0,
0,
0,
3,
0,
1,
3,
0,
2,
0,
0,
0,
1,
0,
3,
1,
1,
3,
3,
0,
0,
3,
0,
0,
3,
0,
2,
3,
1,
0,
3,
1,
0,
3,
3,
2,
0,
4,
2,
2,
0,
2,
0,
0,
0,
4,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
1,
3,
1,
2,
0,
0,
0,
1,
0,
0,
1,
4,
),
(
0,
3,
0,
3,
0,
5,
0,
1,
0,
2,
4,
3,
1,
3,
3,
2,
1,
1,
5,
2,
1,
0,
5,
1,
2,
0,
0,
0,
3,
3,
2,
2,
3,
2,
4,
3,
0,
0,
3,
3,
1,
3,
3,
0,
2,
5,
3,
4,
0,
3,
3,
0,
1,
2,
0,
2,
2,
0,
3,
2,
0,
2,
2,
3,
3,
3,
0,
2,
0,
1,
0,
3,
4,
4,
2,
5,
4,
0,
3,
0,
0,
3,
5,
),
(
0,
3,
0,
3,
0,
3,
0,
1,
0,
3,
3,
3,
3,
0,
3,
0,
2,
0,
2,
1,
1,
0,
2,
0,
1,
0,
0,
0,
2,
1,
0,
0,
1,
0,
3,
2,
0,
0,
3,
3,
1,
2,
3,
1,
0,
3,
3,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
2,
3,
1,
2,
3,
0,
3,
0,
1,
0,
3,
2,
1,
0,
4,
3,
0,
1,
1,
0,
3,
3,
),
(
0,
4,
0,
5,
0,
3,
0,
3,
0,
4,
5,
5,
4,
3,
5,
3,
4,
3,
5,
3,
3,
2,
5,
3,
4,
4,
4,
3,
4,
3,
4,
5,
5,
3,
4,
4,
3,
4,
4,
5,
4,
4,
4,
3,
4,
5,
5,
4,
2,
3,
4,
2,
3,
4,
0,
3,
3,
1,
4,
3,
2,
4,
3,
3,
5,
5,
0,
3,
0,
3,
0,
5,
5,
5,
5,
4,
4,
0,
4,
0,
1,
4,
4,
),
(
0,
4,
0,
4,
0,
3,
0,
3,
0,
3,
5,
4,
4,
2,
3,
2,
5,
1,
3,
2,
5,
1,
4,
2,
3,
2,
3,
3,
4,
3,
3,
3,
3,
2,
5,
4,
1,
3,
3,
5,
3,
4,
4,
0,
4,
4,
3,
1,
1,
3,
1,
0,
2,
3,
0,
2,
3,
0,
3,
0,
0,
4,
3,
1,
3,
4,
0,
3,
0,
2,
0,
4,
4,
4,
3,
4,
5,
0,
4,
0,
0,
3,
4,
),
(
0,
3,
0,
3,
0,
3,
1,
2,
0,
3,
4,
4,
3,
3,
3,
0,
2,
2,
4,
3,
3,
1,
3,
3,
3,
1,
1,
0,
3,
1,
4,
3,
2,
3,
4,
4,
2,
4,
4,
4,
3,
4,
4,
3,
2,
4,
4,
3,
1,
3,
3,
1,
3,
3,
0,
4,
1,
0,
2,
2,
1,
4,
3,
2,
3,
3,
5,
4,
3,
3,
5,
4,
4,
3,
3,
0,
4,
0,
3,
2,
2,
4,
4,
),
(
0,
2,
0,
1,
0,
0,
0,
0,
0,
1,
2,
1,
3,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
0,
0,
3,
0,
0,
1,
0,
1,
1,
3,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
2,
0,
3,
4,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
),
(
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
4,
0,
4,
1,
4,
0,
3,
0,
4,
0,
3,
0,
4,
0,
3,
0,
3,
0,
4,
1,
5,
1,
4,
0,
0,
3,
0,
5,
0,
5,
2,
0,
1,
0,
0,
0,
2,
1,
4,
0,
1,
3,
0,
0,
3,
0,
0,
3,
1,
1,
4,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
),
(
1,
4,
0,
5,
0,
3,
0,
2,
0,
3,
5,
4,
4,
3,
4,
3,
5,
3,
4,
3,
3,
0,
4,
3,
3,
3,
3,
3,
3,
2,
4,
4,
3,
1,
3,
4,
4,
5,
4,
4,
3,
4,
4,
1,
3,
5,
4,
3,
3,
3,
1,
2,
2,
3,
3,
1,
3,
1,
3,
3,
3,
5,
3,
3,
4,
5,
0,
3,
0,
3,
0,
3,
4,
3,
4,
4,
3,
0,
3,
0,
2,
4,
3,
),
(
0,
1,
0,
4,
0,
0,
0,
0,
0,
1,
4,
0,
4,
1,
4,
2,
4,
0,
3,
0,
1,
0,
1,
0,
0,
0,
0,
0,
2,
0,
3,
1,
1,
1,
0,
3,
0,
0,
0,
1,
2,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
3,
0,
0,
0,
0,
3,
2,
0,
2,
2,
0,
1,
0,
0,
0,
2,
3,
2,
3,
3,
0,
0,
0,
0,
2,
1,
0,
),
(
0,
5,
1,
5,
0,
3,
0,
3,
0,
5,
4,
4,
5,
1,
5,
3,
3,
0,
4,
3,
4,
3,
5,
3,
4,
3,
3,
2,
4,
3,
4,
3,
3,
0,
3,
3,
1,
4,
4,
3,
4,
4,
4,
3,
4,
5,
5,
3,
2,
3,
1,
1,
3,
3,
1,
3,
1,
1,
3,
3,
2,
4,
5,
3,
3,
5,
0,
4,
0,
3,
0,
4,
4,
3,
5,
3,
3,
0,
3,
4,
0,
4,
3,
),
(
0,
5,
0,
5,
0,
3,
0,
2,
0,
4,
4,
3,
5,
2,
4,
3,
3,
3,
4,
4,
4,
3,
5,
3,
5,
3,
3,
1,
4,
0,
4,
3,
3,
0,
3,
3,
0,
4,
4,
4,
4,
5,
4,
3,
3,
5,
5,
3,
2,
3,
1,
2,
3,
2,
0,
1,
0,
0,
3,
2,
2,
4,
4,
3,
1,
5,
0,
4,
0,
3,
0,
4,
3,
1,
3,
2,
1,
0,
3,
3,
0,
3,
3,
),
(
0,
4,
0,
5,
0,
5,
0,
4,
0,
4,
5,
5,
5,
3,
4,
3,
3,
2,
5,
4,
4,
3,
5,
3,
5,
3,
4,
0,
4,
3,
4,
4,
3,
2,
4,
4,
3,
4,
5,
4,
4,
5,
5,
0,
3,
5,
5,
4,
1,
3,
3,
2,
3,
3,
1,
3,
1,
0,
4,
3,
1,
4,
4,
3,
4,
5,
0,
4,
0,
2,
0,
4,
3,
4,
4,
3,
3,
0,
4,
0,
0,
5,
5,
),
(
0,
4,
0,
4,
0,
5,
0,
1,
1,
3,
3,
4,
4,
3,
4,
1,
3,
0,
5,
1,
3,
0,
3,
1,
3,
1,
1,
0,
3,
0,
3,
3,
4,
0,
4,
3,
0,
4,
4,
4,
3,
4,
4,
0,
3,
5,
4,
1,
0,
3,
0,
0,
2,
3,
0,
3,
1,
0,
3,
1,
0,
3,
2,
1,
3,
5,
0,
3,
0,
1,
0,
3,
2,
3,
3,
4,
4,
0,
2,
2,
0,
4,
4,
),
(
2,
4,
0,
5,
0,
4,
0,
3,
0,
4,
5,
5,
4,
3,
5,
3,
5,
3,
5,
3,
5,
2,
5,
3,
4,
3,
3,
4,
3,
4,
5,
3,
2,
1,
5,
4,
3,
2,
3,
4,
5,
3,
4,
1,
2,
5,
4,
3,
0,
3,
3,
0,
3,
2,
0,
2,
3,
0,
4,
1,
0,
3,
4,
3,
3,
5,
0,
3,
0,
1,
0,
4,
5,
5,
5,
4,
3,
0,
4,
2,
0,
3,
5,
),
(
0,
5,
0,
4,
0,
4,
0,
2,
0,
5,
4,
3,
4,
3,
4,
3,
3,
3,
4,
3,
4,
2,
5,
3,
5,
3,
4,
1,
4,
3,
4,
4,
4,
0,
3,
5,
0,
4,
4,
4,
4,
5,
3,
1,
3,
4,
5,
3,
3,
3,
3,
3,
3,
3,
0,
2,
2,
0,
3,
3,
2,
4,
3,
3,
3,
5,
3,
4,
1,
3,
3,
5,
3,
2,
0,
0,
0,
0,
4,
3,
1,
3,
3,
),
(
0,
1,
0,
3,
0,
3,
0,
1,
0,
1,
3,
3,
3,
2,
3,
3,
3,
0,
3,
0,
0,
0,
3,
1,
3,
0,
0,
0,
2,
2,
2,
3,
0,
0,
3,
2,
0,
1,
2,
4,
1,
3,
3,
0,
0,
3,
3,
3,
0,
1,
0,
0,
2,
1,
0,
0,
3,
0,
3,
1,
0,
3,
0,
0,
1,
3,
0,
2,
0,
1,
0,
3,
3,
1,
3,
3,
0,
0,
1,
1,
0,
3,
3,
),
(
0,
2,
0,
3,
0,
2,
1,
4,
0,
2,
2,
3,
1,
1,
3,
1,
1,
0,
2,
0,
3,
1,
2,
3,
1,
3,
0,
0,
1,
0,
4,
3,
2,
3,
3,
3,
1,
4,
2,
3,
3,
3,
3,
1,
0,
3,
1,
4,
0,
1,
1,
0,
1,
2,
0,
1,
1,
0,
1,
1,
0,
3,
1,
3,
2,
2,
0,
1,
0,
0,
0,
2,
3,
3,
3,
1,
0,
0,
0,
0,
0,
2,
3,
),
(
0,
5,
0,
4,
0,
5,
0,
2,
0,
4,
5,
5,
3,
3,
4,
3,
3,
1,
5,
4,
4,
2,
4,
4,
4,
3,
4,
2,
4,
3,
5,
5,
4,
3,
3,
4,
3,
3,
5,
5,
4,
5,
5,
1,
3,
4,
5,
3,
1,
4,
3,
1,
3,
3,
0,
3,
3,
1,
4,
3,
1,
4,
5,
3,
3,
5,
0,
4,
0,
3,
0,
5,
3,
3,
1,
4,
3,
0,
4,
0,
1,
5,
3,
),
(
0,
5,
0,
5,
0,
4,
0,
2,
0,
4,
4,
3,
4,
3,
3,
3,
3,
3,
5,
4,
4,
4,
4,
4,
4,
5,
3,
3,
5,
2,
4,
4,
4,
3,
4,
4,
3,
3,
4,
4,
5,
5,
3,
3,
4,
3,
4,
3,
3,
4,
3,
3,
3,
3,
1,
2,
2,
1,
4,
3,
3,
5,
4,
4,
3,
4,
0,
4,
0,
3,
0,
4,
4,
4,
4,
4,
1,
0,
4,
2,
0,
2,
4,
),
(
0,
4,
0,
4,
0,
3,
0,
1,
0,
3,
5,
2,
3,
0,
3,
0,
2,
1,
4,
2,
3,
3,
4,
1,
4,
3,
3,
2,
4,
1,
3,
3,
3,
0,
3,
3,
0,
0,
3,
3,
3,
5,
3,
3,
3,
3,
3,
2,
0,
2,
0,
0,
2,
0,
0,
2,
0,
0,
1,
0,
0,
3,
1,
2,
2,
3,
0,
3,
0,
2,
0,
4,
4,
3,
3,
4,
1,
0,
3,
0,
0,
2,
4,
),
(
0,
0,
0,
4,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
2,
0,
0,
0,
0,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
3,
0,
3,
2,
0,
0,
0,
1,
0,
3,
2,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
4,
0,
2,
0,
0,
0,
0,
0,
0,
2,
),
(
0,
2,
1,
3,
0,
2,
0,
2,
0,
3,
3,
3,
3,
1,
3,
1,
3,
3,
3,
3,
3,
3,
4,
2,
2,
1,
2,
1,
4,
0,
4,
3,
1,
3,
3,
3,
2,
4,
3,
5,
4,
3,
3,
3,
3,
3,
3,
3,
0,
1,
3,
0,
2,
0,
0,
1,
0,
0,
1,
0,
0,
4,
2,
0,
2,
3,
0,
3,
3,
0,
3,
3,
4,
2,
3,
1,
4,
0,
1,
2,
0,
2,
3,
),
(
0,
3,
0,
3,
0,
1,
0,
3,
0,
2,
3,
3,
3,
0,
3,
1,
2,
0,
3,
3,
2,
3,
3,
2,
3,
2,
3,
1,
3,
0,
4,
3,
2,
0,
3,
3,
1,
4,
3,
3,
2,
3,
4,
3,
1,
3,
3,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
4,
1,
1,
0,
3,
0,
3,
1,
0,
2,
3,
3,
3,
3,
3,
1,
0,
0,
2,
0,
3,
3,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
2,
0,
3,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
0,
0,
0,
0,
3,
0,
3,
0,
3,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
2,
0,
2,
3,
0,
0,
0,
0,
0,
0,
0,
0,
3,
),
(
0,
2,
0,
3,
1,
3,
0,
3,
0,
2,
3,
3,
3,
1,
3,
1,
3,
1,
3,
1,
3,
3,
3,
1,
3,
0,
2,
3,
1,
1,
4,
3,
3,
2,
3,
3,
1,
2,
2,
4,
1,
3,
3,
0,
1,
4,
2,
3,
0,
1,
3,
0,
3,
0,
0,
1,
3,
0,
2,
0,
0,
3,
3,
2,
1,
3,
0,
3,
0,
2,
0,
3,
4,
4,
4,
3,
1,
0,
3,
0,
0,
3,
3,
),
(
0,
2,
0,
1,
0,
2,
0,
0,
0,
1,
3,
2,
2,
1,
3,
0,
1,
1,
3,
0,
3,
2,
3,
1,
2,
0,
2,
0,
1,
1,
3,
3,
3,
0,
3,
3,
1,
1,
2,
3,
2,
3,
3,
1,
2,
3,
2,
0,
0,
1,
0,
0,
0,
0,
0,
0,
3,
0,
1,
0,
0,
2,
1,
2,
1,
3,
0,
3,
0,
0,
0,
3,
4,
4,
4,
3,
2,
0,
2,
0,
0,
2,
4,
),
(
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
3,
1,
0,
0,
0,
0,
0,
0,
0,
3,
),
(
0,
3,
0,
3,
0,
2,
0,
3,
0,
3,
3,
3,
2,
3,
2,
2,
2,
0,
3,
1,
3,
3,
3,
2,
3,
3,
0,
0,
3,
0,
3,
2,
2,
0,
2,
3,
1,
4,
3,
4,
3,
3,
2,
3,
1,
5,
4,
4,
0,
3,
1,
2,
1,
3,
0,
3,
1,
1,
2,
0,
2,
3,
1,
3,
1,
3,
0,
3,
0,
1,
0,
3,
3,
4,
4,
2,
1,
0,
2,
1,
0,
2,
4,
),
(
0,
1,
0,
3,
0,
1,
0,
2,
0,
1,
4,
2,
5,
1,
4,
0,
2,
0,
2,
1,
3,
1,
4,
0,
2,
1,
0,
0,
2,
1,
4,
1,
1,
0,
3,
3,
0,
5,
1,
3,
2,
3,
3,
1,
0,
3,
2,
3,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
4,
0,
1,
0,
3,
0,
2,
0,
1,
0,
3,
3,
3,
4,
3,
3,
0,
0,
0,
0,
2,
3,
),
(
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
0,
0,
1,
0,
0,
0,
0,
0,
3,
),
(
0,
1,
0,
3,
0,
4,
0,
3,
0,
2,
4,
3,
1,
0,
3,
2,
2,
1,
3,
1,
2,
2,
3,
1,
1,
1,
2,
1,
3,
0,
1,
2,
0,
1,
3,
2,
1,
3,
0,
5,
5,
1,
0,
0,
1,
3,
2,
1,
0,
3,
0,
0,
1,
0,
0,
0,
0,
0,
3,
4,
0,
1,
1,
1,
3,
2,
0,
2,
0,
1,
0,
2,
3,
3,
1,
2,
3,
0,
1,
0,
1,
0,
4,
),
(
0,
0,
0,
1,
0,
3,
0,
3,
0,
2,
2,
1,
0,
0,
4,
0,
3,
0,
3,
1,
3,
0,
3,
0,
3,
0,
1,
0,
3,
0,
3,
1,
3,
0,
3,
3,
0,
0,
1,
2,
1,
1,
1,
0,
1,
2,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
1,
2,
0,
0,
2,
0,
0,
0,
0,
2,
3,
3,
3,
3,
0,
0,
0,
0,
1,
4,
),
(
0,
0,
0,
3,
0,
3,
0,
0,
0,
0,
3,
1,
1,
0,
3,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
0,
2,
0,
2,
3,
0,
0,
2,
2,
3,
1,
2,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
2,
0,
0,
0,
0,
2,
3,
),
(
2,
4,
0,
5,
0,
5,
0,
4,
0,
3,
4,
3,
3,
3,
4,
3,
3,
3,
4,
3,
4,
4,
5,
4,
5,
5,
5,
2,
3,
0,
5,
5,
4,
1,
5,
4,
3,
1,
5,
4,
3,
4,
4,
3,
3,
4,
3,
3,
0,
3,
2,
0,
2,
3,
0,
3,
0,
0,
3,
3,
0,
5,
3,
2,
3,
3,
0,
3,
0,
3,
0,
3,
4,
5,
4,
5,
3,
0,
4,
3,
0,
3,
4,
),
(
0,
3,
0,
3,
0,
3,
0,
3,
0,
3,
3,
4,
3,
2,
3,
2,
3,
0,
4,
3,
3,
3,
3,
3,
3,
3,
3,
0,
3,
2,
4,
3,
3,
1,
3,
4,
3,
4,
4,
4,
3,
4,
4,
3,
2,
4,
4,
1,
0,
2,
0,
0,
1,
1,
0,
2,
0,
0,
3,
1,
0,
5,
3,
2,
1,
3,
0,
3,
0,
1,
2,
4,
3,
2,
4,
3,
3,
0,
3,
2,
0,
4,
4,
),
(
0,
3,
0,
3,
0,
1,
0,
0,
0,
1,
4,
3,
3,
2,
3,
1,
3,
1,
4,
2,
3,
2,
4,
2,
3,
4,
3,
0,
2,
2,
3,
3,
3,
0,
3,
3,
3,
0,
3,
4,
1,
3,
3,
0,
3,
4,
3,
3,
0,
1,
1,
0,
1,
0,
0,
0,
4,
0,
3,
0,
0,
3,
1,
2,
1,
3,
0,
4,
0,
1,
0,
4,
3,
3,
4,
3,
3,
0,
2,
0,
0,
3,
3,
),
(
0,
3,
0,
4,
0,
1,
0,
3,
0,
3,
4,
3,
3,
0,
3,
3,
3,
1,
3,
1,
3,
3,
4,
3,
3,
3,
0,
0,
3,
1,
5,
3,
3,
1,
3,
3,
2,
5,
4,
3,
3,
4,
5,
3,
2,
5,
3,
4,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
1,
1,
0,
4,
2,
2,
1,
3,
0,
3,
0,
2,
0,
4,
4,
3,
5,
3,
2,
0,
1,
1,
0,
3,
4,
),
(
0,
5,
0,
4,
0,
5,
0,
2,
0,
4,
4,
3,
3,
2,
3,
3,
3,
1,
4,
3,
4,
1,
5,
3,
4,
3,
4,
0,
4,
2,
4,
3,
4,
1,
5,
4,
0,
4,
4,
4,
4,
5,
4,
1,
3,
5,
4,
2,
1,
4,
1,
1,
3,
2,
0,
3,
1,
0,
3,
2,
1,
4,
3,
3,
3,
4,
0,
4,
0,
3,
0,
4,
4,
4,
3,
3,
3,
0,
4,
2,
0,
3,
4,
),
(
1,
4,
0,
4,
0,
3,
0,
1,
0,
3,
3,
3,
1,
1,
3,
3,
2,
2,
3,
3,
1,
0,
3,
2,
2,
1,
2,
0,
3,
1,
2,
1,
2,
0,
3,
2,
0,
2,
2,
3,
3,
4,
3,
0,
3,
3,
1,
2,
0,
1,
1,
3,
1,
2,
0,
0,
3,
0,
1,
1,
0,
3,
2,
2,
3,
3,
0,
3,
0,
0,
0,
2,
3,
3,
4,
3,
3,
0,
1,
0,
0,
1,
4,
),
(
0,
4,
0,
4,
0,
4,
0,
0,
0,
3,
4,
4,
3,
1,
4,
2,
3,
2,
3,
3,
3,
1,
4,
3,
4,
0,
3,
0,
4,
2,
3,
3,
2,
2,
5,
4,
2,
1,
3,
4,
3,
4,
3,
1,
3,
3,
4,
2,
0,
2,
1,
0,
3,
3,
0,
0,
2,
0,
3,
1,
0,
4,
4,
3,
4,
3,
0,
4,
0,
1,
0,
2,
4,
4,
4,
4,
4,
0,
3,
2,
0,
3,
3,
),
(
0,
0,
0,
1,
0,
4,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
2,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
2,
),
(
0,
2,
0,
3,
0,
4,
0,
4,
0,
1,
3,
3,
3,
0,
4,
0,
2,
1,
2,
1,
1,
1,
2,
0,
3,
1,
1,
0,
1,
0,
3,
1,
0,
0,
3,
3,
2,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
2,
0,
2,
2,
0,
3,
1,
0,
0,
1,
0,
1,
1,
0,
1,
2,
0,
3,
0,
0,
0,
0,
1,
0,
0,
3,
3,
4,
3,
1,
0,
1,
0,
3,
0,
2,
),
(
0,
0,
0,
3,
0,
5,
0,
0,
0,
0,
1,
0,
2,
0,
3,
1,
0,
1,
3,
0,
0,
0,
2,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
4,
0,
0,
0,
2,
3,
0,
1,
4,
1,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
3,
0,
0,
0,
0,
0,
3,
),
(
0,
2,
0,
5,
0,
5,
0,
1,
0,
2,
4,
3,
3,
2,
5,
1,
3,
2,
3,
3,
3,
0,
4,
1,
2,
0,
3,
0,
4,
0,
2,
2,
1,
1,
5,
3,
0,
0,
1,
4,
2,
3,
2,
0,
3,
3,
3,
2,
0,
2,
4,
1,
1,
2,
0,
1,
1,
0,
3,
1,
0,
1,
3,
1,
2,
3,
0,
2,
0,
0,
0,
1,
3,
5,
4,
4,
4,
0,
3,
0,
0,
1,
3,
),
(
0,
4,
0,
5,
0,
4,
0,
4,
0,
4,
5,
4,
3,
3,
4,
3,
3,
3,
4,
3,
4,
4,
5,
3,
4,
5,
4,
2,
4,
2,
3,
4,
3,
1,
4,
4,
1,
3,
5,
4,
4,
5,
5,
4,
4,
5,
5,
5,
2,
3,
3,
1,
4,
3,
1,
3,
3,
0,
3,
3,
1,
4,
3,
4,
4,
4,
0,
3,
0,
4,
0,
3,
3,
4,
4,
5,
0,
0,
4,
3,
0,
4,
5,
),
(
0,
4,
0,
4,
0,
3,
0,
3,
0,
3,
4,
4,
4,
3,
3,
2,
4,
3,
4,
3,
4,
3,
5,
3,
4,
3,
2,
1,
4,
2,
4,
4,
3,
1,
3,
4,
2,
4,
5,
5,
3,
4,
5,
4,
1,
5,
4,
3,
0,
3,
2,
2,
3,
2,
1,
3,
1,
0,
3,
3,
3,
5,
3,
3,
3,
5,
4,
4,
2,
3,
3,
4,
3,
3,
3,
2,
1,
0,
3,
2,
1,
4,
3,
),
(
0,
4,
0,
5,
0,
4,
0,
3,
0,
3,
5,
5,
3,
2,
4,
3,
4,
0,
5,
4,
4,
1,
4,
4,
4,
3,
3,
3,
4,
3,
5,
5,
2,
3,
3,
4,
1,
2,
5,
5,
3,
5,
5,
2,
3,
5,
5,
4,
0,
3,
2,
0,
3,
3,
1,
1,
5,
1,
4,
1,
0,
4,
3,
2,
3,
5,
0,
4,
0,
3,
0,
5,
4,
3,
4,
3,
0,
0,
4,
1,
0,
4,
4,
),
(
1,
3,
0,
4,
0,
2,
0,
2,
0,
2,
5,
5,
3,
3,
3,
3,
3,
0,
4,
2,
3,
4,
4,
4,
3,
4,
0,
0,
3,
4,
5,
4,
3,
3,
3,
3,
2,
5,
5,
4,
5,
5,
5,
4,
3,
5,
5,
5,
1,
3,
1,
0,
1,
0,
0,
3,
2,
0,
4,
2,
0,
5,
2,
3,
2,
4,
1,
3,
0,
3,
0,
4,
5,
4,
5,
4,
3,
0,
4,
2,
0,
5,
4,
),
(
0,
3,
0,
4,
0,
5,
0,
3,
0,
3,
4,
4,
3,
2,
3,
2,
3,
3,
3,
3,
3,
2,
4,
3,
3,
2,
2,
0,
3,
3,
3,
3,
3,
1,
3,
3,
3,
0,
4,
4,
3,
4,
4,
1,
1,
4,
4,
2,
0,
3,
1,
0,
1,
1,
0,
4,
1,
0,
2,
3,
1,
3,
3,
1,
3,
4,
0,
3,
0,
1,
0,
3,
1,
3,
0,
0,
1,
0,
2,
0,
0,
4,
4,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
(
0,
3,
0,
3,
0,
2,
0,
3,
0,
1,
5,
4,
3,
3,
3,
1,
4,
2,
1,
2,
3,
4,
4,
2,
4,
4,
5,
0,
3,
1,
4,
3,
4,
0,
4,
3,
3,
3,
2,
3,
2,
5,
3,
4,
3,
2,
2,
3,
0,
0,
3,
0,
2,
1,
0,
1,
2,
0,
0,
0,
0,
2,
1,
1,
3,
1,
0,
2,
0,
4,
0,
3,
4,
4,
4,
5,
2,
0,
2,
0,
0,
1,
3,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
4,
2,
1,
1,
0,
1,
0,
3,
2,
0,
0,
3,
1,
1,
1,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
1,
0,
0,
0,
2,
0,
0,
0,
1,
4,
0,
4,
2,
1,
0,
0,
0,
0,
0,
1,
),
(
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
3,
1,
0,
0,
0,
2,
0,
2,
1,
0,
0,
1,
2,
1,
0,
1,
1,
0,
0,
3,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
3,
1,
0,
0,
0,
0,
0,
1,
0,
0,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
),
(
0,
4,
0,
4,
0,
4,
0,
3,
0,
4,
4,
3,
4,
2,
4,
3,
2,
0,
4,
4,
4,
3,
5,
3,
5,
3,
3,
2,
4,
2,
4,
3,
4,
3,
1,
4,
0,
2,
3,
4,
4,
4,
3,
3,
3,
4,
4,
4,
3,
4,
1,
3,
4,
3,
2,
1,
2,
1,
3,
3,
3,
4,
4,
3,
3,
5,
0,
4,
0,
3,
0,
4,
3,
3,
3,
2,
1,
0,
3,
0,
0,
3,
3,
),
(
0,
4,
0,
3,
0,
3,
0,
3,
0,
3,
5,
5,
3,
3,
3,
3,
4,
3,
4,
3,
3,
3,
4,
4,
4,
3,
3,
3,
3,
4,
3,
5,
3,
3,
1,
3,
2,
4,
5,
5,
5,
5,
4,
3,
4,
5,
5,
3,
2,
2,
3,
3,
3,
3,
2,
3,
3,
1,
2,
3,
2,
4,
3,
3,
3,
4,
0,
4,
0,
2,
0,
4,
3,
2,
2,
1,
2,
0,
3,
0,
0,
4,
1,
),
)
class JapaneseContextAnalysis:
    """Frequency analysis of adjacent-character context for Japanese text.

    Counts how often consecutive characters fall into each context
    category (via the module-level ``jp2CharContext`` table) and derives
    a confidence value from the category distribution.  Subclasses
    override :meth:`get_order` per encoding (Shift-JIS, EUC-JP).
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Reset all counters so the analyser can process a new stream."""
        self._mTotalRel = 0  # total sequence received
        # category counters, each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
    def feed(self, aBuf, aLen):
        """Feed ``aLen`` bytes of ``aBuf`` into the context statistics.

        Becomes a no-op once enough data has been seen (``_mDone``).
        """
        if self._mDone:
            return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes of the previous
        # buffer were not a complete character, we recorded how many bytes
        # are needed to complete it and skip those bytes here. We could
        # record those bytes and analyse the character once complete, but
        # since one character will not make much difference, simply skipping
        # it simplifies our logic and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            # Look at (up to) two bytes: lead byte decides the char length.
            order, charLen = self.get_order(aBuf[i : i + 2])
            i += charLen
            if i > aLen:
                # Character is split across buffers; remember the overshoot.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    # Two consecutive chars with known orders: one sample.
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order
    def got_enough_data(self):
        """Return True once enough character pairs have been sampled."""
        return self._mTotalRel > ENOUGH_REL_THRESHOLD
    def get_confidence(self):
        # This is just one way to calculate confidence. It works well for me.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # Fraction of samples that fell outside category 0.
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW
    def get_order(self, aBuf):
        """Base implementation: unknown order, single-byte char."""
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Shift-JIS flavour of the Japanese context analysis.

    Also tracks whether CP932-only lead bytes were seen so the reported
    charset name can be upgraded from SHIFT_JIS to CP932.
    """
    def __init__(self):
        # BUG FIX: run the base initializer so the analysis counters
        # (_mTotalRel, _mRelSample, _mDone, ...) exist before feed() is
        # called; the original overrode __init__ without chaining up and
        # relied on an external reset() call.
        super(SJISContextAnalysis, self).__init__()
        self.charset_name = "SHIFT_JIS"
    def get_charset_name(self):
        """Return the charset detected so far (SHIFT_JIS or CP932)."""
        return self.charset_name
    def get_order(self, aBuf):
        """Return (hiragana order, byte length) of the first char in aBuf."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length from its lead byte
        first_char = wrap_ord(aBuf[0])
        if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # These lead bytes only occur in Microsoft's CP932 extension.
                self.charset_name = "CP932"
        else:
            charLen = 1
        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # NOTE(review): 202 (0xCA) as the hiragana lead byte matches
            # upstream chardet, but Shift-JIS hiragana normally uses lead
            # byte 0x82 -- confirm against the jp2CharContext indexing.
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen
        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """EUC-JP flavour of the Japanese context analysis."""
    def get_order(self, aBuf):
        """Return (hiragana order, byte length) of the first char in aBuf."""
        if not aBuf:
            return -1, 1
        lead = wrap_ord(aBuf[0])
        # Determine the byte length of the current character from the
        # lead byte: 0x8F introduces a 3-byte char, 0x8E and the
        # 0xA1..0xFE range introduce 2-byte chars, anything else is ASCII.
        if lead == 0x8F:
            char_len = 3
        elif lead == 0x8E or 0xA1 <= lead <= 0xFE:
            char_len = 2
        else:
            char_len = 1
        # Only hiragana (lead 0xA4, trail 0xA1..0xF3) contributes an order.
        if len(aBuf) > 1 and lead == 0xA4:
            trail = wrap_ord(aBuf[1])
            if 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, char_len
        return -1, char_len
# flake8: noqa
|
import os,json,datetime,xlsxwriter,feedparser
from cart.cart import Cart
from phantomapp import forms
from phantomapp.models import *
from django.contrib import messages
from django.core.mail import EmailMessage
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.shortcuts import render,redirect
from django.template.loader import get_template
from django.views.decorators.http import require_POST
def error404(request, exception):
    """Custom 404 handler.

    BUG FIX: pass status=404 -- the original returned the error template
    with HTTP 200, which confuses browsers, crawlers and monitoring.
    """
    return render(request, "404.html", status=404)
def error500(request, *args):
    """Custom 500 handler.

    BUG FIX: pass status=500 for the same reason as error404.
    NOTE(review): the template is named "505.html" -- looks like a typo
    for "500.html", but the file may genuinely be named that way in this
    project, so the name is kept; confirm against the templates folder.
    """
    return render(request, "505.html", status=500)
def index(request):
    """Render the static landing page."""
    return render(request,"index.html")
def about(request):
    """Render the static about page."""
    return render(request,"about.html")
def portfolio(request):
    """Render the static portfolio page."""
    return render(request,"portfolio.html")
def rss(request):
    """Fetch The Hackers News RSS feed and render it.

    NOTE(review): the feed is fetched synchronously on every request --
    consider caching if this page sees traffic.
    """
    feeds = feedparser.parse('https://feeds.feedburner.com/TheHackersNews')
    return render(request,"rss.html",{'feeds':feeds})
def blog(request):
    """List all blog posts."""
    posts = BlogContent.objects.all()
    return render(request,"blog.html",{"posts" : posts})
def blog_post(request,id = None):
    """Show a single blog post by primary key.

    NOTE(review): .get(pk=id) raises DoesNotExist (HTTP 500) for unknown
    ids; get_object_or_404 would return 404 instead -- confirm intent.
    `id` also shadows the builtin.
    """
    post = BlogContent.objects.get(pk=id)
    return render(request,"blog_post.html",{"post" : post})
def product_detail(request,id=None):
    """Show a single shop product by primary key (same caveats as blog_post)."""
    product = ShopProduct.objects.get(pk=id)
    return render(request,"product_detail.html",{
        "product" : product,
        }
    )
def contact(request):
    """Display and process the contact form.

    On POST, validates and persists the submission and flashes a success
    message; an invalid submission is re-rendered with its errors bound.
    """
    if request.method == "POST":
        form_contact = forms.ContactForm(data=request.POST)
        if form_contact.is_valid():
            # ModelForm.save() already persists the instance; the second
            # .save() call the original made was redundant.
            form_contact.save()
            # Message typos fixed ("receive" -> "received", "reponse" ->
            # "respond").
            messages.success(request, "<font color='#38a7bb'> We received your information, we will respond to you soon </font>", extra_tags="safe")
        else:
            print(form_contact.errors)
    else:
        form_contact = forms.ContactForm()
    return render(request, "contact.html", {'form_contact': form_contact})
def login(request):
    """Display and process the login form.

    Redirects to the home page on successful authentication; otherwise
    re-renders the form.
    """
    if request.method == "POST":
        form_login = forms.LoginForm(data=request.POST)
        if form_login.is_valid():
            username = form_login.cleaned_data['username']
            password = form_login.cleaned_data['password']
            authentication = authenticate(username=username, password=password)
            # BUG FIX: the original tested `authenticate` (the imported
            # function object, which is always truthy) instead of the
            # `authentication` result, so every valid form redirected
            # regardless of credentials.
            if authentication is not None:
                # NOTE(review): django.contrib.auth.login() is never
                # called, so no session is established here -- confirm
                # whether that is intentional.
                return redirect('/')
        return render(request, "login.html", {'form_login': form_login})
    else:
        form_login = forms.LoginForm()
    return render(request, "login.html", {'form_login': form_login})
def register(request):
    """Display and process the registration form.

    On success, creates the user via the form, creates the matching
    UserProfile row, and flashes a confirmation message.
    """
    if request.method == "POST":
        register_form = forms.RegisterForm(data=request.POST)
        if register_form.is_valid():
            username = register_form.cleaned_data['username']
            messages.success(request, 'Thanks for registering.You now can login with username <font color="#38a7bb"> %s </font>' % username, extra_tags='safe')
            user_profile = UserProfile(username=username)
            user_profile.save()
            # ModelForm.save() already persists the new user; the extra
            # register_info.save() the original made was redundant.
            register_form.save()
    else:
        register_form = forms.RegisterForm()
    return render(request, "register.html", {'register_form': register_form})
def user_profile(request,username=None):
    """Show and update the profile of the given user.

    Updates both the auth User row (first/last name) and the project's
    UserProfile row (company, country, state, address, telephone).
    NOTE(review): any authenticated visitor can edit any username's
    profile -- confirm access control happens upstream.
    """
    user = User.objects.get(username=username)
    user_profile = UserProfile.objects.get(username=username)
    if request.method == "POST":
        user_form = forms.UserProfileForm(data=request.POST)
        if user_form.is_valid():
            data = user_form.cleaned_data
            user.first_name = data['first_name']
            user.last_name = data['last_name']
            user.save()
            user_profile.company = data['company']
            user_profile.country = data['country']
            user_profile.state = data['state']
            user_profile.address = data['address']
            user_profile.telephone = data['telephone']
            user_profile.save()
    else:
        # GET: present an unbound form (existing values are not pre-filled).
        user_form = forms.UserProfileForm()
    return render(request,"user_profile.html",{"user_form":user_form,"user_profile" : user_profile})
def shop(request, category=None):
    """List shop products, optionally filtered by a named category.

    Unknown or missing category names fall back to the unfiltered list.
    """
    # Map URL category names to ShopProduct.category ids; a dict lookup
    # replaces the original if/elif `id += n` chain (which also shadowed
    # the `id` builtin).
    category_ids = {
        "Books": 1,
        "Laptops": 2,
        "Smartwatch": 3,
        "NetworkDevices": 4,
    }
    category_id = category_ids.get(category, 0)
    if category_id == 0:
        products = ShopProduct.objects.all()
    else:
        products = ShopProduct.objects.filter(category=category_id).order_by('name')
    return render(request, "shop.html", {
        "products": products,
        }
    )
@require_POST
def cart_add(request, item_id = None):
    """Add the given product to the session cart (POST only).

    NOTE(review): when the form is invalid the product is silently not
    added but the user is still redirected to the cart -- confirm that
    is the intended UX.
    """
    cart = Cart(request)
    product = ShopProduct.objects.get(id=item_id)
    form = forms.CartForm(data=request.POST)
    if form.is_valid():
        cart.add(product, product.price, form.cleaned_data['quantity'])
    return redirect('cart_detail')
def cart_remove(request, item_id = None):
    """Remove the given product from the session cart."""
    product = ShopProduct.objects.get(id=item_id)
    cart = Cart(request)
    cart.remove(product)
    return redirect('cart_detail')
@require_POST
def cart_update(request,item_id = None):
    """Update quantity and unit price of a cart line item (POST only)."""
    cart = Cart(request)
    product = ShopProduct.objects.get(id = item_id)
    form = forms.CartForm(data=request.POST)
    if form.is_valid():
        data = form.cleaned_data
        cart.update(product,data['quantity'],data['unit_price'])
    return redirect('cart_detail')
def cart_detail(request):
    """Render the current session cart."""
    return render(request,"cart_detail.html",{'cart' : Cart(request)})
def checkout(request):
    """Process the checkout form: persist the order, record each cart
    item as an OrderProduct, bump per-product order counters, e-mail a
    receipt, then empty the cart.
    """
    cart = Cart(request)
    if request.method == "POST":
        form = forms.CheckOutForm(data=request.POST)
        if form.is_valid():
            user_email = form.cleaned_data['email']
            # NOTE(review): form.save() already persists the order; the
            # second save() call looks redundant -- confirm before removing.
            order = form.save()
            order.save()
            for item in cart:
                # Re-fetch the product so the counter update hits the DB row.
                product = ShopProduct.objects.get(pk=item.product.pk)
                product.ordered_times += item.quantity
                product.save()
                OrderProduct.objects.create(purchase = order,product = item.product,price = item.unit_price)
            send_mail(user_email,cart)
            cart.clear()
            messages.success(request, '<font color="#38a7bb"> Thank you for purchasing our products </font>',extra_tags='safe')
    else:
        form = forms.CheckOutForm()
    return render(request,"checkout.html",{"checkout_form" : form})
def send_mail(user_email,cart):
    """Send an HTML purchase-confirmation e-mail for the given cart.

    NOTE(review): the sender address is hard-coded; consider moving it
    to settings. This local name also differs from Django's own
    django.core.mail.send_mail (not imported here, so no clash).
    """
    subject = "Success purchased courses"
    from_email = "michaelrossa0612@gmail.com"
    to_email = [user_email]
    ctx = {'cart' : cart}
    # Render the e-mail body from the mail.html template.
    message = get_template("mail.html").render(ctx)
    email_obj = EmailMessage(subject,message,from_email,to_email)
    email_obj.content_subtype="html"  # send as HTML, not plain text
    email_obj.send()
def view_chart(request):
    """Render a column chart of per-product order counts.

    On POST, additionally export the same table to an .xlsx file on the
    user's Desktop and report the saved path.
    """
    products_list = ShopProduct.objects.order_by('name')
    name = []
    ordered_times = []
    for entry in products_list:
        name.append(entry.name)
        ordered_times.append(entry.ordered_times)
    # Highcharts-style series/chart definitions, serialized to JSON for
    # the template.
    times = {
        'name': 'ordered times',
        'data': ordered_times,
        'color': '#38a7bb'
    }
    chart = {
        'chart': {'type': 'column'},
        'title': {'text': 'Ordered times'},
        'xAxis': {'categories': name},
        'series': [times]
    }
    dump = json.dumps(chart)
    result = ""
    time_now = datetime.datetime.now()
    # Spreadsheet rows: header first, then one row per product.
    ordered_times1 = []
    label_list = ['id', 'name', 'ordered times']
    ordered_times1.append(label_list)
    for item in products_list:
        product = [item.id, item.name, item.ordered_times]
        ordered_times1.append(product)
    if request.method == "POST":
        file_name = "products" + time_now.strftime('%d-%m-%Y-%H-%M-%S') + ".xlsx"
        # os.path.expanduser('~') is portable; the original read
        # os.environ['HOME'], which is unset on Windows (and the inner
        # os.path.join with one argument was a no-op).
        desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
        path = os.path.join(desktop, file_name)
        write_excel(path, ordered_times1)
        # BUG FIX: the original message lacked the space before the path
        # ("Saved file toC:/...").
        result = "Saved file to " + path
    return render(request, "chart.html", {"chart": dump, "result": result})
def write_excel(path, write_list):
    """Write a 2-D list to a new .xlsx workbook, one sheet row per inner list.

    :param path: destination file path for the workbook
    :param write_list: iterable of rows, each an iterable of cell values
    """
    workbook = xlsxwriter.Workbook(path)
    worksheet = workbook.add_worksheet()
    # enumerate replaces the original manual row counter and
    # `while i < len(item)` index loop; the trailing bare `return` was
    # dropped as a no-op.
    for row, item in enumerate(write_list):
        for col, value in enumerate(item):
            worksheet.write(row, col, value)
    workbook.close()
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
import sys, os, subprocess
#------------------------------------------------------------------------------
def run_step(arguments, working_dir):
    """Run *arguments* in *working_dir*; exit the script on failure.

    Replaces the original triplicated Popen/poll/wait/exit pattern.
    wait() is safe even when the process has already finished, so the
    original's `poll() == None` guard (which also compared to None with
    `==` instead of `is`) was unnecessary.
    """
    child = subprocess.Popen(arguments, cwd=working_dir)
    child.wait()
    if child.returncode != 0:
        sys.exit(child.returncode)
#------------------------------------------------------------------------------
#--- Get script absolute path
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
#--- Build Analyzer
run_step(["python", "build_analyzer.py"], scriptDir)
#--- Rewrite the dylib install name so it is found via @rpath
run_step([
    "install_name_tool",
    "-change",
    "@executable_path/libAnalyzer.dylib",
    "@rpath/libAnalyzer.dylib",
    "release/libCANMolinaroAnalyzer.dylib"
], scriptDir)
#--- Copy analyzer into the user's Saleae Logic custom-analyzer folder
run_step([
    "cp",
    "release/libCANMolinaroAnalyzer.dylib",
    os.path.expanduser("~/Documents/customSaleaeLogicAnalyzers")
], scriptDir)
#------------------------------------------------------------------------------
|
#-*-coding:utf-8-*-
"""
@FileName:
wire_net.py
@Description:
wire net class for behavior-driven simulation
@Authors:
Hanbo Sun(sun-hb17@mails.tsinghua.edu.cn)
@CreateTime:
2022/05/07 17:20
"""
from mnsim_noc.utils.component import Component
from mnsim_noc.Wire.base_wire import BaseWire
class WireNet(Component):
    """
    wire net class for behavior-driven simulation

    Owns every point-to-point wire of a 2D tile mesh and answers
    path-level state queries and transfer-time estimates.
    """
    REGISTRY = "wire_net"
    NAME = "behavior_driven"
    def __init__(self, tile_net_shape, band_width):
        """
        wire net
        tile_net_shape: tuple -> (row_num, column_num)
        band_width: bandwidth forwarded to every BaseWire
        """
        super(WireNet, self).__init__()
        # init wire net: self.wires keeps creation order, self.wires_map
        # gives O(1) lookup by the normalized position key
        self.wires = []
        self.wires_map = {}
        # horizontal wires: (i, j) -> (i, j + 1)
        for i in range(tile_net_shape[0]):
            for j in range(tile_net_shape[1] - 1):
                wire_position = ((i, j), (i, j + 1))
                wire = BaseWire(wire_position, band_width)
                self.wires.append(wire)
                self.wires_map[self._get_map_key(wire_position)] = wire
        # vertical wires: (i, j) -> (i + 1, j)
        for j in range(tile_net_shape[1]):
            for i in range(tile_net_shape[0] - 1):
                wire_position = ((i, j), (i + 1, j))
                wire = BaseWire(wire_position, band_width)
                self.wires.append(wire)
                self.wires_map[self._get_map_key(wire_position)] = wire
    def _get_map_key(self, wire_position):
        """
        Normalize a wire position (tuple of two (row, col) tuples, like
        ((0, 0), (0, 1))) into a canonical string key so both endpoint
        orderings of the same wire map to the same entry.
        """
        if wire_position[0][0] + wire_position[0][1] > \
            wire_position[1][0] + wire_position[1][1]:
            return str((wire_position[1], wire_position[0]))
        return str(wire_position)
    def set_transparent_flag(self, transparent_flag):
        """
        set the transparent flag on every wire
        """
        for wire in self.wires:
            wire.set_transparent_flag(transparent_flag)
    def get_data_path_state(self, transfer_path):
        """
        get data path state
        return False only when all wires on the path are idle
        """
        all_state = [self.wires_map[self._get_map_key(path)].get_wire_state()
            for path in transfer_path
        ]
        return any(all_state)
    def set_data_path_state(self, transfer_path, state):
        """
        set the busy/idle state of every wire on the path
        """
        for path in transfer_path:
            self.wires_map[self._get_map_key(path)].set_wire_state(state)
    def get_wire_transfer_time(self, transfer_path, data_list, current_time):
        """
        get wire transfer time: current_time plus the summed per-hop
        transfer time along the path
        """
        transfer_end_time = current_time
        for path in transfer_path:
            wire = self.wires_map[self._get_map_key(path)]
            transfer_end_time += wire.get_transfer_time(data_list)
        return transfer_end_time
    def check_finish(self):
        """
        Assert that every wire is idle (AssertionError otherwise).
        """
        for wire in self.wires:
            # `assert not state` replaces the original `== False`
            # comparison; same check, idiomatic form, with a message.
            assert not wire.get_wire_state(), \
                "wire still busy at simulation end"
|
import numpy as np
import tfgraph
import os
def test_ranking():
    """tfgraph.Utils.ranked maps values to their 0-based rank positions."""
    np.testing.assert_array_equal(
        tfgraph.Utils.ranked(np.array([[1, 3, 2, 4]])),
        np.array([[0, 2, 1, 3]]))
def test_save():
    """save_ranks round-trips a rank matrix through a CSV file.

    NOTE(review): writes 'proof.csv' in the working directory -- a
    tmp_path fixture would avoid collisions between parallel runs.
    """
    file_name = 'proof.csv'
    init = np.array([[0, 0.5], [1, 0.5]])
    tfgraph.Utils.save_ranks(file_name, init)
    # skip_header=1 drops the header row save_ranks writes.
    end = np.genfromtxt(file_name, delimiter=',', skip_header=1)
    np.testing.assert_array_equal(init, end)
    os.remove(file_name)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import glob
import os
import subprocess
import sys
from setuptools import find_packages
from setuptools.command.sdist import sdist
# In order to run the i18n commands for compiling and
# installing message catalogs, we use DistUtilsExtra.
# Don't make this a hard requirement, but warn that
# i18n commands won't be available if DistUtilsExtra is
# not installed...
# In order to run the i18n commands we prefer DistUtilsExtra's setup();
# fall back to plain setuptools with a warning when it is absent.
try:
    from DistUtilsExtra.auto import setup
except ImportError:
    from setuptools import setup
    # Parenthesised single-argument print works on both Python 2 and 3;
    # the original used Python-2-only print statements.
    print("Warning: DistUtilsExtra required to use i18n builders. ")
    print("To build nova with support for message catalogs, you need ")
    print("  https://launchpad.net/python-distutils-extra >= 2.18")
gettext.install('nova', unicode=1)  # `unicode` kwarg: Python 2 gettext API
from nova.utils import parse_mailmap, str_dict_replace
from nova import version
# When building from a bzr checkout, capture the VCS revision info into
# nova/vcsversion.py so the installed package knows its exact source.
if os.path.isdir('.bzr'):
    with open("nova/vcsversion.py", 'w') as version_file:
        vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"],
                                   stdout=subprocess.PIPE)
        vcsversion = vcs_cmd.communicate()[0]
        version_file.write(vcsversion)
class local_sdist(sdist):
    """Customized sdist hook - builds the ChangeLog file from VC first"""
    def run(self):
        if os.path.isdir('.bzr'):
            # We're in a bzr branch
            env = os.environ.copy()
            env['BZR_PLUGIN_PATH'] = os.path.abspath('./bzrplugins')
            # The custom "novalog" formatter lives in ./bzrplugins.
            log_cmd = subprocess.Popen(["bzr", "log", "--novalog"],
                                       stdout=subprocess.PIPE, env=env)
            changelog = log_cmd.communicate()[0]
            # Rewrite committer addresses per the project mailmap.
            mailmap = parse_mailmap()
            with open("ChangeLog", "w") as changelog_file:
                changelog_file.write(str_dict_replace(changelog, mailmap))
        sdist.run(self)
nova_cmdclass = {'sdist': local_sdist}
try:
    from sphinx.setup_command import BuildDoc

    class local_BuildDoc(BuildDoc):
        """Build both the html and man sphinx targets in one command."""
        def run(self):
            for builder in ['html', 'man']:
                self.builder = builder
                self.finalize_options()
                BuildDoc.run(self)
    nova_cmdclass['build_sphinx'] = local_BuildDoc
except ImportError:
    # Sphinx not installed: docs simply cannot be built; not fatal.
    # (The original bare `except:` also swallowed unrelated errors,
    # including KeyboardInterrupt.)
    pass
try:
    from babel.messages import frontend as babel
    nova_cmdclass['compile_catalog'] = babel.compile_catalog
    nova_cmdclass['extract_messages'] = babel.extract_messages
    nova_cmdclass['init_catalog'] = babel.init_catalog
    nova_cmdclass['update_catalog'] = babel.update_catalog
except ImportError:
    # Babel not installed: catalog commands unavailable; not fatal.
    pass
def find_data_files(destdir, srcdir):
    """Recursively collect (destination, [files]) pairs for data_files.

    Mirrors the layout of *srcdir* under *destdir*: the plain files of a
    directory are grouped into one tuple, and each subdirectory
    contributes its own tuples via recursion.
    """
    collected = []
    plain_files = []
    for entry in glob.glob('%s/*' % (srcdir, )):
        if not os.path.isdir(entry):
            plain_files.append(entry)
            continue
        subdir_dest = os.path.join(destdir, os.path.basename(entry))
        collected.extend(find_data_files(subdir_dest, entry))
    collected.append((destdir, plain_files))
    return collected
# Package definition: metadata, custom build commands (sdist/sphinx/babel
# hooks collected in nova_cmdclass above), data files and CLI scripts.
setup(name='nova',
      version=version.canonical_version_string(),
      description='cloud computing fabric controller',
      author='OpenStack',
      author_email='nova@lists.launchpad.net',
      url='http://www.openstack.org/',
      cmdclass=nova_cmdclass,
      packages=find_packages(exclude=['bin', 'smoketests']),
      include_package_data=True,
      test_suite='nose.collector',
      data_files=find_data_files('share/nova', 'tools'),
      scripts=['bin/nova-ajax-console-proxy',
               'bin/nova-api',
               'bin/nova-compute',
               'bin/nova-console',
               'bin/nova-dhcpbridge',
               'bin/nova-direct-api',
               'bin/nova-logspool',
               'bin/nova-manage',
               'bin/dodai-machine-state-monitor',
               'bin/dodai-delete-machine',
               'bin/dodai-db-all',
               'bin/dodai-db-create',
               'bin/dodai-db-drop',
               'bin/dodai-db-init',
               'bin/dodai-db-machine-reset',
               'bin/dodai-db-show',
               'bin/dodai-instances-remove',
               'bin/nova-network',
               'bin/nova-objectstore',
               'bin/nova-scheduler',
               'bin/nova-spoolsentry',
               'bin/stack',
               'bin/nova-volume',
               'bin/nova-vncproxy',
               'tools/nova-debug'],
      py_modules=[])
|
from operator import add
from app.models import Address
def test_address(test_client, database):
    """
    GIVEN a Address model
    WHEN a new Address is created
    THEN check the city, addressLine, zipcode
    """
    addr = Address(city = "Delhi",addrLine = "C-28,Model Town-3",zipCode = "110009")
    database.session.add(addr)
    database.session.commit()
    addr1 = Address.query.filter_by(city = "Delhi").first()
    addr2 = Address.query.filter_by(addrLine = "C-28,Model Town-3").first()
    addr3 = Address.query.filter_by(zipCode = "110009").first()
    # NOTE(review): id == 1 assumes a freshly-created table per test --
    # confirm the `database` fixture guarantees that.
    addr4 = Address.query.filter_by(id = 1).first()
    '''
    assert addr1 == addr ensures that the object is created properly and returned correctly when filtered by city
    assert addr2 == addr ensures that the object is created properly and returned correctly when filtered by addrLine
    assert addr3 == addr ensures that the object is created properly and returned correctly when filtered by zipCode
    assert addr4 == addr ensures that the object is created properly and returned correctly when filtered by id
    '''
    assert addr1 == addr
    assert addr2 == addr
    assert addr3 == addr
    assert addr4 == addr
|
def readfile(filename):
    """Return the entire contents of *filename* as a string.

    Uses a `with` block so the handle is closed even if read() raises;
    the original's explicit open/close leaked the handle on error.
    """
    with open(filename, 'r') as f:
        return f.read()
def writefile(filename, message):
    """Write *message* to *filename*, replacing any existing contents.

    Uses a `with` block so the handle is closed (and buffers flushed)
    even if write() raises.
    """
    with open(filename, 'w') as f:
        f.write(message)
|
"""
This City Energy Analyst plugin is used to automate a number of parallelized simulations of the same scenario for a
single building, based on variable input stochastic distributions.
An output file is produced, which saves main inputs and outputs from each iteration.
"""
import multiprocessing
import os
import random
import shutil
from itertools import repeat

import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as Gdf
from openpyxl.reader.excel import load_workbook
from scipy.stats import beta

import cea.config
import cea.inputlocator
import cea.plugin
import cea.utilities.parallel
from cea.datamanagement import archetypes_mapper
from cea.demand import demand_main
from cea.demand.schedule_maker import schedule_maker
from cea.resources.radiation_daysim import radiation_main
from cea.utilities.dbf import dbf_to_dataframe, dataframe_to_dbf
__author__ = "Luis Santos"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Luis Santos, Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Luis Santos"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class ScenarioPlugin(cea.plugin.CeaPlugin):
pass
def stochastic_scenario_generator(config, locator, dataframe_with_instances):
    """
    This function loads inputs from a dataframe into the CEA database, runs CEA scripts and stores inputs and outputs.

    For every row of `dataframe_with_instances` it overwrites the scenario
    databases with that row's sampled values, runs the archetype mapper,
    solar radiation, schedule and demand scripts, and collects key outputs.
    Returns (and writes to CSV) the merged inputs+outputs dataframe.
    """
    outputs_list = []
    # loop inside input dataframe to replace database for every iteration
    for index, instance in dataframe_with_instances.iterrows():
        print("Simulation number {}".format(index))
        ## Replace STANDARDs to account for the correct database
        typology = dbf_to_dataframe(locator.get_building_typology())
        typology.STANDARD = 'STANDARD1'
        dataframe_to_dbf(typology, locator.get_building_typology())
        types = load_workbook(filename=locator.get_database_construction_standards())
        envelope_types = types['ENVELOPE_ASSEMBLIES']
        envelope_types.cell(column=2, row=2).value = 'CONSTRUCTION_AS1'
        envelope_types.cell(column=3, row=2).value = 'TIGHTNESS_AS1'
        envelope_types.cell(column=4, row=2).value = 'WINDOW_AS1'
        envelope_types.cell(column=5, row=2).value = 'ROOF_AS1'
        envelope_types.cell(column=6, row=2).value = 'WALL_AS1'
        envelope_types.cell(column=7, row=2).value = 'WALL_AS1'
        envelope_types.cell(column=10, row=2).value = 'SHADING_AS0'
        hvac_types = types['HVAC_ASSEMBLIES']
        hvac_types.cell(column=3, row=2).value = 'HVAC_COOLING_AS3'  # baseline: centralized
        hvac_types.cell(column=4, row=2).value = 'HVAC_HOTWATER_AS4'  # baseline: high temperature in tropics
        hvac_types.cell(column=5, row=2).value = 'HVAC_CONTROLLER_AS2'  # baseline: PI controller
        hvac_types.cell(column=6, row=2).value = 'HVAC_VENTILATION_AS1'  # baseline: mechanical ventilation
        supply_types = types['SUPPLY_ASSEMBLIES']
        supply_types.cell(column=3, row=2).value = 'SUPPLY_HOTWATER_AS1'  # baseline: electric boiler
        supply_types.cell(column=4, row=2).value = 'SUPPLY_COOLING_AS1'  # baseline: VCC and dry CT
        types.save(locator.get_database_construction_standards())
        ## Replace database for inputs generated
        # Changes and saves variables related to ZONE
        zone_gdf = Gdf.from_file(locator.get_zone_geometry())
        zone_gdf['floors_ag'] = instance.zone_floors_ag
        zone_gdf['height_ag'] = instance.zone_floor_to_floor_height * zone_gdf['floors_ag']
        zone_gdf['GFA_m2'] = zone_gdf.area * (zone_gdf['floors_ag'] + zone_gdf['floors_bg'])
        zone_gdf.to_file(locator.get_zone_geometry())
        # Changes and saves variables related to SURROUNDINGS
        surroundings_gdf = Gdf.from_file(locator.get_surroundings_geometry())
        surroundings_gdf.floors_ag = [instance.surrounding_floors_ag for floor in surroundings_gdf.floors_ag]
        surroundings_height = instance.surroundings_floor_to_floor_height * instance.surrounding_floors_ag
        surroundings_gdf['height_ag'] = [surroundings_height for height in surroundings_gdf.height_ag]
        surroundings_gdf.to_file(locator.get_surroundings_geometry())
        # Changes and saves variables related to CONSTRUCTION_STANDARDS
        archetype_construction = load_workbook(filename=locator.get_database_construction_standards())
        envelope_assemblies = archetype_construction['ENVELOPE_ASSEMBLIES']
        envelope_assemblies.cell(column=11, row=2).value = instance.Es
        envelope_assemblies.cell(column=12, row=2).value = instance.Hs_ag
        envelope_assemblies.cell(column=14, row=2).value = instance.Ns
        envelope_assemblies.cell(column=15, row=2).value = instance.void_deck
        # Same window-to-wall ratio applied to all four facade orientations.
        envelope_assemblies.cell(column=16, row=2).value = instance.wwr
        envelope_assemblies.cell(column=17, row=2).value = instance.wwr
        envelope_assemblies.cell(column=18, row=2).value = instance.wwr
        envelope_assemblies.cell(column=19, row=2).value = instance.wwr
        archetype_construction.save(locator.get_database_construction_standards())
        # Changes and saves variables related to ENVELOPE
        assemblies_envelope = load_workbook(filename=locator.get_database_envelope_systems())
        construction = assemblies_envelope['CONSTRUCTION']
        construction.cell(column=3, row=2).value = instance.Cm_Af
        tightness = assemblies_envelope['TIGHTNESS']
        tightness.cell(column=3, row=2).value = instance.n50
        window = assemblies_envelope['WINDOW']
        window.cell(column=3, row=2).value = instance.U_win
        window.cell(column=4, row=2).value = instance.G_win
        window.cell(column=5, row=2).value = instance.e_win
        window.cell(column=6, row=2).value = instance.F_F
        roof = assemblies_envelope['ROOF']
        roof.cell(column=3, row=2).value = instance.U_roof
        roof.cell(column=4, row=2).value = instance.a_roof
        roof.cell(column=5, row=2).value = instance.e_roof
        roof.cell(column=6, row=2).value = instance.r_roof
        wall = assemblies_envelope['WALL']
        wall.cell(column=3, row=2).value = instance.U_wall
        wall.cell(column=4, row=2).value = instance.a_wall
        wall.cell(column=5, row=2).value = instance.e_wall
        wall.cell(column=6, row=2).value = instance.r_wall
        shading = assemblies_envelope['SHADING']
        shading.cell(column=3, row=2).value = instance.rf_sh
        assemblies_envelope.save(locator.get_database_envelope_systems())
        # Changes and saves variables related to USE_TYPE_PROPERTIES
        archetypes_use_type = load_workbook(filename=locator.get_database_use_types_properties())
        internal_loads = archetypes_use_type['INTERNAL_LOADS']
        internal_loads.cell(column=2, row=5).value = instance.Occ_m2p
        internal_loads.cell(column=3, row=5).value = instance.Qs_Wp
        internal_loads.cell(column=4, row=5).value = instance.X_ghp
        internal_loads.cell(column=5, row=5).value = instance.Ea_Wm2
        internal_loads.cell(column=6, row=5).value = instance.El_Wm2
        internal_loads.cell(column=9, row=5).value = instance.Vww_ldp
        indoor_comfort = archetypes_use_type['INDOOR_COMFORT']
        indoor_comfort.cell(column=2, row=5).value = instance.Tcs_set_C
        indoor_comfort.cell(column=6, row=5).value = instance.Ve_lsp
        archetypes_use_type.save(locator.get_database_use_types_properties())
        # Changes and saves variables related to HVAC
        archetypes_use_type = load_workbook(filename=locator.get_database_air_conditioning_systems())
        internal_loads = archetypes_use_type['CONTROLLER']
        internal_loads.cell(column=4, row=4).value = instance.dT_Qcs
        internal_loads = archetypes_use_type['VENTILATION']
        internal_loads.cell(column=5, row=3).value = instance.HEAT_REC
        internal_loads.cell(column=7, row=3).value = instance.ECONOMIZER
        indoor_comfort = archetypes_use_type['COOLING']
        indoor_comfort.cell(column=4, row=5).value = instance.convection_cs
        indoor_comfort.cell(column=6, row=5).value = instance.dTcs_C
        archetypes_use_type.save(locator.get_database_air_conditioning_systems())
        # Changes and saves variables related to SUPPLY
        archetypes_use_type = load_workbook(filename=locator.get_database_supply_assemblies())
        internal_loads = archetypes_use_type['COOLING']
        internal_loads.cell(column=6, row=3).value = instance.efficiency_cooling
        archetypes_use_type.save(locator.get_database_supply_assemblies())
        ## Run CEA scripts: archetypes, solar radiation, building schedules and energy demand
        config.multiprocessing = False  # assures each simulation uses a single core
        config.debug = False
        config.scenario = locator.scenario
        config.scenario_name = config.scenario.rsplit(os.sep)[-1]
        archetypes_mapper.main(config)  # loads database into the scenario
        radiation_main.main(config)  # runs solar radiation script
        schedule_maker.schedule_maker_main(locator, config)  # runs schedules
        demand_main.demand_calculation(locator, config)  # runs demand simulation
        ## Process relevant outputs
        # Total weekly schedules are calculated (calculated by the weekly_schedule function)
        schedules = pd.read_csv(locator.get_building_weekly_schedules('B1001'), skiprows=2)
        weekly_occupancy_h = round(weekly_schedule(schedules, 'OCCUPANCY'), 1)
        weekly_appliances_h = round(weekly_schedule(schedules, 'APPLIANCES'), 1)
        weekly_lighting_h = round(weekly_schedule(schedules, 'LIGHTING'), 1)
        weekly_water_h = round(weekly_schedule(schedules, 'WATER'), 1)
        # Cooling uses SETPOINT counts rather than a sum of fractions.
        schedules_cooling = schedules[['DAY', 'COOLING']].groupby('DAY').COOLING.value_counts()
        weekly_cooling_h = schedules_cooling['WEEKDAY'].SETPOINT * 5 + schedules_cooling['SATURDAY'].SETPOINT + \
                           schedules_cooling['SUNDAY'].SETPOINT
        # Extract relevant demand outputs
        Total_demand = pd.read_csv(locator.get_total_demand(), usecols=['GRID_MWhyr', 'GRID_a_MWhyr', 'GRID_l_MWhyr',
                                                                       'GRID_ve_MWhyr', 'GRID_ww_MWhyr',
                                                                       'GRID_cs_MWhyr',
                                                                       'GFA_m2', 'Af_m2', 'Aroof_m2', 'people0'])
        Annual_energy_demand_MWhyr = Total_demand.GRID_MWhyr.values[0]
        GFA_m2 = Total_demand.GFA_m2.values[0]
        EUI_kWhyr_m2 = Annual_energy_demand_MWhyr * 1000 / GFA_m2  # Energy Use Intensity
        EEI_kWhyr_m2 = EUI_kWhyr_m2 * 55 / weekly_occupancy_h  # Energy Efficiency Index, as defined by BCA, Singapore
        # Storage of outputs in a dict
        dict_outputs = {
            "instance": "ID" + str(index),
            "Annual_energy_demand_MWhyr": round(Annual_energy_demand_MWhyr, 2),
            "GFA_m2": round(GFA_m2, 2),
            "EUI_kWhyr_m2": round(EUI_kWhyr_m2, 2),
            "EEI_kWhyr_m2": round(EEI_kWhyr_m2, 2),
            "weekly_occupancy_h": weekly_occupancy_h,
            "weekly_appliances_h": weekly_appliances_h,
            "weekly_lighting_h": weekly_lighting_h,
            "weekly_water_h": weekly_water_h,
            "weekly_cooling_h": weekly_cooling_h
        }
        outputs_list.append(dict_outputs)
    # Convert inputs and outputs into a csv file.
    outputs_df = pd.DataFrame(outputs_list)
    # BUG FIX: the original first computed a pd.concat(...).reindex(...)
    # result that was immediately overwritten by this merge (dead store);
    # only the merge on 'instance' is kept.
    results = pd.merge(dataframe_with_instances, outputs_df, on='instance', how='outer')
    # os.path.join replaces the original raw-string '\\' concatenation,
    # which only worked on Windows.
    results.to_csv(os.path.join(locator.get_demand_results_folder(), 'data_generation.csv'), index=False)
    return results
def weekly_schedule(schedules, schedule_type='OCCUPANCY'):
    """
    Sum the hourly fractions of a given schedule over one full week.

    :param schedules: DataFrame with a 'DAY' column ('WEEKDAY'/'SATURDAY'/'SUNDAY')
        and one column per schedule type containing hourly fractions.
    :param schedule_type: name of the schedule column to aggregate.
    :return: total weekly hours (5 weekdays + Saturday + Sunday).
    """
    daily_totals = schedules.groupby('DAY')[schedule_type].sum()
    weekday_hours = daily_totals['WEEKDAY'] * 5
    weekend_hours = daily_totals['SATURDAY'] + daily_totals['SUNDAY']
    return weekday_hours + weekend_hours
def main(config):
    """
    Entry point: read the general simulation inputs from the config and run
    the stochastic scenario generator in parallel over the available cores.
    """
    iterations = config.scenario_generator.iterations
    print("Running for {} iterations".format(iterations))
    # Keep at least one core free so the machine stays responsive.
    cores_to_keep_free = 1
    assigned_cores = multiprocessing.cpu_count() - cores_to_keep_free
    # Build the random input samples (one chunk per core) and one scenario
    # folder per core.
    instance_frames = sampling_function(iterations, assigned_cores)
    locators = create_scenario(assigned_cores, config)
    # Vectorize the generator so each scenario runs on its own core.
    parallel_generator = cea.utilities.parallel.vectorize(stochastic_scenario_generator, assigned_cores)
    scenario_count = len(locators)
    parallel_generator(
        repeat(config, scenario_count),
        locators,
        instance_frames)
    print("Simulation ended with {} iteration(s) split into {} scenario(s)".format(iterations, assigned_cores))
def sampling_function(number_simulations, number_cores):
    """
    Create one dict of random input variables per iteration and return the
    resulting dataframe split into `number_cores` chunks for parallelization.

    Beta distributions are used as the default sampling distribution. The
    order of the draws below is part of the behaviour (it fixes the random
    stream), so new variables must be appended, not inserted.
    """
    samples = []
    for i in range(number_simulations):
        samples.append({
            "instance": "ID" + str(i),
            "zone_floors_ag": round(beta.rvs(2, 2, scale=19, loc=2), 0),
            "zone_floor_to_floor_height": round(beta.rvs(8, 8, scale=2, loc=2), 1),
            "surrounding_floors_ag": round(beta.rvs(2, 2, scale=19, loc=1), 0),
            "surroundings_floor_to_floor_height": round(beta.rvs(8, 8, scale=2, loc=2), 1),
            "Hs_ag": round(beta.rvs(4, 4), 2),
            "Es": round(beta.rvs(8, 2), 2),  # TODO Es as independent variable, can make it conditional to Hs (Es> Hs)
            "Ns": round(beta.rvs(10, 2), 2),
            "void_deck": round(beta.rvs(3, 4, scale=2, loc=0), 0),
            "wwr": round(beta.rvs(12, 8), 2),
            "Cm_Af": round(beta.rvs(5, 7, scale=200, loc=100), 0) * 1000,
            "n50": round(beta.rvs(3, 7, scale=6, loc=1), 0),
            "U_win": round(beta.rvs(5, 6, scale=5, loc=0.5), 2),
            "G_win": round(beta.rvs(4, 4), 2),
            "e_win": round(beta.rvs(7, 4), 2),
            "F_F": round(beta.rvs(2, 8), 2),
            "U_roof": round(beta.rvs(2, 5, scale=0.85, loc=0.15), 2),
            "a_roof": round(beta.rvs(4, 4), 2),
            "e_roof": round(beta.rvs(8, 2), 2),
            "r_roof": round(beta.rvs(4, 4), 2),
            "U_wall": round(beta.rvs(5, 5, scale=3, loc=0.2), 2),
            "a_wall": round(beta.rvs(4, 4), 2),
            "e_wall": round(beta.rvs(10, 2), 2),
            "r_wall": round(beta.rvs(4, 6), 2),
            "rf_sh": round(beta.rvs(4, 6), 2),
            "Occ_m2p": round(beta.rvs(2, 4, scale=55, loc=3), 0),
            "Qs_Wp": round(beta.rvs(2, 10, scale=20, loc=70), 0),
            "X_ghp": round(beta.rvs(2, 10, scale=40, loc=80), 0),
            "Ea_Wm2": round(beta.rvs(5, 10, scale=15, loc=2), 1),
            "El_Wm2": round(beta.rvs(5, 10, scale=10, loc=5), 1),
            "Vww_ldp": round(beta.rvs(2, 2, scale=40, loc=0), 0),
            "Tcs_set_C": round(beta.rvs(3, 8, scale=5, loc=24), 1),
            "Ve_lsp": round(beta.rvs(2, 15, scale=20, loc=8), 1),
            "dT_Qcs": round(beta.rvs(3, 3, scale=1.6, loc=-2.5), 1),
            "ECONOMIZER": random.choice(['TRUE', 'FALSE']),
            "HEAT_REC": random.choice(['TRUE', 'FALSE']),
            "convection_cs": round(beta.rvs(10, 2), 1),
            "dTcs_C": round(beta.rvs(3, 3, scale=0.2, loc=0.5), 2),
            "efficiency_cooling": round(beta.rvs(3, 8, scale=4, loc=2.3), 2)
        })
    # Split the dataframe into one chunk per simulation core.
    variables_df = pd.DataFrame(samples)
    return np.array_split(variables_df, number_cores)
def create_scenario(number_cores, config):
    """
    Prepare one scenario folder per simulation core and return their locators.

    Each folder is a fresh copy of the base scenario (named <base><core_index>)
    and is overwritten across iterations.
    """
    base_scenario_path = config.scenario
    locators = []
    for core_index in range(number_cores):
        scenario_path = str(base_scenario_path) + str(core_index)
        # Start from a clean copy of the base scenario for this core.
        shutil.rmtree(scenario_path, ignore_errors=True)
        shutil.copytree(base_scenario_path, scenario_path)
        locators.append(cea.inputlocator.InputLocator(scenario_path, config.plugins))
    return locators
if __name__ == '__main__':
    # Run the scenario generator with the default CEA configuration.
    main(cea.config.Configuration())
|
from django.shortcuts import render
from accounts.serializers import ProfileSerializer
from rest_framework import generics, permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from .models import Profile
from django.http import HttpResponse, Http404
from django.shortcuts import render,redirect
from rest_framework import status
from django.views.generic.base import View
class ProfileView(APIView):
    """API endpoints for retrieving, updating and creating Profile records."""
    permission_classes = [permissions.AllowAny]

    def get(self, request, pk):
        """Return the serialized profile with primary key ``pk``, or 404."""
        try:
            person = Profile.objects.get(id=pk)
        except Profile.DoesNotExist:
            # Narrowed from a bare except; only a missing profile is a 404.
            raise Http404("User Not Found")
        serializer = ProfileSerializer(person, many=False)
        return Response(serializer.data)

    def patch(self, request, pk):
        """Partially update the profile with primary key ``pk``."""
        try:
            person = Profile.objects.get(id=pk)
        except Profile.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # partial=True is the standard PATCH semantic: omitted fields keep
        # their current values instead of failing validation.
        serializer = ProfileSerializer(person, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # Validation failure is a client error (400), not 405 Method Not Allowed.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def post(self, request):
        """Create a new profile from the request payload."""
        # Bug fix: request.data must be passed as the ``data`` keyword —
        # passed positionally it is treated as the model instance and the
        # serializer can never validate or save.
        serializer = ProfileSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Bug fix: the original returned None on success (HTTP 500).
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileDetailView(View):
    """Plain Django view that renders the profile page for a given user id."""
    def get(self,request,id):
        # `id` shadows the builtin, but it is the URLconf keyword argument
        # name and therefore part of the interface.
        return render(request, 'accounts/profile.html',{'id':id}) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from glob import glob
import numpy as np
import random
from scipy.ndimage import distance_transform_edt as distance
import torch
from monai.data import CacheDataset, DataLoader
from monai.transforms import (
AddChannelD,
Compose,
LoadImageD,
Resized,
ScaleIntensityD,
EnsureTypeD,
)
from monai.networks import one_hot
from monai.losses import FocalLoss
from monai.metrics import *
from monai.utils import set_determinism
from model import Encoder, Decoder
# Fix all random seeds (numpy/torch/random via MONAI) for reproducible runs.
set_determinism(seed=2021)
def compute_dtm(img_gt, out_shape):
    """
    Compute the distance transform map of the foreground in a binary mask.

    input: segmentation or ground truth, shape = (batch_size, c, x, y, z)
           out_shape = (batch_size, c, x, y, z)
    output: the foreground Distance Map (SDM), shape = (batch_size, c, x, y, z)
            dtm(x) = 0 for x on the segmentation boundary/background,
                     min |x-y| over boundary points y for x inside the mask
    """
    dtm = np.zeros(out_shape)
    batch_size, num_channels = out_shape[0], out_shape[1]
    # Channel 0 (background) is intentionally skipped and stays all-zero.
    for batch_idx in range(batch_size):
        for channel in range(1, num_channels):
            foreground = img_gt[batch_idx][channel].astype(bool)
            if not foreground.any():
                continue
            dtm[batch_idx][channel] = distance(foreground)
    return dtm
def hd_loss(seg_soft, gt, seg_dtm, gt_dtm):
    """
    Compute the Hausdorff distance loss for multiclass segmentation.

    input: seg_soft: softmax results, shape=(b,c,x,y,z)
           gt: ground truth, shape=(b,c,x,y,z)
           seg_dtm: segmentation distance transform map, shape=(b,c,x,y,z)
           gt_dtm: ground truth distance transform map, shape=(b,c,x,y,z)
    output: boundary loss, scalar tensor
    """
    squared_error = (seg_soft - gt.float()) ** 2
    # Weight each voxel's squared error by the sum of the squared distance
    # maps (the original einsum 'bcxyz,bcxyz->bcxyz' is an elementwise product).
    distance_weight = seg_dtm ** 2 + gt_dtm ** 2
    return (squared_error * distance_weight).mean()
# ---------------------------------------------------------------------------
# Runtime setup: device, data split, transforms, loaders, model and optimizer.
# ---------------------------------------------------------------------------
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Bug fix: torch.cuda.is_available is a function; without the call the bare
# function object is always truthy, so 'cuda' was selected even on CPU-only
# machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# get image address
root_dir = '../data/brainstruct'
data_dir = os.path.join(root_dir, "aseg_crop")
all_filenames = glob(data_dir + '/*.nii.gz')
random.shuffle(all_filenames)
# 70/30 train/test split; the same file serves as both image and label source.
test_frac = 0.3
num_ims = len(all_filenames)
num_test = int(num_ims * test_frac)
num_train = num_ims - num_test
train_datadict = [{"im": fname, 'seg': fname} for fname in all_filenames[:num_train]]
val_datadict = [{"im": fname, 'seg': fname} for fname in all_filenames[-num_test:]]
print(f"total number of images: {num_ims}")
print(f"number of images for training: {len(train_datadict)}")
print(f"number of images for testing: {len(val_datadict)}")
batch_size = 32
num_workers = 12
transforms = Compose(
    [
        LoadImageD(keys=["im", 'seg']),
        AddChannelD(keys=["im", 'seg']),
        # nearest-neighbour resize keeps label values unchanged
        Resized(keys=['im', 'seg'], spatial_size=[96, 128, 96], mode='nearest'),
        ScaleIntensityD(keys=["im"]),
        EnsureTypeD(keys=["im", 'seg']),
    ]
)
train_ds = CacheDataset(train_datadict, transforms, num_workers=num_workers)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_ds = CacheDataset(val_datadict, transforms, num_workers=num_workers)
val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False)
# NOTE(review): .cuda() still assumes a GPU is present even though `device`
# may now be 'cpu'; kept as-is to preserve the intended GPU behaviour.
encoder = Encoder(z=354).cuda()
decoder = Decoder(z=354).cuda()
num_epochs = 500
lr = 1e-4
params_to_optimize = [
    {'params': encoder.parameters()},
    {'params': decoder.parameters()}
]
# FocalLoss converts the integer label map to one-hot internally.
criterion = FocalLoss(include_background=True, to_onehot_y=True)
optimizer = torch.optim.Adam(params_to_optimize, lr=lr, weight_decay=1e-5)
save_checkpoints_dir = './checkpoints/z354_05focal05hdloss'
if not os.path.exists(save_checkpoints_dir):
    os.makedirs(save_checkpoints_dir)
val_interval = 10
best_loss = 100.0
train_losses = []
test_losses = []
dice_metric = DiceMetric(include_background=True, reduction='mean')
# ---------------------------------------------------------------------------
# Training loop: loss = alpha * focal + (1 - alpha) * Hausdorff-distance loss.
# ---------------------------------------------------------------------------
for epoch in range(num_epochs):
    # Bug fix: the original reused the name 'val_loss' for both the
    # accumulator list and the per-batch scalar loss, so `.append` was called
    # on a Tensor and crashed on the first validation epoch.
    val_losses = []
    train_loss = []
    dices = torch.zeros(1, 5).to(device)  # seed row, excluded from the mean below
    encoder.train()
    decoder.train()
    alpha = 0.5  # weight between focal loss and HD loss
    for data in train_loader:
        img = data['im'].to(device)
        target = data['seg'].to(device)
        target_oh = one_hot(target, 5)
        # ===================forward=====================
        encoded_data = encoder(img)
        output = decoder(encoded_data)
        loss_focal = criterion(output, target)
        # Distance transform maps are computed on CPU, without gradients.
        with torch.no_grad():
            gt_dtm_npy = compute_dtm(target_oh.cpu().numpy(), output.shape)
            gt_dtm = torch.from_numpy(gt_dtm_npy).float().cuda(output.device.index)
            output_arg = torch.argmax(output, dim=1, keepdim=True)
            output_oh = one_hot(output_arg, 5)
            seg_dtm_npy = compute_dtm(output_oh.cpu().numpy(), output.shape)
            seg_dtm = torch.from_numpy(seg_dtm_npy).float().cuda(output.device.index)
        loss_hd = hd_loss(output, target_oh, seg_dtm, gt_dtm)
        loss = alpha * loss_focal + (1 - alpha) * loss_hd
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # ===================metric========================
        dice = dice_metric(output_oh, target_oh)
        dices = torch.cat((dices, dice), dim=0)
        # ===================log========================
        train_loss.append(loss.detach().cpu().numpy())
    dice_score = torch.mean(dices[1:, ...], dim=0).cpu().numpy().tolist()
    if (epoch + 1) % val_interval == 0:
        encoder.eval()
        decoder.eval()
        with torch.no_grad():
            for val_data in val_loader:
                val_img = val_data['im'].to(device)
                val_target = val_data['seg'].to(device)
                val_target_oh = one_hot(val_target, 5)
                # ===================forward=====================
                val_output = decoder(encoder(val_img))
                val_loss_focal = criterion(val_output, val_target)
                val_gt_dtm_npy = compute_dtm(val_target_oh.cpu().numpy(), val_output.shape)
                val_gt_dtm = torch.from_numpy(val_gt_dtm_npy).float().cuda(val_output.device.index)
                val_output_ = torch.argmax(val_output, dim=1, keepdim=True)
                val_output_ = one_hot(val_output_, 5)
                val_seg_dtm_npy = compute_dtm(val_output_.cpu().numpy(), val_output.shape)
                val_seg_dtm = torch.from_numpy(val_seg_dtm_npy).float().cuda(val_output.device.index)
                # Bug fix: pass the one-hot ground truth (as in the training
                # loop); the raw label map has the wrong channel layout for
                # hd_loss and for the dtm computed above.
                val_loss_hd = hd_loss(val_output, val_target_oh, val_seg_dtm, val_gt_dtm)
                batch_val_loss = alpha * val_loss_focal + (1 - alpha) * val_loss_hd
                # ===================log========================
                val_losses.append(batch_val_loss.detach().cpu().numpy())
    train_loss_one = np.mean(train_loss)
    # np.mean([]) would warn and return nan; make non-validation epochs explicit.
    val_loss_one = np.mean(val_losses) if val_losses else float('nan')
    train_losses.append(train_loss_one)
    test_losses.append(val_loss_one)
    # Periodic snapshots every 50 epochs (and at the final epoch).
    if (epoch + 1) % 50 == 0 or (epoch + 1) == num_epochs:
        torch.save({
            'epoch': epoch + 1,
            'encoder': encoder.state_dict(),
            'loss': train_loss_one,
        }, save_checkpoints_dir + f'/encoder_{epoch + 1}.pth')
        torch.save({
            'epoch': epoch + 1,
            'decoder': decoder.state_dict(),
            'loss': train_loss_one,
        }, save_checkpoints_dir + f'/decoder_{epoch + 1}.pth')
    # Best-model checkpoint, tracked on validation loss (nan never compares
    # lower, so non-validation epochs never overwrite the best snapshot).
    if val_loss_one < best_loss:
        best_loss = val_loss_one
        torch.save({
            'epoch': epoch + 1,
            'encoder': encoder.state_dict(),
            'loss': val_loss_one,
        }, save_checkpoints_dir + f'/encoder.pth')
        torch.save({
            'epoch': epoch + 1,
            'decoder': decoder.state_dict(),
            'loss': val_loss_one,
        }, save_checkpoints_dir + f'/decoder.pth')
    print('epoch [{}/{}], train loss:{:.4f}, val loss:{:.4f}'.format(epoch + 1, num_epochs, train_loss_one,
                                                                     val_loss_one))
    print('train dice:{}'.format(dice_score))
|
import attr
@attr.s
class ResolvedDependencies(object):
    # attrs record bundling three views of a dependency-resolution result.
    # NOTE(review): field semantics are not visible here — the names suggest
    # the flattened full set, the directly resolved set, and a per-group
    # mapping; confirm against the call sites.
    all_resolved_dependencies = attr.ib()
    resolved_dependencies = attr.ib()
    resolved_dependencies_by_group = attr.ib() |
"""JointVAE
Learning Disentangled Joint Continuous and Discrete Representations
http://arxiv.org/abs/1804.00104
"""
from typing import Dict
import torch
from torch import nn, Tensor
from torch.nn import functional as F
import pixyz.distributions as pxd
import pixyz.losses as pxl
from .base import BaseVAE
from ..losses.discrete_kl import CategoricalKullbackLeibler
class EncoderFunction(pxd.Deterministic):
    """Deterministic feature extractor f(h|x): image x -> 256-dim feature h,
    shared by the continuous and discrete encoders."""
    def __init__(self, channel_num):
        super().__init__(cond_var=["x"], var=["h"], name="f")
        # Four stride-2 convolutions; each halves the spatial resolution.
        self.enc_x = nn.Sequential(
            nn.Conv2d(channel_num, 32, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 4, stride=2, padding=1),
            nn.ReLU(),
        )
        # Flattened conv output (1024 = 64 * 4 * 4; assumes 64x64 inputs —
        # TODO confirm input resolution) projected to a 256-dim feature.
        self.fc = nn.Sequential(
            nn.Linear(1024, 256),
            nn.ReLU(),
        )
    def forward(self, x):
        h = self.enc_x(x)
        h = h.view(-1, 1024)  # flatten per-sample conv features
        h = self.fc(h)
        return {"h": h}
class ContinuousEncoder(pxd.Normal):
    """Gaussian encoder q_z(z|h) for the continuous latent z."""
    def __init__(self, z_dim):
        super().__init__(cond_var=["h"], var=["z"], name="q_z")
        self.fc11 = nn.Linear(256, z_dim)  # mean head
        self.fc12 = nn.Linear(256, z_dim)  # pre-softplus scale head
    def forward(self, h):
        loc = self.fc11(h)
        # softplus keeps the scale strictly positive
        scale = F.softplus(self.fc12(h))
        return {"loc": loc, "scale": scale}
class DiscreteEncoder(pxd.RelaxedCategorical):
    """Relaxed-categorical (Gumbel-softmax) encoder q_c(c|h) for the discrete
    latent c."""
    def __init__(self, c_dim, temperature):
        super().__init__(cond_var=["h"], var=["c"], name="q_c",
                         temperature=temperature)
        self.fc1 = nn.Linear(256, c_dim)
    def forward(self, h):
        logits = self.fc1(h)
        probs = F.softmax(logits, dim=1)
        # Clamp away from exact 0/1 for numerical stability downstream.
        probs = torch.clamp(probs, 1e-6, 1 - 1e-6)
        return {"probs": probs}
class JointDecoder(pxd.Bernoulli):
    """Bernoulli decoder p(x|z, c) conditioned on both latents."""
    def __init__(self, channel_num, z_dim, c_dim):
        super().__init__(cond_var=["z", "c"], var=["x"])
        # Project the concatenated latents up to the flattened conv shape (1024).
        self.fc = nn.Sequential(
            nn.Linear(z_dim + c_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 1024),
            nn.ReLU(),
        )
        # Mirror of the encoder: four stride-2 transposed convolutions
        # upsample back; the final sigmoid yields Bernoulli probabilities.
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(64, 64, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, channel_num, 4, stride=2, padding=1),
            nn.Sigmoid(),
        )
    def forward(self, z, c):
        h = self.fc(torch.cat([z, c], dim=1))
        h = h.view(-1, 64, 4, 4)  # unflatten to (N, 64, 4, 4) feature maps
        probs = self.deconv(h)
        return {"probs": probs}
class JointVAE(BaseVAE):
    """Joint VAE.

    Jointly learns a continuous latent `z` (Gaussian) and a discrete latent
    `c` (relaxed categorical) with capacity-controlled KL terms.

    Args:
        channel_num (int): Number of input channels.
        z_dim (int): Dimension of continuous latents `z`.
        c_dim (int): Dimension of discrete latents `c`.
        temperature (float): Temperature for discrete encoder.
        gamma_z (float): Gamma regularization term for `z`.
        gamma_c (float): Gamma regularization term for `c`.
        cap_z (float): Capacity for `z`.
        cap_c (float): Capacity for `c`.
    """
    def __init__(self, channel_num: int, z_dim: int, c_dim: int,
                 temperature: float, gamma_z: float, gamma_c: float,
                 cap_z: float, cap_c: float, **kwargs):
        super().__init__()
        self.channel_num = channel_num
        self.z_dim = z_dim
        self.c_dim = c_dim
        # Plain Python values; fed to the loss via the input dict in
        # loss_func, where the pxl.Parameter objects below read them by name.
        self._gamma_z_value = gamma_z
        self._gamma_c_value = gamma_c
        self._cap_z_value = cap_z
        self._cap_c_value = cap_c
        # Distributions
        self.prior_z = pxd.Normal(
            loc=torch.zeros(z_dim), scale=torch.ones(z_dim), var=["z"])
        self.prior_c = pxd.Categorical(
            probs=torch.ones(c_dim, dtype=torch.float32) / c_dim, var=["c"])
        self.encoder_func = EncoderFunction(channel_num)
        self.encoder_z = ContinuousEncoder(z_dim)
        self.encoder_c = DiscreteEncoder(c_dim, temperature)
        self.decoder = JointDecoder(channel_num, z_dim, c_dim)
        self.distributions = [self.prior_z, self.prior_c, self.encoder_func,
                              self.encoder_z, self.encoder_c, self.decoder]
        # Loss
        self.ce = pxl.CrossEntropy(self.encoder_z * self.encoder_c,
                                   self.decoder)
        self.kl_z = pxl.KullbackLeibler(self.encoder_z, self.prior_z)
        self.kl_c = CategoricalKullbackLeibler(
            self.encoder_c, self.prior_c)
        # Coefficient for kl
        self.gamma_z = pxl.Parameter("gamma_z")
        self.gamma_c = pxl.Parameter("gamma_c")
        # Capacity
        self.cap_z = pxl.Parameter("cap_z")
        self.cap_c = pxl.Parameter("cap_c")
    def encode(self, x_dict: Dict[str, Tensor], mean: bool = False, **kwargs
               ) -> Dict[str, Tensor]:
        """Encodes latents given observable x.

        Args:
            x_dict (dict of [str, torch.Tensor]): Dict of Tensor for input
                observations.
            mean (bool, optional): Boolean flag for returning means or samples.

        Returns:
            z_dict (dict of [str, torch.Tensor]): Dict of tensor of encoded
                latents (keys "z" and "c").
        """
        # Shared deterministic feature h is sampled once, then both encoders
        # condition on it.
        h = self.encoder_func.sample(x_dict, return_all=False)
        if mean:
            z = self.encoder_z.sample_mean(h)
            c = self.encoder_c.sample_mean(h)
            return {"z": z, "c": c}
        z = self.encoder_z.sample(h, return_all=False)
        c = self.encoder_c.sample(h, return_all=False)
        z.update(c)
        return z
    def decode(self, z_dict: Dict[str, Tensor], mean: bool = False, **kwargs
               ) -> Dict[str, Tensor]:
        """Decodes observable x given latents.

        Args:
            z_dict (dict of [str, torch.Tensor]): Dict of latents tensors.
            mean (bool, optional): Boolean flag for returning means or samples.

        Returns:
            x_dict (dict of [str, torch.Tensor]): Dict of tensor of decoded
                observations.
        """
        if mean:
            x = self.decoder.sample_mean(z_dict)
            return {"x": x}
        return self.decoder.sample(z_dict, return_all=False)
    def sample(self, batch_n: int) -> Dict[str, Tensor]:
        """Samples observable x from latents drawn from both priors.

        Args:
            batch_n (int): Batch size.

        Returns:
            x_dict (dict of [str, torch.Tensor]): Dict of sampled observation
                tensor.
        """
        z = self.prior_z.sample(batch_n=batch_n)
        c = self.prior_c.sample(batch_n=batch_n)
        x = self.decoder.sample_mean({"z": z["z"], "c": c["c"]})
        return {"x": x}
    def loss_func(self, x: Tensor, **kwargs) -> Dict[str, Tensor]:
        """Calculates loss given observable x.

        Loss = CE + gamma_z * |KL_z - cap_z| + gamma_c * |KL_c - cap_c|.

        Args:
            x (torch.Tensor): Tensor of input observations.

        Returns:
            loss_dict (dict of [str, torch.Tensor]): Dict of calculated losses.
        """
        # TODO: update capacity values per epoch
        # Gamma/capacity scalars ride along in the input dict so the
        # pxl.Parameter terms can resolve them by name.
        x_dict = {
            "x": x,
            "gamma_z": self._gamma_z_value,
            "gamma_c": self._gamma_c_value,
            "cap_z": self._cap_z_value,
            "cap_c": self._cap_c_value,
        }
        # Sample h (surrogate latent variable)
        x_dict = self.encoder_func.sample(x_dict)
        # Cross entropy
        ce_loss = self.ce.eval(x_dict).mean()
        # KL for continuous latent
        kl_z_loss = (
            self.gamma_z * (self.kl_z - self.cap_z).abs()).eval(x_dict).mean()
        # KL for discrete latent
        kl_c_loss = (
            self.gamma_c * (self.kl_c - self.cap_c).abs()).eval(x_dict).mean()
        loss = ce_loss + kl_z_loss + kl_c_loss
        loss_dict = {"loss": loss, "ce_loss": ce_loss, "kl_z_loss": kl_z_loss,
                     "kl_c_loss": kl_c_loss}
        return loss_dict
    @property
    def loss_str(self):
        # Symbolic (pixyz) string form of the training objective, for display.
        return str((self.ce + self.gamma_z * (self.kl_z - self.cap_z).abs()
                    + self.gamma_c * (self.kl_c - self.cap_c).abs()
                    ).expectation(self.encoder_func))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 20 10:58:17 2021
@author: JohnZhong
"""
import random
def generate_ramdon_list(n):
    """Return a list of n random integers, each drawn uniformly from [1, n]."""
    return [random.randint(1, n) for _ in range(n)]
def bubble_sort(L):
    """Sort the list L in place using bubble sort and return it."""
    length = len(L)
    if length < 2:
        # Zero or one element: already sorted.
        return L
    # Each pass bubbles the largest remaining element to position
    # unsorted_end - 1, shrinking the unsorted prefix by one.
    for unsorted_end in range(length, 1, -1):
        for j in range(unsorted_end - 1):
            if L[j] > L[j + 1]:
                L[j], L[j + 1] = L[j + 1], L[j]
    return L
# Demo: sort 100 random integers and print the sorted list.
print(bubble_sort(generate_ramdon_list(100))) |
from anki.hooks import addHook
from PyQt5.QtWidgets import *
from .mindmap_creator_dialog import show as show_dialog
def main():
    """Register the browser-setup hook; the wrapper forwards the browser."""
    addHook('browser.setupMenus', lambda browser: setup_menu(browser))
def setup_menu(self):
    """Add a "Mindmap" menu with a single "Create a Mindmap" action.

    ``self`` is an aqt.browser.Browser instance.
    """
    mindmap_menu = QMenu("Mindmap")
    # Keep a reference on the browser and insert just before the Tools menu.
    self.menuTags = mindmap_menu
    self.menuBar().insertMenu(self.mw.form.menuTools.menuAction(), mindmap_menu)
    create_action = mindmap_menu.addAction("Create a Mindmap")
    create_action.triggered.connect(lambda _: show_dialog())
# Register the hook at add-on load time.
main()
|
import os
import logging
import configparser
import shutil
import subprocess
import re
import time
def config_logger(workspace):
    """Configure and return the 'root' logger.

    INFO-level records go to the console and are appended to
    <workspace>/test.log.
    """
    logger = logging.getLogger('root')
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('[%(asctime)s]%(filename)s:%(lineno)d:[%(levelname)s]: %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(os.path.join(workspace, 'test.log'), mode='a')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
class Subprocess(object):
    """A thin wrapper around subprocess.Popen with a timeout and logging."""
    @staticmethod
    def run(cmd, logger):
        """Run `cmd` in a shell; return (stdout, stderr, runtime_in_us).

        On timeout (2 hours) the whole process tree is killed and
        ('', '', 1) is returned. Any stderr output is treated as a failure
        and logged as an error.
        """
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        try:
            start_time = time.time()
            stdout, stderr = proc.communicate(timeout=7200)
            stdout, stderr = stdout.decode(), stderr.decode()
            runtime = int((time.time() - start_time) * 1000000)  # us
        except subprocess.TimeoutExpired:
            # Kill the subprocess together with its descendants: parse pids
            # out of the pstree output and SIGKILL each one.
            # NOTE(review): r'\d+' also matches digits embedded in process
            # names; killing a bogus pid is suppressed by the except below.
            ptree = subprocess.Popen(f'pstree -p {proc.pid}', shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE).communicate()[0].decode()
            for m in re.finditer(r'\d+', ptree):
                cpid = int(m.group())
                try:
                    os.kill(cpid, 9)
                except:
                    # pid may already be gone or not be a real pid
                    pass
            logger.error('Failed to run cmd "{}" with timeout'.format(cmd))
            stdout, stderr, runtime = '', '', 1
        if stderr:
            logger.error('Failed to run cmd "{}"'.format(cmd))
            logger.critical(stderr)
        else:
            logger.debug(stdout)
        return stdout, stderr, runtime
class Case(object):
    """One benchmark case: copies a source .aig into its own workspace folder
    and runs/compares the ABC and iFPGA LUT-mapping flows on it.

    Relies on the module-level ``config`` object parsed in ``__main__``.
    """

    def __init__(self, source, workspace):
        self.source = os.path.abspath(source)
        self.workspace = os.path.abspath(workspace)
        # Case name is the source file name without extension.
        self.case_name = os.path.split(os.path.splitext(self.source)[0])[1]
        self.case_dir = os.path.join(self.workspace, self.case_name)
        os.makedirs(self.case_dir, exist_ok=True)
        # Work on a private copy so the original source stays untouched.
        shutil.copy(self.source, self.case_dir)
        self.source = os.path.join(self.case_dir, os.path.basename(self.source))
        # Per-case logger writing to <workspace>/<case>/<case>.log.
        logfile = os.path.join(self.workspace, self.case_name, self.case_name + '.log')
        self.logger = logging.getLogger('root.' + self.case_name)
        self.logger.setLevel(logging.DEBUG)
        fmt = logging.Formatter('[%(asctime)s]%(filename)s:%(lineno)d:[%(levelname)s]: %(message)s')
        file_handler = logging.FileHandler(logfile, mode='a')
        file_handler.setFormatter(fmt)
        self.logger.addHandler(file_handler)
        self.logger.info('run case "{}"'.format(self.case_name))
        # External tools, expected on PATH.
        self.abc = 'abc'
        self.ifpga = 'ifpga'
        self.yosys = 'yosys'

    def run_abc(self):
        """Optimize then LUT-map with ABC.

        Returns [lut_nums, area, level, memory, map_time, runtime_us].
        """
        self.__run_abc_opt()
        return self.__run_abc_mapping()

    def __run_abc_opt(self):
        """Run ten resyn/resyn2 passes over the source AIG, writing self.opt_aig."""
        fname = os.path.splitext(self.source)[0]
        self.opt_aig = fname + ".opt.aig"
        if config.getboolean('global', 'quick_run'):
            # quick_run assumes a previously generated .opt.aig already exists.
            self.logger.info("skip ABC opt...")
            return
        self.logger.info("run ABC opt...")
        # abc cmd (surrounding quotes in the config values are stripped)
        opt_cmd_resyn = config.get('global', 'opt_cmd_resyn').strip('"')
        opt_cmd_resyn2 = config.get('global', 'opt_cmd_resyn2').strip('"')
        # Ten alternating resyn/resyn2 passes; string repetition produces the
        # exact same command as spelling the twenty passes out by hand.
        abc_cmd = (f'read {self.source}; '
                   + f'{opt_cmd_resyn}; {opt_cmd_resyn2}; ' * 10
                   + f'write_aiger -s {self.opt_aig}; ')
        # run abc
        cmd = '{} -c "{}"'.format(self.abc, abc_cmd)
        stdout, _, _ = Subprocess.run(cmd, self.logger)
        return stdout

    def __run_abc_mapping(self):
        """LUT-map the optimized AIG with ABC and parse its report."""
        fname = os.path.splitext(self.source)[0]
        self.abc_out = fname + ".abc.v"
        lut_input = config.getint('global', 'K', fallback=6)
        lut_lib = os.path.abspath(config.get('global', 'lut_lib'))
        self.logger.info("run abc mapping...")
        abc_cmd = (f'read {self.opt_aig}; '
                   f'read_lut {lut_lib}; '
                   f'if -v -K {lut_input}; sweep;'
                   f'write_verilog {self.abc_out}; '
                   f'print_gates; print_level')
        # run abc
        cmd = '{} -c "{}"'.format(self.abc, abc_cmd)
        stdout, _, runtime = Subprocess.run(cmd, self.logger)
        return self.__extract_abc_report(stdout) + [runtime]

    def __extract_abc_report(self, rpt):
        """Parse area/level/memory/time from the ABC stdout report.

        NOTE(review): assumes the memory and time patterns always match; a
        missing match raises AttributeError (kept as in the original flow).
        """
        lut_nums, area = self.__lut_statistics()
        level_pat = r'Level\s*=\s*(\d+)'
        level = 0
        for m in re.finditer(level_pat, rpt):
            level = max(int(m.group(1)), level)
        self.logger.info('area: {} level: {}'.format(area, level))
        mem_pat = r'Peak memory: (\d+) bytes'
        m = re.search(mem_pat, rpt)
        memory = m.group(1)
        time_pat = r'Total time =\s*([\d\.]+)'
        m = re.search(time_pat, rpt)
        runtime = m.group(1)
        return [lut_nums, str(area), str(level), memory, runtime]

    def __lut_statistics(self):
        """Count LUTs per input size in the ABC Verilog output.

        Returns [per-size counts as strings, total area]; 7-input LUTs count
        double toward area.
        """
        with open(self.abc_out) as f:
            lines = f.readlines()
        lut_input = config.getint('global', 'K', fallback=6)
        lut_nums = [0] * lut_input
        # A LUT's size is the number of distinct identifiers in its assign RHS.
        simple_id = r'[a-zA-Z_][\w\$]*'
        escaped_id = r'\\.+?\s'
        identifier = re.compile(simple_id + '|' + escaped_id)
        for line in lines:
            if 'assign' not in line: continue
            # remove const
            if "1'b" in line: continue
            expr = line.split('=')[-1]
            inputs = re.findall(identifier, expr)
            input_num = len(set(inputs))
            # remove buffer
            if input_num == 1 and '~' not in expr: continue
            lut_nums[input_num - 1] += 1
        area = sum(lut_nums)
        if lut_input == 7:
            area += lut_nums[-1]
        return [[str(i) for i in lut_nums], area]

    def run_ifpga(self):
        """Run the iFPGA flow on the optimized AIG (run_abc must run first,
        since it produces self.opt_aig) and parse its report."""
        fname = os.path.splitext(self.source)[0]
        self.ifpga_out = fname + ".ifpga.v"
        self.ifpga_lut = fname + ".ifpga.lut.v"
        self.ifpga_config = config.get('global', 'config_file')
        self.logger.info("run iFPGA flow...")
        cmd = f'ifpga -i "{self.opt_aig}" -c "{self.ifpga_config}" -v "{self.ifpga_out}"'
        stdout, _, runtime = Subprocess.run(cmd, self.logger)
        return self.__extract_ifpga_report(stdout) + [runtime]

    def __extract_ifpga_report(self, rpt):
        """Parse per-size LUT counts, area, level, memory and time from the
        iFPGA stdout report."""
        lut_input = config.getint('global', 'K', fallback=6)
        lut_nums = [0] * lut_input
        for i in range(lut_input):
            lut_pat = r'LUT fanins:{}\s*numbers\s*:(\d+)'.format(i + 1)
            lut_m = re.search(lut_pat, rpt)
            if lut_m:
                lut_nums[i] = int(lut_m.group(1))
        level_pat = r'max delay\s*:\s*(\d+)'
        m = re.search(level_pat, rpt)
        if m:
            level = m.group(1)
        else:
            self.logger.error("Failed to run ifpga")
            # Bug fix: the caller appends the runtime and unpacks six values,
            # so the failure path must return the same 5-element shape as the
            # success path (the old `return '0', '0'` raised TypeError on
            # `tuple + [runtime]`).
            return [['0'] * lut_input, '0', '0', '0', '0']
        area = sum(lut_nums)
        if lut_input == 7:
            # 7-input LUTs count double toward area, matching __lut_statistics.
            area += lut_nums[-1]
        self.logger.info('area: {} level: {}'.format(area, level))
        mem_pat = r'Peak memory: (\d+) bytes'
        m = re.search(mem_pat, rpt)
        mem = m.group(1)
        time_pat = r'Mapping time: ([\d\.\-e]+)'
        m = re.search(time_pat, rpt)
        runtime = m.group(1)
        return [[str(i) for i in lut_nums], str(area), level, mem, runtime]
def main(config_file):
    """Run every .aig case under config['global']['cases'] through both the
    ABC and iFPGA flows, log a comparison table, and write it to test.csv.

    Relies on the module-level ``config`` object parsed in ``__main__``.
    """
    def _mean(values):
        # Average that tolerates an empty bucket instead of dividing by zero.
        return sum(values) / len(values) if values else 0.0

    case_dir = config.get('global', 'cases')
    workspace = config.get('global', 'workspace')
    os.makedirs(workspace, exist_ok=True)
    shutil.copy(config_file, workspace)
    logger = config_logger(workspace)
    results = []
    worse_cases = []
    better_cases = []
    for root, _, files in os.walk(case_dir):
        for f in files:
            if not f.endswith('.aig'):
                continue
            source = os.path.join(root, f)
            case = Case(source, workspace)
            result = [case.case_name]
            # run abc
            lut_num, area, level, memory, maptime, runtime = case.run_abc()
            if area == '0': continue
            result.extend(lut_num + [area, level, maptime, str(runtime), str(memory)])
            # run ifpga (local renamed from `time` to avoid shadowing the module)
            lut_num, gate, delay, peak, imaptime, itime = case.run_ifpga()
            result.extend(lut_num + [gate, delay, imaptime, str(itime), str(peak)])
            # Relative scores: ABC / iFPGA * 100, so >100 favors iFPGA.
            time_score = float(runtime) / float(itime) * 100
            maptime_score = float(maptime) / float(imaptime) * 100
            memory_score = float(memory) / float(peak) * 100
            result.append(f'{maptime_score:.2f}')
            result.append(f'{time_score:.2f}')
            result.append(f'{memory_score:.2f}')
            # score = abc_area / pcl_area * 100 * 0.3 + abc_level / pcl_level * 100 * 0.7
            score = 30.0 * int(area) / int(gate) + 70.0 * int(level) / int(delay)
            # score2 = (abc_area * abc_level) / (pcl_area * pcl_level)
            score2 = 100 * int(area) * int(level) / (int(gate) * int(delay))
            result.append(f'{score:.2f}')
            result.append(f'{score2:.2f}')
            results.append(result)
            if int(gate) > int(area) and int(delay) > int(level):
                worse_cases.append(case.case_name)
            elif int(gate) <= int(area) and int(delay) <= int(level):
                better_cases.append(case.case_name)
    lut_input = config.getint('global', 'K')
    header = ['case_name']
    header.extend([f'abc_lut{i+1}' for i in range(lut_input)])
    header.extend(['abc_area', 'abc_level', 'abc_maptime', 'abc_time', 'abc_mem'])
    header.extend([f'ifpga_lut{i+1}' for i in range(lut_input)])
    header.extend(['ifpga_area', 'ifpga_level', 'ifpga_maptime', 'ifpga_time', 'ifpga_mem'])
    header.extend(['maptime_score', 'time_score', 'mem_score', 'score', 'score2'])
    results.sort(key=lambda x: float(x[-2]))
    # Bucket cases by size.
    # NOTE(review): i[5] is the 'abc_lut5' column, not 'abc_area' (which sits
    # at index 1 + lut_input); kept as-is — confirm which column was intended.
    small_cases = [i for i in results if int(i[5]) < 1000]
    small_memory_score = _mean([float(i[-3]) for i in small_cases])
    small_time_score = _mean([float(i[-4]) for i in small_cases])
    small_time2_score = _mean([float(i[-5]) for i in small_cases if float(i[-5]) > 0])
    # 1000 ~ 10000
    med_cases = [i for i in results if int(i[5]) >= 1000 and int(i[5]) <= 10000]
    med_memory_score = _mean([float(i[-3]) for i in med_cases])
    med_time_score = _mean([float(i[-4]) for i in med_cases])
    med_time2_score = _mean([float(i[-5]) for i in med_cases if float(i[-5]) > 0])
    # > 10000
    large_cases = [i for i in results if int(i[5]) > 10000]
    large_memory_score = _mean([float(i[-3]) for i in large_cases])
    large_time_score = _mean([float(i[-4]) for i in large_cases])
    large_time2_score = _mean([float(i[-5]) for i in large_cases if float(i[-5]) > 0])
    score = _mean([float(i[-2]) for i in results])
    score2 = _mean([float(i[-1]) for i in results])
    results.insert(0, header)
    # Pretty-print as a column-aligned table.
    max_list = [len(max(i, key=len)) for i in zip(*results)]
    format_str = ' '.join(['{{:<{}}}'.format(i) for i in max_list])
    result_str = '\n'.join([format_str.format(*i) for i in results])
    logger.info('case results:\n' + result_str)
    logger.info(f'worse cases: {", ".join(worse_cases)}')
    logger.info(f'worse rate: {len(worse_cases) / (len(results) - 1)}')
    logger.info(f'better cases: {", ".join(better_cases)}')
    logger.info(f'better rate: {len(better_cases) / (len(results) - 1)}')
    logger.info(f'small maptime score: {small_time2_score:.2f}')
    logger.info(f'medium maptime score: {med_time2_score:.2f}')
    logger.info(f'large maptime score: {large_time2_score:.2f}')
    logger.info(f'small time score: {small_time_score:.2f}')
    logger.info(f'medium time score: {med_time_score:.2f}')
    logger.info(f'large time score: {large_time_score:.2f}')
    logger.info(f'small memory score: {small_memory_score:.2f}')
    logger.info(f'medium memory score: {med_memory_score:.2f}')
    logger.info(f'large memory score: {large_memory_score:.2f}')
    logger.info(f'QoR(weight) score: {score:.2f}')
    logger.info(f'QoR(product) score: {score2:.2f}')
    # write to .csv
    csv_file = os.path.join(workspace, 'test.csv')
    with open(csv_file, 'w') as f:
        f.write('\n'.join([','.join(l) for l in results]))
        # Bug fix: the maptime slot of the summary row wrote large_time_score;
        # it must be large_time2_score (the large-bucket maptime score).
        f.write('\n' + ' '.join([f'{small_time2_score}', f'{med_time2_score}', f'{large_time2_score}',
                                 f'{small_time_score}', f'{med_time_score}',
                                 f'{large_time_score:.2f}', f'{small_memory_score:.2f}',
                                 f'{med_memory_score:.2f}', f'{large_memory_score:.2f}',
                                 f'{score:.2f}', f'{score2:.2f}']))
    logger.info(f'Generating result file: {csv_file}')
    logger.info(f"Finished to run all cases, see '{workspace}' for details.")
if __name__ == '__main__':
    import sys
    # `config` is intentionally module-level: Case and main() read it as a
    # global. Usage: python <script> <config.ini>
    config = configparser.ConfigParser()
    config.read(sys.argv[1])
    main(sys.argv[1])
|
# -*- coding: utf-8 -*-
# for localized messages
from __future__ import print_function
from __future__ import absolute_import
from .__init__ import _
import Components.Task
from enigma import eTimer
from Components.config import config
from Plugins.Plugin import PluginDescriptor
from .NetworkBrowser import NetworkBrowser
from Components.Network import iNetwork
from .MountManager import AutoMountManager
from .AutoMount import iAutoMount
# Filled in by Plugins() with the plugin's install path.
plugin_path = ""
# Singleton poller instance, created once by autostart().
mountagaincheckpoller = None
class MountAgainCheckPoller:
	"""Periodically re-mounts configured network shares in the background.

	A single long timer drives the cycle: when automount polling is
	enabled a background job is queued via the global job manager,
	otherwise the poller just re-checks the config half an hour later.
	"""

	def __init__(self, session):
		self.session = session
		self.timer = eTimer()
		self.timer.callback.append(self.onTimer)
		# Fire immediately on construction.
		self.timer.startLongTimer(0)

	def onTimer(self):
		self.timer.stop()
		if config.networkbrowser.automountpoll.value:
			self.mountagaincheck()
		else:
			# Polling disabled: look at the config again in 30 minutes.
			self.timer.startLongTimer(30 * 60)

	def mountagaincheck(self):
		Components.Task.job_manager.AddJob(self.createCheckJob())

	def createCheckJob(self):
		"""Build the background job: mount shares unless the box is busy
		recording or playing back a movie file, then reschedule the timer."""
		job = Components.Task.Job(_("Network Browser"))
		isPlaying = ""
		try:
			service = self.session.nav.getCurrentlyPlayingServiceReference()
			isPlaying = service.toString()
			if not self.session.nav.RecordTimer.isRecording() and not isPlaying.startswith('1:0:0:0:0:0:0:0:0:0:'):
				print('[Networkbrowser MountAgain] Mounting network shares...')
				task = Components.Task.PythonTask(job, _("Mounting network shares..."))
				task.work = self.JobEpgCache
				task.weighting = 1
			elif self.session.nav.RecordTimer.isRecording():
				print('[Networkbrowser MountAgain] Skipping, as recording is in place.')
			elif isPlaying.startswith('1:0:0:0:0:0:0:0:0:0:'):
				print('[Networkbrowser MountAgain] Skipping, as watching a movie file is in place.')
		except Exception:
			# BUGFIX: was a bare 'except:', which also swallowed SystemExit
			# and KeyboardInterrupt.  service may be None here (AttributeError
			# on toString()); any failure is treated as "nothing playing".
			pass
		# Always append the rescheduling task so the poller keeps running.
		task = Components.Task.PythonTask(job, _("Adding schedule..."))
		task.work = self.JobSched
		task.weighting = 1
		return job

	def JobEpgCache(self):
		# Runs inside the background job: (re)mount all configured shares.
		print('[Networkbrowser MountAgain] mounting network shares.')
		iAutoMount.getAutoMountPoints()

	def JobSched(self):
		# Reschedule the next poll per the configured interval (hours).
		self.timer.startLongTimer(int(config.networkbrowser.automountpolltimer.value) * 3600)
def autostart(reason, session=None, **kwargs):
	"""Plugin autostart hook: lazily create the mount-again poller once a
	session is available (reason 0 == start)."""
	global mountagaincheckpoller
	global _session
	if reason != 0:
		return
	if session is None:
		return
	_session = session
	if mountagaincheckpoller is None:
		mountagaincheckpoller = MountAgainCheckPoller(session)
	# session.nav.RecordTimer.isRecording()
def NetworkBrowserMain(session, iface=None, **kwargs):
	"""Open the network-share browser screen on the given session."""
	screen_args = (NetworkBrowser, iface, plugin_path)
	session.open(*screen_args)
def MountManagerMain(session, iface=None, **kwargs):
	"""Open the automount manager screen on the given session."""
	screen_args = (AutoMountManager, iface, plugin_path)
	session.open(*screen_args)
def NetworkBrowserCallFunction(iface):
	"""Menu-entry selector: the browser is available for any interface."""
	return NetworkBrowserMain
def MountManagerCallFunction(iface):
	"""Menu-entry selector: the mount manager is available for any interface."""
	return MountManagerMain
def RemountMain(session, iface=None, **kwargs):
	"""Re-run automount for all configured shares (runs in background)."""
	from .AutoMount import iAutoMount
	iAutoMount.getAutoMountPoints()
def RemountCallFunction(iface):
	"""Menu-entry selector: remounting is available for any interface."""
	return RemountMain
def SchedMount(session, **kwargs):
	# NOTE(review): 'MountAgainCheck' is not defined anywhere in this module
	# (the poller class is MountAgainCheckPoller, which is not a screen);
	# calling this raises NameError as written -- confirm the intended
	# screen class.
	session.open(MountAgainCheck)
def Plugins(path, **kwargs):
	"""Declare the plugin's descriptors; remembers the install path so the
	screens can locate their resources."""
	global plugin_path
	plugin_path = path
	descriptors = [
		PluginDescriptor(where=[PluginDescriptor.WHERE_AUTOSTART, PluginDescriptor.WHERE_SESSIONSTART], fnc=autostart),
		PluginDescriptor(name=_("Network Browser"), description=_("Search for network shares"), where=PluginDescriptor.WHERE_NETWORKMOUNTS, fnc={"ifaceSupported": NetworkBrowserCallFunction, "menuEntryName": lambda x: _("Network Browser"), "menuEntryDescription": lambda x: _("Search for network shares...")}),
		PluginDescriptor(name=_("Mount Manager"), description=_("Manage network shares"), where=PluginDescriptor.WHERE_NETWORKMOUNTS, fnc={"ifaceSupported": MountManagerCallFunction, "menuEntryName": lambda x: _("Mount Manager"), "menuEntryDescription": lambda x: _("Manage your network shares...")}),
		PluginDescriptor(name=_("Mount Again"), description=_("Attempt to mount shares again"), where=PluginDescriptor.WHERE_NETWORKMOUNTS, fnc={"ifaceSupported": RemountCallFunction, "menuEntryName": lambda x: _("Mount again"), "menuEntryDescription": lambda x: _("Attempt to recover lost mounts (in background)")}),
	]
	return descriptors
|
import numpy as np
import pandas as pd
from shapely.geometry import Point
from soli.utilities.geometry.geometry import rotateAroundOrigin
from soli.utilities.geometry.polygon import Polygon
from soli.utilities.geometry.rectangle import Rectangle
from soli.utilities.math.general import getGoldenestFactor, getDivisorsAndRemainders
# from utilities.geometry.geometry import rotateAroundOrigin
# from utilities.geometry.rectangle import Rectangle
# from utilities.geometry.polygon import Polygon
def squarePacking(radius, rectangle):
    """Grid-pack circles of `radius` into `rectangle`, centring the grid.

    Returns an (n, 2) array of (length-axis, width-axis) centre coordinates.
    """
    diameter = 2 * radius
    cols = int(rectangle.width / diameter)
    rows = int(rectangle.length / diameter)
    # Centre the occupied grid inside the rectangle.
    length_offset = (rectangle.length - rows * diameter) / 2
    width_offset = (rectangle.width - cols * diameter) / 2
    centres = [
        (2 * row * radius + radius + length_offset,
         2 * col * radius + radius + width_offset)
        for row in range(rows)
        for col in range(cols)
    ]
    return np.asarray(centres)
def trianglePacking(radius, rectangle):
    """Triangular-lattice packing of circles of `radius` into `rectangle`.

    Rows alternate between "long" rows and "short" rows offset by one
    radius along the length axis; the lattice is centred inside the
    rectangle.  Returns an (n, 2) array of (length-axis, width-axis)
    centre coordinates.
    """
    long_count = int(rectangle.length / (2 * radius))
    short_count = int((rectangle.length - radius) / (2 * radius))
    used_length = radius * (long_count + short_count + 1)
    length_offset = (rectangle.length - used_length) / 2
    # Rows sit sqrt(3)*radius apart; the outermost rows add a small "cap".
    cap = radius * (2 - np.sqrt(3))
    row_pitch = radius * np.sqrt(3)
    n_rows = (rectangle.width - cap) // row_pitch
    n_short = int(n_rows // 2)
    n_long = int(n_rows - n_short)
    width_offset = (rectangle.width - (cap + n_rows * row_pitch)) / 2
    centres = []
    for row in range(n_long):
        y = (2 * row_pitch * row) + radius + width_offset
        for col in range(long_count):
            centres.append(((2 * col * radius) + radius + length_offset, y))
    for row in range(n_short):
        y = (2 * row_pitch * row) + row_pitch + radius + width_offset
        for col in range(short_count):
            centres.append(((2 * col * radius) + 2 * radius + length_offset, y))
    return np.asarray(centres)
def packWithBestPackingScheme(radius, rectangle):
    """Return whichever packing (square vs triangular) fits more circles.

    Ties go to the square packing.
    """
    candidates = (squarePacking(radius, rectangle),
                  trianglePacking(radius, rectangle))
    return max(candidates, key=len)
def getAbsolutePositions(relativePositions, rectangle):
    """Map rectangle-local positions into world coordinates.

    Swaps the two coordinate columns when the rectangle is "fallen over",
    rotates by the rectangle's base angle, then translates to its lowest
    point.  An empty input is returned unchanged.
    """
    if relativePositions.size == 0:
        return relativePositions
    positions = relativePositions
    if rectangle.isFallenOver:
        positions = np.flip(positions, 1)
    rotated = rotateAroundOrigin(rectangle.baseAngle, positions)
    return np.add(rotated, rectangle.lowestPoint)
def removePositionsOutsideBounds(positions, bounds):
    # NOTE(review): despite the name, this returns the positions that lie
    # OUTSIDE the bounds polygon (presumably the list of positions that
    # should be removed) -- confirm callers expect that rather than the
    # filtered-in set.
    positionsOutsideBounds = []
    shape = Polygon(bounds)
    for position in positions:
        if not shape.contains(Point(position)):
            positionsOutsideBounds.append(position)
    return positionsOutsideBounds
def packArea(radius, plot):
    """Pack circles of `radius` into `plot`'s smallest bounding rectangle
    and return their absolute (world-frame) centre positions."""
    bounding = Rectangle(plot.smallestBoundingRectangleCoordinates)
    relative = packWithBestPackingScheme(radius, bounding)
    return getAbsolutePositions(relative, bounding)
def isPlotTooLong(squarestFactor):
    # NOTE(review): appears unfinished -- prints the aspect ratio of the
    # squarest factor pair and returns None instead of a boolean.
    print(squarestFactor[0] / squarestFactor[1])
    return
def findAreaToPack(crop, numberOfCrops):
    # NOTE(review): getFactorPairs is neither imported nor defined in this
    # module -- calling this raises NameError as written.  The `crop`
    # parameter is also unused.
    factors = getFactorPairs(numberOfCrops)
    nextLargestNumberOfCrops = numberOfCrops + 1
    if len(factors) == 0:
        return
    # Grow the crop count until it has more than one factor pair (i.e. is
    # not prime), so a non-degenerate rectangular layout exists.
    while len(factors) == 1:
        factors = getFactorPairs(nextLargestNumberOfCrops)
        nextLargestNumberOfCrops += 1
    squarestFactor = factors[-1]
    goldenestFactor = getGoldenestFactor(factors)
    print(goldenestFactor)
## 1. find square packing arrangement with least area
#### this will be the arrangement(s) with the fewest number of wasted spaces
##
## 2. find triangle packing arrangement with the least area
#### this will be the arrangement(s) with the fewest number of wasted spaces
#### AND the "squarest" arrangement
##
## 3. compare the two arrangements and choose the one with the lowest area
## 4. fill in wasted space(s) and compute new crop totals
## Restrictions:
## The width cannot exceed 4ft
# Garden dimensions used by the exploratory code in this module.
garden = [10, 20]
def getPerimeter(squarePacking):
    """Perimeter of a square-packed layout, in crop-diameter units."""
    return 2 * (squarePacking.rows + squarePacking.cols)
def getSquarePackedArea(plots, radius, walkwayWidth):
    """Footprint of a square-packed layout including walkways.

    A partially-filled final column (remainder > 0) counts as a full
    column; walkways run along the perimeter plus four corner squares.
    """
    crop_footprint = (radius * 2) ** 2
    effective_cols = plots.cols + (plots.remainder > 0)
    crop_area = plots.rows * effective_cols * crop_footprint
    walkway_area = walkwayWidth * plots.perimeter + 4 * walkwayWidth ** 2
    return crop_area + walkway_area
def getSquarePackingAreaData(numberOfCrops, radius):
    """Tabulate candidate square-packed layouts and return the cheapest.

    Builds one row per (rows, cols, remainder) divisor triple and returns
    the length/width/perimeter/area of the minimum-area layout.
    """
    squarePacking = pd.DataFrame(
        getDivisorsAndRemainders(numberOfCrops),
        columns=['rows', 'cols', 'remainder'])
    # Physical dimensions: each crop occupies a (2 * radius) square.
    squarePacking[['length', 'width']] = squarePacking[['rows', 'cols']] * radius * 2
    squarePacking['perimeter'] = getPerimeter(squarePacking)
    # Walkway width is hard-coded to 1 here.
    squarePacking['area'] = getSquarePackedArea(squarePacking, radius, 1)
    return squarePacking.loc[squarePacking.area.idxmin()][['length', 'width', 'perimeter', 'area']]
def getHalfFactors(x):
    """Enumerate alternating-row layouts for x items.

    For each half-step candidate i in [1.5, sqrt(x) + 1), rows alternate
    between `longRow` (= floor(i)) and `shortRow` (= longRow - 1 for whole
    i, longRow otherwise).  Columns are consumed greedily until all x
    items are placed; `remainder` is the overshoot of the last column.

    Returns an array of (longRow, shortRow, columns, remainder) rows.
    """
    layouts = []
    for i in np.arange(1.5, int(np.sqrt(x)) + 1, 0.5):
        long_row = int(i)
        short_row = long_row - 1 if i % 1 == 0 else long_row
        columns, remaining = 0, x
        while remaining > 0:
            remaining -= long_row if columns % 2 == 0 else short_row
            columns += 1
        layouts.append((long_row, short_row, columns, abs(remaining)))
    return np.array(layouts)
def getTrianglePackingAreaData(numberOfCrops, radius):
    """Tabulate triangular-lattice layout dimensions for every candidate
    alternating-row arrangement of `numberOfCrops` circles."""
    frame = pd.DataFrame(
        getHalfFactors(numberOfCrops),
        columns=["longRow", "shortRow", "cols", "remainder"])
    row_pitch = radius * np.sqrt(3)
    cap = radius * (2 - np.sqrt(3))
    # A short row equal in size to the long row sticks out one extra radius.
    frame["length"] = radius * 2 * frame.longRow + (
        1 - (frame.longRow - frame.shortRow)) * radius
    frame["width"] = (row_pitch * frame.cols) + cap
    return frame
def getSmallestArea(numberOfCrops, radius):
    """Return the triangle-packing candidates only; the square-packing
    comparison is currently disabled."""
    # squareAreas = getSquarePackingAreaData(numberOfCrops, radius)
    return getTrianglePackingAreaData(numberOfCrops, radius)
# for x in range(1, 21):
#     print(x)
#     print(getSmallestArea(x, 0.5))
#     print()
# Ad-hoc demo: show triangle-packing candidates for 20 crops of radius 0.5.
print(getSmallestArea(20, 0.5))
|
from __future__ import unicode_literals
import re
from decimal import Decimal
import ply.yacc as yacc
from .ast import Comparison, Const, Expression, List, Logical, Name
from .compat import binary_type, text_type
from .exceptions import DjangoQLParserError
from .lexer import DjangoQLLexer
# Matches one escape sequence inside a DjangoQL string literal: either a
# simple escaped character (e.g. \") or an escaped unicode sequence
# (e.g. \u0041), as defined by the lexer's regexes.
unescape_pattern = re.compile(
    '(' + DjangoQLLexer.re_escaped_char + '|' +
    DjangoQLLexer.re_escaped_unicode + ')',
)
def unescape_repl(m):
    """re.sub callback: decode one escape sequence matched by
    unescape_pattern into the character it represents."""
    escaped = m.group(1)
    if len(escaped) == 2:
        # Simple escape like \" or \\ -- the escaped character itself.
        return escaped[1]
    # Unicode escape like \u0041 -- let Python's codec decode it.
    return escaped.encode('utf8').decode('unicode_escape')
def unescape(value):
    """Decode all escape sequences in a string literal; bytes input is
    first decoded as UTF-8."""
    if isinstance(value, binary_type):
        value = value.decode('utf8')
    return unescape_pattern.sub(unescape_repl, value)
class DjangoQLParser(object):
    """Yacc-based parser for the DjangoQL query language.

    NOTE: the docstrings of the p_* methods below are PLY grammar rules --
    ply.yacc reads them via introspection, so they are executable
    specification, not documentation, and must not be reworded.
    """
    def __init__(self, debug=False, **kwargs):
        # The lexer supplies the token list PLY needs to build the parser.
        self.default_lexer = DjangoQLLexer()
        self.tokens = self.default_lexer.tokens
        kwargs['debug'] = debug
        if 'write_tables' not in kwargs:
            # Don't write parser tables (parsetab.py) unless asked to.
            kwargs['write_tables'] = False
        self.yacc = yacc.yacc(module=self, **kwargs)
    def parse(self, input=None, lexer=None, **kwargs):  # noqa: A002
        # Parse `input` and return the AST root; uses the default lexer
        # unless an alternative one is supplied.
        lexer = lexer or self.default_lexer
        return self.yacc.parse(input=input, lexer=lexer, **kwargs)
    # Grammar start symbol.
    start = 'expression'
    def p_expression_parens(self, p):
        """
        expression : PAREN_L expression PAREN_R
        """
        # Parentheses only group; the inner expression is the result.
        p[0] = p[2]
    def p_expression_logical(self, p):
        """
        expression : expression logical expression
        """
        p[0] = Expression(left=p[1], operator=p[2], right=p[3])
    def p_expression_comparison(self, p):
        """
        expression : name comparison_number number
        | name comparison_string string
        | name comparison_equality boolean_value
        | name comparison_equality none
        | name comparison_in_list const_list_value
        """
        p[0] = Expression(left=p[1], operator=p[2], right=p[3])
    def p_name(self, p):
        """
        name : NAME
        """
        # Dotted names become a parts list, e.g. "a.b.c" -> ["a","b","c"].
        p[0] = Name(parts=p[1].split('.'))
    def p_logical(self, p):
        """
        logical : AND
        | OR
        """
        p[0] = Logical(operator=p[1])
    def p_comparison_number(self, p):
        """
        comparison_number : comparison_equality
        | comparison_greater_less
        """
        p[0] = p[1]
    def p_comparison_string(self, p):
        """
        comparison_string : comparison_equality
        | comparison_greater_less
        | comparison_contains
        | comparison_regex
        """
        p[0] = p[1]
    def p_comparison_equality(self, p):
        """
        comparison_equality : EQUALS
        | NOT_EQUALS
        """
        p[0] = Comparison(operator=p[1])
    def p_comparison_greater_less(self, p):
        """
        comparison_greater_less : GREATER
        | GREATER_EQUAL
        | LESS
        | LESS_EQUAL
        """
        p[0] = Comparison(operator=p[1])
    def p_comparison_contains(self, p):
        """
        comparison_contains : CONTAINS
        | NOT_CONTAINS
        """
        p[0] = Comparison(operator=p[1])
    def p_comparison_regex(self, p):
        """
        comparison_regex : REGEX
        """
        p[0] = Comparison(operator=p[1])
    def p_comparison_in_list(self, p):
        """
        comparison_in_list : IN
        | NOT IN
        """
        # "not in" arrives as two tokens; join them into one operator.
        if len(p) == 2:
            p[0] = Comparison(operator=p[1])
        else:
            p[0] = Comparison(operator='%s %s' % (p[1], p[2]))
    def p_const_value(self, p):
        """
        const_value : number
        | string
        | none
        | boolean_value
        """
        p[0] = p[1]
    def p_number_int(self, p):
        """
        number : INT_VALUE
        """
        p[0] = Const(value=int(p[1]))
    def p_number_float(self, p):
        """
        number : FLOAT_VALUE
        """
        # Decimal avoids binary floating-point rounding in comparisons.
        p[0] = Const(value=Decimal(p[1]))
    def p_string(self, p):
        """
        string : STRING_VALUE
        """
        p[0] = Const(value=unescape(p[1]))
    def p_none(self, p):
        """
        none : NONE
        """
        p[0] = Const(value=None)
    def p_boolean_value(self, p):
        """
        boolean_value : true
        | false
        """
        p[0] = p[1]
    def p_true(self, p):
        """
        true : TRUE
        """
        p[0] = Const(value=True)
    def p_false(self, p):
        """
        false : FALSE
        """
        p[0] = Const(value=False)
    def p_const_list_value(self, p):
        """
        const_list_value : PAREN_L const_value_list PAREN_R
        """
        p[0] = List(items=p[2])
    def p_const_value_list(self, p):
        """
        const_value_list : const_value_list COMMA const_value
        """
        # Left-recursive accumulation of list items.
        p[0] = p[1] + [p[3]]
    def p_const_value_list_single(self, p):
        """
        const_value_list : const_value
        """
        p[0] = [p[1]]
    def p_error(self, token):
        # Called by PLY on a syntax error; token is None at end of input.
        if token is None:
            self.raise_syntax_error('Unexpected end of input')
        else:
            fragment = text_type(token.value)
            if len(fragment) > 20:
                # Trim long fragments for a readable error message.
                fragment = fragment[:17] + '...'
            self.raise_syntax_error(
                'Syntax error at %s' % repr(fragment),
                token=token,
            )
    def raise_syntax_error(self, message, token=None):
        """Raise DjangoQLParserError, attaching position information when
        the lexer can provide it."""
        if token is None:
            raise DjangoQLParserError(message)
        lexer = token.lexer
        if callable(getattr(lexer, 'find_column', None)):
            column = lexer.find_column(token)
        else:
            column = None
        raise DjangoQLParserError(
            message=message,
            value=token.value,
            line=token.lineno,
            column=column,
        )
|
from JumpScale import j
from .WatchdogClient import *
# Register the watchdog client under j.tools.watchdog at import time.
j.base.loader.makeAvailable(j, 'tools.watchdog')
j.tools.watchdog.client=WatchdogClient()
|
class Student:
    """Demo of Python's name-mangled "private" attributes.

    Attributes prefixed with two underscores are stored on the instance
    under mangled names (_Student__Id etc.), so they are hidden from
    casual access but not truly private.
    """

    def __init__(self):
        # Ordinary public attributes.
        self.id = 124
        self.name = "Krishal"
        # Double-underscore attributes are name-mangled by the compiler.
        self.__Id = 134
        self.__Name = "LOL"

    def display(self):
        """Print the private attributes from inside the class, where the
        unmangled names still resolve."""
        print(self.__Id)
        print(self.__Name)
# Demo: public attributes are reachable directly; private ones only via
# the class's display() method or the mangled name.
s = Student()
print(s.id)
print(s.name)
s.display()
# The below two won't work as the variables are private
# print(s.__Id)
# print(s.__Name)
# Using name mangling:
print(s._Student__Id)
# So in python when we make variables or fields, they are not completely hidden.
# To access the private field, the syntax would be:
# object._ClassName__PrivateVariable
from logging import getLogger
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres import search as s
from django.contrib.postgres.indexes import GinIndex
from django.db import models as m
from django.dispatch import receiver
# Module-level logger, named after this module.
LOG = getLogger(__name__)
class SearchFeature(m.Model):
    """Full-text search index row attached to an arbitrary model instance
    via a generic foreign key."""
    content_type = m.ForeignKey(ContentType, on_delete=m.CASCADE, null=True)
    object_id = m.PositiveIntegerField(null=True, db_index=True)
    content_object = GenericForeignKey("content_type", "object_id")
    # Raw text being indexed; search_vector is derived from it by the
    # post_save receiver defined in this module.
    text_feature = m.TextField()
    search_vector = s.SearchVectorField()
    class Meta:
        # NOTE(review): index_together is deprecated in recent Django
        # versions -- confirm the targeted Django version still supports it.
        unique_together = index_together = ["object_id", "content_type"]
        indexes = [GinIndex(fields=["search_vector"])]
@receiver(m.signals.post_save, sender=SearchFeature)
def auto_update_search_vector(sender, instance, *args, **kwargs):
    """Keep the index up-to-date automatically"""
    LOG.debug(f"{sender=} {instance=}")
    # use jiebaqry to maximize number of terms
    vector = s.SearchVector("text_feature", config="jiebaqry")
    sender.objects.filter(pk=instance.pk).update(search_vector=vector)
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
import StateTransition
from Enums import EStateElementType, EStateTransitionOrderMode
from State import State
import state_transition_test_utils as utils
from base.Sequence import Sequence
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test verifies that multiple StateTransitions can be executed within a
# single test with interleaved instructions.
class MainSequence(Sequence):
    """Runs several StateTransitions in one test, with batches of
    interleaved random instructions between them, verifying the resulting
    State after each transition."""
    def __init__(self, aGenThread, aName=None):
        super().__init__(aGenThread, aName)
        # Expected values for the most recently created State, rebuilt by
        # each _createState*() and checked via utils.verify_state().
        self._mExpectedStateData = {}
    def generate(self, **kargs):
        # Transition A: M privilege level, explicit ByPriority ordering.
        state_a = self._createStateA()
        StateTransition.transitionToState(state_a, EStateTransitionOrderMode.ByPriority)
        utils.verify_state(self, self._mExpectedStateData)
        if self.getGlobalState("AppRegisterWidth") == 32:
            instructions = (
                "ADDI##RISCV",
                "ADD##RISCV",
                "LUI##RISCV",
                "SLLI#RV32I#RISCV",
                "SRLI#RV32I#RISCV",
            )
        else:
            instructions = (
                "ADDI##RISCV",
                "ADDW##RISCV",
                "LUI##RISCV",
                "SLLI#RV64I#RISCV",
                "SRLI#RV64I#RISCV",
            )
        for _ in range(RandomUtils.random32(200, 500)):
            self.genInstruction(self.choice(instructions))
        # Transition B: S privilege level with half the GPRs forced to -1.
        state_b = self._createStateB()
        StateTransition.transitionToState(state_b)
        utils.verify_state(self, self._mExpectedStateData)
        for _ in range(RandomUtils.random32(200, 500)):
            if self.getGlobalState("AppRegisterWidth") == 32:
                self.genInstruction("SW##RISCV")
            else:
                self.genInstruction("SD##RISCV")
        # Transition C: FP rounding mode and FP register values.
        state_c = self._createStateC()
        StateTransition.transitionToState(state_c)
        utils.verify_state(self, self._mExpectedStateData)
        for _ in range(RandomUtils.random32(200, 500)):
            self.genInstruction("FMUL.D##RISCV")
    # Create a State in M privilege level configured to trigger a timer
    # interrupt.
    def _createStateA(self):
        state = State()
        self._mExpectedStateData = {}
        state.addPrivilegeLevelStateElementByName("M")
        # Privilege level M corresponds to the value 3.
        self._mExpectedStateData[EStateElementType.PrivilegeLevel] = 3
        return state
    # Create a State in S privilege level with the Sv39 virtual memory system
    # enabled. Half of the GPRs are specified with a value of -1.
    def _createStateB(self):
        state = State()
        self._mExpectedStateData = {}
        state.addPrivilegeLevelStateElementByName("S")
        self._mExpectedStateData[EStateElementType.PrivilegeLevel] = 1
        expected_gpr_state_data = []
        # Odd-numbered GPRs x1, x3, ..., x31 get all-ones values.
        for gpr_index in range(1, 32, 2):
            gpr_name = "x%d" % gpr_index
            gpr_val = (
                0xFFFFFFFF if self.getGlobalState("AppRegisterWidth") == 32 else 0xFFFFFFFFFFFFFFFF
            )
            state.addRegisterStateElement(gpr_name, (gpr_val,))
            expected_gpr_state_data.append((gpr_name, gpr_val))
        self._mExpectedStateData[EStateElementType.GPR] = expected_gpr_state_data
        return state
    # Create a State with the rounding mode set to round towards 0 and all of
    # the floating point registers with values in the interval [0, 1.0).
    def _createStateC(self):
        state = State()
        self._mExpectedStateData = {}
        expected_sys_reg_state_data = []
        fcsr_name = "fcsr"
        state.addSystemRegisterStateElementByField(fcsr_name, "FRM", 1)
        (fcsr_val, valid) = self.readRegister(fcsr_name)
        utils.assert_valid_register_value(self, fcsr_name, valid)
        # NOTE(review): fcsr_val is computed below but never appended to
        # expected_sys_reg_state_data -- confirm whether fcsr was meant to
        # be verified as well.
        fcsr_val = utils.combine_register_value_with_field_value(
            self, fcsr_name, fcsr_val, "FRM", 1
        )
        expected_fp_reg_state_data = []
        for fp_reg_index in range(0, 32):
            fp_reg_val = RandomUtils.random64(0, 0x3FFFFFFFFFFFFFFF)
            state.addRegisterStateElement(("D%d" % fp_reg_index), (fp_reg_val,))
        # NOTE(review): expected_fp_reg_state_data is never populated in the
        # loop above (compare the append in _createStateB), so the FP
        # register expectation stored here is always empty -- confirm intent.
        self._mExpectedStateData[
            EStateElementType.FloatingPointRegister
        ] = expected_fp_reg_state_data
        sstatus_name = "sstatus"
        fs_val = RandomUtils.random32(1, 3)
        state.addSystemRegisterStateElementByField(sstatus_name, "FS", fs_val)
        (sstatus_val, valid) = self.readRegister(sstatus_name)
        utils.assert_valid_register_value(self, sstatus_name, valid)
        sstatus_val = utils.combine_register_value_with_field_value(
            self, sstatus_name, sstatus_val, "FS", fs_val
        )
        # Adjust expected value of SD bit according to architecture rules
        (xs_val, valid) = self.readRegister(sstatus_name, field="XS")
        utils.assert_valid_register_value(self, sstatus_name, valid)
        (vs_val, valid) = self.readRegister(sstatus_name, field="VS")
        utils.assert_valid_register_value(self, sstatus_name, valid)
        # SD is expected set iff any of FS/XS/VS reads as dirty (== 3).
        if (fs_val == 3) or (xs_val == 3) or (vs_val == 3):
            sstatus_val = utils.combine_register_value_with_field_value(
                self, sstatus_name, sstatus_val, "SD", 1
            )
        else:
            sstatus_val = utils.combine_register_value_with_field_value(
                self, sstatus_name, sstatus_val, "SD", 0
            )
        expected_sys_reg_state_data.append((sstatus_name, sstatus_val))
        self._mExpectedStateData[EStateElementType.SystemRegister] = expected_sys_reg_state_data
        return state
# Standard hooks picked up by the FORCE-RISCV test framework.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
|
"""
737. Sentence Similarity II
Medium
We can represent a sentence as an array of words, for example, the sentence "I am happy with leetcode" can be represented as arr = ["I","am","happy","with","leetcode"].
Given two sentences sentence1 and sentence2 each represented as a string array and given an array of string pairs similarPairs where similarPairs[i] = [xi, yi] indicates that the two words xi and yi are similar.
Return true if sentence1 and sentence2 are similar, or false if they are not similar.
Two sentences are similar if:
They have the same length (i.e., the same number of words)
sentence1[i] and sentence2[i] are similar.
Notice that a word is always similar to itself, also notice that the similarity relation is transitive. For example, if the words a and b are similar, and the words b and c are similar, then a and c are similar.
Example 1:
Input: sentence1 = ["great","acting","skills"], sentence2 = ["fine","drama","talent"], similarPairs = [["great","good"],["fine","good"],["drama","acting"],["skills","talent"]]
Output: true
Explanation: The two sentences have the same length and each word i of sentence1 is also similar to the corresponding word in sentence2.
Example 2:
Input: sentence1 = ["I","love","leetcode"], sentence2 = ["I","love","onepiece"], similarPairs = [["manga","onepiece"],["platform","anime"],["leetcode","platform"],["anime","manga"]]
Output: true
Explanation: "leetcode" --> "platform" --> "anime" --> "manga" --> "onepiece".
Since "leetcode" is similar to "onepiece" and the first two words are the same, the two sentences are similar.
Example 3:
Input: sentence1 = ["I","love","leetcode"], sentence2 = ["I","love","onepiece"], similarPairs = [["manga","hunterXhunter"],["platform","anime"],["leetcode","platform"],["anime","manga"]]
Output: false
Explanation: "leetcode" is not similar to "onepiece".
Constraints:
1 <= sentence1.length, sentence2.length <= 1000
1 <= sentence1[i].length, sentence2[i].length <= 20
sentence1[i] and sentence2[i] consist of lower-case and upper-case English letters.
0 <= similarPairs.length <= 2000
similarPairs[i].length == 2
1 <= xi.length, yi.length <= 20
xi and yi consist of English letters.
"""
# V0
# IDEA : DFS
from collections import defaultdict
class Solution(object):
    """Sentence Similarity II via DFS over the similarity graph."""

    def areSentencesSimilarTwo(self, sentence1, sentence2, similarPairs):
        """Return True iff the sentences are word-by-word similar under
        the transitive closure of similarPairs."""
        # Sentences of different lengths are never similar.
        if len(sentence1) != len(sentence2):
            return False
        # Bidirectional adjacency list of the similarity pairs.
        d = defaultdict(list)
        for a, b in similarPairs:
            d[a].append(b)
            d[b].append(a)

        def dfs(target, word, visited):
            # True iff `target` is reachable from `word` in the graph.
            # BUGFIX(idiom): the equality test was previously inside the
            # neighbour loop, re-evaluated on every iteration; hoisted out.
            if target == word:
                return True
            for nxt in d[word]:
                if nxt not in visited:
                    visited.add(nxt)
                    if dfs(target, nxt, visited):
                        return True
            return False

        for i in range(len(sentence1)):
            if sentence1[i] == sentence2[i]:
                continue
            if not dfs(sentence1[i], sentence2[i], {sentence2[i]}):
                return False
        return True
# V0'
# IDEA : BFS
class Solution:
    """Sentence Similarity II via BFS over the similarity graph."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        """Return True iff the sentences are word-by-word similar under
        the transitive closure of `pairs`."""
        # Sentences of different lengths are never similar.
        if len(words1) != len(words2):
            return False
        # Undirected adjacency sets of the similarity pairs.
        graph = collections.defaultdict(set)
        for a, b in pairs:
            graph[a].add(b)
            graph[b].add(a)
        for src, dst in zip(words1, words2):
            # BFS from src looking for dst.
            queue = collections.deque([src])
            visited = set()
            while queue:
                word = queue.popleft()
                if word == dst:
                    break
                for neighbour in graph[word]:
                    if neighbour not in visited:
                        visited.add(neighbour)
                        queue.append(neighbour)
            else:
                # Queue exhausted without reaching dst: not similar.
                return False
        return True
# V0'''
# IDEA : UNION FIND
class Solution(object):
    """Sentence Similarity II via union-find over the pair vocabulary."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        parent = {}

        def add(word):
            # Register a previously unseen word as its own root.
            parent.setdefault(word, word)

        def find(word):
            # Root lookup with path compression.
            if parent[word] != word:
                parent[word] = find(parent[word])
            return parent[word]

        def union(a, b):
            root_a, root_b = find(a), find(b)
            if root_a != root_b:
                parent[root_b] = root_a

        for a, b in pairs:
            add(a)
            add(b)
            union(a, b)
        for w1, w2 in zip(words1, words2):
            if w1 == w2:
                continue
            # Words absent from the pairs can only match themselves.
            if w1 not in parent or w2 not in parent:
                return False
            if find(w1) != find(w2):
                return False
        return True
# V0''''
# IDEA : DFS
# https://zxi.mytechroad.com/blog/hashtable/leetcode-737-sentence-similarity-ii/
# CONCEPT :
# -> 1) MAKE A GRAPH
# -> 2) PUT ALL word belong to "the same" "group" into the same connected components
# -> 3) GO THROUGH EVERY WORD IN words1, words2 AND CHECK IF THEY ARE IN THE SAME connected components (use DFS TO this)
# STEPS :
# -> 1) MAKE A GRAPH (w1 -> w2, w2 -> w1)
# -> 2) DFS GO THROUGH EVERY WORD IN words1, words2 AND CHECK IF THEY ARE IN THE SAME connected components
import collections
class Solution(object):
    """Sentence Similarity II: DFS reachability within word clusters."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        neighbours = collections.defaultdict(set)
        for a, b in pairs:
            neighbours[a].add(b)
            neighbours[b].add(a)

        def reachable(target, word, visited):
            # True iff `target` is in the same similarity cluster as `word`.
            for candidate in neighbours[word]:
                if candidate == target:
                    return True
                if candidate not in visited:
                    visited.add(candidate)
                    if reachable(target, candidate, visited):
                        return True
            return False

        for w1, w2 in zip(words1, words2):
            if w1 != w2 and not reachable(w1, w2, {w2}):
                return False
        return True
# V1
# IDEA : BFS
# https://leetcode.com/problems/sentence-similarity-ii/discuss/928878/Easy-%2B-Straightforward-Python-BFS-with-Explaination-and-Comments!
class Solution:
    """Sentence Similarity II solved with an iterative BFS per word pair."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        # Sentences of different lengths are never similar.
        if len(words1) != len(words2):
            return False
        adjacency = collections.defaultdict(set)
        for left, right in pairs:
            adjacency[left].add(right)
            adjacency[right].add(left)
        return all(self._connected(adjacency, a, b)
                   for a, b in zip(words1, words2))

    @staticmethod
    def _connected(adjacency, start, goal):
        # BFS from start; True iff goal is reachable (or equal to start).
        if start == goal:
            return True
        queue = collections.deque([start])
        visited = set()
        while queue:
            word = queue.popleft()
            if word == goal:
                return True
            for nxt in adjacency[word]:
                if nxt not in visited:
                    visited.add(nxt)
                    queue.append(nxt)
        return False
# V1''
# IDEA : DICT + HASH TABLE + BFS/DFS
# http://bookshadow.com/weblog/2017/11/26/leetcode-sentence-similarity-ii/
# https://zxi.mytechroad.com/blog/hashtable/leetcode-737-sentence-similarity-ii/
import collections
class Solution(object):
    """Sentence Similarity II -- recursive DFS over a defaultdict graph."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        similar_to = collections.defaultdict(set)
        for x, y in pairs:
            similar_to[x].add(y)
            similar_to[y].add(x)

        def search(goal, current, seen):
            # True iff `goal` appears in `current`'s similarity cluster.
            for nxt in similar_to[current]:
                if goal == nxt:
                    return True
                if nxt not in seen:
                    seen.add(nxt)
                    if search(goal, nxt, seen):
                        return True
            return False

        for w1, w2 in zip(words1, words2):
            if w1 == w2:
                continue
            if not search(w1, w2, {w2}):
                return False
        return True
### Test case : dev
# V1'''
# IDEA : DFS (queue format)
# https://leetcode.com/problems/sentence-similarity-ii/solution/
class Solution(object):
    """Sentence Similarity II -- iterative (explicit-stack) DFS."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        graph = collections.defaultdict(list)
        for a, b in pairs:
            graph[a].append(b)
            graph[b].append(a)
        for w1, w2 in zip(words1, words2):
            # Depth-first search from w1 towards w2.
            stack = [w1]
            seen = {w1}
            while stack:
                word = stack.pop()
                if word == w2:
                    break
                for neighbour in graph[word]:
                    if neighbour not in seen:
                        seen.add(neighbour)
                        stack.append(neighbour)
            else:
                # Stack exhausted without reaching w2: not similar.
                return False
        return True
# V1''''
# IDEA : UnionFind
# https://leetcode.com/problems/sentence-similarity-ii/discuss/725681/Python-Union-Find-solution
class Solution(object):
    """Sentence Similarity II -- union-find keyed directly on the words."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        root = {}

        def ensure(word):
            # Lazily create a singleton set for each word in `pairs`.
            if word not in root:
                root[word] = word

        def find(word):
            # Recursive find with full path compression.
            if root[word] != word:
                root[word] = find(root[word])
            return root[word]

        def union(a, b):
            ra, rb = find(a), find(b)
            if ra != rb:
                root[rb] = ra

        for a, b in pairs:
            ensure(a)
            ensure(b)
            union(a, b)
        for left, right in zip(words1, words2):
            if left == right:
                continue
            if left not in root or right not in root:
                return False
            if find(left) != find(right):
                return False
        return True
# V1''''''
# IDEA : UnionFind
# https://leetcode.com/problems/sentence-similarity-ii/discuss/304988/Python-Solution%3A-standard-union-find
class UnionFind:
    """Array-based union-find with union-by-size and path compression.

    `maxedges` tracks the size of the largest component merged so far.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.array = list(range(capacity))  # parent pointers
        self.size = [1] * capacity          # component sizes (valid at roots)
        self.maxedges = 0

    def find(self, x):
        """Return x's root, compressing the path behind it."""
        root = x
        while root != self.array[root]:
            root = self.array[root]
        # Second pass: point every node on the path straight at the root.
        while x != self.array[x]:
            self.array[x], x = root, self.array[x]
        return root

    def union(self, x, y):
        """Merge the components of x and y (no-op if already joined)."""
        rx = self.find(x)
        ry = self.find(y)
        if rx == ry:
            return
        if self.size[rx] > self.size[ry]:
            self.size[rx] += self.size[ry]
            self.array[ry] = self.array[rx]
            self.maxedges = max(self.maxedges, self.size[rx])
        else:
            self.size[ry] += self.size[rx]
            self.array[rx] = self.array[ry]
            self.maxedges = max(self.maxedges, self.size[ry])
class Solution:
    """Sentence Similarity II on top of the array-based UnionFind class."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        # Map each distinct word in `pairs` to a dense integer id.
        uf = UnionFind(len(pairs) * 2)
        word_id = {}
        next_id = 0
        for pair in pairs:
            for word in pair:
                if word not in word_id:
                    word_id[word] = next_id
                    next_id += 1
            uf.union(word_id[pair[0]], word_id[pair[1]])
        if len(words1) != len(words2):
            return False
        if words1 == words2:
            return True
        return all(
            w1 == w2 or (
                w1 in word_id and w2 in word_id
                and uf.find(word_id[w1]) == uf.find(word_id[w2])
            )
            for w1, w2 in zip(words1, words2)
        )
# V1''''''''
# IDEA : Union-Find
# https://leetcode.com/problems/sentence-similarity-ii/solution/
class DSU:
    """Fixed-size disjoint-set union with recursive path compression."""

    def __init__(self, N):
        # list(...) is required so find() can write compressed parent links
        # back; a bare range object is immutable (the original only worked
        # on Python 2, where range() returned a list).
        self.par = list(range(N))

    def find(self, x):
        """Return the representative of x, compressing the path."""
        if self.par[x] != x:
            self.par[x] = self.find(self.par[x])
        return self.par[x]

    def union(self, x, y):
        """Merge the components containing x and y."""
        self.par[self.find(x)] = self.find(y)
class Solution(object):
    """Sentence similarity using the integer-indexed ``DSU`` above."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        """True iff the sentences are the same length and aligned words are
        equal or belong to the same similarity component."""
        if len(words1) != len(words2):
            return False
        dsu = DSU(2 * len(pairs))
        # Dense integer id for every distinct word seen in a pair.
        slot = {}
        for left, right in pairs:
            for token in (left, right):
                if token not in slot:
                    slot[token] = len(slot)
            dsu.union(slot[left], slot[right])
        for w1, w2 in zip(words1, words2):
            if w1 == w2:
                continue
            # A word outside every pair can only match itself.
            if w1 not in slot or w2 not in slot:
                return False
            if dsu.find(slot[w1]) != dsu.find(slot[w2]):
                return False
        return True
# V1''''''''
# IDEA : UNION FIND
# https://leetcode.com/problems/sentence-similarity-ii/discuss/304988/Python-Solution%3A-standard-union-find
class Solution:
    """Sentence similarity via union-by-rank over a word forest."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        parent, rank = {}, {}

        def find(word):
            # Recursive find with path compression.
            if parent[word] != word:
                parent[word] = find(parent[word])
            return parent[word]

        for a, b in pairs:
            for token in (a, b):
                if token not in parent:
                    parent[token] = token
                    rank[token] = 0
            ra, rb = find(a), find(b)
            if ra == rb:
                continue
            # Union by rank: hang the shallower root under the deeper one.
            if rank[ra] > rank[rb]:
                ra, rb = rb, ra
            parent[ra] = rb
            if rank[ra] == rank[rb]:
                rank[rb] += 1

        return all(
            w1 == w2
            or (w1 in parent and w2 in parent and find(w1) == find(w2))
            for w1, w2 in zip(words1, words2)
        )
# V1''''''''
# IDEA : UNION FIND
# https://leetcode.com/problems/sentence-similarity-ii/discuss/574395/Python-Union-Find
class Solution:
    """Sentence similarity with a lazily-populated parent map."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False

        parent = {}

        def find(node):
            # Walk up to the root, then compress the whole path.
            path = []
            while parent[node] != node:
                path.append(node)
                node = parent[node]
            for hop in path:
                parent[hop] = node
            return node

        for a, b in pairs:
            parent.setdefault(a, a)
            parent.setdefault(b, b)
            parent[find(a)] = find(b)

        return all(
            w1 == w2 or (w1 in parent and w2 in parent and find(w1) == find(w2))
            for w1, w2 in zip(words1, words2)
        )
# V1'''''''''''
# https://www.jiuzhang.com/solution/sentence-similarity-ii/#tag-highlight-lang-python
# IDEA : UNION FIND
class Solution:
    """Sentence similarity by graph search: one DFS per aligned word pair."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        if len(words1) != len(words2):
            return False
        adjacency = collections.defaultdict(list)
        for a, b in pairs:
            adjacency[a].append(b)
            adjacency[b].append(a)

        def reachable(src, dst):
            # Depth-first search over the similarity graph.
            frontier, visited = [src], {src}
            while frontier:
                node = frontier.pop()
                if node == dst:
                    return True
                for nxt in adjacency[node]:
                    if nxt not in visited:
                        visited.add(nxt)
                        frontier.append(nxt)
            return False

        return all(reachable(w1, w2) for w1, w2 in zip(words1, words2))
# V1'''''''''''''''
# IDEA : DFS
# https://leetcode.com/problems/sentence-similarity-ii/discuss/109755/SHORT-Python-DFS-with-explanation
class Solution(object):
    """Label every word with a canonical root via DFS, then compare labels."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        from collections import defaultdict
        if len(words1) != len(words2):
            return False

        neighbors = defaultdict(set)
        for a, b in pairs:
            neighbors[a].add(b)
            neighbors[b].add(a)

        label = {}

        def tag(word, root):
            # Assign ``root`` to this word and everything reachable from it.
            if word in label:
                return
            label[word] = root
            for synonym in neighbors[word]:
                tag(synonym, root)

        for word in list(neighbors):
            tag(word, word)

        # Unknown words label as themselves, so identical words still match.
        return all(
            label.get(a, a) == label.get(b, b)
            for a, b in zip(words1, words2)
        )
# V1'''''''''''''''
# IDEA : DFS
# https://leetcode.com/problems/sentence-similarity-ii/discuss/109755/SHORT-Python-DFS-with-explanation
class Solution(object):
    """Map each word to its connected component, then compare components."""

    def areSentencesSimilarTwo(self, words1, words2, pairs):
        from collections import defaultdict
        if len(words1) != len(words2):
            return False

        graph = defaultdict(set)
        for a, b in pairs:
            graph[a].add(b)
            graph[b].add(a)

        component = {}
        for start in list(graph):
            if start in component:
                continue
            # Iterative DFS flood-fill labelling the whole component.
            stack = [start]
            while stack:
                node = stack.pop()
                if node in component:
                    continue
                component[node] = start
                stack.extend(n for n in graph[node] if n not in component)

        # Words absent from every pair fall back to themselves.
        return all(
            component.get(a, a) == component.get(b, b)
            for a, b in zip(words1, words2)
        )
# V1''''''''''''''
# IDEA : DFS
# https://leetcode.com/problems/sentence-similarity-ii/discuss/221015/Python-solution
class Solution:
    def areSentencesSimilarTwo(self, words1, words2, pairs):
        """
        :type words1: List[str]
        :type words2: List[str]
        :type pairs: List[List[str]]
        :rtype: bool
        """
        if len(words1) != len(words2):
            return False

        # Adjacency sets over every word that appears in a pair.  O(P).
        graph = collections.defaultdict(set)
        vocabulary = set()
        for a, b in pairs:
            graph[a].add(b)
            graph[b].add(a)
            vocabulary.update((a, b))

        # Number the connected components with a recursive flood fill.  O(P).
        component = {}

        def paint(word, color):
            component[word] = color
            for neighbor in graph[word]:
                if neighbor not in component:
                    paint(neighbor, color)

        next_color = 0
        for word in vocabulary:
            if word not in component:
                paint(word, next_color)
                next_color += 1

        # Aligned words must be equal or share a component.  O(N).
        for a, b in zip(words1, words2):
            if a == b:
                continue
            if a not in component or b not in component:
                return False
            if component[a] != component[b]:
                return False
        return True
# V2
# Time: O(n + p)
# Space: O(p)
# IDEA : UnionFind
import itertools
class UnionFind(object):
    """Disjoint-set union over the integers 0..n-1.

    union_set merges by attaching the smaller root index to the larger
    and reports whether a merge actually happened.
    """

    def __init__(self, n):
        # list(...) so find_set can write compressed parent links back;
        # under Python 3 a bare range object is immutable (the original
        # relied on Python 2's list-returning range()).
        self.set = list(range(n))

    def find_set(self, x):
        """Return the root of x, compressing the path on the way up."""
        if self.set[x] != x:
            self.set[x] = self.find_set(self.set[x])  # path compression.
        return self.set[x]

    def union_set(self, x, y):
        """Merge the sets holding x and y; False if already together."""
        x_root, y_root = map(self.find_set, (x, y))
        if x_root == y_root:
            return False
        self.set[min(x_root, y_root)] = max(x_root, y_root)
        return True
class Solution(object):
    def areSentencesSimilarTwo(self, words1, words2, pairs):
        """
        :type words1: List[str]
        :type words2: List[str]
        :type pairs: List[List[str]]
        :rtype: bool
        """
        if len(words1) != len(words2):
            return False
        # Map each distinct pair word to a dense index for the array DSU.
        lookup = {}
        union_find = UnionFind(2 * len(pairs))
        for pair in pairs:
            for p in pair:
                if p not in lookup:
                    lookup[p] = len(lookup)
            union_find.union_set(lookup[pair[0]], lookup[pair[1]])
        # zip() replaces itertools.izip (removed in Python 3); on Python 2
        # the materialized list yields the identical result under all().
        return all(w1 == w2 or
                   w1 in lookup and w2 in lookup and
                   union_find.find_set(lookup[w1]) == union_find.find_set(lookup[w2])
                   for w1, w2 in zip(words1, words2))
# -*- coding: utf-8 -*-
import re
from modules import constants
from modules.part3_commands import tpm2_part3_handle_process_templates
from modules.file_handling import FileHandling
from modules import utils
import settings
class HandleProcess:
    """Generates the TPM ``HandleProcess.c`` source file from templates.

    The rendered output differs between TPM spec 01.16 and later
    revisions in the command-selector plumbing and in the
    TABLE_DRIVEN_DISPATCH preprocessor guards.
    """
    def __init__(self):
        self.file = None
        self.file_path = constants.SRC_PATH + constants.TPM_PATH + "/main/HandleProcess.c"
        # Accumulated per-command case code; consumed by write().
        self.code = ""
        if settings.SPEC_VERSION == "01.16":
            self.command = "TPM_CC commandCode, " # cf. TPM Library Specification, Part 4
            self.selector = "commandCode"
            self.table_driven_dispatch_ifndef = ""
            self.table_driven_dispatch_endif = ""
        else:
            self.command = "COMMAND_INDEX commandIndex," # cf. TPM Library Specification, Part 4
            self.selector = "GetCommandCode(commandIndex)"
            self.table_driven_dispatch_ifndef = "\n#ifndef TABLE_DRIVEN_DISPATCH //%\n"
            self.table_driven_dispatch_endif = "\n#endif //% TABLE_DRIVEN_DISPATCH\n"
    @staticmethod
    def create_inner_code(handle, num):
        """Generate the per-handle inner code for one handle slot.

        Parameters:
            handle: handle type name, optionally suffixed with '+' to
                    mark the handle as optional (e.g. "TPMI_RH_HIERARCHY+").
            num:    zero-based handle index within the command.
        Returns:
            The rendered inner-template text for this handle.
        """
        inner_code = ""
        bool_text = ""
        # Handle types that take an explicit FALSE optional-flag argument.
        handle_with_false_flag = ["TPMI_DH_OBJECT",
                                  "TPMI_RH_ENDORSEMENT",
                                  "TPMI_DH_PCR",
                                  "TPMI_RH_HIERARCHY",
                                  "TPMI_DH_ENTITY"]
        # Raw string: '\w' in a non-raw literal is an invalid escape
        # sequence (DeprecationWarning, SyntaxWarning on Python 3.12+).
        res = re.search(r'(\w+(_DH_|_RH_)\w+)([\+]*)', handle)
        # A trailing '+' marks the handle as optional -> TRUE flag.
        if res and res.group(3) == "+":
            handle = res.group(1)
            bool_text = ", TRUE"
        elif res and handle in handle_with_false_flag: # see __init__
            bool_text = ", FALSE"
        inner_code += "\n"
        inner_code += tpm2_part3_handle_process_templates.handle_process_template_inner.safe_substitute(
            HANDLE=handle,
            NUM=num,
            NUM2=num + 1,
            BOOL=bool_text)
        return inner_code
    @staticmethod
    def create_outer_code(function_name, num, inner_code):
        """Wrap the handle snippets in the per-command case template.

        Parameters:
            function_name: command function name ("TPM2_" prefix stripped).
            num:           number of handles the command takes.
            inner_code:    concatenated inner snippets from create_inner_code.
        """
        func = function_name.replace("TPM2_", "")
        return tpm2_part3_handle_process_templates.handle_process_template_outer.safe_substitute(
            FUNC=func,
            NUM_HANDLES=num,
            EXTENSION=inner_code)
    def create_handle_process_case(self, funcname, handles=None):
        """Append the case for *funcname* built from an iterable of handles."""
        # Guard the documented default: iterating None raised TypeError.
        if handles is None:
            handles = []
        num = 0
        inner_code = ""
        for handle in handles:
            inner_code += self.create_inner_code(handle, num)
            num += 1
        self.code += self.create_outer_code(funcname, num, inner_code)
    def create_handle_process_case_pdf(self, funcname, rows):
        """Append the case for *funcname* from PDF-extracted table rows,
        keeping only rows that describe handles."""
        num = 0
        inner_code = ""
        for row in rows:
            if utils.is_handle(row):
                handle = row[0]
                inner_code += self.create_inner_code(handle, num)
                num += 1
        self.code += self.create_outer_code(funcname, num, inner_code)
    def write(self):
        """Render the full file from the outer template and write it out."""
        self.code = tpm2_part3_handle_process_templates.handle_process_template.safe_substitute(
            COMMAND=self.command,
            SELECTOR=self.selector,
            TABLE_DRIVEN_DISPATCH_IFNDEF=self.table_driven_dispatch_ifndef,
            TABLE_DRIVEN_DISPATCH_ENDIF=self.table_driven_dispatch_endif,
            CASES=self.code)
        FileHandling.write_file(self.file_path, self.code)
|
from cx_Freeze import setup, Executable
import os
# cx_Freeze needs the Tcl/Tk library locations on Windows; these are
# machine-specific absolute paths -- TODO confirm they match the build
# machine's Python install before freezing.
os.environ['TCL_LIBRARY'] = r'C:\Users\berjan\AppData\Local\Programs\Python\Python36-32\tcl\tcl8.6'
os.environ['TK_LIBRARY'] = r'C:\Users\berjan\AppData\Local\Programs\Python\Python36-32\tcl\tk8.6'
# Freeze PartsListEditor.py into a standalone executable.
setup(name = 'KiCAD_PLE',
      version = '18.0.2',
      description = 'Partslist Editor',
      executables = [Executable("PartsListEditor.py")])
|
# -*- coding: utf-8 -*-
__version__ = '0.1'
from flask import Flask
from app.ext import database, login
# Single module-level Flask application (no app factory).
app = Flask('app')
app.config['UPLOAD_FOLDER'] = 'app/static/uploads'
# config.cfg lives one directory above the package root.
app.config.from_pyfile('../config.cfg')
# Wire the database and login extensions to this app.
database.configure(app)
login.configure(app)
# Imported last so route decorators in the controllers see ``app``.
from app.controllers import *
|
import torch, torchvision
# Report the installed torch version and whether CUDA is usable.
print(torch.__version__,torch.cuda.is_available())
# NOTE(review): a CUDA device is requested here, but trained_model below
# sets MODEL.DEVICE = 'cpu' -- confirm which is intended.
device = torch.device("cuda")
torch.cuda.empty_cache()
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import random
import pandas as pd
import os
import sys
from matplotlib import pyplot as plt
import cv2
from PIL import Image
from skimage import measure
import sklearn
import pycocotools as pycoco
import detectron2
from detectron2.utils.logger import setup_logger
# Route detectron2 logging through its default logger setup.
setup_logger()
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
# Print numpy arrays in full, without truncation.
np.set_printoptions(threshold=sys.maxsize)
class trained_model:
    """Mask R-CNN (detectron2) wrapper: loads a finetuned checkpoint and
    offers inference, visualization, and a tabular summary of detections."""
    def __init__(self):
        # Base config: COCO Mask R-CNN R50-FPN; then overlay local weights.
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
        self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.OUTPUT_DIR, "model_final_check.pth")
        # Keep only detections scoring above 0.60.
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.60
        # Two classes: 0 -> "no", 1 -> "yes" (see bin_to_class below).
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
        self.cfg.MODEL.DEVICE = 'cpu'
        self.predictor = DefaultPredictor(self.cfg)
        MetadataCatalog.get("checknet_train").set(thing_classes = ["no", 'yes'])
    def make_prediction(self,image):
        """Run inference on an image array; returns (image, raw outputs)."""
        #im = cv2.imread(d["file_name"])
        outputs = self.predictor(image)
        return image,outputs
    def show_prediction(self,im,outputs):
        """Render predicted instances over the image; returns a PIL image.

        The [:, :, ::-1] flips between BGR (OpenCV) and RGB channel order.
        """
        plt.figure()
        v = Visualizer(im[:, :, ::-1],
                   metadata=MetadataCatalog.get("checknet_train"),
                   scale=0.8,
                   instance_mode=ColorMode.IMAGE_BW
        )
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        pred_im = Image.fromarray(v.get_image()[:, :, ::-1])
        return pred_im
    def get_json_result(self,outputs):
        """Summarize detections as a DataFrame of 'scores' and 'classes',
        ordered by box position via the v2/v4/v1/v3 sort."""
        scores = [k.item() for k in outputs["instances"].to("cpu").scores]
        classes = [y.item() for y in outputs["instances"].to("cpu").pred_classes]
        # v1..v4 hold the four box coordinates -- presumably (x1, y1, x2, y2)
        # in detectron2's XYXY convention; TODO confirm.
        vertices = {'v1':[],'v2':[],'v3':[],'v4':[]}
        for box in range(len(scores)):
            box_vertices = [y.item() for y in [k for k in outputs["instances"].to("cpu").pred_boxes[box]][0]]
            i=0
            for v in vertices:
                vertices[v].append(box_vertices[i])
                i+=1
        v_frame = pd.DataFrame(vertices)
        v_frame['scores'] =scores
        def bin_to_class(bina):
            # Map the binary class id to its label.
            if bina==0:
                return 'no'
            else:
                return 'yes'
        v_frame['classes'] =list(map(bin_to_class,classes))
        v_frame = v_frame.sort_values(by = ['v2','v4','v1','v3'],ascending=True).reset_index(drop=True)
        # Coordinates were only needed for ordering; drop them from the result.
        del v_frame['v1'],v_frame['v2'], v_frame['v3'], v_frame['v4']
        #v_frame2 =v_frame[['classes','scores']].to_dict(orient='list')
        return v_frame
|
# I am currently testing annotations
### Let's start with some HAL code...
"""
2013-01-04
This was made way after SR1 and SR2
Idea: Self-parsing AST
Currently it is just a lexer, parser and generator
"""
#import os
#import copy
#import datetime
#@const mypi float 3.2
#@fun put
#@arg t String
def put(t):
    """Print wrapper used by the HAL test cases below."""
    print(t)
class MyClassOne:
    "A unique identifier (e.g. function name, variable name, ...)"
    #@var name String
    #@fun __init__
    #@arg newName String
    def __init__(self, newName):
        self.name = newName
    #@var someOtherName String
    # NOTE(review): `name` is undefined in the class body, so this line
    # raises NameError when the class statement executes.  Presumably the
    # file is input for the annotation lexer/parser rather than runnable
    # code -- confirm before "fixing".
    someOtherName = name
    #@fun test
    #@arg a int
    #@arg b int
    #@arg c int
    def test(self, a, b, c):
        # Concatenates the three values as strings, e.g. 1,2,3 -> "123".
        print((str(a) + str(b)) + str(c))
    def test2(self):
        # Exercises test() with fixed arguments.
        self.test(7,8,9)
    #@fun __repr__ String
    def __repr__(self):
        return str(self.name)
class TheSecond:
    "Simple value (e.g. of a variable or constant expression)"
    #@var data String
    def __init__(self):
        # Value payload; stays None until assigned by a caller.
        self.data = None # binary
    #@fun __repr__ String
    def __repr__(self):
        # str(None) -> "None" when no data has been set.
        return str(self.data)
class ThirdClass:
    "Some class"
    #@var ident MyClassOne
    #@var vals TheSecond []
    #@var name String
    def __init__(self, name):
        #@TODO: Static values?
        #@TODO: Inheritance?
        self.ident = MyClassOne(name) #MyClassOne name of module
        self.vals = [] #TheSecond[] Global variables
        # Unused scaffolding kept for the parser tests:
        #idents = [] #MyClassOne[] Global identifier database?
        #self.block = None #SRBlock (or SRFunction, or self.funcs[0])
class TestClass:
    #@fun someMethod
    #@arg a int
    #@arg b int
    #@arg c int
    def someMethod(self, a,b,c):
        # Arguments are unused; only the method name is printed.
        print("someMethod")
#@fun testFunc int
#@arg a1 int
#@arg a2 int
def testFunc(a1, a2):
    """Print a few products of a1 and a2, then return their sum."""
    #@var m int
    # NOTE(review): m is computed but never used -- likely parser fodder.
    m = a1 * a2
    print(str(a1))
    print(str(a1*6))
    print(str(a1*a2*3))
    return (a1 + a2)
### And now for some direct tests
put('Testing variable assignments...')
# "a" is a variable
#@var a int
a = 1
#@var b int
b = 5
# infer that
test = 0
#@var s str
s = "Hello"
if s == "Hello":
    print("String is what it should be!")
# var test int
test = 3
#@var c float
# NOTE(review): annotated float, but 3+4 evaluates to an int -- possibly a
# deliberate test case for the annotation checker.
c = 3+4
#@var d int
d = (a + b) * 2
#@var x int
if (d > 6) and (b <= 10):
    x = 1
else:
    x = 2
if (d < 13) or (b > 13):
    x = 3
if d > 25:
    x = 3
elif d > 15:
    x = 2
elif d > 5:
    x = 1
else:
    x = 0
put('Testing "for"...')
#@var i int
# NOTE(review): xrange is Python 2 only; under Python 3 this line raises
# NameError.  Left untouched since the file reads as parser test input.
for i in xrange(10):
    print(str(i))
put('Testing function calls...')
put('Testing class instantiation...')
#@var v TheSecond
v = TheSecond()
#v.data = "Test" # @FIXME: Accessing remote namespaces from outside is problematic at streaming translation...
#unknownFunctionCall() # Should raise a HALNamespaceError
testFunc(123, 456)
#@var e int
e = 6
put('Testing arrays...')
#@var ar int []
ar = [1,2,3]
ar[0] = 2
|
"""implementations of simple readline edit operations
just the ones that fit the model of transforming the current line
and the cursor location
based on http://www.bigsmoke.us/readline/shortcuts"""
from bpython.lazyre import LazyReCompile
import inspect
INDENT = 4
# TODO Allow user config of keybindings for these actions
getargspec = lambda func: inspect.signature(func).parameters
class AbstractEdits:
    """Common behaviour for keybinding -> edit-function mappings.

    Edit functions come in two flavours, distinguished by the arity of
    their return value:
      * simple edits:     func(**kwargs) -> (cursor_offset, line)
      * cut-buffer edits: func(**kwargs) -> (cursor_offset, line, cut_buffer)
    """

    # Dummy arguments used by add() to probe a function's return arity.
    default_kwargs = {
        "line": "hello world",
        "cursor_offset": 5,
        "cut_buffer": "there",
    }

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def add(self, key, func, overwrite=False):
        """Register *func* under *key*, classifying it by its return arity.

        Raises ValueError when the key is already mapped (unless
        overwrite), or when the function's return shape and its ``kills``
        attribute disagree.
        """
        if key in self:
            if overwrite:
                del self[key]
            else:
                raise ValueError(f"key {key!r} already has a mapping")
        # Call the function once with dummy data to see what it returns.
        params = getargspec(func)
        args = {k: v for k, v in self.default_kwargs.items() if k in params}
        r = func(**args)
        if len(r) == 2:
            if hasattr(func, "kills"):
                raise ValueError(
                    "function %r returns two values, but has a "
                    "kills attribute" % (func,)
                )
            self.simple_edits[key] = func
        elif len(r) == 3:
            if not hasattr(func, "kills"):
                raise ValueError(
                    "function %r returns three values, but has "
                    "no kills attribute" % (func,)
                )
            self.cut_buffer_edits[key] = func
        else:
            raise ValueError(f"return type of function {func!r} not recognized")

    def add_config_attr(self, config_attr, func):
        """Defer binding *func* until a config supplies the key for
        *config_attr* (see ConfiguredEdits)."""
        if config_attr in self.awaiting_config:
            raise ValueError(
                f"config attribute {config_attr!r} already has a mapping"
            )
        self.awaiting_config[config_attr] = func

    def call(self, key, **kwargs):
        """Look up *key* and call its function with the kwargs it accepts."""
        func = self[key]
        params = getargspec(func)
        args = {k: v for k, v in kwargs.items() if k in params}
        return func(**args)

    def call_without_cut(self, key, **kwargs):
        """Looks up the function and calls it, returning only line and cursor
        offset"""
        # Fixed: this previously delegated to the nonexistent
        # self.call_for_two, raising AttributeError on every use.
        r = self.call(key, **kwargs)
        return r[:2]

    def __getitem__(self, key):
        if key in self.simple_edits:
            return self.simple_edits[key]
        if key in self.cut_buffer_edits:
            return self.cut_buffer_edits[key]
        raise KeyError(f"key {key!r} not mapped")

    def __delitem__(self, key):
        if key in self.simple_edits:
            del self.simple_edits[key]
        elif key in self.cut_buffer_edits:
            del self.cut_buffer_edits[key]
        else:
            raise KeyError(f"key {key!r} not mapped")
class UnconfiguredEdits(AbstractEdits):
    """Maps key to edit functions, and bins them by what parameters they take.
    Only functions with specific signatures can be added:
    * func(**kwargs) -> cursor_offset, line
    * func(**kwargs) -> cursor_offset, line, cut_buffer
    where kwargs are in among the keys of Edits.default_kwargs
    These functions will be run to determine their return type, so no side
    effects!
    More concrete Edits instances can be created by applying a config with
    Edits.mapping_with_config() - this creates a new Edits instance
    that uses a config file to assign config_attr bindings.
    Keys can't be added twice, config attributes can't be added twice.
    """

    def __init__(self):
        self.simple_edits = {}
        self.cut_buffer_edits = {}
        self.awaiting_config = {}

    def mapping_with_config(self, config, key_dispatch):
        """Creates a new mapping object by applying a config object"""
        return ConfiguredEdits(
            self.simple_edits,
            self.cut_buffer_edits,
            self.awaiting_config,
            config,
            key_dispatch,
        )

    def on(self, key=None, config=None):
        # Decorator factory: bind under a literal key now, or defer until
        # a config attribute supplies the key.  Exactly one must be given.
        if (key is None) == (config is None):
            raise ValueError("Must use exactly one of key, config")
        if key is not None:
            def decorator(func):
                self.add(key, func)
                return func
        else:
            def decorator(func):
                self.add_config_attr(config, func)
                return func
        return decorator
class ConfiguredEdits(AbstractEdits):
    """An edit mapping whose config-dependent bindings are resolved.

    Once constructed, no further keys or config attributes may be added.
    """

    def __init__(
        self,
        simple_edits,
        cut_buffer_edits,
        awaiting_config,
        config,
        key_dispatch,
    ):
        # Copy so later mutation of the unconfigured source has no effect.
        self.simple_edits = dict(simple_edits)
        self.cut_buffer_edits = dict(cut_buffer_edits)
        # Resolve each deferred config attribute to its configured key(s),
        # using the parent's add() since ours is disabled below.
        for attr, func in awaiting_config.items():
            configured_keys = key_dispatch[getattr(config, attr)]
            for bound_key in configured_keys:
                super().add(bound_key, func, overwrite=True)

    def add_config_attr(self, config_attr, func):
        raise NotImplementedError("Config already set on this mapping")

    def add(self, key, func):
        raise NotImplementedError("Config already set on this mapping")
# Module-level registry that the decorated edit functions below populate.
edit_keys = UnconfiguredEdits()
# Because the edits.on decorator runs the functions, functions which depend
# on other functions must be declared after their dependencies
def kills_behind(func):
    """Mark *func* as cutting the text behind (left of) the cursor."""
    setattr(func, "kills", "behind")
    return func
def kills_ahead(func):
    """Mark *func* as cutting the text ahead (right of) the cursor."""
    setattr(func, "kills", "ahead")
    return func
@edit_keys.on(config="left_key")
@edit_keys.on("<LEFT>")
def left_arrow(cursor_offset, line):
    """Move one character left, stopping at the start of the line."""
    new_offset = cursor_offset - 1 if cursor_offset > 0 else 0
    return new_offset, line
@edit_keys.on(config="right_key")
@edit_keys.on("<RIGHT>")
def right_arrow(cursor_offset, line):
    """Move one character right, stopping at the end of the line."""
    new_offset = cursor_offset + 1 if cursor_offset < len(line) else len(line)
    return new_offset, line
@edit_keys.on(config="beginning_of_line_key")
@edit_keys.on("<HOME>")
def beginning_of_line(cursor_offset, line):
    """Jump the cursor to column 0; the line is unchanged."""
    return (0, line)
@edit_keys.on(config="end_of_line_key")
@edit_keys.on("<END>")
def end_of_line(cursor_offset, line):
    """Jump the cursor past the last character; the line is unchanged."""
    return (len(line), line)
# Matches the boundary where a non-space is followed by whitespace.
forward_word_re = LazyReCompile(r"\S\s")
@edit_keys.on("<Esc+f>")
@edit_keys.on("<Ctrl-RIGHT>")
@edit_keys.on("<Esc+RIGHT>")
def forward_word(cursor_offset, line):
    """Advance the cursor to the end of the current or next word."""
    # The appended space guarantees a word end exists at end-of-line.
    found = forward_word_re.search(line[cursor_offset:] + " ")
    if found:
        return cursor_offset + found.end() - 1, line
    return cursor_offset, line
def last_word_pos(string):
    """returns the start index of the last word of given string"""
    # Search the reversed string: the first word boundary seen from the
    # right corresponds to the start of the last word.
    found = forward_word_re.search(string[::-1])
    if found:
        return len(string) - found.end() + 1
    return 0
@edit_keys.on("<Esc+b>")
@edit_keys.on("<Ctrl-LEFT>")
@edit_keys.on("<Esc+LEFT>")
def back_word(cursor_offset, line):
    """Move the cursor to the start of the previous word."""
    new_offset = last_word_pos(line[:cursor_offset])
    return new_offset, line
@edit_keys.on("<DELETE>")
def delete(cursor_offset, line):
    """Remove the character under the cursor; the cursor stays put."""
    remaining = line[:cursor_offset] + line[cursor_offset + 1 :]
    return cursor_offset, remaining
@edit_keys.on("<BACKSPACE>")
@edit_keys.on(config="backspace_key")
def backspace(cursor_offset, line):
    """Delete one character left of the cursor -- or, when only whitespace
    precedes the cursor, delete back to the previous INDENT tab stop."""
    if cursor_offset == 0:
        return cursor_offset, line
    if not line[:cursor_offset].strip():
        # Soft-tab behaviour: step back to the nearest INDENT boundary.
        step = ((cursor_offset - 1) % INDENT) + 1
    else:
        step = 1
    cut_at = cursor_offset - step
    return cut_at, line[:cut_at] + line[cursor_offset:]
@edit_keys.on(config="clear_line_key")
def delete_from_cursor_back(cursor_offset, line):
    """Erase everything left of the cursor (no cut buffer involved)."""
    return (0, line[cursor_offset:])
# Last character of a word: a word char followed by a boundary.
delete_rest_of_word_re = LazyReCompile(r"\w\b")
@edit_keys.on("<Esc+d>")  # option-d
@kills_ahead
def delete_rest_of_word(cursor_offset, line):
    """Cut from the cursor through the end of the current word."""
    found = delete_rest_of_word_re.search(line[cursor_offset:])
    if found is None:
        return cursor_offset, line, ""
    cut_end = found.start() + cursor_offset + 1
    kept = line[:cursor_offset] + line[cut_end:]
    return cursor_offset, kept, line[cursor_offset:cut_end]
# A word start: whitespace followed by a non-space character.
delete_word_to_cursor_re = LazyReCompile(r"\s\S")
@edit_keys.on(config="clear_word_key")
@kills_behind
def delete_word_to_cursor(cursor_offset, line):
    """Cut from the start of the current word up to the cursor."""
    word_start = 0
    # Keep the last word start found before the cursor.
    for found in delete_word_to_cursor_re.finditer(line[:cursor_offset]):
        word_start = found.start() + 1
    kept = line[:word_start] + line[cursor_offset:]
    return word_start, kept, line[word_start:cursor_offset]
@edit_keys.on("<Esc+y>")
def yank_prev_prev_killed_text(cursor_offset, line, cut_buffer):
    """Paste the cut buffer at the cursor.

    TODO not implemented - just prev
    """
    pasted = line[:cursor_offset] + cut_buffer + line[cursor_offset:]
    return cursor_offset + len(cut_buffer), pasted
@edit_keys.on(config="yank_from_buffer_key")
def yank_prev_killed_text(cursor_offset, line, cut_buffer):
    """Insert the most recently cut text at the cursor."""
    pasted = line[:cursor_offset] + cut_buffer + line[cursor_offset:]
    return cursor_offset + len(cut_buffer), pasted
@edit_keys.on(config="transpose_chars_key")
def transpose_character_before_cursor(cursor_offset, line):
    """Swap the characters around the cursor (readline C-t semantics)."""
    if cursor_offset < 2:
        # Not enough characters to the left to transpose.
        return cursor_offset, line
    if cursor_offset == len(line):
        # At end of line: swap the final two characters, cursor unchanged.
        return cursor_offset, line[:-2] + line[-1] + line[-2]
    # Mid-line: swap the chars at cursor-1 and cursor, advance the cursor.
    return (
        min(len(line), cursor_offset + 1),
        line[: cursor_offset - 1]
        + (line[cursor_offset] if len(line) > cursor_offset else "")
        + line[cursor_offset - 1]
        + line[cursor_offset + 1 :],
    )
@edit_keys.on("<Esc+t>")
def transpose_word_before_cursor(cursor_offset, line):
    """Placeholder for readline M-t word transpose; currently a no-op."""
    return cursor_offset, line  # TODO Not implemented
# TODO undo all changes to line: meta-r
# bonus functions (not part of readline)
@edit_keys.on("<Esc+u>")
def uppercase_next_word(cursor_offset, line):
    """Placeholder for readline M-u uppercase-word; currently a no-op."""
    return cursor_offset, line  # TODO Not implemented
@edit_keys.on(config="cut_to_buffer_key")
@kills_ahead
def delete_from_cursor_forward(cursor_offset, line):
    """Cut everything from the cursor to the end of the line."""
    kept, cut = line[:cursor_offset], line[cursor_offset:]
    return cursor_offset, kept, cut
@edit_keys.on("<Esc+c>")
def titlecase_next_word(cursor_offset, line):
    """Placeholder for readline M-c capitalize-word; currently a no-op."""
    return cursor_offset, line  # TODO Not implemented
# A word start: beginning of line, or a word char preceded by a boundary.
delete_word_from_cursor_back_re = LazyReCompile(r"^|\b\w")
@edit_keys.on("<Esc+BACKSPACE>")
@edit_keys.on("<Meta-BACKSPACE>")
@kills_behind
def delete_word_from_cursor_back(cursor_offset, line):
    """Whatever my option-delete does in bash on my mac"""
    if not line:
        return cursor_offset, line, ""
    # Find the last word start strictly before the cursor; matches arrive
    # left-to-right, so we can stop at the first one past the cursor.
    word_start = None
    for found in delete_word_from_cursor_back_re.finditer(line):
        if found.start() >= cursor_offset:
            break
        word_start = found.start()
    if word_start is None:
        return cursor_offset, line, ""
    kept = line[:word_start] + line[cursor_offset:]
    return word_start, kept, line[word_start:cursor_offset]
|
'''
Problem statement: Create a function that receives a non-negative integer and returns the factorial of that number.
Problem Link: https://edabit.com/challenge/PNbsQzmDR3CJ9JHkB
'''
def factorial(n):
    """Return n! for a non-negative integer n (0! == 1)."""
    result = 1
    # Multiplying from 2 upward; empty range for n < 2 leaves result == 1.
    for factor in range(2, n + 1):
        result *= factor
    return result
n=int(input()) #takes input from user
if n<0:
    # Factorial is undefined for negatives; report instead of computing.
    print("Factorial of negative numbers don't exist")
else:
    result=factorial(n) #function call to return the factorial of the number
    print(result) # print the factorial of the number
|
import pandas as pd
import numpy as np
from redcap import Project, RedcapError
import json
#Using the URL and API to connect to Project 1 using the RedCAP API
url_p1 = 'https://redcap.duke.edu/redcap/api/'
api_p1 = ''
project1 = Project(url_p1, api_p1)
#Using the URL and API to connect to Project 2 using the RedCAP API
url_p2 = 'https://redcap.duke.edu/redcap/api/'
api_p2 = ''
project2 = Project(url_p2, api_p2)
print project1
#begin pulling out all of the values of the ADOS test file in P1
forms = project1.forms[5:6]
#create a subset of that project and export the records out of the seelcted form
subset = project1.export_records(forms=forms)
#create a dataframe from the list of data that was retrieved from the selected form
ados_testp1 = pd.DataFrame.from_dict(subset)
#begin pulling out all of the values of the ADOS test file in P2
forms_2 = project2.forms[6:7]
#create a subset of that project and export the records out of the seelcted form
subset_2 = project2.export_records(forms=forms)
#create a dataframe from the list of data that was retrieved from the selected form
ados_testp2 = pd.DataFrame.from_dict(subset_2)
#create a numpy array object that stores the records data from project 1 and project 2
p1_values = ados_testp1.values[2]
p2_values = ados_testp2.values[0]
#check to see if the values are already up to date. If they are not up to date update the the records across projects.
# if (np.array_equal(p1_values, p2_values)):
# print "Form " + forms + " is up-to-date!"
# else:
# print "Records for the form: " + str(forms[0]) + " are currently being updated across projects updating..."
# response = project2.import_records(subset)
#Begin check on the individual records
p1_values = p1_values.tolist()
p2_values = p2_values.tolist()
for stuff in range(0,len(p1_values)):
if p1_values[stuff] == p2_values[stuff]:
print 'same'
else:
print p1_values[stuff] + ' != ' + p2_values[stuff]
p2_values[stuff] = p1_values[stuff]
print p2_values
# print p2_values |
# Print every other element of arr (indices 0, 2, 4 -> 7, 8, 9).
arr = [7, 10, 8, 11, 9, 12]
for index in range(0, len(arr), 2):
    # Fixed: arr(index) called the list like a function -> TypeError;
    # element access uses square brackets.
    print(arr[index])
|
from cobra.model.fv import RsProv, RsCons
from cobra.model.l3ext import Out, InstP
from createRoutedOutside import input_key_args as input_routed_outside
from createExternalNetwork import input_key_args as input_external_network_epg_name
from createMo import *
# Defaults and valid choices for the CLI/wizard prompts below.
DEFAULT_QOS = 'unspecified'
# NOTE(review): "MTACH" looks like a typo for "MATCH", but the name is
# kept as-is since other modules may import it.
DEFAULT_MTACH_TYPE = 'AtleastOne'
CONTRACT_TYPE_CHOICES = ['provided', 'consumed']
QOS_CHOICES = ['level1', 'level2', 'level3', 'unspecified']
MATCH_TYPE_CHOICES = ['All', 'AtleastOne', "AtmostOne", "None"]
def input_key_args(msg='\nPlease Specify the L3 EPG Contract:', type_known=False):
    """Prompt for the contract name and, unless type_known, its type.

    Returns a list: [name] or [name, contract_type].
    """
    print msg
    args = []
    args.append(input_raw_input("Contract Name", default='default'))
    if not type_known:
        args.append(input_options("Contract type", '', CONTRACT_TYPE_CHOICES, required=True))
    return args
def input_optional_args(contract_type):
    """Prompt for the QoS class; for provided contracts, also match type."""
    args = {}
    args['prio'] = input_options('QoS Class', DEFAULT_QOS, QOS_CHOICES)
    # NOTE(review): debug print left in; it leaks to stdout on every run.
    print contract_type.lower(), contract_type.lower()=='provided'
    if contract_type.lower() == 'provided':
        args['match_type'] = input_options('Match Type', DEFAULT_MTACH_TYPE, MATCH_TYPE_CHOICES)
    return args
def create_L3_epg_provider_or_consumer_contract(l3ext_instp, contract_type, contract, **args):
    """Labels the EPG as a provider/consumer in the contract. A contract defines what can be communicated along with the protocols and ports on which a provider and consumer are allowed to communicate."""
    # Callers may nest options under 'optional_args'; unwrap when present.
    args = args['optional_args'] if 'optional_args' in args.keys() else args
    if contract_type.lower() == 'consumed':
        fv_l3epg_cont = RsCons(l3ext_instp, contract,
                               prio=get_value(args, 'prio', 'unspecified'))
    elif contract_type.lower() == 'provided':
        fv_l3epg_cont = RsProv(l3ext_instp, contract,
                               prio=get_value(args, 'prio', 'unspecified'),
                               matchT=get_value(args, 'match_type', 'AtleastOne'))
    else:
        # NOTE(review): relies on `sys` arriving via `from createMo import *`
        # (no explicit import in this module) -- confirm before relying on it.
        print 'Invalid Contract Type ' + contract_type + '. Contract_type is either \"consumed\" or \"provided\".'
        sys.exit()
    return fv_l3epg_cont
class CreateL3EpgProviderOrConsumerContract(CreateMo):
def __init__(self):
self.description = 'Labels the EPG as a provider/consumer in the contract. A contract defines what can be communicated along with the protocols and ports on which a provider and consumer are allowed to communicate.'
self.tenant_required = True
self.routed_outside = None
self.external_network_epg = None
self.contract_type = None
self.contract = None
super(CreateL3EpgProviderOrConsumerContract, self).__init__()
def set_cli_mode(self):
super(CreateL3EpgProviderOrConsumerContract, self).set_cli_mode()
self.parser_cli.add_argument('routed_outside', help='The name for the policy controlling connectivity to the outside.')
self.parser_cli.add_argument('external_network_epg', help='The name of the layer 3 external network instance profile.')
self.parser_cli.add_argument('contract_type', choices=CONTRACT_TYPE_CHOICES, help='Defind the contract type.')
self.parser_cli.add_argument('contract', help='The provider/consumer contract name')
self.parser_cli.add_argument('-Q', '--QoS_class', dest='prio', default= DEFAULT_QOS, choices=QOS_CHOICES, help='The priority level of a sub application running behind an endpoint group.')
self.parser_cli.add_argument('-m', '--match_type', default= DEFAULT_MTACH_TYPE, choices=MATCH_TYPE_CHOICES, help='The matched EPG type. For provider only.')
def read_key_args(self):
self.routed_outside = self.args.pop('routed_outside')
self.external_network_epg = self.args.pop('external_network_epg')
self.contract_type = self.args.pop('contract_type')
self.contract = self.args.pop('contract')
def wizard_mode_input_args(self):
self.args['routed_outside'] = input_routed_outside(msg='\nPlease Specify the L3 EPG Contract:')
self.args['external_network_epg'] = input_external_network_epg_name('')
self.args['contract'], self.args['contract_type'] = input_key_args('')
if not self.delete:
self.args['optional_args'] = input_optional_args(self.args['contract_type'])
def delete_mo(self):
if self.contract_type.lower() == 'provided':
self.check_if_mo_exist('uni/tn-' + self.tenant + '/out-' + self.routed_outside + '/instP-' + self.external_network_epg + '/rsprov-', self.contract, RsProv, description='L3 EPG Provider Contract')
elif self.contract_type.lower() == 'consumed':
self.check_if_mo_exist('uni/tn-' + self.tenant + '/out-' + self.routed_outside + '/instP-' + self.external_network_epg + '/rscons-', self.contract, RsCons, description='L3 EPG Consumer Contract')
super(CreateL3EpgProviderOrConsumerContract, self).delete_mo()
def main_function(self):
    """Validate that the tenant, routed outside and external EPG exist, then
    create the provider/consumer contract relation on the external EPG."""
    self.check_if_tenant_exist()
    self.check_if_mo_exist('uni/tn-' + self.tenant + '/out-', self.routed_outside, Out, description='The policy')
    # Fixed user-facing description typo: "Netwrok" -> "Network".
    self.check_if_mo_exist('uni/tn-' + self.tenant + '/out-' + self.routed_outside + '/instP-', self.external_network_epg, InstP, description='External Network')
    create_L3_epg_provider_or_consumer_contract(self.mo, self.contract_type, self.contract, optional_args=self.optional_args)
# Script entry point: instantiating the CLI handler drives parsing and execution.
if __name__ == '__main__':
    mo = CreateL3EpgProviderOrConsumerContract()
import gdsfactory as gf
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.contact import contact_heater_m3
from gdsfactory.components.coupler_ring import coupler_ring as _coupler_ring
from gdsfactory.components.straight import straight as _straight
from gdsfactory.types import ComponentFactory, CrossSectionFactory, Float2
contact_heater_m3_mini = gf.partial(contact_heater_m3, size=(4, 4))
@gf.cell
def ring_single_heater(
    gap: float = 0.2,
    radius: float = 10.0,
    length_x: float = 4.0,
    length_y: float = 0.6,
    coupler_ring: ComponentFactory = _coupler_ring,
    straight: ComponentFactory = _straight,
    bend: ComponentFactory = bend_euler,
    cross_section_heater: CrossSectionFactory = gf.cross_section.strip_heater_metal,
    cross_section: CrossSectionFactory = gf.cross_section.strip,
    contact: ComponentFactory = contact_heater_m3_mini,
    port_orientation: float = 90,
    contact_offset: Float2 = (0, 0),
    **kwargs
) -> gf.Component:
    """Single bus ring made of a ring coupler (cb: bottom)
    connected with two vertical straights (sl: left, sr: right)
    two bends (bl, br) and horizontal straight (st: top),
    includes heater (ring waveguide uses the heater cross-section,
    with two electrical contacts left and right of the ring).

    Args:
        gap: gap between bus waveguide and ring for the coupler.
        radius: for the bend and coupler.
        length_x: ring coupler length.
        length_y: vertical straight length.
        coupler_ring: ring coupler function.
        straight: straight function.
        bend: 90 degrees bend function.
        cross_section_heater: cross-section for the heated ring waveguide.
        cross_section: cross-section for the bus waveguide.
        contact: factory for the two electrical contacts.
        port_orientation: orientation (degrees) of the contact ports to
            promote as electrical ports of the ring.
        contact_offset: (x, y) displacement applied to both contacts.
        kwargs: cross_section settings.

    .. code::

          bl-st-br
          |      |
          sl     sr length_y
          |      |
         --==cb==-- gap

          length_x
    """
    gf.snap.assert_on_2nm_grid(gap)

    # Specialize the sub-component factories with the shared geometry settings.
    coupler_ring = gf.partial(
        coupler_ring,
        bend=bend,
        gap=gap,
        radius=radius,
        length_x=length_x,
        cross_section=cross_section,
        bend_cross_section=cross_section_heater,
        **kwargs
    )
    straight_side = gf.partial(
        straight, length=length_y, cross_section=cross_section_heater, **kwargs
    )
    straight_top = gf.partial(
        straight, length=length_x, cross_section=cross_section_heater, **kwargs
    )
    bend = gf.partial(bend, radius=radius, cross_section=cross_section_heater, **kwargs)

    c = gf.Component()
    cb = c << coupler_ring()
    sl = c << straight_side()
    sr = c << straight_side()
    bl = c << bend()
    br = c << bend()
    st = c << straight_top()
    # st.mirror(p1=(0, 0), p2=(1, 0))

    # Chain the ring: coupler -> left straight -> left bend -> top straight
    # -> right bend -> right straight -> back to the coupler.
    sl.connect(port="o1", destination=cb.ports["o2"])
    bl.connect(port="o2", destination=sl.ports["o2"])
    st.connect(port="o2", destination=bl.ports["o1"])
    br.connect(port="o2", destination=st.ports["o1"])
    sr.connect(port="o1", destination=br.ports["o1"])
    sr.connect(port="o2", destination=cb.ports["o3"])

    # Optical ports are the bus-waveguide ports of the coupler.
    c.add_port("o2", port=cb.ports["o4"])
    c.add_port("o1", port=cb.ports["o1"])

    # Place one heater contact on each side of the ring, offset by contact_offset.
    c1 = c << contact()
    c2 = c << contact()
    c1.xmax = -length_x / 2 + cb.x - contact_offset[0]
    c2.xmin = +length_x / 2 + cb.x + contact_offset[0]
    c1.movey(contact_offset[1])
    c2.movey(contact_offset[1])

    # Promote the contact ports facing port_orientation as electrical ports.
    c.add_ports(c1.get_ports_list(orientation=port_orientation), prefix="e1")
    c.add_ports(c2.get_ports_list(orientation=port_orientation), prefix="e2")
    c.auto_rename_ports()
    return c
if __name__ == "__main__":
    # Visual smoke test: build one heated ring and display it.
    component = ring_single_heater(width=0.5, gap=1, layer=(2, 0), radius=10, length_y=1)
    print(component.ports)
    component.show(show_subports=False)
|
import cv2
import os
from PIL import Image
import imageUtils
def getFPS(video):
    """Return the frames-per-second reported by an opened cv2.VideoCapture."""
    fps = video.get(cv2.CAP_PROP_FPS)
    return fps
def processVideo(videoPath, fileName, path, bold):
    """Run every frame of a video through imageUtils.processImage.

    Clips longer than half a second are re-encoded to mp4 (re-muxing the
    original audio track when one exists); shorter clips become an animated
    GIF instead.

    Args:
        videoPath: input video file (deleted after a successful mp4 encode).
        fileName: base name (no extension) for the generated files.
        path: output directory.
        bold: forwarded to imageUtils.processImage.

    Returns:
        Tuple (outputFilePath, hasSeenTheLight) where the flag is True if
        any processed frame reported it.
    """
    video = cv2.VideoCapture(videoPath)
    hasSeenTheLight = False
    success, image = video.read()
    if not success:
        # Fail fast with a clear message instead of crashing on image.shape below.
        video.release()
        raise ValueError(f'Unable to read video: {videoPath}')
    FPS = getFPS(video)
    frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    videoLength = frames * (1 / FPS)
    videoHeight = image.shape[0]
    videoWidth = image.shape[1]
    size = (videoWidth, videoHeight)
    if videoLength > .5:
        fileType = 'mp4'
        # Extract the audio track first so it can be re-muxed after processing.
        os.system(f'ffmpeg -i {videoPath} -vn -acodec copy {path}/output-audio.aac -y')
        processedVideo = cv2.VideoWriter(f'{path}/{fileName}-pre.{fileType}',
                                         cv2.VideoWriter_fourcc(*'mp4v'),
                                         FPS,
                                         size
                                         )
        i = 0
        while success:
            progress = int(i / frames * 100)
            print(f'Processing Video: {str(progress)}% ({i}/{frames})', end="\r")
            image, hasSeenTheLightInImage = imageUtils.processImage(image, bold)
            if hasSeenTheLightInImage:
                hasSeenTheLight = True
            processedVideo.write(image)
            i = i + 1
            success, image = video.read()
        processedVideo.release()
        if os.path.isfile(f'{path}/output-audio.aac'):
            os.system(f'ffmpeg -i {path}/{fileName}-pre.{fileType} -i {path}/output-audio.aac -vcodec libx264 -c:a aac -map 0:v:0 -map 1:a:0 {path}/{fileName}-final.{fileType} -y')
            os.remove(f'{path}/output-audio.aac')
        else:
            os.system(f'ffmpeg -i {path}/{fileName}-pre.{fileType} -vcodec libx264 -c:a aac -map 0:v:0 {path}/{fileName}-final.{fileType} -y')
        os.remove(videoPath)
        os.remove(f'{path}/{fileName}-pre.{fileType}')
    else:
        # create GIF
        fileType = 'gif'
        gifFrames = []
        i = 0
        while success:
            progress = int(i / frames * 100)
            print(f'Processing GIF: {str(progress)}% (frame {i}/{frames})', end="\r")
            image, hasSeenTheLightInImage = imageUtils.processImage(image, bold)
            if hasSeenTheLightInImage:
                hasSeenTheLight = True
            # PIL expects RGB while OpenCV delivers BGR.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            gifFrames.append(Image.fromarray(image))
            i = i + 1
            success, image = video.read()
        gifFrames[0].save(f'{path}/{fileName}-final.{fileType}',
                          save_all=True,
                          append_images=gifFrames[1:],
                          # PIL's duration is milliseconds PER FRAME, not a
                          # frame rate: passing FPS here played the GIF at
                          # the wrong speed.
                          duration=1000 / FPS,
                          loop=1  # NOTE(review): loop=1 plays twice; 0 loops forever — confirm intent
                          )
    # Release the capture handle in both branches (was leaked before).
    video.release()
    return (f'{path}/{fileName}-final.{fileType}', hasSeenTheLight)
|
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau
# www.pagebot.io
#
# P A G E B O T
#
# Free to use. Licensed under MIT conditions
#
# Supporting usage of DrawBot, www.drawbot.com
# Supporting usage of Flat, https://github.com/xxyxyz/flat
# -----------------------------------------------------------------------------
#
# bookCovers.py
#
import math
from pagebot.contexts import defaultContext as context
from pagebot.fonttoolbox.objects.font import getFontByName
from pagebot import Gradient, Shadow
from pagebot.toolbox.dating import now
W, H = 400, 600
def buildCoverPages1(w, h, v):
    """Draw the first cover design: stacked title lines, each fitted to a width
    and centered horizontally (v is unused for this design)."""
    M = 10
    # Page 66
    context.newPage(w, h)
    context.fill(0.1)
    context.rect(0, 0, w, h)
    c1 = (0.2, 0.7, 1)
    c2 = 0.8

    # One entry per title line:
    # (font, text, fit width, rTracking or None, text fill, gap below the line)
    titleLines = (
        ('Upgrade-Hairline', 'THE', w - M, None, c1, M / 2),
        ('Upgrade-Thin', 'LONGEST', w - 1.5 * M, None, c1, M / 2),
        ('Upgrade-ExtraLight', 'MELLIFLUOUSLY', w - 1.5 * M, 0.05, c1, M / 2),
        ('Upgrade-Light', 'supercalifragilisticexpialidociously'.upper(), w - 2 * M, 0.07, c1, M / 2),
        ('Upgrade-Book', 'pneumonoultramicroscopicsilicovolcanoconiosis'.upper(), w - 2 * M, 0.07, c1, M / 2),
        ('Upgrade-Light', 'INTERMIXED', w - 1.5 * M, None, c1, 2 * M),
        ('Upgrade-Light', 'MATTHEW', w - 1.5 * M, None, c2, M / 2),
        ('Upgrade-Light', 'DOW', w - M, None, c2, M / 2),
    )

    y = h - M
    for font, text, fitWidth, tracking, fill, gap in titleLines:
        # Fit the line to its target width, center it, then step down by its height.
        style = dict(font=font, fontSize=100, textFill=fill)
        if tracking is not None:
            style['rTracking'] = tracking
        bs = context.newString(text, style=style, w=fitWidth)
        bx, by, bw, bh = bs.bounds()
        context.text(bs, (w / 2 - (bw + bx) / 2, y - bh + by))
        y -= bh - by + gap
def buildCoverPages2(w, h, v):
    """Draw v pages of the second cover design ("One Lightyear Equals"):
    a stack of digit blocks overlaid in every weight of the family, with a
    per-page random jitter so an animated GIF of the pages wobbles."""
    # random() was used below without ever being imported (NameError at run
    # time); import it locally so the module header stays untouched.
    from random import random
    M = 30
    for pn in range(v):
        # Page 66
        context.newPage(w, h)
        context.fill(0.1)
        context.rect(0, 0, w, h)
        c1 = (0.2, 0.7, 1)
        c2 = 0.8
        y = h - M
        # Title of cover, make it fit the width.
        coverTitleStyle = dict(font='Upgrade-Book', fontSize=100, textFill=1, rTracking=0.2, openTypeFeatures=dict(smcp=True))
        bs = context.newString('One Lightyear Equals', style=coverTitleStyle, w=w-2*M)
        bx, by, bw, bh = bs.bounds()
        context.text(bs, (w/2-(bw+bx)/2, y-bh+by))
        y -= 100
        styleColors = ('Upgrade-UltraBlack', 'Upgrade-ExtraBlack', 'Upgrade-Black', 'Upgrade-Semibold', 'Upgrade-Medium',
                       'Upgrade-Regular', 'Upgrade-Book', 'Upgrade-Light', 'Upgrade-ExtraLight', 'Upgrade-Thin', 'Upgrade-Hairline')
        # Jitter radius: fixed for a single page, otherwise animated over pages.
        if v == 1:
            R = 22
        else:
            R = math.sin(math.radians(pn*360/v))*16
        for index, name in enumerate(styleColors):
            # Overlay successive weights with increasing opacity and random offsets.
            coverTitleStyle = dict(font=name, fontSize=100, textFill=list(c1)+[index/len(styleColors)], rTracking=0.2, rLeading=0.9, openTypeFeatures=dict(tnum=True))
            bs = context.newString('9460\n7304\n7258\n0800\n512', style=coverTitleStyle, w=w-M/2)
            bx, by, bw, bh = bs.bounds()
            context.text(bs, (w/2-(bw+bx)/2+(random()*R-R/2), -by+1.5*M+(random()*R-R/2)))
        coverTitleStyle = dict(font='Upgrade-ExtraLight', fontSize=100, textFill=c1, rTracking=0.05, rLeading=0.9)
        bs = context.newString('mm', style=coverTitleStyle, w=w/5)
        bx, by, bw, bh = bs.bounds()
        context.text(bs, (w*4/6+M, -by+3.2*M))
        coverTitleStyle = dict(font='Upgrade-Regular', fontSize=100, textFill=c2, rTracking=0.2, rLeading=1.2, openTypeFeatures=dict(smcp=True))
        bs = context.newString('Design\nDesign\nSpace', style=coverTitleStyle, w=w/5)
        bx, by, bw, bh = bs.bounds()
        context.text(bs, (w*4/6+M, -by+0.75*M))
# (output path, width, height, page/variant count passed as v, build function)
IMAGES = (
    ('docs/documents/bookCoverPages1.pdf', W, H, None, buildCoverPages1),
    ('docs/images/bookCoverPages1.png', W, H, None, buildCoverPages1),
    ('docs/documents/bookCoverPages2.pdf', W, H, 1, buildCoverPages2),
    ('docs/images/bookCoverPages2.gif', W, H, 40, buildCoverPages2),
)
for path, w, h, v, m in IMAGES:
    # newDrawing/saveImage come from the DrawBot drawing context —
    # TODO confirm they are in scope when this script runs.
    newDrawing()
    m(w, h, v)
    saveImage(path, multipage=True)
    print(path)  # was Python-2-only `print path`, a SyntaxError under Python 3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#===================================#
# File name: #
# Author: Vitor dos Santos Batista #
# Date created: #
# Date last modified: #
# Python Version: 2.7 #
#===================================#
import evolgen as egg
"""
pop_size = 100
ind_size = 100
ind_range = [0, 1]
par_sel = [0.8, pop_size]
par_cruz = [0.7, pop_size]
par_mut = [0.1, ind_range]
parm = [par_sel, par_cruz, par_mut]
fun = ['sto', 'cop', 'mru']
fit_func = [fit.fem]
train = np.load('train_10x10.npy')
test = np.load('test_10x10.npy')
l_train = np.load('train_label.npy')
l_test = np.load('test_label.npy')
top = [100, 10, 1]
egg = Evolgen(has_pop=False, population=[pop_size, ind_size,
ind_range, 'float'],
par=parm, fun=fun, fit=fit_func, gen=100,
ann = [top, train, l_train])
egg.run()
"""
#=================#
# NOTE(review): `np` and `fit` were referenced below without ever being
# imported, so this script raised NameError at run time. `fit` is assumed
# to be the project-local fitness-function module (it is also referenced in
# the commented example above) — confirm its module name.
import numpy as np
import fit

pop_size = 100                   # population size
ind_size = 2                     # genes per individual
ind_range = [-100, 100]          # overridden just below; kept for reference
ind_range = [-np.pi, np.pi]      # effective per-gene search range
par_sel = [0.8, pop_size]        # selection: rate, population size
par_cruz = [0.8, pop_size]       # crossover: rate, population size
par_mut = [0.03, [-100, 100]]    # mutation: rate, value range
parm = [par_sel, par_cruz, par_mut]
#fun = ['sto', 'cop', 'mru']
fun = ['fcd', 'cop', 'mru']      # operator codes: selection, crossover, mutation
fit_func = [fit.f1, fit.f2]      # overridden just below; kept for reference
fit_func = [fit.pol_f1, fit.pol_f2]
egg = egg.evolgen(has_pop=False, population=[pop_size, ind_size,
                  ind_range, 'float'],
                  par=parm, fun=fun, fit=fit_func, gen=1000)
egg.run()
egg.plot()
|
from typing import Optional, Tuple
import jwt
from pydantic import BaseModel, validator
from .misc import now_epoch
class JWTBaseModel(BaseModel):
    """Base class to manage JWT.

    The pydantic model fields are the data content of the JWT.

    The default expiration (exp) is set to 900 seconds. Pass an explicit exp
    value to change the expiration time.
    """
    # Expiration as epoch seconds; defaulted by the validator below.
    exp: int = None  # type: ignore

    @validator('exp', pre=True, always=True)
    def set_exp(cls, exp):
        """Default exp to now + 900 s when not provided.

        (Renamed from the misleading `set_id`: it sets exp, not an id.)
        """
        return exp or (now_epoch() + 900)

    @classmethod
    def decode_token(cls, key: str, token: str, verify: bool = True):
        """Decode the token and load the data content into an instance of this class

        Parameters
        ----------
        key: Secret key used to encrypt the JWT
        token: The encoded JWT string
        verify: If true, verify the signature is valid, otherwise skip. Default is True

        Returns
        -------
        Class instance
        """
        # Use the `options` dict rather than the legacy `verify=` kwarg:
        # PyJWT >= 2.0 ignores `verify=` (so verify=False would still check
        # the signature and could raise); `options` works on both 1.x and 2.x.
        data = jwt.decode(token, key, algorithms=['HS256'],
                          options={'verify_signature': verify})
        # Enforce conversion to satisfy typing
        data = dict(data)
        return cls(**data)

    @classmethod
    def decode_hp_s(cls, key: str, header_payload: str, signature: Optional[str] = None):
        """Decode the token provided in the format "header.payload" and signature

        Parameters
        ----------
        key: Secret key used to encrypt the JWT
        header_payload: The JWT without its signature segment
        signature: If provided, verify the authenticity of the token.

        Returns
        -------
        Class instance
        """
        # A syntactically valid third segment is required even when the
        # signature is not going to be checked.
        sig = signature if signature is not None else 'DUMMYSIGNATURE'
        token = header_payload + '.' + sig
        return cls.decode_token(token=token, key=key, verify=(signature is not None))

    def encode_token(self, key: str) -> str:
        """Encode the class data into a JWT and return a string

        Parameters
        ----------
        key: Secret key used to encrypt the JWT

        Returns
        -------
        The JWT as a string
        """
        data = self.dict()
        data['exp'] = self.exp
        return jwt.encode(data, key, algorithm='HS256')

    def encode_hp_s(self, key: str) -> Tuple[str, str]:
        """Encode the class data into a JWT

        Parameters
        ----------
        key: Secret key used to encrypt the JWT

        Returns
        -------
        The JWT in the format "header.payload" and the signature
        """
        token = self.encode_token(key=key)
        header, payload, signature = token.split('.')
        return f'{header}.{payload}', signature

    @property
    def cookie_key(self) -> str:
        """Define the key of the cookie used to store the JWT"""
        raise NotImplementedError
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thompson Sampling with linear posterior over a learnt deep representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.stats import invgamma
from bandits.core.bandit_algorithm import BanditAlgorithm
from bandits.core.contextual_dataset_finite_memory import ContextualDataset
from bandits.algorithms.neural_bandit_model import NeuralBanditModel#,TextCNN
import cvxpy as cvx
import math
from scipy.special import gamma
import tensorflow as tf
class NeuralLinearPosteriorSamplingFiniteMemory(BanditAlgorithm):
  """Full Bayesian linear regression on the last layer of a deep neural net.

  Thompson Sampling variant: a neural net maps raw contexts to a latent
  representation, and an independent Bayesian linear regression
  (Normal-Inverse-Gamma posterior) is maintained per action on top of it.
  Both datasets are bounded (buffer_s=hparams.mem); after each network
  retrain the priors are re-estimated (likelihood matching / NN weights) so
  information from evicted samples is not lost.
  """

  def __init__(self, name, hparams, textflag='no', optimizer='RMS'):
    """Creates the feature network and the per-action posterior state.

    Args:
      name: label used for naming the underlying TF model.
      hparams: hyperparameters (layer_sizes, num_actions, lambda_prior,
        a0, b0, training frequencies, mem buffer size, prior flags, ...).
      textflag: 'yes' selects the TextCNN feature extractor, otherwise a
        feed-forward NeuralBanditModel.
      optimizer: optimizer name for the network ('RMS' by default).
    """
    self.name = name
    self.hparams = hparams
    self.latent_dim = self.hparams.layer_sizes[-1]
    self.intercept = False  # when True, a constant-1 feature is appended to z
    if self.intercept:
      self.param_dim = 1 + self.latent_dim
    else:
      self.param_dim = self.latent_dim
    self.EPSILON = 0.00001  # jitter keeping prior precision matrices invertible

    # Gaussian prior for each beta_i
    self._lambda_prior = self.hparams.lambda_prior

    # Diagnostics buffers for model evidence before/after retraining
    # (see the commented-out calls in update()).
    self.before = []
    self.after = []

    # Per-action posterior mean of the regression weights.
    self.mu = [
        np.zeros(self.param_dim)
        for _ in range(self.hparams.num_actions)
    ]
    # f[i] accumulates z^T y for action i.
    self.f = [
        np.zeros(self.param_dim)
        for _ in range(self.hparams.num_actions)
    ]
    # yy[i] accumulates squared rewards for action i.
    self.yy = [0 for _ in range(self.hparams.num_actions)]

    self.cov = [(1.0 / self.lambda_prior) * np.eye(self.param_dim)
                for _ in range(self.hparams.num_actions)]

    self.precision = [
        self.lambda_prior * np.eye(self.param_dim)
        for _ in range(self.hparams.num_actions)
    ]

    self.mu_prior_flag = self.hparams.mu_prior_flag
    self.sigma_prior_flag = self.hparams.sigma_prior_flag

    # NOTE(review): shallow copy — entries are shared with self.precision
    # until an index is reassigned.
    self.precision_prior = self.precision[:]
    self.mu_prior = np.zeros((self.param_dim, self.hparams.num_actions))

    # Inverse Gamma prior for each sigma2_i
    self._a0 = self.hparams.a0
    self._b0 = self.hparams.b0
    self.a = [self._a0 for _ in range(self.hparams.num_actions)]
    self.b = [self._b0 for _ in range(self.hparams.num_actions)]

    # Regression and NN Update Frequency
    self.update_freq_lr = hparams.training_freq
    self.update_freq_nn = hparams.training_freq_network

    self.t = 0
    self.optimizer_n = optimizer
    self.num_epochs = hparams.training_epochs

    # Raw contexts (to retrain the net) and latent representations (for the
    # linear regressions); both limited to hparams.mem samples.
    self.data_h = ContextualDataset(hparams.context_dim,
                                    hparams.num_actions,
                                    intercept=False, buffer_s=hparams.mem)
    self.latent_h = ContextualDataset(self.latent_dim,
                                      hparams.num_actions,
                                      intercept=self.intercept, buffer_s=hparams.mem)
    if textflag == 'yes':
      self.bnn = TextCNN('adam', self.hparams.num_actions, self.hparams.batch_size, '{}-bnn'.format(name))
    else:
      self.bnn = NeuralBanditModel(optimizer, hparams, '{}-bnn'.format(name))

  def action(self, context):
    """Samples beta's from posterior, and chooses best action accordingly."""
    # Round robin until each action has been selected "initial_pulls" times
    if self.t < self.hparams.num_actions * self.hparams.initial_pulls:
      return self.t % self.hparams.num_actions

    # Sample sigma2, and beta conditional on sigma2
    sigma2_s = [
        self.b[i] * invgamma.rvs(self.a[i])
        for i in range(self.hparams.num_actions)
    ]
    try:
      beta_s = [
          np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i])
          for i in range(self.hparams.num_actions)
      ]
    except np.linalg.LinAlgError as e:
      # Sampling could fail if covariance is not positive definite;
      # fall back to a standard-normal draw.
      d = self.latent_dim
      beta_s = [
          np.random.multivariate_normal(np.zeros((d)), np.eye(d))
          for i in range(self.hparams.num_actions)
      ]

    # Compute last-layer representation for the current context
    with self.bnn.graph.as_default():
      c = context.reshape((1, self.hparams.context_dim))
      z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c})
    if self.intercept:
      z_context = np.append(z_context, 1.0).reshape((1, self.latent_dim + 1))

    # Apply Thompson Sampling to last-layer representation
    vals = [
        np.dot(beta_s[i], z_context.T) for i in range(self.hparams.num_actions)
    ]
    return np.argmax(vals)

  def calc_precision_prior(self, contexts):
    """Re-estimates the per-action prior covariance after a network retrain.

    For each action, solves a PSD least-squares problem (likelihood
    matching): find a covariance X whose induced confidence scores on the
    new latent contexts match the scores d_i recorded under the previous
    representation. Falls back to an epsilon prior when an action has no
    data or the solver fails.

    Args:
      contexts: matrix of the new latent contexts.

    Returns:
      List of one precision matrix (inverse covariance) per action; also
      updates self.cov in place.
    """
    precisions_return = []
    n, m = contexts.shape
    prior = (self.EPSILON) * np.eye(self.param_dim)
    if self.cov is not None:
      for action, precision in enumerate(self.cov):
        # Rows of the buffer that belong to this action.
        ind = np.array([i for i in range(n) if self.data_h.actions[i] == action])
        if len(ind) > 0:
          """compute confidence scores for old data"""
          d = []
          for c in self.latent_h.contexts[ind, :]:
            d.append(np.dot(np.dot(c, precision), c.T))
          """compute new data correlations"""
          phi = []
          for c in contexts[ind, :]:
            phi.append(np.outer(c, c))
          X = cvx.Variable((m, m), PSD=True)
          # Form objective.
          obj = cvx.Minimize(sum([(cvx.trace(X * phi[i]) - d[i]) ** 2 for i in range(len(d))]))
          prob = cvx.Problem(obj)
          prob.solve()
          if X.value is None:
            # Solver failed: fall back to the epsilon prior.
            precisions_return.append(np.linalg.inv(prior))
            self.cov[action] = prior
          else:
            precisions_return.append(np.linalg.inv(X.value + prior))
            self.cov[action] = X.value + prior
        else:
          # No data for this action yet: epsilon prior.
          precisions_return.append(np.linalg.inv(prior))
          self.cov[action] = prior
    return (precisions_return)

  def update(self, context, action, reward):
    """Updates the posterior using linear bayesian regression formula."""
    self.t += 1
    self.data_h.add(context, action, reward)
    c = context.reshape((1, self.hparams.context_dim))
    z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c})
    self.latent_h.add(z_context, action, reward)

    # Retrain the network on the original data (data_h)
    if self.t % self.update_freq_nn == 0:
      if self.hparams.reset_lr:
        self.bnn.assign_lr()
      self.bnn.train(self.data_h, self.num_epochs)

      # Update the latent representation of every datapoint collected so far
      new_z = self.bnn.sess.run(self.bnn.nn,
                                feed_dict={self.bnn.x: self.data_h.contexts})
      self.latent_h.replace_data(contexts=new_z)
      # Build the (optionally intercept-augmented) latent context matrix.
      i_contexts = None
      for context in new_z:
        c = np.array(context[:])
        if self.intercept:
          c = np.append(c, 1.0).reshape((1, self.latent_dim + 1))
        if i_contexts is None:
          i_contexts = c
        else:
          i_contexts = np.vstack((i_contexts, c))

      # Update the confidence prior using feature uncertainty matching
      #self.before.append(self.calc_model_evidence())
      if self.sigma_prior_flag == 1:
        self.precision_prior = self.calc_precision_prior(contexts=i_contexts)
      # Update the mean prior using the weights of the NN
      if self.mu_prior_flag == 1:
        weights_p, bias_p = self.bnn.get_mu_prior()
        self.mu_prior[:self.latent_dim] = weights_p
        self.mu_prior[-1] = bias_p
      #self.after.append(self.calc_model_evidence())
      #print(self.before)
      #print(self.after)

      # Update the Bayesian Linear Regression
      for action_v in range(self.hparams.num_actions):
        # Update action posterior with formulas: \beta | z,y ~ N(mu_q, cov_q)
        z, y = self.latent_h.get_data(action_v)
        # The algorithm could be improved with sequential formulas (cheaper)
        self.precision[action_v] = (np.dot(z.T, z) + self.precision_prior[action_v])
        self.f[action_v] = np.dot(z.T, y)
    else:
      # Cheap rank-1 incremental update for the pulled action only.
      if self.intercept:
        z_context = np.append(z_context, 1.0).reshape((1, self.latent_dim + 1))
      self.precision[action] += np.dot(z_context.T, z_context)
      self.cov[action] = np.linalg.inv(self.precision[action])
      self.f[action] += (z_context.T * reward)[:, 0]

    # Calc mean and precision using bayesian linear regression
    # NOTE(review): on the retrain branch self.cov[action] is the matrix set
    # by calc_precision_prior, not inv(self.precision[action]) — confirm
    # this is intended.
    self.mu[action] = np.dot(self.cov[action], (self.f[action] + np.dot(self.precision_prior[action], self.mu_prior[:, action])))

    # Inverse Gamma posterior update
    self.yy[action] += reward ** 2
    self.a[action] += 0.5
    b_upd = 0.5 * self.yy[action]
    b_upd += 0.5 * np.dot(self.mu_prior[:, action].T, np.dot(self.precision_prior[action], self.mu_prior[:, action]))
    b_upd -= 0.5 * np.dot(self.mu[action].T, np.dot(self.precision[action], self.mu[action]))
    self.b[action] = self.b0 + b_upd

  @property
  def a0(self):
    # Initial shape parameter of the Inverse-Gamma noise prior.
    return self._a0

  @property
  def b0(self):
    # Initial scale parameter of the Inverse-Gamma noise prior.
    return self._b0

  @property
  def lambda_prior(self):
    # Precision of the Gaussian prior over the regression weights.
    return self._lambda_prior

  def calc_model_evidence(self):
    """Returns the Bayesian model evidence averaged over all actions.

    Closed-form marginal likelihood of the Normal-Inverse-Gamma model for
    the data currently held in latent_h; used as a before/after retrain
    diagnostic (see commented-out calls in update()).
    """
    vval = 0
    for action in range(self.hparams.num_actions):
      sigma0 = self.precision_prior[action]
      mu_0 = self.mu_prior[:, action]
      z, y = self.latent_h.get_data(action)
      n = z.shape[0]
      s = np.dot(z.T, z)
      s_n = (sigma0 + s)
      cov_a = np.linalg.inv(s_n)
      mu_a = np.dot(cov_a, (np.dot(z.T, y) + np.dot(sigma0, mu_0)))
      a_post = (self.a0 + n / 2.0)
      b_upd = 0.5 * np.dot(y.T, y)
      b_upd += 0.5 * np.dot(mu_0.T, np.dot(sigma0, mu_0))
      b_upd -= 0.5 * np.dot(mu_a.T, np.dot(s_n, mu_a))
      b_post = self.b0 + b_upd
      # np.float128 limits under/overflow in the product below; NOTE it is
      # platform-dependent (unavailable on some builds) — confirm targets.
      val = np.float128(1)
      val /= ((np.float128(2.0) * math.pi) ** (n / 2.0))
      val *= (gamma(a_post) / gamma(self.a0))
      val *= np.sqrt(np.linalg.det(sigma0) / np.linalg.det(s_n))
      val *= ((self.hparams.b0 ** self.hparams.a0) / (b_post ** a_post))
      vval += val
    vval /= self.hparams.num_actions
    return vval
# Print the first 10 terms of an arithmetic progression, '--' separated.
first = int(input('Digite o 1° termo: '))
step = int(input('razão: '))
last = first + 9 * step  # 10th term
for term in range(first, last + step, step):
    print(f'{term}', end='--')
print('ACABOU')
|
import logging
import docker
# http://docker-py.readthedocs.io/en/1.10.0/api/
def _connect(connection_paramaters):
    """Build a docker client for the host named in connection_paramaters."""
    if 'host' not in connection_paramaters:
        raise ValueError('\'host\' is required')
    host = connection_paramaters['host']
    logging.debug('docker: connecting to docker at "{0}"'.format(host))
    return docker.DockerClient(base_url='tcp://{0}:2376'.format(host))
def create(paramaters):
    """Pull the image and create (without starting) the described container.

    Returns {'done': True, 'id': <container id>} on success.
    """
    container_name = paramaters['name']
    connection_paramaters = paramaters['connection']
    logging.info('docker: creating container "{0}"'.format(container_name))
    client = _connect(connection_paramaters)
    image = paramaters['docker_image']
    logging.debug('docker: pulling "{0}"'.format(image))
    try:
        client.images.pull(image)
    except Exception as e:
        raise Exception('Error Creating Container: {0}'.format(str(e)))
    container_paramaters = {
        'auto_remove': False,
        'detach': True,
        'image': image,
        'name': container_name,
        'ports': paramaters['port_map'],
        'environment': paramaters['environment_map'],
        'command': paramaters['command'],
    }
    logging.debug('docker: creating "{0}"'.format(container_paramaters))
    try:
        container = client.containers.create(**container_paramaters)
    except Exception as e:
        raise Exception('Error Creating Container: {0}'.format(str(e)))
    logging.info('docker: container "{0}" created'.format(container_name))
    return {'done': True, 'id': container.id}
def create_rollback(paramaters):
    """Placeholder for container rollback: logs the attempt and always raises."""
    container_name = paramaters['name']
    logging.info('docker: rolling back container "{0}"'.format(container_name))
    raise Exception('docker rollback not implemented, yet')
def destroy(paramaters):
    """Force-remove the container identified by paramaters['docker_id'].

    Returns {'done': True} on success; wraps client errors in Exception.
    """
    docker_id = paramaters['docker_id']
    connection_paramaters = paramaters['connection']
    container_name = paramaters['name']
    logging.info('docker: destroying container "{0}"({1})'.format(container_name, docker_id))
    client = _connect(connection_paramaters)
    try:
        target = client.containers.get(docker_id)
    except Exception as e:
        raise Exception('Error Getting Container "{0}": {1}'.format(docker_id, str(e)))
    try:
        target.remove(force=True)
    except Exception as e:
        raise Exception('Error Removing Container "{0}": {1}'.format(docker_id, str(e)))
    logging.info('docker: container "{0}" destroyed'.format(container_name))
    return {'done': True}
def _power_state_convert(state):
    """Map a docker container status string to the generic power state.

    'running' maps to 'start'; every other status maps to 'stop'.
    """
    # Leftover debug print() to stdout replaced with a proper debug log line.
    logging.debug('docker: container status "{0}"'.format(state))
    if state == 'running':
        return 'start'
    else:
        return 'stop'
def start_stop(paramaters):
    """Bring the container to the desired state ('start'/'stop').

    No-op when the container is already in the desired state. Returns
    {'state': <state>}; wraps client errors in Exception.
    """
    docker_id = paramaters['docker_id']
    connection_paramaters = paramaters['connection']
    container_name = paramaters['name']
    desired_state = paramaters['state']
    logging.info('docker: setting state of "{0}"({1}) to "{2}"...'.format(container_name, docker_id, desired_state))
    client = _connect(connection_paramaters)
    try:
        container = client.containers.get(docker_id)
    except Exception as e:
        raise Exception('Error Getting Container "{0}": {1}'.format(docker_id, str(e)))
    if _power_state_convert(container.status) == desired_state:
        # Already there: nothing to do.
        return {'state': desired_state}
    # Dispatch on the desired state instead of duplicated try/except blocks.
    if desired_state == 'start':
        operation, label = container.start, 'Starting'
    elif desired_state == 'stop':
        operation, label = container.stop, 'Stopping'
    else:
        raise Exception('Unknown desired state "{0}"'.format(desired_state))
    try:
        operation()
    except Exception as e:
        raise Exception('Error {0} Container "{1}": {2}'.format(label, docker_id, str(e)))
    logging.info('docker: setting state of "{0}"({1}) to "{2}" complete'.format(container_name, docker_id, desired_state))
    return {'state': desired_state}
def state(paramaters):
    """Return {'state': 'start'|'stop'} for the container's current status."""
    docker_id = paramaters['docker_id']
    connection_paramaters = paramaters['connection']
    container_name = paramaters['name']
    logging.info('docker: getting "{0}"({1}) power state...'.format(container_name, docker_id))
    client = _connect(connection_paramaters)
    try:
        container = client.containers.get(docker_id)
    except Exception as e:
        raise Exception('Error Getting Container "{0}": {1}'.format(docker_id, str(e)))
    return {'state': _power_state_convert(container.status)}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.