text stringlengths 8 6.05M |
|---|
from django.contrib import admin
# Register your models here.
from .models import Book
"""Minimal registration of Models.
admin.site.register(Book)
"""
admin.site.register(Book)
class BooksInline(admin.TabularInline):
    """Tabular inline editor for Book rows (embedded in AuthorAdmin)."""
    model = Book
class BooksInstanceInline(admin.TabularInline):
    """Defines format of inline book instance insertion (used in BookAdmin)"""
    # NOTE(review): the class name and docstring say book *instance*, but the
    # model is Book — confirm whether this was meant to be a BookInstance model.
    model = Book
class BookAdmin(admin.ModelAdmin):
    """Admin configuration for Book.

    Shows title, author and genre columns in the change list, and allows
    book instances to be added inline from the book detail page.
    """
    list_display = ('title', 'author', 'display_genre')
    inlines = [BooksInstanceInline]
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile
# This program requires LEGO EV3 MicroPython v2.0 or higher.
# Click "Open user guide" on the EV3 extension tab for more informati
# Create your objects here.
ev3 = EV3Brick()
# Write your program here.
# Motor(Port.C).run(500)
# 모터C가 500의 속도로
# Motor(Port.B).run(500)
# 모터B가 500의 속도로
# wait(1000)
# 모터가 500의 속도로 1.000초동안 이동하는것.
# Motor(Port.C).stop(Stop.HOLD)
# Motor(Port.B).stop(Stop.HOLD)
# 모터가 멈추는 것이다.
# HOLD:강제유지 정지
# COAST:부드럽게 정지
# BRAKE:강제 정지
# wait(200)
# B=Motor(Port.B)
# B는 Motor(Port.B)와 같다
# C=Motor(Port.C)
# C는 Motor(Port.C)와 같다
# B.run(500)
# B.run(500)=Motor(Port.B).run(500)이다
# C.run(500)
# B.run(500)=Motor(Port.B).run(500)이다
# wait(2000)
# B=Motor(Port.B)
# C= Motor(Port.C)
# B.run_time(500,2000)
# run_time(1~,2~)에서 1~= 속도2~=시간과 같다
# C.run_time(500,2000)
# box=input("아무거나 입력하세요")
# if box == '바보':
# print("택시를 타고 가라")
# else:
# print("걸어가라")
# 아무거나 입력하세요 일때
# 바보가 box면 택시를 타고 가라 라고 말함
# 바보가 box가 아니면 걸어가라 라고 말함
# a = 0
# while a<10:
# print('배고파')
# a=a+1
# a=0일때,
# a가 10 보다 작을때 배고파를 말하다.
# 그 다음 a에 1을 더한다
# 그 다음 다시 실행한다(반복)
# for i in range(10):
# print (i)
# 0~9까지 순서대로 보여준다
# a = ['one','two','three']
# for i in a:
# print (i)
# 순서대로 a에 있는 one,two.three를 순서대로 보여준다
# (i를 꼭 넣어야 한다)
# for i in range(3):
# Motor(Port.C).run(500)
# Motor(Port.B).run(500)
# wait(1000)
# "500의 속도로 포트CB가 1.000초로 이동한다"가 3번 반복
# def Turn_Left():
# Motor(Port.B).run(500)
# wait(800)
# Motor(Port.B).run(-300)
# Motor(Port.C).run(300)
# wait(400)
# Turn_Left()
# for i in range(4):
# Motor(Port.B).run(500)
# wait(800)
# Motor(Port.B).run(300)
# Motor(Port.C).run(-300)
# wait(400) Motor(Port.C).run(500)
# TS=TouchSensor(Port.S1)
# while a.pressed() == 0:
# Motor(Port.B).run(300)
# Motor(Port.C).run(300)
# wait(1000)
# 터치 센서를 눌르지 않으면 앞으로 갔다가 멈추는 것을 반복하고 터치 센서를 눌르면 프로그램 종료
# 터치 센서가 눌러 있지 않으면 (참)반복
# 이므로 눌르면 프로그램 종료
#TS= 포트 1(S1)에 있는터치 센서 선언
# while True:
# Motor(Port.B).run(500)
# Motor(Port.C).run(500)
# if b.distance() <= 500:
# Motor(Port.B).stop(Stop.BRAKE)
# Motor(Port.C).stop(Stop.BRAKE)
# s1 = ColorSensor(Port.S1)
# NOTE(review): s2 is re-created identically near the end of the file; this
# earlier binding appears redundant — confirm before removing either one.
s2 = ColorSensor(Port.S2)
# while True:
# brick.display.text()
# wait(5000)
# # 컬러센서로 색을 측정하는 프로그램
# while True:
# if s4.color() == Color.BLACK:
# Motor(Port.C).run(500)
# Motor(Port.B).run(400)
# if s4.color() == color.
# Motor(Port.C).run(400)
# Motor(Port.B).run(500)
# s4=UltrasonicSensor(Port.S4)
#s3 = GyroSensor(Port.S3)
# B = Motor(Port.B)
# C = Motor(Port.C)
# for i in range(10):
# while i % 2 == 0:
# B.run(500)
# C.run(500)
# wait(1000)
# B.stop(Stop.BRAKE)
# C.stop(Stop.BRAKE)
# wait(200)
# while True:
# if us.distance() > 100:
# B.run(300)
# C.run(300)
# elif us.distance() < 80:
# B.run(-300)
# C.run(-300)
# else:
# B.stop(Stop.BRAKE)
# C.stop(Stop.BRAKE)
# while True:
# ev3.screen.print(ev3.buttons.pressed())
# while True:
# if ev3.buttons.pressed() == [Button.UP]:
# B.run(300)
# C.run(300)
# else:
# C.stop(Stop.BRAKE)
# B.stop(Stop.BRAKE)
# ev3.speaker.say('')
# robot = DriveBase(B, C, wheel_diameter=55.5, axle_track=104)
# robot.straight(1000)
# ev3.sspeaker.beep()
# while True:
# B.run(200)
# C.run(200)
# if s1.pressed() == 1:
# break
# B.run(-200)
# C.run(-200)
# wait(1000)
# while True:
# robot.turn(90)
# B.run(200)
# C.run(200)
# if s2.color() == Color.BLACK:
# robot.turn(-100)
# Motor(Port.C).run(500)
# Motor(Port.B).run(400)
# if s2.color() == Color.WHITE:
# Motor(Port.C).run(400)
# Motor(Port.B).run(500)
# if s4.distance() > 200:
# break
# robot.turn(3600)
# while True:
# if CS.color() == Color.GREEN:
# B.run(480)
# C.run(680)
# elif CS.color() == Color.YELLOW:
# B.run(680)
# C.run(480)
# B.run(300)
# C.run(300)
# wait(1600)
# B.run(300)
# C.run(-300)
# wait(590)
# B.run(300)
# C.run(300)
# wait(2800)
# B.run(-300)
# C.run(300)
# wait(560)
# while True:
# if CS.color() == Color.BLACK:
# B.run(300)
# C.run(500)
# elif CS.color() == Color.WHITE:
# B.run(500)
# C.run(300)
# while True:
# if CS.color() == Color.RED:
# ev3.speaker.set_volume(5000)
# ev3.speaker.say('RED')
# if CS.color() == Color.YELLOW:
# ev3.speaker.say('YELLOW')
# if CS.color() == Color.BLUE:
# ev3.speaker.say('BLUE')
# if CS.color() == Color.GREEN:
# ev3.speaker.say('GREEN')
# while True:
# B.run(500)
# C.run(500)
# if TS.pressed() == 1:
# ev3.speaker.set_volume(10000)
# ev3.speaker.say('멈춰')
# B.run(500)
# C.run(300)
# wait(1600)
# B.run(300)
# C.run(-300)
# wait(590)
# B.run(300)
# C.run(300)
# wait(2800)
# B.run(-300)
# C.run(300)
# wait(520)
# B.run(300)
# C.run(300)
# wait(2650)
# B.run(240)
# C.run(-240)
# wait(560)
# if CS.color() == Color.YELLOW:
# ev3.speaker.say('YELLOW')
# Drive motors on output ports B and C.
B = Motor(Port.B)
C = Motor(Port.C)
# s1 = ColorSensor(Port.S1)
s2 = ColorSensor(Port.S2)  # color sensor on input port S2
ev3.speaker.set_volume(10000)
# Drive forward continuously and speak the name of the color the sensor sees.
# Runs forever; stop the program from the brick/IDE.
while True:
    B.run(500)
    C.run(500)
    if s2.color() == Color.RED:
        ev3.speaker.say('RED')
    elif s2.color() == Color.YELLOW:
        ev3.speaker.say('YELLOW')
    elif s2.color() == Color.BLUE:
        ev3.speaker.say('BLUE')
|
from aiogram import types
from aiogram.dispatcher import FSMContext
from bot.loader import dp
@dp.message_handler(state=None)
async def bot_echo(message: types.Message):
    """Echo handler for messages that arrive while no FSM state is set."""
    reply = f"{message.text}" f"Вы были зарегестрированы {None}"
    await message.answer(reply)
@dp.message_handler(state="*", content_types=types.ContentTypes.ANY)
async def bot_echo_all(message: types.Message, state: FSMContext):
state = await state.get_state()
await message.answer(f"Состояние <code>{state}</code>.\n"
f"\nСообщение:\n"
f"<code>{message}</code>\n")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addEditCoffeeForm.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Generated PyQt5 UI for addEditCoffeeForm.ui: a two-tab form for
    adding and editing coffee records.

    NOTE: generated by pyuic5 — do not edit by hand; regenerate from the
    .ui file instead (see the warning in the file header).
    """

    def setupUi(self, Form):
        # Build the widget tree: a QTabWidget with an "add" tab and an
        # "edit" tab, each holding three combo boxes (sort/degree/type),
        # three line edits (description/price/volume) and a button.
        Form.setObjectName("Form")
        Form.resize(481, 260)
        self.tab_widget = QtWidgets.QTabWidget(Form)
        self.tab_widget.setGeometry(QtCore.QRect(0, -10, 481, 291))
        self.tab_widget.setTabBarAutoHide(True)
        self.tab_widget.setObjectName("tab_widget")
        # --- "add" tab ---
        self.add_tab = QtWidgets.QWidget()
        self.add_tab.setObjectName("add_tab")
        self.description_line = QtWidgets.QLineEdit(self.add_tab)
        self.description_line.setGeometry(QtCore.QRect(100, 120, 371, 20))
        self.description_line.setObjectName("description_line")
        self.price_line = QtWidgets.QLineEdit(self.add_tab)
        self.price_line.setGeometry(QtCore.QRect(100, 150, 371, 20))
        self.price_line.setObjectName("price_line")
        self.volume_line = QtWidgets.QLineEdit(self.add_tab)
        self.volume_line.setGeometry(QtCore.QRect(100, 180, 371, 20))
        self.volume_line.setObjectName("volume_line")
        self.add_btn = QtWidgets.QPushButton(self.add_tab)
        self.add_btn.setGeometry(QtCore.QRect(390, 220, 75, 23))
        self.add_btn.setObjectName("add_btn")
        self.sort_box = QtWidgets.QComboBox(self.add_tab)
        self.sort_box.setGeometry(QtCore.QRect(100, 20, 371, 22))
        self.sort_box.setObjectName("sort_box")
        self.degree_box = QtWidgets.QComboBox(self.add_tab)
        self.degree_box.setGeometry(QtCore.QRect(100, 50, 371, 22))
        self.degree_box.setObjectName("degree_box")
        self.type_box = QtWidgets.QComboBox(self.add_tab)
        self.type_box.setGeometry(QtCore.QRect(100, 80, 371, 22))
        self.type_box.setObjectName("type_box")
        self.label_7 = QtWidgets.QLabel(self.add_tab)
        self.label_7.setGeometry(QtCore.QRect(10, 50, 101, 16))
        font = QtGui.QFont()
        font.setPointSize(7)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.add_tab)
        self.label_8.setGeometry(QtCore.QRect(10, 150, 61, 16))
        self.label_8.setObjectName("label_8")
        self.label_9 = QtWidgets.QLabel(self.add_tab)
        self.label_9.setGeometry(QtCore.QRect(10, 20, 47, 13))
        self.label_9.setObjectName("label_9")
        self.label_10 = QtWidgets.QLabel(self.add_tab)
        self.label_10.setGeometry(QtCore.QRect(10, 90, 47, 13))
        self.label_10.setObjectName("label_10")
        self.label_11 = QtWidgets.QLabel(self.add_tab)
        self.label_11.setGeometry(QtCore.QRect(10, 120, 61, 16))
        self.label_11.setObjectName("label_11")
        self.label_12 = QtWidgets.QLabel(self.add_tab)
        self.label_12.setGeometry(QtCore.QRect(10, 180, 61, 16))
        self.label_12.setObjectName("label_12")
        self.tab_widget.addTab(self.add_tab, "")
        # --- "edit" (change) tab: same layout, *_edit widget names ---
        self.change_tab = QtWidgets.QWidget()
        self.change_tab.setObjectName("change_tab")
        self.degree_box_edit = QtWidgets.QComboBox(self.change_tab)
        self.degree_box_edit.setGeometry(QtCore.QRect(100, 50, 371, 22))
        self.degree_box_edit.setObjectName("degree_box_edit")
        self.volume_line_edit = QtWidgets.QLineEdit(self.change_tab)
        self.volume_line_edit.setGeometry(QtCore.QRect(100, 180, 371, 20))
        self.volume_line_edit.setObjectName("volume_line_edit")
        self.description_line_edit = QtWidgets.QLineEdit(self.change_tab)
        self.description_line_edit.setGeometry(QtCore.QRect(100, 120, 371, 20))
        self.description_line_edit.setObjectName("description_line_edit")
        self.edit_btn = QtWidgets.QPushButton(self.change_tab)
        self.edit_btn.setGeometry(QtCore.QRect(390, 220, 75, 23))
        self.edit_btn.setObjectName("edit_btn")
        self.price_line_edit = QtWidgets.QLineEdit(self.change_tab)
        self.price_line_edit.setGeometry(QtCore.QRect(100, 150, 371, 20))
        self.price_line_edit.setObjectName("price_line_edit")
        self.sort_box_edit = QtWidgets.QComboBox(self.change_tab)
        self.sort_box_edit.setGeometry(QtCore.QRect(100, 20, 371, 22))
        self.sort_box_edit.setObjectName("sort_box_edit")
        self.type_box_edit = QtWidgets.QComboBox(self.change_tab)
        self.type_box_edit.setGeometry(QtCore.QRect(100, 80, 371, 22))
        self.type_box_edit.setObjectName("type_box_edit")
        self.label = QtWidgets.QLabel(self.change_tab)
        self.label.setGeometry(QtCore.QRect(10, 20, 47, 13))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.change_tab)
        self.label_2.setGeometry(QtCore.QRect(10, 50, 101, 16))
        font = QtGui.QFont()
        font.setPointSize(7)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.change_tab)
        self.label_3.setGeometry(QtCore.QRect(10, 90, 47, 13))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.change_tab)
        self.label_4.setGeometry(QtCore.QRect(10, 120, 61, 16))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(self.change_tab)
        self.label_5.setGeometry(QtCore.QRect(10, 150, 61, 16))
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(self.change_tab)
        self.label_6.setGeometry(QtCore.QRect(10, 180, 61, 16))
        self.label_6.setObjectName("label_6")
        self.tab_widget.addTab(self.change_tab, "")
        self.retranslateUi(Form)
        self.tab_widget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Apply all user-visible (translatable) strings.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.description_line.setPlaceholderText(_translate("Form", "Введите описание"))
        self.price_line.setPlaceholderText(_translate("Form", "Введите цену"))
        self.volume_line.setPlaceholderText(_translate("Form", "Введите объём"))
        self.add_btn.setText(_translate("Form", "Добавить"))
        self.label_7.setText(_translate("Form", "Степень прожарки"))
        self.label_8.setText(_translate("Form", "Цена"))
        self.label_9.setText(_translate("Form", "Сорт"))
        self.label_10.setText(_translate("Form", "Тип"))
        self.label_11.setText(_translate("Form", "Описание"))
        self.label_12.setText(_translate("Form", "Объём"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.add_tab), _translate("Form", "Добавить кофе"))
        self.volume_line_edit.setPlaceholderText(_translate("Form", "Введите объём"))
        self.description_line_edit.setPlaceholderText(_translate("Form", "Введите описание"))
        self.edit_btn.setText(_translate("Form", "Сохранить"))
        self.price_line_edit.setPlaceholderText(_translate("Form", "Введите цену"))
        self.label.setText(_translate("Form", "Сорт"))
        self.label_2.setText(_translate("Form", "Степень прожарки"))
        self.label_3.setText(_translate("Form", "Тип"))
        self.label_4.setText(_translate("Form", "Описание"))
        self.label_5.setText(_translate("Form", "Цена"))
        self.label_6.setText(_translate("Form", "Объём"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.change_tab), _translate("Form", "Изменить кофе"))
|
from django.test import TestCase
from rest_framework.test import APITestCase
from authentication.models import User
class TestModel(APITestCase):
    """Smoke tests for the project's custom User manager."""

    def test_test(self):
        # Trivial sanity check that the suite is wired up.
        self.assertEqual(1, 1)

    def test_creates_user(self):
        created = User.objects.create_user('hi', 'hi@gmail.com', 'test123')
        self.assertIsInstance(created, User)
        self.assertEqual(created.email, 'hi@gmail.com')
        self.assertFalse(created.is_staff)

    def test_creates_superuser(self):
        created = User.objects.create_superuser('hi', 'hi@gmail.com', 'test123')
        self.assertIsInstance(created, User)
        self.assertEqual(created.email, 'hi@gmail.com')
        self.assertTrue(created.is_staff)
        self.assertTrue(created.is_superuser)

    def test_raise_error_no_username(self):
        # The manager must reject an empty password.
        with self.assertRaisesMessage(ValueError, 'The given password must be set'):
            User.objects.create_user('hi', 'hi@gmail.com', '')
from .object import Object
# namespace
class Ns(Object):
    """Namespace node; all behaviour is inherited unchanged from Object."""
    pass
# global namespace
glob = Ns('global')
# Object presumably overloads << and >> as graph-linking operators; the
# global namespace is linked to itself in both directions here —
# TODO confirm against object.py.
glob << glob
glob >> glob
|
# MLP
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
# set parameters
batch_size = 128  # samples per gradient update
epochs = 10       # full passes over the training set
n_classes = 10    # digits 0-9
# load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# view image: show three sample digits (subplot positions 7-9 of a 3x3 grid)
for i in range(6,9):
    plt.subplot(330 + (i+1))
    plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))
    plt.title(y_train[i]);
# data preprocessing
x_train = x_train.reshape(60000, 784) # reshape input from (28,28) to 784
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
scale = np.max(x_train) # 255
x_train /= scale
x_test /= scale
# NOTE(review): the variable is named `mean` but it holds np.std(x_train);
# centering by the standard deviation looks unintentional — confirm whether
# np.mean(x_train) was meant here.
mean = np.std(x_train)
x_train -= mean
x_test -= mean
input_dim = x_train.shape[1]
print('x_train shape:', x_train.shape) # (60000, 784)
print(x_train.shape[0], 'train samples') # 60000
print(x_test.shape[0], 'test samples') # 10000
# one-hot encode the integer labels into n_classes-wide vectors
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)
# construct MLP: 784 -> 128 -> 128 -> 10 with dropout after each hidden layer
model = Sequential()
model.add(Dense(128, activation='relu', input_dim=input_dim)) # input_dim = 784
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer="rmsprop",metrics=['accuracy'])
# train MLP
model.summary()
history = model.fit(x_train, y_train, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_test, y_test))
# evaluate on test set; score is [loss, accuracy]
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# visualize the loss function in each epoch
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
# NOTE: this rebinds `epochs` from the int hyperparameter above to a range.
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo')       # training loss: blue dots
plt.plot(epochs, val_loss_values, 'b+')   # validation loss: blue crosses
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
# visualize accuracy in each epoch
plt.clf()
# NOTE(review): the 'acc'/'val_acc' keys are Keras-version dependent —
# newer Keras uses 'accuracy'/'val_accuracy'; confirm the installed version.
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc_values, 'bo')
plt.plot(epochs, val_acc_values, 'b+')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Creates the Operation model (follows 0001_initial of app 'main')."""

    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Operation',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('date', models.DateTimeField(auto_now=True)),
                ('cash', models.IntegerField(null=True, blank=True)),
                # NOTE: ForeignKey without on_delete — valid only on Django < 2.0.
                ('card', models.ForeignKey(to='main.Card')),
            ],
            options={
                'verbose_name_plural': 'Operations',
            },
        ),
    ]
|
import time
# NOTE: this module is Python 2 (print statement in the_print below).
TABLE_SIZE = 64  # number of hash buckets in the cache table
HASHING = 7      # multiplier for the (from + to) * HASHING bucket hash
# ANSI terminal colour codes (not used by the functions below).
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def the_print(info):
    # Prefix all cache diagnostics uniformly.
    print "[CACHE]", info
# Bucket array; each bucket is a list of cached items.
table = [[] for i in range(TABLE_SIZE)]
def insert(incident_list, from_value, to_value):
    """Store `incident_list` in the cache bucket keyed by (from, to),
    replacing any existing entry for the same key."""
    if to_value is None:
        to_value = 0
    the_print("add data to cache table")
    new_item = {
        "incidents": incident_list,
        "from": from_value,
        "to": to_value,
        "time": time.time()
    }
    slot_num = ((from_value + to_value) * HASHING) % TABLE_SIZE
    slot = table[slot_num]
    the_print("slot: " + str(slot_num) + " size: " + str(len(slot)))
    # Replace an existing entry for this key in place; append otherwise.
    for position, entry in enumerate(slot):
        if entry['from'] == from_value and entry['to'] == to_value:
            slot[position] = new_item
            break
    else:
        slot.append(new_item)
def get(from_value, to_value):
    """Return the cached incidents for (from, to), or None on a miss.

    Entries older than 30 seconds are treated as expired and purged.
    BUG FIX: the original called slot.remove(item) while iterating the
    same list, which skips the element after each removal; we now iterate
    over a snapshot so every entry is examined.
    """
    the_print("search cache table")
    if to_value is None:
        to_value = 0
    slot_num = ((from_value + to_value) * HASHING) % TABLE_SIZE
    slot = table[slot_num]
    the_print("slot: " + str(slot_num) + " size: " + str(len(slot)))
    # Iterate a copy so removing expired items from `slot` is safe.
    for item in list(slot):
        if item['from'] == from_value and item['to'] == to_value:
            if time.time() - item['time'] <= 30:
                the_print("found in cache table")
                return item['incidents']
        if time.time() - item['time'] > 30:
            slot.remove(item)
            the_print("data in cache is out of date")
    the_print("not found in cache table")
    return None
# Read whitespace-separated words from stdin and print each word reversed,
# each followed by a single space (no trailing newline).
# Idiom fix: dropped the redundant list() wrapper and the range(len(...))
# index loop in favour of direct iteration.
for word in input().split():
    print(word[::-1], end=" ")
|
from pico2d import *
import game_framework
import game_world
import time
class Player:
    """Paddle controlled by keyboard (arrows/A-D) or mouse; bounces the ball.

    NOTE(review): `math` is used below but not imported in this file —
    presumably it leaks in via `from pico2d import *`; confirm.
    """
    image = None          # shared paddle sprite, loaded lazily on first init
    RUN_SPEED_PPS = 300   # paddle speed, pixels per second
    FIELD_MARGIN = 50     # clamp margin from the field edges
    PADDLE_Y = 100        # fixed vertical position of the paddle

    def __init__(self):
        self.field_width, self.field_height = get_canvas_width(), get_canvas_height()
        self.size = 60
        self.mouse_control = False   # True while the mouse drives the paddle
        self.angle = math.pi / 2
        self.init()
        self.life = 0
        self.w = 100   # paddle width in pixels
        self.h = 22    # paddle height in pixels
        if (Player.image == None):
            Player.image = load_image('paddle.png')

    def init(self, life = 5):
        # Reset per-round state.
        # NOTE(review): the `life` parameter is never used here, and __init__
        # overwrites self.life with 0 right after calling init() — confirm.
        self.x = self.field_width / 2
        self.y = Player.PADDLE_Y
        self.dx, self.dy = 0, 0
        self.speed = 1
        self.score = 0

    def get_bb(self):
        # Axis-aligned bounding box: (left, bottom, right, top).
        return self.x - self.w/2, self.y - self.h/2, self.x + self.w/2, self.y + self.h/2

    def didBounce(self, ball):
        """Bounce `ball` off the paddle; returns True when a bounce happened.

        The flat centre region reflects the ball straight up; the rounded
        ends deflect it at an angle based on the contact point.
        """
        if not ball.intersection(self):
            return False
        hw = self.w / 2
        hh = self.h / 2
        # Contact within the flat centre section: plain vertical bounce.
        if self.x - hw + hh <= ball.x and ball.x <= self.x + hw - hh:
            print('Normal bounce', self.x, ball.x)
            ball.bounceUp()
            return True
        # Contact on a rounded end: deflect away from that end's centre.
        ox = self.x - hw + hh if ball.x < self.x else self.x + hw - hh
        ball.angle = math.atan2(ball.y - self.y, ball.x - ox) % (2 * math.pi)
        return True

    def draw(self):
        # index = int(-(self.angle - math.pi / 2) * 16 / math.pi) % 32
        # self.image.clip_draw(128 * index, 0, 128, 128, self.x, self.y)
        # angle = self.angle - math.pi / 2
        self.image.draw(self.x, self.y)

    def handle_event(self, event):
        """Track key/mouse input; returns True when the event was consumed."""
        handled = False
        if event.type == SDL_KEYDOWN:
            if event.key in [SDLK_LEFT, SDLK_a]: self.dx += -1
            elif event.key in [SDLK_RIGHT, SDLK_d]: self.dx += 1
        if event.type == SDL_KEYUP:
            if event.key in [SDLK_LEFT, SDLK_a]: self.dx += 1
            elif event.key in [SDLK_RIGHT, SDLK_d]: self.dx += -1
        # Any keyboard movement takes control back from the mouse.
        if self.dx != 0 or self.dy != 0:
            self.mouse_control = False
            handled = True
        if event.type == SDL_MOUSEBUTTONDOWN:
            self.mouse_control = True
            handled = True
        if event.type in [SDL_MOUSEMOTION, SDL_MOUSEBUTTONDOWN]:
            self.mouse_x = event.x
            # Convert SDL's top-left origin to the canvas's bottom-left origin.
            self.mouse_y = get_canvas_height() - event.y
        return handled

    def update(self):
        if game_world.isPaused():
            return
        distance = Player.RUN_SPEED_PPS * game_framework.frame_time
        if self.mouse_control:
            # Move toward the last known mouse position, without overshooting.
            mx, my = self.mouse_x - self.x, self.mouse_y - self.y
            angle = math.atan2(my, mx)
            if mx != 0 or my != 0:
                self.angle = angle
            dx, dy = math.cos(angle), math.sin(angle)
            tx = self.x + (dx * distance)
            # ty = self.y + (dy * distance)
            # print(round(self.x), round(self.y), round(mx, 2), round(my), round(tx), round(ty))
            if dx > 0 and tx > self.mouse_x: tx = self.mouse_x
            if dx < 0 and tx < self.mouse_x: tx = self.mouse_x
            # if dy > 0 and ty > self.mouse_y: ty = self.mouse_y
            # if dy < 0 and ty < self.mouse_y: ty = self.mouse_y
            self.x = tx
        else:
            self.x += (self.dx * distance)
            # self.y += (self.dy * distance)
        # Keep the paddle inside the field.
        self.x = clamp(Player.FIELD_MARGIN, self.x, self.field_width - Player.FIELD_MARGIN)
        # self.y = clamp(Player.FIELD_MARGIN, self.y, self.field_height - Player.FIELD_MARGIN)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.backend.python.lint.add_trailing_comma.skip_field import SkipAddTrailingCommaField
from pants.backend.python.lint.add_trailing_comma.subsystem import AddTrailingComma
from pants.backend.python.target_types import PythonSourceField
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class AddTrailingCommaFieldSet(FieldSet):
    """Field set selecting Python sources eligible for add-trailing-comma."""
    required_fields = (PythonSourceField,)
    source: PythonSourceField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets with `skip_add_trailing_comma=True` are excluded.
        return tgt.get(SkipAddTrailingCommaField).value
class AddTrailingCommaRequest(FmtTargetsRequest):
    """Fmt request wiring the add-trailing-comma tool into the fmt goal."""
    field_set_type = AddTrailingCommaFieldSet
    tool_subsystem = AddTrailingComma
    # All files are formatted in a single partition.
    partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
@rule(desc="Format with add-trailing-comma", level=LogLevel.DEBUG)
async def add_trailing_comma_fmt(
request: AddTrailingCommaRequest.Batch, add_trailing_comma: AddTrailingComma
) -> FmtResult:
add_trailing_comma_pex = await Get(VenvPex, PexRequest, add_trailing_comma.to_pex_request())
result = await Get(
ProcessResult,
VenvPexProcess(
add_trailing_comma_pex,
argv=(
"--exit-zero-even-if-changed",
*add_trailing_comma.args,
*request.files,
),
input_digest=request.snapshot.digest,
output_files=request.files,
description=f"Run add-trailing-comma on {pluralize(len(request.files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return await FmtResult.create(request, result, strip_chroot_path=True)
def rules():
    """Register this plugin's rules (and its pex dependencies) with the engine."""
    return list(collect_rules()) + list(AddTrailingCommaRequest.rules()) + list(pex.rules())
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 10:08:15 2020
@author: TOP Artes
"""
# Importa a biblioteca para estruturação dos dados
import numpy as np
# Importa as classes necessárias
from model.regressor import Regressor
from model.validator import Validator
from control.control_validator import ControlValidator
from control.control_analise import ControlAnalise
# Importa a biblioteca para separação dos dados em Treino e Teste
from sklearn.model_selection import train_test_split
class ControlRegressor(object):
    """Coordinates training, validation and selection of four regressor
    families: decision tree ('arvore'), polynomial linear ('linear_poly'),
    MLP ('rede_neural') and SVR ('support_vector')."""

    def __init__(self):
        self.regressor = Regressor()
        self.validator = Validator()
        self.control_analise = ControlAnalise()
        self.control_validator = ControlValidator()
        # Model identifiers, in the order their kwargs appear in set_kwargs().
        self.lst_modelo = ['arvore', 'linear_poly','rede_neural','support_vector']

    def set_kwargs(self, base, baseline):
        """Build the per-model hyperparameter dicts (one entry per model,
        in lst_modelo order).

        When `base` is True (and `baseline` is empty) default/empty kwargs
        are produced; otherwise tuned hyperparameters are used. Indices
        listed in `baseline` are reset to their baseline kwargs."""
        kwargs = []
        if len(baseline) > 0:
            base = False
        kwargs.append({'tree':{'max_depth':15, 'criterion':'friedman_mse'},'boost':{'n_estimators':1000, 'learning_rate':.00001},'tunned':{'test':'0'}}) if base is False else kwargs.append({'tree':{}})
        kwargs.append({'fit_intercept':False, 'n_jobs':-1, 'normalize':True}) if base is False else kwargs.append({})
        kwargs.append({'max_iter':3000,'early_stopping':True, 'activation':'tanh', 'solver':'lbfgs','alpha':0.0005,'tol':1e-5, 'shuffle':True, 'learning_rate':'adaptive'}) if base is False else kwargs.append({})
        kwargs.append({'max_iter':10000,'kernel':'rbf','tol':7e-6,'C':3.7,'epsilon':0.03, 'degree':1,'gamma':'scale','coef0':0}) if base is False else kwargs.append({})
        for dct in list(baseline):
            if dct in list(baseline):
                kwargs[dct] = {'tree':{}} if dct == 0 else {}
        return kwargs

    def train_test(self, X, y, test_size, random_state):
        # Split into train/test (15% test is used for this small dataset).
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
        return X_train, X_test, y_train, y_test

    def get_metrics(self, X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot, modelo=None, selection=False) -> np.array:
        """Train the requested model(s) and return their metrics.

        With modelo=None: trains all four families and returns a dict of
        (score, mae) per model. With a specific `modelo` and validation=True:
        returns that model's cross-validation result array. With a specific
        `modelo` and selection=True: returns a tuple including the fitted
        model for later prediction."""
        dct_text = None
        def get_args(X, y, i, pre, config, scaled, kwargs):
            # Per-model overrides: config[i] may replace pre/kwargs/scaled/X/y.
            pre = pre if i not in config else config[i]['pre']
            kwargs = kwargs[i] if i not in config else config[i]['kwargs'][i]
            scaled = scaled if i not in config else config[i]['scaled']
            X = X if i not in config else config[i]['X']
            y = y if i not in config else config[i]['y']
            return X, y, pre, kwargs, scaled
        if plot:
            dct_text = self.control_analise.set_plt_text(plt_text)
        if modelo==None or modelo =='arvore':
            X_ad, y_ad, pre_ad, kwargs_ad, scaled_ad = get_args(X, y, 0, pre, config, scaled, kwargs)
            X_ad = (pre_ad['scaler'][0][0], pre_ad['scaler'][0][1], X_ad[2]) if scaled_ad else X_ad
            y_ad = pre_ad['scaler'][1] if scaled_ad else y_ad
            kwargs_ad['tree']['random_state'] = random_state
            if modelo =='arvore' and validation:
                X, y = np.vstack((X_ad[0], X_ad[1])), np.vstack((y_ad[0], y_ad[1]))
                # Returns the array with the 30 cross-validation results.
                return self.regressor.treinar_arvore(X[:,1:], y, pre_ad, scaled_ad, plot, dct_text, validation, kwargs_ad)
            # Train the model with the hyperparameters and train(X)/test(y) data:
            # decision-tree regression.
            feat_importances_arvore, score_arvore, mae_arvore, modelo_arvore = self.regressor.treinar_arvore(
                X_ad, y_ad, pre_ad, scaled_ad, plot, dct_text, validation, kwargs_ad)
            if modelo!=None and selection is True:
                return (pre['scaler'][2],pre['scaler'][3], feat_importances_arvore, score_arvore, mae_arvore, modelo_arvore)
        if modelo==None or modelo =='linear_poly':
            X_lp, y_lp, pre_lp, kwargs_lp, scaled_lp = get_args(X, y, 1, pre, config, scaled, kwargs)
            X_lp = (pre_lp['poly'][1].transform(pre_lp['scaler'][0][0]), #pre_lp['poly'][0][0][:,1:]
                    pre_lp['poly'][1].transform(pre_lp['scaler'][0][1]), #pre_lp['poly'][0][1][:,1:]
                    X_lp[2]) if scaled_lp else (pre_lp['poly'][0][0], pre_lp['poly'][0][1], X_lp[2])
            y_lp = pre_lp['scaler'][1] if scaled_lp else y_lp
            if modelo =='linear_poly' and validation:
                X, y = np.vstack((X_lp[0], X_lp[1])), np.vstack((y_lp[0], y_lp[1]))
                # Returns the array with the 30 cross-validation results.
                return self.regressor.treinar_linear_poly(X, y, pre_lp, scaled_lp, plot, dct_text, validation, kwargs_lp)
            # Train the model: polynomial linear regression.
            score_poly, mae_poly, modelo_poly = self.regressor.treinar_linear_poly(
                X_lp, y_lp, pre_lp, scaled_lp, plot, dct_text, validation, kwargs_lp)
            if modelo!=None and selection is True:
                return (pre['scaler'][2],pre['scaler'][3], score_poly, mae_poly, modelo_poly)
        if modelo==None or modelo =='rede_neural':
            X_rn, y_rn, pre_rn, kwargs_rn, scaled_rn = get_args(X, y, 2, pre, config, scaled, kwargs)
            X_rn = (pre_rn['scaler'][0][0], pre_rn['scaler'][0][1], X_rn[2])
            y_rn = pre_rn['scaler'][1]
            # Hidden-layer width: half the feature count plus one, two layers.
            node = int((X_rn[2].shape[1])/2)+1
            kwargs_rn['hidden_layer_sizes'] = (node,node)
            # kwargs_rn['random_state'] = random_state
            if modelo =='rede_neural' and validation is True:
                X, y = np.vstack((X_rn[0], X_rn[1])), np.vstack((y_rn[0], y_rn[1]))
                # Returns the array with the 30 cross-validation results.
                return self.regressor.treinar_redeneural(X[:,1:], y, pre_rn, scaled_rn, plot, dct_text, validation, kwargs_rn)
            # Train the model: neural-network (MLP) regression.
            score_mpl, mae_mpl, modelo_mpl = self.regressor.treinar_redeneural(
                X_rn, y_rn, pre_rn, scaled_rn, plot, dct_text, validation, kwargs_rn)
            if modelo!=None and selection is True:
                return (pre['scaler'][2],pre['scaler'][3], score_mpl, mae_mpl, modelo_mpl)
        if modelo==None or modelo == 'support_vector':
            X_sv, y_sv, pre_sv, kwargs_sv, scaled_sv = get_args(X, y, 3, pre, config, scaled, kwargs)
            X_sv = (pre_sv['scaler'][0][0], pre_sv['scaler'][0][1], X_sv[2])
            y_sv = pre_sv['scaler'][1]
            if modelo == 'support_vector' and validation is True:
                X, y = np.vstack((X_sv[0],X_sv[1])), np.vstack((y_sv[0], y_sv[1]))
                # Returns the array with the 30 cross-validation results.
                return self.regressor.treinar_svr(X, y, pre_sv, scaled_sv, plot, dct_text, validation, kwargs_sv)
            # Train the model: support-vector regression.
            score_svr, mae_svr, modelo_svr = self.regressor.treinar_svr(
                X_sv, y_sv, pre_sv, scaled_sv, plot, dct_text, validation, kwargs_sv)
            if modelo!=None and selection is True:
                return (pre['scaler'][2], pre['scaler'][3], score_svr, mae_svr, modelo_svr)
        dict_base_line = {'arvore':(score_arvore, mae_arvore),
                          'linear_poly':(score_poly, mae_poly),
                          'rede_neural':(score_mpl, mae_mpl),
                          'support_vector':(score_svr, mae_svr)}
        return dict_base_line

    def get_predict(self, X, modelo) -> np.array:
        """Predict with a trained model tuple, undoing scaling when present."""
        if 'StandardScaler' in str(modelo[0]):
            # Predict the number of ENEM 2019 enrollees, then map the scaled
            # predictions back to the original units.
            previsoes = modelo[-1].predict(modelo[1].transform(X))
            previsoes = modelo[1].inverse_transform(previsoes)
        else:
            previsoes = modelo[-1].predict(X)
        return previsoes

    def get_validation(self, X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot) -> np.matrix:
        """
        Validate the models with StratifiedKFold(n_splits=10), comparing the
        30 cross-validation results of each model."""
        resultados = {}
        random_state=None
        for modelo in self.lst_modelo:
            # Cross-validate each model family.
            resultados[modelo] = self.get_metrics(
                X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot, modelo=modelo)
        results = np.c_[resultados['arvore'], resultados['linear_poly'], resultados['rede_neural'], resultados['support_vector']]
        ranks, names = self.control_validator.compare(results, self.lst_modelo)
        validation = False
        names = {name.split(' - ')[0]: float(name.split(' - ')[1]) for name in names}
        result_rank = sorted(names, key=names.get)
        modelos = {}
        for i in range(2):
            # Refit the two best models according to the Friedman-Nemenyi
            # ranking comparison above.
            modelo = self.get_metrics(
                X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot, modelo=result_rank[i], selection=True)
            modelos[result_rank[i]] = modelo
        return results, modelos
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import user_passes_test
from Aluno.models import Aluno
def check_aluno_exist(user):
    """Return True if ``user`` is authenticated and has a related Aluno.

    Used below as a ``user_passes_test`` predicate to guard views.
    """
    if not user.is_authenticated():
        return False
    try:
        # Fix: the looked-up Aluno was bound to an unused local; we only
        # care whether the lookup succeeds.
        user.aluno_set.get()
        return True
    except Aluno.DoesNotExist:
        return False

# Decorator enforcing that the requesting user has an Aluno profile
# (the wrapping lambda was redundant — the function is already a predicate).
aluno_exist = user_passes_test(check_aluno_exist)
|
from classes.github.client import Client
from classes.yaml_parser import YamlParser
from classes.yaml_replacer.result import Result
from classes.model.package import Package
from typing import Any, List, Optional
class YamlReplacer:
    """Updates package source SHAs inside a parsed YAML document.

    For each package the latest commit SHA is fetched from the client and,
    when it differs from the current reference, written back into the YAML
    data under packages.<name>.source.reference.
    """

    def __init__(self, client: Client, yaml_parser: YamlParser):
        self.client: Client = client
        self.yaml_parser: YamlParser = yaml_parser

    def replace_sha(self, packages: List[Package]) -> List[Result]:
        """Fetch and apply the latest SHA for every package.

        Returns one Result per package (success / ignored / failed).
        The YAML file is saved once at the end, even if some packages failed.
        """
        results: List[Result] = []
        data = self.yaml_parser.get_data()
        for package in packages:
            result: Result = Result(package.name)
            current_sha: str = package.get_value(["source", "reference"])
            result.set_sha_before(current_sha)
            try:
                # Fix: fetch inside the try block so network/API failures are
                # recorded via result.set_failed instead of aborting the whole
                # run (and losing the yaml save + remaining packages).
                fetched_sha: str = self.client.get_last_sha(package.repository, package.branch)
                result.set_sha_after(fetched_sha)
                if fetched_sha != current_sha:
                    self.replace_value(data, fetched_sha, ["packages", package.name, "source", "reference"])
                    result.set_success()
                else:
                    result.set_ignored("Current sha, no required action")
            except Exception as exception:
                result.set_failed(self.__get_exception_message(exception))
            results.append(result)
        self.yaml_parser.save(data)
        return results

    @staticmethod
    def replace_value(data: Any, value: str, fields: List[str]) -> Any:
        """Descend ``fields`` recursively and set the leaf to ``value``.

        NOTE: mutates both ``fields`` (pops from the front) and the nested
        ``data`` structure in place; returns the (mutated) node.
        """
        if fields:
            field: str = fields.pop(0)
            data[field] = YamlReplacer.replace_value(data[field], value, fields)
            return data
        return value

    @staticmethod
    def __get_exception_message(exception: Exception) -> Optional[str]:
        # Some legacy exception types carry a .message attribute; otherwise None.
        return exception.message if hasattr(exception, 'message') else None
|
""" Core views
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2017-10-26
:Copyright: 2017, Karr Lab
:License: MIT
"""
from datetime import datetime
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.contrib.auth.forms import AuthenticationForm
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from minicell.site import settings
import os
###################
### pages
###################
# Static page views: each simply renders the matching template via the
# shared render_template helper (which injects request + last-updated date).
def index(request):
    return render_template(request, 'index.html')
def research(request):
    return render_template(request, 'research.html')
def education(request):
    return render_template(request, 'education.html')
def resources(request):
    return render_template(request, 'resources.html')
def events(request):
    return render_template(request, 'events.html')
def publications(request):
    return render_template(request, 'publications.html')
def people(request):
    return render_template(request, 'people.html')
def contact(request):
    return render_template(request, 'contact.html')
@csrf_protect
@never_cache
def login(request):
    """Authenticate the user and redirect to the ``next`` URL.

    On GET, renders the login form (and sets a test cookie to verify
    cookie support). On POST with valid credentials, logs the user in and
    redirects; on invalid credentials, re-renders the bound form.

    Fix: ``request.REQUEST`` was deprecated in Django 1.7 and removed in
    1.9 — read ``next`` from POST with a GET fallback instead.
    """
    next = request.POST.get('next', request.GET.get('next', ''))
    # NOTE(review): 'next' is not validated, which allows open redirects —
    # consider django.utils.http.url_has_allowed_host_and_scheme.
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            auth_login(request, form.get_user())
            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()
            return HttpResponseRedirect(next)
    else:
        form = AuthenticationForm(request)
    request.session.set_test_cookie()
    return render_template(request, 'login.html', context={
        'form': form,
        'next': next,
    })
def logout(request):
    """Log the user out and render the logout confirmation page."""
    auth_logout(request)
    return render_template(request, 'logout.html')
###################
### sitemap, robots
###################
def sitemap(request):
    """Render sitemap.xml with the configured root URL, served as XML."""
    return render_template(request, 'sitemap.xml', context={'ROOT_URL': settings.ROOT_URL}, content_type='application/xml')
def robots(request):
    """Render robots.txt with the configured domain and root URL.

    Fix: the content type was the invalid 'plain/text'; the correct MIME
    type is 'text/plain'.
    """
    return render_template(request, 'robots.txt', context={'ROOT_DOMAIN': settings.ROOT_DOMAIN, 'ROOT_URL': settings.ROOT_URL}, content_type='text/plain')
###################
### helper functions
###################
def render_template(request, template, context=None, content_type='text/html'):
    ''' Returns rendered template

    Args:
        request (:obj:`django.http.request.HttpRequest`): HTTP request
        template (:obj:`str`): path to the template, relative to the template dir
        context (:obj:`dict`, optional): data needed to render the template
        content_type (:obj:`str`, optional): MIME type of the response

    Returns:
        :obj:`django.http.HttpResponse`: HTTP response
    '''
    context = {} if context is None else context
    # Every page gets the request plus the template file's modification time.
    context['request'] = request
    template_path = os.path.join(settings.TEMPLATES[0]['DIRS'][0], template)
    context['last_updated_date'] = datetime.fromtimestamp(os.path.getmtime(template_path))
    return render(request, template, context=context, content_type=content_type)
|
# Repeatedly read a string and a character, reporting how many times the
# character occurs in the string, until the user enters "end".
a = input("檢測的字串(end結束):")
while a != "end":
    two = input("檢測的單一字元:")
    # str.count tallies the single character directly (the original built a
    # list copy and reused ``a`` for the count, then re-checked "end" redundantly)
    print("字元", two, "出現次數為:", a.count(two))
    a = input("檢測的字串(end結束):")
# Fix: acknowledge termination even when the very first input is "end"
# (the original only printed this when "end" arrived inside the loop).
print("檢測結束")
class Info:
    """Holds a person's name and age with setter and display helpers."""
    __name = None  # backing field for the name (name-mangled to _Info__name)
    __age = None   # backing field for the age (name-mangled to _Info__age)

    def __init__(self):
        pass

    def setName(self, name):
        # Fix: the original evaluated self.__name without assigning,
        # so the given name was never stored.
        self.__name = name

    def setAge(self, age):
        # Ages below 1 are clamped up to the minimum of 1.
        if age < 1:
            self.__age = 1
            return
        # Fix: the original never stored valid ages (>= 1).
        self.__age = age

    def disp(self):
        """Print the stored name and age."""
        print("이름 : {}".format(self.__name))
        print("나이 : {}".format(self.__age))
# Exercise the Info class.
# Fix: the original assigned strings/ints to the setter attributes
# (in1.setName = "...", in1.setAge = -20), overwriting the methods instead
# of calling them; it also set a stray ``__name`` attribute on the instance.
in1 = Info()
in1.setName("김동혁")
in1.setAge(-20)  # negative age is clamped to the minimum by setAge
from tastypie.resources import ModelResource
from background.models import Picture
class PictureResource(ModelResource):
    """Tastypie REST resource exposing all Picture objects (resource name 'pic')."""
    class Meta:
        queryset = Picture.objects.all()
        resource_name = 'pic'
from tkinter import *
from tkinter import ttk
# Main window setup for the binary-to-decimal/hex/octal converter GUI.
win = Tk()
win.geometry('700x500')
win.title("Priyanshi")
combo_var = StringVar()
# Header and input prompt labels
Label(text='Binary To All Number System Converter', font=('arial bold', 20)).grid(column=0, row=1, padx=50)
Label(text="").grid(column=0, row=2)
Label(text="Enter Binary Number:", font=('arial bold', 10)).grid(column=0, row=3, columnspan=2, sticky=W, padx=250)
# Result captions and white placeholder boxes; converted values are later
# overlaid on rows 10 / 13 / 15 by bin_dec()
Label(text="Decimal Number System :", font=('arial bold', 10)).grid(column=0, row=9, columnspan=2, sticky=W, padx=250)
Label(text="", bg="white", font=("Arial bold", 30), borderwidth=5, relief=GROOVE).grid(column=0, row=10, columnspan=2,ipadx=200)
Label(text="").grid(column=0, row=11)
Label(text="Hexadecimal Number System :", font=('arial bold', 10)).grid(column=0, row=12, columnspan=2, sticky=W,padx=250)
Label(text="", bg="white", font=("Arial bold", 30), borderwidth=5, relief=GROOVE).grid(column=0, row=13, columnspan=2,ipadx=200)
Label(text="").grid(column=0, row=14)
Label(text="Octal Number System :", font=('arial bold', 10)).grid(column=0, row=14, columnspan=2, sticky=W, padx=250)
Label(text="", bg="white", font=("Arial bold", 30), borderwidth=5, relief=GROOVE).grid(column=0, row=15, columnspan=2,ipadx=200)
# Entry widget bound to the binary string the user types
bin_var = StringVar()
Entry(textvariable=bin_var, font=('arial bold', 20)).grid(column=0, row=4, columnspan=2)
def bin_dec():
    """Convert the entered binary string and display decimal/hex/octal labels.

    Reads the binary string from ``bin_var``. No validation is performed:
    non-binary input makes int(..., 2) raise ValueError.
    """
    binary = bin_var.get()
    a = int(binary, 2)  # decimal value
    b = hex(a)
    b = b[2:]           # strip the '0x' prefix
    c = oct(a)
    c = c[2:]           # strip the '0o' prefix
    b = b.upper()
    # Overlay the converted values on the placeholder boxes created above
    # (grid() returns None, so these names are None — kept for compatibility)
    decimal = Label(text=a, font=("Arial bold", 20), bg="white", borderwidth=1).grid(column=0, row=10, columnspan=2, ipadx=100)
    hexadecimal = Label(text=b, font=("Arial bold", 20), bg="white", borderwidth=1).grid(column=0, row=13, columnspan=2, ipadx=100)
    return decimal, hexadecimal, Label(text=c, font=("Arial bold", 20), bg="white", borderwidth=1).grid(column=0, row=15, columnspan=2, ipadx=100)
def dec_bin():
    """Convert the entered decimal string to binary and display it.

    Fix: ``bin()`` requires an int, but StringVar.get() returns a str —
    the original raised TypeError on every call; convert with int() first.
    """
    gg = bin_var.get()
    d = bin(int(gg))
    a = d[2:]  # strip the '0b' prefix
    return Label(text=a, font=("Arial bold", 20)).grid(column=0, row=6)
# Convert button wired to bin_dec, then enter the Tk main loop.
Label(text="").grid(column=0, row=5)
Button(text='CONVERT', command=bin_dec, font=10,bg="deeppink",fg="white").grid(column=0, row=6, columnspan=2)
Label(text="").grid(column=0, row=7)
win.mainloop()
|
#
# Copyright (c) 2020 Carsten Igel.
#
# This file is part of pip-licenses-reader
# (see https://github.com/carstencodes/pip-licenses-reader).
#
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
#
import unittest
import tempfile
import os
from pathlib import Path
from typing import FrozenSet, List
import pip_licenses_reader
__ITEM_SOME = """{
"Author": "Me",
"Version": "0.1.0",
"URL": "https://some.site",
"License": "GPLv3",
"Name": "SomePackage"
}"""
__ITEM_PIP_LR = """{
"Author": "Carsten Igel",
"Version": "0.8.0",
"URL": "https://github.com/carstencodes/pip-licenses-reader",
"License": "BSD",
"Name": "pip_licenses_reader"
}"""
__ITEM_SIMPLE = """{
"Author": "Somebody",
"Version": "1.2.3",
"URL": "https://any.site",
"License": "MIT",
"Name": "AnyPackage"
}"""
__ITEM_DEFUNC = '{ "Author": "Me", "Version": "0.1.0" }'
FILE_CONTENT_IS_OBJ = '{ "a": "b" }'
FILE_CONTENT_IS_EMPTY_LIST = "[]"
FILE_CONTENT_THREE_COMPLETE_ITEMS = (
"[" + __ITEM_SOME + ", " + __ITEM_SIMPLE + ", " + __ITEM_PIP_LR + "]"
)
class _MockFile:
def __init__(self, content: str) -> None:
self.__content = content
self.__temp_file = None
def __enter__(self) -> Path:
temp_file = tempfile.mktemp(".json", "pip_lic_read_test")
with open(temp_file, "w+") as handle:
handle.write(self.__content)
self.__temp_file = temp_file
return temp_file
def __exit__(self, t, val, tb) -> bool:
if self.__temp_file:
os.remove(self.__temp_file)
self.__temp_file = None
return False
class EmptyTest(unittest.TestCase):
    """read_file() behaviour with no file, an object-only file, and an empty list.

    Cleanup: removed the useless ``global`` declarations — reading a
    module-level name requires no global statement.
    """

    def test_default_typing_major_type(self) -> None:
        any_obj = pip_licenses_reader.read_file()
        self.assertTrue(
            isinstance(any_obj, pip_licenses_reader.LicenseCollection)
        )

    def test_default_typing_collection_type(self) -> None:
        any_obj = pip_licenses_reader.read_file()
        self.assertTrue(isinstance(any_obj.projects, frozenset))

    def test_no_file_returns_empty(self) -> None:
        any_obj = pip_licenses_reader.read_file()
        self.assertIsNotNone(any_obj)

    def test_no_file_returns_empty_frozen_set(self) -> None:
        any_obj = pip_licenses_reader.read_file()
        self.assertEqual(len(any_obj.projects), 0)

    def test_file_is_object(self) -> None:
        with _MockFile(FILE_CONTENT_IS_OBJ) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            self.assertIsNotNone(any_obj)

    def test_file_is_object_empty_frozenset(self) -> None:
        with _MockFile(FILE_CONTENT_IS_OBJ) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            self.assertEqual(len(any_obj.projects), 0)

    def test_file_is_list(self) -> None:
        with _MockFile(FILE_CONTENT_IS_EMPTY_LIST) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            self.assertIsNotNone(any_obj)

    def test_file_is_list_empty_frozenset(self) -> None:
        with _MockFile(FILE_CONTENT_IS_EMPTY_LIST) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            self.assertEqual(len(any_obj.projects), 0)
class FileContentTest(unittest.TestCase):
    """read_file() on a fixture containing three complete license entries.

    Items are compared after sorting by project name, since the result is
    an unordered frozenset. Cleanup: removed the useless ``global``
    declarations — reading a module-level name requires no global statement.
    """

    def test_file_three_complete_items_success(self) -> None:
        with _MockFile(FILE_CONTENT_THREE_COMPLETE_ITEMS) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            self.assertIsNotNone(any_obj)
            self.assertEqual(len(any_obj.projects), 3)

    def test_file_three_complete_items_content_two(self) -> None:
        with _MockFile(FILE_CONTENT_THREE_COMPLETE_ITEMS) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            project: pip_licenses_reader.ProjectInfo = (
                FileContentTest.sort_items(any_obj.projects)[1]
            )
            self.assertEqual(project.author, "Me")
            self.assertEqual(str(project.version), "0.1.0")
            self.assertEqual(project.url, "https://some.site")
            self.assertEqual(project.license, "GPLv3")
            self.assertEqual(project.name, "SomePackage")

    def test_file_three_complete_items_content_three(self) -> None:
        with _MockFile(FILE_CONTENT_THREE_COMPLETE_ITEMS) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            project: pip_licenses_reader.ProjectInfo = (
                FileContentTest.sort_items(any_obj.projects)[2]
            )
            self.assertEqual(project.author, "Carsten Igel")
            self.assertEqual(str(project.version), "0.8.0")
            self.assertEqual(
                project.url,
                "https://github.com/carstencodes/pip-licenses-reader",
            )
            self.assertEqual(project.license, "BSD")
            self.assertEqual(project.name, "pip_licenses_reader")

    def test_file_three_complete_items_content_one(self) -> None:
        with _MockFile(FILE_CONTENT_THREE_COMPLETE_ITEMS) as mock:
            any_obj = pip_licenses_reader.read_file(mock)
            project: pip_licenses_reader.ProjectInfo = (
                FileContentTest.sort_items(any_obj.projects)[0]
            )
            self.assertEqual(project.author, "Somebody")
            self.assertEqual(str(project.version), "1.2.3")
            self.assertEqual(project.url, "https://any.site")
            self.assertEqual(project.license, "MIT")
            self.assertEqual(project.name, "AnyPackage")

    @staticmethod
    def sort_items(
        items: FrozenSet[pip_licenses_reader.ProjectInfo],
    ) -> List[pip_licenses_reader.ProjectInfo]:
        """Return the frozenset entries as a list sorted by project name."""
        result = list(items)
        result.sort(key=lambda p: p.name)
        return result
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import rospy
import numpy as np
import cv2 as cv
from yolo2_utils import infer_image
from std_msgs.msg import Float32MultiArray
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from std_msgs.msg import String
from vision_msgs.msg import Detection2D, Detection2DArray, ObjectHypothesisWithPose
def object_predict(object_data, header, classid,confidence,image):
    """Build a vision_msgs Detection2D message from one detected box.

    Args:
        object_data: box as [x, y, w, h] in pixel coordinates.
        header: ROS message header copied onto the detection.
        classid: numeric class id stored as the hypothesis id.
        confidence: detection score stored on the hypothesis.
        image: BGR frame the box came from (only its shape is read).

    Returns:
        Detection2D with one hypothesis and the bbox center/size filled in.
    """
    image_height,image_width,channels = image.shape
    obj=Detection2D()
    obj_hypothesis= ObjectHypothesisWithPose()
    object_id= classid
    object_score=confidence
    #dimensions=object_data
    obj.header=header
    obj_hypothesis.id = object_id
    obj_hypothesis.score = object_score
    obj.results.append(obj_hypothesis)
    # bbox center is the box origin plus half its size (integer pixels)
    obj.bbox.size_y = int(object_data[3])
    obj.bbox.size_x = int(object_data[2])
    obj.bbox.center.x = int(object_data[0]) + int(object_data[2])//2
    obj.bbox.center.y = int(object_data[1]) + int(object_data[3])//2
    return obj
def same_object(boxes, x, y, w, h):
    """Return the stored id of a tracked box within 10 px of (x, y, w, h)
    on every dimension, or False when no tracked box matches."""
    for bx, by, bw, bh, box_id in boxes:
        matches = (abs(x - bx) <= 10 and abs(y - by) <= 10
                   and abs(h - bh) <= 10 and abs(w - bw) <= 10)
        if matches:
            return box_id
    return False
class BoundingBoxes:
    """ROS node wrapper around a Darknet/YOLO detector.

    Subscribes to a camera image topic, runs inference via infer_image, and
    publishes Detection2DArray messages plus the annotated image. The four
    mode flags (trash_marking, trash_marking_pick, isDustbin, default)
    select how detections are filtered in image_callback.
    """
    def __init__(self, item="trash_marking", trash_marking=False, trash_marking_pick=False, item_specific="trash", isDustbin=False):
        # Mode flags: exactly one detection behaviour is chosen by these.
        self.trash_marking=trash_marking
        self.item_specific = item_specific
        self.trash_marking_pick = trash_marking_pick
        self.item = item
        self.isDustbin = isDustbin
        self.bridge = CvBridge()
        self.check = ""  # last message received on /check
        # Publishers: annotated image plus detection arrays
        # (dustbin mode publishes on a separate topic).
        self.image_pub = rospy.Publisher('output', Image,queue_size=1, latch=True)
        self.object_pub = rospy.Publisher("objects", Detection2DArray, queue_size=1, latch=True)
        self.object_pub_dustbin = rospy.Publisher("objects_d", Detection2DArray, queue_size=1)
        self.labels_array = ['trash','trash','markings']
        self.labels = []  # tracked boxes as [x, y, w, h, id] (see get_ID)
        self.classes= []
        # NOTE(review): self.classes is empty here, so colors has zero rows —
        # confirm infer_image tolerates this.
        self.colors = np.random.randint(0, 255, size=(len(self.classes), 3), dtype='uint8')
        # Load the Darknet network from hard-coded workspace paths.
        self.net = cv.dnn.readNetFromDarknet('/artpark_workspace/src/GigaRoboticsArtpark/apbot_perception/config/' + item + '.cfg', '/artpark_workspace/src/GigaRoboticsArtpark/apbot_perception/weights/' + item + '.weights')
        self.layer_names = self.net.getLayerNames()
        # Keep only the unconnected (output) layers for the forward pass.
        self.layer_names = [self.layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
        rospy.sleep(.1)

    def get_ID(self,box):
        """Return the id of an already-tracked box near ``box``; otherwise
        start tracking it, using its x coordinate as the id."""
        if same_object(self.labels,box[0],box[1],box[2],box[3]):
            return same_object(self.labels,box[0],box[1],box[2],box[3])
        self.labels.append([box[0],box[1],box[2],box[3],box[0]])
        return box[0]

    def image_callback(self, data):
        """Run YOLO on an incoming frame and publish filtered detections."""
        objArray = Detection2DArray()
        try:
            img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        height, width = img.shape[:2]
        img, box, _, confidences, classids, idxs = infer_image(self.net, self.layer_names, height, width, img, self.colors, self.labels)
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
        except CvBridgeError as e:
            print(e)
        objArray.detections =[]
        objArray.header=data.header
        object_count=1
        for i in range(len(box)):
            if self.trash_marking:
                # Publish every detection unfiltered.
                object_count+=1
                objArray.detections.append(object_predict(box[i],data.header,classids[i],confidences[i],img))
            elif self.trash_marking_pick:
                # Publish only detections whose label matches item_specific.
                if( self.labels_array[classids[i]] == self.item_specific):
                    object_count+=1
                    objArray.detections.append(object_predict(box[i],data.header,classids[i],confidences[i],img))
            elif self.isDustbin:
                # Dustbin mode: keep the box only if its crop contains enough
                # green (HSV hue 36-86) on average.
                cropped_img = img[box[i][1]:box[i][1] + box[i][3], box[i][0]:box[i][0]+box[i][2] ]
                hsv = cv.cvtColor(cropped_img, cv.COLOR_BGR2HSV)
                mask = cv.inRange(hsv, (36, 25, 25), (86, 255,255))
                imask = mask>0
                green = np.zeros_like(cropped_img, np.uint8)
                green[imask] = cropped_img[imask]
                m = green.mean(axis=0).mean(axis=0)
                if m[0]>25 or m[1]>25 or m[2]>25:
                    object_count+=1
                    objArray.detections.append(object_predict(box[i],data.header,self.get_ID(box[i]),confidences[i],img))
            else:
                # Default mode: publish everything, tagged with a tracked id.
                object_count+=1
                objArray.detections.append(object_predict(box[i],data.header,self.get_ID(box[i]),confidences[i],img))
        if self.isDustbin:
            self.object_pub_dustbin.publish(objArray)
        else:
            self.object_pub.publish(objArray)
        # cv.imshow("Image window", green)
        # cv.imshow("full img", img)
        # cv.waitKey(3)

    def callback(self,data):
        # Remember the latest /check command string.
        self.check = data.data

    def listener2(self):
        """Standalone entry point: init the node, subscribe, and spin."""
        rospy.init_node('test_node')
        rospy.Subscriber("/check", String, self.callback)
        rospy.Subscriber("/camera/color/image_raw", Image, self.image_callback)
        rospy.spin()

    def listener(self, status=1):
        """Attach (status truthy) or detach (status falsy) the subscribers."""
        if status:
            self.check_sub = rospy.Subscriber("/check", String, self.callback)
            self.img_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.image_callback)
        else:
            self.check_sub.unregister()
            self.img_sub.unregister()
if __name__ == '__main__':
    # Run standalone: detect trash and markings on the camera topic.
    obj = BoundingBoxes(item="trash_marking", trash_marking=True)
    # obj.check = "Detect"
    obj.listener2()
    # start_yolo()
    pass
|
import os
from bsm.util import safe_rmdir
from bsm.util import safe_mkdir
from bsm.util import call_and_log
def run(param):
    """Unpack a downloaded source tarball into the package source directory.

    Returns a dict with 'success' (bool) and a human-readable 'message'.
    """
    version = param['version']
    source_cfg = param['config_package']['source']
    tar_filename = source_cfg['file'].format(version=version)
    tar_file = os.path.join(param['package_path']['misc_dir'], 'download', tar_filename)
    main_dir = source_cfg.get('main', '').format(version=version)
    dst_dir = param['config_package'].get('path', {}).get('source')
    if not dst_dir:
        return {'success': False, 'message': 'Path "source" is not specified'}
    # Start from a clean destination directory.
    safe_rmdir(dst_dir)
    safe_mkdir(dst_dir)
    if main_dir:
        # Strip the leading components so the archive's main directory
        # contents land directly inside dst_dir.
        strip_number = main_dir.strip(os.sep).count(os.sep) + 1
        cmd = ['tar', '--strip-components', str(strip_number), '-xvf', tar_file, main_dir]
    else:
        cmd = ['tar', '-xvf', tar_file]
    with open(param['log_file'], 'w') as log_handle:
        ret = call_and_log(cmd, log=log_handle, cwd=dst_dir)
    return {'success': ret==0, 'message': 'Tar exit code: {0}'.format(ret)}
|
# Orientation constants (NOTE(review): consumers are outside this view —
# presumably used to select a layout/split direction).
HORIZONTAL = 0
VERTICAL = 1
|
from uszipcode import ZipcodeSearchEngine
import psycopg2, pprint, sys
# Python 2 script: walks all 5-digit ZIP codes, looks each one up with
# uszipcode, and inserts the found records into a PostgreSQL table.
debug = True
zipSearch = ZipcodeSearchEngine()
zipNumber = 600  # first plausible US ZIP code
conString = "dbname=yourDBname user=postgres"
con = psycopg2.connect(conString)
con.autocommit = True
# Parameterized insert — values are passed separately, not string-formatted.
sqlString = """INSERT INTO zipcode(city, density, houseofunits, landarea, latitude, longitude,
                    neboundlatitude, neboundlongitude, population, state_id, swboundlatitude,
                    swbounglongitude, totalwages, waterarea, wealthy, zipcode, zipcodetype)
               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )"""
while zipNumber < 100000:
    # Zero-pad to the canonical 5-digit form for logging.
    zipString = str('%0*d' % (5, zipNumber))
    if debug: print"Trying zipNumber %s as %s" % (zipNumber, zipString)
    zipObj = zipSearch.by_zipcode(zipNumber)
    if zipObj is not None:
        if debug: pprint.pprint(zipObj.to_dict())
        cur = con.cursor()
        try:
            cur.execute(sqlString, (zipObj.City, zipObj.Density, zipObj.HouseOfUnits,
                                    zipObj.LandArea, zipObj.Latitude, zipObj.Longitude,
                                    zipObj.NEBoundLatitude, zipObj.NEBoundLongitude,
                                    zipObj.Population, zipObj.State,
                                    zipObj.SWBoundLatitude, zipObj.SWBoungLongitude,
                                    zipObj.TotalWages, zipObj.WaterArea, zipObj.Wealthy,
                                    zipObj.Zipcode, zipObj.ZipcodeType))
        except psycopg2.Error as e:
            # Abort the whole run on any database error, with diagnostics.
            print(e.pgcode)
            print(e.pgerror)
            print(e.diag.severity)
            print(e.diag.message_primary)
            sys.exit(1)
        cur.close()
        if debug: print("%s success" % zipString)
    else:
        if debug: print("%s not found" % zipString)
    zipNumber += 1
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from config import config
import time
import pytz
from pytz import timezone
from dateutil import parser
from datetime import datetime, timedelta
from common_cachefetcher import fetcher
from requests_oauthlib import OAuth1
# LinkedIn API credentials and cache timeout, loaded from the app config.
CONSUMER_KEY = config['linkedin']['CONSUMER_KEY']
CONSUMER_SECRET = config['linkedin']['CONSUMER_SECRET']
OAUTH_TOKEN = config['linkedin']['OAUTH_TOKEN']
OAUTH_TOKEN_SECRET = config['linkedin']['OAUTH_TOKEN_SECRET']
ACCESS_TOKEN = config['linkedin']['ACCESS_TOKEN']
CACHE_TIMEOUT = config['linkedin']['CACHE_TIMEOUT']
def get_oauth():
    """Build the OAuth1 signer for the LinkedIn API from module config."""
    return OAuth1(
        CONSUMER_KEY,
        client_secret=CONSUMER_SECRET,
        resource_owner_key=OAUTH_TOKEN,
        resource_owner_secret=OAUTH_TOKEN_SECRET,
    )
def getLinkedinNotifications(delta_days):
    """Return {'linkedin': n} where n is the network-update count within the
    last ``delta_days`` days, or -1 on any error.

    NOTE: Python 2 syntax (``except Exception, err`` / print statement) —
    this module cannot run under Python 3 as-is.
    """
    response = 0
    try:
        # Midnight UTC today minus (delta_days - 1) days, in epoch milliseconds.
        time_after2 = (int(datetime.now(pytz.timezone("UTC")).replace(hour=0, minute=0, second=0, microsecond=0).astimezone(pytz.utc).strftime("%s")) * 1000) - ((delta_days - 1) * 86400000)
        r = fetcher.get_oauth("https://api.linkedin.com/v1/people/~/network/updates?scope=self&format=json&oauth2_access_token=%s&after=%d" % (ACCESS_TOKEN, time_after2), get_oauth(), CACHE_TIMEOUT)
        linkedin_json = r.json()
        if linkedin_json['_total']:
            response = linkedin_json['_total']
    except Exception, err:
        print Exception, err
        response = -1
    return {'linkedin': response}
# 5 - Camisetas
# Read up to n shirt orders ("0" as a name stops early), then print them
# sorted by (colour, size), with name descending among ties.
n = int(input())
pedidos = {}
for _ in range(n):
    nome = input()
    if nome == "0":
        break
    camisa = input()
    cor, tamanho = camisa.split(" ")
    pedidos[nome] = (cor, tamanho)
# First sort by name descending; the second sort is stable, so entries with
# equal (colour, size) keep that name order.
# Fix: the original also called dict(x)/dict(y) and discarded the results —
# those no-op statements are removed.
x = sorted(pedidos.items(), reverse=True, key=lambda item: item[0])
y = sorted(x, key=lambda item: item[1])
for nome, (cor, tamanho) in y:
    print(nome, "\n", cor, tamanho)
"""
#n = 3
#pedidos = {'Maria Joao': ('branco','P'),'Marcio Gues': ('vermelho','P'),'Maria Jose': ('branco ','P')}
x = sorted(pedidos.items(), reverse=True, key=lambda i : i[0])
dict(x)
y = sorted(x, key=lambda i : i[1])
dict(y)
for i in y:
print(i[0],"\n",i[1][0], i[1][1])
""" |
#!/usr/bin/env
# encoding: utf-8
"""
Created by John DiBaggio on 2018-09-04
Implement NumberToPattern
Implement NumberToPattern
Convert an integer to its corresponding DNA string.
Given: Integers index and k.
Return: NumberToPattern(index, k).
Sample Dataset
45
4
Sample Output
AGTC
Extra Dataset
Input
5353
7
Output
CCATGGC
Execute like:
python3 src/ba1m.py data/ba1m.txt output/ba1m.txt
"""
__author__ = 'johndibaggio'
import sys
import fileinput
# Import path differs between script execution and package import.
if __name__ == '__main__':
    from lib.bio_util import BioUtil
else:
    from .lib.bio_util import BioUtil
argv = list(sys.argv)
input_number = -1
input_k = -1
# First non-empty input line is the number, second is k.
for line in fileinput.input(argv[1]):
    if len(line) > 0:
        if input_number == -1:
            input_number = int(line.replace('\n', ''))
        elif input_k == -1:
            input_k = int(line.replace('\n', ''))
output_string = BioUtil.number_to_pattern(input_number, input_k)
print("The {}-mer pattern for number {} is \"{}\"".format(str(input_k), str(input_number), output_string))
# Write the resulting pattern to the output file named by the 2nd argument.
output_file = open(argv[2], "w+")
output_file.write(output_string)
output_file.close()
|
import io
from test.util import ClangTest
from ctypeslib.codegen import clangparser
from ctypeslib.codegen.handler import InvalidTranslationUnitException
class TestClang_Parser(ClangTest):
    """Exercises Clang_Parser record registration and its error handling."""

    def setUp(self) -> None:
        # Create a clang parser instance, no flags
        self.parser = clangparser.Clang_Parser([])

    def test_parse(self):
        """Structs parsed from a C file are registered as struct_<name>."""
        self.parser.parse('test/data/test-records.c')
        self.assertTrue(self.parser.is_registered('struct_Name'))
        self.assertTrue(self.parser.is_registered('struct_Name2'))
        self.assertFalse(self.parser.is_registered('struct_whatever'))

    def test_parse_string(self):
        """Structs parsed from an in-memory source string are registered."""
        source_code = """
        struct example_detail {
            int first;
            int last;
        };

        struct example {
            int args;
            int flags;
            int count;
            struct example_detail details[2];
        };
        """
        self.parser.parse_string(source_code)
        self.assertTrue(self.parser.is_registered('struct_example_detail'))
        self.assertTrue(self.parser.is_registered('struct_example'))
        self.assertFalse(self.parser.is_registered('struct_whatever'))
        # Cleanup: removed the stray trailing ``return`` the original had here.

    def test_error_translationunit_does_not_exist(self):
        """A missing file surfaces clang's TranslationUnitLoadError."""
        import clang
        with self.assertRaises(clang.cindex.TranslationUnitLoadError):
            self.parser.parse('what/ever/path/test-error2.c')

    def test_error_translationunit(self):
        """A file with fatal diagnostics raises InvalidTranslationUnitException."""
        with self.assertRaises(InvalidTranslationUnitException):
            self.parser.parse('test/data/test-error2.c')

    def test_error_translationunit_include(self):
        """A broken include also raises InvalidTranslationUnitException."""
        with self.assertRaises(InvalidTranslationUnitException):
            self.parser.parse('test/data/test-error1.c')
|
# coding:utf8
from db import mongo_util
'''
获取app models
'''
from bson.objectid import ObjectId
from db.page import Page, DEFAULT_PAGE_SIZE
import pymongo
def find_collections(appid):
    """List the model collections registered in the scheme for ``appid``.

    Returns:
        list of dicts with 'display_name', 'model_name' and 'app_id';
        empty when the app has no scheme document.
    """
    schemes=mongo_util.get_mongo_collection("scheme")
    appScheme=schemes.find_one({"app_id":appid})
    collections=list()
    # Fix: identity comparison with None instead of ``!= None``.
    if appScheme is not None:
        for item in appScheme.get('models'):
            collections.append({
                'display_name':item['display_name'],
                'model_name':item['model_name'],
                'app_id':appid,
            })
    return collections
'''
获取model scheme
'''
def find_model_scheme(appid,model_name):
    """Return the model scheme dict matching ``model_name``, or None.

    Raises AttributeError when the app has no scheme document (find_one
    returns None), matching the original behaviour.
    """
    schemes=mongo_util.get_mongo_collection("scheme")
    models=schemes.find_one({"app_id":appid}).get('models')
    tempModel=None
    for model in models:
        # Fix: cmp() is Python-2-only; ``not cmp(a, b)`` means a == b, and a
        # direct equality test is equivalent and portable to Python 3.
        if model_name == model.get('model_name'):
            tempModel=model
            break
    return tempModel
'''
保存
'''
def save_model(app_id,model_name,saveObj):
    """Insert ``saveObj`` into the <app_id>_<model_name> collection.

    NOTE(review): Collection.insert is deprecated in modern PyMongo
    (insert_one) — confirm the driver version before porting.
    """
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    collection.insert(saveObj)
def find_models(app_id,model_name,**pageConfig):
    """Return a cursor over every document in <app_id>_<model_name>.

    NOTE(review): pageConfig is accepted but ignored here — confirm callers
    expect the unpaged variant.
    """
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    return collection.find()
def find_page_models(app_id,model_name,**pageConfig):
    """Return one Page of documents, newest (largest _id) first.

    Keyword Args:
        gotoPage (int): 1-based page number; defaults to 1.
        pageSize (int): items per page; defaults to DEFAULT_PAGE_SIZE.
    """
    gotoPage=pageConfig.get("gotoPage",1)
    pageSize=pageConfig.get("pageSize",DEFAULT_PAGE_SIZE)
    # Guard against out-of-range paging values.
    if gotoPage<0:
        gotoPage=1
    if pageSize<1:
        pageSize=DEFAULT_PAGE_SIZE
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    data=collection.find(skip=(gotoPage-1)*pageSize,limit=pageSize).sort("_id", pymongo.DESCENDING)
    # NOTE(review): Collection.count() is removed in modern PyMongo
    # (count_documents) — confirm the driver version before porting.
    total=collection.count()
    return Page(gotoPage, total, pageSize, data)
def del_model(app_id,model_name,objid):
    """Delete the document with the given id from <app_id>_<model_name>.

    NOTE(review): Collection.remove is deprecated in modern PyMongo
    (delete_one) — confirm the driver version before porting.
    """
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    collection.remove({"_id":ObjectId(str(objid))})
def find_model_data(app_id,model_name,objid):
    """Return the single document with the given id, or None if absent."""
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    return collection.find_one({"_id":ObjectId(str(objid))})
def update_model(app_id,model_name,objid,updateDict):
    """Apply a $set of ``updateDict`` to the document with the given id.

    NOTE(review): Collection.update is deprecated in modern PyMongo
    (update_one) — confirm the driver version before porting.
    """
    collection=mongo_util.get_mongo_collection(app_id+"_"+model_name)
    collection.update({"_id":ObjectId(str(objid))}, {"$set": updateDict})
|
import heapq
def kthLargest(iterable, k):
    """Return a min-heap (list) containing the k largest values of ``iterable``.

    The heap root (index 0) is the k-th largest value. Returns None when
    ``iterable`` yields fewer than k values.
    """
    largest = []
    for value in iterable:
        if len(largest) < k:
            heapq.heappush(largest, value)
        else:
            # Fix: push-then-pop via heappushpop keeps the heap at size k and
            # drops the dead ``sortedArray`` accumulator the original built
            # but never used.
            heapq.heappushpop(largest, value)
    if len(largest) < k:
        return None
    return largest

print(kthLargest([8, 16, 80, 55, 32, 8, 38], 3))
|
from TH_Repository import *
# Generate the list of daily Hindu archive URLs, e.g.
# 'http://www.thehindu.com/archive/print/2017/01/01/'
dayUrls= TH_DayUrl_Generator()
# Prompt for the database password (Python 2: raw_input returns a str).
PWD = raw_input( "Please Enter Database Password: ")
# Extract article URLs (ending in .ece) from each day's page.
articl_dict = TH_Article_URL__Extractor(dayUrls)
# Insert the article URLs into the database without any filtering.
TH_Article_URL_DBInsert(articl_dict, PWD)
|
from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
    """Render the home page with a greeting placeholder."""
    return render(request, 'home.html', {'HITHERE': 'This is me'})
def count(request):
    """Count word frequencies in the ``fulltext`` GET parameter.

    Renders count.html with the original text, the total word count, and
    (word, count) pairs sorted by frequency, most frequent first.
    """
    # Local import keeps the module's import block untouched.
    from collections import Counter
    # fulltext pass from home.html
    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    # Counter.most_common() yields (word, count) sorted by count descending,
    # equivalent to the original manual dict + stable reverse sort.
    sortedWords = Counter(wordlist).most_common()
    return render(request, 'count.html', {'text': fulltext, 'count': len(wordlist), 'sortedWords': sortedWords})
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
import pandas as pd
import numpy as np
import torch
from metrics import get_metrics
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import itertools
import os
import pprint
# static constants
# Tunable hyperparameter names, in the index order used throughout Model
# (indexes tuples returned by get_indexes follow this order).
HYPERPARAMS = ['learning_rate', 'num_iters', 'n_h', 'n_h_adv', 'dropout_rate', 'alpha']
# NOTE(review): flag defined here but consumed outside this view — presumably
# toggles metric computation at intermediate training iterations.
intermediate_metrics = False
class Model(object):
    def __init__(self, params):
        """Build the grid of candidate models and prepare tensors.

        Args:
            params (dict): must contain 'method', 'num_classes', 'logpath'
                and 'hyperparams', plus the Xtrain/Xvalid/Xtest (and y/z)
                frames consumed by process_data().
        """
        self.params = params
        self.method = self.params['method']
        # Any method other than 'basic' also trains an adversary network.
        self.adversarial = self.method != 'basic'
        self.num_classes = self.params['num_classes']
        self.logpath = self.params['logpath']
        self.hyperparams = self.params['hyperparams']
        self.model = self.build_model()
        self.data = self.process_data()
    def valid_hyperparam(self, i):
        """Return True when HYPERPARAMS[i] applies to this model.

        Indices 0-2 (learning_rate, num_iters, n_h) and 4 (dropout_rate)
        always apply; the remaining indices (n_h_adv, alpha) only apply to
        adversarial methods.
        """
        return (i < 3 or i == 4 or self.adversarial)
    def get_indexes(self):
        """Yield index tuples covering the full hyperparameter grid.

        Inapplicable hyperparameters (see valid_hyperparam) contribute a
        single None placeholder so every tuple has a fixed length.
        """
        num_models = []
        for i in range(len(HYPERPARAMS)):
            if self.valid_hyperparam(i):
                num_models.append(range(len(self.hyperparams[HYPERPARAMS[i]])))
            else:
                num_models.append([None]) # placeholder value if no such hyperparameter
        return itertools.product(*num_models)
    def get_hyperparams(self, indexes):
        """Resolve an index tuple to the actual hyperparameter values.

        Inapplicable entries become None, mirroring get_indexes().
        """
        hyperparams = []
        for i in range(len(indexes)):
            if self.valid_hyperparam(i):
                hyperparams.append(self.hyperparams[HYPERPARAMS[i]][indexes[i]])
            else:
                hyperparams.append(None)
        return hyperparams
    def hyperparams_to_string(self, indexes):
        """Encode the chosen hyperparameter values as a filename-safe string.

        NOTE(review): the '-' separator is appended even for skipped
        (inapplicable) hyperparameters, producing doubled dashes — confirm
        downstream filename parsing expects this.
        """
        res = ''
        for i in range(len(HYPERPARAMS)):
            if i > 0:
                res += '-'
            if self.valid_hyperparam(i):
                res += HYPERPARAMS[i] + '_' + str(self.hyperparams[HYPERPARAMS[i]][indexes[i]])
        return res
def build_model(self):
models = {}
for indexes in self.get_indexes():
models[indexes] = self.build_single_model(indexes)
return models
def build_single_model(self, indexes):
model = dict()
m, n = self.params['Xtrain'].shape
m_valid, n_valid = self.params['Xvalid'].shape
m_test, n_test = self.params['Xtest'].shape
n_h = self.hyperparams['n_h'][indexes[2]]
model['model'] = torch.nn.Sequential(
torch.nn.Linear(n, n_h),
torch.nn.ReLU(),
torch.nn.Dropout(self.hyperparams['dropout_rate'][indexes[4]]),
torch.nn.Linear(n_h, 1),
torch.nn.Sigmoid(),
)
model['loss_fn'] = torch.nn.BCELoss(size_average=True)
model['optimizer'] = torch.optim.Adam(model['model'].parameters(), lr=self.hyperparams['learning_rate'][indexes[0]])
if self.adversarial:
n_h_adv = self.hyperparams['n_h_adv'][indexes[3]]
if self.num_classes > 2:
n_h_out = self.num_classes
else:
n_h_out = 1
if self.method == 'parity':
n_adv = 1
elif self.method == 'odds' or 'opportunity':
n_adv = 2
else:
raise Exception('Unknown method: {}'.format(self.method))
model['adv_model'] = torch.nn.Sequential(
torch.nn.Linear(n_adv, n_h_adv),
torch.nn.ReLU(),
torch.nn.Dropout(self.hyperparams['dropout_rate'][indexes[4]]),
torch.nn.Linear(n_h_adv, n_h_out),
torch.nn.Sigmoid(),
)
if (self.num_classes > 2):
model['adv_loss_fn'] = torch.nn.CrossEntropyLoss(size_average=True)
else:
model['adv_loss_fn'] = torch.nn.BCELoss(size_average=True)
model['adv_optimizer'] = torch.optim.Adam(model['adv_model'].parameters(), lr=self.hyperparams['learning_rate'][indexes[0]])
return model
    def process_data(self):
        """Convert the pandas frames in self.params into torch Variables.

        For the 'opportunity' method, also builds boolean masks selecting
        the positive-label (y == 1) rows of each split. Protected-attribute
        targets z are long tensors for multi-class adversaries and float
        tensors otherwise.

        Returns:
            dict of tensors keyed by Xtrain/ytrain/ztrain (and valid/test
            variants), plus adv_*_mask entries for 'opportunity'.
        """
        data = dict()
        m, n = self.params['Xtrain'].shape
        m_valid, n_valid = self.params['Xvalid'].shape
        m_test, n_test = self.params['Xtest'].shape
        n_h = self.hyperparams['n_h']
        if self.method == 'opportunity':
            # Masks for equality-of-opportunity: only positive examples feed
            # the adversary.
            data['adv_train_mask'] = self.params['ytrain'] == 1
            data['adv_train_mask'] = torch.ByteTensor(data['adv_train_mask'].astype(int).values.reshape(m, 1))
            data['adv_valid_mask'] = self.params['yvalid'] == 1
            data['adv_valid_mask'] = torch.ByteTensor(data['adv_valid_mask'].astype(int).values.reshape(m_valid, 1))
            data['adv_test_mask'] = self.params['ytest'] == 1
            data['adv_test_mask'] = torch.ByteTensor(data['adv_test_mask'].astype(int).values.reshape(m_test, 1))
        data['Xtrain'] = Variable(torch.tensor(self.params['Xtrain'].values).float())
        data['ytrain'] = Variable(torch.tensor(self.params['ytrain'].values.reshape(m, 1)).float())
        data['Xvalid'] = Variable(torch.tensor(self.params['Xvalid'].values).float())
        data['yvalid'] = Variable(torch.tensor(self.params['yvalid'].values.reshape(m_valid,1)).float())
        data['Xtest'] = Variable(torch.tensor(self.params['Xtest'].values).float())
        data['ytest'] = Variable(torch.tensor(self.params['ytest'].values.reshape(m_test, 1)).float())
        if self.num_classes > 2:
            # CrossEntropyLoss expects long class indices.
            data['ztrain'] = Variable(torch.tensor(self.params['ztrain'].values.reshape(self.params['ztrain'].shape[0],)).long())
            data['zvalid'] = Variable(torch.tensor(self.params['zvalid'].values.reshape(self.params['zvalid'].shape[0],)).long())
            data['ztest'] = Variable(torch.tensor(self.params['ztest'].values.reshape(self.params['ztest'].shape[0],)).long())
        else:
            # BCELoss expects float targets.
            data['ztrain'] = Variable(torch.tensor(self.params['ztrain'].values.reshape(self.params['ztrain'].shape[0],)).float())
            data['zvalid'] = Variable(torch.tensor(self.params['zvalid'].values.reshape(self.params['zvalid'].shape[0],)).float())
            data['ztest'] = Variable(torch.tensor(self.params['ztest'].values.reshape(self.params['ztest'].shape[0],)).float())
        return data
def train(self):
    """Train one model for every hyperparameter combination."""
    for combo in self.get_indexes():
        self.train_single_model(combo)
def load_trained_models(self):
    """Load the saved model for every hyperparameter combination."""
    for combo in self.get_indexes():
        combo_name = self.hyperparams_to_string(combo)
        path = '{}-model/{}-model.pth'.format(self.logpath, combo_name)
        self.model[combo]['model'] = torch.load(path)
def create_dir(self, dirname):
    """Create *dirname* (including parents) if it does not exist.

    Fix: the original exists-then-makedirs pair had a check-then-create
    race; ``exist_ok=True`` makes the operation atomic and idempotent.
    """
    os.makedirs(dirname, exist_ok=True)
def train_single_model(self, indexes):
    """Train the predictor (and adversary, if enabled) for one hyperparameter combo.

    Logs losses to TensorBoard every 100 iterations, checkpoints every
    10000, and saves the final model(s) at the end.

    Fix: the "Predictor valid loss" line previously printed ``loss_train``
    instead of ``loss_valid``.

    NOTE(review): ``intermediate_metrics`` is not defined in this method or
    in the visible scope -- presumably a module-level flag; confirm it
    exists, otherwise this raises NameError when the log branch runs.
    """
    # Load in model and data
    model = self.model[indexes]['model']
    loss_fn = self.model[indexes]['loss_fn']
    optimizer = self.model[indexes]['optimizer']
    Xtrain = self.data['Xtrain']
    Xvalid = self.data['Xvalid']
    Xtest = self.data['Xtest']
    ytrain = self.data['ytrain']
    yvalid = self.data['yvalid']
    ytest = self.data['ytest']
    ztrain = self.data['ztrain']
    zvalid = self.data['zvalid']
    ztest = self.data['ztest']
    if self.adversarial:
        adv_model = self.model[indexes]['adv_model']
        adv_loss_fn = self.model[indexes]['adv_loss_fn']
        adv_optimizer = self.model[indexes]['adv_optimizer']
    model.train()
    # Set up logging / checkpoint directories
    self.create_dir(self.logpath + '-training/')
    self.create_dir(self.logpath + '-metrics/')
    self.create_dir(self.logpath + '-model/')
    if self.adversarial:
        self.create_dir(self.logpath + '-adv/')
    hyperparam_values = self.hyperparams_to_string(indexes)
    logfile = self.logpath + '-training/' + hyperparam_values
    metrics_file = self.logpath + '-metrics/' + hyperparam_values + '-metrics.csv'
    metrics = []  # accumulated intermediate validation metrics (if enabled)
    modelfile = self.logpath + '-model/' + hyperparam_values + '-model.pth'
    if self.adversarial:
        advfile = self.logpath + '-adv/' + hyperparam_values + '-adv.pth'
    writer = SummaryWriter(logfile)
    for t in range(self.hyperparams['num_iters'][indexes[1]]):
        # Forward step on all three splits
        ypred_train = model(Xtrain)
        loss_train = loss_fn(ypred_train, ytrain)
        ypred_valid = model(Xvalid)
        loss_valid = loss_fn(ypred_valid, yvalid)
        ypred_test = model(Xtest)
        loss_test = loss_fn(ypred_test, ytest)
        if self.adversarial:
            # The adversary's input depends on the fairness criterion:
            # parity sees only predictions; odds sees (pred, label); and
            # opportunity sees (pred, label) restricted to y == 1 examples.
            if self.method == 'parity':
                adv_input_train = ypred_train
                adv_input_valid = ypred_valid
                adv_input_test = ypred_test
            elif self.method == 'odds':
                adv_input_train = torch.cat((ypred_train, ytrain), 1)
                adv_input_valid = torch.cat((ypred_valid, yvalid), 1)
                adv_input_test = torch.cat((ypred_test, ytest), 1)
            elif self.method == 'opportunity':
                adv_input_train = torch.stack((torch.masked_select(ypred_train, self.data['adv_train_mask']),
                                               torch.masked_select(ytrain, self.data['adv_train_mask'])), 1)
                adv_input_valid = torch.stack((torch.masked_select(ypred_valid, self.data['adv_valid_mask']),
                                               torch.masked_select(yvalid, self.data['adv_valid_mask'])), 1)
                adv_input_test = torch.stack((torch.masked_select(ypred_test, self.data['adv_test_mask']),
                                              torch.masked_select(ytest, self.data['adv_test_mask'])), 1)
            zpred_train = adv_model(adv_input_train)
            print(zpred_train.shape)
            print(ztrain.shape)
            adv_loss_train = adv_loss_fn(zpred_train, torch.masked_select(ztrain, self.data['adv_train_mask']))
            zpred_valid = adv_model(adv_input_valid)
            adv_loss_valid = adv_loss_fn(zpred_valid, torch.masked_select(zvalid, self.data['adv_valid_mask']))
            zpred_test = adv_model(adv_input_test)
            adv_loss_test = adv_loss_fn(zpred_test, torch.masked_select(ztest, self.data['adv_test_mask']))
            # Predictor minimizes its own loss while maximizing the
            # adversary's, weighted by alpha.
            combined_loss_train = loss_train - self.hyperparams['alpha'][indexes[5]] * adv_loss_train
            combined_loss_valid = loss_valid - self.hyperparams['alpha'][indexes[5]] * adv_loss_valid
            combined_loss_test = loss_test - self.hyperparams['alpha'][indexes[5]] * adv_loss_test
        # Train log
        if t % 100 == 0:
            print('Iteration: {}'.format(t))
            if self.adversarial:
                print('Predictor train loss: {:.4f}'.format(loss_train))
                # FIX: previously printed loss_train under the "valid" label.
                print('Predictor valid loss: {:.4f}'.format(loss_valid))
                print('Adversary train loss: {:.4f}'.format(adv_loss_train))
                print('Adversary valid loss: {:.4f}'.format(adv_loss_valid))
                print('Combined train loss: {:.4f}'.format(combined_loss_train))
                print('Combined valid loss: {:.4f}'.format(combined_loss_valid))
                write_log(writer, 'pred_loss_train', loss_train, t)
                write_log(writer, 'pred_loss_valid', loss_valid, t)
                write_log(writer, 'pred_loss_test', loss_test, t)
                write_log(writer, 'adv_loss_train', adv_loss_train, t)
                write_log(writer, 'adv_loss_valid', adv_loss_valid, t)
                write_log(writer, 'adv_loss_test', adv_loss_test, t)
                write_log(writer, 'combined_loss_train', combined_loss_train, t)
                write_log(writer, 'combined_loss_valid', combined_loss_valid, t)
                write_log(writer, 'combined_loss_test', combined_loss_test, t)
            else:
                print('Train loss: {:.4f}'.format(loss_train))
                print('Valid loss: {:.4f}'.format(loss_valid))
                write_log(writer, 'loss_train', loss_train, t)
                write_log(writer, 'loss_valid', loss_valid, t)
                write_log(writer, 'loss_test', loss_test, t)
            # print('Train metrics:')
            # metrics_train = metrics.get_metrics(ypred_train.data.numpy(), ytrain.data.numpy(), ztrain.data.numpy(), self.num_classes)
            if (intermediate_metrics):
                print('Validation metrics:')
                metrics_valid = get_metrics(ypred_valid.data.numpy(), yvalid.data.numpy(), zvalid.data.numpy(), self.get_hyperparams(indexes), self.num_classes, 'valid_set')
                pprint.pprint(metrics_valid)
                metrics.append(metrics_valid)  # -- NO LONGER COMPUTING INTERMEDIATE METRICS
        # Periodic checkpoint
        if t > 0 and t % 10000 == 0:
            torch.save(model, modelfile)
            if self.adversarial:
                torch.save(adv_model, advfile)
        # Backward step
        if self.adversarial:
            # Adversary update first; retain_graph so the predictor can
            # backprop through the same forward pass afterwards.
            adv_optimizer.zero_grad()
            adv_loss_train.backward(retain_graph=True)
            adv_optimizer.step()
            # Predictor update on the combined objective
            optimizer.zero_grad()
            combined_loss_train.backward()
        else:
            optimizer.zero_grad()
            loss_train.backward()
        optimizer.step()
    # save final model
    torch.save(model, modelfile)
    if self.adversarial:
        torch.save(adv_model, advfile)
    writer.close()
    if (intermediate_metrics):
        metrics = pd.DataFrame(metrics)
        metrics.to_csv(metrics_file)
def eval(self):
    """Evaluate every hyperparameter combination and write one combined CSV."""
    evalfile = self.logpath + '-eval.csv'
    all_metrics = [self.eval_single_model(combo) for combo in self.get_indexes()]
    pd.concat(all_metrics).to_csv(evalfile)
def eval_single_model(self, indexes):
    """Evaluate one trained model (and adversary, if any) on valid and test.

    Returns a two-row DataFrame: validation metrics then test metrics.

    Fixes: the bare ``print`` expressions (Python 2 leftovers that are
    no-ops in Python 3) now actually print a blank line; unused locals
    (Xtrain, ytrain, ztrain) are removed.
    """
    model = self.model[indexes]['model']
    Xvalid = self.data['Xvalid']
    Xtest = self.data['Xtest']
    yvalid = self.data['yvalid']
    ytest = self.data['ytest']
    zvalid = self.data['zvalid']
    ztest = self.data['ztest']
    model.eval()
    ypred_valid = model(Xvalid)
    zpred_valid = None
    if self.adversarial:
        adv_model = self.model[indexes]['adv_model']
        adv_model.eval()
        if self.method == 'parity':
            zpred_valid = adv_model(ypred_valid)
        elif self.method == 'odds':
            zpred_valid = adv_model(torch.cat((ypred_valid, yvalid), 1))
        elif self.method == 'opportunity':
            # Adversary predictions are not evaluated for this method.
            zpred_valid = None
    if zpred_valid is not None:
        metrics_valid = pd.DataFrame(get_metrics(ypred_valid.data.numpy(), yvalid.data.numpy(), zvalid.data.numpy(), self.get_hyperparams(indexes), k=self.num_classes, evaluation_file='valid_set', zpred=zpred_valid.data.numpy()), index=[0])
    else:
        metrics_valid = pd.DataFrame(get_metrics(ypred_valid.data.numpy(), yvalid.data.numpy(), zvalid.data.numpy(), self.get_hyperparams(indexes), k=self.num_classes, evaluation_file='valid_set'), index=[0])
    print()  # FIX: bare `print` was a no-op expression in Python 3
    print('Final test metrics for model with ' + self.hyperparams_to_string(indexes) + ' on validation:')
    pprint.pprint(metrics_valid)
    ypred_test = model(Xtest)
    zpred_test = None
    if self.adversarial:
        if self.method == 'parity':
            zpred_test = adv_model(ypred_test)
        elif self.method == 'odds':
            zpred_test = adv_model(torch.cat((ypred_test, ytest), 1))
        elif self.method == 'opportunity':
            zpred_test = None
    if zpred_test is not None:
        metrics_test = pd.DataFrame(get_metrics(ypred_test.data.numpy(), ytest.data.numpy(), ztest.data.numpy(), self.get_hyperparams(indexes), k=self.num_classes, evaluation_file='test_set', zpred=zpred_test.data.numpy()), index=[0])
    else:
        metrics_test = pd.DataFrame(get_metrics(ypred_test.data.numpy(), ytest.data.numpy(), ztest.data.numpy(), self.get_hyperparams(indexes), k=self.num_classes, evaluation_file='test_set'), index=[0])
    print()  # FIX: bare `print` was a no-op expression in Python 3
    print('Final test metrics for model with ' + self.hyperparams_to_string(indexes) + ' on test:')
    pprint.pprint(metrics_test)
    return pd.concat([metrics_valid, metrics_test])
def write_log(writer, key, loss, iter):
    """Record a scalar loss value under *key* at step *iter* on the writer."""
    value = loss.item()
    writer.add_scalar(key, value, iter)
def write_log_array(writer, key, array, iter):
    """Record a numpy array, rendered as text, under *key* at step *iter*."""
    rendered = np.array_str(array)
    writer.add_text(key, rendered, iter)
|
#!/usr/bin/python3
# Filename : json-csv_3.py
# Author by : Lily
"""
版本三
将多条json写入csv,其中几条缺少了部分键值对
"""
# Sample records (Chinese addresses, unicode-escaped).  Some records are
# deliberately missing keys ("province", "county", "phone") to exercise the
# missing-key handling in muti_json below.
json_data = [{"id":"216","city":"\u4e1c\u839e\u5e02","county":"\u5e02\u3001\u53bf\u7ea7\u5e02","detail":"\u4e1c\u839e\u5e02\u5858\u53a6\u9547\u4e07\u79d1\u751f\u6d3b\u5e7f\u573aB7\u3001B8\u53f7","time":"2015-09-23"},
{"id":"72","province":"\u5e7f\u4e1c\u7701","city":"\u5e7f\u5dde\u5e02","detail":"\u5e7f\u5dde\u5e02\u9ec4\u57d4\u533a\u9ec4\u57d4\u4e1c\u8def2700\u53f7\u534e\u6da6\u4e07\u5bb6\u9996\u5c42","phone":"","time":"2015-08-03"},
{"id":"376","city":"\u60e0\u5dde\u5e02","county":"\u60e0\u57ce\u533a","detail":"\u60e0\u6c34\u533a\u73af\u57ce\u897f\u4e8c\u8def33\u53f7","phone":"","time":"2016-09-22"},
{"id":"51","province":"\u5e7f\u4e1c\u7701","city":"\u5e7f\u5dde\u5e02","county":"\u6d77\u73e0\u533a","detail":"\u5e7f\u5dde\u5e02\u6d77\u73e0\u533a\u58a9\u548c\u8def165\u53f7\u6566\u714c\u5546\u4e1a\u57ceA6\u6863","phone":"020-84217995","time":"2015-08-03"},
{"id":"391","province":"\u6e56\u5317\u7701","county":"\u5e02\u3001\u53bf\u7ea7\u5e02","detail":"\u5929\u95e8\u5e02\u5f20\u6e2f\u9547\u666f\u5cf0\u5927\u9053","phone":"","time":"2016-09-26"}]
def muti_json(file_name, json_data):
    """Write a list of JSON dicts to a CSV-like file at *file_name*.

    The header is the union of all keys in first-seen order; a cell whose
    key is absent from a record is written as a single space.

    Fixes: the row buffer was initialized once before the loop, so a record
    missing a key silently inherited the previous record's value -- exactly
    the case this script claims to handle; the file handle was never
    closed; non-string values would crash f.write; a stray debug print was
    removed.
    """
    # Collect the union of keys, preserving first-seen order, for the header.
    header = []
    for record in json_data:
        for key in record:
            if key not in header:
                header.append(key)
    with open(file_name, 'w', encoding='utf-8') as f:
        # Write the header row (each cell followed by a comma, as before).
        for column in header:
            f.write(column)
            f.write(',')
        f.write('\n')
        for record in json_data:
            # Re-initialize per row so values from previous rows do not leak
            # into cells whose key is absent from this record (bug fix).
            row = [' '] * len(header)
            for k, v in record.items():
                row[header.index(k)] = v
            for cell in row:
                f.write(str(cell))
                f.write(',')
            f.write('\n')
# Write the sample records above to test3.csv (runs at import time).
muti_json('test3.csv',json_data)
|
import sys


def main(filename):
    """Print each unique first whitespace-separated field found in *filename*.

    Fixes over the original: ``for pref.in prefctures:`` was a syntax error
    and misspelled the set's name; blank lines crashed with IndexError on
    ``line.split()[0]``; the file is now closed via ``with`` and iterated
    directly instead of the readline loop.
    """
    prefectures = set()
    with open(filename) as f:
        for line in f:
            fields = line.split()
            if fields:  # skip blank lines instead of crashing
                prefectures.add(fields[0])
    for pref in prefectures:
        print(pref)


if __name__ == '__main__':
    main(sys.argv[1])
|
# Thin launcher: delegate to the application's real entry point.
import app_main.main as m
m.main()  # NOTE(review): runs at import time; no __main__ guard -- confirm intended
|
import requests
# script for testing
def req_loc(action, body):
    """POST an action to the local mail2 server; return its JSON reply.

    Raises ValueError with the response text on any non-200 status.
    """
    payload = {"action": action, "body": body}
    resp = requests.post("http://localhost:8080/mail2", json=payload)
    if resp.status_code != 200:
        raise ValueError(resp.text)
    return resp.json()
def req_rem(action, body):
    """POST an action to the remote mail2 server; return its JSON reply.

    Raises ValueError with the response text on any non-200 status.
    """
    payload = {"action": action, "body": body}
    resp = requests.post("http://localhost:8081/mail2", json=payload)
    if resp.status_code != 200:
        raise ValueError(resp.text)
    return resp.json()
def auth_loc():
    """Authenticate alice against the local server."""
    credentials = {"address": "http://alice@localhost:8080/mail2", "password": ""}
    return req_loc("auth", credentials)
def auth_rem():
    """Authenticate bob against the remote server."""
    credentials = {"address": "http://bob@localhost:8081/mail2", "password": ""}
    return req_rem("auth", credentials)
def send_to_loc(token):
    """Send a message from alice to the local bob."""
    message = {
        "token": token,
        "from": "http://alice@localhost:8080/mail2",
        "to": ["http://bob@localhost:8080/mail2"],
        "data": "alice to bob"
    }
    return req_loc("send", message)
def send_to_rem(token):
    """Send a message from alice (via her local server) to the remote bob."""
    message = {
        "token": token,
        "from": "http://alice@localhost:8080/mail2",
        "to": ["http://bob@localhost:8081/mail2"],
        "data": "alice to remote bob"
    }
    return req_loc("send", message)
def receive_loc(token):
    """Fetch pending messages from the local server."""
    return req_loc("receive", {"token": token})
def receive_rem(token):
    """Fetch pending messages from the remote server."""
    return req_rem("receive", {"token": token})
if __name__ == "__main__":
    # Authenticate on both servers, then exercise the receive path.
    resp_loc = auth_loc()
    resp_rem = auth_rem()
    if resp_loc["status"] != "ok" or resp_rem["status"] != "ok":
        print(resp_loc["message"], resp_rem["message"])
    else:
        token_loc = resp_loc["token"]
        token_rem = resp_rem["token"]
        # print(send_to_loc(token_loc))
        # print(send_to_rem(token_loc))
        # print(receive_loc(token_loc))
        print(receive_rem(token_rem))
|
from django.http import JsonResponse
from django.contrib.auth.models import User
import django.contrib.auth as auth
from django.views.decorators.csrf import csrf_exempt
from .utils import users_session_data
from .forms import UserForm
import json
@csrf_exempt
def users(request):
    """Create and log in a user from a JSON POST body; return session data.

    Expects ``{"user": {...}}`` in the request body for an unauthenticated
    POST; always responds with the current session data.

    Fix: the original parsed ``request.body`` before checking the method,
    so any GET (or request without a JSON body) raised JSONDecodeError.
    Parsing now happens only on the branch that uses it.
    """
    if request.method == 'POST' and not request.user.is_authenticated:
        user = json.loads(request.body)['user']
        form = UserForm(user)
        if form.is_valid():
            auth.login(request, form.save())
    return JsonResponse(users_session_data(request))
|
# app/serializers.py
from rest_framework import serializers
from .models import Post, ViewTestModel
class UserSerializer(serializers.Serializer):
    """Plain (non-model) serializer exposing a user's email and username."""
    email = serializers.EmailField()
    username = serializers.CharField(max_length=100)
class PostSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Post, exposing only title and body."""
    # user = UserSerializer()
    class Meta:
        model = Post
        fields = ['title', 'body']
        # fields = "__all__"
class CommentSerializer(serializers.Serializer):
    """Comment serializer with stubbed create/update.

    NOTE(review): create() and update() return plain strings rather than
    instances -- apparently intentional probes for debugging serializer
    dispatch; confirm before using in production.
    """
    email = serializers.EmailField()
    content = serializers.CharField(max_length=200)
    created = serializers.DateTimeField()
    def create(self, validated_data):
        # breakpoint()
        return 'custom value from create watchdog'
    def update(self, instance, validated_data):
        return 'custom value from update'
        # breakpoint()
class ViewTestSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every field of ViewTestModel."""
    class Meta:
        model = ViewTestModel
        fields = "__all__"
|
# Challenges proposed at https://www.codementor.io/@ilyaas97/6-python-projects-for-beginners-yn3va03fs
import random
import numpy as np
# Guess the Number
if __name__ == '__main__':
num = np.random.randint(0,20) # random integer drawn from (0,20)
# function to decide whether you have won or not
# this function takes the guessed number as the argument and prints out three different messages
# the number of available attempts is 6
count = 0
while count < 6:
guess = int(input("Guess an integer between 0 and 20: "))
if num == guess:
print("Congratulations, your guess is correct. You win!")
break
elif num < guess:
print("Sorry, your guess is too high. Please, retry. You still have " + str(6-count) + " attempts left.")
count += 1
elif num > guess:
print("Sorry, your guess is too low. Please, retry. You still have " + str(6-count) + " attempts left.")
count += 1
if num != guess:
print("Sorry, you have lost. The number to guess was " + str(num))
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 12:18:31 2015
@author: bolaka
submission1.csv - first pass @ 1.25643166239
submission2.csv - first pass
"""
# Bike-sharing demand submission script (flat script, no functions).
# Trains two regressors -- one for 'registered' and one for 'casual' riders --
# then sums their test predictions into 'count' for the submission file.
import os
os.chdir('/home/bolaka/python-workspace/CVX-timelines/')
# imports
#import math
import time  # NOTE(review): imported but unused in the visible code
import datetime
from cvxtextproject import *
from mlclassificationlibs import *
setPath('/home/bolaka/Bike Sharing')
trainfilename = 'train.csv'
testfilename = 'test.csv'
idCol = 'datetime'
training = pd.read_csv(trainfilename, index_col=idCol)
testing = pd.read_csv(testfilename, index_col=idCol)
# extract the date-timestamps from training and testing files
# (time_index is a simple running counter continued across both sets)
training['time_index'] = range(1, len(training) + 1 )
training['timestamp'] = [datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in training.index]
testing['time_index'] = range(len(training) + 1, len(training) + len(testing) + 1)
testing['timestamp'] = [datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in testing.index]
# calendar features derived from the parsed timestamp
training['year'] = [x.year for x in training['timestamp'] ]
training['month'] = [x.month for x in training['timestamp'] ]
training['day'] = [x.day for x in training['timestamp'] ]
training['hour'] = [x.hour for x in training['timestamp'] ]
training['weekday'] = [x.weekday() for x in training['timestamp'] ]
#training['yearday'] = [x.timetuple().tm_yday for x in training['timestamp'] ]
testing['year'] = [x.year for x in testing['timestamp'] ]
testing['month'] = [x.month for x in testing['timestamp'] ]
testing['day'] = [x.day for x in testing['timestamp'] ]
testing['hour'] = [x.hour for x in testing['timestamp'] ]
testing['weekday'] = [x.weekday() for x in testing['timestamp'] ]
#testing['yearday'] = [x.timetuple().tm_yday for x in testing['timestamp'] ]
# weekend = not a holiday and not a working day
training[ 'weekend'] = 0
training.loc[ (training['holiday'] == 0) & (training['workingday'] == 0) ,'weekend'] = 1
testing[ 'weekend'] = 0
testing.loc[ (testing['holiday'] == 0) & (testing['workingday'] == 0) ,'weekend'] = 1
# handle skewed counts
#training['registered_sqrt'] = np.sqrt(training['registered'].values)
#training['casual_sqrt'] = np.sqrt(training['casual'].values)
#training['registered_log10'] = np.log10(training['registered'].values + 1)
#training['casual_log10'] = np.log10(training['casual'].values + 1)
training.to_csv('training-newfeatures.csv')
# Model 1: registered riders (year excluded; model 2 keeps it).
featuresUnused1 = [ 'casual','registered','count', 'timestamp', 'atemp', 'holiday', 'year' ] #'registered_sqrt', 'casual_sqrt', 'registered_log10', 'casual_log10' ]
results1 = analyzeMetricNumerical('registered',training, featuresUnused1)
showFeatureImportanceNumerical(training, results1['features'], 'registered')
# Model 2: casual riders.
featuresUnused2 = [ 'casual','registered','count', 'timestamp', 'atemp', 'holiday' ] #'registered_sqrt', 'casual_sqrt', 'registered_log10', 'casual_log10'
results2 = analyzeMetricNumerical('casual',training, featuresUnused2)
showFeatureImportanceNumerical(training, results2['features'], 'casual')
# Predict both rider types on the test set and combine into 'count'.
temp1 = predict(results1['model'], testing[results1['features']], 'registered')
testing['registered'] = temp1['registered']
#testing['registered'] = np.square( temp1['registered_sqrt'].values )
#testing['registered'] = np.power( 10, temp1['registered_log10'].values ) - 1
temp2 = predict(results2['model'], testing[results2['features']], 'casual')
testing['casual'] = temp2['casual']
#testing['casual'] = np.square( temp2['casual_sqrt'].values )
#testing['casual'] = np.power( 10, temp2['casual_log10'].values ) - 1
testing['count'] = testing['registered'] + testing['casual']
testing = testing[['count']]
testing.to_csv('submission6.csv', sep=',', encoding='utf-8')
class India:
    """Demo country exposing the duck-typed states()/currency() interface."""

    def states(self):
        """Print the number of states."""
        print(29)

    def currency(self):
        """Print the currency name."""
        print("Rupee")
class Usa:
    """Demo country exposing the duck-typed states()/currency() interface."""

    def states(self):
        """Print the number of states."""
        print(15)

    def currency(self):
        """Print the currency name."""
        print("Dollar")
def main():
    """Call states() and currency() polymorphically on each country object."""
    for country in (India(), Usa()):
        country.states()
        country.currency()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration creating the Order model (coffee orders)."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nicker', models.CharField(max_length=50, verbose_name='\u79f0\u547c')),
                ('location', models.CharField(max_length=100, verbose_name='\u5730\u5740')),
                ('tel', models.CharField(max_length=50, verbose_name='\u7535\u8bdd(\u8054\u7cfb\u65b9\u5f0f)')),
                ('licence', models.CharField(max_length=100, verbose_name='\u5e8f\u5217\u53f7', blank=True)),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                # NOTE(review): modify_time uses auto_now_add, so it is set only
                # at creation and never updated; auto_now is the usual choice for
                # a "modified" timestamp.  Changing it requires a new migration
                # (never edit an applied one) -- confirm with the model.
                ('modify_time', models.DateTimeField(auto_now_add=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
            ],
            options={
                'verbose_name': '\u8ba2\u5355 - Coffee',
            },
        ),
    ]
|
# JTSK-350112
# rqballsim.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
from random import random
def printIntro():
    """Print an introduction explaining the racquetball simulation."""
    intro = ("This program simulates a game of racquetball "
             "between two players called \"A\" and \"B\". "
             "The abilities of each player is indicated by a "
             "probability ( a number between 0 and 1) that the "
             "player wins the point when serving. Player A "
             "always has the first serve")
    print(intro)
def getInputs():
    """Prompt for both serve-win probabilities and the game count.

    Returns (probA, probB, n).
    """
    prob_a = float(input("Probability of player A to win a serve ? "))
    prob_b = float(input("Probability of player B to win a serve ? "))
    games = int(input("How many games to simulate? "))
    return prob_a, prob_b, games
def gameOver(a, b):
    """Return True once either racquetball score has reached 15."""
    return 15 in (a, b)
def simOneGame(probA, probB):
    """Simulate one racquetball game between A and B.

    A serves first; only the server can score.  Returns (scoreA, scoreB).
    Consumes exactly one random() draw per rally, matching the original.
    """
    server = "A"
    scoreA = scoreB = 0
    while not gameOver(scoreA, scoreB):
        win = random() < (probA if server == "A" else probB)
        if server == "A":
            if win:
                scoreA += 1
            else:
                server = "B"
        else:
            if win:
                scoreB += 1
            else:
                server = "A"
    return scoreA, scoreB
def simNGames(n, probA, probB):
    """Simulate n games and return (winsA, winsB) tallies."""
    winsA = 0
    winsB = 0
    for _ in range(n):
        scoreA, scoreB = simOneGame(probA, probB)
        if scoreA > scoreB:
            winsA += 1
        else:
            winsB += 1
    return winsA, winsB
def printSummary(winsA, winsB):
    """Print win counts and win percentages for both players."""
    total = winsA + winsB
    print("\nGames simulated: ", total)
    print(" Wins for A: {0} ({1:0.1%}) ".format(winsA, winsA / total))
    print(" Wins for B: {0} ({1:0.1%}) ".format(winsB, winsB / total))
def main():
    """Drive the simulation end to end: intro, inputs, games, summary."""
    printIntro()
    prob_a, prob_b, games = getInputs()
    wins_a, wins_b = simNGames(games, prob_a, prob_b)
    printSummary(wins_a, wins_b)
# Run the simulation (executes at import time; no __main__ guard).
main()
import unittest, random, string
from main.activity.activity_login import *
from main.activity.activity_logout import *
from main.activity.activity_myshop_editor import *
from main.lib.user_data import *
from main.function.setup import *
class Test_add_etalase(unittest.TestCase):
    """Selenium UI test: delete the most recent etalase from a shop."""
    # Target environment selector passed to the page-object helpers.
    _site = "live"
    def setUp(self):
        """Start a Firefox webdriver session before each test."""
        print ('TEST "Add-Etalase"')
        self.driver = tsetup("firefox")
    def test_2_delete_etalase(self):
        """Log in, open the shop editor, delete the last etalase, log out."""
        print ("TEST #1 : Delete Etalase[Top]")
        driver = self.driver
        # NOTE(review): credentials are hard-coded in source -- move to config
        # or environment variables.
        email = 'test.tokopedia+01@gmail.com'
        pwd = 'asdasd789'
        #Object activity
        login = loginActivity()
        myshop_etalase = myshopEditorActivity()
        logout = logoutActivity()
        #Object initiation
        myshop_etalase.setObject(driver)
        #Action
        login.do_login(driver, email, pwd, self._site)
        myshop_etalase.goto_myshop_editor(self._site)
        myshop_etalase.click_tab_etalase(self.driver)
        myshop_etalase.delete_last_etalase()
        logout.do_logout(driver, self._site)
    def tearDown(self):
        """Pause briefly (for manual observation), then close the browser."""
        print("Testing akan selesai dalam beberapa saat..")
        time.sleep(5)
        self.driver.close()
# Entry point: run the test suite, silencing ResourceWarning noise.
if __name__ == "__main__":
    unittest.main(warnings='ignore')
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainForm.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 shim: QString.fromUtf8 exists only under API v1; fall back to the
# identity function under API v2 / newer bindings.  (Generated file -- edits
# here are overwritten when the .ui is recompiled.)
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Translation shim: pass the UnicodeUTF8 encoding flag when the binding
# still supports it; newer bindings drop the argument.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainForm(object):
    """Auto-generated (pyuic4) UI layout for the gait-analysis main form.

    Do not hand-edit the widget construction: regenerating from MainForm.ui
    overwrites this file.
    """
    def setupUi(self, MainForm):
        """Build all widgets and layouts and wire them into MainForm."""
        MainForm.setObjectName(_fromUtf8("MainForm"))
        MainForm.resize(607, 298)
        # Top row: folder path entry, browse button, and load/save buttons.
        self.verticalLayout_3 = QtGui.QVBoxLayout(MainForm)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.gridLayout_2 = QtGui.QGridLayout()
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.folderPathButton = QtGui.QPushButton(MainForm)
        self.folderPathButton.setObjectName(_fromUtf8("folderPathButton"))
        self.gridLayout_2.addWidget(self.folderPathButton, 1, 1, 1, 1)
        self.folderPathLE = QtGui.QLineEdit(MainForm)
        self.folderPathLE.setObjectName(_fromUtf8("folderPathLE"))
        self.gridLayout_2.addWidget(self.folderPathLE, 1, 0, 1, 1)
        self.label = QtGui.QLabel(MainForm)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.verticalLayout_2.addLayout(self.gridLayout_2)
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem)
        self.saveNumpyButton = QtGui.QPushButton(MainForm)
        self.saveNumpyButton.setObjectName(_fromUtf8("saveNumpyButton"))
        self.horizontalLayout_4.addWidget(self.saveNumpyButton)
        self.loadNumpyButton = QtGui.QPushButton(MainForm)
        self.loadNumpyButton.setObjectName(_fromUtf8("loadNumpyButton"))
        self.horizontalLayout_4.addWidget(self.loadNumpyButton)
        self.loadFolderButton = QtGui.QPushButton(MainForm)
        self.loadFolderButton.setObjectName(_fromUtf8("loadFolderButton"))
        self.horizontalLayout_4.addWidget(self.loadFolderButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        self.verticalLayout_3.addLayout(self.verticalLayout_2)
        # Middle row: subject selectors, options, attribute radio group,
        # and the list of subjects to plot.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.firstManCB = QtGui.QComboBox(MainForm)
        self.firstManCB.setObjectName(_fromUtf8("firstManCB"))
        self.gridLayout.addWidget(self.firstManCB, 1, 0, 1, 1)
        self.addToListButton = QtGui.QPushButton(MainForm)
        self.addToListButton.setObjectName(_fromUtf8("addToListButton"))
        self.gridLayout.addWidget(self.addToListButton, 1, 1, 1, 1)
        self.label_2 = QtGui.QLabel(MainForm)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(MainForm)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
        self.label_4 = QtGui.QLabel(MainForm)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
        self.secondManCB = QtGui.QComboBox(MainForm)
        self.secondManCB.setObjectName(_fromUtf8("secondManCB"))
        self.gridLayout.addWidget(self.secondManCB, 3, 0, 1, 1)
        self.columnLE = QtGui.QLineEdit(MainForm)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.columnLE.sizePolicy().hasHeightForWidth())
        self.columnLE.setSizePolicy(sizePolicy)
        self.columnLE.setMinimumSize(QtCore.QSize(60, 0))
        self.columnLE.setObjectName(_fromUtf8("columnLE"))
        self.gridLayout.addWidget(self.columnLE, 5, 0, 1, 1)
        self.combineCheck = QtGui.QCheckBox(MainForm)
        self.combineCheck.setObjectName(_fromUtf8("combineCheck"))
        self.gridLayout.addWidget(self.combineCheck, 2, 1, 1, 1)
        self.oneCheck = QtGui.QCheckBox(MainForm)
        self.oneCheck.setObjectName(_fromUtf8("oneCheck"))
        self.gridLayout.addWidget(self.oneCheck, 0, 1, 1, 1)
        self.horizontalLayout_3.addLayout(self.gridLayout)
        spacerItem1 = QtGui.QSpacerItem(80, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.attributeGroup = QtGui.QGroupBox(MainForm)
        self.attributeGroup.setMinimumSize(QtCore.QSize(100, 0))
        self.attributeGroup.setTitle(_fromUtf8(""))
        self.attributeGroup.setObjectName(_fromUtf8("attributeGroup"))
        self.verticalLayout = QtGui.QVBoxLayout(self.attributeGroup)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.alphasRadioButton = QtGui.QRadioButton(self.attributeGroup)
        self.alphasRadioButton.setObjectName(_fromUtf8("alphasRadioButton"))
        self.verticalLayout.addWidget(self.alphasRadioButton)
        self.widthsRadioButton = QtGui.QRadioButton(self.attributeGroup)
        self.widthsRadioButton.setObjectName(_fromUtf8("widthsRadioButton"))
        self.verticalLayout.addWidget(self.widthsRadioButton)
        self.xymeanRadioButton = QtGui.QRadioButton(self.attributeGroup)
        self.xymeanRadioButton.setObjectName(_fromUtf8("xymeanRadioButton"))
        self.verticalLayout.addWidget(self.xymeanRadioButton)
        self.horizontalLayout_3.addWidget(self.attributeGroup)
        self.verticalLayout_6 = QtGui.QVBoxLayout()
        self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
        self.label_5 = QtGui.QLabel(MainForm)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.verticalLayout_6.addWidget(self.label_5)
        self.menLW = QtGui.QListWidget(MainForm)
        self.menLW.setObjectName(_fromUtf8("menLW"))
        self.verticalLayout_6.addWidget(self.menLW)
        self.horizontalLayout_3.addLayout(self.verticalLayout_6)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        # Bottom row: action buttons.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.anglesButton = QtGui.QPushButton(MainForm)
        self.anglesButton.setObjectName(_fromUtf8("anglesButton"))
        self.horizontalLayout_2.addWidget(self.anglesButton)
        self.animButton = QtGui.QPushButton(MainForm)
        self.animButton.setObjectName(_fromUtf8("animButton"))
        self.horizontalLayout_2.addWidget(self.animButton)
        self.fullCompareButton = QtGui.QPushButton(MainForm)
        self.fullCompareButton.setObjectName(_fromUtf8("fullCompareButton"))
        self.horizontalLayout_2.addWidget(self.fullCompareButton)
        self.compareButton = QtGui.QPushButton(MainForm)
        self.compareButton.setObjectName(_fromUtf8("compareButton"))
        self.horizontalLayout_2.addWidget(self.compareButton)
        self.histButton = QtGui.QPushButton(MainForm)
        self.histButton.setObjectName(_fromUtf8("histButton"))
        self.horizontalLayout_2.addWidget(self.histButton, QtCore.Qt.AlignBottom)
        self.graphButton = QtGui.QPushButton(MainForm)
        self.graphButton.setObjectName(_fromUtf8("graphButton"))
        self.horizontalLayout_2.addWidget(self.graphButton, QtCore.Qt.AlignBottom)
        self.exitButton = QtGui.QPushButton(MainForm)
        self.exitButton.setObjectName(_fromUtf8("exitButton"))
        self.horizontalLayout_2.addWidget(self.exitButton, QtCore.Qt.AlignBottom)
        self.retranslateUi(MainForm)
        QtCore.QMetaObject.connectSlotsByName(MainForm)
    def retranslateUi(self, MainForm):
        """Install the (Russian) display strings on every widget."""
        MainForm.setWindowTitle(_translate("MainForm", "Походка человека", None))
        self.folderPathButton.setText(_translate("MainForm", "Выбрать", None))
        self.label.setText(_translate("MainForm", "Выберите местонахождение", None))
        self.saveNumpyButton.setText(_translate("MainForm", "Сохранить объекты", None))
        self.loadNumpyButton.setText(_translate("MainForm", "Загрузить объекты", None))
        self.loadFolderButton.setText(_translate("MainForm", "Загрузить из папки", None))
        self.addToListButton.setText(_translate("MainForm", "Добавить", None))
        self.label_2.setText(_translate("MainForm", "Походка человека", None))
        self.label_3.setText(_translate("MainForm", "Походка для сравнения", None))
        self.label_4.setText(_translate("MainForm", "Кол-во столбцов гистограммы", None))
        self.combineCheck.setText(_translate("MainForm", "Совместить?", None))
        self.oneCheck.setText(_translate("MainForm", "Одиночный?", None))
        self.alphasRadioButton.setText(_translate("MainForm", "Центр масс", None))
        self.widthsRadioButton.setText(_translate("MainForm", "Ширина", None))
        self.xymeanRadioButton.setText(_translate("MainForm", "Среднее ХY", None))
        self.label_5.setText(_translate("MainForm", "Список для построения", None))
        self.anglesButton.setText(_translate("MainForm", "Углы", None))
        self.animButton.setText(_translate("MainForm", "Анимация", None))
        self.fullCompareButton.setText(_translate("MainForm", "Полное сравнение", None))
        self.compareButton.setText(_translate("MainForm", "Сравнение", None))
        self.histButton.setText(_translate("MainForm", "Гистограмма", None))
        self.graphButton.setText(_translate("MainForm", "График", None))
        self.exitButton.setText(_translate("MainForm", "Выйти", None))
|
from datatypes import nil, true, false, mksym, cons, from_list, to_list, LispSymbol, LispLambda, LispPair, first, rest, LispInteger, LispClass, class_base, Environment, LispString, get_stack
from lex import tokenize
from parse import parse
# -----------------------------------------------------------------------------
# QUOTE
#
# (quote <exp>)
#
# Return the <exp> without the quote part.
#
# example:
# hi <= (quote hi)
# (+ 3 4) <= (quote (+ 3 4))
# -----------------------------------------------------------------------------
def quote_func(args, env):
    """Special form (quote <exp>): return <exp> without evaluating it."""
    forms = to_list(args)
    assert forms[-1] is nil
    assert len(forms) == 2
    return forms[0]
# -----------------------------------------------------------------------------
# ASSIGNMENT
#
# (set! <var> <value>)
#
# Find in the environment the variable <var> and change
# its value to <value>
#
# example:
# ok <= (define x 20)
# 20 <= x
# ok <= (set! x 44)
# 44 <= x
# -----------------------------------------------------------------------------
def set_func(args, env):
    """Special form (set! <var> <value>): rebind an existing variable.

    <value> is evaluated; <var> must be a symbol.  Returns nil.
    """
    forms = to_list(args)
    assert forms[-1] is nil
    assert len(forms) == 3
    target, expression = forms[0], forms[1]
    assert isinstance(target, LispSymbol)
    new_value = expression.scm_eval(env)
    env.set(target.name, new_value)
    return nil
# -----------------------------------------------------------------------------
# DEFINITION
#
# 1. (define <var> <value>)
# 2. (define (<var> <param1> ... <paramN> ) <body1> ... )
# 2. (define (<var> . <param1> ) <body1> ... )
#
# 1. Add <var> to the environment with the value eval(<value>).
# 2. Convert the second form to define a lambda expression.
# (define <var> ( lambda (<param1> ... <paramN>) <body> ))
# process this in the same way as form one.
#
# example:
# ok <= (define m 2)
# ok <= (define (add8 y) (+ 8 y) )
# 10 <= (add8 m)
# -----------------------------------------------------------------------------
def define_func(args, env):
    """Special form `define`, in two shapes.

    1. (define <var> <value>)            -- bind <var> to eval(<value>)
    2. (define (<var> . <params>) <body>) -- sugar for a lambda definition

    Shape 2 is rewritten into shape 1 with an explicit (lambda ...) form
    and then handled identically.  Returns nil.
    """
    if isinstance(first(args), LispSymbol):
        # shape 1: the parsed list is [<var>, <value>, nil], hence length 3
        args = to_list(args)
        assert args[-1] is nil
        assert len(args) == 3
        var = args[0]
        value = args[1]
    elif isinstance(first(args), LispPair):
        # shape 2: rewrite into (lambda <params> <body>) and fall through
        var = first(first(args))
        param = rest(first(args))
        body = rest(args)
        assert isinstance(var, (LispSymbol, LispPair))
        value = from_list([mksym("lambda"), param, body])
    else:
        raise Exception("invalid form")
    assert isinstance(var, LispSymbol)
    result = value.scm_eval(env)
    # TODO: pass a real datatype instead of None (second arg of define)
    env.define(var.name, None, result)
    return nil
# -----------------------------------------------------------------------------
# IF
#
# (if <pred> <cons> <alt> )
# (if <pred> <cons> )
#
# evaluate the predicate. If it's true then
# evaluate the consequence, otherwise
# evaluate the alternative (or nil if there is no alt)
#
# example:
# y <= (if true? 'y 'n)
# 5 <= (if (= 2 3) (- 3) (+ 2 3) )
# nil <= (if (= 2 3) 'boop)
# -----------------------------------------------------------------------------
def if_func(args, env):
    """Special form (if <pred> <cons> [<alt>]).

    Evaluates <pred>; if the result is the `true` singleton, evaluates and
    returns the consequence, otherwise the alternative.  For the
    two-argument form the parsed list still carries its trailing nil, so
    `alternative` becomes nil (presumably nil evaluates to itself --
    confirm in datatypes).
    """
    args = to_list(args)
    assert args[-1] is nil
    assert 3 <= len(args) <= 4
    predicate = args[0]
    consequence = args[1]
    # for (if p c) this is the list's nil terminator, not a user form
    alternative = args[2]
    result = predicate.scm_eval(env)
    # NOTE(review): only the exact `true` singleton selects the consequence;
    # any other value falls through to the alternative.
    if result is true:
        return consequence.scm_eval(env)
    else:
        return alternative.scm_eval(env)
# -----------------------------------------------------------------------------
# LAMBDA
#
# (lambda (<param1> ... <paramN>) <body1> ... )
# (lambda <param> <body1> ... )
#
# make a procedure, <parameters> can be a symbol, proper-list or
# dotted-list. when evaluated returns the value of (eval <bodyN>)
# in an environment where <parameters> are bound to the arguments.
#
# example:
# #FUN <= (lambda (x) (+ 3 x))
# 13 <= ((lambda (x) (+ 3 x)) 10)
# 222 <= ((lambda (x) (+ 111 x) 222) 333)
# -----------------------------------------------------------------------------
def lambda_func(args, env):
    """Special form (lambda <params> <body>...): build a procedure object."""
    return LispLambda(first(args), rest(args))
# -----------------------------------------------------------------------------
# BEGIN
#
# (begin <exp1> ... <expN>)
#
# evaluate each expression in turn. Returning the result
# of that last one.
#
# example:
# 5 <= (begin 2 3 4 5)
# nil <= (begin (+ x 3) nil) // shouldn't change x
# 4 <= (begin (set! x 3) 4) // should change x
# -----------------------------------------------------------------------------
def begin_func(args, env):
    """Special form (begin e1 ... eN): evaluate each expression in order
    and return the value of the last one."""
    exprs = to_list(args)
    assert exprs[-1] is nil, "invalid args for 'begin': %s" % exprs
    assert len(exprs) >= 2, "invalid args for 'begin': %s" % exprs
    result = None
    for expr in exprs[:-1]:
        result = expr.scm_eval(env)
    return result
# -----------------------------------------------------------------------------
# class
#
# (class <parent1> ...)
#
# create a new class
#
# -----------------------------------------------------------------------------
def class_func(args, env):
    """Special form (class <parent1> ...): build a class from its parents."""
    # drop the trailing nil terminator, then evaluate each parent form
    evaled_parents = [form.scm_eval(env) for form in to_list(args)[:-1]]
    return LispClass(evaled_parents)
# -----------------------------------------------------------------------------
# class-define!
#
# (class-define! <class-name> <var-name> <type>)
# (class-define! <class-name> <var-name>)
#
# add a variable to a class
#
# (class-define! Point + (Lambda Point Point Point))
# (class-define! Point y Int)
#
# -----------------------------------------------------------------------------
def class_define_func(args, env):
    """(class-define! <class> <var> [<type>]): add a slot to a class.

    The optional <type> argument is currently ignored (always None).
    """
    target_class = first(args).scm_eval(env)
    slot_name = first(rest(args)).scm_eval(env).name
    target_class.define(slot_name, None)
    return nil
return nil
# -----------------------------------------------------------------------------
# class-set!
#
# (class-set! <class-name> <var-name> <value>)
#
# set a class variable's value
#
# -----------------------------------------------------------------------------
def class_set_func(args, env):
    """(class-set! <class> <var> <value>): assign a class slot's value."""
    class_form = first(args)
    slot_form = first(rest(args))
    value_form = first(rest(rest(args)))
    # evaluation order: class, then slot name, then value
    target_class = class_form.scm_eval(env)
    slot_name = slot_form.scm_eval(env).name
    target_class.set(slot_name, value_form.scm_eval(env))
    return nil
# -----------------------------------------------------------------------------
# class-chmod!
#
# (class-chmod! <class-name> <var-name> . <flags>)
#
# set a class variable's permission
#
# (class-chmod! Point str 'read-only)
# (class-chmod! Point x 'any 'virtual)
#
# -----------------------------------------------------------------------------
def class_chmod_func(args, env):
    """(class-chmod! <class> <var> . <flags>): set a slot's permissions."""
    class_form = first(args)
    slot_form = first(rest(args))
    # remaining args are the flags; drop the trailing nil terminator
    flag_forms = to_list(rest(rest(args)))[:-1]
    target_class = class_form.scm_eval(env)
    slot_name = slot_form.scm_eval(env).name
    flag_names = [flag.scm_eval(env).name for flag in flag_forms]
    target_class.chmod(slot_name, flag_names)
    return nil
# -----------------------------------------------------------------------------
# class-finalize!
#
# (class-finalize! <class-name> )
#
# set a class variable's permission
#
# (class-finalize! Point)
#
# -----------------------------------------------------------------------------
def class_finalize_func(args, env):
    """(class-finalize! <class>): mark the class as finalised."""
    target_class = first(args).scm_eval(env)
    target_class.finalised = True
    return nil
# -----------------------------------------------------------------------------
# Macro
#
# (mac (<param1> ... <paramN>) <body1> ... )
# (mac <param> <body1> ... )
#
# make a procedure, <parameters> can be a symbol, proper-list or
# dotted-list. when evaluated returns the value of (eval <bodyN>)
# in an environment where <parameters> are bound to the arguments.
#
# example:
# #FUN <= (mac (x) (+ 3 x))
# (+ 1)<= ((mac (x) x) (+ 1))
# #FUN <= (define when (mac (test . body) (list ('if test (cons 'begin body))))
# jam <= (when (= 4 4) 'jam)
#
# -----------------------------------------------------------------------------
def macro_func(args, env):
    """Special form (mac <params> <body>...): like lambda, but the result
    is flagged as a macro (third LispLambda argument)."""
    return LispLambda(first(args), rest(args), True)
# -----------------------------------------------------------------------------
# quasiquote
#
# (quasiquote <param>)
#
# If no `unquote` appear within the <param>, the result of is equivalent to
# evaluating quote. If `unquote` does appears the expression following the
# comma is evaluated and its result is inserted into the structure instead of
# the comma and the expression.
#
# It is basically a completely new way to eval a sexp. One where only unquote
# and unquote-splicing and quasiquote really do anything interesting.
#
#
# example:
#
# 'a --> a
# (quote a b) --> ERROR (too many args)
# `a --> a
# `,a --> eval(a)
# (quasiquote a b) --> ERROR (too many args)
# `(a (unquote d b)) --> ERROR (too many args)
# (unquote a b) --> ERROR (not in quasiquote)
# `(a) --> (a)
# `(a ,c) --> (a eval(c))
# `(a (b ,c)) --> (a (b eval(c)))
# ``,a --> `,a
# `(list ,(+ 1 2) 4) --> (list 3 4)
# `(a `(b ,c) d) --> (a `(b ,c) d)
#
# -----------------------------------------------------------------------------
def unquote_func(args, env):
    """Guard binding for bare (unquote ...).

    quasiquote_func interprets unquote itself; reaching this function
    means the form appeared outside a quasiquote, which is an error.
    Fixes the typo in the error message ("outsite" -> "outside").
    """
    raise Exception("Cannot call unquote outside of quasiquote")
def quasiquote_func(args, env):
    """Special form (quasiquote <exp>), i.e. `<exp>.

    Behaves like quote, except any (unquote x) inside <exp> is replaced
    by eval(x).  A nested (quasiquote ...) is left untouched, and
    unquote-splicing is not implemented yet.
    """
    assert rest(args) is nil, "ERROR (too many args in quasiquote)"
    arg = first(args)
    if not isinstance(arg, LispPair):
        # atoms behave exactly like quote
        return arg
    else:
        # rebuild the list structure with every 'unquote' evaluated
        def inner_qq(inargs):
            # dispatch on the head symbol of this (sub)list
            if first(inargs) is mksym("unquote"):
                # (unquote x) -> (eval x); exactly one operand allowed
                assert rest(rest(inargs)) is nil
                return first(rest(inargs)).scm_eval(env)
            elif first(inargs) is mksym("quasiquote"):
                # nested quasiquote is returned verbatim
                assert rest(rest(inargs)) is nil
                return inargs
            elif first(inargs) is mksym("unquote-splicing"):
                raise Exception("Not implemented")
            else:
                # ordinary list: recurse into pair elements, copy atoms
                newlist = []
                while isinstance(inargs, LispPair):
                    if isinstance(first(inargs), LispPair):
                        newlist.append(inner_qq(first(inargs)))
                    else:
                        newlist.append(first(inargs))
                    inargs = rest(inargs)
                # deal with the final element (which is probably a nil,
                # but may be the tail of a dotted list)
                if isinstance(inargs, LispPair):
                    newlist.append(inner_qq(inargs))
                else:
                    newlist.append(inargs)
                # put the python list back into sexp form
                return from_list(newlist)
        return inner_qq(arg)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def read_file(stream, env):
    """Tokenize, parse and evaluate an entire stream of source code.

    The value of each expression is discarded; only the environment the
    file populated matters, and it is returned so callers can reach the
    definitions the file created.  When env is None a fresh child of the
    default environment is used and its parent link is removed afterwards.
    """
    no_env = env is None
    if no_env:
        # use the default (whatever that is) as a parent; extending it
        # keeps the default builtins out of the imported module's own frame
        env = Environment([], [], basic_environment)
    for sexp in parse(tokenize(iter(stream))):
        sexp.scm_eval(env)
    # if we created the environment ourselves, drop the parent link so
    # things like `quote` are not exposed through the returned module
    if no_env:
        del env.variables["__parent__"]
    # as the environment is a class instance we can just return it
    return env
def import_func(args, env):
    """(import "file"): evaluate a file in a fresh environment and return it.

    Fix: the original leaked the file handle; `with` closes it
    deterministically once read_file has consumed the stream.
    """
    file_name = first(args)
    assert rest(args) is nil
    assert isinstance(file_name, LispString)
    with open(file_name.text) as stream:
        return read_file(stream, None)
def include_func(args, env):
    """(include "file"): evaluate a file in the CURRENT environment.

    Fix: the original leaked the file handle; `with` closes it
    deterministically.  Returns nil.
    """
    file_name = first(args)
    assert rest(args) is nil
    assert isinstance(file_name, LispString)
    with open(file_name.text) as stream:
        read_file(stream, env)
    return nil
# -----------------------------------------------------------------------------
def env_func(args, env):
    """(env): expose the current Environment object itself; args are ignored."""
    return env
# -----------------------------------------------------------------------------
def predefined_function(inputfunction):
    """Wrap a Python callable as a Lisp builtin.

    The returned func evaluates each argument form and applies
    *inputfunction* to the results; a Python None result maps to nil.

    Fix: the trailing nil terminator is now sliced off BEFORE evaluation
    (matching class_func), instead of being evaluated and then discarded.
    """
    def func(args, env):
        evaled_args = [arg.scm_eval(env) for arg in to_list(args)[:-1]]
        result = inputfunction(*evaled_args)
        if result is None: result = nil
        return result
    return func
def to_scm_bool(x):
    """Map Python truthiness onto the Lisp true/false singletons."""
    return true if x else false
def two_integer_function(inputfunction):
    """Wrap a binary Python function as a Lisp builtin over two integers.

    Arguments are evaluated, checked to be exactly two LispIntegers, and
    their .num values passed to *inputfunction*; a plain int result is
    re-boxed as a LispInteger (booleans pass through unchanged).

    Fix: the trailing nil terminator is sliced off BEFORE evaluation
    (matching class_func), instead of being evaluated and discarded.
    """
    def func(args, env):
        evaled_args = [arg.scm_eval(env) for arg in to_list(args)[:-1]]
        assert len(evaled_args) == 2
        assert isinstance(evaled_args[0], LispInteger)
        assert isinstance(evaled_args[1], LispInteger)
        result = inputfunction(evaled_args[0].num, evaled_args[1].num)
        if isinstance(result, int):
            result = LispInteger(result)
        return result
    return func
def display(text):
    """Write *text* to stdout (backs the Lisp `display` builtin).

    Fix: use the parenthesised call form so the module also runs under
    Python 3 (the bare `print text` statement is Python 2 only).
    """
    print(text)
# -----------------------------------------------------------------------------
def make_basic_environment():
    """Build the root Environment holding every special form and builtin."""
    bindings = [
        ("nil", nil),
        ("true", true),
        ("false", false),
        ("quote", quote_func),
        ("set!", set_func),
        ("define", define_func),
        ("if", if_func),
        ("lambda", lambda_func),
        ("begin", begin_func),
        ("class", class_func),
        ("class-define!", class_define_func),
        ("class-set!", class_set_func),
        ("class-chmod!", class_chmod_func),
        ("class-finalize!", class_finalize_func),
        ("BaseClass", class_base),
        ("mac", macro_func),
        ("quasiquote", quasiquote_func),
        ("unquote", unquote_func),
        ("import", import_func),
        ("include", include_func),
        ("env", env_func),
        ("stack", predefined_function(get_stack)),
        ("display", predefined_function(lambda a: display(str(a)))),
        ("newline", predefined_function(lambda a: display("\n"))),
        ("cons", predefined_function(cons)),
        ("car", predefined_function(first)),
        ("cdr", predefined_function(rest)),
        ("is?", predefined_function(lambda x, y: to_scm_bool(x is y))),
        ("equal?", predefined_function(lambda x, y: to_scm_bool(x == y))),
        ("+", two_integer_function(lambda a, b: a + b)),
        ("*", two_integer_function(lambda a, b: a * b)),
        ("-", two_integer_function(lambda a, b: a - b)),
        ("<", two_integer_function(lambda a, b: to_scm_bool(a < b))),
        (">", two_integer_function(lambda a, b: to_scm_bool(a > b))),
        ("=", two_integer_function(lambda a, b: to_scm_bool(a == b))),
        ("<=", two_integer_function(lambda a, b: to_scm_bool(a <= b))),
        (">=", two_integer_function(lambda a, b: to_scm_bool(a >= b))),
    ]
    names = [name for name, _ in bindings]
    values = [value for _, value in bindings]
    return Environment(names, values, None)
basic_environment = make_basic_environment()
# -----------------------------------------------------------------------------
# anything that can be called or is stored in the environment needs to have its types checked.
# You can call something currently one of 3 ways.
# defined in function.py (special form)
# LispLambda.__call__
# LispClass.__call__
# you can change the environment by:
# define
# set!
# you can also change things in the environment through
# class-set!
|
## Enter tile info here
# Five category lists of tile labels; mainList groups them in draw order.
# NOTE(review): 'Odesseus' is probably a typo for 'Odysseus', but this is
# player-facing game data -- confirm before changing the string.
bList = ['Jason','Hercules','Theseus','Odesseus','e','90','80']
iList = ['753 B.C.','509 B.C.','27 B.C.','476 A.D.','264 B.C.','146 B.C.','60 B.C.','43 B.C.','72 B.C.', '44 B.C.']
nList = ['k','l','m','n','o','90','80']
gList = ['p','q','r','s','t','90','80']
oList = ['u','v','w','x','y','90','80']
mainList = [bList, iList, nList, gList, oList]
def CheckRun():
    """Validate the tile lists, then start the game (or report the problem)."""
    if ListCheck():
        main()
    else:
        print("Lists are not the minimum length, please correct.")
## Resets Checklist to a full list containing all items in mainList
def FillCheckList(checkList):
    """Append every tile from all five category lists to checkList.

    Fix: the original iterated range(length-1), an off-by-one that
    skipped the final element of oList so one tile could never be drawn.
    The index arithmetic simply concatenated the five lists in order,
    which extend() does directly.
    """
    for category in (bList, iList, nList, gList, oList):
        checkList.extend(category)
    return checkList
## Checks equal length for all the lists
def ListCheck():
    """Return True when every category list has more than 5 entries.

    Fix: the original tested `(i > 5)` twice and never tested `o`, so an
    undersized oList slipped through the validation.
    """
    return all(len(lst) > 5 for lst in (bList, iList, nList, gList, oList))
## Returns a random letter from a given list
def GetLetter(mainList, checkList):
    """Draw one still-available tile from each of the five category lists.

    Re-rolls until the pick is present in checkList; each chosen tile is
    removed from checkList so it cannot be drawn again.
    """
    picks = []
    for row in range(5):
        options = mainList[row]
        choice = random.randint(0, 4)
        while options[choice] not in checkList:
            choice = random.randint(0, len(options) - 1)
        picks.append(options[choice])
        checkList.remove(options[choice])
    return picks
|
# -*- coding: utf-8 -*-
"""
qr_code helper utils
"""
import platform
import qrcode # type: ignore
def qr_terminal(data: str, version=None) -> None:
    """
    Render a QR code for *data* to stdout as coloured terminal blocks.

    :param data: qrcode data
    :param version: 1-40 or None (None lets the library size the code)
    :return: None -- the code is printed, not returned
    """
    if platform.system() == 'Windows':
        # cmd.exe has no ANSI colour support; use a filled glyph instead
        white_block = '▇'
        black_block = ' '
        new_line = '\n'
    else:
        # ANSI escapes: white-background and black-background block cells
        white_block = '\033[0;37;47m '
        black_block = '\033[0;37;40m '
        new_line = '\033[0m\n'
    qr = qrcode.QRCode(version)
    qr.add_data(data)
    if version:
        qr.make()
    else:
        # no explicit version: let the library grow the code to fit
        qr.make(fit=True)
    # top quiet-zone border, one cell wider than the code on each side
    output = white_block * (qr.modules_count + 2) + new_line
    for mn in qr.modules:
        output += white_block
        for m in mn:
            if m:
                output += black_block
            else:
                output += white_block
        output += white_block + new_line
    # bottom quiet-zone border
    output += white_block * (qr.modules_count + 2) + new_line
    print(output)
|
# Create a file and write two lines to it.  Fix: the original never closed
# any of its three handles, so the appended line could still be sitting in
# an unflushed buffer when the file was read back; `with` blocks flush and
# close each handle deterministically.
with open('myFiles.txt', 'w') as myFile:
    myFile.write("I love python \n")
    myFile.write("I love javascript \n")

# Append to file
with open('myFiles.txt', 'a') as myFile:
    myFile.write("I kinda like php")

# Read a file
with open('myFiles.txt', 'r+') as myFile:
    text = myFile.read()
print(text)
|
def doit():
    """Print the debug marker line and return the sentinel value 999."""
    marker = "MMMMMMMMMMM"
    print(marker)
    return 999
|
def solve(arr):
    """Rearrange arr in place into a zigzag pattern and print it.

    After sorting, the lower half (reversed, so descending) fills the even
    indices and the upper half fills the odd indices -- every even-index
    element ends up <= its odd-index neighbours.
    """
    arr.sort()
    half = (len(arr) + 1) // 2
    lower, upper = arr[:half], arr[half:]
    arr[::2] = lower[::-1]
    arr[1::2] = upper
    print(arr)
# arr = [1,2,3,4,5,6,7]
arr = [1,2,1,4,5,6,8,8]
solve(arr)
from django.conf.urls import url, include
from django.urls import path
from .views import *
import madadkar.views

# (route, view, url-name) triples, kept in the original registration order.
_ROUTES = [
    ('', madadkar.views.madadkarhome, 'madadkar-home'),
    ('goals/', madadkar.views.madadkargoal, 'madadkar-goals'),
    ('history/', madadkar.views.madadkarhistory, 'madadkar-history'),
    ('chart/', madadkar.views.madadkarchart, 'madadkar-chart'),
    ('contact/', MadadkarContact.as_view(), 'madadkar-contact'),
    ('profile/', madadkar.views.madadkarprofile, 'madadkar-profile'),
    ('editmadadju/', madadkar.views.editmadadju, 'madadkar-edit-madadju'),
    ('editneed/', madadkar.views.editneed, 'editneed'),
    ('instantneed/', madadkar.views.instantneed, 'instantneed'),
    ('madadju-register/', madadkar.views.madadjuregister, 'madadkar-madadju-register'),
    ('managesaving/', madadkar.views.managesaving, 'managesaving'),
    ('receipt/', madadkar.views.receipt, 'receipt'),
    ('report/', madadkar.views.report, 'report'),
    ('seemsg/', madadkar.views.seemsg, 'seemsg'),
    ('seereq/', madadkar.views.seereq, 'seereq'),
    ('success/', madadkar.views.success, 'success'),
    ('taaligh/', madadkar.views.taaligh, 'taaligh'),
]
urlpatterns = [path(route, view, name=name) for route, view, name in _ROUTES]
|
from application import app
@app.template_filter('reverse')
def reverse_filter(s):
    """Jinja2 template filter: return the sequence *s* reversed.

    Registered under the name 'reverse', usable as {{ value|reverse }}.
    """
    return s[::-1]
# Equivalent manual registration: app.jinja_env.filters['reverse'] = reverse_filter
|
import urllib
class KatSearch:
    """Fluent builder for Kickass Torrents search URLs.

    Every modifier returns self so calls can be chained, e.g.::

        KatSearch().include("ubuntu iso").exclude("beta").toUrl()
    """
    def __init__(self):
        self.protocol = "http"
        self.katDomain = "kat.cr"
        self.includedWords = []
        self.excludedWords = []
        self.category = None
        self.minSeeds = None
        self.orderByField = None
    def include(self, words):
        """Add whitespace-separated words that must appear in results."""
        self.includedWords += words.split()
        return self
    def exclude(self, words):
        """Add whitespace-separated words that must NOT appear in results."""
        self.excludedWords += words.split()
        return self
    def inCategory(self, category):
        """Restrict the search to a single category."""
        self.category = category
        return self
    def withMinSeeds(self, minSeeds):
        """Require at least minSeeds seeders."""
        self.minSeeds = minSeeds
        return self
    def orderBy(self, field, order):
        """Sort results by field in the given order ('asc'/'desc')."""
        self.orderByField = (field, order)
        return self
    def toUrl(self):
        """Build the final search URL from the accumulated criteria.

        Fix: `keywords` is initialised up front; previously it was only
        bound inside the includedWords branch, so building a URL with no
        included words raised UnboundLocalError.
        """
        keywords = ""
        if(len(self.includedWords) > 0):
            keywords = " ".join(self.includedWords) + " "
        if(len(self.excludedWords) > 0):
            keywords += " ".join(map(lambda word: "-"+word, self.excludedWords)) + " "
        if(self.category is not None):
            keywords += "category:" + self.category + " "
        if(self.minSeeds is not None):
            keywords += "seeds:" + str(self.minSeeds) + " "
        keywords = keywords.strip()
        keywords = urllib.quote(keywords)
        orderClause = ""
        if(self.orderByField is not None):
            orderClause = "?field={}&sorder={}".format(*self.orderByField)
        return "{}://{}/usearch/{}/{}".format(self.protocol, self.katDomain, keywords, orderClause)
|
#python imports
import sys
import os
import subprocess
import json
import requests
from requests.auth import HTTPBasicAuth
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Search - Description: Retrieves any available data for a target against the IBM XForce database.
***END DESCRIPTION***
'''
def POE(POE):
    """Query the IBM X-Force Exchange malware API for the current target hash.

    Looks up the target's SHA256/MD5/SHA1 hash, writes the raw JSON
    response to <logdir>/XForceReport.json, and echoes the key malware
    attributes to the console (and to the HTML log when POE.logging is on).

    Returns 0 on success, -1 on any error (missing credentials, bad hash,
    connection failure, non-200 response, or report-write failure).

    Fix: every LOG.* call is now guarded by POE.logging -- LOG is only
    created when logging is enabled, so the original raised NameError on
    several error/success paths when logging was off.
    """
    APIKey = ''
    APIPassword = ''
    LOG = None
    newlogentry = ''
    response_dump = ''
    xf = ''
    if (POE.logging == True):
        LOG = logger()
        newlogentry = 'Module: XForceSearch'
        LOG.WriteStrongLog(POE.logdir, POE.target, newlogentry)
    # Pull the X-Force key/password pair out of the configured API keys.
    for apikeys in POE.apikeys:
        for key, value in apikeys.items():
            if (POE.debug == True):
                print ('[DEBUG] API: ' + str(key) + ' | API Key: ' + str(value))
            if (key == 'xforceapi'):
                print ('\r\n[*] API key located!')
                APIKey = value
            if (key == 'xforcepassword'):
                print ('[*] API password located!')
                APIPassword = value
    if (APIKey == ''):
        print (colored('\r\n[x] An IBM X-Force Exchange API Key has not been input. Create an account and generate an API Key and then apply to /opt/static/static.conf', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to execute XForce reputation module - API Key/Password value not input. Please add one to /opt/static/static.conf'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1
    if (APIPassword == ''):
        print (colored('\r\n[x] An IBM X-Force Exchange API Key Password has not been input. Create an account and generate an API Key and then apply to /opt/static/static.conf', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to execute XForce reputation module - API Key/Password value not input. Please add one to /opt/static/static.conf'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1
    output = POE.logdir + 'XForceReport.json'
    FI = fileio()
    print (colored('[*] Running X-Force Search against: ' + POE.target, 'white', attrs=['bold']))
    # Pick the strongest available hash for the lookup URL.
    if (POE.SHA256):
        xf = 'https://api.xforce.ibmcloud.com/malware/' + POE.SHA256
        print ('[*] SHA256 hash detected...')
    elif (POE.MD5):
        xf = 'https://api.xforce.ibmcloud.com/malware/' + POE.MD5
        print ('[*] MD5 hash detected...')
    elif (POE.SHA1):
        xf = 'https://api.xforce.ibmcloud.com/malware/' + POE.SHA1
        print ('[*] SHA1 hash detected...')
    else:
        print (colored('[x] A valid search hash is not present. Terminating...', 'red', attrs=['bold']))
        return -1
    try:
        req = requests.get(xf, auth=HTTPBasicAuth(APIKey, APIPassword))
        response_dump = json.loads(req.content.decode("UTF-8"))
    except requests.ConnectionError:
        print (colored('[x] Unable to connect to IBM X-Force\'s reputation site', 'red', attrs=['bold']))
        return -1
    if (req.status_code != 200):
        print (colored("[-] HTTP {} returned".format(req.status_code), 'yellow', attrs=['bold']))
        if (req.status_code == 404):
            print (colored('[-] Target not found in dataset...', 'yellow', attrs=['bold']))
        elif (req.status_code == 403):
            print (colored('[x] 403 Forbidden - something is wrong with the connection or credentials...', 'red', attrs=['bold']))
        return -1
    try:
        FI.WriteLogFile(output, json.dumps(response_dump, indent=4, sort_keys=True))
        print (colored('[*] X-Force search report data had been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
        if ((POE.logging == True) and (POE.nolinksummary == False)):
            newlogentry = 'X-Force search report data has been generated to file here: <a href=\"' + output + '\"> XForce Search Output </a>'
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except:
        print (colored('[x] Unable to write X-Force search data to file', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to write X-Force search data to file'
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1
    try:
        # Read every field first, so a missing key aborts before any output
        # (matching the original read-then-print ordering).
        external = response_dump['malware']['origins']['external']
        detectioncoverage = external['detectionCoverage']
        firstseen = external['firstSeen']
        lastseen = external['lastSeen']
        family = external['family'][0]
        malwaretype = external['malwareType']
        source = external['source']
        platform = external['platform']
        # (console text, log text) pairs; note the last entry logs
        # 'Sample source' while printing 'Malware source', as before.
        report_lines = [
            ('Sample detection coverage: ' + str(detectioncoverage) + ' A/V vendors.',
             'Sample detection coverage: ' + str(detectioncoverage) + ' A/V vendors.'),
            ('Sample first seen: ' + firstseen, 'Sample first seen: ' + firstseen),
            ('Sample last seen: ' + lastseen, 'Sample last seen: ' + lastseen),
            ('Malware family: ' + family, 'Malware family: ' + family),
            ('Malware type: ' + malwaretype, 'Malware type: ' + malwaretype),
            ('Malware platform: ' + platform, 'Malware platform: ' + platform),
            ('Malware source: ' + source, 'Sample source: ' + source),
        ]
        for console_text, log_text in report_lines:
            print ('[*] ' + console_text)
            if (POE.logging == True):
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, log_text)
    except:
        # broad catch kept deliberately: any missing JSON key means the
        # response did not match the expected schema
        print (colored('[-] JSON heading mismatch...', 'yellow', attrs=['bold']))
    return 0
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QDial, QSpinBox
from PyQt5.QtGui import QFont
class MainWindow(QWidget):
    """Demo window: a dial and a spin box kept in sync in both directions."""
    def __init__(self):
        super().__init__()
        self.resize(400, 300)

        big_font = QFont('', 16)

        knob = QDial(self)
        knob.resize(175, 175)
        knob.move(30, 30)
        knob.setNotchesVisible(True)

        counter = QSpinBox(self)
        counter.resize(50, 50)
        counter.move(250, 100)
        counter.setFont(big_font)

        # two-way binding between the widgets
        knob.valueChanged.connect(counter.setValue)
        counter.valueChanged.connect(knob.setValue)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop until the app exits.
    app = QApplication(sys.argv)
    demo = MainWindow()
    demo.show()
    sys.exit(app.exec_())
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Company(models.Model):
    """A vendor; exposes a summary listing joined with product ecosystems."""
    name = models.TextField(max_length=300)
    description = models.TextField(max_length=500, null=True)
    def __str__(self):
        return self.name
    @staticmethod
    def get_company_list():
        """Return (id, name, description, ecosystems-csv) for every company.

        Ecosystem names come from the company's products, de-duplicated
        via set() (so their CSV order is unspecified) and comma-joined.
        NOTE(review): the per-company filter issues one query per company
        (N+1 pattern) -- confirm acceptable at the expected data size.
        """
        companies = Company.objects.all()
        ecosystems = ProductEcosystems.objects.all().select_related('product', 'product__company', 'description')
        result = []
        for company in companies:
            ecosystem = ecosystems.filter(product__company_id=company.id).order_by('description__name')
            ecolist = [eco.description.name for eco in ecosystem]
            result.append((company.id, company.name,
                           company.description if company.description else '', ', '.join(set(ecolist))))
        return result
class Product(models.Model):
    """A company's product, with helpers for class/ecosystem lookups."""
    company = models.ForeignKey(Company, on_delete=models.CASCADE, related_name='products')
    name = models.TextField(max_length=300)
    def __str__(self):
        return self.name
    @staticmethod
    def get_product_list(company_id):
        """Return (id, name, classes-csv, ecosystems-csv) for each product.

        NOTE(review): the inner query filters ProductClasses by
        product__company_id -- the whole company, not the individual
        product -- so every row shows the company-wide class/ecosystem
        sets and the query is loop-invariant.  Verify whether
        product_id=product.id was intended.
        """
        products = Product.objects.filter(company_id__exact=company_id)
        result = []
        for product in products:
            classes = ProductClasses.objects.filter(product__company_id=company_id) \
                .select_related('description', 'description__ecosystem')
            classes_list = set([class_.description.name for class_ in classes])
            ecosys_list = set([class_.description.ecosystem.name for class_ in classes])
            result.append((product.id, product.name, ", ".join(classes_list), ", ".join(ecosys_list)))
        return result
    @staticmethod
    def get_product_classes(product_id):
        """Return the de-duplicated class names attached to one product."""
        product_classes = ProductClasses.objects.filter(product_id=product_id).select_related('description__ecosystem')
        classes_list = set([(class_.description.name) for class_ in product_classes])
        return classes_list
    @staticmethod
    def get_product_ecosystems(product_id):
        """Return the de-duplicated ecosystem names for one product."""
        product_classes = ProductClasses.objects.filter(product_id=product_id).select_related('description__ecosystem')
        ecosystems_list = set([class_.description.ecosystem.name for class_ in product_classes])
        return ecosystems_list
class ParametersDescription(models.Model):
    """Catalogue entry for a parameter: its name and whether it is mandatory."""
    name = models.TextField(max_length=300)
    mandatory = models.BooleanField()
    def __str__(self):
        return self.name
class ProductParameters(models.Model):
    """A concrete parameter value on a product, plus comparison helpers."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='parameters')
    description = models.ForeignKey(ParametersDescription, on_delete=models.CASCADE)
    value = models.TextField(max_length=500)
    def __str__(self):
        return self.value
    @staticmethod
    def get_product_parameters(product_id):
        """Return (id, name, mandatory, value) for one product's parameters."""
        parameters = ProductParameters.objects.filter(product_id=product_id).select_related('description')
        return [(parameter.id, parameter.description.name,
                 parameter.description.mandatory, parameter.value) for parameter in parameters]
    @staticmethod
    def get_class_parameters(product_id):
        """Return (id, name, mandatory) for parameters of the product's classes."""
        classes = ProductClasses.objects.filter(product_id=product_id)
        class_parameters = ProductParameters.objects.filter(product__classes__in=classes).select_related('description')
        return [(class_parameter.id, class_parameter.description.name,
                 class_parameter.description.mandatory) for class_parameter in class_parameters]
    @staticmethod
    def get_parameters_distinct(product_id):
        """Return class parameters the product does not define itself."""
        class_params = ProductParameters.get_class_parameters(product_id)
        class_params_ids = set([param[0] for param in class_params])
        prod_params_ids = set([param[0] for param in ProductParameters.get_product_parameters(product_id)])
        distinct_ids = class_params_ids - prod_params_ids
        return [cp for cp in class_params if cp[0] in distinct_ids]
    @staticmethod
    def construct_comparison_table(class_id):
        """Build a row-per-parameter comparison table for one class.

        The first row is [''] + product names; each following row is
        [(param name, mandatory), value1, value2, ...], with '' where a
        product has no value for that parameter.
        """
        class_parameters = ProductParameters.objects.filter(product__classes__description_id=class_id) \
            .select_related('description')
        param_descr_ids = set([cp.description.id for cp in class_parameters])
        class_param_descr = ParametersDescription.objects.all()
        # keep only descriptions actually used by this class, in table order
        first_column_data = [(parameter_descr.id, parameter_descr.name, parameter_descr.mandatory)
                             for parameter_descr in class_param_descr
                             if parameter_descr.id in param_descr_ids]
        # map description id -> row index so values land in the right row
        indexes = {}
        for i, fcd in enumerate(first_column_data):
            indexes[fcd[0]] = i
        products = Product.objects.filter(classes__description_id=class_id)
        product_columns = []
        product_names = []
        for product in products:
            product_names.append(product.name)
            # one column per product, pre-filled with '' for missing values
            product_parameters_column = ['' for i in range(len(first_column_data))]
            product_parameters = ProductParameters.objects.filter(product=product).all()
            for product_parameter in product_parameters:
                product_parameters_column[indexes.get(product_parameter.description_id)] \
                    = product_parameter.value
            product_columns.append(product_parameters_column)
        # transpose the per-product columns into per-parameter rows
        comparison_table = [[''] + product_names]
        for i, row in enumerate(first_column_data):
            comparison_table_row = [(row[1], row[2])]
            for j in range(len(product_names)):
                comparison_table_row.append(product_columns[j][i])
            comparison_table.append(comparison_table_row)
        return comparison_table
class EcosystemsDescription(models.Model):
    """Catalogue entry for an ecosystem (name + optional description)."""
    name = models.TextField(max_length=300)
    description = models.TextField(max_length=500, null=True)
    def __str__(self):
        return self.name
class ProductEcosystems(models.Model):
    """Link table: a product participates in an ecosystem."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='ecosystems')
    description = models.ForeignKey(EcosystemsDescription, on_delete=models.CASCADE)
class ClassesDescription(models.Model):
    """Catalogue entry for a product class, scoped to one ecosystem."""
    name = models.TextField(max_length=300)
    description = models.TextField(max_length=500, null=True)
    ecosystem = models.ForeignKey(EcosystemsDescription, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class ProductClasses(models.Model):
    """Link table: a product belongs to a class."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='classes')
    description = models.ForeignKey(ClassesDescription, on_delete=models.CASCADE)
class ProductLogs(models.Model):
    """Audit record: which user changed which product, when, and what."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='logs')
    # set automatically on insert, never updated afterwards
    timestamp = models.DateTimeField(auto_now_add=True)
    logrecords = models.TextField(max_length=500)
|
from rest_framework_jwt import authentication
from project.serializers import UserSerializer
from project.utils import LogUtilMixin
class JSONWebTokenAuthentication(authentication.JSONWebTokenAuthentication, LogUtilMixin):
    """JWT authentication backend extended with the project's logging mixin."""
    def authenticate(self, request):
        """
        Returns a two-tuple of `User` and token if a valid signature has been
        supplied using JWT-based authentication. Otherwise returns `None`.
        """
        return super(JSONWebTokenAuthentication, self).authenticate(request)
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the JWT login response body: the token plus the serialized user."""
    serialized_user = UserSerializer(user, context={'request': request})
    return {'token': token, 'user': serialized_user.data}
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
from django.test import LiveServerTestCase
import time
MAX_WAIT = 3
class NewVisitorTest(LiveServerTestCase):
    """End-to-end tests for the to-do list app, driven through headless Chrome."""
    options = Options()
    options.add_argument('--headless')
    def setUp(self):
        self.browser = webdriver.Chrome(options=self.options)
    def tearDown(self):
        self.browser.quit()
    # helpers
    def check_li_created(self, item_text):
        """Assert that an <li> whose rendered text equals item_text appears in
        #list_general, retrying (up to MAX_WAIT seconds) to allow slow loads."""
        start_time = time.time()
        while True:
            try:
                # renamed from "list" to avoid shadowing the builtin
                todo_list = self.browser.find_element_by_id('list_general')
                list_items = todo_list.find_elements_by_tag_name("li")
                # BUGFIX: the original compared item_text against
                # [item_text for item in list_items] — a list of the expected
                # text itself — which passed for ANY non-empty list.  Compare
                # against the actual rendered text of each <li> instead.
                self.assertIn(item_text, [item.text for item in list_items])
                return
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)
    def test_create_and_get_list(self):
        # Mike access the homepage
        self.browser.get(self.live_server_url)
        # He notices that the tab and the page's header mention to-do lists
        self.assertIn("To-Do", self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn("To-Do", header_text)
        # He's immediately prompted to create an to-do item
        inputbox = self.browser.find_element_by_id('new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'), 'Add a task')
        # He types 'meditate for 20min' into a textbox
        item_text = 'meditate for 20min'
        inputbox.send_keys(item_text)
        # When he hits enter the form is sent and the page updates showing his task in an ordered list
        inputbox.send_keys(Keys.ENTER)
        self.check_li_created(item_text)
        # The inputbox is still there. He adds another item which reads "clean gutters"
        item_text2 = 'clean gutters'
        inputbox = self.browser.find_element_by_id('new_item')
        inputbox.click()
        inputbox.send_keys(item_text2)
        inputbox.send_keys(Keys.ENTER)
        # The page updates again showing the two items
        self.check_li_created(item_text)
        self.check_li_created(item_text2)
    def test_unique_url_per_user(self):
        # Mike starts a list
        self.browser.get(self.live_server_url)
        inputbox = self.browser.find_element_by_id('new_item')
        inputbox.send_keys('meditate for 20min')
        inputbox.send_keys(Keys.ENTER)
        # BUGFIX: was 'medidate for 20min' (typo) — it never matched the item
        # actually typed above.
        self.check_li_created('meditate for 20min')
        # he sees that a unique URL to his list has been created
        mike_list_url = self.browser.current_url
        ## matches against regex
        self.assertRegex(mike_list_url, '/lists/.+')
        # loretta logins in from her laptop
        ## new session
        self.browser.quit()
        self.browser = webdriver.Chrome(options=self.options)
        self.browser.get(self.live_server_url)
        # and can't see Mike's list
        body = self.browser.find_element_by_tag_name('body')
        self.assertNotIn('meditate for 20min', body.text)
        self.assertNotIn('clean gutters', body.text)
        # loretta starts a new list and adds 'buy milk' to it
        inputbox = self.browser.find_element_by_id('new_item')
        inputbox.send_keys('buy milk')
        inputbox.send_keys(Keys.ENTER)
        self.check_li_created('buy milk')
        # she gets the unique url which links to ther list
        lor_list_url = self.browser.current_url
        self.assertRegex(lor_list_url, '/lists/.+')
        self.assertNotEqual(mike_list_url, lor_list_url)
|
'''some functions required to calculate the co-ordinates of the key points.'''
import numpy as np
def padRightDownCorner(img, stride, padValue):
    """Pad an (H, W, C) image on the bottom and right with padValue so both
    H and W become multiples of stride.

    Returns (img_padded, pad) where pad = [up, left, down, right]; the up and
    left amounts are always 0 here.
    """
    h, w = img.shape[0], img.shape[1]
    def _needed(dim):
        # extra rows/cols required to reach the next multiple of stride
        return 0 if dim % stride == 0 else stride - (dim % stride)
    pad = [0, 0, _needed(h), _needed(w)]
    img_padded = img
    # Each border strip is built from an existing edge slice (*0 + padValue),
    # which keeps the original's dtype-promotion behaviour intact.
    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((pad_up, img_padded), axis=0)
    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((pad_left, img_padded), axis=1)
    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, pad_down), axis=0)
    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, pad_right), axis=1)
    return img_padded, pad
|
# Expected Output
# Download
# Age is not valid, setting age to 0.
# You are young.
# You are young.
# You are young.
# You are a teenager.
# You are a teenager.
# You are old.
# You are old.
# You are old. |
# Debug/scratch script: prints two placeholder strings.
print('kjshd')
print('ksjdhfkjhd')
from pynmea.streamer import NMEAStream
from fractions import Fraction
from itertools import groupby
from math import modf
from common import *
try:
import ogr
except ImportError:
from osgeo import ogr
def fraction_to_rational(fra):
    """
    Take a Fraction and return a pyexiv2 Rational (pyexiv2 insists on its own
    rational type). If fra is not a Fraction, hand it back unchanged and hope
    for the best.
    """
    if fra.__class__.__name__ == 'Fraction':
        # Import deferred into the branch: callers that never pass a Fraction
        # no longer need pyexiv2 installed (the original imported it
        # unconditionally on every call).
        from pyexiv2.utils import Rational
        # limit_denominator() was previously computed twice; do it once.
        limited = fra.limit_denominator()
        return Rational(limited.numerator, limited.denominator)
    else:
        return fra
class coord(object):
    """
    Store a latitude or a longitude and provide some methods for converting to
    various formats. Coord will be reclassed by latitude and longitude classes
    that will provide methods specific to those types of coordinates.
    """
    def __init__(self,degrees,minutes):
        # Apparently all my type checking is bad form for python. Maybe
        # I will take it out at some point but probably not
        if degrees == None:
            # NOTE(review): this assignment still routes through __setattr__,
            # which calls int(None) and raises TypeError — confirm whether a
            # None degrees value is actually supported anywhere.
            self.degrees = None
        elif int(degrees) <> degrees:
            raise ValueError( "The value of %s changes when cast to an integer so it can not be used for degrees" % str(degrees) )
        elif abs(degrees) > 180:
            raise ValueError( "Degrees can not exceed 180." )
        else:
            self.degrees = int(degrees)
        if minutes == None:
            # NOTE(review): same issue as degrees above — __setattr__ calls
            # float(None), which raises TypeError.
            self.minutes = None
        elif not 0 <= float(minutes) < 60:
            raise ValueError( "Minutes must be between 0 and 60." )
        else:
            self.minutes = float(minutes)
    def __setattr__(self,name,value):
        # Re-validates degrees/minutes on every assignment.  Attribute names
        # other than these two fall through with no assignment at all.
        if name=='degrees':
            if int(value) <> value:
                # NOTE(review): the message references 'degrees', which is not
                # a name in this scope — this branch would raise NameError
                # instead of the intended ValueError.
                raise ValueError( "The value of %s changes when cast to an integer so it can not be used for degrees" % str(degrees) )
            elif abs(value) > 180:
                raise ValueError( "Degrees can not exceed 180." )
            else:
                super(coord,self).__setattr__(name,value)
        elif name=='minutes':
            if not 0 <= float(value) < 60:
                raise ValueError( "Minutes must be between 0 and 60." )
            else:
                super(coord,self).__setattr__(name,value)
    def __repr__(self):
        # e.g. "45 30.5"
        return "%i %g" % (self.degrees,self.minutes)
    def __unicode__(self):
        # e.g. u"45° 30.5'" (degree sign + minutes with trailing quote)
        return u'%i\u00B0 %g\'' % (self.degrees,self.minutes)
    def __str__(self):
        return unicode(self).encode('utf-8')
    @property
    def dms(self):
        """
        Return coordinates in a tuple of degrees,minutes,seconds.
        """
        seconds = 60 * modf( self.minutes )[0]
        return (self.degrees,int(self.minutes),seconds)
    @property
    def decimal_degrees(self):
        """
        Return the coordinate in decimal degrees.
        """
        from math import copysign as cps
        # copysign makes the minutes contribution carry the same sign as the
        # (signed) degrees value.
        return self.degrees + cps(self.minutes,self.degrees) / 60.0
    @property
    def nmea_string(self):
        """
        Return coordinates in a nmea style string.
        """
        # dddmm.mmmmmmmm: unsigned integer degrees concatenated with
        # zero-padded float minutes.
        return str(abs(self.degrees)) + "{:011.8f}".format( self.minutes )
    @property
    def exif_coord(self):
        """
        Return the coordinate in the format that can be used to assign to an
        exif tag using the pyexiv2 library.
        """
        from pyexiv2.utils import Rational
        (d,m,s) = self.dms
        return ( Rational(abs(d),1),Rational(m,1),Rational(s * 1e7,1e7) )
    def __adjust_sign(self,hemi):
        """
        If given 'N' or 'E' for the hemisphere, set the sign of the degrees to positive.
        If given 'S' or 'W' for the hemisphere, set the sign of the degrees to negative.
        If hemi is None, do nothing.
        """
        pos = ['N','E']
        neg = ['S','W']
        if hemi:
            if hemi.upper() in neg:
                self.degrees = -abs(self.degrees)
            elif hemi.upper() in pos:
                self.degrees = abs(self.degrees)
            else:
                raise ValueError( "Hemisphere should be N, S, E, or W. Why are you giving me %s?" % (hemi,) )
    @staticmethod
    def from_dms( d,m,s,hemi=None ):
        """
        Take degrees minutes and seconds and return a coord object in degrees
        and float minutes.
        """
        minutes = float(m) + s / 60.0
        c = coord( d, minutes )
        c.__adjust_sign(hemi)
        return c
    @staticmethod
    def from_dd( dec_deg, hemi=None ):
        """
        Take decimal degrees and return a coord object with integer degrees and
        float minutes.
        """
        # modf splits into (fractional, integral) parts.
        m,d = modf(dec_deg)
        m = abs(m) * 60
        c = coord(d,m)
        c.__adjust_sign(hemi)
        return c
    @staticmethod
    def from_exif_coord( (fracdeg,fracmin,fracsec), hemi=None ):
        """
        Take a tuple of Fractions (that's how they're given from pyexiv2)
        and translate into a coord.
        """
        fracdeg = fraction_to_rational(fracdeg)
        fracmin = fraction_to_rational(fracmin)
        fracsec = fraction_to_rational(fracsec)
        d = int( fracdeg.to_float() )
        c = coord.from_dms( d, fracmin.to_float(), fracsec.to_float() )
        c.__adjust_sign(hemi)
        return c
    @staticmethod
    def from_nmea_string(nstr,hemi=None):
        """
        Take a coordinate in the format given in NMEA log files and return a
        coord object. Hemi is optional. If supplied, we will determine the
        sign of the degrees value based on the value of hemi regardless of
        original sign handed in.
        """
        # NMEA packs degrees and minutes as dddmm.mmmm; the last two digits
        # before the decimal point are the whole minutes.
        l = str(nstr).split('.')
        deg = int( l[0][:-2] )
        minute = float( l[0][-2:] + '.' + l[1] )
        c = coord(deg,minute)
        c.__adjust_sign(hemi)
        return c
class latitude(coord):
    """A coord whose degrees are limited to +/-90; hemisphere is N (>=0) or S (<0)."""
    def __init__(self,degrees,minutes):
        if degrees <> None and abs(degrees) > 90:
            raise ValueError( "Latitude degrees can not exceed 90" )
        coord.__init__(self,degrees,minutes)
    def __setattr__(self,name,value):
        # NOTE(review): super(coord,self) resolves PAST coord in the MRO, so
        # coord.__setattr__'s 180-degree and minutes-range checks never run
        # for latitude instances — confirm that is intentional.
        if name=='degrees' and abs(value) > 90:
            raise ValueError( "Degrees of latitude can not exceed 90." )
        else:
            super(coord,self).__setattr__(name,value)
    @staticmethod
    def from_dms( d,m,s,hemi=None ):
        """
        Take degrees minutes and seconds and return a coord object in degrees
        and float minutes.
        """
        c = coord.from_dms( d,m,s,hemi )
        return latitude(c.degrees,c.minutes)
    @staticmethod
    def from_dd( dec_deg, hemi=None ):
        """
        Take decimal degrees and return a coord object with integer degrees and
        float minutes.
        """
        c = coord.from_dd( dec_deg, hemi )
        return latitude(c.degrees,c.minutes)
    @staticmethod
    def from_exif_coord( (fracdeg,fracmin,fracsec), hemi=None ):
        """
        Take a tuple of Fractions (that's how they're given from pyexiv2)
        and translate into a coord.
        """
        c = coord.from_exif_coord( (fracdeg,fracmin,fracsec), hemi )
        return latitude(c.degrees,c.minutes)
    @staticmethod
    def from_nmea_string(nstr,hemi=None):
        # Same ddmm.mmmm parsing as coord, narrowed to a latitude.
        c = coord.from_nmea_string(nstr,hemi)
        return latitude(c.degrees,c.minutes)
    @property
    def hemisphere(self):
        # Sign convention: negative degrees means southern hemisphere.
        if self.degrees < 0:
            return 'S'
        else:
            return 'N'
class longitude(coord):
    """A coord representing longitude; hemisphere is E (>=0) or W (<0)."""
    def __init__(self,degrees,minutes):
        coord.__init__(self,degrees,minutes)
    @staticmethod
    def from_dms( d,m,s,hemi=None ):
        """
        Take degrees minutes and seconds and return a coord object in degrees
        and float minutes.
        """
        c = coord.from_dms( d,m,s,hemi )
        return longitude(c.degrees,c.minutes)
    @staticmethod
    def from_dd( dec_deg, hemi=None ):
        """
        Take decimal degrees and return a coord object with integer degrees and
        float minutes.
        """
        c = coord.from_dd( dec_deg, hemi )
        return longitude(c.degrees,c.minutes)
    @staticmethod
    def from_exif_coord( (fracdeg,fracmin,fracsec), hemi=None ):
        """
        Take a tuple of Fractions (that's how they're given from pyexiv2)
        and translate into a coord.
        """
        c = coord.from_exif_coord( (fracdeg,fracmin,fracsec), hemi )
        return longitude(c.degrees,c.minutes)
    @staticmethod
    def from_nmea_string(nstr,hemi=None):
        # Same ddmm.mmmm parsing as coord, narrowed to a longitude.
        c = coord.from_nmea_string(nstr,hemi)
        return longitude(c.degrees,c.minutes)
    @property
    def hemisphere(self):
        # Sign convention: negative degrees means western hemisphere.
        if self.degrees < 0:
            return 'W'
        else:
            return 'E'
class position(object):
    """A latitude/longitude pair with text formatting and an OGR conversion."""
    def __init__(self, lat, lon):
        self.lat = lat
        self.lon = lon
    def __repr__(self):
        return "%s, %s" % (repr(self.lat), repr(self.lon))
    def __unicode__(self):
        return u'%s, %s' % (self.lat, self.lon)
    def __str__(self):
        return "%s, %s" % (self.lat, self.lon)
    @property
    def ogr_point(self):
        """
        Return the coordinate as an ogr point geometry (x = lon, y = lat,
        both in decimal degrees).
        """
        point = ogr.Geometry(ogr.wkbPoint)
        point.SetPoint(0, self.lon.decimal_degrees, self.lat.decimal_degrees)
        return point
class gpx_file(object):
    """Wrapper around a GPX file, read through the OGR 'GPX' driver."""
    def __init__(self,file_path):
        self.file_path = file_path
    def __repr__(self):
        return "GPX file: %s" % (self.file_path,)
    @property
    def ogr_ds(self):
        # Opens a fresh OGR datasource on every access; Open() returns None
        # when the file cannot be read as GPX.
        gpx_driver = ogr.GetDriverByName('GPX')
        return gpx_driver.Open(self.file_path)
    @property
    def layer_names(self):
        # Names of all layers in the datasource (tracks, track_points, ...).
        ds = self.ogr_ds
        return [ds.GetLayerByIndex(x).GetName() for x in range(ds.GetLayerCount())]
    @property
    def track_points(self):
        # Returns [[timestamp, position], ...] for every timestamped track
        # point, or None when the datasource could not be opened (ds is None,
        # so GetLayerByName raises AttributeError).
        ds = self.ogr_ds
        try:
            lyr = ds.GetLayerByName('track_points')
        except AttributeError:
            return None
        result = []
        for feat in lyr:
            lon = longitude.from_dd( feat.geometry().GetX() )
            lat = latitude.from_dd( feat.geometry().GetY() )
            pos = position(lat,lon)
            try:
                # dt_parser presumably comes from `from common import *`
                # (dateutil?) — TODO confirm.
                result.append([dt_parser.parse( feat.time ),pos])
            except AttributeError:
                pass # If we get here it's because there is a track point with no timestamp so we don't want it
        return result
    def read_to_db(self, dbp):
        # Insert every timestamped track point into the GPSLog table of the
        # sqlite database at dbp; returns a summary string, or None if empty.
        # NOTE(review): self.track_points is a property, so it is recomputed
        # (full file re-read) by the check below AND by the loop.
        if not self.track_points:
            print "The file %s has no track points." % (self.file_path,)
            return None
        conn,cur = connection_and_cursor(dbp)
        # Make sure the table is there
        create_gpslog_table(cur)
        rec_count = 0
        for tp in self.track_points:
            utctime = tp[0]
            pos = tp[1]
            # NOTE(review): these locals shadow the module-level latitude and
            # longitude classes for the rest of this method.
            latitude = pos.lat.nmea_string
            lat_hemi = pos.lat.hemisphere
            longitude = pos.lon.nmea_string
            lon_hemi = pos.lon.hemisphere
            t = ( None, utctime.replace(tzinfo=None), latitude, lat_hemi, longitude, lon_hemi, None )
            cur.execute("INSERT INTO GPSLog VALUES (?,?,?,?,?,?,?)", t)
            rec_count += 1
        conn.commit()
        cur.close()
        return "Read %i records from %s to %s." % (rec_count,os.path.basename(self.file_path),os.path.basename(dbp))
def create_gpslog_table(cur):
    # Create the GPSLog table on first use.  UNIQUE(utctime) ON CONFLICT
    # REPLACE makes re-importing the same log idempotent (latest row wins).
    cur.execute("create table if not exists GPSLog ( validity text, utctime datetime, latitude real, lat_hemi text,\
                longitude real, lon_hemi text, num_sats integer, UNIQUE (utctime) ON CONFLICT REPLACE)")
def get_position_for_time(dt_obj,db_path,reject_threshold=30,return_pretty=False,verbose=False):
    """Given a datetime object, find the position for the nearest position
    fix. I may want to interpolate between positions at some point but I'll
    leave that for later."""
    # Returns None when dt_obj is falsy or the nearest fix is more than
    # reject_threshold seconds away; otherwise a position object, or a
    # pretty unicode string when return_pretty is set.
    if not dt_obj:
        return None
    conn,cur = connection_and_cursor(db_path)
    t = ( dt_obj,dt_obj )
    # Nearest fix by absolute time difference in seconds (strftime('%s')
    # converts to seconds since the epoch inside sqlite).
    result = cur.execute("select abs(strftime('%s',?) - strftime('%s',utctime) ), \
                latitude, lat_hemi, longitude, lon_hemi, rowid from GPSLog order by \
                abs( strftime('%s',?) - strftime('%s',utctime) ) LIMIT 1", t).fetchone()
    time_diff = result[0]
    lat = latitude.from_nmea_string( result[1], result[2] )
    lon = longitude.from_nmea_string( result[3], result[4] )
    if verbose:
        print "Position from rowid: %i ---> %s, %s. Time difference = %i" % (result[5], str(lat), str(lon), time_diff)
    if time_diff > reject_threshold:
        return None
    else:
        if return_pretty:
            return unicode( lat ) + u', ' + unicode( lon )
        else:
            return position(lat,lon)
def extract_gps_data(filepath, these_sentences=('GPRMC','GPGGA',)):
    """Use the pynmea library to read data out of an nmea log file.

    Returns the parsed sentence objects whose sen_type is in these_sentences.
    """
    data = []
    with open(filepath, 'r') as data_file:
        streamer = NMEAStream(data_file)
        batch = streamer.get_objects()
        # get_objects() yields batches until the stream is exhausted (falsy).
        while batch:
            data.extend(obj for obj in batch if obj.sen_type in these_sentences)
            batch = streamer.get_objects()
    return data
def group_nmea_sentences_by_timestamp(obj_list):
    """Take a list of nmea sentence objects parsed by pynmea and group
    consecutive sentences that share a timestamp value.

    Returns a list of lists; each sub-list holds the sentence objects of one
    timestamp run.
    """
    grouped = []
    seen_keys = []
    for timestamp, run in groupby(obj_list, key=lambda s: s.timestamp):
        # Materialise the group iterator before groupby invalidates it.
        grouped.append(list(run))
        seen_keys.append(timestamp)
    return grouped
def read_gps_log(filepath,path_to_db):
    """Read in a single nmea gps log into the sqlite database. Currently requiring
    the GPRMC sentence and optionally reading the number of satellites from the
    GPGGA sentence when it is available.

    Returns a human-readable summary of how many records were inserted.
    """
    conn,cur = connection_and_cursor(path_to_db)
    # Make sure the table is there
    create_gpslog_table(cur)
    data = extract_gps_data(filepath,these_sentences=('GPRMC','GPGGA',))
    grouped = group_nmea_sentences_by_timestamp(data)
    rec_count = 0
    for timegroup in grouped:
        # GPGGA does not have a datestamp so I don't want to use lone GPGGA sentences
        # If I have both sentences, I'll get the number of satellites.
        if 'GPRMC' in [s.sen_type for s in timegroup]:
            num_sats = None # In case there's no GPGGA sentence in this timegroup
            for sentence in timegroup:
                if sentence.sen_type=='GPRMC':
                    validity = sentence.data_validity
                    # GPRMC carries date and time separately; join and parse.
                    # dt is presumably datetime.datetime via `from common
                    # import *` — TODO confirm.
                    datetime_str = str(sentence.datestamp) + ' ' + str(sentence.timestamp)
                    utctime = dt.strptime(datetime_str,'%d%m%y %H%M%S.%f')
                    latitude = float(sentence.lat)
                    lat_hemi = sentence.lat_dir
                    longitude = float(sentence.lon)
                    lon_hemi = sentence.lon_dir
                elif sentence.sen_type=='GPGGA':
                    num_sats = int(sentence.num_sats)
            t = ( validity, utctime, latitude, lat_hemi, longitude, lon_hemi, num_sats )
            cur.execute("INSERT INTO GPSLog VALUES (?,?,?,?,?,?,?)", t)
            rec_count += 1
    conn.commit()
    cur.close()
    return "Read %i records from %s to %s." % (rec_count,os.path.basename(filepath),os.path.basename(path_to_db))
def batch_read_gps_logs(directory):
    """Iteratively use read_gps_log on all files in a directory. Restrict to a
    range of dates?"""
    # TODO: not implemented — the __main__ block below performs its own
    # directory walk instead of calling this.
    pass
if __name__ == '__main__':
    # CLI entry point: import one log file, or every .log/.gpx file inside a
    # directory, into the sqlite database.  argparse, os and db_path are
    # presumably provided by `from common import *` at the top of the file
    # — TODO confirm.
    parser = argparse.ArgumentParser(description="Import a GPS log file into the database. The GPS log can either be an NMEA text log file with a \'.log\' extension or a GPX file with a \'.gpx\' extension.")
    parser.add_argument('input_path', type=str, help='The directory of log files or the individual file that you want to import.')
    parser.add_argument('output_db', nargs='?', type=str, help='The database you would like to read the log into. If left blank, the %s will be used as specified in configuration.py.' % (db_path,), default=db_path)
    args = parser.parse_args()
    if os.path.isdir(args.input_path): # this means a directory has been handed in
        for fname in os.listdir(args.input_path):
            # Dispatch on file extension: .log -> NMEA reader, .gpx -> OGR reader.
            if fname.lower().endswith('.log'):
                read_gps_log(os.path.join(args.input_path,fname),args.output_db)
            elif fname.lower().endswith('.gpx'):
                gpx_file(os.path.join(args.input_path,fname)).read_to_db(args.output_db)
    else:
        if args.input_path.lower().endswith('.log'):
            read_gps_log(args.input_path,args.output_db)
        elif args.input_path.lower().endswith('.gpx'):
            gf = gpx_file(args.input_path)
            gf.read_to_db(args.output_db)
|
### lists
#lists are iterable (can return items one at a time)
#lists are mutable (can be changed)
#lists can be indexed
print("\nlists:")
emptylist = []
numlist = [1,2,3]
print(numlist)
#index
print("first item: ", end="")
print(numlist[0])
# len
print("length: ", end="")  # BUGFIX: output typo "lenght" corrected
print(len(numlist))
# add
emptylist.append("notempty") # .append adds item in place
newlist = emptylist  # NOTE: an alias, not a copy - both names share one list
print(newlist)
#insert
numlist.insert(1,0.5)
print(numlist)
#remove
numlist.remove(0.5)
print(numlist)
#reassign
numlist[0] = "first"
print(numlist)
#in
print(2 in numlist)
#matrix
print("matrix:")
matrixlist = [
    [1,2,3],
    [4,5,6]
]
print(matrixlist[0][0]) #first
print(matrixlist[1][2]) #last
print(matrixlist[0]) #upperrow
print(matrixlist[1]) #lowerrow
#slices
print("slices:")
listslice = [0,1,2,3,4,5]
print(listslice[0:])
print(listslice[:5])
print(listslice[1:4])
# more list stuff
print("morestuff:")
morelist = [1,2,3,4,5]
print(max(morelist)) # max value
print(min(morelist)) # min value
print(morelist.count(3)) # counts number of occurrences
morelist.reverse() # reverses list in place
print(morelist) #prints reversed list
# enumerate yields (index, value) pairs (the original also had a bare no-op
# enumerate() call, removed)
for item in enumerate(morelist):
    print(item)
sentence = "make a list from a string"  # BUGFIX: was named "str", shadowing the builtin
strlist = sentence.split()
print(strlist)
### tuples
# tuples are immutable, items cannot be changed
# tuples can be used with dictionaries with tuples representing the keys that are immutable
# tuples can be indexed
print("\ntuples:")
emptytp = ()
numtp = (1,2,3)
anotherwaytodothis = 1,2,3  # parentheses are optional; the commas make the tuple
print(numtp)
#index
print("first item: ", end="")
print(numtp[0])
#len
print("length: ", end="")  # BUGFIX: output typo "lenght" corrected
print(len(numtp))
#in
print(2 in numtp)
#reassign
print("cannot reassign because tuples are immutable")
### dictionaries
# dictionaries are mutable (item values can be changed)
# dictionaries can be indexed (per key)
print("\ndictionaries:")
emptydict = {}
numdict = {"first": 1, "second": 2, "third": 3}
print(numdict)
#index
print("first item: ", end="")
print(numdict["first"])
#len
print("length: ", end="")  # BUGFIX: output typo "lenght" corrected
print(len(numdict))
# add
numdict["fourth"] = 4  # BUGFIX: key typo "forth" corrected
print(numdict)
#reassign
numdict["first"] = "first"
print(numdict)
#in
print("second" in numdict)
print(2 in numdict)  # False: membership tests keys, not values
#get
print(numdict.get("third", "not in dict"))
print(numdict.get("tenth", "not in dict"))
### sets
# sets cannot be indexed since they are unordered
# sets store unique items, duplicates are removed
print("\nsets:")
emptyset = set()
numset = {1,2,3}
print(numset)
# index not possible
print("can't print first item")
#len
print("length: ", end="")  # BUGFIX: output typo "lenght" corrected
print(len(numset))
# add
emptyset.add("notempty") # .add adds item
newset = emptyset  # NOTE: an alias, not a copy
print(newset)
# remove
numset.remove(3) # removes specific item by name not position since sets are unordered
print(numset)
numset.pop() # removes an arbitrary item
print(numset)
#in
print(2 in numset)
# -*- coding: utf-8 -*-
import unittest
from . import UnitTestBase
from moviesnotifier import TntvillageWebpage
class TntvillageWebpageTest(UnitTestBase):
    """Parsing tests for TntvillageWebpage against a saved HTML fixture page."""
    def setUp(self):
        # Fixture: a captured listing page containing 20 movie rows.
        html = self._read_file('tntvillage_example1.html')
        self.page = TntvillageWebpage(html)
    def test_recognizeCorrectlyNumberOfMovies(self):
        movies = self.page.movies()
        self.assertEqual(len(movies), 20)
    def test_recognizeMovieTitles(self):
        # Spot-check titles at the start, middle and end of the listing.
        movies = self.page.movies()
        self.assertEqual(movies[0].title, "Le Onde del Destino (1996) [BDmux 1080p - H264 - Ita Eng Ac3 - Sub Ita Eng]")
        self.assertEqual(movies[1].title, u"Mister Felicità (2017) [XviD - Ita Mp3]")
        self.assertEqual(movies[11].title, "Passengers 4K HDR (2016) [BDmux 2160p - H265 - Ita Eng Ac3 5.1 - MultiSub]")
        self.assertEqual(movies[19].title, "Il GGG - Il Grande Gigante Gentile (2016) [BDmux 720p - H264 - Ita Eng Ac3 - Sub Ita Eng]")
    def test_recognizeSeeds(self):
        # Spot-check seed counts parsed from several rows.
        movies = self.page.movies()
        self.assertEqual(movies[0].seeds, 1)
        self.assertEqual(movies[1].seeds, 77)
        self.assertEqual(movies[12].seeds, 46)
        self.assertEqual(movies[18].seeds, 107)
|
# -*- coding: utf-8 -*-
"""
******************************************************************************
* @author : Jabed-Akhtar (github)
* @Created on : Fri Apr 1 03:18:48 2022
******************************************************************************
* @file : ml_DecisionTreeClassifier_Music.py
* @brief : an example of classifying music (music recommender)
******************************************************************************
* :Steps :
* 1. Importing python libraries
* 2. Defining variables
* 3. Reading data
* 4. Building the model and fitting model to data
* 5. Testings/Predictions
* 6. Saving model
* 7. Visualizing the model trained with DecisionTreeClassified
* :Descriptions/Infos:
* - a source used within this script: https://youtu.be/7eh4d6sabA0
* - used datasets and files can be found in folder: '/datasets/music.csv'
* - trained model is saved at location: '/trainedModels/---.pickle'
* - a dot file of visualization of the model can be found at location: '/docs_images_logs__/DecisionTreeClassifier_Music-Recommender.dot'
* -> visualization can be seen with VS-Code (Graphviz (dot) ... extension needs to be installed)
* -> a screenshot of the visualization in VS-Code can be found at location: '/docs_images_logs__/DecisionTreeClassifier_Music-Recommender-dotPreview_VSCode.jpg'
*
******************************************************************************
"""
#Imports ======================================================================
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import tree
# BUGFIX: the joblib import was commented out, so the dump/load calls below
# raised NameError.  Use stdlib pickle instead (the header already documents
# the saved model as a '.pickle' file).
import pickle
import matplotlib.pyplot as plt
#Variables ====================================================================
predict = "genre"
#Reading data =================================================================
data = pd.read_csv('datasets/music.csv')
X = data.drop(columns=[predict]) # dropping genre column, as it is the output
Y = data[predict]
print(Y)
# Allocating 10% of data for testing
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1)
#Building the model and fitting model to data =================================
model = DecisionTreeClassifier()
model.fit(X_train, Y_train)
acc = model.score(X_test, Y_test)
print(acc)
#Testings/Predictions =========================================================
predictions1 = model.predict([ [21, 1], [22, 0] ])
predictions2 = model.predict(X_test)
score = accuracy_score(Y_test, predictions2)
#Saving model =================================================================
with open('decisionTreeClassifier_music.pickle', 'wb') as f:
    pickle.dump(model, f)
#Loading/Using saved model
with open('decisionTreeClassifier_music.pickle', 'rb') as f:
    model_loaded = pickle.load(f)
# testing with loaded model
# BUGFIX: the original called model.predict here, so the freshly loaded model
# was never actually exercised.
predictions_withLoadedModel = model_loaded.predict([ [21, 1], [22, 0] ])
#Plotting/Visualizing =========================================================
tree.export_graphviz(model, out_file='DecisionTreeClassifier_Music-Recommender.dot',
                     feature_names=['age', 'gender'],
                     class_names=sorted(Y_train.unique()),
                     label='all',
                     rounded=True,
                     filled=True)
# ****************************** END OF FILE **********************************
from classes.console.console import Console
from classes.option.fetch import OptionFetch
from classes.option.option_interface import OptionInterface
from classes.option.replace import OptionReplace
from classes.option.search import OptionSearch
from classes.yaml_parser import YamlParser
import sys
from typing import Any
if __name__ == '__main__':
    # CLI entry point: parse command-line arguments, load the YAML config,
    # then dispatch to the option implementation selected on the command line.
    console: Console = Console()
    args: Any = console.get_args()
    yaml_parser: YamlParser = YamlParser(args.config)
    if Console.OPTION_FETCH == console.get_option():
        option: OptionInterface = OptionFetch(console, yaml_parser)
    elif Console.OPTION_REPLACE == console.get_option():
        option: OptionInterface = OptionReplace(console, yaml_parser)
    elif Console.OPTION_SEARCH == console.get_option():
        option: OptionInterface = OptionSearch(console, yaml_parser)
    else:
        # Unknown option: warn and bail out before `option` is referenced.
        print(Console.warning("Choose correct option, abort"))
        sys.exit()
    option.run()
    sys.exit()
|
"""
virtualenv
图形界面
网络编程
TCP/IP简介
TCP编程
UDP编程
"""
# 在开发Python应用程序的时候,系统安装的Python3只有一个版本:3.4。所有第三方的包都会被pip安装到Python3的site-packages目录下。
# 如果我们要同时开发多个应用程序,那这些应用程序都会共用一个Python,就是安装在系统的Python 3。如果应用A需要jinja 2.7,而应用B需要jinja 2.6怎么办?
# 这种情况下,每个应用可能需要各自拥有一套“独立”的Python运行环境。virtualenv就是用来为一个应用创建一套“隔离”的Python运行环境。
# 首先,我们用pip安装virtualenv:
# $ pip3 install virtualenv
# 然后,假定我们要开发一个新的项目,需要一套独立的Python运行环境,可以这么做:
# 第一步,创建目录:
#
# Mac:~ michael$ mkdir myproject
# Mac:~ michael$ cd myproject/
# Mac:myproject michael$
# 第二步,创建一个独立的Python运行环境,命名为venv:
#
# Mac:myproject michael$ virtualenv --no-site-packages venv
# Using base prefix '/usr/local/.../Python.framework/Versions/3.4'
# New python executable in venv/bin/python3.4
# Also creating executable in venv/bin/python
# Installing setuptools, pip, wheel...done.
# 命令virtualenv就可以创建一个独立的Python运行环境,我们还加上了参数--no-site-packages,
# 这样,已经安装到系统Python环境中的所有第三方包都不会复制过来,这样,我们就得到了一个不带任何第三方包的“干净”的Python运行环境。
# 新建的Python环境被放到当前目录下的venv目录。有了venv这个Python环境,可以用source进入该环境:
#
# Mac:myproject michael$ source venv/bin/activate
# (venv)Mac:myproject michael$
# 注意到命令提示符变了,有个(venv)前缀,表示当前环境是一个名为venv的Python环境。
# 下面正常安装各种第三方包,并运行python命令:
# (venv)Mac:myproject michael$ pip install jinja2
# ...
# Successfully installed jinja2-2.7.3 markupsafe-0.23
# (venv)Mac:myproject michael$ python myapp.py
# ...
# 在venv环境下,用pip安装的包都被安装到venv这个环境下,系统Python环境不受任何影响。也就是说,venv环境是专门针对myproject这个应用创建的。
# 退出当前的venv环境,使用deactivate命令:
# (venv)Mac:myproject michael$ deactivate
# Mac:myproject michael$
# 此时就回到了正常的环境,现在pip或python均是在系统Python环境下执行。
# 完全可以针对每个应用创建独立的Python运行环境,这样就可以对每个应用的Python环境进行隔离。
# virtualenv是如何创建“独立”的Python运行环境的呢?原理很简单,就是把系统Python复制一份到virtualenv的环境,
# 用命令source venv/bin/activate进入一个virtualenv环境时,virtualenv会修改相关环境变量,让命令python和pip均指向当前的virtualenv环境。
from tkinter import *
class Application(Frame):
    """Minimal tkinter demo: a greeting label plus a Quit button."""
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
    def createWidgets(self):
        # Build the static widgets once at construction time.
        self.helloLabel = Label(self, text='Hello, world!')
        self.helloLabel.pack()
        self.quitButton = Button(self, text='Quit', command=self.quit)
        self.quitButton.pack()
app = Application()
# Set the window title:
app.master.title('Hello World')
# Run the main event loop:
app.mainloop()
from tkinter import *
import tkinter.messagebox as messagebox
class Application(Frame):
    """tkinter demo: an entry box and a button that greets the typed name."""
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
    def createWidgets(self):
        # Entry for the name plus the button that triggers the greeting.
        self.nameInput = Entry(self)
        self.nameInput.pack()
        self.alertButton = Button(self, text='Hello', command=self.hello)
        self.alertButton.pack()
    def hello(self):
        # Fall back to 'world' when the entry is left empty.
        name = self.nameInput.get() or 'world'
        messagebox.showinfo('Message', 'Hello, %s' % name)
# Build the root frame and start the UI.
app = Application()
# Set the window title:
app.master.title('Hello World')
# Enter the Tk main event loop (blocks until the window is closed):
app.mainloop()
# 计算机为了联网,就必须规定通信协议,早期的计算机网络,都是由各厂商自己规定一套协议,
# IBM、Apple和Microsoft都有各自的网络协议,互不兼容,这就好比一群人有的说英语,有的说中文,有的说德语,说同一种语言的人可以交流,不同的语言之间就不行了。
# 为了把全世界的所有不同类型的计算机都连接起来,就必须规定一套全球通用的协议,为了实现互联网这个目标,
# 互联网协议簇(Internet Protocol Suite)就是通用协议标准。Internet是由inter和net两个单词组合起来的,原意就是连接“网络”的网络,有了Internet,任何私有网络,只要支持这个协议,就可以联入互联网。
# 因为互联网协议包含了上百种协议标准,但是最重要的两个协议是TCP和IP协议,所以,大家把互联网的协议简称TCP/IP协议。
# 通信的时候,双方必须知道对方的标识,好比发邮件必须知道对方的邮件地址。互联网上每个计算机的唯一标识就是IP地址,类似123.123.123.123。如果一台计算机同时接入到两个或更多的网络,
# 比如路由器,它就会有两个或多个IP地址,所以,IP地址对应的实际上是计算机的网络接口,通常是网卡。
# IP协议负责把数据从一台计算机通过网络发送到另一台计算机。数据被分割成一小块一小块,然后通过IP包发送出去。由于互联网链路复杂,两台计算机之间经常有多条线路,
# 因此,路由器就负责决定如何把一个IP包转发出去。IP包的特点是按块发送,途径多个路由,但不保证能到达,也不保证顺序到达。
# internet-computers
# IP地址实际上是一个32位整数(称为IPv4),以字符串表示的IP地址如192.168.0.1实际上是把32位整数按8位分组后的数字表示,目的是便于阅读。
# IPv6地址实际上是一个128位整数,它是目前使用的IPv4的升级版,以字符串表示类似于2001:0db8:85a3:0042:1000:8a2e:0370:7334。
# TCP协议则是建立在IP协议之上的。TCP协议负责在两台计算机之间建立可靠连接,保证数据包按顺序到达。TCP协议会通过握手建立连接,然后,对每个IP包编号,确保对方按顺序收到,如果包丢掉了,就自动重发。
# 许多常用的更高级的协议都是建立在TCP协议基础上的,比如用于浏览器的HTTP协议、发送邮件的SMTP协议等。
# 一个IP包除了包含要传输的数据外,还包含源IP地址和目标IP地址,源端口和目标端口。
# 端口有什么作用?在两台计算机通信时,只发IP地址是不够的,因为同一台计算机上跑着多个网络程序。一个IP包来了之后,到底是交给浏览器还是QQ,
# 就需要端口号来区分。每个网络程序都向操作系统申请唯一的端口号,这样,两个进程在两台计算机之间建立网络连接就需要各自的IP地址和各自的端口号。
# 一个进程也可能同时与多个计算机建立链接,因此它会申请很多端口。
# 了解了TCP/IP协议的基本概念,IP地址和端口的概念,我们就可以开始进行网络编程了。
# Socket是网络编程的一个抽象概念。通常我们用一个Socket表示“打开了一个网络链接”,而打开一个Socket需要知道目标计算机的IP地址和端口号,再指定协议类型即可。
#
# 客户端
# 大多数连接都是可靠的TCP连接。创建TCP连接时,主动发起连接的叫客户端,被动响应连接的叫服务器。
# 举个例子,当我们在浏览器中访问新浪时,我们自己的计算机就是客户端,浏览器会主动向新浪的服务器发起连接。
# 如果一切顺利,新浪的服务器接受了我们的连接,一个TCP连接就建立起来的,后面的通信就是发送网页内容了。
# 所以,我们要创建一个基于TCP连接的Socket,可以这样做:
#
# # 导入socket库:
# import socket
#
# # 创建一个socket:
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # 建立连接:
# s.connect(('www.sina.com.cn', 80))
# 创建Socket时,AF_INET指定使用IPv4协议,如果要用更先进的IPv6,就指定为AF_INET6。SOCK_STREAM指定使用面向流的TCP协议,这样,一个Socket对象就创建成功,但是还没有建立连接。
# 客户端要主动发起TCP连接,必须知道服务器的IP地址和端口号。新浪网站的IP地址可以用域名www.sina.com.cn自动转换到IP地址,但是怎么知道新浪服务器的端口号呢?
# 答案是作为服务器,提供什么样的服务,端口号就必须固定下来。由于我们想要访问网页,因此新浪提供网页服务的服务器必须把端口号固定在80端口,
# 因为80端口是Web服务的标准端口。其他服务都有对应的标准端口号,例如SMTP服务是25端口,FTP服务是21端口,等等。端口号小于1024的是Internet标准服务的端口,端口号大于1024的,可以任意使用。
# 因此,我们连接新浪服务器的代码如下:
#
# s.connect(('www.sina.com.cn', 80))
# 注意参数是一个tuple,包含地址和端口号。
# 建立TCP连接后,我们就可以向新浪服务器发送请求,要求返回首页的内容:
# # 发送数据:
# s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
# TCP连接创建的是双向通道,双方都可以同时给对方发数据。但是谁先发谁后发,怎么协调,要根据具体的协议来决定。例如,HTTP协议规定客户端必须先发请求给服务器,服务器收到后才发数据给客户端。
# 发送的文本格式必须符合HTTP标准,如果格式没问题,接下来就可以接收新浪服务器返回的数据了:
#
# # 接收数据:
# buffer = []
# while True:
# # 每次最多接收1k字节:
# d = s.recv(1024)
# if d:
# buffer.append(d)
# else:
# break
# data = b''.join(buffer)
# 接收数据时,调用recv(max)方法,一次最多接收指定的字节数,因此,在一个while循环中反复接收,直到recv()返回空数据,表示接收完毕,退出循环。
# 当我们接收完数据后,调用close()方法关闭Socket,这样,一次完整的网络通信就结束了:
#
# # 关闭连接:
# s.close()
# 接收到的数据包括HTTP头和网页本身,我们只需要把HTTP头和网页分离一下,把HTTP头打印出来,网页内容保存到文件:
#
# header, html = data.split(b'\r\n\r\n', 1)
# print(header.decode('utf-8'))
# # 把接收的数据写入文件:
# with open('sina.html', 'wb') as f:
# f.write(html)
# 现在,只需要在浏览器中打开这个sina.html文件,就可以看到新浪的首页了。
#
# 服务器
# 和客户端编程相比,服务器编程就要复杂一些。
# 服务器进程首先要绑定一个端口并监听来自其他客户端的连接。如果某个客户端连接过来了,服务器就与该客户端建立Socket连接,随后的通信就靠这个Socket连接了。
# 所以,服务器会打开固定端口(比如80)监听,每来一个客户端连接,就创建该Socket连接。由于服务器会有大量来自客户端的连接,
# 所以,服务器要能够区分一个Socket连接是和哪个客户端绑定的。一个Socket依赖4项:服务器地址、服务器端口、客户端地址、客户端端口来唯一确定一个Socket。
# 但是服务器还需要同时响应多个客户端的请求,所以,每个连接都需要一个新的进程或者新的线程来处理,否则,服务器一次就只能服务一个客户端了。
# 我们来编写一个简单的服务器程序,它接收客户端连接,把客户端发过来的字符串加上Hello再发回去。
# 首先,创建一个基于IPv4和TCP协议的Socket:
#
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 然后,我们要绑定监听的地址和端口。服务器可能有多块网卡,可以绑定到某一块网卡的IP地址上,也可以用0.0.0.0绑定到所有的网络地址,
# 还可以用127.0.0.1绑定到本机地址。127.0.0.1是一个特殊的IP地址,表示本机地址,如果绑定到这个地址,客户端必须同时在本机运行才能连接,也就是说,外部的计算机无法连接进来。
# 端口号需要预先指定。因为我们写的这个服务不是标准服务,所以用9999这个端口号。请注意,小于1024的端口号必须要有管理员权限才能绑定:
#
# # 监听端口:
# s.bind(('127.0.0.1', 9999))
# 紧接着,调用listen()方法开始监听端口,传入的参数指定等待连接的最大数量:
#
# s.listen(5)
# print('Waiting for connection...')
# 接下来,服务器程序通过一个永久循环来接受来自客户端的连接,accept()会等待并返回一个客户端的连接:
#
# while True:
# # 接受一个新连接:
# sock, addr = s.accept()
# # 创建新线程来处理TCP连接:
# t = threading.Thread(target=tcplink, args=(sock, addr))
# t.start()
# 每个连接都必须创建新线程(或进程)来处理,否则,单线程在处理连接的过程中,无法接受其他客户端的连接:
#
# def tcplink(sock, addr):
# print('Accept new connection from %s:%s...' % addr)
# sock.send(b'Welcome!')
# while True:
# data = sock.recv(1024)
# time.sleep(1)
# if not data or data.decode('utf-8') == 'exit':
# break
# sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
# sock.close()
# print('Connection from %s:%s closed.' % addr)
# 连接建立后,服务器首先发一条欢迎消息,然后等待客户端数据,并加上Hello再发送给客户端。如果客户端发送了exit字符串,就直接关闭连接。
# 要测试这个服务器程序,我们还需要编写一个客户端程序:
#
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # 建立连接:
# s.connect(('127.0.0.1', 9999))
# # 接收欢迎消息:
# print(s.recv(1024).decode('utf-8'))
# for data in [b'Michael', b'Tracy', b'Sarah']:
# # 发送数据:
# s.send(data)
# print(s.recv(1024).decode('utf-8'))
# s.send(b'exit')
# s.close()
# TCP是建立可靠连接,并且通信双方都可以以流的形式发送数据。相对TCP,UDP则是面向无连接的协议。
#
# 使用UDP协议时,不需要建立连接,只需要知道对方的IP地址和端口号,就可以直接发数据包。但是,能不能到达就不知道了。
#
# 虽然用UDP传输数据不可靠,但它的优点是和TCP比,速度快,对于不要求可靠到达的数据,就可以使用UDP协议。 |
from progressbar import ProgressBar
from tqdm import tqdm
import numpy as np
class ClassicDE:
    """Classic differential evolution (DE/rand/1) minimizer.

    The search operates on a population normalized to [0, 1] per dimension;
    candidates are mapped back to the real bounds before the cost function
    is evaluated.
    """

    def __init__(self, bounds, mutation, cross_probability, normalized_population, denorm_population, population_size, iterations, clip, exp_cross, cost_function):
        """Store the configuration and locate the initial best vector.

        Args:
            bounds: sequence of (min, max) pairs, one per dimension.
            mutation: differential weight F.
            cross_probability: crossover rate CR in [0, 1].
            normalized_population: population scaled to [0, 1].
            denorm_population: the same population in real coordinates.
            population_size: number of individuals.
            iterations: number of generations to run.
            clip: kept for interface compatibility (mutants are always clipped here).
            exp_cross: use exponential crossover instead of binomial.
            cost_function: objective; called with a denormalized vector.
        """
        self.bounds = bounds
        self.mutation = mutation
        # Bug fix: a stray trailing comma previously stored the 1-tuple
        # (cross_probability,) instead of the scalar rate.
        self.cross_probability = cross_probability
        self.normalized_population = normalized_population
        self.denorm_population = denorm_population
        self.iterations = iterations
        self.clip = clip
        # Bug fix: this attribute was first set to len(normalized_population)
        # and then silently overwritten by the parameter; keep the parameter.
        self.population_size = population_size
        self.cost_function = cost_function
        self.dimensions = len(self.bounds)
        self.best_index = -1
        # Evaluates the whole population and fills self.values_array.
        self.best_vector = self.find_best_vector(self.denorm_population)

    def de(self):
        """Run the evolution; yields (best_vector, best_value) per generation."""
        # Bug fix: the original iterated over a one-time .copy() of the
        # population, so vectors improved by update() were never used as
        # donors; operate on the live population instead.
        population = self.normalized_population
        for _ in tqdm(range(self.iterations), leave=False, desc=f'DE exp - {self.do_exp}'):
            for j in range(self.population_size):
                # Pick three distinct donors, none of them the current target j.
                donor_indexes = [ind for ind in range(self.population_size) if ind != j]
                vector_a, vector_b, vector_c = population[np.random.choice(donor_indexes, 3, replace=False)]
                mutant = np.clip(vector_a + self.mutation * (vector_b - vector_c), 0, 1)
                if self.do_exp:
                    cross_points = self.exp_crosspoints(self.dimensions)
                else:
                    cross_points = np.random.rand(self.dimensions) < self.cross_probability
                # Guarantee at least one gene is taken from the mutant.
                if not np.any(cross_points):
                    cross_points[np.random.randint(0, self.dimensions)] = True
                candidate_vect = np.where(cross_points, mutant, population[j])
                candidate_denorm = self.denorm(candidate_vect)
                self.update(self.cost_function(candidate_denorm), candidate_denorm, candidate_vect, j)
            yield self.best_vector, self.values_array[self.best_index]

    def exp_crosspoints(self, dimensions):
        """Exponential crossover mask: a random contiguous (wrapping) run of True."""
        start_point = np.random.randint(dimensions)
        length = 1 + np.random.randint(dimensions - 1)
        mask = [False] * dimensions
        for offset in range(length):
            mask[(start_point + offset) % dimensions] = True
        return mask

    def denorm(self, normalized_population):
        """Map [0, 1]-normalized vectors back into the real bounds."""
        # TODO(review, kept from original): check if bounds are given in proper order.
        min_bound, max_bound = np.asarray(self.bounds).T
        bounds_difference = np.fabs(min_bound - max_bound)
        return max_bound - bounds_difference * normalized_population

    def find_best_vector(self, denorm_population):
        """Evaluate the whole population; cache values and return the best vector."""
        self.values_array = np.asarray([self.cost_function(ind) for ind in denorm_population])
        self.best_index = np.argmin(self.values_array)
        return denorm_population[self.best_index]

    def update(self, candidate, denorm_candidate, norm_candidate, j):
        """Greedy selection: replace individual j (and the best) if improved."""
        if candidate < self.values_array[j]:
            self.values_array[j] = candidate
            self.normalized_population[j] = norm_candidate
            if candidate < self.values_array[self.best_index]:
                self.best_index = j
                self.best_vector = denorm_candidate
|
__author__ = 'Sanjarbek Hudaiberdiev'
import sys
sys.path.append('/users/hudaiber/Projects/SystemFiles/')
sys.path.append('/users/hudaiber/Projects/lib/BioPy')
from BioClasses import Gene
import globalVariables as gv
import os
from CogClasses import *
import cPickle as pickle
# Global variables
# Paths into the local data folder for the arCOG / genome / pty inputs.
arGOGDataPath = os.path.join(gv.LOCAL_DATA_FOLDER, 'Archea', 'arCOG')
genomesDataPath = os.path.join(gv.LOCAL_DATA_FOLDER, 'Archea', 'genomes')
projectDataPath = os.path.join('../', 'data', 'Archea')
ptyFilePath = os.path.join(gv.LOCAL_DATA_FOLDER, 'Pty', 'Prok1402_3added.pty')
ptyGenomesPath = os.path.join(gv.LOCAL_DATA_FOLDER, 'Pty', 'genomes')
# NOTE(review): FLANK_LENGTH is not used in the visible part of this file —
# presumably the neighborhood flank size; confirm against the callers.
FLANK_LENGTH = 50
def get_ptt_map(file_path):
    """Parse a .ptt table into {gene id: Gene}, skipping the 3 header lines.

    The genome name is taken from the parent directory and the chromosome
    from the file name (without extension), as in the .ptt layout.
    """
    gene_map = {}
    genome_name = os.path.dirname(file_path).split('/')[-1]
    chromosome = os.path.splitext(os.path.basename(file_path))[0]
    for line in open(file_path).readlines()[3:]:
        fields = line.strip().split()
        start, end = fields[0].split('..')
        gene_map[fields[3]] = Gene(source=chromosome, gid=fields[3],
                                   pFrom=start, pTo=end,
                                   organism=genome_name, strand=fields[1],
                                   cogid=fields[7])
    return gene_map
def get_pty_map(folder_path):
    """Collect {gene id: Gene} from every .pty file found in folder_path."""
    gene_map = {}
    for file_name in os.listdir(folder_path):
        if not file_name.endswith('.pty'):
            continue
        for line in open(os.path.join(folder_path, file_name)):
            gid, coordinates, strand, genome, chromosome = line.strip().split()
            start, end = coordinates.split('..')
            gene_map[gid] = Gene(source=chromosome, gid=gid, pFrom=start,
                                 pTo=end, organism=genome, strand=strand)
    return gene_map
def prepare_line(l):
    """Expand one arCOG csv line into a tab-separated line with coordinates.

    Looks the gene up in the pty data of its genome and emits:
    genome, chromosome, gid, strand, from, to, arcog.
    """
    fields = l.split(',')
    cur_gid, cur_genome, cur_arcog = fields[0], fields[1], fields[6]
    pty_map = get_pty_map(os.path.join(ptyGenomesPath, cur_genome))
    gene = pty_map[cur_gid]
    return "%s\t%s\t%s\t%s\t%d\t%d\t%s\n" % (
        cur_genome, gene.src, cur_gid, gene.strand, gene.pFrom, gene.pTo,
        cur_arcog)
def map_all_arcog_hits():
"""arcog_hits => ar14.arCOG.csv.
Add coordinate information to the arcog_hits"""
arcog_hit_file_path = os.path.join(arGOGDataPath, 'ar14.arCOG.csv')
new_arcog_hit_file_path = os.path.join(projectDataPath, 'map_arcogs_neighborhoods', 'mapped_ar14.arCOG.csv')
cnt=0
with open(arcog_hit_file_path) as infile:
with open(new_arcog_hit_file_path,'w') as outfile:
for l in infile:
if 'arCOG' not in l:
continue
try:
new_line = prepare_line(l)
outfile.write(new_line)
except:
print 'Couldn\'t find:', l.strip()
cnt+=1
if cnt % 1000 == 0 and cnt>0:
print cnt
if __name__=='__main__':
    pass
# NOTE(review): the commented block below is retained exploratory code for
# tagging arCOG hits as integrase/recombinase/transposase and for pickling
# intermediate results; it is intentionally disabled.
#
#
# integrase_arcogs = [l.strip() for l in open(os.path.join(projectDataPath, 'selected_arcogs', 'arcogs_integrase.txt' )).readlines()]
# recombinase_arcogs = [l.strip() for l in open(os.path.join(projectDataPath, 'selected_arcogs', 'arcogs_recombinase.txt')).readlines()]
# transposase_arcogs = [l.strip() for l in open(os.path.join(projectDataPath, 'selected_arcogs', 'arcogs_transposase.txt')).readlines()]
#
# integrases, transposases, recombinases = 0, 0, 0
#
# for arcog_hit in selected_arcog_hits:
#     if arcog_hit.arcogid in integrase_arcogs:
#         arcog_hit.set_product_enzyme('integrase')
#         integrases += 1
#
#     if arcog_hit.arcogid in transposase_arcogs:
#         arcog_hit.set_product_enzyme('transposase')
#         transposases += 1
#
#     if arcog_hit.arcogid in recombinase_arcogs:
#         arcog_hit.set_product_enzyme('recombinase')
#         recombinases += 1
#
#
# pickle.dump(selected_neighborhoods, open('selected_nbrhoods.p', 'w'))
# pickle.dump(selected_arcog_hits, open('selected_arcog_hits.p', 'w'))
# selected_arcog_hits = pickle.load(open('selected_arcog_hits.p'))
# selected_neighborhoods = pickle.load(open('selected_nbrhoods.p'))
# orgname = selected_arcog_hits[0].organism
# org_hits = [g for g in selected_arcog_hits if g.organism==orgname]
# org_hits.sort()
# for g in org_hits[:2]:
#     print g
# sys.exit()
# for n in selected_neighborhoods:
#     print n.classname, n.cogs
#     print n.organisms
# process_neighborhoods_of_arcog_hits(selected_neighborhoods, selected_arcog_hits)
|
# import re
# n = int(input())
# for i in range(n):
# print(re.sub(r'(?<= )(&&|\|\|)(?= )', lambda x: 'and' if x.group() == '&&' else 'or', input()))
import re
for _ in range(int(input())):
s = input()
s = re.sub(r" &&(?= )", " and", s)
s = re.sub(r" ||(?= )", ' or', s)
print(s)
|
import matplotlib.pyplot as plt
def main():
    """Render a bar chart: bars at x = 0, 10, ..., 40 with heights 100..500."""
    xs = [10 * i for i in range(5)]
    ys = [100 * (i + 1) for i in range(5)]
    plt.bar(xs, ys)
    plt.show()


main()
#!/usr/bin/python
from __future__ import division
import numpy as np
import pandas as pd
import sys
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import csv
# Majority-voting ensemble: several SVM / random-forest members are trained
# on different data sets and feature subsets; the per-member predictions on
# the test file (sys.argv[1]) are written side by side for later voting.
train_frame0 = pd.read_csv("train200_200_new1.csv")
train_frame1 = pd.read_csv("train250_250_new1.csv")
train_frame3 = pd.read_csv("test261_300.csv")
train_frame4 = pd.read_csv("train100_150_new1.csv")
train_frame5 = pd.read_csv("train130_100_new1.csv")

test_file = sys.argv[1]
test_frame = pd.read_csv(test_file)

# Feature subsets used by the individual ensemble members.
cols = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','asa','sizechange']
cols1 = ['correlation','conservation','polaritychange','hydroindexchange','secondarystruc','asa','sizechange']   # without charge
cols2 = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','sizechange']  # without asa
cols3 = ['correlation','conservation','polaritychange','chargechange','secondarystruc','sizechange']  # without asa/hydro
colsRes = ['class']


def _xy(frame, feature_cols):
    """Return (features, labels) numpy arrays for the given feature subset.

    Bug fix: DataFrame.as_matrix() was deprecated and later removed from
    pandas; .values is the equivalent, version-stable accessor.
    """
    return frame[feature_cols].values, frame[colsRes].values.ravel()


# Training sets: (frame, feature subset) pairs as in the original script.
trainArr0, trainRes0 = _xy(train_frame3, cols)    # dataset 261/300, all features
trainArr1, trainRes1 = _xy(train_frame4, cols)    # dataset 100/150, all features
trainArr2, trainRes2 = _xy(train_frame5, cols)    # dataset 130/100, all features
trainArr3, trainRes3 = _xy(train_frame5, cols1)   # without hydro index
trainArr4, trainRes4 = _xy(train_frame3, cols2)   # 261/300 without asa
trainArr5, trainRes5 = _xy(train_frame0, cols3)   # 200/200 without asa/hydro
trainArr6, trainRes6 = _xy(train_frame1, cols3)   # 250/250 without asa/hydro
trainArr8, trainRes8 = _xy(train_frame1, cols)    # 250/250, all features
trainArr7, trainRes7 = _xy(train_frame3, cols3)   # 261/300 without asa/hydro

# Test sets, one per feature subset.
testArr, testRes = _xy(test_frame, cols)
testArr1, testRes1 = _xy(test_frame, cols1)
testArr2, testRes2 = _xy(test_frame, cols2)
testArr3, testRes3 = _xy(test_frame, cols3)

print("train ")
print()
print("train features:")

# SVM members (class weights tuned per training set).
classifier = svm.SVC(kernel='linear', class_weight={1: .43, -1: .57})
classifier.fit(trainArr1, trainRes1)
results0 = classifier.predict(testArr)

classifier = svm.SVC(kernel='linear', class_weight={1: .47, -1: .53})
classifier.fit(trainArr0, trainRes0)
results1 = classifier.predict(testArr)

# Disabled third SVM member (kept from the original):
# classifier = svm.SVC(kernel='linear', class_weight={1: .43, -1: .57})
# classifier.fit(trainArr2, trainRes2)
# results2 = classifier.predict(testArr)


def _rf_predict(train_x, train_y, test_x):
    """Fit a fresh random forest (shared hyper-parameters) and predict."""
    rf = RandomForestClassifier(max_features=0.4, n_estimators=1000,
                                n_jobs=1, min_samples_leaf=50)
    rf.fit(train_x, train_y)
    return rf.predict(test_x)


# Random-forest members, each on its own training set / feature subset.
result0 = _rf_predict(trainArr2, trainRes2, testArr)
result2 = _rf_predict(trainArr4, trainRes4, testArr2)
result3 = _rf_predict(trainArr5, trainRes5, testArr3)
result4 = _rf_predict(trainArr6, trainRes6, testArr3)
result5 = _rf_predict(trainArr7, trainRes7, testArr3)

# One column per ensemble member; the majority vote happens downstream.
with open('majority_voting4_new.csv', 'w') as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerows(zip(results0, results1, result0, result2, result3,
                         result4, result5))
|
import pyspark.sql.functions as F
from pyspark.sql import DataFrame
def sample_transform(input_df: DataFrame) -> DataFrame:
    """Total the coins of each hobbit and keep only those with more than 10.

    Returns a frame with columns (name, total_coins, indicator) where the
    indicator is 'yes' for every surviving row.
    """
    hobbit_totals = (
        input_df
        .where(input_df['race'] == F.lit('hobbit'))
        .groupBy('name')
        .agg(F.sum('coins').alias('total_coins'))
    )
    flagged = hobbit_totals.select(
        'name',
        'total_coins',
        F.when(F.col('total_coins') > 10, 'yes').otherwise('no').alias('indicator'),
    )
    return flagged.where(F.col('indicator') == F.lit('yes'))
import numpy as np
import math
import time
from epics import caget, caput, PV
from bokeh.driving import count
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.layouts import column, row
from bokeh.models.glyphs import MultiLine
from bokeh.models.widgets import Select
import json
class pv_buffer():
    """Fixed-size rolling sample buffer for a single EPICS process variable."""

    def __init__(self, pv, buffer_size):
        self.pvname = pv
        self.pv = PV(pv, auto_monitor=True)
        # Seed the buffer with one sample; timestamps are kept relative
        # to tstart when returned by poll().
        self.data = np.array([self.pv.get()])
        self.tstart = time.time()
        self.time = np.array([self.tstart])
        self.buffer_size = buffer_size

    def poll(self):
        """Sample the PV once and return (elapsed_seconds, values)."""
        now = time.time()
        value = caget(self.pvname)  # self.pv.get() would also work
        if len(self.data) < self.buffer_size:
            # Still filling up: grow both arrays.
            self.time = np.append(self.time, now)
            self.data = np.append(self.data, value)
        else:
            # Full: shift everything left one slot and overwrite the tail.
            self.time[:-1] = self.time[1:]
            self.time[-1] = now
            self.data[:-1] = self.data[1:]
            self.data[-1] = value
        return self.time - self.tstart, self.data
# Load the process-variable definitions for this model.
with open('pvdef.json', 'r') as fp:
    pvdefs = json.load(fp)

prefix = pvdefs['prefix']  # common EPICS name prefix
pvs = pvdefs['output']     # output PV definitions, keyed by short name

# Build a rolling buffer (100 samples) for every scalar float output PV.
pvbuffers = {}
for opv in pvs:
    pvdef = pvs[opv]
    if(pvdef['type']=='float' and isinstance(pvdef['value'],float)):
        pvbuffers[opv] = pv_buffer(prefix+opv,100)

plot_pvs = list(pvbuffers.keys())
current_pv = plot_pvs[0]  # PV currently shown in the plot

def pv_select_callback(attr, old, new):
    # Switch the plotted PV when the dropdown selection changes.
    global current_pv
    current_pv = new

select = Select(title="PV to Plot:", value=current_pv, options=plot_pvs)
select.on_change("value", pv_select_callback)

# Seed the plot with an initial sample.
ts,ys = pvbuffers[current_pv].poll()
source = ColumnDataSource(dict(x=ts, y=ys))

p = figure(plot_width=400, plot_height=400)
p.line(x='x', y='y', line_width=2, source=source)
p.yaxis.axis_label = current_pv + ' ('+pvs[current_pv]['units']+')'
p.xaxis.axis_label = 'time (sec)'

def update():
    # Periodic callback: poll the active PV and refresh the data source.
    global current_pv
    ts,ys = pvbuffers[current_pv].poll()
    source.data = dict(x=ts, y=ys)
    p.yaxis.axis_label = current_pv + ' ('+pvs[current_pv]['units']+')'

# Assemble the document: selector above the plot, polled every 250 ms.
scol = column(row(select),row(p),width=350)
curdoc().add_root( scol )
curdoc().add_periodic_callback(update, 250)
curdoc().title = "Online Surrogate Model Strip Tool"
|
def Age(x):
    """Coerce x to an int-valued AGE in (0, 160); terminate the program otherwise."""
    class AGE(int): pass
    value = float(x)
    if not 0 < value < 160:
        exit()
    return AGE(value)
def Height(x):
    """Coerce x to an int-valued HEIGHT in (0, 300); terminate the program otherwise."""
    class HEIGHT(int): pass
    value = float(x)
    if not 0 < value < 300:
        exit()
    return HEIGHT(value)
def Weight(x):
    """Coerce x to an int-valued WEIGHT in (0, 600); terminate the program otherwise."""
    class WEIGHT(int): pass
    value = float(x)
    if not 0 < value < 600:
        exit()
    return WEIGHT(value)
def Fat(x):
    """Coerce x to a float FAT fraction strictly between 0 and 1; terminate otherwise."""
    class FAT(float): pass
    value = float(x)
    if not 0.0 < value < 1.0:
        exit()
    return FAT(value)
class MALE: pass
class FEMALE: pass

# Names of the recognised gender classes.
GENDERS = {"MALE", "FEMALE"}

# Shared gender instances keyed by lowercase name.
GENDER = {
    "male": MALE(),
    "female": FEMALE(),
}


def Gender(x):
    """Map a string (or an existing MALE/FEMALE instance) to the shared instance.

    String input is stripped and lowercased before the lookup.
    """
    kind = x.__class__.__name__
    if kind == "str":
        return GENDER[x.strip().lower()]
    return GENDER[kind.lower()]
class BABY: pass
class TODDLER: pass
class CHILD: pass
class EARLYTEEN: pass
class MIDTEEN: pass
class LATETEEN: pass
class EARLYTWENTIES: pass
class MIDTWENTIES: pass
class LATETWENTIES: pass
class THIRTIES: pass
class FOURTIES: pass
class FIFTIES: pass
class ELDERLY: pass

# Bug fix: "THRITIES" was a typo for "THIRTIES" in both name sets below.
AGE_GROUP = {"BABY", "TODDLER", "CHILD", "EARLYTEEN", "MIDTEEN", "LATETEEN",
             "EARLYTWENTIES", "MIDTWENTIES", "LATETWENTIES", "THIRTIES",
             "FOURTIES", "FIFTIES", "ELDERLY"}
ALLOWED_AGE_GROUP = {"EARLYTWENTIES", "MIDTWENTIES", "LATETWENTIES",
                     "THIRTIES", "FOURTIES", "FIFTIES", "ELDERLY"}


def AgeGroup(x):
    """Classify a numeric age into one of the marker classes above.

    Bug fixes relative to the original:
    - `assert(isinstance(x, AGE)):` was a syntax error and referenced AGE,
      which only exists locally inside Age(); a plain numeric coercion is
      used instead.
    - The return values used undefined CamelCase names (Baby(), Toddler(),
      ...); they now instantiate the module-level marker classes.
    """
    age = float(x)
    if age <= 2:
        return BABY()
    if age <= 5:
        return TODDLER()
    if age <= 9:
        return CHILD()
    if age <= 14:
        return EARLYTEEN()
    if age <= 17:
        return MIDTEEN()
    if age <= 19:
        return LATETEEN()
    if age <= 24:
        return EARLYTWENTIES()
    if age <= 27:
        return MIDTWENTIES()
    if age <= 29:
        return LATETWENTIES()
    if age <= 39:
        return THIRTIES()
    if age <= 49:
        return FOURTIES()
    if age <= 59:
        return FIFTIES()
    return ELDERLY()
class UNDERWEIGHT: pass
class NORMALWEIGHT: pass
class OVERWEIGHT: pass
class OBESE: pass


def new_bmi_trefethen(weight, height):
    """Trefethen's alternative BMI formula: 1.3 * weight / height**2.5.

    Bug fix: the original computed the expression but never returned it.
    """
    # NOTE(review): assumes height is in metres — confirm against callers,
    # since Height() accepts values up to 300, which looks like centimetres.
    return 1.3 * weight / height ** 2.5


BMI_NAMES = {"UNDERWEIGHT", "NORMALWEIGHT", "OVERWEIGHT", "OBESE"}


def Bmi(weight, height):
    """Classify the body-mass index weight / height**2 into a marker class.

    Bug fixes: the original body referenced undefined names `w`/`h` instead
    of its parameters, and asserted against WEIGHT/HEIGHT classes that only
    exist locally inside Weight()/Height().
    """
    # NOTE(review): height is assumed to be in metres here — TODO confirm units.
    bmi = weight / height ** 2
    if bmi <= 18.5:
        return UNDERWEIGHT()
    if bmi <= 24.9:
        return NORMALWEIGHT()
    if bmi <= 29.9:
        return OVERWEIGHT()
    return OBESE()
class DANGEROUSLYLOW: pass
class EXCELLENT: pass
class GOOD: pass
class FAIR: pass
class HIGH: pass
class DANGEROUSLYHIGH: pass


def FatGroup(fat, age_group=None, gender=None):
    """Classify a body-fat fraction (0..1) into a coarse band.

    The original function was truncated mid-statement (two unfinished
    `assert` lines and a dangling `if isinstance`), so the age/gender
    specific table it intended is unknown.  NOTE(review): the thresholds
    below are age/gender-agnostic placeholders — TODO replace with the
    intended table; age_group and gender are accepted but currently unused.
    """
    value = float(fat)
    if value < 0.05:
        return DANGEROUSLYLOW()
    if value < 0.14:
        return EXCELLENT()
    if value < 0.18:
        return GOOD()
    if value < 0.25:
        return FAIR()
    if value < 0.32:
        return HIGH()
    return DANGEROUSLYHIGH()
import sys

# Bug fixes: sys.argv is a list and has no .split(); skip argv[0] (the
# script name) before unpacking the five user arguments.  The FatGroup
# call was also missing its gender argument.
a, g, w, h, f = sys.argv[1:]
a = Age(a)
g = Gender(g)
w = Weight(w)
h = Height(h)
f = Fat(f)
ag = AgeGroup(a)
bmi = Bmi(weight=w, height=h)
fg = FatGroup(fat=f, age_group=ag, gender=g)
print(a)
print(g)
print(w)
print(h)
print(f)
|
import uuid
from django.core.cache import cache
from django.shortcuts import render
from rest_framework.exceptions import APIException
from rest_framework.generics import CreateAPIView
from rest_framework.response import Response
from Admin.models import AdminUser
from Admin.serializers import AdminUserSerializer
from DjangoRESTTpp.settings import ADMIN_USER_TIMOUT
class AdminUsersAPIView(CreateAPIView):
    """Admin-user endpoint: registration or login, selected by ?action=."""

    serializer_class = AdminUserSerializer
    queryset = AdminUser.objects.filter(is_delete=False)

    def post(self, request, *args, **kwargs):
        """Dispatch on the `action` query parameter: register or login."""
        action = request.query_params.get('action')
        if action == 'register':
            # Registration: delegate to DRF's standard create flow.
            return self.create(request, *args, **kwargs)
        if action == 'login':
            username = request.data.get('a_username')
            password = request.data.get('a_password')
            candidates = AdminUser.objects.filter(a_username=username)
            # Reject unknown users.
            if not candidates.exists():
                raise APIException(detail='用户不存在')
            user = candidates.first()
            if not user.check_admin_password(password):
                # NOTE(review): this message reads like "user has left the
                # company" for a failed password check — confirm it is the
                # intended response text.
                raise APIException(detail='用户已离职')
            # Issue an opaque session token cached against the user id.
            token = uuid.uuid4().hex
            cache.set(token, user.id, timeout=ADMIN_USER_TIMOUT)
            return Response({
                'msg': 'ok',
                'status': 200,
                'token': token,
            })
        # Unknown action value.
        raise APIException(detail='请提供正确的动作')
# Data-handling demo: printing numbers and strings in different ways.
num_inteira = 5
num_decimal = 7.3
val_string = "qualquer texto"

# Ways to combine a float with text for printing.
print("Concatenando decimal: ", num_decimal)
# NOTE(review): %.42f prints 42 decimal places — confirm %.2f wasn't intended.
print("Concatenando decimal: %.42f" % num_decimal)
print("Concatenando decimal: " + str(num_decimal))

# Ways to combine a string with text for printing.
# Bug fix: these three labels said "decimal" but print the string value —
# a copy/paste slip from the block above.
print("Concatenando string: ", val_string)
print("Concatenando string: %s" % val_string)
print("Concatenando string: " + val_string)
import unittest
from katas.kyu_7.reversed_strings import solution
class ReversedStringsTestCase(unittest.TestCase):
    """Unit tests for the string-reversal kata solution."""

    def _check(self, text, expected):
        # Shared assertion helper for all cases.
        self.assertEqual(solution(text), expected)

    def test_equals(self):
        self._check('world', 'dlrow')

    def test_equals_2(self):
        self._check('hello', 'olleh')

    def test_equals_3(self):
        self._check('', '')

    def test_equals_4(self):
        self._check('h', 'h')
|
import sys
import os
import time
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QPushButton, QHBoxLayout, QMainWindow, QWidget
from PyQt5.QtCore import QCoreApplication, Qt
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
class MainWindow(QMainWindow):
    """Landing window with two buttons that launch the course sub-windows."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setGeometry(500, 300, 400, 200)
        self.setWindowTitle("Course Selection")
        horizontalLayout = QHBoxLayout()
        # Two fixed-size launcher buttons laid out side by side.
        self.button1 = QPushButton("ProCon",self)
        self.button1.setFixedSize(100,100)
        self.button1.clicked.connect(self.proconPress)
        self.button2 = QPushButton("Drones",self)
        self.button2.setFixedSize(100,100)
        self.button2.clicked.connect(self.dronesPress)
        horizontalLayout.addWidget(self.button1)
        horizontalLayout.addWidget(self.button2)
        widget = QWidget()
        widget.setLayout(horizontalLayout)
        self.setCentralWidget(widget)

    def proconPress(self):
        """Open the ProCon window and minimise this one."""
        # Imported lazily so the sub-window module loads only when requested.
        from procon import Window
        if hasattr(Qt, 'AA_EnableHighDpiScaling'):
            QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
        if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
            QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
        self.final = Window()
        self.final.show()
        self.showMinimized()
        # NOTE(review): this re-connects dronesPress even though __init__
        # already connected it; Qt allows duplicate connections, so after
        # alternating clicks a handler can fire more than once per click —
        # confirm, and consider Qt.UniqueConnection.
        self.button1.clicked.disconnect(self.proconPress)
        self.button2.clicked.connect(self.dronesPress)

    def dronesPress(self):
        """Open the Drones window and minimise this one."""
        from drones import Window
        if hasattr(Qt, 'AA_EnableHighDpiScaling'):
            QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
        if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
            QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
        self.final = Window()
        self.final.show()
        self.showMinimized()
        # NOTE(review): same duplicate-connection hazard as in proconPress.
        self.button2.clicked.disconnect(self.dronesPress)
        self.button1.clicked.connect(self.proconPress)
def main():
    """Create the Qt application and run the course-selection window."""
    application = QApplication(sys.argv)
    application.setStyle("Fusion")
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())


if __name__ == '__main__':
    main()
|
# Loan repayment simulator: each month interest is added, then a fixed
# payment is subtracted, until the remaining debt is cleared.
sya = int(input("借金>"))          # outstanding debt (yen)
riritu = int(input("年利率(%)>"))  # annual interest rate in percent
hennsai = int(input("返済額>"))    # fixed monthly payment (yen)
count = 0
while True:
    count += 1
    if sya <= hennsai:
        # Final month: add one month of interest, then pay off the remainder.
        sya = int(((sya*riritu/100)/12 + sya))
        sumhen = hennsai*(count-1)+sya
        print(count,"月: 返済額",sya,"円 これで完済。 返済総額:",sumhen,"円")
        break
    # NOTE(review): if the payment never exceeds the monthly interest this
    # loop does not terminate — confirm inputs are validated upstream.
    sya = int(((sya*riritu/100)/12 + sya) - hennsai)
    print(count,"月:返済額",hennsai,"円 残り",sya,"円")
print("Special prime numbers")
from math import sqrt
# Given two numbers n and k, find whether there exist at least k Special
# prime numbers from 2 to n inclusively.  A Special prime can be expressed
# as (prime) + (neighbouring prime) + 1, e.g. 19 = 7 + 11 + 1.
# NOTE(review): as in the original, only the prime listing is implemented;
# the "special" check and k are not handled yet.


def primes_up_to(n):
    """Return every prime in [2, n] by trial division."""
    found = []
    # Bug fix: the original used range(L, n), which excluded n itself even
    # though the problem statement says "2 to n inclusively".
    for num in range(2, n + 1):
        for i in range(2, num):
            if num % i == 0:
                break
        else:
            found.append(num)
    return found


if __name__ == "__main__":
    n = int(input("n = "))
    print(primes_up_to(n))
import math

print("Marsenne Prime")


def prime_check(n):
    """Return True if n is prime, False otherwise (trial division up to sqrt n)."""
    # Bug fix: the original returned None (not False) for n <= 1, and had
    # an unreachable `break` after `return False`.
    if n <= 1:
        return False
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True


def mersenne_primes_up_to(n):
    """Return all Mersenne primes 2**p - 1 that are <= n.

    Bug fix: the original iterated exponents only up to sqrt(n), which
    skipped 3 (= 2**2 - 1) for small n such as n = 3; exponents now grow
    until the candidate exceeds n.
    """
    result = []
    p = 2
    while True:
        candidate = 2 ** p - 1
        if candidate > n:
            break
        if prime_check(candidate):
            result.append(candidate)
        p += 1
    return result


if __name__ == "__main__":
    n = int(input("n = "))
    for prime in mersenne_primes_up_to(n):
        print(prime)
|
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.edit_handlers import FieldPanel
class BlogIndexPage(Page):
    """Listing page that introduces and parents the individual blog posts."""
    # Free-form introduction text shown on the index page.
    intro = models.TextField()
    content_panels = Page.content_panels + [
        FieldPanel('intro', classname="full")
    ]
    # Only BlogPage children may be created beneath this page.
    subpage_types = ['BlogPage']
class BlogPage(Page):
    """A single blog post with body content and an optional author name."""
    # Post body; author is optional (may be blank).
    content = models.TextField()
    author = models.CharField(max_length=255, blank=True)
    content_panels = Page.content_panels + [
        FieldPanel('content'),
        FieldPanel('author', classname="full"),
    ]
    # Posts live only under a BlogIndexPage and have no children.
    parent_page_types = ['BlogIndexPage']
    subpage_types = []
|
# Django bootstrap, sigh.
from django.conf import settings; settings.configure()
import mock
import djpjax
from django.template.response import TemplateResponse
from django.test.client import RequestFactory
# A couple of request objects - one PJAX, one not.
rf = RequestFactory()
# Plain GET request.
regular_request = rf.get('/')
# Same GET, but carrying the X-PJAX header the djpjax decorator looks for.
pjax_request = rf.get('/', HTTP_X_PJAX=True)
# Tests.
def test_pjax_sans_template():
    """Without an explicit PJAX template, '-pjax' is inserted before the suffix."""
    for request, expected in ((regular_request, "template.html"),
                              (pjax_request, "template-pjax.html")):
        assert view_sans_pjax_template(request).template_name == expected

def test_view_with_silly_template():
    """Extension-less template names simply get a '-pjax' suffix appended."""
    for request, expected in ((regular_request, "silly"),
                              (pjax_request, "silly-pjax")):
        assert view_with_silly_template(request).template_name == expected

def test_view_with_pjax_template():
    """An explicit PJAX template overrides the derived name entirely."""
    for request, expected in ((regular_request, "template.html"),
                              (pjax_request, "pjax.html")):
        assert view_with_pjax_template(request).template_name == expected
# The test "views" themselves.
@djpjax.pjax()
def view_sans_pjax_template(request):
    # No explicit PJAX template: the decorator derives "template-pjax.html".
    return TemplateResponse(request, "template.html", {})

@djpjax.pjax()
def view_with_silly_template(request):
    # Template name without an extension ("silly").
    return TemplateResponse(request, "silly", {})

@djpjax.pjax("pjax.html")
def view_with_pjax_template(request):
    # Explicit PJAX template passed to the decorator.
    return TemplateResponse(request, "template.html", {})
|
# _*_ coding:UTF-8 _*_
#! /usr/bin/env python
from test import *
print "是否更新控件坐标: Y or N "
updateflag = raw_input("Enter your choice: ")
if updateflag == 'Y' or 'y':
print "鼠标放到用户名框保持1S"
user_x, user_y = set_cursor_po()
time.sleep(1)
print "鼠标放到password框保持1S"
pas_x, pas_y = set_cursor_po()
time.sleep(1)
print "鼠标放到验证码输入框保持1S"
vcode_x, vcode_y = set_cursor_po()
time.sleep(1)
print "鼠标放到login保持1S"
log_x, log_y = set_cursor_po()
time.sleep(1)
inputtextbox(user_x,user_y,'13818169690')
p = str(251578056/314)
inputtextbox(pas_x,pas_y,p)
vcode = raw_input("Enter Vcode: ")
inputtextbox(vcode_x, vcode_y, vcode)
mouse_click(log_x,log_y) |
from django.contrib import admin
from .models import Post, PostImage, Comment, Like
# Expose the blog models in the Django admin with default ModelAdmin options.
admin.site.register(Post)
admin.site.register(PostImage)
admin.site.register(Comment)
admin.site.register(Like)
# Read the current salary and apply a 15% raise.
v = float(input('Salário atual: R$'))
p = v*15 / 100  # raise amount
n = v + p       # new salary
# Bug fix: output label typo "sálario" -> "salário" (misplaced accent).
print('Novo salário: R${:.2f}'.format(n))
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load compute instances into Inventory.
This pipeline depends on the LoadProjectsPipeline.
"""
from google.cloud.security.common.data_access import project_dao as proj_dao
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadInstanceGroupManagersPipeline(base_pipeline.BasePipeline):
    """Load compute instance group managers for all projects."""

    # Inventory resource name used as the load target.
    RESOURCE_NAME = 'instance_group_managers'

    def _transform(self, resource_from_api):
        """Create an iterator of instance group managers to load into database.

        Args:
            resource_from_api (dict): Instance group managers, keyed by
                project id, from GCP API.

        Yields:
            iterator: instance group manager properties in a dict.
        """
        # Fixed: dict.iteritems() is Python-2-only; .items() behaves the same
        # here and also works under Python 3.
        for (project_id, igms) in resource_from_api.items():
            for igm in igms:
                yield {'project_id': project_id,
                       'id': igm.get('id'),
                       'creation_timestamp': parser.format_timestamp(
                           igm.get('creationTimestamp'),
                           self.MYSQL_DATETIME_FORMAT),
                       'name': igm.get('name'),
                       'description': igm.get('description'),
                       'base_instance_name': igm.get('baseInstanceName'),
                       'current_actions': parser.json_stringify(
                           igm.get('currentActions', {})),
                       'instance_group': igm.get('instanceGroup'),
                       'instance_template': igm.get('instanceTemplate'),
                       'named_ports': parser.json_stringify(
                           igm.get('namedPorts', [])),
                       'region': igm.get('region'),
                       'target_pools': parser.json_stringify(
                           igm.get('targetPools', [])),
                       'target_size': igm.get('targetSize'),
                       'zone': igm.get('zone'),
                       'raw_instance_group_manager':
                           parser.json_stringify(igm)}

    def _retrieve(self):
        """Retrieve instance group managers from GCP.

        Get all the projects in the current snapshot and retrieve the
        compute instance group managers for each.

        Returns:
            dict: Mapping projects with their instance group managers (list):
            {project_id: [instance group managers]}
        """
        projects = (proj_dao
                    .ProjectDao(self.global_configs)
                    .get_projects(self.cycle_timestamp))
        igms = {}
        for project in projects:
            project_igms = self.safe_api_call('get_instance_group_managers',
                                              project.id)
            if project_igms:
                # Only record projects that actually returned IGMs.
                igms[project.id] = project_igms
        return igms

    def run(self):
        """Run the pipeline: retrieve, transform, then load into Inventory."""
        igms = self._retrieve()
        loadable_igms = self._transform(igms)
        self._load(self.RESOURCE_NAME, loadable_igms)
        self._get_loaded_count()
|
from game import Hangman


def main():
    """Create a Hangman game and run an interactive session."""
    game = Hangman()
    game.play_game()


if __name__ == "__main__":
    main()
|
# The 30 Dow Jones Industrial Average constituents (lowercase names).
# Fixed: removed a stray '|' artifact after the closing bracket that made
# the statement a SyntaxError.
companies = [
    "3m",
    "american express",
    "apple",
    "boeing",
    "caterpillar",
    "chevron",
    "cisco systems",
    "coca-cola",
    "dowdupont",
    "exxonmobil",
    "goldman sachs",
    "the home depot",
    "ibm",
    "intel",
    "johnson & johnson",
    "jpmorgan chase",
    "mcdonald's",
    "merck & company",
    "microsoft",
    "nike",
    "pfizer",
    "procter & gamble",
    "travelers",
    "unitedhealth group",
    "united technologies",
    "verizon",
    "visa",
    "walmart",
    "walgreens boots alliance",
    "walt disney",
]
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import math
from PyQt5.QtCore import (qAbs, QLineF, QPointF, qrand, QRectF, QSizeF, qsrand,
Qt, QTime)
from PyQt5.QtGui import (QBrush, QColor, QLinearGradient, QPainter,
QPainterPath, QPen, QPolygonF, QRadialGradient)
from PyQt5.QtWidgets import (QApplication, QWidget, QMainWindow, QSlider, QVBoxLayout, QGridLayout, QGraphicsItem, QGraphicsScene,
QGraphicsView, QStyle)
class GraphWidget(QGraphicsView):
    """Zoomable graphics view showing a quadrilateral built from four points."""

    def __init__(self):
        super(GraphWidget, self).__init__()
        self.timerId = 0
        self.scene = QGraphicsScene(self)
        self.scene.setItemIndexMethod(QGraphicsScene.NoIndex)
        self.scene.setSceneRect(QRectF(4800, 1000, 300, 300))
        # The four corner points of the displayed shape.
        point_a = [4934.81092, 1120.9386]
        point_b = [4818.76473, 1152.60695]
        point_c = [4860.26541, 1166.02368]
        point_d = [5089.50654, 1150.74735]
        # Outline segments: A-D, A-B, B-C, C-D.
        segments = ((point_a, point_d), (point_a, point_b),
                    (point_b, point_c), (point_c, point_d))
        for start, end in segments:
            self.scene.addLine(start[0], start[1], end[0], end[1])
        self.setScene(self.scene)
        self.setCacheMode(QGraphicsView.CacheBackground)
        self.setViewportUpdateMode(QGraphicsView.BoundingRectViewportUpdate)
        self.setRenderHint(QPainter.Antialiasing)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QGraphicsView.AnchorViewCenter)

    def keyPressEvent(self, event):
        # All keys are currently ignored; the grouping is kept as a
        # placeholder for future per-key handling.
        key = event.key()
        if key in (Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right,
                   Qt.Key_Plus, Qt.Key_Minus, Qt.Key_Space, Qt.Key_Enter):
            pass
        else:
            pass

    def wheelEvent(self, event):
        # One wheel notch (120 angle-delta units) zooms by 2^(±0.5).
        self.scaleView(math.pow(2.0, -event.angleDelta().y() / 240.0))

    def scaleView(self, scaleFactor):
        """Apply the zoom factor unless the resulting scale leaves [0.07, 100]."""
        unit_rect = QRectF(0, 0, 1, 1)
        resulting = self.transform().scale(scaleFactor, scaleFactor).mapRect(unit_rect).width()
        if 0.07 <= resulting <= 100:
            self.scale(scaleFactor, scaleFactor)

    def sceneupdate(self, A, B, C, D):
        # Remove every item from the scene; redrawing is not implemented yet.
        self.scene.clear()
class MainWindow(QMainWindow):
    """Main window: the graph view with a horizontal slider underneath."""

    def __init__(self):
        super().__init__()
        self.init()

    def init(self, result=None):
        """Build the central widget (graph view + slider).

        Args:
            result: unused placeholder. Bug fix: it was a required parameter,
                but __init__ calls self.init() with no arguments, which raised
                TypeError; a None default keeps both call styles working.
        """
        self.view = GraphWidget()
        self.sld = QSlider(Qt.Horizontal, self)
        self.sld.setRange(0, 100)
        layout = QVBoxLayout()
        layout.addWidget(self.view)
        layout.addWidget(self.sld)
        self.widget = QWidget()
        self.widget.setLayout(layout)
        self.setCentralWidget(self.widget)
        self.setWindowTitle("Diagramscene")
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # Seed Qt's legacy RNG with the current time-of-day in seconds.
    # NOTE(review): qsrand/qrand are deprecated in Qt5 -- consider QRandomGenerator.
    qsrand(QTime(0,0,0).secsTo(QTime.currentTime()))
    widget = MainWindow()
    widget.show()
    # Enter the Qt event loop; its exit status is propagated to the shell.
    sys.exit(app.exec_())
|
"""QtChunkReceiver and QtGuiEvent classes.
"""
from qtpy.QtCore import QObject, Signal
from napari.components.experimental.chunk import chunk_loader
from napari.utils.events import EmitterGroup, Event, EventEmitter
class QtGuiEvent(QObject):
    """Fires an event in the GUI thread.

    Listens to an event in any thread. When that event fires, it uses a Qt
    Signal/Slot to fire a gui_event in the GUI thread. If the original
    event is already in the GUI thread that's fine, the gui_event will
    be immediately fired in the GUI thread.

    Parameters
    ----------
    parent : QObject
        Parent Qt object.
    emitter : EventEmitter
        The event we are listening to.

    Attributes
    ----------
    emitter : EventEmitter
        The event we are listening to.
    events : EmitterGroup
        The only event we report is events.gui_event.

    Notes
    -----
    Qt's signal/slot mechanism is the only way we know of to "call" from a
    worker thread to the GUI thread. When Qt signals from a worker thread
    it posts a message to the GUI thread. When the GUI thread is next
    processing messages it will receive that message and call into the Slot
    to deliver the message/event.
    """

    signal = Signal(Event)

    def __init__(self, parent: QObject, emitter: EventEmitter) -> None:
        super().__init__(parent)
        emitter.connect(self._on_event)
        self.emitter = emitter
        self.events = EmitterGroup(source=self, gui_event=None)
        self.signal.connect(self._slot)

    def _on_event(self, event) -> None:
        """Event was fired, we could be in any thread."""
        self.signal.emit(event)

    def _slot(self, event) -> None:
        """Slot is always called in the GUI thread."""
        self.events.gui_event(original_event=event)

    def close(self):
        """Viewer is closing."""
        # Bug fix: the original called self.gui_event.disconnect(), but this
        # class has no `gui_event` attribute -- the emitter lives inside the
        # EmitterGroup created in __init__ -- so close() raised AttributeError.
        self.events.gui_event.disconnect()
        self.emitter.disconnect()
class QtChunkReceiver:
    """Deliver chunks loaded by the ChunkLoader to their owning layer.

    The ChunkLoader's chunk_loaded event may fire in a worker thread
    (concurrent.futures only guarantees the done-callback runs in some
    thread of the right process), but Layer.on_chunk_loaded() must only run
    in the GUI thread because the model code is not thread safe. QtGuiEvent
    re-fires the event in the GUI thread for us; if the event was already
    in the GUI thread, it is simply delivered right away.

    Parameters
    ----------
    parent : QObject
        Parent Qt object.

    Attributes
    ----------
    gui_event : QtGuiEvent
        We use this to call _on_chunk_loaded_gui() in the GUI thread.
    """

    def __init__(self, parent: QObject) -> None:
        source_event = chunk_loader.events.chunk_loaded
        self.gui_event = QtGuiEvent(parent, source_event)
        self.gui_event.events.gui_event.connect(self._on_chunk_loaded_gui)

    @staticmethod
    def _on_chunk_loaded_gui(event) -> None:
        """Hand a freshly loaded chunk to its layer (GUI thread only).

        Parameters
        ----------
        event : Event
            Wraps the original chunk_loaded event.
        """
        original = event.original_event
        original.layer.on_chunk_loaded(original.request)

    def close(self):
        """Viewer is closing."""
        self.gui_event.close()
|
l = [1, 2, 3, 4]


def rec(l):
    """Return a new list with the elements of *l* in reverse order.

    Bug fix: the original implementation popped elements off the argument,
    so the caller's list was left empty after the call. This version builds
    the result from slices and leaves the input untouched.
    """
    if not l:
        return []
    return [l[-1]] + rec(l[:-1])


print(rec(l))
#! /usr/bin/python3
"""Interactive Hopfield-network pattern-recall demo.

Bug fix: the file contained an unresolved git merge conflict
(<<<<<<< HEAD / ======= / >>>>>>> report markers), which is a SyntaxError.
The 'report' branch is kept because it generalizes the HEAD branch:
W_store() accepts a list of patterns (a single pattern is the list of one),
and the interactive pattern_evolution class supersedes the HEAD function of
the same name.
"""
import sys
import os
sys.path.insert(0, os.path.abspath('../models'))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from network import Network


def plot_neurons(x, n=8):
    """Render the flat state vector x (length n*n) as an n-by-n gray image."""
    assert x.shape == (n**2,)
    X = x.reshape(n, n)
    plt.imshow(X, cmap='gray', interpolation='none', vmin=-1, vmax=1)


def random_pattern(N=64):
    """Return a random vector of N entries drawn from {+1, -1}."""
    return np.random.choice([1, -1], N)


def W_store(ps):
    """Hebbian weight matrix storing every pattern in the list ps."""
    N = ps[0].shape[0]
    W = np.zeros((N, N))
    for p in ps:
        W += np.outer(p, p) / N
    return W


def mutation(x, prob):
    """Return a copy of x with each entry sign-flipped with probability prob.

    x itself is not modified.
    """
    x_copy = x.copy()
    for i in range(len(x)):
        if np.random.random() < prob:
            x_copy[i] = -x[i]
    return x_copy


def new_fig_off_axis():
    """Create a figure/axes pair with both axes hidden."""
    fig, ax = plt.subplots()
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    return fig, ax


class pattern_evolution(object):
    """Interactive recall loop: press c to do another trial, q to leave."""

    def __init__(self, f, W, x0s, ts=10, sigma=0.1, picname=None, save=False):
        self.f = f
        self.W = W
        self.x0s = x0s
        self.time_step = ts
        # Bug fix: sigma was accepted but never stored, so init_trial()
        # referenced an undefined name.
        self.sigma = sigma
        self.picname = picname
        self.save = save
        self.i, self.j = 0, 0
        self.next_trial = False
        self.end = False

    def init_trial(self):
        """Start a trial from the next stored x0, or a random pattern."""
        x0 = self.x0s[self.i] if self.i < len(self.x0s) else random_pattern(64)
        self.Hopfield = Network(
            64, self.f, self.W, np.zeros(64), x0, sigma=self.sigma)

    def press(self, event):
        """Matplotlib key handler: c = next trial, q = quit."""
        if event.key == 'c':
            self.next_trial = True
        if event.key == 'q':
            self.end = True

    def one_plot(self):
        """Draw the current state, wait for a key, then advance or restart."""
        plot_neurons(self.Hopfield.x)
        if self.save:
            assert self.picname is not None
            plt.savefig("{}_{}_{}".format(self.picname, self.i, self.j),
                        bbox_inches='tight')
        plt.show()
        while not plt.waitforbuttonpress(1):
            pass
        if self.next_trial:
            self.i += 1
            self.j = 0
            self.init_trial()
            self.next_trial = False
        else:
            self.Hopfield.simulate(self.time_step / 10)
            self.j += 1

    def run(self):
        """Run trials until the user presses q."""
        print("press c to do a next trial and press q to quit, and any other"
              " key to continue")
        fig, ax = new_fig_off_axis()
        fig.canvas.mpl_connect('key_press_event', self.press)
        self.init_trial()
        while not self.end:
            self.one_plot()


def patterns(n, f, ts=10, prob=0.15, noise=0.1, picname=None, save=False):
    """Store n random patterns, display them, then run the interactive recall."""
    xs = [random_pattern() for _ in range(n)]
    W = W_store(xs)
    x0s = [np.zeros(64)]
    plt.ion()
    for i, x in enumerate(xs):
        # Initial conditions: mutated versions of each pattern and its inverse.
        x0s.extend([mutation(x, prob), mutation(-x, prob)])
        new_fig_off_axis()
        plot_neurons(x)
        if save:
            assert picname is not None
            plt.savefig("{}_p{}".format(picname, i), bbox_inches='tight')
        plt.show()
        while not plt.waitforbuttonpress(1):
            pass
    pattern_evolution(f, W, x0s, ts, noise, picname=picname, save=save).run()


if __name__ == "__main__":
    print("how many patterns?")
    n = int(input())
    assert n > 0
    print("noise level?")
    sigma = float(input())
    assert sigma >= 0
    print("mutation probabilty for initial conditions?")
    prob = float(input())
    assert 0 <= prob <= 1
    print("time steps between two picutres")
    ts = int(input())
    print("sgn or tanh for the activation function? (1 or 2)")
    while True:
        try:
            k = int(input())
            assert k in [1, 2]
            break
        except (ValueError, AssertionError):
            # Narrowed from a bare except: only retry on bad input.
            print("please enter 1 or 2")
    f = np.sign if k == 1 else np.tanh
    print("save? (y or n)")
    while True:
        r = input()
        if r in ['y', 'yes']:
            print("please give the prifix (path included) of the picture name")
            patterns(n, f, ts, prob, sigma, input(), True)
            break
        elif r in ['n', 'no']:
            patterns(n, f, ts, prob, sigma)
            break
        else:
            print("please enter y or n")
|
"""
Configuration module.
"""
import logging
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
import pytz
from gitdb.exc import BadName
from pydriller.domain.commit import Commit
logger = logging.getLogger(__name__)
class Conf:
    """
    Configuration class. This class holds all the possible configurations of
    the mining process (i.e., starting and ending dates, branches, etc.)
    It's also responsible for checking whether the filters are correct (i.e.,
    the user did not specify 2 starting commits).
    """
    def __init__(self, options: Dict[str, Any]) -> None:
        # insert all the configurations in a local dictionary
        self._options = {}
        for key, val in options.items():
            self._options[key] = val
        self._sanity_check_repos(self.get('path_to_repo'))
        # Normalize: 'path_to_repos' is always a list, even for a single repo.
        if isinstance(self.get('path_to_repo'), str):
            self.set_value('path_to_repos', [self.get('path_to_repo')])
        else:
            self.set_value('path_to_repos', self.get('path_to_repo'))
    def set_value(self, key: str, value: Any) -> None:
        """
        Save the value of a configuration.
        :param key: configuration (i.e., start date)
        :param value: value
        """
        self._options[key] = value
    def get(self, key: str) -> Any:
        """
        Return the value of the configuration.
        :param key: configuration name
        :return: value of the configuration, None if not present
        """
        return self._options.get(key, None)
    @staticmethod
    def _sanity_check_repos(path_to_repo: Union[str, List[str]]) -> None:
        """
        Checks if repo is of type str or list.
        @param path_to_repo: path to the repo as provided by the user.
        @return:
        """
        if not isinstance(path_to_repo, str) and not isinstance(path_to_repo, list):
            raise Exception("The path to the repo has to be of type 'string' or 'list of strings'!")
    def _check_only_one_from_commit(self) -> None:
        # At most one "start of range" filter may be set.
        if not self.only_one_filter([self.get('since'),
                                     self.get('since_as_filter'),
                                     self.get('from_commit'),
                                     self.get('from_tag')]):
            raise Exception('You can only specify one filter between since, since_as_filter, from_tag and from_commit')
    def _check_only_one_to_commit(self) -> None:
        # At most one "end of range" filter may be set.
        if not self.only_one_filter([self.get('to'),
                                     self.get('to_commit'),
                                     self.get('to_tag')]):
            raise Exception('You can only specify one between to, to_tag and to_commit')
    def sanity_check_filters(self) -> None:
        """
        Check if the values passed by the user are correct.
        """
        self._check_correct_filters_order()
        self._check_only_one_from_commit()
        self._check_only_one_to_commit()
        self._check_timezones()
        # Check if from_commit and to_commit point to the same commit, in which case
        # we remove both filters and use the "single" filter instead. This prevents
        # errors with dates.
        if self.get("from_commit") and self.get("to_commit") and self.get("from_commit") == self.get("to_commit"):
            logger.warning("You should not point from_commit and to_commit to the same "
                           "commit, but use the 'single' filter instead.")
            single = self.get("to_commit")
            self.set_value("from_commit", None)
            self.set_value("to_commit", None)
            self.set_value("single", single)
        if self.get('single') is not None:
            # 'single' is mutually exclusive with every range filter.
            if any([self.get('since'),
                    self.get('since_as_filter'),
                    self.get('to'),
                    self.get('from_commit'),
                    self.get('to_commit'),
                    self.get('from_tag'),
                    self.get('to_tag')]):
                raise Exception('You can not specify a single commit with '
                                'other filters')
            try:
                # Resolve the user-supplied rev to a full commit hash.
                self.set_value('single', self.get("git").get_commit(self.get('single')).hash)
            except BadName:
                raise Exception("The commit {} defined in "
                                "the 'single' filtered does "
                                "not exist".format(self.get('single')))
    def _check_correct_filters_order(self) -> None:
        """
        Check that from_commit comes before to_commit
        """
        if self.get('from_commit') and self.get('to_commit'):
            chronological_order = self._is_commit_before(
                self.get('git').get_commit(self.get('from_commit')),
                self.get('git').get_commit(self.get('to_commit')))
            if not chronological_order:
                self._swap_commit_fiters()
    def _swap_commit_fiters(self) -> None:
        # reverse from and to commit
        from_commit = self.get('from_commit')
        to_commit = self.get('to_commit')
        self.set_value('from_commit', to_commit)
        self.set_value('to_commit', from_commit)
    @staticmethod
    def _is_commit_before(commit_before: Commit, commit_after: Commit) -> bool:
        # Order by committer date first; on a tie, fall back to author date.
        if commit_before.committer_date < commit_after.committer_date:
            return True
        if commit_before.committer_date == commit_after.committer_date and \
                commit_before.author_date < commit_after.author_date:
            return True
        return False
    def get_starting_commit(self) -> Optional[List[str]]:
        """
        Get the starting commit from the 'from_commit' or 'from_tag'
        filter.
        """
        from_tag = self.get('from_tag')
        from_commit = self.get('from_commit')
        if from_tag is not None:
            tagged_commit = self.get("git").get_commit_from_tag(from_tag)
            from_commit = tagged_commit.hash
        if from_commit is not None:
            try:
                commit = self.get("git").get_commit(from_commit)
                # Build git rev-list arguments that restrict traversal to the
                # ancestry path starting at `commit`, excluding its parents.
                if len(commit.parents) == 0:
                    return [f'--ancestry-path={commit.hash}']
                elif len(commit.parents) == 1:
                    return [f'--ancestry-path={commit.hash}', '^' + commit.hash + '^']
                else:
                    return [f'--ancestry-path={commit.hash}'] + ['^' + x for x in commit.parents]
            except Exception:
                raise Exception("The commit {} defined in the 'from_tag' or 'from_commit' filter does "
                                "not exist".format(self.get('from_commit')))
        return None
    def get_ending_commit(self) -> Optional[str]:
        """
        Get the ending commit from the 'to', 'to_commit' or 'to_tag' filter.
        """
        to_tag = self.get('to_tag')
        to_commit = self.get('to_commit')
        if to_tag is not None:
            tagged_commit = self.get("git").get_commit_from_tag(to_tag)
            to_commit = tagged_commit.hash
        if to_commit is not None:
            try:
                return self.get("git").get_commit(to_commit).hash
            except Exception:
                raise Exception("The commit {} defined in the 'to_tag' or 'to_commit' filter does "
                                "not exist".format(self.get('to_commit')))
        return None
    @staticmethod
    def only_one_filter(arr: List[Any]) -> bool:
        """
        Return true if in 'arr' there is at most 1 filter to True.
        :param arr: iterable object
        :return:
        """
        return len([x for x in arr if x is not None]) <= 1
    def build_args(self) -> Tuple[Union[str, List[str]], Dict[str, Any]]:
        """
        This function builds the argument for git rev-list.
        :return:
        """
        single: str = self.get('single')
        since = self.get('since')
        since_as_filter = self.get('since_as_filter')
        until = self.get('to')
        from_commit = self.get_starting_commit()
        to_commit = self.get_ending_commit()
        include_refs = self.get('include_refs')
        remotes = self.get('include_remotes')
        branch = self.get('only_in_branch')
        authors = self.get('only_authors')
        order = self.get('order')
        rev: Union[List[str], str] = []
        kwargs = {}
        # Precedence: single commit > explicit commit range > branch > HEAD.
        if single is not None:
            rev = [single, '-n', '1']
        elif from_commit is not None or to_commit is not None:
            if from_commit is not None and to_commit is not None:
                rev.extend(from_commit)
                rev.append(to_commit)
            elif from_commit is not None:
                rev.extend(from_commit)
                rev.append('HEAD')
            else:
                rev = to_commit
        elif branch is not None:
            rev = branch
        else:
            rev = 'HEAD'
        if self.get('only_no_merge'):
            kwargs['no-merges'] = True
        # Default iteration order is oldest-first (git's --reverse).
        if not order:
            kwargs['reverse'] = True
        elif order == 'reverse':
            kwargs['reverse'] = False
        elif order == 'date-order':
            kwargs['date-order'] = True
        elif order == 'author-date-order':
            kwargs['author-date-order'] = True
        elif order == 'topo-order':
            kwargs['topo-order'] = True
        if include_refs is not None:
            kwargs['all'] = include_refs
        if remotes is not None:
            kwargs['remotes'] = remotes
        if authors is not None:
            kwargs['author'] = authors
        if since is not None:
            kwargs['since'] = since
        if since_as_filter is not None:
            kwargs['since_as_filter'] = since_as_filter
        if until is not None:
            kwargs['until'] = until
        return rev, kwargs
    def is_commit_filtered(self, commit: Commit):
        # pylint: disable=too-many-branches,too-many-return-statements
        """
        Check if commit has to be filtered according to the filters provided
        by the user.
        :param Commit commit: Commit to check
        :return:
        """
        if self.get('only_modifications_with_file_types') is not None:
            if not self._has_modification_with_file_type(commit):
                logger.debug('Commit filtered for modification types')
                return True
        if self.get('only_commits') is not None and commit.hash not in self.get('only_commits'):
            logger.debug("Commit filtered because it is not one of the specified commits")
            return True
        if self.get('filepath_commits') is not None and commit.hash not in self.get('filepath_commits'):
            logger.debug("Commit filtered because it did not modify the specified file")
            return True
        if self.get('tagged_commits') is not None and commit.hash not in self.get('tagged_commits'):
            logger.debug("Commit filtered because it is not tagged")
            return True
        return False
    def _has_modification_with_file_type(self, commit: Commit) -> bool:
        # True if any modified file's name ends with one of the wanted suffixes.
        for mod in commit.modified_files:
            if mod.filename.endswith(tuple(self.get('only_modifications_with_file_types'))):
                return True
        return False
    def _check_timezones(self):
        if self.get('since') is not None:
            self.set_value('since', self._replace_timezone(self.get('since')))
        if self.get('since_as_filter') is not None:
            self.set_value('since_as_filter', self._replace_timezone(self.get('since_as_filter')))
        if self.get('to') is not None:
            self.set_value('to', self._replace_timezone(self.get('to')))
    @staticmethod
    def _replace_timezone(dt: datetime) -> datetime:
        # Naive datetimes are interpreted as UTC.
        if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
            dt = dt.replace(tzinfo=pytz.utc)
        return dt
|
#Import core packages
import matplotlib.pyplot as plt
import seaborn as sns; sns.set(color_codes=True)
import numpy as np
#Drop column not perceived to be features
# NOTE(review): comp_df is not created in this file -- it is assumed to come
# from an earlier cell/section of the analysis; confirm before running.
drop_cols = ['company_permalink','company_category_code','company_country_code','company_state_code','company_city','company_region',
             'founded_month','founded_quarter','first_funding_at','first_funding_at','last_funding_at','last_milestone_at',
             'acquirer_permalink','acquirer_name','acquirer_category_code','acquirer_country_code','acquirer_state_code','acquirer_region',
             'acquirer_city','acquired_at','acquired_month','acquired_quarter','acquired_year','price_currency_code',
             'price_amount','founded_year','founded_at','unique_name','first_to_last_fund_days','last_to_acq_days',
             'found_to_acq_days','first_fund_to_acq_days','rddt_1','rddt_2','rddt_3','rddt_4','rddt_5',
             'rddt_6','rddt_7','rddt_8','rddt_9','rddt_10','rddt_11','rddt_12','rddt_13','rddt_14','found_to_fund_days']
ml_df = comp_df.drop(columns=drop_cols).set_index('company_name')
#Replace Nan's with zeroes (skipping status column)
ml_df.iloc[:, 1:] = ml_df.iloc[:, 1:].fillna(0)
#Format df to various versions (i.e. filtering based on criteria)
#Fix negative category codes where -1 (missing category originally)
# Remap the -1 "missing" sentinel to a fresh code one past the current maximum.
max_cat_code = comp_df['category_code'].max() + 1
ml_df['category_code'] = np.where(ml_df['category_code'] == -1.0, max_cat_code, ml_df['category_code'])
#Scale/transform data
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
#Convert df to array and perform one-hot encoding for target variable 'status'
# NOTE(review): LabelEncoder produces integer labels, not one-hot vectors --
# the comment above overstates what this does.
tgt_enc = LabelEncoder().fit(ml_df['status'])
tgt_encoded = tgt_enc.transform(ml_df['status'])
np.unique(tgt_enc.inverse_transform(tgt_encoded))
np.unique(tgt_encoded)
ml_df = ml_df.drop(columns='status')
X = ml_df.values
Y = tgt_encoded
# Standardize features, then hold out 30% for testing.
scaled_X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, Y, test_size=0.3, random_state=0)
#Feature Selection
#Feature Importance with Forest
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
model_exTree = ExtraTreesClassifier()
model_exTree.fit(X_train, y_train)
model_RandTree = RandomForestClassifier(random_state=0, n_jobs=-1)
model_RandTree.fit(X_train, y_train)
#Visualization and ouput
#Extra Trees
features_exTree = {}
for feature, importance in zip(ml_df.columns, model_exTree.feature_importances_):
    features_exTree[feature] = importance
# NOTE(review): `pd` (pandas) is used below but never imported in this file --
# assumed imported as pd in an earlier cell; confirm.
importances_exTree = pd.DataFrame.from_dict(features_exTree, orient='index').rename(columns={0: 'Gini-Importance'})
importances_exTree.sort_values(by='Gini-Importance', ascending=False).plot(kind='bar', rot=90)
plt.title('Feature Importance - ExtraTrees Classifier')
plt.tight_layout()
plt.show()
#Visualize feature importance
features_RandTree = {}
for feature, importance in zip(ml_df.columns, model_RandTree.feature_importances_):
    features_RandTree[feature] = importance
importances_RandTree = pd.DataFrame.from_dict(features_RandTree, orient='index').rename(columns={0: 'Gini-Importance'})
importances_RandTree = importances_RandTree.sort_values(by='Gini-Importance', ascending=True)
# Keep only features contributing at least 1% Gini importance to the plot.
importances_RandTree = importances_RandTree[importances_RandTree['Gini-Importance'] >= 0.01]
fig, ax = plt.subplots(figsize=(15,12))
plt.barh(importances_RandTree.index, importances_RandTree['Gini-Importance'])
plt.title('Feature Importance above 1% - Random Forest Classifier', fontsize=16)
ax.set_ylabel('Features', fontsize=14)
ax.set_yticklabels(importances_RandTree.index, fontsize=12)
ax.set_xlabel('Gini-Importance', fontsize=14)
plt.tight_layout()
plt.show()
#Implement Tree Model
from sklearn import tree
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_auc_score
from sklearn.feature_selection import SelectFromModel
#Subset original df using identified highest importance features
# Features with importance >= 0.05 are retained by SelectFromModel.
sfm_RandTree = SelectFromModel(model_RandTree, threshold=0.05)
sfm_RandTree.fit(X_train, y_train)
#Print selected features
selected_feat = []
for feature_list_index in sfm_RandTree.get_support(indices=True):
    print(ml_df.columns[feature_list_index])
    selected_feat.append(ml_df.columns[feature_list_index])
X_important_train = sfm_RandTree.transform(X_train)
X_important_test = sfm_RandTree.transform(X_test)
#Perform RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV
#Number of Trees
n_estimators = [int(x) for x in np.linspace(start=10, stop=1000, num=30)]
#Number of features at each split
# NOTE(review): 'auto' was deprecated/removed for RandomForestClassifier in
# newer scikit-learn releases -- confirm against the pinned version.
max_features = ['auto','sqrt']
#Max depth of each tree
max_depth = [int(x) for x in np.linspace(start=5, stop=100, num=20)]
max_depth.append(None)
#Min samples at each split
min_samples_split = [int(x) for x in np.linspace(start=2, stop=100, num=10)]
#Minimun samples at base leaf
min_samples_leaf = [int(x) for x in np.linspace(start=2, stop=100, num=10)]
#Method of selecting samples for training each tree
bootstrap = [True, False]
#Place all parameter ranges in grid
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
print(random_grid)
#Instantiate instance of RandomForestClassifier
rf = RandomForestClassifier()
# 100 random parameter combinations, 3-fold CV each.
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2,
                               random_state=42, n_jobs=-1)
#Fit to training set with selected features
rf_random.fit(X_important_train, y_train)
#Best parameters from randomized search cv
rf_random.best_params_
#Implemenet Grid Search with identified best parameters from randomized search
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
    'bootstrap': [True],
    'max_depth': [None],
    'max_features': ['auto'],
    'min_samples_leaf': [60, 67, 74],
    'min_samples_split': [50, 56, 62],
    'n_estimators': [100, 283, 500, 1000]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
                           cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
grid_search.fit(X_important_train, y_train)
#View results of grid_search
grid_search.best_params_
best_grid = grid_search.best_estimator_
#Base Model Evaluation
base_model = RandomForestClassifier(n_estimators=10, random_state=4)
base_model.fit(X_important_train, y_train)
base_model_pred = base_model.predict(X_important_test)
#RandomizedSearch Tuned Model Evaluation
tuned_model = RandomForestClassifier(**rf_random.best_params_, random_state=5)
tuned_model.fit(X_important_train, y_train)
tuned_model_pred = tuned_model.predict(X_important_test)
#Grid Search Tuned Model
best_grid.fit(X_important_train, y_train)
best_grid_pred = best_grid.predict(X_important_test)
#Train and create prediction values for full dataset to compare to selected features
#NOTE: this refits tuned_model/best_grid in place, so after this point they
#hold the full-feature fit, not the selected-feature fit.
tuned_model.fit(X_train, y_train)
tuned_model_pred_full = tuned_model.predict(X_test)
best_grid.fit(X_train, y_train)
best_grid_pred_full = best_grid.predict(X_test)
#Base Model Score versus Parameter tuned models
print("Base Model Accuracy Score: %s" % round(accuracy_score(y_test, base_model_pred),2))
print("Tuned Model Accuracy Score: %s" % round(accuracy_score(y_test, tuned_model_pred),2))
print("Tuned Model Accuracy Score - Full Dataset: %s" % round(accuracy_score(y_test, tuned_model_pred_full),2))
print("Best Grid Model Accuracy Score: %s" % round(accuracy_score(y_test, best_grid_pred),2))
print("Best Grid Model Accuracy Score - Full Dataset: %s" % round(accuracy_score(y_test, best_grid_pred_full),2))
#Fixed: the next two prints passed the value as an extra print() argument
#instead of %-formatting it, so a literal "%s" was printed.
print("Best RCV Parameters: %s" % rf_random.best_params_)
print("Best GSCV Parameters: %s" % best_grid)
#Graph confusion matrix for full dataset and dataset using top features
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15,15))
mat1 = confusion_matrix(y_test, base_model_pred)
mat2 = confusion_matrix(y_test, tuned_model_pred)
sns.heatmap(mat1.T, square=True, annot=True, fmt='d', cbar=False, ax=ax1)
ax1.set_title('Confusion Matrix - Base Model (accuracy = %s percent)'
              % round(accuracy_score(y_test, base_model_pred),2), fontsize=15)
ax1.set_xlabel('True Label', fontsize=13)
ax1.set_ylabel('Predicted Label', fontsize=13)
#Work around the matplotlib 3.1.1 heatmap-clipping issue by widening the y-limits.
bottom, top = ax1.get_ylim()
ax1.set_ylim(bottom + 0.5, top - 0.5)
sns.heatmap(mat2.T, square=True, annot=True, fmt='d', cbar=False, ax=ax2)
ax2.set_title('Confusion Matrix - Tuned Model (accuracy = %s percent)' %
              round(accuracy_score(y_test, tuned_model_pred),2), fontsize=15)
ax2.set_xlabel('True Label', fontsize=13)
ax2.set_ylabel('Predicted Label', fontsize=13)
bottom, top = ax2.get_ylim()
ax2.set_ylim(bottom + 0.5, top- 0.5)
plt.show()
#Print Status Labels and Numbers for reference
print("Target Labels", np.unique(tgt_enc.inverse_transform(tgt_encoded)), '\n' ,"Label Numbers", np.unique(tgt_encoded))
#Classification Report -- full-feature model vs selected-feature model.
#Fixed: the first two prints referenced undefined names
#(y_pred_RandTree / y_pred_RandTree_important), raising NameError; they are
#mapped to the existing full-dataset and selected-feature predictions.
print("Random Forest - Full Dataset:" + '\n' + classification_report(y_test, tuned_model_pred_full))
print("Random Forest - Important Dataset:" + '\n' + classification_report(y_test, tuned_model_pred))
#Classification Report with readable class names
print("Random Forest - Base Model:" + '\n' + classification_report(y_test, base_model_pred, target_names=np.unique(tgt_enc.inverse_transform(tgt_encoded))))
print("Random Forest - Tuned Model:" + '\n' + classification_report(y_test, tuned_model_pred, target_names=np.unique(tgt_enc.inverse_transform(tgt_encoded))))
#Plot decision trees
from glob import glob
import PIL
import pydotplus
from IPython.display import display, Image
from sklearn.tree import export_graphviz
base_model = RandomForestClassifier(n_estimators=10, max_depth=2, random_state=4)
base_model.fit(X_important_train, y_train)
base_model_pred = base_model.predict(X_important_test)
def save_decision_trees_as_png(clf, iteration, feature_name, target_name):
    """Export one fitted decision tree of the forest as a PNG file.

    :param clf: a fitted decision tree (one estimator of a random forest)
    :param iteration: zero-based index of the tree, used in the file name
    :param feature_name: feature names used to label the tree nodes
    :param target_name: class names used to label the leaves
    """
    file_name = "vc_" + str(iteration) + ".png"
    dot_data = export_graphviz(
        clf,
        out_file=None,
        feature_names=feature_name,
        class_names=target_name,
        rounded=True,
        proportion=False,
        precision=2,
        # Bug fix: `filled` is a boolean flag; the original passed 2, which
        # only worked via truthiness.
        filled=True,
    )
    graph = pydotplus.graph_from_dot_data(dot_data)
    graph.set_size('"20,20!"')
    # NOTE(review): output directory is hard-coded -- consider parameterizing.
    graph.write_png('C:/datascience/springboard/projects/Venture Capital/data/Exported Data/%s' % file_name)
    # Message is 1-based for human readability; the file name stays 0-based.
    print("Decision Tree {} saved as png file".format(iteration + 1))
# Export every estimator of the small forest as a labelled PNG.
feature_names = selected_feat
target_names = np.unique(tgt_enc.inverse_transform(tgt_encoded))
for i in range(len(base_model.estimators_)):
    save_decision_trees_as_png(base_model.estimators_[i], i, feature_names, target_names)
images = [ PIL.Image.open(f) for f in glob('./*.png') ]
for idx, im in enumerate(images):
    #display(Image(filename=im.filename, retina=True))
    # Bug fix: the original passed the literal 'Decision Tree %s.png' without
    # filling the %s placeholder, so every image overwrote the same file.
    im.save('C:/datascience/springboard/projects/Venture Capital/data/Exported Data/Decision Tree %s.png' % idx)
#Appendix
#Visual Tuning Parameters
#Parameter Tuning for n_estimators. Using selected features.
n_estimators = range(1, 202, 5)
train_results = []
#test_results = []
for estimator in n_estimators:
    #Training data: fit one forest per candidate estimator count
    rf = RandomForestClassifier(n_estimators=estimator, random_state=1, n_jobs=-1)
    rf.fit(X_important_train, y_train)
    train_pred = rf.predict(X_important_train)
    train_score = round(accuracy_score(y_train, train_pred),2)
    train_results.append(train_score)
#Plot Results
train_line = plt.plot(n_estimators, train_results, color='blue', label = 'Train Score')
# Consistency fix: the label above was invisible without a legend; the other
# tuning sections in this script call plt.legend() after plotting.
plt.legend()
plt.xlabel('Number of Estimators')
plt.ylabel('Accuracy Scores')
plt.show()
#Parameter Tuning for max_depth. Using selected features and n_estimators=50.
rf_depth = range(1, 50, 1)
train_results = []
test_results = []
for depth in rf_depth:
    # Fit one forest per candidate depth and score it on both splits.
    model = RandomForestClassifier(max_depth=depth, n_estimators=50, n_jobs=-1)
    model.fit(X_important_train, y_train)
    train_results.append(round(accuracy_score(y_train, model.predict(X_important_train)), 2))
    test_results.append(round(accuracy_score(y_test, model.predict(X_important_test)), 2))
#Plot train vs test accuracy to visualize where overfitting begins
train_line = plt.plot(rf_depth, train_results, color='blue', label = 'Train Score')
test_line = plt.plot(rf_depth, test_results, color='red', label = 'Test Score')
plt.legend()
plt.xlabel('Max Depth of Random Forest')
plt.ylabel('Accuracy Scores')
plt.show()
#Parameter Tuning for min_samples_split and min_samples_leaf. Using selected features and n_estimators=50.
#Same code can test min_samples_split by swapping the keyword argument
#(change the max step value to 0.5 as well).
min_sample = np.arange(0.05, 0.50, 0.05)
train_results = []
test_results = []
for sample in min_sample:
    #Training data
    rf = RandomForestClassifier(n_estimators=50, min_samples_leaf=sample, n_jobs=-1)
    rf.fit(X_important_train, y_train)
    train_pred = rf.predict(X_important_train)
    train_score = round(accuracy_score(y_train, train_pred),2)
    train_results.append(train_score)
    #Test data -- bug fix: test_results was never appended in this loop, so it
    #still held the 49 values from the max_depth section while min_sample has
    #only 9 points, and plt.plot below raised a size-mismatch ValueError.
    test_pred = rf.predict(X_important_test)
    test_score = round(accuracy_score(y_test, test_pred),2)
    test_results.append(test_score)
#Plot Results
train_line = plt.plot(min_sample, train_results, color='blue', label = 'Train Score')
test_line = plt.plot(min_sample, test_results, color='red', label = 'Test Score')
plt.legend()
plt.xlabel('Minimum Sample')
plt.ylabel('Accuracy Scores')
plt.show()
|
from aiohttp import web
from .geocoder import geocode
async def geocode_address(request):
    """Resolve the 'query' path segment through the geocoder and return JSON.

    NOTE(review): match_info.get may yield None if the route has no 'query'
    placeholder -- behavior then depends on geocode(); confirm with the router.
    """
    address = request.match_info.get('query')
    result = geocode(address)
    return web.json_response(result)
|
###########################
# Fichier projet.py #
# 16/05/18 #
# La communauté de l'info #
###########################
from classe import *
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import random
def chaine_to_tuple(mot):
    """Convert a word (string) into a tuple of letter indices for the HMM."""
    return tuple(HMM.lettre_to_num(lettre) for lettre in mot)
def text_to_list(adr):  # converts a word-per-line text file into a list the HMM understands
    """Read a word-per-line text file into a list of letter-index tuples.

    :param adr: path of the text file to convert
    :return: list of tuples for the words in the file; the trailing empty
             entry produced by the file's final newline is dropped
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original explicit open()/close() pair leaked on exception).
    with open(adr, 'r') as data:
        lines = data.read().split('\n')
    return [chaine_to_tuple(w) for w in lines[:-1]]
def xval(nbFolds, S, nbL, nbSMin, nbSMax, nbIter, nbInit):
    """Cross-validated search for the best number of hidden states.

    :param nbFolds: number of folds the sample S is partitioned into
    :param S: list of words (tuples of letter indices)
    :param nbL: number of letters (alphabet size)
    :param nbSMin: smallest number of states to try
    :param nbSMax: largest number of states to try
    :param nbIter: number of Baum-Welch iterations passed to bw3
    :param nbInit: number of random initialisations passed to bw3
    :return: (best summed test log-likelihood, corresponding state count)
    """
    n = len(S)
    # Random permutation so folds are drawn without order bias.
    l = np.random.permutation(n)
    lvOpt = -float("inf")
    for nbS in range(nbSMin, nbSMax + 1):
        lv = 0
        for i in range(1, nbFolds + 1):
            # Fold i covers shuffled indices [f1, f2); the rest is training.
            f1 = int((i - 1) * n / nbFolds)
            f2 = int(i * n / nbFolds)
            learn = [S[l[j]] for j in range(f1)]
            learn += [S[l[j]] for j in range(f2, n)]
            test = [S[l[j]] for j in range(f1, f2)]
            h = HMM.bw3(nbS, nbL, learn, nbIter, nbInit)
            # Accumulate held-out log-likelihood across folds.
            lv += h.logV(test)
        if lv > lvOpt:
            lvOpt = lv
            nbSOpt = nbS
    return lvOpt, nbSOpt
def func(x, a, b, c):
    """Exponential saturation curve used to fit the log-likelihood plots."""
    decay = a * np.exp(-x / b)
    return - decay - 25580 + c
def logV_vs_nb_iteration_bw1(nb_iter_max, nbS, S,
                             nbL=26):  # plots the log-likelihood against the number of bw1 iterations
    """Plot the log-likelihood of S as bw1 training progresses, with a fit.

    :param nb_iter_max: number of bw1 iterations to perform
    :param nbS: number of hidden states
    :param S: list of words used to train the HMM
    :param nbL: number of letters (alphabet size)
    """
    hmm = HMM.gen_HMM(nbL, nbS)
    nb_iter = [0]
    logV = [hmm.logV(S)]
    for i in range(1, nb_iter_max + 1):
        try:
            hmm.bw1(S)
            nb_iter.append(i)
            logV.append(hmm.logV(S))
        except KeyboardInterrupt:
            # Let the user stop training early and still see the plot.
            break
    plt.plot(nb_iter, logV, '.', c='blue', label='logV en fonction du nombre d\'itération de bw1')
    plt.xlabel('nb d\'iteration')
    plt.ylabel('logV')
    titre = 'anglais2000' + ' / nombre d\'etat = ' + str(nbS)
    plt.title(titre)
    # Fit the exponential saturation curve `func` to the learning curve.
    optimizedParameters, pcov = opt.curve_fit(func, nb_iter, logV)
    # Use the optimized parameters to plot the best fit
    plt.plot(nb_iter,
             [func(x, optimizedParameters[0], optimizedParameters[1], optimizedParameters[2]) for x in nb_iter],
             label='-' + str(optimizedParameters[0]) + 'exp(-x/' + str(optimizedParameters[1]) + ') + ' + str(
                 -25580 + optimizedParameters[2]))
    plt.legend()
    plt.show()
def logV_vs_intialisation(nb_init_max, nb_iter, nbS, S,
                          nbL=26):
    """Plot the optimal log-likelihood reached from several random initialisations.

    :param nb_init_max: number of different initialisations to try
    :param nb_iter: number of iterations passed to bw2
    :param nbS: number of hidden states
    :param S: list of words used to train the HMMs
    :param nbL: number of letters (alphabet size)
    """
    runs, scores = [], []
    for run in range(1, nb_init_max + 1):
        print("init", run)
        try:
            trained = HMM.bw2(nbS, nbL, S, nb_iter)
            runs.append(run)
            score = trained.logV(S)
            scores.append(score)
            print("###################################")
            print(score)
            print("###################################")
        except KeyboardInterrupt:
            # Allow stopping early while keeping the results gathered so far.
            break
    plt.plot(runs, scores)
    plt.show()
def logV_vs_initialisation_variante(nb_init_max, limite, nbS, S,
                                    nbL=26):
    """Plot the optimal log-likelihood reached by bw2_variante for several
    random initialisations.

    :param nb_init_max: number of different initialisations to try
    :param limite: convergence threshold passed to bw2_variante
    :param nbS: number of hidden states
    :param S: list of words used to train the HMMs
    :param nbL: number of letters (alphabet size)
    """
    runs, scores = [], []
    for run in range(1, nb_init_max + 1):
        try:
            trained = HMM.bw2_variante(nbS, nbL, S, limite)
            runs.append(run)
            scores.append(trained.logV(S))
        except KeyboardInterrupt:
            # Allow stopping early while keeping the results gathered so far.
            break
    plt.plot(runs, scores)
    plt.show()
def efficiency_vs_nb_state(nbFolds, S, nbSMin, nbSMax, nbIter, nbInit,
                           nbL=26):  # plots the mean test-set log-likelihood against the
    # number of hidden states
    """Plot cross-validated test log-likelihood as a function of state count.

    :param nbFolds: number of folds the sample S is partitioned into
    :param S: list of words used to train the HMM
    :param nbSMin: minimum number of states
    :param nbSMax: maximum number of states
    :param nbIter: number of iterations passed to bw3
    :param nbInit: number of initialisations passed to bw3
    :param nbL: number of letters (alphabet size)
    """
    n = len(S)
    # Random permutation so folds are drawn without order bias.
    l = np.random.permutation(n)
    nb_state = []
    logV = []
    for nbS in range(nbSMin, nbSMax + 1):
        try:
            lv = 0
            for i in range(1, nbFolds + 1):
                # Fold i covers shuffled indices [f1, f2); the rest trains.
                f1 = int((i - 1) * n / nbFolds)
                f2 = int(i * n / nbFolds)
                learn = [S[l[j]] for j in range(f1)]
                learn += [S[l[j]] for j in range(f2, n)]
                test = [S[l[j]] for j in range(f1, f2)]
                h = HMM.bw3(nbS, nbL, learn, nbIter, nbInit)
                lv += h.logV(test)
            # Average held-out log-likelihood across folds for this state count.
            logV.append(lv / nbFolds)
            nb_state.append(nbS)
        except KeyboardInterrupt:
            # Allow stopping early and plotting the results gathered so far.
            break
    plt.plot(nb_state, logV)
    plt.show()
def efficiency_vs_nb_state_variante(nbFolds, S, nbSMin, nbSMax, limite, nbInit,
                                    nbL=26):  # plots the mean test-set log-likelihood against
    # the number of hidden states (convergence-threshold variant)
    """Plot cross-validated test log-likelihood vs. state count, using the
    threshold-based bw3_variante instead of a fixed iteration count.

    :param nbFolds: number of folds the sample S is partitioned into
    :param S: list of words used to train the HMM
    :param nbSMin: minimum number of states
    :param nbSMax: maximum number of states
    :param limite: convergence threshold passed to bw3_variante
    :param nbInit: number of initialisations passed to bw3_variante
    :param nbL: number of letters (alphabet size)
    """
    n = len(S)
    # Random permutation so folds are drawn without order bias.
    l = np.random.permutation(n)
    nb_state = []
    logV = []
    for nbS in range(nbSMin, nbSMax + 1):
        try:
            lv = 0
            for i in range(1, nbFolds + 1):
                # Fold i covers shuffled indices [f1, f2); the rest trains.
                f1 = int((i - 1) * n / nbFolds)
                f2 = int(i * n / nbFolds)
                learn = [S[l[j]] for j in range(f1)]
                learn += [S[l[j]] for j in range(f2, n)]
                test = [S[l[j]] for j in range(f1, f2)]
                h = HMM.bw3_variante(nbS, nbL, learn, nbInit, limite)
                lv += h.logV(test)
            # Average held-out log-likelihood across folds for this state count.
            logV.append(lv / nbFolds)
            nb_state.append(nbS)
        except KeyboardInterrupt:
            # Allow stopping early and plotting the results gathered so far.
            break
    plt.plot(nb_state, logV)
    plt.show()
def reconnaitre_langue(w):
    """Identify the most probable language of a word.

    :param w: word whose language must be recognised
    :return: string naming the most probable language
    """
    mot = chaine_to_tuple(w)
    langues = ['anglais', 'allemand', 'espagnol', 'neerlandais', 'suedois', 'elfique']
    # One saved model per candidate language, in the same order as `langues`.
    fichiers = ['hmm_anglais_parfait', 'hmm_allemand', 'hmm_espagnol',
                'hmm_neerlandais', 'hmm_suedois', 'hmm_elfique']
    proba = np.array([HMM.load(fichier).logV([mot]) for fichier in fichiers])
    return langues[np.argmax(proba)]
def afficher_mots_anglais(n):
    """Generate and print n English-like words using the saved English HMM.

    :param n: number of words to generate and display
    """
    h = HMM.load("hmm_anglais_parfait")
    for _ in range(n):
        # Bug fix (shadowing): the original reassigned the parameter `n`
        # inside the loop; it worked only because range(n) is evaluated once.
        length = random.randint(3, 8)
        print(h.gen_mot_lettres(length))
# L = text_to_list('anglais2000')
# print('toc', xval(20, L, 26, 2, 10, 5, 5))
# logV_vs_nb_iteration_bw1(1000, 30, text_to_list('anglais2000'))
# efficiency_vs_nb_state(10, text_to_list('anglais2000'), 53, 1000, 100, 1)
# efficiency_vs_nb_state(10, text_to_list('allemand2000'), 2, 1000, 100, 1)
# logV_vs_nb_iteration_bw1(1000, 45, text_to_list('anglais2000'))
# HMM.bw3(45, 26, text_to_list('espagnol2000'), 55, 12).save("hmm_espagnol")
# HMM.bw3(45, 26, text_to_list('suedois2000'), 55, 12).save("hmm_suedois")
# HMM.bw3(45, 26, text_to_list('neerlandais2000'), 55, 12).save("hmm_neerlandais")
# HMM.bw3(45, 26, text_to_list('neerlandais2000'), 55, 12).save("hmm_neerlandais")
# HMM.bw3(45, 26, text_to_list('elfique'), 55, 12).save("hmm_elfique")
# HMM.bw3(100, 26, text_to_list('anglais2000'), 55, 15).save("hmm_anglais_v2")
# HMM.bw3(100, 26, text_to_list('allemand2000'), 55, 15).save("hmm_allemand_v2")
# HMM.bw3(100, 26, text_to_list('suedois2000'), 55, 15).save("hmm_suedois_v2")
# HMM.bw3(100, 26, text_to_list('neerlandais2000'), 55, 15).save("hmm_neerlandais_v2")
# HMM.bw3(45, 26, text_to_list('anglais2000'), 200, 20).save("hmm_anglais_parfait")
# logV_vs_intialisation(100, 400, 45, text_to_list('anglais2000'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.