text stringlengths 8 6.05M |
|---|
# Generated by Django 2.2.12 on 2020-06-17 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax `cschool` fields: make img/subtitle/title optional (blank/null)
    and give the two text fields placeholder defaults."""

    dependencies = [
        ('student', '0008_shippingaddress'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cschool',
            name='img',
            field=models.ImageField(blank=True, null=True, upload_to='images'),
        ),
        migrations.AlterField(
            model_name='cschool',
            name='subtitle',
            # NOTE(review): the default text has typos ("yours scool"), but a
            # migration must mirror the model definition — fix both together.
            field=models.CharField(blank=True, default='yours scool subtitle', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='cschool',
            name='title',
            field=models.CharField(blank=True, default='yours scool title', max_length=100, null=True),
        ),
    ]
|
# ==============================================================================
# Copyright (c) 2019, Deutsches HörZentrum Hannover, Medizinische Hochschule Hannover
# Author: Hanna Dolhopiatenko (Dolhopiatenko.Hanna@mh-hannover.de), Waldo Nogueira (NogueiraVazquez.Waldo@mh-hannover.de)
# All rights reserved.
# ==============================================================================
'''This code represents a fully connected neural network. The EEG data is divided into 48 blocks: 46 blocks are taken for training, one for validation and one for testing.
10 random blocks for test and validation were chosen to provide a cross-validation algorithm.'''
import tensorflow
import numpy as np
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
import scipy.io as io
import random
from LoadData import loadData #This module you have to modify to load your own data. The example is provided at the same repository
from tensorflow.python.keras import optimizers
from math import floor
from math import ceil
'''Define loss function, based on correlation'''
def corr_loss(act, pred):
    """Keras loss: 1 - Pearson correlation between `act` and `pred`."""
    act_centered = act - K.mean(act)
    pred_centered = pred - K.mean(pred)
    cov = K.mean(act_centered * pred_centered)
    # epsilon() guards against division by zero for constant signals.
    return 1 - cov / (K.std(act) * K.std(pred) + K.epsilon())
'''Create model'''
def createModel():
    """Build and compile the fully connected network.

    Architecture: three ReLU hidden layers of widths n_hidden**3/**2/**1
    with batch-norm after the first and dropout between layers, then a
    width-1 tanh output per time sample. Compiled with the correlation
    loss and Adam.

    NOTE(review): reads the module-level globals n_hidden, dropout,
    trainPredWin and numChans; numChans is only assigned inside the
    per-subject loop below, so call this only after loadData has run.
    """
    model = Sequential()
    # Input shape: (time samples, EEG channels at the two lags).
    model.add(Dense(n_hidden**(3), input_shape=(trainPredWin,numChans*2), activation='relu', use_bias=True)) #Input layer
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(dropout))
    model.add(Dense(n_hidden**(2), activation='relu', use_bias=True))
    model.add(Dropout(dropout))
    model.add(Dense(n_hidden**(1), activation='relu', use_bias=True))
    model.add(Dropout(dropout))
    model.add(Dense(1, activation='tanh'))
    model.compile(loss=corr_loss, optimizer='adam')
    return model
'''Define the necessary parameters'''
fs=64 # sampling rate (Hz) of the processed EEG and audio signals
n_hidden=2 # base for the hidden-layer widths (n_hidden**3, **2, **1)
dropout=0.25 # dropout rate between dense layers
trainPredWin=60*fs # training prediction window: 60 s worth of samples
numBlocks=48 # EEG signal divided in 48 blocks, 60 s each
'''############## MAIN CODE ################'''
for Subject in range(1, 2):  # choose which subject(s) to process
    # Provide your own working path here (raw string for Windows backslashes).
    workingDir = r'E:\dolhopia\DNN\EasyModel'
    # a and b indicate which stream (attended/unattended) the subject followed.
    a, b, chans, eeg = loadData(Subject)
    numChans = len(chans)
    numSides = 2
    Audio = io.loadmat(workingDir + '/Data/wav.mat')
    LAGS15 = [250]  # end lag(s) in ms; the start lag is always end_lag - 15 ms
    eegData = np.zeros((numBlocks, trainPredWin, numChans), dtype=np.float32)
    targetAudio = np.zeros((numBlocks, trainPredWin, numSides), dtype=np.float32)
    '''Split the dataset in 48 one-minute blocks'''
    for block in range(numBlocks):
        eegData[block, :, :] = eeg['eegNH' + str(Subject)][block * trainPredWin:(block + 1) * trainPredWin, :]
        targetAudio[block, :, a] = Audio["EnvA"][block * trainPredWin:(block + 1) * trainPredWin, 0]  # attended envelope
        targetAudio[block, :, b] = Audio["EnvU"][block * trainPredWin:(block + 1) * trainPredWin, 0]  # unattended envelope
    '''Choose random blocks for training/validation/testing'''
    # NOTE(review): range(47) can never select block 47 — confirm whether the
    # last block was meant to be eligible (range(numBlocks) would include it).
    leaveBlocks = random.sample(range(47), 10)
    leaveValidat = leaveBlocks[:5]  # 5 validation folds
    leaveTest = leaveBlocks[5:]     # 5 matching test blocks
    '''Training dataset (46 of 48 blocks per fold)'''
    trainingDataEEG = np.zeros(((numBlocks - 2), trainPredWin, numChans))
    trainingDataAudioA = np.zeros(((numBlocks - 2), trainPredWin, 1))
    trainingDataAudioU = np.zeros(((numBlocks - 2), trainPredWin, 1))
    trainingDataEEGlagged = np.zeros(((numBlocks - 2), trainPredWin, numChans))
    '''Validation set (1 block)'''
    develDataEEG = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    develDataAudioA = np.zeros(((numBlocks - 47), trainPredWin, 1))
    develDataAudioU = np.zeros(((numBlocks - 47), trainPredWin, 1))
    '''Testing set (1 block)'''
    testDataEEG = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    testDataAudioA = np.zeros(((numBlocks - 47), trainPredWin, 1))
    testDataAudioU = np.zeros(((numBlocks - 47), trainPredWin, 1))
    '''Lagged copies of each set'''
    StartLagTrain = np.zeros(((numBlocks - 2), trainPredWin, numChans))
    EndLagTrain = np.zeros(((numBlocks - 2), trainPredWin, numChans))
    StartLagDevel = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    EndLagDevel = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    StartLagTest = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    EndLagTest = np.zeros(((numBlocks - 47), trainPredWin, numChans))
    results1 = np.zeros((len(LAGS15), 5)) * np.nan
    # NOTE(review): only indices 0..4 (the folds) of axis 1 are ever written;
    # the remaining zeros dilute the Acc2 mean below — confirm intent.
    corrCoefA = np.zeros((len(LAGS15), numBlocks, 1))
    corrCoefU = np.zeros((len(LAGS15), numBlocks, 1))
    Acc2 = np.zeros((len(LAGS15), 1))
    lags_length = len(LAGS15)
    for end_lagi in range(len(LAGS15)):  # loop across lags
        print(end_lagi)
        end_lag = LAGS15[end_lagi]
        start_lag = end_lag - 15
        # Convert the ms lags to sample counts.
        start = floor(start_lag / 1e3 * fs)
        fin = ceil(end_lag / 1e3 * fs)
        # BUG FIX: the original iterated range(46), but leaveValidat/leaveTest
        # hold only 5 entries each, so iteration 5 raised IndexError.
        for blockCV in range(len(leaveValidat)):  # cross-validation folds
            leaveValidat11 = leaveValidat[blockCV]
            leaveTest11 = leaveTest[blockCV]
            i = 0
            for block in range(numBlocks):
                if leaveValidat11 == block or leaveTest11 == block:
                    continue
                trainingDataEEG[i, :, :] = eegData[block, :, :]
                trainingDataAudioA[i, :, 0] = targetAudio[block, :, b]
                i += 1
            '''Lag the EEG dataset (shift by start/fin samples, zero-pad tail)'''
            k = 0  # single validation/test block per fold
            develDataEEG[:, :, :] = eegData[leaveValidat11, :, :]
            develDataAudioA[:, :, 0] = targetAudio[leaveValidat11, :, b]
            testDataEEG[:, :, :] = eegData[leaveTest11, :, :]
            testDataAudioA[:, :, 0] = targetAudio[leaveTest11, :, b]
            testDataAudioU[:, :, 0] = targetAudio[leaveTest11, :, a]
            StartLagDevel[k, :, :] = np.pad(develDataEEG[k, :, :], ((0, start), (0, 0)), mode='constant')[start:, :]
            EndLagDevel[k, :, :] = np.pad(develDataEEG[k, :, :], ((0, fin), (0, 0)), mode='constant')[fin:, :]
            DevelDataEEGLagged = np.concatenate([StartLagDevel, EndLagDevel], axis=2)
            StartLagTest[k, :, :] = np.pad(testDataEEG[k, :, :], ((0, start), (0, 0)), mode='constant')[start:, :]
            EndLagTest[k, :, :] = np.pad(testDataEEG[k, :, :], ((0, fin), (0, 0)), mode='constant')[fin:, :]
            TestDataEEGLagged = np.concatenate([StartLagTest, EndLagTest], axis=2)
            for block in range(numBlocks - 2):
                StartLagTrain[block, :, :] = np.pad(trainingDataEEG[block, :, :], ((0, start), (0, 0)), mode='constant')[start:, :]
                EndLagTrain[block, :, :] = np.pad(trainingDataEEG[block, :, :], ((0, fin), (0, 0)), mode='constant')[fin:, :]
            TrainingDataEEGLagged = np.concatenate([StartLagTrain, EndLagTrain], axis=2)
            '''Create and train the model'''
            Model = createModel()
            tempModelName = workingDir + '/model/ModelWeights.hdf5'  # weight checkpoint
            checkLow = ModelCheckpoint(filepath=tempModelName, verbose=0, save_best_only=True, mode='min', monitor='val_loss')
            # Stop once the validation loss no longer decreases.
            early = EarlyStopping(monitor='val_loss', patience=10, mode='min')
            Model.fit(TrainingDataEEGLagged[:, :, :], trainingDataAudioA[:, :, :], batch_size=2, epochs=300, verbose=1,
                      callbacks=[early, checkLow], validation_data=(DevelDataEEGLagged[:, :, :], develDataAudioA[:, :, :]))
            Model.load_weights(tempModelName)
            '''Predict and correlate with the original audio envelopes'''
            predictionA = Model.predict(TestDataEEGLagged[:, :, :])
            corrCoefA[end_lagi, blockCV, :] = np.corrcoef(testDataAudioA[k, :, 0], predictionA[k, :, 0])[1, 0]
            corrCoefU[end_lagi, blockCV, :] = np.corrcoef(testDataAudioU[k, :, 0], predictionA[k, :, 0])[1, 0]
        '''Accuracy: fraction where the attended correlation wins'''
        Acc2[end_lagi, :] = np.mean(corrCoefU[end_lagi, :, :] < corrCoefA[end_lagi, :, :])
    # BUG FIX: the original line had a stray quote after str(Subject),
    # which was a syntax error. Key spelling ('corrCoeffA') kept as-is so
    # downstream .mat consumers are unaffected.
    io.savemat(workingDir + '/Results/RevMulti_' + str(Subject),
               {'corrCoeffA' + str(Subject): corrCoefA, 'corrCoefU' + str(Subject): corrCoefU, 'Acc2' + str(Subject): Acc2})
|
# This file is part of beets.
# Copyright 2016, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import sys
import requests
import test_lyrics
def mkdir_p(path):
    """Create `path` and any missing parents, like ``mkdir -p``.

    Succeeds silently when the directory already exists; raises OSError
    for real failures (permissions, or `path` exists as a regular file).
    exist_ok=True replaces the original try/except-isdir dance and avoids
    its check-after-fail pattern.
    """
    os.makedirs(path, exist_ok=True)
def safe_open_w(path):
    """Open "path" for text writing, creating any parent directories as needed.

    Opens with an explicit UTF-8 encoding so the sample pages are written
    identically on every platform (the original relied on the locale's
    default encoding).
    """
    mkdir_p(os.path.dirname(path))
    return open(path, 'w', encoding='utf-8')
def main(argv=None):
    """Download one lyrics sample page per referenced source.

    Pages already present on disk are skipped, so re-runs only fetch
    what is missing.
    """
    if argv is None:
        argv = sys.argv
    print('Fetching samples from:')
    for s in test_lyrics.GOOGLE_SOURCES + test_lyrics.DEFAULT_SOURCES:
        print(s['url'])
        url = s['url'] + s['path']
        fn = test_lyrics.url_to_filename(url)
        if not os.path.isfile(fn):
            # NOTE(review): verify=False disables TLS certificate checking;
            # acceptable for fetching test fixtures only.
            html = requests.get(url, verify=False).text
            with safe_open_w(fn) as f:
                # BUG FIX: the file is opened in text mode, so write the str
                # directly; writing html.encode('utf-8') (bytes) raised
                # TypeError on Python 3.
                f.write(html)


if __name__ == "__main__":
    sys.exit(main())
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class TodoTask(models.Model):
    """Simple to-do task with an urgency level and a done flag.

    Uses the legacy Odoo/OpenERP API (@api.one / @api.multi decorators).
    """
    _name = 'todo.task'
    _description = 'To-do task'

    # Related order/request number.
    numero = fields.Char('Num Pedido')
    # Task description (mandatory).
    name = fields.Char('Descripcion', required=True)
    # Whether the task is finished.
    is_done = fields.Boolean('Finalizado')
    # Active flag; inactive records are hidden by default in Odoo.
    active = fields.Boolean('Activo', default=True)
    # Creation date, defaulting to today.
    fecha = fields.Datetime('Fecha', default=fields.Date.today)
    # Free-text urgency level.
    urgencia = fields.Char('Nivel de urgencia')

    @api.one
    def do_toggle_done(self):
        """Flip this task's done flag."""
        # print self.env.context
        self.is_done = not self.is_done
        return True

    @api.multi
    def do_clear_done(self):
        """Archive (deactivate) every finished task."""
        done_recs = self.search([('is_done', '=', True)])
        done_recs.write({'active': False})
        return True
# Name: Taidgh Murray
# Student ID: 15315901
# File: fibonacci.py
############################################################################
def f():
    """Prompt the user for an integer and print its Fibonacci number.

    No input validation: non-integer input raises ValueError from int().
    """
    n = int(input("Please enter a number: "))
    print(fib(n))
def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1).

    Iterative O(n) implementation; the original naive double recursion was
    O(phi**n) and recursed without bound for negative input.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
f()
|
#coding: utf8
from __future__ import unicode_literals
import logging
import os
import re
logger = logging.getLogger(__name__)

# slug -> keyword arguments of every boundary definition loaded so far.
registry = {}
# Directory of the definition file currently being loaded (see autodiscover).
_basepath = '.'


def register(slug, **kwargs):
    """Record one boundary definition under `slug`.

    Called by definition files while the loadshapefiles command runs; the
    'file' entry is resolved relative to the directory being scanned.
    """
    definition = dict(kwargs)
    definition['file'] = os.path.join(_basepath, definition.get('file', ''))
    registry[slug] = definition
def autodiscover(base_dir):
    """Walk the directory tree and load all definition files present.
    Definition files are all files ending in "definition.py" or "definitions.py"

    Side effect: rebinds the module-global _basepath to the directory of the
    file being executed so that register() resolves 'file' paths correctly.

    SECURITY NOTE(review): matching files are exec()'d verbatim, so any code
    under base_dir runs with full privileges — only point this at trusted
    definition directories.
    """
    global _basepath
    definition_file_re = re.compile(r'definitions?\.py$')
    # followlinks=True also descends into symlinked directories.
    for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=True):
        _basepath = dirpath
        for filename in filenames:
            if definition_file_re.search(filename):
                logger.debug(filename)
                exec(open(os.path.join(dirpath, filename)).read())
def attr(name):
    """Return a getter mapping a feature to feature.get(name)."""
    def getter(feature):
        return feature.get(name)
    return getter
def _clean_string(s):
if re.search(r'[A-Z]', s) and not re.search(r'[a-z]', s):
# WE'RE IN UPPERCASE
from boundaries.titlecase import titlecase
s = titlecase(s)
s = re.sub(r'(?u)\s', ' ', s)
s = re.sub(r'( ?-- ?| - )', '—', s)
return s
def clean_attr(name):
    """Like attr(name), but passes the value through _clean_string."""
    get_raw = attr(name)
    def getter(f):
        return _clean_string(get_raw(f))
    return getter
def dashed_attr(name):
    """Like clean_attr(name), but turns every remaining hyphen into an em dash."""
    get_clean = clean_attr(name)
    def getter(f):
        return get_clean(f).replace('-', '—')
    return getter
|
# Book exercise 2.3: print the author's name.
print('José Vinícius Silva Ferreira')
|
#!/usr/bin/env python
"""
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
# NOTE(review): single trailing underscore — likely meant __email__ and
# __fileid__; confirm no tooling reads these exact names before renaming.
__email = "python (at) jldupont.com"
__fileid = "$Id$"
import unittest, doctest
def test_suite(*args):
    """Build a doctest suite from tests.txt.

    ELLIPSIS and NORMALIZE_WHITESPACE make the doctests tolerant of
    elided output and spacing differences. Extra args are ignored.
    """
    test = doctest.DocFileSuite(
        "tests.txt",
        optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE)
    return unittest.TestSuite((test,))
from pyjld.system.command import BaseCmd
class TestCmd(BaseCmd):
    """Minimal BaseCmd subclass exposing start/stop commands for the tests.

    Python 2 file (print statements below).
    """
    def __init__(self):
        BaseCmd.__init__(self)
    def cmd_start(self, *pargs, **kwargs):
        # Handler discovered by BaseCmd via its cmd_ prefix.
        print "TestCmd.cmd_start"
    def cmd_stop(self, *pargs, **kwargs):
        print "TestCmd.cmd_stop"
class TestsBaseCmd(unittest.TestCase):
    """Checks that BaseCmd validates commands backed by cmd_* methods."""
    def setUp(self):
        self.cmd = TestCmd()
    def tearDown(self):
        pass
    def testCmdList(self):
        # Each command with a matching cmd_<name> handler must validate.
        liste = ['start','stop']
        for command in liste:
            result = self.cmd.safe_validateCommand(command)
            self.assertEqual(result, True)
# ==============================================
# ==============================================
if __name__ == "__main__":
    """ Tests
    """
    # Combine the doctest suite and the TestCase above under one text runner.
    suite = unittest.TestSuite()
    suite.addTest( test_suite() )
    suite.addTest( unittest.TestLoader().loadTestsFromTestCase( TestsBaseCmd ) )
    runner = unittest.TextTestRunner()
    runner.run(suite)
|
import scrapy.cmdline
def main():
    """Run the `mytianya` spider and export its items to mytianya.json."""
    args = ['scrapy', 'crawl', 'mytianya', '-o', 'mytianya.json']
    scrapy.cmdline.execute(args)


if __name__ == '__main__':
    main()
import numpy as np
import math
def Quaternion2Rotation(quat):
    """Convert a unit quaternion (e0, e1, e2, e3) to a 3x3 rotation matrix."""
    e0, e1, e2, e3 = (quat.item(k) for k in range(4))
    row0 = [e1**2+e0**2-e2**2-e3**2, 2*(e1*e2-e3*e0), 2*(e1*e3+e2*e0)]
    row1 = [2*(e1*e2+e3*e0), e2**2+e0**2-e1**2-e3**2, 2*(e2*e3-e1*e0)]
    row2 = [2*(e1*e3-e2*e0), 2*(e2*e3+e1*e0), e3**2+e0**2-e1**2-e2**2]
    return np.array([row0, row1, row2])
def Quaternion2Euler(quat):
    """Return Euler angles (phi=roll, theta=pitch, psi=yaw) for a quaternion."""
    e0, ex, ey, ez = (quat[i] for i in range(4))
    # Roll about x, pitch about y, yaw about z (aerospace 3-2-1 sequence).
    phi = math.atan2(2 * (e0*ex + ey*ez), e0**2 + ez**2 - ex**2 - ey**2)
    theta = math.asin(2 * (e0*ey - ex*ez))
    psi = math.atan2(2*(e0*ez + ex*ey), e0**2 + ex**2 - ey**2 - ez**2)
    return phi, theta, psi
def Euler2Quaternion(phi, theta, psi):
    """Convert Euler angles (roll, pitch, yaw) to a (4,1) quaternion [e0,ex,ey,ez]."""
    cph, sph = np.cos(phi/2.0), np.sin(phi/2.0)
    cth, sth = np.cos(theta/2.0), np.sin(theta/2.0)
    cps, sps = np.cos(psi/2.0), np.sin(psi/2.0)
    return np.array([
        [cps * cth * cph + sps * sth * sph],  # e0
        [cps * cth * sph - sps * sth * cph],  # ex
        [cps * sth * cph + sps * cth * sph],  # ey
        [sps * cth * cph - cps * sth * sph],  # ez
    ])
def Euler2Rotation(phi, theta, psi):
    """Rotation matrix for Euler angles, composed via the quaternion form."""
    return Quaternion2Rotation(Euler2Quaternion(phi, theta, psi))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import timezone
from unittest import TestCase
from unittest.mock import patch
from requests import Session
from app.database import DbEngine
from app.services import get_users, get_commit_frequency, parse_dt
from common import REPO, content, MockResponse, MockRedis
class GitHubUserServiceTest(TestCase):
    """Service-layer tests for get_users and get_commit_frequency.

    Redis (via DbEngine.get_session) and the GitHub HTTP API (Session.get)
    are both mocked, so each test verifies the returned aggregation and
    that a cache entry was written under the expected key.
    """
    def setUp(self):
        # Real Session instance; its .get() is patched per test below.
        self.session = Session()

    @patch.object(DbEngine, 'get_session', return_value=MockRedis("redis"))
    @patch.object(Session, 'get', return_value=MockResponse(200, content))
    def test_get_users(self, mock_response, mock_redis):
        # Cache key the service writes for this repo + date range.
        key = 'github_users_service__User_teradici/deploy_2020-09-01T00:00:00+0000_2021-12-01T00:00:00+0000'
        client = mock_redis.return_value
        client.delete(key)  # start from a cold cache
        start = parse_dt('2020-09-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        end = parse_dt('2021-12-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        result = get_users(start, end, REPO, client, self.session)
        val = client.exists(key)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 2, 'get_users does not work correctly')
        self.assertEqual(val, 1, 'db records should be created')

    @patch.object(DbEngine, 'get_session', return_value=MockRedis("redis"))
    @patch.object(Session, 'get', return_value=MockResponse(200, content))
    def test_get_commit_frequency(self, mock_response, mock_redis):
        # Key additionally encodes the frequency-window argument (5).
        key = 'github_users_service__CommitFrequency_teradici/deploy_2020-09-01T00:00:00+0000_2021-12-01T00:00:00+0000__5'
        client = mock_redis.return_value
        client.delete(key)
        start = parse_dt('2020-09-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        end = parse_dt('2021-12-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        result = get_commit_frequency(5, start, end, REPO, client, self.session)
        val = client.exists(key)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 2, 'get_commit_frequency does not work correctly')
        self.assertEqual(result[0].name, 'user1')
        self.assertEqual(result[0].commits, 2)
        self.assertEqual(val, 1, 'db records should be created')
|
from uff.bin import convert_to_uff
|
import RPi.GPIO as GPIO
import time
from datetime import datetime
import telepot
from telepot.loop import MessageLoop
# Telegram chat id that receives the alarm messages.
ID = 803524468
# Physical board pin numbering; PIR motion-sensor output is read on pin 12.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.IN)
def motionSensor():
    """Poll the PIR sensor forever; on motion, send the Telegram alarm prompt.

    NOTE(review): reconstructed indentation — the 10 s sleep is assumed to be
    inside the `if` (debounce after a detection); confirm against the
    original, since a sleep at loop level would instead poll every 10 s.
    Never returns.
    """
    while True:
        i = GPIO.input(12)
        if i == 1 :
            handle()
            time.sleep(10)
def sendMsg(msg):
    """Handle an incoming Telegram message.

    'ja'  -> confirm the intruders were scared off
    'nej' -> let them be
    anything else -> echo an unknown-command notice back to the sender
    """
    chat_id = msg['chat']['id']
    command = msg['text']
    replies = {
        'ja': 'nu har vi skrämt iväg dom',
        'nej': 'då får dom väl vara där',
    }
    if command in replies:
        bot.sendMessage(ID, replies[command])
    else:
        bot.sendMessage(chat_id, 'dont know this command :'+command)
def handle():
    """Notify the owner about detected motion and ask whether to call the police."""
    bot.sendMessage(ID, 'Nu rör det sig någon hemma')
    time.sleep(1)  # small gap so the two messages arrive in order
    bot.sendMessage(ID, 'Vill du varna polisen? ja/nej')
# SECURITY NOTE(review): the bot token is hard-coded; it grants full control
# of the bot. Move it to an environment variable and revoke/rotate this one.
bot = telepot.Bot('826949653:AAEoJg3_IyqnwBNzbPn55SXnyV81YACD1RY')
# Dispatch incoming chat messages to sendMsg on a background thread.
MessageLoop(bot, sendMsg).run_as_thread()
print('I am listening ...')
# Main thread: short grace period, then poll the sensor (motionSensor never returns).
while 1:
    time.sleep(3)
    motionSensor()
    # handle()
|
'''
Base class for identity-based encryption
Notes: This class implements an interface for a standard identity-based encryption scheme.
Identity-based encryption consists of four algorithms: setup, extract, encrypt, and decrypt.
'''
from charm.toolbox.schemebase import *
class IBEnc(SchemeBase):
    """Abstract interface for an identity-based encryption (IBE) scheme.

    Concrete schemes override setup/extract/encrypt/decrypt.
    """
    def __init__(self):
        SchemeBase.__init__(self)
        SchemeBase.setProperty(self, scheme='IBEnc')
        # Security definitions recognized for IBE: chosen-plaintext and
        # chosen-ciphertext, each in adaptive and selective-identity ('s') form.
        self.baseSecDefs = Enum('IND_ID_CPA','sIND_ID_CPA','IND_ID_CCA','sIND_ID_CCA')

    def setup(self):
        """Generate public parameters and the master key."""
        raise NotImplementedError

    def extract(self, mk, ID):
        """Derive the private key for identity `ID` from master key `mk`."""
        raise NotImplementedError

    def encrypt(self, pk, ID, message):
        """Encrypt `message` to identity `ID` under public parameters `pk`."""
        raise NotImplementedError

    def decrypt(self, pk, sk, ct):
        """Decrypt ciphertext `ct` with the private key `sk`."""
        raise NotImplementedError
|
from __future__ import absolute_import, division, print_function
import copy
import random
import collections
import operator
import sys
import math
MOVES = {0: 'up', 1: 'left', 2: 'down', 3: 'right'}
class Gametree:
    """Expectimax game tree for the 2048 AI.

    Alternates max-player plies (the AI picks a move) with chance plies
    (the game spawns a 2-tile in an empty cell) down to `depth_of_tree`,
    then evaluates with expectimax to choose a direction (see MOVES).
    """

    def __init__(self, root_state, depth_of_tree, current_score):
        """Build the whole tree immediately from the given board and score."""
        self.depth_of_tree = depth_of_tree
        # Candidate move directions: 0=up, 1=left, 2=down, 3=right.
        self.directions = [0, 1, 2, 3]
        # Root is a max-player node; direction -1 means "no move taken".
        self.root = Node(root_state, True, current_score, -1, 0)
        self.growTree(self.root, 0)

    def growTree(self, node, depth):
        """Recursively expand `node` until the configured depth is reached."""
        if(depth == self.depth_of_tree):
            return
        if(node.isMaxPlayer()):
            # Simulate all four moves; keep only those that change the board.
            for dir in self.directions:
                sim = Simulator(copy.deepcopy(node.getBoardState()), node.getPoints())
                sim.move(dir)
                if(sim.getState() != node.getBoardState()):
                    newNode = Node(sim.getState(), False, sim.getPoints(), dir, depth + 1)
                    node.addChild(newNode)
        elif(node.isChancePlayer()):
            # One max-player child per empty cell, with a 2-tile placed there.
            emptySpots = self.getEmptySpots(node.getBoardState())
            for spot in emptySpots:
                board_copy = copy.deepcopy(node.getBoardState())
                board_copy[spot[0]][spot[1]] = 2
                newNode = Node(board_copy, True, node.getPoints(), -1, depth + 1)
                node.addChild(newNode)
        for child in node.getChildren():
            self.growTree(child, depth + 1)

    def getEmptySpots(self, matrix):
        """Return (row, col) tuples for every zero cell of `matrix`."""
        zeroPoints = []
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                if(matrix[i][j] == 0):
                    zeroPoints.append((i, j))
        return zeroPoints

    def printTreeLevelOrder(self):
        """Debug helper: print the whole tree starting at the root."""
        self.printNodes(self.root)

    def printNodes(self, node):
        """Print `node`, its children, then recurse into each child."""
        print("Parent: ", node.isMaxPlayer(), node.getPoints(), node.getDepth(), node.getDirection())
        print("Children: ")
        self.printChildren(node)
        for child in node.getChildren():
            # BUG FIX: the original called self.printNodes2(child), a method
            # that does not exist (AttributeError); recurse with printNodes.
            self.printNodes(child)

    def printChildren(self, node):
        """Print a numbered summary line for each direct child of `node`."""
        counter = 0
        for child in node.getChildren():
            print(counter, child.isMaxPlayer(), child.getPoints(), child.getDepth(), child.getDirection())
            counter += 1

    def expectimax(self, node):
        """Evaluate `node` bottom-up; caches and returns its expectimax value."""
        if len(node.getChildren()) == 0:
            # Leaf node: use the heuristic payoff.
            payoff = self.payoff(node)
            node.setExpectimax(payoff)
            return payoff
        elif node.isMaxPlayer():
            # Max node: best value over all children.
            value = -sys.maxsize - 1
            for child in node.getChildren():
                value = max(value, self.expectimax(child))
            node.setExpectimax(value)
            return value
        elif node.isChancePlayer():
            # Chance node: uniform average over children (each spawn equally likely).
            value = 0
            for child in node.getChildren():
                value += self.expectimax(child) * (1.0 / len(node.getChildren()))
            node.setExpectimax(value)
            return value
        else:
            print("Error")
            return -1

    def payoff(self, node):
        """Heuristic board value: monotonicity sum + points + empty-cell count."""
        numZeros = 0
        for i in range(len(node.getBoardState())):
            for j in range(len(node.getBoardState()[i])):
                if(node.getBoardState()[i][j] == 0):
                    numZeros += 1
        # Monotonicity: reward boards whose values increase down each column
        # and right along each row. (Renamed from `sum`, which shadowed the builtin.)
        mono = 0
        board = node.getBoardState()
        for j in range(len(board)):
            for i in range(3):
                mono += board[i + 1][j] - board[i][j]
        for j in range(len(board)):
            for i in range(3):
                mono += board[j][i + 1] - board[j][i]
        return mono + node.getPoints() + numZeros

    def compute_decision(self):
        """Return the direction (0-3) of the root child achieving the optimum."""
        optimalVal = self.expectimax(self.root)
        for child in self.root.getChildren():
            if child.getExpectimax() == optimalVal:
                return child.getDirection()
        return 0
class Node:
    """A game-tree node: a board snapshot plus search bookkeeping.

    A node is either a max player (the AI chooses a move) or a chance
    player (the game spawns a tile); `direction` records the move that
    produced it (-1 for chance/root nodes).
    """

    def __init__(self, board_state, is_max_player, board_score, direction, depth):
        self.board_state = board_state
        self.is_max_player = is_max_player
        self.board_score = board_score
        # Until evaluated, the expectimax value defaults to the raw score.
        self.expectimax = board_score
        self.direction = direction
        self.depth = depth
        self.children = []

    def isMaxPlayer(self):
        """True when the AI moves at this node."""
        return self.is_max_player

    def isChancePlayer(self):
        """True when the game spawns a tile at this node."""
        return not self.is_max_player

    def getBoardState(self):
        """Board (list of rows) captured at this node."""
        return self.board_state

    def getPoints(self):
        """Score accumulated up to this node."""
        return self.board_score

    def getDirection(self):
        """Move index that led here (-1 if none)."""
        return self.direction

    def setExpectimax(self, val):
        """Store the evaluated expectimax value."""
        self.expectimax = val

    def getExpectimax(self):
        """Evaluated expectimax value (raw score before evaluation)."""
        return self.expectimax

    def addChild(self, child):
        """Append a successor node."""
        self.children.append(child)

    def getChildren(self):
        """All successor nodes added so far."""
        return self.children

    def getDepth(self):
        """Ply depth of this node (root is 0)."""
        return self.depth
class Simulator:
    '''Simulates 2048 moves on a board copy so the tree search can look ahead.

    Every direction is implemented by rotating the board clockwise until the
    move becomes a leftward move, applying it, then rotating back.
    '''
    def __init__(self, board_state, total_points):
        # board_state: 4x4 list-of-lists of tile values; total_points: running score.
        self.board_state = board_state
        self.board_size = 4
        self.total_points = total_points

    def move(self, direction):
        """Apply one move (0=up,1=left,2=down,3=right) in place."""
        for i in range(0, direction):
            self.rotateMatrixClockwise(self.board_state)
        if self.canMove(self.board_state):
            self.moveTiles(self.board_state)
            self.mergeTiles(self.board_state)
            # NOTE(review): spawning a random tile here makes simulations
            # non-deterministic, while Gametree's chance nodes also enumerate
            # spawns — confirm this double accounting is intended.
            self.placeRandomTile(self.board_state)
        for j in range(0, (4 - direction) % 4):
            self.rotateMatrixClockwise(self.board_state)

    def getState(self):
        # Current (possibly mutated) board.
        return self.board_state

    def getPoints(self):
        # Score including merges performed by move().
        return self.total_points

    def rotateMatrixClockwise(self, tileMatrix):
        # In-place 90-degree clockwise rotation via a 4-way swap per ring element.
        tm = tileMatrix
        for i in range(0, int(self.board_size/2)):
            for k in range(i, self.board_size- i - 1):
                temp1 = tm[i][k]
                temp2 = tm[self.board_size - 1 - k][i]
                temp3 = tm[self.board_size - 1 - i][self.board_size - 1 - k]
                temp4 = tm[k][self.board_size - 1 - i]
                tm[self.board_size - 1 - k][i] = temp1
                tm[self.board_size - 1 - i][self.board_size - 1 - k] = temp2
                tm[k][self.board_size - 1 - i] = temp3
                tm[i][k] = temp4

    def canMove(self, tileMatrix):
        # True if any tile can slide left into a gap or merge with its left neighbour.
        tm = tileMatrix
        for i in range(0, self.board_size):
            for j in range(1, self.board_size):
                if tm[i][j-1] == 0 and tm[i][j] > 0:
                    return True
                elif (tm[i][j-1] == tm[i][j]) and tm[i][j-1] != 0:
                    return True
        return False

    def moveTiles(self, tileMatrix):
        # Slide all tiles left, closing gaps (no merging here).
        tm = tileMatrix
        for i in range(0, self.board_size):
            for j in range(0, self.board_size - 1):
                # While the cell is empty but something remains to its right,
                # shift the rest of the row one cell left.
                while tm[i][j] == 0 and sum(tm[i][j:]) > 0:
                    for k in range(j, self.board_size - 1):
                        tm[i][k] = tm[i][k + 1]
                    tm[i][self.board_size - 1] = 0

    def mergeTiles(self, tileMatrix):
        # Merge equal adjacent tiles leftward, crediting the merged value to
        # the score, then re-slide to close the gaps created by merging.
        tm = tileMatrix
        for i in range(0, self.board_size):
            for k in range(0, self.board_size - 1):
                if tm[i][k] == tm[i][k + 1] and tm[i][k] != 0:
                    tm[i][k] = tm[i][k] * 2
                    tm[i][k + 1] = 0
                    self.total_points += tm[i][k]
        self.moveTiles(tileMatrix)

    def placeRandomTile(self, tileMatrix):
        # Put a 2 in a uniformly random empty cell.
        # NOTE(review): loops forever if the board has no empty cell — callers
        # must guarantee at least one zero remains.
        while True:
            i = random.randint(0,self.board_size-1)
            j = random.randint(0,self.board_size-1)
            if tileMatrix[i][j] == 0:
                break
        tileMatrix[i][j] = 2
|
import pygame
import Cube
# RGB color constants for drawing.
BLACK = (0, 0, 0)
WHITE = (255,255,255)
RED = (255,0,0)
def draw_bordered_rect(screen, x, y, w, h, color):
    """Draw a filled rectangle of `color` with a 4-pixel black border."""
    border = 4
    pygame.draw.rect(screen, BLACK, [x, y, w, h])
    pygame.draw.rect(screen, color, [x + border, y + border, w - 2 * border, h - 2 * border])
def draw_face(screen, cube, faceNum, x, y):
    """Draw one 3x3 cube face with its top-left corner at (x, y).

    Stickers are 50x50 px; cube.faces[faceNum] lists the nine colors in
    row-major order.
    """
    for i in range(9):
        row, col = divmod(i, 3)
        draw_bordered_rect(screen, x + 50 * col, y + 50 * row, 50, 50,
                           cube.faces[faceNum][i])
pygame.init()
# Window size in pixels.
size = [800, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Rubik's Cube")
cube = Cube.Cube()
cube.move("TU")  # apply one "TU" move before showing the cube
done = False
clock = pygame.time.Clock()
# Main loop: redraw the six faces in an unfolded-cross layout at 60 FPS
# until the window is closed.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    screen.fill(WHITE)
    # Four faces in a horizontal strip, plus top and bottom faces.
    draw_face(screen, cube, 0,50, 250)
    draw_face(screen, cube, 1, 210, 250)
    draw_face(screen, cube, 2, 370, 250)
    draw_face(screen, cube, 3, 530, 250)
    draw_face(screen, cube, 4, 210, 90)
    draw_face(screen, cube, 5, 210, 410)
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
|
# -*- coding:utf-8 -*-
import os
import sys
import logging
import logging.handlers
from .formatter import TEXT_FORMATTER
from .handler import TTYHandler
# Output-destination selectors accepted by get_output_obj_list().
OUTPUT_STDOUT = 1
OUTPUT_FILE = 2
OUTPUT_ROTATE_FILE = 3
def get_log_file_path(file_name=None):
    """Return the log file path to use.

    If `file_name` is given (truthy) it is returned untouched. Otherwise a
    path of the form <script_dir>/log/<script_basename>-<pid>.log is built,
    creating the log directory if needed.
    """
    if not file_name:
        log_directory = os.path.join(os.path.dirname(sys.argv[0]), "log")
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() / os.mkdir() pair.
        os.makedirs(log_directory, exist_ok=True)
        log_name = ".".join(os.path.basename(sys.argv[0]).split(".")[:-1]) + "-" + str(os.getpid()) + ".log"
        file_name = os.path.join(log_directory, log_name)
    return file_name
class Output(object):
    """Wraps a logging handler, applying a formatter and level at creation."""

    def __init__(self, handler, formatter=TEXT_FORMATTER, level=logging.INFO):
        handler.setFormatter(formatter)
        handler.setLevel(level)
        self.handler = handler
class Stream(Output):
    """Output that writes to a terminal/stream (stdout by default) via TTYHandler."""
    def __init__(self, stream=sys.stdout, formatter=TEXT_FORMATTER, level=logging.INFO):
        super(Stream, self).__init__(TTYHandler(stream), formatter, level)
class File(Output):
    """Output that writes to a plain log file (auto-named when filename is None)."""
    def __init__(self, filename=None, formatter=TEXT_FORMATTER, level=logging.INFO):
        log_file_path = get_log_file_path(file_name=filename)
        handler = logging.FileHandler(filename=log_file_path)
        super(File, self).__init__(handler, formatter, level)
class RotatingFile(Output):
    """Output that writes to a size-rotated log file (10 MB x 10 backups)."""
    def __init__(self, filename=None, formatter=TEXT_FORMATTER, level=logging.INFO):
        log_file_path = get_log_file_path(file_name=filename)
        handler = logging.handlers.RotatingFileHandler(filename=log_file_path, maxBytes=10*1024*1024, backupCount=10)
        super(RotatingFile, self).__init__(handler, formatter, level)
# Maps each OUTPUT_* constant to a Python source string that is eval()'d by
# get_output_obj_list to construct the corresponding Output instance.
OUTPUT_MAPPING = {
    OUTPUT_STDOUT: "Stream()",
    OUTPUT_FILE: "File()",
    OUTPUT_ROTATE_FILE: "RotatingFile()",
}
def get_output_obj_list(outputs):
    """Build one Output instance per OUTPUT_* constant in `outputs`.

    Raises:
        ValueError: if `outputs` is empty or contains an unknown constant
        (the original raised a bare TypeError from eval(None) for unknown
        constants and a message-less ValueError otherwise).
    """
    # Dispatch on the classes directly instead of eval()-ing source strings:
    # equivalent results, no code-execution surface, and clearer errors.
    factories = {
        OUTPUT_STDOUT: Stream,
        OUTPUT_FILE: File,
        OUTPUT_ROTATE_FILE: RotatingFile,
    }
    output_list = []
    for output in outputs:
        factory = factories.get(output)
        if factory is None:
            raise ValueError("unknown output type: %r" % (output,))
        output_list.append(factory())
    if not output_list:
        raise ValueError("no outputs specified")
    return output_list
import torch
import torch.nn as nn
#from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.distributions.normal import Normal
from models.Encoder import Encoder
from models.Decoder import Decoder
# Special vocabulary ids.
SOS_TOKEN = 0
EOS_TOKEN = 1
UNK_TOKEN = 2
# Maximum number of greedy decoding steps during inference.
MAX_PARA_LENGTH = 15
class ParaphraseModel(nn.Module):
    '''VAE-style paraphrase model built from a project Encoder and Decoder.

    dictionary_size = the size of the dictionary in the dataset
    embedding_dim = each word in the dictionary is embedded into a vector
        space of that dimension
    rnn_hidden_size = hidden size of the LSTMs in the encoder/decoder
    rnn_num_layers = the number of layers in each LSTM in the model
    z_dim = the encoder encodes the sentence to a z vector of that dimension

    NOTE(review): the forward paths call .cuda() unconditionally, so a CUDA
    device is required.
    '''
    def __init__(self, dictionary_size=100, embedding_dim=1100, rnn_hidden_size=600, rnn_num_layers=2, z_dim=1100):
        super(ParaphraseModel, self).__init__()
        # TODO(review): could be replaced with pretrained embeddings (word2vec).
        self.embedding = nn.Embedding(dictionary_size, embedding_dim)
        self.encoder = Encoder(embedding_dim, rnn_hidden_size, rnn_num_layers, z_dim)
        self.decoder = Decoder(embedding_dim, rnn_hidden_size, rnn_num_layers, dictionary_size)
        # ignore_index=-1 skips padded target positions in the loss.
        self.cel = nn.CrossEntropyLoss(ignore_index=-1)
        self.dictionary_size = dictionary_size
        self.embedding_dim = embedding_dim

    def train_model(self, xo, xp, xo_len, xp_len, kld_coef=1):
        """Return reconstruction + KL loss for a batch (xo original, xp paraphrase)."""
        logits, z, mu, logvar = self.AE_forward(xo, xp, xo_len, xp_len)
        cel_loss = self.cel(logits.view(-1, self.dictionary_size).contiguous(), xp.cuda().view(-1))
        # Analytic KL( N(mu, sigma) || N(0, I) ): sum over latent dims, mean over batch.
        kl_loss = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(1).mean()
        total_loss = cel_loss + kld_coef * kl_loss
        return total_loss

    def AE_forward(self, xo, xp, xo_len, xp_len):
        """Encode the pair, reparameterize z, and decode to vocabulary logits."""
        xo_embed = self.embedding(xo.cuda())
        xp_embed = self.embedding(xp.cuda())
        mu, logvar = self.encoder(xo_embed, xp_embed, xo_len, xp_len)
        std = torch.exp(0.5 * logvar)
        # BUG FIX: the reparameterization noise must be standard normal
        # (loc 0, scale 1). The original Normal(ones_like(mu), zeros_like(std))
        # swapped the arguments, producing a degenerate scale-0 distribution
        # so z collapsed to mu + std. The correct convention is visible in
        # infer() below, which samples Normal(zeros, ones).
        nd = Normal(torch.zeros_like(mu), torch.ones_like(std))
        z = nd.sample() * std + mu
        logits = self.decoder(xo_embed, z, xo_len, xp_len)
        return logits, z, mu, logvar

    def infer(self, xo, xo_len):
        """Greedily decode paraphrases for xo until EOS or MAX_PARA_LENGTH steps."""
        xo_embed = self.embedding(xo.cuda())
        _, (hT, cT) = self.decoder.ose(xo_embed, xo_len)
        completed_sentences = torch.zeros(len(xo_embed))
        sentences = []
        # Sample z from the standard-normal prior at inference time.
        mu, sigma = torch.zeros(len(xo), self.embedding_dim), torch.ones(len(xo), self.embedding_dim)
        nd = Normal(mu, sigma)
        z = nd.sample().cuda()
        out = hT[-1]
        steps = 0
        while not all(completed_sentences):
            real_inp = torch.cat((z, out), 1).unsqueeze(1)
            output, (hT, cT) = self.decoder.pse(real_inp, torch.tensor([1] * len(z)), h0=hT, c0=cT)
            out = hT[-1]
            probs = self.decoder.linear(out)
            # Greedy pick: highest-probability word per batch element.
            topwords = [word_probs.topk(1)[1] for word_probs in probs]
            for j, result in enumerate(topwords):
                if int(result) == EOS_TOKEN:
                    completed_sentences[j] = 1
            sentences.append(topwords)
            steps += 1
            if steps == MAX_PARA_LENGTH:
                break
        return sentences
if __name__ == '__main__':
    # Smoke test on a tiny random batch (requires a CUDA device).
    a = ParaphraseModel()
    b = torch.LongTensor([[1, 2, 3], [4, 5, 2], [1, 1, 1], [1, 2, 1]])
    c = torch.LongTensor([3, 2, 1, 2])
    print(a.train_model(b, b, c, c))
    res = (a.infer(b,c))
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from parsers import tamil_parser
class Author(models.Model):  # author
    """A verse author, optionally linked to a site user account."""
    # Author's name (label: 'name' in Tamil).
    name = models.CharField(max_length=100, verbose_name='பெயர்')
    # Unique FK to the auth user — effectively a one-to-one profile link.
    user = models.ForeignKey(User, null=True, blank=True, unique=True, related_name='author_profile')
    # Biography text (label: 'biography' in Tamil).
    biography = models.TextField(verbose_name='வாழ்க்கை சரித்திரம்', blank=True)

    def __unicode__(self):
        return self.name
# Choices for Verse.parsing_status:
#   'N' — not yet analyzed, 'Y' — analysis available, 'E' — has an error.
PARSING_STATUS_OPTIONS = ( # parsing status choices
    ('N', 'ஆராயப்படவில்லை'),
    ('Y', 'தகவல் இருக்கிறது'),
    ('E', 'பிழையுள்ளது'),
)
class Verse(models.Model):  # பா (verse)
    """A Tamil verse; prosody analysis runs automatically on every save."""
    author = models.ForeignKey(Author, null=True, blank=True, verbose_name='ஆசிரியர்', related_name='verses')
    publishing_date = models.DateTimeField(null=True, auto_now_add=True, verbose_name='பதிப்பு_நேரம்')
    revision_date = models.DateTimeField(null=True, auto_now=True, verbose_name='திருத்தப்பட்ட நேரம்')
    verse_text = models.TextField(verbose_name='பா_செய்யுள்')
    parsing_status = models.CharField(max_length=2, choices=PARSING_STATUS_OPTIONS, default='N', verbose_name='பகுப்பாய்வு_குறிப்பு')
    # NOTE(review): models.XMLField is not part of stock Django; presumably a
    # project/third-party field — verify it is importable here.
    result = models.XMLField(blank=True, verbose_name='முடிவுகள்')

    def __unicode__(self):  # Python 2 string representation
        return self.verse_text[:15] + '...'

    def save(self, *args, **kwargs):
        """Analyze the verse text, record the outcome, then save normally."""
        result = tamil_parser.analyzeVerse(self.verse_text.replace('\r', '\n'))
        if result is None:
            self.result = ''
            self.parsing_status = 'E'
        else:
            self.result = result
            self.parsing_status = 'Y'
        # BUG FIX: was `super(Verse, self).save(args, kwargs)`, which passed
        # the args tuple and kwargs dict as two *positional* parameters
        # (force_insert / force_update). They must be unpacked.
        super(Verse, self).save(*args, **kwargs)
|
# Generated by Django 3.0.3 on 2020-03-11 21:38
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the excelapp approval-workflow tables
    (service master, workflow master, workflow conditions, authorizers).

    NOTE(review): this migration has likely been applied; defects below are
    flagged but intentionally not changed here.
    """

    initial = True

    dependencies = [
        ('users', '0003_auto_20200304_0733'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Tm_Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(null=True, verbose_name='表示順')),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='作成日時')),
                ('modified', models.DateTimeField(auto_now=True, null=True, verbose_name='更新日時')),
                ('service_name', models.CharField(max_length=200, verbose_name='サービス名')),
                ('upload_file', models.FileField(null=True, upload_to='upload', verbose_name='アップロードファイル')),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Tm_Department', verbose_name='部門')),
            ],
            options={
                'verbose_name': 'サービスマスタ',
                'verbose_name_plural': 'サービスマスタ',
                'db_table': 'Tm_Service',
            },
        ),
        migrations.CreateModel(
            name='Tm_Workflow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(null=True, verbose_name='表示順')),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='作成日時')),
                ('modified', models.DateTimeField(auto_now=True, null=True, verbose_name='更新日時')),
                # NOTE(review): string default '1' on an IntegerField — works
                # but should be the int 1; fix in the model + a new migration.
                ('workflow_route', models.IntegerField(choices=[(1, '通常'), (2, '特別')], default='1', verbose_name='承認経路')),
                ('workflow_count', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)], verbose_name='承認回数')),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Tm_Department', verbose_name='部門')),
            ],
            options={
                'verbose_name': '承認マスタ',
                'verbose_name_plural': '承認マスタ',
                'db_table': 'Tm_Workflow',
            },
        ),
        migrations.CreateModel(
            name='Tm_Workflow_Conditions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(null=True, verbose_name='表示順')),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='作成日時')),
                ('modified', models.DateTimeField(auto_now=True, null=True, verbose_name='更新日時')),
                ('amount_min', models.IntegerField(null=True, verbose_name='金額(下限)')),
                # NOTE(review): verbose_name says 下限 (lower bound) but the
                # field is amount_max — likely copy-paste; probably 上限.
                ('amount_max', models.IntegerField(null=True, verbose_name='金額(下限)')),
                ('rate_min', models.DecimalField(decimal_places=1, max_digits=4, null=True, verbose_name='利益率(下限)')),
                # NOTE(review): same copy-paste on rate_max (下限 vs 上限).
                ('rate_max', models.DecimalField(decimal_places=1, max_digits=4, null=True, verbose_name='利益率(下限)')),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='excelapp.Tm_Service', verbose_name='サービス')),
                ('workflow', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='excelapp.Tm_Workflow', verbose_name='承認')),
            ],
            options={
                'verbose_name': '承認経路条件マスタ',
                'verbose_name_plural': '承認経路条件マスタ',
                'db_table': 'Tm_Workflow_Conditions',
            },
        ),
        migrations.CreateModel(
            name='Tm_Authorizer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(null=True, verbose_name='表示順')),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='作成日時')),
                ('modified', models.DateTimeField(auto_now=True, null=True, verbose_name='更新日時')),
                ('workflow_count', models.IntegerField(default=1, verbose_name='承認回数')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='承認者')),
                ('workflow', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='excelapp.Tm_Workflow', verbose_name='承認')),
            ],
            options={
                'verbose_name': '承認者マスタ',
                'verbose_name_plural': '承認者マスタ',
                'db_table': 'Tm_Authorizer',
            },
        ),
    ]
|
import json
import requests
import time
import re
from bs4 import BeautifulSoup
def parse_single_anime(node):
    """Flatten one MAL API list entry.

    Merges the entry's 'node' and 'list_status' dicts (list_status keys win
    on collision) and drops the bulky 'main_picture' field.
    """
    res = {**node['node'], **node['list_status']}
    # Some entries have no picture; pop with a default instead of the old
    # try/except that dumped the whole record to stdout.
    res.pop('main_picture', None)
    return res
def _get_anime_list(url, token):
    """GET `url` with a Bearer token and decode the JSON response body."""
    auth_headers = {'Authorization': F'Bearer {token}'}
    response = requests.get(url, headers=auth_headers)
    return json.loads(response.text)
def _get_user_anime_list_url(username, token):
url = F'https://api.myanimelist.net/v2/users/{username}/animelist?fields=list_status&limit=1000'
return url
# @lru_cache(maxsize=None)
def get_user_anime_list(user_name, token):
    """Download and flatten a user's complete anime list.

    Follows MAL's paging links, sleeping 0.5s between pages.
    Raises Exception when the API reports an error for the user.
    """
    page = _get_anime_list(_get_user_anime_list_url(user_name, token), token)
    if 'error' in page:
        raise Exception(F"[{page['error']}] encountered for {user_name}")
    parsed = [parse_single_anime(entry) for entry in page['data']]
    while 'next' in page['paging']:
        time.sleep(.5)
        page = _get_anime_list(page['paging']['next'], token)
        parsed.extend(parse_single_anime(entry) for entry in page['data'])
    return parsed
def get_random_list_of_users():
    """Scrape MAL's public users page and return the visible usernames."""
    response = requests.get('https://myanimelist.net/users.php')
    soup = BeautifulSoup(response.text, 'html.parser')
    # Anchors linking to a profile page; attribute filtering via style
    # attrs proved unreliable, so match on the href instead.
    profile_anchors = soup.findAll("a", attrs={"href": re.compile(".*profile.*")})
    names = []
    for anchor in profile_anchors:
        first_child = anchor.contents[0]
        # Skip anchors whose first child is markup (e.g. an <img>), not text.
        if str(first_child)[0] == '<':
            continue
        names.append(first_child)
    return names
|
from __future__ import print_function
from flask import Flask, jsonify, abort, request, make_response, url_for
from flask import render_template, redirect
import sys
import requests
import json
# WSGI application; static assets are served from the app root ("").
app = Flask(__name__, static_url_path="")
@app.route('/', methods=['GET'])
def home_page():
    """Serve the dashboard landing page."""
    page = render_template("index.html")
    return page
@app.route('/distribution', methods=['GET'])
def view_distribution():
    """Fetch sentiment values from the API gateway and render the chart."""
    endpoint = "https://0qqnj9g8jh.execute-api.us-east-1.amazonaws.com/prod/getsentimentvalues"
    results = requests.get(endpoint).json()
    return render_template('distribution.html', data=results)
@app.route('/topten', methods=['GET'])
def view_topten():
    """Fetch the top hashtags and render the bubble chart."""
    endpoint = "https://0qqnj9g8jh.execute-api.us-east-1.amazonaws.com/prod/gettophashtags"
    results = requests.get(endpoint).json()
    return render_template('bubbles.html', data=json.dumps(results))
@app.route('/breakdown', methods=['GET'])
def view_breakdown():
    """Fetch positive/negative/neutral counts and render the breakdown."""
    base = "https://0qqnj9g8jh.execute-api.us-east-1.amazonaws.com/prod/"
    # Fetch order kept as before: negative, positive, neutral.
    negative = requests.get(base + "getnegativecount").json()
    positive = requests.get(base + "getpositivecount").json()
    neutral = requests.get(base + "getneutralcount").json()
    data = [
        {"sala": "Positive", "value": positive},
        {"sala": "Negative", "value": negative},
        {"sala": "Neutral", "value": neutral},
    ]
    return render_template('breakdown.html', data=data)
if __name__ == '__main__':
    # NOTE(review): port 170 is privileged (<1024, needs root) and
    # 0.0.0.0 exposes the app on all interfaces — confirm both are intended.
    app.run(host='0.0.0.0', port=170)
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
import threading
import time
import multiprocessing as mp
# In[2]:
import tkinter as tk
# In[4]:
sys.path.append(".")
import boucleThread
# In[5]:
def boucle_interprete_thread(n_iter, n_thread):
    """Split n_iter busy-loop iterations across a worker pool.

    NOTE: despite the name, this uses *processes* (multiprocessing.Pool),
    not threads — which is what lets it beat the GIL in the demo.
    """
    # BUG FIX: the pool was never closed/joined, leaking worker processes
    # on every button press; the context manager terminates it on exit.
    with mp.Pool() as pool:
        pool.map(boucle_interprete_simple, [int(n_iter / n_thread)] * n_thread)
def wapper_boucle_interprete_thread():
    """Button callback: time the pooled interpreted loop and show the result."""
    started = time.perf_counter()
    boucle_interprete_thread(int(v_iteration.get()), 4)
    elapsed = time.perf_counter() - started
    v_temps.set(str(elapsed))
# In[7]:
def boucle_interprete_simple(n_iter):
    """Busy-loop n_iter times in pure Python (interpreted baseline)."""
    for _ in range(n_iter):
        pass
def wapper_boucle_interprete_simple():
    """Button callback: time the single-threaded interpreted loop."""
    started = time.perf_counter()
    boucle_interprete_simple(int(v_iteration.get()))
    elapsed = time.perf_counter() - started
    v_temps.set(str(elapsed))
# In[8]:
def wapper_boucle_compile_simple():
    """Button callback: time the compiled (extension) single loop."""
    started = time.perf_counter()
    boucleThread.boucleSimple(int(v_iteration.get()))
    elapsed = time.perf_counter() - started
    v_temps.set(str(elapsed))
# In[9]:
def wapper_boucle_compile_thread():
    """Button callback: time the compiled (extension) threaded loop."""
    started = time.perf_counter()
    boucleThread.boucleThread(int(v_iteration.get()))
    elapsed = time.perf_counter() - started
    v_temps.set(str(elapsed))
# In[ ]:
# In[10]:
if __name__ == "__main__":
    # Build the benchmark window: an iteration-count entry, a result label,
    # and one button per loop variant. pack() order determines layout.
    fenetre = tk.Tk()
    greeting = tk.Label(text="Hello, Tkinter")
    greeting.pack(side = tk.TOP)
    label_nb_itration = tk.Label(text="inserer nb iterations")
    label_nb_itration.pack(side = tk.LEFT)
    # Entry bound to a StringVar so callbacks can read the iteration count.
    v_iteration = tk.StringVar()
    entry_nb_iteration = tk.Entry(textvariable=v_iteration)
    entry_nb_iteration.pack(side = tk.LEFT)
    label_consume = tk.Label(text="temps consume:")
    label_consume.pack(side = tk.LEFT)
    # Result display: callbacks write the elapsed time into v_temps.
    v_temps = tk.StringVar()
    label_temps = tk.Label(textvariable=v_temps)
    label_temps.pack(side = tk.LEFT)
    v_temps.set("^&^")
    interprete_simple_button = tk.Button(text="interprete simple", fg="black",command=wapper_boucle_interprete_simple)
    interprete_simple_button.pack( side = tk.BOTTOM)
    interprete_thread_button = tk.Button(text="interprete thread", fg="black",command=wapper_boucle_interprete_thread)
    interprete_thread_button.pack( side = tk.BOTTOM)
    compile_simple_button = tk.Button(text="compile simple", fg="black",command=wapper_boucle_compile_simple)
    compile_simple_button.pack( side = tk.BOTTOM)
    compile_thread_button = tk.Button(text="compile thread", fg="black",command=wapper_boucle_compile_thread)
    compile_thread_button.pack( side = tk.BOTTOM)
    # Blocks until the window is closed.
    fenetre.mainloop()
|
from datetime import datetime
import random
import smtplib
import pandas
PLACEHOLDER = "[NAME]"
my_email = "jalltrades12@gmail.com"
# SECURITY(review): credentials are hardcoded in source — move to an
# environment variable or a secrets store and rotate this password.
password = "@#Jack098"

# Each record needs "name", "email", "month", "day" columns.
birthdays_list = pandas.read_csv("birthdays.csv").to_dict(orient="records")
now = datetime.now()
for birthday_person in birthdays_list:
    try:
        # BUG FIX: the original read `int(birthday_person["day"] == now.day)`,
        # converting the *comparison* to 0/1 instead of converting the day
        # value — so the day check was effectively broken. The parenthesis
        # now closes around the value, matching the month check.
        if int(birthday_person["month"]) == now.month and int(birthday_person["day"]) == now.day:
            # Pick one of the three letter templates at random.
            with open(f"letter_templates/letter_{random.randint(1, 3)}.txt") as template:
                to_send = template.read().replace(PLACEHOLDER, birthday_person["name"])
            with smtplib.SMTP("smtp.gmail.com") as connection:
                connection.starttls()
                connection.login(user=my_email, password=password)
                connection.sendmail(
                    from_addr=my_email,
                    to_addrs=birthday_person["email"],
                    msg=f"Subject:Happy Birthday!\n\n{to_send}"
                )
    except ValueError:
        # Non-numeric month/day cells are skipped silently.
        pass
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import functools
from collections import defaultdict
from dataclasses import dataclass
from textwrap import dedent
from typing import DefaultDict
import pytest
from pants.backend.python.macros import python_requirements
from pants.backend.python.macros.python_requirements import PythonRequirementsTargetGenerator
from pants.base.specs import RawSpecsWithoutFileOwners, RecursiveGlobSpec
from pants.core.target_types import FileTarget, GenericTarget, LockfilesGeneratorTarget
from pants.engine.addresses import Address, Addresses
from pants.engine.environment import EnvironmentName
from pants.engine.internals.synthetic_targets import (
SyntheticAddressMaps,
SyntheticTargetsRequest,
SyntheticTargetsSpecPaths,
rules,
)
from pants.engine.internals.target_adaptor import TargetAdaptor as _TargetAdaptor
from pants.engine.internals.testutil import resolve_raw_specs_without_file_owners
from pants.engine.rules import QueryRule, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
DescriptionField,
InvalidTargetException,
Tags,
Target,
WrappedTarget,
WrappedTargetRequest,
)
from pants.testutil.rule_runner import RuleRunner, engine_error
from pants.util.strutil import softwrap
# All example target adaptors report "BUILD" as their origin in errors.
TargetAdaptor = functools.partial(_TargetAdaptor, __description_of_origin__="BUILD")
@dataclass(frozen=True)
class SyntheticExampleTargetsRequest(SyntheticTargetsRequest):
    """Example request: all synthetic targets served by one rule call."""
    path: str = SyntheticTargetsRequest.SINGLE_REQUEST_FOR_ALL_TARGETS
@rule
async def example_synthetic_targets(
    request: SyntheticExampleTargetsRequest,
) -> SyntheticAddressMaps:
    """Provide the fixed set of example synthetic targets for the tests."""
    assert request.path == SyntheticTargetsRequest.SINGLE_REQUEST_FOR_ALL_TARGETS
    # (synthetic BUILD file path, tuple of target adaptors) pairs.
    targets = [
        (
            "src/test/BUILD.test",
            (
                TargetAdaptor(
                    "target",
                    "generic1",
                    description="Target 1",
                ),
                TargetAdaptor(
                    "target",
                    "generic2",
                    description="Target 2",
                    tags=["synthetic", "tags"],
                ),
            ),
        ),
        (
            "src/synthetic/BUILD.synthetic",
            (TargetAdaptor("target", "generic-synth", description="Additional target"),),
        ),
    ]
    return SyntheticAddressMaps.for_targets_request(request, targets)
class SyntheticExampleTargetsPerDirectorySpecPathsRequest:
    """Marker request type for the spec-paths rule below."""
    pass
@dataclass(frozen=True)
class SyntheticExampleTargetsPerDirectoryRequest(SyntheticTargetsRequest):
    """Example request: synthetic targets served one directory at a time."""
    path: str = SyntheticTargetsRequest.REQUEST_TARGETS_PER_DIRECTORY
    spec_paths_request = SyntheticExampleTargetsPerDirectorySpecPathsRequest
# How many times the per-directory rule ran for each path; cleared and
# inspected by test_register_synthetic_targets to assert request caching.
example_synthetic_targets_per_directory_counts: DefaultDict[str, int] = defaultdict(int)

# Directory -> [(synthetic BUILD file name, target adaptors), ...].
example_synthetic_targets_per_directory_targets = {
    "src/test": [
        (
            "BUILD.dir-a",
            (
                TargetAdaptor(
                    "target",
                    "generic3",
                    description="Target 3",
                ),
            ),
        ),
        (
            "BUILD.dir-b",
            (
                TargetAdaptor(
                    "target",
                    "generic4",
                    description="Target 4",
                    tags=["synthetic", "tags"],
                ),
            ),
        ),
    ],
    # Fixture for the name-collision regression test (#17343).
    "src/issues/17343": [
        (
            "BUILD.issue",
            (
                TargetAdaptor(
                    "_lockfiles",
                    "python-default",
                    sources=["lockfile"],
                ),
            ),
        ),
    ],
    # Directory with no real BUILD file at all (spec-paths coverage).
    "src/bare/tree": [
        (
            "BUILD.synthetic-targets",
            (TargetAdaptor("target", "bare-tree"),),
        ),
    ],
}
@rule
def example_synthetic_targets_per_directory_spec_paths(
    request: SyntheticExampleTargetsPerDirectorySpecPathsRequest,
) -> SyntheticTargetsSpecPaths:
    """Declare which directories hold synthetic targets (for spec matching)."""
    # Return all paths we have targets for.
    return SyntheticTargetsSpecPaths.from_paths(example_synthetic_targets_per_directory_targets)
@rule
async def example_synthetic_targets_per_directory(
    request: SyntheticExampleTargetsPerDirectoryRequest,
) -> SyntheticAddressMaps:
    """Provide the synthetic targets registered for a single directory."""
    assert request.path != SyntheticTargetsRequest.SINGLE_REQUEST_FOR_ALL_TARGETS
    # Track invocations so tests can assert per-directory request caching.
    example_synthetic_targets_per_directory_counts[request.path] += 1
    targets = example_synthetic_targets_per_directory_targets.get(request.path, ())
    return SyntheticAddressMaps.for_targets_request(request, targets)
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with both synthetic-target example rules."""
    return RuleRunner(
        rules=[
            *rules(),
            *python_requirements.rules(),
            *SyntheticExampleTargetsRequest.rules(),
            *SyntheticExampleTargetsPerDirectoryRequest.rules(),
            example_synthetic_targets,
            example_synthetic_targets_per_directory,
            example_synthetic_targets_per_directory_spec_paths,
            QueryRule(Addresses, [RawSpecsWithoutFileOwners]),
            QueryRule(Addresses, (DependenciesRequest, EnvironmentName)),
        ],
        target_types=[
            FileTarget,
            GenericTarget,
            LockfilesGeneratorTarget,
            PythonRequirementsTargetGenerator,
        ],
    )
def assert_target(
    rule_runner: RuleRunner,
    name_or_address: str | Address,
    alias: str = "target",
    description: str | None = None,
    tags: tuple | None = None,
) -> Target:
    """Resolve a target by name (in src/test) or full address and assert
    its alias, description, and tags; returns the target."""
    if isinstance(name_or_address, str):
        address = Address("src/test", target_name=name_or_address)
    elif isinstance(name_or_address, Address):
        address = name_or_address
    else:
        # BUG FIX: previously fell through with `address` unbound, raising
        # a confusing UnboundLocalError; fail fast with a clear message.
        raise TypeError(
            f"name_or_address must be str or Address, got {type(name_or_address).__name__}"
        )
    tgt = rule_runner.request(
        WrappedTarget,
        [WrappedTargetRequest(address, "synth test")],
    ).target
    assert tgt.alias == alias
    assert tgt.address.target_name == address.target_name
    assert tgt[DescriptionField].value == description
    assert tgt[Tags].value == tags
    return tgt
def test_register_synthetic_targets(rule_runner: RuleRunner) -> None:
    """Targets from both synthetic rule flavours resolve by name, and the
    per-directory rule runs exactly once per requested directory."""
    example_synthetic_targets_per_directory_counts.clear()
    expectations = [
        ("generic1", "Target 1", None),
        ("generic2", "Target 2", ("synthetic", "tags")),
        ("generic3", "Target 3", None),
        ("generic4", "Target 4", ("synthetic", "tags")),
    ]
    for name, description, tags in expectations:
        assert_target(rule_runner, name, description=description, tags=tags)
    assert example_synthetic_targets_per_directory_counts == {".": 1, "src": 1, "src/test": 1}
def test_override_synthetic_targets(rule_runner: RuleRunner) -> None:
    """A BUILD-file target with the same name replaces the synthetic one."""
    build_content = """target(name="generic2", description="from BUILD file")"""
    rule_runner.write_files({"src/test/BUILD": build_content})
    assert_target(rule_runner, "generic2", description="from BUILD file")
def test_extend_synthetic_targets(rule_runner: RuleRunner) -> None:
    """With _extend_synthetic=True the BUILD target inherits the synthetic
    target's fields (here: its tags) while overriding others."""
    build_content = (
        """target(name="generic2", description="from BUILD file", _extend_synthetic=True)"""
    )
    rule_runner.write_files({"src/test/BUILD": build_content})
    assert_target(
        rule_runner, "generic2", description="from BUILD file", tags=("synthetic", "tags")
    )
def test_synthetic_targets_with_defaults(rule_runner: RuleRunner) -> None:
    """__defaults__ in a BUILD file also applies to synthetic targets."""
    build_content = '__defaults__(dict(target=dict(tags=["default", "4real"])))'
    rule_runner.write_files({"src/test/BUILD": build_content})
    assert_target(rule_runner, "generic1", description="Target 1", tags=("default", "4real"))
def test_override_synthetic_targets_wrong_type(rule_runner: RuleRunner) -> None:
    """Extending a synthetic target with a different target type must fail."""
    rule_runner.write_files(
        {
            "src/test/BUILD": """file(name="generic1", source="BUILD", _extend_synthetic=True)""",
        }
    )
    # The expected error text is matched as a substring of the engine error.
    err = softwrap(
        """
        The `file` target 'generic1' in src/test/BUILD is of a different type than the synthetic
        target `target` from src/test/BUILD.test.
        When `_extend_synthetic` is true the target types must match, set this to false if you want
        to replace the synthetic target with the target from your BUILD file.
        """
    )
    with engine_error(InvalidTargetException, contains=err):
        rule_runner.request(
            WrappedTarget,
            [WrappedTargetRequest(Address("src/test", target_name="generic1"), "synth test")],
        )
def test_extend_missing_synthetic_target(rule_runner: RuleRunner) -> None:
    """_extend_synthetic=True with no synthetic target to extend must fail."""
    rule_runner.write_files(
        {
            "src/test/BUILD": """target(name="another", _extend_synthetic=True)""",
        }
    )
    err = softwrap(
        """
        The `target` target 'another' in src/test/BUILD has `_extend_synthetic=True` but there is no
        synthetic target to extend.
        """
    )
    # .strip() because softwrap keeps surrounding whitespace from the literal.
    with engine_error(InvalidTargetException, contains=err.strip()):
        rule_runner.request(
            WrappedTarget,
            [WrappedTargetRequest(Address("src/test", target_name="another"), "synth test")],
        )
def test_additional_spec_path(rule_runner: RuleRunner) -> None:
    """Directories that exist only synthetically are matched by glob specs."""
    # An empty BUILD avoids:
    # Exception: Unmatched glob from tests: "src/**"
    rule_runner.write_files({"src/BUILD": ""})
    addresses = resolve_raw_specs_without_file_owners(rule_runner, [RecursiveGlobSpec("src")])
    expected = {
        Address("src/synthetic", target_name="generic-synth"),
        Address("src/bare/tree", target_name="bare-tree"),
    }
    assert expected.issubset(set(addresses))
def test_target_name_collision_issue_17343(rule_runner: RuleRunner) -> None:
    # The issue was that the synthesized _lockfiles target is replaced by the python_requirements
    # target with the same name so the injected dependency pointed to itself rather than the
    # lockfile.
    rule_runner.set_options(
        [
            "--python-enable-resolves",
            "--python-resolves={'python-default': 'src/issues/17343/lockfile'}",
        ],
    )
    # The python_requirements generator deliberately reuses the synthetic
    # _lockfiles target's name ("_python-default_lockfile").
    rule_runner.write_files(
        {
            "src/issues/17343/BUILD": softwrap(
                """
                python_requirements(
                    name="_python-default_lockfile",
                    overrides={
                        "humbug": {
                            "dependencies": ["_python-default_lockfile#setuptools"],
                        },
                    },
                )
                """
            ),
            "src/issues/17343/lockfile": "lockfile content",
            "src/issues/17343/requirements.txt": dedent(
                """\
                humbug
                setuptools
                """
            ),
        }
    )
    tgt = assert_target(
        rule_runner,
        Address(
            "src/issues/17343", target_name="_python-default_lockfile", generated_name="setuptools"
        ),
        alias="python_requirement",
    )
    # This should just work, as the `python_requirements` has the same target name as the synthetic
    # _lockfiles target, the synthetic target will be replaced. The fix for #17343 is that there
    # shouldn't be a dependency added to the python_requirements target on the _lockfile as it won't
    # exist.
    addresses = rule_runner.request(Addresses, [DependenciesRequest(tgt[Dependencies])])
    assert addresses
|
"""Populate an item's `added` and `mtime` fields by using the file
modification time (mtime) of the item's source file before import.
Reimported albums and items are skipped.
"""
import os
from beets import util
from beets import importer
from beets.plugins import BeetsPlugin
class ImportAddedPlugin(BeetsPlugin):
    """Populate `added` (and optionally file/DB `mtime`) from the source
    file's pre-import modification time; reimported albums/items are skipped.
    """

    def __init__(self):
        super().__init__()
        self.config.add({
            'preserve_mtimes': False,
            'preserve_write_mtimes': False,
        })

        # item.id for new items that were reimported
        self.reimported_item_ids = None
        # album.path for old albums that were replaced by a reimported album
        self.replaced_album_paths = None
        # item path in the library to the mtime of the source file
        self.item_mtime = {}

        register = self.register_listener
        register('import_task_created', self.check_config)
        register('import_task_created', self.record_if_inplace)
        register('import_task_files', self.record_reimported)
        register('before_item_moved', self.record_import_mtime)
        register('item_copied', self.record_import_mtime)
        register('item_linked', self.record_import_mtime)
        register('item_hardlinked', self.record_import_mtime)
        register('item_reflinked', self.record_import_mtime)
        register('album_imported', self.update_album_times)
        register('item_imported', self.update_item_times)
        register('after_write', self.update_after_write_time)

    def check_config(self, task, session):
        # Validate the config value's type early (raises on bad config).
        self.config['preserve_mtimes'].get(bool)

    def reimported_item(self, item):
        """True if this item was recorded as a reimport."""
        return item.id in self.reimported_item_ids

    def reimported_album(self, album):
        """True if this album replaced a previously imported album."""
        return album.path in self.replaced_album_paths

    def record_if_inplace(self, task, session):
        """For in-place imports (no copy/move/link/...), the source is the
        destination, so record mtimes directly from the items' paths."""
        if not (session.config['copy'] or session.config['move'] or
                session.config['link'] or session.config['hardlink'] or
                session.config['reflink']):
            self._log.debug("In place import detected, recording mtimes from "
                            "source paths")
            items = [task.item] \
                if isinstance(task, importer.SingletonImportTask) \
                else task.items
            for item in items:
                self.record_import_mtime(item, item.path, item.path)

    def record_reimported(self, task, session):
        """Remember which items/albums in this task are reimports."""
        self.reimported_item_ids = {item.id for item, replaced_items
                                    in task.replaced_items.items()
                                    if replaced_items}
        self.replaced_album_paths = set(task.replaced_albums.keys())

    def write_file_mtime(self, path, mtime):
        """Write the given mtime to the destination path (atime preserved)."""
        stat = os.stat(util.syspath(path))
        os.utime(util.syspath(path), (stat.st_atime, mtime))

    def write_item_mtime(self, item, mtime):
        """Write the given mtime to an item's `mtime` field and to the mtime
        of the item's file.
        """
        # The file's mtime on disk must be in sync with the item's mtime.
        # BUG FIX: the path was previously wrapped in util.syspath() here AND
        # again inside write_file_mtime(), converting it twice; pass the
        # library path and let write_file_mtime convert exactly once.
        self.write_file_mtime(item.path, mtime)
        item.mtime = mtime

    def record_import_mtime(self, item, source, destination):
        """Record the file mtime of an item's path before its import."""
        mtime = os.stat(util.syspath(source)).st_mtime
        self.item_mtime[destination] = mtime
        self._log.debug("Recorded mtime {0} for item '{1}' imported from "
                        "'{2}'", mtime, util.displayable_path(destination),
                        util.displayable_path(source))

    def update_album_times(self, lib, album):
        """Set album.added (and optionally item mtimes) from recorded mtimes."""
        if self.reimported_album(album):
            self._log.debug("Album '{0}' is reimported, skipping import of "
                            "added dates for the album and its items.",
                            util.displayable_path(album.path))
            return

        album_mtimes = []
        for item in album.items():
            mtime = self.item_mtime.pop(item.path, None)
            if mtime:
                album_mtimes.append(mtime)
                if self.config['preserve_mtimes'].get(bool):
                    self.write_item_mtime(item, mtime)
                    item.store()
        # ROBUSTNESS: guard against min([]) raising ValueError when no
        # source mtimes were recorded for this album's items.
        if album_mtimes:
            album.added = min(album_mtimes)
            self._log.debug("Import of album '{0}', selected album.added={1} "
                            "from item file mtimes.", album.album, album.added)
            album.store()

    def update_item_times(self, lib, item):
        """Set item.added (and optionally its mtime) for singleton imports."""
        if self.reimported_item(item):
            self._log.debug("Item '{0}' is reimported, skipping import of "
                            "added date.", util.displayable_path(item.path))
            return
        mtime = self.item_mtime.pop(item.path, None)
        if mtime:
            item.added = mtime
            if self.config['preserve_mtimes'].get(bool):
                self.write_item_mtime(item, mtime)
            self._log.debug("Import of item '{0}', selected item.added={1}",
                            util.displayable_path(item.path), item.added)
            item.store()

    def update_after_write_time(self, item, path):
        """Update the mtime of the item's file with the item.added value
        after each write of the item if `preserve_write_mtimes` is enabled.
        """
        if item.added:
            if self.config['preserve_write_mtimes'].get(bool):
                self.write_item_mtime(item, item.added)
            self._log.debug("Write of item '{0}', selected item.added={1}",
                            util.displayable_path(item.path), item.added)
|
from character import *
class Boss(Main_person):
    """Dragon boss drawn on a character grid: tracks the player vertically
    on every other tick and spits projectiles that travel left."""

    def __init__(self, ycoo, xcoo, life):
        Main_person.__init__(self, ycoo, xcoo, life)
        self.boss = []    # ASCII-art sprite rows loaded from "./dragon"
        self.temp = 0     # tick counter; the boss only moves on even ticks
        self.balls = []   # (y, x) positions of live projectiles

    def create_boss(self, grid):
        """Load the 10x50 sprite from disk and stamp it onto the grid."""
        with open("./dragon") as obj:
            for line in obj:
                self.boss.append(line.strip('\n'))
        for i in range(10):
            for j in range(50):
                grid[self.ycoo+i][self.xcoo+j] = self.boss[i][j]

    def move_boss(self, grid, player_ycoo, player_xcoo):
        """Erase the sprite, move vertically toward the player (every other
        tick, clamped to rows 2..17), then redraw at the new position.

        NOTE(review): player_xcoo is accepted but unused — the boss only
        tracks the player vertically.
        """
        # Erase the sprite from its current position.
        for i in range(10):
            for j in range(50):
                grid[self.ycoo+i][self.xcoo+j] = " "
        # if (self.temp%2)==0:
        # self.ycoo = self.ycoo+1
        # else:
        # self.ycoo = self.ycoo-1
        # if(self.ycoo == 17):
        # self.temp=self.temp+1
        # elif(self.ycoo == 2):
        # self.temp=self.temp-1
        # for i in range(10):
        # for j in range(50):
        # grid[self.ycoo+i][self.xcoo+j]=self.boss[i][j]
        if(self.temp%2 == 0):
            # Nudge away from the vertical bounds first...
            if(self.ycoo == 17):
                self.ycoo = self.ycoo-1
            elif(self.ycoo == 2):
                self.ycoo = self.ycoo+1
            # ...then step one row toward the player.
            if(player_ycoo < self.ycoo):
                self.ycoo = self.ycoo-1
            else:
                self.ycoo = self.ycoo+1
        self.temp = self.temp+1
        # Redraw the sprite at the (possibly new) position.
        for i in range(10):
            for j in range(50):
                grid[self.ycoo+i][self.xcoo+j] = self.boss[i][j]

    def boss_balls(self, grid):
        """Spawn a projectile just left of the boss's mouth.

        NOTE(review): projectiles are marked with the integer 0 on the grid
        (not a character) — confirm the renderer expects that.
        """
        grid[self.ycoo+7][self.xcoo-3] = 0
        self.balls.append((self.ycoo+7, self.xcoo-3))

    def balls_propagate(self, grid):
        """Advance every projectile one column left; drop those at column 0.

        NOTE(review): pops from self.balls while iterating it — works here
        because at most the current element is removed, but fragile.
        """
        for x, y in self.balls:
            grid[x][y] = " "
            if(y == 0):
                self.balls.pop(self.balls.index((x, y)))
        self.balls = [(x, y-1) for x, y in self.balls]
        for x, y in self.balls:
            # Only draw into empty cells so the boss sprite isn't clobbered.
            if(grid[x][y] == " "):
                grid[x][y] = 0
|
#!/usr/bin/python2.7
import bs4
import unicodecsv as csv
import re
import time
from multiprocessing.dummy import Pool as ThreadPool
import urllib2
import MySQLdb
import sys
def processCroRealEstateDetailPage(text):
    """Parse one realestatecroatia.com detail page.

    Returns [cost, place, area, bedroom, parking, bath, garage]; each field
    is the string 'NULL' when missing (consumed verbatim by the SQL INSERT
    built in processCroRealEstate).

    FIX: the original used bare `except:` clauses, which also trap
    SystemExit/KeyboardInterrupt; narrowed to `except Exception:`.
    """
    # parse page for information
    bs = bs4.BeautifulSoup(text, 'lxml')
    # cost in EUR; thousands separators stripped, currency sign dropped
    try:
        cost = bs.find('td', string='Cijena:').find_next_sibling('td')
        cost = cost.find(string=re.compile(u'([,0-9]* \u20ac)'))
        cost = cost.replace(',', '').split(' ')[0]
    except Exception:
        cost = 'NULL'
    # place (quoted for direct SQL interpolation)
    try:
        place = bs.find(
            'td', string='Mjesto:').find_next_sibling('td').string
        place = '"' + place + '"'
    except Exception:
        place = 'NULL'
    # area in m^2 ("Povr.ina" matches the accented character)
    try:
        area = bs.find(
            'td', string=re.compile('Povr.ina:')
        ).find_next_sibling('td').string.split(' ')[0]
    except Exception:
        area = 'NULL'
    # number of bedrooms
    try:
        bedroom = bs.find(
            'td', string='Broj soba:').find_next_sibling('td').string
    except Exception:
        bedroom = 'NULL'
    # parking lot
    try:
        parking = bs.find(
            'td', string=re.compile('Parkirali.te:')
        ).find_next_sibling('td').string.split(' ')[0]
    except Exception:
        parking = 'NULL'
    # garden (parsed but NOT returned below — kept for parity with original)
    try:
        garden = bs.find(
            'td', string=re.compile('Oku.nica:')
        ).find_next_sibling('td').string.split(' ')[0]
    except Exception:
        garden = 'NULL'
    # number of bath tubs
    try:
        bath = bs.find(
            'td', string='Broj kupaona:'
        ).find_next_sibling('td').string
    except Exception:
        bath = 'NULL'
    # garage: boolean when present ('+' marks availability)
    try:
        garage = bs.find(
            'td', string=re.compile('Gara.a:')
        ).find_next_sibling('td').string
        garage = '+' in garage
    except Exception:
        garage = 'NULL'
    data = [cost, place, area, bedroom, parking, bath, garage]
    return data
def processCroRealEstate(page_url, table):
    """Scrape every listing page under page_url and bulk-insert the parsed
    rows into `table` (Python 2 code: urllib2, print statements, builtin
    reduce)."""
    weblink_base = "http://www.realestatecroatia.com/hrv/"
    # Determine the page count from the '>|' (last page) pagination link.
    bs = bs4.BeautifulSoup(urllib2.urlopen(page_url + '1').read(), 'lxml')
    last_page = int(bs.find('a', string='>|')['href'].split('=')[-1])
    pages = map(lambda i: page_url + str(i), range(1, last_page + 1))
    # get detail urls from pages
    def get_link_data(page):
        # NOTE(review): retries forever on any error, with no backoff/limit.
        while True:
            try:
                bs = bs4.BeautifulSoup(urllib2.urlopen(page).read(), 'lxml')
                return [weblink_base + a['href'] for a in bs.find_all('a', string='Vidi detalje')]
            except Exception as e:
                print e
    links = []
    pool = ThreadPool(5)
    links = pool.map(get_link_data, pages)
    pool.close()
    pool.join()
    # Flatten the per-page link lists.
    links = reduce(lambda a, b: a + b, links)
    # database configuration
    def reconnect_database(db):
        # Ping with auto-reconnect and force utf8 on the connection.
        db.ping(True)
        db.set_character_set('utf8')
        cursor = db.cursor()
        cursor.execute('SET NAMES utf8;')
        cursor.execute('SET CHARACTER SET utf8;')
        cursor.execute('SET character_set_connection=utf8;')
        return cursor
    # get detail data and parse it
    def parse_detail(link):
        # NOTE(review): also an unbounded retry loop.
        while True:
            try:
                result = processCroRealEstateDetailPage(
                    urllib2.urlopen(link).read())
                sql = (result[0], result[1], result[2], result[3], result[4], result[5], result[6])
                return sql
            except Exception as e:
                print e
    pool = ThreadPool(5)
    results = pool.map(parse_detail, links)
    pool.close()
    pool.join()
    # database configuration
    # NOTE(review): `table` is interpolated into the SQL string — safe only
    # because all callers pass hard-coded table names; values themselves are
    # parameterized via %s.
    query = "INSERT INTO " + table +\
        " (Country, Cost, Place, Area, Bedrooms, Parking, Bathrooms, Garage) " +\
        "VALUES ( 0, %s, %s, %s, %s, %s, %s, %s);"
    db = MySQLdb.connect("localhost", "nemo", "nemo_root", "nemodb")
    try:
        cursor = reconnect_database(db)
        cursor.executemany(query, results)
        db.commit()
    except Exception as e:
        print e
    db.close()
def getCroRealEstate():
    """Scrape all four realestatecroatia.com listing categories into their
    respective database tables (vrsta: 1=house, 2=condo; akcija: 1=sale,
    2=rent)."""
    base = 'http://www.realestatecroatia.com/hrv/' +\
        'list.asp?vrsta=%d&mjesto_o=&akcija=%d&submit=+&page='
    jobs = (
        (base % (1, 1), 'HomeOffers'),
        (base % (1, 2), 'HomeRents'),
        (base % (2, 1), 'CondoOffers'),
        (base % (2, 2), 'CondoRents'),
    )
    for url, table in jobs:
        processCroRealEstate(url, table)
if __name__ == "__main__":
    # Pipeline sketch; only the scraping step below is implemented.
    # find news sites which are ranked from best to worst
    # get information from websites
    # process information from websites into text form
    # assign geographical information to text
    # classify text good/bad
    # gather informatio about house selling deals
    getCroRealEstate()
    # gather information about rents
    # compile information into a database
|
from cinemana import Cinemana
# Fetch stream links for a title and print one URL per line.
cinem = Cinemana("Man in the high castle")
joined = "".join(entry['url'] + '\n' for entry in cinem.links)
print(joined)
class A(object):
    """Toy base class holding a fixed introduction string."""

    def __init__(self):
        self.intro = "I am Suraj"

    def func1(self):
        """Return the intro followed by a fixed suffix."""
        suffix = "...and I like to code"
        return self.intro + suffix
class B(A):
    """Subclass overriding func1.

    BUG FIXES: (1) A.__init__() was called without `self` (TypeError) and
    its None return was stored in newintro; (2) the else branch read
    `super(B, self).newintro`, an attribute A never defines
    (AttributeError); (3) an unreachable Python-2 print followed a return.
    """

    def __init__(self):
        A.__init__(self)  # properly initialize base state (sets self.intro)
        self.newintro = self.intro

    def func1(self, input):
        """Return a grumpy intro when input == 7, the plain intro otherwise."""
        if input == 7:
            return self.intro + "...and I HATTTTE to code"
        return self.newintro
# Demo: instantiate the base class and show its intro (Python 2 print).
test = A()
print test.intro
from flask import Flask, Response, request
from difflib import SequenceMatcher
import getopt, sys
# WSGI application object used by the route decorators below.
app = Flask(__name__)
@app.route('/')
def index():
    """Fallback root endpoint; the service only exposes /lcs."""
    message = "Page Requested Unavailable at the moment"
    return Response(message, status=404)
def check_values_is_unique(value):
    """Return True iff every element of `value` is unique.

    Callers pass lists of dicts (unhashable), so a set cannot be used;
    membership is tested against a growing list instead. Replaces the
    original side-effecting `any(x in seen or seen.append(x) ...)` idiom,
    which was hard to read and relied on append returning None.
    """
    seen = []
    for each_value in value:
        if each_value in seen:
            return False
        seen.append(each_value)
    return True
def lcs_algorithm(value_list):
    """Collect the longest common substring of every ordered pair of
    *distinct* strings in value_list, deduplicated, empties dropped."""
    common_strings = []
    for first in value_list:
        for second in value_list:
            if first == second:
                continue
            match = SequenceMatcher(None, first, second).find_longest_match(
                0, len(first), 0, len(second))
            candidate = first[match.a:match.a + match.size]
            if candidate and candidate not in common_strings:
                common_strings.append(candidate)
    return common_strings
def extract_value_list(value_data):
    """Pull the raw strings out of a [{"value": ...}, ...] payload."""
    values = []
    for entry in value_data:
        values.append(entry["value"])
    return values
def make_value_list(common_string_data):
    """Wrap plain strings back into the [{"value": ...}, ...] shape."""
    wrapped = []
    for item in common_string_data:
        wrapped.append({"value": item})
    return wrapped
@app.route('/lcs', methods=['GET', 'POST'])
def longest_common_string():
    """POST {"setOfStrings": [{"value": ...}, ...]} -> {"lcs": [...]}.

    Validates the method, JSON-ness, and presence/non-emptiness/uniqueness
    of setOfStrings, then returns the pairwise longest common substrings.
    """
    if request.method == 'GET':
        info = 'Requested Method is not accepted'
        return Response(info, status=406)
    elif request.method == 'POST':
        if request.is_json:
            string_set = dict(request.get_json())
            if "setOfStrings" in string_set.keys():
                if string_set["setOfStrings"] is None or list(string_set["setOfStrings"]).__eq__(list()):
                    data = 'setOfStrings in POST data should not be empty'
                    return Response(data, status=409)
                else:
                    if check_values_is_unique(string_set["setOfStrings"]):
                        common_strings = lcs_algorithm(extract_value_list(string_set["setOfStrings"]))
                        data = {"lcs": make_value_list(common_strings)}
                        # NOTE(review): str(dict) yields Python repr (single
                        # quotes), not valid JSON, despite the JSON mimetype.
                        return Response(str(data), status=200, mimetype="application/json")
                    else:
                        data = "setOfStrings in POST data must be a Set (Unique)"
                        return Response(data, status=409)
            else:
                data = "JSON Data must contains setOfStrings as its key"
                return Response(data, status=409)
        else:
            # NOTE(review): 404 for a non-JSON body; 415 would be conventional.
            data = "POST data must be in JSON Format"
            return Response(data, status=404)
    else:
        # NOTE(review): unreachable — the route only allows GET and POST.
        info = {'information': 'Requested {} Method is not allowed'.format(request.method)}
        return Response(str(info), status=404, mimetype='application/json')
if __name__ == '__main__':
    # Run the Flask development server with default host/port settings.
    app.run()
|
import gym
import gym_duckietown
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
import time
env = gym.make('Duckietown-straight_road-v0')
from numpy import random
import numpy as np
import matplotlib.pyplot as plt
def rgb2gray(rgb):
    """Convert an RGB image array (..., 3+) to grayscale using the
    ITU-R 601 luma weights; returns an array with the channel axis dropped."""
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)
# while True:
# obs = env.reset()
# env.render()
# time.sleep(2)
# Capture one grayscale frame per environment reset and archive them all.
# Fix: the original grew `imgs` with np.append() on every iteration, which
# re-allocates and copies the whole array each time (O(n^2) total work);
# frames are now accumulated in a list and stacked once at the end.
obs = env.reset()
frames = [rgb2gray(obs)]
steps = 0
while steps < 5000 - 1:
    obs = env.reset()
    frames.append(rgb2gray(obs))
    steps = steps + 1
imgs = np.stack(frames, axis=0)
print(imgs.shape)
np.savez('duckie_img.npz', imgs)
|
import unittest
import author_functions as af
class TestAverageWordLength(unittest.TestCase):
    """Tests for author_functions.avg_word_length on lists of text lines."""

    # Template for the assertion message shown when a test fails.
    failure_message = '''
When we called {}, we expected this:
{}
but your code returned this:
{}'''

    def approx(self, v1, v2):
        """ (float, float) -> bool
        Return True iff v1 and v2 are approximately equal.
        """
        #error_margin = 0.0001
        # NOTE(review): with a margin of 0.0 this is an exact float
        # comparison; restore 0.0001 to tolerate rounding error.
        error_margin = 0.0
        return v1 - error_margin <= v2 <= v1 + error_margin

    def prototype_test(self, text, expected):
        # Shared driver: call the function under test and assert the
        # result is within the allowed margin of `expected`.
        call = 'author_functions.avg_word_length({})'.format(text)
        returned = af.avg_word_length(text)
        msg = TestAverageWordLength.failure_message.format(call, expected,
                                                           returned)
        #self.assertEqual(returned, expected, msg)
        #Use next line if we want to allow some error margin
        self.assertTrue(self.approx(returned, expected), msg)

    def test_01_single_line_one_word(self):
        text = ["simple\n"]
        expected = 6.0
        self.prototype_test(text, expected)

    def test_02_two_lines_two_words(self):
        text = ["two\n","lines\n"]
        expected = 4.0
        self.prototype_test(text, expected)

    def test_03_multiple_words_single_line(self):
        text = ["multiple words single line\n"]
        expected = 5.75
        self.prototype_test(text, expected)

    def test_04_single_line_one_word_with_punctuation(self):
        # Expected 6.0: leading/trailing punctuation is not counted.
        text = [",,,simple:\n"]
        expected = 6.0
        self.prototype_test(text, expected)

    def test_05_two_lines_two_words_with_punctuation(self):
        text = ["two,\n", "lines!\n"]
        expected = 4.0
        self.prototype_test(text, expected)

    def test_06_multiple_words_single_line_with_punctuation(self):
        text = ["multiple \"words\" single line?\n"]
        expected = 5.75
        self.prototype_test(text, expected)

    def test_07_single_line_multiple_words_internal_punctuation(self):
        # Internal punctuation (hyphen, apostrophe) does count as letters.
        text = ["multi-word lines, gotta check'em!\n"]
        expected = 7.0
        self.prototype_test(text, expected)

    def test_08_single_line_all_punctuation_words(self):
        text = ["Not even a word !!?!\n"]
        expected = 3.0
        self.prototype_test(text, expected)
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module.
    unittest.main()
|
import sys
import math
# Parse a Gerber (GBR) file named on the command line and print one CSV row
# per pad: "x,y,width,height,K" where K is F (flashed aperture) or D
# (G36/G37 drawn region).  Coordinates are converted from Gerber integer
# units to millimeters by dividing by 1e6 — TODO confirm unit/format spec
# of the input files.
args = sys.argv
if len(args) > 1:
    filename = args[1]
else:
    print("specify GBR data file'")
    exit()
f = open(filename, 'r')
lines = f.readlines()
# state 0: normal; state 1: inside a G36...G37 region fill.
state = 0
# Maps aperture code (e.g. "D10") -> [width, height]; circles are stored
# as an equivalent-area square.
areaR = {}
n = 0
for line in lines:
    #print(n,state, line)
    #n = n + 1
    if state == 0 and line.startswith("%AD"):
        # Aperture definition: strip the %AD prefix and shape letters so
        # the remaining fields split on spaces.
        lineR = line.replace('%AD', '').replace("R","").replace("C","").replace("X"," ").replace(","," ").replace("*", " ")
        pos = lineR.split(" ")
        if line.find('C') == -1:
            # Rectangular aperture: store width and height directly.
            areaR[pos[0]] = [float(pos[1]), float(pos[2])]
        else:
            # Circular aperture: store the side of a square of equal area.
            d = float(pos[1])
            r = d / 2
            A = math.pi * r * r
            a = math.sqrt(A)
            areaR[pos[0]] = [a, a]
    elif state == 0 and line.startswith("D"):
        # Aperture select (e.g. "D10*"): remember the current aperture code.
        line = line.replace('*', '').replace("\n", "")
        ap = line.replace("\r", "")
    elif line.startswith("X"):
        # line = line.replace('X', ' ').replace("Y"," ").replace("D"," ").replace("*", " ")
        # note: assuming Rectangular aperture ('R')
        # Coordinate/operation line: X..Y..D0n* (I/J arc offsets stripped).
        line = line.replace('X', ' ').replace("Y"," ").replace("D"," ").replace("*", " ").replace("I", " ").replace("J", " ")
        pos = line.split(" ")
        posx = float(pos[1]) / 1000000.0
        posy = float(pos[2]) / 1000000.0
        op = pos[3] #D01=draw, D03=flash
        # NOTE(review): raises NameError/KeyError if a coordinate line
        # appears before any aperture select — confirm input always
        # selects an aperture first.
        xsize = areaR[ap][0]
        ysize = areaR[ap][1]
        if op == "03": #D03=flash
            print("{0:.3f},{1:.3f},{2:.3f},{3:.3f},F".format(posx, posy, xsize, ysize))
        elif state == 1 and op == "01":
            # Inside a region fill: accumulate centroid sums and the
            # bounding box.  (`np` here is a point counter, not numpy.)
            x, y = int(pos[1]) / 1000000.0, int(pos[2]) / 1000000.0
            xs, ys, np = xs + x, ys + y, np + 1
            if x < xmin:
                xmin = x
            if x > xmax:
                xmax = x
            if y < ymin:
                ymin = y
            if y > ymax:
                ymax = y
    if state == 0 and line.startswith("G36"):
        # start of group
        state = 1
        xs, ys, np = 0, 0, 0
        xmin, ymin = 1e9, 1e9
        xmax, ymax = -1e9, -1e9
    if state == 1 and line.startswith("G37"):
        # end of group
        if np > 0:
            # Emit the region as centroid + bounding-box size.
            posx, posy = xs / np, ys / np
            xsize, ysize = xmax - xmin, ymax - ymin
            print("{0:.3f},{1:.3f},{2:.3f},{3:.3f},D".format(posx, posy, xsize, ysize))
        state = 0
|
###############################################################
# Class: wowhead.py
# Description: scrapp wowhead.com
###############################################################
# system imports
import re
import requests
from lxml import html
import re
import array
import logging
class WowheadScrapper:
    """Scrapes recipe and produced-item ids from legion.wowhead.com."""

    BASE_URL = "http://legion.wowhead.com/"
    RECIPE_LIST_URL = "/items/recipes/{0}?filter=166;{1};0"
    ITEM_DETAIL_URL = "/item={0}"

    def __init__(self):
        self.log = logging.getLogger(self.__class__.__name__)
        self.log.info("WowheadScrapper init")
        # Fix: ``ur'...'`` literals are a SyntaxError on Python 3; plain
        # raw strings behave identically for these ASCII-only patterns.
        self.recipesExpr = re.compile(r'\_\[(\d+)\]')
        self.itemExpr = re.compile(r'^\/item\=(\d+)')

    def getPage(self, relative):
        """Fetch BASE_URL + *relative* and return the parsed lxml tree.

        Retries up to 5 times; raises Exception once all attempts fail.
        """
        for page_try in range(5):
            content = ""
            self.last_content = ""
            try:
                self.log.info("Gettig page: %s, try %d", relative,
                              page_try + 1)
                url = self.BASE_URL + relative
                self.last_url = url
                http_request = requests.get(url, headers={
                    'User-Agent': 'Mozilla/5.0'})
                self.log.info("Generate HTML tree")
                content = http_request.content
                self.last_content = content
                tree = html.fromstring(content)
                return tree
            except Exception as ex:
                # Log and fall through to the next attempt.  Bug fix: the
                # original raised inside this handler, so the retry loop
                # never actually retried.
                self.log.error("Exception in get page %s", str(ex))
                self.log.info(content)
        raise Exception("unable to get the page")

    def getRecipes(self, type, expansion):
        """Return the recipe ids listed for crafting *type*/*expansion*.

        (``type`` shadows the builtin but is kept: it is part of the
        public signature.)  Returns None on scrape failure.
        """
        try:
            self.log.info("Getting recipes type: %s, %d", type, expansion)
            list_url = self.RECIPE_LIST_URL.format(type, expansion)
            tree = self.getPage(list_url)
            # Recipe ids live in a JS array in the script node that
            # follows the listview div.
            data_node = tree.xpath(
                '//div[@class="listview"]/following-sibling::script')[0].text
            recipes = re.findall(self.recipesExpr, data_node)
            return recipes
        except Exception as ex:
            self.log.error("Exception getting recipes %s", str(ex))

    def getRecipeDetail(self, recipe):
        """Return the id of the item produced by *recipe*, or None."""
        try:
            self.log.info("Getting recipe detail: %s", recipe)
            item_url = self.ITEM_DETAIL_URL.format(recipe)
            tree = self.getPage(item_url)
            containers = tree.xpath(
                '//noscript/table/tr/td/span/a')
            for container in containers:
                link = container.get("href")
                item = re.search(self.itemExpr, link)
                if not(item is None):
                    item = item.groups()[0]
                    return item
            return None
        except Exception as ex:
            self.log.error("Exception getting recipes %s", str(ex))
|
# Demonstration of Python arithmetic, identity, and membership operators.
print(10 / 3) #3.3333333333333335, int / int still produces a float
print(10 // 3) #3 -> floor division (quotient)
print(10 % 3) #1 -> remainder
print(10 ** 3) #1000 -> exponentiation
a = "1"
b = "1"
c = 1
d = str(1) # "1"
print("a == b", a == b)
print("a is b", a is b) #True: both names reference the same interned string object
print("a == c", a == c) #a == c False
print("a is c", a is c) #a is c False
print("a == d", a == d) #a == d True -> equal values compare True
print("a is d", a is d) #a is d False -> `is` asks whether they are the same object
#inspecting memory addresses (id values are CPython-specific)
print(id(a)) #29722560
print(id(b)) #29722560
print(id(d)) #57645632
#identity comparison operators (is, is not)
e = False
f = bool(0)
print("e == f", e == f) #e == f True
print("e is f", e is f) #e is f True
#True, False and None are singletons: only one object of each exists
#so comparing them with `is` rather than `==` is the convention
g = None
h = None
print("g == h", g == h) #g == h True
print("g is h", g is h) #g is h True
#always compare to None with `is`
#membership operators (in, not in)
#comparison is case-sensitive
#the right operand is any iterable object; str is iterable too
str1 = "Python is Fun!"
print("P" in str1) #True
print("Python" in str1) #True
print("Java" not in str1) #True
#iterable
print(1 in [1, 2, 3]) #True
|
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
CustomUser = settings.AUTH_USER_MODEL
class School(models.Model):
    """A school that student profiles can reference."""
    # Unique display name for the school.
    name = models.CharField(unique=True,blank=False,max_length=80)
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile holding school affiliation and passing date."""
    user = models.OneToOneField(CustomUser, on_delete=models.CASCADE, related_name='profile')
    # SET_NULL keeps the profile alive if its school is deleted.
    school = models.ForeignKey(School, on_delete=models.SET_NULL, related_name='students', null=True)
    passing_date = models.DateField(null=True)
    def __str__(self):
        # NOTE(review): assumes the custom user model defines ``name`` —
        # confirm against the AUTH_USER_MODEL implementation.
        if self.user.is_superuser:
            return f'Superuser ({self.user.name}) profile'
        return f'{self.user.name} Profile'
@receiver(post_save, sender=CustomUser)
def update_profile_signal(sender, instance, created, **kwargs):
    """Create a Profile automatically whenever a new user is saved.

    (Removed the unused local binding of the created profile.)
    """
    if created:
        Profile.objects.create(user=instance)
        # Persist any further defaults set on the related profile.
        instance.profile.save()
|
##emails pupper pictures
"""
sends out an email on a regular basis of submissions grabbed off reddit from the pupperscraper
"""
import os, sys, inspect
import smtplib, time, email, io
from pupperScraper import pupperScraper
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
scraper = pupperScraper()
## gets emails from emails.txt
def get_email(in_file):
    """Read recipient addresses from *in_file*, one address per line.

    Bug fix: the original appended raw lines, so every address carried a
    trailing newline into the SMTP recipient field.  Lines are now
    stripped and blank lines skipped.
    """
    emails = []
    with open(in_file, 'r', encoding='utf-8') as email_file:
        for line in email_file:
            address = line.strip()
            if address:
                emails.append(address)
    return emails
## creates the message to be sent
def message_generator():
    """Build the daily multipart email: header text, one text+image part
    per scraped post, then footer text.

    Relies on the module-level ``scraper`` (pupperScraper) and on
    header.txt / footer.txt in the working directory.
    """
    msg = MIMEMultipart()
    msg['Subject'] = 'Doggo of The Day, '+datetime.today().strftime('%Y-%m-%d')
    msg['From'] = 'Pupper Bot'
    # Each entry appears to be a tuple whose [1] is the post title/text and
    # [2] is the image URL — TODO confirm against pupperScraper.
    pupper_tuple_list = scraper.pupperList
    ##attaches header to message
    with open ('header.txt','r',encoding = 'utf-8') as header_file:
        for line in header_file:
            msg.attach(MIMEText(line, 'plain'))
    for entry in pupper_tuple_list:
        msg.attach(MIMEText('\n'))
        msg.attach(MIMEText(entry[1],'plain'))
        msg.attach(MIMEText('<html><body><p><img src="'+entry[2]+'" width ="400"></p></body></html>','html','utf-8'))
    with open ('footer.txt','r',encoding = 'utf-8') as footer_file:
        for line in footer_file:
            msg.attach(MIMEText(line,'plain'))
    return msg
## main function which handshakes with email server and sends off message
def main():
    """Read credentials and recipients from local files, then send the
    generated message to every recipient over Gmail's SMTP-over-SSL."""
    # NOTE(review): secrets.txt holds the account password in plain text —
    # consider an environment variable or a secrets manager.
    with open('secrets.txt', 'r') as secrets_file:
        secret = secrets_file.read()
    emails = get_email('emails.txt')
    gmail_user = 'pupperemailer@gmail.com'
    message = message_generator()
    try:
        email_server_ssl = smtplib.SMTP_SSL(host='smtp.gmail.com', port=465)
        email_server_ssl.ehlo()
        email_server_ssl.login(gmail_user, secret)
        # Renamed loop variable: the original `email` shadowed the
        # imported stdlib email module.
        for recipient in emails:
            email_server_ssl.send_message(message, gmail_user, recipient)
        print('emails sent')
    except (smtplib.SMTPException, OSError):
        # Narrowed from a bare `except:` so real bugs (and Ctrl-C) are no
        # longer silently swallowed; behavior for SMTP failures unchanged.
        print('no email handshake...')
main()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from pants.bsp.spec.base import BuildTargetIdentifier, Uri
# -----------------------------------------------------------------------------------------------
# Resources Request
# See https://build-server-protocol.github.io/docs/specification.html#resources-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ResourcesParams:
    """Parameters of a BSP resources request: the build targets to query."""

    targets: tuple[BuildTargetIdentifier, ...]

    @classmethod
    def from_json_dict(cls, d: dict[str, Any]) -> Any:
        """Deserialize from the wire-format JSON dict."""
        parsed = [BuildTargetIdentifier.from_json_dict(raw) for raw in d["targets"]]
        return cls(targets=tuple(parsed))

    def to_json_dict(self) -> dict[str, Any]:
        """Serialize to the wire-format JSON dict."""
        serialized = [target.to_json_dict() for target in self.targets]
        return {"targets": serialized}
@dataclass(frozen=True)
class ResourcesItem:
    """The resources belonging to a single build target."""

    target: BuildTargetIdentifier
    # List of resource files.
    resources: tuple[Uri, ...]

    def to_json_dict(self):
        """Serialize to the wire-format JSON dict."""
        return {
            "target": self.target.to_json_dict(),
            "resources": self.resources,
        }
@dataclass(frozen=True)
class ResourcesResult:
    """Response to a BSP resources request: one item per queried target."""

    items: tuple[ResourcesItem, ...]

    def to_json_dict(self) -> dict[str, Any]:
        """Serialize to the wire-format JSON dict."""
        serialized_items = [item.to_json_dict() for item in self.items]
        return {"items": serialized_items}
|
import numpy as np
import matplotlib.pyplot as plt
import json
"""
" fonction qui affiche un gradient 2D de la densité nucléaire
" @param f un descripteur de fichier
"""
def plot(f):
    """Render a 2D heatmap of the nuclear density read from file object *f*.

    The first line of *f* must be a JSON config carrying the axis bounds
    (keys "x"/"z" with "min"/"max"); the remaining lines are a
    whitespace-separated float matrix for the theta=0 slice.
    """
    config = json.loads(f.readline())
    print(config)
    plt.imshow(np.loadtxt(f, dtype=np.float64), interpolation='bilinear', cmap="Blues")
    plt.title("Densité nucléaire (pour une tranche theta=0)");
    plt.xlabel("z (de %d fm à %d fm)" % (config["z"]["min"], config["z"]["max"]))
    plt.ylabel("x (de %d fm à %d fm)" % (config["x"]["min"], config["x"]["max"]))
    plt.show()
# on lit sur l'entree standart
import sys
plot(sys.stdin)
|
from itertools import permutations
import intcode
def thrust_calc(verbose=False):
    """Try every amplifier phase permutation (0..4) against the Intcode
    program in input.txt and return the maximum final thruster signal.

    Presumably Advent of Code 2019 day 7 part 1 — the IntCode interface
    (current_list/current_op/input/output/one_step) is project-defined;
    TODO confirm against the intcode module.
    """
    with open('input.txt') as my_file:
        input_list = list(map(lambda x: int(x), my_file.readline().split(',')))
    phase_orders = list(permutations(range(0,5)))
    max_thruster = 0
    for phase_order in phase_orders:
        outputs = []
        for x in phase_order:
            inst = intcode.IntCode(input_list)
            loop = 0
            first_input = True
            # Run until the halt opcode (99).
            while inst.current_list[inst.current_op] != 99:
                if inst.current_list[inst.current_op] == 3:
                    # Opcode 3 (input): first read gets the phase setting,
                    # later reads get the previous amplifier's output
                    # (0 for the very first amplifier).
                    if first_input:
                        inst.input = x
                        first_input = False
                    else:
                        try:
                            inst.input = outputs[-1]
                        except IndexError:
                            inst.input = 0
                if verbose:
                    print('step {}'.format(loop))
                    print('Starting list is \n{} \nwith operation index {}'.format(inst.current_list, inst.current_op))
                inst.one_step()
                if verbose:
                    print('Ending list is \n{} \nwith operation index {}'.format(inst.current_list, inst.current_op))
                    print('\n\n')
                loop += 1
            outputs.append(inst.output)
        # The last amplifier's output is this permutation's thrust.
        max_thruster = max(max_thruster, outputs[-1])
    return max_thruster
if __name__ == '__main__':
    # Compute and print the best thruster signal (non-verbose run).
    max_thrust = thrust_calc(verbose=False)
    print(max_thrust)
import enum
from collections import namedtuple
from arguments import Arguments
class ReportCode(enum.Enum):
    """Overall outcome of a scan run."""
    SUCCESS = 0
    UNSUPPORTED = 1
    FAILED = 2
# Backing record for ReportSeverityLevel values (sic: "Tupe" is the
# established spelling in this module).
ReportSeverityLevelTupe = namedtuple('SeverityLevel',
                                     ['code', 'short'])


class ReportSeverityLevel(enum.Enum):
    """Severity of an incident; ``code`` orders them, ``short`` labels output."""
    INFO = ReportSeverityLevelTupe(0, 'I')
    WARN = ReportSeverityLevelTupe(1, 'W')
    ERROR = ReportSeverityLevelTupe(2, 'E')

    @property
    def code(self):
        # Numeric rank (higher = more severe); used as the sort key in
        # Report.dump.
        return self.value.code

    @property
    def short(self):
        # One-letter tag printed in incident dumps.
        return self.value.short
# Backing record for ReportIncidentCode values.
ReportIncidentCodeTupe = namedtuple('IncidentCode',
                                    ['code',
                                     'description'])


class ReportIncidentCode(enum.Enum):
    """Classification of a scanner finding: id string plus human description."""
    BLACKLIST_FILENAME = ReportIncidentCodeTupe('B001', 'Blacklisted filename')
    BLACKLIST_STRING = ReportIncidentCodeTupe('E001', 'Blacklisted string')
    ENTROPY_STRING = ReportIncidentCodeTupe('E002', 'High entropy string')

    @property
    def code(self):
        # Short id printed in incident dumps (e.g. 'B001').
        return self.value.code

    @property
    def description(self):
        # Human-readable message printed in incident dumps.
        return self.value.description
class ReportIncident():
    """One finding produced by a scan: severity, code, location, author."""

    # Shorthand so callers can write Report.Incident.Code.<MEMBER>.
    Code = ReportIncidentCode

    def __init__(self, serverity, code, filename: str, offend: str, author: str):
        # NOTE: 'serverity' (sic) is kept — it is the established public
        # attribute/parameter name in this module.
        self.serverity = serverity
        self.code = code
        self.filename = filename
        self.offend = offend
        self.author = author

    def dump(self, verbose=True):
        """Print the incident; INFO/WARN are suppressed unless verbose."""
        if not verbose and (self.serverity == ReportSeverityLevel.INFO or self.serverity == ReportSeverityLevel.WARN):
            return
        # (Dropped the unused ``offend=`` format kwarg: the template has no
        # {offend} placeholder, so it was silently ignored.)
        print("- [{serverity}:{code}] {message}\n"
              " File : {file}\n"
              " Author: {author}".format(
                  serverity=self.serverity.short,
                  code=self.code.code,
                  file=self.filename,
                  author=self.author,
                  message=self.code.description))
        # Consistency fix: reference ReportSeverityLevel directly instead of
        # reaching through the later-defined Report class.
        if self.serverity == ReportSeverityLevel.ERROR and self.offend:
            print(" * Offend: >>>{}<<<".format(self.offend))
class Report:
    """Aggregate result of a scan: overall code plus collected incidents."""

    # Convenience aliases so callers only need to import Report.
    Code = ReportCode
    SeverityLevel = ReportSeverityLevel
    Incident = ReportIncident

    def __init__(self, code=ReportCode.FAILED):
        self.code = code
        # ReportIncident objects appended by the scanner.
        self.incidents = []

    def dump(self, verbose=True):
        """Print the scan result with incidents sorted most-severe first."""
        print("[ ] Scan result: {}".format(self.code.name))
        reordered = sorted(self.incidents, key=lambda x: x.serverity.code, reverse=True)
        for incident in reordered:
            incident.dump(verbose)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# S3 access utilities
#
# This file uses the boto3 client and provides simple functions to the Impala test suite
# to access Amazon S3.
import boto3
from tests.util.filesystem_base import BaseFilesystem
class S3Client(BaseFilesystem):
  """Thin wrapper over the boto3 S3 client/resource for the test suite."""

  def __init__(self, bucket):
    # Bug fix: the original decorated __init__ with @classmethod, which
    # bound `self` to the class, so every attribute assignment landed on
    # the class object instead of the instance.
    self.bucketname = bucket
    self.s3 = boto3.resource('s3')
    self.bucket = self.s3.Bucket(self.bucketname)
    self.s3client = boto3.client('s3')

  def create_file(self, path, file_data, overwrite=True):
    """Put *file_data* at key *path*; return False if it exists and
    overwrite is disabled."""
    if not overwrite and self.exists(path): return False
    self.s3client.put_object(Bucket=self.bucketname, Key=path, Body=file_data)
    return True

  def make_dir(self, path, permission=None):
    # This function is a no-op. S3 is a key-value store and does not have a
    # directory structure. We can use a non-existent path as though it
    # already exists.
    pass

  def copy(self, src, dst):
    """Server-side copy of key *src* to key *dst* within the bucket."""
    self.s3client.copy_object(Bucket=self.bucketname,
        CopySource={'Bucket': self.bucketname, 'Key': src}, Key=dst)
    assert self.exists(dst), \
        'S3 copy failed: Destination file {dst} does not exist'.format(dst=dst)

  # Since S3 is a key-value store, it does not have a command like 'ls' for a
  # directory structured filesystem. It lists everything under a path
  # recursively. We have to manipulate its response to get an 'ls' like output.
  def ls(self, path):
    """Return the immediate child names ("files" and "directories") of *path*."""
    if not path.endswith('/'):
      path += '/'
    # Use '/' as a delimiter so that we don't get all keys under a path
    # recursively.
    response = self.s3client.list_objects(
        Bucket=self.bucketname, Prefix=path, Delimiter='/')
    dirs = []
    # Non-keys or "directories" will be listed as 'Prefix' under
    # 'CommonPrefixes'.
    if 'CommonPrefixes' in response:
      dirs = [t['Prefix'] for t in response['CommonPrefixes']]
    files = []
    # Keys or "files" will be listed as 'Key' under 'Contents'.
    if 'Contents' in response:
      files = [t['Key'] for t in response['Contents']]
    files_and_dirs = []
    # Strip each directory prefix down to its last path component.
    files_and_dirs.extend([d.split('/')[-2] for d in dirs])
    for f in files:
      key = f.split("/")[-1]
      if not key == '':
        files_and_dirs += [key]
    return files_and_dirs

  def get_all_file_sizes(self, path):
    """Return the sizes (bytes) of the immediate keys under *path*."""
    if not path.endswith('/'):
      path += '/'
    # Use '/' as a delimiter so that we don't get all keys under a path
    # recursively.
    response = self.s3client.list_objects(
        Bucket=self.bucketname, Prefix=path, Delimiter='/')
    if 'Contents' in response:
      return [t['Size'] for t in response['Contents']]
    return []

  def exists(self, path):
    """True iff at least one key starts with *path*."""
    response = self.s3client.list_objects(Bucket=self.bucketname, Prefix=path)
    return response.get('Contents') is not None

  # Helper function which lists keys in a path. Should not be used by the
  # tests directly.
  def _list_keys(self, path):
    if not self.exists(path):
      return False
    response = self.s3client.list_objects(Bucket=self.bucketname, Prefix=path)
    contents = response.get('Contents')
    return [c['Key'] for c in contents]

  def delete_file_dir(self, path, recursive=False):
    """Delete key *path*, or every key under it when recursive=True."""
    if not self.exists(path):
      return True
    if recursive:
      objects = [{'Key': k} for k in self._list_keys(path)]
    else:
      # Bug fix: the non-recursive branch passed the bare path string, but
      # delete_objects requires a list of {'Key': ...} dicts.
      objects = [{'Key': path}]
    self.s3client.delete_objects(Bucket=self.bucketname,
        Delete={'Objects': objects})
    return True
|
def sacar(valor, notas_disponiveis=None):
    """Withdraw *valor* using the available bank notes.

    *notas_disponiveis* maps note denomination -> available count and is
    mutated in place to reflect the notes handed out.  Returns the list of
    dispensed notes, following the dict's insertion order (largest first
    with the default).  A denomination is only used when ALL the notes it
    would need are available.

    Rewrite of a broken original: it mixed Groovy (`.each { }`) and
    Python 2 syntax, referenced an undefined `nota`, used true division
    for note counts, and shared a mutable default argument across calls
    (fixed with a per-call fresh default).
    """
    if notas_disponiveis is None:
        notas_disponiveis = {100: 0, 50: 0, 20: 0, 10: 0}
    retorno = []
    for nota, qtd in notas_disponiveis.items():
        if nota <= valor:
            div = valor // nota
            if div <= qtd:
                retorno.extend([nota] * div)
                valor -= nota * div
                notas_disponiveis[nota] -= div
    return retorno
|
# 怎么也是水题?
# 怎么也是水题? ("is this also an easy problem?")
class Solution:
    def maximumEvenSplit(self, finalSum: int) -> list[int]:
        """Split finalSum into the maximum number of distinct positive even
        integers; return [] when finalSum is odd.

        Greedy: take 2, 4, 6, ... until the running sum passes finalSum,
        then drop the single element equal to the overshoot.
        (Annotation fixed from ``List[int]``, which is undefined outside
        the LeetCode harness.)
        """
        if finalSum % 2:
            return []
        res, tempsum = [], 0
        i = 2
        while tempsum <= finalSum:
            tempsum += i
            res.append(i)
            i += 2
        # Overshoot is even and already present in res, so removing it
        # restores the exact sum while keeping all elements distinct.
        delE = tempsum - finalSum
        if delE == 0:
            return res
        res.remove(delE)
        return res
|
# hard
# 优先队列
# 需要添加索引,以便查看最大值是否处于窗口内,否则就及时pop掉
# 时间复杂度较高 O(nlogn)
# hard
# Priority-queue approach: O(n log n).
# Each heap entry carries its index so stale maxima (outside the current
# window) can be popped lazily.
class Solution:
    def maxSlidingWindow(self, nums: list[int], k: int) -> list[int]:
        """Return the max of every length-k window of nums (heap version).

        Fixes vs. original: ``heapq`` is imported locally and the ``List``
        annotation (undefined outside LeetCode) replaced with ``list``.
        """
        import heapq  # local import: this snippet's file has no imports
        n = len(nums)
        # Python's heapq is a min-heap, so values are stored negated.
        q = [(-nums[i], i) for i in range(k)]
        heapq.heapify(q)
        ans = [-q[0][0]]
        for i in range(k, n):
            heapq.heappush(q, (-nums[i], i))
            # Discard maxima that have slid out of the window.
            while q[0][1] <= i - k:
                heapq.heappop(q)
            ans.append(-q[0][0])
        return ans
# 用deque维护单调队列 这种思想和数据结构要掌握!!!
# Monotonic-deque version — this data structure/technique is worth knowing!
class Solution:
    def maxSlidingWindow(self, nums: list[int], k: int) -> list[int]:
        """Return the max of every length-k window of nums in O(n).

        The deque holds indices whose values are strictly decreasing, so
        the window maximum is always at the front.  Fixes vs. original:
        ``deque`` is imported locally and the ``List`` annotation
        (undefined outside LeetCode) replaced with ``list``.
        """
        from collections import deque  # local import: file has no imports
        n = len(nums)
        q = deque()
        for i in range(k):
            # Drop smaller elements: they can never be a window max again.
            while q and nums[i] >= nums[q[-1]]:
                q.pop()
            q.append(i)
        ans = []
        ans.append(nums[q[0]])
        for i in range(k, n):
            while q and nums[i] >= nums[q[-1]]:
                q.pop()
            q.append(i)
            # Evict the front if it fell out of the window.
            while q[0] <= i - k:
                q.popleft()
            ans.append(nums[q[0]])
        return ans
|
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
import smtplib
#Swim Lane Reservation Scalper Bot
#Automates purchasing of City of Henderson pools' swim lane reservations upon release to public
######################################################################################################################
""""PREREQUISITES:
#Must install geckodriver for Selenium use with browser, https://selenium-python.readthedocs.io/installation.html"""
######################################################################################################################
################### Please fill in all variables marked with an 'X' ############################################
######################################################################################################################
browser = 'X' #Insert the browser you are using, 'Firefox' or 'Chrome'
######################################################################################################################
"""Information required from user
MUST FILL THESE OUT EVERY NEW RESERVATION"""
#Refer to SwimSlotDatabase.txt for a general listing of swim slots
#Check City of Henderson Rectrac for updates and most accurate listings:
#Multigen - https://web2.myvscloud.com/wbwsc/nvhendersonwt.wsc/search.html?display=listing&location=hmgc&module=AR&search=yes&type=reservelane
#Whitney - https://web2.myvscloud.com/wbwsc/nvhendersonwt.wsc/search.html?display=listing&location=wrip&module=AR&search=yes&type=reservelane
#Heritage - https://web2.myvscloud.com/wbwsc/nvhendersonwt.wsc/search.html?display=listing&location=hpac&module=AR&search=yes&type=reservelane
slotDescription = 'X' #Insert the description of the slot, includes pool name and time
#Can find master list of these in SwimSlotDatabase.txt
#Ex: HPAC 3:00-4:00pm
day = 'X' #Insert the initial for day of week for the slot
#Ex: Tu
familymember = 'X' #Insert legal first name of swimmer
######################################################################################################################
username = 'X' #Insert City of Henderson Rectrac username
password = 'X' #Insert City of Henderson Rectrac password
billingfirstname = 'X' #Insert first name
billinglastname = 'X' #Insert last name
billingphone = 'X' #Insert phone number, no dashes or parentheses
billingemail = 'X' #insert email
cardholder = 'X' #Insert name of cardholder that appears on card
creditcardnumber = 'X' #Insert credit card number
cardpaymentmethod = 'X' #choose between: VISA, Master Card, or Discover (Case sensitive, please follow as shown)
exp_month = 'X' #Insert expiration month of card as two digits
#Ex: 04
exp_year = 'X' #Insert expiration year of card as four digits
#Ex: 2024
cvv = 'X' #Insert CVV
billingaddress = 'X' #Insert billing address
billingzipcode = 'X' #Insert billing zip code
######################################################################################################################
"""If you would like to receive a text upon the confirmation of your swim slot, enter phone info here:"""
phonenumberSMS = 'X' #Insert phone number here, no dashes or parentheses
phonenumbercarrier = 'x' #choose from the following: att, tmobile, verizon, or sprint (Case sensitive, please follow as shown)
######################################################################################################################
#These are the two places where Firefox may be installed, please put these into binary variable if default doesn't work
#C:/Users/INSERT_USERNAME_HERE/AppData/Local/Mozilla Firefox/firefox.exe
#C:/Program Files/Mozilla Firefox/firefox.exe
#Sets selenium broswer to Firefox
if (browser == 'Firefox'):
binary = FirefoxBinary('C:/Program Files/Mozilla Firefox/firefox.exe')
driver = webdriver.Firefox(firefox_binary=binary)
#Sets selenium browser to Chrome
if (browser == 'Chrome'):
driver = webdriver.Chrome() #Insert correct path to Chrome exe if default doesn't work
######################################################################################################################
################################ DO NOT EDIT PAST THIS POINT ##################################################
################################ DO NOT EDIT PAST THIS POINT ##################################################
################################ DO NOT EDIT PAST THIS POINT ##################################################
################################ DO NOT EDIT PAST THIS POINT ##################################################
################################ DO NOT EDIT PAST THIS POINT ##################################################
######################################################################################################################
#Establishes system for sending messages, DO NOT EDIT
# Maps carrier name -> email-to-SMS gateway domain for that carrier.
carriers = {
    'att': '@mms.att.net',
    # NOTE(review): the leading space in the tmobile value looks accidental
    # and would corrupt the recipient address — confirm before relying on it.
    'tmobile':' @tmomail.net',
    'verizon': '@vtext.com',
    'sprint': '@page.nextel.com'
}
#Texts user confirmation of reservation, with info of time, date, pool, and swimmer
def send(message):
    """Text *message* to the configured phone number via its carrier's
    email-to-SMS gateway, using Gmail's SMTP server."""
    # Replace number in main function
    to_number = phonenumberSMS + '{}'.format(carriers[phonenumbercarrier])
    # SECURITY(review): account credentials are hard-coded in source;
    # move them to an environment variable or config file.
    auth = ('cohswimslotscalper@gmail.com', 'Sw1mSl0tScalp3r')
    # Establish a secure session with gmail's outgoing SMTP server.
    server = smtplib.SMTP("smtp.gmail.com", 587)
    try:
        server.starttls()
        server.login(auth[0], auth[1])
        # Send text message through SMS gateway of destination number.
        server.sendmail(auth[0], to_number, message)
    finally:
        # Bug fix: the connection was never closed, leaking a socket per send.
        server.quit()
######################################################################################################################
#Global variables
counter = 1 #Helps iterate between pages in change_page()
not_available_counter = 1 #Keeps track of how many times program has iterated through unavailable slot, could get confusing after some time running in terminal
thingsincart = 0 #Keeps track of whether wanted slot has been bought in begin_buy()
familymembercounter = 1 #Helps iterate through family members based on their xpath in choose_family_member()
calendardate = '' #Used to give date of reservation when sending SMS to user
wantedSlot = slotDescription + ' on ' + day #creates wantedSlot to search for, DO NOT EDIT
#logs into City of Henderson Webtrac website
def login():
    """Log into the WebTrac account using the configured username/password."""
    print('Logging in...')
    login_link = driver.find_element_by_id('menu_myaccount')
    login_link.click()
    username_box = driver.find_element_by_id('weblogin_username')
    username_box.click()
    username_box.send_keys(username)
    password_box = driver.find_element_by_id('weblogin_password')
    password_box.click()
    password_box.send_keys(password)
    login_button = driver.find_element_by_id('weblogin_buttonlogin')
    login_button.click()
#Chooses reservations/rentals tile to begin swim reservation process
def init_press():
    """Open the Reservations & Rentals section from the landing page."""
    reserve_swim_time_slot_link = driver.find_element_by_xpath('//a[@href="splash.html?ccode=RRSplash"]')
    print('Reservations & Rentals tile clicked')
    reserve_swim_time_slot_link.click()
#Chooses the Multigen and its pools to look for swim slots
def choose_multigen():
    """Navigate to the Multigen facility's lane-reservation listing."""
    print('Chose Multigen pool...')
    multigen_link = driver.find_element_by_xpath('//a[@href="search.html?display=listing&location=hmgc&module=AR&search=yes&type=reservelane"]')
    multigen_link.click()
#Chooses Whitney Ranch and its pool to look for swim slots
def choose_whitney_ranch():
    """Navigate to the Whitney Ranch facility's lane-reservation listing."""
    print('Chose Whitney Ranch pool...')
    whitney_ranch_link = driver.find_element_by_xpath('//a[@href="search.html?display=listing&location=wrip&module=AR&search=yes&type=reservelane"]')
    whitney_ranch_link.click()
#chooses Heritage and its pools to look for swim slots
def choose_heritage():
    """Navigate to the Heritage facility's lane-reservation listing."""
    print('Chose Heritage pool...')
    heritage_link = driver.find_element_by_xpath('//a[@href="search.html?display=listing&location=hpac&module=AR&search=yes&type=reservelane"]')
    heritage_link.click()
#Makes decision on what pool to look for slots in based on wanted slot
def choose_pool():
    """Open the pool listing whose facility code appears in wantedSlot.

    Codes are checked in the original priority order; if none match,
    report the problem and exit.
    """
    global wantedSlot
    pool_handlers = (
        ('HMIP', choose_multigen),
        ('HMCP', choose_multigen),
        ('WRIP', choose_whitney_ranch),
        ('HPAC', choose_heritage),
    )
    for code, handler in pool_handlers:
        if code in wantedSlot:
            handler()
            return
    print('Could not find a pool with wanted slot, please recheck inputs')
    quit()
#Adds available swim slot to cart
def add_to_cart(resultnum):
    """Click row *resultnum* (1-based) of the results table to add that
    slot to the cart, and track the cart count."""
    global thingsincart
    add_to_cart_link = driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[' + str(resultnum) + ']/td[1]')
    add_to_cart_link.click()
    thingsincart += 1
#When finished adding items to cart, will go to family selection screen
def go_to_family_selection():
    """Advance to the swimmer-selection screen, waiting up to 10 s for
    the continue button to appear."""
    print('Going to swimmer selection')
    element_present = EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div/div/div/button[2]'))
    timeout = 10
    WebDriverWait(driver, timeout).until(element_present)
    go_to_family_selection_link = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div/div/div/button[2]')
    go_to_family_selection_link.click()
def clear_cart_from_cookies():
    """Empty any slot left in the cart by a previous session's cookies."""
    clear_button = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div/div/div/button[1]')
    if clear_button.is_displayed():
        clear_button.click()
def choose_family_member():
    """Walk the family list until the configured swimmer is found, then tick their checkbox."""
    global familymembercounter
    while True:
        entry = driver.find_element_by_xpath('/html/body/div[1]/div/div/div/form/div[' + str(familymembercounter) + ']/h1/span')
        if familymember.lower() not in entry.text.lower():
            familymembercounter += 1
            continue
        # Checkboxes are indexed from 0, the form divs from 1.
        checkbox = driver.find_elements_by_xpath('//button[@role="checkbox"]')[familymembercounter - 1]
        checkbox.click()
        print('Chose Swimmer: ' + str(familymember))
        break
def fill_out_waivers():
    """Answer the COVID waiver dropdowns, tick the confirmation box, and continue."""
    print('Filling out waivers...')
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, 'button201')))
    driver.find_element_by_id('button201').click()
    # A slot can display as open while actually at max enrollment; bail out.
    if 'The MAX ENROLLED Count has been reached and no waitlist is allowed.' in driver.page_source:
        print('Sorry, this slot is actually filled up, please choose a time other than ' + wantedSlot)
        quit()
    # Likewise bail out when this swimmer already holds the slot.
    if 'Duplicate enrollment.' in driver.page_source:
        print('You have already signed up for this slot. Quitting program...')
        quit()
    # Both waiver dropdowns are answered "yes" (list item index 1); the
    # elements are re-queried on every pass because the DOM changes after
    # each dropdown is opened.
    for dropdown_index in (0, 1):
        driver.find_elements_by_xpath('//button[@class="combobox"]')[dropdown_index].click()
        driver.find_elements_by_class_name('listitem__text')[1].click()
    driver.find_element_by_xpath('//button[@role="checkbox"]').click()
    driver.find_element_by_id('processingprompts_buttoncontinue').click()
    print('Filled out waivers...')
def proceed_to_checkout():
    """Advance from the waiver flow to the checkout page."""
    print('Proceeding to checkout')
    driver.find_element_by_id('webcart_buttoncheckout').click()
def enter_billing_info():
    """Fill any billing contact fields WebTrac left blank, then continue."""
    print('Entering billing info...')
    # (field id, value) pairs; a field is only clicked/typed into when
    # get_attribute('') is truthy, mirroring the per-field blank check.
    contact_fields = [
        ('webcheckout_billfirstname', billingfirstname),
        ('webcheckout_billlastname', billinglastname),
        ('webcheckout_billphone', billingphone),
        ('webcheckout_billemail', billingemail),
        ('webcheckout_billemail_2', billingemail),
    ]
    for field_id, value in contact_fields:
        box = driver.find_element_by_id(field_id)
        if box.get_attribute(''):
            box.click()
            box.send_keys(value)
    driver.find_element_by_id('webcheckout_buttoncontinue').click()
#Enters payment info regarding credit card and address
#This is required only if swimmer does not have a membership tied to their name in RecTrac
def enterpaymentinfo():
    """Fill in the card payment form: method, cardholder, number, expiry, CVV, address."""
    print('Entering payment info...')
    #Enters card payment method
    select = Select(driver.find_element_by_id('webcheckout_requiredmethod'))
    select.select_by_visible_text(str(cardpaymentmethod))
    #Enters cardholder's name
    cardholder_enter = driver.find_element_by_id('webcheckout_nameoncard')
    cardholder_enter.click()
    cardholder_enter.send_keys(cardholder)
    #Enters credit card number
    # The card-number input lives inside an iframe; switch into it first.
    # Fixed: driver.switch_to_frame(...) is the deprecated spelling — use
    # switch_to.frame to match the switch_to.default_content() call below.
    driver.switch_to.frame("tokenFrame")
    creditcardnumber_enter = driver.find_element_by_id('ccnumfield')
    creditcardnumber_enter.click()
    creditcardnumber_enter.send_keys(creditcardnumber)
    driver.switch_to.default_content() #must go out of i-frame to access other contents
    #Enters expiration month
    datavaluexpathlink = "//*[@data-value='" #Used by both expiration month and year
    exp_month_enter = driver.find_element_by_id('webcheckout_expirationmonth_vm_3_button')
    exp_month_enter.click()
    month = driver.find_element_by_xpath(datavaluexpathlink + str(exp_month) + "']")
    actions = ActionChains(driver)
    actions.move_to_element(month).perform()
    WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, datavaluexpathlink + str(exp_month) + "']"))).click()
    #Enters expiration year (was confusingly stored in a variable named 'month')
    exp_year_enter = driver.find_element_by_id('webcheckout_expirationyear_vm_4_button')
    exp_year_enter.click()
    year = driver.find_element_by_xpath(datavaluexpathlink + str(exp_year) + "']")
    actions = ActionChains(driver)
    actions.move_to_element(year).perform()
    WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, datavaluexpathlink + str(exp_year) + "']"))).click()
    #Enters CVV
    cvv_enter = driver.find_element_by_id('webcheckout_cvv')
    cvv_enter.click()
    cvv_enter.send_keys(str(cvv))
    #Enters billing address
    billingaddress_enter = driver.find_element_by_id('webcheckout_billingaddress')
    billingaddress_enter.click()
    billingaddress_enter.send_keys(str(billingaddress))
    #Enters billing zip code
    billingzipcode_enter = driver.find_element_by_id('webcheckout_billingzipcode')
    billingzipcode_enter.click()
    billingzipcode_enter.send_keys(str(billingzipcode))
#Fills out entire checkout page, with info of billing address and payment method
def checkout():
    """Complete checkout: billing info, card details when required, then confirm and notify."""
    global phonenumberSMS
    global calendardate
    enter_billing_info()
    # The payment group only exists for accounts with no pool membership.
    # Fixed: the original tested an ExpectedCondition object directly, which
    # is always truthy and never raises NoSuchElementException, so the check
    # never actually distinguished anything — probe for the element itself.
    try:
        driver.find_element_by_id('webcheckout_group6')
        enterpaymentinfo()
    except NoSuchElementException:
        pass
    try:
        end_continue = driver.find_element_by_id('webcheckout_buttoncontinue')
        end_continue.click()
    except NoSuchElementException:
        pass
    print(wantedSlot + ' has been reserved for ' + familymember + '.')
    if (phonenumberSMS != ''):
        print('Sending text message...')
        confirmedreservation = '\n\n' + wantedSlot + ' ' + calendardate + ' has been reserved for ' + familymember + '.'
        send(confirmedreservation)
def beginbuy():
    """Start checkout when the cart has an item; otherwise keep polling the listings."""
    global thingsincart
    if thingsincart > 0:
        go_to_family_selection()
        choose_family_member()
    else:
        # Re-scan the listing pages until the wanted slot lands in the cart.
        while thingsincart == 0:
            iterate_pages()
#Clicks button to switch between pages 1-4, based on counter variable
def change_page():
    # The page-number buttons live at ul/li[counter]; the counter is advanced
    # here and wrapped back to page 1 after the last page.
    global counter
    counter += 1
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") #in cart overlay blocks page numbers, so must scroll to bottom
    time.sleep(2)  # give the overlay/scroll time to settle before clicking
    beginpagelink = '/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/ul/li['
    endpagelink = ']/button'
    if (counter == 3): #this fixes program checking page 2 twice, not sure if problem is on my end or website
        counter += 1
    if (counter == 6): #hits page 4, then goes back to page 1
        counter = 1
    page = driver.find_element_by_xpath(beginpagelink + str(counter) + endpagelink)
    page.click()
#goes through page of swim slots, and adds to cart those that match user's description/are available
def choose_slot():
    # Scans the (up to) 20 result rows on the current listing page; for a row
    # whose "description on weekday" matches wantedSlot, it is added to the
    # cart if Available, the program quits if Full, and a counter is bumped
    # if Unavailable (slot not open for booking yet).
    global calendardate
    global counter
    global not_available_counter
    # Parallel lists of table-cell elements, one entry per result row.
    slot_times = []
    slot_availability = []
    day_of_week_letter = []
    date = []
    description = []
    for i in range(1, 21):
        #tr[%i] determines what element it is, 1-21 per page
        slot_times.append(driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[%i]/td[5]' %(i)))
        slot_availability.append(driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[%i]/td[10]' %(i)))
        day_of_week_letter.append(driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[%i]/td[6]' %(i)))
        date.append(driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[%i]/td[4]/a/span[1]' %(i)))
        description.append(driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div/form/div[1]/div[2]/div/div[1]/table/tbody/tr[%i]/td[3]' %(i)))
        # The match key is "<description> on <weekday letter>".
        slots = description[i-1].text + ' on ' + day_of_week_letter[i-1].text
        if (wantedSlot in slots):
            #Tells user that slot is available and adds to cart
            if slot_availability[i-1].text == 'Available':
                print((description[i - 1].text + ' on ' + day_of_week_letter[i - 1].text + ' ' + date[i - 1].text) + ' is available')
                print('Adding to cart...')
                calendardate = date[i - 1].text
                add_to_cart(i) #clicks add to cart of available slot
            #Tells user that slot is full, quits program
            elif (slot_availability[i-1].text == 'Full'):
                print((description[i - 1].text + ' on ' + day_of_week_letter[i - 1].text + ' ' + date[i - 1].text) + ' is full')
                print('please choose another slot that is either Available or Unavailable')
                print('Quitting program')
                time.sleep(2)
                quit()
            #Tells user that slot is not yet available, will loop through pages until slot becomes available in beginbuy()
            elif (slot_availability[i-1].text == 'Unavailable'):
                print((description[i - 1].text + ' on ' + day_of_week_letter[i - 1].text + ' ' + date[i - 1].text) + ' is not available yet ' + '(' + str(not_available_counter) + ')')
                not_available_counter += 1
def iteration():
    """Scan the current page for slots, advance to the next page, and wait for it to load."""
    choose_slot()
    change_page()
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, 'arwebsearch_buttonsearch')))
def iterate_pages():
    """Sweep through the result listings (three page advances per sweep)."""
    for _ in range(3):
        iteration()
# Entry sequence: open WebTrac, sign in, navigate to the right pool, then
# loop over the listings until the wanted slot is carted and bought.
print('Slot wanted: ' + wantedSlot)
driver.get('https://web2.myvscloud.com/wbwsc/nvhendersonwt.wsc/splash.html?InterfaceParameter=WebTrac')
login()
init_press()
choose_pool()
# Drop anything a previous session left in the cart before searching.
clear_cart_from_cookies()
iterate_pages()
beginbuy()
fill_out_waivers()
proceed_to_checkout()
checkout()
exit()
|
def fi(x):
    """Print the Fibonacci numbers F(0)..F(x) and return F(x).

    The original printed the result but returned None; returning the last
    value is backward compatible and makes the function usable/testable.
    """
    fibSeq = []
    while len(fibSeq) < x + 1:
        if len(fibSeq) < 2:
            fibSeq.append(len(fibSeq))  # seeds: F(0)=0, F(1)=1
        else:
            fibSeq.append(fibSeq[-1] + fibSeq[-2])
    print("The Fibonacci sequence is:")
    print(str(fibSeq) + "\n")
    print("The number is:")
    print(fibSeq[-1])
    return fibSeq[-1]


fi(14)
from .models import Comment
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CommentForm(forms.ModelForm):
    """Form for posting a comment, rendered as a styled Bootstrap textarea."""
    # Fixed user-facing placeholder typo: "TExt" -> "Text".
    content = forms.CharField(label="", widget=forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'Text goes here!!!', 'rows':4, 'cols':'50'}))
    class Meta:
        model = Comment
        fields = ('content',)
class SignupForm(UserCreationForm):
    """Registration form extending Django's UserCreationForm with a required email."""
    email = forms.EmailField(max_length=200, help_text='Required')
    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')
|
import os
from unittest.mock import MagicMock, patch
from rubicon_ml import domain
from rubicon_ml.client import Artifact
def test_properties(project_client):
    """A client Artifact mirrors its domain object's fields and keeps its parent."""
    proj = project_client
    dom = domain.Artifact(name="test.txt")
    art = Artifact(dom, proj)
    assert art.id == dom.id
    assert art.name == dom.name
    assert art.description == dom.description
    assert art.created_at == dom.created_at
    assert art.parent == proj
def test_get_data(project_client):
    """_get_data loads the logged bytes onto the artifact."""
    payload = b"content"
    artifact = project_client.log_artifact(name="test.txt", data_bytes=payload)
    artifact._get_data()
    assert artifact.data == payload
def test_get_data_unpickle_false(project_client):
    """get_data(unpickle=False) hands back the raw logged bytes."""
    payload = b"content"
    artifact = project_client.log_artifact(name="test.txt", data_bytes=payload)
    assert artifact.get_data(unpickle=False) == payload
def test_get_data_unpickle_true(project_client):
    """Unpickle=True intended for retrieving python objects
    that were logged as artifacts, hence dummy object is needed.
    """
    project = project_client
    global TestObject  # cannot pickle local variable
    class TestObject:
        value = 1
    test_object = TestObject()
    # data_object= pickles the object; get_data(unpickle=True) round-trips it.
    artifact = project.log_artifact(name="test object", data_object=test_object)
    assert artifact.get_data(unpickle=True).value == test_object.value
@patch("fsspec.implementations.local.LocalFileSystem.open")
def test_download_cwd(mock_open, project_client):
    # download() with no arguments should write the artifact bytes to
    # <cwd>/<artifact name> via the local filesystem.
    project = project_client
    data = b"content"
    artifact = project.log_artifact(name="test.txt", data_bytes=data)
    artifact.data  # presumably forces the lazy data load before open is mocked — TODO confirm
    mock_file = MagicMock()
    mock_open.side_effect = mock_file
    artifact.download()
    mock_open.assert_called_once_with(os.path.join(os.getcwd(), artifact.name), mode="wb")
    mock_file().write.assert_called_once_with(data)
@patch("fsspec.implementations.local.LocalFileSystem.open")
def test_download_location(mock_open, project_client):
    # download(location=..., name=...) should write the bytes to the
    # explicitly requested path and filename.
    project = project_client
    data = b"content"
    artifact = project.log_artifact(name="test.txt", data_bytes=data)
    artifact.data  # presumably forces the lazy data load before open is mocked — TODO confirm
    mock_file = MagicMock()
    mock_open.side_effect = mock_file
    artifact.download(location="/path/to/tests", name="new_name.txt")
    mock_open.assert_called_once_with("/path/to/tests/new_name.txt", mode="wb")
    mock_file().write.assert_called_once_with(data)
|
# Copyright 2022 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for tracking the phase and usage of a qubit over time."""
from __future__ import annotations
from typing import Generator, Union
import numpy as np
class _QubitRef:
def __init__(self) -> None:
self.phase = _PhaseTracker(0)
self.last_used = 0
def increment_phase(self, phi: float) -> None:
self.phase[self.last_used] = self.phase.last_phase + phi
def update_last_used(self, new_t: int) -> None:
self.last_used = max(self.last_used, new_t)
class _PhaseTracker:
"""Tracks a phase reference over time."""
def __init__(self, initial_phase: float):
self._times: list[int] = [0]
self._phases: list[float] = [self._format(initial_phase)]
@property
def last_time(self) -> int:
return self._times[-1]
@property
def last_phase(self) -> float:
return self._phases[-1]
def changes(
self,
ti: Union[float, int],
tf: Union[float, int],
time_scale: float = 1.0,
) -> Generator[tuple[float, float], None, None]:
"""Changes in phases within ]ti, tf]."""
start, end = np.searchsorted(
self._times, (ti * time_scale, tf * time_scale), side="right"
)
for i in range(start, end):
change = self._phases[i] - self._phases[i - 1]
yield (self._times[i] / time_scale, change)
def _format(self, phi: float) -> float:
return phi % (2 * np.pi)
def __setitem__(self, t: int, phi: float) -> None:
phase = self._format(phi)
if t in self._times:
ind = self._times.index(t)
self._phases[ind] = phase
else:
ind = int(np.searchsorted(self._times, t, side="right"))
self._times.insert(ind, t)
self._phases.insert(ind, phase)
def __getitem__(self, t: int) -> float:
ind = int(np.searchsorted(self._times, t, side="right")) - 1
return self._phases[ind]
|
import time
given_string1="Motilal Nehru"
given_string2="Is Amit Anand"
print("Checking given_string1 ...")
time.sleep(1)
if given_string1[0:2]=='Is':
print("As the string contain 'Is' in the beginning")
print("Printing the given string ...")
time.sleep(1)
print(given_string1)
else:
print("Updating string with 'Is' in the beginning ")
time.sleep(1)
print(given_string1[:2]+"Is")
time.sleep(2)
print("Now checking the condition with given_string2")
time.sleep(2)
if given_string2[0:2]=='Is':
print("As the string contain 'Is' in the beginning")
print("Printing the given string ...")
time.sleep(1)
print(given_string2)
else:
print("Updating string with 'Is' in the beginning ")
print(given_string2[:2]+"Is") |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial api_docs schema: Topic -> Language -> Version -> Section, with
    rendered Pages/Elements grouped under optional Namespaces.
    """
    # NOTE(review): bytes defaults (b'') alongside `unicode_literals` are a
    # Python-2-era artifact; kept byte-for-byte since changing them would
    # alter the migration's recorded state.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Element',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.CharField(default=b'', max_length=256, blank=True)),
                ('fullname', models.CharField(max_length=128)),
                ('keywords', models.CharField(default=b'', max_length=256, blank=True)),
                ('data', models.TextField(default=b'', blank=True)),
                ('source_file', models.CharField(max_length=128, null=True, blank=True)),
                ('source_format', models.CharField(max_length=32, null=True, blank=True)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'Rendered Element',
                'verbose_name_plural': 'Rendered Elements',
            },
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('slug', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Namespace',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('display_name', models.CharField(default=b'', max_length=64, blank=True)),
                ('data', models.TextField(default=b'', blank=True)),
                ('source_file', models.CharField(max_length=128, null=True, blank=True)),
                ('source_format', models.CharField(max_length=32, null=True, blank=True)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.CharField(max_length=64)),
                ('title', models.CharField(max_length=64)),
                ('description', models.CharField(default=b'', max_length=256, blank=True)),
                ('fullname', models.CharField(max_length=128)),
                ('keywords', models.CharField(default=b'', max_length=256, blank=True)),
                ('data', models.TextField(default=b'', blank=True)),
                ('source_file', models.CharField(max_length=128, null=True, blank=True)),
                ('source_format', models.CharField(max_length=32, null=True, blank=True)),
                ('order_index', models.PositiveIntegerField(default=0, blank=True)),
                ('namespace', models.ForeignKey(blank=True, to='api_docs.Namespace', null=True)),
            ],
            options={
                'ordering': ('order_index',),
                'verbose_name': 'Rendered Page',
                'verbose_name_plural': 'Rendered Pages',
            },
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('slug', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Version',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('slug', models.CharField(max_length=64)),
                ('language', models.ForeignKey(to='api_docs.Language', null=True)),
            ],
        ),
        # Foreign keys added after creation to resolve cross-model ordering.
        migrations.AddField(
            model_name='section',
            name='topic_version',
            field=models.ForeignKey(to='api_docs.Version'),
        ),
        migrations.AddField(
            model_name='page',
            name='section',
            field=models.ForeignKey(to='api_docs.Section'),
        ),
        migrations.AddField(
            model_name='namespace',
            name='platform_section',
            field=models.ForeignKey(to='api_docs.Section'),
        ),
        migrations.AddField(
            model_name='language',
            name='current_version',
            field=models.ForeignKey(related_name='current_for_lang', blank=True, to='api_docs.Version', null=True),
        ),
        migrations.AddField(
            model_name='language',
            name='development_version',
            field=models.ForeignKey(related_name='development_for_lang', blank=True, to='api_docs.Version', null=True),
        ),
        migrations.AddField(
            model_name='language',
            name='topic',
            field=models.ForeignKey(to='api_docs.Topic'),
        ),
        migrations.AddField(
            model_name='element',
            name='namespace',
            field=models.ForeignKey(blank=True, to='api_docs.Namespace', null=True),
        ),
        migrations.AddField(
            model_name='element',
            name='section',
            field=models.ForeignKey(to='api_docs.Section'),
        ),
    ]
|
from collections import defaultdict
from itertools import combinations, groupby, permutations
from operator import itemgetter
from typing import Dict, List

import dash_core_components as dcc
import dash_table
import networkx as nx
import numpy as np
import pandas as pd
import plotly.graph_objs as go

import helpers
# Source CSVs for the book network: node attributes and weighted edges.
NODES_FILE = 'data/nodes.csv'
EDGES_FILE = 'data/edges.csv'
def read_node_df(node_file:str, dtype:bool=False):
    """Load the node CSV and normalize its column names to snake_case.

    *dtype* is accepted for interface compatibility but is not used.
    """
    frame = pd.read_csv(node_file)
    renames = {
        "Genre": "genre",
        "Number of Pages": "num_pages",
        "Price": "price",
        "Sales Rank": "sales_rank",
        "Average Rating": "avg_rating",
        "Number of Reviews": "num_reviews",
    }
    return frame.rename(columns=renames)
def read_edge_df(edge_file:str):
    """Load the edge CSV and normalize its column names (Frequency -> weight)."""
    frame = pd.read_csv(edge_file)
    return frame.rename(columns={
        "Source": "source",
        "Target": "target",
        "Frequency": "weight",
    })
# Module-level datasets loaded once at import time; most functions below
# default to these when no explicit DataFrame is passed.
NODE_DF = read_node_df(NODES_FILE)
EDGE_DF = read_edge_df(EDGES_FILE)
def get_nclique_options():
    """Return clique-size dropdown options ("1".."k") for the full graph."""
    graph = generate_graph()
    clique_info = get_cliques_by_size(graph)
    return [str(size + 1) for size in range(len(clique_info))]
def get_unique_genres(df = NODE_DF):
    """Alphabetically sorted list of the distinct genres in *df*."""
    return sorted(set(df['genre']))
def get_sales_rank_categories(df = NODE_DF):
    """Four sales-rank bucket labels split at the data's quartiles."""
    ranks = df['sales_rank']
    bounds = [
        int(ranks.min()),
        int(ranks.quantile(0.25)),
        int(ranks.quantile(0.5)),
        int(ranks.quantile(0.75)),
        int(ranks.max()),
    ]
    # First bucket is inclusive at both ends; later ones start 1 past the
    # previous boundary so the labels don't overlap.
    labels = [f"{bounds[0]} - {bounds[1]}"]
    labels += [f"{bounds[i] + 1} - {bounds[i + 1]}" for i in range(1, 4)]
    return labels
def get_unique_ratings(df = NODE_DF):
    """Sorted list of the distinct average ratings in *df*."""
    return sorted(set(df['avg_rating']))
def get_review_categories(df = NODE_DF):
    """Four review-count bucket labels split at the data's quartiles."""
    reviews = df['num_reviews']
    bounds = [
        int(reviews.min()),
        int(reviews.quantile(0.25)),
        int(reviews.quantile(0.5)),
        int(reviews.quantile(0.75)),
        int(reviews.max()),
    ]
    labels = [f"{bounds[0]} - {bounds[1]}"]
    labels += [f"{bounds[i] + 1} - {bounds[i + 1]}" for i in range(1, 4)]
    return labels
def get_num_pages_quantiles(df = NODE_DF):
    """Deciles (0%..100%) of the page counts, truncated to ints."""
    pages = df['num_pages']
    return [int(pages.quantile(decile * 0.1)) for decile in range(0, 11)]
def get_price_quantiles(df = NODE_DF):
    """Deciles (0%..100%) of the prices, rounded to cents."""
    prices = df['price']
    return [round(prices.quantile(decile * 0.1), 2) for decile in range(0, 11)]
def get_generic_insights(data_df:pd.DataFrame = NODE_DF):
    """Build a Plotly figure of summary indicators: gauges for pages/price/
    reviews (needle at the average, red threshold at the max) plus plain
    number tiles for the average rating and most common genre.
    """
    most_common_genre = data_df['genre'].mode()[0]
    min_pages = data_df['num_pages'].min()
    max_pages = data_df['num_pages'].max()
    avg_pages = data_df['num_pages'].mean()
    min_price = data_df['price'].min()
    max_price = data_df['price'].max()
    avg_price = data_df['price'].mean()
    min_reviews = data_df['num_reviews'].min()
    max_reviews = data_df['num_reviews'].max()
    avg_reviews = round(data_df['num_reviews'].mean())
    avg_rating = round(data_df['avg_rating'].mean())
    figure = {
        'data' : [
            go.Indicator(
                domain = {'x': [0, 0.33], 'y': [0.5, 1]},
                value = avg_pages,
                mode = "gauge+number",
                title = {'text': "(Min, Max, Avg) Pages"},
                gauge = {
                    'axis': {'range': [min_pages, max_pages]},
                    'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': max_pages}
                }
            ),
            go.Indicator(
                domain = {'x': [0.33, 0.66], 'y': [0.5, 1]},
                value = avg_price,
                mode = "gauge+number",
                number = {'prefix': "$"},
                title = {'text': "(Min, Max, Avg) Prices"},
                gauge = {
                    'axis': {'range': [min_price, max_price]},
                    'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': max_price}
                }
            ),
            go.Indicator(
                domain = {'x': [0.66, 1], 'y': [0.5, 1]},
                value = avg_reviews,
                mode = "gauge+number",
                title = {'text': "(Min, Max, Avg) Reviews"},
                gauge = {
                    'axis': {'range': [min_reviews, max_reviews]},
                    'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': max_reviews}
                }
            ),
            go.Indicator(
                domain = {'x': [0.5, 1], 'y': [0, 0.3]},
                value = avg_rating,
                mode = "number",
                title = {'text': "Avg Rating"},
                gauge = {
                    'axis': {'range': [0, 5]},
                    'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': 5}
                }
            ),
            go.Indicator(
                domain = {'x': [0, 0.5], 'y': [0, 0.3]},
                # NOTE(review): most_common_genre is a string passed as a
                # numeric indicator value — confirm it renders as intended.
                value = most_common_genre,
                mode = "number",
                title = {'text': "Top Genre"},
                gauge = {
                    'axis': {'range': [0, 5]},
                    'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': 5}
                }
            )
        ],
        'layout' : {
            "margin" : {"l": 0, "r" : 0, "t" : 50, "b" : 50}
        }
    }
    return figure
def get_stats_table(data_df: pd.DataFrame = NODE_DF):
    """
    Return a dash DataTable over *data_df* with custom (server-side)
    filtering, native multi-column sorting, and zebra-striped styling.
    """
    stats_table = dash_table.DataTable(
        id = "stats_table",
        columns = [
            {"name": i, "id": i, "deletable": False, "selectable": True} for i in data_df
        ],
        data = data_df.to_dict('records'),
        # filter_action="custom" routes filter queries to filter_stats_table.
        filter_action="custom",
        filter_query="",
        sort_action="native",
        sort_mode="multi",
        style_as_list_view = True,
        merge_duplicate_headers = True,
        page_size = 20,
        style_header = {
            'backgroundColor': 'white',
            'fontWeight': 'bold',
            'textAlign' : 'center'
        },
        style_table = {
            'overflowX': 'auto'
        },
        style_cell = {
            "padding" : "5px",
            "font-family" : "Source Sans Pro",
            "fontSize" : 16,
        },
        style_data={
            'height': 'auto'
        },
        # Zebra-stripe odd rows and center every column's cells.
        style_data_conditional = [
            {
                'if': {'row_index': 'odd'},
                'backgroundColor': 'rgb(248, 248, 248)'
            }
        ] + [{'if': {'column_id': c},'textAlign': 'center'} for c in data_df.columns]
        ,
        css = [
            {"selector": ".cell-1-1", "rule": "width: 100%;"},
            {"selector": 'td.cell--selected, td.focused', "rule": 'background-color: #6cc091 !important; color: #ffffff !important'}
        ]
    )
    return stats_table
def filter_stats_table(filter):
    """Apply a dash filter_query string to NODE_DF and return the filtered frame."""
    dff = NODE_DF
    for clause in filter.split(' && '):
        col_name, operator, filter_value = helpers.split_filter_part(clause)
        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators map directly onto pandas Series methods
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]
    return dff
def _compute_correlations_traces(corr_direction: str):
    """
    Returns information about positive / negative correlated features
    Checks all the edges and removes some based on corr_direction.
    -------------------
    direction = positive: Only returns the positive correlations and delete the edges with weight smaller than 0
    direction = negative: Only returns the negative correlations and delete the edges with weight equal or larger than 0
    """
    node_df = read_node_df(NODES_FILE)
    # Correlate only the numeric columns (column 0 is the node id).
    cor_matrix = node_df.iloc[:, 1:].corr()
    node_idx = cor_matrix.index.values
    cor_matrix = np.asmatrix(cor_matrix)
    # NOTE(review): nx.from_numpy_matrix was removed in networkx 3.0
    # (nx.from_numpy_array is the replacement) — confirm the pinned version.
    cor_G = nx.from_numpy_matrix(cor_matrix)
    cor_G = nx.relabel_nodes(cor_G, lambda x: node_idx[x])
    # Prune on a copy so edges can be removed while iterating the original.
    cor_G_copy = cor_G.copy()
    for feature_1, feature_2, weight in cor_G.edges(data=True):
        if corr_direction == "Positive":
            if weight["weight"] < 0:
                cor_G_copy.remove_edge(feature_1, feature_2)
        else:
            if weight["weight"] >= 0:
                cor_G_copy.remove_edge(feature_1, feature_2)
    # Generate circular layout positions
    pos = nx.circular_layout(cor_G_copy)
    temp_node_x = []
    temp_node_y = []
    temp_edge_x = []
    temp_edge_y = []
    # Edge trace: each segment is (x0, x1, None) so plotly breaks the line.
    for edge in cor_G_copy.edges():
        x0, y0 = pos[edge[0]]
        x1, y1 = pos[edge[1]]
        temp_edge_x.append(x0)
        temp_edge_x.append(x1)
        temp_edge_x.append(None)
        temp_edge_y.append(y0)
        temp_edge_y.append(y1)
        temp_edge_y.append(None)
    edge_trace = go.Scatter(
        x=temp_edge_x, y=temp_edge_y,
        line=dict(width=0.5, color='#888'),
        mode='lines'
    )
    for node in cor_G_copy.nodes():
        x, y = pos[node]
        temp_node_x.append(x)
        temp_node_y.append(y)
    node_trace = go.Scatter(
        x=temp_node_x, y=temp_node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            color='#00c292',
            size=15,
            line_width=2
        )
    )
    node_adjacencies = []
    node_text = []
    for node, adjacencies in enumerate(cor_G_copy.adjacency()):
        node_adjacencies.append(len(adjacencies[1]))
        node_text.append(
            # Append all node information here
            f"<b>{adjacencies[0]}"
        )
    edge_adjacencies = []
    edge_text = []
    for edge_pair in cor_G_copy.edges():
        wanted_weights = _get_weights(cor_G_copy, edge_pair[0])
        if edge_pair[0] != edge_pair[1]:
            cur_weight = wanted_weights[edge_pair[1]]['weight']
            edge_adjacencies.append(cur_weight)
            edge_text.append(
                # Append all node information here
                f"<b>Edge: {edge_pair}</b><br /><b>Properties:</b><br />\
                Weight: {cur_weight}\
                "
            )
    # Midpoints of non-self-loop edges, used to anchor the weight labels.
    # NOTE(review): Xe/Ye are built below but never used afterwards —
    # candidates for removal once confirmed dead.
    Xe=[]
    Ye=[]
    xt = []
    yt = []
    for e in cor_G_copy.edges():
        if e[0] != e[1]:
            mid_edge = 0.5*(pos[e[0]]+pos[e[1]])# mid point of the edge e
            xt.append(mid_edge[0])
            yt.append(mid_edge[1])
            Xe.extend([pos[e[0]][0], pos[e[1]][0], None])
            Ye.extend([pos[e[0]][1], pos[e[1]][1], None])
    trace_text = go.Scatter(
        x=xt, y=yt,
        mode='text',
        text=edge_text,
        textposition='bottom center',
        hoverinfo='text'
    )
    node_trace.text = node_text
    return node_trace, edge_trace, trace_text
def _get_weights(G: nx.Graph, matching_col: str):
    """Return the adjacency dict of the node named *matching_col* (None if absent)."""
    wanted = None
    for node_name, neighbours in G.adjacency():
        if node_name == matching_col:
            wanted = neighbours
    return wanted
def generate_correlation_network(corr_direction: str):
    """
    Returns Plotly Graph object for correlation network.
    """
    traces = _compute_correlations_traces(corr_direction)
    layout = go.Layout(
        title={"text": f"<b>Correlation Network ({corr_direction})</b>"},
        titlefont_size=16,
        showlegend=False,
        hovermode='closest',
        margin=dict(b=20, l=5, r=5, t=40),
    )
    fig = go.Figure(data=list(traces), layout=layout)
    fig.update_layout(
        title=helpers.style_title(),
        font=helpers.style_font(),
    )
    return fig
def _compute_centrality_measures(G: nx.Graph):
    """Annotate every node of *G* with degree/betweenness/closeness centrality."""
    metrics = {
        'degree_centrality': list(nx.degree_centrality(G).values()),
        'betweenness_centrality': list(nx.betweenness_centrality(G).values()),
        'closeness_centrality': list(nx.closeness_centrality(G).values()),
    }
    for idx, node_id in enumerate(G.nodes()):
        for metric_name, values in metrics.items():
            G.nodes[node_id][metric_name] = values[idx]
    return G
def _compute_clustering_coefficients(G: nx.Graph):
    """Annotate every node of *G* with its local clustering coefficient."""
    for node_id, coefficient in nx.clustering(G).items():
        G.nodes[node_id]['clustering_coefficient'] = coefficient
    return G
def _compute_adjacencies(G: nx.Graph):
    """Annotate every node of *G* with its neighbour count."""
    for node_id in G.nodes():
        G.nodes[node_id]['num_connections'] = len(G[node_id])
    return G
"""
Clique Analysis
"""
def _generate_intracluster_strength(G:nx.Graph, nodes: List[int]) -> float:
    """Sum the edge weights over all unordered node pairs in *nodes*,
    normalized by the number of nodes.

    Fixed the return annotation (the division always yields a float, not an
    int) and replaced the sorted-permutations-then-groupby deduplication
    with itertools.combinations, which produces each unordered pair once.
    """
    total_interactions = 0
    for node_a, node_b in combinations(nodes, 2):
        total_interactions += G[node_a][node_b]['weight']
    return total_interactions / len(nodes)
def generate_graph(dataset: str = 'amazon', filters=None, nodelist=None):
    """Build a networkx Graph of books (nodes) and co-purchase links (edges).

    Args:
        dataset: only 'amazon' is supported; anything else yields an empty graph.
        filters: optional mapping of UI filter names (see filter_mapping) to
            selected values; rows failing any filter are dropped.
        nodelist: optional explicit list of node ids to keep (only honored when
            `filters` is supplied, matching the original behavior).

    Returns:
        Graph with per-node catalogue attributes plus derived centrality,
        clustering-coefficient and adjacency-count metrics.
    """
    G = nx.Graph()
    if dataset == 'amazon':
        node_df = NODE_DF
        edge_df = EDGE_DF
        if filters:
            # UI filter name -> dataframe column.
            filter_mapping = {
                'genre_filter': 'genre',
                'sales_rank_filter': 'sales_rank',
                'rating_filter': 'avg_rating',
                'reviews_filter': 'num_reviews',
                'page_filter': 'num_pages',
                'price_filter': 'price',
                'nclique_filter': 'nclique',
                'nodes_filter': 'nodes'
            }
            filtered_df = node_df.copy()
            for filter_name, value in filters.items():
                column = filter_mapping[filter_name]
                if not isinstance(value, list):
                    value = [value]
                if column in ['genre', 'avg_rating']:
                    # Categorical filters: exact membership.
                    filtered_df = filtered_df[filtered_df[column].isin(value)]
                elif column in ['sales_rank', 'num_reviews']:
                    # Each entry is a "low - high" range string; keep rows
                    # falling in any selected range.
                    slices = []
                    for value_range in value:
                        low, high = value_range.split(" - ")
                        slices.append(filtered_df[filtered_df[column].between(int(low), int(high))])
                    # DataFrame.append() was removed in pandas 2.0; concat instead.
                    filtered_df = pd.concat(slices) if slices else filtered_df.iloc[0:0]
                elif column in ['num_pages', 'price']:
                    # Slider values arrive as decile indices (0-10).
                    low, high = value
                    low_quantile = filtered_df[column].quantile(low * 0.1)
                    high_quantile = filtered_df[column].quantile(high * 0.1)
                    filtered_df = filtered_df[filtered_df[column].between(low_quantile, high_quantile)]
                elif column in ['nodes']:
                    # Comma-separated node ids typed by the user; merged into
                    # any nodelist passed by the caller.
                    value = value[0]
                    if value and nodelist is not None:
                        nodelist += value.split(',')
                    elif value and nodelist is None:
                        nodelist = value.split(',')
            if nodelist is not None:
                filtered_df = filtered_df[filtered_df['id'].isin(nodelist)]
            keep_nodes = filtered_df['id']
            node_df = filtered_df
            # Drop edges touching any filtered-out node.
            edge_df = edge_df[edge_df['source'].isin(keep_nodes) & edge_df['target'].isin(keep_nodes)]
        # Create nodes with their catalogue attributes.
        node_attrs = ('genre', 'num_pages', 'price', 'sales_rank', 'avg_rating', 'num_reviews')
        for index, row in node_df.iterrows():
            G.add_node(row['id'], id=row['id'])
            for attr in node_attrs:
                G.nodes[row['id']][attr] = row[attr]
        # Create edges
        for index, row in edge_df.iterrows():
            G.add_edge(row['source'], row['target'], weight=row['weight'])
    # Centrality calculations
    G = _compute_centrality_measures(G)
    # Clustering coefficients
    G = _compute_clustering_coefficients(G)
    G = _compute_adjacencies(G)
    return G
def get_cliques_by_size(G: "nx.Graph") -> "Dict[int, List[dict]]":
    """Group the maximal cliques of G by their size.

    Returns a defaultdict mapping clique size -> list of {'nodes': [...]}
    entries. The original enumerated the cliques twice (once for the cliques,
    once for their sizes); a single pass suffices.
    """
    maximal_cliques_dict = defaultdict(list)
    for clique in nx.find_cliques(G):
        maximal_cliques_dict[len(clique)].append({'nodes': clique})
    return maximal_cliques_dict
def generate_clique_metrics(G: nx.Graph) -> Dict[str, List[int]]:
    """Attach average price/rating/review-count and intracluster strength to
    every maximal clique, grouped by clique size."""
    cliques_by_size = get_cliques_by_size(G)
    for clique_infos in cliques_by_size.values():
        for clique_info in clique_infos:
            members = clique_info['nodes']
            clique_info['avg_price'] = np.mean([G.nodes[m]['price'] for m in members])
            clique_info['avg_rating'] = np.mean([G.nodes[m]['avg_rating'] for m in members])
            clique_info['avg_review'] = np.mean([G.nodes[m]['num_reviews'] for m in members])
            clique_info['intracluster_strength'] = _generate_intracluster_strength(G, members)
    return cliques_by_size
# Initialize NetworkX graph
# Module-level default graph built from the full (unfiltered) dataset;
# reused by the plotting helpers below whenever no filters are supplied.
networkGraph = generate_graph()
def plot_graph(G=networkGraph, params=None):
    """Render graph G as a Plotly figure: edges as line traces, nodes colored
    by their connection count.

    Args:
        G: graph to draw; rebuilt from `params` filters when params is given.
        params: optional dict of UI filters that must contain
            'chart_type_option'. NOTE: that key is pop()ed, mutating the
            caller's dict (kept as-is for backward compatibility).

    Returns:
        plotly.graph_objects.Figure with transparent background and hidden axes.
    """
    chart_type_option = 'spring'
    if params:
        chart_type_option = params.pop('chart_type_option')
        valid_params = {key: value for key, value in params.items() if value}
        G = generate_graph(filters=valid_params)
    # Supported node-positioning algorithms.
    layouts = {
        'circular': lambda g: nx.circular_layout(g),
        'kamada-kawai': lambda g: nx.kamada_kawai_layout(g),
        'random': lambda g: nx.random_layout(g),
        'shell': lambda g: nx.shell_layout(g),
        'spring': lambda g: nx.spring_layout(g),
        'spectral': lambda g: nx.spectral_layout(g)
    }
    pos = layouts[chart_type_option](G)
    # One scatter trace per edge so each edge carries its own line width.
    # (The original also built unused edge_x/edge_y accumulators — removed.)
    edge_traces = []
    for start, end, data in G.edges(data=True):
        x0, y0 = pos[start]
        x1, y1 = pos[end]
        edge_traces.append(go.Scatter(
            x=[x0, x1, None], y=[y0, y1, None],
            # Log-scale the width so heavy edges don't dominate.
            # NOTE(review): weight <= 1 yields width <= 0 — confirm weights > 1.
            line=dict(width=np.log(data['weight']), color='#888'),
            hoverinfo='none',
            mode='lines'
        ))
    node_x = []
    node_y = []
    for node in G.nodes():
        x, y = pos[node]
        node_x.append(x)
        node_y.append(y)
    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            showscale=False,
            # colorscale options
            #'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
            #'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
            #'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
            colorscale='YlGnBu',
            reversescale=True,
            color=[],
            size=10,
            line_width=2))
    # Hover text: one "<b>key:</b> value" line per node attribute.
    node_text = []
    for node in G.nodes():
        node_info = G.nodes()[node]
        node_text.append("<br>".join([f"<b>{key}:</b> {value}" for key, value in node_info.items()]))
    node_trace.marker.color = [G.nodes()[node]['num_connections'] for node in G]
    node_trace.text = node_text
    fig = go.Figure(data=edge_traces + [node_trace],
                    layout=go.Layout(
                        showlegend=False,
                        hovermode='closest',
                        xaxis={'visible': False},
                        yaxis={'visible': False},
                        paper_bgcolor='rgba(0,0,0,0)',
                        plot_bgcolor='rgba(0,0,0,0)',
                        margin=dict(b=20, l=5, r=5, t=40)
                    ))
    return fig
def get_ego_network(G: nx.Graph, rank: int):
    """
    Returns information of nth rank ego-network.
    ------------------------
    ego_node: Ego node (None if `rank` exceeds the number of nodes)
    hub_ego: networkx.Graph (empty graph when ego_node is None)
    pos: Dict mapping each node of the ego network to its x-y coordinates.
    """
    degrees_ascending = sorted(G.degree(), key=itemgetter(1))
    try:
        # rank 1 -> highest-degree node, rank 2 -> second highest, ...
        ego_node = degrees_ascending[-rank][0]
        hub_ego = nx.ego_graph(G, ego_node)
    except IndexError:
        ego_node = None
        hub_ego = nx.Graph()
    pos = nx.spring_layout(hub_ego)
    return ego_node, hub_ego, pos
def generate_cyto_ego_networks(G: nx.Graph, n_ranks: int) -> List[nx.Graph]:
    """Return the top-n ego networks of G, flagging each ego node via 'is_ego'."""
    ego_networks = []
    for rank in range(1, n_ranks + 1):
        ego_node, hub_ego, _ = get_ego_network(G, rank)
        for node in hub_ego.nodes():
            # True exactly for the hub node of this ego network.
            hub_ego.nodes[node]['is_ego'] = (node == ego_node)
        ego_networks.append(hub_ego)
    return ego_networks
def get_ego_node_edge_traces(G: nx.Graph, n_ranks: int):
    """
    Returns Plotly Graph objects for n ego-networks.

    For each rank 1..n_ranks, builds one edge trace (all edges as disjoint
    line segments) and one node trace (markers colored by adjacency count).
    Returns (node_traces, edge_traces), each a list of length n_ranks.
    """
    node_traces = []
    edge_traces = []
    for i in range(n_ranks):
        temp_node_x = []
        temp_node_y = []
        temp_edge_x = []
        temp_edge_y = []
        ego_node, hub_ego, pos = get_ego_network(G, i+1) # ego_node to be used to make it red
        # Edge coordinates as x0,x1,None / y0,y1,None triplets: the None breaks
        # the line between segments so one trace can draw every edge.
        for edge in hub_ego.edges():
            x0, y0 = pos[edge[0]]
            x1, y1 = pos[edge[1]]
            temp_edge_x.append(x0)
            temp_edge_x.append(x1)
            temp_edge_x.append(None)
            temp_edge_y.append(y0)
            temp_edge_y.append(y1)
            temp_edge_y.append(None)
        edge_trace = go.Scatter(
            x=temp_edge_x, y=temp_edge_y,
            line=dict(width=0.5, color='#888'),
            hoverinfo='none',
            mode='lines'
        )
        for node in hub_ego.nodes():
            x, y = pos[node]
            temp_node_x.append(x)
            temp_node_y.append(y)
        node_trace = go.Scatter(
            x=temp_node_x, y=temp_node_y,
            mode='markers',
            hoverinfo='text',
            marker=dict(
                showscale=True,
                # colorscale options
                #'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
                #'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
                #'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
                colorscale='YlGnBu',
                reversescale=True,
                color=[],
                size=10,
                colorbar=dict(
                    thickness=15,
                    title='Node Connections',
                    xanchor='left',
                    titleside='right'
                ),
                line_width=2
            )
        )
        node_adjacencies = []
        node_text = []
        # NOTE(review): `node` here is the enumerate index, not the graph node
        # id (that would be adjacencies[0]) — the hover label shows the index;
        # confirm this is intended.
        for node, adjacencies in enumerate(hub_ego.adjacency()):
            node_adjacencies.append(len(adjacencies[1]))
            node_text.append(
                # Append all node information here
                f"<b>Node: {str(node)}</b><br /><b>Properties:</b><br />\
# of connections: {str(len(adjacencies[1]))}\
"
            )
        node_trace.marker.color = node_adjacencies
        node_trace.text = node_text
        node_traces.append(node_trace)
        edge_traces.append(edge_trace)
    return node_traces, edge_traces
def generate_ego_network(n_ranks: int, G: nx.Graph = networkGraph):
    """Build one styled Plotly figure per ego-network rank (1..n_ranks)."""
    node_traces, edge_traces = get_ego_node_edge_traces(G, n_ranks)
    plotly_figures = []
    for idx, (node_trace, edge_trace) in enumerate(zip(node_traces, edge_traces)):
        fig = go.Figure(
            data=[node_trace, edge_trace],
            layout=go.Layout(
                title={"text": f"<b>Rank {idx+1} Ego Network</b>"},
                titlefont_size=16,
                showlegend=False,
                hovermode='closest',
                margin=dict(b=20, l=5, r=5, t=40),
            )
        )
        # Dashboard-wide styling.
        fig.update_layout(title=helpers.style_title(), font=helpers.style_font())
        plotly_figures.append(fig)
    return plotly_figures
def plot_cyto_graph(G=networkGraph, params=None):
    """Return Cytoscape elements (node dicts followed by edge dicts) for G."""
    if params:
        active_filters = {key: value for key, value in params.items() if value}
        G = generate_graph(filters=active_filters)
    elements = []
    for node_id in G.nodes():
        attrs = G.nodes[node_id]
        elements.append({'data': {
            'id': str(int(node_id)),
            'label': str(int(node_id)),
            'genre': int(attrs['genre']),
            'sales_rank': int(attrs['sales_rank']),
            'avg_rating': float(attrs['avg_rating']),
            'num_reviews': int(attrs['num_reviews']),
            'num_pages': int(attrs['num_pages']),
            'price': float(attrs['price']),
        }})
    edges = [{'data': {'source': str(int(source)), 'target': str(int(target)), 'weight': np.log(weight)}}
             for source, target, weight in G.edges.data("weight")]
    return elements + edges
def plot_cyto_ego_graphs(G=networkGraph, params=None):
    """Return one Cytoscape element list per top-3 ego network of G."""
    if params:
        active_filters = {key: value for key, value in params.items() if value}
        G = generate_graph(filters=active_filters)
    graph_elements = []
    for eg_graph in generate_cyto_ego_networks(G, 3):
        elements = []
        for node_id in eg_graph.nodes():
            attrs = eg_graph.nodes[node_id]
            elements.append({'data': {
                'id': str(int(node_id)),
                'label': str(int(node_id)),
                'genre': int(attrs['genre']),
                'sales_rank': int(attrs['sales_rank']),
                'avg_rating': float(attrs['avg_rating']),
                'num_reviews': int(attrs['num_reviews']),
                'num_pages': int(attrs['num_pages']),
                'price': float(attrs['price']),
                'is_ego': attrs['is_ego']
            }})
        edges = [{'data': {'source': str(int(source)), 'target': str(int(target)), 'weight': np.log(weight)}}
                 for source, target, weight in eg_graph.edges.data('weight')]
        graph_elements.append(elements + edges)
    return graph_elements
def plot_cyto_nclique_graph(G=networkGraph, params=None):
    """Return Cytoscape elements for the subgraph of nodes belonging to
    maximal cliques of the selected size (default 3)."""
    nclique_value = 3
    valid_params = {}
    if params:
        if 'nclique_filter' in params:
            # NOTE: pop mutates the caller's dict (kept for compatibility).
            nclique_value = helpers.clique_sizes()[int(params.pop('nclique_filter'))]
        valid_params = {key: value for key, value in params.items() if value}
        G = generate_graph(filters=valid_params)
    # Dictionary with information of each maximal clique based on value of n in n-clique
    clique_graph_table_info = generate_clique_metrics(G)
    member_lists = [item['nodes'] for item in get_cliques_by_size(G)[nclique_value]]
    unique_members = list({node for clique in member_lists for node in clique})
    # Rebuild the graph restricted to the clique members.
    G = generate_graph(filters=valid_params, nodelist=unique_members)
    elements = []
    for node_id in G.nodes():
        attrs = G.nodes[node_id]
        elements.append({'data': {
            'id': str(int(node_id)),
            'label': str(int(node_id)),
            'genre': int(attrs['genre']),
            'sales_rank': int(attrs['sales_rank']),
            'avg_rating': float(attrs['avg_rating']),
            'num_reviews': int(attrs['num_reviews']),
            'num_pages': int(attrs['num_pages']),
            'price': float(attrs['price']),
        }})
    edges = [{'data': {'source': str(int(source)), 'target': str(int(target)), 'weight': np.log(weight)}}
             for source, target, weight in G.edges.data('weight')]
    return elements + edges
def include_loader(graph_component):
    """Wrap a Dash graph component in a cube-style loading spinner."""
    return dcc.Loading(type="cube", color="#36454f", children=[graph_component])
# api/app.py
from flask import Flask
from api.endpoints.prediction import prediction_api
# Create an instance of the Flask class with the default __name__
app = Flask(__name__)
# Register our endpoint
app.register_blueprint(prediction_api)
if __name__ == '__main__':
    # listen on port 8080
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # do not run this way in production.
    app.run(host="0.0.0.0", port=8080, debug=True)
|
def countEvens(list):
    """Return how many values in `list` are even."""
    return sum(1 for number in list if number % 2 == 0)
# Smoke checks (Python 3: print is a function, the original used the
# Python 2 print statement).
print(countEvens([2, 2, 2, 2, 2, 2, 2, 2])) # expect 8
print(countEvens([2, 8, 7, 8, 1, 1])) # expect 3
def sum13(list):
    """Sum the numbers, except that 13 and the value immediately after a 13
    do not count.

    The original bumped the for-loop index (`i = i + 2`) to skip the value
    after a 13, but reassigning a `for` variable does not affect iteration in
    Python, so the skip never happened. A while loop makes the skip real.
    """
    total = 0
    i = 0
    while i < len(list):
        if list[i] == 13:
            i += 2  # skip the 13 and the element right after it
        else:
            total += list[i]
            i += 1
    return total
# Smoke checks (converted from Python 2 print statements).
print(sum13([1, 2, 2, 1])) # expect 6
print(sum13([1, 1])) # expect 2
print(sum13([1, 2, 2, 1, 13])) # expect 6
def sameFirstLast(list):
    """Return True when `list` is non-empty and its first and last elements
    are equal, else False.

    The original compared the builtin function itself (`if len > 1`) — a
    TypeError on Python 3 and always-true on Python 2 — and returned the int 0
    instead of False for the failing case.
    """
    return len(list) >= 1 and list[0] == list[-1]
print sameFirstLast([1,2,5]) |
# 1-1.2 — exercise: collect a few answers with input() and print a summary.
name = input("Input your full name?")
# NOTE(review): input() already returns str, so the str() wrapper is redundant.
age = str(input("How old are you?"))
ques = input("Are u a man?")
# Tuple indexed by a boolean: False -> 'woman', True -> 'man'.
ans = ('woman', 'man')[ques == "yes"]
man = "male"      # unused below
woman = "female"  # unused below
yes = True        # unused below
no = False        # unused below
print("You are " + name + ", Вам " + age + "лет, " + ans + " " + ques)
# 2.1 — swap two values with tuple unpacking.
a = input("Введите 1 число ")
b = input("Введите 2 число ")
a, b = b, a
print(a, b)
# 2 — store two inputs in a list, in reversed order.
a = input("Введите 1 число ")
b = input("Введите 2 число ")
print(a, b)
c = [b, a]
print(c)
|
from typing import Dict, Optional, List #, String# helps enforce typing
import random
import numpy as np
from fastapi import APIRouter
import joblib
import pandas as pd
from pydantic import BaseModel, Field, validator, Json
# import spacy
# from sklearn.feature_extraction.text import TfidfVectorizer
# import en_core_web_sm
# from spacy import load
# nlp= en_core_web_sm.load()
# # tokenizer function
# def tokenizer(text):
# doc=nlp(text)
# return [token.lemma_ for token in doc if ((token.is_stop == False) and
# (token.is_punct == False)) and (token.pos_ != 'PRON')]
nlp_working = False
# nlp_preprocessing = TfidfVectorizer(stop_words = 'english',
# ngram_range = (1, 2),
# max_df = .95,
# min_df = 3,
# tokenizer = tokenizer)
# df = pd.read_csv('https://raw.githubusercontent.com/bw-med-cabinet-1/DS/master/data/cannabis_strain')
# df = df.drop('Unnamed: 0', axis= 1)
# nlp_preprocessing.fit_transform(df['effects'])
#print(f'preprocessing')
# dtm = pd.DataFrame(dtm.todense(), columns = nlp_preprocessing.get_feature_names())
# Full strain feature table; rows are returned to the client for
# nearest-neighbor matches in predict_strain below.
dataframe = pd.read_csv('https://raw.githubusercontent.com/bw-med-cabinet-1/DS/master/data/Cannabis_Strains_Features.csv')
#pd.to_numeric(dataframe['strain_id'])
# for id in dataframe['strain_id']:
#     dataframe['strain_id'][id] = id.__int__()
# print(dataframe.strain_id.dtypes)
#print(len(dtm))
router = APIRouter()
# Pre-trained k-nearest-neighbors model serialized with joblib.
nn_model = joblib.load("app/api/cat_model.joblib")
#nlp_model = joblib.load("app/api/nlp_model.joblib")
#nlp_preprocessing = joblib.load("app/api/nlp_preprocessing.joblib")
print("Serialized Model Loaded")
# Columns of the raw strain table (NLP path; currently unused while
# nlp_working is False).
nlp_cats = ['strain_id', 'strain', 'type', 'Rating', 'effects', 'flavor',
            'description']
# Feature order the categorical nn_model was trained on; UserInputData
# builds its one-row DataFrame with exactly these columns.
nn_cats = ['anxiety', 'depression', 'pain', 'fatigue', 'insomnia', 'brain fog',
           'loss of appetite', 'nausea', 'low libido', 'hybrid', 'sativa',
           'indica', 'happy', 'energentic', 'hungry', 'aroused', 'creative',
           'euphoric', 'relaxed', 'tingly', 'energetic', 'sleepy', 'giggly',
           'uplifted', 'focused', 'talkative']
#nn_cats = [feature.lower() for feature in features]
class UserInputData(BaseModel):
    """Request body for /predict.

    Pydantic parses and validates the incoming JSON into this object, so the
    route gets meaningful validation errors instead of raw-dict KeyErrors.

    Attributes:
        include: effect/condition names the user wants (matched against nn_cats).
        exclude: flags the user wants to avoid (accepted but not yet used by
            the model features).
        text: free-form description, used only when the NLP path is enabled.
    """
    include: Optional[List[str]]
    exclude: Optional[Dict[str, bool]]
    text: Optional[str]

    def categorical_formatting(self):
        """Build a single-row DataFrame in the feature order nn_model expects.

        Every feature defaults to 0.5 ("no preference"); each requested trait
        is set to 1.
        """
        df = pd.DataFrame(columns=nn_cats)
        df.loc[0] = [0.5] * len(nn_cats)  # 0.5 encodes "unknown/no preference"
        # Guard against include being None: the route calls this method when
        # EITHER include or exclude is present, and the original crashed with
        # a TypeError on include=None.
        for trait in (self.include or []):
            df[trait.lower()] = 1
        return df

    def nlp_formatting(self):
        """Vectorize self.text with the module-level TF-IDF preprocessor.

        NOTE(review): nlp_preprocessing is commented out at module level, so
        this path only works once the NLP pipeline is re-enabled
        (nlp_working = True).
        """
        vec = nlp_preprocessing.transform([fR"{self.text}"])
        return vec
@router.post("/predict")
def predict_strain(user: UserInputData):
    """Predict the ideal strain based on user input"""
    # Empty defaults so the response stays well-formed when a branch is skipped.
    nn_return_values = [] # initializing to empty for valid return
    nlp_return_value = []
    if user.include or user.exclude:
        # Categorical path: build the feature row and look up nearest strains.
        X_new = user.categorical_formatting()
        neighbors = nn_model.kneighbors(X_new) # vid @ 56:02
        # kneighbors returns (distances, indices); the indices index `dataframe`.
        neighbor_ids = [int(id_) for id_ in neighbors[1][0]]
        nn_return_values = [dataframe.iloc[id] for id in neighbor_ids]
    elif user.text and nlp_working:
        # NLP path: TF-IDF vector -> nearest strains. Disabled while
        # nlp_working is False (preprocessor/model are commented out above).
        X_new = user.nlp_formatting()
        dense = X_new.todense()
        similar = nlp_model.kneighbors(dense, return_distance=False)
        # NOTE(review): the transpose result is discarded — this line looks
        # like a no-op; confirm it can be removed.
        similar.T
        output = []
        for i in range(5):
            elem = similar[0][i]
            output.append(elem)
        nlp_return_value = output[0]
    else: # if neither are given
        return {
            "error": "insufficient inputs"
        }
    return {
        "Nearest Neighbors": nn_return_values,
        "Text-based Prediction": nlp_return_value
    }
# @router.get('/random') # What is this route going to be?
# def random_penguin():
# """Return a random penguin species"""
# return random.choice(["Adelie", "ChinStrap", "Gentoo"])
# class Item(BaseModel):
# """Use this data model to parse the request body JSON."""
# x1: float = Field(..., example=3.14)
# x2: int = Field(..., example=-42)
# x3: str = Field(..., example='banjo')
# def to_df(self):
# """Convert pydantic object to pandas dataframe with 1 row."""
# return pd.DataFrame([dict(self)])
# @validator('x1')
# def x1_must_be_positive(cls, value):
# """Validate that x1 is a positive number."""
# assert value > 0, f'x1 == {value}, must be > 0'
# return value
# @router.post('/predict')
# async def predict(item: Item):
# """
# Make random baseline predictions for classification problem 🔮
# ### Request Body
# - `x1`: positive float
# - `x2`: integer
# - `x3`: string
# ### Response
# - `prediction`: boolean, at random
# - `predict_proba`: float between 0.5 and 1.0,
# representing the predicted class's probability
# Replace the placeholder docstring and fake predictions with your own model.
# """
# X_new = item.to_df()
# log.info(X_new)
# y_pred = random.choice([True, False])
# y_pred_proba = random.random() / 2 + 0.5
# return {
# 'prediction': y_pred,
# 'probability': y_pred_proba
# }
|
import math
import numpy as np
import random
import scipy.stats as st
import statistics
import matplotlib.pyplot as plt
# Sample size used throughout (cramerVonMisesTest reads this global).
n = 50
mu = 0     # NOTE(review): mu/sigma appear unused in this script — confirm
sigma = 1
# Uniform(-sqrt(3), sqrt(3)) endpoints (gives unit variance).
low, high = -math.sqrt(3), math.sqrt(3)
def cramerVonMisesTest(x, a, b):
    """Compute a goodness-of-fit statistic for the sorted sample x against a
    Uniform(a, b) distribution and print all intermediate columns as LaTeX
    table rows.

    NOTE(review): `stat = -n - 2*sum(I)` matches the Anderson-Darling form
    rather than Cramér-von Mises — confirm the name is intentional.
    Relies on the module-level sample size `n`; x must be sorted ascending.
    """
    # One working column per observation for the printed table.
    A = np.ndarray(shape=(n, 1))
    B = np.ndarray(shape=(n, 1))
    C = np.ndarray(shape=(n, 1))
    D = np.ndarray(shape=(n, 1))
    E = np.ndarray(shape=(n, 1))
    F = np.ndarray(shape=(n, 1))
    G = np.ndarray(shape=(n, 1))
    H = np.ndarray(shape=(n, 1))
    I = np.ndarray(shape=(n, 1))
    # Count observations falling outside [a, b].
    count1 = 0
    count2 = 0
    for j in range(n):
        if (x[j] > b):
            count1 = count1 + 1
    for j in range(n):
        if (x[j] < a):
            count2 = count2 + 1
    print("count1 = " + str(count1))
    print("count2 = " + str(count2))
    for j in range(n):
        A[j] = (2 * j + 1) / (2 * n)  # empirical plotting position
        # B = Uniform(a, b) CDF of x[j], clamped to [0, 1].
        if (x[j] < a):
            B[j] = 0
        else:
            if (x[j] > b):
                B[j] = 1
            else:
                B[j] = (x[j] - a) / (b - a)
        # NOTE(review): B[j] of exactly 0 or 1 makes the log() calls fail;
        # main() chooses a and b just beyond the sample range to avoid this.
        C[j] = math.log(B[j])
        D[j] = A[j] * C[j]
        E[j] = 1 - A[j]
        F[j] = 1 - B[j]
        G[j] = math.log(F[j])
        H[j] = E[j] * G[j]
        I[j] = D[j] + H[j]
    print(np.sum(I))
    stat = -n - 2 * np.sum(I)
    # Dump the worked columns as LaTeX table rows.
    for j in range(n):
        print("%.2s & " % str(j + 1),
              "%s & " % str("%.3f" % A[j]),
              "%s & " % str("%.6f" % B[j]),
              "%s & " % str("%.5f" % C[j]),
              "%s & " % str("%.5f" % D[j]),
              "%s & " % str("%.4f" % E[j]),
              "%s & " % str("%.6f" % F[j]),
              "%s & " % str("%.5f" % G[j]),
              "%s & " % str("%.5f" % H[j]),
              "%s \\\\" % str("%.5f" % I[j]),
              "\hline"
              )
    return stat
def main():
    """Draw a uniform sample, estimate (a, b), plot a histogram, print the
    sample as LaTeX rows, and run the goodness-of-fit test."""
    data = np.random.uniform(low, high, n)
    data = np.sort(data)
    x_max = max(data)
    x_min = min(data)
    # Endpoint estimates stretched slightly beyond the sample range so the
    # fitted CDF is strictly inside (0, 1) for every observation.
    # NOTE(review): `a` uses (x_max + x_min) where `b` uses (x_max - x_min) —
    # confirm the asymmetry is intended and not a typo.
    a = x_min - (x_max + x_min) / (n - 1)
    b = x_max + (x_max - x_min) / (n - 1)
    print("Оценки параметров:" + str("%.4f" % a), str("%.4f" % b))
    count, bins, ignored = plt.hist(data, 30, density=True)
    plt.xlabel("")
    plt.ylabel("frequency")
    plt.show()
    # Print the sample itself as LaTeX table rows.
    for j in range(n):
        print("%s &" % str(j + 1),
              "%s \\\\" % data[j],
              "\hline"
              )
    stat = cramerVonMisesTest(data, a, b)
    print("Значение статистики:" + str(stat))
# Script entry point.
if __name__ == "__main__":
    main()
|
# Function to use to fit the T1 data (monoexp, stretched, biexp, spec_diff).
function = 'spec_diff'
# General parameters (a and b should be 1 and 0 if normalized properly,
# T1 is best guess at lowest temp).
a = 1
b = 0
T1 = 310000000
# Only for spec_diff (spectral diffusion parameter).
q = 10000000
# Only for stretched (stretch factor).
c = 0.5
# Params for biexp.
a2 = 0.5
T1long = 30000000
T1short = 10000
# Output folder name, derived from the chosen fit function.
folder_name = 'T1_fit_{0}'.format(function)
# Misc instructions.
show_plots = True
show_temp_dep = True  # not implemented yet
err_append = True  # appends esds to the end of the values and truncates to show error
import MySQLdb as mydb

# NOTE(review): database credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/deploying this script.
connector = mydb.connect(host='dryer.wsl.mind.meiji.ac.jp',
                         db='Rakuten', user='hayap', passwd='tomonori')
cursor = connector.cursor()
# Users with more than 100 reviews in the window 2010-01-01 .. 2012-03-01.
sql = "select userID from review_data where review_date between '2010-01-01' and '2012-03-01' group by userID having count(*) > 100 limit 1"
cursor.execute(sql)
result = cursor.fetchall()
#f = open("../Rakuten-real-/userID150-165.csv","w")
# One userID per line; the with-block guarantees the file handle is closed
# (the original leaked it).
with open("../../../Desktop/test/choose_test.csv", "w") as f:
    for line in result:
        f.write(line[0] + "\n")
cursor.close()
connector.close()
import math  # NOTE(review): math appears unused in this script.

# Simple tip calculator: read a price and add an 18% tip.
print("Enter the meat price")
meat_price = int(input())
# 18% tip.
tip = meat_price/100*18
both = meat_price+tip
print("The meat price is", meat_price)
print("The tip is:", tip)
print("You need to pay:", both)
from mod_base import *
class Define(Command):
    """Defines terms and words related to the bot."""

    def init(self):
        # Term -> definition sent back to the user.
        self.glossary = {
            "auth": "Authenticate or login to the bot. You need a username and password.",
            "level": "Level is a number indicating what permissions a user has. A higher number indicates more permissions.",
            "module": "Modules are individual components of the bot that add extra features.",
            "command": "Commands are used to tell the bot to do various things.",
        }

    def run(self, win, user, data, caller=None):
        # Normalize the lookup key; empty/None input maps to "".
        key = data.lower() if data else ""
        reply = self.glossary.get(key, "I don't know anything about that")
        win.Send(reply)
# Module registration record consumed by the bot's module loader.
module = {
    "class": Define,
    "type": MOD_COMMAND,
    "level": 0,  # minimum user level required to use the command
}
# Python 3 syntax: print is a function, not a statement.
print("hello")
print("haha")
import os
import sys
import subprocess
import shutil
import time
import concurrent.futures
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "families"))
sys.path.insert(0, os.path.join("tools", "trees"))
sys.path.insert(0, os.path.join("tools", "msa_edition"))
import saved_metrics
import experiments as exp
import msa_converter
import rename_leaves
import sequence_model
import run_raxml_supportvalues as run_pargenes
import read_tree
def short_str(num):
    """Compact human-readable count: 2500000 -> '2M', 1500 -> '1K', 42 -> '42'."""
    for threshold, suffix in ((1000000, "M"), (1000, "K")):
        if num >= threshold:
            # Truncating division, matching int(num / threshold).
            return str(int(num / threshold)) + suffix
    return str(int(num))
def int_from_short_str(num):
    """Inverse of short_str: '2M' -> 2000000, '3K' -> 3000, '42' -> 42."""
    multipliers = {"M": 1000000, "K": 1000}
    suffix = num[-1]
    if suffix in multipliers:
        return int(num[:-1]) * multipliers[suffix]
    return int(num)
class MrbayesInstance():
    """Configuration and helpers for one MrBayes analysis over a families dataset.

    Encapsulates the run parameters (independent runs, chains, generations,
    sampling frequency, burn-in) plus the derived output directory, and knows
    how to write per-family MrBayes batch files.
    """

    def __init__(self, datadir, subst_model, runs=4, chains=2, generations=1000000, frequency=1000, burnin=100):
        self.datadir = os.path.abspath(datadir)
        self.subst_model = subst_model
        self.runs = runs
        self.chains = chains
        self.generations = generations
        self.frequency = frequency
        # burnin is either an int-like value (number of sampled trees to drop)
        # or "raxml"/"fasttree", meaning: start from a precomputed tree instead.
        self.burninmode = burnin
        if (burnin == "raxml" or burnin == "fasttree"):
            self.burnin = 0
        else:
            self.burnin = int(burnin)
        basename = self.get_tag("mrbayes_run")
        self.output_dir = fam.get_run_dir(self.datadir, self.subst_model, basename)
        self.output_dir = os.path.abspath(self.output_dir)
        # (fix) removed a leftover debug line that unconditionally overwrote
        # output_dir with a hard-coded developer-machine path, silently
        # discarding the directory computed above.

    def get_tag(self, prefix="mrbayes"):
        """Short unique name encoding the parameters, e.g. 'mrbayes-r4-c2-g1M-f1K-b100'."""
        tag = prefix
        tag += "-r" + short_str(self.runs)
        tag += "-c" + short_str(self.chains)
        tag += "-g" + short_str(self.generations)
        tag += "-f" + short_str(self.frequency)
        if (self.burninmode == "raxml"):
            tag += "-braxml"
        elif (self.burninmode == "fasttree"):
            tag += "-bfasttree"
        else:
            tag += "-b" + short_str(self.burnin)
        return tag

    @staticmethod
    def get_instance(datadir, genetree_name):
        """Rebuild an instance by parsing a tag produced by get_tag().

        Expects '<tag>.<subst_model>' where tag is
        'mrbayes-r<runs>-c<chains>-g<gens>-f<freq>-b<burnin>'.
        """
        sp = genetree_name.split(".")
        tag = sp[0]
        subst_model = sp[1]
        sp = tag.split("-")
        assert(sp[0] == "mrbayes")
        runs = int_from_short_str(sp[1][1:])
        chains = int_from_short_str(sp[2][1:])
        generations = int_from_short_str(sp[3][1:])
        frequency = int_from_short_str(sp[4][1:])
        burnin = int_from_short_str(sp[5][1:])
        inst = MrbayesInstance(datadir, subst_model, runs, chains, generations, frequency, burnin)
        return inst

    def save_parameters(self):
        """Record the run parameters in <output_dir>/mrbayes_parameters.txt.

        NOTE(review): values are written space-separated on a single line
        (no newlines) — confirm downstream readers expect this format.
        """
        output_dir = self.output_dir
        out = os.path.join(output_dir, "mrbayes_parameters.txt")
        with open(out, "w") as writer:
            writer.write("Parameters:")
            writer.write(" " + self.datadir)
            writer.write(" " + str(self.subst_model))
            writer.write(" " + str(self.runs))
            writer.write(" " + str(self.chains))
            writer.write(" " + str(self.generations))
            writer.write(" " + str(self.frequency))
            writer.write(" " + str(self.burnin))

    def get_treelist(self, family):
        """Path of the merged tree-sample list for one family."""
        return os.path.join(self.output_dir, "results", family, family + ".treelist")

    def generate_config_file(self, output_config_file, nexus_alignment, subst_model, treepath, seed, output_prefix):
        """Write a MrBayes batch file for one run of one family.

        Resumes (append=yes) when a MrBayes checkpoint (.ckp~) already exists
        for this output prefix.
        """
        append = "no"
        ckp = output_prefix + r".ckp~"
        parsi_mode = False  # experimental parsimony-proposal settings, disabled
        if (os.path.isfile(ckp)):
            append = "yes"
        with open(output_config_file, "w") as writer:
            writer.write("\tbegin mrbayes;\n")
            writer.write("\tset seed=" + str(seed) + ";\n")
            writer.write("\tset autoclose=yes nowarn=yes;\n")
            writer.write("\texecute " + nexus_alignment + ";\n")
            writer.write(sequence_model.get_mrbayes_preset_line(subst_model))
            writer.write(sequence_model.get_mrbayes_lset_line(subst_model))
            if (parsi_mode):
                writer.write("\tpropset ParsSPR(Tau,V)$prob=0;\n")
                writer.write("\tpropset NNI(Tau,V)$prob=0;\n")
                writer.write("\tpropset ExtSPR(Tau,V)$prob=0;\n")
                writer.write("\tpropset ExtTBR(Tau,V)$prob=0;\n")
                writer.write("\tpropset ParsSPR1(Tau,V)$prob=12;\n")
                writer.write("\tpropset ParsTBR1(Tau,V)$prob=6;\n")
            if (self.burninmode == "raxml" or self.burninmode == "fasttree"):
                # Seed the chain with the precomputed starting tree
                # (format 5: topology with branch lengths, no support values).
                treestr = read_tree.read_tree(treepath).write(format = 5)
                writer.write("begin trees;tree mytree = " + treestr + " end;\n")
                writer.write("startvals tau=mytree;\n")
            writer.write("\tmcmc nruns=1" + " nchains=" + str(self.chains) + " ngen=" + str(self.generations) + " samplefreq=" + str(self.frequency) + " file=" + output_prefix + " append=" + append + ";\n")
            writer.write("end;")

    def remove_mrbayes_run(self):
        """Delete the results directory of this run (ignores missing dirs)."""
        output_dir = os.path.abspath(self.output_dir)
        to_rm = os.path.join(output_dir, "results")
        shutil.rmtree(to_rm, True)
def get_mapping_dictionnary(mapping_file):
    """Parse a 'species:gene1;gene2;...' file into a gene -> species dict.

    Each line maps one species to a ';'-separated list of its genes; the
    trailing newline of each line is stripped via [:-1] (so the file is
    expected to end with a newline).
    """
    gene_to_species = {}
    with open(mapping_file) as reader:
        for raw_line in reader.readlines():
            parts = raw_line[:-1].split(":")
            species = parts[0]
            for gene in parts[1].split(";"):
                gene_to_species[gene] = species
    return gene_to_species
def generate_commands_file(instance, cores, prefix_species):
    """Write the scheduler command file: one MrBayes invocation per family per run.

    Also converts each family's fasta alignment to nexus (optionally prefixing
    gene names with their species) and generates the per-run MrBayes config
    files. Returns the path to the written commands file.
    """
    output_dir = instance.output_dir
    results_dir = os.path.join(output_dir, "results")
    scheduler_commands_file = os.path.join(output_dir, "commands.txt")
    exp.mkdir(results_dir)
    family_dimensions = {}
    datadir = instance.datadir
    try:
        # Per-family dimensions used below to weight scheduler jobs.
        family_dimensions = run_pargenes.get_family_dimensions(datadir, instance.subst_model)
    except:
        # Best effort: fall back to weight "1" per job below.
        pass
    with open(scheduler_commands_file, "w") as writer:
        for family in sorted(fam.get_families_list(datadir)):
            family_dir = fam.get_family_path(datadir, family)
            mrbayes_family_dir = os.path.join(results_dir, family)
            exp.mkdir(mrbayes_family_dir)
            nexus_alignment = os.path.join(family_dir, "species_prefixed_alignment.nex")
            fasta_alignment = os.path.join(family_dir, "alignment.msa")
            treepath = ""
            # Starting tree only needed for the "raxml"/"fasttree" burn-in modes.
            if (instance.burninmode == "raxml"):
                treepath = os.path.abspath(fam.get_raxml_tree(datadir, instance.subst_model, family))
            if (instance.burninmode == "fasttree"):
                treepath = os.path.abspath(fam.get_fasttree_tree(datadir, instance.subst_model, family))
            mapping_dictionnary = None
            if (prefix_species):
                mapping_dictionnary = get_mapping_dictionnary(fam.get_mappings(datadir, family))
            msa_converter.msa_convert(fasta_alignment, nexus_alignment, "fasta", "nexus", mapping_dictionnary)
            for run in range(0, instance.runs):
                mrbayes_config = os.path.join(mrbayes_family_dir, "mrbayes_config_run" + str(run) + "." + instance.subst_model + ".nex")
                output_prefix = os.path.join(mrbayes_family_dir, family) + str(run)
                # Deterministic per-run seed so reruns are reproducible.
                seed = run + 42
                instance.generate_config_file(mrbayes_config, nexus_alignment, instance.subst_model, treepath, seed, output_prefix)
                # Scheduler line: presumably job-name, threads, cost/weight,
                # config path — verify against the scheduler's expected format.
                command = []
                command.append(family + "__" + str(run))
                command.append("1")
                if (family in family_dimensions):
                    dim = family_dimensions[family][1] * family_dimensions[family][0]
                    command.append(str(dim))
                else:
                    command.append("1")
                command.append(mrbayes_config)
                writer.write(" ".join(command) + "\n")
    return scheduler_commands_file
#def get_reelist(datadir, subst_model, family):
# return os.path.join(get_mrbayes_output_dir(datadir, subst_model), "results", family, family + ".treelist")
def extract_mrbayes_family(futures_params):
    """Worker: merge one family's MrBayes .t sample files into a single treelist.

    Skips the first `burnin` sampled trees of each file and renames numeric
    leaf labels back to gene names using the file's `translate` table.
    `futures_params` is an (instance, family) tuple — ProcessPoolExecutor.map
    passes a single argument per call.
    """
    instance, family = futures_params
    datadir = instance.datadir
    subst_model = instance.subst_model
    burnin = instance.burnin
    tag = instance.get_tag()
    mrbayes_dir = instance.output_dir
    family_misc_dir = fam.get_family_misc_dir(datadir, family)
    #output = instance.get_treelist(family)
    output = fam.build_gene_tree_path(datadir, subst_model, family, tag)
    # Leaf names may carry a "species_" prefix added for MrBayes; strip it off.
    remove_prefix = True
    with open(output, "w") as writer:
        d = os.path.join(mrbayes_dir, "results", family)
        for topologies in os.listdir(d):
            if (not topologies.endswith(".t")):
                continue
            topologies = os.path.join(d, topologies)
            lines = open(topologies).readlines()
            is_translation = False
            translator = {}  # numeric MrBayes label -> gene name
            tree_index = 0
            for line in lines:
                if ("tree gen" in line):
                    # A sampled tree line; drop the first `burnin` samples.
                    tree_index += 1
                    if (tree_index <= burnin):
                        continue
                    is_translation = False
                    tree = line.split(" ")[-1]
                    writer.write(rename_leaves.rename_leaves(tree, translator) + "\n")
                    continue
                if ("translate" in line):
                    # Subsequent lines map numeric labels to (prefixed) names.
                    is_translation = True
                    continue
                if (is_translation):
                    split = line.split(" ")
                    left = split[-2]
                    # [:-2] presumably drops a trailing ",\n" (or ";\n") —
                    # verify against actual MrBayes output.
                    right = split[-1][:-2]
                    if (remove_prefix):
                        sp = right.split("_")
                        right = "_".join(sp[1:])
                    translator[left] = right
def extract_mrbayes_results(instance):
    """Convert every family's MrBayes output into tree lists, in parallel."""
    start = time.time()
    print("Extracting mrbayes results...")
    jobs = [(instance, family) for family in fam.get_families_list(instance.datadir)]
    with concurrent.futures.ProcessPoolExecutor() as executor:
        executor.map(extract_mrbayes_family, jobs)
    print("Finished extracting mrbayes results " + str(time.time() - start) + "s")
    sys.stdout.flush()
def run_mrbayes_on_families(instance, cores, do_continue=False, prefix_species=False):
    """Run the MrBayes pipeline over all families of `instance`.

    Works inside instance.output_dir with relative paths, restoring the
    caller's cwd and the instance's absolute output_dir on exit.
    NOTE(review): the scheduler invocation that actually runs MrBayes is
    commented out below, so this currently only extracts existing results.
    """
    cwd = os.getcwd()
    save_output_dir = instance.output_dir
    try:
        if (not do_continue):
            # Placeholder: a fresh (non-continued) run would clean old results here.
            pass
        try:
            exp.mkdir(instance.output_dir)
        except:
            pass
        print("chdir " + instance.output_dir)
        os.chdir(instance.output_dir)
        # Switch to relative paths while running inside the output directory.
        instance.output_dir = os.path.relpath(instance.output_dir)
        instance.datadir = os.path.relpath(instance.datadir)
        instance.save_parameters()
        #commands = generate_commands_file(instance, cores, prefix_species)
        #start = time.time()
        #exp.run_with_scheduler(exp.mrbayes_exec, commands, "onecore", cores, instance.output_dir, "logs.txt")
        #tag = instance.get_tag()
        #saved_metrics.save_metrics(instance.datadir, fam.get_run_name(tag, instance.subst_model), (time.time() - start), "runtimes")
        #lb = fam.get_lb_from_run(instance.output_dir)
        #saved_metrics.save_metrics(instance.datadir, fam.get_run_name(tag, instance.subst_model), (time.time() - start) * lb, "seqtimes")
        #print("Finished running mrbayes after " + str(time.time() - start) + "s")
        #sys.stdout.flush()
        extract_mrbayes_results(instance)
    finally:
        # Always restore the caller's working directory and absolute output_dir.
        # NOTE(review): instance.datadir is left as a relative path here —
        # confirm that is intended.
        os.chdir(cwd)
        instance.output_dir = save_output_dir
if (__name__== "__main__"):
    # CLI entry point: parse the nine positional arguments and launch the run.
    if len(sys.argv) != 10:
        print("Syntax error: python " + os.path.basename(__file__) + " datadir subst_model runs chains generations frequency burnin cores continue{0,1}")
        print(len(sys.argv))
        sys.exit(0)
    datadir = sys.argv[1]
    subst_model = sys.argv[2]
    runs = int(sys.argv[3])
    chains = int(sys.argv[4])
    generations = int(sys.argv[5])
    frequency = int(sys.argv[6])
    # bug fix: burnin was left as a string while it is compared against the
    # integer tree_index during extraction (tree_index <= burnin), which
    # raises TypeError on Python 3.
    burnin = int(sys.argv[7])
    cores = int(sys.argv[8])
    do_continue = int(sys.argv[9]) > 0
    instance = MrbayesInstance(datadir, subst_model, runs, chains, generations, frequency, burnin)
    run_mrbayes_on_families(instance, cores, do_continue)
|
class ListNode:
    """Singly linked list node: a value plus a reference to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None  # tail pointer; None marks the end of the list
def _linked(values):
    """Build a linked list from an iterable of values and return its head."""
    head = None
    tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head


# Two sorted input lists and the expected merged result.
l1 = _linked([-1, 0, 3, 8])
l2 = _linked([-4, 0, 1, 4])
r = _linked([-1, 0, 1, 3, 4, 8])
def merge(l1, l2):
    """
    Non-destructively merges two sorted lists into a new sorted list.

    When both heads carry the same value it is emitted once (matching the
    original de-duplicating behaviour, and the expected fixture `r`).
    Returns the head of a freshly built list; neither input is modified
    (except that an entirely-None input returns the other list as-is,
    as before).
    """
    if l1 == None:
        return l2
    elif l2 == None:
        return l1
    result = ListNode(0)  # dummy head, skipped on return
    pointer1, pointer2, pointer3 = l1, l2, result
    while pointer1 != None and pointer2 != None:
        if pointer1.val < pointer2.val:
            pointer3.next = ListNode(pointer1.val)
            pointer1 = pointer1.next
        elif pointer1.val > pointer2.val:
            pointer3.next = ListNode(pointer2.val)
            pointer2 = pointer2.next
        else:
            # equal heads: emit the value once and advance both lists
            pointer3.next = ListNode(pointer2.val)
            pointer1 = pointer1.next
            pointer2 = pointer2.next
        pointer3 = pointer3.next
    # bug fix: the original dropped whatever remained in the longer list
    # (e.g. the trailing 8 of l1); copy the leftover nodes so the merge is
    # complete while staying non-destructive.
    remainder = pointer1 if pointer1 is not None else pointer2
    while remainder is not None:
        pointer3.next = ListNode(remainder.val)
        pointer3 = pointer3.next
        remainder = remainder.next
    return result.next
def recursive_merge(l1, l2):
    """
    Non-destructively (recursively) merges two sorted lists.

    Mirrors merge(): equal simultaneous heads are emitted once. The inner
    helper attaches the remainder of the non-exhausted list directly, as in
    the original implementation.
    """
    # bug fix: the original crashed with AttributeError when either input
    # was None; mirror merge() and return the other list.
    if l1 == None:
        return l2
    elif l2 == None:
        return l1
    if l1.val < l2.val:
        temp = ListNode(l1.val)
        l1 = l1.next
    elif l1.val > l2.val:
        temp = ListNode(l2.val)
        l2 = l2.next
    else:
        # bug fix: the equal-heads case left `temp` unbound (NameError);
        # emit the value once and advance both, matching merge().
        temp = ListNode(l1.val)
        l1 = l1.next
        l2 = l2.next
    result = temp
    def sorter(l1, l2, holder):
        # Recursive step: copy the smaller head onto `holder` and recurse.
        if l1 == None:
            holder.next = l2
            return result
        elif l2 == None:
            holder.next = l1
            return result
        elif l1.val < l2.val:
            holder.next = ListNode(l1.val)
            return sorter(l1.next, l2, holder.next)
        elif l1.val > l2.val:
            holder.next = ListNode(l2.val)
            return sorter(l1, l2.next, holder.next)
        else:
            holder.next = ListNode(l2.val)
            return sorter(l1.next, l2.next, holder.next)
    return sorter(l1, l2, result)
# Walk both merged lists in lockstep and report whether they agree node by node.
test1 = merge(l1, l2)
test2 = recursive_merge(l1, l2)
while not (test1 is None or test2 is None):
    print(test1.val == test2.val)
    print(test1.val, test2.val)
    test1, test2 = test1.next, test2.next
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
    # --- Heart-rate-reserve (HRR) endpoints ---
    url(r'^refresh/hrr_calculations$',views.hrr_calculations,
        name="hrr"),
    # Superseded function-based endpoints, kept for reference:
    #url(r'^aa_calculations$',views.aa_calculations,
    #    name="aa"),
    #url(r'^aa_workout_calculations$',views.aa_workout_calculations,
    #    name="aa_workout"),
    #url(r'^daily_aa_calculations$',views.daily_aa_calculations,
    #    name="daily_aa"),
    #url(r'^aa_low_high_calculations$',views.aa_low_high_end_calculations,
    #    name="aa_classification chart"),
    # --- Per-user aggregate APIs ---
    url(r'^user/weekly_aa_data$',views.UserAaView.as_view(),name="weekly_aa_api"),
    url(r'^user/heartzone_data$',views.UserheartzoneView.as_view(),name="hearzone_api"),
    url(r'^weekly_workout_summary$',views.weekly_workout_summary,
        name="Weekly workout summary"),
    url(r'^hrr_calculations$',views.UserHrrView.as_view(),
        name="hrr_api"),
    #url(r'^update/hrr_calculations/(?P<id>[0-9]+)/$',views.UpdateHrr.as_view(),
    #    name="hrr_update"),
    url(r'^update/hrr_calculations$',views.UpdateHrr.as_view(),
        name="hrr_update"),
    url(r'^raw_data/hrr_calculations$',views.UserHrrViewRawData.as_view(),
        name="hrr_api_raw_data"),
    # --- AA (activity/aerobic) calculation APIs ---
    url(r'^aa_calculations$',views.UserAA.as_view(),
        name="aa_database"),
    url(r'^aa_workout_calculations$',views.UserAA_workout.as_view(),
        name="aa_workout_databse"),
    url(r'^daily_aa_calculations$',views.UserAA_daily.as_view(),
        name="daily_aa"),
    url(r'^aa_low_high_calculations$',views.UserAA_low_high_values.as_view(),
        name="aa_classification chart"),
    # NOTE(review): the next two routes share the name "aa_whole_day_database";
    # reverse() will resolve only one of them — confirm this is intended.
    url(r'^aa_twentyfour_hour_calculations$',views.UserAA_twentyfour_hour.as_view(),
        name="aa_whole_day_database"),
    url(r'^aa_twentyfour_hour_low_high_calculations$',views.UserAA_twentyfour_hour_low_high_values.as_view(),
        name="aa_whole_day_database"),
    url(r'^aa_ranges$',views.aa_ranges_api,name="aa ranges api"),
    url(r'^aa_dashboard/table$',views.UserAAdashboadTable.as_view(),name="aa dashboard table"),
]
#!/usr/bin/python26
# Query values
# Whitelist of query-string parameter names accepted by analytics lookups.
QUERY_VALUES = ['uuid', 'service_name', 'category_name', 'event_name',
                'username', 'proxy_username', 'start_date', 'end_date']
# Analytics Database
# Connection settings for the analytics MySQL database (3306 is the MySQL
# default port). Values are intentionally blank here; fill in per deployment.
AYLT_DB_HOST = ""
AYLT_DB_USERNAME = ""
AYLT_DB_PASSWORD = ""
AYLT_DB_NAME = ""
AYLT_DB_PORT = 3306
# Log Files
ANALYTICS_LOGFILE = "/path/to/logfile/analytics.log"
# Support Mail
# Sender/recipient for support notification e-mails; blank until configured.
MAIL_FROM = ""
MAIL_TO = ""
|
from __future__ import absolute_import
from flexp.flow import cache, Chain
from flexp.flow.cache import PickleCache
from flexp.flow.caching_chain import CachingChain
from flexp import flexp
class PrintIdModule:
    """Flow module that logs the current ``data.id`` to stdout.

    Placed between stages so changes to the id (e.g. by caching modules)
    can be observed.
    """

    def process(self, data):
        print(data.id)
class TestModule:
    """Dummy flow module used to exercise PickleCache hashing.

    PickleCacheBlackList names attributes excluded from the cache hash;
    UpdateDataId names the data attribute that receives the updated id.
    """

    PickleCacheBlackList = ['attr3']
    UpdateDataId = "id"

    def __init__(self, attr1, attr2, attr3):
        self.attr1, self.attr2, self.attr3 = attr1, attr2, attr3

    def process(self, data):
        # Intentionally a no-op: only the module's hash matters in the demo.
        pass
class FlowData:
    """Dict-like data container passed through the flow chain."""

    def __init__(self):
        self.id = "/adsasd/asdasd/asdad/asd"

    def __iter__(self):
        # Iterate attribute names, mirroring dict iteration over keys.
        return iter(self.__dict__)

    def items(self):
        # (name, value) pairs for every instance attribute.
        return [(name, getattr(self, name)) for name in self.__dict__]

    def __setitem__(self, key, item):
        setattr(self, key, item)

    def __getitem__(self, key):
        return getattr(self, key)
def main():
    """Run a small CachingChain demo.

    Each PickleCache stage rewrites ``data.id`` with its content hash; the
    interleaved PrintIdModule stages make those rewrites visible on stdout.
    """
    flexp.setup("./experiments", "tf-idf", with_date=True)
    data = FlowData()
    my_chain = CachingChain([
        PrintIdModule(),
        PickleCache("cached_pkl", "id", [TestModule(12, 14, 18)]), # id updated by PickleChain hash
        PrintIdModule(),
        PickleCache("cached_pkl", "id", [TestModule(12, 16, 18)]), # id updated by PickleChain hash
        PrintIdModule(),
        TestModule(12, 16, 20), # id updated
        PrintIdModule(),
        PrintIdModule(),
    ], update_data_id='id')
    my_chain.process(data)
if __name__ == "__main__":
    main()
|
from qcodes.instrument.visa import VisaInstrument
from qcodes.utils import validators as vals
import numpy as np
class Weinschel_8311(VisaInstrument):
    '''
    QCodes driver for the stepped attenuator
    Weinschel is formerly known as Aeroflex/Weinschel
    '''

    def __init__(self, name, address, **kwargs):
        """Connect to the instrument and register one attenuation parameter
        per channel (1-3). Valid values are the instrument's 2 dB steps from
        0 to 100 dB inclusive."""
        super().__init__(name, address, terminator='\r', **kwargs)
        # De-duplicated: the three channels previously had three identical
        # copy-pasted add_parameter blocks differing only in the channel number.
        for channel in (1, 2, 3):
            self.add_parameter(
                'attenuationCH{}'.format(channel), unit='dB',
                set_cmd='CHAN {};ATTN {{}};'.format(channel),
                get_cmd='CHAN {};ATTN?'.format(channel),
                vals=vals.Enum(*np.arange(0, 100.1, 2).tolist()),
                get_parser=float)
        self.connect_message()
if __name__ == "__main__":
    # bug fix: `Instrument` was never imported (only VisaInstrument is), so
    # Instrument.close_all() raised NameError. close_all is a classmethod on
    # the Instrument base class, inherited by VisaInstrument.
    try:
        VisaInstrument.close_all()
    except KeyError:
        pass
    Aeroflex = Weinschel_8311(name = "Aeroflex", address = "GPIB::10::INSTR")
    Aeroflex.attenuationCH2.set(0)
    Aeroflex.attenuationCH1.set(20)
    print( Aeroflex.attenuationCH1.get() )
    print( Aeroflex.attenuationCH2.get() )
    Aeroflex.close()
# Generated by Django 2.2 on 2020-04-06 15:31
import core.utils.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import users.models
class Migration(migrations.Migration):
    """Initial users-app schema: custom User model, Notification preferences,
    and Car records.

    Auto-generated by Django 2.2 — avoid hand-editing field definitions.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('core', '0001_initial'),
    ]
    operations = [
        # Custom user model: phone_number is unique and email is optional.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(blank=True, default=None, max_length=254, null=True, unique=True)),
                ('phone_number', models.CharField(max_length=16, unique=True)),
                ('avatar', models.ImageField(blank=True, upload_to=core.utils.utils.get_file_path)),
                ('device_id', models.CharField(editable=False, max_length=200, null=True)),
                ('phone_number_confirmation_token', models.CharField(editable=False, max_length=12, null=True)),
                ('phone_number_valid_date', models.DateTimeField(blank=True, null=True)),
                ('reset_password_token', models.CharField(editable=False, max_length=12, null=True)),
                ('reset_password_valid_date', models.DateTimeField(blank=True, null=True)),
                ('country', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Country')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', users.models.UserManager()),
            ],
        ),
        # One-to-one notification preferences per user.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('notification_method', models.CharField(choices=[('sms', 'sms notification'), ('app', 'Application Notification'), ('bh', 'Both Types')], default='app', max_length=3)),
                ('show_phone_number', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Cars belong to users; soft-deleted via the `deleted` flag.
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('car_number', models.CharField(max_length=60, unique=True)),
                ('color', models.CharField(blank=True, max_length=15, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('deleted', models.BooleanField(default=False)),
                ('car_model', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.CarModel')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddIndex(
            model_name='car',
            index=models.Index(fields=['car_number', 'user'], name='users_car_car_num_4d10eb_idx'),
        ),
    ]
|
# codeforces
# "Next Round"-style problem: n contestants, the k-th place score is the
# cut-off; every contestant with a positive score >= the cut-off advances.
n,k=map(int,input().split(" "))
nums=list(map(int,input().split(" ")))
x=nums[k-1]  # score of the k-th placed contestant (the cut-off)
count=0
if(x==0):
    # cut-off is zero: only strictly positive scores advance
    for i in nums:
        if(i>x):
            count+=1
    print(count)
else:
    # defensive: first check whether every score is zero
    for i in nums:
        if(i==0):
            count+=1
    if(count==len(nums)):
        print("0")
    else:
        # count everyone at or above the cut-off
        count=0
        for i in nums:
            if(i>=x):
                count+=1
        print(count)
|
from mongoengine import *
from mongoengine import signals
from datetime import datetime
def update_document(sender, document, **kwargs):
    """pre_save signal handler: stamp `last_seen` with the current UTC time.

    Accepts **kwargs because blinker-style signal dispatch may pass extra
    keyword arguments besides the document; the original strict signature
    would break if any were added.
    """
    document.last_seen = datetime.utcnow()
class Event(EmbeddedDocument):
    """Base embedded event record (subclassed by Probe/Beacon/Data)."""
    e_type = StringField(required=True)
    ssid = StringField()
    dest = StringField(required=False)
    # bug fix: the default must be the callable itself, not utcnow() — calling
    # it here evaluated once at import time and froze every event's timestamp.
    timestamp = DateTimeField(default=datetime.utcnow)
    location = StringField()
    meta = {'allow_inheritance': True}
class Probe(Event):
    """Probe event; e_type defaults to 'probe'."""
    e_type = StringField(default='probe')
class Beacon(Event):
    """Beacon event; e_type defaults to 'beacon'."""
    e_type = StringField(default='beacon')
class Data(Event):
    """Data event; e_type defaults to 'data'."""
    e_type = StringField(default='data')
class Device(Document):
    """A device keyed by MAC address, with its captured events."""
    # the MAC address doubles as the document primary key
    mac = StringField(primary_key=True, required=True)
    vendor = StringField()
    events = EmbeddedDocumentListField(Event)
    # maintained automatically by the update_document pre_save handler
    last_seen = DateTimeField()
# Stamp last_seen on every document save (global pre_save hook).
signals.pre_save.connect(update_document)
|
from django.contrib.auth import authenticate
from django.shortcuts import redirect, render
from django.contrib.auth.models import User
from django.contrib.auth import login, logout, authenticate
from .models import Report
from django.contrib import messages
import json
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, "index.html")
def get_incident_report(request):
    """Accept an incident-report form POST and persist it as a Report.

    Anonymous users are redirected to the landing page; GET renders the
    empty form. NOTE(review): debug prints left in — consider logging.
    """
    if request.user.is_anonymous:
        return redirect('/')
    elif request.method == "POST":
        location = request.POST.get("location")
        incident_description = request.POST.get("incident_description")
        date = request.POST.get("date")
        time = request.POST.get("time")
        incident_location = request.POST.get("incident_location")
        initial_severity = request.POST.get("initial_severity")
        suspected_cause = request.POST.get("suspected_cause")
        immediate_actions_taken = request.POST.get("immediate_actions_taken")
        # multi-select checkboxes are stored as a JSON-encoded list
        sub_incident_type = json.dumps(request.POST.getlist("checks[]"))
        print(sub_incident_type, type(sub_incident_type))
        reported_by = request.user
        print(location,incident_description,date,time,incident_location,initial_severity,suspected_cause,
              immediate_actions_taken,sub_incident_type,reported_by,end='\n')
        report = Report(location=location, incident_description=incident_description, date=date, time=time,
                        incident_location=incident_location, initial_severity=initial_severity,
                        suspected_cause=suspected_cause, immediate_actions_taken=immediate_actions_taken,
                        sub_incident_type=sub_incident_type, reported_by=reported_by)
        report.save()
        messages.success(request, 'Incident report saved.')
        return redirect('/')
    return render(request, 'form.html')
# NOTE(review): a plaintext login password was committed in this comment and
# has been redacted — rotate the credential and purge it from VCS history.
def loginUser(request):
    """Authenticate a POSTed username/password; on success log in and
    redirect home, otherwise re-render the login page with an error."""
    context = None
    if request.method == "POST":
        credentials = {
            "username": request.POST.get("username"),
            "password": request.POST.get("password"),
        }
        user = authenticate(**credentials)
        if user is None:
            context = {"error": "User does not exist"}
        else:
            login(request, user)
            messages.success(request, "Success: You are logged in.")
            return redirect('/')
    return render(request, "login.html", context)
def logoutUser(request):
    """Log the current user out and send them back to the landing page."""
    logout(request)
    return redirect('/')
import numpy as np
from rvi_sampling.distributions.prior_distributions import DiscreteUniform
def test_discrete_uniform():
    """DiscreteUniform(start=-5, n_numbers=10): pdf is non-zero at +/-5 and
    zero at +/-6 (same assertions as before, expressed as loops)."""
    dist = DiscreteUniform(dimensions=1, start=-5, n_numbers=10)
    for inside in (5, -5):
        assert dist.pdf(np.array([inside])) != 0
    for outside in (6, -6):
        assert dist.pdf(np.array([outside])) == 0
import bluetooth
target_name = "Galaxy A3 (2016)"
target_address = None
try:
nearby_devices = bluetooth.discover_devices(duration=20)
except BluetoothError as e:
LOG.error("BT error %s" % e )
for bdaddr in nearby_devices:
print bdaddr
if target_name == bluetooth.lookup_name(bdaddr):
target_address = bdaddr
break
if target_address is not None:
print "found target bluetooth device with addres ", target_address
else:
print "could not find target bluetooth device nearby"
|
import pyjsonrpc
URL = "http://localhost:5050/" # JSON-RPC endpoint of the prediction service
client = pyjsonrpc.HttpClient(url=URL)
def predict(bathroom, school_rating, bedroom, property_type, size,avg):
    """Call the remote 'predict' RPC with the listing features and return
    the predicted property value (also printed for the operator)."""
    predicted_value = client.call('predict', bathroom, school_rating, bedroom, property_type, size,avg)
    print "Predicted value: %f" % predicted_value
    return predicted_value
|
#!/usr/bin/env python
# coding=utf-8
from torch.utils.data import DataLoader
import torch
from MNISTDataset import MNISTDataset
from LeNet import LeNet
import skimage.io as io
from torch.autograd import Variable
import matplotlib.pyplot as plt
learning_rate = 0.0001
batch_size = 64 #60000/64=937.5
iter_display = 100 # print training stats every this many iterations
num_epoch = 10
test_iterval = 1000 # run a full test pass every this many training iterations
#test_iter = 150 # number of test batches; 10000/64 = 156
train_dataset = MNISTDataset("./mnist/train.txt")
test_dataset = MNISTDataset("./mnist/test.txt")
train_loader = DataLoader(train_dataset,batch_size,True) # DataLoader handles batching, shuffling and parallel reads; built into pytorch (could also be hand-rolled)
test_loader = DataLoader(test_dataset,batch_size,True)
lenet_model = LeNet() # instantiate LeNet (runs LeNet.__init__)
lenet_model = lenet_model.cuda() # move the model to the GPU
print(lenet_model)
optimizer = torch.optim.SGD(lenet_model.parameters(),learning_rate,0.99) # SGD optimizer: parameters to update, learning rate, momentum=0.99 (weight decay, dampening and nesterov left at their defaults)
loss_func = torch.nn.CrossEntropyLoss() # softmax cross-entropy loss
train_loss = 0.0
train_acc = 0.0
test_loss = 0.0
test_acc = 0.0
num_iter = -1
recoder = {"num_iter":[],"train_loss":[],"train_acc":[],"test_loss":[],"test_acc":[]}
for epoch in range(num_epoch):
    for index,(batch_img,batch_label) in enumerate(train_loader):
        # print index
        num_iter += 1
        # print type(batch_img)
        # print type(batch_label)
        batch_img = Variable(batch_img.cuda()) # copy the batch to the GPU and wrap it in a Variable
        batch_label = Variable(batch_label.cuda())
        lenet_model.train()
        output = lenet_model(batch_img) # forward pass
        # print output.data.size() # output is batch_size*10
        # print batch_label.data.size()
        loss = loss_func(output,batch_label) # loss, already averaged over the batch
        #print loss.data.size() # loss is a scalar
        train_loss += loss.item()
        max,max_index = torch.max(output,1) # max values and their indices along dim 1
        #print max.size()
        train_correct = torch.sum((max_index.data == batch_label.data)) # number of correct predictions in this batch
        # print "train_correct:",train_correct
        train_acc += train_correct
        # print "train_acc:",train_acc
        # backward pass
        lenet_model.zero_grad() # gradients accumulate, so clear them first
        loss.backward() # compute gradients for every learnable parameter
        optimizer.step() # let the optimizer update the parameters
        # progress display
        if(num_iter % iter_display == 0):
            print "iter_num:",num_iter
            if num_iter == 0:
                train_loss = train_loss
                train_acc = float(train_acc)/batch_size
            else:
                train_loss = train_loss / iter_display
                train_acc = float(train_acc)/(iter_display*batch_size)
            recoder["num_iter"].append(num_iter)
            recoder["train_loss"].append(train_loss)
            recoder["train_acc"].append(train_acc)
            print "train loss:",train_loss
            print "train acc:",train_acc
            train_loss = 0
            train_acc = 0
        # periodic evaluation on the test set
        if(num_iter % test_iterval == 0):
            test_loss = 0
            test_acc = 0
            for test_batch_img,test_batch_label in test_loader:
                test_batch_img = Variable(test_batch_img.cuda())
                test_batch_label = Variable(test_batch_label.cuda())
                lenet_model.eval() # switch to eval mode; only affects dropout and batchnorm layers
                output = lenet_model(test_batch_img) # test-time forward pass
                loss = loss_func(output,test_batch_label)
                test_loss += loss.item()
                max,max_index = torch.max(output,1)
                test_correct = torch.sum((max_index.data == test_batch_label.data))
                test_acc += test_correct
            test_loss = test_loss/len(test_dataset)*batch_size
            test_acc = float(test_acc)/float(len(test_dataset))
            recoder["test_loss"].append(test_loss)
            recoder["test_acc"].append(test_acc)
            print "test loss:",test_loss
            print "test acc:",test_acc
# save the trained weights
torch.save(lenet_model,"./weight/LeNet.weights")
# plot loss and accuracy curves
plt.figure("loss")
plt.plot(recoder["num_iter"],recoder["train_loss"])
x = []
for iter in recoder["num_iter"]:
    if iter % test_iterval ==0:
        x.append(iter)
plt.plot(x,recoder["test_loss"])
plt.figure("acc")
plt.plot(recoder["num_iter"],recoder["train_acc"])
plt.plot(x,recoder["test_acc"])
plt.show()
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from config import config
import delegate_social
from flask import Flask
from flask import request
from flask import jsonify
app = Flask(__name__)
@app.route("/")
def api():
    """Root endpoint: return the static service description from config."""
    return jsonify(config["service_description"])
@app.route('/v1_0/notifications/', defaults={'service': ''})
@app.route('/v1_0/notifications/<service>/')
def v1_0_notifications(service):
    """Return notifications for `service` over the last `days` days.

    `days` is read from the query string; any missing/empty value falls
    back to 1.
    """
    raw_days = request.args.get('days')
    days = int(raw_days) if raw_days else 1
    return jsonify(notifications=delegate_social.notifications(service, days))
if __name__ == "__main__":
    # Dev server only; threaded so concurrent requests don't block each other.
    app.run(debug=True, threaded=True)
# -*- coding: utf-8 -*-
import logging
import time
from openerp.osv import fields, osv
#from datetime import datetime, date, timedelta
_logger = logging.getLogger(__name__)
class wiz_create_chart_dre(osv.osv_memory):
    """
    Wizard to create the DRE (income statement) chart for a period.
    """
    _name = "wiz.create.chart_dre"
    _description = ""
    _columns = {
        'date_ger': fields.date(u'Data Geração'),
        'period_from': fields.many2one('account.period', u'Período', required=True),
    }
    def action_wiz_create_chart_dre(self, cr, uid, ids, context=None):
        """Create a chart_dre record for the chosen period, populate its
        lines, and open the line tree view filtered to the new chart."""
        _logger.info(self._name)
        if context == None:
            context = {}
        [wizard] = self.browse(cr, uid, ids)
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        objChartDRE = self.pool.get('chart_dre')
        chart = {
            'date': time.strftime('%Y-%m-%d'),
            'period_id': wizard.period_from.id,
        }
        _logger.info(u'Cria Chart: '+str(chart))
        idChartDRE = objChartDRE.create(cr,uid,chart,context=None)
        #ChartFlowCash = objChartFlowCash.browse(cr,uid,id_ChartFlowCash,context)
        objChartDRE.cria_dre(cr,uid,idChartDRE,wizard.period_from.id)
        result = mod_obj.get_object_reference(cr, uid, 'account_flow_cash', 'action_chart_dre_line')
        _logger.info('ID1 = '+str(result))
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        # restrict the opened view to lines of the chart created above
        result['domain'] = "[('chart_id','=',"+str(idChartDRE)+")]"
        _logger.info('ID2 = '+str(result))
        return result
    _defaults = {
        'date_ger': lambda *a: time.strftime('%Y-%m-%d'),
    }
# Register the wizard with the old OpenERP (osv) API.
wiz_create_chart_dre()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
from model.models import GRUMultiTask
import os
import train as tr
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import defines as df
try:
import cPickle as pickle
except ImportError: # Python 3.x
import pickle
# Preprocessed data paths
# .npy files produced by the preprocessing step for the rumors and stances
# test splits (tweets and matching labels).
preprocessed_data_paths = {
    'test_rumors_tweets path': os.path.join('data', 'preprocessed data', 'test', 'rumors_tweets.npy'),
    'test_rumors_labels path': os.path.join('data', 'preprocessed data', 'test', 'rumors_labels.npy'),
    'test_stances_tweets path': os.path.join('data', 'preprocessed data', 'test', 'stances_tweets.npy'),
    'test_stances_labels path': os.path.join('data', 'preprocessed data', 'test', 'stances_labels.npy'),
}
# Batch sizes for the two tasks; the two comment rows below list matching
# (rumors, stances) pairs — presumably sizes that divide the test sets
# evenly (TODO confirm against the data).
batch_size_test_rumors = 4
batch_size_test_stances = 11
# 2 4 8 16 32 64 100
# 5 11 22 44 88 176 274
loss_function = 'BCELoss' # supported options: CrossEntropyLoss | BCELoss | L1Loss | MSELoss
def main():
    """Load the multitask GRU, restore saved weights and hidden state, and
    report test accuracy on the rumors and stances tasks."""
    # for rumors
    test_data_rumors = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['test_rumors_tweets path'])),
                                     torch.from_numpy(np.load(preprocessed_data_paths['test_rumors_labels path'])))
    test_loader_rumors = DataLoader(test_data_rumors, shuffle=False, batch_size=batch_size_test_rumors, drop_last=False)
    # for stances
    test_data_stances = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['test_stances_tweets path'])),
                                      torch.from_numpy(np.load(preprocessed_data_paths['test_stances_labels path'])))
    test_loader_stances = DataLoader(test_data_stances, shuffle=False, batch_size=batch_size_test_stances, drop_last=False)
    # torch.cuda.is_available() checks and returns a Boolean True if a GPU is available, else it'll return False
    is_cuda = torch.cuda.is_available()
    # if we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
    if is_cuda:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # create the model
    model_multi_task = GRUMultiTask(input_length=df.input_length,
                                    hidden_length_rumors=df.hidden_length_rumors,
                                    hidden_length_stances=df.hidden_length_stances,
                                    hidden_length_shared=df.hidden_length_shared,
                                    loss_func=loss_function,
                                    is_dropout=False
                                    )
    model_multi_task.to(device)
    # Loss: pick the criterion matching the configured loss_function name
    if loss_function == 'BCELoss':
        criterion = nn.BCELoss()
    elif loss_function == 'L1Loss':
        criterion = nn.L1Loss()
    elif loss_function == 'MSELoss':
        criterion = nn.MSELoss()
    else: # the default
        criterion = nn.CrossEntropyLoss()
    # Loading the model
    model_multi_task.load_state_dict(torch.load(os.path.join('model', 'model_state_dict.pt')))
    # Load hidden states saved at training time; fall back to fresh zeros
    # when the pickle is absent.
    try:
        with open(os.path.join('model', 'h_prevs.pickle'), 'rb') as fp:
            h_training = pickle.load(fp)
        h_training = (torch.from_numpy(h_training['h_1']).to(device),
                      torch.from_numpy(h_training['h_2']).to(device),
                      torch.from_numpy(h_training['h_3']).to(device))
    except EnvironmentError:
        h_training = model_multi_task.init_hidden()
    # Run the model
    accuracy_r, accuracy_s = tr.validation_or_testing(model_multi_task, test_loader_rumors,
                                                      test_loader_stances, criterion, device,
                                                      h_training, operation='testing')
    print('-----------------------------------------\n')
    print('Test accuracy rumors: {:.3f}%'.format(accuracy_r))
    print('Test accuracy stances: {:.3f}%'.format(accuracy_s))
if __name__ == '__main__':
    main()
|
import numpy as np
class Calculadora:
    """Basic four-operation calculator."""

    def __init__(self):
        print ("Se creo una calculadora")

    def sumar(self, x, y):
        """Return x plus y."""
        return x + y

    def restar(self, x, y):
        """Return x minus y."""
        return x - y

    def multiplicar(self, x, y):
        """Return x times y."""
        return x * y

    def dividir(self, x, y):
        """Return x divided by y."""
        return x / y
class CalcAleatorea(Calculadora):
    """Calculator extended with basic descriptive statistics over arrays."""

    def __init__(self):
        Calculadora.__init__(self)

    def media(self, x):
        """Arithmetic mean of x."""
        return (np.sum(x)) / len(x)

    def mediaCuadratica(self, x):
        """Mean of the squared entries of x."""
        squared = np.power(x, 2)
        return (np.sum(squared)) / len(squared)

    def varianza(self, x):
        """Population variance: mean squared deviation from the mean."""
        centered = x - self.media(x)
        return self.mediaCuadratica(centered)

    def desviacionEstandar(self, x):
        """Population standard deviation (square root of the variance)."""
        return np.sqrt(self.varianza(x))

    def correlacion(self, x, y):
        """Mean of the elementwise product of x and y (raw cross-moment)."""
        return self.media(x * y)
# Demo: compute every statistic first, then report them in the same order
# the original printed them (stdout is identical).
calc2 = CalcAleatorea()
x = np.array([7, 6, 10, 9, 12, 14])
y = np.array([7, 8, 9, 10, 11, 12])
print("x es: ", x)
print("y es: ", y)
media = calc2.media(x)
print("media de x es: ", media)
mediaCua = calc2.mediaCuadratica(x)
print("mediaCua de x es: ", mediaCua)
varianza = calc2.varianza(x)
print("varianza de x es: ", varianza)
devEst = calc2.desviacionEstandar(x)
print("devEst de x es: ", devEst)
corr = calc2.correlacion(x, y)
print("corr de x,y es: ", corr)
|
def turn_right(curr_dir):
    """Rotate a 2-D direction vector 90 degrees: (x, y) -> (-y, x).

    Returns a new list; the input is not modified.
    """
    dx, dy = curr_dir
    return [-dy, dx]
def spiral_of(n):
    """Build an n x n spiral of the numbers 1..n*n growing outward from the
    centre cell (used with odd n in this file, where it fits exactly).

    The turn rule is inlined here: try a right turn first; take it whenever
    the cell in that direction is still empty.
    """
    grid = [[0] * n for _ in range(n)]
    row, col = n // 2, n // 2
    dx, dy = 0, -1  # direction as (column delta, row delta)
    grid[row][col] = 1
    value = 2
    while value <= n ** 2:
        # direction after a right turn: (x, y) -> (-y, x)
        rx, ry = -dy, dx
        if grid[row + ry][col + rx] == 0:
            dx, dy = rx, ry
        row += dy
        col += dx
        grid[row][col] = value
        value += 1
    return grid
def visual(spiral):
    """Print the spiral as rows of numbers left-padded into 8-wide columns."""
    for row_values in spiral:
        line = ''
        for value in row_values:
            text = str(value)
            line += text + ' ' * (8 - len(text))
        print(line)
# visual(spiral_of(13))
def sum_of_diags_in(spiral):
    """Sum both diagonals of a square matrix, counting the centre cell once.

    The -1 offset compensates for the centre value 1, which lies on both
    diagonals of the odd-sized spirals used here and would otherwise be
    counted twice.
    """
    size = len(spiral)
    main_diag = sum(spiral[i][i] for i in range(size))
    anti_diag = sum(spiral[i][size - 1 - i] for i in range(size))
    return main_diag + anti_diag - 1
def main():
    """Print the diagonal sum of a 1001x1001 number spiral."""
    answer = sum_of_diags_in(spiral_of(1001))
    print(answer)


if __name__ == '__main__':
    main()
# This file is a part of ninfs.
#
# Copyright (c) 2017-2019 Ian Burgwin
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE.md in the root of this project.
from functools import wraps
from io import BufferedIOBase
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# this is a lazy way to make type checkers stop complaining
from typing import BinaryIO
BufferedIOBase = BinaryIO
# Root of the package's exception hierarchy.
class PyCTRError(Exception):
    """Common base class for all PyCTR errors."""
def _raise_if_closed(method):
@wraps(method)
def decorator(self: '_ReaderOpenFileBase', *args, **kwargs):
if self._reader.closed:
self.closed = True
if self.closed:
raise ValueError('I/O operation on closed file')
return method(self, *args, **kwargs)
return decorator
class _ReaderOpenFileBase(BufferedIOBase):
    """Base class for all open files for Reader classes.

    Subclasses are expected to set `_info` (an object exposing at least a
    `size` attribute) and rely on `self._reader.get_data(info, offset, size)`
    to fetch bytes.
    """
    # current read offset within the virtual file
    _seek = 0
    # per-file metadata (needs `.size`); populated by subclasses
    _info = None
    closed = False
    def __init__(self, reader, path):
        self._reader = reader
        self._path = path
    def __repr__(self):
        return f'<{type(self).__name__} path={self._path!r} info={self._info!r} reader={self._reader!r}>'
    @_raise_if_closed
    def read(self, size: int = -1) -> bytes:
        # size == -1 means "read to end of file"
        if size == -1:
            size = self._info.size - self._seek
        data = self._reader.get_data(self._info, self._seek, size)
        self._seek += len(data)
        return data
    read1 = read # probably make this act like read1 should, but this for now enables some other things to work
    @_raise_if_closed
    def seek(self, seek: int, whence: int = 0) -> int:
        if whence == 0:
            # absolute: negative is an error, overshoot clamps to EOF
            if seek < 0:
                raise ValueError(f'negative seek value {seek}')
            self._seek = min(seek, self._info.size)
        elif whence == 1:
            # relative to current position; clamped at 0, not at EOF
            self._seek = max(self._seek + seek, 0)
        elif whence == 2:
            # relative to EOF; clamped at 0, not at EOF
            self._seek = max(self._info.size + seek, 0)
        return self._seek
    @_raise_if_closed
    def tell(self) -> int:
        return self._seek
    @_raise_if_closed
    def readable(self) -> bool:
        return True
    @_raise_if_closed
    def writable(self) -> bool:
        # read-only view; writes are never supported
        return False
    @_raise_if_closed
    def seekable(self) -> bool:
        return True
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional
import numpy as np
from gym import spaces
from mtenv import MTEnv
from mtenv.utils import seeding
from mtenv.utils.types import ActionType, ObsType, StepReturnType
TaskStateType = int
class FiniteMTBanditEnv(MTEnv):
"""Multitask Bandit Env where the task_state is sampled from a finite list of states"""
def __init__(self, n_tasks: int, n_arms: int):
super().__init__(
action_space=spaces.Discrete(n_arms),
env_observation_space=spaces.Box(
low=0.0, high=1.0, shape=(1,), dtype=np.float32
),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
self.n_arms = n_arms
self.n_tasks = n_tasks
self.observation_space["task_obs"].seed(0)
self.possible_task_observations = np.asarray(
[self.observation_space["task_obs"].sample() for _ in range(self.n_tasks)]
)
# possible_task_observations is assumed to be part of the environment definition ie
# everytime we instantiate the env, we get the same `possible_task_observations`.
self._should_reset_env = True
def reset(self, **kwargs: Dict[str, Any]) -> ObsType:
self.assert_env_seed_is_set()
self._should_reset_env = False
return {"env_obs": [0.0], "task_obs": self.task_obs}
def sample_task_state(self) -> TaskStateType:
"""Sample a `task_state` that contains all the information needed to revert to any
other task. For examples, refer to TBD"""
self.assert_task_seed_is_set()
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
return self.np_random_task.randint(0, self.n_tasks) # type: ignore[no-any-return, union-attr]
def set_task_state(self, task_state: TaskStateType) -> None:
self.task_state = task_state
self.task_obs = self.possible_task_observations[task_state]
def step(self, action: ActionType) -> StepReturnType:
if self._should_reset_env:
raise RuntimeError("Call `env.reset()` before calling `env.step()`")
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
sample = self.np_random_env.rand() # type: ignore[union-attr]
reward = 0.0
if sample < self.task_obs[action]: # type: ignore[index]
reward = 1.0
return (
{"env_obs": [0.0], "task_obs": self.task_obs},
reward,
False,
{},
)
def seed_task(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for task information"""
self.np_random_task, seed = seeding.np_random(seed)
# in this function, we do not need the self.np_random_task
return [seed]
def get_task_state(self) -> TaskStateType:
    """Return all the information needed to execute the current task again
    (here: the integer task index set by `set_task_state`)."""
    return self.task_state
def run() -> None:
    """Demo: run a few tasks in one env, then replay the current task in a
    freshly constructed, identically seeded env.

    NOTE(review): the original indentation was lost in this copy; the
    "new env" section is reconstructed here as running once, after the task
    loop — confirm it was not intended to run inside the loop.
    """
    env = FiniteMTBanditEnv(n_tasks=10, n_arms=5)
    env.seed(seed=1)
    env.seed_task(seed=2)
    for task in range(3):
        # Alternates between task 0 and task 1 across the three iterations.
        print("=== Task " + str(task % 2))
        env.set_task_state(task % 2)
        print(env.reset())
        for _ in range(5):
            action = env.action_space.sample()
            print(env.step(action))
    # A new env seeded identically reproduces the same task when given the
    # old env's task state.
    new_env = FiniteMTBanditEnv(n_tasks=10, n_arms=5)
    new_env.seed(seed=1)
    new_env.seed_task(seed=2)
    print("=== Executing the current task (from old env) in new env ")
    new_env.set_task_state(task_state=env.get_task_state())
    print(new_env.reset())
    for _ in range(5):
        action = new_env.action_space.sample()
        print(new_env.step(action))

if __name__ == "__main__":
    run()
|
import webbrowser
class Movie:
    """A movie with display metadata and a YouTube trailer.

    Attributes:
        title: The movie title.
        poster_image_url: URL of the poster image.
        trailer_youtube_url: URL of the YouTube trailer.
    """

    def __init__(self, title, poster_image_url, trailer_youtube_url):
        self.title, self.poster_image_url, self.trailer_youtube_url = (
            title,
            poster_image_url,
            trailer_youtube_url,
        )

    def show_trailer(self):
        """Open this movie's trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
# -*- coding: utf-8 -*-
import json
from flask import Flask, request, jsonify
from flask_cors import CORS
from bl.temperature.required_input_info import REQUIRED_INPUT_INFO
from bl.temperature.temperature_mode_handler import TemperatureModeHandler
app = Flask(__name__)
CORS(app)
@app.route('/', methods=['POST', 'GET'])
def handle():
    """Single endpoint: GET describes the expected input, POST processes it.

    Returns HTTP 400 when the handler rejects the payload and HTTP 500 on
    any unexpected exception (the exception message is echoed back).
    """
    if request.method == 'GET':
        return jsonify(REQUIRED_INPUT_INFO)
    try:
        # force=True: parse the body as JSON regardless of the Content-Type.
        payload = request.get_json(force=True)
        handler = TemperatureModeHandler()
        result = handler.handle(payload)
        if not result:
            # Dump the rejected payload to the log before reporting the error.
            print(payload)
            return jsonify(error='Invalid input data provided.'), 400
        # NOTE(review): json.dumps returns a plain string (no
        # application/json mimetype), unlike jsonify — confirm intentional.
        return json.dumps(result)
    except Exception as e:
        print(e)
        return jsonify(error=str(e)), 500

if __name__ == '__main__':
    app.run()
|
from .forms import BuyForm
from django.conf import settings
# Item categories shown in the marketplace navigation.
marketplace_item_list = ['Players', 'ICC', 'Countries', 'Boards', 'Tournaments', 'Team Owners']

def marketplace_items_context(request):
    """Django context processor exposing marketplace constants to all templates.

    Injects the category list, the price multipliers from settings, and the
    BuyForm class.
    """
    return {
        'marketplace_items': marketplace_item_list,
        'multiplier_0': settings.MULTIPLIER_0,
        'multiplier_1': settings.MULTIPLIER_1,
        'multiplier_2': settings.MULTIPLIER_2,
        'multiplier_default': settings.MULTIPLIER_DEFAULT,
        # NOTE(review): this passes the form *class*, not an instance —
        # presumably templates instantiate it; confirm.
        'buy_form': BuyForm,
    }
|
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
from io import StringIO
import numpy as np
from scipy.interpolate import interp1d
from derivativesfedrick_hw6 import *
from integrationfedrick_hw6 import *
from nonlinequationfedrick_hw6 import *
# --- Input data -------------------------------------------------------------
data=np.loadtxt('cambfedrick_hw6.dat')
df = pd.DataFrame(data)
df.to_excel("tester.xls", index=False)
l=3  # multipole moment used in the C_l normalisation below
n=len(data[:,0])
# Pull 200 points from the CMB spectrum (every 10th sample).
x200=data[0:n:10,0]
y200=data[0:n:10,1]
# Cubic interpolation through the 200 sampled points.
cf=interp1d(x200,y200,kind='cubic')
xnew=np.linspace(2,2191)
# Whole data set, used to estimate the interpolation error.
xwhole=data[:,0]
ywhole=data[:,1]
# NOTE(review): cf(xnew) is evaluated at 50 evenly spaced x values but is
# compared against the *first 50 rows* of the raw data, whose x values are
# different — this "error" mixes unrelated points; confirm intent.
yerror=abs(cf(xnew)-ywhole[0:len(xnew)])/ywhole[0:len(xnew)]
yerror=np.average(yerror)
print("this is the total error "+ str(yerror))
# Densely resample between the original points.
sx=np.linspace(2,2191,10000)
sy=cf(sx)
# Convert into the desired integrand form: C_l * 2*pi / (l*(l+1)).
cy=((cf(sx))*2*(np.pi))/(l*(l+1))
# NOTE(review): sx is increasing, so this step size is *negative* —
# presumably h=sx[1]-sx[0] was intended; confirm against fourthorderwhole.
h=sx[0]-sx[1]
# Fourth-order finite-difference derivative over all the points.
dfxc,dfyc=fourthorderwhole(sx,cy,h)
# Compute the integral over all the points.
def cI(x):
    """Integrand: the interpolated spectrum rescaled by l/(2*pi)."""
    y=(((cf(x)*2*(np.pi))/(l*(l+1)))*(l/(2*np.pi)))
    return y
# NOTE(review): the third target shadows the builtin `sum` at module level.
Fxc,Fyc,sum=twhole(10000,2,2191,cI)
# --- Plots ------------------------------------------------------------------
fig1=plt.figure()
ax=fig1.add_subplot()
ax.scatter(x200,y200, label="200 points from cmb")
ax.plot(sx,sy, label="Cubic fit of 200 points")
ax.set_title(" Power law of CMB ")
ax.set_xlabel("Power")
ax.set_ylabel("C_{TT}")
ax.legend()
fig2=plt.figure()
ax2=fig2.add_subplot()
ax2.set_title("Derivative of CMB")
ax2.plot(dfxc,dfyc)
fig3=plt.figure()
ax3=fig3.add_subplot()
ax3.plot(Fxc,Fyc)
ax3.set_title("Integral of CMB")
plt.show()
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
driver = webdriver.Chrome(ChromeDriverManager().install())
def driverInit(url):
    """Open `url` in the module-level Chrome driver, screenshot it, then quit.

    The driver is always closed and quit (even on failure), so this function
    can only be called once per module-level `driver` instance.
    """
    try:
        # driver.get_screenshot_as_file("1.png")
        driver.get(url)
        time.sleep(5)  # crude wait for the page to render before screenshotting
        driver.save_screenshot("2.png")
        time.sleep(2)
    except Exception as ex:
        # Best effort: report the failure and fall through to cleanup.
        print(ex)
    finally:
        # NOTE(review): quit() after close() can raise on some selenium
        # versions once the last window is gone — quit() alone suffices.
        driver.close()
        driver.quit()
|
import datetime
from sqlalchemy import Column, Integer, String, Boolean, DateTime, and_
from sqlalchemy.orm import relationship
from database.loader import db
class User(db.Model):
    """SQLAlchemy model for a Telegram user (one row per telegram_id)."""

    id = Column(Integer, primary_key=True)
    telegram_id = Column(String(40), unique=True)
    chat_id = Column(String(40))
    alias = Column(String(40))
    referer_id = Column(String(40))  # telegram_id of the referring user, '' if none
    is_banned = Column(Boolean)
    state = Column(String(50), default=None, nullable=True)
    state_data = Column(String(1000), default=None, nullable=True)
    balance = Column(Integer, default=0)
    orders = relationship("Order", backref='user')
    # BUG FIX: pass the *callable*, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, stamping every new row with the
    # same timestamp instead of its actual registration time.
    was_registered = Column(DateTime, default=datetime.datetime.now)

    def __init__(self, telegram_id, alias, referer_id='', chat_id=''):
        self.telegram_id = str(telegram_id)
        self.chat_id = str(chat_id)
        self.alias = alias
        self.referer_id = referer_id
        self.orders = []
        self.is_banned = False
        self.balance = 0

    def __repr__(self):
        return f'{self.alias} : {self.telegram_id}'

    def get_registration_date(self):
        """Return the registration date (without the time component)."""
        return self.was_registered.date()

    @staticmethod
    def register(telegram_user, chat_id, referer_id=''):
        """Create the user if not already present; drop unknown referers."""
        if not User.query.filter_by(telegram_id=str(telegram_user.id)).first():
            if not User.query.filter_by(telegram_id=referer_id).first():
                referer_id = ''
            db.session.add(User(telegram_user.id, telegram_user.username, referer_id=referer_id, chat_id=chat_id))
            db.session.commit()

    @staticmethod
    def get(user_id: str = '', chat_id: str = ''):
        """Return the user matching (telegram_id, chat_id).

        Raises:
            ValueError: if no such user exists.
        """
        user = User.query.filter(and_(User.telegram_id == user_id, User.chat_id == chat_id)).first()
        if not user:
            # BUG FIX: the exception was constructed but never raised,
            # silently returning None instead.
            raise ValueError('User does not exist')
        return user
|
import unittest
import main
import bomb
from config_entity import WIDTH
class TestBomb(unittest.TestCase):
    """Integration-style tests for bomb placement, fuse expiry and explosion."""

    def setUp(self):
        # Fresh game per test so bombs/explosions don't leak between tests.
        self.game = main.Game()
        self.game.start()

    def test_can_bomb_explode(self):
        """A bomb whose planting time is in the past blows up on update."""
        my_bomb = bomb.Bomb(WIDTH, WIDTH, 1)
        my_bomb.time_of_planting = 0  # force the fuse to be already expired
        my_bomb.update(self.game.platforms)
        self.assertTrue(my_bomb.is_blowed)

    def test_can_bomb_make_explosion(self):
        """A blown bomb processed by bombs_update enqueues exactly one explosion."""
        my_bomb = bomb.Bomb(WIDTH, WIDTH, 1)
        my_bomb.is_blowed = True
        self.game.add_sprite(my_bomb)
        self.game.queue_of_bombs.append(my_bomb)
        self.game.bombs_update()
        self.assertTrue(len(self.game.queue_of_explosions) == 1)

    def test_can_hero_place_bomb(self):
        """Pressing space (game.space flag) makes the hero plant a bomb."""
        self.game.space = True
        self.game.bombs_update()
        self.assertTrue(len(self.game.queue_of_bombs) == 1)

if __name__ == '__main__':
    unittest.main()
|
'''
Created on Jul 1, 2013
@author: padelstein
'''
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from robot.libraries.BuiltIn import BuiltIn
class ACPModal:
    """Robot Framework keyword library for the ACP (fancybox) profile modal."""

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # Reuse the shared WebDriver wrapper registered with Robot Framework.
        self._webd_wrap = BuiltIn().get_library_instance('WebDriverWrapper')

    def _confirm_modal(self):
        """Wait until the fancybox modal is present; fail the keyword otherwise."""
        self._webd_wrap.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'fancybox-inner')), 'Message modal not present')

    def close_modal(self):
        """Click the modal's close anchor and wait for the modal to disappear."""
        self._confirm_modal()
        _close = self._webd_wrap._driver.find_element_by_class_name('fancybox-skin').find_element_by_xpath('a')
        # Click via JS: the close anchor is not reliably clickable natively.
        self._webd_wrap._driver.execute_script("(arguments[0]).click()", _close)
        # confirms the modal is gone
        self._webd_wrap.wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, 'fancybox-inner')))

    ########################################################################
    def click_follow(self):
        """Click the follow action inside the modal, then wait for navigation."""
        self._confirm_modal()
        _follow_button = self._webd_wrap._driver.find_element_by_class_name('fancybox-inner').find_element_by_xpath('div/div/section[1]/section[2]/section/ul/li[1]/a')
        self._webd_wrap._driver.execute_script('$(arguments[0]).click()', _follow_button)
        self._webd_wrap.wait.until(EC.title_contains("Zola"))

    def click_full_profile(self):
        """Open the full profile from the modal footer; waits for the modal to close."""
        self._confirm_modal()
        _full_profile = self._webd_wrap._driver.find_element_by_class_name('fancybox-inner').find_element_by_xpath('div/div/footer/a')
        self._webd_wrap._driver.execute_script("(arguments[0]).click()", _full_profile)
        # confirms the modal is gone
        self._webd_wrap.wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, 'fancybox-inner')))

    ########################################################################
    def get_name(self):
        """Return the display name shown in the modal header."""
        self._confirm_modal()
        _elt = self._webd_wrap._driver.find_element_by_class_name("fancybox-inner").find_element_by_class_name('name-header').find_element_by_xpath("div/a")
        return _elt.text
|
# -*- coding: utf-8 -*-
"""
This file is a scratch of technology parser.
"""
import csv
import scrapy
from scrapy import Request
# Settings
# Delimiter and quotechar are parameters of csv file. You should know it if you created the file
CSV_DELIMITER = ' '
CSV_QUOTECHAR = '"' # '|'
INPUT_NAME = 'input.txt'
OUTPUT_CSV_NAME = 'technology.csv' # Path to output file with csv type
ONLY_RECOMMENDATED_TRANSLATIONS = True
COLUMNS = ['Input word', 'Translations', 'Dictionary', 'Block number', 'Block name', 'Author', 'Link on author',
'Comment']
class MultitranSpider(scrapy.Spider):
    """Scrape multitran.com translations for each word listed in INPUT_NAME,
    writing one CSV row per (word, translation) pair to OUTPUT_CSV_NAME."""

    name = "multitran_technology"
    allowed_domains = ["multitran.com"]

    def __init__(self):
        self.input_file = open(INPUT_NAME, 'r')
        # newline='' is required by the csv module so that rows are not
        # separated by blank lines on Windows.
        self.output_file = open(OUTPUT_CSV_NAME, 'w', newline='')
        self.output_writer = csv.writer(self.output_file, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR,
                                        quoting=csv.QUOTE_ALL)

    def start_requests(self):
        """Build one search request per non-empty input line.

        BUG FIX: strip the trailing newline from each line; previously the
        raw line (newline included) was interpolated into the URL and stored
        as the request's `theme`.
        """
        requests = []
        for line in self.input_file:
            word = line.strip()
            if not word:
                continue  # skip blank lines
            requests.append(Request(url='http://www.multitran.com/m.exe?CL=1&s={}&l1=1&l2=2&SHL=2'.format(word),
                                    meta={'theme': word}))
        return requests

    def parse(self, response):
        """Follow every dictionary-section link on a search-result page."""
        theme = response.meta['theme']
        common_row_xpath = '//*/tr/td[@class="phras"]/a'
        for common_row in response.xpath(common_row_xpath):
            link = "http://www.multitran.com{}".format(common_row.xpath('@href').extract_first())
            name = common_row.xpath('text()').extract_first()
            yield scrapy.Request(url=link, callback=self.parse_dictionary, meta={'name': name, 'theme': theme})

    def parse_dictionary(self, response):
        """Write one CSV row (word, translation, dictionary, theme) per table row."""
        name = response.meta['name']
        theme = response.meta['theme']
        ROW_XPATH = '//*/tr'
        WORD_XPATH = 'td[@class="phraselist1"]/a/text()'
        TRANSLATE_XPATH = 'td[@class="phraselist2"]/a/text()'
        for row in response.xpath(ROW_XPATH):
            row_value = [None] * 4
            row_value[0] = row.xpath(WORD_XPATH).extract_first()
            row_value[1] = row.xpath(TRANSLATE_XPATH).extract_first()
            row_value[2] = name
            row_value[3] = theme
            # Rows with no source word are layout rows, not data — skip them.
            if row_value[0] is not None:
                self.output_writer.writerow(row_value)

    def close(self, reason):
        """Release both file handles when the spider finishes.

        BUG FIX: the input file was previously never closed.
        """
        self.input_file.close()
        self.output_file.close()
|
# Copyright 2022 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows for a temporary register to exist, when associated with a layout."""
from __future__ import annotations
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any
from typing import Sequence as abcSequence
from pulser.json.utils import obj_to_dict, stringify_qubit_ids
if TYPE_CHECKING:
from pulser.register.base_register import BaseRegister, QubitId
from pulser.register.register_layout import RegisterLayout
from pulser.register.weight_maps import DetuningMap
class MappableRegister:
    """A register with the traps of each qubit still to be defined.

    Args:
        register_layout: The register layout on which this
            register will be defined.
        qubit_ids: The Ids for the qubits to pre-declare on this
            register.
    """

    def __init__(self, register_layout: RegisterLayout, *qubit_ids: QubitId):
        """Initializes the mappable register."""
        self._layout = register_layout
        # Each trap can hold at most one qubit, so the number of declared
        # qubits is bounded by the number of traps in the layout.
        if len(qubit_ids) > self._layout.number_of_traps:
            raise ValueError(
                "The number of required qubits is greater than the number of "
                f"traps in this layout ({self._layout.number_of_traps})."
            )
        self._qubit_ids = qubit_ids

    @property
    def qubit_ids(self) -> tuple[QubitId, ...]:
        """The qubit IDs of this mappable register."""
        return self._qubit_ids

    @property
    def layout(self) -> RegisterLayout:
        """The layout used to define the register."""
        return self._layout

    def build_register(self, qubits: Mapping[QubitId, int]) -> BaseRegister:
        """Builds an actual register.

        Args:
            qubits: A map between the qubit IDs to use
                and the layout traps where the qubits will be placed. Qubit
                IDs declared in the MappableRegister but not defined here
                will simply be left out of the final register.

        Returns:
            The resulting register.
        """
        chosen_ids = tuple(qubits.keys())
        if not set(chosen_ids) <= set(self._qubit_ids):
            raise ValueError(
                "All qubits must be labeled with pre-declared qubit IDs."
            )
        elif set(chosen_ids) != set(self.qubit_ids[: len(chosen_ids)]):
            # Only a *prefix* of the declared qubit IDs may be mapped.
            raise ValueError(
                f"To declare {len(qubits.keys())} qubits, 'qubits' should "
                f"contain the first {len(qubits.keys())} elements of the "
                "'qubit_ids'."
            )
        # Preserve the pre-declared qubit order, not the order of `qubits`.
        register_ordered_qubits = {
            id: qubits[id] for id in self._qubit_ids if id in chosen_ids
        }
        return self._layout.define_register(
            *tuple(register_ordered_qubits.values()),
            qubit_ids=tuple(register_ordered_qubits.keys()),
        )

    def find_indices(self, id_list: abcSequence[QubitId]) -> list[int]:
        """Computes indices of qubits.

        This can especially be useful when building a Pulser Sequence
        with a parameter denoting qubits.

        Example:
            Let ``reg`` be a mappable register with qubit Ids "a", "b", "c"
            and "d".

            >>> reg.find_indices(["a", "b", "d", "a"])

            It returns ``[0, 1, 3, 0]``, following the qubits order of the
            mappable register (defined by qubit_ids).

            Then, it is possible to use these indices when building a
            sequence, typically to instantiate an array of variables
            that can be provided as an argument to ``target_index``
            and ``phase_shift_index``.

            When building a sequence and declaring N qubits, their ids
            should refer to the first N elements of qubit_id.

        Args:
            id_list: IDs of the qubits to denote.

        Returns:
            Indices of the qubits to denote, only valid for the
            given mapping.
        """
        if not set(id_list) <= set(self._qubit_ids):
            raise ValueError(
                "The IDs list must be selected among pre-declared qubit IDs."
            )
        return [self.qubit_ids.index(id) for id in id_list]

    def define_detuning_map(
        self, detuning_weights: Mapping[int, float]
    ) -> DetuningMap:
        """Defines a DetuningMap for some trap ids of the register layout.

        Args:
            detuning_weights: A mapping between the IDs of the targeted traps
                and detuning weights (between 0 and 1, their sum must be
                equal to 1).

        Returns:
            A DetuningMap associating detuning weights to the trap
            coordinates of the targeted traps.
        """
        return self._layout.define_detuning_map(detuning_weights)

    def _to_dict(self) -> dict[str, Any]:
        # Serialization helper for the legacy JSON format.
        return obj_to_dict(self, self._layout, *self._qubit_ids)

    def _to_abstract_repr(self) -> list[dict[str, str]]:
        # Serialization helper for the abstract (schema-validated) repr.
        return [dict(qid=qid) for qid in stringify_qubit_ids(self.qubit_ids)]
|
from setuptools import setup, find_packages

install_requires = [
    'protobuf>=3.0.0b2,<4.0.0'
]

classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Intended Audience :: Developers',
    'License :: OSI Approved',
    'License :: OSI Approved :: MIT License',
    'Topic :: Software Development :: Libraries',
    'Topic :: System :: Networking',
    'Topic :: System :: Monitoring',
    'Topic :: System :: Systems Administration'
]

# BUG FIX: read the long description with an explicit encoding and a context
# manager — the original `open('README.rst').read()` leaked the file handle
# and depended on the platform default encoding.
with open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

kw = {
    'name': 'riemann-api',
    'version': '1.0.0',
    'description': 'Client for the Riemann event stream processor',
    'long_description': long_description,
    'author': 'Georgi Valkov',
    'author_email': 'georgi.t.valkov@gmail.com',
    'license': 'MIT',
    'keywords': 'riemann',
    'url': 'https://github.com/gvalkov/riemann-api',
    'classifiers': classifiers,
    'install_requires': install_requires,
    'packages': find_packages(),
    'zip_safe': True,
}

#-----------------------------------------------------------------------------
if __name__ == '__main__':
    setup(**kw)
|
# BUG FIX: this was Python 2 code (`urllib.urlopen` and the `print`
# statement no longer exist in Python 3). Modernized to urllib.request,
# with a context manager so the connection is closed.
from urllib.request import urlopen

with urlopen('https://www.google.com') as u:
    con = u.read()  # raw response body as bytes
print(con)
|
import boto3
import json
def get_cognito_info():
    """Collect and print (as JSON) every Cognito user pool and its users.

    Iterates all EC2 regions except a hard-coded skip list, describes each
    user pool found in the region, lists its users, and prints two JSON
    documents: {"User pools": [...]} and {"Users": [...]}.
    """
    conn = boto3.client('ec2')
    regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
    # Regions deliberately skipped by the original code (behaviour kept;
    # NOTE(review): confirm Cognito is really unused in these regions).
    skipped_regions = {'ap-east-1', 'eu-north-1', 'sa-east-1', 'us-west-1', 'eu-west-3'}
    user_pool_info = []
    user_info = []
    for region in regions:
        if region in skipped_regions:
            continue
        conn = boto3.client('cognito-idp', region_name=region)
        # List the user pools to collect their ids.
        # NOTE(review): MaxResults=10 without pagination silently truncates
        # regions that have more than 10 pools.
        user_pool_ids = [res['Id'] for res in conn.list_user_pools(MaxResults=10)['UserPools']]
        for pool_id in user_pool_ids:
            # Describe each user pool; each entry is wrapped in its own
            # one-element list to keep the output shape identical.
            pool = conn.describe_user_pool(UserPoolId=pool_id)['UserPool']
            user_pool_info.append([pool])
            # Collect the users of this pool.
            users = conn.list_users(UserPoolId=pool_id)['Users']
            user_info.append([users])
    # Wrap the collected lists in dictionaries and emit them as JSON.
    user_pool_json = json.dumps({"User pools": user_pool_info}, indent=4, default=str)
    user_json = json.dumps({"Users": user_info}, indent=4, default=str)
    print(user_pool_json)
    print(user_json)

get_cognito_info()
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def adjacent_edges(nodes, halfk):
    """Yield the ring-lattice edges (u, v) that connect each node to its
    `halfk` clockwise neighbours (wrapping around the ring)."""
    total = len(nodes)
    for index, node in enumerate(nodes):
        for offset in range(1, halfk + 1):
            neighbour = nodes[(index + offset) % total]
            yield node, neighbour
def make_ring_lattice(n, k):
    """Build a ring lattice: `n` nodes, each linked to its `k` nearest
    neighbours (k//2 on each side)."""
    G = nx.Graph()
    nodes = range(n)
    G.add_nodes_from(nodes)
    G.add_edges_from(adjacent_edges(nodes, k//2))
    return G
def draw_graph(G):
    """Draw `G` with a circular layout and display the figure (blocking)."""
    nx.draw_circular(G,
                     node_color = 'y',
                     node_size = 300,
                     with_labels = True)
    plt.show()
def flip(p):
    """Bernoulli trial: return True with probability `p`."""
    return p > np.random.random()
def rewire(G, p):
    """Watts-Strogatz rewiring: with probability `p`, replace each edge
    (u, v) with an edge (u, w) to a uniformly chosen non-neighbour w.

    Mutates `G` in place.
    """
    nodes = set(G)
    # BUG FIX: materialise the edge list first — removing/adding edges while
    # iterating the live G.edges() view raises RuntimeError in networkx 2.x.
    for u, v in list(G.edges()):
        if flip(p):
            choices = nodes - {u} - set(G[u])
            if not choices:
                # u is already connected to every other node; nothing to
                # rewire to (np.random.choice would raise on an empty list).
                continue
            new_v = np.random.choice(list(choices))
            G.remove_edge(u, v)
            G.add_edge(u, new_v)
def make_ws_graph(n, k, p):
    """Build a Watts-Strogatz graph: an (n, k) ring lattice rewired with
    probability `p` per edge."""
    ws = make_ring_lattice(n, k)
    rewire(ws, p)
    return ws
def all_pairs(nodes):
    """Yield every unordered pair of distinct nodes exactly once.

    `nodes` must be re-iterable (a list, range, or graph adjacency view).
    """
    for left_index, left_node in enumerate(nodes):
        for right_index, right_node in enumerate(nodes):
            if left_index > right_index:
                yield left_node, right_node
def node_clustering(G, u):
    """Local clustering coefficient of node `u`: the fraction of pairs of
    u's neighbours that are themselves connected.

    Returns NaN for nodes with fewer than 2 neighbours (undefined).
    """
    neighbors = G[u]
    k = len(neighbors)
    if k < 2:
        return np.nan
    # Number of possible neighbour pairs: k choose 2.
    possible = k * (k - 1) / 2
    exist = 0
    for v, w in all_pairs(neighbors):
        if G.has_edge(v, w):
            exist += 1
    return exist / possible
def clustering_coefficient(G):
    """Average clustering coefficient of `G`, ignoring NaN nodes
    (nodes with fewer than 2 neighbours)."""
    cu = [node_clustering(G, node) for node in G]
    return np.nanmean(cu)
def path_lengths(G):
    """Yield the shortest-path length of every ordered (source, dest) pair
    in `G` (including the zero-length source-to-itself paths)."""
    length_iter = nx.shortest_path_length(G)
    for source, dist_map in length_iter:
        for dest, dist in dist_map.items():
            yield dist
def characteristic_path_length(G):
    """Mean shortest-path length over all reachable node pairs of `G`."""
    return np.mean(list(path_lengths(G)))
def run_one_graph(n, k, p):
    """Build one WS graph and return its (mean path length, clustering
    coefficient) pair."""
    ws = make_ws_graph(n, k, p)
    mpl = characteristic_path_length(ws)
    cc = clustering_coefficient(ws)
    return mpl, cc
def run_experiment(ps, n = 100, k = 4, iters = 20):
    """For each rewiring probability in `ps`, average (path length,
    clustering coefficient) over `iters` random WS graphs.

    Returns an array of shape (len(ps), 2).
    """
    res = []
    for p in ps:
        t = [run_one_graph(n, k, p) for _ in range(iters)]
        means = np.array(t).mean(axis = 0)
        res.append(means)
    return np.array(res)
# Sweep the rewiring probability p over a log scale and plot the normalised
# clustering coefficient C(p)/C(0) and characteristic path length L(p)/L(0),
# reproducing the classic Watts-Strogatz small-world figure.
ps = np.logspace(-4, 0, 15)
res = run_experiment(ps)
L, C = np.transpose(res)
L /= L[0]  # normalise by the smallest-p (near ring-lattice) values
C /= C[0]
plt.plot(ps, C, 's-', linewidth=1, label='C(p)/C(0)')
plt.plot(ps, L, 'o-', linewidth=1, label='L(p)/L(0)')
plt.xscale('log')
plt.legend()
plt.show()
|
import csv
import numpy as np
import math
import matplotlib.pyplot as plt
import sys
args = sys.argv
# Load the evaluation data: per-frame feature vectors of the signal to classify.
f = open(args[1]+"_O.csv","r")
reader = csv.reader(f)
O = [ [float(s) for s in e] for e in reader ]
# Number of feature vectors read from the signal.
M=len(O)
# The signal was split into frames of 256 samples.
r=256
# The DFT is symmetric, so only the first half of the bins matters; the
# O[m][k] DC component was discarded upstream, leaving 127-dimensional
# vectors.
vecSize=int(r/2)-1
# Names of the trained class models to load (one diagonal Gaussian each).
filename=['coffee','buzzer','whistle','do','mi']
# Compute the total log-likelihood of the signal under each class model.
# NOTE(review): none of the opened files are ever closed.
Pr=[]
for n in filename:
    f = open(n+"_myu.csv","r")
    reader = csv.reader(f)
    # Flatten the per-row lists into a single mean vector.
    myu = sum([ [float(s) for s in e] for e in reader ],[])
    f = open(n+"_sigma.csv","r")
    reader = csv.reader(f)
    # Flatten the per-row lists into a single variance vector.
    sigma = sum([ [float(s) for s in e] for e in reader ],[])
    def log_normal_distribution(l):
        """Log-density of frame `l` under the diagonal Gaussian (myu, sigma)."""
        const= vecSize*math.log(2*math.pi)
        for k in range(vecSize):
            z = math.log(sigma[k])
            const += z
        # NOTE(review): this local `sum` shadows the builtin used above
        # (harmless because the shadow is function-local, but worth renaming).
        sum=const
        for k in range(vecSize):
            xmm = O[l][k] - myu[k]
            sum += xmm*xmm/sigma[k]
        return -0.5*sum
    # Total log-likelihood over all M frames for this class.
    Pr.append(np.sum([log_normal_distribution(num) for num in range(M)]))
# Classify: the class with the highest total log-likelihood wins.
i=Pr.index(max(Pr))
for index, n in enumerate(filename):
    print(n+":"+str(Pr[index]))
print("======================")
print("this is {}".format(filename[i]))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.