blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8a66856761553707c99f5dce0359aa6ab4d4ab8a | Python | Diegoslourenco/CS50x | /pset6/cash/cash.py | UTF-8 | 428 | 3.359375 | 3 | [] | no_license | from cs50 import get_float
# Greedy change-making: repeatedly take the largest US coin that fits
# (quarter, dime, nickel, penny), which is optimal for these denominations.
change = 0
coins = 0
while True:
    # round() instead of int(): int() truncates float noise
    # (e.g. 4.2 * 100 == 419.999..., which int() would turn into 419).
    change = round(get_float("Change owned: ") * 100)
    # Re-prompt only on negative amounts; 0 cents owed is valid input
    # (the original `> 0` looped forever on a 0 entry).
    if change >= 0:
        break
# Count coins per denomination; `coins` is printed by the line below this block.
for denomination in (25, 10, 5, 1):
    taken, change = divmod(change, denomination)
    coins += taken
print(coins) | true |
85cc5f7329c3d798dbf99ff69407d66d1184b3bb | Python | Jhonnis007/Python | /ex040.py | UTF-8 | 676 | 4.4375 | 4 | [] | no_license | '''
Crie um programa que leia duas notas de um aluno e calcule sua média, mostrando uma mensagem no final, de acordo com a média atingida:
- Média abaixo de 5.0: REPROVADO
- Média entre 5.0 e 6.9: RECUPERAÇÃO
- Média 7.0 ou superior: APROVADO
'''
# Read two grades, average them, and print the verdict per the spec above:
# media < 5.0 -> REPROVADO; 5.0 <= media < 7.0 -> RECUPERACAO; media >= 7.0 -> APROVADO.
n1 = float(input('Digite a primeira nota:'))
n2 = float(input('Digite a segunda nota:'))
media = (n1 + n2) / 2
print('Tirando nota {:.1f} e nota {:.1f}, a média é: {:.1f}'.format(n1, n2, media))
if media < 5:
    print('Você esta\033[31m REPROVADO! \033[m')
elif media < 7:  # was `media <= 6.9`, which wrongly APPROVED averages in (6.9, 7.0)
    print('Você esta em \033[33mRECUPERAÇÃO! \033[m')
else:
    print('Você foi\033[32m APROVADO! \033[m')
| true |
05b276ce581c9fb22377e11f385446bf4abbf0da | Python | ningtangla/escapeFromMultipleSuspectors | /src/envWithProbabilityDistractor.py | UTF-8 | 14,274 | 2.5625 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import pandas as pd
import pygame as pg
import itertools as it
import random
import anytree
import AnalyticGeometryFunctions as ag
import math
#np.random.seed(123)
class TransitionFunction():
    """Callable environment transition for the chase task.

    Pure orchestration: all behaviour is injected as callables, and this
    class only fixes the order in which they are applied.
    """
    def __init__(self, resetPhysicalState, resetBeliefAndAttention, updatePhysicalState, transiteStateWithoutActionChange, updateBeliefAndAttention, updatePhysicalStateByBelief):
        # Injected sub-functions; see __call__ for the order they run in.
        self.resetPhysicalState = resetPhysicalState
        self.resetBeliefAndAttention = resetBeliefAndAttention
        self.updatePhysicalState = updatePhysicalState
        self.transiteStateWithoutActionChange = transiteStateWithoutActionChange
        self.updateBeliefAndAttention = updateBeliefAndAttention
        self.updatePhysicalStateByBelief = updatePhysicalStateByBelief
    def __call__(self, oldState, action):
        """Advance the environment by one decision step.

        oldState: None to (re)initialise an episode, otherwise a
        [physicalState, beliefAndAttention] pair. Returns the new pair.
        """
        if oldState is None:
            # Fresh episode: reset the physics first, then derive beliefs from it.
            newPhysicalState = self.resetPhysicalState()
            newBeliefAndAttention = self.resetBeliefAndAttention(newPhysicalState)
            newState = [newPhysicalState, newBeliefAndAttention]
        else:
            oldPhysicalState, oldBeliefAndAttention = oldState
            # Apply the chosen action, then roll the world forward without
            # further action changes before updating beliefs.
            newPhysicalState = self.updatePhysicalState(oldPhysicalState, action)
            stateBeforeNoActionChangeTransition = [newPhysicalState, oldBeliefAndAttention]
            physicalStateAfterNoActionChangeTransition, beliefAndAttentionAfterNoActionChangeTransition = self.transiteStateWithoutActionChange(stateBeforeNoActionChangeTransition)
            # Beliefs are updated from the *old* beliefs plus the rolled-forward
            # physics; the belief part returned by the roll-forward is discarded.
            newBeliefAndAttention = self.updateBeliefAndAttention(oldBeliefAndAttention, physicalStateAfterNoActionChangeTransition)
            newState = [physicalStateAfterNoActionChangeTransition, newBeliefAndAttention]
            newState = self.updatePhysicalStateByBelief(newState)
        return newState
class TransiteStateWithoutActionChange():
    """Roll the multi-agent world forward for up to ``maxFrame`` frames.

    During the roll-forward, distractor agents occasionally re-sample
    their heading at random; agent 0 (the sheep) and the ground-truth
    wolf never do.
    """
    def __init__(self, maxFrame, isTerminal, transiteMultiAgentMotion, render, renderOn):
        self.maxFrame = maxFrame
        self.isTerminal = isTerminal
        self.transiteMultiAgentMotion = transiteMultiAgentMotion
        self.render = render
        self.renderOn = renderOn
    def __call__(self, state):
        for frame in range(self.maxFrame):
            physicalState, beliefAndAttention = state
            agentStates, agentActions, timeStep, wolfIdAndSubtlety = physicalState
            if self.renderOn == True:
                self.render(state)
            if self.isTerminal(state):
                break
            # Each agent flips a 1/maxFrame coin to decide whether to
            # re-sample its heading this frame...
            change = np.random.randint(0, self.maxFrame, len(agentStates))
            changeLabel = 1 * (change == 0)
            # ...except the sheep (index 0) and the true wolf, whose
            # headings are fixed during this roll-forward.
            changeLabel[0] = 0
            changeLabel[wolfIdAndSubtlety[0]] = 0
            # Perturb the heading of the selected agents by up to +/- 60 degrees.
            currentActionsPolar = np.array([ag.transiteCartesianToPolar(action) for action in agentActions])
            polarAfterChange = np.random.uniform(-math.pi*1/3, math.pi*1/3, len(agentStates)) * np.array(changeLabel) + currentActionsPolar
            # All agents move at agent 1's speed...
            actionsAfterChange = np.array([ag.transitePolarToCartesian(polar) for polar in polarAfterChange]) * np.linalg.norm(agentActions[1])
            # ...except the sheep, which keeps its own speed.
            actionsAfterChange[0] = actionsAfterChange[0] * 1.0 * np.linalg.norm(agentActions[0]) / np.linalg.norm(agentActions[1])
            #print(np.linalg.norm(actionsAfterChange[0]), np.linalg.norm(actionsAfterChange[1]))
            newAgentStates, newAgentActions = self.transiteMultiAgentMotion(agentStates, actionsAfterChange)
            newPhysicalState = [newAgentStates, newAgentActions, timeStep, wolfIdAndSubtlety]
            stateAfterNoActionChangeTransition = [newPhysicalState, beliefAndAttention]
            state = stateAfterNoActionChangeTransition
        return state
class IsTerminal():
    """Predicate: the episode ends when the wolf has caught the sheep,
    i.e. their Euclidean distance is at or below ``minDistance``."""
    def __init__(self, sheepId, minDistance):
        self.sheepId = sheepId
        self.minDistance = minDistance
    def __call__(self, state):
        physicalState, _beliefAndAttention = state
        agentStates, _actions, _timeStep, wolfIdAndSubtlety = physicalState
        wolfId = wolfIdAndSubtlety[0]
        # Euclidean distance between the sheep and the ground-truth wolf.
        displacement = agentStates[self.sheepId] - agentStates[wolfId]
        distance = np.sqrt(np.sum(displacement ** 2))
        return bool(distance <= self.minDistance)
class Render():
    """Pygame renderer for the chase environment.

    Draws every agent as a circle whose colour encodes the posterior belief
    that it is the wolf, rings attended agents in blue, and optionally dumps
    frames to ``<parent>/src/data/<saveImageFile>/``.

    NOTE(review): these calls use ``np.int``, which was removed in
    NumPy >= 1.24 -- this code requires an older NumPy (or plain ``int``).
    """
    def __init__(self, numAgent, screen, surfaceWidth, surfaceHeight, screenColor, sheepColor, wolfColor, circleSize, saveImage, saveImageFile, isTerminal):
        self.numAgent = numAgent
        self.screen = screen
        self.surfaceWidth = surfaceWidth
        self.surfaceHeight = surfaceHeight
        self.screenColor = screenColor
        self.sheepColor = sheepColor
        self.wolfColor = wolfColor
        self.circleSize = circleSize
        self.saveImage = saveImage
        self.saveImageFile = saveImageFile
        self.isTerminal = isTerminal
    def __call__(self, state):
        physicalState, beliefAndAttention = state
        agentStates, agentActions, timeStep, wolfIdAndSubtlety = physicalState
        groundTruthWolf, groundTruthSubtlety = wolfIdAndSubtlety
        hypothesisInformation, positionOldTimeDF = beliefAndAttention
        # Normalise the per-hypothesis log-probabilities into a posterior,
        # then marginalise over everything but the wolf identity.
        posteriorAllHypothesesBeforeNormalization = np.exp(hypothesisInformation['logP'])
        posteriorAllHypotheses = posteriorAllHypothesesBeforeNormalization / (np.sum(posteriorAllHypothesesBeforeNormalization))
        posteriorAllWolf = posteriorAllHypotheses.groupby(['wolfIdentity']).sum().values
        attentionStatus = hypothesisInformation.groupby(['wolfIdentity'])['attentionStatus'].mean().values
        # Candidate wolves are numbered from 1 (agent 0 is the sheep), hence the +1.
        attentionSlot = np.concatenate(np.argwhere(attentionStatus != 0)) + 1
        # NOTE(review): beliefSurface is created but never used.
        beliefSurface = pg.Surface((self.surfaceWidth, self.surfaceHeight))
        # Interpolate each candidate's colour between near-white (belief ~0)
        # and wolfColor (belief high).
        wolfColors = [np.ones(3) * 250 * (1 - 1.5*wolfBelief) + self.wolfColor * 1.5*wolfBelief for wolfBelief in posteriorAllWolf]
        circleColorList = np.array([self.sheepColor] + wolfColors)
        #circleColorList[groundTruthWolf] = circleColorList[groundTruthWolf] + np.array([0, 0, 255])
        for j in range(1):
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    # NOTE(review): `pg.quit` is referenced but not called
                    # (missing parentheses) -- the QUIT event is effectively ignored.
                    pg.quit
            self.screen.fill(self.screenColor)
            for i in range(self.numAgent):
                oneAgentState = agentStates[i]
                oneAgentPosition = np.array(oneAgentState)
                if i in attentionSlot:
                    # NOTE(review): min(0, ...) clamps both radius and width to 0,
                    # so this attention ring never draws; max() looks intended --
                    # confirm (MctsRender uses the un-clamped values).
                    pg.draw.circle(self.screen, np.array([0, 0, 255]), [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])],
                            min(0, 5*int(attentionStatus[i - 1]) + 10), min(0, 5*int(attentionStatus[i - 1])))
                pg.draw.circle(self.screen, np.clip(circleColorList[i], 0, 255), [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])], self.circleSize)
                # Highlight the ground-truth wolf in red on the terminal frame.
                if self.isTerminal(state) and i == groundTruthWolf:
                    pg.draw.circle(self.screen, np.array([255, 0, 0]), [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])], int(self.circleSize*1.5))
            pg.display.flip()
            if self.saveImage==True:
                currentDir = os.getcwd()
                parentDir = os.path.abspath(os.path.join(currentDir, os.pardir))
                saveImageDir=parentDir+'/src/data/'+self.saveImageFile
                #if j == 1 :
                #    saveImageDir=parentDir+'/src/data/'+self.saveImageFile+'/groundtruth'
                # On the terminal frame, duplicate the image ~90 times so the
                # final state lingers when frames are replayed as a video.
                if self.isTerminal(state):
                    for pauseTimeIndex in range(90):
                        filenameList = os.listdir(saveImageDir)
                        pg.image.save(self.screen,saveImageDir+'/'+str(len(filenameList))+'.png')
                        pg.time.wait(1)
                # Frames are numbered by counting the files already present.
                filenameList = os.listdir(saveImageDir)
                pg.image.save(self.screen,saveImageDir+'/'+str(len(filenameList))+'.png')
                pg.time.wait(1)
class MctsRender():
    """Pygame renderer for visualising MCTS expansions.

    Each call draws the transition from ``currNode`` to ``nextNode`` as a
    line whose width scales with visit counts, over a cached background of
    agent positions and wolf beliefs.

    NOTE(review): uses ``np.int``, removed in NumPy >= 1.24; requires an
    older NumPy (or plain ``int``).
    """
    def __init__(self, numAgent, screen, surfaceWidth, surfaceHeight, screenColor, sheepColor, wolfColor, distractorColor, circleSize, saveImage, saveImageFile):
        self.numAgent = numAgent
        self.screen = screen
        self.surfaceWidth = surfaceWidth
        self.surfaceHeight = surfaceHeight
        self.screenColor = screenColor
        self.sheepColor = sheepColor
        self.wolfColor = wolfColor
        self.distractorColor = distractorColor
        self.circleSize = circleSize
        self.saveImage = saveImage
        self.saveImageFile = saveImageFile
    def __call__(self, currNode, nextNode, roots, backgroundScreen):
        # NOTE(review): parentNumVisit/parentValueToTal/childNumVisit/
        # childValueToTal and `font` are computed but never used below.
        parentNumVisit = currNode.num_visited
        parentValueToTal = currNode.sum_value
        # Node ids hold the environment state as the single dict value.
        state = list(currNode.id.values())[0]
        physicalState, beliefAndAttention = state
        agentStates, agentActions, timeStep, wolfIdAndSubtlety = physicalState
        wolfId, wolfSubtlety = wolfIdAndSubtlety
        hypothesisInformation, positionOldTimeDF = beliefAndAttention
        # Posterior over wolf identity, as in Render.__call__.
        posteriorAllHypothesesBeforeNormalization = np.exp(hypothesisInformation['logP'])
        posteriorAllHypotheses = posteriorAllHypothesesBeforeNormalization / (np.sum(posteriorAllHypothesesBeforeNormalization))
        posteriorAllWolf = posteriorAllHypotheses.groupby(['wolfIdentity']).sum().values
        childNumVisit = nextNode.num_visited
        childValueToTal = nextNode.sum_value
        nextState = list(nextNode.id.values())[0]
        nextPhysicalState, nextBeliefAndAttention = nextState
        nextAgentStates, nextAgentActions, nextTimeStep, nextWolfIdAndSubtlety = nextPhysicalState
        # Line width grows with visits; for depth-1 edges, add the visits the
        # same action accumulated in the previous trees of this sequence.
        lineWidth = nextNode.num_visited + 1
        if len(roots) > 0 and nextNode.depth == 1:
            nodeIndex = currNode.children.index(nextNode)
            grandchildren_visit = np.sum([[child.num_visited for child in anytree.findall(root, lambda node: node.depth == 1)] for root in roots], axis=0)
            lineWidth = lineWidth + grandchildren_visit[nodeIndex]
        font = pg.font.SysFont("Arial", 12)
        surfaceToDraw = pg.Surface((self.surfaceWidth, self.surfaceHeight))
        surfaceToDraw.fill(self.screenColor)
        # First call of a search: build and cache the background showing the
        # current agent positions and beliefs.
        # NOTE(review): `== None` should be `is None`.
        if backgroundScreen == None:
            backgroundScreen = pg.Surface((self.surfaceWidth, self.surfaceHeight))
            # NOTE(review): beliefSurface is created but never used.
            beliefSurface = pg.Surface((self.surfaceWidth, self.surfaceHeight))
            backgroundScreen.fill(self.screenColor)
            self.screen.fill(self.screenColor)
            wolfColors = [np.ones(3) * 250 * (1 - 1.5*wolfBelief) + 1.5*self.wolfColor * wolfBelief for wolfBelief in posteriorAllWolf]
            circleColorList = np.array([self.sheepColor] + wolfColors)
            attentionStatus = hypothesisInformation.groupby(['wolfIdentity'])['attentionStatus'].mean().values
            # Candidate wolves are numbered from 1 (agent 0 is the sheep).
            attentionSlot = np.concatenate(np.argwhere(attentionStatus != 0)) + 1
            for i in range(self.numAgent):
                oneAgentState = agentStates[i]
                oneAgentNextState = nextAgentStates[i]
                oneAgentPosition = np.array(oneAgentState)
                oneAgentNextPosition = np.array(oneAgentNextState)
                if i in attentionSlot:
                    # Blue ring around attended agents, thicker with more attention.
                    pg.draw.circle(backgroundScreen, np.array([0, 0, 255]), [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])], 5*int(attentionStatus[i - 1]) + 10,
                            5*int(attentionStatus[i - 1]))
                pg.draw.circle(backgroundScreen, np.clip(circleColorList[i],0,255), [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])], self.circleSize)
            if self.saveImage==True:
                for i in range(1):
                    currentDir = os.getcwd()
                    parentDir = os.path.abspath(os.path.join(currentDir, os.pardir))
                    saveImageDir=parentDir+'/src/data/'+self.saveImageFile
                    # Frames are numbered by counting the files already present.
                    filenameList = os.listdir(saveImageDir)
                    pg.image.save(backgroundScreen,saveImageDir+'/'+str(len(filenameList))+'.png')
        # Repaint the cached background (semi-transparent) under the new edge.
        surfaceToDraw.set_alpha(180)
        surfaceToDraw.blit(backgroundScreen, (0,0))
        self.screen.blit(surfaceToDraw, (0, 0))
        pg.display.flip()
        pg.time.wait(1)
        # Fixed colours for the overlay: grey sheep trail, wolf highlighted.
        circleColorList = [self.distractorColor] * self.numAgent
        circleColorList[wolfId] = self.wolfColor
        circleColorList[0] = self.sheepColor
        for j in range(1):
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    # NOTE(review): `pg.quit` is referenced but not called
                    # (missing parentheses) -- the QUIT event is effectively ignored.
                    pg.quit
            for i in range(self.numAgent):
                oneAgentState = agentStates[i]
                oneAgentNextState = nextAgentStates[i]
                oneAgentPosition = np.array(oneAgentState)
                oneAgentNextPosition = np.array(oneAgentNextState)
                if i == 0:
                    # Sheep: draw the explored transition as a line plus endpoint.
                    pg.draw.line(surfaceToDraw, np.ones(3) * 240, [np.int(oneAgentPosition[0]), np.int(oneAgentPosition[1])], [np.int(oneAgentNextPosition[0]),np.int(oneAgentNextPosition[1])], lineWidth)
                    pg.draw.circle(surfaceToDraw, circleColorList[i], [np.int(oneAgentNextPosition[0]),np.int(oneAgentNextPosition[1])], self.circleSize)
                if i == wolfId:
                    pg.draw.circle(surfaceToDraw, circleColorList[i], [np.int(oneAgentPosition[0]),np.int(oneAgentPosition[1])], self.circleSize)
                    pg.draw.circle(surfaceToDraw, circleColorList[i], [np.int(oneAgentNextPosition[0]),np.int(oneAgentNextPosition[1])], self.circleSize)
            self.screen.blit(surfaceToDraw, (0, 0))
            pg.display.flip()
            pg.time.wait(1)
        # NOTE(review): backgroundScreenToReturn is computed but the method
        # returns self.screen instead -- confirm which was intended.
        backgroundScreenToReturn = self.screen.copy()
        if self.saveImage==True:
            currentDir = os.getcwd()
            parentDir = os.path.abspath(os.path.join(currentDir, os.pardir))
            saveImageDir=parentDir+'/src/data/'+self.saveImageFile
            filenameList = os.listdir(saveImageDir)
            pg.image.save(self.screen,saveImageDir+'/'+str(len(filenameList))+'.png')
        return self.screen
if __name__ == '__main__':
    # Smoke reference only; a usable TransitionFunction needs the injected
    # callables wired up by the experiment scripts.
    # (Removed the leftover `__import__('ipdb').set_trace()` debugger hook:
    # it pulled in a third-party dev dependency and blocked unattended runs.)
    a = TransitionFunction
| true |
8844eb7bd7fe53155941389742878375068f84e4 | Python | breezekiller789/LeetCode | /428_Serialize_And_Deserialize_N_ary_Tree.py | UTF-8 | 2,060 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://leetcode.com/problems/serialize-and-deserialize-n-ary-tree/
class Node(object):
    """A node of an N-ary tree: a value plus an ordered list of children."""

    def __init__(self, val=None, children=None):
        # Callers in this file always pass an explicit list for `children`;
        # the None default is kept for interface compatibility.
        self.val = val
        self.children = children
# Build the sample tree round-tripped by the driver at the bottom of the file:
# root 1 with children [3, 2, 4]; node 2 (children[1]) gets children [5, 6].
root = Node(1, [])
root.children.extend([Node(3, []), Node(2, []), Node(4, [])])
root.children[1].children.extend([Node(5, []), Node(6, [])])
# 1
# 3 2 4
# 5 6
class Codec(object):
    """Level-order (BFS) serializer/deserializer for an N-ary tree.

    Encoding: node values in BFS order as strings, padded with "null"
    markers (a placeholder queued for each childless node, plus one
    separator after every level) so that ``deserialize`` can recover each
    node's children by scanning values up to the next "null".
    """
    # Level order traversal
    def serialize(self, root):
        """Encode the tree rooted at *root* into a flat list of strings."""
        Q = [root]
        Result = []
        String = ""
        while Q:
            nodesOfCurrentLevel = len(Q)
            Result.append([])
            while nodesOfCurrentLevel > 0:
                currentNode = Q[0]
                if currentNode == "null":
                    String += "null,"
                    Q = Q[1:]
                    nodesOfCurrentLevel -= 1
                    continue
                Q = Q[1:]
                String += "{},".format(currentNode.val)
                Result[-1].append(currentNode.val)
                for child in currentNode.children:
                    Q.append(child)
                if not currentNode.children:
                    # Childless nodes queue a placeholder so the decoder sees
                    # an explicit empty child list on the next level.
                    Q.append("null")
                nodesOfCurrentLevel -= 1
            # One extra separator terminates each level.
            String += "null,"
        # BUGFIX: was the Python 2 statement `print String[:-1].split(",")`,
        # a SyntaxError under this file's python3 shebang.
        print(String[:-1].split(","))
        return String[:-1].split(",")

    def deserialize(self, data):
        """Rebuild a tree from the list produced by ``serialize``."""
        root = Node(data[0], [])
        # Skip the root value and the separator that follows it.
        data = data[2:]
        Q = [root]
        while Q:
            nodesOfCurrentLevel = len(Q)
            while nodesOfCurrentLevel > 0:
                currentNode = Q[0]
                Q = Q[1:]
                # Values up to the next "null" are this node's children.
                while data[0] != "null":
                    newNode = Node(data[0], [])
                    currentNode.children.append(newNode)
                    Q.append(newNode)
                    data = data[1:]
                data = data[1:]
                nodesOfCurrentLevel -= 1
        return root
# Smoke test: round-trip the sample tree through the codec and re-encode it.
obj = Codec()
newroot = obj.deserialize(obj.serialize(root))
obj.serialize(newroot)
| true |
d212a18a7a969c06ee33da3aac8a47bad3ce46dd | Python | mwappner/Tesis | /Analisis cantos/recortar.py | UTF-8 | 2,685 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 12:26:24 2018
@author: Marcos
"""
import numpy as np
from PIL import Image
import os
#from skimage import filters #para otsu
# Module setup (runs at import): locate the sonogram folders and load the
# per-file durations table used by abro_imagen to rescale images.
motivoPath = os.path.join(os.getcwd(),'Motivos')
sonoPath = os.path.join(motivoPath,'Sonogramas','Nuevos')
sonoFiles = os.listdir(sonoPath)
sonoFiles.sort()
# Each line of the durations file reads "<duration>, <file name>".
with open(os.path.join(motivoPath,'duraciones_nuevos.txt'),'r') as d:
    c_d = d.readlines()
# dictionary mapping file_name -> duration;
# duration parsed as float, trailing '\n' stripped from the name
dur = {k.strip():float(v) for v,k in (s.split(', ') for s in c_d)}
escala = 2500 # sonogram pixels per second
#%%
def abro_imagen(file):
    """Load a sonogram image, crop it to the plot area, rescale its width to
    reflect the recording's duration, and return it as a numpy array with
    inverted intensity (higher value = more power).

    Side effect: stores the cropped image height in the module-level
    global ``alto`` (read later by ``cortar``).
    """
    # keep only the file name, without path or extension
    # (assumes a 3-character extension such as 'png' -- TODO confirm)
    nombre_actual = os.path.basename(file)[:-4]
    # load the image and convert it to grayscale ('L')
    sono1 = Image.open(file).convert('L')
    # crop the region of interest (inside the plot axes)
    sono1 = sono1.crop((360, 238, 6845, 1860)) # bounds obtained by hand
    # rescale the image so its width reflects the duration,
    # then convert it to an np.array
    global alto
    alto = sono1.size[1]
    ancho = int(dur[nombre_actual] * escala)
    A = np.array(sono1.resize((ancho,alto),Image.LANCZOS))
    A = 255-A # the scale is reversed (255 means low power)
    return A
def cortar(im, ti, dur, fi, ff):
    """Cut a rectangular patch out of sonogram array *im*.

    ti, dur: start time and duration in seconds (columns, scaled by ``escala``).
    fi, ff: frequency bounds as fractions of the image height (rows); the
    order of fi/ff does not matter.

    NOTE(review): rows are indexed with ``fi*alto*10`` -- the extra factor 10
    suggests fi/ff are tenths of the height; confirm against the example call
    below.  Also note the parameter ``dur`` shadows the module-level
    durations dict of the same name.
    """
    # in case the frequencies were given in reverse order
    if fi>ff:
        fi, ff = ff, fi
    # crop:
    ic = im[int(fi*alto * 10) : int(ff*alto * 10), int(ti*escala) : int((ti+dur) * escala)]
    return ic
def guardar(im_array, nombre, extension='png'):
    '''Takes an array containing an image and a name (full path, without
    extension), optionally a file extension (default: 'png'), and saves the
    image under a unique, non-clobbering file name (see new_file).'''
    im = Image.fromarray(im_array)
    nombre = new_file(nombre + '.' + extension)
    im.save(nombre)
#ic = A[0:alto, int(0.27*escala): int((0.27 + 0.05) * escala)]
#ic = cortar(A, 0.08, 0.09, 0.055, 0.031)
#pl.imshow(ic)
#%%
def new_file(name, newseparator='_'):
    '''Return a file or directory name that does not collide with an
    existing path, so nothing gets overwritten.

    If *name* already exists, candidates of the form
    ``base + newseparator + index + extension`` are tried with
    index = 2, 3, ... until a free one is found.

    Parameters:
    -----------
    name : str (path)
        Proposed file or directory name, including any file extension.
    newseparator : str
        Separator placed between the original name and the uniquifying index.
    '''
    # For a directory, splitext yields an empty extension.
    stem, ext = os.path.splitext(name)
    candidate = name
    index = 2
    while os.path.exists(candidate):
        candidate = '{}{}{}{}'.format(stem, newseparator, index, ext)
        index += 1
    return candidate
| true |
13f141517da93da4396c756e1bc7b376d9daa516 | Python | srisivan/python | /newton_law_gravity.py | UTF-8 | 869 | 4.46875 | 4 | [] | no_license | # A program to get the approximate value of newton's law.
# Interactive calculator for Newton's law of universal gravitation:
#   F = G * (m1 * m2) / d**2
print("This is a program to calculate the Force between two bodies, Using Newton's law of gravity.")
Gravitational_constant = 6.67 * 10 ** -11  # G, in N*m^2/kg^2
print(" ")
# float() still accepts whole-number text, but also allows real-valued
# masses and distances (the original int() rejected e.g. "5.5").
Mass1 = float(input("Enter Mass of Body 1 \n"))
Mass2 = float(input("Enter Mass of Body 2\n"))
Distance = float(input("Enter distance between 2 bodies.\n"))
print("Newton's law: Force = Gravitational constant * (Mass1 * Mass2) / Distance squared\n")
print("You have entered:")
print("Mass of Body1 = %s" % Mass1)
print("Mass of Body2 = %s" % Mass2)
print("Distance between bodies = %s " % Distance)
# Newton's law.
Force = Gravitational_constant * (Mass1 * Mass2) / Distance ** 2
# The 'e' in the printed result is scientific-notation exponent.
print("The approximate force between them is %s \n" % Force)
print("The 'e' corresponds to power.\n")
| true |
7d88ce1ed07182f09687de87bdd6c076b3be251c | Python | afarahi/XCat | /XCat.v.0.0.1/source/healpy/rotator.py | UTF-8 | 28,784 | 2.953125 | 3 | [] | no_license | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
import numpy as npy
import warnings
coordname = {'G': 'Galactic', 'E': 'Ecliptic', 'C': 'Equatorial'}
class ConsistencyWarning(Warning):
    """Warning category emitted when a data-consistency problem is detected."""
# When this file is imported as a module, always display ConsistencyWarning
# messages raised from it (instead of the default once-per-location policy).
if __name__ != '__main__':
    warnings.filterwarnings("always", category=ConsistencyWarning, module=__name__)
class Rotator:
    """This class provides tools for spherical rotations. It is meant to be used
    in the healpy library for plotting, and for this reason reflects the
    convention used in the Healpix IDL library.

    Example:
    >>> r = Rotator(coord=['G','E'])
    >>> theta_ecl, phi_ecl = r(theta_gal, phi_gal)
    or in a shorter way:
    >>> theta_ecl, phi_ecl = Rotator(coord=['G','E'])(theta_gal, phi_gal)
    """
    ErrMessWrongPar = ("rot and coord must be single elements or "
                       "sequence of same size.")

    def __init__(self, rot=None, coord=None, inv=None, deg=True,
                 eulertype='ZYX'):
        """Create a rotator with given parameters.
        - rot: a float, a tuple of 1,2 or 3 floats or a sequence of tuples.
          If it is a sequence of tuple, it must have the same length as coord.
        - coord: a string or a tuple of 1 or 2 strings or a sequence of tuple
          If it is a sequence of tuple, it must have same length as rot.
        - inv: whether to use inverse rotation or not
        - deg: if True, angles in rot are assumed in degree (default: True)
        - eulertype: the convention for Euler angles in rot.
        Note: the coord system conversion is applied first, then the rotation.
        """
        # A "sequence" rot/coord describes a chain of several transforms.
        rot_is_seq = (hasattr(rot, '__len__')
                      and hasattr(rot[0], '__len__'))
        coord_is_seq = (hasattr(coord, '__len__')
                        and hasattr(coord[0], '__len__')
                        and type(coord[0]) is not str)
        if rot_is_seq and coord_is_seq:
            if len(rot) != len(coord):
                raise ValueError(Rotator.ErrMessWrongPar)
            else:
                rots = rot
                coords = coord
        elif (rot_is_seq or coord_is_seq) and (rot is not None and
                                              coord is not None):
            raise ValueError(Rotator.ErrMessWrongPar)
        else:
            # Promote single rot/coord to one-element chains.
            rots = [rot]
            coords = [coord]
        inv_is_seq = hasattr(inv, '__len__')
        if inv_is_seq:
            if len(inv) != len(rots):
                raise ValueError("inv must have same length as rot and/or coord")
            invs = inv
        else:
            invs = [inv] * len(rots)
        # check the arguments and normalize them
        if eulertype in ['ZYX', 'X', 'Y']:
            self._eultype = eulertype
        else:
            self._eultype = 'ZYX'
        self._rots = []
        self._coords = []
        self._invs = []
        for r, c, i in zip(rots, coords, invs):
            rn = normalise_rot(r, deg=deg)
            cn = normalise_coord(c)
            self._rots.append(rn)
            self._coords.append(cn)
            self._invs.append(bool(i))
        if not self.consistent:
            warnings.warn("The chain of coord system rotations is not consistent",
                          category=ConsistencyWarning)
        self._update_matrix()

    def _update_matrix(self):
        """Recompute the cached 3x3 matrix from the chain of rot/coord/inv."""
        self._matrix = npy.identity(3)
        self._do_rotation = False
        for r, c, i in zip(self._rots, self._coords, self._invs):
            rotmat, do_rot, rotnorm = get_rotation_matrix(r,
                                                          eulertype=self._eultype)
            convmat, do_conv, coordnorm = get_coordconv_matrix(c)
            # The coord system conversion is applied first, then the rotation.
            r = npy.dot(rotmat, convmat)
            if i:
                r = r.T
            self._matrix = npy.dot(self._matrix, r)
            self._do_rotation = self._do_rotation or (do_rot or do_conv)

    def _is_coords_consistent(self):
        """True when each transform's input coord system matches the output
        system of the transform that follows it in the chain."""
        # Python 3 fix: zip() returns an iterator, so the original
        # `zip(self._coords, self._invs)[0]` raised a TypeError.
        c, i = self._coords[0], self._invs[0]
        for cnext, inext in zip(self._coords[1:], self._invs[1:]):
            if c[i] != cnext[not inext]:
                return False
            c, i = cnext, inext
        return True
    consistent = property(_is_coords_consistent,
                          doc="consistency of the coords transform chain")

    def __eq__(self, a):
        """Rotators are equal when their rot chains match to 1e-15 and their
        coord and inv chains match exactly."""
        if type(a) is not type(self):
            return False
        # compare the _rots
        v = [npy.allclose(x, y, rtol=0, atol=1e-15) for x, y in zip(self._rots, a._rots)]
        return (npy.array(v).all() and
                (self._coords == a._coords) and
                (self._invs == a._invs))

    def __call__(self, *args, **kwds):
        """Use the rotator to rotate either spherical coordinates (theta, phi)
        or a vector (x,y,z). You can use lonlat keyword to use longitude, latitude
        (in degree) instead of theta, phi (in radian). In this case, returns
        longitude, latitude in degree.

        Accepted forms:
        >>> r = Rotator()
        >>> r(x,y,z)  # x,y,z either scalars or arrays
        >>> r(theta,phi) # theta, phi scalars or arrays
        >>> r(lon,lat,lonlat=True) # lon, lat scalars or arrays
        >>> r(vec) # vec 1-D array with 3 elements, or 2-D array 3xN
        >>> r(direction) # direction 1-D array with 2 elements, or 2xN array
        """
        if kwds.pop('inv', False):
            m = self._matrix.T
        else:
            m = self._matrix
        lonlat = kwds.pop('lonlat', False)
        if len(args) == 1:
            arg = args[0]
            if not hasattr(arg, '__len__') or len(arg) < 2 or len(arg) > 3:
                raise TypeError('Argument must be a sequence of 2 or 3 '
                                'elements')
            if len(arg) == 2:
                return rotateDirection(m, arg[0], arg[1],
                                       self._do_rotation, lonlat=lonlat)
            else:
                return rotateVector(m, arg[0], arg[1], arg[2],
                                    self._do_rotation)
        elif len(args) == 2:
            return rotateDirection(m, args[0], args[1],
                                   self._do_rotation, lonlat=lonlat)
        elif len(args) == 3:
            return rotateVector(m, args[0], args[1], args[2],
                                self._do_rotation)
        else:
            raise TypeError('Either 1, 2 or 3 arguments accepted')

    def __mul__(self, a):
        """Composition of rotation.
        """
        if not isinstance(a, Rotator):
            raise TypeError("A Rotator can only multiply another Rotator "
                            "(composition of rotations)")
        rots = self._rots + a._rots
        coords = self._coords + a._coords
        invs = self._invs + a._invs
        return Rotator(rot=rots, coord=coords, inv=invs, deg=False)

    def __rmul__(self, b):
        """Reflected composition: b * self."""
        if not isinstance(b, Rotator):
            raise TypeError("A Rotator can only be multiplied by another Rotator "
                            "(composition of rotations)")
        rots = b._rots + self._rots
        coords = b._coords + self._coords
        # BUGFIX: previously `self._invs + a._invs`, which raised a NameError
        # (`a` is undefined here) and used the wrong operand order.
        invs = b._invs + self._invs
        return Rotator(rot=rots, coord=coords, inv=invs, deg=False)

    def __bool__(self):
        # A Rotator is truthy when it performs a non-trivial transformation.
        return self._do_rotation
    # Python 2 spelling of the truth-value protocol, kept for compatibility.
    __nonzero__ = __bool__

    def get_inverse(self):
        """Return a new Rotator representing the inverse transformation."""
        rots = self._rots[::-1]
        coords = self._coords[::-1]
        invs = [not i for i in self._invs[::-1]]
        return Rotator(rot=rots, coord=coords, inv=invs, deg=False)

    def I(self, *args, **kwds):
        """Rotate the given vector or direction using the inverse matrix.
        rot.I(vec) <==> rot(vec,inv=True)
        """
        kwds['inv'] = True
        return self.__call__(*args, **kwds)

    def get_matrix(self):
        return npy.matrix(self._matrix)
    mat = property(get_matrix, doc='Return a matrix representing the rotation')

    def get_coordin(self):
        if not self.consistent:
            return None
        # Input system of the chain: the last transform is applied first.
        # (Python 3 fix for the former `zip(...)[-1]` subscripting.)
        c, i = self._coords[-1], self._invs[-1]
        return c[i]
    coordin = property(get_coordin, doc="the input coordinate system")

    def get_coordout(self):
        if not self.consistent:
            return None
        # Output system of the chain: the first transform is applied last.
        c, i = self._coords[0], self._invs[0]
        return c[not i]
    coordout = property(get_coordout, doc="the output coordinate system")

    def get_coordin_str(self):
        return coordname.get(self.coordin, '')
    coordinstr = property(get_coordin_str, doc="the input coordinate system in str")

    def get_coordout_str(self):
        return coordname.get(self.coordout, '')
    coordoutstr = property(get_coordout_str, doc="the output coordinate system in str")

    def get_rots(self):
        return self._rots
    rots = property(get_rots, doc="the sequence of rots defining")

    def get_coords(self):
        return self._coords
    coords = property(get_coords, doc="the sequence of coords")

    def do_rot(self, i):
        """True when the i-th rotation of the chain is non-trivial."""
        return not npy.allclose(self.rots[i], npy.zeros(3), rtol=0., atol=1.e-15)

    def angle_ref(self, *args, **kwds):
        """Compute the angle between transverse reference direction of initial and final frames

        For example, if angle of polarisation is psi in initial frame, it will be psi+angle_ref in final
        frame.

        Input:
          - direction or vector (see Rotator.__call__)
        Keywords:
          - lonlat: if True, assume input is longitude,latitude in degrees. Otherwise,
            theta,phi in radian. Default: False
          - inv: if True, use the inverse transforms. Default: False
        Return:
          - angle in radian (a scalar or an array if input is a sequence of direction/vector)
        """
        R = self
        lonlat = kwds.get('lonlat', False)
        inv = kwds.get('inv', False)
        if len(args) == 1:
            arg = args[0]
            if not hasattr(arg, '__len__') or len(arg) < 2 or len(arg) > 3:
                raise TypeError('Argument must be a sequence of 2 or 3 '
                                'elements')
            if len(arg) == 2:
                v = dir2vec(arg[0], arg[1], lonlat=lonlat)
            else:
                v = arg
        elif len(args) == 2:
            v = dir2vec(args[0], args[1], lonlat=lonlat)
        elif len(args) == 3:
            v = args
        else:
            raise TypeError('Either 1, 2 or 3 arguments accepted')
        vp = R(v, inv=inv)
        north_pole = R([0., 0., 1.], inv=inv)
        sinalpha = north_pole[0] * vp[1] - north_pole[1] * vp[0]
        cosalpha = north_pole[2] - vp[2] * npy.dot(north_pole, vp)
        return npy.arctan2(sinalpha, cosalpha)

    def __repr__(self):
        return '[ ' + ', '.join([str(self._coords),
                                 str(self._rots),
                                 str(self._invs)]) + ' ]'
    __str__ = __repr__
################################################################
#
# Helpers function for rotation
# used in the Rotator class.
def rotateVector(rotmat, vec, vy=None, vz=None, do_rot=True):
    """Apply the 3x3 rotation matrix *rotmat* to a vector (or vectors).

    Either pass the full vector as *vec* (where vec[0], vec[1], vec[2] may
    themselves be arrays), or pass the three components separately as
    *vec*, *vy*, *vz*.  When *do_rot* is False the input is returned
    untouched.  Returns the rotated components.
    """
    packed = vy is None and vz is None
    if not packed and (vy is None or vz is None):
        raise TypeError("You must give either vec only or vec, vy "
                        "and vz parameters")
    if packed:
        return npy.tensordot(rotmat, vec, axes=(1, 0)) if do_rot else vec
    if do_rot:
        return npy.tensordot(rotmat, npy.array([vec, vy, vz]), axes=(1, 0))
    return vec, vy, vz
def rotateDirection(rotmat, theta, phi=None, do_rot=True, lonlat=False):
    """Rotate the direction pointed to by (theta, phi) with *rotmat*.

    theta/phi may be scalars or arrays (or a single 2-sequence passed as
    *theta*); with lonlat=True they are longitude/latitude in degrees.
    When *do_rot* is False, no rotation is applied.  Returns the rotated
    direction in the same angular convention as the input.
    """
    unit_vec = dir2vec(theta, phi, lonlat=lonlat)
    vx, vy, vz = rotateVector(rotmat, unit_vec, do_rot=do_rot)
    return vec2dir(vx, vy, vz, lonlat=lonlat)
def vec2dir(vec, vy=None, vz=None, lonlat=False):
    """Convert a cartesian vector into a direction.

    Accepts either a 3-sequence *vec* or three separate components
    (*vec*, *vy*, *vz*).  Returns [theta, phi] in radians, or
    [lon, lat] in degrees when *lonlat* is True.
    """
    if vy is None and vz is None:
        vx, vy, vz = vec
    elif vy is not None and vz is not None:
        vx = vec
    else:
        raise TypeError("You must either give both vy and vz or none of them")
    norm = npy.sqrt(vx ** 2 + vy ** 2 + vz ** 2)
    colat = npy.arccos(vz / norm)     # polar angle theta
    azimuth = npy.arctan2(vy, vx)     # azimuthal angle phi
    if lonlat:
        # Degrees: longitude = phi, latitude = 90 - theta.
        return npy.asarray([azimuth * 180 / npy.pi, 90 - colat * 180 / npy.pi])
    return npy.asarray([colat, azimuth])
def dir2vec(theta, phi=None, lonlat=False):
    """Convert a direction into a cartesian unit vector.

    *theta*, *phi* are in radians, or a (lon, lat) pair in degrees when
    *lonlat* is True; a single 2-sequence may also be passed as *theta*.
    """
    if phi is None:
        theta, phi = theta
    if lonlat:
        # Convert lon/lat in degrees to colatitude/azimuth in radians.
        lon, lat = theta, phi
        theta = npy.pi / 2. - lat * npy.pi / 180
        phi = lon * npy.pi / 180
    sin_theta = npy.sin(theta)
    return npy.asarray([sin_theta * npy.cos(phi),
                        sin_theta * npy.sin(phi),
                        npy.cos(theta)])
def angdist(dir1, dir2, lonlat=False):
    """Return the angular distance (radians) between dir1 and dir2.

    Each of dir1/dir2 is either a (theta, phi) pair (lon/lat in degrees
    when the corresponding lonlat flag is set) or a 3-component cartesian
    vector.  *lonlat* may be one flag for both, or a pair of flags for
    dir1 and dir2 respectively.
    """
    if hasattr(lonlat, '__len__') and len(lonlat) == 2:
        lonlat1, lonlat2 = lonlat
    else:
        lonlat1 = lonlat2 = lonlat
    if len(dir1) == 2:  # theta,phi or lonlat, convert to vec
        vec1 = npy.asarray(dir2vec(dir1, lonlat=lonlat1))
    else:
        vec1 = npy.asarray(dir1)
    if vec1.ndim == 1:
        vec1 = npy.expand_dims(vec1, -1)
    if len(dir2) == 2:
        # BUGFIX: dir2 must be interpreted with its own flag (was lonlat1,
        # so a mixed lonlat pair silently mis-read the second direction).
        # NOTE(review): the trailing .T (absent on vec1) looks asymmetric
        # for array inputs -- confirm intended broadcasting.
        vec2 = npy.asarray(dir2vec(dir2, lonlat=lonlat2)).T
    else:
        vec2 = npy.asarray(dir2)
    if vec2.ndim == 1:
        vec2 = npy.expand_dims(vec2, -1)
    # Scalar product of the unit vectors; clip guards arccos against
    # rounding noise just outside [-1, 1], which would otherwise yield NaN.
    pscal = (vec1 * vec2).sum(axis=0)
    return npy.arccos(npy.clip(pscal, -1.0, 1.0))
#######################################################
#
# Manage the coord system conventions
#
def check_coord(c):
    """Validate a coordinate-system name and return its canonical letter.

    Returns None for None, otherwise 'G', 'E' or 'C'.  Raises TypeError
    for non-string input and ValueError for an unrecognised system name.
    ('Equatorial' is treated as Celestial, not Ecliptic.)
    """
    if c is None:
        return c
    if type(c) is not str:
        raise TypeError('Coordinate must be a string (G[alactic],'
                        ' E[cliptic], C[elestial]'
                        ' or Equatorial=Celestial)')
    initial = c[0].upper()
    if initial == 'G':
        return 'G'
    if initial == 'E' and c != 'Equatorial':
        return 'E'
    if initial == 'C' or c == 'Equatorial':
        return 'C'
    raise ValueError('Wrong coordinate (either G[alactic],'
                     ' E[cliptic], C[elestial]'
                     ' or Equatorial=Celestial)')
def normalise_coord(coord):
    """Normalise *coord* into a pair of canonical coord-system letters.

    Accepts None, a single system name, or a sequence of up to two names,
    and returns a 2-tuple of results from check_coord, e.g.
    'E' -> ('E', 'E'), ['Ecliptic', 'G'] -> ('E', 'G'),
    None -> (None, None).
    """
    if coord is None:
        coord = (None, None)
    coord = tuple(coord)
    if len(coord) > 2:
        raise TypeError('Coordinate must be a string (G[alactic],'
                        ' E[cliptic] or C[elestial])'
                        ' or a sequence of 2 strings')
    normalised = [check_coord(x) for x in coord]
    if len(normalised) < 2:
        # A single system means "from itself to itself".
        normalised.append(normalised[0])
    return tuple(normalised)
def normalise_rot(rot,deg=False):
    """Return *rot* as a length-3 float array of radians.

    None maps to [0, 0, 0]; shorter inputs are zero-padded on the right.
    When *deg* is true the input is interpreted as degrees and converted.
    """
    if rot is None:
        return npy.zeros(3)
    scale = npy.pi / 180. if deg else 1.
    out = npy.array(rot, npy.float64).flatten() * scale
    # pad (or truncate) to exactly three Euler angles
    out.resize(3)
    return out
def get_rotation_matrix(rot, deg=False, eulertype='ZYX'):
    """Return (matrot, do_rot, normrot) for the Euler angles in *rot*.

    matrot is the 3x3 rotation matrix, do_rot tells whether it differs
    from the identity, normrot is the normalised angle triple (radians).
    """
    rot = normalise_rot(rot, deg=deg)
    # identity rotation iff all three angles are (numerically) zero
    do_rot = not npy.allclose(rot, npy.zeros(3), rtol=0., atol=1.e-15)
    if eulertype == 'X':
        matrot = euler_matrix_new(rot[0], -rot[1], rot[2], X=True)
    elif eulertype == 'Y':
        matrot = euler_matrix_new(rot[0], -rot[1], rot[2], Y=True)
    else:
        matrot = euler_matrix_new(rot[0], -rot[1], rot[2], ZYX=True)
    return matrot, do_rot, rot
def get_coordconv_matrix(coord):
    """Return (matconv, do_conv, normcoord) for a coord-system change.

    matconv is the Euler matrix converting from coord[0] to coord[1],
    do_conv is False when the two systems are identical, normcoord is
    the normalised (from, to) pair.
    History: adapted from CGIS IDL library.
    """
    coord_norm = normalise_coord(coord)
    if coord_norm[0] == coord_norm[1]:
        return npy.identity(3), False, coord_norm
    # obliquity of the ecliptic (degrees), converted to radians
    eps = 23.452294 - 0.0130125 - 1.63889E-6 + 5.02778E-7
    eps = eps * npy.pi / 180.
    # ecliptic to galactic
    e2g = npy.array([[-0.054882486, -0.993821033, -0.096476249],
                     [ 0.494116468, -0.110993846,  0.862281440],
                     [-0.867661702, -0.000346354,  0.497154957]])
    # ecliptic to equatorial
    e2q = npy.array([[1.,     0.    ,      0.         ],
                     [0., npy.cos( eps ), -1. * npy.sin( eps )],
                     [0., npy.sin( eps ),    npy.cos( eps )   ]])
    # the remaining conversions follow by inversion and composition
    g2e = npy.linalg.inv(e2g)          # galactic to ecliptic
    g2q = npy.dot(e2q , g2e)           # galactic to equatorial
    q2e = npy.linalg.inv(e2q)          # equatorial to ecliptic
    q2g = npy.dot(e2g , q2e)           # equatorial to galactic
    conversions = {('E','G'): e2g, ('G','E'): g2e,
                   ('E','C'): e2q, ('C','E'): q2e,
                   ('C','G'): q2g, ('G','C'): g2q}
    try:
        matconv = conversions[coord_norm]
    except KeyError:
        raise ValueError('Wrong coord transform :',coord_norm)
    return matconv, True, coord_norm
###################################################
## ##
## euler functions ##
## ##
###### #######
def euler(ai, bi, select, FK4 = 0):
    """
    NAME:
        euler
    PURPOSE:
        Transform between Galactic, celestial, and ecliptic coordinates.
    EXPLANATION:
        Use the procedure ASTRO to use this routine interactively
    CALLING SEQUENCE:
        EULER, AI, BI, AO, BO, [ SELECT, /FK4, SELECT = ]
    INPUTS:
        AI - Input Longitude in DEGREES, scalar or vector.  If only two
            parameters are supplied, then  AI and BI will be modified
            to contain the output longitude and latitude.
        BI - Input Latitude in DEGREES
    OPTIONAL INPUT:
        SELECT - Integer (1-6) specifying type of coordinate
            transformation.
        SELECT   From          To        |   SELECT   From       To
        1     RA-Dec (2000)  Galactic   |     4     Ecliptic   RA-Dec
        2     Galactic       RA-DEC     |     5     Ecliptic   Galactic
        3     RA-Dec         Ecliptic   |     6     Galactic   Ecliptic
        If not supplied as a parameter or keyword, then EULER will prompt
        for the value of SELECT
        Celestial coordinates (RA, Dec) should be given in equinox J2000
        unless the /FK4 keyword is set.
    OUTPUTS:
        AO - Output Longitude in DEGREES
        BO - Output Latitude in DEGREES
    INPUT KEYWORD:
        /FK4 - If this keyword is set and non-zero, then input and output
            celestial and ecliptic coordinates should be given in
            equinox B1950.
        /SELECT  - The coordinate conversion integer (1-6) may
            alternatively be specified as a keyword
    NOTES:
        EULER was changed in December 1998 to use J2000 coordinates as the
        default, ** and may be incompatible with earlier versions***.
    REVISION HISTORY:
        Written W. Landsman,  February 1987
        Adapted from Fortran by Daryl Yentis NRL
        Converted to IDL V5.0   W. Landsman   September 1997
        Made J2000 the default, added /FK4 keyword
        W. Landsman December 1998
        Add option to specify SELECT as a keyword W. Landsman March 2003
        Converted to python by K. Ganga December 2007
    """
    # Original IDL argument-count check, kept as a historical note:
    # npar = N_params()
    # if npar LT 2 then begin
    #    print,'Syntax - EULER, AI, BI, A0, B0, [ SELECT, /FK4, SELECT= ]'
    #    print,'    AI,BI - Input longitude,latitude in degrees'
    #    print,'    AO,BO - Output longitude, latitude in degrees'
    #    print,'    SELECT - Scalar (1-6) specifying transformation type'
    #    return
    # endif
    PI = npy.pi
    twopi = 2.0*PI
    fourpi = 4.0*PI
    # NOTE(review): despite its name this is the rad->deg factor (180/pi);
    # the code divides by it to go deg->rad and multiplies to go rad->deg.
    deg_to_rad = 180.0/PI
    #
    # ;   J2000 coordinate conversions are based on the following constants
    # ;   (see the Hipparcos explanatory supplement).
    # ;  eps = 23.4392911111 # Obliquity of the ecliptic
    # ;  alphaG = 192.85948d           Right Ascension of Galactic North Pole
    # ;  deltaG = 27.12825d            Declination of Galactic North Pole
    # ;  lomega = 32.93192d            Galactic longitude of celestial equator
    # ;  alphaE = 180.02322d           Ecliptic longitude of Galactic North Pole
    # ;  deltaE = 29.811438523d        Ecliptic latitude of Galactic North Pole
    # ;  Eomega = 6.3839743d           Galactic longitude of ecliptic equator
    #
    # Rotation constants: one entry per SELECT value (1-6), indexed by i below.
    if FK4 == 1:
        equinox = '(B1950)'
        psi = [ 0.57595865315, 4.9261918136,
                0.00000000000, 0.0000000000,
                0.11129056012, 4.7005372834]
        stheta =[ 0.88781538514,-0.88781538514,
                0.39788119938,-0.39788119938,
                0.86766174755,-0.86766174755]
        ctheta =[ 0.46019978478, 0.46019978478,
                0.91743694670, 0.91743694670,
                0.49715499774, 0.49715499774]
        phi = [ 4.9261918136, 0.57595865315,
                0.0000000000, 0.00000000000,
                4.7005372834, 0.11129056012]
    else:
        equinox = '(J2000)'
        psi = [ 0.57477043300, 4.9368292465,
                0.00000000000, 0.0000000000,
                0.11142137093, 4.71279419371]
        stheta =[ 0.88998808748,-0.88998808748,
                0.39777715593,-0.39777715593,
                0.86766622025,-0.86766622025]
        ctheta =[ 0.45598377618, 0.45598377618,
                0.91748206207, 0.91748206207,
                0.49714719172, 0.49714719172]
        phi = [ 4.9368292465, 0.57477043300,
                0.0000000000, 0.00000000000,
                4.71279419371, 0.11142137093]
    #
    i = select - 1 # IDL offset
    # convert inputs to radians and apply the longitude offset
    a = ai/deg_to_rad - phi[i]
    b = bi/deg_to_rad
    sb = npy.sin(b)
    cb = npy.cos(b)
    cbsa = cb * npy.sin(a)
    b = -stheta[i] * cbsa + ctheta[i] * sb
    #bo = math.asin(where(b<1.0, b, 1.0)*deg_to_rad)
    bo = npy.arcsin(b)*deg_to_rad
    #
    a = npy.arctan2( ctheta[i] * cbsa + stheta[i] * sb, cb * npy.cos(a) )
    # fold the output longitude into [0, 360) degrees
    ao = npy.fmod( (a+psi[i]+fourpi), twopi) * deg_to_rad
    return ao, bo
def euler_matrix_new(a1,a2,a3,X=True,Y=False,ZYX=False,deg=False):
    """
    NAME:
        euler_matrix_new
    PURPOSE:
        computes the Euler matrix of an arbitrary rotation described
        by 3 Euler angles
        correct bugs present in Euler_Matrix
    CALLING SEQUENCE:
        result = euler_matrix_new (a1, a2, a3  [,X, Y, ZYX, DEG ])
    INPUTS:
        a1, a2, a3 = Euler angles, scalar
        (in radian by default, in degree if DEG is set)
        all the angles are measured counterclockwise
        correspond to x, y, zyx-conventions (see Goldstein)
        the default is x
    KEYWORD PARAMETERS:
        DEG : if set the angle are measured in degree
        X :       rotation a1 around original Z
                rotation a2 around interm   X
                rotation a3 around final    Z
                DEFAULT,  classical mechanics convention
        Y :       rotation a1 around original Z
                rotation a2 around interm   Y
                rotation a3 around final    Z
                quantum mechanics convention (override X)
        ZYX :     rotation a1 around original Z
                rotation a2 around interm   Y
                rotation a3 around final    X
                aeronautics convention (override X)
        * these last three keywords are obviously mutually exclusive *
    OUTPUTS:
        result is a 3x3 matrix
    USAGE:
        if vec is an Nx3 array containing N 3D vectors,
        vec # euler_matrix_new(a1,a2,a3,/Y) will be the rotated vectors
    MODIFICATION HISTORY:
        March 2002, EH, Caltech, rewritting of euler_matrix
        convention   euler_matrix_new           euler_matrix
        X:       M_new(a,b,c,/X)  =  M_old(-a,-b,-c,/X) = Transpose( M_old(c, b, a,/X))
        Y:       M_new(a,b,c,/Y)  =  M_old(-a, b,-c,/Y) = Transpose( M_old(c,-b, a,/Y))
        ZYX:     M_new(a,b,c,/Z)  =  M_old(-a, b,-c,/Z)
    """
    # Reject combinations of conventions; X is the default, so only the
    # two overriding keywords are counted.
    t_k = 0
    if ZYX: t_k = t_k + 1
    #if X:   t_k = t_k + 1
    if Y:   t_k = t_k + 1
    if t_k > 1:
        raise ValueError('Choose either X, Y or ZYX convention')
    # degree -> radian conversion factor (1.0 when input already radians)
    convert = 1.0
    if deg:
        convert = npy.pi/180.
    c1 = npy.cos(a1*convert)
    s1 = npy.sin(a1*convert)
    c2 = npy.cos(a2*convert)
    s2 = npy.sin(a2*convert)
    c3 = npy.cos(a3*convert)
    s3 = npy.sin(a3*convert)
    # Build the three elementary rotations for the chosen convention.
    if ZYX:
        m1 = npy.array([[ c1,-s1, 0],
                        [ s1, c1, 0],
                        [ 0, 0, 1]]) # around z
        m2 = npy.array([[ c2, 0, s2],
                        [ 0, 1, 0],
                        [-s2, 0, c2]]) # around y
        m3 = npy.array([[ 1, 0, 0],
                        [ 0, c3,-s3],
                        [ 0, s3, c3]]) # around x
    elif Y:
        m1 = npy.array([[ c1,-s1, 0],
                        [ s1, c1, 0],
                        [ 0, 0, 1]]) # around z
        m2 = npy.array([[ c2, 0, s2],
                        [ 0, 1, 0],
                        [-s2, 0, c2]]) # around y
        m3 = npy.array([[ c3,-s3, 0],
                        [ s3, c3, 0],
                        [ 0, 0, 1]]) # around z
    else:
        m1 = npy.array([[ c1,-s1, 0],
                        [ s1, c1, 0],
                        [ 0, 0, 1]]) # around z
        m2 = npy.array([[ 1, 0, 0],
                        [ 0, c2,-s2],
                        [ 0, s2, c2]]) # around x
        m3 = npy.array([[ c3,-s3, 0],
                        [ s3, c3, 0],
                        [ 0, 0, 1]]) # around z
    # Compose: transposes reverse each elementary rotation's sense, giving
    # the "new" convention described in the docstring.
    M = npy.dot(m3.T,npy.dot(m2.T,m1.T))
    return M
| true |
bdcfe91f87f75643c538b14f79ab700ccd3e8231 | Python | lynardsalingujay/myevo_odoopython | /svn_myevo/trunk/chicken_api/json/replay.py | UTF-8 | 360 | 2.515625 | 3 | [] | no_license | import requests
import sys
def getFileContent(pathAndFileName):
    """Return the entire text content of the file at *pathAndFileName*."""
    with open(pathAndFileName, 'r') as source:
        return source.read()
def main():
    """POST the payload in the file named by argv[1] to the order API."""
    URL = "http://localhost:5202/chicken/api/order/create"
    data = getFileContent(sys.argv[1])
    r = requests.post(url=URL, data=data)
    # BUG FIX: "print r.text" is Python-2-only syntax (SyntaxError on
    # Python 3); the function form works identically on both versions.
    print(r.text)
# Entry point: replay the request payload given on the command line.
if __name__ == '__main__':
    main()
| true |
ab4b697ada072459918f2b570ae1ac24a596cbad | Python | AneliyaPPetkova/Programming | /Python/5.ModulesAndTime/2.MostProfitableDate.py | UTF-8 | 800 | 3.703125 | 4 | [] | no_license | """ Find the most profitable date from a file with data
"""
from datetime import datetime
from datetime import date
FILENAME = './CommonResources/sales.csv'
# Aggregate profit per calendar day, then report the best day.
sales = {}
with open(FILENAME) as f:
    for line in f:
        sale = line.strip().split(",")
        # first column is "YYYY-mm-dd HH:MM:SS"; key by the calendar day
        salesDate = datetime.strptime(sale[0], '%Y-%m-%d %H:%M:%S').date()
        key = '{}'.format(salesDate)
        sales[key] = sales.get(key, 0.0) + float(sale[1])

if sales:
    # BUG FIX: the original initialised maxProfit to 0.0 and the date to
    # today, so it reported today's date whenever every daily total was
    # non-positive; max() always picks the real best (first-seen on ties,
    # matching the original tie-breaking).
    dateWithMaxSales = max(sales, key=sales.get)
    maxProfit = sales[dateWithMaxSales]
else:
    # empty input file: keep the original fallback output
    maxProfit = 0.0
    dateWithMaxSales = date.today()

print("The most profitable day is {} with {} profit.".format(dateWithMaxSales, maxProfit))
| true |
357c23c386f0710f63539bc76fc94bda83bee50a | Python | smraus/python | /43.py | UTF-8 | 1,387 | 2.78125 | 3 | [] | no_license |
# 43-what is machine learning
# 1-import data
# 2-clean data
# 3-split the data into training/test sets
# 4-create a model
# 5-train the model
# 6-make predictions
# 7-evaluate and improve the predictions
# ------------------------------------------
# 44-machine learning libraries and tools
# 1-numpy
# 2-pandas
# 3-Matplotlib
# 4-scikit-learn
# -------------------------------------------
# 45-importing data set
# --------------------------------------------
# 46-jupyter shortcut
# ------------------------------------------------
# 47-A real problem
# music problems
# ------------------------------------------------
# 48- import data
# .............................................
# 49-prepare/clean data
# We don't have any duplicates or null values to clean, so we split the data into two columns: input and output
# ...............................................
# 50-learning and predicting
# .................................................
# 51-calculating and accuracy of model
# ......................................
# 52-persisting model
# we don't want to retrain our model every time a new user is added
# ......training a model is a really lengthy process
# ......that is why we persist the model: train it once, then load and reuse it
# .........................................................
# 53-visualizing a decision tree
| true |
007179df592f22e3b4db5cfccd820446655c66a7 | Python | odubno/algorithms | /stack_queue_deque/queue.py | UTF-8 | 602 | 3.9375 | 4 | [] | no_license | # An ordered collection of items.
# New items are added at one end -> the "rear"
# Items are removed from the other end -> the "front"
# FIFO, first-in first-out, or "first-come first-served"
# The item that has been in the queue the longest is in the front
class Queue:
    """FIFO queue backed by a plain Python list.

    The rear of the queue is index 0 of the backing list; the front of
    the queue is the last list element, so dequeue pops from the end.
    """

    def __init__(self):
        self.items = []

    def enqueue(self, item):
        # new items go in at the rear (index 0)
        self.items.insert(0, item)

    def dequeue(self):
        # the oldest item sits at the end of the list
        return self.items.pop()

    def size(self):
        return len(self.items)

    def isEmpty(self):
        return len(self.items) == 0
# Ad-hoc entry point: the queue is instantiated but not exercised.
if __name__ == '__main__':
    s = Queue()
    pass
1c5c6e8e529ef6ad77c61303d93f6567e67fce60 | Python | AvinashSingh1996/Vaibhav_Codes | /Practise_Codes/printing_citizend.py | UTF-8 | 826 | 3.6875 | 4 | [] | no_license | class Citizen:
def __init__(self, state, men, women, children):
self.state = state
self.men = men
self.women = women
self.children = children
def show(self):
print("State =>",self.state)
print(" Men =>",self.men)
print("Women =>",self.women)
print("Children =>",self.children)
def __str__(self):
return f"{self.state} {self.men}{self.women} {self.children}"
# Sample passenger records; s5 is a header row rendered via the same __str__.
s1 = Citizen('Bihar', 23500, 17237, 5927)
s2 = Citizen('Orissa', 23658, 24555, 2364)
s3 = Citizen('U P', 36517, 22617, 6314)
s4 = Citizen('Jharkhand', 23254, 19845, 1336)
s5 = Citizen('State', 'Men', ' Women', 'Children')
# BUG FIX: the original bound this list to the name "Citizen", shadowing
# the class itself and making it unusable afterwards; renamed.
citizens = [s1, s2, s3, s4, s5]
print('By Train \n ------- \n')
print(s5, '\n', s1, '\n', s2)
print('By Bus \n ------- \n')
print(s5, '\n', s3, '\n', s4)
| true |
f09b79d3cc708a7e92a82b1465f9df3f8e3fc382 | Python | darkyvro1/REXA | /pokebrbot.py | UTF-8 | 18,178 | 2.625 | 3 | [] | no_license | import time
import random
import telepot
import pickle
import schedule
import PokeFight
import _thread
"""
PokeBot versão beta
Bot criado com o propósito de aprender programação.
possui alguns comandos simples e uma batalha.
"""
# Pre-built pokemon database (name, species, types, sticker id, ...),
# pickled by a separate preparation step.
pokelist = pickle.load(open("pokelist2.p", "rb"))
# Active battles keyed by the group chat id; key 0 is a placeholder.
battledic = {}
battledic[0] = PokeFight.Battle(0)
def handle(msg):
    """Dispatch one incoming Telegram chat message.

    Routes the admin commands (/shutdown0709, /resetdoudaralho0709), the
    public commands (/ajuda, /pokedex, /ranking, /meurank, /batalhar,
    /start, /faq) and a reply when the bot is mentioned by name.  Battle
    registration mutates the module-level ``battledic``.
    """
    global battledic
    msg_id = msg['message_id']
    content_type, chat_type, chat_id = telepot.glance(msg)
    nome_usuario = msg['from']['first_name']
    user_id = msg['from']['id']
    bot_name = str.lower(bot.getMe()['username'])
    # This check stops the bot from answering messages that were sent
    # before it was started (timestamp is captured at module load).
    if timestamp >= msg['date']:
        return
    if content_type == 'text':
        mensagem = str.lower(msg['text'])
        print('%s: "%s"' % (nome_usuario, mensagem))
        if mensagem == '/shutdown0709':
            # admin kill switch: the module-level loop exits when 0
            global off_switch
            off_switch = 0
        if mensagem == '/ajuda':
            bot.sendMessage(chat_id, 'Olá, eu sou o PokebrBOT, o Bot de batalhas pokemon! \n'
                                     'Meus principais comandos são:'
                                     '\n /pokedex retorna um Pokemon aleatório, e uma breve descrição (em inglês).'
                                     '\n /batalhar inicia uma batalha pokemon! (apenas em grupos)'
                                     '\n /ranking exibe o rank global da atual temporada de batalhas pokemon.'
                                     '\n /meurank exibe estatísticas do seu próprio desempenho.'
                                     '\n /faq mostra perguntas mais frequentes sobre o funcionamento do BOT'
                                     '(apenas em privado).'
                                     '\n outros comandos serão adicionados posteriormente :)')
        # reply when mentioned by any known nickname (non-command text only)
        elif mensagem[0] != '/' and any(i in mensagem for i in ('RameloBOT', 'ramelobot', 'RAMELOBOT', 'RameloBot',
                                                                'Blitzcrank', 'bot do ramelo', 'ramelo bot')):
            bot.sendMessage(chat_id, "Oi %s, me chamou? Digite /ajuda para saber meus comandos." % nome_usuario,
                            reply_to_message_id=msg_id)
        elif mensagem == '/pokedex' or mensagem == '/pokedex@%s' % bot_name:
            responder_poke = pokemon(random.randrange(251))
            bot.sendSticker(chat_id, responder_poke[0], reply_to_message_id=msg_id)
            bot.sendMessage(chat_id, responder_poke[1], )
        elif mensagem == '/ranking' or mensagem == '/ranking@%s' % bot_name:
            bot.sendMessage(chat_id, 'Ranking da segunda temporada de batalhas Pokemon:\n%s' % PokeFight.rank())
        elif mensagem == '/resetdoudaralho0709':
            # admin command: wipe the ranking for a new season
            PokeFight.restart()
        # NOTE(review): the second comparison uses '/ranking@...'; it was
        # probably meant to be '/meurank@...' -- confirm before changing.
        elif mensagem == '/meurank' or mensagem == '/ranking@%s' % bot_name:
            try:
                bot.sendMessage(chat_id, 'Sua posição no rank da segunda temporada de batalhas Pokemon:\n%s'
                                % PokeFight.myrank(user_id))
            except IndexError:
                bot.sendMessage(chat_id, 'Você ainda não está classificado no rank das batalhas.')
        elif mensagem == '/batalhar' or mensagem == '/batalhar@%s' % bot_name:
            # battles only run inside groups; one Battle object per chat
            if chat_type == 'group':
                try:
                    if chat_id not in battledic:
                        battledic[chat_id] = PokeFight.Battle(chat_id)
                    battle = battledic[chat_id]
                    # refuse registration if the user is already in a battle
                    for i in battledic:
                        if user_id in battledic[i].p1 or user_id in battledic[i].p2:
                            bot.sendMessage(user_id, 'Você já está registrado para uma batalha, '
                                                     'aguarde enquanto a anterior não termina.')
                            return
                    if battle.pcount == 2:
                        bot.sendMessage(user_id, 'No momento, %s está batalhando contra %s.\n'
                                                 'Aguarde enquanto a batalha não acaba.' % (
                                                    battle.p1[0], battle.p2[0]))
                        return
                    if battle.pcount == 0:
                        # first player: open the challenge and arm the timeout
                        bot.sendMessage(user_id, 'Você se cadastrou para batalha, aguarde um oponente.')
                        battle.timer += 1
                        _thread.start_new_thread(timeout, (chat_id,))
                        battle.team1 = PokeFight.genteam()
                        bot.sendMessage(chat_id,
                                        'Desafio lançado! digite /batalhar para aceitar o desafio de %s' % nome_usuario)
                        battle.pcount += 1
                        battle.p1 = (nome_usuario, user_id)
                    elif battle.pcount == 1:
                        # second player: accept, deal teams, start picking
                        bot.sendMessage(user_id, 'Você aceitou o desafio!')
                        battle.p2 = (nome_usuario, user_id)
                        battle.team2 = PokeFight.genteam()
                        bot.sendMessage(battle.p2[1], 'Esta batalha é uma melhor de 3. Escolha seu primeiro pokemon:',
                                        reply_markup=PokeFight.poke_keyboard3(battle.team2))
                        bot.sendMessage(battle.p1[1], 'Esta batalha é uma melhor de 3. Escolha seu primeiro pokemon:',
                                        reply_markup=PokeFight.poke_keyboard3(battle.team1))
                        bot.sendMessage(chat_id, 'Desafio aceito! %s e %s agora estão batalhando.' % (
                            battle.p1[0], nome_usuario))
                        battle.pcount += 1
                        battle.timer -= 1
                except telepot.exception.TelegramError:
                    # sendMessage to the private chat fails until the user
                    # has /start-ed the bot in private
                    bot.sendMessage(chat_id, 'Você ainda não possui registro, envie o comando /start para mim em um'
                                             'chat particular (@%s <-- clique aqui) para cadastrar-se' % bot_name)
                    return
        elif mensagem == '/start' or mensagem == '/start@%s' % bot_name:
            if chat_type == 'group':
                bot.sendMessage(chat_id, 'Para se cadastrar para as batalhas, envie o comando diretamente para mim em'
                                         ' um chat particular! Clique em @%s e envie /start' % bot_name),
            if chat_type == 'private':
                bot.sendMessage(chat_id, 'Você agora está cadastrado para as batalhas pokemon, as batalhas funcionam '
                                         'somente em grupos em que o bot esteja adicionado.\nDigite /ajuda para '
                                         'conhecer os comandos do bot, ou /faq para as perguntas mais frequentes.')
        if chat_type == 'private':
            if mensagem == '/faq' or mensagem == '/faq@%s' % bot_name:
                bot.sendMessage(user_id, '<strong>O que é o @PokebrBot?</strong>\n'
                                         'Esse bot foi criado como um projeto pessoal, com o objetivo de estudar '
                                         'programação. Quis fazer uma aplicação que fosse complexa o suficiente para '
                                         'representar um desafio para mim, e que fosse divertido de alguma forma. O bot'
                                         ' foi desenvolvido inteiramente em Python, utilizando a '
                                         '<a href="https://pokeapi.co/">PokeAPI</a>, e o '
                                         '<a href="https://github.com/nickoala/telepot">Telepot</a>.'
                                         '\n\n<strong>Como são definidas as batalhas?</strong>\n'
                                         'Os times são gerados aleatoriamente, sem modificadores, as batalhas em si'
                                         ' levam em conta a soma dos status base de cada Pokemon que são somados a um'
                                         ' modificador semi-aleatório baseado nas suas fraquezas e resistências. '
                                         'Basicamente pokemons mais fortes ganham de mais fracos na maior parte das'
                                         ' vezes, mas ainda há um valor aleatório nos combates.'
                                         '\n\n<strong>Por que a pokedex está em inglês?</strong>\n'
                                         'A database do bot foi criada utilizando a '
                                         '<a href="https://pokeapi.co/">PokeAPI</a>, que está em inglês. Traduzir tudo '
                                         'levaria tempo demais\n\n<strong>Por que o bot só tem 251 Pokemons?</strong>\n'
                                         'Basicamente por três motivos: Primeiro, há uma limitação pois não encontrei '
                                         'stickers para pokemons até a última geração. Segundo, preferi um numero mais '
                                         'limitado de opções para os jogadores, e por fim, por questão de gosto mesmo'
                                         '\n\n<strong>Encontrei um Erro / Bug, ou tenho uma sugestão!</strong>\n'
                                         'Qualquer erro (mesmo que de gramática) que encontrar pode enviar direto para '
                                         'mim @phellpss, também aceito sugestões e reclamações para futuras alterações'
                                         ' no bot. Lembrando que é apenas um projeto pessoal, sem grandes pretenções ;)'
                                , disable_web_page_preview=True, parse_mode='HTML')
# Helper that returns a sticker and the description of the related pokemon.
def pokemon(n):
    """Return (sticker_id, description) for entry *n* of ``pokelist``."""
    entry = pokelist[n]
    types = entry['types']
    if len(types) == 1:
        texto = ("You've got %s! The %s Pokémon, its type is %s and his pokédex number is %s"
                 % (entry['name'], entry['species'], types[0], entry['national_id']))
    else:
        texto = ("You've got %s! The %s Pokémon, its types are %s and %s, and his national pokédex number is %s"
                 % (entry['name'], entry['species'], types[0], types[1], entry['national_id']))
    return entry['sticker'], texto
def pokebat(chat_id):
    """Resolve one round of the best-of-3 battle running in *chat_id*.

    Does nothing until both players have picked (battlechoice != -1).
    Fights the two picks, updates scores and, when someone reaches two
    wins, records the result in the season ranking and resets the battle.
    """
    battle = battledic[chat_id]
    poke1 = battle.battlechoicep1
    poke2 = battle.battlechoicep2
    # NOTE: p1score/p2score are snapshots taken BEFORE this round; the
    # post-round check below reads the updated battle.p1score/p2score.
    p1score = battle.p1score
    p2score = battle.p2score
    if poke1 == -1 or poke2 == -1:
        return
    elif p1score != 2 and p2score != 2:
        result = PokeFight.type_fight(poke1, poke2)
        # consume the picks so they cannot be reused next round
        battle.team1.remove(poke1)
        battle.team2.remove(poke2)
        battle.battlechoicep1 = -1
        battle.battlechoicep2 = -1
        if result == 'win1':
            battle.p1score += 1
            bot.sendSticker(battle.p1[1], pokelist[poke2]['sticker'])
            bot.sendMessage(battle.p1[1],
                            'Seu oponente escolheu %s, e agora eles estão batalhando!' % pokelist[poke2]['name'])
            bot.sendSticker(battle.p2[1], pokelist[poke1]['sticker'])
            bot.sendMessage(battle.p2[1],
                            'Seu oponente escolheu %s, e agora eles estão batalhando!' % pokelist[poke1]['name'])
            # short dramatic pause before announcing the outcome
            time.sleep(4)
            bot.sendMessage(battle.p1[1],
                            'Seu %s venceu a batalha contra %s' % (pokelist[poke1]['name'], pokelist[poke2]['name']))
            bot.sendMessage(battle.p2[1],
                            'Seu %s perdeu a batalha contra %s' % (pokelist[poke2]['name'], pokelist[poke1]['name']))
        if result == 'win2':
            battle.p2score += 1
            bot.sendSticker(battle.p2[1], pokelist[poke1]['sticker'])
            bot.sendMessage(battle.p2[1],
                            'Seu oponente escolheu %s, e agora eles estão batalhando!' % pokelist[poke1]['name'])
            bot.sendSticker(battle.p1[1], pokelist[poke2]['sticker'])
            bot.sendMessage(battle.p1[1],
                            'Seu oponente escolheu %s, e agora eles estão batalhando!' % pokelist[poke2]['name'])
            time.sleep(4)
            bot.sendMessage(battle.p2[1],
                            'Seu %s venceu a batalha contra %s' % (pokelist[poke2]['name'], pokelist[poke1]['name']))
            bot.sendMessage(battle.p1[1],
                            'Seu %s perdeu a batalha contra %s' % (pokelist[poke1]['name'], pokelist[poke2]['name']))
    # someone reached two wins this round: settle the match
    if battle.p1score == 2 or battle.p2score == 2:
        # make sure both players have a ranking entry for the season
        if battle.p1[1] not in PokeFight.ranking['season2']:
            PokeFight.ranking['season2'][battle.p1[1]] = {}
            PokeFight.ranking['season2'][battle.p1[1]]['nome'] = battle.p1[0]
            PokeFight.ranking['season2'][battle.p1[1]]['vitorias'] = 0
            PokeFight.ranking['season2'][battle.p1[1]]['derrotas'] = 0
            PokeFight.ranking['season2'][battle.p1[1]]['ELO'] = 50
        if battle.p2[1] not in PokeFight.ranking['season2']:
            PokeFight.ranking['season2'][battle.p2[1]] = {}
            PokeFight.ranking['season2'][battle.p2[1]]['nome'] = battle.p2[0]
            PokeFight.ranking['season2'][battle.p2[1]]['vitorias'] = 0
            PokeFight.ranking['season2'][battle.p2[1]]['derrotas'] = 0
            PokeFight.ranking['season2'][battle.p2[1]]['ELO'] = 50
        if battle.p1score == 2:
            bot.sendMessage(battle.p1[1], 'Parabéns! você venceu a batalha :)')
            bot.sendMessage(battle.p2[1], 'Você perdeu esta batalha, mais sorte na próxima.')
            bot.sendMessage(battle.mainchat,
                            'A batalha terminou! %s venceu.\nDigite /batalhar para ser o próximo.' % battle.p1[0])
            if battle.p1[1] in PokeFight.ranking['season2']:
                PokeFight.ranking['season2'][battle.p1[1]]['vitorias'] += 1
                PokeFight.ranking['season2'][battle.p1[1]]['ELO'] += 1
                PokeFight.ranking['season2'][battle.p2[1]]['derrotas'] += 1
                PokeFight.ranking['season2'][battle.p2[1]]['ELO'] -= 1
        elif battle.p2score == 2:
            bot.sendMessage(battle.p2[1], 'Parabéns! você venceu a batalha :)')
            bot.sendMessage(battle.p1[1], 'Você perdeu esta batalha, mais sorte na próxima.')
            bot.sendMessage(battle.mainchat,
                            'A batalha terminou! %s venceu.\nDigite /batalhar para ser o próximo.' % battle.p2[0])
            if battle.p2[1] in PokeFight.ranking['season2']:
                PokeFight.ranking['season2'][battle.p2[1]]['vitorias'] += 1
                PokeFight.ranking['season2'][battle.p2[1]]['ELO'] += 1
                PokeFight.ranking['season2'][battle.p1[1]]['derrotas'] += 1
                PokeFight.ranking['season2'][battle.p1[1]]['ELO'] -= 1
        battle.reset()
        PokeFight.save()
        return
    # match still running: ask both players for their next pick
    bot.sendMessage(battle.p1[1], '%s %s x %s %s\nEscolha seu próximo pokemon:' %
                    (battle.p1[0], battle.p1score, battle.p2score, battle.p2[0]),
                    reply_markup=PokeFight.poke_keyboard3(battle.team1))
    bot.sendMessage(battle.p2[1], '%s %s x %s %s\nEscolha seu próximo pokemon:' %
                    (battle.p1[0], battle.p1score, battle.p2score, battle.p2[0]),
                    reply_markup=PokeFight.poke_keyboard3(battle.team2))
def callback_query(msg):
    """Handle an inline-keyboard pick (a pokemon choice) from a player.

    Finds the battle the sender belongs to, validates that the picked
    pokemon is still in their team, records the pick, and triggers
    pokebat() once both sides have chosen.
    """
    query_id, from_id, data = telepot.glance(msg, flavor='callback_query')
    idata = int(data)      # callback data carries the pokelist index
    iduser = int(from_id)
    # locate the battle this user is registered in
    for i in battledic:
        if iduser in battledic[i].p1 or iduser in battledic[i].p2:
            battle = battledic[i]
            try:
                if battle.battlechoicep1 == -1 and battle.p1[1] == from_id:
                    # anti-cheat: the pick must come from the dealt team
                    if idata not in battle.team1:
                        bot.sendMessage(from_id,
                                        'Este pokemon não está disponível pra você no momento...'
                                        ' está tentando trapacear? ;)')
                    else:
                        bot.sendSticker(from_id, pokelist[idata]['sticker'])
                        bot.sendMessage(from_id, 'Você escolheu %s!' % pokelist[idata]['name'])
                        battle.battlechoicep1 = idata
                        pokebat(battle.mainchat)
                elif battle.battlechoicep1 != -1 and battle.p1[1] == from_id:
                    # player 1 already picked this round
                    bot.sendMessage(from_id, 'Aguardando oponente')
                elif battle.battlechoicep2 == -1 and battle.p2[1] == from_id:
                    if idata not in battle.team2:
                        bot.sendMessage(from_id,
                                        'Este pokemon não está disponível pra você no momento... '
                                        'está tentando trapacear? ;)')
                    else:
                        bot.sendMessage(from_id, 'Você escolheu %s!' % pokelist[idata]['name'])
                        bot.sendSticker(from_id, pokelist[idata]['sticker'])
                        battle.battlechoicep2 = idata
                        pokebat(battle.mainchat)
                elif battle.battlechoicep2 != -1 and battle.p2[1] == from_id:
                    bot.sendMessage(from_id, 'Aguardando oponente')
                else:
                    bot.sendMessage(from_id, 'Você não está inscrito para esta batalha,'
                                             ' por favor aguarde enquanto não termina')
            except IndexError:
                # stale keyboard press after the battle state changed
                pass
        else:
            pass
def timeout(idbattle):
    """Watchdog for a pending challenge, run in its own thread.

    Sleeps 90 seconds; if the battle's timer flag is still set (nobody
    accepted the challenge), announces the timeout and resets the battle.
    """
    time.sleep(90)
    battle = battledic[idbattle]
    if battle.timer != 1:
        return
    print('batalha foi interrompida por timeout')
    bot.sendMessage(idbattle, 'Ninguém aceitou o desafio, por favor tente novamente')
    battle.reset()
# Start time of the bot; handle() ignores any message older than this.
timestamp = int(time.time())
token = '' #insert token here
bot = telepot.Bot(token)
bot.message_loop({'chat': handle,
                  'callback_query': callback_query})
print('I am listening ...')
off_switch = 1
# Keep the process alive; the /shutdown0709 command flips off_switch.
while off_switch == 1:
    schedule.run_pending()
    time.sleep(10)
print('morto por causas naturais')
| true |
655f98c7b9eee540ccd3c41fcfe9c5b807d76bae | Python | garnachod/tfg | /sources/tests/Research/NLP/test_tweets.py | UTF-8 | 2,750 | 2.59375 | 3 | [] | no_license | # gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from collections import namedtuple
import time
import random
from blist import blist
# numpy
import numpy as np
class LabeledLineSentence(object):
    """Exposes a one-sentence-per-line corpus file as TaggedDocuments."""

    def __init__(self, source, prefix):
        self.source = source      # path of the corpus file
        self.prefix = prefix      # label prefix, e.g. 'TWEET' -> 'TWEET_0'
        self.sentences = None     # lazy cache filled by to_array()

    def to_array(self):
        """Load the corpus once and return it as a list of TaggedDocuments."""
        if self.sentences is None:
            self.sentences = blist()
            with utils.smart_open(self.source) as fIn:
                for item_no, line in enumerate(fIn):
                    line = line.replace("\n", "")
                    self.sentences.append(TaggedDocument(
                        utils.to_unicode(line).split(),
                        [self.prefix + '_%s' % item_no]))
        return self.sentences

    def sentences_perm(self):
        """Shuffle the cached sentences in place and return them.

        BUG FIX: the original raised TypeError (shuffle of None) when
        called before to_array(); the corpus is now loaded on demand.
        """
        if self.sentences is None:
            self.to_array()
        random.shuffle(self.sentences)
        return self.sentences
def _train_with_decay(model, sentences, label, epochs=30, last_alpha=0.01):
    """Train *model* for *epochs* shuffled passes, linearly decaying the
    learning rate from its initial value down to *last_alpha*."""
    first_alpha = model.alpha
    for epoch in range(epochs):
        start = time.time()
        print("iniciando epoca %s:" % label)
        print(model.alpha)
        model.train(sentences.sentences_perm())
        end = time.time()
        # linear decay: reaches last_alpha after the final epoch
        model.alpha = (((first_alpha - last_alpha) / float(epochs))
                       * float(epochs - (epoch + 1)) + last_alpha)
        print("tiempo de la epoca " + str(epoch) + ": " + str(end - start))


if __name__ == '__main__':
    dimension = 100
    sentences = LabeledLineSentence("tweets_clean.txt", "TWEET")
    total_start = time.time()
    # Distributed Bag of Words variant (disabled by default).
    dbow = False
    if dbow:
        model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3,
                        negative=5, dm=0, workers=6, alpha=0.04)
        print("inicio vocab")
        model.build_vocab(sentences.to_array())
        print("fin vocab")
        _train_with_decay(model, sentences, "DBOW")
        model.save('./tweet_dbow.d2v')
    # Distributed Memory variant.
    dm = True
    if dm:
        model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3,
                        negative=5, workers=6, alpha=0.04)
        print("inicio vocab")
        model.build_vocab(sentences.to_array())
        print("fin vocab")
        _train_with_decay(model, sentences, "DM")
        model.save('./tweet_dm.d2v')
    total_end = time.time()
    print("tiempo total:" + str((total_end - total_start) / 60.0))
| true |
e0c9c760834ca9fffa5ed8f4bf7492b8af706739 | Python | codeAligned/LEETCodePractice | /Python/PerfectNumber.py | UTF-8 | 337 | 3.15625 | 3 | [
"MIT"
] | permissive | class Solution(object):
def checkPerfectNumber(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 1:
return False
total = 1
for i in range(2, int(num**0.5) + 1):
if num % i == 0:
total += i + num / i
return total == num
| true |
41555654dba2071ee9e7970bc713f6178f760c53 | Python | ucaiado/SignLgRecognizer | /aind/my_recognizer.py | UTF-8 | 1,918 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement a recognizer that used models already trained to classify unseen data
@author: udacity, ucaiado
Created on 10/03/2017
"""
import warnings
from aind.asl_data import SinglesData
def recognize(models: dict, test_set: SinglesData):
    """ Recognize test word sequences from word models set

    :param models: dict of trained models
        {'SOMEWORD': GaussianHMM model object,
        'SOMEOTHERWORD': GaussianHMM model object, ...}
    :param test_set: SinglesData object
    :return: (list, list)  as probabilities, guesses. both lists are ordered by
        the test set word_id probabilities is a list of dictionaries where each
        key a word and value is Log Liklihood
        [{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
         {SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
        ]
        guesses is a list of the best guess words ordered by the test set
        word_id
        ['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    probabilities = []
    guesses = []
    Xlengths = test_set.get_all_Xlengths()
    for X_test, y_test in Xlengths.values():
        best_guess = None
        # BUG FIX: -inf instead of the -1e8 sentinel; a model can score
        # below -1e8, which would have wrongly left best_guess as None
        best_prob = float('-inf')
        d_guesses = {}
        for word, hmm_model in models.items():
            try:
                logL = hmm_model.score(X_test, y_test)
            except (ValueError, AttributeError):
                # model could not score this sequence; record the failure
                d_guesses[word] = None
                continue
            d_guesses[word] = logL
            if logL > best_prob:
                best_guess = word
                best_prob = logL
        probabilities.append(d_guesses)
        guesses.append(best_guess)
    return probabilities, guesses
| true |
2035dcdbcee398f9cddd4602dc0427491a6cda3b | Python | 0n1cOn3/YT_View_Bot | /viewbot_1.py | UTF-8 | 1,597 | 2.53125 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/python3
import random
import bs4 as pi
import requests as R
from requests.exceptions import ConnectionError
import headers as H
class test:
    """Poll a YouTube watch page with rotating User-Agents and report views."""

    def __init__(self, url):
        # URL of the YouTube watch page to request.
        self.url = url

    def views(self):
        """Scrape the watch page once and print the current view count.

        Parses the 14th <script> tag and pulls the "viewCount" field out of
        the embedded player JSON.  Fragile: any YouTube layout change breaks
        the hard-coded indexing.
        """
        page = R.get(self.url)
        soup = pi.BeautifulSoup(page.content.decode('utf-8'), 'html5lib')
        script_text = soup.find_all("script")[13].text
        matches = [chunk for chunk in script_text.split(',') if "viewCount" in chunk]
        count = matches[0].replace("\\", "")[13:-1]
        print("\n Current Views: ", count)

    def main(self):
        """Request the page forever in batches of 77, fresh UA per request.

        Exits only on a network error or Ctrl-C.
        """
        try:
            while True:
                for _ in range(1, 78):
                    agent = random.choice(H.head)
                    headers = {
                        "User-Agent":agent,
                        "Accept":"*/*",
                        "Accept-Encoding":"gzip, deflate, br",
                        "Accept-Language":"en-us,en;q=0.5",
                        "Connection":"keep-alive",
                        "Host":"www.youtube.com",
                        "Referer":"https://www.youtube.com/",
                        "TE":"Trailers"
                    }
                    print("--------")
                    response = R.get(self.url, headers=headers, allow_redirects=False, timeout=10)
                    if response.status_code == 200:
                        print("Ok")
                    else:
                        print("retrying..")
        except ConnectionError:
            print('Network error!')
        except KeyboardInterrupt:
            print('Quitting..')
# Script entry: ask for a video URL, run the (endless) request loop, then
# print the view count once main() returns via Ctrl-C or a network error.
q = input('\n Enter video url : ')
q1 = test(q)
q1.main()
q1.views()
| true |
804a755051c62914286c61b4ba474bf8998679d4 | Python | khalidalmuhawis/calculator | /calculator.py | UTF-8 | 775 | 4.21875 | 4 | [] | no_license |
def main():
    """Prompt for two integers and an operator, then print the result.

    Input validation is deliberately strict: str.isdigit only accepts
    non-negative integer strings, so signs, spaces and decimals all fall
    through to the "invalid numbers" message.

    Bug fixed: "/" with a second operand of 0 raised an uncaught
    ZeroDivisionError; it is now reported instead of crashing.  A stray
    trailing `pass` statement was also removed.
    """
    first_number = input("Enter the first number: ")
    second_number = input("Enter the second number: ")
    operation = input("Choose an operation: ")
    if first_number.isdigit() and second_number.isdigit():
        first_number = int(first_number)
        second_number = int(second_number)
        if operation == "+":
            print(first_number + second_number)
        elif operation == "-":
            print(first_number - second_number)
        elif operation == "/":
            # Guard the only operation that can raise ZeroDivisionError.
            if second_number == 0:
                print("cannot divide by zero")
            else:
                print(first_number / second_number)
        elif operation == "*":
            print(first_number * second_number)
        else:
            print("invalid operation")
    else:
        print("invalid numbers")


if __name__ == '__main__':
    main()
| true |
19bcd8a84c3fc285f5c647296428dd9c483f8e09 | Python | dagrawa2/ece692_deep_learning | /project2-improve-training/see.py | UTF-8 | 225 | 2.53125 | 3 | [] | no_license | import os
import numpy as np

# For every accuracy curve saved under results/, print how many epochs it
# spans and the best accuracy it reached.  The gradient dump is not an
# accuracy curve, so it is excluded up front.
result_files = sorted(os.listdir("results/"))
result_files.remove("1-3b-grads.npy")
for name in result_files:
    print(name)
    curve = np.load("results/" + name)
    print("epochs: ", len(curve))
    print("acc: ", np.max(curve), "\n")
| true |
5d138241b3738bfb97fd20324546d7e78e896432 | Python | JulseJiang/leetcode | /牛客网热题_反转链表.py | UTF-8 | 964 | 3.65625 | 4 | [] | no_license | # Title : 牛客网热题_反转链表.py
# Created by: julse@qq.com
# Created on: 2021/7/17 10:02
# des : TODO
# -*- coding:utf-8 -*-
class ListNode:
    """Singly linked list node: a payload value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x          # payload value
        self.next = None      # successor node; None at the tail
class Solution:
    # Returns the new head ListNode of the reversed list.
    def ReverseList(self, pHead):
        """Reverse a singly linked list in place and return the new head.

        Standard iterative reversal: walk the list once, re-pointing each
        node at the already-reversed prefix.  Returns None for an empty list.
        """
        reversed_head = None
        node = pHead
        while node:
            following = node.next        # remember the untouched remainder
            node.next = reversed_head    # hook this node onto the reversed prefix
            reversed_head = node
            node = following
        return reversed_head
if __name__ == '__main__':
    # Input:
    # {1,2,3}
    # Expected result:
    # {3,2,1}
    inputs = [1,2,3]
    ans = [3,2,1]  # expected values; NOTE(review): never actually compared to qHead
    pHead = ListNode(None)  # dummy head simplifies appending below
    temp = pHead
    for input in inputs:  # NOTE(review): loop variable shadows the builtin input()
        a = ListNode(input)
        temp.next = a
        temp = a
    # Reverse the real list (skip the dummy head).
    qHead = Solution().ReverseList(pHead.next)
| true |
93d368574f0cb4e95406225c4f0d3793e508fc62 | Python | gokul0801/departure_times | /rtt/queries_lxml.py | UTF-8 | 8,895 | 2.71875 | 3 | [] | no_license | import urllib, urllib2
import log
import sys
import lxml
from lxml import etree
BASE_URL = 'http://services.my511.org/Transit2.0/'
SECURITY_TOKEN = '4272b807-452a-406b-b034-d2b471318fea'
sessionCache = {}
routes_dict_cache = {}
def getXml(requestURL):
    """Fetch *requestURL* and return a file-like object with the response body.

    Responses are memoised in the module-level ``sessionCache``.

    Bug fixed: the original cached the HTTP response *object*.  A response
    stream can only be read once — callers hand the result to
    ``etree.parse``, which exhausts it — so every cache hit returned an
    empty document.  We now cache the raw bytes and hand each caller a
    fresh in-memory stream.
    """
    import io  # local import keeps the module's original import block untouched
    try:
        payload = sessionCache[requestURL]
    except KeyError:
        payload = urllib2.urlopen(requestURL).read()
        sessionCache[requestURL] = payload
    return io.BytesIO(payload)
#Classes for Agency, Route, RouteDirection and Stop
class Agency:
    """A transit agency exposed by the 511 API."""

    def __init__(self, name, hasDirection):
        """Remember the agency name and whether its routes are directional."""
        self.name = name
        self.hasDirection = hasDirection
        self.routes = []

    def addRoute(self, route):
        """Attach a Route object to this agency."""
        self.routes.append(route)

    def __str__(self):
        return "<Agency Name:{0}, hasDirection:{1}>".format(self.name, self.hasDirection)
class Route:
    """A transit route: holds RouteDirections, or bare stops for simple agencies."""

    def __init__(self, name, code):
        self.name = name
        self.code = code
        self.routeDirectionList = []
        # Only used by agencies without route directions; directional
        # agencies keep their stops inside each RouteDirection instead.
        self.stopList = []

    def addRouteDirection(self, routeDirection):
        """Attach a RouteDirection to this route."""
        self.routeDirectionList.append(routeDirection)

    def addStop(self, stop):
        """Attach a Stop directly to this route (direction-less agencies only)."""
        self.stopList.append(stop)

    def __str__(self):
        lines = ["<Route Name:{0}, Code:{1}>".format(self.name, self.code)]
        # Directions take precedence: when present, stops live inside them.
        children = self.routeDirectionList if self.routeDirectionList else self.stopList
        lines.extend(str(child) for child in children)
        return "\n".join(lines) + "\n"

    def hasDirectionList(self):
        """Return True when this route's stops live under RouteDirections."""
        return bool(self.routeDirectionList)
##RouteDirection has list of stops to store data for stops
class RouteDirection:
    """One direction of travel on a route, with its ordered stops."""

    def __init__(self, name, code):
        self.name = name
        self.code = code
        self.stopList = []

    def __str__(self):
        header = "<RouteDirection Name:{0}, Code:{1}>".format(self.name, self.code)
        parts = [header] + [str(stop) for stop in self.stopList]
        return "\n".join(parts) + "\n"

    def addStop(self, stop):
        """Attach a Stop to this direction."""
        self.stopList.append(stop)
class Stop:
    """A transit stop plus its predicted departure times (strings, minutes)."""

    def __init__(self, name, code):
        self.name = name
        self.code = code
        self.departureTimeList = []

    def __str__(self):
        header = "<Stop Name:{0}, Code:{1}>".format(self.name, self.code)
        if self.departureTimeList:
            return header + '\nDepartureTimes: ' + ', '.join(self.departureTimeList) + ' mins'
        return header + '\nNo data available..'

    ### Return list of departure times for this stop
    def getDepartureList(self):
        """Return the departure times as a display string, e.g. "5, 10 mins"."""
        if self.departureTimeList:
            return ', '.join(self.departureTimeList) + ' mins'
        return 'No data available..'

    def addDepartureTime(self, time):
        """Record one predicted departure time (a string, in minutes)."""
        self.departureTimeList.append(time)
def getAgencies():
    """Return every transit agency known to the 511 API as Agency objects.

    Wraps any failure (network, parse, unexpected XML shape) in a generic
    Exception carrying the underlying error details.
    """
    try:
        url = BASE_URL + 'GetAgencies.aspx?token=%s' % SECURITY_TOKEN
        xml = getXml(url)  # may be served from the module-level sessionCache
        dom = etree.parse(xml)
        agencies = []
        for agency in dom.xpath('//RTT/AgencyList/Agency'):
            name = agency.xpath('@Name')[0]
            hasDirection = agency.xpath('@HasDirection')[0]
            # NOTE: rebinds the loop variable from the XML element to the model object.
            agency = Agency(name, hasDirection)
            agencies.append(agency)
        return agencies
    except:
        # sys.exc_type/sys.exc_value exist only on Python 2 (this module targets py2).
        raise Exception("Error getting Transit Agency list: %s %s" % (sys.exc_type, sys.exc_value))
def getRoutes(agency):
    """Return (routes, hasDirection) for *agency*, caching the result.

    routes is a list of Route objects (with RouteDirections attached when
    the agency is directional); hasDirection is the string 'True'/'False'.
    Results are memoised in the module-level routes_dict_cache.
    """
    try:
        if routes_dict_cache.has_key(agency):  # has_key: Python 2 only
            # Cache hit: derive hasDirection from the first cached route.
            route = routes_dict_cache[agency][0]
            if route.routeDirectionList == []:
                hasDirection = 'False'
            else:
                hasDirection = 'True'
            return routes_dict_cache[agency], hasDirection
        params = urllib.urlencode({'agencyName': agency})
        url = BASE_URL + 'GetRoutesForAgency.aspx?'+ params + '&token=%s' % SECURITY_TOKEN
        log.debug(url)
        xml = getXml(url)
        dom = etree.parse(xml)
        routes = []
        hasDirection = dom.xpath('//RTT/AgencyList/Agency/@HasDirection')[0]
        for route in dom.xpath('//RTT/AgencyList/Agency/RouteList/Route'):
            routeName = route.xpath('@Name')[0]
            routeCode = route.xpath('@Code')[0]
            routeObj = Route(routeName, routeCode)
            if route.xpath('RouteDirectionList') != []:
                for routeDirection in route.xpath('RouteDirectionList/RouteDirection'):
                    routeDirectionName = routeDirection.xpath('@Name')[0]
                    routeDirectionCode = routeDirection.xpath('@Code')[0]
                    routeDirectionObj = RouteDirection(routeDirectionName, routeDirectionCode)
                    routeObj.addRouteDirection(routeDirectionObj)
            routes.append(routeObj)
        routes_dict_cache[agency] = routes
        return routes, hasDirection
    except:
        raise Exception("Error getting routes for Agency %s" % agency + ":%s %s" % (sys.exc_type, str(sys.exc_value)))
def getDirectionsForRoute(agency, routename):
    """Return the cached RouteDirection list for *routename* under *agency*.

    Requires getRoutes() to have populated routes_dict_cache beforehand;
    returns None when the route name is not found.
    """
    try:
        for cached_route in routes_dict_cache[agency]:
            if cached_route.name != routename:
                continue
            return cached_route.routeDirectionList
    except:
        raise Exception("Error getting directions for Agency %s, Route %s" % (agency, routename))
def getStopsForRoute(agency, routeName, routeDirectionName=None):
    """Return the Stop objects for a route (optionally one direction of it).

    Looks up route/direction codes in routes_dict_cache (getRoutes() must
    have run first), builds the 511 routeIDF identifier, and parses the
    stop list from the response XML.

    NOTE(review): when the route or direction name is not found the codes
    stay '' and the API is queried with a malformed routeIDF — confirm
    whether that should raise instead.
    """
    try:
        routeCode = ''
        routeDirectionCode = ''
        log.debug(routeDirectionName)
        # Resolve the cached codes for the requested route (and direction).
        for route in routes_dict_cache[agency]:
            if route.name == routeName:
                routeCode = route.code
                if routeDirectionName is not None:
                    for direction in route.routeDirectionList:
                        if direction.name == routeDirectionName:
                            routeDirectionCode = direction.code
        if routeDirectionName is None:
            # Direction-less agencies: stops hang directly off the Route.
            routeIDF = agency + '~' + routeCode
            params = urllib.urlencode({'routeIDF': routeIDF})
            url = BASE_URL + 'GetStopsForRoute.aspx?' + params + '&token=%s' % SECURITY_TOKEN
            path = '//RTT/AgencyList/Agency/RouteList/Route/StopList/Stop'
        else:
            # Directional agencies: stops are nested under the RouteDirection.
            routeIDF = agency + '~' + routeCode + '~' + routeDirectionCode
            params = urllib.urlencode({'routeIDF': routeIDF})
            url = BASE_URL + 'GetStopsForRoute.aspx?' + params + '&token=%s' % SECURITY_TOKEN
            path = '//RTT/AgencyList/Agency/RouteList/Route/RouteDirectionList/RouteDirection/StopList/Stop'
        xml = getXml(url)
        dom = etree.parse(xml)
        stops = []
        for stop in dom.xpath(path):
            stopName = stop.xpath('@name')[0]
            stopCode = stop.xpath('@StopCode')[0]
            stopObj = Stop(stopName, stopCode)
            stops.append(stopObj)
        return stops
    except:
        raise Exception("Error getting stops for Agency %s, Route %s" % (agency, routeName))
def getDepartureTimes(agency, stopName):
    """Return Route objects whose stops match *stopName*, each Stop populated
    with its predicted departure times (in minutes).

    NOTE(review): this calls urllib2.urlopen directly instead of the cached
    getXml() — presumably so predictions are always fetched fresh; confirm
    before unifying with the cached path.
    """
    try:
        params = urllib.urlencode({'agencyName': agency, 'stopName': stopName})
        url = BASE_URL + 'GetNextDeparturesByStopName.aspx?' + params + '&token=%s' % SECURITY_TOKEN
        xml = urllib2.urlopen(url)
        dom = etree.parse(xml)
        routeTimes = []
        hasDirection = dom.xpath('//RTT/AgencyList/Agency/@HasDirection')[0]
        for route in dom.xpath('//RTT/AgencyList/Agency/RouteList/Route'):
            routeName = route.xpath('@Name')[0]
            routeCode = route.xpath('@Code')[0]
            routeObj = Route(routeName, routeCode)
            if hasDirection == 'True':
                # Directional agencies nest stops inside RouteDirections.
                for routeDirection in route.xpath('RouteDirectionList/RouteDirection'):
                    routeDirectionName = routeDirection.xpath('@Name')[0]
                    routeDirectionCode = routeDirection.xpath('@Code')[0]
                    routeDirectionObj = RouteDirection(routeDirectionName, routeDirectionCode)
                    routeObj.addRouteDirection(routeDirectionObj)
                    for stop in routeDirection.xpath('StopList/Stop'):
                        # NOTE(review): stopName here shadows the parameter.
                        stopName = stop.xpath('@name')[0]
                        stopCode = stop.xpath('@StopCode')[0]
                        stopObj = Stop(stopName, stopCode)
                        routeDirectionObj.addStop(stopObj)
                        if len(stop.xpath('DepartureTimeList/DepartureTime')) != 0:
                            for departureTime in stop.xpath('DepartureTimeList/DepartureTime'):
                                stopObj.addDepartureTime(departureTime.text)
            else:
                # Direction-less agencies attach stops directly to the route.
                for stop in route.xpath('StopList/Stop'):
                    stopName = stop.xpath('@name')[0]
                    stopCode = stop.xpath('@StopCode')[0]
                    stopObj = Stop(stopName, stopCode)
                    routeObj.addStop(stopObj)
                    if len(stop.xpath('DepartureTimeList/DepartureTime')) != 0:
                        for departureTime in stop.xpath('DepartureTimeList/DepartureTime'):
                            stopObj.addDepartureTime(departureTime.text)
            routeTimes.append(routeObj)
        return routeTimes
    except:
        raise Exception("Error getting departure times for Agency %s, Stop %s" % (agency, stopName))
### Manual smoke test: exercises each query helper against the live 511 API.
if __name__ == '__main__':
    getAgencies()
    getRoutes('BART')
    getStopsForRoute('BART', 'Fremont')
    getDepartureTimes('BART', 'Fremont')
| true |
3ee86ad5581544bb5ca1d65a8f1cd16927ec46d4 | Python | yuki7125/mlpp_pop_pc | /final-project/util_gmm.py | UTF-8 | 9,550 | 2.859375 | 3 | [] | no_license | import numpy as np
import torch
import pandas as pd
import pyro
import pyro.distributions as dist
from matplotlib import pyplot
from matplotlib.patches import Ellipse
def get_train_test_split(movies_metadata):
    """Get train test split of obs and new data.

    Z-scores the numeric columns of movies_metadata (mutated in place),
    extracts the five-feature matrix and returns two disjoint 500-row
    float32 tensors: rows [0, 500) as the observed data and rows
    [500, 1000) as the held-out "new" data.  Assumes the frame has at
    least 1000 rows — TODO confirm with the caller.
    """
    movies_metadata = normalize_data(movies_metadata)
    features = get_features(movies_metadata)
    data = torch.tensor(features, dtype=torch.float32)
    N = 500  # size of each split
    data_new = data[N:2 * N].clone().detach()
    data = data[:N].clone().detach()
    return data, data_new
def plot_svi_convergence(losses, gradient_norms):
    """Plot SVI convergence and gradient.

    losses: per-iteration loss values (plotted on a log scale).
    gradient_norms: mapping of parameter name -> per-iteration gradient
        norms; one labelled line per parameter.
    """
    fig = pyplot.figure(figsize=(16, 4))
    ax1 = fig.add_subplot(121, xlabel="iters", ylabel="loss",
                          yscale="log", title="Convergence of SVI")
    ax1.plot(losses)
    ax2 = fig.add_subplot(122, xlabel="iters", ylabel="gradient norm",
                          yscale="log", title="Gradient norm SVI")
    for name, grad_norms in gradient_norms.items():
        ax2.plot(grad_norms, label=name)
    ax2.legend()
    fig.show()
def plot(data, mus=None, sigmas=None, colors='black', K=None, d=None, ax=None):
    """Plot 2D GMMs.

    Scatters the first two columns of *data*, overlays the cluster means in
    red, and (when *sigmas* is given) draws one ellipse per cluster with
    axes scaled by 4*sqrt(eigenvalue).  *ax*, *K* and *d* are required
    whenever *sigmas* is given.
    """
    x = data[:, 0]
    y = data[:, 1]
    pyplot.scatter(x, y, 24, c=colors)
    if mus is not None:
        x = [float(m[0]) for m in mus]
        y = [float(m[1]) for m in mus]
        pyplot.scatter(x, y, 99, c='red')
    if sigmas is not None:
        for sig_ix in range(K):
            try:
                # Scalar variance: expand to an isotropic d x d covariance.
                cov = (torch.eye(d) * sigmas[sig_ix]).detach().numpy()
            except TypeError:
                # Already a full matrix (nested list / array).
                cov = np.array(sigmas[sig_ix])
            lam, v = np.linalg.eig(cov)
            lam = np.sqrt(lam)
            ell = Ellipse(xy=(x[sig_ix], y[sig_ix]),
                          width=lam[0] * 4, height=lam[1] * 4,
                          angle=np.rad2deg(np.arccos(v[0, 0])),
                          color='blue')
            ell.set_facecolor('none')
            ax.add_artist(ell)
def normalize_data(movies_metadata):
    """Z-score the six numeric columns of movies_metadata in place; return it."""
    numeric_columns = ('revenue', 'budget', 'vote_average',
                       'vote_count', 'popularity', 'runtime')
    for column in numeric_columns:
        movies_metadata[column] = normalize(movies_metadata[column])
    return movies_metadata
def normalize(np_arr):
    """Return the z-scores of np_arr: zero mean, unit (population) std."""
    mu = np.mean(np_arr)
    sigma = np.std(np_arr)
    return (np_arr - mu) / sigma
def get_features(movies_metadata):
    """Stack the five model features into an (N, 5) array, fixed column order."""
    feature_columns = ('budget', 'revenue', 'vote_count',
                       'vote_average', 'popularity')
    return np.stack([movies_metadata[name] for name in feature_columns], axis=1)
def get_Sigma_samples(posterior_samples):
    """Reassemble one covariance matrix per posterior sample.

    Each Sigma is (diag(sqrt(theta)) @ L_omega) @ its transpose, i.e. the
    scaled Cholesky factor times itself.  Returns a (num_samples, d, d)
    tensor.
    """
    sigmas = []
    for theta, l_omega in zip(posterior_samples["theta"],
                              posterior_samples["L_omega"]):
        scale = torch.diag(theta.sqrt())
        l_full = scale.mm(l_omega)
        sigmas.append(l_full.mm(l_full.t()).tolist())
    return torch.tensor(sigmas)
def get_bayes_estimate_cov(Sigma_samples, K):
    """Posterior-mean covariance, tiled K times (one copy per cluster).

    Sigma_samples: (num_samples, d, d) tensor of covariance draws.
    Returns a (K, d, d) tensor where every slice is the element-wise
    posterior mean — all clusters share a single covariance estimate.
    """
    dim = len(Sigma_samples[0])
    mean_sigma = [[torch.mean(Sigma_samples[:, row, col]).item()
                   for col in range(dim)]
                  for row in range(dim)]
    return torch.tensor([mean_sigma] * K)
def get_bayes_estimate_mu(posterior_samples):
    """Posterior-mean estimate of the cluster means.

    posterior_samples["locs"] is (num_samples, K, d); the mean is taken
    over the sample axis, giving a (K, d) tensor.
    """
    locs = posterior_samples["locs"]
    num_clusters = len(locs[0])
    num_dims = len(locs[0][0])
    means = [[torch.mean(locs[:, k, j]).item() for j in range(num_dims)]
             for k in range(num_clusters)]
    return torch.tensor(means)
def get_bayes_estimate_pi(posterior_samples):
    """Posterior-mean estimate of the mixture weights, as a plain list."""
    weights = posterior_samples["weights"]
    estimates = []
    for k in range(len(weights[0])):
        estimates.append(torch.mean(weights[:, k]).item())
    return estimates
def plot_mcmc_mu(posterior_samples, K, d):
    """Plot posterior of mu: per-cluster histogram and trace of each coordinate."""
    for i in range(K):
        # Trace of cluster i's mean across all MCMC samples: (num_samples, d).
        trace = posterior_samples["locs"][:, i, :]
        fig = pyplot.figure(figsize=(16, 2))
        for j in range(d):
            ax1 = fig.add_subplot(
                121, xlabel="x", ylabel="Frequency", title="mu" + str(i))
            ax1.hist(trace[:, j], 50, density=True)
            ax2 = fig.add_subplot(
                122,
                xlabel="Steps",
                ylabel="Sample Values",
                title="mu" + str(i))
            ax2.plot((trace[:, j]))
        fig.show()
def plot_mcmc_pi(posterior_samples, K, d):
    """Plot posterior of pi: histogram and trace of each mixture weight."""
    fig = pyplot.figure(figsize=(16, 2))
    ax1 = fig.add_subplot(
        121, xlabel="x", ylabel="Frequency", title="pi")
    ax2 = fig.add_subplot(
        122, xlabel="Steps", ylabel="Sample Values", title="pi")
    for i in range(K):
        ax1.hist(posterior_samples["weights"][:, i])
        ax2.plot(posterior_samples["weights"][:, i])
    fig.show()
def plot_mcmc_theta(posterior_samples, K, d):
    """Plot posterior of theta: histogram and trace per dimension."""
    fig = pyplot.figure(figsize=(16, 2))
    for j in range(d):
        ax1 = fig.add_subplot(
            121, xlabel="x", ylabel="Frequency", title="theta")
        ax1.hist(posterior_samples["theta"][:, j], 50, density=True)
        ax2 = fig.add_subplot(
            122, xlabel="Steps", ylabel="Sample Values", title="theta")
        ax2.plot(posterior_samples["theta"][:, j])
    fig.show()
def plot_mcmc_Sigma(Sigma_samples, K, d):
    """Plot posterior of Sigma: histogram and trace of every matrix entry."""
    fig = pyplot.figure(figsize=(16, 2))
    ax1 = fig.add_subplot(
        121, xlabel="x", ylabel="Frequency", title="Sigma")
    ax2 = fig.add_subplot(
        122, xlabel="Steps", ylabel="Sample Values", title="Sigma")
    for i in range(d):
        for j in range(d):
            ax1.hist(Sigma_samples[:, i, j], 30, density=True)
            ax2.plot(Sigma_samples[:, i, j])
    fig.show()
def plot_gmm_results(data, mu, cov, K, d):
    """Plot how the fitted clusters look in two feature-pair projections.

    NOTE(review): per get_features the first two columns are budget then
    revenue, so the axis labels below look swapped — confirm.
    """
    fig = pyplot.figure(figsize=(16, 4))
    ax = fig.add_subplot(121, xlabel="revenue", ylabel="budget")
    plot(data[:, 0:2], mu[:, 0:2], cov, K=K, d=d, ax=ax)
    ax = fig.add_subplot(122, xlabel="vote_average", ylabel="vote_count")
    plot(data[:, 2:4], mu[:, 2:4], cov, K=K, d=d, ax=ax)
    fig.show()
def plot_assignments(assignment, K):
    """Histogram of how many points fall into each of the K components."""
    pyplot.figure(figsize=(8, 4))
    pyplot.hist(assignment, bins=K, ec="k")
    pyplot.xlabel("pi")
    pyplot.ylabel("Frequency")
    pyplot.title("Components")
    pyplot.show()
def get_members(data, assignment, group):
    """Return the (budget, revenue) rows of *data* assigned to *group*.

    data: (N, 2) tensor; assignment: per-row cluster labels.
    Returns a numpy array of the matching rows' first two columns.
    """
    frame = pd.DataFrame(data.detach().numpy(), columns=['budget', 'revenue'])
    frame['assignment'] = pd.DataFrame(assignment, columns=['assignment'])
    in_group = frame['assignment'] == group
    return frame[in_group].to_numpy()[:, 0:2]
def compute_log_likelihood(data, mu, cov, pi):
    """Return the GMM log-likelihood of *data* under weights pi, means mu,
    covariances cov.

    For each point x, log p(x) = logsumexp_k [log pi_k + log N(x|mu_k, cov_k)],
    summed over all points.

    Bug fixed: the original looped over the data dimension (len(data[0]))
    instead of the K mixture components, and summed the per-component log
    terms directly instead of log-sum-exp-ing them — that is not the
    mixture log-likelihood (and IndexErrors whenever K < d).
    """
    log_likelihood = 0.0
    num_components = len(pi)
    for i in range(len(data)):
        # One log-joint term per component, combined with logsumexp.
        component_terms = torch.stack([
            np.log(pi[k])
            + dist.MultivariateNormal(mu[k], cov[k]).log_prob(data[i])
            for k in range(num_components)])
        log_likelihood += torch.logsumexp(component_terms, dim=0)
    return log_likelihood
def get_replicated_data(data, mu, cov, pi):
    """Draw a replicated dataset of len(data) points from the fitted GMM.

    For each point: sample a component from Categorical(pi), then a point
    from that component's MultivariateNormal.  Non-deterministic (uses
    pyro.sample).
    """
    data_rep = []
    for i in range(len(data)):
        cluster = pyro.sample('category', dist.Categorical(torch.tensor(pi)))
        idx = cluster.item()
        sample = pyro.sample("obs", dist.MultivariateNormal(mu[idx], cov[idx]))
        # Rejection step: resample until the draw is inside the observed range.
        # NOTE(review): only the first two coordinates are checked, and only
        # against the lower bound — confirm this is intentional.
        while sample[0] < min(data[:, 0]) or sample[1] < min(data[:, 1]):
            # Only sample valid points
            sample = pyro.sample(
                "obs", dist.MultivariateNormal(mu[idx], cov[idx]))
        data_rep.append(sample.tolist())
    data_rep = torch.tensor(data_rep)
    return data_rep
def plot_rep_obs_new_data(data, data_rep, data_new):
    """Side-by-side scatter: replicated vs observed (PPC) and replicated vs
    held-out new data (POP-PC), in the budget/revenue plane."""
    fig = pyplot.figure(figsize=(16, 4))
    ax1 = fig.add_subplot(
        121, xlabel="budget", ylabel="revenue",
        title="PPC", ylim=(-1, 6), xlim=(-1, 4))
    ax1.scatter(data_rep[:, 0], data_rep[:, 1], label="replicated data")
    ax1.scatter(data[:, 0], data[:, 1], label="observed data")
    ax1.legend()
    ax2 = fig.add_subplot(
        122, xlabel="budget", ylabel="revenue",
        title="POP-PC", ylim=(-1, 6), xlim=(-1, 4))
    ax2.scatter(data_rep[:, 0], data_rep[:, 1], label="replicated data")
    ax2.scatter(data_new[:, 0], data_new[:, 1], label="new data")
    ax2.legend()
    fig.show()
def plot_ppc_vs_poppc():
    """Plot sqrt-discrepancy vs K for PPC and POP-PC.

    NOTE(review): the discrepancy values below are hard-coded results from
    earlier experiment runs, not recomputed here.
    """
    K_variable = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    ppc_response = [1028425, 499302, 2828440, 15175103, 15331, 30597838,
                    4952241, 17231320, 6284324, 5327910, 4547856, 23852158,
                    17231514]
    pop_pc_response = [2522314, 833008, 1790494, 77044920, 3299700,
                       158990000, 8688746, 129820000, 1125285, 19079732,
                       30745118, 72734376, 94568328]
    fig = pyplot.figure(figsize=(16, 4))
    ax = fig.add_subplot(
        111, xlabel="K number of clusters",
        ylabel="Discrepancy", title="POP-PC vs PPC")
    ax.plot(K_variable, np.sqrt(ppc_response), label="PPC")
    ax.plot(K_variable, np.sqrt(pop_pc_response), label="POP-PC")
    ax.legend()
    fig.show()
| true |
45d8bd600dbdc976d801f5c2d417c326e00a67ca | Python | andrey-ladygin-loudclear/deep-learning | /helper/week1/decorators_hard.py | UTF-8 | 373 | 3.0625 | 3 | [] | no_license | def logger(filename):
    # Inner decorator: receives the function being wrapped.
    def decorator(func):
        def wrapped(*args, **kwargs):
            # Call through first, so exceptions propagate before any file I/O.
            result = func(*args, **kwargs)
            # Mode 'w' overwrites the log on every call; only the latest
            # result is kept.  NOTE(review): no functools.wraps, so the
            # wrapped function loses its __name__/__doc__.
            with open(filename, 'w') as f:
                f.write(str(result))
            return result
        return wrapped
    return decorator
@logger('new_log.txt')
def summator(num_list):
    """Return the sum of num_list; the result is also written to new_log.txt."""
    return sum(num_list)
summator([1,2,3,4,5]) | true |
484538cb3ac2b9b53b00ca0f67e628196d8c395c | Python | sergelemon/python_training | /old/task9.py | UTF-8 | 778 | 4.21875 | 4 | [] | no_license | #9. n школьников делят k яблок поровну, неделящийся остаток остается в корзинке. Сколько яблок достанется каждому
# (task statement, continued): how many apples does each pupil get, and how
# many remain in the basket?  Reads n and k, prints the two numbers.
n = int(input("Укажите количество школьников\n"))  # n: number of pupils
k = int(input("Укажите количество яблок\n"))  # k: number of apples
print('Количество яблок на каждого школьника: ' + str(k // n))  # apples per pupil
print('Количество яблок в корзине: ' + str(k % n)) | true |
6d725e715f7d5b487d8f557df45293f2cd4004fe | Python | Sapphirine/202005-17-Understandin--Personal-Value-and-Objectives | /python/processing/neo4j_loader.py | UTF-8 | 6,508 | 2.515625 | 3 | [
"MIT"
] | permissive |
#!/usr/bin/env python3
from datetime import datetime
import sys
import os
import shutil
import time
import csv
import argparse
import re
import numpy as np
from neo4j import GraphDatabase
GIT_REPO = "https://tbd.com"
class Neo4jUtil:
    """
    This class postprocesses data collected during the personality network
    study, understanding personal value and objectives.

    It batch-loads per-trait (Five Factor Model) node and edge CSV files
    into a neo4j database via Cypher LOAD CSV, and provides small
    maintenance helpers (dump, collect, smoke test).
    """
    def __init__(self,n4j_import="file:///demo",node_tail="_Z_raw_vert.csv",
                 edge_tail="_Z_raw_edges.csv",n4j_user="neo4j",n4j_pass="admin",
                 n4j_uri="localhost",n4j_port="7687", dump=False,collect=False,
                 test=False, pretty=False,verbose=False):
        """
        Store run options and open a bolt driver to the neo4j server.

        n4j_import: URL prefix (as seen by the neo4j server) where the CSV
            files live; file names are built as <FFM><node_tail/edge_tail>.
        dump / collect / test: maintenance modes checked by run(); when all
            are False the CSV pairs are loaded.
        """
        self.n4j_import = n4j_import # top level path to neo4j import
        self.node_tail = node_tail
        self.edge_tail = edge_tail
        self.dump = dump
        self.collect = collect
        self.test = test
        self.pretty = pretty      # NOTE(review): accepted but unused in this class
        self.verbose = verbose    # NOTE(review): accepted but unused in this class
        n4j_db = "bolt://{uri}:{port}".format(uri=n4j_uri,port=n4j_port)
        print(n4j_db)
        self.driver = GraphDatabase.driver(n4j_db, auth=(n4j_user,n4j_pass))
    def add_friend(self,tx, name, friend_name):
        """Transaction function: MERGE two Person nodes and a KNOWS edge."""
        tx.run("MERGE (a:Person {name: $nn}) "
               "MERGE (a)-[:KNOWS]->(friend:Person {name: $friend_name})",
               nn=name, friend_name=friend_name)
    def print_friends(self,tx, name):
        """Transaction function: print everyone *name* KNOWS, alphabetically."""
        for record in tx.run("MATCH (a:Person)-[:KNOWS]->(friend) WHERE a.name = $name "
                             "RETURN friend.name ORDER BY friend.name", name=name):
            print(record["friend.name"])
    def dump_db(self,tx):
        """Transaction function: delete every node and relationship."""
        tx.run("MATCH (n) DETACH DELETE n")
    def return_n(self,tx):
        """Transaction function: print every node in the database."""
        for record in tx.run("MATCH (n) RETURN n"):
            print(record)
    def load_nodes(self,tx,csv,ffm):
        """Transaction function: LOAD CSV the vertex file *csv* as :<ffm> nodes.

        Doubled braces escape str.format so the Cypher map literal survives;
        users.<ffm>_Z is the per-trait z-score column.
        """
        query = "LOAD CSV WITH HEADERS FROM "
        query += "\"{csv}\" AS users ".format(csv=csv)
        query += "CREATE(n:{ffm} {{id:toInteger(users.id),label:users.name, " \
                 "score:toFloat(users.{ffm}_Z),age:toInteger(users.age), " \
                 "sex:users.sex,country:users.country}})".format(ffm=ffm)
        print(query)
        tx.run(query)
    def load_edges(self,tx,csv,ffm):
        """Transaction function: LOAD CSV the edge file, creating weighted
        INTERLOCK relationships between already-loaded :<ffm> nodes."""
        query = "LOAD CSV WITH HEADERS FROM "
        query += "\"{csv}\" AS edges ".format(csv=csv)
        query += "MATCH (a:{ffm} {{ id: toInteger(edges.src) }}) " \
            "MATCH (b:{ffm} {{ id: toInteger(edges.dst) }}) " \
            "CREATE (a)-[r:INTERLOCK{{weight:toFloat(edges.{ffm}_Z)}}]->(b);".format(ffm=ffm)
        tx.run(query)
    def test_neo4j(self,session):
        """Smoke test: write a tiny KNOWS graph and read it back."""
        session.write_transaction(self.add_friend, "Arthur", "Guinevere")
        session.write_transaction(self.add_friend, "Arthur", "Lancelot")
        session.write_transaction(self.add_friend, "Arthur", "Merlin")
        session.read_transaction(self.print_friends, "Arthur")
    def run(self):
        """Main entry point: wipe the database, then either run a maintenance
        mode (dump / collect / test) or load the five FFM trait CSV pairs
        (OPN, CSN, EXT, AGR, NEU).
        """
        print("\n----------------------------------------")
        print("Starting \'{}\'".format(self.__class__.__name__))
        print("----------------------------------------\n")
        with self.driver.session() as session:
            # This helps us not duplicate records
            session.write_transaction(self.dump_db)
            if self.dump:
                print("Dumped the database.")
                return
            if self.collect:
                session.read_transaction(self.return_n)
                return
            if self.test:
                self.test_neo4j(session)
                print("Test complete.")
                return
            ffms=["OPN","CSN","EXT","AGR","NEU"]
            for ffm in ffms:
                node_path = ffm + self.node_tail
                edge_path = ffm + self.edge_tail
                v_csv_path = "{csv}/{nodes}".format(csv=self.n4j_import,nodes=node_path)
                e_csv_path = "{csv}/{edges}".format(csv=self.n4j_import,edges=edge_path)
                session.write_transaction(self.load_nodes,v_csv_path,ffm)
                session.write_transaction(self.load_edges,e_csv_path,ffm)
                # session.read_transaction(self.return_n)
        # session.write_transaction(dump_db)
        # NOTE(review): the early returns above skip driver.close() — confirm intended.
        self.driver.close()
if __name__ == "__main__":
    # NOTE(review): concatenation below is missing a space between "output"
    # and "into", so the rendered help reads "...outputinto a...".
    prg_desc = 'Batch loads the FFM data from ComplexAnalysis output'
    prg_desc += 'into a neo4j database '
    prg_desc += '{}'.format(GIT_REPO)
    parser = argparse.ArgumentParser(description=prg_desc)
    # Every dest matches a Neo4jUtil.__init__ keyword so vars(args) can be
    # splatted straight into the constructor below.
    parser.add_argument('-i','--input',metavar='INPUT_PATH', default="file:///demo",
        dest='n4j_import',help='Path to data that needs processed.')
    parser.add_argument('-n','--node',metavar='NODE_TAIL',default="_Z_raw_vert.csv",
        dest='node_tail',help='Last part of node CSV path, to include the .csv.')
    parser.add_argument('-e','--edge',metavar='EDGE_TAIL',default="_Z_raw_edges.csv",
        dest='edge_tail',help='Last part of edge CSV path, to include the .csv.')
    parser.add_argument('-u','--user',default="neo4j",
        dest='n4j_user',help='neo4j username.')
    parser.add_argument('-p','--pass',default="admin",
        dest='n4j_pass',help='neo4j password.')
    parser.add_argument('-r','--uri',default="localhost",
        dest='n4j_uri',help='neo4j database uri hostname. Default: localhost')
    parser.add_argument('-t','--port',default="7687",
        dest='n4j_port',help='neo4j database port number.')
    parser.add_argument('-d','--dump',action='store_true',default=False,
        dest='dump',help='Dump/delete neo4j database.')
    parser.add_argument('-c','--collect',action='store_true',default=False,
        dest='collect',help='Collect/return all from neo4j database.')
    parser.add_argument('-s','--test',action='store_true',default=False,
        dest='test',help='Run neo4j test.')
    parser.add_argument('-y','--pretty',action='store_true',default=False,
        dest='pretty',help='Enables pretty output of all files.')
    parser.add_argument('-v','--verbose',action='store_true',default=False,
        dest='verbose',help='Verbose output.')
    args = parser.parse_args()
    load = Neo4jUtil(**vars(args))
    load.run()
| true |
f7d62df130aa4411efd9ac4fcb3dba2188624268 | Python | 15871687941/PyCode | /GUI_tkinter/Enter_Text.py | UTF-8 | 540 | 3.15625 | 3 | [] | no_license | # coding = UTF-8
from tkinter import *

# Minimal Tk demo: a masked Entry field plus two buttons that copy its
# contents into a Text widget.
Windows = Tk()
Windows.title("MY WINDOWS")
Windows.geometry("200x200")
# show="*" masks typed characters; font is KaiTi (楷体), size 10.
e1 = Entry(Windows, show="*", width=16, font=("楷体", 10))
e1.pack()
def insert_point():
    # Insert the entry's text at t1's current cursor position.  t1 is
    # defined later at module level; this works because callbacks only run
    # after the whole script has executed.
    var = e1.get()
    t1.insert("insert", var)
b1 = Button(Windows, text="Insert Point", width=16, command=insert_point)
b1.pack()
def insert_end():
    # Append the entry's text plus a newline at the end of t1.
    var = e1.get()
    t1.insert("end", var + "\n")
b2 = Button(Windows, text="Insert End", width=16, command=insert_end)
b2.pack()
t1 = Text(Windows)
t1.pack()
Windows.mainloop()
| true |
e741894e11af99c42298a8b0b61fbee67dcbc439 | Python | Insomnia1437/PV_crawler | /telnetEV.py | UTF-8 | 3,105 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time :
# @File : telnetEV
# @Software:
# @Author : Kudoh
# @Email :
import time
import telnetlib
class Reset:
    """Telnet helper that reboots event-timing hardware (EVG/EVR cards and
    their VME crate) through an interactive IOC shell.

    Improvement: resetEVG/resetEVR were byte-for-byte duplicates except for
    the menu command ('EG' vs 'ER'); the shared dialogue now lives in
    _reset_card.

    NOTE(review): strings are passed to telnetlib directly, which only
    works on Python 2 — Python 3's telnetlib requires bytes.  Confirm the
    target interpreter before porting.
    """
    def __init__(self):
        # Seconds to wait for each expected prompt string.
        self.rtimeout = 1

    def open(self, host, port):
        """Open a telnet session and sync to the '->' shell prompt.

        Returns the Telnet object, or None when the prompt never appears.
        """
        tn = telnetlib.Telnet(host, port)
        time.sleep(1)
        tn.write("\n")
        if self.readmsg(tn, "->") == 0:
            tn.close()
            tn = None
        return tn

    def _reset_card(self, host, port, cardnumber, menu):
        """Shared EVG/EVR reset dialogue.

        Enters the card *menu* ('EG' or 'ER'), selects *cardnumber*, issues
        'r' (reset) twice, quits, then drains any pending output.  Returns
        True on success, False when the session could not be opened or an
        expected prompt was missed.
        """
        tn = self.open(host, port)
        f = True
        if tn is None:
            f = False
        else:
            try:
                if f: tn.write("%s\n" % (menu))
                if f and self.readmsg(tn, "]:") == 0: f = False
                if f: tn.write("%s\n" % (cardnumber))
                if f and self.readmsg(tn, "quit") == 0: f = False
                if f: tn.write("r\n")
                if f and self.readmsg(tn, "quit") == 0: f = False
                if f: tn.write("r\n")
                if f and self.readmsg(tn, "quit") == 0: f = False
                if f: tn.write("q\n")
                if f: self.readmsg(tn, "->")
                # Drain any buffered output before closing.
                r = tn.read_eager()
                while r != "":
                    r = tn.read_eager()
                tn.write("\r\n")
                time.sleep(1)
            except:
                print("error")
            tn.close()
        return f

    def resetEVG(self, host, port, cardnumber):
        """Reset Event Generator card *cardnumber* via the 'EG' menu."""
        print("start resetEVG", host, port, cardnumber)
        return self._reset_card(host, port, cardnumber, "EG")

    def resetEVR(self, host, port, cardnumber):
        """Reset Event Receiver card *cardnumber* via the 'ER' menu."""
        print("start resetEVR", host, port, cardnumber)
        return self._reset_card(host, port, cardnumber, "ER")

    def resetVME(self, host, port):
        """Reboot the whole VME crate by issuing 'reboot' at the IOC shell."""
        print("start resetVME")
        tn = self.open(host, port)
        f = True
        if tn is None:
            f = False
        else:
            try:
                tn.write("reboot\n")
                r = tn.read_eager()
                while r != "":
                    r = tn.read_eager()
                tn.write("\r\n")
                time.sleep(1)
                tn.close()
            except:
                tn.close()
                f = False
        return f

    def readmsg(self, tn, expected):
        """Read until *expected* appears (or rtimeout elapses); echo and
        return the length read.  0 means the prompt never arrived."""
        msg = tn.read_until(expected, self.rtimeout)
        print(msg)
        return len(msg)
| true |
96396306de3a92c554ee9475424a3e0a74369a7a | Python | nitoc-ict/slack-FileRemove | /channel_list.py | UTF-8 | 344 | 2.53125 | 3 | [
"MIT"
] | permissive | import requests
import os
import json
def get_channel_list():
    """Return the IDs of all Slack channels, or -1 when the API reports failure.

    Authenticates with the token from the SLACK_TOKEN environment variable.
    """
    url = "https://slack.com/api/channels.list"
    auth = {"token": os.environ["SLACK_TOKEN"]}
    slack_res = requests.get(url, params = auth).json()
    if not slack_res["ok"]:
        return -1
    return [channel.get("id") for channel in slack_res["channels"]]
| true |
d07692da0f2fe51c22231e55d589a0fd1ea91f4c | Python | vjek/procedural_generation | /otp-enc.py | UTF-8 | 2,499 | 3.53125 | 4 | [
"Unlicense"
] | permissive | #!/usr/bin/env python3
# script to demonstrate mod10 Message + Key = Ciphertext
# by vjek, 20200426, updated 20230422
###
from getpass import getpass
import random,hashlib,sys
def getphrase():
    """Prompt for a passphrase (>= 16 chars) and return its SHA-512 hex digest.

    The digest seeds the PRNG that generates the pad, so the same passphrase
    always reproduces the same key stream.  For a true one-time pad, mix a
    date (e.g. ISO 8601) into the seed as well — this enables arbitrary
    encryption/decryption based on date and passphrase.
    """
    passphrase = getpass("Passphrase:")
    if len(passphrase) < 16:
        print("Passphrase too short, enter at least 16 characters.")
        exit()
    return hashlib.sha512(passphrase.encode('utf-8')).hexdigest()
def make_custom_dict(rand1):
    """Build a procedural letter -> two-digit-code table for the mod10 cipher.

    Instead of the fixed assignment 01=a .. 26=z, the codes 00-98 are
    shuffled with the caller's (seeded) PRNG, so the mapping varies
    procedurally with the seed.
    """
    codes = list(range(0, 99))
    rand1.shuffle(codes)  # this shuffle is procedural, driven by rand1's seed
    charset = '''abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()-_=+[]\{}|;':",./<>? \n'''
    return {letter: "%02d" % code for letter, code in zip(charset, codes)}
def mod10(num):
    """Return num mod 10, discarding the tens place of a two-digit number."""
    return num % 10
#first, get the hash of a passphrase, as a random seed
hashphrase = getphrase()
rand1=random.Random()
rand1.seed(hashphrase) #use the hashed passphrase as seed
cust_dict=make_custom_dict(rand1)
#take input from stdin (terminated by ctrl-d)
print("Enter the message to encrypt. You may use any printable key on the us-english keyboard, plus space and newline. End with newline + ctrl-d: ")
cleartext1=sys.stdin.read().rstrip()
if len(cleartext1) < 1:
    print("Your message is too short.")
    exit()
hashclear = hashlib.sha512(cleartext1.encode('utf-8')).hexdigest() #get hash of message
cleartext1=hashclear+cleartext1 #prepend message hash to message (integrity check on decrypt)
#this produces the message line, using the custom dictionary entries
#(each character becomes its two-digit code; unknown characters raise KeyError)
try:
    cleartext1=''.join(str(cust_dict[c]) for c in cleartext1)
except:
    print("ERROR:Some part of your message exceeded the bounds of the dictionary.")
    exit()
s_len=len(cleartext1)
key1=''
for a in range(0,s_len):
    key1 += str(rand1.randint(0,9)) #create OTP key of message length
# Combine digit-by-digit: ciphertext = (message + key) mod 10.
ciph1=''
for a in range(0,s_len):
    m1=int(cleartext1[a])
    k1=int(key1[a])
    ciph1 += str(mod10(m1+k1)) #mod10 message + key
print("Your cipher text is:\n"+ciph1)
| true |
3dd3c7725a5ca263152659e42e803f39c5906377 | Python | nabmctackle/pythonbelt18 | /beltexam/models/model.py | UTF-8 | 3,192 | 2.703125 | 3 | [] | no_license | import re
from beltexam.config.mysqlconnection import connectToMySQL
from beltexam import app
from flask_bcrypt import Bcrypt
app.secret_key = "theMostSecret"
mysql = connectToMySQL('mydb')
bcrypt = Bcrypt(app)
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class Model:
    """Validation + persistence helpers for user registration and login.

    Relies on the module-level `mysql` connection, `bcrypt`, and
    EMAIL_REGEX.  Both methods return a list whose first element is a
    list of validation error messages (empty on success) and whose
    second element, present only on success, is the user's full name.
    """

    def register(self, form):
        """Validate a registration form and insert the new user.

        form: mapping with keys f_name, l_name, email, pw, pwc.
        Returns [errors] on failure, [[], full_name] on success.
        """
        flasharr = []
        responsearr = []
        # Bug fix: every validation branch did `flash.append[...]`,
        # which subscripts an undefined name and raised NameError at
        # runtime; the messages belong in `flasharr`.
        if len(form["f_name"]) < 1:
            flasharr.append("Name can't be blank!")
        if form['f_name'].isalpha() == False:
            flasharr.append("Name must be alpha!")
        if len(form["l_name"]) < 1:
            flasharr.append("Last Name can't be blank!")
        if form['l_name'].isalpha() == False:
            flasharr.append("Last Name can't be alpha!")
        if len(form['email']) < 1:
            flasharr.append("Email cant be blank")
        if not EMAIL_REGEX.match(form['email']):
            flasharr.append('Invalid Email Format')
        if len(form["pw"]) < 8:
            flasharr.append("Password must be 8 characters!")
        if form["pw"] != form['pwc']:
            flasharr.append("Passwords must match!")
        # Duplicate-email check (parameterized query, injection safe).
        query = "SELECT * FROM users where email = %(email)s;"
        data = {'email': form['email']}
        all_users = mysql.query_db(query, data)
        if len(all_users) > 0:
            flasharr.append("Email already registered!")
        if flasharr:
            responsearr.append(flasharr)
            return responsearr
        username = form['f_name'] + " " + form['l_name']
        query = "INSERT INTO users (f_name,l_name,email,password) VALUES (%(f_name)s,%(l_name)s,%(email)s,%(password)s);"
        data = {
            'f_name': form['f_name'],
            'l_name': form['l_name'],
            'email': form['email'],
            # Store only the bcrypt hash, never the raw password.
            'password': bcrypt.generate_password_hash(form['pw'])
        }
        mysql.query_db(query, data)
        responsearr.append(flasharr)
        responsearr.append(username)
        return responsearr

    def login(self, form):
        """Validate a login form and verify the credentials.

        form: mapping with keys liemail and lipw.
        Returns [errors] on failure, [[], full_name] on success.
        """
        flasharr = []
        responsearr = []
        if len(form['liemail']) < 1:
            flasharr.append("email cannot be blank!")
        if not EMAIL_REGEX.match(form['liemail']):
            flasharr.append("email not valid")
        if len(form["lipw"]) < 8:
            flasharr.append("password minimum length is 8 characters")
        if flasharr:
            responsearr.append(flasharr)
            return responsearr
        query = "SELECT * FROM users WHERE email = %(email)s;"
        data = {'email': form['liemail']}
        usercheck = mysql.query_db(query, data)
        # Bug fix: guard against an unknown email before indexing
        # usercheck[0]; the original raised IndexError for unregistered
        # emails instead of reporting a failed login.
        if usercheck and bcrypt.check_password_hash(usercheck[0]['password'], form['lipw']):
            username = usercheck[0]['f_name'] + " " + usercheck[0]['l_name']
            responsearr.append(flasharr)
            responsearr.append(username)
            return responsearr
        flasharr.append("you could not be logged in")
        responsearr.append(flasharr)
        return responsearr
e8899be690842cbb604f6297c3621da6c49afbb0 | Python | ForceCry/iem | /scripts/feature/multiday_timeseries.py | UTF-8 | 2,909 | 2.703125 | 3 | [] | no_license | import iemdb
import mx.DateTime
import numpy
# Open cursors against the IEM ASOS (hourly obs) and COOP (climatology)
# databases.
ASOS = iemdb.connect('asos', bypass=True)
acursor = ASOS.cursor()
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
# Daily climatology (high/low) for Des Moines, shifted so the Dec-Mar
# winter season maps onto the 2011-2012 plotting window.
cdates = []
chighs = []
clows = []
ccursor.execute("""SELECT
 case when extract(month from valid) = 1 then valid + '1 year'::interval else valid end as dvalid,
 high, low from ncdc_climate71 where station = 'ia2203'
 and extract(month from valid) in (12,1,2,3) ORDER by dvalid ASC""")
for row in ccursor:
    ts = mx.DateTime.strptime(row[0].strftime("%Y%m%d"), "%Y%m%d")
    ts += mx.DateTime.RelativeDateTime(year=2011)
    if row[0].month in (1,2,3):
        ts += mx.DateTime.RelativeDateTime(year=2012)
    # Duplicate the 28 Feb value on 29 Feb so leap day has climatology.
    if row[0].month == 3 and row[0].day == 1:
        cdates.append( ts - mx.DateTime.RelativeDateTime(days=1) )
        chighs.append( chighs[-1] )
        clows.append( clows[-1] )
    cdates.append( ts )
    chighs.append( row[1] )
    clows.append( row[2] )
chighs = numpy.array(chighs)
clows = numpy.array(clows)
# Hourly DSM temperature observations for the 2011-12 winter (the obs
# tables are partitioned by year, hence the t%s table name).
valid = []
tmpf = []
for yr in [2011,2012]:
    acursor.execute("""
    SELECT valid, tmpf from t%s WHERE station = 'DSM'
    and valid > '2011-12-01' ORDER by valid ASC
""" % (yr))
    for row in acursor:
        ts = mx.DateTime.strptime(row[0].strftime("%Y%m%d%H%M"), "%Y%m%d%H%M")
        valid.append( ts )
        tmpf.append( row[1] )
valid2 = []
tmpf2 = []
#for yr in [2010,2011]:
#    acursor.execute("""
#    SELECT valid, tmpf from t%s WHERE station = 'DSM'
#    and valid > '2010-12-01' and valid < '2011-01-25' ORDER by valid ASC
#""" % (yr))
#    for row in acursor:
#        ts = mx.DateTime.strptime(row[0].strftime("%Y%m%d%H%M"), "%Y%m%d%H%M")
#        valid2.append( ts + mx.DateTime.RelativeDateTime(years=1) )
#        tmpf2.append( row[1] )
import matplotlib.pyplot as plt
import matplotlib.font_manager
prop = matplotlib.font_manager.FontProperties(size=12)
fig = plt.figure()
# X axis ticks: the 1st of each month (with month label) plus every 7th day.
sts = mx.DateTime.DateTime(2012,2,1)
ets = mx.DateTime.DateTime(2012,3,26)
now = sts
xticks = []
xticklabels = []
while now < ets:
    if now.day == 1 or now.day % 7 == 0:
        xticks.append( now )
        fmt = "%-d"
        if now.day == 1:
            fmt = "%-d\n%b"
        xticklabels.append( now.strftime(fmt))
    now += mx.DateTime.RelativeDateTime(days=1)
# Plot climatology as bars spanning low..high with the hourly obs on top.
ax = fig.add_subplot(111)
ax.bar(cdates, chighs - clows, bottom=clows, fc='lightblue', ec='lightblue',
       label="Daily Climatology")
ax.plot(valid, tmpf, color='r', label='2012 Hourly Obs')
#ax.plot(valid2, tmpf2, color='k', label='2010-11 Hourly Obs')
ax.set_ylabel("Temperature $^{\circ}\mathrm{F}$")
#ax.set_xlabel("7 September 2011 (EDT)")
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_xlim(float(sts), float(ets))
ax.set_ylim(-10,90)
ax.legend(loc=2)
ax.grid(True)
ax.set_title("Des Moines (KDSM) Air Temperature\n1 Feb 2012 - 25 Mar 2012")
fig.savefig('test.ps')
import iemplot
iemplot.makefeature('test')
| true |
7fc1e0293298aa53bff9822caba74c3de373342b | Python | samlawlerr/HalfBrick_Project-Sam_Lawler | /src/csv_to_json.py | UTF-8 | 439 | 2.734375 | 3 | [] | no_license | import json
import csv
# Source CSV and destination JSON paths.
CSV_PATH = "sandbox-installs.csv"
JSON_PATH = "jsonOutput.json"

# Index every CSV row by its user_pseudo_id column.
records = {}
with open(CSV_PATH, encoding="utf8") as csv_handle:
    for row in csv.DictReader(csv_handle):
        records[row["user_pseudo_id"]] = row

# Serialise the index as pretty-printed JSON.
with open(JSON_PATH, "w") as json_handle:
    json_handle.write(json.dumps(records, indent=4))
| true |
389d12bb2ae3e52aea567222f7ab61a38e2ccfba | Python | edu-athensoft/ceit4101python | /stem1400_modules/module_4_function/func2_recursive/recursive_problem_03_b.py | UTF-8 | 547 | 4.3125 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Fibonacci sequence
0,1,1,2,3,5,8,13,21,...
Input n and get the n-th number in the sequence
key point:
a = 0, b = 1
a = 1 = b, b = 1 = last_a + b
a = 1 = b, b = 2 = last_a + b
"""
# iteration
def fib(n):
    """Return the n-th Fibonacci number, 1-indexed: fib(1) == 0, fib(2) == 1."""
    prev, curr = 0, 1
    for _ in range(n - 1):
        # slide the (prev, curr) window one step along the sequence
        prev, curr = curr, prev + curr
    return prev
# Smoke test: print fib(1)..fib(9), expected 0 1 1 2 3 5 8 13 21.
print(fib(1))
print(fib(2))
print(fib(3))
print(fib(4))
print(fib(5))
print(fib(6))
print(fib(7))
print(fib(8))
print(fib(9))
print(fib(10)) | true |
736d90b956ccb2180c29b95879eb24cca088546d | Python | chrishefele/kaggle-sample-code | /WordImputation/sandbox/src/try_w2v.py | UTF-8 | 1,150 | 3.171875 | 3 | [] | no_license | import os.path
import gensim
from nltk.corpus import brown
TRAIN_FILE = '/home/chefele/kaggle/WordImputation/download/train_v2.txt'
MODEL_FILE = 'model.mdl'
def sentence_reader():
    # Generator over the training corpus: yields each line as a list of
    # whitespace-separated tokens, logging progress every 100k lines.
    # (Python 2 print statements.)
    for line_num, line in enumerate(open(TRAIN_FILE,'r')):
        if line_num % (100*1000) == 0:
            print 'read line:', line_num
        yield line.rstrip().split()
    #for sentence in brown.sents(): # about 50K sentences
    #    yield sentence
# Train a word2vec model on first run and cache it to MODEL_FILE;
# afterwards just reload the cached model and print a few similarity
# queries as a sanity check.  (Python 2 / gensim script.)
if not os.path.isfile(MODEL_FILE):
    print "Training word2vec model"
    sentences = sentence_reader()
    model = gensim.models.Word2Vec(sentences, min_count=5)
    print "Saving word2vec model to:", MODEL_FILE
    model.save(MODEL_FILE)
else:
    print "Loading word2vec model from:", MODEL_FILE
    model = gensim.models.Word2Vec.load(MODEL_FILE)
    print "Model loaded."
words = 'the and that this it'.split()
for word in words:
    print word, "->", model.most_similar(positive=[word])
    print
print model.similarity('man','woman')
print model.most_similar(positive=['Washington'])
print model.most_similar(positive=['Boston'])
print model.most_similar(positive=['President', 'woman'], negative=['man'])
| true |
23539007725a52356dabcf9d1e97306028602319 | Python | adi1201239/b | /Q_15.py | UTF-8 | 522 | 4.03125 | 4 | [] | no_license | list1 = []
# Read five names, report whether a queried name is present (once with
# `in`, once with a manual linear search), then print the list reversed.
for i in range(0, 5, 1):
    a = str(input("Enter the name in list "))
    list1.append(a)
print("Name in list ", list1)
j = str(input("Enter the name to search "))
if j in list1:
    print("The given name is present in list")
else:
    print("The given name is not present in list")
l = len(list1)
# Bug fix: the original iterated `range(l + 1)` so list1[l] always
# raised IndexError whenever the name was absent; iterate only valid
# indices and let for/else handle the not-found case.
for i in range(l):
    if list1[i] == j:
        print("element found ")
        break
else:
    print("element not found")
list2 = list1[::-1]
print("list is reverse order ", list2)
| true |
2ed760b39fad878ee4ed46397b66b72a81d61159 | Python | Nguyen-Tommy/Employee-Management-System | /app.py | UTF-8 | 5,250 | 3.078125 | 3 | [] | no_license | # Employee Management System
# Local web server application, MySQL database, CRUD operations, injection safe queries, file reading
from flask import Flask, render_template, url_for, request, redirect
from flask_mysqldb import MySQL
from werkzeug.utils import secure_filename
import os
# Import packages as variables and load environment variables
app = Flask(__name__)
mysql = MySQL(app)
app.config.from_envvar('APP_SETTINGS')
# Render index page, add new employee into database
@app.route('/', methods = ['POST', 'GET'])
def index():
    """Render the employee list; on POST, insert a new employee.

    POST form fields: name, email, job, salary.  Emails must be unique;
    duplicates and failed inserts are reported on the console only.
    """
    cur = mysql.connection.cursor()
    if request.method == 'POST':
        con = mysql.connection
        form = request.form
        # Parameterized queries keep the SQL injection-safe.
        query1 = 'SELECT email FROM employees WHERE email = %s'
        query2 = 'INSERT INTO employees (name, email, job, salary) VALUES (%s, %s, %s, %s)'
        try:
            cur.execute(query1, (form['email'],))
            # Insert only when the email is not already registered.
            if len(cur.fetchall()) == 0:
                cur.execute(query2, (form['name'], form['email'], form['job'], form['salary']))
                con.commit()
            else:
                print('Email used')
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch ordinary exceptions only.
        except Exception:
            print('Insert failed')
        return redirect('/')
    else:
        # GET: show every employee.
        cur.execute('SELECT * FROM employees')
        return render_template('index.html', employees = cur.fetchall())
# Render update page, update selected employee's information
@app.route('/update/<string:email>', methods = ['POST', 'GET'])
def update(email):
    """Render the edit form for *email*; on POST, save the changes.

    The new email must be unused or unchanged, otherwise the edit is
    rejected (console message) and the form is shown again.
    """
    cur = mysql.connection.cursor()
    query1 = 'SELECT * FROM employees WHERE email = %s'
    if request.method == 'POST':
        con = mysql.connection
        form = request.form
        query2 = 'UPDATE employees SET name = %s, email = %s, job = %s, salary = %s WHERE email = %s'
        try:
            cur.execute(query1, (form['email'],))
            # Allow the update when the new email is free or unchanged.
            if len(cur.fetchall()) == 0 or form['email'] == email:
                cur.execute(query2, (form['name'], form['email'], form['job'], form['salary'], email))
                con.commit()
            else:
                print('Email used')
            return redirect('/update/' + email)
        # Bug fix: no bare `except:` (it also caught SystemExit etc.).
        except Exception:
            print('Update failed')
            return redirect('/')
    else:
        cur.execute(query1, (email,))
        rows = cur.fetchall()
        # Robustness fix: an unknown email used to raise IndexError
        # (HTTP 500); send the user back to the list instead.
        if not rows:
            return redirect('/')
        return render_template('update.html', employee = rows[0])
# Delete selected employee
@app.route('/delete/<string:email>')
def delete(email):
    """Delete the employee identified by *email*, then show the list."""
    cur = mysql.connection.cursor()
    con = mysql.connection
    # Parameterized delete keeps the query injection-safe.
    query = 'DELETE FROM employees WHERE email = %s'
    try:
        cur.execute(query, (email,))
        con.commit()
    # Bug fix: narrow the bare `except:` to ordinary exceptions.
    except Exception:
        print('Delete failed')
    return redirect('/')
# Read uploaded text file, add all new employees in file into database
@app.route('/upload', methods = ['POST', 'GET'])
def upload():
    """Bulk-import employees from an uploaded CSV-style text file.

    Each line must contain exactly `name,email,job,salary`.  Rows with a
    duplicate email are skipped; malformed lines are ignored.
    """
    if request.method == 'POST':
        cur = mysql.connection.cursor()
        con = mysql.connection
        query1 = 'SELECT email FROM employees WHERE email = %s'
        query2 = 'INSERT INTO employees (name, email, job, salary) VALUES (%s, %s, %s, %s)'
        files = request.files['file']
        if files.filename == '':
            print('No file')
            return redirect('/')
        # Save under a sanitised name inside the configured upload folder.
        filename = secure_filename(files.filename)
        files.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        with open(app.config['UPLOAD_FOLDER'] + filename) as file:
            for line in file:
                # Bug fix: the old positional counter left the trailing
                # '\n' on the salary field and silently mis-assigned
                # fields on short/long lines; strip each field and skip
                # lines that do not have exactly four of them.
                fields = [part.strip() for part in line.split(',')]
                if len(fields) != 4:
                    continue
                name, email, job, salary = fields
                try:
                    cur.execute(query1, (email,))
                    # Insert only new emails.
                    if len(cur.fetchall()) == 0:
                        cur.execute(query2, (name, email, job, salary))
                        con.commit()
                    else:
                        print('Email used')
                # Bug fix: narrow the bare `except:`.
                except Exception:
                    print('Insert failed')
    return redirect('/')
2bac96a9fc92c1f188cbe6000095aefbd6881ab7 | Python | cyberjam/TIL | /Prog_L1_자릿수더하기.py | UTF-8 | 1,241 | 4.1875 | 4 | [] | no_license | def solution(n):
return sum([int(i) for i in str(n)])
# Another person's solution
# Brilliant use of recursion..
def sum_digit(number):
    """Return the sum of the decimal digits of *number* (iterative form)."""
    total = 0
    while number >= 10:
        total += number % 10   # peel off the ones digit
        number //= 10          # drop it from the number
    return total + number      # the final single digit
# The code below just prints the result as a quick test.
print("결과 : {}".format(sum_digit(123)));
# If the number is not a single digit, return number % 10 (the ones
# digit) plus the function applied to number // 10 (the remaining digits).
# This repeats until only the ones digit is left,
# and when only the ones digit remains, returning it adds it in last.
# So the final result is: return 3 + 2 + 1
# Very elegant.
# My walkthrough of this solution: regarding '%' and '//', 123 % 10 returns 3, the remainder of dividing 123 by 10.
# 123 // 10 returns the quotient 12, which is fed back into the function. (return 123%10 + sum_digit(123//10) -> return 3 + sum_digit(12))
# On the second call, the earlier 3 is joined by 12%10 + sum_digit(12//10), i.e. the second step becomes return 3 + 2 + sum_digit(1).
# On the third call, the if statement sees 1 < 10 and returns 1, so the final result is return 3 + 2 + 1.
| true |
4586cb3b315e1dd71d0d770157da6751ac6ef43a | Python | RuslanBilyk/pytest-ethereum | /tests/api/contracts/test_factory.py | UTF-8 | 566 | 2.59375 | 3 | [
"MIT"
] | permissive | from hexbytes import HexBytes
def test_CreateFactoryFromInterface(t):
    """A factory built from an interface mirrors that interface and
    produces a distinct instance on every call."""
    interface = {'abi': [], 'bytecode': '0x', 'bytecode_runtime': '0x'}
    factory = t.new_contract(interface)
    # The factory exposes the same data we gave it
    # (attribute names follow the Web3.py API).
    assert factory.abi == interface['abi']
    assert factory.bytecode == HexBytes(interface['bytecode'])
    assert factory.bytecode_runtime == HexBytes(interface['bytecode_runtime'])
    # Factory instances are unique objects.
    assert factory() is not factory()
| true |
9ad90cf675d9b8c98cfd02ca6ec97d97cae1b85a | Python | bethanymbaker/arch | /data_structures/breadth_first_search.py | UTF-8 | 478 | 3.609375 | 4 | [] | no_license | from queue import Queue
graph = {
'A': ['B', 'C'],
'B': ['D', 'E'],
'C': ['F'],
'D': [],
'E': ['F'],
'F': []
}
visited = []
que = Queue()
root_node = 'A'
que.put(root_node)
visited.append(root_node)
while not que.empty():
node = que.get()
print(f'node = {node}')
children = graph[node]
for child in children:
if child not in visited:
visited.append(child)
que.put(child)
print(f'visited = {visited}')
| true |
2b64c789cc83e4c6d9021b31f62ed510a5edf308 | Python | arhipovana2001/case4 | /local_english.py | UTF-8 | 831 | 2.984375 | 3 | [] | no_license | # Localization file (english).
# UI strings for the English localization of the readability analyser;
# keeping all user-facing text here lets the main program stay
# language-agnostic.
TEXT_BLOB = 'Select in which language the text is entered and enter the number: '
LANGUAGE_1 = '1 - Russian'
LANGUAGE_2 ='2 - English'
TEXT = 'Enter the text: '
# English vowels, presumably used for syllable counting — TODO confirm
# against the caller.
VOWELS = 'aeiou'
SENTENCES = 'Suggestions: '
WORD = 'Words: '
SYLLABLES = 'Syllables: '
AVERAGE_SENTENCES = 'Average sentence length in words: '
AVERAGE_WORD = 'Average syllable length: '
INDEX_FLESH = 'Flush readability index: '
# Verdict strings keyed to the Flesch readability bands.
TEXT_EASY = 'The text is very easy to read (for younger students).'
TEXT_SIMPLE = 'Simple text (for students).'
TEXT_LITTLE_HARD = 'The text is a little hard to read (for students).'
TEXT_DIFFICULT = 'The text is hard to read (for graduates).'
ENGLISH_SIMPLE = 'Simple English text.'
DIFFICULT = 'A little hard to read.'
VERY_HARD = 'Very difficult to read.'
OBJECTIVITY = 'Objectivity: '
| true |
16762e10bad595935f6e480c0bc3576dcf082808 | Python | ErikOrjehag/mpc | /linear_models.py | UTF-8 | 794 | 2.5625 | 3 | [] | no_license | import numpy as np
def as_float(*args):
    """Return the given arrays converted to float64, as a tuple.

    Bug fix: ``np.float_`` was removed in NumPy 2.0; ``np.float64`` is
    the identical type and works on every NumPy version.
    """
    return tuple(arg.astype(np.float64) for arg in args)
def spring_damper(m, k, c, dt):
    """Forward-Euler discrete state-space model of a mass-spring-damper.

    State is [position, velocity], input is the applied force, output is
    the position.  m: mass, k: spring constant, c: damping coefficient,
    dt: time step.  Returns float64 matrices (A, B, C, D).
    """
    # Bug fix: the damping entry was `1 - c/m` (missing the dt factor);
    # every other Euler term is scaled by dt, and the discretisation of
    # v' = -k/m x - c/m v + u/m gives v[t+1] = -k/m*dt x + (1 - c/m*dt) v.
    A = np.array([
        [1, dt],
        [-k / m * dt, 1 - c / m * dt],
    ])
    B = np.array([
        [0],
        [1 / m * dt],
    ])
    C = np.array([
        [1, 0],
    ])
    D = np.array([
        [0],
    ])
    # Convert inline (rather than via the sibling as_float helper) so the
    # function is self-contained; float64 matches the old np.float_.
    return tuple(mat.astype(np.float64) for mat in (A, B, C, D))
def overhead_crane(m1, m2, dt):
    """Discrete-time linear model of an overhead crane.

    m1, m2: the two masses; dt: time step.  The four states are
    presumably cart position/velocity and load angle/angular rate —
    TODO confirm against the caller.  Returns (A, B, C, D) converted to
    float via as_float.
    """
    coupling = m2 / (m1 + m2) * dt
    A = np.array([
        [1, dt, 0, 0],
        [0, 1, coupling, 0],
        [0, 0, 1, dt],
        [0, 0, -dt, 1],
    ])
    B = np.array([[0], [dt], [0], [-dt]])
    C = np.array([[1, 0, 0, 0]])
    D = np.array([[0]])
    return as_float(A, B, C, D)
| true |
5571b6b803661c4aa7d6eaf0aee91fc33dd92ba4 | Python | ducfilan/Data-Structures-Implementation | /Implementation/binary_tree.py | UTF-8 | 1,499 | 3.703125 | 4 | [] | no_license | class BinaryTree(object):
    def __init__(self, root_obj):
        """Create a leaf node holding *root_obj*."""
        self.key = root_obj        # payload stored at this node
        self.left_child = None     # left subtree: BinaryTree or None
        self.right_child = None    # right subtree: BinaryTree or None
def insert_left(self, new_node):
t = BinaryTree(new_node)
if self.left_child:
t.left_child = self.left_child
self.left_child = t
else:
self.left_child = t
def insert_right(self, new_node):
t = BinaryTree(new_node)
if self.right_child:
t.right_child = self.right_child
self.right_child = t
else:
self.right_child = t
    def get_left_child(self):
        """Return the left subtree (or None)."""
        return self.left_child
    def get_right_child(self):
        """Return the right subtree (or None)."""
        return self.right_child
    def get_root_value(self):
        """Return the value stored at this node."""
        return self.key
    def set_root_value(self, new_value):
        """Replace the value stored at this node."""
        self.key = new_value
def pre_order_traverse(self, tree):
if tree:
print(tree.get_root_value())
self.in_order_traverse(tree.get_left_child())
self.in_order_traverse(tree.get_right_child())
def in_order_traverse(self, tree):
if tree:
self.in_order_traverse(tree.get_left_child())
print(tree.get_root_value())
self.in_order_traverse(tree.get_right_child())
def post_order_traverse(self, tree):
if tree:
self.in_order_traverse(tree.get_left_child())
self.in_order_traverse(tree.get_right_child())
print(tree.get_root_value())
| true |
d62925df143205354773247acefe9bab64d62e15 | Python | Grande-zhu/CSCI1100 | /LAB/LAB11/check3.py | UTF-8 | 4,083 | 3.671875 | 4 | [] | no_license | from tkinter import *
from Ball1 import *
import random
import copy
class BallDraw(object):
    """Tkinter window that animates a list of Ball objects bouncing on a
    canvas, with Restart / Slower / Faster / Quit buttons."""
    # NOTE(review): the mutable default `balls=[]` is shared between
    # every call that omits the argument — confirm callers always pass
    # their own list (main below does).
    def __init__ (self, parent,maxx=400,maxy=400,wait_time=100,balls=[]):
        ##=====DATA RELEVANT TO BALL===============
        ## We are going to repeatedly draw a ball object on the canvas,
        ## "moving" it across the canvas. The ball object is specified
        ## by (a) its x and y center coordinates (a tuple), (b) its radius,
        ## (c) the delta x and delta y to move the ball in each time
        ## increment, and (d) its color.
        self.balls=balls
        # Deep copy preserved for Restart, so the animation can be reset
        # to the initial ball positions.
        self.balls_ori=copy.deepcopy(balls)
        self.wait_time=wait_time
        self.isstopped = False
        self.maxx=maxx
        self.maxy=maxy
        #=============CREATE THE NEEDED GUI ELEMENTS===========
        self.parent = parent
        self.frame = Frame(parent)
        self.frame.pack()
        self.top_frame = Frame(self.frame)
        self.top_frame.pack(side=TOP)
        self.canvas = Canvas(self.top_frame, background="white", \
                             width=self.maxx, height=self.maxy )
        self.canvas.pack()
        self.bottom_frame = Frame(self.frame)
        self.bottom_frame.pack(side=BOTTOM)
        # NOTE(review): these attribute names shadow the methods of the
        # same name (restart/quit/...); command= captures the bound
        # method before the assignment, so the buttons still work, but
        # self.restart etc. afterwards refer to the Button widgets.
        self.restart = Button(self.bottom_frame, text="Restart", command=self.restart)
        self.restart.pack(side=LEFT)
        self.slow = Button(self.bottom_frame, text="Slower", command=self.slower)
        self.slow.pack(side=LEFT)
        self.fast = Button(self.bottom_frame, text="Faster", command=self.faster)
        self.fast.pack(side=LEFT)
        self.quit = Button(self.bottom_frame, text="Quit", command=self.quit)
        self.quit.pack(side=RIGHT)
    def faster(self):
        # Halve the frame delay, bounded below at ~2 ms.
        if self.wait_time > 2:
            self.wait_time //= 2
    def slower(self):
        # Double the frame delay (no upper bound).
        self.wait_time *= 2
    def restart(self):
        # Reset all balls to their initial state.
        self.balls=copy.deepcopy(self.balls_ori)
    def quit(self):
        # Stop the animation loop and destroy the window.
        self.isstopped = True
        self.parent.destroy()
    def draw_ball(self):
        # Remove all the previously-drawn balls
        self.canvas.delete("all")
        # Draw an oval on the canvas within the bounding box
        for ball in self.balls:
            self.canvas.create_oval(ball.bounding_box(), fill=ball.ball_color)
        self.canvas.update() # Actually refresh the drawing on the canvas.
        # Pause execution. This allows the eye to catch up
        self.canvas.after(self.wait_time)
    def animate(self):
        ## Loop until the ball runs off the screen.
        while(not self.isstopped):
            # Move the ball
            self.draw_ball()
            for ball in self.balls:
                ball.check_and_reverse(self.maxx,self.maxy)
                ball.move()
def creat_ball():
    """Build one Ball with random position, radius, velocity and color.

    NOTE(review): main calls this as BallDraw.creat_ball(); as shown the
    function is module-level, which would raise AttributeError there —
    presumably it was meant to live inside the class. Confirm.
    """
    x,y = random.randint(10,390),random.randint(10,390) # initial location
    radius = random.randint(5,10)
    dx,dy = random.randint(-8,8),random.randint(-8,8) # the movement of the ball object
    colorList = ["blue", "red", "green", "yellow", "magenta", "orange"]
    color = random.choice(colorList)
    ball=Ball(x,y,dx,dy,radius,color)
    return ball
if __name__ == "__main__":
    ## We will create a root object, which will contain all
    ## our user interface elements
    ##
    # Build nine random balls, then hand them to the BallDraw window.
    balls=[]
    for i in range(9):
        ball=BallDraw.creat_ball()
        balls.append(ball)
    root = Tk()
    root.title("Tkinter: Lab 11")
    ## Create a class to handle all our animation
    bd = BallDraw(root,balls=balls)
    ## Run the animation by continuously drawing the ball and then moving it
    bd.animate()
    ## This is an infinite loop that allows the window to listen to
    ## "events", which are user inputs. The only user event here is
    ## closing the window, which ends the program.
    root.mainloop()
| true |
98ddf83eaa9726c0b05939cc5315f21e58ec24fc | Python | kbalasub78/Python-CS1 | /py4e-chapter06/exercise02.py | UTF-8 | 270 | 4.5625 | 5 | [] | no_license | ## Exercise 2: Given that fruit is a string, what does fruit[:] mean?
fruit = 'pineapple'
## print fruit in usual way
print(fruit)
## fruit[:] is a full slice of the string: it starts at index 0 and runs
## to the last character, so it prints the same string as above.
print( fruit[:] )
| true |
d022225170a48b7eca3ea917472d51c5a856414f | Python | jlyons6100/Wallbreakers | /Week_2/most_common_word.py | UTF-8 | 503 | 3.34375 | 3 | [] | no_license | # Most Common Word: Most common words from a paragraph
import re
from collections import defaultdict, Counter
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent word in *paragraph* not in *banned*.

        Words are produced by splitting on spaces and basic punctuation
        and are compared case-insensitively.
        """
        blocked = set(banned)
        counts = Counter()
        for token in re.split(r"[ ,|\.!?;']", paragraph):
            word = token.lower()
            if token != "" and word not in blocked:
                counts[word] += 1
        return counts.most_common(1)[0][0]
| true |
2e2532315d6c21bde2aa4b0ac37d8746e6a0a488 | Python | ljshou/workspace | /python/learn-python/list-files.py | UTF-8 | 518 | 3.09375 | 3 | [] | no_license | #!/usr/bin/python
#coding=utf8
import os
def list_files_recursive(path):
    # Recursively print the path of every file under *path*
    # (Python 2 print statements).
    for file_name in os.listdir(path):
        file_path = os.path.join(path, file_name)
        if os.path.isdir(file_path):
            list_files_recursive(file_path)
        else:
            print file_path
def list_files(path):
    # Same listing done iteratively via os.walk: print every file path
    # under *path* (Python 2 print statements).
    for parent, path_list, file_list in os.walk(path):
        for file_name in file_list:
            print os.path.join(parent, file_name)
if __name__ == '__main__':
    # Entry point: list everything under the current working directory.
    list_files_recursive(os.getcwd())
| true |
0a02172ec863264416b75a00144f0cf00fe928f0 | Python | mkenworthy/asas-sn-J060000 | /snake.py | UTF-8 | 5,044 | 3.34375 | 3 | [
"BSD-2-Clause"
] | permissive | import numpy as np
def snake(x, x_infl, y_infl):
    '''Piecewise-linear interpolation ("snake") through inflexion points.

    For points along x, with the ends of the straight-line segments
    marked by (x_infl, y_infl), return (xt, y) where xt are the points
    of x that lie inside [min(x_infl), max(x_infl)] and y their values
    on the corresponding segment.  x_infl must be strictly increasing.
    '''
    # keep only points between min(x_infl) and max(x_infl)
    xt = x[np.where( (x>=np.min(x_infl)) * (x<=np.max(x_infl)))]
    # per-segment slope m and intercept c of y = m*x + c
    m = (y_infl[1:] - y_infl[:-1]) / (x_infl[1:] - x_infl[:-1])
    c = y_infl[:-1] - m*x_infl[:-1]
    # broadcast to count, for each point, how many inflexion x's lie
    # strictly below it; that count minus one is the segment index
    n_positive = xt - x_infl[:,np.newaxis]
    count_positive = np.sum((n_positive>0),axis=0)
    # Bug fix: a point exactly at min(x_infl) produced index -1 and was
    # silently evaluated on the LAST segment's line; clip the index to
    # the valid segment range instead.
    ind = np.clip(count_positive - 1, 0, m.size - 1)
    y = m[ind]*xt + c[ind]
    return (xt, y)
def log_likelihood(theta, x, y, yerr, t_infl):
    """Gaussian log-likelihood of (y, yerr) given snake heights *theta*
    at the inflexion points *t_infl*."""
    _, model = snake(x, t_infl, theta)
    var = yerr ** 2
    per_point = (y - model) ** 2 / var + np.log(var)
    return -0.5 * np.sum(per_point)
def snakemcee(x, y, y_err, xmodel, ymodel, nwalk=32, niters=5000, progress=True):
    """Run an emcee MCMC fit of the snake heights to (x, y, y_err).

    xmodel: fixed inflexion x-positions; ymodel: starting heights.
    Returns the emcee sampler after niters steps with nwalk walkers.
    """
    import emcee
    def log_prior(theta):
        # NOTE(review): flat prior with hard-coded bounds 12..17 on
        # every height (magnitudes) — confirm these suit the data; the
        # commented-out line below once made the prior unconditionally
        # flat.
        if np.all(theta>12) and np.all(theta<17):
            return 0.0
#        return 0.0
        return -np.inf
    def log_probability(theta, x, y, yerr, t_infl):
        lp = log_prior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lp + log_likelihood(theta, x, y, yerr, t_infl)
    # default is 32 walkers and 1e-2 error on their starting position
    pos = ymodel + 1e-1 * np.random.randn(nwalk, ymodel.size)
    nwalkers, ndim = pos.shape
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(x, y, y_err, xmodel))
    sampler.run_mcmc(pos, niters, progress=progress);
    return sampler
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Demo 1: draw a random snake through 20 jittered inflexion points.
    # make a test data set
    time = np.linspace(58000, 58099.5, 401)
    t_inflexion = np.linspace(58000,58100, 20)
    t_inflexion = t_inflexion + np.random.uniform(0,5,size=20)
    f_min = 14
    f_max = 15
    m_inflexion = np.random.uniform(f_min, f_max, size=np.shape(t_inflexion))
    fig, ax = plt.subplots(1, 1, figsize=(12, 4))
    ax.set_xlim(np.min(t_inflexion),np.max(t_inflexion))
    ax.vlines(t_inflexion,f_min,f_max,linestyle='dashed')
    ax.scatter(t_inflexion, m_inflexion,color='orange')
    (x,y) = snake(time, t_inflexion, m_inflexion)
    ax.plot(x,y)
    plt.draw()
    plt.show()
    # Demo 2: generate a noisy snake and recover its heights, first
    # with scipy.optimize.minimize and then with emcee.
    # testing an emcee fitting to the lines
    # make a test data set
    N = 501
    n_inflex = 10
    t = np.linspace(58000, 58099.5, N)
    np.random.seed(77)
    t_inflexion = np.linspace(58000,58100, n_inflex)
    t_inflexion += np.random.uniform(0,5,size=t_inflexion.size)
    f_min = 14
    f_max = 15
    f_inflexion = np.random.uniform(f_min, f_max, size=np.shape(t_inflexion))
    fig1, ax1 = plt.subplots(1, 1, figsize=(12, 4))
    ax1.set_xlim(np.min(t_inflexion),np.max(t_inflexion))
    ax1.vlines(t_inflexion, f_min, f_max, linestyle='dashed')
    ax1.scatter(t_inflexion, f_inflexion,color='orange')
    (x_true,y_true) = snake(t, t_inflexion, f_inflexion)
    err = 0.1 * np.random.randn(y_true.size)
    y_true_noisy = y_true + np.random.randn(y_true.size)*err
    ax1.plot(x_true, y_true, 'k', alpha=0.3, lw=5)
    ax1.errorbar(x_true, y_true_noisy, yerr=err, fmt='.k', capsize=0)
    # try a traditional optimizer
    nll = lambda *args: -log_likelihood(*args)
    from scipy.optimize import minimize
    initial = f_inflexion + 0.1 * np.random.randn(f_inflexion.size)
    soln = minimize(nll, initial, args=(x_true, y_true_noisy, err, t_inflexion))
    p = soln.x
    print('initial is ',initial)
    print('result is ',p)
    print(soln)
    (x_fit,y_fit) = snake(t, t_inflexion, p)
    ax1.plot(x_fit,y_fit, 'r-')
    # MCMC fit seeded at the optimizer solution; plot walker traces.
    sampler = snakemcee(x_true, y_true_noisy, err, t_inflexion, p)
    fig2, axes = plt.subplots(n_inflex, figsize=(10, 7), sharex=True)
    samples = sampler.get_chain()
    for i in range(n_inflex):
        ax = axes[i]
        ax.plot(samples[:, :, i], "k", alpha=0.3)
        ax.set_xlim(00, len(samples))
    axes[-1].set_xlabel("step number");
    # get randomized samples to plot up
    flat_samples = sampler.get_chain(discard=1000, thin=15, flat=True)
    import corner
    fig = corner.corner(
        flat_samples, truths=f_inflexion
    );
    # Overlay 100 posterior-sample snakes on the data plot.
    inds = np.random.randint(len(flat_samples), size=100)
    for ind in inds:
        sample = flat_samples[ind]
        (x_fit,y_fit) = snake(t, t_inflexion, sample)
        ax1.plot(x_fit,y_fit, "C1", alpha=0.1)
    plt.draw()
    plt.show()
| true |
a1a7157f25f72da53657a16f27b45ce556ecf716 | Python | LeonardoRiojaMachineVentures/F | /adjust_sa.py | UTF-8 | 4,729 | 2.859375 | 3 | [] | no_license | d = 10.61
w_ave = 71.9   # population average weight (d, the stddev, is defined above)
w = 64         # subject's weight
# How many standard deviations the subject lies from the average, plus
# a few reference intervals around the mean.
print("you are " + str((w - w_ave)/d) + " away.")
print(0.9545, " lies inside [", w_ave - 2*d, w_ave + 2*d, "]")
print("healthy people in [", w_ave - 1.9*d, w_ave + 1.6*d, "]")
print("healthy people us in [", 88.9 - 3.5*d, 88.9, "]")
# Load the DRI table into header/content lines.
# NOTE(review): the file handle is never closed, and header/content are
# only used by the commented-out block at the bottom of the file.
f = open("dri.txt").read()
header = f.split('\n')[0]
content = f.split('\n')[1:]
def stats(x):
    """Summarise a histogram given as a list of (value, multiplicity).

    Returns (stddev, mean, total count, median estimate, leftover k),
    where the median is the midpoint of the value that crosses N/2 and
    its predecessor, and k is the cumulative count just before that
    crossing.
    """
    import math
    assert type(x) == list
    N = sum(mult for _, mult in x)
    total = sum(val * mult for val, mult in x)
    average = total / N
    variance = sum(mult * (average - val) ** 2 for val, mult in x) / N
    sd = math.sqrt(variance)
    median = 0.0
    k = 0
    last_value = 0.0
    for val, mult in x:
        k += mult
        if k >= N / 2:
            median = (last_value + val) / 2
            k -= mult
            break
        last_value = val
    return (sd, average, N, median, k)
def range_float(min, max, step):
    """Return evenly spaced floats starting at *min* with spacing *step*.

    Produces count + 1 points where count = (max - min) // step + 1, so
    the final point can lie one step beyond *max*.
    """
    count = int((max - min) // step + 1)
    assert count > 0
    return [min + step * i for i in range(count + 1)]
def normal_distribution(average, sd, negative_sigma, positive_sigma, step, N):
    """Sample an (unnormalised) Gaussian bell curve.

    Builds a grid from average - sd*negative_sigma to
    average + sd*positive_sigma via range_float, evaluates
    exp(-(v - average)^2 / (2 sd^2)) at each grid value, scales by N and
    truncates to int.  Returns (sum of grid values,
    list of (value, count) pairs).
    """
    import math
    assert type(negative_sigma) == float and negative_sigma >= 0.0
    assert type(positive_sigma) == float and positive_sigma >= 0.0
    assert type(average) == float
    assert type(step) == float
    assert type(sd) == float and sd >= 0.0
    grid = range_float(average - sd * negative_sigma, average + sd * positive_sigma, step)
    densities = [math.exp(-(v - average) ** 2 / (2 * sd ** 2)) for v in grid]
    assert type(N) == int and N > 0
    counts = [int(d * N) for d in densities]
    return (sum(grid), list(zip(grid, counts)))
# Sample a weight distribution for the "healthy" band, derive a scaling
# coefficient w / (median + 2*sd), and apply it to the RDA table below.
_, r = normal_distribution(average = 71.9, sd = 10.61, negative_sigma = 1.9, positive_sigma = 1.6, step = 0.01, N = 400)
#_, s = normal_distribution(average = 71.9, sd = 10.61, negative_sigma = 2.0, positive_sigma = 2.0, step = 0.1, N = 4)
sd, average, total, median, k = stats(r)
print(sd, average, total, median, k)
coefficient = w/(median + 2*sd)
print(coefficient)
# Same computation for a US reference distribution (mean 88.9, left tail only).
_, r_us = normal_distribution(average = 88.9, sd = 10.61, negative_sigma = 3.5, positive_sigma = 0.0, step = 0.01, N = 400)
sd, average, total, median, k = stats(r_us)
print(sd, average, total, median, k)
coefficient_us = w/(median + 2*sd)
print(coefficient_us)
# RDA values for adults 19-30; units appear to be micrograms (values
# like 1_000_000 for calcium suggest mg scaled by 1000) — TODO confirm
# against the dri.txt source.
rda_19_to_30 = {
    "calcium" : 1_000_000,
    "chromium" : 35,
    "copper" : 900,
    "fluoride" : 0,
    "iodine" : 150,
    "iron" : 8_000,
    "magnesium" : 400_000,
    "manganese" : 2_300,
    "molybdenum" : 45,
    "phosphorus" : 700_000,
    "selenium" : 55,
    "zinc" : 11_000,
    "potassium" : 3_400_000,
    "sodium" : 1_500_000,
    "chloride" : 2_300_000,
    "vitamin a" : 900,
    "vitamin c" : 90_000,
    "vitamin d" : 0,
    "vitamin E" : 15_000,
    "vitamin K" : 120,
    "thiamin" : 1_200,
    "riboflavin" : 1_300,
    "niacin" : 16_000,
    "vitamin b6" : 1_300,
    "folate" : 400,
    "vitamin b12" : 2.4,
    "pantothenic acid" : 5_000,
    "biotin" : 30,
    "choline" : 550_000,
}
# Scale every RDA entry by each coefficient (truncated to int).
adjusted_rda = {}
for i in rda_19_to_30:
    adjusted_rda[i] = int(rda_19_to_30.get(i)*coefficient)
print(adjusted_rda)
adjusted_rda_us = {}
for i in rda_19_to_30:
    adjusted_rda_us[i] = int(rda_19_to_30.get(i)*coefficient_us)
print(adjusted_rda_us)
'''
for line in content:
line = line.strip()
if line == "":
continue
nutrient, e, male, female, ul, unit = line.split(',')
male = float(male)
if e.strip() == "NE":
adjusted = male
else:
e = float(e)
#print("EAR = ", str(e), ", RDA male (95.45%) = ", str(male))
adjusted = w*(male - e)/(2*d)
a2 = w*male/(w_ave + 2*d)
alpha = max(e/w_ave, male/(w_ave + 2*d))
#print("alpha = " + str(alpha))
print(nutrient + " for you = " + str(round(w*alpha, 2)) + unit + ", EAR = " + str(e) + ", RDA male (95.45%) = " + str(male))
#print(adjusted)
#print("for weight = " + str(a2))
from enum import Enum
class Unit(Enum):
UG = 1
MG = 2
G = 3
def mul(self):
if self == Unit.UG:
return(1)
elif self == Unit.MG:
return(1_000)
elif self == Unit.G:
return(1_000_000)
else:
exit("unreachable")
def convert_to_ug(x):
assert(type(x) == dict)
for i in x:
value, unit = x.get(i)[0], x.get(i)[1]
x[i] = value*unit.mul()
return(x)
for_me = {
"vitamin A" : (521.91, Unit.UG),
"vitamin B1" : (0.75, Unit.MG),
"vitamin B2" : (0.83, Unit.MG),
"niacin" : (9.28, Unit.MG),
"vitamin B6" : (1.05, Unit.MG),
"folate DFE" : (247.84, Unit.MG),
"cobalamin" : (1.5, Unit.UG),
"vitamin C" : (115.98, Unit.MG),
"vitamin D" : (0, Unit.MG),
"alpha-tocopherol" : (9.01, Unit.MG),
"calcium" : (826.15, Unit.MG),
"copper" : (525.73, Unit.UG),
"iodine" : (86.98, Unit.MG),
"iron" : (6.38, Unit.MG),
"magnesium" : (262.87, Unit.MG),
"molybdenum" : (26.1, Unit.UG),
"phosphorus" : (792.35, Unit.MG),
"selenium" : (33.8, Unit.UG),
"zinc" : (7.06, Unit.MG)
}
c = convert_to_ug(for_me)
print(c)
''' | true |
d5a3495ea8d5a3015040eafb47f86d4133b10f10 | Python | HenriqueVarellaEhrenfried/anotacoes-ufpr-1 | /redes2/trabalhos/2010-2/calj08-fpk07/codigofonte/mainClient.py | UTF-8 | 3,458 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#importando classes
from socket import *
from time import time
from time import sleep
import sys
import os
#biblioteca local
sys.path.append( 'library' )
from Pacote import *
from functions import *
from Log import *
from msg import *
from geraHtml import *
#variaveis necessarias para o socket TCP
BUFSIZ = 4096
NAVEGADOR ='firefox'
#classe do cliente
class ClientMonitor:
    """Thin TCP client (Python 2) used to exchange serialized packets with the
    monitoring server."""
    def __init__(self,host,port):
        self.__HOST = host
        self.__PORT = port
        # Address tuple used when opening the TCP socket
        self.__ADDR = (self.__HOST,self.__PORT)
        self.__sock = None
    def makeConnection(self):
        # Create the TCP socket and connect to the configured address
        self.__sock = socket( AF_INET,SOCK_STREAM)
        self.__sock.connect(self.__ADDR)
    # Send a message to the server
    def sendData(self, data):
        self.__sock.send(data)
    def recData(self):
        # Receive up to BUFSIZ bytes; returns False when nothing was received
        # (peer closed the connection).
        data = self.__sock.recv(BUFSIZ)
        if not data:
            print 'Nenhum dado recebido!'
            return False
        return data
#instancia o cliente
if __name__ == '__main__':
    # Print the correct command-line usage of the program
    def usage():
        print "Erro!"
        print "Forma de uso: " + sys.argv[0] + ' <ip ou nome servidor>' + " <porta>"
    if len( sys.argv ) != 3 :
        usage()
        sys.exit( 1 )
    # Read server/port from argv and the monitored-host list from stdin
    servidor=sys.argv[1]
    porta=int(sys.argv[2])
    listaHosts=sys.stdin.readlines()
    # Prepare the log file
    client_log=log("client.log")
    client_log.openFile()
    # Connect to the server
    client = ClientMonitor(servidor,porta)
    try:
        client.makeConnection()
    except:
        client_log.setMsg(MSG_CONFAIL.format(servidor))
        client_log.writeLog()
        sys.exit(1)
    client_log.setMsg(MSG_CONNECT.format(servidor))
    client_log.writeLog()
    # Prepare and send the list of hosts to be monitored
    packet = Pacote(0,None,listaHosts)
    packet_to_send = serialize(packet)
    client_log.setMsg(MSG_SENDHOST.format(listaHosts))
    client_log.writeLog()
    # Perform the send
    client.sendData(packet_to_send)
    pack = None
    host_id = 0
    # Prepare the graphical (HTML) interface
    interfaceGraphic = interface()
    # Receive the list of host objects from the server
    rec_obj_hosts=client.recData()
    obj_hosts=unserialize(rec_obj_hosts)
    # Free a little memory
    rec_obj_hosts=None
    interfaceGraphic.generate(obj_hosts)
    temp = os.popen(NAVEGADOR +' index.html &')
    # Receive data from the server (the server only sends status-change
    # notifications).
    # NOTE(review): `(rec_data != None) or (rec_data != "False")` is always
    # True; `and` was probably intended -- confirm before changing.
    try:
        while(True):
            rec_data=client.recData()
            if (rec_data != None) or (rec_data != "False"):
                pack = unserialize(rec_data)
                host_id = pack.getHostID()
                obj_hosts[host_id].setStatus(pack.getData())
                # Log the status change
                if pack.getData() == ATIVO:
                    client_log.setMsg(MSG_CHANGE_STATUS.format(listaHosts[host_id].strip(),'ATIVO'))
                elif pack.getData() == DESCONHECIDO:
                    client_log.setMsg(MSG_CHANGE_STATUS.format(listaHosts[host_id].strip(),'DESCONHECIDO'))
                else :
                    client_log.setMsg(MSG_CHANGE_STATUS.format(listaHosts[host_id].strip(),'FALHO'))
                client_log.writeLog()
                print 'Recebido status do host: ' , listaHosts[host_id].strip() , ' que é ' , pack.getData()
                interfaceGraphic.generate(obj_hosts)
    except KeyboardInterrupt:
        client_log.setMsg(MSG_CLOSE.format(servidor))
        client_log.writeLog()
| true |
91e54b34ae34d61f5356b26e1158a16f090b28d8 | Python | intuinno/vistalk | /wordconfuse/views.py | UTF-8 | 5,280 | 2.65625 | 3 | [
"MIT"
] | permissive | # Create your views here.
# Create your views here.
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils import simplejson
from defs import WORDS
from time import time
from wordconfuse.models import GameScores,Words
from wordconfuse.forms import NewHS,GameOver
import random
def get_words(request):
    """Build one quiz: 10 random words, each with 4 candidate definitions
    (one correct), record the start time in the session, and return the quiz
    as a JSON response."""
    # select 10 random entries from the database and put them
    # into a list
    r_set = Words.objects.order_by('?')[:10]
    # As we build up quiz data we will maintain
    # a list of words to exclude so that words aren't
    # repeated during the quiz.
    exclude_list = []
    data = list()
    for entry in r_set:
        # the word for the quiz is added
        # to the exclude list so it won't be used
        # for any of the answers or any of the
        # subsequent words
        exclude_list.append(entry.word)
        # for every question this keeps track
        # of what words are used as answers so
        # they are not repeated
        entry_exclude = []
        # answer_list is a list of defintions
        # that will contain the choices for
        # the word.
        answer_list = []
        # 3 answers are picked randomly from the
        # full word set in the database. with the
        # following constraints:
        # a) the defintion cannot correspond to
        #    a word that was already used as
        #    an answer (entry_exclude)
        # b) the definition cannot correspond to
        #    one of the words that was used for
        #    the quiz (exclude_list)
        # c) the definition must correspond to
        #    the same part-of-speech of the word
        #    being quizzed.
        for x in range(0,3):
            answer = Words.objects.exclude(word__in=exclude_list+entry_exclude).filter(speech=entry.speech).order_by('?')[0]
            entry_exclude.append(answer.word)
            answer_list.append(answer.definition)
        # now that we have three wrong defitions to be used
        # as answers for the word being quizzed the real
        # answer needs to be inserted randomly into the list
        answer_list.insert(random.randrange(len(answer_list)+1),entry.definition)
        # this dictionary represents the JSON data that will
        # be returned to the browser
        # q - the word being quizzed
        # a - the list of 4 defintions to pick from
        # s - the index into 'a' that corresponds to the
        #     correct definition
        # NOTE(review): .index() returns the FIRST occurrence, so a wrong
        # answer with identical definition text would shift 's' -- confirm
        # definitions are unique.
        d = {
            'q': entry.word,
            'a': answer_list,
            's': answer_list.index(entry.definition)
        }
        data.append(d)
    # record the start time in a session variable, this will
    # be used later in the gameover view to calculate how much
    # time it took to complete the game
    request.session['start_game'] = float("%0.2f" % time())
    # return the words, answers, and solutions in a JSON response
    return HttpResponse(simplejson.dumps(data), mimetype='application/json')
@csrf_exempt
def gameover(request):
    """Persist a finished game's score, decide whether it is a new high
    score, and render the game-over page."""
    if request.method == 'POST':
        form = GameOver(request.POST)
        if not form.is_valid():
            return HttpResponse('Validation Error')
        count = int(form.cleaned_data['count'])
        now = float("%0.2f" % time())
        # NOTE(review): a missing 'start_game' key raises KeyError here
        # instead of returning the error response -- confirm that is intended.
        if not request.session['start_game']:
            return HttpResponse('Error')
        # Elapsed game time, rounded to 2 decimal places
        game_time = float("%0.2f" % (now - request.session['start_game']) )
        g = GameScores(
            ip=request.META['REMOTE_ADDR'],
            time_start=request.session['start_game'],
            time_end=now,
            time_delta=game_time,
            count=count)
        g.save();
        # Remember this score so new_hs() can attach a username to it later
        request.session['last_id'] = g.id
        form = NewHS()
        hs = GameScores.objects.filter(username__isnull=False).order_by('-count', 'time_delta')[0:6]
        # New high score when fewer than 6 named scores exist, or the new
        # score beats the sixth entry (ties broken by faster time).
        new_hs = False
        if len(hs) < 6 or count > hs[5].count:
            new_hs = True
        elif count == hs[5].count and game_time < hs[5].time_delta:
            new_hs = True
        go = {
            'new_hs': new_hs,
            'delta': game_time,
            'count': count,
        }
        return render_to_response('wordconfuse/gameover.html',
            {
                'form':form,
                'hs':hs,
                'go':go,
            })
    return HttpResponse('Derp, you need to post something')
@csrf_exempt
def new_hs(request):
    """Attach a username to the most recently saved score, then render the
    high-score table.

    BUG fix: the invalid-form branch built an HttpResponse but never
    returned it, so an invalid username fell through, was read from
    form.cleaned_data anyway, and got saved.  The sibling view `gameover`
    returns its validation response; this now does the same.
    """
    if request.method == 'POST':
        form = NewHS(request.POST)
        if not form.is_valid():
            return HttpResponse('Invalid Username')
        # 'last_id' was stored in the session by the gameover view
        g = GameScores.objects.get(id=request.session['last_id'])
        g.username = form.cleaned_data['username']
        g.save()
        hs = GameScores.objects.filter(username__isnull=False).order_by('-count', 'time_delta')[0:6]
        return render_to_response('wordconfuse/hs.html', { 'hs':hs, })
    return HttpResponse('Derp, you need to post the username')
def hs(request):
    """Render the top-6 high-score table (named scores only, best first)."""
    hs = GameScores.objects.filter(username__isnull=False).order_by('-count', 'time_delta')[0:6]
    return render_to_response('wordconfuse/hs.html', { 'hs': hs, })
| true |
282e08d22be0ae6f61edcd647f47540524153a0f | Python | suraj19/Hackerrank-Codes | /program21.py | UTF-8 | 582 | 3.484375 | 3 | [] | no_license | '''Input Format
The first line contains an integer, n, the number of students who have subscribed to the English newspaper.
The second line contains n space separated roll numbers of those students.
The third line contains b, the number of students who have subscribed to the French newspaper.
The fourth line contains b space separated roll numbers of those students.
Output Format
Output the total number of students who have at least one subscription.'''
# Union of both subscriber sets: each operand reads the count line first and
# only reads the roll-number line when the count is not '-1'.
print(len((set(input().split()) if input() != '-1' else '')|(set(input().split()) if input() != '-1' else '')))
| true |
27bcdacb119c1f437f29dee4a0ecacaaf0b45b76 | Python | shubham2751/python-basics-exercises | /ch08-conditional-logic/1-compare-values.py | UTF-8 | 756 | 5 | 5 | [] | no_license | # 8.1 - Compare Values
# Solutions to review exercises
# Exercise 1
# Test whether these expressions are True or False
print(1 <= 1)
print(1 != 1)
print(1 != 2)
print("good" != "bad")
print("good" != "Good")
print(123 == "123")
print(11 > 11)  # note this one
print(11 >= 11)
print(11 < 11)
print(11 <= 11)
print(11.0 == 11)  # note this one
print(11.0 == 11.1)
# BUG fix: chr() takes an int, so `65 == chr("A")` raised a TypeError and
# aborted the script.  The intended comparison is chr(65) against "A".
print("A" == chr(65))
print(ord("A") == 65)
print(ord("A") == chr(65))  # int compared to str: simply False, no error

# Exercise 2
# Fill in the blank so that each of the following expressions are True

# 3 __ 4
# Any of the following:
3 < 4
3 <= 4
3 != 4

# 10 __ 5
# Any of the following:
10 > 5
10 >= 5
10 != 5

# "jack" __ "jill"
# Any of the following:
"jack" < "jill"
"jack" <= "jill"
"jack" != "jill"

# 42 __ "42"
42 != "42"
| true |
ed695f398e7ba9a4ce752dcc55c8f288017701f1 | Python | Chiil/smallprograms | /diff_anja/diff_anja_real_data.py | UTF-8 | 2,054 | 2.546875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
# DIFF -- grid setup and reference data
nx, ny, nz = 2304, 576, 144
xsize = 18.84955592153876
ysize = 4.71238898038469
zsize = 1.
# Read the vertical grid from the reference simulation output.
nc_file = nc.Dataset('moser600.default.0036000.nc', 'r')
z = nc_file.variables['z' ][:]
zh = nc_file.variables['zh'][:]
nc_file.close()
dx, dy = xsize/nx, ysize/ny
x = np.arange(dx/2, xsize, dx)
y = np.arange(dy/2, ysize, dy)
# Cell heights (dz) and centre-to-centre spacings (dzh), extended by
# half-cells at both walls.
dz = zh[1:] - zh[:-1]
dzh = z[1:] - z[:-1]
dzh = np.append(2*z[0], dzh)
dzh = np.append(dzh, 2*(zsize-z[-1]))
slice_0 = np.fromfile('avg_ch4.01')
slice_0.shape = (nz, ny)
slice_1 = np.fromfile('avg_ch4.02')
slice_1.shape = (nz, ny)
# NOTE(review): c0 is assigned twice with the same value; the second
# assignment is redundant (or one of them was meant to be slice_1?).
c0 = slice_0.copy()
c0 = slice_0.copy()
u = 0.11*np.ones(z.shape)
kappa = 0.4
ustar = 0.005
Ky = kappa*z*ustar
Kz = kappa*zh*ustar
# EXPLICIT SOLVER -- c has one ghost cell on each side in y and z.
c = np.empty((nz+2, ny+2))
c[1:-1,1:-1] = c0[:,:]
# Distance to cover.
dx_tot = 1.578977465629577637
# Check for maximum permissible K.
dx_max_y = 0.5 * dy**2 * u / Ky
dx_max_z = 0.5 * dz**2 * u / Kz[1:]
dx_max = min( dx_max_y.min(), dx_max_z.min() )
# Keep a factor 2 safety margin.
# NOTE(review): the comment says factor 2 but the code uses 1.5 -- confirm.
n_tot = int( 1.5*dx_tot / dx_max )
dx_step = dx_tot / n_tot
print("Solving in {:} steps.".format(n_tot))
x_step = 0.
for n in range(n_tot):
    # Ghost cells in y (periodic)
    c[:, 0] = c[:,-2]
    c[:,-1] = c[:, 1]
    # Ghost cells in z (zero-gradient at the walls)
    c[ 0,:] = c[ 1,:]
    c[-1,:] = c[-2,:]
    c[1:-1,1:-1] = u[:,None]/dx_step * c[1:-1,1:-1] \
            + Ky[:,None] * (c[1:-1,:-2] - 2*c[1:-1,1:-1] + c[1:-1,2:]) / dy**2 \
            + ( ( Kz[1:,None] * (c[2:,1:-1] - c[1:-1,1:-1]) / dzh[1:,None] )
              - ( Kz[:-1,None] * (c[1:-1,1:-1] - c[:-2,1:-1]) / dzh[:-1,None] ) ) / dz[:,None]
    c[1:-1,1:-1] /= (u[:,None])/dx_step
    x_step += dx_step
    print(x_step)
c1 = c[1:-1,1:-1]
plt.figure()
plt.subplot(131)
plt.pcolormesh(y, z, c0)
plt.colorbar()
plt.title('start')
plt.subplot(132)
plt.pcolormesh(y, z, c1)
plt.colorbar()
plt.title('end')
plt.subplot(133)
plt.pcolormesh(y, z, slice_1)
plt.colorbar()
plt.title('ref')
plt.tight_layout()
plt.show()
| true |
cf098ed8585635c663f74a2326a4d00193944716 | Python | KIQ83/blackjack-client | /utils.py | UTF-8 | 2,136 | 3.4375 | 3 | [] | no_license | def getCardSumValue(cardIndex):
    # figure Cards are worth 10
    # (indexes 10..12 are J/Q/K; index 0 is the ace, which counts as 1 here)
    if cardIndex in range(10, 13):
        return 10
    else:
        return cardIndex + 1
def containsAce(cardsIndexes):
    """True when the ace index (0) appears among the card indexes."""
    return any(index == 0 for index in cardsIndexes)
def sumCards(cards):
    """Best blackjack total for a hand.

    Each card's base value is 10 for J/Q/K (indexes 10..12) and index + 1
    otherwise (the ace, index 0, counts as 1).  If the hand holds an ace and
    counting it as 11 keeps the total at 21 or below, that higher total is
    returned instead.
    """
    total = 0
    has_ace = False
    for card in cards:
        rank = card['number']
        has_ace = has_ace or rank == 0
        total += 10 if 10 <= rank <= 12 else rank + 1
    if has_ace and total + 10 <= 21:
        return total + 10
    return total
def cardDisplay(card):
    # Human-readable card string, e.g. {'number': 0, 'suit': 'H'} -> 'AH'.
    return str(getCardSymbol(card['number'])) + card['suit']
def getCardSymbol(cardIndex):
    """Display symbol for a card index: 'A'/'J'/'Q'/'K' for the special
    indexes (0, 10, 11, 12), otherwise the rank number as a string."""
    # Table lookup replaces the old if/elif chain.
    special = {0: 'A', 10: 'J', 11: 'Q', 12: 'K'}
    return special.get(cardIndex, str(cardIndex + 1))
def findPlayer(playersList, playerName):
    """Return the first player dict whose 'name' equals playerName, or None."""
    return next((player for player in playersList if player['name'] == playerName), None)
def dealerCards(dealer):
    """Dealer's full hand: the shown card, plus the hidden card when present."""
    hidden_part = [dealer['hidden']] if 'hidden' in dealer else []
    return [dealer['shown']] + hidden_part
def printMyCards(cards):
    # Print the local player's hand together with its blackjack total.
    print("These are my cards:")
    printCards(cards)
def printDealerShownCard(dealer):
    # Print only the dealer's face-up card.
    print('Dealer has: ' + cardDisplay(dealer['shown']))
def printFullDealer(dealer):
    # Print both dealer cards (used once the hidden card is revealed).
    dealerCards = [dealer['shown'], dealer['hidden']]
    print('Dealer cards: ')
    printCards(dealerCards)
def printCards(cards):
    # Print a comma-separated rendering of a hand plus its total.
    cardSum = sumCards(cards)
    cardsDisplay = ''
    for card in cards:
        cardsDisplay += cardDisplay(card) + ","
    print(cardsDisplay + " that gives a total of " + str(cardSum))
def saveWinRate(modelName, dealerSum, playerSum, dealer, result):
    """Append one game outcome to the model's win-rate CSV.

    Columns written: dealer id, dealer sum, player sum, result.
    """
    # keeping track of win rates
    row = [dealer['id'], dealerSum, playerSum, result]
    print(str(row))
    # `with` guarantees the file is flushed and closed even on error
    # (the old code opened/closed manually and shadowed the builtin `input`).
    with open('./models/' + modelName + '/win_rates_' + modelName + '.csv', 'a') as f:
        f.write(str(dealer['id']) + "," + str(dealerSum) + ',' + str(playerSum) + "," + result + '\n')
821a1c4d6ec1c9804f65cd69328d846b92e0bd43 | Python | samir2901/Leetcode-Solution | /same-tree.py | UTF-8 | 755 | 4 | 4 | [] | no_license | '''
Given the roots of two binary trees p and q, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical, and the nodes have the same value.
Example 1:
Input: p = [1,2,3], q = [1,2,3]
Output: true
'''
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # node value
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    def isSameTree(self, p, q) -> bool:
        """Return True when both trees are structurally identical and every
        corresponding pair of nodes carries the same value."""
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        return (p.val == q.val
                and self.isSameTree(p.left, q.left)
                and self.isSameTree(p.right, q.right))
| true |
3822fd471a0485245c71c7d645ed32530673309f | Python | uiucanh/dota2-ml | /CollectData/collect_data.py | UTF-8 | 6,006 | 2.625 | 3 | [] | no_license | import logging
import dota2api
import requests
from pymongo import MongoClient, errors
from dota2api import exceptions
from src.constants import *
from datetime import datetime
def initialise_keys():
    """Validate that both API keys are configured and return a raw-mode
    dota2api client (raw mode returns hero ids instead of names)."""
    # Initialise api keys
    if D2API_KEY is None:
        raise NameError("Dota 2 Api key needs to be set as an environment variable")
    if OD_KEY is None:
        raise NameError("Opendota Api key needs to be set as an environment variable")
    api = dota2api.Initialise(D2API_KEY, raw_mode = True) #raw mode to get hero id instead of names
    return api
def setup_db_log(log_name):
    """Configure file logging to `log_name` and return the tuple
    (matches collection, logger).  Exits the process when the MongoDB
    server cannot be reached."""
    # Initialise logger
    logging.basicConfig(filename=log_name, level=logging.INFO, format='%(asctime)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR) #Add this to stop requests from logging
    logger = logging.getLogger(__name__)
    #Initialise database
    client = MongoClient(SERVER_ADDRESS)
    db_collection = client.dota2db.matches
    #Test connection
    try:
        client.server_info()
    except errors.ServerSelectionTimeoutError:
        error_text = "Could not connect to database"
        logger.error(error_text)
        exit()
    return db_collection, logger
'''
- Get randomly public matches from Opendota
- Validate these matches
'''
def get_rand_public_games(use_key = False, initial_match_id = None):
    """Fetch one page of recent public matches from the OpenDota API.

    use_key: append the API key (used after hitting the anonymous limit).
    initial_match_id: only return matches with a lower id (pagination).
    Exits the process on a request failure.
    """
    url = 'https://api.opendota.com/api/publicMatches'
    if initial_match_id != None:
        url += '?less_than_match_id=' + str(initial_match_id)
    if use_key == True:
        # '?' vs '&' depends on whether a query string was already started
        if initial_match_id != None:
            url += '&' + OD_KEY
        else:
            url += '?' + OD_KEY
    try:
        result = requests.get(url).json()
    except requests.exceptions.RequestException as error:
        error_text = "Could not call the Opendota API. Error Code: %s" % error
        logger.error(error_text)
        exit()
    else:
        return result
#Return True if a player abandoned
def check_for_abandons(match):
    """Return True when any player in the given match id abandoned it
    (leaver_status outside NON_ABANDON_VALUES).  Exits on API failure."""
    try:
        result = api.get_match_details(match_id = match)
    except exceptions.APIError as error:
        logger.error("Could not call the Steam API. Error Code: %s" % error)
        exit()
    else:
        for player in result['players']:
            if player['leaver_status'] not in NON_ABANDON_VALUES:
                return True
        return False
'''
Validate matches, ensuring that they match the following criterias:
- Must be ranked All Pick
- Must be at least 15 mins long
- Must be at least Acient (avg medal = 60)
- No one abandoned
'''
def validate_match(match_list, len_current_list = 0):
    """Filter match_list down to matches satisfying all criteria (game mode,
    lobby type, duration, average rank) and containing no abandoning
    players.  len_current_list caps the batch at API_CALL_LIMIT."""
    check_list = []
    passed_list = []
    #Check if the game mode, rank tier, lobby type and duration are correct
    for match in match_list:
        rank_tier = match['avg_rank_tier']
        match_duration = match['duration'] / 60 #In minutes
        lobby_type = match['lobby_type']
        game_mode = match['game_mode']
        #Check if all rules met
        rules = [rank_tier >= MIN_RANK_TIER,
                 match_duration >= MIN_MATCH_DURATION,
                 lobby_type == RANKED_LOBBY_MODE, #Ranked
                 game_mode in GAME_MODE] #All pick and Captain Mode
        if all(rules):
            check_list.append(match)
            if len(check_list) + len_current_list == API_CALL_LIMIT:
                break
    #Check to not include matches that have abandoned players
    for match in check_list:
        match_id = match['match_id']
        if not check_for_abandons(match_id):
            passed_list.append(match)
    return passed_list
def add_json_to_db(match_list):
    """Strip unneeded fields from each match dict (in place) and insert the
    matches into MongoDB.  Exits the process when nothing could be inserted,
    which is taken to mean there are no more matches to collect."""
    #Clean up jsons
    success = 0 #Number of success cases
    keys_to_delete = ['match_seq_num', 'num_mmr', 'lobby_type',
                      'num_rank_tier', 'cluster']
    for match in match_list:
        for key in keys_to_delete:
            match.pop(key)
    #Add to db
    for match in match_list:
        try:
            db_collection.insert_one(match)
        except errors.WriteError:
            continue
        else:
            success += 1
    # If cant find any more matches
    if success == 0:
        logger.info("No more matches to collect")
        exit()
    logger.info("Number of success cases: %s" % success)
# An aggregate pipeline to check if any match is duplicated in the database
def check_for_duplicates():
    """Return True when no match_id appears more than once in the collection.

    BUG fix: the old code did `check = print(list(...))`, so `check` was
    always None, `check != []` was always true, and the function
    unconditionally reported that duplicates existed were absent.  The
    aggregation result is now captured before printing and actually tested.
    """
    # Group documents by match_id, then keep only the groups with count > 1.
    pipeline = [
        {"$group": {"_id": {"match_id": "$match_id"}, "uniqueIds": {"$addToSet": "$_id"}, "count": {"$sum": 1}}},
        {"$match": {"count": {"$gt": 1}}}
    ]
    duplicates = list(db_collection.aggregate(pipeline))
    print(duplicates)
    if duplicates != []:
        return False
    return True
def main():
    """Page backwards through public matches forever, validating them and
    flushing batches of API_CALL_LIMIT matches to the database."""
    last_match_id = 4042096908
    passed_matches = []
    while True:
        rand_match = get_rand_public_games(initial_match_id = last_match_id)
        #Set new search to include matches with lower id only
        # NOTE(review): rand_match[-1] is read before the emptiness check
        # below, so an empty page raises IndexError here -- confirm intent.
        last_match_id = rand_match[-1]['match_id']
        if len(rand_match) == 0:
            rand_match = get_rand_public_games(initial_match_id = last_match_id, use_key = True) #Use key if reach limit
        passed_matches += validate_match(rand_match, len(passed_matches))
        print(len(passed_matches))
        if len(passed_matches) >= API_CALL_LIMIT:
            add_json_to_db(passed_matches)
            total_matches = db_collection.count()
            logger.info('Last match ID: %s' % passed_matches[-1]['match_id'])
            start_time = datetime.fromtimestamp(passed_matches[-1]['start_time']).strftime('%Y-%m-%d %H:%M:%S')
            logger.info('Last match start time: %s' % start_time)
            logger.info('Finish processing %s matches\nTotal matches: %s\nContinuing' % (API_CALL_LIMIT, total_matches))
            passed_matches = []
if __name__ == '__main__':
    # Wire up the API client, database collection and logger, then run forever.
    api = initialise_keys()
    db_collection, logger = setup_db_log('mining.log')
    main()
973461546a2f1e1edfc00694c5fd256c194420ee | Python | rkiyengar/rowgenerators | /rowgenerators/generator/csv.py | UTF-8 | 1,781 | 2.796875 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
import sys
import os
from rowgenerators.source import Source
class CsvSource(Source):
    """Generate rows from a CSV source"""

    # Field separator passed through to csv.reader
    delimiter = ','

    def __init__(self, ref, cache=None, working_dir=None, **kwargs):

        super().__init__(ref, cache, working_dir, **kwargs)

        # ref is expected to be a URL-like object exposing .exists(),
        # .scheme, .path and .encoding (see usage below).
        self.url = ref

        if not self.url.exists():
            raise FileNotFoundError(self.url)

        # NOTE(review): when the scheme is not 'file' this assert always
        # fails -- apparently a deliberate hard stop for non-file URLs.
        if self.url.scheme != 'file':
            assert self.url.scheme == 'file', str(self.url)

    def __iter__(self):
        """Iterate over all of the lines in the file"""
        import csv

        try:
            # For: _csv.Error: field larger than field limit (131072)
            if os.name == 'nt':
                # Using sys.maxsize throws an Overflow error on Windows 64-bit platforms since internal
                # representation of 'int'/'long' on Win64 is only 32-bit wide. Ideally limit on Win64
                # should not exceed ((2**31)-1) as long as internal representation uses 'int' and/or 'long'
                csv.field_size_limit((2**31)-1)
            else:
                csv.field_size_limit(sys.maxsize)
        except OverflowError as e:
            # skip setting the limit for now
            pass

        # start()/finish() are hooks inherited from Source -- presumably
        # bookkeeping around iteration; confirm against the base class.
        self.start()

        try:
            # Python 3.6 considers None to mean 'utf8', but Python 3.5 considers it to be 'ascii'
            encoding = self.url.encoding or 'utf8'

            with open(self.url.path, encoding=encoding) as f:
                yield from csv.reader(f, delimiter=self.delimiter)
        except UnicodeError as e:
            raise

        self.finish()
8372d54ad5567d6a7701368f6884789e18e6f673 | Python | MiiikAnd/ControleInstrumentos | /App/leitor2.py | UTF-8 | 875 | 3.546875 | 4 | [] | no_license | def encontrar(nome_arquivo, item):
    # Read the whole ';'-separated file into a flat list of fields.
    # NOTE(review): the file handle is never closed; consider `with open(...)`.
    arquivo = open(nome_arquivo, 'r')
    codigo = arquivo.read()
    codigo = codigo.split(';')
    print(codigo)
    print(item == codigo[1])
    if item in codigo:
        localiza = list.index(codigo, item, 1, list.__len__(codigo))
        # Finds the index of the searched item.
        # Arguments: list, searched item, search start, search end (use
        # list.__len__ to guarantee the whole list is searched)
        print(codigo[localiza] + '\n' + codigo[localiza+1] + '\n' + codigo[localiza+2] + '\n' + codigo[localiza+3] + '\n'+ codigo[localiza+4])
        a = list.__contains__(codigo, item) # True when the item is in the list
        b = list.__len__(codigo) # Number of items in the list
    else:
        print('Código inexistente')
if __name__ == '__main__':
    # Demo lookup of one instrument code in the local status file.
    item = '6.1.001.001.027'
    encontrar('status.txt', item)
| true |
8153eadcd5a5d065c0f2c041b27f17b9cb0e8b0d | Python | sweetpand/LeetCode-1 | /solutions/python3/0130.py | UTF-8 | 720 | 3.09375 | 3 | [] | no_license | class Solution:
def solve(self, board: List[List[str]]) -> None:
def dfs(i: int, j: int) -> None:
if not 0 <= i < len(board) or not 0 <= j < len(board[0]) or board[i][j] != 'O':
return
board[i][j] = '.'
dfs(i + 1, j)
dfs(i - 1, j)
dfs(i, j + 1)
dfs(i, j - 1)
if not board:
return
for i in range(len(board)):
dfs(i, 0)
dfs(i, len(board[0]) - 1)
for j in range(1, len(board[0]) - 1):
dfs(0, j)
dfs(len(board) - 1, j)
for row in board:
for i, c in enumerate(row):
row[i] = 'O' if c == '.' else 'X'
| true |
f09ea387b4a347d8c0badf5fd5fb5e3b4371c35e | Python | CDL-Project-Euler/solutions | /000-025/p002/kirill.py | UTF-8 | 196 | 3.09375 | 3 | [
"MIT"
] | permissive | s = 0
# Project Euler 2: add the even-valued Fibonacci terms below four million to s.
f1 = 1
f2 = 2
previous, current = f1, f2
while current < 4000000:
    if current % 2 == 0:
        s += current
    previous, current = current, previous + current
print(s)
cc1e5eaa1a6c296089380a119799cc1a4e293c14 | Python | skeselj/transformer-networks | /main/models/irrelevant/embedding_pyramid.py | UTF-8 | 5,297 | 2.546875 | 3 | [] | no_license | ########################################################################################################
# Define the a SpyNet like model
########################################################################################################
import numpy as np
import torch, torch.nn as nn, torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.functional import grid_sample # in (N x C x IH x IW), out (N x OH x OW x 2)
class DownConv(nn.Module):
    # NOTE(review): this class appears unfinished -- __init__ computes a
    # padding value `p` but defines no layers, and there is no forward().
    def __init__(self, k=5, f=nn.ReLU()):
        super(DownConv, self).__init__()
        p = (k-1) // 2
class DConv(nn.Module):
def __init__(self, infm, outfm, k, padding, dilation=1, groups=1, f=nn.ReLU(inplace=True)):
assert infm == outfm
super(DConv, self).__init__()
self.f = f
self.conv = nn.Conv2d(infm, outfm, k, padding=padding, groups=groups, dilation=dilation)
weights = torch.zeros((outfm, infm, k, k)).normal_(0, 0.01)
for i in range(infm):
weights[i,i,k//2,k//2] = 1
self.conv.weight = nn.Parameter(weights)
self.conv.bias.data /= 10
def forward(self, x):
return self.f(self.conv(x))
# dilation level
class DG(nn.Module):
def __init__(self, k=5, f=nn.ReLU(), t=1): # t = ntargets
super(DG, self).__init__()
print('building DG wtih %dx%d kernels and %d targets' % (k, k, t))
p = (k-1) // 2; d = (k+1) // 2
self.f = f
fm = 32 * (t+1)
self.conv1 = nn.Conv2d(t+1, fm, k, padding=p, groups=t+1)
self.conv2 = nn.Conv2d(fm, fm, k, padding=p)
self.conv3 = DConv(fm, fm, k, padding=p*d, dilation=d)
self.conv4 = DConv(fm, fm, k, padding=p*d*2, dilation=d*2)
self.conv5 = DConv(fm, fm, k, padding=p*d*4, dilation=d*4)
self.conv6 = DConv(fm, fm, k, padding=p*d*8, dilation=d*8)
self.conv7 = nn.Conv2d(fm, 16, 3, padding=1)
self.conv8 = nn.Conv2d(16, 2, 3, padding=1)
self.conv8.weight.data /= 10
self.conv8.bias.data /= 10
def forward(self, x):
out = self.f(self.conv1(x))
out = self.f(self.conv2(out))
out = self.f(self.conv3(out))
out = self.f(self.conv4(out))
out = self.f(self.conv5(out))
out = self.f(self.conv6(out))
out = self.f(self.conv7(out))
out = self.f(self.conv8(out))
return out.permute(0,2,3,1)
# pyramid
class Pyramid(nn.Module):
def get_identity_grid(self, dim):
gx, gy = np.linspace(-1, 1, dim), np.linspace(-1, 1, dim)
I = np.stack(np.meshgrid(gx, gy)) # (2, dim, dim)
I = np.expand_dims(I, 0) # (1, 2, dim, dim)
I = Variable(torch.Tensor(I), requires_grad=False).cuda()
I = I.permute(0,2,3,1) # (1, dim, dim, 2)
return I
def __init__(self, nlevels):
super(Pyramid, self).__init__()
print('--- Building PyramidNet with %d levels' % nlevels)
self.nlevels = nlevels
#self.mlist = nn.ModuleList([G() for level in xrange(nlevels)])
self.mlist = nn.ModuleList([DG() for level in xrange(nlevels)])
self.f_up = lambda x: nn.Upsample(scale_factor=x, mode='bilinear')
self.up = self.f_up(2)
self.down = nn.AvgPool2d(2, 2)
self.I_initialized = False
def forward(self, stack, idx, lastlevel): # stack: B x 2 x _ x _
if not self.I_initialized: # I do this here so we don't have to specify dim
_, _, w, _ = stack.size()
self.I = self.get_identity_grid(w / 2**self.nlevels)
self.I_initialized = True
# top level: return identity
if idx == self.nlevels:
I = self.I.repeat(stack.size()[0], 1, 1, 1) # B x 2 x _ x _
return I, [ I ]
# non-top level: run levels above
frame, target = stack[:,0:1,:,:], stack[:,1:2,:,:]
field_so_far, residuals_so_far = self.forward(self.down(stack), idx+1, lastlevel) # B x _ x _ x 2
field_so_far = self.up(field_so_far.permute(0,3,1,2)).permute(0,2,3,1) # B x _ x _ x 2
# included level: do work
if idx >= lastlevel:
updated_frame = grid_sample(frame, field_so_far)
new_stack = torch.cat((updated_frame, target), 1)
residual = self.mlist[idx](new_stack) # B x W x W x 2
# excluded level: pass it on
else:
residual = Variable(torch.zeros(field_so_far.size()), requires_grad=False).cuda().detach()
residuals_so_far.insert(0, residual)
return residual + field_so_far, residuals_so_far
# wrapper
class PyramidTransformer(nn.Module):
def __init__(self, nlevels=5):
super(PyramidTransformer, self).__init__()
self.pyramid = Pyramid(nlevels)
def forward(self, x, lastlevel=2):
field, residuals = self.pyramid.forward(x, idx=0, lastlevel=lastlevel)
pred = grid_sample(x[:,0:1,:,:], field).squeeze() # sample frame with field
return pred, field, residuals
def select_module(self, idx):
for g in self.pyramid.mlist:
g.requires_grad = False
self.pyramid.mlist[idx].requires_grad = True
def select_all(self):
for g in self.pyramid.mlist:
g.requires_grad = True
| true |
627550160a71b05aad76888e6590638e978eba41 | Python | opennikish/advent-of-code | /_2020/day_07/task2.py | UTF-8 | 757 | 3.3125 | 3 | [] | no_license | from typing import Dict, List, Tuple
def parse_neighbor(raw_neighbor: str) -> Tuple[str, int]:
    """Parse a phrase like '2 shiny gold bags' into ('shiny gold', 2)."""
    count, adjective, colour = raw_neighbor.split(' ')[:3]
    return f'{adjective} {colour}', int(count)
def dfs(g: Dict[str, List[str]], v: str) -> int:
    """Total number of bags rooted at `v`, counting `v` itself, where g maps
    a bag to its (inner bag, multiplicity) pairs."""
    return 1 + sum(count * dfs(g, child) for child, count in g[v])
def main(input_path = 0):
    """Read the puzzle input (default fd 0, i.e. stdin), build the bag graph
    and print how many bags one 'shiny gold' bag must ultimately contain."""
    with open(input_path) as f:
        target = 'shiny gold'
        empty = 'no other bags.'
        g = {}
        for line in f:
            line = line.strip()
            u, v = line.split(' bags contain ')
            # Leaf bags hold nothing; otherwise parse each "N adj colour bags"
            g[u] = [] if v == empty else [parse_neighbor(x) for x in v.split(', ')]
        # Subtract 1 so the shiny gold bag itself is not counted.
        count = dfs(g, target) - 1
        print(count)

if __name__ == '__main__':
    main()
06aabb5ef59a6edc575cb1d32918c96867b8bbf9 | Python | jack-diamond/devops | /hw1/linkedlist/test_remove.py | UTF-8 | 1,304 | 3.5 | 4 | [] | no_license | import unittest
from linkedlist import LinkedList
from linkedlist import Node
23
class TestMethods(unittest.TestCase):
    """Unit tests for LinkedList.remove()."""
    def test1(self):
        '''
        Test remove on empty linkedlist.
        '''
        l = LinkedList()
        self.assertEqual(l.remove(1), None)
    def test2(self):
        '''
        Test remove on non-empty linkedlist of length == 1.
        '''
        l = LinkedList()
        l.insert(1)
        l.remove(1)
        self.assertEqual(l.traverse(), [])
    def test3(self):
        '''
        Test remove on non-empty linkedlist of length > 1.
        '''
        l = LinkedList()
        l.insert(2)
        l.insert(1)
        l.insert(3)
        l.remove(1)
        self.assertEqual(l.traverse(), [2,3])
    def test4(self):
        '''
        Test remove on non-empty linkedlist where value is not
        there.
        '''
        l = LinkedList()
        l.insert(2)
        l.insert(1)
        l.insert(3)
        l.remove(5)
        self.assertEqual(l.traverse(), [2,1,3])
    def test5(self):
        '''
        Test removing None from linkedlist.
        '''
        l = LinkedList()
        l.insert(2)
        l.insert(1)
        l.insert(3)
        self.assertEqual(l.remove(None), None)
if __name__ == '__main__':
    unittest.main()
fa2994dedbb9ee8dad08511890a105a74ca84eb1 | Python | CodedQuen/Machine-learning-with-cookbook_Chris-Abon | /handleOutliers.py | UTF-8 | 283 | 3.328125 | 3 | [] | no_license | # Load library
import pandas as pd

# Create DataFrame with the same rows and columns, built in one constructor
# call instead of column-by-column assignment.
houses = pd.DataFrame({
    'Price': [534433, 392333, 293222, 4322032],
    'Bathrooms': [2, 3.5, 2, 116],
    'Square_Feet': [1500, 2500, 1500, 48000],
})

# Filter observations: keep only houses with a plausible bathroom count
houses[houses['Bathrooms'] < 20]
| true |
c344b5ae63adc3f6022e0dae83219e7ead4cc663 | Python | shuntianfu/python | /slice/string.py | UTF-8 | 280 | 3.53125 | 4 | [] | no_license |
sample_url = 'http://jeckma.com'
# Reverse the url
print(sample_url[::-1])
# Get the top level domain
print(sample_url[-4:])
# Print the url without the http://
print(sample_url[7:])
# Print out the url without the http:// or the top level domain
print(sample_url[7:-4])
| true |
892bc48cd08a85cb4e7ad7e80b2e5edbe706d355 | Python | johnnyrock92/zjp | /V1/statement_v1.py | UTF-8 | 2,208 | 3.484375 | 3 | [] | no_license | import json
import math
def statement(invoices, plays):
    '''
    Return: the invoice rendered as a string (customer header, one line per
    performance, total due and volume credits)
    '''
    result = 'Rachunek dla {}\n'.format(invoices['customer'])
    for perf in invoices['performances']:
        result += " {}: {:.2f} zł (liczba miejsc: {})\n".format(playFor(perf)['name'], amountFor(perf)/100, perf['audience'])
    result += "Należność: {:.2f} zł\n".format(totalAmount()/100)
    result += "Punkty promocyjne: {}".format(totalVolumeCredits())
    return result
def totalAmount():
    '''
    Return: total amount due, in hundredths of a zloty
    (relies on the module-level `invoices` global)
    '''
    result = 0
    for perf in invoices['performances']:
        result += amountFor(perf)
    return result
def totalVolumeCredits():
    '''
    Return: total volume credits over all performances
    (relies on the module-level `invoices` global)
    '''
    result = 0
    for perf in invoices['performances']:
        result += volumeCreditsFor(perf)
    return result
def volumeCreditsFor(aPerformance):
    '''
    Return: volume credits earned for one performance
    '''
    result = max(aPerformance['audience'] - 30, 0)
    # One extra credit for every 5 comedy attendees
    if "komedia" == playFor(aPerformance)['type']:
        result += math.floor(aPerformance['audience'] / 5)
    return result
def playFor(aPerformance):
    '''
    Return: the play record for this performance (looked up in the
    module-level `plays` global by playID)
    '''
    return plays[aPerformance['playID']]
def amountFor(aPerformance):
    '''
    Return: the charge for one performance, in hundredths of a zloty
    '''
    result = 0
    if playFor(aPerformance)['type'] == "tragedia":
        result = 40000
        # Surcharge for tragedy audiences above 30
        if aPerformance['audience'] > 30:
            result += 1000 * (aPerformance['audience'] - 30)
    elif playFor(aPerformance)['type'] == "komedia":
        result = 30000
        # Surcharge for comedy audiences above 20
        if aPerformance['audience'] > 20:
            result += 10000 + 500 * (aPerformance['audience'] - 20)
        result += 300 * aPerformance['audience']
    else:
        # NOTE(review): unknown play types only print a message and return 0
        # instead of raising -- confirm that is intended.
        print('Nieznany typ przedstawienia: {}'.format(playFor(aPerformance)['type']))
    return result
def openJsonFile(filepath):
    """Load and return the parsed JSON content of *filepath*.

    Fixed: the original passed a bare ``open()`` result to ``json.load``,
    leaking the file handle; a context manager closes it deterministically.
    UTF-8 is requested explicitly so Polish characters decode the same on
    every platform.
    """
    with open(filepath, encoding='utf-8') as f:
        return json.load(f)
# Load the sample invoice and play catalogue, then print the rendered bill.
invoices = openJsonFile('../Data/invoices.json')
plays = openJsonFile('../Data/plays.json')
print(statement(invoices, plays))
| true |
# Count how often each character occurs in my_text, then print a
# per-character frequency report.
my_text = "three words for you"
frequencies = dict()
for character in my_text:
    # dict.get replaces the original LBYL membership check, avoiding the
    # double dictionary lookup per character.
    frequencies[character] = frequencies.get(character, 0) + 1
for character in frequencies.keys():
    print(character, "-", frequencies[character])
| true |
a76feed77d8420a77e0cf67e02ad54b005084774 | Python | TNFSH-Programming-Contest/2017NHSPC-TNFSH-Final | /testdata/cms2toj.py | UTF-8 | 267 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
import os
from sys import argv
# Renumber CMS-style testdata (<name>.in / <name>.out pairs) inside *folder*
# to sequential TOJ-style names: 1.in/1.out, 2.in/2.out, ...
folder = argv[1]
os.chdir(folder)
files = os.listdir()
# Lexicographic sort, so "10.in" sorts before "2.in" — the new numbering
# follows string order, not numeric order.
files.sort()
cnt = 1
for f in files:
    if f.endswith('.in'):
        print(f)
        # NOTE(review): renaming to "<cnt>.in" can clobber a not-yet-processed
        # file that already carries that name — confirm inputs never collide.
        os.rename(f, str(cnt)+".in")
        # The matching .out shares the stem (strip the 3-char ".in" suffix).
        os.rename(f[:-3]+".out", str(cnt)+".out")
        cnt += 1
| true |
96c6bee1a62264ae15c3745dc5485a81b0298882 | Python | kdm1jkm/pendulum_simulator | /simple_pendulum.py | UTF-8 | 4,670 | 2.921875 | 3 | [] | no_license | import math
import os
import sys
from datetime import datetime
from typing import *
import matplotlib.pylab as plt
import numpy as np
import pygame
from tqdm import tqdm
from pygame_constants import *
from PendulumSimulator import PendulumSimulator
def get_extreme_value(values: np.ndarray, show_status: bool = True) -> Tuple[List[float], List[float]]:
    """Return (local_min_indices, local_max_indices) of *values*.

    Only strict interior extrema are reported. Fixes two indexing bugs in
    the original: index 0 compared against values[-1] (wrap-around to the
    last element), and the last interior index len(values)-2 was never
    tested. The duplicated progress-bar/no-progress-bar loops are merged.
    """
    local_min = []
    local_max = []
    indices = range(1, len(values) - 1)
    if show_status:
        # Wrap with a progress bar for long series.
        indices = tqdm(indices)
    for i in indices:
        if values[i - 1] > values[i] and values[i + 1] > values[i]:
            local_min.append(i)
        if values[i - 1] < values[i] and values[i + 1] < values[i]:
            local_max.append(i)
    return local_min, local_max
class SimplePendulum(PendulumSimulator):
    """Damped simple-pendulum simulator.

    Supports interactive parameter entry, batch simulation with CSV/plot
    output, and a live pygame animation. State is advanced by explicit
    Euler integration in step().
    """
    def __init__(self):
        # Physical parameters (SI units assumed).
        self.length: float = 1
        # NOTE(review): default looks like degrees (60) while the rest of the
        # class works in radians (get_input converts) — confirm intent.
        self.theta: float = 60
        self.theta_dot: float = 0
        self.dt: float = 0.01
        self.radius: float = 0.01
        self.viscosity: float = 0.0000174
        self.time: float = 0
        self.mass: float = 0.1
    def simulate_with_input(self) -> None:
        """Prompt for parameters, then run the live pygame animation."""
        self.get_input()
        self.simulate()
    def run_with_input(self) -> Tuple[np.ndarray, np.ndarray]:
        """Prompt for parameters and a duration, run the simulation, dump
        time/theta data and local extrema to CSV files, and plot the result."""
        self.get_input()
        time = float(input("time>>"))
        time, theta = self.process(time)
        # Timestamped output directory keeps runs from overwriting each other.
        date = datetime.today().strftime("%Y%m%d-%H%M%S")
        os.makedirs("result/" + date)
        with open("result/" + date + "/time-theta.csv", "w") as f:
            for i in tqdm(range(len(time))):
                f.write(str(time[i]))
                f.write(",")
                f.write(str(theta[i]))
                f.write("\n")
        local_min, local_max = get_extreme_value(theta)
        with open("result/" + date + "/time-local_min.csv", "w") as f:
            for i in local_min:
                f.write(str(time[i]))
                f.write(",")
                f.write(str(theta[i]))
                f.write("\n")
        with open("result/" + date + "/time-local_max.csv", "w") as f:
            for i in local_max:
                f.write(str(time[i]))
                f.write(",")
                f.write(str(theta[i]))
                f.write("\n")
        plt.plot(time, theta)
        plt.show()
        return time, theta
    def get_input(self) -> None:
        """Read the physical parameters from stdin (theta entered in degrees,
        stored in radians)."""
        self.length = float(input("length>>"))
        self.theta = math.radians(float(input("theta>>")))
        self.theta_dot = float(input("theta dot>>"))
        self.dt = float(input("dt>>"))
        self.radius = float(input("radius>>"))
        self.viscosity = float(input("viscosity>>"))
        self.mass = float(input("mass>>"))
    def process(self, max_time: float, show_status: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """Step the simulation from self.time to max_time and return the
        sampled (times, thetas) as numpy arrays."""
        times = []
        thetas = []
        if show_status:
            for i in tqdm(np.arange(self.time, max_time, self.dt)):
                times.append(i)
                thetas.append(self.theta)
                self.step()
        else:
            for i in np.arange(self.time, max_time, self.dt):
                times.append(i)
                thetas.append(self.theta)
                self.step()
        return np.array(times), np.array(thetas)
    def step(self):
        """Advance the state by one dt using explicit Euler integration."""
        # Stokes drag coefficient per unit mass: 6*pi*eta*r / m.
        mu = 6 * math.pi * self.viscosity * self.radius / self.mass
        # NOTE(review): the damping term is multiplied by dt here AND again
        # when integrating below — looks dimensionally off; confirm.
        double_dot_theta = -G / self.length * \
            math.sin(self.theta) - mu * self.theta_dot * self.dt
        self.theta_dot += double_dot_theta * self.dt
        self.theta += self.theta_dot * self.dt
    def simulate(self):
        """Animate the pendulum with pygame until the window is closed."""
        pygame.init()
        screen = pygame.display.set_mode([w, h])
        clock = pygame.time.Clock()
        # Run one simulation step per frame, so wall-clock ~= simulated time.
        fps = int(1 / self.dt)
        print(fps)
        while True:
            clock.tick(fps)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            # Bob position: 500-pixel arm from the pivot at `offset`.
            coord = offset[0] + 500 * \
                math.sin(self.theta), offset[1] + 500 * math.cos(self.theta)
            screen.fill(WHITE)
            pygame.draw.circle(screen, BLACK, offset, 8)
            pygame.draw.circle(screen, BLUE, coord, 10)
            pygame.draw.line(screen, RED, offset, coord)
            pygame.display.flip()
            self.step()
if __name__ == "__main__":
    # Interactive entry point: prompt for parameters, then animate live.
    obj = SimplePendulum()
    obj.simulate_with_input()
    # theta, time = obj.run_with_input()
    # plt.plot(theta, time)
    # plt.show()
| true |
d4f8f781bfca2193d1395d1dd36fb3d00e84f8fa | Python | ledennis/data-structures-and-algorithm-python | /Exercises/Chapter_1/Projects/P1_31.py | UTF-8 | 1,224 | 3.609375 | 4 | [] | no_license | def change(given, cost):
billsDict = {100.00:'One Hundred', 50.00:'Fifty', 20.00:'Twenty', 10.00:'Ten', 5.00:'Five', 1.00:'One'}
billsList = [100.00, 50.00, 20.00, 10.00, 5.00, 1.00]
coinsDict = {0.25:'Quarter', 0.10:'Dime', 0.05:'Nickel', 0.01:'Penny'}
coinsList = [0.25, 0.10, 0.05, 0.01]
moneyList = billsList + coinsList
moneyDict = billsDict.copy()
moneyDict.update(coinsDict)
response = 'I owe you, '
delimSpace = ' '
delimComma = ','
delimPeriod = '.'
totalResponse = 'totaling to '
total = 0
if (changeable(given, cost)):
diff = given - cost
total = diff
for money in moneyList:
if (diff / money >= 1):
divMoney = diff // money
diff = diff - (money * divMoney)
response += str(divMoney) + delimSpace + moneyDict[money]
if (diff != 0):
response += delimComma + delimSpace
return response + totalResponse + str(total) + delimPeriod
def changeable(given, cost):
if (cost > given):
print('You owe me more money!')
return False
else:
return True
print(change(100, 50))
print(change(123.45, 67.89))
| true |
aa869fb83ca77303fe0756ac6a7728c1d305a3da | Python | Beebruna/Python | /CursoemVideoPython/teste0L.py | UTF-8 | 757 | 3.921875 | 4 | [
"MIT"
# Curso em Vídeo exercises demonstrating the range() loop forms.
for c in range(1,6):  # prints 'Oi' 5 times, once for each c in 1..5
    print('Oi')
print('FIM')
for c in range(1,6):
    # prints the numbers 1..5
    print(c)
print('FIM')
for c in range(6,0,-1):  # prints the numbers in decreasing order, 6 down to 1
    print(c)
print('FIM')
for c in range(0,7,2):  # prints 0..6 stepping by 2
    print(c)
print('FIM')
n = int(input('Digite um número: '))
# Count from 0 up to the user's number, inclusive.
for c in range(0, n+1):
    print(c)
print('FIM')
i = int(input('Início: '))
f = int(input('Fim: '))
p = int(input('Passo: '))
# User-defined start, end (inclusive) and step.
for c in range(i, f+1, p):
    print(c)
print('FIM')
for c in range(0, 3):
    n = int(input('Digite um valor: '))
print('FIM')
# Accumulate the sum of four values read from the user.
s = 0
for c in range(0, 4):
    n = int(input('Digite um valor: '))
    s = s + n  #s += n
print(f'O somatório de todos os valores foi {s}')
a43752a53fc1662f676eb3aa1e9b5e9a77a6e2dd | Python | littlegirlorange/BreastCAD | /TrackLesions/LabelStatsLogic.py | UTF-8 | 3,919 | 2.53125 | 3 | [] | no_license | import vtk, qt, ctk, slicer
import string
import SimpleITK as sitk
import sitkUtils
class LabelStatsLogic:
    """This Logic is copied from the Label Statistics Module -Steve Pieper (Isomics)"""
    """Implement the logic to calculate label statistics.
    Nodes are passed in as arguments.
    Results are stored as 'statistics' instance variable.
    """
    # Column order for the per-label statistics table built in __init__.
    HEADER_KEYS = ("Index", "Count", "Volume mm^3", "DimX mm", "DimY mm", "DimZ mm", "COMX mm", "COMY mm", "COMZ mm", "Min", "Max", "Mean", "StdDev", "LabelNode", "ImageNode")
    def __init__(self, labelNode, label=None, grayscaleNode=None):
        # NOTE(review): Python 2 code — relies on builtin reduce() and
        # xrange(); needs functools.reduce / range under Python 3.
        #self.keys = ("Index", "Count", "Volume mm^3", "DimX mm", "DimY mm", "DimZ mm", "COMX mm", "COMY mm", "COMZ mm", "Min", "Max", "Mean", "StdDev", "LabelNode", "ImageNode")
        # Voxel volume in mm^3. NOTE(review): appears unused below (volume
        # comes from the SimpleITK shape filter) — confirm before removing.
        cubicMMPerVoxel = reduce(lambda x,y: x*y, labelNode.GetSpacing())
        self.labelStats = {}
        self.labelStats['Labels'] = []
        # Set up VTK histogram statistics filter.
        stataccum = vtk.vtkImageAccumulate()
        stataccum.SetInputConnection(labelNode.GetImageDataConnection())
        stataccum.Update()
        # Fall back to the label map itself when no grayscale volume is given.
        if not grayscaleNode:
            grayscaleNode = labelNode
        if label:
            # Restrict the scan to a single requested label value.
            lo = label
            hi = label
        else:
            lo = int(stataccum.GetMin()[0])
            hi = int(stataccum.GetMax()[0])
        if lo == 0:
            # Don't calculate statistics for the background.
            if hi == 0:
                # No label.
                return
            lo = 1
        # Set up SimpleITK shape statistics filter for label node.
        voxDims = labelNode.GetSpacing()
        labelName = labelNode.GetName()
        labelImage = sitkUtils.PullFromSlicer(labelName)
        labelShapeStatisticsFilter = sitk.LabelShapeStatisticsImageFilter()
        outputImage = labelShapeStatisticsFilter.Execute(labelImage, 0, False, False)
        for i in xrange(lo, hi + 1):
            # Isolate label i as a binary mask in the grayscale scalar type.
            thresholder = vtk.vtkImageThreshold()
            thresholder.SetInputConnection(labelNode.GetImageDataConnection())
            thresholder.SetInValue(1)
            thresholder.SetOutValue(0)
            thresholder.ReplaceOutOn()
            thresholder.ThresholdBetween(i, i)
            thresholder.SetOutputScalarType(grayscaleNode.GetImageData().GetScalarType())
            thresholder.Update()
            # Use the mask as a stencil so statistics cover only label i.
            stencil = vtk.vtkImageToImageStencil()
            stencil.SetInputConnection(thresholder.GetOutputPort())
            stencil.ThresholdBetween(1, 1)
            stat1 = vtk.vtkImageAccumulate()
            stat1.SetInputConnection(grayscaleNode.GetImageDataConnection())
            stencil.Update()
            stat1.SetStencilData(stencil.GetOutput())
            stat1.Update()
            if stat1.GetVoxelCount() > 0:
                vol = labelShapeStatisticsFilter.GetPhysicalSize(i)
                dims = labelShapeStatisticsFilter.GetBoundingBox(i) # [x0, y0, z0, dx, dy, dz]
                com = labelShapeStatisticsFilter.GetCentroid(i)
                # add an entry to the LabelStats list
                self.labelStats["Labels"].append(i)
                self.labelStats[i,"Index"] = i
                self.labelStats[i,"Count"] = stat1.GetVoxelCount()
                self.labelStats[i,"Volume mm^3"] = "{0:.1f}".format(vol)
                self.labelStats[i,"DimX mm"] = "{0:.1f}".format(dims[3]*voxDims[0])
                self.labelStats[i,"DimY mm"] = "{0:.1f}".format(dims[4]*voxDims[1])
                self.labelStats[i,"DimZ mm"] = "{0:.1f}".format(dims[5]*voxDims[2])
                self.labelStats[i,"COMX mm"] = "{0:.1f}".format(-com[0]) # Convert from LPS to RAS
                self.labelStats[i,"COMY mm"] = "{0:.1f}".format(-com[1]) # Convert from LPS to RAS
                self.labelStats[i,"COMZ mm"] = "{0:.1f}".format(com[2])
                self.labelStats[i,"Min"] = stat1.GetMin()[0]
                self.labelStats[i,"Max"] = stat1.GetMax()[0]
                self.labelStats[i,"Mean"] = "{0:.1f}".format(stat1.GetMean()[0])
                self.labelStats[i,"StdDev"] = "{0:.1f}".format(stat1.GetStandardDeviation()[0])
                self.labelStats[i,"LabelNode"] = labelName
                self.labelStats[i,"ImageNode"] = grayscaleNode.GetName()
| true |
bdf5ac0f40983c8da5716c0443b33fa8af5eca8d | Python | Oops324/TCGA_convert_XML_to_TXT | /concateClinicalXml.py | UTF-8 | 3,276 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
'''
Author: Anna Fei
Date: 23/Nov/2017
Usage: python concateClinicalXml.py clinicalF_List
Function: concatenate content of files
Version: python 2.7
'''
import xml.etree.ElementTree as ET
import csv
import sys
def Xml2csv(listF,outputF):
    """Concatenate selected clinical fields from a list of TCGA XML files
    into one tab-separated text file.

    listF   -- path to a text file listing one clinical-XML path per line
    outputF -- path of the TSV file to write (a header row is emitted only
               for the first input file)
    """
    listF_handle = open(listF,'r')
    Resident_data = open(outputF,'w')
    n = 0
    for fileName in listF_handle:
        n +=1
        xmlF = fileName.rstrip()
        tree = ET.parse(xmlF)
        root = tree.getroot()
        Head_L =[]
        Value_L =[]
        # NOTE(review): fields are addressed by fixed child indices
        # (root[1][k]); this silently breaks if the BCR schema ordering
        # differs between files — confirm all inputs share one schema.
        # Most of these variables are extracted but never written out;
        # only patient barcode, cancer status, vital status and
        # days-to-birth reach the output below.
        histological_type_tag = root[1][2].tag
        histological_type_text = root[1][2].text
        vital_status_tag = root[1][5].tag
        vital_status_text = root[1][5].text
        daysToBirth_tag = root[1][6].tag
        daysToBirth_text = root[1][6].text
        daysToLastKnowAlive_tag = root[1][7].tag
        daysToLastKnowAlive_text = root[1][7].text
        daysToDeath_tag = root[1][8].tag
        daysToDeath_text = root[1][8].text
        daysToLastFollowup_tag = root[1][9].tag
        daysToLastFollowup_text = root[1][9].text
        raceList_tag = root[1][10].tag
        raceList_text = root[1][10].text
        patientBarcode_tag = root[1][11].tag
        patientBarcode_text = root[1][11].text
        patientId_tag = root[1][13].tag
        patientId_text = root[1][13].text
        patientUUID_tag = root[1][14].tag
        patientUUID_text = root[1][14].text
        icdSite_tag = root[1][17].tag
        icdSite_text = root[1][17].text
        icdHistology_tag = root[1][18].tag
        icdHistology_text = root[1][18].text
        icd10_tag = root[1][19].tag
        icd10_text = root[1][19].text
        stageEvent_tag = root[1][25].tag
        stageEvent_text = root[1][25].text
        cancerStatus_tag = root[1][26].tag
        cancerStatus_text = root[1][26].text
        residualTumor_tag = root[1][33].tag
        residualTumor_text = root[1][33].text
        HeadFinal_L = []
        ValueFinal_L = []
        Head_L = [patientBarcode_tag,cancerStatus_tag,vital_status_tag,daysToBirth_tag]
        Value_L = [patientBarcode_text,cancerStatus_text,vital_status_text,daysToBirth_text]
        for element in Head_L:
            if element is None:
                element = str(element)
            # Strip the "{namespace}" prefix from each tag name.
            title = element.split("}",1)[1]
            HeadFinal_L.append(title)
        for Element in Value_L:
            if Element is None:
                Element = str(Element)
            ValueFinal_L.append(Element)
        HeadLine = "Index\t" + "\t".join(HeadFinal_L)
        ValueLine = "Patient\t" + "\t".join(ValueFinal_L)
        # Write the header once, before the first patient row.
        if n ==1:
            Resident_data.write("%s\n" % HeadLine)
            Resident_data.write("%s\n" % ValueLine)
        else:
            Resident_data.write("%s\n" % ValueLine)
    Resident_data.close()
if __name__=="__main__":
    listF = sys.argv[1]
    # Output name: "<list-file stem>_Total.txt".
    outputF = listF.split(".",1)[0]+"_Total.txt"
    Xml2csv(listF,outputF)
'''
head_L = []
value_L = []
for n in range(1,66):
title_long = root[1][n].tag
title = title_long.split("}",1)[1]
value = root[1][n].text
if title is None:
title= str(title)
if value is None:
value = str(value)
head_L.append(title)
value_L.append(value)
print head_L
print value_L
headLine = "Patient\t" + "\t".join(head_L)
valueLine = "Patient\t" + "\t".join(value_L)
for member in root:
print "member.tag:",member.tag
for patient in root.findall('{http://tcga.nci/bcr/xml/clinical/read/2.7}patient'):
gender = patient.find('{http://tcga.nci/bcr/xml/shared/2.7}gender').text
print "find.text",gender
gender = patient.find('{http://tcga.nci/bcr/xml/shared/2.7}gender').text
'''
| true |
9cf5c10a3b7608e7ec5cbcc05de4e668c460ac93 | Python | sujin16/PythonTcpSocket | /test_server.py | UTF-8 | 1,341 | 2.90625 | 3 | [] | no_license | import socket, errno
# Simple line-oriented TCP echo server for local testing.
ip = '127.0.0.1'
port = 9999
'''
ip = '192.168.0.2'
port = 9002
ip = '127.0.0.1'
port = 9999
'''

def parsing_data(data):
    """Join the buffered characters into one message string and echo it.

    Fixed: the original declared an unused *self* parameter but was
    invoked as a plain function with a single argument.
    """
    tmp = ''.join(data)
    print(tmp)
    return tmp

line = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("create server socket")
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((ip, port))
print("server socket bind. "+ip + ":"+str(port))
server_socket.listen()
print("server socket listen")
client_socket, addr = server_socket.accept()
print('Connected by', addr)
try:
    while True:
        data = client_socket.recv(1024)
        if not data:
            # Peer closed the connection.
            break
        # recv() returns bytes; iterating yields int byte values, so chr()
        # is applied per byte (the original called chr() on the whole
        # bytes object and compared bytes to the int 10, which never
        # matched).
        for byte in data:
            line.append(chr(byte))
            if byte == 10:  # '\n' terminates one message
                readystr = parsing_data(line)
                line = []
                print('Received from', addr, readystr)
        if data.decode(errors='replace').strip() == "finish":
            break
except KeyboardInterrupt:
    # Fixed: the original printed after `break`, which was unreachable.
    print("Got KeyboardInterrupt")
# Close both sockets exactly once (the original could close them twice).
client_socket.close()
print("clinet socket close")
server_socket.close()
print("server socket close")
| true |
ec9fbf96013dc1d23ccb1c9a00ba7e2ab3cd6147 | Python | carlos-echeverria/RobotTuples | /generateGroupsForce.py | UTF-8 | 6,555 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python3
# Script that generates 34 groups (nGroups) each consisting of 24 robot 3-tuples.
# The groups are generated by populating them with random elements from all the
# possible combinations of 18 robots in 3-tuples.
import robotTuples as rT
import matplotlib.pyplot as plt
from collections import Counter, OrderedDict
from random import seed
from datetime import datetime
n = 18 # total number of robots
k = 3 # number of robots per tuple
groupSize = 24 # number of tuples per group
nGroups = 34 # number of groups desired
margin = 24 # parameter that defines the number of "new" tuples needed to accept group
# creates list of robots:
robots = list(range(1,n+1))
# generates all the possible tuples with the above given parameters:
totalTuples = rT.possibleTuples(robots,k)
totalCombis = len(totalTuples) # number of available tuples to choose from
print(f"\nWith {n} robots, there are {totalCombis} posible combinations of {k}-tuples.\n\nWe will construct {nGroups} groups of {k}-tuples, each consisting of {groupSize} elements.\n\nEvery robot should appear {4} times in each group.")
# initializes dictionary where groups will be stored:
groupsDictionary = {}
# generates groups using the functions available in the file 'robotTuples.py':
iter=0
countr=0
trial=0
while iter<nGroups:
# creates all possible tuples defined by the parameters from above:
totalTuples=rT.possibleTuples(robots,k)
# generates a group which fulfills the constraints:
currentGroup=rT.generateGroupInSilence(totalTuples, groupSize)
# add the created group to a dictionary:
groupsDictionary.update({iter:currentGroup})
trial=trial+1
# extracts every robot tuple from dictionary then makes a list:
total = groupsDictionary.values()
tupleList = [tuple for group in total for tuple in group]
# print(f"\nThe total list of tuples is: {tupleList}. It has {len(tupleList)} elements.")
oldCountr=countr
# Counts the number of times each tuple appears in the total list:
count = Counter(tupleList)
countr = len(count)
if iter<17:
margin=24
else:
margin=17
if trial%100==0:
print(f"\nTrial {trial}: Group {iter+1}/{34} would add {countr-oldCountr} new tuples (we want {margin}). Total unique tuples: {countr}.\n")
if countr >= oldCountr+margin:
print(f"\nTrial {trial}: We have created {iter+1} groups. We used {countr} tuples from the {totalCombis} possible ones.\n")
iter=iter+1
totalTuplesALL = rT.possibleTuples(robots,k)
diffSet = set(totalTuplesALL) - set(tupleList)
diffList=list(diffSet)
print(f"\nCurrent solution set:\n\n {groupsDictionary}.")
print(f"\nList of unused tuples:\n\n {diffList}.\n")
# print(groupsDictionary)
else:
groupsDictionary.pop(iter)
countr = oldCountr
totalTuples = rT.possibleTuples(robots,k)
myList=list(OrderedDict.fromkeys(tupleList))
orderedCount = OrderedDict(count.most_common())
diffList=list(set(totalTuples) - set(tupleList))
# print(diffList)
# print(f"\nThe number of tuples used in the solution set is: {len(myList)} out of the {len(totalTuples)} possible ones. They are:\n")
# print(f"tuple\t\ttimes seen\n-----\t\t----------")
# for key, value in orderedCount.items():
# print(f"{key}\t:{value}")
# print(f"\nThe number of tuples not used in the group is: {len(diffList)}. They are:\n")
# print(f"tuple\t\ttimes seen\n-----\t\t----------")
# for tuple in sorted(diffList):
# print(f"{tuple}\t:{0}")
# diffCount = Counter(diffList)
## Saving output of experiment
# gets the current date and time to generate a unique filename for storing results:
now = datetime.now()
date1 = now.strftime("%Y:%m:%d")
date2 = now.strftime("%Y/%m/%d")
time = now.strftime("%H:%M:%S")
# prints resulting groups to unique file with human readable format:
with open(f"TupleResults.txt", 'w+') as file:
file.write(f"\nThe total number of tuples is: {len(totalTuples)}.\n")
file.write(f"\nThe number of tuples used in the solution set is: {len(myList)}. They are:\n")
file.write(f"\ntuple\t\ttimes used\n-----\t\t----------\n")
for key, value in orderedCount.items():
file.write(f"{key}\t:{value}\n")
file.write(f"\nThe number of tuples not used in the solution set is: {len(diffList)}. They are:\n")
file.write(f"\ntuple\t\ttimes used\n-----\t\t----------\n")
for tuple in sorted(diffList):
file.write(f"{tuple} \t:{0}\n")
# prints resulting groups to unique file with human readable format:
with open(f"Results.txt", 'w+') as file:
file.write(f"The following solution set of groups was created on {date2} at {time}.\n\nThere solution set uses a total of {countr} unique tuples from the {totalCombis} possible ones.\n")
# file.write(f"We have:\n {count}\n")
for i in range(0,nGroups):
file.write(f"\n Group {i+1}: {groupsDictionary[i]}\n")
file.write(f"\nThere are a total of {len(diffList)} unused tuples from the {totalCombis} possible ones.\n")
file.write(f"\n Unused Group: {diffList}\n")
# prints resulting groups to another unique file for javascript experiment:
with open(f"Results_js.txt", 'w+') as file:
for i in range(0,nGroups):
file.write(f"\n---------Group{i+1}---------\n")
for tuple in groupsDictionary[i]:
idx = groupsDictionary[i].index(tuple)
file.write(f"var set{idx+1} = {list(tuple)};\n")
# prints resulting groups to another unique file for javascript experiment:
# with is like your try .. finally block in this case
for i in range(nGroups):
with open('experiment.html', 'r') as file:
# read a list of lines into data
data = file.readlines()
with open(f"experiment_{i+1}.html", 'w+') as file:
for tuple in groupsDictionary[i]:
idx = groupsDictionary[i].index(tuple)
data[12+idx] = f" var set{idx+1} = {list(tuple)};\n"
file.writelines(data)
# Huh?
print(f"\nDid this work?.\n")
# 24 1
# 48 2
# 72 3
# 96 4
# 120 5
# 144 6
# 168 7
# 192 8
# 216 9
# 240 10
# 264 11
# 288 12
# 312 13
# 336 14
# 360 15
# 384 16
# 408 17
# 432 18
# 456 19
# 480 20
# 504 21
# 528 22
# 552 23
# 576 24
# 600 25
# 624 26
# 648 27
# 672 28
# 696 29
# 720 30
# 744 31
# 768 32
# 792 33
# 814 34
# 24*15+(19*17)=683
| true |
3a52f8b9bba54d1ab62617bc9fb74cc7210a5d7d | Python | rsprouse/ucblingmisc | /python/vc_transitions | UTF-8 | 5,179 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
"""
Usage: {scriptname} [get_f0_args] wavfile
Process input wavfile with esps utilities to find voicing transition points.
Output a Praat textgrid to STDOUT with transitions to voiced state labelled
'vd' and transitions to voiceless state labelled 'vl'.
Additional arguments are passed directly to the esps get_f0 utility.
IMPORTANT NOTE: This script cannot be used as-is. You must provide appropriate
values for t1_start and t1_step. The default values are possibly correct for the
default behavior of get_f0. There is still some thinking to do on this point.
If you pass non-default parameters to get_f0 you should make sure to adjust
t1_start and t1_step, if necessary.
In a future version of this script you will be able to pass appropriate
t1_start and t1_step values without having to edit the script, or we will
calculate the right values based on the get_f0 parameters.
"""
# TODO: allow t1_start and t1_step to be passed as command line arguments.
# These will need to be separated from arguments passed to get_f0.
# Or perhaps we can automatically calculate these values based on get_f0
# parameter values.
# These are t1 values automatically assigned to the output rows of get_f0.
# The first row gets the value of t1_start, and subsequent rows add t1_step
# to the t1 value of the preceding row.
t1_start = 0.000
t1_step = 0.01
# Authors: Ronald L. Sprouse (ronald@berkeley.edu)
#
# Copyright (c) 2014, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import subprocess
import audiolabel
# Input wavfile is specified last. All other input arguments are passed
# to get_f0.
wavfile = sys.argv[-1]
if wavfile == '--help' or len(sys.argv) == 1:
    # Show usage (the module docstring) and exit; status 1 when no args.
    scriptname = sys.argv[0].split("/")[-1]
    print(__doc__.format(scriptname=scriptname))
    if len(sys.argv) == 1:
        exit(1)
    else:
        exit(0)
gf0_args = sys.argv[1:-1] # all args except first and last
gf0_args.insert(0, "get_f0")
gf0_args.append('-') # read from STDIN
gf0_args.append('-') # output to STDOUT
# Pipeline: rem_dc -> get_f0 -> pplain, wired together via stdout/stdin.
# Remove wavfile's DC offset, which can adversely affect RMS and pitch
# calculations if large.
rdc_proc = subprocess.Popen(
    ["rem_dc", wavfile, "-"],
    stdout=subprocess.PIPE)
# Run get_f0 for voicing states.
gf0_proc = subprocess.Popen(
    gf0_args,
    stdin=rdc_proc.stdout,
    stdout=subprocess.PIPE)
# Convert binary output to a tabular text format.
ppl_proc = subprocess.Popen(
    ["pplain", "-"],
    stdin=gf0_proc.stdout,
    stdout=subprocess.PIPE)
# Close the first two processes' output so that the last process doesn't block
# while waiting for more input.
rdc_proc.stdout.close()
gf0_proc.stdout.close()
# Read the esps output columns as separate label file tiers. Automatically add
# a time component to the output rows.
lm = audiolabel.LabelManager(
    from_file=ppl_proc.stdout, # read directly from pplain output
    from_type='table',
    sep=" ",
    fields_in_head=False,
    fields="f0,is_voiced,rms,acpeak",
    t1_col=None, # esps output doesn't have a t1 column
    t1_start=t1_start, # autocreate t1 starting with this value and
    t1_step=t1_step) # increase by this step
# Create a label whenever voicing state changes:
# 'vd' on a voiceless->voiced edge, 'vl' on a voiced->voiceless edge.
pt = audiolabel.PointTier()
was_voiced = False
for lab in lm.tier('is_voiced'):
    if lab.text == '1':
        if not was_voiced:
            pt.add(audiolabel.Label(text='vd', t1=lab.t1))
            was_voiced = True
    else:
        if was_voiced:
            pt.add(audiolabel.Label(text='vl', t1=lab.t1))
            was_voiced = False
# Output to STDOUT.
outm = audiolabel.LabelManager()
outm.add(pt)
print(outm._as_string('praat_long'))
| true |
7a6b21ecb91bbc1f2e11f7cb18f7ba663da641d5 | Python | renatovianna/Curso_Topicos_Programacao_Python | /DicionárioAinda.py | UTF-8 | 189 | 2.65625 | 3 | [] | no_license | itensMercado = {'fruta' : 'maçã', 'laticíonio' : 'iogurte', 'bebida' : 'suco', 'proteína': 'frango'}
print('fruta' in itensMercado.values())
print('bebida' in itensMercado.keys())
| true |
b894e7f77d5c332d8210dd6acdc6a81e9edf5f99 | Python | genomeannotation/xxxxxxxxxxgap4t | /src/sam.py | UTF-8 | 1,234 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python
# Read a sam file with barcoded reads. For each seq/chromosome
# represented in the file, store a list of barcodes found on it
# and a count of how many times it appears
import sys
# Width (in bases) of the position bins that reads are grouped into.
# NOTE(review): the original referenced WINDOW_SIZE without defining it
# anywhere (NameError at runtime) -- 100000 is a guess; confirm.
WINDOW_SIZE = 100000

def main():
    """Read barcoded SAM records from stdin and report barcodes per window.

    Output format: first every barcode (one per line, its line order
    defining its integer id), then for each position window a
    ">window_number" line followed by the ids of the barcodes seen there.
    """
    seqs = {}  # window number -> set of barcodes seen in that window
    all_barcodes = set()
    for line in sys.stdin:
        fields = line.strip().split()
        position = int(fields[3])
        # The RX (raw barcode) tag usually sits near the end of the
        # optional fields, so scan backwards.
        barcode = None
        for field in reversed(fields):
            if field.startswith("RX"):
                barcode = field.split(":")[2]
                break
        if barcode is None:
            # Skip reads without a barcode tag (the original crashed here
            # with an UnboundLocalError on barcode_field).
            continue
        window_number = int(position / WINDOW_SIZE + 1)
        all_barcodes.add(barcode)
        seqs.setdefault(window_number, set()).add(barcode)
    # Assign each barcode a stable integer id and print the legend.
    barcode_map = {}
    for i, barcode in enumerate(all_barcodes):
        barcode_map[barcode] = i
        sys.stdout.write(barcode + "\n")
    # One section per window, listing the ids of its barcodes.
    for window, barcodes in seqs.items():
        sys.stdout.write(">" + str(window) + "\n")
        for barcode in barcodes:
            sys.stdout.write(str(barcode_map[barcode]) + "\n")

if __name__ == "__main__":  # fixed: the original had an unterminated string literal here (SyntaxError)
    main()
| true |
bb88638a2a1a3cc934de36c7f1bd930f4dbccc12 | Python | acutkosky/cs181 | /hw1/main.py | UTF-8 | 7,615 | 2.765625 | 3 | [] | no_license | # main.py
# -------
# Ashok Cutkosky and Tony Feng
import matplotlib.pyplot as plt
from pylab import *
import random
from dtree import *
import sys
from copy import deepcopy
class Globals:
    # Module-wide experiment settings, populated from the command line.
    noisyFlag = False   # use noisy.csv instead of data.csv
    pruneFlag = False   # enable post-pruning with a validation set
    valSetSize = 0      # number of examples held out for pruning
    dataset = None      # the loaded DataSet, once read
##Classify
#---------
def classify(learner, example):
    """Return the learner's predicted label for a single example."""
    return learner.predict(example)
##Learn
#-------
def learn(dataset, prune,maxdepth):
    """Train and return a learner for *dataset*.

    Builds a DecisionTreeLearner (with pruning-set size *prune*) by
    default, or a BoostingLearner when dataset.use_boosting is set.
    """
    if (not dataset.use_boosting):
        learner = DecisionTreeLearner(pruningsize = prune)
    else:
        learner = BoostingLearner(dataset.num_rounds,maxdepth,[],[])
    learner.train( dataset)
    return learner
# main
# ----
# The main program loop
# You should modify this function to run your experiments
def parseArgs(args):
    """Parse an argument vector, looking for switches of the form -key {optional value}.

    Switch values are kept as strings. For example:
        parseArgs([ 'main.py', '-n', '-p', '5' ]) = { '-n':True, '-p':'5' }

    Fixed: used Python-2-only xrange (range works on both 2 and 3), and
    the docstring wrongly claimed the value was stored as an int.
    """
    args_map = {}
    curkey = None
    # Start at 1 to skip the program name.
    for i in range(1, len(args)):
        if args[i][0] == '-':
            args_map[args[i]] = True
            curkey = args[i]
        else:
            # A bare token must follow a switch; it becomes that switch's value.
            assert curkey
            args_map[curkey] = args[i]
            curkey = None
    return args_map
def validateInput(args):
    """Translate the parsed command-line switches into the experiment
    settings list [noisyFlag, pruneFlag, valSetSize, maxDepth, boostRounds].

    -n enables noisy data; -p N enables pruning with a validation set of
    size N; -d and -b default to -1 (disabled) when absent.
    """
    args_map = parseArgs(args)
    noisyFlag = '-n' in args_map
    pruneFlag = '-p' in args_map
    valSetSize = int(args_map['-p']) if pruneFlag else 0
    maxDepth = int(args_map['-d']) if '-d' in args_map else -1
    boostRounds = int(args_map['-b']) if '-b' in args_map else -1
    return [noisyFlag, pruneFlag, valSetSize, maxDepth, boostRounds]
def check_examples(train, examples,targetval):
    """Return the fraction of *examples* whose predicted label matches
    their stored attribute *targetval*.

    Fixed: the original raised ZeroDivisionError on an empty example
    list; an empty list now scores 0.0.
    """
    if not examples:
        return 0.0
    score = sum(1.0 for example in examples
                if classify(train, example) == example.attrs[targetval])
    return score / len(examples)
def crossvalidation(dataset,numexamples, pruneFlag, valSetSize,maxDepth):
    """Run 10-fold cross-validation on *dataset* (whose examples list has
    been doubled by the caller so slices can wrap past the end).

    Returns (mean validation-set score, mean training-set score).
    NOTE(review): the slice arithmetic uses '/' for integer division —
    this is Python 2 code; under Python 3 the slice bounds become floats.
    """
    targetval = dataset.target
    valcumulativescore = 0.0
    learncumulativescore = 0.0
    # Pruning-set size only matters when pruning is enabled.
    if (not pruneFlag):
        valSetSize = 0
    for i in range(10):
        #divide up the data into chunks of 90% training, 10% validation
        learndata = dataset.examples[i*numexamples/10:(i-1)*numexamples/10+numexamples]
        training_data = learndata
        #set aside validation data
        validationdata = dataset.examples[(i-1)*numexamples/10+numexamples:(i)*numexamples/10+numexamples]
        # Save the full example list so it can be restored after training.
        old = deepcopy(dataset.examples)
        dataset.examples = training_data
        train = learn(dataset, valSetSize,maxDepth)
        # Score on both the held-out fold and the training fold.
        valscore = check_examples(train, validationdata,targetval)
        learnscore = check_examples(train, learndata,targetval)
        valcumulativescore +=valscore
        learncumulativescore +=learnscore
        dataset.examples = old
        # print train.dt
        # print "valscore: ",valscore,"learnscore: ",learnscore
        # exit()
    valcumulativescore /= 10
    learncumulativescore /= 10
    return valcumulativescore, learncumulativescore
def prune(learner, z, examples):
    """Bottom-up reduced-error pruning of decision-tree node *z* using
    *examples*: a subtree is replaced by a majority-class leaf when the
    majority rule scores at least as well on the examples routed here.

    NOTE(review): the final 'z = DecisionTree(...)' rebinds the LOCAL name
    only — the parent's branch still references the old subtree, so the
    pruning decision is never actually applied to the tree. Confirm and
    fix by assigning into the parent's branches instead.
    """
    #if it's a leaf, leave it alone
    if z.nodetype == DecisionTree.NODE:
        #get branches
        branches = z.branches
        #get the attribute
        attr = z.attr
        # Recurse first so children are pruned before this node is judged.
        for (v, examples_i) in learner.split_by(attr, examples):
            #if there aren't any such examples, ignore that branch
            if len(examples_i)>0:
                prune(learner, branches[v], examples_i)
        #yz = majority class
        yz = learner.majority_value(examples)
        num_yz = learner.count(learner.dataset.target, yz, examples)
        tot = len(examples)
        # Score of always predicting the majority class here.
        t0_score = float(num_yz)/tot
        z_score = check_examples(z, examples, learner.dataset.target)
        #print z_score, t0_score
        #if the majority rule performs better, cut off the rest of the tree
        if z_score <= t0_score:
            z = DecisionTree(DecisionTree.LEAF, classification = yz)
            # print "I pruned something!"
        # else:
        #    print "I didn't prune!"
def main(n, p, v, d, br):
    """Load the data, configure boosting/pruning, and run one
    cross-validation experiment; returns (valscore, learnscore).

    NOTE(review): the parameters n/p/v/d/br are currently ignored — the
    settings come from sys.argv via validateInput (see the commented-out
    assignments below). This is Python 2 code (print statements).
    """
    arguments = validateInput(sys.argv)
    noisyFlag, pruneFlag, valSetSize, maxDepth, boostRounds = arguments
    print noisyFlag, pruneFlag, valSetSize, maxDepth, boostRounds
    #noisyFlag = n
    #pruneFlag = p
    #valSetSize = v
    #maxDepth = d
    #boostRounds = br
    # Read in the data file
    if noisyFlag:
        f = open("noisy.csv")
    else:
        f = open("data.csv")
    data = parse_csv(f.read(), " ")
    dataset = DataSet(data)
    # Copy the dataset so we have two copies of it
    # (doubling lets crossvalidation's slices wrap past the end).
    examples = dataset.examples[:]
    numexamples = len(examples)
    dataset.examples.extend(examples)
    dataset.max_depth = maxDepth
    if boostRounds != -1:
        dataset.use_boosting = True
        dataset.num_rounds = boostRounds
    valscore,learnscore = crossvalidation(dataset,numexamples, pruneFlag, valSetSize,maxDepth)
    return (valscore, learnscore)
# ====================================
# WRITE CODE FOR YOUR EXPERIMENTS HERE
# ====================================
# Single experiment run: clean data, no pruning, depth 2, 30 boosting rounds.
valscore,learnscore = main(False,False,0, 2, 30)
print "valscore: ",valscore," learnescore: ",learnscore
"""
xs = range(1, 16)
valscores_noiseless = []
learnscores_noiseless = []
for x in xs:
a,b = main(False, False, 0, 1, x)
valscores_noiseless.append(a)
learnscores_noiseless.append(b)
print valscores_noiseless
print learnscores_noiseless
xs = range(1, 31)
valscores_noiseless = []
learnscores_noiseless = []
for x in xs:
a,b = main(False, False, 0, 1, x)
valscores_noiseless.append(a)
#learnscores_noiseless.append(b)
print valscores_noiseless
#print learnscores_noiseless
valscores_noise = []
#learnscores_noise = []
for x in xs:
a,b = main(True, False, 0, 1, x)
valscores_noise.append(a)
# learnscores_noise.append(b)
print valscores_noise
#print learnscores_noise
ax=plt.subplot(111)
ax.plot(xs, valscores_noiseless, 'b-', linewidth = 2.5, label = "Validation scores, noiseless")
ax.plot(xs, valscores_noise, 'g-', linewidth = 2.5, label = "Validation scores, noisy")
ax.legend()
ax.set_xlabel("Boosting set size", fontsize = 16)
ax.set_ylabel("Score", fontsize = 16)
ax.set_title("Boosting Set Size and Performance", fontsize = 20)
plt.show()
"""
"""
valscore,learnscore = main(False,False,0)
print "Validation set score: ",valscore," Learning set score: ",learnscore
xs = range(1, 81)
valscores_noiseless = []
learnscores_noiseless = []
for x in xs:
a,b = main(False, True, x)
valscores_noiseless.append(a)
learnscores_noiseless.append(b)
print valscores_noiseless
print learnscores_noiseless
valscores_noise = []
learnscores_noise = []
for x in xs:
a,b = main(True, True, x)
valscores_noise.append(a)
learnscores_noise.append(b)
print valscores_noise
print learnscores_noise
ax=plt.subplot(111)
ax.plot(xs, valscores_noiseless, 'b-', linewidth = 2.5, label = "Validation scores, noiseless")
ax.plot(xs, learnscores_noiseless, 'r-', linewidth = 2.5, label = "Learning scores, noiseless")
ax.plot(xs, valscores_noise, 'g-', linewidth = 2.5, label = "Validation scores, noisy")
ax.plot(xs, learnscores_noise, 'm-', linewidth = 2.5, label = "Learning scores, noisy")
ax.legend()
ax.set_xlabel("boosting rounds", fontsize = 16)
ax.set_ylabel("Score", fontsize = 16)
ax.set_title("Boosting Rounds and Performance", fontsize = 20)
plt.show()
"""
| true |
20fb5a3baa48e6846d0eaf106a053a3ea2d972ff | Python | radhar16/python-challenge | /pyPoll/Solved/main.py | UTF-8 | 2,361 | 3.5 | 4 | [] | no_license | import os
import csv
# Path to collect data from the Resources folder
pyPoll_csv = os.path.join('..', 'Resources', 'election_data.csv')

# Creating lists to read the data
vote_counts = []
candidates = []
unique_candidates = []
percent_vote = []
total_counts = 0

# Read in the CSV file; each row's third column is the candidate voted for.
with open(pyPoll_csv, 'r' ) as csvfile:
    # Split the data on commas
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    for row in csvreader:
        # calculate total number of vote counts
        total_counts = total_counts + 1
        # Append candidates
        candidates.append(row[2])

# set() is used to collect the unique candidate names
for candidate in set(candidates):
    unique_candidates.append(candidate)
    # calculate the total votes for each candidate
    votes = candidates.count(candidate)
    vote_counts.append(votes)
    # calculate the percentage of votes each candidate won
    percent = round((votes/total_counts)*100,2)
    percent_vote.append(percent)

# Identifying the winner based on max vote counts by using index() function to return the winner candidate.
winning_votes_count = max(vote_counts)
winner = unique_candidates[vote_counts.index(winning_votes_count)]

print(f"Election Results")
print(f"-----------------------------")
print(f"Total Votes: {total_counts}")
print(f"-----------------------------")
for i in range(len(unique_candidates)):
    print(f"{unique_candidates[i]} : {percent_vote[i]}% ({vote_counts[i]})")
print(f"-----------------------------")
print(f"Winner : {winner}")
print(f"-----------------------------")

# Set variable for output file
output_file = os.path.join('..', "Analysis", "output_data.txt")
# Open the output file and mirror the console report into it
with open(output_file, "w") as datafile:
    datafile.write(f"Election Results"+"\n")
    datafile.write(f"-----------------------------"+"\n")
    datafile.write(f"Total Votes: {total_counts}"+"\n")
    datafile.write(f"-----------------------------"+"\n")
    for i in range(len(unique_candidates)):
        datafile.write(f"{unique_candidates[i]} : {percent_vote[i]}% ({vote_counts[i]})"+"\n")
    datafile.write(f"-----------------------------"+"\n")
    datafile.write(f"Winner : {winner}"+"\n")
    datafile.write(f"-----------------------------"+"\n")
| true |
726b36e0c129f5a06bb7d8d82c23a059f5839266 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2468/60755/266654.py | UTF-8 | 261 | 3.328125 | 3 | [] | no_license | num = int(input())
# For each of `num` test cases: read a count, then the elements, and print,
# for every element, the product of all the *other* elements.
for i in range(num):
    n = int(input())  # element count (not otherwise needed)
    s = input().split(" ")
    mul = 1
    for k in s:
        mul = mul * int(k)
    res = []
    for k in s:
        # The full product is an exact multiple of each element, so integer
        # floor division is exact here.  The original int(mul / int(k)) went
        # through a float and silently lost precision for large products.
        res.append(str(mul // int(k)))
    result = " ".join(res)
    print(result+" ")
de13d99d396ee03b8c48839dfe378aae4c9e2689 | Python | oknix/jarujaru_prediction | /utils/model.py | UTF-8 | 1,598 | 3.15625 | 3 | [] | no_license | import numpy as np
class _BaseNB:
def __init__(self, alpha=1.0, beta=1.0, theta_c=None, phi_cv=None):
"""
:param alpha: smoothing param for calculating each category's prior distribution (default: Laplace smoothing)
:param beta: smoothing param for calculating the probability of category c choosing the word v
(default: Laplase smoothing)
:param theta_c: category c's prior distribution
:param phi_cv: the probability of category c selecting the word v
"""
self.alpha = alpha
self.beta = beta
self.theta_c = theta_c
self.phi_cv = phi_cv
class tBernoulliNB(_BaseNB):
    """Bernoulli naive Bayes over binary word-occurrence features."""

    def fit(self, x, y):
        """Estimate theta_c (category prior) and phi_cv (word probabilities).

        x: (n_documents, vocabulary_size) binary occurrence matrix
        y: (n_documents,) category labels
        """
        n_documents, vocabulary_size = x.shape
        n_categories = len(set(y))
        categories = sorted(list(set(y)))
        if self.theta_c is None:
            self.theta_c = np.zeros(shape=(n_categories, 1), dtype=np.float32)
        if self.phi_cv is None:
            self.phi_cv = np.zeros(shape=(n_categories, vocabulary_size), dtype=np.float32)
        for i, category in enumerate(categories):
            lc = np.sum(y == category)
            # Smoothed prior for category i (alpha-smoothed relative frequency).
            self.theta_c[i] = (lc + self.alpha) / (n_documents + n_categories * self.alpha)
            c_docs = x[y == category]
            mc = np.sum(c_docs, axis=0)
            # Bernoulli likelihood has two outcomes per word, hence 2*beta.
            self.phi_cv[i] = (mc + self.beta) / (lc + 2. * self.beta)

    def predict(self, x):
        """Return the most probable category index for each row of binary *x*."""
        log_likelihood = np.dot(np.log(self.phi_cv), x.T) + np.dot(np.log(1. - self.phi_cv), (1. - x).T)
        y = np.argmax(np.log(self.theta_c) + log_likelihood, axis=0)
        return y
| true |
b2160a1399ded260c6956f95b45cbdd227b1f758 | Python | saintlyzero/movies-api | /imdb/movie_api/utils/constants.py | UTF-8 | 1,948 | 2.640625 | 3 | [] | no_license | import datetime as d
class Constants:
    """
    This class contains all the constants used by Movie_api
    """
    # Messages (user-facing / API response strings)
    CREDENTIALS_MISSING = "Credentials Missing"
    CREDENTIALS_INVALID = "Invalid Credentials"
    CREDENTIALS_VALID = "Valid Credentials"
    RECORD_NEW = "Added new record"
    RECORD_DELETED = "Deleted record"
    RECORD_UPDATED = "Updated record"
    RECORD_UPDATE_FAIL = "Failed Updating record"
    RECORD_NOT_EXISTS = "Specified record does not exists"
    FORBIDDEN = "You don't have enough permissions"
    PARAMETERS_MISSING = "Missing required Parameters"
    DATA_VALID = "Valid Data"
    DATA_INVALID = "Invalid Data"
    MISSING_MOVIE_ID = "Missing Movie_Id"
    TITLE_ERROR = "Title should have length between 1 - 100"
    DESCRIPTION_ERROR = "Description should have length between 1 - 200"
    RATING_ERROR = "Rating value should be between 0 - 5"
    YEAR_ERROR = "Year value should be between 1980 - current year"
    INVALID_KEY = "Invalid Key"
    NOT_AUTHORIZED = "Not Authorized to perform this operation"
    MOVIE_DETAILS_SUCCESS = "Success fetching movie details"
    MOVIE_DETAILS_FAIL = "Error fetching movie details"
    EMPTY_RESULTSET = "Empty result-set"

    # Keys (dict/JSON field names used in requests and responses)
    IS_ROOT = "is_root"
    MESSAGE = "message"
    DATA = "data"
    CREDENTIALS = "credentials"
    STATUS = "status"
    RESULT = "result"
    MOVIE_TITLE = "title"
    MOVIE_DESCRIPTION = "description"
    MOVIE_RATING = "rating"
    MOVIE_YEAR = "year"
    MOVIE_ID = "id"
    RECORD = "record"
    ERROR = "error"
    TOKEN = "token"

    # Status (HTTP status codes)
    STATUS_OK = 200
    STATUS_FORBIDDEN = 403
    STATUS_UNAUTHORIZED = 401
    STATUS_BAD_REQUEST = 400

    # Values (validation bounds for movie fields)
    RATING_MIN_VALUE = 0
    RATING_MAX_VALUE = 5
    TITLE_MIN_LENGTH = 1
    TITLE_MAX_LENGTH = 100
    DESC_MIN_LENGTH = 1
    DESC_MAX_LENGTH = 200
    YEAR_MIN_VALUE = 1980
YEAR_MAX_VALUE = int(d.datetime.now().year) | true |
97b5a0b1eba5c36b7a1390bce5c196c92bc0aeb1 | Python | MyungSeKyo/algorithms | /백준/2225.py | UTF-8 | 246 | 2.796875 | 3 | [] | no_license | import sys
input = sys.stdin.readline  # shadow the builtin for faster reads (intentional competitive-programming idiom)
n, k = map(int, input().split())
n += 1  # allow sums 0..n inclusive
# dp[i][j] = number of ways to write j as an ordered sum of (i+1) non-negative
# integers, modulo 1e9.
dp = [[0] * n for _ in range(k)]
dp[0] = [1] * n  # one term: exactly one way for every target value
for i in range(1, k):
    for j in range(n):
        # Adding one more term: sum over all values the previous terms can take.
        dp[i][j] = sum(dp[i - 1][:j + 1]) % 1000000000
print(dp[-1][-1])
a2290f33ca07fb571c13c528c64fabb2b4ecc00d | Python | Podenniy/sia-2013-1c | /TP1/board_generator/generator2.py | UTF-8 | 1,907 | 3.34375 | 3 | [] | no_license | import collections
import random
import sys
class Position(object):
    """A cell on a lines x width board; moves only along the y axis."""

    DELTAS = [(0, -1), (0, 1)]

    def __init__(self, x, y, lines, width):
        self.x = x
        self.y = y
        self.lines = lines
        self.width = width

    def _add(self, delta):
        # New position shifted by (dx, dy), same board bounds.
        dx, dy = delta
        return Position(self.x + dx, self.y + dy, self.lines, self.width)

    def inside_board(self):
        return 0 <= self.x < self.lines and 0 <= self.y < self.width

    def neighbors(self):
        # Only the shifted positions that stay on the board.
        candidates = (self._add(d) for d in self.DELTAS)
        return [c for c in candidates if c.inside_board()]

    def __eq__(self, other):
        # Duck-typed comparison: board dimensions are deliberately ignored.
        return (hasattr(other, 'x') and hasattr(other, 'y')
                and other.x == self.x and other.y == self.y)
def get_positions(lines, width):
    """Assign matching pairs of tile kinds across a lines x width board.

    A BFS seeded from the whole middle column orders all cells; kinds are
    then handed out two at a time in (shuffled) BFS order, so the two tiles
    of a pair tend to be spatially close.  Returns the 2-D kind grid.
    """
    # Two copies of each kind id -> lines*width/2 values in total.
    kinds = [i for i in range(int(lines * width / 4))] * 2
    initials = []
    for i in range(lines):
        initials.append(Position(i, int(width/2), lines, width))
    queue = collections.deque(initials)
    # `seen` doubles as the visited set and the final BFS ordering.
    seen = collections.deque(initials)
    positions = [[0 for _ in range(width)] for __ in range(lines)]
    while queue:
        top = queue.popleft()
        news = [neighbor for neighbor in top.neighbors()
                if neighbor not in seen]
        seen.extend(news)
        queue.extend(news)
    random.shuffle(kinds)
    for kind in kinds:
        # Consume cells two at a time so each kind appears exactly twice.
        pos1, pos2 = seen.popleft(), seen.popleft()
        positions[pos1.x][pos1.y] = kind
        positions[pos2.x][pos2.y] = kind
    return positions
def main():
    """Parse optional ``[lines [width]]`` CLI args and print a generated board."""
    sys.argv.pop(0)  # drop the program name
    lines = 5
    width = 8
    if sys.argv:
        lines = int(sys.argv.pop(0))
    if sys.argv:
        width = int(sys.argv.pop(0))
    positions = get_positions(lines, width)
    # Output format: the line count, then one space-separated row per line.
    print(lines)
    print('\n'.join(' '.join(map(str, line)) for line in positions))


if __name__ == '__main__':
    main()
| true |
622a14554434297ce9cc2a0149b5fae5b1d2291f | Python | csmerchant/latencyranger | /latencyranger.py | UTF-8 | 469 | 2.609375 | 3 | [
"MIT"
] | permissive | import socket
import sys
from threading import Thread
# defining important variables for connections to the gameservers & other sockets
gamesocket = None  # we get the game socket once we connect to the gameserver
sockhost = '0.0.0.0'  # 0.0.0.0 by default
sockport = 3074  # port 3074 by default, change to whatever port you'd like
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # tcp socket
# NOTE(review): connect() runs at import time and raises if nothing is
# listening on sockhost:sockport; also connect() returns None, so tcpconn
# never holds a useful value -- confirm intent.
tcpconn = tcpsock.connect((sockhost, sockport))  # tcp socket connector
| true |
c3438be2adea9d236d9e9aaa1742f217554fa106 | Python | elp2/TumblrExtractr | /renderers.py | UTF-8 | 4,566 | 2.75 | 3 | [] | no_license | import time
from PIL import Image
import os
class Renderer(object):
    """Base renderer: turns one Tumblr post dict into an HTML snippet.

    Subclasses override body(); header()/footer() supply the shared wrapper.
    (Python 2 style code: u'' literals and `unicode` in subclasses.)
    """

    def __init__(self, post):
        self.post = post
        self.id = post[u'id']

    def __lt__(self, other):
        # Renderers sort chronologically by post time.
        return self.time() < other.time()

    def time(self):
        timestamp = self.post[u'timestamp']
        return time.gmtime(timestamp)

    def date(self):
        t = self.time()
        format = u'%d %B, %Y'
        formatted = time.strftime(format, t)
        # lstrip drops the zero-padding from single-digit days ("01" -> "1").
        return formatted.lstrip(u'0') #+ u' - ' + unicode(self.post[u'timestamp'])

    def header(self):
        header = u'<div class=post>\n'
        #header += u'<h1>' + unicode(self.post[u'id']) + u'</h1>\n'
        header += u'<!--' + self.post[u'post_url'] + u'-->\n'
        header += u'<span class=tags>\n'
        for tag in self.post[u'tags']:
            header += u'\t<div class=tag>' + tag + u'</div>\n'
        header += u'</span>\n'
        header += u'<br/><br/>'
        header += u'<div class=date>' + self.date() + u'</div>\n'
        return header

    def body(self):
        # Subclasses replace this with post-type-specific markup.
        return u'<h1>NOT IMPLEMENTED!</H1>\n'

    def footer(self):
        return u'<div class=footer></div></div>\n'

    def render(self):
        return self.header() + self.body() + self.footer()
#TODO: Download photos + put in a directory
class PhotoRenderer(Renderer):
    """Renders a photo post: each image at its original URL plus captions."""

    def render_photo(self, photo):
        """Return the <img> tag (scaled for print DPI) and per-photo caption."""
        op = photo[u'original_size']
        html = u''
        html += u'<img src="' + op[u'url'] + u'"'
        # scale large image size manually down to get better DPI when printing
        width = op[u'width']
        height = op[u'height']
        maxDim = 500.0
        if height >= maxDim:
            width /= (height/maxDim)
            height = maxDim
        html += u' width="' + unicode(width) + u'" '
        html += u' height="' + unicode(height) + u'" '
        html += u'>\n'
        html += u'<div class=individual-picture-caption>\n'
        html += photo[u'caption']
        html += u'</div>\n'
        return html

    def body(self):
        body = u''
        for photo in self.post[u'photos']:
            body += self.render_photo(photo)
        # The post-level caption follows all individual photos.
        body += u'<div class=photo-caption>' + self.post[u'caption'] + u'</div>\n'
        return body
class QuoteRenderer(Renderer):
    """Renders a quote post: blockquoted text followed by its attribution."""

    def body(self):
        quote = u'<blockquote><p>' + self.post[u'text'] + u'</p></blockquote>\n'
        author = (u'<p class="quote-author" style="margin-bottom: 0px;">'
                  + self.post[u'source'] + u'</p>\n')
        return quote + author
class TextRenderer(Renderer):
    """Renders a text post: optional title div followed by the body div."""

    def body(self):
        parts = []
        title = self.post[u'title']
        if title != None:  # some text posts carry no title at all
            parts.append(u'<div class=text-title>' + self.post[u'title'] + u'</div>\n')
        parts.append(u'<div class=text-body>' + self.post[u'body'] + u'</div>\n')
        return u''.join(parts)
class AlbumRenderer(Renderer):
    """Renders a local directory of images as a JGlance album.

    Unlike the other renderers this is not built from a Tumblr post dict:
    it scans ``album["path"]`` on disk with PIL to collect image sizes.
    """

    def __init__(self, album):
        self.images = []
        self.title = album["title"]
        self.path = album["path"]
        self.id = self.title # give us a generic ID for comparison purposes
        for image_file in os.listdir(self.path):
            img = self.make_image(image_file)
            if None != img:
                self.images.append(img)

    def make_image(self, image_file):
        """Return {filename, width, height} for an image file, or None."""
        try:
            img = Image.open(self.path + "/" + image_file)
        except IOError:
            return None # not an image
        width, height = img.size
        return {'filename': image_file,
                'width': width,
                'height': height,
                }

    def render(self):
        # Album id: last path component with spaces replaced for use in HTML ids.
        id = unicode(self.path.split("/")[-1].replace (" ", "_"))
        div = u'<div class=album id=' + id + u'>\n'
        div += u'<div class=album-title>' + self.title + u'</div>\n'
        div += u'</div>\n'
        div += u'<script>\n'
        div += u'var photos = [\n'
        first = True
        for img in self.images:
            src = unicode(self.path + "/" + img["filename"])
            if not first:
                div += u','
            first = False
            div += u'{thumbnail:"' + src + u'",width:' + unicode(img["width"]) + u',height:' + unicode(img["height"]) + u' }\n'
        div += u'];\n'
        # One string literal continued with backslashes: the JGlance bootstrap JS.
        div += u"var jg = new JGlance({ container: $('#" + id + u"'), \
photoErrorCallback: function (photo, img) { \
img.attr( 'src', '/path/to/placeholder.jpg' ).addClass( 'broken-image' ); \
} \
}); \
// we pass the photos via 'push' method\n \
jg.push( photos ); \
\
</script>\n"
        return div + self.footer()
| true |
c933622f26e9b60b6550c1ca8ff8fc805212d92e | Python | Sagnik2007/Data-Visualisation | /Covid_19_data.py | UTF-8 | 318 | 2.59375 | 3 | [] | no_license | import pandas as pd
import plotly_express as px
df = pd.read_csv(
    "C:/Users/milindo/Desktop/All Desktop Files Dad/WhiteHat Jr/Project 103/countries_aggregated.csv")
# NOTE(review): assumes the CSV exposes "Date", "Confirmed Cases" and
# "Country" columns -- verify against the actual file header.
fig = px.scatter(df, x="Date", y="Confirmed Cases",
                 color="Country", title="No Of Cases Of Covid-19 Every Day")
fig.show() | true |
2ea575eeaa2acdcf946d64215f0ed9c3dec053d5 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2690/60618/263210.py | UTF-8 | 274 | 2.765625 | 3 | [] | no_license | t=int(input())
# For each of t test cases: read (n, m), then two strings, and print how many
# characters of the first string also occur anywhere in the second.
for i in range(0, t):
    n, m = map(int, input().split())  # declared lengths from the input format; unused below
    s, s2 = map(str, input().split())
    # Direct count replaces the original blank-out/strip/filter round trip;
    # output is identical (duplicates in s are still counted individually).
    print(sum(1 for ch in s if ch in s2))
122645e02566782f56c433ee5a24323c868c91d0 | Python | StanfordAHA/Halide-to-Hardware | /python_bindings/correctness/division.py | UTF-8 | 2,144 | 2.875 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import halide as hl
# TODO: Func.evaluate() needs a wrapper added;
# this is a temporary equivalent for testing purposes
def _evaluate(e):
    """Evaluate a scalar Halide expression by realizing it into a 1-element buffer."""
    # TODO: support zero-dim Func, Buffers
    buf = hl.Buffer(type = e.type(), sizes = [1])
    f = hl.Func();
    x = hl.Var()
    f[x] = e;
    f.realize(buf)
    return buf[0]
def test_division():
    """Check Halide's type-promotion rules and division semantics for / and //."""
    f32 = hl.Param(hl.Float(32), 'f32', -32.0)
    f64 = hl.Param(hl.Float(64), 'f64', 64.0)
    i16 = hl.Param(hl.Int(16), 'i16', -16)
    i32 = hl.Param(hl.Int(32), 'i32', 32)
    u16 = hl.Param(hl.UInt(16), 'u16', 16)
    u32 = hl.Param(hl.UInt(32), 'u32', 32)

    # Verify that the types match the rules in match_types()
    assert (f32 / f64).type() == hl.Float(64)
    assert (f32 // f64).type() == hl.Float(64)
    assert (i16 / i32).type() == hl.Int(32)
    assert (i16 // i32).type() == hl.Int(32)
    assert (u16 / u32).type() == hl.UInt(32)
    assert (u16 // u32).type() == hl.UInt(32)
    # int / uint -> int
    assert (u16 / i32).type() == hl.Int(32)
    assert (i32 // u16).type() == hl.Int(32)
    # any / float -> float
    # float / any -> float
    assert (u16 / f32).type() == hl.Float(32)
    assert (u16 // f32).type() == hl.Float(32)
    assert (i16 / f64).type() == hl.Float(64)
    assert (i16 // f64).type() == hl.Float(64)

    # Verify that division semantics match those for Halide
    # (rather than python); this differs for int/int which
    # defaults to float (rather than floordiv) in Python3.
    # Also test that // always floors the result, even for float.
    assert _evaluate(f32 / f64) == -0.5
    assert _evaluate(f32 // f64) == -1.0
    assert _evaluate(i16 / i32) == -1
    assert _evaluate(i16 // i32) == -1
    assert _evaluate(i32 / i16) == -2
    assert _evaluate(u16 / u32) == 0
    assert _evaluate(u16 // u32) == 0
    assert _evaluate(u16 / i32) == 0
    assert _evaluate(i32 // u16) == 2
    assert _evaluate(u16 / f32) == -0.5
    assert _evaluate(u16 // f32) == -1.0
    assert _evaluate(i16 / f64) == -0.25
    assert _evaluate(i16 // f64) == -1.0


if __name__ == "__main__":
    test_division()
| true |
0fc9260b44347f56bd850647432f0ee75fe97fb0 | Python | plplpld/varroa | /net.py | UTF-8 | 3,739 | 3.15625 | 3 | [] | no_license | "Module where we define the network."
from keras.models import Model
from keras.layers import Input, Concatenate, BatchNormalization, Conv2D, MaxPooling2D, Deconv2D
from keras.activations import relu, sigmoid
def conv_relu(filters):
    """3x3, stride-1, same-padding convolution with ReLU activation.

    :param filters: number of convolution kernels
    """
    return Conv2D(filters=filters,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  padding="same",
                  activation=relu)
def batch_norm():
    """Shorthand for a batchnorm layer (low momentum, no centering)."""
    return BatchNormalization(momentum=0.01, center=False)
def max_pool():
    """2x2 stride-2 max pooling layer (halves spatial dimensions)."""
    return MaxPooling2D(pool_size=(2, 2),
                        strides=(2, 2),
                        padding="valid")
def upconv_relu(filters):
    """3x3 stride-2 transposed convolution with ReLU (doubles spatial dims)."""
    return Deconv2D(filters=filters,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding="same",
                    activation=relu)
def convout():
    """Output layer: 1x1 convolution to a single sigmoid channel (binary mask)."""
    return Conv2D(filters=1,
                  kernel_size=(1, 1),
                  padding="same",
                  activation=sigmoid)
def build_unet(input_shape, layers=4, base_num_filters=64):
    """Build a u-shaped convolutional network for object segmentation.

    :param input_shape: the shape of the input
    :param layers: number of down/up-sampling layers
    :param base_num_filters: int (default=64), the number of convolutional filters per layer
    :type input_shape: tuple of int
    :type layers: int (default=4)
    :type base_num_filters: int (default=64)
    :returns model: a compiled Keras Model (adam / binary crossentropy)"""
    # Each layer halves the spatial size, so it must divide evenly.
    assert not input_shape[0] % 2**layers,\
        "Too many layers for input_size"
    print("Building model.")
    #Model
    inputs = Input(shape=input_shape)
    conv1 = conv_relu(base_num_filters)(inputs)
    net = []
    net.append(conv1)
    concat_layers = []  # skip connections saved for the upward path
    # Downward part of the net
    for layer in range(layers):
        batch_norm1 = batch_norm()(net[-1])
        conv1 = conv_relu(base_num_filters)(batch_norm1)
        batch_norm2 = batch_norm()(conv1)
        conv2 = conv_relu(base_num_filters)(batch_norm2)
        batch_norm3 = batch_norm()(conv2)
        conv3 = conv_relu(base_num_filters)(batch_norm3)
        maxpool = max_pool()(conv3)
        net.append(maxpool)
        concat_layers.append(conv2)
    # Middle layer (bottleneck), ends with the first upsampling
    batch_norm1 = batch_norm()(net[-1])
    conv1 = conv_relu(base_num_filters)(batch_norm1)
    batch_norm2 = batch_norm()(conv1)
    conv2 = conv_relu(base_num_filters)(batch_norm2)
    batch_norm3 = batch_norm()(conv2)
    upconv = upconv_relu(base_num_filters)(batch_norm3)
    net.append(upconv)
    # Upward part of the net: concatenate the matching skip connection first
    for layer in range(layers-1, 0, -1):
        concat = Concatenate()([concat_layers[layer], net[-1]])
        batch_norm1 = batch_norm()(concat)
        conv1 = conv_relu(base_num_filters)(batch_norm1)
        batch_norm2 = batch_norm()(conv1)
        conv2 = conv_relu(base_num_filters)(batch_norm2)
        batch_norm3 = batch_norm()(conv2)
        upconv = upconv_relu(base_num_filters)(batch_norm3)
        net.append(upconv)
    # Final resolution: merge the first skip connection and emit the mask
    concat = Concatenate()([concat_layers[0], net[-1]])
    batch_norm1 = batch_norm()(concat)
    conv1 = conv_relu(base_num_filters)(batch_norm1)
    batch_norm2 = batch_norm()(conv1)
    conv2 = conv_relu(base_num_filters)(batch_norm2)
    outputs = convout()(conv2)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
| true |
c0fcfb3053045f453376654332cab2ec3c8afa33 | Python | jbrusey/cogent-house | /dataclense/models/location.py | UTF-8 | 3,354 | 2.515625 | 3 | [] | no_license | """
.. codeauthor:: Ross Wiklins
.. codeauthor:: James Brusey
.. codeauthor:: Daniel Goldsmith <djgoldsmith@googlemail.com>
"""
import sqlalchemy
import logging
log = logging.getLogger(__name__)
import meta
Base = meta.Base
from sqlalchemy import Table, Column, Integer, String, ForeignKey, DateTime,Float
from sqlalchemy.orm import relationship, backref
class Location(Base,meta.InnoDBMix):
    """
    Location provides a link between houses and rooms.

    This is needed if the node is moved, for example during multiple deployments.
    This table should be an "associative array" transparently linking houses and rooms

    :var Integer id: Id of Location
    :var Integer houseId: Link to :class:`cogentviewer.models.house.House` the room is in
    :var Integer roomId: Link to :class:`cogentviewer.models.room.Room` this location corresponds to
    """
    __tablename__ = "Location"

    __table_args__ = (
        sqlalchemy.UniqueConstraint('houseId', 'roomId'),
        {'mysql_engine': 'InnoDB',
         'mysql_charset':'utf8'},
    )

    id = Column(Integer, primary_key=True)
    houseId = Column(Integer,
                     ForeignKey('House.id'))
    roomId = Column(Integer,
                    ForeignKey('Room.id'))

    nodes = relationship("Node",backref=backref('location'))
    readings = relationship("Reading",backref=backref('location'))
    # Second mapping over the same Reading rows, as a filterable query object.
    filtReadings = relationship("Reading",lazy='dynamic')

    def asJSON(self,parentId=""):
        """
        Differs from the standard asJSON model by returning the
        name of the room as its name
        """
        theItem = {"id":"L_{0}".format(self.id),
                   "name":"{0}".format(self.room.name),
                   "label":"({0}) {1}".format(self.id,self.room.name),
                   "type":"location",
                   "parent": "H_{0}".format(self.houseId),
                   }
        # NOTE(review): bare except and the unused hasRead flag -- presumably
        # a leftover "has readings" probe; IndexError is the expected failure.
        try:
            hasRead = self.readings[0]
        except:
            hasRead = False
            #return None
        #log.info(self.readings.count())
        return theItem

    def getReadings(self,typeId=None):
        """Attempt to return only readings of a certain type

        :param typeId: Type Id of object to filter by (None returns all readings)
        """
        if typeId:
            return self.filtReadings.filter_by(typeId = typeId).all()
        else:
            return self.readings

    def asTree(self):
        return self.asJSON()

    def asList(self,parentId = ""):
        """Return this location plus its nodes, flattened for tree widgets."""
        outDict = [self.asJSON(parentId)]
        if self.nodes:
            outDict[0]["children"] = True
            for item in self.nodes:
                outDict.extend(item.asList(self.id))
        return outDict

    def __str__(self):
        # "Loaction" typo kept: it is a runtime string other code may match on.
        return "Loaction {0}: {1} {2}".format(self.id,self.houseId,self.roomId)

    def __eq__(self,other):
        return self.id == other.id and self.houseId == other.houseId and self.roomId == other.roomId

    #def __str__(self):
    #    return "({0}) {1} : {2}".format(self.id,self.house.address,self.room.name)
# Association table linking Location rows to Node rows (many-to-many).
NodeLocation = sqlalchemy.Table("NodeLocation",Base.metadata,
                                sqlalchemy.Column("LocationId",Integer,ForeignKey("Location.id")),
                                sqlalchemy.Column("nodeId",Integer,ForeignKey("Node.id"))
                                )
| true |
18e075c2ae09f249af6709ab7fa503fcc3c7f599 | Python | axelakhil/ONGC-TENDERS-CHATBOT | /actions.py | UTF-8 | 2,502 | 2.703125 | 3 | [] | no_license | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import sqlite3
# class ActionHelloWorld(Action):
# def name(self) -> Text:
# return "action_hello_world"
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# dispatcher.utter_message(text="Hello World!")
# return []
class ActionShowByID(Action):
    """Rasa custom action: look up one tender by the (case-sensitive) tender-number slot."""

    def name(self) -> Text:
        return "action_show_by_id"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        eeid = tracker.get_slot("tend")
        con = sqlite3.connect('mydatabase.db')
        cursorObj = con.cursor()
        # The slot value is user input: bind it as a query parameter instead
        # of formatting it into the SQL string (prevents SQL injection).
        cursorObj.execute("SELECT * FROM employees where Tender_number=?", (eeid,))
        rows = cursorObj.fetchall()
        con.close()  # the original leaked the connection on every call
        if rows == []:
            dispatcher.utter_message("No Tender with this ID is found.Please check again its case sensitive")
        else:
            for row in rows:
                dispatcher.utter_message("Tender Number:{}\n Tender_name: {}\n Tender_type:{}\n Last_date :{} \n Location:{}\n".format(row[0], row[1], row[2], row[3], row[4]))
        return []
class ActionShowAll(Action):
    """Rasa custom action: list all tenders in the tender-category slot."""

    def name(self) -> Text:
        return "action_show_by_category"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        cat = tracker.get_slot("tender")
        con = sqlite3.connect('mydatabase.db')
        cursorObj = con.cursor()
        # Parameterized query: never interpolate user input into SQL
        # (the original used str.format, which allowed SQL injection).
        cursorObj.execute("SELECT * FROM employees where Tender_type=?", (cat,))
        rows = cursorObj.fetchall()
        con.close()  # the original leaked the connection on every call
        if rows == []:
            dispatcher.utter_message("No such tender category is found.Please check again its case sensitive(1.e-gas 2.Procurement 3.Corrigendum)")
        # Empty result set: the loop below simply does nothing (matches original flow).
        for row in rows:
            dispatcher.utter_message("Tender Number:{}\n Tender_name: {}\n Tender_type:{}\n Last_date :{} \n Location:{}\n".format(row[0], row[1], row[2], row[3], row[4]))
        return []
7a806646adb9e8c775751eb834370b5ff722dbcd | Python | clarissa2448/pagerank | /pagerank.py | UTF-8 | 2,625 | 3.609375 | 4 | [] | no_license | '''
PageRank Project by Hannah He and Clarissa Xu.
Names of Primary Functions: 1. probMatrix: input: chain, type: 2d array. Output: M, type: 2d array
2. isDanglingNode: input: chain, j, type: 2d array, index of the node. Output: true/false, type: boolean
3. rank: input: chain, type: 2d array. Output: result, type: 1d array
'''
import numpy as np
import math
# Sample adjacency lists: chain[i] holds the node indices that node i links to.
chain = [[1,2,3],[3],[1,3],[1,2]]
chain1 = [[1,2,3],[3],[0,3],[0,2]]
chain2 = [[1,2,3],[3],[0,3],[]]  # node 3 is dangling (no outgoing links)
chain3 = [[1, 5], [2, 5], [1, 3, 5], [4], [1, 5], [2, 6], [0, 1]]
chain4 = [[1,3,4],[0,2,4],[3,6],[2,4,6],[5,8],[4,6,8],[0,7,9],[0,6,8],[2,9],[0,2,8]]
# Creates the probability transition matrix of the input adjacency lists.
def probMatrix(chain):
    """Build the column-stochastic transition matrix M for PageRank.

    M[j][i] is the probability of moving from node i to node j:
    1/len(chain[i]) for each out-neighbor j of i, and -- for a dangling
    node i with no outgoing edges -- 1/n for every j (uniform jump).

    Fix over the original: the old code handled a dangling node j by
    overwriting its entire *row* with 1/n, which both left the dangling
    *column* all zeros and clobbered other columns' entries, so M was not
    column-stochastic and eigenvalue 1 was not guaranteed.
    """
    n = len(chain)
    M = [[0] * n for _ in range(n)]
    for i in range(n):  # i = source node (column)
        if isDanglingNode(chain, i):
            # Dangling node: jump to every node with equal probability.
            for j in range(n):
                M[j][i] = 1 / n
        else:
            for j in chain[i]:
                M[j][i] = 1 / len(chain[i])
    return M

def isDanglingNode(chain, j):
    """A node is dangling when it has no outgoing edges (empty adjacency list)."""
    return chain[j] == []  # empty list for no outgoing edges
def rank(chain):
    """Rank the nodes of *chain* by PageRank score, highest first.

    Diagonalizes the transition matrix, projects the all-ones start vector
    onto the eigenvector whose eigenvalue is closest to 1, and returns the
    node indices ordered by descending score.
    """
    x = [1 for i in range(len(chain))]  # creates a n x 1 list of 1s
    M = probMatrix(chain)
    arrEig = np.linalg.eig(M)
    eigVal = list(arrEig[0])  # list of eigenvalues
    eigVec = arrEig[1]  # matrix of eigenvectors = P
    min_index = -1
    min_dist = float('inf')
    # Compares the eigen vector array to find the eigen vector that corresponds to the eigenvalue of 1
    for i in range(len(eigVal)):
        lambda_i = eigVal[i]
        dist = abs(lambda_i -1)
        if(dist < min_dist):
            min_index = i
            min_dist = dist
    evOne = eigVec[:,min_index]
    # makes the eigenvector with the eigenvalue of 1 positive
    for i in range(len(evOne)):
        if(evOne[i] < 0):
            evOne[i] *= -1
    eigVecInv = np.linalg.inv(eigVec)  # inverse of eigenvector matrix
    n = len(chain)
    C = np.zeros((n,n))  # creates matrix with the eigenvector as the first column, 0 elsewhere
    C[:,0] = evOne
    # Compute [eigenvector of 1] * P^-1 * x_0
    prod = np.matmul(C, eigVecInv)
    prod1 = np.matmul(prod,x)
    result = [0 for i in range(n)]
    for i in range(len(prod1)):
        result[i] = np.abs(prod1[i])
    # argsort: replace each score with its node index, ordered by ascending
    # score, then reverse so the highest-ranked node comes first.
    result = [j for (i,j) in sorted([(s,t) for (t,s) in enumerate(result)])]
    result = result[::-1]
    return result

print(rank(chain))
| true |
2a0ca4a3a2f336caeefb9cf28c13883fba8a7bec | Python | thatprakhar/SocialBoiler | /backend/src/db/commenting_utils.py | UTF-8 | 1,859 | 2.875 | 3 | [] | no_license | import pandas as pd
import datetime as dt
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
from src.db.crud import fetch_rows, fetch_comments_by_user, update_table, fetch_post
from src.db.models import Comments, Posts
def save_comment(username, post_id, comment):
    """Persist *comment* by *username* on post *post_id*.

    Returns False (saving nothing) for an empty comment or when the user
    already left the same comment (compared case-insensitively) on the post;
    returns True after inserting the new row.
    """
    # check for an empty comment
    if len(comment) == 0:
        return False
    # duplicate comments are not allowed
    user_df = fetch_comments_by_user(username, post_id)
    comment_copy = comment.lower()
    if not user_df.empty:
        # Case-insensitive comparison; the stored comment keeps its case.
        user_df["comment"] = user_df["comment"].str.lower()
        if comment_copy in user_df["comment"].values:
            return False
    data = {
        "username": [username],
        "post_id": [post_id],
        "comment": [comment],
        "post_time": dt.datetime.utcnow(),
    }
    new_df = pd.DataFrame(data)
    update_table(new_df, Comments)
    return True
def get_commented_posts_by_username(username):
    """Return the post records (as dicts) that *username* has commented on.

    Scans every comment row in memory; a post appears once per matching comment.
    """
    df = fetch_rows(Comments).to_dict("records")
    # Make another arr to store the comments made by the user
    arr = []
    for record in df:
        if record["username"] == username:
            # print(record["username"])
            arr.append(record)
    # print(arr)
    if len(arr) == 0:
        return []
    result = []
    for record in arr:
        post_df = fetch_post(Posts, record["post_id"]).to_dict("records")
        result += post_df
    return result
def get_commented_posts_by_id(post_id):
    """Return all comment records attached to *post_id*.

    NOTE(review): the int(post_id) cast suggests callers may pass the id as a
    string; a non-numeric value raises ValueError here -- confirm inputs.
    """
    df = fetch_rows(Comments).to_dict("records")
    result = []
    for record in df:
        if record["post_id"] == int(post_id):
            result.append(record)
    return result
# print(get_commented_posts_by_id(1))
# print(get_commented_posts_by_username("prakhar"))
# save_comment("prakhar", 38, "ananinki yorumu", False)
| true |
17266cbaa46c6d4faf96866634564a616c590a43 | Python | DrRoad/DashOmics | /single-page-example/Upload-Component.py | UTF-8 | 8,079 | 2.65625 | 3 | [
"MIT"
] | permissive | from dash.dependencies import Input, Output
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import plotly.graph_objs as go
import plotly
import base64
import io
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist, pdist
import sqlite3
# create sql_master table: registry of available datasets (runs at import time)
con = sqlite3.connect("dbtest-1.db")
c = con.cursor()
master_data = [['example_1','No'],['example_2_cancer','No']]
sql_master = pd.DataFrame(master_data, columns = ['Filename','Choose_or_Not'])
sql_master.to_sql('sql_master', con, if_exists='replace')
# add example data into sqlite
example_1 = pd.read_csv('../data/example-1.csv', index_col = ['id'])
example_1.to_sql('example_1', con, if_exists="replace")
example_2 = pd.read_csv('../data/example-2-cancer.csv', index_col = ['id'])
example_2.to_sql('example_2_cancer', con, if_exists="replace")
con.close()
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(sys.path[0])))
print(__file__)
app = dash.Dash()
app.layout = html.Div([
html.Div([
html.H3("Choosing Example Data to Explore DashOmics"),
dcc.Dropdown(
id='example-data',
options = [
{'label':'example_1', 'value':'example_1'},
{'label':'example_2', 'value':'example_2_cancer'}
]),
html.H3("Or Upload Your Own Files"),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
multiple=False),
html.Br(),
html.H4("Updated Table"),
html.Div(dt.DataTable(rows=[{}], id='table'))
]),
html.H3('Model Evaluation:Elbow Method'),
dcc.Input(id='k-range', value= 10, type='number'),
dcc.Graph(id='graph-elbow_method'),
html.Div(id='app-2-display-value'),
])
# Functions
# file upload function
def parse_contents(contents, filename):
    """Decode an uploaded data-URI into a DataFrame.

    Parameters
    ----------
    contents : str
        ``data:<mime>;base64,<payload>`` string produced by dcc.Upload.
    filename : str
        Original file name; used to pick the CSV or Excel parser.

    Returns
    -------
    tuple
        ``(df, status_div)`` where *df* is the parsed DataFrame or None on
        failure.  Always a 2-tuple so callers can safely subscript with [0]
        (the original returned a bare None on error, which made the caller's
        ``parse_contents(...)[0]`` raise TypeError).
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    df = None  # stays None for unsupported extensions or parse errors
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(
                io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
    except Exception as e:
        print(e)
        # Keep the (df, Div) shape on failure too.
        return None, html.Div([
            html.H5(filename),
            html.Hr(),
            html.Div('Raw Content Upload Failed')
        ])
    return df, html.Div([
        html.H5(filename),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content Upload Successfully')
    ])
# update sqlite database and display in layout DataTable
@app.callback(Output('table', 'rows'),
              [Input('upload-data', 'contents'),
               Input('upload-data', 'filename'),
               Input('example-data', 'value')])
def update_database(upload_contents, upload_filename, example_filename):
    """Persist the chosen dataset to SQLite and return rows for the DataTable.

    Handles either a user upload or one of the bundled example tables.
    Returns a list of record dicts, or [{}] when nothing usable is selected.
    """
    if upload_contents is not None:
        # add uploaded df to sqlite
        con = sqlite3.connect("dbtest-1.db")
        c = con.cursor()
        # use parse_contents to get df
        df = parse_contents(upload_contents, upload_filename)[0]
        if df is not None:
            df.to_sql(upload_filename, con, if_exists="replace")
            # record the upload in sql_master; parameterized to avoid SQL
            # injection through a hostile file name
            c.execute('''INSERT INTO sql_master(Filename, Choose_or_Not)
                         VALUES (?, ?)''', (upload_filename, 'Yes'))
            con.commit()
            con.close()
            # display table in layout
            return df.to_dict('records')
        else:
            con.close()  # don't leak the connection on the failure path
            return [{}]
    if example_filename is not None:
        con = sqlite3.connect("dbtest-1.db")
        c = con.cursor()
        table_name = str(example_filename).split('.')[0]
        # NOTE: table names cannot be bound as SQL parameters; table_name
        # comes from the app's own dropdown options, not free user text.
        df = pd.read_sql_query('SELECT * FROM %s' % table_name, con)
        if df is not None:
            # update "Choose or Not" status to "Yes" in sql_master table
            c.execute('''UPDATE sql_master
                         SET Choose_or_Not = ?
                         WHERE Filename = ?''', ('Yes', table_name))
            con.commit()
            con.close()
            return df.to_dict('records')
        else:
            con.close()
            return [{}]
    # Both branches above always return, so we only get here when neither
    # input is set.  The original used bitwise `&` plus `is not` against a
    # string literal (identity comparison); fixed to `and` / `!=`.
    if (upload_contents is not None) and (example_filename != 'Choose an example data'):
        raise ValueError('Upload data conflicts with Example data')
    else:
        return [{}]
# apply elbow method analysis
@app.callback(
    Output('graph-elbow_method', 'figure'),
    [Input(component_id='k-range', component_property='value'),
     Input('example-data', 'value')]
)
def elbow_method_evaluation(n, filename):
    """
    Build the elbow-method figure used to pick k for k-means.

    n: the maximum k value to scan (models are fit for k = 1..n)
    filename: table name of the selected dataset in the SQLite store
    """
    # read dataframe from sqlite database
    con = sqlite3.connect("dbtest-1.db")
    if filename is None:
        print('No Input Yet!')
        con.close()  # don't leak the connection on the early exit
        return
    # strip a possible file extension before using the name as a table name
    dff = pd.read_sql_query('SELECT * FROM %s' % str(filename).split('.')[0], con).set_index(['id'])
    con.close()
    # Fit one k-means model for every k in [1, n]
    K = range(1, n + 1)
    KM = [KMeans(n_clusters=k).fit(dff) for k in K]
    # Pull out the cluster centroid for each model
    centroids = [k.cluster_centers_ for k in KM]
    # Calculate the distance between each data point and the centroid of its cluster
    k_euclid = [cdist(dff.values, cent, 'euclidean') for cent in centroids]
    dist = [np.min(ke, axis=1) for ke in k_euclid]
    # Total within-cluster sum of squares, scaled down for plot readability.
    # (The original also computed the total/between sums of squares via an
    # O(n^2)-memory pdist pass, but never plotted them, so that is removed.)
    wss = [sum(d ** 2) / 1000 for d in dist]
    # Difference of the within-cluster sum to the next smaller k; padded with
    # a leading 0 so the curve lines up with K.
    dwss = [wss[i + 1] - wss[i] for i in range(len(wss) - 1)]
    dwss.insert(0, 0)
    # Create the graph with two stacked subplots sharing the x axis
    fig = plotly.tools.make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_xaxes=True,
                                     subplot_titles=('Sum of Within-cluster Distance/1000',
                                                     'Difference of Sum of Within-cluster Distance to Next Lower K/1000'))
    fig['layout']['margin'] = {
        'l': 40, 'r': 40, 'b': 40, 't': 40
    }
    fig.append_trace({
        'x': list(K),
        'y': list(wss),
        'mode': 'lines+markers',
        'type': 'scatter'
    }, 1, 1)
    fig.append_trace({
        'x': list(K),
        'y': list(dwss),
        'mode': 'lines+markers',
        'type': 'scatter'
    }, 2, 1)
    fig['layout']['xaxis1'].update(title='K Value')
    fig['layout']['xaxis2'].update(title='K Value')
    fig['layout']['yaxis1'].update(title='Distance Value')
    fig['layout']['yaxis2'].update(title='Distance Value')
    fig['layout'].update(height=600, width=1000,
                         title='Model Evaluation: Elbow Method for Optimal K Value')
    return fig
# Run the Dash development server when executed as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
| true |
311a34d290997c91f6bd3d9f683fbf714d4afa7b | Python | badeaa3/cannonball-rpv-stops | /plotting/perfPlot.py | UTF-8 | 7,116 | 2.90625 | 3 | [] | no_license | """
Style a mass asymmetry vs epsilon plot
"""
# general imports
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import ROOT
# set style based on https://stackoverflow.com/questions/43741928/matplotlib-raw-latex-epsilon-only-yields-varepsilon
# Use Computer Modern math fonts and real LaTeX text rendering (requires a
# working LaTeX installation on the machine that renders the figure).
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["text.usetex"] = True
def main():
    """Read per-method performance histograms from ROOT files and render the
    mass-vs-fraction-correct comparison figure as perfPlot.pdf."""
    # argparser
    parser = argparse.ArgumentParser()
    # all model configurations
    parser.add_argument("-c", help="CoLaLoLa file", default="")
    parser.add_argument("-m", help="Mass Asymmetry file", default="")
    parser.add_argument("-dr", help="dR sum file", default="")
    parser.add_argument("-o", help="Output directory for the resulting files", default="./")
    ops = parser.parse_args()
    # load method comparison data
    files = {"colalola" :ops.c,
             "masym" :ops.m,
             "drsum1" :ops.dr}
    # load the hist values
    hists = {}
    GeVtoTeV = 1.0/1000
    for key, file in files.items():
        # load hist: each input file holds a canvas whose primitive is the
        # 2D performance histogram (x = mass bin, y = smearing epsilon)
        f = ROOT.TFile(file)
        c = f.Get("msu3_vs_ptsmear_test.pdf")
        h = c.GetPrimitive("msu3_vs_ptsmear")
        # get the bin values (ROOT bins are 1-based, hence the [1:] slices)
        hist = {}
        for ybin in range(h.GetNbinsY()+1)[1:]:
            ykey = round((ybin-1)*0.1,1) # epsilon
            hist[ykey] = {"mass":[],"perf":[]}
            for xbin in range(h.GetNbinsX()+1)[1:]:
                content = h.GetBinContent(xbin,ybin)
                # print (300+xbin*100,(ybin-1)*0.1,content)
                # bin index -> stop mass in TeV
                hist[ykey]["mass"].append(round((200+xbin*100)*GeVtoTeV,2))
                hist[ykey]["perf"].append(content)
        # save to dic
        hists[key] = hist
        # print(hist)
        # exit()
    # useful for checking values by hand
    # for key,hist in hists.items():
    #     print(key,list(zip(hist[0.3]["mass"],hist[0.3]["perf"])))
    # draw options: label, color, label position (x, y), z-order per method
    configs = {"colalola": ["CANNONBALL","#1f77b4",(1.3,0.72),2],
               "masym": [r"$\mathcal{A}$ Minimization", "#ff7f0e",(1.3,0.32),1],
               "drsum1": [r'$\Delta R^{\Sigma}$ (C=1)', "#2ca02c",(1.3,0.07),0]}
    # plot
    # fig, ax = plt.subplots(1,1,constrained_layout=False,sharey=False,figsize=(6, 5))
    fig = plt.figure(figsize=(6, 5)) #plt.subplots(1,1,constrained_layout=True,sharey=False,figsize=(6, 5))
    ax = plt.axes([0.1, 0.1, 0.85, 0.85])
    for key,hist in hists.items():
        # plot high, middle, and low epsilon
        eps = [0.1,0.2,0.3]
        for e in eps:
            ax.plot(hist[e]["mass"],
                    hist[e]["perf"],
                    '-',
                    color=configs[key][1],
                    alpha=1.0,
                    zorder=configs[key][3],
                    rasterized=False)
        # fill between high and low epsilon
        ax.fill_between(hist[eps[0]]["mass"],
                        hist[eps[0]]["perf"],
                        hist[eps[-1]]["perf"],
                        alpha=0.2,
                        color=configs[key][1],
                        zorder=configs[key][3],
                        rasterized=False)
        # place text above high epsilon
        ax.text(configs[key][2][0],configs[key][2][1],s=configs[key][0],color=configs[key][1],fontsize=14)
    # indication of epsilon boundaries
    # NOTE: `eps` is reused here via loop-variable leakage from the loop above.
    mIDX = 5 # index of mass point to put it above
    loc = "masym"
    epsx = hists[loc][eps[1]]["mass"][mIDX]
    epsy = hists[loc][eps[1]]["perf"][mIDX]
    # asymmetric error bar spanning the epsilon band at the chosen mass point
    epsyerr = [[hists[loc][eps[1]]["perf"][mIDX] - hists[loc][eps[-1]]["perf"][mIDX]],
               [hists[loc][eps[0]]["perf"][mIDX] - hists[loc][eps[1]]["perf"][mIDX]]]
    # ax.arrow(epsx,
    #          hists[loc][eps[0]]["perf"][mIDX] - 0.01,
    #          0,
    #          hists[loc][eps[-1]]["perf"][mIDX] - hists[loc][eps[0]]["perf"][mIDX] + 0.07,
    #          lw=2,
    #          color="black",
    #          alpha=1.0,
    #          head_width = 0.05,
    #          head_length = 0.05,
    #          overhang=0.2,
    #          rasterized=True)
    ax.errorbar(epsx,epsy,epsyerr,
                fmt="o",
                marker="_",
                markersize=10.5,
                markeredgewidth=2,
                elinewidth=2,
                capsize=5,
                color="black",
                alpha=1.0,
                rasterized=False,
                zorder=100)
    # ax.text(epsx - 0.09, epsy + epsyerr[1][0] + 0.02, s=r'$\epsilon=$ ' + "%1.1f" % float(eps[0]), color="black", fontsize=15)
    # ax.text(epsx - 0.06, epsy - epsyerr[0][0] - 0.055, s="%1.1f" % float(eps[-1]), color="black", fontsize=15)
    ax.text(epsx - 0.017, epsy + epsyerr[1][0] + 0.02, s=r'$\epsilon$', color="black", fontsize=16)
    ax.text(epsx - 0.14, epsy + epsyerr[1][0] - 0.05, s="%1.1f" % float(eps[0]), color="black", fontsize=14)
    ax.text(epsx - 0.14, epsy + 0.02, s="%1.1f" % float(eps[1]), color="black", fontsize=14)
    ax.text(epsx - 0.14, epsy - epsyerr[0][0] + 0.02, s="%1.1f" % float(eps[-1]), color="black", fontsize=14)
    # stamp to be used for labeling the plots
    # stamp = r'$pp \rightarrow \tilde{t}\tilde{t}(j) \rightarrow$ partons' + ', RPV MG5 aMC@NLO 2.7.3'
    # # stamp += ('\n' + 'MG5 aMC 2.7.3')
    # stamp += ('\n' + r'$\sqrt{s} = 13$ TeV')
    # stamp += ('\n' + r'$p_{\mathrm{T}} \rightarrow \mathcal{N}(p_{\mathrm{T}},$'+epsilon+r'$p_{\mathrm{T}})$')
    ax.text(0.02,
            1.04,
            r'$pp \rightarrow \tilde{t}\tilde{t}(j) \rightarrow qqqq(j)$' + ', MG5 aMC@NLO',
            horizontalalignment='left',
            verticalalignment='center',
            transform = ax.transAxes,
            fontsize=14,
            linespacing=1.5)
    # set labels
    ax.set_xlabel(r'$\mathrm{m}_{\tilde{t}}$ [TeV]', fontsize=21, labelpad=9)
    ax.set_ylabel('Fraction of Events Correct', fontsize=21, labelpad=9)
    # set limits
    ax.set_xlim(0.3,2.0)
    ax.set_ylim(0,1)
    # set scales
    ax.set_yscale('linear')
    # enable grid
    # ax.grid()
    # style ticks
    tick_params_major = {"which" : "major",
                         "length" : 10,
                         "width" : 1.25,
                         "direction" : "in",
                         "right" : True,
                         "top" : True,
                         "labelsize" : 15,
                         "pad" : 8}
    tick_params_minor = {"which" : "minor",
                         "length" : 4,
                         "width" : 1,
                         "direction" : "in",
                         "right" : True,
                         "top" : True,
                         "labelsize" : 15}
    # major x ticks at every third mass point
    ax.set_xticks(hists["colalola"][eps[1]]["mass"][::3])
    ax.xaxis.set_minor_locator(AutoMinorLocator(3))
    ax.yaxis.set_minor_locator(AutoMinorLocator(5))
    ax.tick_params(**tick_params_major)
    ax.tick_params(**tick_params_minor)
    # save
    plt.savefig(os.path.join(ops.o, "perfPlot.pdf"), bbox_inches="tight")
if __name__ == "__main__":
main() | true |
ffff54b67e8d538540516773ee1a99639fee4660 | Python | MarttiWu/boids-game | /Boids.py | UTF-8 | 9,846 | 2.984375 | 3 | [] | no_license | '''
Boids.py
Created by WU,MENG-TING on 2020/10/1.
Copyright © 2020 WU,MENG-TING. All rights reserved.
'''
import pygame as pg
import random
import math
import numpy as np
#######################################
### boids behavior parameters ###
#######################################
# Interaction radii (pixels) for the three steering rules.
a_radius=50  # alignment radius
s_radius=20  # separation radius
c_radius=40  # cohesion radius
popsize=100  # number of birds in the flock
maxspeed=5.0  # magnitude each steering vector is normalized to
# Non-zero velocity components a bird may be initialized/jittered with.
numbers = list(range(-15,-1)) + list(range(1,15))
#######################
### image size ###
#######################
bdimgsize=(20,20) #bird image size
obimgsize = (30,30) #obstacle image size
#initialize window
pg.init()
screen = pg.display.set_mode((800,600))
pg.display.set_caption("Boids Game")
background = pg.image.load('images/sky.jpg')
background.convert()
screen.blit(background,(0,0))
###################################
### buttons' parameters... ###
###################################
# Button colors: a light shade for hover, a dark shade otherwise.
color = (255,255,255)
st_light = (170,170,170)
st_dark = (100,100,100)
re_light = (255,102,102)
re_dark = (255,0,0)
ob_light = (51,255,255)
ob_dark = (0,204,204)
qu_light = (32,32,32)
qu_dark = (0,0,0)
buttonfont = pg.font.SysFont('Arial',16)
#load images
# One pre-scaled sprite per travel direction (8-way), keyed by compass name.
img = {'Ebird':pg.transform.scale(pg.image.load('images/Ebird.png'), bdimgsize),
       'Wbird':pg.transform.scale(pg.image.load('images/Wbird.png'), bdimgsize),
       'Sbird':pg.transform.scale(pg.image.load('images/Sbird.png'), bdimgsize),
       'Nbird':pg.transform.scale(pg.image.load('images/Nbird.png'), bdimgsize),
       'NEbird':pg.transform.scale(pg.image.load('images/NEbird.png'), bdimgsize),
       'NWbird':pg.transform.scale(pg.image.load('images/NWbird.png'), bdimgsize),
       'SEbird':pg.transform.scale(pg.image.load('images/SEbird.png'), bdimgsize),
       'SWbird':pg.transform.scale(pg.image.load('images/SWbird.png'), bdimgsize)
       }
obimg = pg.transform.scale(pg.image.load('images/block.png'), obimgsize)
class Bird(pg.sprite.Sprite):
    """One boid: a position (x, y), a velocity (dx, dy) and per-rule
    neighbor caches rebuilt each frame by find_neighbors()."""

    def __init__(self, x, dx, y, dy):
        super().__init__()
        self.image = img['Nbird']
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy
        # Neighbor lists for the three steering rules.
        self.aneighbors = []  # within a_radius -> alignment
        self.cneighbors = []  # within c_radius -> cohesion
        self.sneighbors = []  # within s_radius -> separation

    def draw(self, obstacles):
        """Advance one frame: wrap at window edges, bounce off obstacles and
        pick the sprite image matching the current velocity."""
        # goes to the other side when it reaches the boundaries
        if self.x > 800:
            self.x = 0
        if self.x < 0:
            self.x = 800
        if self.y > 600:
            self.y = 0
        if self.y < 0:
            self.y = 600
        # on collision with an obstacle: reverse velocity and jitter position
        if self.is_collided_with(obstacles):
            self.dx = -self.dx
            self.dy = -self.dy
            self.x += random.choice(numbers)*2
            # BUGFIX: the original added the jitter to self.x twice and never
            # moved y, so a bird could stay wedged against an obstacle.
            self.y += random.choice(numbers)*2
        # choose the image for the velocity direction
        # NOTE(review): pygame's y axis grows downward, so dy > 0 moves the
        # sprite toward the bottom of the window; the 'N'/'S' key names here
        # appear to follow the art assets rather than compass directions —
        # confirm against the image files before renaming.
        if self.dx > 0 and self.dy > 0:
            self.image = img['SEbird']
        elif self.dx > 0 and self.dy < 0:
            self.image = img['NEbird']
        elif self.dx < 0 and self.dy > 0:
            self.image = img['SWbird']
        elif self.dx < 0 and self.dy < 0:
            self.image = img['NWbird']
        elif self.dx > 0 and self.dy == 0:
            self.image = img['Ebird']
        elif self.dx == 0 and self.dy > 0:
            self.image = img['Nbird']
        elif self.dx < 0 and self.dy == 0:
            self.image = img['Wbird']
        elif self.dx == 0 and self.dy < 0:
            self.image = img['Sbird']
        self.rect.center = (self.x, self.y)
        self.x += self.dx
        self.y += self.dy

    def find_neighbors(self, other):
        """Rebuild the three neighbor lists from the full flock *other*."""
        self.cneighbors.clear()
        self.sneighbors.clear()
        self.aneighbors.clear()
        for o in other:
            if o != self:
                # compute the distance once per flock-mate (the original
                # recomputed it up to three times)
                d = math.dist([self.x, self.y], [o.x, o.y])
                if d < c_radius:
                    self.cneighbors.append(o)
                if d < s_radius:
                    self.sneighbors.append(o)
                if d < a_radius:
                    self.aneighbors.append(o)

    def cohesion(self):
        """Steering vector toward the center of mass of cohesion neighbors."""
        cV = np.zeros(2)
        if not self.cneighbors:
            return cV
        for n in self.cneighbors:
            cV[0] += n.x
            cV[1] += n.y
        cV /= len(self.cneighbors)
        cV[0] -= self.x
        cV[1] -= self.y
        # prevent division by zero when the vector is degenerate
        uu = np.linalg.norm(cV)
        if uu == 0:
            uu = 0.01
        cV = (cV/uu)*maxspeed
        return cV

    def separation(self):
        """Steering vector away from neighbors that are too close."""
        sV = np.zeros(2)
        if not self.sneighbors:
            return sV
        for n in self.sneighbors:
            sV[0] += self.x - n.x
            sV[1] += self.y - n.y
        sV /= len(self.sneighbors)
        # prevent division by zero
        uu = np.linalg.norm(sV)
        if uu == 0:
            uu = 0.01
        sV = (sV/uu)*maxspeed
        return sV

    def alignment(self):
        """Steering vector toward the average heading of alignment neighbors."""
        aV = np.zeros(2)
        if not self.aneighbors:
            return aV
        for n in self.aneighbors:
            aV[0] += n.dx
            aV[1] += n.dy
        aV /= len(self.aneighbors)
        # prevent division by zero
        uu = np.linalg.norm(aV)
        if uu == 0:
            uu = 0.01
        aV = (aV/uu)*maxspeed
        return aV

    def update_direction(self, other):
        """Combine the three rules into a new integer velocity."""
        self.find_neighbors(other)
        V = np.zeros(2)
        V += self.cohesion()
        V += self.separation()
        V += self.alignment()
        # normalize the combined steering vector to maxspeed
        uu = np.linalg.norm(V)
        if uu == 0:
            uu = 0.01
        V = (V/uu)*maxspeed
        # keep the previous velocity when the new one truncates to (0, 0)
        if int(V[0]) or int(V[1]):
            self.dx = int(V[0])
            self.dy = int(V[1])

    def is_collided_with(self, obstacles):
        """Return True when this bird's rect overlaps any obstacle's rect."""
        col = False
        for ob in obstacles:
            if self.rect.colliderect(ob.rect):
                col = True
        return col
def init_boids():
    """Create the starting flock and a sprite group holding every bird."""
    flock = []
    for _ in range(popsize):
        flock.append(Bird(random.randint(40, 1360),
                          random.choice(numbers),
                          random.randint(40, 860),
                          random.choice(numbers)))
    group = pg.sprite.Group()
    group.add(*flock)
    return flock, group
##################################################
class Obstacle(pg.sprite.Sprite):
    """A static block the birds must bounce away from."""

    def __init__(self, x, y):
        super().__init__()
        self.x, self.y = x, y
        self.image = obimg
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)

    def draw(self):
        """Sync the sprite rect with the obstacle's stored coordinates."""
        self.rect.center = (self.x, self.y)
def init_obs():
    """Return an empty obstacle list together with a fresh sprite group."""
    return [], pg.sprite.Group()
##################################################
def main():
    """Run the game loop: handle input, step the flock, draw the UI buttons."""
    clock = pg.time.Clock()
    birds,boids = init_boids()
    obstacles,OBSTACLE = init_obs()
    running = True
    START=False
    OBon=False
    while running:
        clock.tick(40)
        # BUGFIX: sample the mouse position before processing events; the
        # original assigned `mouse` only after the event loop, so a click on
        # the very first frame raised NameError.
        mouse = pg.mouse.get_pos()
        for e in pg.event.get():
            if e.type == pg.QUIT:
                running = False
            if e.type == pg.MOUSEBUTTONDOWN and OBon:
                pos = pg.mouse.get_pos()
                if 0 <= pos[1] <= 540:
                    ob = Obstacle(pos[0],pos[1])
                    obstacles.append(ob)
                    OBSTACLE.add(ob)
            if e.type == pg.MOUSEBUTTONDOWN:
                if 600 <= mouse[0] <= 700 and 560 <= mouse[1] <= 580:
                    # BUGFIX: end the loop instead of calling pg.quit() here;
                    # the original quit pygame mid-frame and then kept using
                    # it, which raises pygame.error.
                    running = False
                if 100 <= mouse[0] <= 200 and 560 <= mouse[1] <= 580:
                    START=True
                if 250 <= mouse[0] <= 350 and 560 <= mouse[1] <= 580:
                    # reset the flock and remove every obstacle
                    birds,boids = init_boids()
                    obstacles.clear()
                    OBSTACLE.empty()
                    START=True
                    OBon=False
                if 450 <= mouse[0] <= 550 and 560 <= mouse[1] <= 580:
                    OBon=True
        screen.blit(background,(0,0))
        '''
        Game start
        '''
        if START:
            for ob in obstacles:
                ob.draw()
            OBSTACLE.draw(screen)
            for bird in birds:
                bird.draw(obstacles)
            for bird in birds:
                bird.update_direction(birds)
            boids.draw(screen)
        '''
        Display buttons (light shade while hovered, dark otherwise)
        '''
        #Quit button
        if 600 <= mouse[0] <= 700 and 560 <= mouse[1] <= 580:
            pg.draw.rect(screen,qu_light,[600,560,100,20])
        else:
            pg.draw.rect(screen,qu_dark,[600,560,100,20])
        screen.blit(buttonfont.render('Quit' , True , color) , (600+35,560+3))
        #Start button
        if 100 <= mouse[0] <= 200 and 560 <= mouse[1] <= 580:
            pg.draw.rect(screen,st_light,[100,560,100,20])
        else:
            pg.draw.rect(screen,st_dark,[100,560,100,20])
        screen.blit(buttonfont.render('Start' , True , color) , (100+30,560+3))
        #Restart button
        if 250 <= mouse[0] <= 350 and 560 <= mouse[1] <= 580:
            pg.draw.rect(screen,re_light,[250,560,100,20])
        else:
            pg.draw.rect(screen,re_dark,[250,560,100,20])
        screen.blit(buttonfont.render('Restart' , True , color) , (250+30,560+3))
        #Obstacle button
        if 450 <= mouse[0] <= 550 and 560 <= mouse[1] <= 580:
            pg.draw.rect(screen,ob_light,[450,560,100,20])
        else:
            pg.draw.rect(screen,ob_dark,[450,560,100,20])
        screen.blit(buttonfont.render('Obstacle' , True , color) , (450+20,560+3))
        pg.display.update()
if __name__=='__main__':
main()
pg.quit()
| true |
8552316dd9c81143808e090e9d5bf7a24b8d2503 | Python | amirhaziemdev/robot-dev | /v1/testing/robot/test_0.py | UTF-8 | 916 | 2.65625 | 3 | [] | no_license | import selectors
import socket
import time
sel = selectors.DefaultSelector()
def accept(sock, mask):
    """Accept a pending client connection and watch it for read events."""
    if mask == 0:
        print("0")
        return
    client, address = sock.accept()
    print("Connected from", address)
    # Keep the whole loop non-blocking.
    client.setblocking(False)
    sel.register(client, selectors.EVENT_READ, read)
def read(conn, mask):
    """Echo incoming bytes back to the client; tear down on EOF."""
    payload = conn.recv(1024)
    if not payload:
        # Empty read means the peer closed its end.
        print("Closing connection from", conn.getpeername())
        sel.unregister(conn)
        conn.close()
        return
    print("Echoing", repr(payload), "to", conn.getpeername())
    conn.sendall(payload)
# Non-blocking TCP echo server bound to the hard-coded LAN address below.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('192.168.1.17', 8485))
s.listen(100)
s.setblocking(False)
# The listening socket is registered for READ events: readiness means a
# client is waiting to be accepted, dispatched to accept() via key.data.
sel.register(s, selectors.EVENT_READ, accept)
print("Listening on port 8485")
while True:
    # timeout=0 polls without blocking (busy loop)
    events = sel.select(timeout=0)
    for key, mask in events:
        callback = key.data
callback(key.fileobj, mask) | true |
227a3a1d4015a20659dfc9881037d95b9efa0e28 | Python | daniel-reich/ubiquitous-fiesta | /GZ5gCe5jnbNRWqc5J_17.py | UTF-8 | 349 | 3.859375 | 4 | [] | no_license |
def first_tuesday_of_the_month(year, month):
    """Return the ISO date (YYYY-MM-DD) of the first Tuesday in a month.

    Args:
        year: calendar year, e.g. 2021.
        month: month number, 1-12.

    Returns:
        str: the date formatted as 'YYYY-MM-DD'.
    """
    from datetime import date
    day = 1
    # weekday(): Monday is 0, so Tuesday is 1; at most 6 steps are needed.
    while date(year, month, day).weekday() != 1:
        day += 1
    # isoformat() already zero-pads month and day, replacing the original
    # hand-rolled string padding loops.
    return date(year, month, day).isoformat()
| true |