blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
02ede3948313deb143daccadb98e4320e0753621 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2339/60606/241120.py | UTF-8 | 297 | 3.15625 | 3 | [] | no_license | test_num = int(input())
# For each of test_num test cases: read n (consumed but otherwise unused) and a
# space-separated list of integers, then count inversions — pairs (j, k) with
# j < k and array[j] > array[k] — by brute-force O(n^2) comparison.
# NOTE(review): `sum` shadows the builtin of the same name.
for i in range(test_num):
sum = 0
n = int(input())
array = input().split(" ")
array = [int(x) for x in array]
# Compare every ordered pair (j, k) with j < k.
for j in range(len(array)):
for k in range(j+1,len(array)):
if array[j] > array[k]:
sum += 1
print(sum) | true |
4fe4c92e33d43030ac5a7405992a27d8af37b52a | Python | Ryctorius/Curso-de-Python | /PycharmProjects/CursoExercicios/ex022.py | UTF-8 | 264 | 3.765625 | 4 | [] | no_license | NOME =str(input('Digite seu nome completo:'))
# Show the full name (read into NOME above) in upper and lower case, then
# report how many letters it contains (spaces excluded) and the length of
# the first name.
print(NOME.upper())
print(NOME.lower())
letter_count = len(NOME) - NOME.count(' ')
print('Há um total de {} letras no seu nome completo'.format(letter_count))
first_name = NOME.split()[0]
print('Seu primeiro nome tem {} letras'.format(len(first_name)))
| true |
dd29526a9dae1264e000b645d0399f67e32d8c6e | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2899/60640/247074.py | UTF-8 | 353 | 3.546875 | 4 | [] | no_license | def is_power(n):
# Body of is_power(n) (the def line sits on the row boundary above):
# recursively decide whether n is a power of 4, returning 1 for yes and 0
# for no.  4^0 == 1 is the base case; anything else below 4 is rejected.
if n < 4:
if n == 1:
return 1
else:
return 0
else:
# n >= 4: it is a power of 4 iff it is divisible by 4 and the
# quotient is itself a power of 4.
if n % 4 != 0:
return 0
else:
n = n // 4
return is_power(n)
# Read an integer, reduce it to its absolute value, and print "true" or
# "false" depending on whether it is a power of 4 (per is_power above).
inp = abs(int(input()))
print("true" if is_power(inp) == 1 else "false")
| true |
611d7ce6fdd46276ed6158ed826755e969b5db1b | Python | shrikumaran/Transfi-NITT | /pyserial.py | UTF-8 | 273 | 3.140625 | 3 | [] | no_license |
# Continuously stream four tab-separated digit flags over a serial port.
from time import sleep
import serial
# NOTE(review): `sleep` is imported but never used — the time.sleep(2)
# call below is commented out.
ser = serial.Serial('/dev/ttyACM0') # open serial port
num1 = '1'
num2 = '0'
num3 = '1'
num4 = '0'
#time.sleep(2)
# Busy loop: re-encode and transmit the same frame forever.
while True:
s = str(num1) + '\t' + str(num2) + '\t' + str(num3) + '\t' + str(num4) + '\n'
ser.write(s.encode()) | true |
a411d050ca97c139ba64a19cef69ed997e9d306a | Python | tjmlandi/CS-Coursework | /Analysis of Algorithms/a4q2.py | UTF-8 | 962 | 3.75 | 4 | [] | no_license | class Solution(object):
def exploreMatrixWithPits(self, matrix):
"""Count monotone (right/down) paths from the top-left to the
bottom-right of a grid, where cells equal to 1 are impassable pits.

Standard dynamic-programming unique-paths recurrence; the count for
each cell is the sum of the counts above and to the left.  NOTE: the
input matrix is overwritten in place with the path counts.
"""
#Check if the upper left corner is a pit, if so, return 0
if matrix[0][0] == 1:
return 0
else:
#Otherwise, initialize it to 1
matrix[0][0] = 1
m = len(matrix)
n = len(matrix[0])
#Loop through the matrix, from left to right and top to bottom
for i in range(m):
for j in range(n):
if j != 0 or i != 0:
#If the current location is a pit, set it to have 0 paths to it
if matrix[i][j] == 1:
matrix[i][j] = 0
#Otherwise, get the paths to the locations above and to the left
#and set the value for the current path to their sum
else:
if i == 0:
above = 0
else:
above = matrix[i - 1][j]
if j == 0:
left = 0
else:
left = matrix[i][j - 1]
matrix[i][j] = above + left
#Return the value in the lower right corner of the matrix
return matrix[m - 1][n - 1] | true |
4033adfda2424b5aed7f9c8bac90231de5e3f2c3 | Python | LadislavVasina1/PythonStudy | /ProgramFlow/ranges.py | UTF-8 | 122 | 3.546875 | 4 | [] | no_license | for i in range(1, 21):
# Body of the first loop (header on the row boundary above): print 1..20.
print(f"i is now {i}")
# Visual separator, then count the even numbers 0..20 using a step of 2.
print("*" * 50)
for i in range(0, 21, 2):
print(f"i is now {i}")
| true |
3176b2476b7cf70c8a9d195117cb0876f6b01d9c | Python | dv3/ai_algos | /naive_bayes.py | UTF-8 | 24,135 | 2.84375 | 3 | [] | no_license | #histograms, gaussians, or mixtures
import sys,math
numofclasses = 0
###################
# This is all histograms
###################
def histogramming(someExample,attributes,binnes):
"""Training phase of the histogram naive Bayes classifier.

For every attribute column, partition the training rows into `binnes`
equal-width bins and compute P(bin | class) per bin.  Prints the learned
distributions and returns them as a list with one dict per attribute.
"""
classData,totalRows = classCounts(someExample)
#print 'attributes',attributes
baapAtributes=[]
for col in attributes:
# Equal-width bins over this column, each holding its member rows.
allBinsCurrentCol = binningofaColumn(someExample, col,binnes)
#print '\nallBinsCurrentCol',allBinsCurrentCol
anAttributesBinProbabilities={}
anAttributesBinProbabilities = ClassProbabilityofBins(allBinsCurrentCol,classData)
baapAtributes.append(anAttributesBinProbabilities)
#print '\n anAttributesBinProbabilities',anAttributesBinProbabilities
print '\n Output of Training phase'
printTrainingPhase(baapAtributes)
#print '\nbaapAtributes',baapAtributes
return baapAtributes
def printTrainingPhase(trainingOutput):
"""Print P(bin | class) for every class / attribute / bin combination.

trainingOutput: one entry per attribute; each entry maps a bin index to
a single-item dict {(low, high): [per-class probability list]}.
"""
global numofclasses
for c in range(0,numofclasses ):
for atr in range(0,len(trainingOutput)):
aColumn=trainingOutput[atr] #first index in list is first attribute
for eachBin in aColumn:
#print 'eachBin',eachBin
actualDict={}
actualDict=aColumn[eachBin]
#print 'actualDict',actualDict
# The dict holds exactly one (range -> distribution) pair.
temp=actualDict.values()
probDistrib = temp[0]
print 'Class=',c,' attribute=',atr, ' bin=',eachBin,' P(bin|class)=',probDistrib[c]
#calculates each class probability for a bin
def ClassProbabilityofBins(binsOfaColumn,allClassData):
"""For each bin of one attribute, replace its member rows with a
per-class probability vector P(bin | class).

binsOfaColumn: {bin_index: {(low, high): [rows...]}} — mutated and
returned; after the call each bin maps to {(low, high): [probabilities]}.
allClassData: {class_label: total row count for that class}.
"""
#global numofclasses
#distributionArray=[0] * (numofclasses+1)
# creating a bigger array. looking at unknown future
#binClasses = {}
count = 0
for aBin in binsOfaColumn:
global numofclasses
distributionArray=[0] * (numofclasses+1)
actualDataDict={}
actualDataDict=binsOfaColumn[aBin]
# Single (range -> rows) entry per bin.
keyss = actualDataDict.keys()
tupleKey = keyss[0]
rowsinListForm=actualDataDict.values()
rowsofthisBin=rowsinListForm[0]
classDataOfaBin,totalRowsInaBin = classCounts(rowsofthisBin)
#print 'classDataOfaBin',classDataOfaBin
for k in classDataOfaBin:
itsCountInaBin=classDataOfaBin[k]
classTotal = allClassData[k]
#print 'itsCountInaBin',itsCountInaBin,' classTotal',classTotal
# P(bin | class) = (#class rows in this bin) / (#class rows overall).
divides= itsCountInaBin/float(classTotal)
#print 'divides',divides
#divide by all classes data rows
distributionArray[k]=divides
temp={tupleKey: distributionArray }
binsOfaColumn[count] = temp
#binClasses[count]=distributionArray
count+=1
#print 'binsOfaColumn',binsOfaColumn
return binsOfaColumn
#creates bin out of a column
#binGroups {0: {(10.0, 27.5): [[10, 90, 8], [20, 80, 2]]}, 1: {(27.5, 45.0): [[30, 70, 1], [40, 60, 4]]} }
def binningofaColumn(someExample,someColumn,bins):
"""Partition the rows of someExample into `bins` equal-width bins keyed
by the value of column someColumn.

Returns {bin_index: {(low, high): [rows...]}} with half-open ranges
[low, high).  NOTE(review): the bin width `jee` is computed as
(max + min) / bins; (max - min) / bins looks like the intended formula —
confirm.  Rows whose value falls outside every range (e.g. the column
maximum when it equals the last bin's excluded upper edge) are dropped.
"""
localExample=someExample
currentCol=select_column(localExample,someColumn)
#print 'currentCol',currentCol
numb=max(currentCol)
bigValue=math.ceil(numb)
numb=min(currentCol)
smallValue=math.floor(numb)
jee=(bigValue+smallValue)/float(bins)
#print 'bigValue',bigValue,'smallValue',smallValue,'jee',jee
binGroups={}
for bin in range(0,bins):
eachBin={}
binList=[]
low=(smallValue+(jee*bin ))
high=(smallValue+(jee*(bin+1) ) )
lisst=[low,high]
keyss=tuple(lisst)
#print 'bin',bin,'low',low,'high',high
for row in someExample:
colValue=row[someColumn]
#print 'colValue',colValue
if (colValue >= low and colValue < high) :
#print 'colValue',colValue,'row',row
binList.append(row)
#binGroups[bin]=binList
eachBin[keyss]=binList
binGroups[bin]=eachBin
#print 'binGroups',binGroups
return binGroups
def classificationUsingHistogram( answer1 , probOfAllClasses , testExamples , testAttributes ):
"""Classify testExamples with the histogram model produced by
histogramming() and return the overall classification accuracy.

answer1: per-attribute bin tables {bin: {(low, high): [P(bin|class)...]}}.
probOfAllClasses: {class_label: prior probability}.
For each row: multiply the matching bin's P(bin|class) across attributes,
weight by the class prior, and predict the argmax class; ties are scored
fractionally (1/#tied) when the true class is among the tied maxima.
"""
allacuracy=[]
rowNumber=0
for row in testExamples:
#print 'row',row
#finalDistibutionOfaRow=[]
attrDistri=[]
# Collect, for every attribute, the class-probability vector of the
# bin that contains this row's value.
for i in range(0,len(row)-1): #dont touch class
aColumn={}
aColumn=answer1[i] #first index in list is first attribute
for eachBin in aColumn:
actualDict={}
actualDict=aColumn[eachBin]
#print 'actualDict',actualDict
temp=actualDict.keys()
binRanges=temp[0]
low=binRanges[0]
high=binRanges[1]
#print 'high',high,'low',low,'row[i]',row[i]
if (row[i] >= low and row[i] < high) :
temp=actualDict.values()
probDistrib = temp[0]
attrDistri.append(probDistrib )
#attrDistri=[row[i]*h for h in probDistrib]
#finalDistibutionOfaRow.append(attrDistri)
#print 'finalDistibutionOfaRow',finalDistibutionOfaRow
global numofclasses
decidingArrayProb=[]
# Naive Bayes score per class: prior * product of per-attribute bin
# probabilities.
for classs in range(0,numofclasses):
currentClass=select_column(attrDistri,classs)
#print 'currentClass',currentClass
xGivenClass=reduce(lambda x, y: x*y, currentClass)
PofClass=probOfAllClasses[classs]
#print 'PofClass',PofClass,'xGivenClass',xGivenClass
finalProbOfaClass=xGivenClass*PofClass
decidingArrayProb.append(finalProbOfaClass)
#print 'decidingArrayProb',decidingArrayProb
#maxProb=max(decidingArrayProb)
#predictedClass=decidingArrayProb.index(maxProb)
probabilityDistributionDict={}
for i in range(0,len(decidingArrayProb) ):
probabilityDistributionDict[i]=decidingArrayProb[i]
#print 'answer',answer
#maxClass = max([ (answer[i],i) for i in answer])[1]
# Invert probability -> classes so ties on the maximum are visible.
newd={}
for k,v in probabilityDistributionDict.iteritems():
newd.setdefault(v,[]).append(k)
maxProbability=max(newd)
maxClasses=newd[maxProbability]
trueClass= row[-1]
acuracy=0
#lets match last column element with maxClasses
if len(maxClasses) == 1:
predictedClass=maxClasses[0]
if trueClass == predictedClass: #exact 1 class match
acuracy=1
allacuracy.append(acuracy)
else: #no single match
acuracy=0
allacuracy.append(acuracy)
else:
if trueClass in maxClasses: #match in tied classes
predictedClass=trueClass
numClassTied= len(maxClasses)
acuracy=1/float(numClassTied)
allacuracy.append(acuracy)
else: # no classes match from tied ones
# NOTE(review): this assigns a probability, not a class label.
predictedClass=decidingArrayProb[0]
acuracy=0
allacuracy.append(acuracy)
print '\nID=',rowNumber,' predicted=',predictedClass, ' probability=',maxProbability,' true=',trueClass,' accuracy=',acuracy
rowNumber=rowNumber+1
allElements = len(allacuracy)
totalAddition=0
for k in allacuracy:
totalAddition=totalAddition+k
classification_accuracy= totalAddition/float(allElements)
print 'Classification_accuracy:',classification_accuracy
return classification_accuracy
####################
# From here its for gaussian
####################
def printGaussian(cal_gaussi):
"""Print the fitted mean/std per class and attribute.

cal_gaussi: {class_label: [[mean, std] per attribute]}.
NOTE(review): printMixGaussian below is an identical copy of this
function; the two could be consolidated.
"""
for key,value in cal_gaussi.iteritems():
i=0
for attr in value:
mean=attr[0]
std=attr[1]
print 'Class=',key,' attribute=',i, ' mean=',mean,' std=',std
i=i+1
def calculateGaussianProbability( trainingExamples , testExamples, trainingAttribute ,prob_class):
"""Single-Gaussian naive Bayes: fit one N(mean, std) per class/attribute
on the training data, then classify testExamples and return the overall
classification accuracy.  Prints the fitted model and a line per test row.
"""
tempClassDistribution=findingClasswiseRows(trainingExamples)
cal_gaussian=calculateSum(tempClassDistribution, trainingAttribute )
print 'Training Phase Output:'
printGaussian(cal_gaussian)
print 'Testing Phase starts'
# NOTE(review): assumes a class labelled 1 exists; the attribute count is
# taken from its entry.
length_val = len(cal_gaussian[1])
#print 'length_val',length_val
gaussian=1
predicated_class=[]
allacuracy=[]
rowNumber=0
for row in testExamples:
prob_rowgivenclass=[]
# Score each class: prior * product of per-attribute densities.
for key,value in cal_gaussian.iteritems():
#print 'value',value
gaussian=1
for i in range(0,length_val):
ourNumbers = value[i]
mean = ourNumbers[0]
std = ourNumbers[1]
#print 'row[i]',i,'mean,std',row[i],mean,std
gaussian = gaussian*calculateGaussian(row[i],mean,std)
#print 'gaussian',gaussian
prob_rowgivenclass.append(gaussian*prob_class[key])
#max_inter = max(prob_rowgivenclass)
#predicated_class.append(prob_rowgivenclass.index(max_inter)))
#print "predicated_class", predicated_class
#print 'prob_rowgivenclass',prob_rowgivenclass
probabilityDistributionDict={}
for i in range(0,len(prob_rowgivenclass) ):
probabilityDistributionDict[i]=prob_rowgivenclass[i]
#print 'answer',answer
#maxClass = max([ (answer[i],i) for i in answer])[1]
# Invert probability -> classes to detect ties on the maximum.
newd={}
for k,v in probabilityDistributionDict.iteritems():
newd.setdefault(v,[]).append(k)
maxProbability=max(newd)
maxClasses=newd[maxProbability]
trueClass= row[-1]
acuracy=0
#lets match last column element with maxClasses
if len(maxClasses) == 1:
predictedClass=maxClasses[0]
if trueClass == predictedClass: #exact 1 class match
acuracy=1
allacuracy.append(acuracy)
else: #no single match
acuracy=0
allacuracy.append(acuracy)
else:
if trueClass in maxClasses: #match in tied classes
predictedClass=trueClass
numClassTied= len(maxClasses)
acuracy=1/float(numClassTied)
allacuracy.append(acuracy)
else: # no classes match from tied ones
# NOTE(review): this assigns a probability, not a class label.
predictedClass=prob_rowgivenclass[0]
acuracy=0
allacuracy.append(acuracy)
print 'ID=',rowNumber,' predicted=',predictedClass, ' probability=',maxProbability,' true=',trueClass,' accuracy=',acuracy
rowNumber=rowNumber+1
allElements = len(allacuracy)
totalAddition=0
for k in allacuracy:
totalAddition=totalAddition+k
classification_accuracy= totalAddition/float(allElements)
print 'Classification_accuracy:',classification_accuracy
return classification_accuracy
def findingClasswiseRows(someExample):
    """Group rows by class label (the last element of each row).

    Returns {class_label: [rows...]} with rows kept in their original
    order.  Improvement: a single pass over the data — the original first
    called classCounts() for the key set and then rescanned the whole
    example list once per class (O(k*n)).
    """
    class_distribution = {}
    for row in someExample:
        # setdefault creates the per-class list on first sight of a label.
        class_distribution.setdefault(row[-1], []).append(row)
    return class_distribution
def calculateSum(class_distribution, trainingAttributes):
"""Fit a Gaussian (mean, sample std) per class and attribute.

class_distribution: {class_label: [rows...]} from findingClasswiseRows.
Returns {class_label: [[mean, std] for each attribute]}.
"""
cal_gaussian = {}
for key, value in class_distribution.iteritems():
attribute_values=[]
for attrib in trainingAttributes:
column = select_column(value,attrib)
#if key == 4:
#print 'attrib',attrib, ' value',value
#print '##############3'
avg,std = calculateStandardDeviation(column)
attribute_values.append([avg,std])
#print 'attribute_values',attribute_values
cal_gaussian[key] = attribute_values
#print '\ncal_gaussian',cal_gaussian
return cal_gaussian
def calculateAverage(colum):
    """Return the arithmetic mean of the numeric sequence *colum* as a float."""
    total = sum(colum)
    count = len(colum)
    return total / float(count)
def calculateStandardDeviation(colum):
    """Return (mean, sample standard deviation) of *colum*.

    Uses the Bessel-corrected (n-1) denominator.  BUG FIX: for fewer than
    two values the original raised ZeroDivisionError, which crashed the
    Gaussian classifier whenever a class had a single training row; the
    deviation is undefined there, so 0.0 is returned instead —
    calculateGaussian already treats a zero deviation as "no density".
    The mean is computed inline so this helper stands alone.
    """
    avg = sum(colum) / float(len(colum))
    if len(colum) < 2:
        return avg, 0.0
    variance = sum((x - avg) ** 2 for x in colum) / float(len(colum) - 1)
    return avg, math.sqrt(variance)
def calculateGaussian(x, mean, stdev):
    """Density of the normal distribution N(mean, stdev**2) evaluated at x.

    Returns 0 for a degenerate (zero) standard deviation so callers can
    multiply densities without special-casing constant attributes.
    """
    if stdev == 0:
        return 0
    z = (x - mean) / float(stdev)
    coefficient = 1 / (math.sqrt(2 * math.pi) * stdev)
    return coefficient * math.exp(-0.5 * z * z)
####################
# this part is for mixture of gaussians
# ##################
def printMixGaussian(someDict):
"""Print mean/std per class entry (mixture-of-Gaussians model).

NOTE(review): byte-for-byte duplicate of printGaussian above — the two
could be consolidated.  Also note the em_algorithm output nests one list
of [mean, std, weight] triples per attribute, so `attr[0]`/`attr[1]`
here index component triples, not scalars — confirm intended structure.
"""
for key,value in someDict.iteritems():
i=0
for attr in value:
mean=attr[0]
std=attr[1]
print 'Class=',key,' attribute=',i, ' mean=',mean,' std=',std
i=i+1
def gaussianTesting( trainingExamples, testExamples,probOfAllClasses,number,trainingAttributes):
allacuracy=[]
rowNumber=0
baapDict=mixtureOfGaussians(trainingExamples,number,trainingAttributes)
print 'Output of training phase:'
printMixGaussian(baapDict)
print 'Testing phase starts'
for row in testExamples:
probArray=[]
distri=[]
for key, value in baapDict.iteritems():
for i in range(0,len(row)-1):
for val in value:
attribGivenClass = val[3]*calculateGaussian(row[i],val[0],val[1])
probArray.append(finalGauss)
xGivenClass = reduce(lambda x, y: x*y, probArray)
probClass = xGivenClass* probOfAllClasses[key]
distri.append(probClass)
#print 'distri',distri
probabilityDistributionDict={}
for i in range(0,len(distri) ):
probabilityDistributionDict[i]=distri[i]
#print 'answer',answer
#maxClass = max([ (answer[i],i) for i in answer])[1]
newd={}
for k,v in probabilityDistributionDict.iteritems():
newd.setdefault(v,[]).append(k)
maxProbability=max(newd)
maxClasses=newd[maxProbability]
trueClass= row[-1]
acuracy=0
#lets match last column element with maxClasses
if len(maxClasses) == 1:
predictedClass=maxClasses[0]
if trueClass == predictedClass: #exact 1 class match
acuracy=1
allacuracy.append(acuracy)
else: #no single match
acuracy=0
allacuracy.append(acuracy)
else:
if trueClass in maxClasses: #match in tied classes
predictedClass=trueClass
numClassTied= len(maxClasses)
acuracy=1/float(numClassTied)
allacuracy.append(acuracy)
else: # no classes match from tied ones
predictedClass=distri[0]
acuracy=0
allacuracy.append(acuracy)
print 'ID=',rowNumber,' predicted=',predictedClass, ' probability=',maxProbability,' true=',trueClass,' accuracy=',acuracy
rowNumber=rowNumber+1
allElements = len(allacuracy)
totalAddition=0
for k in allacuracy:
totalAddition=totalAddition+k
classification_accuracy= totalAddition/float(allElements)
print 'Classification_accuracy:',classification_accuracy
return classification_accuracy
#First is same, find classwiserows
def mixtureOfGaussians(someExample,number,trainingAttributes):
"""Initialise `number` Gaussian components per class/attribute and refine
them with EM.

Initialisation: component means are spread evenly across the attribute's
value range (each at the centre of one of `number` equal slices), with a
standard deviation of 1 and uniform weights 1/number.  Returns the
structure produced by em_algorithm.
"""
class_distribution=findingClasswiseRows(someExample)
std_dev = 1
w = 1/float(number)
dick={}
for key, value in class_distribution.iteritems():
attriList=[]
for attrib in trainingAttributes:
columnVals = select_column(value,attrib)
l = max(columnVals)
s = min(columnVals)
g= (l-s)/float(number)
gaussian=[]
for i in range(0,number):
# Centre of the i-th equal-width slice of [s, l].
mi = s + (i*g) +(g/2)
gaussianVal=[]
#e_step(columnVals, mi, std_dev, w)
#perform em algo here and get real values to store
gaussianVal.append(mi)
gaussianVal.append(std_dev)
gaussianVal.append(w)
gaussian.append(gaussianVal)
attriList.append(gaussian)
dick[key]=attriList
baapData=em_algorithm(class_distribution,dick,trainingAttributes)
return baapData
def em_algorithm(class_distribution,dick,trainingAttributes):
"""Run EM per class/attribute over the initial components in `dick`.

Returns {class_label: [refined components per attribute]}, where each
attribute entry is a list of [mean, std, weight] triples from emAlgo.
NOTE(review): the inner loop iterates over every class's initial
components while using the outer class's rows, and bigData[key] is
overwritten on each pass — so only the last `classes` iteration
survives.  Looks unintended (O(k^2) work, mixed initialisations);
confirm whether the inner loop should be `gaussians = dick[key]`.
`attriList` is built but never used.
"""
#print 'em_algorithm'
bigData={}
for key, value in class_distribution.iteritems():
for classes, gaussians in dick.iteritems():
attriList=[]
temp=[]
for attrib in trainingAttributes:
columnVals = select_column(value,attrib)
bigGaussianColumn=gaussians[attrib]
answer=emAlgo(columnVals, bigGaussianColumn)
temp.append(answer)
bigData[key]=temp
return bigData
def emAlgo(columnVals, bigGaussianColumn):
"""Run two fixed E/M iterations for one attribute's components.

columnVals: the attribute's values for one class.
bigGaussianColumn: initial [mean, std, weight] triples.
Returns the component list produced by the final m_step.  The `flag`
switch feeds the first E-step the initial components and subsequent
E-steps the previous M-step output.
"""
#print 'emAlgo'
flag=1
answer2=None
for i in range(0,2):
#print '###########i',i
if flag ==1:
answer1= e_step(columnVals, bigGaussianColumn)
else:
answer1= e_step(columnVals, answer2)
answer2=m_step(columnVals, answer1)
#print '### answer2 ###',answer2
flag=0
return answer2
def e_step(columnVals, values):
    """E-step: compute component responsibilities for the column values.

    values: list of [mean, std, weight] component triples.
    Returns one responsibility list per processed value, where entry j is
    P(component j | value) = w_j * N(value; m_j, s_j) / sum_k(w_k * N_k).

    BUG FIX: the original reused `i` both as the row counter and as the
    inner loop variable over the weighted densities, clobbering the
    counter with a float — so the intended `if i == 2: break` truncation
    (mirroring m_step's `if i == 1: break`) effectively never fired.  The
    counter is now a separate name and the two-row truncation behaves as
    written.  NOTE(review): the truncation itself looks arbitrary —
    confirm whether all of columnVals should be processed.
    """
    row_idx = 0
    estep = []
    for colVal in columnVals:
        if row_idx == 2:
            break
        weightedNarray = []
        for value in values:
            nixj = calculateGaussian(colVal, value[0], value[1])
            weightedNarray.append(nixj * value[2])
        pxj = sum(weightedNarray)
        # Normalise the weighted densities into responsibilities.
        pijArray = [w / float(pxj) for w in weightedNarray]
        estep.append(pijArray)
        row_idx += 1
    return estep
def m_step(columnVals, estep):
"""M-step: re-estimate [mean, std, weight] triples from responsibilities.

estep: per-value responsibility lists from e_step.
NOTE(review): several apparent defects are preserved as-is — `numer` is
overwritten (not accumulated) across components; the inner loop breaks
after the first column value (`if i == 1`), so statistics come from one
data point; and one triple is appended per (responsibility list, value)
pair rather than per component.  Flagged for confirmation rather than
changed, since e_step/emAlgo are written against this exact behaviour.
"""
#print 'm_step'
i=0
weights=0
# Total responsibility mass, used to normalise component weights.
for r in estep:
for c in r:
weights=weights+c
bigGaussian=[]
for value in estep:
i=0
for colVal in columnVals:
i+=1
numer=denomi=0
gaussian=[]
for jj in value:
numer = jj*colVal
denomi=denomi+jj
newMean=numer/float(denomi)
variance= (math.pow(colVal-newMean,2)) / float(denomi)
std=math.sqrt(variance)
gaussian.append(newMean)
gaussian.append( std )
gaussian.append( denomi/weights )
bigGaussian.append(gaussian)
if i ==1:
break
#print 'end of mstep',bigGaussian
return bigGaussian
####################
# common functions for all
####################
def allClassProbability(someExample):
    """Return ({class_label: prior P(class)}, total number of rows).

    Priors are the relative frequencies of the last-column labels.
    Improvement: counted in a single pass instead of delegating to
    classCounts, so this helper stands alone.
    """
    totalRows = len(someExample)
    counts = {}
    for row in someExample:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    classProbabilityDistribution = {}
    for k in counts:
        classProbabilityDistribution[k] = counts[k] / float(totalRows)
    return classProbabilityDistribution, totalRows
#returns classData
def classCounts(someExample):
    """Count rows per class label (the last element of each row).

    Returns (counts, total) where counts maps class label -> number of
    rows with that label and total is the overall row count.
    """
    classData = {}
    totalRows = 0
    for row in someExample:
        totalRows += 1
        label = row[-1]
        # First occurrence starts at 0 via the default, then increments.
        classData[label] = classData.get(label, 0) + 1
    return classData, totalRows
#return array containing one column
def select_column(someExample, anAttribute):
    """Return column *anAttribute* of the row-major table as a list."""
    return [row[anAttribute] for row in someExample]
def readFileReturnDataset(some_file,whole_dataset,classList):
"""Read a whitespace-separated integer table from some_file.

Appends each row to whole_dataset (mutated in place) and adds each row's
last value to classList (the class label set).  Returns (whole_dataset,
number_of_attributes, classList), where the attribute count is columns-1
taken from the last line read.  NOTE(review): an empty file leaves
numberOfAttributes unbound and raises NameError at the return — confirm
inputs are always non-empty.
"""
fout = open(some_file, 'r')
try:
for line in fout:
stringlist = line.split()
manyColumns = [int(x) for x in stringlist]
numberOfAttributes=len(manyColumns) - 1
classList.add(manyColumns[-1])
whole_dataset.append( manyColumns )
finally:
fout.close()
return whole_dataset,numberOfAttributes,classList
def main(argv):
"""Entry point: parse [training_file] [test_file] [option] [*number],
load both datasets, and dispatch to the chosen naive Bayes variant
(histograms / gaussians / mixtures).  The trailing number argument is
required for histograms and mixtures (bin / component count).
"""
# Make sure we have enough command-line arguments
if len(argv) > 5 or len(argv) < 4:
print 'Atleast 4 command-line arguments are needed:'
print('Usage: %s [training_file] [test_file] [option] [*number]' % argv[0])
sys.exit(2)
print 'Command line arguments:',argv
training_file,test_file,bayesOptions = argv[1:4]
listofOptions=['histograms','gaussians','mixtures']
howMany=0
if not bayesOptions in listofOptions:
print('%s is an unrecognized options.Please select from:%s' % (bayesOptions,listofOptions) )
sys.exit(2)
if bayesOptions in ['histograms','mixtures']:
if not(len(argv) == 5):
print('Last argument for the number of histogram or mixture not given.Exiting..' )
sys.exit(2)
else:
howMany=int(argv[4])
print 'Fourth argument:',howMany
#read training sets
#print read training set data
trainingExamples=[]
trainingAttributes=[]
trainingClassList=set()
trainingExamples,numOfAttributes,trainingClassList=readFileReturnDataset(training_file,trainingExamples,trainingClassList)
for i in range(0,numOfAttributes):
trainingAttributes.append(i)
print 'trainingClassList',trainingClassList
global numofclasses
# NOTE(review): assumes class labels are 0..max; numofclasses is the
# largest label seen, and loops elsewhere iterate range(numofclasses).
numofclasses = max(trainingClassList)
defaultdistributionArray=[0] * (numofclasses+1)
#lets read test file too
testExamples=[]
testAttributes=[]
testClassList=set()
testExamples,numOfAttributes,testClassList=readFileReturnDataset(test_file,testExamples,testClassList)
for i in range(0,numOfAttributes):
testAttributes.append(i)
#print '\ntestAttributes:',testAttributes
#print 'testClassList:',testClassList
probOfAllClasses,totallyRows=allClassProbability(trainingExamples)
#print 'probOfAllClasses',probOfAllClasses
if bayesOptions == 'histograms':
answer1 = histogramming(trainingExamples,trainingAttributes,howMany)
print '\nLets start classification:'
finalAccuracy=classificationUsingHistogram( answer1 , probOfAllClasses , testExamples , testAttributes )
print 'Final Accuracy of training set over histograms of bins ', howMany, ' is:' ,finalAccuracy
elif bayesOptions == 'gaussians':
finalAccuracy=calculateGaussianProbability( trainingExamples, testExamples, trainingAttributes,probOfAllClasses)
print 'Final Accuracy of training set over Gaussians is:' ,finalAccuracy
elif bayesOptions == 'mixtures':
finalAccuracy=gaussianTesting( trainingExamples, testExamples,probOfAllClasses,howMany,trainingAttributes)
print 'Final Accuracy of training set over mixture of ', howMany, ' Gaussians is:',finalAccuracy
else:
print('%s is an unrecognized tree options.Please select from:%s' % (bayesOptions,listofOptions) )
sys.exit(2)
#finalAccuracy=classification(testExamples,totalTrees)
#print 'final Accuracy of training set over ', treeCounts , ' trees= ',finalAccuracy
print 'Finished'
if __name__ == '__main__':
main(sys.argv) | true |
615bf4aee0b9e08695fa7bb1098953a6dd5c79cb | Python | mbilab/ML-tutorial | /unit/data_preprocessing/.prepared/ex1_np.py | UTF-8 | 278 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3
import numpy as np
# Load the CSV as a float matrix; column 0 is the class label (expected 0-3).
data_matrix = np.loadtxt('../ex1.csv', delimiter = ',')
# Split off the label column from the remaining feature columns.
label, other = np.hsplit(data_matrix, [1])
label = np.reshape(label, [-1]).astype(int)
# One-hot encode the labels: row i of eye(4) selected by label value.
one_hot = np.eye(4)[label]
# Recombine as [one-hot label columns | original feature columns].
data_matrix = np.hstack([one_hot, other])
print(data_matrix)
| true |
9c59efbf2a59e62aaefb17606211473dee9ebc4b | Python | Yorwxue/PytorchMnist | /train.py | UTF-8 | 2,181 | 2.640625 | 3 | [] | no_license | import os
import torch
from tqdm import tqdm
from model_architecture import mnist_model
from dataset import mnist_dataset
# Train the MNIST model for num_epoch epochs with Adam + cross-entropy,
# printing running loss/accuracy every display_freq batches, then save the
# whole module to weights/mnist/mnist_model.
if __name__ == "__main__":
model_dir = "weights/mnist/"
model_name = "mnist_model"
display_freq = 100
num_epoch = 5
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_path = os.path.join(model_dir, model_name)
dataset = mnist_dataset(training=True)
dataloader = torch.utils.data.DataLoader(dataset=dataset.train_data, batch_size=64, shuffle=True)
net = mnist_model()
# NOTE(review): `cost` is constructed but never used — a fresh
# CrossEntropyLoss is instantiated inside the batch loop instead.
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters())
print(net)
# NOTE(review): use_cuda is hard-coded True; this raises if no GPU is
# available — consider torch.cuda.is_available().
use_cuda = True
device = torch.device("cuda" if use_cuda else "cpu")
net.to(device)
for epoch_idx in range(num_epoch):
print("EPOCH %d" % (epoch_idx+1))
batch_loss = 0.0
batch_correct = 0.0
epoch_loss = 0.0
epoch_correct = 0
for batch_idx, data in enumerate(tqdm(dataloader), 1):
x = data[0] # data["input"]
y = data[1] # data["label"]
try:
x, y = x.to(device, dtype=torch.float32), y.to(device)
optimizer.zero_grad()
output = net(x)
pred = output.max(1, keepdim=True)[1]
loss = torch.nn.CrossEntropyLoss()(output, y)
loss.backward()
optimizer.step()
batch_loss += loss.item()
batch_correct += pred.eq(y.view_as(pred)).sum().item()
epoch_loss += loss.item()
epoch_correct += pred.eq(y.view_as(pred)).sum().item()
if batch_idx % display_freq == 0:
# Averages assume full batches of dataloader.batch_size.
print("Batch %d, Training Loss: %.4f, Training ACC: %.4f" % (
batch_idx,
batch_loss / (dataloader.batch_size * display_freq),
100 * batch_correct / (dataloader.batch_size * display_freq)))
batch_loss = 0.0
batch_correct = 0.0
except Exception as e:
# NOTE(review): broad except silently skips failed batches after
# printing the error — confirm this best-effort behaviour is wanted.
print(e)
pass
# Saves the entire nn.Module (not just the state_dict).
torch.save(net, model_path)
| true |
cf5309fe6d61397b889dd1c7adc73d20fba93dc9 | Python | dbehrlich/KerasCog | /ID_removed_Fixation.py | UTF-8 | 10,326 | 2.5625 | 3 | [
"MIT"
] | permissive | import numpy as np
from keras.layers.core import Dense
from keras.layers.recurrent import Recurrent, time_distributed_dense
from keras import backend as K
from keras import activations, initializations, regularizers
from keras.models import Model
from keras.layers import Input
from keras.optimizers import Adam
from keras.layers.wrappers import TimeDistributed
from keras.engine.topology import Layer, InputSpec
from keras.callbacks import ModelCheckpoint
#from matplotlib import pyplot as plt
def rnn_1(weights_path = None, nb_neurons = 100):
"""Build (and optionally load weights into) the task model: a custom
noisy ReLU RNN over (5000 timesteps, 2 inputs) followed by a
time-distributed linear readout of 2 units, compiled with Adam and a
temporally-masked binary cross-entropy loss.

Uses the Keras 1.x API (input=/output= kwargs, output_dim, lr).
"""
inputs = Input(shape=(5000, 2))
#note: want to add noise before relu in recurrent connection, do this by tweaking SimpleRNN
rnn = myRNN(return_sequences = True, output_dim = nb_neurons, activation='relu', consume_less = 'mem', unroll=False)(inputs)
outputs = TimeDistributed(Dense(2, activation = 'linear'))(rnn)
model = Model(input=inputs, output=outputs)
if weights_path:
model.load_weights(weights_path)
# clipnorm guards against exploding recurrent gradients.
adam = Adam(lr=.0001, clipnorm = 1)
model.compile(optimizer=adam, loss = 'binary_crossentropy', metrics=['binary_crossentropy'], sample_weight_mode='temporal')
return model
def create_input_output_pair(first_fire_neuron, second_fire_neuron, delay, length):
    """Build one (stimulus, target) pair for the delayed match-to-sample task.

    Timeline (timesteps): [0,500) fixation at LO on both channels;
    [500,1000) stimulus A fires HI on `first_fire_neuron`; a `delay`-long
    gap at LO; stimulus B fires HI on `second_fire_neuron` for 500 steps;
    then LO until `length`.  Gaussian noise (std 0.1) is added to X.
    The target y is LO on both channels during the stimuli, and after the
    second stimulus signals "match" on output 0 or "non-match" on output 1.
    Returns (X, y), each of shape (length, 2).
    """
    LO, HI = 0.2, 1.0
    stim_a = slice(500, 1000)
    stim_b = slice(1000 + delay, 1500 + delay)
    response = slice(1500 + delay, None)

    X = np.zeros((length, 2))
    X[:500, :] = LO
    X[stim_a, first_fire_neuron] = HI
    X[stim_a, 1 - first_fire_neuron] = LO
    X[1000:1000 + delay, :] = LO
    X[stim_b, second_fire_neuron] = HI
    X[stim_b, 1 - second_fire_neuron] = LO
    X[response, :] = LO
    X = X + np.random.normal(scale=.1, size=X.shape)

    y = np.zeros((length, 2))
    y[stim_a, :] = LO
    y[stim_b, :] = LO
    match = first_fire_neuron == second_fire_neuron
    y[response, 0] = HI if match else LO
    y[response, 1] = LO if match else HI
    return X, y
def generate_input_batch(batch_size, delay, length):
    """Infinite generator of (X, y, sample_weights) training batches.

    Each batch cycles through the four stimulus pairings (0,0), (0,1),
    (1,0), (1,1) so match and non-match trials are balanced.  The sample
    weights mask the loss to zero everywhere except the response period
    (timesteps from 1500 + delay onward).
    """
    # Loss mask: only post-response timesteps contribute.
    sample_weights = np.zeros((batch_size, length))
    sample_weights[:, 1500 + delay:] = 1.0
    pairings = [(0, 0), (0, 1), (1, 0), (1, 1)]
    while True:
        X = np.zeros((batch_size, length, 2))
        y = np.zeros((batch_size, length, 2))
        for i in range(batch_size):
            first, second = pairings[i % 4]
            X[i, :, :], y[i, :, :] = create_input_output_pair(first, second, delay, length)
        yield X, y, sample_weights
def train_rnn_1():
"""Train the task RNN for 20 epochs on generated batches (batch 20,
delay 2000, length 5000), checkpointing weights after every epoch via
Keras 1.x fit_generator (samples_per_epoch / nb_val_samples API).
"""
model = rnn_1()
checkpoint = ModelCheckpoint('rnn_weights_{epoch:02d}_{val_loss:.2f}.h5')
model.fit_generator(generate_input_batch(20, 2000, 5000), samples_per_epoch=1000, nb_epoch = 20,
validation_data = generate_input_batch(20, 2000, 5000), nb_val_samples = 200, callbacks=[checkpoint])
return
def visualize_rnn_1():
"""Plot one generated trial (inputs and targets), then the trained
model's predictions for it, loading weights from a checkpoint file.

NOTE(review): uses `plt`, but the matplotlib import at the top of the
file is commented out — calling this raises NameError unless it is
restored.  Uses Python 2 print statements.
"""
X,y = create_input_output_pair(0, 1, 2000, 5000);
plt.plot(range(5000), X[:,0], 'y', range(5000), X[:,1], 'r',range(5000), y[:,0], 'b',range(5000), y[:,1], 'g')
plt.show()
plt.plot(range(5000), y[:,0], 'b',range(5000), y[:,1], 'g')
plt.show()
print 1
# Add a batch dimension for model.predict.
X = np.expand_dims(X, 0)
print 2
model = rnn_1('rnn_weights_00_0.68.h5')
print 3
out = model.predict(X)
print 4
plt.plot(range(5000), out[0,:,0], 'b',range(5000), out[0,:,1], 'g')
plt.show()
return
class myRNN(Recurrent):
'''Fully-connected RNN where the output is to be fed back to input.
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(myRNN, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
if self.stateful:
self.reset_states()
else:
# initial states: all-zero tensor of shape (output_dim)
self.states = [K.random_normal(shape=(self.output_dim,), mean=0.,std=0.1)]
input_dim = input_shape[2]
self.input_dim = input_dim
self.W = self.init((input_dim, self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, self.output_dim),
name='{}_U'.format(self.name))
self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.trainable_weights = [self.W, self.U, self.b]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
return time_distributed_dense(x, self.W, self.b, self.dropout_W,
input_dim, self.output_dim,
timesteps)
else:
return x
def step(self, x, states):
prev_output = states[0]
B_U = states[1]
B_W = states[2]
if self.consume_less == 'cpu':
h = x
else:
h = K.dot(x * B_W, self.W) + self.b
#THIS IS THE PART WE CHANGED, CHECK SIGMA
output = self.activation(h + K.dot(prev_output * B_U, self.U) + K.random_normal(shape=K.shape(self.b), mean=0.,std=0.1))
return output, [output]
    def get_constants(self, x):
        """Build the per-sequence constants passed to ``step``.

        Returns [B_U, B_W]: dropout masks for the recurrent and input
        connections. Each entry is either a tiled binary mask (applied only
        in the training phase) or the scalar 1.0 when dropout is off.
        """
        constants = []
        if 0 < self.dropout_U < 1:
            # One mask row per sample, tiled across the output dimension.
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        # In 'cpu' mode input dropout is handled inside preprocess_input,
        # so the mask is only built here for the other consume_less modes.
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
    def get_config(self):
        """Return the layer configuration for serialization.

        Merges this layer's hyperparameters into the base-class config so
        the layer can be reconstructed from JSON/YAML.
        """
        config = {'output_dim': self.output_dim,
                  'init': self.init.__name__,
                  'inner_init': self.inner_init.__name__,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'dropout_W': self.dropout_W,
                  'dropout_U': self.dropout_U}
        base_config = super(myRNN, self).get_config()
        # config entries come last so they win over base-class duplicates.
        return dict(list(base_config.items()) + list(config.items()))
train_rnn_1()
| true |
9c42c956310f71589a8fd4930db5c99a290bd711 | Python | srdmdev8/logs-analysis-project | /newsdb.py | UTF-8 | 2,573 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python
import psycopg2
DBNAME = "news"
def logs_analysis_queries():
"""Pull the 3 most popular articles"""
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute("""SELECT articles.title, count(log.path)
FROM log
JOIN articles on log.path = '/article/' || articles.slug
GROUP BY articles.title
ORDER BY count(path)
DESC LIMIT 3;""")
data = c.fetchall()
queries = '<p style="font-size: 20px;"><strong>Top 3 Most Popular \
Articles:</strong></p>'
for i in data:
queries += '<li style="list-style-type: none;">' + str(i[0]) + " - " \
+ "{:,}".format(i[1]) + ' views </li>'
c.execute("""SELECT authors.name, COUNT(*) as views
FROM authors
JOIN articles on authors.id = articles.author
JOIN log on log.path = '/article/' || articles.slug
GROUP BY authors.name
ORDER BY views
DESC LIMIT 3;""")
data2 = c.fetchall()
queries += '<p style="font-size: 20px;"><strong>Most Popular Article \
Authors:</strong></p>'
for i in data2:
queries += '<li style="list-style-type: none;">' + str(i[0]) + " - " \
+ str(i[1]) + ' views</li>'
c.execute("""WITH errors as (SELECT date(time) as date, COUNT(*) as
errorDates
FROM log
WHERE log.status != '200 OK'
GROUP BY date
ORDER BY errorDates),
logDates as (SELECT date(time)as date, COUNT(*) as allDates
FROM log
GROUP BY date
ORDER BY allDates)
SELECT errors.date,
(errors.errorDates/logDates.allDates::float*100)::numeric
(7,2) as percent
FROM errors
JOIN logDates on errors.date = logDates.date
WHERE errors.errorDates/logDates.allDates::float > .01;
""")
data3 = c.fetchall()
queries += '<p style="font-size: 18px;"><p style="font-size: 20px;"> \
<strong>Days Where More Than 1% of Requests Lead to Errors: \
</strong></p>'
for i in data3:
queries += '<li style="list-style-type: none;">' + str(i[0]) + " - " \
+ str(i[1]) + '% of requests lead to errors</li>'
db.close()
return queries
logs_analysis_queries()
| true |
779beb521ef96dbc08d457f9194e70944b595e6b | Python | connor-makowski/AnagramSolver | /anagram.py | UTF-8 | 1,499 | 3.5 | 4 | [] | no_license | import json
dictionarylocation=r'.\words.json'
with open(dictionarylocation, 'r') as f:
dictionary = json.load(f)
def find(input):
    """Return every dictionary word that can be spelled with the given letters.

    A word qualifies when each of its characters can be matched by a distinct
    character of *input* — i.e. the word's letter multiset is a subset of the
    input's letter multiset. Only the dictionary buckets for letters actually
    present in *input* are scanned (words are filed under their letters).

    Returns the matches sorted by descending length, ties broken alphabetically.
    """
    from collections import Counter  # local import: keep module import time low
    available = Counter(input)
    found = []
    for letter in set(input):
        for word in dictionary[letter]:
            # Counter subtraction drops non-positive counts, so an empty
            # result means the word needs no letter more often than the
            # input supplies it.
            if not (Counter(word) - available):
                found.append(word)
    return sorted(found, key=lambda x: (-len(x), x))
# Interactive loop: prompt for letters, print candidate words grouped by
# length in batches of five, repeat until the user types quit/exit.
tryagain = True
while tryagain:
    givenletters = ''
    while givenletters == '':
        try:
            print('What are your letters?')
            givenletters = str(input('')).lower()
        except Exception:
            # Keep prompting on bad input, but let KeyboardInterrupt exit.
            print('Just enter your letters.')
    print('Here are your potential words:')
    found = find(givenletters)
    if found:  # bug fix: previously crashed with IndexError on no matches
        listdisplay = []
        length = len(found[0])
        print("Words of length", length)
        for i in found:
            if len(i) < length:
                # Length decreased: flush the current batch and start a
                # new length heading (found is sorted longest-first).
                print(listdisplay)
                listdisplay = []
                length = len(i)
                print("Words of length", length)
            listdisplay.append(i)
            if len(listdisplay) == 5:
                print(listdisplay)
                listdisplay = []
        if listdisplay:
            # bug fix: the trailing partial batch of the final length
            # used to be silently dropped.
            print(listdisplay)
    print('To continue, hit enter. To exit, type quit and hit enter.')
    quitthis = str(input())
    if quitthis.lower() == 'quit' or quitthis.lower() == 'exit':
        tryagain = False
| true |
0450814a6f666498d585439f52c4335bcfa3980e | Python | Nain-05/PractiseAssignment | /PractiseQ21.py | UTF-8 | 391 | 4.21875 | 4 | [] | no_license | #21. Write a Python program to convert seconds to day, hour, minutes and seconds.
# Read a duration in whole seconds and print it as days:hours:minutes:seconds.
Time = int(input('\nEnter Time in Seconds:\n'))
# divmod returns (quotient, remainder) in one step per time unit.
Days, Time = divmod(Time, 24 * 3600)
Hours, Time = divmod(Time, 3600)
Minutes, Seconds = divmod(Time, 60)
print('\n\t\td:h:m:s')
print("\n\t\t" + str(Days) + ":" + str(Hours) + ":" + str(Minutes) + ":" + str(Seconds))
186ed303c47f1cd37dc233990f686787e2356b25 | Python | bu-cms/monox_fit | /makeWorkspace/utils/jes_utils.py | UTF-8 | 1,380 | 2.53125 | 3 | [] | no_license | # ==============================
# Helper functions regarding JES/JER uncertainties
# ==============================
import ROOT as r
import re
from general import get_nuisance_name
def get_jes_variations(fjes, year, proc='qcd'):
    '''Given the JES file, get the list of JES variations.

    Scans the keys of ROOT file *fjes*, keeps only those whose name contains
    *proc*, maps each to its nuisance name for *year*, and filters out
    nuisances tagged with a different year as well as the combined
    "jesTotal" entry. Returns the surviving variation names as a set.
    '''
    jet_variations = set()
    for key in list(fjes.GetListOfKeys()):
        if proc not in key.GetName():
            continue
        # var = re.sub("(.*qcd_|(Up|Down))","",key.GetName())
        var = get_nuisance_name(key.GetName(), year)
        # Drop year-tagged nuisances that belong to a different year.
        if '201' in var and (str(year) not in var):
            continue
        # Skip the combined-total variation; individual sources are kept.
        if 'jesTotal' in var:
            continue
        jet_variations.add(var)
    return jet_variations
def get_jes_jer_source_file_for_tf(category):
    '''For the given analysis (monojet, mono-V or VBF), get the JES/JER uncertainty source file for transfer factors.

    The category string is matched against regex keys; the last matching
    entry wins. Raises RuntimeError if no pattern matches.
    '''
    # NOTE(review): both TFiles are opened eagerly on every call, even
    # though only one is returned — consider opening lazily.
    f_jes_dict = {
        '(monoj|monov).*' : r.TFile("sys/monojet_jes_jer_tf_uncs_jer_smeared_symmetrized.root"),
        'vbf.*' : r.TFile("sys/vbf_jes_jer_tf_uncs.root")
    }
    # Determine the relevant JES/JER source file
    f_jes = None
    for regex, f in f_jes_dict.items():
        if re.match(regex, category):
            f_jes = f
    if not f_jes:
        raise RuntimeError('Could not find a JES source file for category: {}'.format(category))
    return f_jes
| true |
3449ecce7d84c6f4f07ea9f90d85ea9b7d15e633 | Python | matiasezequielsilva/repositorio | /python/RedefinirOperadores.py | UTF-8 | 935 | 4.125 | 4 | [] | no_license | class Lista:
def __init__(self, lista):
self.lista=lista
def imprimir(self):
print(self.lista)
def __add__(self,entero):
nueva=[]
for x in range(len(self.lista)):
nueva.append(self.lista[x]+entero)
return nueva
def __sub__(self,entero):
nueva=[]
for x in range(len(self.lista)):
nueva.append(self.lista[x]-entero)
return nueva
def __mul__(self,entero):
nueva=[]
for x in range(len(self.lista)):
nueva.append(self.lista[x]*entero)
return nueva
def __floordiv__(self,entero):
nueva=[]
for x in range(len(self.lista)):
nueva.append(self.lista[x]//entero)
return nueva
# bloque principal
lista1=Lista([305,405,505])
lista1.imprimir()
print(lista1+10)
print(lista1-10)
print(lista1*10)
print(lista1//10) | true |
711f1425bc2bb96ec0e8bf663b1485fd040ed478 | Python | dawidwelna/2017sum_wiet_kol3 | /diaryTest.py | UTF-8 | 2,381 | 2.65625 | 3 | [] | no_license | import diaryprogram as dp
import unittest
diary = dp.Diary()
class testOpener(unittest.TestCase):
def testOpenerRaise(self):
"""Opener should fail given incorrect path to file."""
self.assertRaises(IOError, dp.opener, 'corrupted/path')
class ChooseStudentBadInput(unittest.TestCase):
def testNotInteger(self):
"""choose_student should fail when given non integer value"""
k = 'asd'
self.assertRaises(dp.NotIntegerError, dp.choose_student, diary, k)
def testTooLarge(self):
"""choose_student should fail with large input"""
k = 4
self.assertRaises(dp.OutOfRangeError, dp.choose_student, diary, k)
def testZero(self):
"""choose_student should fail with 0 input"""
k = 0
self.assertRaises(dp.OutOfRangeError, dp.choose_student, diary, k)
def testNegative(self):
"""choose_student should fail with negative input"""
k = -1
self.assertRaises(dp.OutOfRangeError, dp.choose_student, diary, k)
class ChooseSubjectBadInput(unittest.TestCase):
def testNotInteger(self):
"""choose_subject should fail with non-integer input"""
k = 'asd'
self.assertRaises(dp.NotIntegerError, dp.choose_subject, diary, 2, k)
def testTooLarge(self):
"""choose_subject should fail with large input"""
k = 4
self.assertRaises(dp.OutOfRangeError, dp.choose_subject, diary, 2, k)
def testZero(self):
"""choose_subject should fail with 0 input"""
k = 0
self.assertRaises(dp.OutOfRangeError, dp.choose_subject, diary, 2, k)
def testNegative(self):
"""choose_subject should fail with negative input"""
k = -1
self.assertRaises(dp.OutOfRangeError, dp.choose_subject, diary, 2, k)
class GradeStudentBadInput(unittest.TestCase):
def testNotInteger(self):
"""grade_student should fail when given non integer value"""
k = 'asd'
self.assertRaises(dp.NotIntegerError, dp.grade_student, diary, k)
def testTooLarge(self):
"""grade_student should fail with large input"""
k = 6
self.assertRaises(dp.OutOfRangeError, dp.grade_student, diary, k)
def testZero(self):
"""grade_student should fail with too small input"""
k = 1
self.assertRaises(dp.OutOfRangeError, dp.grade_student, diary, k)
def testNegative(self):
"""grade_student should fail with negative input"""
k = -1
self.assertRaises(dp.OutOfRangeError, dp.grade_student, diary, k)
if __name__ == "__main__":
unittest.main()
| true |
b708a7b6d506e9ed8c5766803726ff9fd7abba20 | Python | Alex-zhai/learn_practise | /tf_learn/mnist_nn.py | UTF-8 | 2,133 | 2.84375 | 3 | [] | no_license | import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data
batch_size = 128
learning_rate = 0.001
epoches = 50
mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
# set placeholder
x = tf.placeholder(tf.float32, [None, 28*28])
y = tf.placeholder(tf.float32, [None, 10])
# set weight and bias
w1 = tf.Variable(tf.random_normal([28*28, 256]))
b1 = tf.Variable(tf.random_normal([256]))
w2 = tf.Variable(tf.random_normal([256, 128]))
b2 = tf.Variable(tf.random_normal([128]))
w3 = tf.Variable(tf.random_normal([128, 10]))
b3 = tf.Variable(tf.random_normal([10]))
def model(input_x):
    """Forward pass of a 3-layer MLP: 784 -> 256 -> 128 -> 10 logits."""
    # ReLU hidden layers; w1..b3 are the module-level tf.Variables above.
    layer1 = tf.nn.relu(tf.matmul(input_x, w1) + b1)
    layer2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
    # No softmax here: softmax_cross_entropy_with_logits expects raw logits.
    model_output = tf.matmul(layer2, w3) + b3
    return model_output
logits = model(x)
#set loss and train_step
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(epoches):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
_, tmp_loss = sess.run([train_step, loss], feed_dict={x: batch_x, y:batch_y})
avg_cost += tmp_loss / total_batch
print("Epoch:", "%4d" %(epoch + 1), "cost = ", "{:.9f}".format(avg_cost))
print("learning finished!!!")
# test model
correct_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32))
print("Accuracy:", sess.run(correct_acc, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
r_index = random.randint(0, mnist.test.num_examples - 1)
print("true label:", sess.run(tf.argmax(mnist.test.labels[r_index, r_index+1], 1)))
print("pred label:", sess.run(tf.argmax(logits, 1), feed_dict={x: mnist.test.images[r_index:r_index+1],
y: mnist.test.labels[r_index:r_index+1]})) | true |
fd9c6f67f115058c91cb279a2139a58578a34369 | Python | Darkwing42/home_app | /todo/models.py | UTF-8 | 2,475 | 2.578125 | 3 | [
"MIT"
] | permissive | from app import db
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
import uuid
from app.utils.uuid_converter import str2uuid
from user.models import User
class Task(db.Model):
__tablename__ = 'tasks'
id = db.Column(UUID(as_uuid=True), default=lambda: uuid.uuid4(), unique=True)
taskID = db.Column(db.Integer, primary_key=True)
task_name = db.Column(db.String(200))
todoList_id = db.Column(db.Integer, db.ForeignKey('todoLists.todoListID'), nullable=False)
task_done = db.Column(db.Boolean, default=False)
def __init__(self, task_name, task_done):
self.task_done = task_done
self.task_name = task_name
def to_dict(self):
return dict(
id=str(self.id),
task_name=self.task_name,
task_done=self.task_done
)
def save(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@classmethod
def get_by_id(cls, list_id):
return cls.query.filter_by(id=(str2uuid(list_id))).first()
class TodoList(db.Model):
__tablename__ = 'todoLists'
id = db.Column(UUID(as_uuid=True), default=lambda: uuid.uuid4(), unique=True)
todoListID = db.Column(db.Integer, primary_key=True)
todoList_name = db.Column(db.String(200))
todoList_done = db.Column(db.Boolean, default=False)
tasks = db.relationship('Task', backref='todoList', lazy=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.userID'))
def __init__(self,todoList_name, todoList_done, user_id):
self.todoList_name = todoList_name
self.todoList_done = todoList_done
self.user_id = user_id
def to_dict(self):
return dict(
id=str(self.id),
todoList_name=self.todoList_name,
todoList_done=self.todoList_done,
tasks=[ task.to_dict() for task in self.tasks ]
)
def save(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@classmethod
def get_all(cls):
return cls.query.all()
@classmethod
def get_by_id(cls, id):
return cls.query.filter_by(todoListID=id).first()
@classmethod
def get_all_by_user(cls, user_id):
user = User.find_by_id(user_id)
return cls.query.filter_by(user_id=user.userID).all()
| true |
5b0a1a357500d0d6b742534dc94f997526a500d2 | Python | nlp-tlp/redcoat-annotations-processing | /process_annotations.py | UTF-8 | 822 | 2.59375 | 3 | [] | no_license | import json, csv
INPUT_FILE = "example_annotations.json"
OUTPUT_FILE_JSON = "output.json"
OUTPUT_FILE_CSV = "output.csv"
lines = []
with open(INPUT_FILE, 'r') as f:
for line in f:
lines.append(json.loads(line.strip()))
with open(OUTPUT_FILE_JSON, 'w') as f:
json.dump(lines, f)
with open(OUTPUT_FILE_CSV, 'w', newline='') as f:
writer = csv.writer(f)
max_mentions = max([len(line['mentions']) for line in lines])
writer.writerow(('doc_idx', 'tokens', *['mention_%d' % (i + 1) for i in range(max_mentions)])) # headings
for line in lines:
tokens = line['tokens']
mentions = []
for m in line['mentions']:
start = m['start']
end = m['end']
labels = m['labels']
mentions.append((start, end, " ".join(tokens[start:end]), labels))
writer.writerow((line['doc_idx'], " ".join(tokens), *mentions))
| true |
ea5d0a3189a67a790957dea33bea4d50d738c199 | Python | pf981/project-euler | /060_prime_pair_sets.py | UTF-8 | 2,160 | 3.71875 | 4 | [] | no_license | import collections
import sympy
from sympy.ntheory.primetest import isprime
MAX_PRIMES = 10000
TARGET_PAIRS = 5
def generate_valid_paths(tree):
    """
    This generates paths from a depth-first tree traversal such that the path
    is TARGET_PAIRS long and every element is adjacent to every other element

    *tree* maps a node to the set of nodes it is adjacent to (here: primes
    that concatenate with it to form a prime). Yields each qualifying path
    as a set of TARGET_PAIRS mutually adjacent nodes; a clique may be
    yielded more than once (callers de-duplicate via min()).
    """
    for node, _ in tree.items():
        # nodes_to_visit is a list of tuples. The first element of the tuple
        # is the node to visit. The second is a list representing the path
        # taken to get to that node.
        nodes_to_visit = [(node, set())]
        while nodes_to_visit:
            cur_node = nodes_to_visit.pop()
            cur_path = cur_node[1] | {cur_node[0]}
            for child in tree[cur_node[0]]:
                # If the child is not adjacent to every element in the path
                if not all(child in tree[path_node] for path_node in cur_path):
                    continue
                # If the child hasn't been visited and the child contains
                # every element in the path
                if child not in cur_path and tree[child] >= cur_path:
                    # Prepend the node and the path
                    nodes_to_visit.insert(0, (child, cur_path))
                    if len(cur_path) == TARGET_PAIRS:
                        yield cur_path
def concat_ints(a, b):
    """Concatenate the decimal digits of *a* and *b* into a single int."""
    joined = '{}{}'.format(a, b)
    return int(joined)
def is_cat_pair(pair):
return isprime(concat_ints(pair[0], pair[1]))
def main():
primes = list(sympy.sieve.primerange(2, MAX_PRIMES))
all_pairs = [(p1, p2)
for p1 in primes
for p2 in primes]
# paired_with maps a prime to a list of primes. This means that all the
# elements in the value can be appended to the key to form a prime
# paired_with[2] = [3, 11, 23] means that 23, 211 and 223 are all primes
paired_with = collections.defaultdict(set)
for p1, p2 in all_pairs:
if is_cat_pair((p1, p2)):
paired_with[p1].add(p2)
best_set = min(generate_valid_paths(paired_with), key=lambda x: sum(x))
answer = sum(best_set)
print(best_set)
print(answer)
if __name__ == '__main__':
main() | true |
c4fab90f2e24b704d7f8b904c224a87a6a86b4ec | Python | ToddDiFronzo/bwcs | /datasets/clean_master_hockey.py | UTF-8 | 790 | 2.96875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import csv
# import matplotlib.pyplot as plt
df = pd.read_csv(r'C:/Users/Todd/Desktop/python_learn/python_data_analytics/buildweek/datasets/Master_hockey.csv')
print(df.head())
print(df.columns)
df1 = (df[['weight', 'height', 'gender', 'sport']].copy())
print(df1.head())
print(df1.shape)
df1['gender'].fillna('Male', inplace =True)
df1['sport'].fillna('Hockey', inplace=True)
print(df1.head())
print(df1.columns)
# df1['id'] = range(1, 1+len(df1))
df1.insert(0, 'id', range(1,1+len(df1)))
print(df1.head(66))
df1.set_index('id', inplace=True)
print(df1.head())
print(df1.isna().sum())
df1 = df1.dropna()
print(df1.shape)
print(df1.isna().sum())
df1 = df1.round(1)
print(df1.head(20))
print(df1.describe())
df1.to_csv('hockey.csv') | true |
0315c76e4c31426f5a48e3772f32791001cfd249 | Python | CTSHEN/sciencedates | /sciencedates/__init__.py | UTF-8 | 7,825 | 3.09375 | 3 | [
"MIT"
] | permissive | from __future__ import division
import datetime
from pytz import UTC
import numpy as np
from dateutil.parser import parse
import calendar
import random
def datetime2yd(T):
"""
Inputs:
T: Numpy 1-D array of datetime.datetime OR string suitable for dateutil.parser.parse
Outputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER)
utsec: seconds from midnight utc
"""
T = forceutc(T)
if T is None:
return None,None
T = np.atleast_1d(T)
utsec= np.empty_like(T, float)
yd = np.empty_like(T, int)
for i,t in enumerate(T):
utsec[i] = dt2utsec(t)
yd[i] = t.year*1000 + int(t.strftime('%j'))
return yd.squeeze()[()], utsec.squeeze()[()]
def yd2datetime(yd,utsec=None):
"""
Inputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits)
outputs:
t: datetime
http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
"""
if yd is None:
return
# %%
yd = str(yd)
if len(yd) != 7:
raise ValueError('yyyyddd expected')
year = int(yd[:4])
assert 0 < year < 3000,'year not in expected format'
dt = forceutc(datetime.datetime(year, 1, 1) + datetime.timedelta(days=int(yd[4:]) - 1))
if utsec is not None:
dt += datetime.timedelta(seconds=utsec)
return dt
def date2doy(t):
    """Return (day-of-year, year) for datetime/date/str *t*; (None, None) for None.

    Day of year is 1-based: 1..365 for normal years, 1..366 for leap years
    (Dec 31 of a leap year is day 366).
    """
    if t is None:
        return None, None
    # %%
    # datetime2yd yields the combined yyyyddd integer; split it arithmetically.
    year, doy = divmod(datetime2yd(t)[0], 1000)
    # Bug fix: the previous bound (doy < 366) wrongly rejected day 366,
    # i.e. Dec 31 of any leap year, which strftime('%j') does produce.
    assert 0 < doy < 367, 'day of year must be 1 <= doy <= 366'
    return doy, year
def datetime2gtd(T, glon=np.nan):
"""
Inputs:
T: Numpy 1-D array of datetime.datetime OR string suitable for dateutil.parser.parse
glon: Numpy 2-D array of geodetic longitudes (degrees)
Outputs:
iyd: day of year
utsec: seconds from midnight utc
stl: local solar time
"""
if T is None:
return (None,)*3
# %%
T = np.atleast_1d(T)
glon= np.atleast_2d(glon)
iyd= np.empty_like(T, int)
utsec=np.empty_like(T, float)
stl = np.empty((T.size, glon.shape[0], glon.shape[1]))
for i,t in enumerate(T):
t = forceutc(t)
iyd[i] = int(t.strftime('%j'))
#seconds since utc midnight
utsec[i] = dt2utsec(t)
stl[i,...] = utsec[i]/3600 + glon/15 #FIXME let's be sure this is appropriate
return iyd, utsec, stl.squeeze()
#def dt2utsec(t: datetime) -> float:
def dt2utsec(t):
    """
    input: datetime
    output: float utc seconds since THIS DAY'S MIDNIGHT

    None passes through as None; a bare date (no time component) is
    treated as midnight and returns 0.
    """
    if t is None:
        return None
    # A date that is not a datetime has no time-of-day: 0 seconds elapsed.
    if isinstance(t,datetime.date) and not isinstance(t,datetime.datetime):
        return 0
    t = forceutc(t)
    # Elapsed time since this UTC day's midnight, as float seconds.
    return datetime.timedelta.total_seconds(t-datetime.datetime.combine(t.date(), datetime.time(0,tzinfo=UTC)))
def forceutc(t):
"""
Add UTC to datetime-naive and convert to UTC for datetime aware
input: python datetime (naive, utc, non-utc) or Numpy datetime64 #FIXME add Pandas and AstroPy time classes
output: utc datetime
"""
# need to passthrough None for simpler external logic.
if t is None:
return
#%% polymorph to datetime
if isinstance(t,str):
t = parse(t)
elif isinstance(t, np.datetime64):
t = t.astype(datetime.datetime)
elif isinstance(t, datetime.datetime):
pass
elif isinstance(t, datetime.date):
return t
elif isinstance(t,(np.ndarray,list,tuple)):
return np.asarray([forceutc(T) for T in t])
else:
raise TypeError('datetime only input')
#%% enforce UTC on datetime
if t.tzinfo is None: #datetime-naive
t = t.replace(tzinfo = UTC)
else: #datetime-aware
t = t.astimezone(UTC) #changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC
return t
"""
http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python
Authored by "unutbu" http://stackoverflow.com/users/190597/unutbu
In Python, go from decimal year (YYYY.YYY) to datetime,
and from datetime to decimal year.
"""
def yeardec2datetime(atime):
"""
Convert atime (a float) to DT.datetime
This is the inverse of datetime2yeardec.
assert dt2t(t2dt(atime)) == atime
"""
if atime is None:
return None
# %%
if isinstance(atime,(float,int)): #typically a float
year = int(atime)
remainder = atime - year
boy = datetime.datetime(year, 1, 1)
eoy = datetime.datetime(year + 1, 1, 1)
seconds = remainder * (eoy - boy).total_seconds()
T = forceutc(boy + datetime.timedelta(seconds=seconds))
elif isinstance(atime[0],float):
T = []
for t in atime:
T.append(yeardec2datetime(t))
else:
raise TypeError('expecting float, not {}'.format(type(atime)))
return T
def datetime2yeardec(t):
"""
Convert a datetime into a float. The integer part of the float should
represent the year.
Order should be preserved. If adate<bdate, then d2t(adate)<d2t(bdate)
time distances should be preserved: If bdate-adate=ddate-cdate then
dt2t(bdate)-dt2t(adate) = dt2t(ddate)-dt2t(cdate)
"""
if t is None:
return None
# %%
if isinstance(t,str):
t = parse(t)
t = forceutc(t)
year = t.year
if isinstance(t,datetime.datetime):
boy = datetime.datetime(year, 1, 1,tzinfo=UTC)
eoy = datetime.datetime(year + 1, 1, 1, tzinfo=UTC)
elif isinstance(t,datetime.date):
boy = datetime.date(year, 1, 1)
eoy = datetime.date(year + 1, 1, 1)
else:
raise TypeError('datetime input only')
return year + ((t - boy).total_seconds() / ((eoy - boy).total_seconds()))
#%%
def find_nearest(x, x0):
    """Locate the element of *x* nearest to each value of *x0*.

    Unlike a bisect-based lookup this does NOT require *x* to be sorted.

    inputs:
    x: array (float, int, datetime, h5py.Dataset) within which to search for x0
    x0: singleton or array of values to search for in x

    outputs:
    idx: index of flattened x nearest to x0 (works with >1-D arrays too)
    xidx: x[idx]
    """
    haystack = np.asanyarray(x)   # array form so fancy indexing works on return
    needles = np.atleast_1d(x0)
    if haystack.size == 0 or needles.size == 0:
        raise ValueError('empty input(s)')
    if needles.ndim not in (0, 1):
        raise ValueError('2-D x0 not handled yet')
    out = np.empty_like(needles, dtype=int)
    # None/NaN lookups are rejected outright rather than yielding a
    # surprising None result (also avoids masking all-NaN haystacks).
    for k, needle in enumerate(needles):
        is_time = isinstance(needle, (datetime.datetime, datetime.date, np.datetime64))
        if needle is None or not (is_time or np.isfinite(needle)):
            raise ValueError('x0 must NOT be None or NaN to avoid surprising None return value')
        out[k] = np.nanargmin(abs(haystack - needle))
    # [()] pops a 0-d array to a scalar while leaving ndim>0 arrays intact.
    return out.squeeze()[()], haystack[out].squeeze()[()]
def INCORRECTRESULT_using_bisect(x,X0): #pragma: no cover
X0 = np.atleast_1d(X0)
x.sort()
ind = [bisect(x,x0) for x0 in X0]
x = np.asanyarray(x)
return np.asanyarray(ind),x[ind]
if __name__ == '__main__':
from bisect import bisect
print(find_nearest([10,15,12,20,14,33],[32,12.01]))
print(INCORRECTRESULT_using_bisect([10,15,12,20,14,33],[32,12.01]))
#def randomdate(year:int) -> datetime:
def randomdate(year):
    """Return a uniformly random calendar date within *year*."""
    day_count = 366 if calendar.isleap(year) else 365
    offset = random.randrange(day_count)  # 0-based offset from Jan 1
    return datetime.date(year, 1, 1) + datetime.timedelta(days=offset)
| true |
487af4212b4291128432dc2f48192cc3703f4831 | Python | huangyingw/fastai_fastai | /dev_nbs/course/lesson4-tabular.py | UTF-8 | 1,247 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tabular models
from fastai.tabular.all import *
# Tabular data should be in a Pandas `DataFrame`.
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path / 'adult.csv')
dep_var = 'salary'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
# +
#test = TabularList.from_df(df.iloc[800:1000].copy(), path=path, cat_names=cat_names, cont_names=cont_names)
# -
splits = IndexSplitter(list(range(800, 1000)))(range_of(df))
# +
#splits = (L(splits[0], use_list=True), L(splits[1], use_list=True))
# -
to = TabularPandas(df, procs, cat_names, cont_names, y_names="salary", splits=splits)
dls = to.dataloaders(bs=64)
dls.show_batch()
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn.fit(1, 1e-2)
# ## Inference -> To do
row = df.iloc[0]
learn.predict(row)
| true |
e78800afc57578caf9c95b1124828454b72dbe56 | Python | gilgameshzzz/learn | /day10Python_pygame/day10-管理系统/system/04-显示图形.py | UTF-8 | 1,667 | 3.578125 | 4 | [] | no_license | """__author__ = 余婷"""
import pygame
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((600, 400))
screen.fill((255, 255, 255))
"""
1.画直线
line(Surface, color, start_pos, end_pos, width=1)
Surface -> 画在哪个地方
color -> 线的颜色
start_pos -> 起点
end_pos -> 终点
width -> 线的宽度
"""
pygame.draw.line(screen, (255, 0, 0), (78, 59), (100, 100), 2)
pygame.draw.line(screen, (0, 255, 0), (0, 0), (130, 100), 2)
"""
lines(画线的位置, 颜色, closed, 点的列表, width=1)
"""
pygame.draw.lines(screen, (0, 0, 255), True, [(10, 10), (200, 50), (100, 100)])
"""
画矩形
rect(位置,颜色,(x,y,width,height))
"""
pygame.draw.rect(screen,(255,255,0),(0,0,200,200),2)
"""
2.画曲线
arc(Surface, color, Rect, start_angle, stop_angle, width=1)
Rect -> (x, y, width, height)矩形
start_angle
stop_angle
"""
from math import pi
pygame.draw.arc(screen, (0, 0, 0), (0, 0, 100, 200), pi+pi/4, pi*2-pi/4)
"""
3.画圆
circle(位置, 颜色, 圆心位置, 半径, width=0)
"""
import random
pygame.draw.circle(screen,\
(random.randint(0,255),random.randint(0,255),random.randint(0,255)),\
(400,200),100)
"""
画椭圆
ellipse(Surface, color, Rect, width=0)
"""
pygame.draw.ellipse(screen, (0, 100, 0), (100, 300, 200, 80), 1)
# 将内容展示在屏幕上
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit() | true |
756464381ee3fbafc1f9cf72f06a22961f1e4746 | Python | heiimzy/zhihuspider | /Zhihuspider.py | UTF-8 | 1,009 | 2.953125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
class spider():
    """Minimal page scraper: fetch a URL, parse it, dump its <p> tags to disk."""

    @staticmethod
    def get_url(url):
        """Fetch *url* and return the response body as text."""
        # Browser-like User-Agent so the site does not reject the request.
        headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36' }
        r = requests.get(url, headers=headers)
        return r.text

    @staticmethod
    def prase_url(res):
        """Parse an HTML string into a BeautifulSoup tree. (name kept as-is)"""
        soup = BeautifulSoup(res, "html.parser")
        return soup

    @staticmethod
    def write_file(sp):
        """Write every <p> element of parsed page *sp* to E:\\down\\<title>.txt.

        Best effort: an unopenable filename or unencodable paragraph is
        skipped silently, as before.
        """
        name = str(sp.title.string)
        filename = "E:\\down\\" + name + ".txt"
        try:
            f = open(filename, 'w+')
        except OSError:
            pass  # e.g. title contains characters illegal in a filename
        else:
            # 'with' guarantees the handle is closed even if a write raises
            # (the original leaked the handle on an unexpected error).
            with f:
                for child in sp.find_all("p"):
                    child = str(child)
                    try:
                        f.write(child)
                    except UnicodeEncodeError:
                        pass  # skip text the platform encoding cannot represent
                    else:
                        f.write('\n')

    @staticmethod
    def start(url):
        """Convenience pipeline: download, parse and persist one page."""
        res = spider.get_url(url)
        spider.write_file(spider.prase_url(res))
| true |
02f80e996e5628c0935e6e873f1a2b837b6b0f04 | Python | tx2016/Self-Driving_Car-Nanodegree-Projects | /CarND-Advanced-Lane-Lines/thresh.py | UTF-8 | 4,726 | 2.953125 | 3 | [] | no_license | import pickle
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("camera_cal/wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Undistort the image
def undistort(img):
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
dst = cv2.undistort(img, mtx, dist, None, mtx)
return dst
# Define a function that takes an image, gradient orientation,
# and threshold min / max values.
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0,255)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply the threshold
grad_binary = np.zeros_like(scaled_sobel)
# Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
# Return the result
return grad_binary
# Define a function to return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
mag_binary = np.zeros_like(gradmag)
mag_binary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
# Return the binary image
return mag_binary
# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
dir_binary = np.zeros_like(absgraddir)
dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# Return the binary image
return dir_binary
# Define a function that thresholds the S-channel of HLS
def hls_select(img, thresh=(0, 255)):
    """Binary mask of pixels whose HLS saturation lies in (thresh[0], thresh[1]].

    Returns an array shaped like one image channel with 1 where the
    S-channel passes the threshold, 0 elsewhere.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Channel 2 of HLS is saturation.
    s_channel = hls[:,:,2]
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1
    return s_binary
def color_mask(img):
    """Binary mask of pixels that look yellow or white in HSV space (lane colours)."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Broad yellow band (hue 0-100, reasonably saturated and bright).
    mask_yellow = cv2.inRange(hsv, np.array([0, 100, 100]), np.array([100, 255, 255]))
    # "White" = very bright and nearly unsaturated; 45 is the original tolerance.
    sensitivity = 45
    mask_white = cv2.inRange(hsv,
                             np.array([0, 0, 255 - sensitivity]),
                             np.array([255, sensitivity, 255]))
    # 1 wherever either colour mask fires; keep the input's channel dtype.
    combined = np.zeros_like(img[:, :, 0])
    combined[(mask_yellow != 0) | (mask_white != 0)] = 1
    return combined
def thresh_pipeline(image, ksize=15, min_mag=50, max_mag=220, min_dir=0.7, max_dir=1.20):
    """Combine the colour and gradient masks: colour AND (direction OR magnitude)."""
    magnitude_mask = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(min_mag, max_mag))
    direction_mask = dir_threshold(image, sobel_kernel=ksize, thresh=(min_dir, max_dir))
    colour = color_mask(image)
    keep = (colour == 1) & ((direction_mask == 1) | (magnitude_mask == 1))
    # Same dtype as the magnitude mask, like the original zeros_like result.
    return keep.astype(magnitude_mask.dtype)
| true |
79fdf9566f700bc67e4317dcb53772c565a28316 | Python | spoorthi33/computer-networks_assign-2 | /server.py | UTF-8 | 3,343 | 2.546875 | 3 | [] | no_license | import os
import time
import pickle
import socket
from library import *
# Bind a UDP socket on this host's primary address.
server_hostname = socket.gethostname()
server_ip = socket.gethostbyname(server_hostname)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((server_ip, SERVER_PORT))
print(f'server started listening on {server_ip} at port {SERVER_PORT}')

# Main request loop: each datagram is a pickled packet dict with an 'opcode'.
# NOTE(review): unpickling network data trusts the peer entirely -- acceptable
# only because this is a course assignment protocol.
while True:
    data, client = sock.recvfrom(MAX_PACKET_SIZE)
    packet = pickle.loads(data)
    client_ip = client[0]
    CLIENT_PORT = client[1] # overwrite defined CLIENT_PORT constant based on socket
    if packet['opcode'] == CONNECTION_ESTABLISHMENT_REQUEST:
        # Simple handshake: acknowledge and wait for the next request.
        ack_packet = create_packet(ACKNOWLEDGEMENT, "CONNECTION_ESTABLISHMENT_REQUEST confirmed", SERVER_PORT, CLIENT_PORT, packet['FileName'], None)
        send_packet(ack_packet, sock, client_ip, CLIENT_PORT)
        continue
    elif packet['opcode'] == FILE_UPLOAD_REQUEST: # receive file
        ack_packet = create_packet(ACKNOWLEDGEMENT, "FILE_UPLOAD_REQUEST confirmed", SERVER_PORT, CLIENT_PORT, packet['FileName'], None)
        send_packet(ack_packet, sock, client_ip, CLIENT_PORT)
        # DataBody carries the expected packet count for the incoming file.
        receive_file(packet['FileName'], sock, SERVER_PORT, client_ip, CLIENT_PORT, int(packet['DataBody']))
    elif packet['opcode'] == FILE_DOWNLOAD_REQUEST: # send requested file to client
        if not os.path.exists(packet['FileName']):
            print(f'received download requested for a file that does not exist: {packet["FileName"]}')
            # BUG FIX: without this `continue`, os.path.getsize below raised
            # FileNotFoundError and crashed the whole server.
            continue
        download_file_size = os.path.getsize(packet['FileName'])
        # Round the packet count up for a partial final chunk.
        number_of_packets = download_file_size // CHUNK_SIZE
        if download_file_size % CHUNK_SIZE != 0:
            number_of_packets += 1
        download_start_time = time.time()
        # Tell the client how many packets to expect, then stream the file
        # on one thread while collecting ACKs on another.
        ack_packet = create_packet(ACKNOWLEDGEMENT, f"{number_of_packets}", SERVER_PORT, CLIENT_PORT, packet['FileName'], None)
        send_packet(ack_packet, sock, client_ip, CLIENT_PORT)
        thread_send_file = threading.Thread(target=send_file, args=(packet['FileName'], sock, SERVER_PORT, client_ip, CLIENT_PORT))
        thread_receive_acks = threading.Thread(target=receive_acks, args=(sock, SERVER_PORT, client_ip, CLIENT_PORT, number_of_packets))
        thread_send_file.start()
        thread_receive_acks.start()
        thread_send_file.join()
        thread_receive_acks.join()
        download_end_time = time.time()
        download_total_time = download_end_time - download_start_time
        throughput = (download_file_size * 8)/download_total_time
        print(f'throughput: {throughput} bits per second')
        print(f'time taken for download: {download_total_time} seconds')
    elif packet['opcode'] == CONNECTION_TERMINATION_REQUEST:
        # Acknowledge the teardown and stop serving.
        ack_packet = create_packet(ACKNOWLEDGEMENT, "CONNECTION_TERMINATION_REQUEST confirmed", SERVER_PORT, CLIENT_PORT, packet['FileName'], None)
        send_packet(ack_packet, sock, client_ip, CLIENT_PORT)
        break
    else:
        print(f'received packet: {packet}')
sock.close()
| true |
ce46f73a0611dc2cbe997b7d73eb3be4841232e0 | Python | Nehajha99/Python | /loop.py/loop_18.py | UTF-8 | 64 | 2.578125 | 3 | [] | no_license | c=156
# NOTE(review): `c` is initialised to 156 on the line above, so `c <= 10`
# is False immediately and this loop body never executes.
while c<=10:
    if c-155:  # truthy for any c other than 155
        # NOTE(review): `z` is never defined anywhere in this script --
        # this line would raise NameError if it were ever reached.
        print(z)
    c=c+1
| true |
2d59003405c494c0e72f2a55009033595dc8b2a4 | Python | harishramuk/python-handson-exercises | /382.read data from txt file.py | UTF-8 | 75 | 2.640625 | 3 | [] | no_license | file = open('GSP.txt','r')
# read(-1) reads the remainder of the file in one go (same as read()).
data = file.read(-1)
print(data)
file.close() | true |
411aa64ba4e667315026ecdb7150ee8907b820fa | Python | madhuprakash19/python | /even_odds_new.py | UTF-8 | 212 | 2.875 | 3 | [] | no_license | import math
# Read "n k" from one line.  Conceptually the numbers 1..n are listed
# odds-first (1, 3, 5, ..., then 2, 4, 6, ...); print the k-th entry.
a=[int(i) for i in input().split()]
#print(a)
n=a[0]
k=a[1]
# There are ceil(n/2) odd numbers, so the first half of the list is odd.
if k<=math.ceil(n/2):
    # k-th odd number.
    print((k*2)-1)
else:
    # k lands in the even tail; count back from n.
    if n%2==0:
        print(n-((n-k)*2))
    else:
        # n is odd, so the largest even number is n-1: shift by one more.
        print((n-((n-k)*2))-1)
| true |
238e4481c6dadce5d4f3bdf4888bd4cfc233d936 | Python | yinccc/leetcodeEveryDay | /221-20190523-Maximal Square.py | UTF-8 | 1,762 | 3.015625 | 3 | [] | no_license | matrix=[["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]
# Scratch version of the maximal-square DP (the Solution class below is the
# cleaned-up form).  dp[i][j] = side length of the largest all-'1' square
# whose bottom-right corner is at (i, j).
dp=[[0 for x in range(len(matrix[0]))] for y in range(len(matrix))]
print(len(matrix),len(matrix[0]))
print(len(dp),len(dp[0]))
maxNumber=0
def ThreeMin(i, j, k):
    """Return the smallest of the three values, coerced to int."""
    return min(min(int(i), int(j)), int(k))
# Seed the first column/row: a lone '1' is a square of side 1.
# BUG FIX: store ints in dp (the original stored the '0'/'1' strings).
for i in range(len(matrix)):
    dp[i][0]=int(matrix[i][0])
    if matrix[i][0]=='1':
        maxNumber=1
for j in range(len(matrix[0])):
    dp[0][j]=int(matrix[0][j])
    if matrix[0][j]=='1':
        maxNumber=1
for i in range(1,len(matrix)):
    for j in range(1,len(matrix[0])):
        # BUG FIX: a '0' cell cannot end a square; the original grew dp anyway.
        if matrix[i][j]=='0':
            continue
        dp[i][j]=ThreeMin(dp[i-1][j-1],dp[i-1][j],dp[i][j-1])+1
        if dp[i][j]>maxNumber:
            maxNumber=dp[i][j]
# maxNumber is the side length of the best square found.
print(maxNumber)
class Solution(object):
    """LeetCode 221: area of the largest square of '1's in a binary matrix."""

    def ThreeMin(self, i, j, k):
        """Return the smallest of three values after coercing each to int."""
        return min(int(i), int(j), int(k))

    def maximalSquare(self, matrix):
        """Dynamic programming over square side lengths.

        side[r][c] holds the side of the biggest all-'1' square whose
        bottom-right corner sits at (r, c); the answer is the best side,
        squared.

        :type matrix: List[List[str]]
        :rtype: int
        """
        if not matrix:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        side = [[0] * cols for _ in range(rows)]
        best = 0
        # First column and first row: a cell is a square of side 1 by itself.
        for r in range(rows):
            side[r][0] = int(matrix[r][0])
            if side[r][0] == 1:
                best = 1
        for c in range(cols):
            side[0][c] = int(matrix[0][c])
            if side[0][c] == 1:
                best = 1
        # Interior: a '1' extends the smallest of its three neighbouring
        # squares (above, left, diagonal) by one.
        for r in range(1, rows):
            for c in range(1, cols):
                if matrix[r][c] == '0':
                    continue
                side[r][c] = self.ThreeMin(side[r - 1][c - 1],
                                           side[r][c - 1],
                                           side[r - 1][c]) + 1
                best = max(best, side[r][c])
        return best * best
s=Solution()
print(s.maximalSquare(matrix)) | true |
1c4d641edb41403aba4094d23a3fb691b4c7504e | Python | syurskyi/Python_Topics | /125_algorithms/_exercises/templates/_algorithms_challenges/algorithm-master/leetcode/416_partition_equal_subset_sum.py | UTF-8 | 649 | 3 | 3 | [] | no_license | """
REF: https://leetcode.com/problems/partition-equal-subset-sum/discuss/90592
`dp[s]` means the specific sum `s` can be gotten from the sum of subset in `nums`
"""
# NOTE(review): this file is an exercise *template* -- keywords and builtins
# are abbreviated (c_ = class, ___ = def/for, __ = if/in, r.. = return,
# T../F.. = True/False, etc.) and it is not runnable Python as written.
# The intended algorithm is the classic subset-sum DP for LeetCode 416.
c_ Solution:
    ___ canPartition nums
        """
        :type nums: List[int]
        :rtype: bool
        """
        # An empty list trivially partitions into two empty halves.
        __ n.. nums:
            r.. T..
        # An odd total can never be split into two equal-sum subsets.
        target s..(nums)
        __ target & 1 __ 1:
            r.. F..
        target //= 2
        # dp[s] is True iff some subset of the numbers seen so far sums to s.
        dp [F..] * (target + 1)
        dp[0] T..
        ___ a __ nums:
            # Iterate sums downwards so each number is used at most once.
            ___ s __ r..(target, a - 1, -1
            __ dp[s]:
                _____
            dp[s] dp[s - a]
        r.. dp[target]
| true |
6590285aa049fd205fdbfb2086a26af194082538 | Python | wxx17395/Leetcode | /python code/题库/2. 两数相加.py | UTF-8 | 1,485 | 3.046875 | 3 | [] | no_license | class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two numbers stored as reversed-digit linked lists, in place.

        Mutates ``l1``'s nodes to hold the digit sums (splicing ``l2``'s tail
        onto ``l1`` when ``l2`` is longer) and returns the head of ``l1``.

        NOTE(review): this definition is shadowed by the second
        ``addTwoNumbers`` below -- in a class body the later ``def`` with the
        same name wins, so this version is never actually used.
        """
        rList = l1
        addflag = 0      # pending carry from the previous digit
        returnflag = 0   # set once either list is on its last node
        while 1:
            if not l1.next or not l2.next:
                returnflag = 1
            addresult = l1.val + l2.val
            if addflag:
                addresult += 1
                addflag = 0
            if addresult > 9:
                addresult -= 10
                addflag = 1
            l1.val = addresult
            if returnflag:
                break
            l1 = l1.next
            l2 = l2.next
        # If l2 still has digits left, splice its remainder onto l1.
        if not l1.next and l2.next:
            l1.next = l2.next
        # Propagate any remaining carry along the (possibly spliced) tail,
        # appending one extra node if the carry runs off the end.
        while addflag == 1:
            if not l1.next:
                newListNode = ListNode(0)
                l1.next = newListNode
                addflag = 0
            l1=l1.next
            tempvar = l1.val + 1
            if tempvar > 9:
                l1.val = tempvar - 10
            else:
                l1.val = tempvar
                addflag = 0
        return rList
    def addTwoNumbers(self, l1, l2):
        """Standard solution: build a fresh result list digit by digit.

        Digits are stored least-significant first, so a single pass with a
        running carry suffices.  This later definition replaces the in-place
        version above.
        """
        re = ListNode(0)   # dummy head; the real result starts at re.next
        r = re
        carry = 0
        while (l1 or l2):
            # Treat a missing node as digit 0 so the lists may differ in length.
            x = l1.val if l1 else 0
            y = l2.val if l2 else 0
            s = carry + x + y
            carry = s // 10
            r.next = ListNode(s % 10)
            r = r.next
            if (l1 != None): l1 = l1.next
            if (l2 != None): l2 = l2.next
        # A leftover carry means one extra leading digit (always 1).
        if (carry > 0):
            r.next = ListNode(1)
return re.next | true |
b191a7bcadb345cb6c16fa9885b6b8898dd34a1b | Python | GabrielEstevam/icpc_contest_training | /uri/uri_python/string/p2174.py | UTF-8 | 146 | 3.0625 | 3 | [] | no_license | N = int(input())
# Collect the N names read from stdin (N comes from the line above).
lista = []
for n in range(N):
    lista.append(input())
# Deduplicate: only distinct pomekon count towards the 151 total.
lista = list(set(lista))
print("Falta(m)", 151-len(lista), "pomekon(s).") | true |
ea403d95e331a6bb942ffbf6ff6d9075325b7628 | Python | scoriiu/doc_parser | /tests/test_doc_parser.py | UTF-8 | 1,633 | 2.65625 | 3 | [] | no_license | import json
import os
import pytest
import hashlib
from parser.parser import parse_nb_patients, convert_pdf_to_txt, match_area_of_interest, parse_study_year_range
# Absolute directory of this test file; used to locate the data/ fixtures.
script_dir = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope='module')
def reference_text():
    """Load the JSON reference data once for the whole test module."""
    # BUG FIX: the file handle was opened and never closed; a context
    # manager releases it as soon as the JSON is parsed.
    with open(f'{script_dir}/data/text.json') as text:
        return json.load(text)
def test_patients_nb(reference_text):
    """parse_nb_patients must recover the patient count from each abstract."""
    for k, v in reference_text.items():
        nb_patients = parse_nb_patients(v['abstract'])[1]
        # BUG FIX: `assert nb_patients, v['patients']` only checked
        # truthiness (the reference value was just the failure message);
        # compare against the expected count instead.
        assert nb_patients == v['patients']
# Golden-file expectations: (sample PDF, SHA-256 of the matched text,
# (start, end) character offsets of the match).
@pytest.mark.parametrize("pdf_filename, m_hash, loc_range", [
    (
        'doc1.pdf',
        '45efc5d85f3c824adce6cfaf166870fb3a5628bbc42f362eef5ba03399efa311',
        (1843, 10664)
    ),
    (
        'doc2.pdf',
        'd052409c48757e6f211b659e33b60f49d15c9576227cdd1181c78d3514688f87',
        (2184, 13738)
    ),
    (
        'doc3.pdf',
        '6d8d51c97fe0e6ec9add63da7f394f6ff1510c8f912eb40c6ed4d2d38120a286',
        (3011, 15710)
    ),
])
def test_match_area_of_interest(pdf_filename, m_hash, loc_range):
    """The matched region of each sample PDF must keep the same location
    and SHA-256 digest as when the expectations were recorded."""
    text = convert_pdf_to_txt(f'{script_dir}/data/{pdf_filename}')
    # Strip line endings so offsets/hashes are platform-independent.
    text = text.replace('\n', '').replace('\r', '')
    text_matched, is_matched, loc = match_area_of_interest(text)
    assert is_matched
    assert loc == loc_range
    assert hashlib.sha256(text_matched.encode('utf-8')).hexdigest() == m_hash
def test_study_date(reference_text):
    """Each document's parsed study period must match the reference years."""
    for k, v in reference_text.items():
        period_of_study = parse_study_year_range(v['materials'], v['abstract'])[0]
        # The JSON reference stores the range as a list; the parser returns a tuple.
        assert period_of_study == tuple(v['period_of_study'])
| true |
1e822f185bf6094ff6ed9555975146af1da2feb4 | Python | bkersteter/cloud-native-demo | /cloud-native-demo/python/tweet_loader.py | UTF-8 | 2,632 | 3.0625 | 3 | [] | no_license | # tweet_loader.py
#
# Demo python scipt to read tweets from Kafka and load them
# into a local Postgres database
#
#
# Bart Kersteter - bkersteter@gmail.com
#
# 03/11/2018 Initial
#
from kafka import KafkaConsumer, KafkaClient
import psycopg2
import json
from io import StringIO
###############################################################################
# Set up Postgres Connection - Assumes localhost & default port
###############################################################################
# NOTE(review): connects with libpq defaults (local socket, default port);
# assumes a local Postgres with a "bjk" database and user -- confirm in deploy docs.
conn = psycopg2.connect("dbname=bjk user=bjk")
# Test Connection to make sure everything works
#cur = conn.cursor()
#cur.execute("CREATE TABLE IF NOT EXISTS test (id serial PRIMARY KEY, num integer, data varchar);")
#cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)",(100, "abc'def"))
#cur.execute("SELECT * FROM test;")
#foo = cur.fetchone()
#(1, 100, "abc'def")
#print foo
#conn.commit()
#cur.close()
#conn.close()
###############################################################################
# Set up Kafka Consumer
###############################################################################
# Consume JSON tweet payloads from the local Kafka broker.
consumer = KafkaConsumer(
    bootstrap_servers='localhost:9092',
    auto_offset_reset='smallest', # Reset partition offsets upon OffsetOutOfRangeError
    group_id='tweet_test', # must have a unique consumer group id
    value_deserializer=lambda m: json.loads(m.decode('utf-8')))
    #consumer_timeout_ms=10000)
# How long to listen for messages - we do it for 10 seconds
# because we poll the kafka broker only each couple of hours
consumer.subscribe(topics='tweets')
# Start reading messages from Kafka & insert them into postgres
# NOTE(review): `print consumer.topics()` below is a Python 2 statement --
# this script targets Python 2 and will not run unmodified under Python 3.
cur=conn.cursor()
#consumer.seek_to_beginning()
print consumer.topics()
num_tweets = 0
#csv_buffer = StringIO()
# Each Kafka message value is a ';;;'-separated record; insert one row per tweet.
for msg in consumer:
    #csv_buffer.write(message.value.decode() + '\n')
    #print (msg.value.decode())
    tweet_userid,tweet_create_timestamp,tweet_followers_count,tweet_location,tweet_favorite_count,tweet_retweet_count,tweet_text,empty = msg.value.decode().split(';;;')
    print (tweet_text)
    #slow but it works
    # Parameterised query: values are passed separately, so no SQL injection.
    SQL = "INSERT INTO tweets VALUES (%s, %s, %s, %s, %s, %s, %s)"
    data = (tweet_userid,tweet_create_timestamp,tweet_followers_count,tweet_location,tweet_favorite_count,tweet_retweet_count,tweet_text)
    cur.execute(SQL, data)
    # Commit per message: simple and safe, though batching would be faster.
    conn.commit()
    num_tweets += 1
# Make sure we clean up before exiting
conn.close()
print("Processed {0} tweets".format(num_tweets))
| true |
355d59bb7a5b323c9e4da8aad4ad7747aac097e7 | Python | MaximeDaigle/Low-Resource-Machine-Translation | /evaluator.py | UTF-8 | 4,845 | 2.828125 | 3 | [] | no_license | import argparse
import subprocess
import tempfile
import sentencepiece as spm
import tensorflow as tf
import os
import itertools
from nmt.nmt_seq2seq import predict, load_ids, Encoder, DecoderNetwork, max_len
def generate_predictions(input_file_path: str, pred_file_path: str):
    """Generates predictions for the machine translation task (EN->FR).

    You are allowed to modify this function as needed, but one again, you cannot
    modify any other part of this file. We will be importing only this function
    in our final evaluation script. Since you will most definitely need to import
    modules for your code, you must import these inside the function itself.
    Args:
        input_file_path: the file path that contains the input data.
        pred_file_path: the file path where to store the predictions.
    Returns: None
    """
    # Model hyper-parameters must match the ones used at training time.
    BATCH_SIZE = 128
    embedding_dims = 256
    rnn_units = 1024
    dense_units=1024
    # Load SentencePieceProcessors (byte-pair-encoding vocabularies).
    sp_en = spm.SentencePieceProcessor()
    sp_en.load('../model/en_bpe2.model')
    sp_fr = spm.SentencePieceProcessor()
    sp_fr.load('../model/fr_bpe2.model')
    input_vocab_size = sp_en.get_piece_size()
    output_vocab_size = sp_fr.get_piece_size()
    # Read and tokenise the input file.
    tokens = load_ids(sp_en, input_file_path)
    #padded_data = tf.keras.preprocessing.sequence.pad_sequences(tokens, padding='post')
    max_length = max_len(tokens)
    # Initialise the seq2seq model.
    encoderNetwork = Encoder(input_vocab_size, embedding_dims, rnn_units, BATCH_SIZE)
    decoderNetwork = DecoderNetwork(output_vocab_size, embedding_dims, rnn_units, max_length, dense_units=dense_units, batch_size=BATCH_SIZE)
    optimizer = tf.keras.optimizers.Adam()
    # Restore the latest training checkpoint.
    checkpointdir = "../model/sentencepiece_nmt_biLSTM_back"
    if not os.path.exists(checkpointdir):
        os.mkdir(checkpointdir)
    checkpoint = tf.train.Checkpoint(optimizer = optimizer, encoderNetwork = encoderNetwork,
    decoderNetwork = decoderNetwork)
    try:
        status = checkpoint.restore(tf.train.latest_checkpoint(checkpointdir))
        print("Checkpoint found at {}".format(tf.train.latest_checkpoint(checkpointdir)))
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # narrow it to ordinary exceptions.
    except Exception:
        print("No checkpoint found at {}".format(checkpointdir))
    # Run inference batch by batch.
    predictions = []
    batches = [tokens[i:i+BATCH_SIZE] for i in range(0,len(tokens),BATCH_SIZE)]
    for i in batches:
        pred = predict(i, encoderNetwork, decoderNetwork, max_length, rnn_units)
        for p in pred:
            # Token id 2 is the end-of-sentence marker: keep everything before it.
            seq = list(itertools.takewhile( lambda index: index !=2, p.tolist()))
            predictions.append(sp_fr.decode_ids(seq))
            #print("Model output:", predictions[-1])
    # Write one decoded prediction per line.
    with open(pred_file_path, 'w', encoding="utf-8") as anwsers:
        for pred in predictions:
            anwsers.write(pred + "\n")
##### MODIFY ABOVE #####
def compute_bleu(pred_file_path: str, target_file_path: str, print_all_scores: bool):
    """Run sacrebleu over the prediction/target files and print the result.

    Args:
        pred_file_path: the file path that contains the predictions.
        target_file_path: the file path that contains the targets (references).
        print_all_scores: if True, will print one score per example.

    Returns: None
    """
    command = [
        "sacrebleu", "--input", pred_file_path, target_file_path,
        "--tokenize", "none", "--sentence-level", "--score-only",
    ]
    completed = subprocess.run(command, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, text=True)
    # sacrebleu emits one score per line; splitting leaves a trailing empty
    # element, hence the [:-1].
    per_sentence = completed.stdout.split('\n')[:-1]
    if print_all_scores:
        print('\n'.join(per_sentence))
    else:
        scores = [float(line) for line in per_sentence]
        print('final avg bleu score: {:.2f}'.format(sum(scores) / len(scores)))
def main():
    """CLI entry point: parse args, optionally run the model, score with BLEU."""
    parser = argparse.ArgumentParser('script for evaluating a model.')
    parser.add_argument('--target-file-path', help='path to target (reference) file', required=True)
    parser.add_argument('--input-file-path', help='path to input file', required=True)
    parser.add_argument('--print-all-scores', help='will print one score per sentence',
                        action='store_true')
    parser.add_argument('--do-not-run-model',
                        help='will use --input-file-path as predictions, instead of running the '
                             'model on it',
                        action='store_true')
    args = parser.parse_args()
    if args.do_not_run_model:
        # The input file already contains predictions: score it directly.
        compute_bleu(args.input_file_path, args.target_file_path, args.print_all_scores)
    else:
        # Run the model into a temporary file, then score that.
        # NOTE(review): mkstemp leaves an open fd behind (ignored here).
        _, pred_file_path = tempfile.mkstemp()
        generate_predictions(args.input_file_path, pred_file_path)
        compute_bleu(pred_file_path, args.target_file_path, args.print_all_scores)


if __name__ == '__main__':
    main()
| true |
f32f9eba11bbed22b55f934876e14a38c20a76fe | Python | thaolinhnp/Python_Advanced | /Bai1_DEMO_OOP/Bai3.py | UTF-8 | 786 | 3.40625 | 3 | [] | no_license | class QuanLyCD():
    def __init__(self, tenCD, caSy, soBH, giaThanh):
        """One CD record: title, singer, number of songs, and price.

        Field names are Vietnamese, matching the input prompts below
        ('Ten CD', 'Ca sy', 'So bai hat', 'Gia thanh').
        """
        self.tenCD = tenCD        # CD title
        self.caSy = caSy          # singer
        self.soBH = soBH          # number of songs
        self.giaThanh = giaThanh  # price
if __name__ == "__main__":
    # dsCD: list of the CD records entered so far.
    dsCD = []
    tt = 1  # sentinel: 1 = keep prompting for another CD
    while tt == 1:
        tenCD = str(input('Ten CD:'))
        caSy = str(input('Ca sy:'))
        soBH = int(input('So bai hat:'))
        # SECURITY NOTE(review): eval() on raw user input executes arbitrary
        # code -- float(input(...)) would be the safe way to read a price.
        giaThanh = eval(input('Gia thanh:'))
        CD = QuanLyCD(tenCD,caSy,soBH,giaThanh)
        dsCD.append(CD)
        tt = int(input('Tiep tuc nhap (1: co, 0: khong):'))
    # Print each CD and the total price of the collection.
    if len(dsCD):
        tongGiaThanh = 0
        for item in dsCD:
            print(item.tenCD,' - ',item.caSy,' - ',item.soBH,' - ',item.giaThanh)
            tongGiaThanh += item.giaThanh
        print('Tong Gia Thanh :',tongGiaThanh)
| true |
99d2b682f34cb2c3c1f430271a869b2b5b33a4fa | Python | ISISComputingGroup/ibex_utils | /installation_and_upgrade/ibex_install_utils/logger.py | UTF-8 | 1,049 | 2.9375 | 3 | [] | no_license | import os
import sys
import time
class Logger:
    """
    Logger class used to capture output and input to a log file.

    Acts as a tee: everything written to stdout/stderr (and read from stdin)
    is mirrored into a dated deploy log file.  Install it with set_up().
    """
    def __init__(self):
        # Build C:\Instrument\var\logs\deploy\DEPLOY-YYYYMMDD.log and open it
        # in append mode.  The handle is deliberately kept open for the whole
        # process lifetime (it is never closed).
        CURRENT_DATE = time.strftime("%Y%m%d")
        LOG_FILE = f"DEPLOY-{CURRENT_DATE}.log"
        LOG_DIRECTORY = os.path.join("C:\\", "Instrument", "var", "logs", "deploy")
        os.makedirs(LOG_DIRECTORY, exist_ok=True)
        LOG_PATH = os.path.join(LOG_DIRECTORY, LOG_FILE)
        # Keep references to the real console streams so output still appears.
        self.console = sys.stdout
        self.input = sys.stdin
        self.log = open(LOG_PATH, "a")
        print(f"Log file is {LOG_PATH}")
    def write(self, message):
        """Write to both the log file and the original console stream."""
        self.log.write(message)
        return self.console.write(message)
    def flush(self):
        """Flush both underlying streams (required by the file protocol)."""
        self.console.flush()
        self.log.flush()
    def readline(self):
        """Read a line from real stdin, echoing it into the log file."""
        text = self.input.readline()
        self.log.write(text)
        return text
    @staticmethod
    def set_up():
        """Create a Logger and install it as sys.stdout/stderr/stdin."""
        logger = Logger()
        sys.stdout = logger
        sys.stderr = logger
        sys.stdin = logger
| true |
d0baec1bbd2c2c15e9a73918a1ab9ff840f740ca | Python | seboldt/ListaDeExercicios | /EstruturaDeDecisao/25-assassinato.py | UTF-8 | 656 | 3.609375 | 4 | [] | no_license | print('Depoimento \nResponda apenas s ou n')
# Count how many of the five questions are answered 's' (sim/yes).
r1 = input('Telefonou p/ a vitima ? \n')
classificacao = 0
if r1 == 's':
    classificacao += 1
r2 = input('Esteve no local do crime ?\n')
if r2 == 's':
    classificacao += 1
r3 = input('Mora perto da Vitima ? \n')
if r3 == 's':
    classificacao += 1
r4 = input('Devia para a vitima ? \n')
if r4 == 's':
    classificacao += 1
r5 = input('Já trabalhou com a vitima ? \n')
if r5 == 's':
    classificacao += 1
# Classify by the number of "yes" answers:
#   2 -> suspect, 3-4 -> accomplice, 5 -> murderer, otherwise innocent.
if classificacao == 2:
    print('Suspeito')
elif classificacao >=3 and classificacao <=4:
    print('Cumplice')
elif classificacao == 5:
    print('Assassino')
else:
    print('Inocente')
| true |
921ec85c5832fa915558a3602e96c98136d6546e | Python | solomonchild/pascal_mini_compiler | /pascal_parser/parser.py | UTF-8 | 2,806 | 3.078125 | 3 | [] | no_license | from .lexer import *
#<G> ::= <S>
#<S> ::= if <E> then <S> | <ID> := <STRING> ;
#<E> ::= <ID> <OP> <ID> | <ID> <OP> <STRING> | (<E>) and (<E>)
#<ID> ::= [a-zA-Z_][a-zA-Z0-9_]*
#<OP> ::= - | + | * | /
class Parser:
    """Recursive-descent parser with backtracking for the mini grammar above.

    The token stream is consumed through an index (token_n); a single saved
    position (self.saved) supports one level of backtracking per rule.  The
    grammar rules E and S are written as chained `and`/`or` expressions, so
    correctness depends on Python's left-to-right short-circuit evaluation.
    """
    def __init__(self, lexer):
        self.lexer = lexer
        self.tokens = None   # full token list, filled in by parse()
        self.token = None    # most recently examined token
        self.token_n = 0     # index of the next token to consume
        self.saved = 0       # position remembered by saveToken()
    def term(self, t):
        """Consume one token iff its kind equals `t`; otherwise push it back."""
        if not self.nextToken():
            return False
        #print("Current token {0} {1}, term: {2}".format(self.token.kind, self.token.val, t))
        if self.token.kind == t:
            #print("Match")
            return True
        else:
            #print("Not matched")
            self.pushTokens()
            return False
    def restoreToken(self):
        """Rewind to the position saved by saveToken().

        Always returns True so it can be chained inside the `or`-expressions
        of E() and S() without short-circuiting them.
        """
        self.token_n = self.saved
        self.token = self.tokens[self.token_n]
        return True
    def saveToken(self):
        """Remember the current stream position for a later restoreToken()."""
        self.saved = self.token_n
    def pushTokens(self, num = 1):
        """Un-consume the last `num` tokens (default one)."""
        if num < 0 or num > self.token_n:
            raise Exception("Invalid number of tokens")
        while num:
            num -= 1
            self.token_n -= 1
            self.token = self.tokens[self.token_n]
    def nextToken(self):
        """Advance to the next token; False when the stream is exhausted."""
        if self.token_n > len(self.tokens) - 1:
            return False
        self.token = self.tokens[self.token_n]
        self.token_n += 1
        return True
    def E(self):
        """<E> ::= <ID> <OP> <ID> | <ID> <OP> <STRING> | (<E>) and (<E>)"""
        self.saveToken()
        return (self.term(TokenType.ID) and self.term(TokenType.OPERATOR) and self.term(TokenType.ID)) or \
               (self.restoreToken() and self.term(TokenType.ID) and self.term(TokenType.OPERATOR) and self.term(TokenType.STRING)) or \
               (self.restoreToken() and self.term(TokenType.LPAREN) and self.E() and self.term(TokenType.RPAREN) and self.term(TokenType.AND) and self.term(TokenType.LPAREN) and self.E() and self.term(TokenType.RPAREN))
    def S(self):
        """<S> ::= if <E> then <S> | <ID> := <STRING> ;"""
        self.saveToken()
        return (self.term(TokenType.IF) and self.E() and self.term(TokenType.THEN) and self.S()) or \
               (self.restoreToken() and self.term(TokenType.ID) and self.term(TokenType.ASSIGN) and self.term(TokenType.STRING) and self.term(TokenType.SEMICOLON))
    def parse(self):
        """Tokenise the whole input, then match the start symbol S.

        Returns True on a full parse, False on a syntax error, and None
        (implicitly) when the lexer reports an unknown lexeme.
        """
        while True:
            token = self.lexer.getToken()
            if token == None:
                break
            elif token.kind == TokenType.UNKNOWN:
                print("\nError: Unknown lexeme {0} at line {1}".format(token.val, self.lexer.stream.currentLineNum()))
                return
        self.tokens = self.lexer.lexemes
        # A successful parse must also consume every token.
        result = self.S() and self.token_n == len(self.tokens)
        if not result:
            print("Invalid token \"{0}\" of type {1} at line {2}".format(self.token.val, self.token.kind, self.token.line))
        return result
| true |
252cab38dd2dda364cf381f6f7a3ff3a14cbae96 | Python | stellarnode/python_steps | /codewars/fit_schedules.py | UTF-8 | 12,431 | 3.359375 | 3 | [] | no_license | def get_start_time(schedules, duration):
def convert_to_decimal(time):
hm = time.split(":")
return int(hm[0]) * 60 + int(hm[1])
def convert_to_time_string(time):
if time == None:
return None
else:
h = int(time) / 60
m = int(time) % 60
if m < 10:
m = "0" + str(m)
if h < 10:
h = "0" + str(h)
return str(h) + ":" + str(m)
def find_free_time(schedule):
i = 0
free = []
if len(schedule) == 0:
return free
if (schedule[i][0] > 540) and (i == 0):
free.append([540, schedule[i][0], (schedule[i][0] - 540)])
while i < len(schedule) - 1:
if (schedule[i][1] != schedule[i+1][0]):
free.append([schedule[i][1], schedule[i+1][0],
(schedule[i+1][0] - schedule[i][1])])
i = i + 1
if schedule[-1][1] < 1140:
free.append([schedule[-1][1], 1140, 1140 - schedule[-1][1]])
return free
def filter_by_duration(schedule):
filtered = []
for slot in schedule:
if slot[2] >= duration:
filtered.append(slot)
return filtered
def filter_by_earliest(schedules, fit):
filtered = []
for schedule in schedules:
new_schedule = []
for slot in schedule:
if slot[1] > fit and fit + duration <= slot[1]:
new_schedule.append(slot)
filtered.append(new_schedule)
return filtered
def check_fit(schedules, fit):
candidates = []
for schedule in schedules:
for slot in schedule:
if fit >= slot[0] and slot[1] > fit and fit + duration <= slot[1]:
candidates.append(slot)
return candidates
def map_schedule(schedule):
converted_schedule = []
for slot in schedule:
slot = [convert_to_decimal(slot[0]),
convert_to_decimal(slot[1])]
converted_schedule.append(slot)
return converted_schedule
# DRAFT #3
    def fit_to_schedules(schedules):
        """Find the earliest start (in minutes) that fits every schedule.

        Repeatedly raises the candidate `fit` to the latest "earliest free
        slot" across all schedules, prunes slots that can no longer host the
        meeting, and stops when every schedule has a slot containing `fit`.
        Returns None when some schedule runs out of candidate slots.
        """
        if [] in schedules:
            return None
        fit = 540  # start of the working day (09:00)
        found = False
        while not (found or [] in schedules):
            if [] in schedules:
                fit = None
                found = True
                break
            # Push the candidate up to the latest first-slot start time.
            for schedule in schedules:
                if schedule[0][0] > fit:
                    fit = schedule[0][0]
            # Drop slots that end too early for a meeting starting at `fit`.
            schedules = filter_by_earliest(schedules, fit)
            if [] in schedules:
                found = True
                fit = None
                break
            else:
                # Success when every schedule contributes at least one slot
                # containing `fit`.
                candidates = check_fit(schedules, fit)
                if len(candidates) >= len(schedules):
                    found = True
                    break
                else:
                    candidates = []
        return fit
print("Initial schedules: ", schedules)
print("Initial duration: ", duration)
decimal_schedules = map(map_schedule, schedules)
print("Converted schedule to decimals: ", decimal_schedules)
free_times = map(find_free_time, decimal_schedules)
free_times = map(filter_by_duration, free_times)
print("Free times found: ", free_times)
if [] in free_times:
return None
else:
fit = fit_to_schedules(free_times)
print("Found this fit: ", convert_to_time_string(fit), " (for duration " + str(duration) + " min)")
if fit == None:
return None
else:
return convert_to_time_string(fit)
# DRAFT #2
# def fit_to_schedules(schedules):
# if [] in schedules:
# return None
# candidates = []
# fit = 540
# while not(len(candidates) == 3 or [] in schedules):
# candidates[0] = schedules[0][0]
# if candidates[0][0] > fit:
# fit = candidates[0][0]
# print("Current fit: ", fit)
# for i in range(1, len(schedules) - 1):
# to_compare = schedules[i][0]
# if (fit >= to_compare[0]) and (fit + duration <= to_compare[1]):
# fit = to_compare[0]
# print("Current fit: ", fit)
# candidates.append(schedules[i][0])
# elif fit > to_compare[1]:
# schedules[i].remove(to_compare)
# print("Removed ", to_compare, " since fit is GT slot end time")
# elif candidates[0][1] < to_compare[0]:
# schedules[0].remove(candidates[0])
# print("Removed ", candidates[0], " since end time of this slot is LT earliest with others")
# if not(len(schedules[0]) == 0):
# candidates[0] = schedules[0][0]
# else:
# fit = None
# break
# else:
# candidates.append(schedules[i][0])
# if ([] in schedules):
# fit = None
# return fit
#####################################
# DRAFT 1 (WORKING BUT WITH SOME BUGS)
# def fit_to_schedules(schedules):
# while [] not in schedules:
# fit = 540
# for schedule in schedules:
# if len(schedule) > 0 and schedule[0][0] > fit:
# fit = schedule[0][0]
# print("Current fit candidate: ", fit)
# for schedule in schedules:
# if len(schedule) > 0 and fit > schedule[0][1]:
# print("...removing ", schedule[0], " since fit " + str(fit) + " is GT end of free slot")
# schedule.remove(schedule[0])
# for schedule in schedules:
# if len(schedule) > 0 and schedule[0][0] > fit:
# fit = schedule[0][0]
# print("Current fit candidate: ", fit)
# for schedule in schedules:
# mtg_end = fit + duration
# if len(schedule) > 0 and (mtg_end > schedule[0][1]):
# print("...removing ", schedule[0], " since mtg_end " + str(mtg_end) + " is GT end of free slot")
# schedule.remove(schedule[0])
# # if len(schedule) > 0 and schedule[0][0] > fit:
# # fit = schedule[0][0]
#
#
# found = True
# for i in range(0, len(schedules) - 1):
# if len(schedules[i]) == 0:
# fit = None
# elif not ((fit >= schedules[i][0][0]) and (fit + duration <= schedules[i][0][1]) and (fit + duration <= 1140)):
# found = False
# print("This fit doesn't work: ", fit)
# for sched in schedules:
# if len(sched) > 0 and sched[0][0] > fit:
# fit = sched[0][0]
# else:
# found = True
# if found:
# break
# elif [] in schedules:
# fit = None
# break
# else:
# continue
# print("Schedules after fitting...: ", schedules)
# return fit
#############################################
# DRAFT #0
# def fit_to_schedules(schedules):
# while [] not in schedules:
# fit = 540
# for schedule in schedules:
# if len(schedule) > 0 and schedule[0][0] > fit:
# fit = schedule[0][0]
# print("Current fit candidate: ", fit)
# for schedule in schedules:
# if len(schedule) > 0 and fit > schedule[0][1]:
# print("...removing ", schedule[0], " since fit is GT end of slot")
# schedule.remove(schedule[0])
# for schedule in schedules:
# mtg_end = fit + duration
# if len(schedule) > 0 and (mtg_end > schedule[0][1]):
# print("...removing ", schedule[0], " since mtg_end is GT end of slot")
# schedule.remove(schedule[0])
# if len(schedule) > 0 and schedule[0][0] > fit:
# fit = schedule[0][0]
# found = True
# for i in range(0, len(schedules) - 1):
# if len(schedules[i]) == 0:
# found = False
# fit = None
# elif not ((fit >= schedules[i][0][0]) and (fit + duration <= schedules[i][0][1]) and (fit + duration <= 1140)):
# found = False
# print("This fit doesn't work: ", fit)
# print("...removing ", schedules[i][0])
# schedules[i].remove(schedules[i][0])
# fit = None
# else:
# fit = schedules[i][0][0]
# if found:
# break
# elif [] in schedules:
# fit = None
# break
# else:
# continue
# print("Schedules after fitting...: ", schedules)
# return fit
####################################
### OLD STUFF
####################################
# def get_start_time(schedules, duration):
# def convert_to_decimal(time):
# hm = time.split(":")
# return float(hm[0]) + float(hm[1]) / 60.0
# def convert_to_time_string(time):
# h = int(time)
# m = int((time - h) * 60)
# if m < 10:
# m = "0" + str(m)
# if h < 10:
# h = "0" + str(h)
# return str(h) + ":" + str(m)
# def find_free_time(schedule):
# i = 0
# free = []
# while i < len(schedule) - 2:
# if (schedule[i][0] > 9.0) and (i == 0):
# free.append([9.0, schedule[i][0], (schedule[i][0] - 9.0) * 60])
# if (schedule[i][1] != schedule[i+1][0]) and ((schedule[i+1][0] - schedule[i][1]) * 60 >= duration):
# free.append([schedule[i][1], schedule[i+1][0],
# (schedule[i+1][0] - schedule[i][0]) * 60])
# i = i + 1
# return free
# def map_schedule(schedule):
# converted_schedule = []
# for slot in schedule:
# slot = [convert_to_decimal(slot[0]),
# convert_to_decimal(slot[1])]
# converted_schedule.append(slot)
# return converted_schedule
# def fit_to_schedules(schedules):
# while [] not in schedules:
# fit = 0
# for schedule in schedules:
# if schedule[0][0] > fit:
# fit = schedule[0][0]
# for schedule in schedules:
# mtg_end = fit + duration / 60.0
# if (mtg_end > schedule[0][1]):
# schedule.remove(schedule[0])
# if schedule[0][0] > fit:
# fit = schedule[0][0]
# found = True
# for i in range(0, len(schedules) - 1):
# if not ((fit >= schedules[i][0][0]) and (fit + duration / 60.0 <= schedules[i][0][1]) and (fit + duration / 60.0 <= 19.0)):
# found = False
# print("This one doesn't work: ", fit)
# schedules[i].remove(schedules[i][0])
# fit = None
# if found:
# break
# elif [] in schedules:
# fit = None
# break
# else:
# continue
# print("Schedules after fitting...: ", schedules)
# return fit
# print("Initial schedules: ", schedules)
# print("Initial duration: ", duration)
# decimal_schedules = map(map_schedule, schedules)
# free_times = map(find_free_time, decimal_schedules)
# print free_times
# if [] in free_times:
# return None
# else:
# fit = fit_to_schedules(free_times)
# print("Found this fit: ", fit, " (for duration " + str(duration / 60.0) + " hours)")
# if fit == None:
# return None
# else:
# return convert_to_time_string(fit)
| true |
fc413a8ddac43d64144d69ae8ccec0a5e9e59237 | Python | ayenque/Python | /01.Pensamiento Computacional/rangos.py | UTF-8 | 573 | 3.359375 | 3 | [] | no_license | #range(comienzo, fin , pasos)
# Demo: a range is a lazy sequence object.
mi_rango = range(1,5)
# NOTE(review): the result of type() is discarded here -- no visible effect.
type(mi_rango)
for i in mi_rango:
    print(i)
mi_rango = range(0,7,2)
mi_otro_rango = range(0,8,2)
# Python 3 compares ranges by the sequence they produce, so
# range(0,7,2) == range(0,8,2) is True (both yield 0, 2, 4, 6).
print(mi_rango == mi_otro_rango)
for i in mi_rango:
    print(i)
for i in mi_otro_rango:
    print(i)
# Equal contents but distinct objects: different ids, `is` is False.
print(id(mi_rango))
print(id(mi_otro_rango))
print(mi_rango is mi_otro_rango)
# Build the even numbers 0..100 with an explicit loop...
pares = []
for i in range(0,101,2):
    pares.append(i)
print(pares)
# ...and the odd numbers 1..99.
nones = []
for i in range(1,100,2):
    nones.append(i)
print(nones)
# Same results, written idiomatically with list(range(...)).
pares = list(range(0,101,2))
nones = list(range(1,100,2))
print(pares)
print(nones)
| true |
a4334fbe19a085d30345705bd370afcac0f9938b | Python | tom-3266/Name_Error | /part A/calculator.py | UTF-8 | 3,326 | 4.15625 | 4 | [] | no_license | #Design a user interactive Calculator .( sum , subtraction , multiplication , division , Distance , speed , Intrest)
#defining functions for calculator
def sumi(a, b):
    """Addition: return a plus b."""
    total = a + b
    return total
def subs(a, b):
    """Subtraction: return a minus b."""
    difference = a - b
    return difference
def mult(a, b):
    """Multiplication: return a times b."""
    product = a * b
    return product
def div(a, b):
    """Division: return a divided by b (true division)."""
    quotient = a / b
    return quotient
def interest(): #simple interest
    """Prompt for principal, annual rate and duration; return a
    ``(amount, duration_unit, principal, duration)`` tuple.

    Raises:
        ValueError: when the duration type is neither "months" nor "years".
    """
    P = float(input("\nEnter the principle amount : "))
    R = float(input("Enter the rate of interest per year : "))
    t = float(input("Enter the duration : "))
    print("1. months")
    print("2. years")
    q = str(input("Enter duration type(months/years) :"))
    if q=="months": #for month to year convertion
        time = t/12
    elif q=="years":
        time = t
    else:
        # BUG FIX: the original only printed this warning and fell
        # through, crashing below with NameError because `time` was
        # never assigned.  Fail explicitly instead.
        print("**** Wrong choise *****") #choise validation
        raise ValueError("duration type must be 'months' or 'years'")
    r = R/100
    # NOTE(review): this is the accrued amount P*(1 + r*t), not the bare
    # simple interest P*r*t, although the caller labels it "interest" —
    # confirm which formula is intended before changing it.
    si = P*(1 + (r*time))
    return (si,q,P,t)
def value_cal():
    """Prompt the user for two integers and return them as a tuple."""
    first = int(input("\nEnter the first number : "))
    second = int(input("Enter the second number : "))
    return (first, second)
def contin():
    """Ask whether to keep going; return the raw answer string (e.g. 'Y')."""
    return input("\nDo you want to continue (Y/N) : ")
def main_calc():
    """Display the menu and return the user's choice as an int.

    Reads the module-level flag ``cont`` ("Y"/"y" keeps the menu alive).
    Returns None when the input is not an integer (the bare ``except``
    swallows the ValueError) or when ``cont`` holds anything else.
    """
    while cont == "Y" or cont == "y": # main choise for interactive calculator
        print("\n1. Addition")
        print("2. Subtraction")
        print("3. Multiplication")
        print("4. Division")
        print("5. Calculate Distance")
        print("6. Calculate Speed")
        print("7. Calculate Interest")
        print("8. Exit")
        try:
            ch = int(input("\nEnter the choice from the following : "))
            return ch
        except:
            # Non-numeric input: report it and fall out of the loop,
            # implicitly returning None to the caller.
            print("wrong input")
            break
    else:
        # while/else: runs only when the condition is false on entry
        # (cont is not "Y"/"y"); the single-pass for/break is a no-op,
        # so the function simply returns None.
        for i in range(1):
            break
# --- interactive driver -------------------------------------------------
# `cont` is the module-level continue flag read by main_calc(); contin()
# reassigns it after every completed operation.
cont = "Y"
print("\n\t\tSimple Calculator")
while True:
    # main_calc() returns the menu choice, or None on bad input / cont != "Y".
    ch = main_calc()
    if ch==1: # nested if for selecting based on choise
        a,b = value_cal()
        c = sumi(a,b)
        print("\nThe sum of {} and {} is :".format(a,b),c)
        cont = contin()
    elif ch==2:
        a,b = value_cal()
        c = subs(a,b)
        print("\nThe difference of {} and {} is :".format(a,b),c)
        cont = contin()
    elif ch==3:
        a,b = value_cal()
        c = mult(a,b)
        print("\nThe product of {} and {} is :".format(a,b),c)
        cont = contin()
    elif ch==4:
        a = int(input("\nEnter the dividend : "))
        b = int(input("Enter the divisor : "))
        c = div(a,b)
        print("\nThe quotient of {} and {} is :".format(a,b),c)
        cont = contin()
    elif ch==5:
        # distance = speed * time
        a = int(input("\nEnter the speed (km\h) : "))
        b = int(input("Enter the time (hr) : "))
        c = mult(a,b)
        print("\nThe distance covered : {}kms".format(c))
        cont = contin()
    elif ch==6:
        # speed = distance / time
        a = int(input("\nEnter the distance covered (km) : "))
        b = int(input("Enter the time (hr) : "))
        c = div(a,b)
        print("\nSpeed : {}km/hr".format(c))
        cont = contin()
    elif ch==7:
        sim_int,q,P,t = interest()
        print("\nThe simlpe interest for ₹{} for {}{} is : ".format(P,t,q),sim_int)
        cont = contin()
    # ch is None when main_calc() bailed out; a "N"/"n" answer from
    # contin() is routed here as a normal exit.
    elif ch==8 or cont =="N" or cont == "n": # for breaking from loop
        print("\n\t\t*** Thank you ***")
        break
    else :
        # Any other numeric choice terminates the program as well.
        print("\n\t\t*** Wrong Choise ***")
        break
| true |
e043bc3f657250a2bcf1cad0b53bc020f1888019 | Python | Aasthaengg/IBMdataset | /Python_codes/p03730/s766307658.py | UTF-8 | 152 | 2.703125 | 3 | [] | no_license | from fractions import gcd
def check(values=None):
    """Return 'YES' when C is a multiple of gcd(A, B), else 'NO'.

    Args:
        values: optional (A, B, C) iterable of ints.  When omitted (the
            original behaviour) the three integers are read from stdin.
    """
    # fractions.gcd was removed in Python 3.9; math.gcd is the
    # drop-in replacement, imported locally so this function no longer
    # depends on the stale module-level import.
    from math import gcd
    if values is None:
        values = map(int, input().split())
    A, B, C = values
    if C % gcd(A, B) == 0:
        return 'YES'
    return 'NO'
print(check()) | true |
cdd6887b9db44c1a54f571acb3160d4503cfc98d | Python | prateekpm123/Prateek-s-Competitve-Coding-Repo | /Love Babbar sheet/to reverse an array or string/Reverse of a string.py | UTF-8 | 700 | 3.78125 | 4 | [] | no_license | # to find the reverse of the string or array
# Reverse a list in place by swapping elements symmetric about the middle.
arr = [4, 2, 1, 3, 6, 8, 9, 10, 11, 12, 13, 14]

left = 0
# `right` walks from the last index down to the middle; `left` climbs to meet it.
for right in range(len(arr) - 1, int((len(arr) / 2) - 1), -1):
    print(left)
    low_value = arr[left]
    high_value = arr[right]
    print(left, right, low_value, high_value)
    # Swap the symmetric pair.
    arr[left] = high_value
    arr[right] = low_value
    left += 1
print(arr) | true |
66c3aa274e092b7fa0a732fb57bd6844733fc12d | Python | redmage123/deep_learning_tensorflow | /examples/module1/simple_numpy_program.py | UTF-8 | 501 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env python3
import numpy as np

# 'a' is a 2x2 array of zeros, 'b' a 2x2 array of ones.
a = np.zeros((2, 2))
b = np.ones((2, 2))

# Summing with axis=1 collapses the columns, producing one total per row.
print(np.sum(b, axis=1))

# Reshape 'a' into a single row of four elements.
print(np.reshape(a, (1, 4)))
| true |
e3264f707ea93bed16d96ac564a680d9c3c763ca | Python | conrad-strughold/GamestonkTerminal | /openbb_terminal/portfolio/brokers/robinhood/robinhood_model.py | UTF-8 | 3,002 | 2.671875 | 3 | [
"MIT"
] | permissive | """Robinhood Model"""
__docformat__ = "numpy"
import logging
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from robin_stocks import robinhood
from openbb_terminal.core.session.current_user import get_current_user
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
# Module-level logger used by the @log_start_end call-tracing decorator.
logger = logging.getLogger(__name__)
# Timestamp layout of the Robinhood API's "begins_at" fields (ISO-8601
# with a literal "Z" suffix; values are presumably UTC — TODO confirm).
dt_format = "%Y-%m-%dT%H:%M:%SZ"
@log_start_end(log=logger)
def login():
    """Log in to Robinhood using the stored user credentials."""
    creds = get_current_user().credentials
    robinhood.login(creds.RH_USERNAME, creds.RH_PASSWORD)
    console.print("")
@log_start_end(log=logger)
def logoff():
    """Terminate the active Robinhood session."""
    robinhood.logout()
@log_start_end(log=logger)
def get_holdings() -> pd.DataFrame:
    """Fetch the account's current holdings.

    Returns
    -------
    pd.DataFrame
        Robinhood holdings, one row per symbol
    """
    return rh_positions_to_df(robinhood.account.build_holdings())
@log_start_end(log=logger)
def rh_positions_to_df(holds: dict) -> pd.DataFrame:
    """Convert raw robin_stocks holdings into a tidy DataFrame.

    Parameters
    ----------
    holds : dict
        Mapping of ticker symbol to holding info as returned by robin_stocks

    Returns
    -------
    pd.DataFrame
        One row per ticker with Symbol, MarketValue, Quantity and CostBasis
    """
    columns = ["Symbol", "MarketValue", "Quantity", "CostBasis"]
    rows = []
    for ticker, info in holds.items():
        quantity = float(info["quantity"])
        rows.append(
            {
                "Symbol": ticker,
                "MarketValue": float(info["equity"]),
                "Quantity": quantity,
                # Cost basis = shares held times average purchase price.
                "CostBasis": quantity * float(info["average_buy_price"]),
            }
        )
    return pd.DataFrame(rows, columns=columns)
@log_start_end(log=logger)
def get_historical(interval: str = "day", window: str = "3month") -> pd.DataFrame:
    """Fetch the portfolio's equity history as OHLC candles.

    Parameters
    ----------
    interval : str, optional
        Candle width understood by robinhood, by default "day"
    window : str, optional
        Lookback period for the history, by default "3month"

    Returns
    -------
    pd.DataFrame
        Indexed by timestamp with High, Low, Open and Close columns
    """
    history = robinhood.account.get_historical_portfolio(interval, window)

    stamps = []
    opens = []
    closes = []
    for candle in history["equity_historicals"]:
        parsed = datetime.strptime(candle["begins_at"], dt_format)
        # Shift API timestamps back four hours — presumably UTC to
        # US/Eastern; NOTE(review): this ignores daylight-saving changes.
        stamps.append(parsed - timedelta(hours=4))
        opens.append(float(candle["adjusted_open_equity"]))
        closes.append(float(candle["adjusted_close_equity"]))

    open_arr = np.asarray(opens)
    close_arr = np.asarray(closes)

    candles = pd.DataFrame(index=stamps)
    # High/Low are derived from open/close only (no intraperiod extremes).
    candles["High"] = np.maximum(open_arr, close_arr)
    candles["Low"] = np.minimum(open_arr, close_arr)
    candles["Open"] = open_arr
    candles["Close"] = close_arr
    return candles
| true |
376cb6a720e889a2227e618684d98b3fa218e255 | Python | MoMolive/MoMolive.gethub.io | /跳过验证码/。。。.py | UTF-8 | 265 | 3.296875 | 3 | [] | no_license | # coding = utf - 8
import random

# Generate and display a random 4-digit verification code.
ver = random.randint(1000,9999)
print(u'生成验证码:%d'%ver)
# BUG FIX: the original assigned the prompt string itself to `num`
# (the call to input() was missing), so the numeric comparisons below
# could never succeed.  Read the user's entry and convert it to int.
num = int(input(u'请输入数值:'))
print(num)
# NOTE(review): the entry is only checked against the master values
# 0 and 999999, never against the generated code `ver` — confirm intent.
if num == 0:
    print(u'登陆成功')
elif num == 999999:
    print(u'登陆成功')
else:
    print(u'验证码错误')
| true |
563b382bc0261fe31406f76b1723a6df32617c2c | Python | sergelab/yustina | /src/contrib/data/attachment.py | UTF-8 | 9,192 | 2.65625 | 3 | [] | no_license | # coding: utf-8
from __future__ import absolute_import
import logging
import os
import sys
from contrib.utils.file import add_postfix_to_filename
class ValidationError(Exception):
    """Aggregates one or more validation errors under an optional path."""

    def __init__(self, errors, path=None):
        # A bare message is normalised into a single-element error list.
        if not isinstance(errors, list):
            errors = [TypeError(errors)]
        joined = "\n ".join(str(err).replace("\n", "\n ") for err in errors)
        super(ValidationError, self).__init__(
            "{}:\n {}".format(path or "{root}", joined))
        self.errors = errors
        self.path = path

    def prefixed(self, path_prefix):
        """Return a copy whose path is nested under *path_prefix*."""
        if self.path is None:
            nested = path_prefix
        else:
            nested = '{0}.{1}'.format(path_prefix, self.path)
        return self.__class__(self.errors, nested)
class AbstractBase(object):
    """Base class for JSON-backed data models.

    Subclasses describe their fields by overriding ``elementProperties``,
    returning ``(name, jsname, typ, is_list, of_many, not_optional)``
    tuples; this class then provides JSON (de)serialisation plus type
    validation on top of that description.
    """

    def __init__(self, jsondict=None):
        # Owning model instance, populated by with_json_and_owner().
        self._owner = None

        if jsondict:
            try:
                self.update_with_json(jsondict)
            except ValidationError as e:
                # The constructor is lenient: validation problems are
                # logged rather than propagated to the caller.
                for err in e.errors:
                    logging.warning(err)

    def as_json(self):
        """Serialise the instance into a plain dict.

        Raises:
            ValidationError: aggregating every type mismatch, nested
                serialisation failure and missing non-optional property.
        """
        js = {}
        errs = []
        found = set()
        nonoptionals = set()
        for name, jsname, typ, is_list, of_many, not_optional in self.elementProperties():
            if not_optional:
                nonoptionals.add(of_many or jsname)
            err = None
            value = getattr(self, name)
            if value is None:
                continue
            if is_list:
                if not isinstance(value, list):
                    err = TypeError("Expecting property \"{}\" on {} to be list, but is {}"
                                    .format(name, type(self), type(value)))
                elif len(value) > 0:
                    # Only the first element's type is checked; the list
                    # is assumed to be homogeneous.
                    if not self._matches_type(value[0], typ):
                        err = TypeError("Expecting property \"{}\" on {} to be {}, but is {}"
                                        .format(name, type(self), typ, type(value[0])))
                    else:
                        lst = []
                        for v in value:
                            try:
                                lst.append(v.as_json() if hasattr(v, 'as_json') else v)
                            except ValidationError as e:
                                err = e.prefixed(name)
                        found.add(of_many or jsname)
                        js[jsname] = lst
            else:
                if not self._matches_type(value, typ):
                    err = TypeError("Expecting property \"{}\" on {} to be {}, but is {}"
                                    .format(name, type(self), typ, type(value)))
                else:
                    try:
                        found.add(of_many or jsname)
                        js[jsname] = value.as_json() if hasattr(value, 'as_json') else value
                    except ValidationError as e:
                        err = e.prefixed(name)
            if err is not None:
                errs.append(err if isinstance(err, ValidationError) else ValidationError([err], name))

        # any missing non-optionals?
        if len(nonoptionals - found) > 0:
            for nonop in nonoptionals - found:
                errs.append(KeyError("Property \"{}\" on {} is not optional, you must provide a value for it"
                                     .format(nonop, self)))
        if len(errs) > 0:
            raise ValidationError(errs)
        return js

    @classmethod
    def with_json(cls, jsonobj):
        """Instantiate from a dict, or from a list of dicts (one each)."""
        if isinstance(jsonobj, dict):
            return cls._with_json_dict(jsonobj)

        if isinstance(jsonobj, list):
            return [cls._with_json_dict(jsondict) for jsondict in jsonobj]

        raise TypeError("`with_json()` on {} only takes dict or list of dict, but you provided {}"
                        .format(cls, type(jsonobj)))

    @classmethod
    def _with_json_dict(cls, jsondict):
        """Construct one instance from a single dict (internal helper)."""
        if not isinstance(jsondict, dict):
            raise TypeError("Can only use `_with_json_dict()` on {} with a dictionary, got {}"
                            .format(type(cls), type(jsondict)))
        return cls(jsondict)

    @classmethod
    def with_json_and_owner(cls, jsonobj, owner):
        """Like with_json(), additionally recording *owner* on each instance."""
        instance = cls.with_json(jsonobj)
        if isinstance(instance, list):
            for inst in instance:
                inst._owner = owner
        else:
            instance._owner = owner

        return instance

    def update_with_json(self, jsondict):
        """Update attributes in place from *jsondict*.

        Raises:
            ValidationError: if *jsondict* is not a dict, or aggregating
                every property that failed to convert or type-check.
        """
        if jsondict is None:
            return

        if not isinstance(jsondict, dict):
            raise ValidationError("Non-dict type {0} fed to `update_with_json` or {1}".format(
                type(jsondict), type(self)
            ))

        errs = []
        found = set([])
        # NOTE(review): non-optional properties are collected here but —
        # unlike in as_json() — never enforced, presumably because
        # partial updates are allowed.  Confirm before tightening.
        nonoptionals = set()
        for name, jsname, typ, is_list, of_many, not_optional in self.elementProperties():
            if jsname not in jsondict:
                if not_optional:
                    nonoptionals.add(of_many or jsname)
                continue

            err = None

            value = jsondict[jsname]
            if hasattr(typ, 'with_json_and_owner'):
                try:
                    value = typ.with_json_and_owner(value, self)
                except Exception as e:
                    # BUG FIX: the original assigned ``value = None,`` (a
                    # one-element tuple), which slipped past the ``is not
                    # None`` guard below and masked the real error with a
                    # bogus type mismatch.
                    value = None
                    err = e

            if value is not None:
                testval = value
                if is_list:
                    if not isinstance(value, list):
                        err = TypeError('Wrong type {0} for list property "{1}" or {2}, '
                                        'expecting a list of {3}'.format(
                            type(value), name, type(self), typ
                        ))
                        testval = None
                    else:
                        testval = value[0] if value and len(value) > 0 else None

                if testval is not None and not self._matches_type(testval, typ):
                    err = TypeError('Wrong type {0} for property "{1}" on {2}, '
                                    'expecting {3}'.format(
                        type(testval), name, type(self), typ
                    ))
                else:
                    setattr(self, name, value)

            if err is not None:
                errs.append(err.prefixed(name) if isinstance(err, ValidationError)
                            else ValidationError([err], name))
            found.add(jsname)
            found.add('_' + jsname)
            if of_many is not None:
                found.add(of_many)

        # BUG FIX: the original built ``errs`` but never raised it, so
        # callers — including __init__, which explicitly catches
        # ValidationError from this method and logs each entry — never
        # saw any failure.
        if len(errs) > 0:
            raise ValidationError(errs)

    def _matches_type(self, value, typ):
        """Return True when *value* is acceptable for declared type *typ*."""
        if value is None:
            return True
        # BUG FIX: the original tested ``isinstance(value, type)`` — i.e.
        # "is the value a class?" — instead of ``isinstance(value, typ)``,
        # so model instances (and plain ``str`` on Python 3) never
        # matched their declared type.
        if isinstance(value, typ):
            return True
        if int == typ or float == typ:
            # ints and floats are accepted interchangeably.
            return isinstance(value, int) or isinstance(value, float)
        if (sys.version_info < (3, 0)) and (str == typ or unicode == typ):
            # Python 2: str and unicode are accepted interchangeably.
            return isinstance(value, str) or isinstance(value, unicode)
        return False

    def elementProperties(self):
        """Property descriptors; subclasses extend the returned list."""
        return []
class AttachmentThumbnail(AbstractBase):
    """Width/height pair describing one generated thumbnail."""

    def __init__(self, jsondict=None):
        # Both dimensions are required (see elementProperties).
        self.width = None
        self.height = None
        super(AttachmentThumbnail, self).__init__(jsondict=jsondict)

    def elementProperties(self):
        props = super(AttachmentThumbnail, self).elementProperties()
        props += [
            ('width', 'width', int, False, None, True),
            ('height', 'height', int, False, None, True),
        ]
        return props
class Attachment(AbstractBase):
    """File attachment with an optional list of pre-generated thumbnails."""

    def __init__(self, jsondict=None):
        self.original_filename = None
        self.filename = None
        self.title = None
        self.url = None
        self.path = None
        self.thumbnails = None
        super(Attachment, self).__init__(jsondict=jsondict)

    def sized_url(self, width, height):
        """Return the preview path when a width x height thumbnail exists,
        otherwise fall back to the original URL."""
        for thumb in (self.thumbnails or []):
            if thumb.width == width and thumb.height == height:
                # NOTE(review): the generated name ignores the matched
                # size — presumably every thumbnail shares one preview
                # file; confirm against the upload pipeline.
                return os.path.join(self.path, 'preview_{0}'.format(self.filename))
        return self.url

    def get_sized(self, size):
        """Path of the size-suffixed variant, or None if filename/path missing."""
        if not (self.filename and self.path):
            return None
        return os.path.join(self.path, add_postfix_to_filename(self.filename,
                                                               size))

    def preview(self):
        """Path of the ``preview_``-prefixed file, or None if unavailable."""
        if not (self.filename and self.path):
            return None
        return os.path.join(self.path, 'preview_{0}'.format(self.filename))

    def original(self):
        """The original URL, or None when it is unset/empty."""
        return self.url or None

    def elementProperties(self):
        props = super(Attachment, self).elementProperties()
        props += [
            ('original_filename', 'original_filename', str, False, None, False),
            ('path', 'path', str, False, None, False),
            ('filename', 'filename', str, False, None, False),
            ('url', 'url', str, False, None, False),
            ('title', 'title', str, False, None, False),
            ('thumbnails', 'thumbnails', AttachmentThumbnail, True, None, False),
        ]
        return props
| true |
be60444d0596552984220938e3dccdf3b0bff194 | Python | Fay321/leetcode-exercise | /solution/problem 23.py | UTF-8 | 877 | 3.8125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# 最简单直接的思路
class Solution1(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
lst = []
for i in range(0,num+1):
s = 0
for j in bin(i).split('b')[1]:
if j=='1':
s+= 1
lst.append(s)
return lst
# 找到一个答案的O(N)
'''
f(n) = f(n/2) + 0, 如果n为偶数
f(n) = f(n/2) + 1, 如果n为奇数
'''
class Solution:
def countBits(self, num):
res = [0 for _ in range(num+1)]
for i in range(1, num + 1):
if i & 1:##位运算判定奇偶加快速度
res[i] = res[i//2] + 1
else:
res[i] = res[i//2]
return res
s = Solution()
print(s.countBits(5)) | true |
9451bcb1299377bae1cd2c6cbb2039e41ebec214 | Python | jlyu26/Python-Data-Structures-and-Algorithms | /Problems Notebook/230. Kth Smallest Element in a BST.py | UTF-8 | 1,758 | 4.09375 | 4 | [] | no_license | # 230. Kth Smallest Element in a BST
# Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
# Note:
# You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
# Example 1:
# Input: root = [3,1,4,null,2], k = 1
# 3
# / \
# 1 4
# \
# 2
# Output: 1
# Example 2:
# Input: root = [5,3,6,2,4,null,null,1], k = 3
# 5
# / \
# 3 6
# / \
# 2 4
# /
# 1
# Output: 3
# Follow up:
# What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently?
# How would you optimize the kthSmallest routine?
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 思路: BST中序遍历结果为非降序
# 暴力解: 先中序遍历记录在list里再返回list[k-1], 缺点是比如就返回1st还要遍历整个tree
# 所以可以用index记录第kth个
# Recursion
class Solution:
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
self.kth = None
self.index = 0
self.inorderTraverse(root, k)
return self.kth
def inorderTraverse(self, root, k):
if not root:
return
self.inorderTraverse(root.left, k)
self.index += 1
if self.index == k:
self.kth = root.val
return
self.inorderTraverse(root.right, k)
# Non-Recursion
class Solution:
def kthSmallest(self, root, k):
if not root:
return -1
index = 0
stack = []
curr = root
while curr or stack:
while curr:
stack.append(curr)
curr = curr.left
curr = stack.pop()
index += 1
if index == k:
return curr.val
curr = curr.right
return root.val | true |
5a2fc4a00a1c5bb839658ef35fbc08d419b64d54 | Python | hankumin/NewsCycle | /allVowels2.py | UTF-8 | 756 | 3.4375 | 3 | [] | no_license | #!usr/bin/python
import gzip
import re
import os
#returns true when word has aeiou in this order
def vowelWord(word,vowels):
return vowels.search(word)
def main():
theDict = open('/usr/share/dict/words')
#Expressions for words with pattern AEIOU in them
theWordexp = re.compile('^((?![aeiou]).)*a((?![aeiou]).)*e((?![aeiou]).)*i((?![aeiou]).)*o((?![aeiou]).)*u((?![aeiou]).)*$')
#places all words in an array
theWords = theDict.readlines()
#iterates through each word and checks if the match the pattern and prints
# the word if they do match
for word in theWords:
if (vowelWord(word,theWordexp)):
print word
theDict.close()
if __name__ == '__main__':
main()
| true |
191287006fcb832399677af0a049a9badf4c0aec | Python | sungwooHa/python_dummy | /helloWorld.py | UTF-8 | 80 | 3.078125 | 3 | [] | no_license | i, hap = 0, 0
for i in range(1, 11, 3) :
hap += i
print("%d %d" % (hap, i))
| true |
4ee9715dbda4a0865567fbf6861d6d7a0a552490 | Python | jonnycrunch/pypeerdid | /peerdid/tests/file_test.py | UTF-8 | 289 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | import os
from ..delta import Delta
def test_is_iterable(scratch_file):
for item in scratch_file:
return
def test_file_io(scratch_file):
assert not os.path.exists(scratch_file.path)
scratch_file.append(Delta("abc", []))
assert os.path.exists(scratch_file.path) | true |
69e8343b808f0ca7d4769bcd9e31bb7ba64e0437 | Python | Neroal/TQC-python- | /TQC309.py | UTF-8 | 578 | 3.890625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
請使用迴圈敘述撰寫一程式,提示使用者輸入金額
(如10,000)、年收益率(如5.75),以及經過的月
份數(如5),接著顯示每個月的存款總額。
提示:四捨五入,輸出浮點數到小數點後第二位
"""
amount = eval(input())
rate = eval(input())
period = eval(input())
#change to percent
rate/=100
print('%s\t%s'%('Month','Amount'))
for month in range(1,period+1):
total = amount + (amount*(rate/12))
print('%d\t%.2f'%(month,total))
amount = total
| true |
8977b272860bb2312e7872872d269ccc3b2d4c51 | Python | 15csmonk/computing_method | /计算方法_作业二/Gauss-Legendre.py | UTF-8 | 467 | 3.296875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import math
def fun(x):
return 1/(1+x**2)
def main():
GauFive={0.9061798459:0.2369268851,0.5384693101:0.4786286705,0:0.5688888889}
GauSum=0.0
a=0.0
b=1.0
for key,value in GauFive.items():
GauSum+=fun(((b-a)*key+a+b)/2)*value
if(key>0):
GauSum+=fun(((a-b)*key+a+b)/2)*value
GauSum=GauSum*(b-a)/2
print "Gauss-Legendre:",GauSum
main()
| true |
944de9bcb03ba7b8c28a5b603a94724b1124b701 | Python | Maria105/python_lab | /lab7_14.py | UTF-8 | 402 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- codding:utf-8 -*-
def input_email() -> str:
"""Input email"""
email = input('Enter your email: ')
return (email)
def valid_check(email: str) -> bool:
"""Check is valid your email"""
separation = email.split('@')[1].split('.')
return len(separation[-1]) > 1 and email.count('@') < 2 and len(separation) > 1
print(valid_check (input_email ( ) ) )
| true |
7027754c13bed24d0abb2c54fd3617e784ff3173 | Python | DoomPI/ABC_03 | /cartoon.py | UTF-8 | 2,647 | 3.53125 | 4 | [] | no_license | # --------------------------------------------
from film import Film
from type import DrawingType
from rnd import RandomInt
from rnd import RandomString
class Cartoon(Film):
def __init__(self):
super().__init__()
self.type = 0
def ReadStrArray(self, strArray, i):
# Проверка на конец чтения
if i >= len(strArray) - 1:
return 0
# Получение значения названия фильма и года выхода
self.name = strArray[i]
self.year = int(strArray[i + 1])
# Получение значения метода рисовки мультфильма
filmType = int(strArray[i + 2])
if filmType == 1:
self.type = DrawingType.drawn
elif filmType == 2:
self.type = DrawingType.stop_motion
elif filmType == 3:
self.type = DrawingType.plasticine
i += 3
return i
# Случайный ввод данных мультфильма
def GetRandomInfo(self):
rnd3 = RandomInt(1, 3)
rnd2000 = RandomInt(1000, 2000)
rndFilmName = RandomString("film name")
self.name = rndFilmName.Get()
self.year = rnd2000.Get()
filmType = rnd3.Get()
if filmType == 1:
self.type = DrawingType.drawn
elif filmType == 2:
self.type = DrawingType.stop_motion
elif filmType == 3:
self.type = DrawingType.plasticine
# Вывод данных мультфильма в консоль
def Print(self):
super().Print()
if self.type == DrawingType.drawn:
print("It is a cartoon. It was created by using drawn method.\n")
elif self.type == DrawingType.stop_motion:
print("It is a cartoon. It was created by using stop motion method.\n")
elif self.type == DrawingType.plasticine:
print("It is a cartoon. It was created by using plasticine method.\n")
pass
# Вывод данных мультфильма в файл
def Write(self, oStream):
super().Write(oStream)
if self.type == DrawingType.drawn:
oStream.write("It is a cartoon. It was created by using drawn method.\n")
elif self.type == DrawingType.stop_motion:
oStream.write("It is a cartoon. It was created by using stop motion method.\n")
elif self.type == DrawingType.plasticine:
oStream.write("It is a cartoon. It was created by using plasticine method.\n")
pass
| true |
7111f8f6799b1aa48daebdbfc44a4002fed7014e | Python | EZevan/Conver_Excel_to_XML | /convert.py | UTF-8 | 8,709 | 2.546875 | 3 | [] | no_license | # coding:utf-8
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from excelConfig import ExcelConfig
from enums import Significance
from enums import ExecMode
class Convert():
def __init__(self, ExcelFileName, SheetName):
self.excelFile = ExcelFileName + '.xlsx'
self.excelSheet = SheetName
self.temp = ExcelConfig(self.excelFile)
self.dic_testlink = {}
self.row_flag = 3
self.testsuite = self.temp.getCell(self.excelSheet, 2, 1)
self.dic_testlink[self.testsuite] = {"node_order": "13", "details": "", "testcase": []}
self.content = ""
self.content_list = []
def xlsx_to_dic(self, SheetName):
# node_order = int(self.temp.getCell(self.excelSheet,self.row_flag - 1,9))
# externalId = int(self.temp.getCell(self.excelSheet,self.row_flag - 1,10))
while True:
# print 'loop1'
# list_testcase = dic_testlink[testsuite].["testcase"]
testcase = {"name": "", "node_order": "1000", "externalid": "", "version": "1", "summary": "",
"preconditions": "", "execution_mode": "1", "significance": "2","status":"1", "steps": [], "keywords": "1.0"}
testcase["name"] = self.temp.getCell(self.excelSheet, self.row_flag, 1)
# testcase["node_order"] = node_order
# testcase["externalid"] = externalId
testcase["summary"] = self.temp.getCell(self.excelSheet, self.row_flag, 4)
testcase["preconditions"] = self.temp.getCell(self.excelSheet, self.row_flag, 5)
significance = self.temp.getCell(self.excelSheet,self.row_flag,6)
execution_mode = self.temp.getCell(self.excelSheet, self.row_flag, 3)
# type1 = type(significance) unicode:默认使用utf-8将“重要性”中文值解码(decode)成了unicode
# type2 = type(execution_mode.encode('utf-8')) str:手动编码(encode),将unicode转换成str
# type3 = type(ExecMode.auto) Enum:未进行任何处理,auto对象默认还是枚举类型
# type4 = type(ExecMode.auto.value) str:枚举类型的值,该枚举对象的值类型是str
# 这里对中文字符进行比较,需要转换成相同(编码)类型;如str,或者unicode
# execution_mode解码之前是str,所以ExecMode.auto枚举需要取value值(str类型),再解码成unicode
if execution_mode == ExecMode.auto.value.decode('utf-8'):
testcase["execution_mode"] = 2
if significance is None:
raise Exception("significance is required!")
elif significance.strip() == Significance.high.value.decode('utf-8'):
testcase["significance"] = 3
elif significance.strip() == Significance.medium.value.decode('utf-8'):
testcase["significance"] = 2
else :
testcase["significance"] = 1
# print self.temp.getCell('Sheet1',self.row_flag,3)
step_number = 1
testcase["keywords"] = self.temp.getCell(self.excelSheet, self.row_flag, 2)
if testcase["keywords"] is not None:
testcase["keywords"].strip()
else:
raise Exception("Keywords is required!")
# node_order += 1
# externalId += 1
# print testcase["keywords"]
while True:
# print 'loop2'
step = {"step_number": "", "actions": "", "expectedresults": "", "execution_mode": "1"}
step["step_number"] = step_number
step["actions"] = self.temp.getCell(self.excelSheet, self.row_flag, 7)
step["expectedresults"] = self.temp.getCell(self.excelSheet, self.row_flag, 8)
if execution_mode == ExecMode.auto.value.decode('utf-8'):
step["execution_mode"] = 2
testcase["steps"].append(step)
step_number += 1
self.row_flag += 1
if self.temp.getCell(self.excelSheet, self.row_flag, 1) is not None or self.temp.getCell(self.excelSheet, self.row_flag, 7) is None:
break
# print testcase
self.dic_testlink[self.testsuite]["testcase"].append(testcase)
# print self.row_flag
if self.temp.getCell(self.excelSheet, self.row_flag, 7) is None and self.temp.getCell(self.excelSheet, self.row_flag + 1, 7) is None:
break
self.temp.close()
# print self.dic_testlink
def content_to_xml(self, key, value=None):
if key == 'step_number' or key == 'execution_mode' or key == 'node_order' or key == 'externalid' or key == 'version' or key == 'significance':
return "<" + str(key) + "><![CDATA[" + str(value) + "]]></" + str(key) + ">"
elif key == 'actions' or key == 'expectedresults' or key == 'summary' or key == 'preconditions':
return "<" + str(key) + "><![CDATA[<p> " + str(value) + "</p> ]]></" + str(key) + ">"
elif key == 'keywords':
return '<keywords><keyword name="' + str(value) + '"><notes><![CDATA[]]></notes></keyword></keywords>'
elif key == 'name':
return '<testcase name="' + str(value) + '">'
elif key == 'status':
return "<status>" + str(value) + "</status>"
else:
return '##########'
def dic_to_xml(self, ExcelFileName, SheetName):
testcase_list = self.dic_testlink[self.testsuite]["testcase"]
for testcase in testcase_list:
for step in testcase["steps"]:
self.content += "<step>"
self.content += self.content_to_xml("step_number", step["step_number"])
self.content += self.content_to_xml("actions", step["actions"])
self.content += self.content_to_xml("expectedresults", step["expectedresults"])
self.content += self.content_to_xml("execution_mode", step["execution_mode"])
self.content += "</step>"
self.content = "<steps>" + self.content + "</steps>"
self.content = self.content_to_xml("status",testcase["status"]) + self.content
self.content = self.content_to_xml("significance", testcase["significance"]) + self.content
self.content = self.content_to_xml("execution_mode", testcase["execution_mode"]) + self.content
self.content = self.content_to_xml("preconditions", testcase["preconditions"]) + self.content
self.content = self.content_to_xml("summary", testcase["summary"]) + self.content
self.content = self.content_to_xml("version", testcase["version"]) + self.content
#self.content = self.content_to_xml("externalid", testcase["externalid"]) + self.content
#self.content = self.content_to_xml("node_order", testcase["node_order"]) + self.content
self.content = self.content + self.content_to_xml("keywords", testcase["keywords"])
self.content = self.content_to_xml("name", testcase["name"]) + self.content
self.content = self.content + "</testcase>"
self.content_list.append(self.content)
self.content = ""
self.content = "".join(self.content_list)
# 根据excel数据源确定是否需要生成外层用例集名称
if self.testsuite is not None:
self.content = self.content_to_xml("keywords",testcase["keywords"]) + self.content
self.content = '<testsuite name="' + self.testsuite + '">' + self.content + "</testsuite>"
else:
self.content = "<testcases>" + self.content + "</testcases>"
self.content = '<?xml version="1.0" encoding="UTF-8"?>' + self.content
self.write_to_file(ExcelFileName, SheetName)
def write_to_file(self, ExcelFileName, SheetName):
xmlFileName = 'output\\' + ExcelFileName + '_' + SheetName + '.xml'
cp = open(xmlFileName, "w")
cp.write(self.content)
cp.close()
if __name__ == "__main__":
# res = os.system('pip install -r .\\dependency\\requirements.txt')
# print res
# fileName = raw_input('Enter excel name:').strip()
$ sheetName = raw_input('Enter sheet name:').strip()
fileName = sys.argv[1].strip()
sheetName = sys.argv[2].strip()
sheetList = sheetName.split(" ")
for sheetName in sheetList:
test = Convert(fileName, sheetName)
test.xlsx_to_dic(sheetName)
test.dic_to_xml(fileName, sheetName)
print "Convert successfully!"
os.system('pause')
| true |
fdef6916234797d9f27f3336db3a54dc7925c34f | Python | theoneandonlywoj/ML-DL-AI | /Supervised Learning/Image Recognition/SimpleParallelCNN/network.py | UTF-8 | 2,614 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import tflearn
import numpy as np
from tqdm import tqdm
from tflearn.layers.merge_ops import merge
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_utils import to_categorical
from tflearn.data_preprocessing import ImagePreprocessing
# Building the network
def ANN(WIDTH, HEIGHT, CHANNELS, LABELS):
dropout_value = 0.35
# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
# Building the network
network = input_data(shape=[None, WIDTH, HEIGHT, CHANNELS],
data_preprocessing=img_prep,
name='input')
# Branch 1
branch1 = conv_2d(network, 32, [2, 2], activation = 'relu', name = 'B1Conv2d_2x2')
# Branch 2
branch2 = conv_2d(network, 32, [3, 3], activation = 'relu', name = 'B2Conv2d_3x3')
# Branch 3
branch3 = conv_2d(network, 32, [5, 5], activation = 'relu', name = 'B3Conv2d_5x5')
# Branch 4
branch4 = conv_2d(network, 32, [7, 7], activation = 'relu', name = 'B4Conv2d_7x7')
# Merging the branches
merged_layers = merge((branch1, branch2, branch3, branch4), mode = 'elemwise_sum', name = 'Merge')
# Fully connected 1
merged_layers = fully_connected(merged_layers, 1000, activation='relu')
merged_layers = dropout(merged_layers, dropout_value)
# Fully connected 2
merged_layers = fully_connected(merged_layers, 1000, activation='relu')
merged_layers = dropout(merged_layers, dropout_value)
# Output layer
merged_layers = fully_connected(merged_layers, LABELS, activation = 'softmax')
network = regression(merged_layers, optimizer = 'adam', learning_rate = 0.0005,
loss = 'categorical_crossentropy', name ='target')
model = tflearn.DNN(network, tensorboard_verbose = 0, tensorboard_dir = './logs', best_checkpoint_path = './checkpoints/best/best_val', max_checkpoints = 1)
return model
def big_dataset_prediction(model, DATA = []):
# Predicting
test_data_predicted = np.empty((0, 10))
test_data_predicted_label = np.empty((0, 10))
print('Prediction in progress...')
for i in tqdm(range(0, DATA.shape[0])):
current_example = DATA[i].reshape([-1,28,28,1])
test_data_predicted = np.append(test_data_predicted, model.predict(current_example), axis = 0)
test_data_predicted_label = np.append(test_data_predicted_label, model.predict_label(current_example), axis = 0)
print('The test data has been successfully labeled.')
print('*' * 70)
return test_data_predicted_label
| true |
92bff5144e114d31a41248b8f5536f69e2770471 | Python | elyerandio/linux_python | /user.py | UTF-8 | 848 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python
# Python 2 script (uses the print statement): creates Linux users with a
# default password and a 5-day account expiry. Must run as root.
import os, crypt, sys
from datetime import date, timedelta
import logging
# NOTE(review): sys.argv[0] + 'log' produces e.g. 'user.pylog'; '.log' was
# probably intended - confirm.
logging.basicConfig(filename=sys.argv[0] + 'log', level=logging.DEBUG,
	filemode='w')
if len(sys.argv) == 1:
    # NOTE(review): this branch means "no username given", but the log message
    # says "Needs root privileges!" - misleading.
    logging.critical("Needs root privileges!")
    sys.exit("\nYou need to specify the username to create!\n")
logging.debug("Number of arguments: %d" % len(sys.argv))
logging.debug("Arguments: %s" % sys.argv)
now = date.today()
end = now + timedelta(days=5) #password expiration day is 5 days
expire = end.isoformat()
if not os.geteuid() == 0:
    sys.exit("\nYou will need to be root to create users, perhaps try sudo\n")
password = 'Password1'
# crypt with a fixed salt 'a1'; every created user gets the same hash.
encPassword = crypt.crypt(password, 'a1')
for user in sys.argv[1:]:
    logging.debug("Creating user: %s" % user)
    os.system("useradd -m -p " + encPassword + " -e " + expire + ' ' + user)
print "done"
| true |
92db68860995b574a264117514a63c539c1b446a | Python | nhichan/hachaubaonhi-fundamental-c4e16 | /session2/bài tập/print2.py | UTF-8 | 73 | 2.984375 | 3 | [] | no_license | num=int(input('nhap 1 so: '))
# Print the integers 0 .. num-1 on one line, separated by spaces.
for i in range(num):
    print(i, end=' ')
| true |
aa760691e781381e79d2834bf33012b93c41e6ef | Python | CodeBunny09/Codewars-Writeups | /greed_is_good.py | UTF-8 | 2,100 | 4.5 | 4 | [] | no_license | """
Question:
Greed is a dice game played with five six-sided dice. Your mission, should you choose to accept it, is to score a throw according to these rules. You will always be given an array with five six-sided dice values.
Three 1's => 1000 points
Three 6's => 600 points
Three 5's => 500 points
Three 4's => 400 points
Three 3's => 300 points
Three 2's => 200 points
One 1 => 100 points
One 5 => 50 point
A single die can only be counted once in each roll. For example, a given "5" can only count as part of a triplet (contributing to the 500 points) or as a single 50 points, but not both in the same roll.
Example scoring
Throw Score
--------- ------------------
5 1 3 4 1 250: 50 (for the 5) + 2 * 100 (for the 1s)
1 1 1 3 1 1100: 1000 (for three 1s) + 100 (for the other 1)
2 4 4 5 4 450: 400 (for three 4s) + 50 (for the 5)
In some languages, it is possible to mutate the input to the function. This is something that you should never do. If you mutate the input, you will not be able to pass all the tests.
"""
def score(dice):
    """Score a Greed throw (dice faces 1-6) without mutating the input.

    Each face can form at most one scoring role per die: triples score
    1->1000, 6->600, 5->500, 4->400, 3->300, 2->200; leftover singles
    score 100 per 1 and 50 per 5.
    """
    triple_points = {1: 1000, 2: 200, 3: 300, 4: 400, 5: 500, 6: 600}
    single_points = {1: 100, 5: 50}
    total = 0
    for face in set(dice):
        count = dice.count(face)
        # Whole triples first, then whatever is left scores as singles.
        total += (count // 3) * triple_points.get(face, 0)
        total += (count % 3) * single_points.get(face, 0)
    return total
print(score( [5,2,1,4,1] )) | true |
56f478793b5815232b12e6166867c6ca8f500666 | Python | muhammadskhattak/image_recognition | /my_digits.py | UTF-8 | 1,292 | 3.484375 | 3 | [] | no_license | """ Muhammad Khattak
2018-04-12
Version 1.0
"""
from typing import Tuple, List
from vector import Vector
import csv, random, math
import numpy as np
class Network:
    """Feed-forward network skeleton: layer sizes plus zero-initialised parameters."""
    def __init__(self, sizes: List[int]) -> None:
        """ Create a new network with layers of the specified size."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias vector per non-input layer, sized to that layer.
        self.biases = [Vector([0 for i in range(j)]) for j in sizes[1:]]
        # NOTE(review): one weight vector per non-output layer; a fully
        # connected net normally needs a matrix per layer pair
        # (sizes[k] x sizes[k+1]) - confirm Vector covers this.
        self.weights = [Vector([0 for i in range(j)]) for j in sizes[:-1]]
def sigmoid(z: float) -> float:
    """ Apply the sigmoid (logistic) function 1 / (1 + e**-z) to z.

    Numerically stable: the naive form raises OverflowError in
    math.exp(-z) for large negative z, so an algebraically equivalent
    branch is used there instead.
    """
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    # exp(z) is tiny (never overflows) when z < 0.
    ez = math.exp(z)
    return ez / (1.0 + ez)
def parse_data() -> List[Tuple[int, List[int]]]:
    """ Parse the data in the csv file into labels and the pixels.

    Each CSV row is (label, pixel, pixel, ...); pixels are normalised to
    [0, 1] via feature_scale and wrapped in a Vector.

    NOTE(review): FILE_PATH is not defined in this chunk - confirm it is set
    at module level before this is called.
    """
    training_set = []
    # `with` guarantees the file is closed even if a row fails to parse
    # (the original leaked the handle on exception).
    with open(FILE_PATH) as data:
        reader = csv.reader(data)
        for row in reader:
            label = int(row[0])
            pixels = [feature_scale(int(pixel)) for pixel in row[1:]]
            training_set.append((label, Vector(pixels)))
    return training_set
def feature_scale(value: int) -> float:
    """ Normalize a pixel intensity from [0, 255] down to [0, 1].

    With min = 0 and max = 255 the generic formula
    (value - min) / (max - min) reduces to value / 255.
    """
    max_intensity = 255
    return value / max_intensity
| true |
09407621cc24ef05ad58c0cc2f6e9c806ebbc3f6 | Python | akshay2742/Coding-Problems | /Coding/python/fastPow.py | UTF-8 | 356 | 3.4375 | 3 | [] | no_license | def fastPow(a,b):
result=1
while b:
if(b&1):
result=(result*a)%1000000007
a=a*a%1000000007
b>>=1
return result%1000000007
def main():
    """Read a test count, then one 'a b' pair per line, printing fastPow(a, b).

    Python 2 script (raw_input).
    """
    remaining = int(raw_input())
    while remaining:
        parts = raw_input().split()
        print(fastPow(int(parts[0]), int(parts[1])))
        remaining -= 1
main()
| true |
072bc7e516766f9307228a362a3cdfc058e546a0 | Python | Arusharma/FailureTimePrediction | /flask/auto_arima.py | UTF-8 | 4,918 | 3.0625 | 3 | [] | no_license | #Before implementing ARIMA, you need to make the series stationary, and determine the values of p and q
#using the plots we discussed above. Auto ARIMA makes this task really simple for us as it eliminates
#Making series stationary,determining the values of p,d,q and creating the ACF and PACF plots.
import pandas as pd
import numpy as np
import math
import datetime
import matplotlib.pyplot as plt
from pyramid.arima import auto_arima
import json
from json import dumps
# --- Load the event log and encode Type/Message as numeric categories ---
data = pd.read_csv('p2.csv')
data=data[['_time','Type','Message']]
data.loc[data.Type =='Error', 'Type'] = 1
data.loc[data.Type =='Warning', 'Type'] = 0
data.loc[data.Type =='Information', 'Type'] = 0
data.loc[(data.Type.str.contains('Exception'))==True, 'Type'] = 1
data.loc[(data.Message.str.contains('DB error'))==True, 'Message'] = 1
data.loc[(data.Message.str.contains('APIs Error'))==True, 'Message'] = 2
data.loc[(data.Message.str.contains('Shared repository'))==True, 'Message'] = 3
# --- Convert timestamps to seconds elapsed since the earliest event ---
data['_time']=pd.to_datetime(data['_time'], format="%Y/%m/%dT%H:%M:%S")
least_recent_date = data['_time'].min()
recent_date = data['_time'].max()
data['cycles']=(data['_time']-least_recent_date)
data['cycles']=(data['cycles'].astype('timedelta64[s]'))
data=data.sort_values(by='cycles')
#data=data[data.Type != 0]
# Keep only 'Shared repository' events (Message category 3).
data=data[data.Message==3]
data=data[['_time','cycles']]
original=data['cycles']
n=len(data)
print(n)
# Forecast horizon: 10% of the series length.
forecast_out=int(math.ceil(0.1*(n)))
print(forecast_out)
# Supervised target: the cycle value forecast_out steps ahead.
data['label']=data['cycles'].shift(-forecast_out)
print("length of data is",len(data))
data.dropna(inplace=True)
original=data['cycles']
original=original.to_frame()
print("length of data is after dropping ",len(data))
d1=data['label'] #this step changesa dataframe object into that of a series.series object
d1=d1.to_frame(name='label') #thus need to convert it back into a dataframe object
print(type(d1))
data=data['label']
print(len(d1))
data=data.to_frame(name='label')
print(type(data))
print(data.iat[len(data)-1,0])
#divide into train and validation set
train = data[:int(0.8*(len(data)))]
test = data[int(0.8*(len(data))):]
# Grid-search ARIMA orders on the training split (stepwise disabled = full grid).
model = auto_arima(train, trace=True,start_p=0, start_q=0, start_P=0, start_Q=0,
                   max_p=3, max_q=3, max_P=3, max_Q=3, seasonal=True,
                   stepwise=False, suppress_warnings=True, D=1, max_D=10,
                   error_action='ignore',approximation = False)
#change 3 to 10
#fitting model
model.fit(train)
print(model.summary())
#the one with the lower AIC is generally “better”.
y_pred = model.predict(n_periods=len(test))
#y_pred = model.predict(n_periods=len(test))
y_pred = pd.DataFrame(y_pred,columns=['label'])
# Plot train/test/prediction against the original cycle axis.
plt.figure(1)
plt.plot(original[:len(train)],train)
plt.plot(original[-len(test):],test,'b')
plt.plot(original[-len(y_pred):],y_pred,'r')
#plt.savefig("/Users/Arunima_Sharma/Desktop/py/flask/static/fig1.png")
plt.show()
plt.figure(2)
plt.plot(original[-len(test):],test,'b')
plt.plot(original[-len(y_pred):],y_pred,'r')
plt.show()
#plt.savefig("/Users/Arunima_Sharma/Desktop/py/flask/static/fig2.png")
# --- Hold-out metrics for the fitted model ---
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
acc = r2_score(test, y_pred)
print(acc)
mse = mean_squared_error(test, y_pred)
print('MSE: %f' % mse)
rmse = math.sqrt(mse)
print('RMSE: %f' % rmse)
mae = mean_absolute_error(test, y_pred)
print('MAE: %f' % mae)
# --- Refit on the full series and forecast forecast_out future points ---
model = auto_arima(data, trace=True,start_p=0, start_q=0, start_P=0, start_Q=0,
                   max_p=3, max_q=3, max_P=3, max_Q=3, seasonal=True,
                   stepwise=False, suppress_warnings=True, D=1, max_D=10,
                   error_action='ignore',approximation = False)
#change 3 to 10
#fitting model
model.fit(data)
print(model.summary())
y_pred = model.predict(n_periods=forecast_out)
y_pred = pd.DataFrame(y_pred,columns=['label'])
conn = pd.concat([d1, y_pred], axis=0)
n=conn.size-forecast_out
plt.figure(0)
plt.plot(original[:n],conn[:n],'y')
diff=original.iat[len(original)-1,0]-original.iat[len(original)-forecast_out,0]
plt.plot(original[-forecast_out:]+diff,conn[-forecast_out:],'r')
plt.show()
#plt.savefig("/Users/Arunima_Sharma/Desktop/py/flask/static/fig3.png")
print(type(least_recent_date))
print("recent_date is",recent_date)
# Convert predicted cycle offsets back to absolute timestamps, keeping only
# dates at or after the most recent observed event.
date_list=[]
for i in range(len(y_pred)):
    sec=y_pred.iat[i,0]
    x=least_recent_date+datetime.timedelta(seconds=sec)
    if(max(x,recent_date)==x):
        {
        #print(x)
        date_list.append(x)
        }
date_result = pd.DataFrame(date_list)
date_result = date_result.astype(str)
#d1=data_result.to_csv('result.csv', encoding='utf-8', index=False)
date_result.to_csv('result.csv', encoding='utf-8', index=False)
#date_result = date_result.to_json()
#with open('final.json', 'w') as outfile:
#    json.dump(date_result, outfile)
| true |
6e41313e182748e28682667da26852f683e12649 | Python | VachelHU/HEBR | /data_factory/dataloader.py | UTF-8 | 1,587 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import numpy as np
class BatchLoader():
    """Splits an (x, y) dataset into mini-batches and serves them cyclically."""
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.x = None
        self.y = None
        self.pointer = 0
        self.num_batch = 0
    # Shuffle the data
    def Shuffle(self, datalength):
        """Return a random permutation of the indices 0..datalength-1."""
        return np.random.permutation(np.arange(datalength))
    def SplitBatches(self, data):
        """Split `data` into num_batch full batches plus an optional remainder batch."""
        full_part = data[:self.num_batch * self.batch_size]
        reminder = data[self.num_batch * self.batch_size:]
        data_batches = np.split(full_part, self.num_batch, 0)
        if reminder.shape[0] != 0:
            data_batches.append(reminder)
        return data_batches
    def load_data(self, x=None, y=None, shuffle=False):
        """Store the dataset, optionally shuffle it, and pre-split the batches."""
        # Fix: np.float / np.int were deprecated and removed in NumPy 1.24
        # (AttributeError on modern NumPy); the Python builtins are equivalent.
        self.x = np.asarray(x, dtype=float)
        self.y = np.asarray(y, dtype=int)
        # Shuffle the data
        if shuffle:
            shuffle_indices = self.Shuffle(self.x.shape[0])
            self.x = self.x[shuffle_indices]
            self.y = self.y[shuffle_indices]
        # Split batches
        self.num_batch = int(self.x.shape[0] / self.batch_size)
        self.pointer = 0
        self.x_batches = self.SplitBatches(self.x)
        self.y_batches = self.SplitBatches(self.y)
        self.num_batch = len(self.x_batches)
    def next_batch(self):
        """Return the next (x, y) batch, wrapping back to the first at the end."""
        x_batch = self.x_batches[self.pointer]
        y_batch = self.y_batches[self.pointer]
        self.pointer = (self.pointer + 1) % self.num_batch
        return x_batch, y_batch
def reset_pointer(self):
self.pointer = 0 | true |
1677a8e61f41b92b08fa77df9d3dff473c8b7023 | Python | Manas2909/Python-Stuff | /re7.py | UTF-8 | 454 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 12:22:05 2019
@author: Manas
"""
import re
# re.sub replaces every match; IGNORECASE makes it also hit 'Ub' in 'Uber'.
print(re.sub('ub', '~*' , 'Subject has Uber booked already', flags = re.IGNORECASE))
# Case-sensitive: only the lowercase 'ub' in 'Subject' matches.
print(re.sub('ub', '~*' , 'Subject has Uber booked already'))
# count=1 limits the substitution to the first match only.
print(re.sub('ub', '~*' , 'Subject has Uber booked already', count=1, flags = re.IGNORECASE))
# \s on both sides requires ' AND ' as a separate word (any case).
print(re.sub(r'\sAND\s', ' & ', 'Baked Beans And Spam', flags=re.IGNORECASE))
ee4eb48a5f1f4020236396a1a40f1c83a0610eb7 | Python | edyarm/pokemonapi | /apps/evolution/serializers.py | UTF-8 | 772 | 2.515625 | 3 | [] | no_license | from rest_framework import serializers
from .models import Pokemon, Stat
class StatSerializer(serializers.ModelSerializer):
    """Serialises a Stat model row as (name, effort, base_stat)."""
    class Meta:
        model = Stat
        fields = ('name', 'effort', 'base_stat')
        # NOTE(review): 'ordering' is a model/queryset Meta option, not a
        # standard serializer Meta key - confirm it has any effect here.
        ordering = ('name', 'effort', 'base_stat')
class EvolutonSerializer(serializers.BaseSerializer):
    """Read-only serialiser exposing an evolution's type, name and id."""
    def to_representation(self, instance):
        # BaseSerializer subclasses must implement to_representation for reads.
        return {
            'type': instance.type,
            'name': instance.name,
            'id': instance.id
        }
class PokemonSerializer(serializers.Serializer):
    """Plain (non-model) serialiser for a Pokemon aggregate with nested stats/evolutions."""
    id = serializers.IntegerField()
    name = serializers.CharField()
    height = serializers.FloatField()
    weight = serializers.FloatField()
    stats = StatSerializer(many=True)
    evolutions = EvolutonSerializer(many=True)
| true |
6b1469ae527b4df3694f8521fcca1d7d9bb0238d | Python | thelastdark99/becasdigitalizadas2020 | /Script-RouterCSR1000V/NO_ES_NECESARIO_VER/Delete_Interfaces_Restconf.py | UTF-8 | 927 | 3.140625 | 3 | [] | no_license | #Importamos los modulos para realizar consultas http (request) y el modulo para convertirlo a formato json(json)
# HTTP client plus TLS-warning suppression (the router uses a self-signed cert).
import requests,urllib3
# Silence the InsecureRequestWarning raised by verify=False below.
urllib3.disable_warnings()
# Repeatedly ask for a Loopback interface number and DELETE it via RESTCONF.
while True:
    interfaz=int(input("Indica el numero de interfaz que desea borrar: "))
    URL="https://192.168.1.202/restconf/data/ietf-interfaces:interfaces/interface=Loopback"+str(interfaz)
    basicAuth=("cisco","cisco123!")
    respuesta=requests.delete(URL,auth=basicAuth,verify=False)
    op=input("¿Desea volver a eliminar una interfaz?(Y/N): ")
    if op=="Y" or op=="y":
        continue
    if op=="N" or op=="n":
        # 2xx/3xx status means the DELETE succeeded.
        if respuesta.status_code>=200 and respuesta.status_code<=399:
            print("Codigo de respuesta: ",respuesta.status_code,". Los datos se han borrado con exito")
        else:
            print("Codigo de respuesta: ",respuesta.status_code,". Los datos NO se han podido borrar")
        # Fix: without this break the loop restarted even after answering 'N'.
        break
c9ef23b64f83d39665ccf7579c6c22ff556fb75d | Python | pereirfe/Osciloscope | /gpio.py | UTF-8 | 298 | 2.78125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import sys
# Configure BCM pin 23 as an input with an internal pull-down resistor.
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
last = 0
act = 1
# Busy-poll the pin forever, emitting '*' on rising and '_' on falling edges.
# NOTE: Python 2 only - the '<>' inequality operator was removed in Python 3.
while True:
    sys.stdout.flush()
    act = GPIO.input(23)
    if(act<>last):
        if(act == 1):
            sys.stdout.write('*')
            last = 1
        else:
            sys.stdout.write('_')
            last = 0
| true |
04e66e4ce10ae4663e2c81fbea4a34494a487eb3 | Python | ZhihaoZhu/Advanced-Neural-Networks-for-Recognition | /python/run_q5.py | UTF-8 | 3,635 | 2.875 | 3 | [] | no_license | import numpy as np
import scipy.io
from nn import *
from collections import Counter
# --- Load NIST36 training/validation images (labels unused: autoencoder task) ---
train_data = scipy.io.loadmat('../data/nist36_train.mat')
valid_data = scipy.io.loadmat('../data/nist36_valid.mat')
# we don't need labels now!
train_x = train_data['train_data']
valid_x = valid_data['valid_data']
print(valid_x.shape)
max_iters = 100
# pick a batch size, learning rate
batch_size = 36
learning_rate = 3e-5
hidden_size = 32
lr_rate = 20
batches = get_random_batches(train_x,np.ones((train_x.shape[0],1)),batch_size)
batch_num = len(batches)
params = Counter()
M_params = Counter()
# initialize layers here
# Autoencoder: 1024 -> 32 -> 32 -> 32 -> 1024, plus momentum buffers.
initialize_weights(1024,hidden_size,params,'layer1')
initialize_weights(hidden_size,hidden_size,params,'hidden')
initialize_weights(hidden_size,hidden_size,params,'hidden2')
initialize_weights(hidden_size,1024,params,'output')
initialize_Momentum_weights(1024,hidden_size,M_params,'layer1')
initialize_Momentum_weights(hidden_size,hidden_size,M_params,'hidden')
initialize_Momentum_weights(hidden_size,hidden_size,M_params,'hidden2')
initialize_Momentum_weights(hidden_size,1024,M_params,'output')
loss_plot = []
# should look like your previous training loops
for itr in range(max_iters):
    total_loss = 0
    for xb,_ in batches:
        h1 = forward(xb, params, 'layer1', relu)
        h2 = forward(h1, params, 'hidden', relu)
        h3 = forward(h2, params, 'hidden2', relu)
        output = forward(h3, params, 'output', sigmoid)
        # loss
        # be sure to add loss and accuracy to epoch totals
        # Squared reconstruction error against the input itself.
        loss = np.sum((output-xb)**2)
        total_loss += loss
        # backward
        delta = 2*(output-xb)
        delta1 = backwards(delta, params, name='output', activation_deriv=sigmoid_deriv)
        delta2 = backwards(delta1, params, name='hidden2', activation_deriv=relu_deriv)
        delta3 = backwards(delta2, params, name='hidden', activation_deriv=relu_deriv)
        backwards(delta3, params, name='layer1', activation_deriv=relu_deriv)
        # apply gradient
        # SGD with momentum 0.9: v <- 0.9*v - lr*grad; w <- w + v.
        for k, v in params.items():
            if 'grad' in k:
                name = k.split('_')[1]
                M_params[name] = 0.9*M_params[name] - learning_rate * v
                params[name] += M_params[name]
    loss_plot.append(total_loss)
    if itr % 2 == 0:
        print("itr: {:02d} \t loss: {:.2f}".format(itr,total_loss))
    if itr % lr_rate == lr_rate-1:
        learning_rate *= 0.9
'''
Print Loss
'''
time_seq = np.arange(max_iters)
import matplotlib.pyplot as plt
plt.figure(2)
ax = plt.gca()
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
ax.plot(time_seq, loss_plot, color='r', linewidth=1, alpha=0.6)
plt.pause(1500)
plt.close()
'''
Save Parameters
'''
import pickle
# Keep only learned parameters (grad/cache keys contain '_').
saved_params = {k:v for k,v in params.items() if '_' not in k}
with open('q5_weights.pickle', 'wb') as handle:
    pickle.dump(saved_params, handle, protocol=pickle.HIGHEST_PROTOCOL)
# visualize some results
# Q5.3.1
import matplotlib.pyplot as plt
import pickle
with open('q5_weights.pickle', 'rb') as handle:
    params = pickle.load(handle)
# Reconstruct the validation set with the saved weights.
h1 = forward(valid_x,params,'layer1',relu)
h2 = forward(h1,params,'hidden',relu)
h3 = forward(h2,params,'hidden2',relu)
out = forward(h3,params,'output',sigmoid)
for i in range(904,910):
    plt.subplot(2,1,1)
    plt.imshow(valid_x[i].reshape(32,32).T)
    plt.subplot(2,1,2)
    plt.imshow(out[i].reshape(32,32).T)
    plt.show()
# evaluate PSNR
# Q5.3.2
from skimage.measure import compare_psnr as psnr
psnr_sum = 0
for i in range(valid_x.shape[0]):
    psnri = psnr(valid_x[i], out[i])
    psnr_sum += psnri
psnr_avg = psnr_sum / valid_x.shape[0]
print(psnr_avg)
| true |
0d979c0d8efed38b53991316096e8546d874603d | Python | Dking155/1codesAndOthrStuff | /stringsAndThings.py | UTF-8 | 1,282 | 4.375 | 4 | [] | no_license | # strings
# data that falls within" " marks
# Concatenation
# Put 2 or more strings together
firstname = "Fred"
lastname = "Flintstone"
fullname = firstname + " " + lastname
print(fullname)
# Repetition
# repetition operator: *
# "Hip " is repeated twice, then 'Hooray!' is appended.
print("Hip " * 2 + "Hooray!")
def rowyourboat():
    """Print the verse of 'Row, Row, Row Your Boat' using string repetition."""
    verse = [
        "Row, " * 3 + 'your boat',
        "Gently down the stream",
        "Merrily, " * 4,
        "Life is but a dream",
    ]
    for line in verse:
        print(line)
rowyourboat()
# Indexing
name = "Roy G Biv"
firstChar = name[0]
print(firstChar)
middleIndex = len(name) // 2
print(middleIndex)
print(name[middleIndex])
# Negative indices count from the end; -1 is the last character.
print(name[-1])
for i in range(len(name)):
    print(name[i])
# slicing and dicing
# slicing operator: :
# slicing lets us make substrings
print(name[0:3])
print(name[:5])
print(name[6:9])
print(name[6:])
# Prints every prefix of name, one character longer each time.
for i in range(1, len(name) + 1):
    print(name[0:i])
# searching inside of substrings
print("Biv" in name)
print("v" not in name)
if "y" in name:
    print("the letter y is in name")
else:
    print("the letter y is not in name")
# character functions
print(ord("5"))
print(chr(97 + 13))
print(str(12548))
# testing functions from mapper.py
# NOTE(review): star-imports from local modules can shadow earlier names.
from mapper import *
print(letterToIndex('P'))
print(indexToLetter(10))
from crypto import *
print(caesarEncrypt(5))
print(caesarDecrypt(5))
| true |
26a538aa45b1e929b1d5e37fe8a4ea2c4cbff33b | Python | mahmoudheshmat/DS_py | /treetraverse.py | UTF-8 | 771 | 3.21875 | 3 | [] | no_license | import operator
from BinaryTree import BinaryTree
def preorder(tree):
    """Print node values root-first: root, left subtree, right subtree."""
    if not tree:
        return
    print(tree.getRootVal())
    preorder(tree.getLeftChild())
    preorder(tree.getRightChild())
def postorder(tree):
    """Print node values children-first: left subtree, right subtree, root."""
    if tree is None:
        return
    postorder(tree.getLeftChild())
    postorder(tree.getRightChild())
    print(tree.getRootVal())
def postordereval(tree):
    """Evaluate an expression tree bottom-up.

    Internal nodes hold operator symbols ('+', '-', '*', '/'); leaves hold
    numbers. Returns None for an empty tree.

    Fix: the original tested `if res1 and res2`, so a subtree evaluating to
    0 (falsy) made its parent return the operator symbol instead of applying
    the operator. Explicit None checks handle zero correctly.
    """
    opers = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
    if tree is None:
        return None
    res1 = postordereval(tree.getLeftChild())
    res2 = postordereval(tree.getRightChild())
    if res1 is not None and res2 is not None:
        # Internal node: combine the two child results.
        return opers[tree.getRootVal()](res1, res2)
    # Leaf node: its value is the operand itself.
    return tree.getRootVal()
def inorder(tree):
    """Print node values in-order: left subtree, root, right subtree."""
    if tree:
        inorder(tree.getLeftChild())
        print(tree.getRootVal())
        inorder(tree.getRightChild())  # fix: original called 'inroder' (NameError)
| true |
4231ea733aa1b81f59c519b3e3571107a8705610 | Python | pizza2u/Python | /exemplos_basicos/python/nome.py | UTF-8 | 151 | 3.953125 | 4 | [] | no_license | nome = input("Seu nome: ")
# Read the surname; the first name was read just above into `nome`.
sobrenome = input("Sobrenome: ")
# str.format with empty {} fills placeholders positionally.
print('Oi {} {}'.format(nome,sobrenome))
print('BY: {1}, {0}'.format(nome, sobrenome)) | true |
7ca1f4a38fa6fcd8b510b40343440bcf48065cb5 | Python | j-vent/data-collector | /colour_detection.py | UTF-8 | 5,420 | 2.765625 | 3 | [] | no_license | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# TODO: maybe make into a class ...
def find_element_centroid(img, colour, coord):
    """Write the (x, y) centroid of all pixels matching `colour` into `coord`.

    `coord` is a 2-element list used as an out-parameter; it is left
    unchanged when no pixel matches.
    """
    ys, xs = np.where(np.all(img == colour, axis=2))
    points = [[px, py] for px, py in zip(xs, ys)]
    if len(xs) != 0 and len(ys) != 0:
        # calculate centroid
        centre_x, centre_y = np.mean(points, axis=0)
        coord[0] = round(centre_x)
        coord[1] = round(centre_y)
# TODO: rewrite this method and the find_elem_centroid into one method, can just pass in another param probably
def find_element_centroid_pong(img, colour, coord):
    """Write the (x, y) centroid of matching pixels into `coord` (out-param),
    considering only rows inside the Pong play field (y in [33.5, 193.5]).
    """
    ys, xs = np.where(np.all(img == colour, axis=2))
    # Keep only pixels inside the vertical bounds of the play environment.
    points = [[px, py] for px, py in zip(xs, ys) if 33.5 <= py <= 193.5]
    if len(points) != 0:
        # calculate centroid
        centre_x, centre_y = np.mean(points, axis=0)
        coord[0] = round(centre_x)
        coord[1] = round(centre_y)
# TODO: rewrite to put dist[0] elsewhere
def find_distances(coordA, coordB, dist):
    """Store the Manhattan distance between two (x, y) points in dist[0]."""
    dx = abs(coordA[0] - coordB[0])
    dy = abs(coordA[1] - coordB[1])
    dist[0] = dx + dy
def find_blue_ghosts(img):
    """Return True if any pixel falls in the HSV range of frightened (blue) ghosts."""
    image = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # HSV bounds; presumably tuned for the frightened-ghost blue - TODO confirm.
    lower_blue = np.array([60, 100, 188])
    upper_blue = np.array([150,255, 255])
    mask = cv.inRange(image, lower_blue, upper_blue)
    # print("nonzero ", np.count_nonzero(mask))
    return np.count_nonzero(mask) > 0
def check_pills():
    """Mark a power pill eaten when Pac-Man is within 3px of it, and refresh
    the Manhattan distance to every pill (module-level state)."""
    for idx, (pill_x, pill_y) in enumerate(pill_locs):
        dx = abs(pacman_coord[0] - pill_x)
        dy = abs(pacman_coord[1] - pill_y)
        if dx <= 3 and dy <= 3:
            pill_eaten[idx] = True
        pill_dist[idx] = dx + dy
# Declare colours. OpenCV uses BGR not RGB
pacman_colour = [74, 164, 210]
pink_ghost_colour = [179, 89, 198]
red_ghost_colour = [72, 72, 200]
# called blue ghost sometimes?
green_ghost_colour = [153, 184, 84]
orange_ghost_colour = [48, 122, 180]
# 116 pixels per ghost, estimate as circle means radius is about 5 pixels
dark_blue_ghost = [194, 114, 66]
# Declare and initialize coordinates
# These lists are mutated in place by the find_* functions (out-parameters).
pacman_coord = [0, 0]
pink_ghost_coord = [0, 0]
red_ghost_coord = [0, 0]
green_ghost_coord = [0, 0]
orange_ghost_coord = [0, 0]
# Declare distances
# TODO: make into one array :(
# Single-element lists so find_distances can write results in place.
to_pink_ghost = [0]
to_red_ghost = [0]
to_green_ghost = [0]
to_orange_ghost = [0]
# Declare pill info
power_pill_top_left = [19.5, 18]
power_pill_btm_left = [19.5, 150]
power_pill_top_right = [300.5, 18]
power_pill_btm_right = [300.5, 150]
# Order matters: indices into pill_eaten/pill_dist follow this append order.
pill_locs = []
pill_locs.append(power_pill_top_left)
pill_locs.append(power_pill_top_right)
pill_locs.append(power_pill_btm_right)
pill_locs.append(power_pill_btm_left)
# pill 1,2,3,4
pill_eaten = [False, False, False, False]
# top left, top right, btm right, btm left
pill_dist = [0,0,0,0]
def find_all_coords(im):
    """Load the frame at path `im` and extract all Ms. Pac-Man features.

    Updates the module-level coordinate/distance lists in place and returns
    them together with the pill state and a frightened-ghost flag.
    """
    img = cv.imread(im)
    # img = im
    # print("im ", im)
    # img_plot = cv.imread(im,0)
    # # plot img and edge detection
    # edges = cv.Canny(img_plot,100,200)
    # plt.figure()
    # # flip because opencv is BGR
    # plt.imshow(cv.cvtColor(img_plot, cv.COLOR_BGR2RGB))
    # plt.title('OG img'), plt.xticks([]), plt.yticks([])
    # plt.show()
    # move to own func
    # Locate each sprite by colour, then compute its distance to Pac-Man.
    find_element_centroid(img, pacman_colour, pacman_coord)
    find_element_centroid(img, pink_ghost_colour, pink_ghost_coord)
    find_distances(pink_ghost_coord, pacman_coord, to_pink_ghost)
    find_element_centroid(img, red_ghost_colour, red_ghost_coord)
    find_distances(red_ghost_coord, pacman_coord, to_red_ghost)
    find_element_centroid(img, green_ghost_colour, green_ghost_coord)
    find_distances(green_ghost_coord, pacman_coord, to_green_ghost)
    find_element_centroid(img, orange_ghost_colour, orange_ghost_coord)
    find_distances(orange_ghost_coord, pacman_coord, to_orange_ghost)
    check_pills()
    hasBlueGhost = find_blue_ghosts(img)
    return pacman_coord, pink_ghost_coord, red_ghost_coord, green_ghost_coord, orange_ghost_coord, to_pink_ghost[0], to_red_ghost[0], to_green_ghost[0], to_orange_ghost[0], pill_eaten, pill_dist, hasBlueGhost
# Pong sprite colours (BGR) and out-parameter state, mirroring the Pac-Man setup.
ball_colour = [236, 236, 236]
green_paddle_colour = [92, 186, 92]
brown_paddle_colour = [74, 130, 213]
ball_coord = [0,0]
green_paddle_coord = [0,0]
brown_paddle_coord = [0,0]
dist_ball_green_paddle = [0]
def find_pong_coords(im):
    """Load the Pong frame at path `im`, locate ball and paddles by colour,
    and return their coordinates plus the ball-to-green-paddle distance."""
    # or just pass in the obs array...
    img = cv.imread(im)
    # print("plot")
    # plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    # plt.show()
    find_element_centroid_pong(img, ball_colour, ball_coord)
    find_element_centroid_pong(img, green_paddle_colour, green_paddle_coord)
    find_element_centroid_pong(img, brown_paddle_colour, brown_paddle_coord)
    # print("ball ", ball_coord)
    # print("green ", green_paddle_coord)
    find_distances(green_paddle_coord, ball_coord, dist_ball_green_paddle)
    return ball_coord, green_paddle_coord, brown_paddle_coord, dist_ball_green_paddle[0]
# print("pacman coord ", pacman_coord)
# print("pink ghost ", pink_ghost_coord)
# print("red ghost ", red_ghost_coord)
# print("green ghost ", green_ghost_coord)
# print("orange ghost ", orange_ghost_coord)
| true |
7a2b03a12f22ac1b3bc1d2760c7a08e1a9c43129 | Python | dpmittal/competitive_programming | /codechef/FEB19B/p2.py | UTF-8 | 330 | 3.28125 | 3 | [] | no_license | from math import floor
itr = int(input())
# For each test case: read l strings and print how many letters appear in all.
for _ in range(itr):
    l = int(input())
    common = set('abcdefghijklmnopqrstuvwxyz')
    for _ in range(l):
        common = common & set(input())
    print(len(common))
| true |
6ec2e21e277acd0b18a98c0bb6b120301b71838f | Python | millu94/joshuas_weekend_hw_01 | /src/pet_shop.py | UTF-8 | 2,182 | 3.359375 | 3 | [] | no_license | # WRITE YOUR FUNCTIONS HERE
import pdb
#1 find the name of the pet shop
def get_pet_shop_name(pet_shop_info):
    """Return the shop's name."""
    return pet_shop_info["name"]

def get_total_cash(pet_shop_info):
    """Return the shop's cash balance."""
    return pet_shop_info["admin"]["total_cash"]

def add_or_remove_cash(pet_shop_info, cash_amount):
    """Adjust the shop's cash by cash_amount (negative removes cash)."""
    pet_shop_info["admin"]["total_cash"] += cash_amount

def get_pets_sold(pet_shop_info):
    """Return how many pets have been sold."""
    return pet_shop_info["admin"]["pets_sold"]

def increase_pets_sold(pet_shop_info, pets_sold):
    """Increase the pets-sold counter by pets_sold."""
    pet_shop_info["admin"]["pets_sold"] += pets_sold

def get_stock_count(pet_shop_info):
    """Return the number of pets currently in stock."""
    return len(pet_shop_info["pets"])
#8 + #9
# find how many there are of a given breed
def get_pets_by_breed(pet_shop_info, breed):
    """Return the list of pets whose breed equals `breed`.

    Fix: the original looped `while x < 6`, hard-coding a stock of exactly
    six pets - it raised IndexError on smaller stocks and silently ignored
    pets past index 5 on larger ones. Iterating the actual list handles any
    stock size.
    """
    return [pet for pet in pet_shop_info["pets"] if pet["breed"] == breed]
#10 + #11
def find_pet_by_name(pet_shop_info, name):
    """Return the matching pet's name, or None when no pet has that name."""
    for pet in pet_shop_info["pets"]:
        if pet["name"] == name:
            return pet["name"]
    return None
#12 remove pet by name
def remove_pet_by_name(pet_shop_info, name):
    """Remove every pet whose name equals `name`.

    Fix: the original removed items from the list while iterating it, which
    skips the element following each removal (so duplicate names could
    survive). Rebuilding via a filtered slice assignment keeps the same list
    object (callers holding a reference still see the change) and removes
    all matches.
    """
    pet_shop_info["pets"][:] = [pet for pet in pet_shop_info["pets"] if pet["name"] != name]
#13 add pet to stock
def add_pet_to_stock(pet_shop_info, add_pet):
    """Append add_pet to the shop's stock list."""
    stock = pet_shop_info["pets"]
    stock.append(add_pet)
#14 get customer cash
def get_customer_cash(customer_list):
    """Return the customer's cash balance."""
    return customer_list["cash"]

def remove_customer_cash(customers, cash_remove):
    """Deduct cash_remove from the customer and return the new balance."""
    customers["cash"] = customers["cash"] - cash_remove
    return customers["cash"]

def get_customer_pet_count(customers_pets):
    """Return how many pets the customer owns."""
    return len(customers_pets["pets"])
#17 add pet to customer
def add_pet_to_customer(customers_pets, add_pet):
customers_pets["pets"].append(add_pet) | true |
69ebca76491b013f7cc0864d9a95741ea0bf8e52 | Python | bmazey/python_nlp | /application.py | UTF-8 | 663 | 2.53125 | 3 | [
"MIT"
] | permissive | from flask import Flask
from flask_restplus import Resource, Api
# welcome to flask: http://flask.pocoo.org/
# working with sqlalchemy & swagger:
# http://michal.karzynski.pl/blog/2016/06/19/building-beautiful-restful-apis-using-flask-swagger-ui-flask-restplus/
# Flask app plus Flask-RESTPlus API wrapper (adds routing and Swagger UI).
application = Flask(__name__)
api = Api(application)
@api.route("/hello") # Create a URL route to this resource
class HelloWorld(Resource): # Create a RESTful resource
    def get(self): # Create GET endpoint
        return {'hello': 'world'}
def main():
    # Debug mode enables the reloader and interactive tracebacks (dev only).
    application.debug = True
    application.run()
if __name__ == "__main__":
    main()
| true |
485d904036f1cc3b029c4aa6b5d2d3eb06283c82 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/word-count/d09d149ca1254da2b70cfb25d74f2a6c.py | UTF-8 | 609 | 3.0625 | 3 | [] | no_license | '''
The solution that I am posting is not own.
I found two different solutions that work
and I am putting them here.
Solution A belongs to @mnorbury and @ThomasZumsteg
I have read about the Counter container and I
understand how to use it.
http://pymotw.com/2/collections/counter.html
Solution B belongs to @abeger
The solution works but I do not really understand
how it works.
'''
from collections import Counter
# Solution A
def word_count(phrase):
    """Return a Counter mapping each whitespace-separated word to its frequency."""
    words = phrase.split()
    return Counter(words)
# Solution B
#def word_count(phrase):
# p = phrase.split()
# return dict([(w, p.count(w)) for w in set(p)])
| true |
9104fce2fb76e5a9d773f6b8c9b73161bd1f2789 | Python | MakingMexico/CursoPythonArduino | /functions/functions.py | UTF-8 | 200 | 3.875 | 4 | [] | no_license | def suma(a, b):
return a + b
def suma_tres(a, b=3):
    """Return a + b, where b defaults to 3."""
    result = a + b
    return result
"""a = float(input("Ingrese el primer valor: "))
b = float(input("Ingrese el segundo valor: "))
print(suma_tres(a, b))"""
| true |
243206e5626df72107475e9e319bca9113f54b0b | Python | deefunkt/machineLearning | /stockPrediction/sentiment analysis.py | UTF-8 | 5,741 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 17 13:18:37 2019
@author: A-Sha
"""
import time
import datetime as dt
import pandas as pd
from glob import glob
import re
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.dates as mdates
from textblob import TextBlob
###############################################################################
'''Global conf variables '''
LOG_FILE = 'sentiment_analysis.log'
CONF_FILE = 'Conf/conf.csv'
DATAPATH = './Data/asxData/'
style.use('ggplot')
###############################################################################
''' Class definitions '''
class Timer:
    """Simple wall-clock stopwatch."""
    def __init__(self):
        self.start_time = 0
    def start(self):
        """Record the current time as the reference point."""
        self.start_time = time.time()
    def elapsed_time(self):
        """Return the seconds elapsed since start() was called."""
        return time.time() - self.start_time
    def elapsed_time_string(self):
        """Return the elapsed time formatted as 'D days:H hours:M minutes:S seconds'."""
        delta = dt.timedelta(seconds=time.time() - self.start_time)
        # Anchor at year 1 so the timedelta decomposes into calendar fields.
        anchor = dt.datetime(1, 1, 1) + delta
        return "%d days:%d hours:%d minutes:%d seconds" % (
            anchor.day - 1, anchor.hour, anchor.minute, anchor.second)
class Logger:
    """Writes timestamped log lines to a file and echoes them to stdout."""
    def __init__(self, logfile, one_time=False):
        self.f = open(logfile, 'w+')
        if one_time:
            # Truncate any previous contents so each run starts fresh.
            self.f.seek(0)
            self.f.truncate()
        self.f.write(str(dt.datetime.now()) + '\n')
        self.f.write('##############################')
        self.conf_dict = {}
    def writelog(self, log_string):
        """Append log_string to the file and print it to the console."""
        self.f.write(log_string + '\n')
        print(log_string)
    def close_log(self):
        """Close the underlying log file handle."""
        self.f.close()
    def read_conf(self,conf_file='Conf/conf.csv'):
        """Load a CSV configuration file into self.conf_dict (a DataFrame)."""
        self.conf_dict = pd.read_csv(conf_file, delimiter=',')
###############################################################################
''' General method definitions '''
def preprocess_messages(messages):
    """Normalise a pandas Series of raw message strings.

    Expands common contractions, converts punctuation/digits/markup to
    spaces, collapses runs of whitespace, lower-cases, and replaces missing
    entries with the placeholder 'text'.
    """
    strip_pattern = re.compile(r"(\$)|(\+)|(@)|(%)|(\;)|(\:)|(\!)|(\?)|(\,)|(\")|(\()|(\))|(\[)|(\])|([0-9]+)")
    space_pattern = re.compile(r"(<br\s*/><br\s*/>)|(\-)|(\/)|(\.)")
    # Order matters: these replacements are applied in insertion order.
    contractions = {
        "I'm": "I am",
        "won't": "will not",
        "'s": "",
        "'ll": " will",
        "'ve": " have",
        "n't": " not",
        "'re": " are",
        "'d": " would",
        "y'all": "all of you",
    }
    cleaned = (messages.replace(contractions, regex=True)
                       .str.replace(space_pattern, ' ', regex=True)
                       .str.replace(strip_pattern, ' ', regex=True)
                       .str.replace(r'\s{2,}', ' ', regex=True)
                       .str.lower())
    return cleaned.fillna('text')
def stock_data_import(path):
csv_files = glob(path + '*/*', recursive=True)
csv_files.sort()
rawdata = []
for i in range(0, len(csv_files)):
temp_cv = pd.read_csv(csv_files[i], header=0, index_col=0,
names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'],
parse_dates=['Date'])
try:
rawdata.append(temp_cv.loc[stock])
except KeyError:
try:
rawdata.append(temp_cv.loc[altname])
except:
pass
print(stock + " not found at " + str(temp_cv['Date'][0].date()))
processed_data = pd.DataFrame(rawdata)
processed_data = processed_data.reset_index(drop=True)
processed_data.set_index('Date', inplace=True)
# currently in format [Open, High, Low, Close, Volume].
# We reorganize:
cols = ['Open', 'High', 'Low', 'Volume','Close']
processed_data = processed_data[cols]
print('End of data import.')
return processed_data
def get_sentiment(blob):
return blob.sentiment.polarity
def get_subjectivity(blob):
return blob.sentiment.subjectivity
###############################################################################
''' Initialization '''
timer = Timer()
logger = Logger(LOG_FILE, one_time=True)
logger.read_conf()
stock = 'KRR'
altname= 'KRC'
timer.start()
stock_data = stock_data_import(DATAPATH)
logger.writelog('Importing ASX data took {} seconds'.format(timer.elapsed_time()))
df = pd.read_csv('Data/' + stock + '.csv', encoding = "cp1252")
###############################################################################
''' Preprocessing stage '''
logger.writelog('Beginning preprocessing.')
timer.start()
df['datetime'] = df['date'] + ' ' + df['time']
df.set_index('datetime', inplace=True)
df.drop(['date','time'], inplace=True, axis=1)
df.index = pd.to_datetime(df.index, errors='coerce')
df = df[pd.notnull(df.index)]
df['message'] = preprocess_messages(df['message'])
df['textblob'] = df['message'].apply(TextBlob)
df['sentiment'] = df.textblob.apply(get_sentiment)
df['subjectivity'] = df.textblob.apply(get_subjectivity)
sentiments_date = pd.DataFrame(columns = ['sentiment', 'subjectivity'], index=pd.unique(df.index.date))
sentiments_date.index = pd.to_datetime(sentiments_date.index)
sentiments_date['day'] = sentiments_date.index.day_name()
for index, value in sentiments_date.iterrows():
sentiments_date.loc[index, 'sentiment'] = df.sentiment[df.index.date == index.date()].mean()
sentiments_date.loc[index, 'subjectivity'] = df.subjectivity[df.index.date == index.date()].mean()
logger.writelog('Preprocessing took {} seconds'.format(timer.elapsed_time()))
ax3 = plt.subplot(3,1,1)
stock_data['Close'].plot( label = stock + ' Close', )
plt.legend()
ax2 = plt.subplot(3,1,2)
sentiments_date['subjectivity'].plot(label = 'subjectivity', sharex=ax3)
plt.legend()
ax1 = plt.subplot(3,1,3)
sentiments_date['sentiment'].plot(label = 'sentiment', sharex=ax3)
plt.legend()
#set major ticks format
plt.show()
logger.close_log()
| true |
4628f6d0b37eacf721feb709def060abbf461f38 | Python | sagasurvey/saga | /SAGA/objects/calc_sfr.py | UTF-8 | 2,172 | 2.609375 | 3 | [
"MIT"
] | permissive | """
From Marla 03/07/2023
"""
import numpy as np
__all__ = ["calc_SFR_NUV", "calc_SFR_Halpha"]
def calc_SFR_NUV(NUV_mag, NUV_mag_err, dist_mpc, internal_ext=0.7):
"""
Convert NUV magnitudes into a SFR
Based on Iglesias-Paramo (2006), Eq 3
https://ui.adsabs.harvard.edu/abs/2006ApJS..164...38I/abstract
"""
# DISTANCE OF HOST (in cm)
dist = dist_mpc * 3.086e24
dmod = np.log10(4.0 * np.pi * dist * dist)
# CORRECT FOR INTERNAL EXTINCTION (assumed to be external extinction corrected)
m_nuv_ab = NUV_mag - internal_ext
# CONVERT GALEX m_AB TO FLUX: erg sec-1 cm-2 Angstrom-1)
# https://asd.gsfc.nasa.gov/archive/galex/FAQ/counts_background.html
log_flux_nuv = -0.4 * (m_nuv_ab - 20.08 - 2.5 * np.log10(2.06e-16))
log_flux_nuv_err = 0.4 * (NUV_mag_err) # ADD EXTRA ERROR FOR REDDENING OR DISTANCE?
# LUMINOSITY (erg/s/A-1)
# 796A is NUV filter width
log_L_nuv = log_flux_nuv + dmod + np.log10(796)
# CONVERT TO SOLAR LUMINOSITY
l_nuv_msun = log_L_nuv - np.log10(3.826e33)
# CONVVERT TO SFR: EQ 3, inglesias- paramo 2006
# AND ACCOUNT FOR IMF
log_SFR_NUV = l_nuv_msun - 9.33 - np.log10(1.5)
log_SFR_NUV_err = log_flux_nuv_err
return log_SFR_NUV, log_SFR_NUV_err
def calc_SFR_Halpha(EW_Halpha, EW_Halpha_err, spec_z, Mr, EWc=3, BD=3.5):
"""
Calculate Halpha-based EW SFR
Bauer+ (2013) https://ui.adsabs.harvard.edu/abs/2013MNRAS.434..209B/abstract
"""
# Bauer, EQ 2, term1
term1 = (EW_Halpha + EWc) * 10 ** (-0.4 * (Mr - 34.1))
term1_err = EW_Halpha_err * 10 ** (-0.4 * (Mr - 34.1))
# Bauer Eq 2, term2
term2 = 3e18 / (6564.6 * (1.0 + spec_z)) ** 2
# Balmer Decrement
term3 = (BD / 2.86) ** 2.36
L_Halpha = term1 * term2 * term3
L_Halpha_err = term1_err * term2 * term3
# EQ 3, Bauer et al above
# Account for IMF
SFR = L_Halpha / (1.27e34 * 1.5)
SFR_err = L_Halpha_err / (1.27e34 * 1.5)
log_Ha_SFR = np.log10(SFR)
# PROPOGATE ERRORS
log_SFR_err2 = SFR_err**2 * (1.0 / (SFR * np.log(10.0))) ** 2
log_Ha_SFR_err = np.sqrt(log_SFR_err2)
return log_Ha_SFR, log_Ha_SFR_err
| true |
c7b48a6ec3999d13d2be31e375df37cc1fad636a | Python | enricozf/energy-consumption-forecast | /Code/utils/metrics.py | UTF-8 | 3,387 | 3.109375 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from tensorflow.keras.losses import MeanSquaredError, MeanAbsoluteError
def last_timestep_mse(y_true, y_pred):
return MeanSquaredError()(y_true[:,-1,:], y_pred[:,-1,:])
def last_timestep_mae(y_true, y_pred):
return MeanAbsoluteError()(y_true[:,-1,:], y_pred[:,-1,:])
def metrics (y_true,y_pred,filtON = False):
"""Calculates metrics for regressive modeling.
Args:
y_true: Real target values (array).
y_pred: Predicted target values (array).
filtON: If True, the function filters results (butterworth filter).
You must pass the array as np.hstack().
Returns:
dic_metrics: Dictionary with metrics.
- R2 Score
- MAE
- MSE
- Correlation
- Shift (Positive Correlation)
- Shift (Negative Correlation)
"""
def positive_shift_corr(y_true,y_pred):
"""Metric that advances the real signal and compares it with the current modeling.
Args:
y_true: Real target values.
y_pred: Predicted target values.
Return:
k_best: The bigger it means that a prediction is happening with that lag.
corr_max: The bigger it means that the prediction highly correlated.
"""
k_best,corr_max = 1,0.0
for k in np.arange(1,50,3):
corr_actual = np.corrcoef((y_true[k:],y_pred[:-k]))[1,0]
if (corr_actual >=corr_max):
k_best,corr_max = k, corr_actual
return (k_best,np.round(corr_max,2))
def negative_shift_corr(y_true,y_pred):
"""Metric that delays the real signal and compares with the current modeling.
Args:
y_true: Real target values.
y_pred: Predicted target values.
Return:
k_best: Lag value.
corr_max: Max Correlation.
If the correlation value is high, it means that it may be repeating the value of K previous samples of the signal.
The lower the correlation value, and the higher the K value, the better.
"""
k_best,corr_max = 1,0.0
for k in np.arange(1,50,3):
corr_actual = np.corrcoef((y_true[:-k],y_pred[k:]))[1,0]
if (corr_actual >=corr_max):
k_best,corr_max = k, corr_actual
return (k_best,np.round(corr_max,2))
last_y_true, last_y_pred = y_true[:,-1,:], y_pred[:,-1,:]
dic_metrics = {}
dic_metrics['r2'] = r2_score(last_y_true,last_y_pred)
dic_metrics['mae'] = mean_absolute_error(last_y_true,last_y_pred)
dic_metrics['mse'] = mean_squared_error(last_y_true,last_y_pred)
dic_metrics['corr'] = np.corrcoef((last_y_true.flatten(),last_y_pred.flatten()))[1,0]
dic_metrics["positive_shift"] = [positive_shift_corr(last_y_true.flatten(),last_y_pred.flatten())]
dic_metrics["negative_shift"] = [negative_shift_corr(last_y_true.flatten(),last_y_pred.flatten())]
df_percentil = pd.DataFrame(np.abs(last_y_true-last_y_pred))
dic_metrics['25%'] = df_percentil.quantile(0.25)
dic_metrics['50%'] = df_percentil.quantile(0.50)
dic_metrics['75%'] = df_percentil.quantile(0.75)
return dic_metrics
| true |
839a9170eb8104469a4ead50abbf36e78d1b5a5d | Python | podhmo/individual-sandbox | /daily/20171123/example_dict/00rounddict.py | UTF-8 | 735 | 3.625 | 4 | [] | no_license | # https://stackoverflow.com/questions/32434112/round-off-floating-point-values-in-dict
# My dictionary is:
d = [
{
'A': 0.700000000,
'B': 0.255555555
}, {
'B': 0.55555555,
'C': 0.55555555
}, {
'A': 0.255555555,
'B': 0.210000000,
'C': 0.2400000000
}
]
# I need:
expected = [
{
'A': 0.70,
'B': 0.25
},
{
'B': 0.55,
'C': 0.55
},
{
'A': 0.25,
'B': 0.21,
'C': 0.24
},
]
# hmm
from dictknife import dictmap # noqa
got = dictmap(lambda x: round(x, 2) if isinstance(x, float) else x, d)
print(got)
# [{'A': 0.7, 'B': 0.26}, {'C': 0.56, 'B': 0.56}, {'C': 0.24, 'A': 0.26, 'B': 0.21}]
| true |
2448128db6b2e2ff2f8b3e4ed2fe21e2520672a3 | Python | mbgarciaarcija/python-_- | /ejercicios/clase5/funciones/filtrar.py | UTF-8 | 1,659 | 3.0625 | 3 | [] | no_license | import unicodedata
from functools import reduce
from Levenshtein import ratio
def to_canonico(string):
return ''.join((c for c in unicodedata.normalize('NFD', string.lower()) if unicodedata.category(c) != 'Mn'))
def inicio_func(anio):
def _inicio(reg):
return reg.anio >= anio
return _inicio
def fin_func(anio):
def _fin(reg):
return reg.anio <= anio
return _fin
def anio_func(anio):
def _anio(reg):
return reg.anio == anio
return _anio
def levenshtein_func(nombre):
def _levenshtein(reg):
return ratio(to_canonico(reg.nombre), to_canonico(nombre)) > 0.9
return _levenshtein
def canonico_func(nombre):
def _canonico(reg):
return reg.canonico == to_canonico(nombre)
return _canonico
def nombre_func(nombre):
def _nombre(reg):
return reg.nombre == nombre
return _nombre
def true_func(reg):
return True
def and_func(f1, f2):
def _and(reg):
return f1(reg) and f2(reg)
return _and
def or_func(f1, f2):
def _or(reg):
return f1(reg) and f2(reg)
return _or
FILTROS = {
"inicio": inicio_func,
"fin": fin_func,
"anio": anio_func,
"levenshtein": levenshtein_func,
"canonico": canonico_func,
"nombre": nombre_func
# Se pueden continuar creando y agregando funciones y todo funciona :)
# ej regex, startswith, endswith
}
def aplicar_filtros(datos, filtros):
filtro = reduce(lambda a, f: and_func(a, f), filtros, true_func)
return [ r for r in datos if filtro(r) ]
def obtener_uno(datos, filtros):
datos = aplicar_filtros(datos, filtros)
if datos:
return datos.pop(0) | true |
3b9d120fff0f03080cf08f424cee9da3d24a108a | Python | lukaszgolojuch/Obliczanie-diety-python-obiektowo | /main.py | UTF-8 | 9,972 | 3.78125 | 4 | [] | no_license |
#------------------------------------------------------
# Nazwa programu: Obliczanie diety
# Jezyk programowania: Python
# Srodowisko programistyczne: Visual Studio Code
#
# Autor: Lukasz Golojuch
#------------------------------------------------------
class User:
#inicjacja zmiennych
imie = ""
wiek = None
plec = 1
waga = 1
wzrost = 1
aktywnosc = 1.0
def oblicz_bmr(self):
#obliczanie BMR dla danego uzytkownika
if self.plec == 1:
#kobiety
self.bmr = round(655 + (9.6 * self.waga) + (1.8 * self.wzrost) + (4.7 * self.wiek),2)
else :
#mezczyzni
self.bmr = round(66 + (13.7 * self.waga) + (5 * self.wzrost) + (6.76 * self.wiek),2)
print(self.bmr)
def zapotrzebowanie(self):
#obliczanie zapotrzebowanie kalorycznego
self.zapotrzebowanie = round(self.bmr * self.aktywnosc,2)
print(self.zapotrzebowanie)
def dieta_redukcja(self):
#obliczanie diety redukcyjnej
bialko_kcal = 4
tluszcze_kcal = 9
weglowodany_kcal = 4
kalorii_w_diecie = self.zapotrzebowanie - 500
kcal_z_tluszczy = 0.2 * kalorii_w_diecie
bialka = round(2.2 * self.waga,2)
tluszcze = round(kcal_z_tluszczy / tluszcze_kcal,2)
aktualna_kaloryka = kcal_z_tluszczy + bialka * bialko_kcal
kcal_z_wegli = kalorii_w_diecie - aktualna_kaloryka
weglowodany = round(kcal_z_wegli/weglowodany_kcal,2)
print("-----------------------------------------------------------------")
print("|TWOJA DIETA REDUKCYJNA:")
print("|Aby zmiejszyc mase ciala potrzebujesz: "+ str(kalorii_w_diecie) +"kcal")
print("|Makroskladniki:")
print("|Bialka: "+ str(bialka) +"g")
print("|Tluszcze: "+ str(tluszcze) +"g")
print("|Weglowodany: "+ str(weglowodany) +"g")
print("-----------------------------------------------------------------")
def dieta_utrzymanie(self):
#obliczanie diety na utrzymanie masy ciala
bialko_kcal = 4
tluszcze_kcal = 9
weglowodany_kcal = 4
kalorii_w_diecie = self.zapotrzebowanie
kcal_z_tluszczy = 0.2 * kalorii_w_diecie
kcal_z_bialka = 0.3 * kalorii_w_diecie
kcal_z_wegli = 0.5 * kalorii_w_diecie
bialka = round(kcal_z_bialka / bialko_kcal,2)
tluszcze = round(kcal_z_tluszczy / tluszcze_kcal,2)
weglowodany = round(kcal_z_wegli / weglowodany_kcal,2)
print("-----------------------------------------------------------------")
print("|TWOJA DIETA NA UTRZYMANIE MASY CIALA:")
print("|Aby utrzymac mase ciala potrzebujesz: "+ str(kalorii_w_diecie) +"kcal")
print("|Makroskladniki:")
print("|Bialka: "+ str(bialka) +"g")
print("|Tluszcze: "+ str(tluszcze) +"g")
print("|Weglowodany: "+ str(weglowodany) +"g")
print("-----------------------------------------------------------------")
def dieta_masa(self):
#obliczanie diety na zwiekszenie masy miesniowej
bialko_kcal = 4
tluszcze_kcal = 9
weglowodany_kcal = 4
kalorii_w_diecie = self.zapotrzebowanie + 500
kcal_z_tluszczy = 0.25 * kalorii_w_diecie
bialka = round(2 * self.waga,2)
tluszcze = round(kcal_z_tluszczy / tluszcze_kcal,2)
aktualna_kaloryka = kcal_z_tluszczy + bialka * bialko_kcal
kcal_z_wegli = kalorii_w_diecie - aktualna_kaloryka
weglowodany = round(kcal_z_wegli/weglowodany_kcal,2)
print("-----------------------------------------------------------------")
print("|TWOJA DIETA NA BUDOWE MASY MIESNIOWEJ:")
print("|Aby zwiekszyc mase miesniowa potrzebujesz: "+ str(kalorii_w_diecie) +"kcal")
print("|Makroskladniki:")
print("|Bialka: "+ str(bialka) +"g")
print("|Tluszcze: "+ str(tluszcze) +"g")
print("|Weglowodany: "+ str(weglowodany) +"g")
print("-----------------------------------------------------------------")
def podaj_plec():
#funkcja stworzona w celu pobierania plci uzytkownika, zwraca:
# 1 - kobieta
# 2 - mezczyzna
print("-----------------------------------------------------------------")
print("Plec")
print("1 - jestem kobieta")
print("2 - jestem mezczyzna")
print("-----------------------------------------------------------------")
print("Twoj wybor: ")
wybor = input()
if wybor==1:
return 1
elif wybor==2:
return 2
else:
print("Podano bledne dane sprobuj ponownie...")
podaj_dane()
def podaj_wage():
#funkcja stworzona w celu pobierania masy ciala uzytkownika
#zwraca mase ciala w kilogramach
print("-----------------------------------------------------------------")
print("Podaj swoja mase ciala: ")
waga = input()
return waga
def podaj_wzrost():
#funkcja pobierajaca informacje na temat wzrostu uzytkownika
#zwraca wzrost w centymetrach
print("-----------------------------------------------------------------")
print("Podaj swoj wzrost ")
wzrost = input()
return wzrost
def podaj_wiek():
#funkcja pobierajaca informacje na temat wieku uzytkownika
#zwraca wiek w latach
print("-----------------------------------------------------------------")
print("Podaj swoj wiek ")
wiek = input()
return wiek
def podaj_aktywnosc():
#funkcja pobierajaca informacje na temat aktywnosci uzytkownika uzytkownika
#zwraca wartosc odpowiadajaca danej aktywnosci
print("-----------------------------------------------------------------")
print("Jak wyglada Twoja aktywnosc")
print("1. Znikoma aktywnosc (siedzacy tryb zycia, praca biurowa)")
print("2. Bardzo niska aktywnosc (jeden trening w tygodniu, praca biurowa) ")
print("3. Umiarkowana (cwiczenia 2 razy w tygodniu - srednia intensywnosc)")
print("4. Duza (dosc ciezki trening kilka razy w tygodniu)")
print("5. Bardzo duza (przynajmniej 4 ciezkie treningi fizyczne w tygodniu, praca fizyczna)")
print("6. Najwyzsza (codzienny ciezki trening, ciezka praca fizyczna)")
print("-----------------------------------------------------------------")
print("Twoj wybor: ")
wybor = input()
if wybor==1:
#znikoma aktywnosc
return 1.0
elif wybor==2:
#b. niska aktywnosc
return 1.2
elif wybor==3:
#umiarkowana aktywnosc
return 1.4
elif wybor==4:
#duza aktywnosc
return 1.6
elif wybor==5:
#b. duza aktywnosc
return 1.8
elif wybor==5:
#najwyzsza aktywnosc
return 2.0
else:
print("Podane dane wejsciowe sa bledne...")
podaj_dane()
def wybor_diety():
#funkcja pobierajaca informacje o celu dieta dla uzytkownika, zwraca:
# 1 - dla diety redukcyjnej
# 2 - dla diety na utrzymanie masy ciala
# 3 - dla diety na zwiekszenie masy miesniowej
print("-----------------------------------------------------------------")
print("Z jakiej diety chcialbys skorzystac?")
print("1. Dieta redukcyjna ")
print("2. Dieta na utrzymanie masy ciala")
print("3. Dieta na zwiekszenie masy ciala")
print("-----------------------------------------------------------------")
print("Twoj wybor: ")
wybor = input()
if wybor == 1:
return 1
elif wybor == 2:
return 2
elif wybor == 3:
return 3
else:
print("Podane dane wejsciowe sa bledne...")
wybor_diety()
def main():
print("-----------------------------------------------------------------")
print("Witam w programie tworzacym Twoja nowa diete!!!!")
print("Menu:")
print("1. Oblicz BMR ")
print("2. Oblicz zapotrzebowanie kaloryczne i BMR")
print("3. Oblicz docelowa ilosc kalorii i makrosklanikow")
print("4. Wyjscie z aplikacji")
print("-----------------------------------------------------------------")
print("Twoj wybor: ")
wybor = input()
if wybor==1:
#pobieramy dane uzytkownika
uzytkownik = User()
uzytkownik.plec = podaj_plec()
uzytkownik.wiek = podaj_wiek()
uzytkownik.waga = podaj_wage()
uzytkownik.wzrost = podaj_wzrost()
uzytkownik.aktywnosc = podaj_aktywnosc()
#obliczamy BMR uzytkownika
uzytkownik.oblicz_bmr()
elif wybor==2:
#pobieramy dane uzytkownika
uzytkownik = User()
uzytkownik.plec = podaj_plec()
uzytkownik.wiek = podaj_wiek()
uzytkownik.waga = podaj_wage()
uzytkownik.wzrost = podaj_wzrost()
uzytkownik.aktywnosc = podaj_aktywnosc()
#obliczamy BMR uzytkownika
uzytkownik.oblicz_bmr()
#obliczamy zapotrzebowanie kaloryczne uzytkownika
uzytkownik.zapotrzebowanie()
elif wybor==3:
#pobieramy dane uzytkownika
uzytkownik = User()
uzytkownik.plec = podaj_plec()
uzytkownik.wiek = podaj_wiek()
uzytkownik.waga = podaj_wage()
uzytkownik.wzrost = podaj_wzrost()
uzytkownik.aktywnosc = podaj_aktywnosc()
#obliczamy BMR uzytkownika
uzytkownik.oblicz_bmr()
#obliczamy zapotrzebowanie kaloryczne uzytkownika
uzytkownik.zapotrzebowanie()
dieta = wybor_diety()
#obliczanie wybranej diety
if dieta == 1:
uzytkownik.dieta_redukcja()
elif wybor == 2:
uzytkownik.dieta_utrzymanie()
elif wybor == 3:
uzytkownik.dieta_masa()
else:
print("Podane dane wejsciowe sa bledne...")
podaj_dane()
elif wybor==4:
#wylaczenie programu...
print("Program zostanie wylaczony...")
else:
print("Podane dane wejsciowe sa bledne...")
main()
if __name__ == "__main__":
main() | true |
c51f829cd988670d04fb481a7934e05fffe740c4 | Python | Carlisle345748/leetcode | /136.只出现一次的数字.py | UTF-8 | 2,232 | 3.90625 | 4 | [] | no_license | import time
from functools import reduce
class Solution1:
def singleNumber(self, nums: list) -> int:
"""
用hash-table记录每个数字出现的次数,最后遍历一次hash-table找到只出现一次的数字
"""
memo = {}
for i in nums:
if i not in memo:
memo[i] = 1
elif memo[i] == 1:
memo[i] = 2
for i in memo:
if memo[i] == 1:
return i
class Solution2:
"""
将数组nums转化为无重复数字的set,(set的元素之和 * 2) - (nums元素之和) = 只出现一次的数字
"""
def singleNumber(self, nums: list) -> int:
no_dup = set(nums)
sum1 = sum(no_dup) * 2
sum2 = sum(nums)
return sum1 - sum2
class Solution3:
"""
使用集合存储数字。遍历数组中的每个数字,如果集合中没有该数字,则将该数字加入集合,
如果集合中已经有该数字,则将该数字从集合中删除,最后剩下的数字就是只出现一次的数字。
"""
def singleNumber(self, nums: list) -> int:
memo = set()
for i in nums:
if i not in memo:
memo.add(i)
else:
memo.discard(i)
return memo.pop()
class Solution4:
"""
使用位运算中的异或运算,由于结合律和交换律,所有出现2次的元素都会等于0,
剩余的只出现一次的元素与0做异或运算等于其自身
"""
def singleNumber(self, nums: list) -> int:
return reduce(lambda x, y: x ^ y, nums)
s1 = Solution1()
s2 = Solution2()
s3 = Solution3()
s4 = Solution4()
start1 = time.time()
print(s1.singleNumber([1,1,2,2,3,3,4,4,11,5,5,6,6,7,7,8,8,9,9,10,10]))
start2 = time.time()
print(s2.singleNumber([1,1,2,2,3,3,4,4,11,5,5,6,6,7,7,8,8,9,9,10,10]))
start3 = time.time()
print(s3.singleNumber([1,1,2,2,3,3,4,4,11,5,5,6,6,7,7,8,8,9,9,10,10]))
start4 = time.time()
print(s4.singleNumber([1,1,2,2,3,3,4,4,11,5,5,6,6,7,7,8,8,9,9,10,10]))
end = time.time()
print("hash: %e" %(start2 - start1))
print("set : %e" %(start3- start2))
print("sum : %e" %(start4 - start3))
print("bit : %e" %(end - start4)) | true |
7b766ed15f7c64a21ddc0896ca1bc514d6e0b0b3 | Python | amalsom10/datastructure | /class/related_objects_in_multiple_classes.py | UTF-8 | 1,448 | 3.671875 | 4 | [] | no_license | class Students:
def __init__(self, name, classdivision, rollnumber):
self.name = name
self.classdivision = classdivision
self.rollnumber = rollnumber
def studentdetails(self):
print ("------------\nStudent info\n----------------\nName: {}\nclassdivision: {}\nrollnumber: {}". format(self.name, self.classdivision, self.rollnumber))
s1 = Students("s1", "2a", 13)
s2 = Students("s2" , "10b", 10)
class Teacher:
"""docstring for Teacher."""
def __init__(self, name, education, isworking):
self.name = name
self.education = education
self.isworking = isworking
def teacherdetails(self):
print ("------------\nTeacher info\n----------------\nName: {}\neducation: {}\nisworking: {}". format(self.name, self.education, self.isworking))
def teacher_present(self):
self.isworking = True
def teacher_notpresent(self):
self.isworking = False
t1 = Teacher("t1", "btech", True)
t2 = Teacher("t2", "mtech", False)
print ("\n--------\nStudent List\n------------")
s1.studentdetails()
s2.studentdetails()
print ("\n--------\nTeacher List\n-------------")
t1.teacherdetails()
t2.teacherdetails()
t1.studentlist = s1
t2.studentlist = s2
print("\n-----------\nStudents under Teachers\n----------\n")
print("Teacher: {}". format(t1.name))
t1.studentlist.studentdetails()
print("\nTeacher: {}". format(t2.name))
t2.studentlist.studentdetails()
| true |
2734d14a79e2d80e7b234d6079c82a63a021c00a | Python | Dhawgupta/RLDS | /svm/nn.py | UTF-8 | 4,578 | 2.515625 | 3 | [
"MIT"
] | permissive | import numpy
import pandas
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
seed = 1337
numpy.random.seed(seed)
import sys,getopt
def main(argv):
trainFile=''
validationFile=''
try:
opts,argv=getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'Please Provide Name of Training File using -i <FileName>, If you have Validation File also, provide it\'s name Using -o <FileName>'
sys.exit(2)
#print 'Bello'
for opt,arg in opts:
if opt in("-i","--ifile"):
trainFile=arg
elif opt in("-o","--ofile"):
validationFile=arg
if trainFile=='':
print 'ERROR : Please Provide Training File Name'
sys.exit(-1)
print 'Will Start Training On File: ',trainFile
#print 'Validation File: ',validationFile
TrainNeuralNet(trainFile,validationFile)
def TrainNeuralNet(trainFile,validFile):
seed = 1337
numpy.random.seed(seed)
#Load Training Data
dataframe = pandas.read_csv(trainFile, header=None,delimiter=' ')
dataset = dataframe.values
#number of columns to determine the number of input layer nodes
cols=dataset.shape[1]
#Extract all the columns except label column
X_Train=dataset[:,0:cols-1].astype(float)
#the label column
Yytrain=dataset[:,cols-1]
#one-hot encoding for training labels
encoder = LabelEncoder()
encoder.fit(Yytrain)
encoded_Ytrain = encoder.transform(Yytrain)
#one-hot encoded vector for label column
Y_train = np_utils.to_categorical(encoded_Ytrain)
#Load Validation Data, same procedure as the training data
if(validFile!=''):
print 'Reading from Validation File: ',validFile
dataframe1 = pandas.read_csv(validFile, header=None,delimiter=' ')
dataset1 = dataframe1.values
testcols=dataset1.shape[1]
X_test=dataset1[:,0:cols-1].astype(float)
Yy_test=dataset1[:,cols-1]
encoder.fit(Yy_test)
encoded_Ytest = encoder.transform(Yy_test)
Y_test= np_utils.to_categorical(encoded_Ytest)
else:
#if no validation file, split the training data, here one hot encoding for labels is done already
print 'Splitting the dataset in 80-20%'
X_Train, X_test, Y_train, Y_test = train_test_split(X_Train, Y_train, test_size=0.20, random_state=seed)
print X_test.shape,Y_test.shape,X_Train.shape,Y_train.shape
#prepare the Neural-Net
batch_size=15000
#TODO: Is it possible to determine the number of classes automatically ?
nb_classes=9
#number of iterations
nb_epoch=100
model = Sequential()
model.add(Dense(4200, input_dim=cols-1))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4200))
model.add(Activation('relu'))
#model.add(D
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_Train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
fig=plt.gcf()
plt.show()
plt.draw()
fig.savefig('Loss.png')
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
fig=plt.gcf()
plt.show()
plt.draw()
fig.savefig('Accuracy.png')
if __name__ == "__main__":
main(sys.argv[1:])
| true |
7f8e8b70a608ece78b08e695fdd50ca27c613cb4 | Python | rahaahmadi/LinearAlgebra-Projects | /Least Squares - Denoising/LeastSquares.py | UTF-8 | 739 | 2.953125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
data = np.load('btc_price.npy')
plt.plot(data)
plt.show()
y = data.reshape(data.size, 1)
D = np.zeros(((data.size - 1), data.size))
for i in range(D.shape[0]):
D[i][i] = 1
D[i][i + 1] = -1
def denoise(D, y, lambdaa):
x = np.linalg.inv(np.eye(D.shape[1], D.shape[1]) + (lambdaa * (D.T @ D))) @ y
return x
plt.plot(denoise(D, y, 0))
plt.title('lambda = 0')
plt.show()
plt.plot(denoise(D, y, 10))
plt.title('lambda = 10')
plt.show()
plt.plot(denoise(D, y, 100))
plt.title('lambda = 100')
plt.show()
plt.plot(denoise(D, y, 1000))
plt.title('lambda = 1000')
plt.show()
plt.plot(denoise(D, y, 10000))
plt.title('lambda = 10000')
plt.show()
| true |
ea6f2316aa9f6c25177fc013b6171db3e75ee509 | Python | ColinWilder/pythonBasics | /slicing-practice-2.py | UTF-8 | 108 | 2.578125 | 3 | [] | no_license | lr=("r","t","ch","tv","db","sf")
apt=[]
apt.extend(lr)
first_thing=apt.pop(0)
print(apt)
print(first_thing)
| true |
5a7359ccbe6e28afb732dba348ca0b8971c98c26 | Python | vertica/vertica-python | /vertica_python/tests/integration_tests/test_transfer_format.py | UTF-8 | 5,411 | 2.625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # Copyright (c) 2022-2023 Open Text.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base import VerticaPythonIntegrationTestCase
"""Check the consistency of query results btw text transfer and binary transfer"""
class DataTransferFormatTestCase(VerticaPythonIntegrationTestCase):
@classmethod
def setUpClass(cls):
super(DataTransferFormatTestCase, cls).setUpClass()
cls._conn_info['binary_transfer'] = False
cls.text_conn = cls._connect()
cls._conn_info['binary_transfer'] = True
cls.binary_conn = cls._connect()
cls.text_cursor = cls.text_conn.cursor()
cls.binary_cursor = cls.binary_conn.cursor()
@classmethod
def tearDownClass(cls):
cls.text_conn.close()
cls.binary_conn.close()
def _test_equal_value(self, sql_type, data_list, assert_almost_equal=False):
for data in data_list:
query = u"SELECT {}{}".format(data, "::" + sql_type if sql_type else '')
self.text_cursor.execute(query)
self.binary_cursor.execute(query)
text_val = self.text_cursor.fetchone()[0]
binary_val = self.binary_cursor.fetchone()[0]
if assert_almost_equal:
self.assertAlmostEqual(text_val, binary_val)
else:
self.assertEqual(text_val, binary_val)
def test_boolean_type(self):
self._test_equal_value("BOOLEAN", ['true', 'false'])
def test_integer_type(self):
self._test_equal_value("INTEGER", ["-314", "0", "365", "111111111111"])
def test_float_type(self):
self._test_equal_value("FLOAT", [
"'Infinity'", "'-Infinity'",
"'1.23456e+18'", "'1.23456'", "'1.23456e-18'"])
# binary transfer offers slightly greater precision than text transfer
# binary: 1.489968353486419
# text: 1.48996835348642
self._test_equal_value(None, ["ATAN(12.345)"], True)
def test_numeric_type(self):
self._test_equal_value("NUMERIC", ["0", "-1.1", "1234567890123456789.0123456789"])
self._test_equal_value("DECIMAL", ["123456789.98765"])
def test_char_type(self):
self._test_equal_value("CHAR(8)", [u"'\u16b1'"])
self._test_equal_value("VARCHAR", [u"'foo\u16b1'"])
self._test_equal_value("LONG VARCHAR", [u"'foo \u16b1 bar'"])
def test_datetime_type(self):
self._test_equal_value("DATE", ["'0340-01-20'", "'2001-12-01'", "'9999-12-31'"])
self._test_equal_value("TIME(3)", ["'00:00:00.00'", "'22:36:33.123956'", "'23:59:59.999'"])
self._test_equal_value("TIMETZ(3)", ["'23:59:59.999-00:30'", "'22:36:33.123456+0630'", "'800-02-03 22:36:33.123456 America/Cayman'"])
self._test_equal_value("TIMESTAMP", ["'276-12-1 11:22:33'", "'2001-12-01 00:30:45.087'"])
self._test_equal_value("TIMESTAMPTZ(4)", ["'1582-09-24 00:30:45.087-08'", "'0001-1-1 11:22:33'", "'2020-12-31 10:43:09.05'"])
def test_interval_type(self):
self._test_equal_value("INTERVAL DAY TO SECOND", ["'1 02:03:04.0005'", "'1 02:03:04'", "'02:03:04.0005'", "'02:03'"])
self._test_equal_value("INTERVAL DAY TO MINUTE", ["'1 02:03'", "'02:03'"])
self._test_equal_value("INTERVAL DAY TO HOUR", ["'1 22'"])
self._test_equal_value("INTERVAL DAY", ["'132'"])
self._test_equal_value("INTERVAL HOUR TO SECOND", ["'02:03:04'"])
self._test_equal_value("INTERVAL HOUR TO MINUTE", ["'02:03'"])
self._test_equal_value("INTERVAL HOUR", ["'02'"])
self._test_equal_value("INTERVAL MINUTE TO SECOND", ["'00:04.0005'", "'03:04'"])
self._test_equal_value("INTERVAL MINUTE", ["'03'"])
self._test_equal_value("INTERVAL SECOND", ["'216901.24'", "'216901'"])
self._test_equal_value("INTERVAL YEAR", ["'1y 10m'"])
self._test_equal_value("INTERVAL YEAR TO MONTH", ["'1y 10m'"])
self._test_equal_value("INTERVAL MONTH", ["'1y 10m'"])
def test_UUID_type(self):
self.require_protocol_at_least(3 << 16 | 8)
self._test_equal_value("UUID", ["'00010203-0405-0607-0809-0a0b0c0d0e0f'", "'123e4567-e89b-12d3-a456-426655440a00'"])
def test_binary_type(self):
self._test_equal_value("BINARY(2)", [u"'\303\261'"])
self._test_equal_value("VARBINARY", [u"'\303\261'"])
self._test_equal_value("LONG VARBINARY", [u"'\303\261\303\260'"])
def test_array_type(self):
self._test_equal_value("ARRAY[INT]", ["ARRAY[1,2,3]"])
self._test_equal_value("ARRAY[ARRAY[INT]]", ["ARRAY[ARRAY[1,2],ARRAY[3,4]]"])
def test_set_type(self):
self._test_equal_value("SET[INT]", ["SET[1,2,3]"])
def test_row_type(self):
self._test_equal_value("ROW(name varchar, age int, c ARRAY[INT])", ["ROW('Amy',25,ARRAY[1,2,3])"])
# Dynamically evaluate the generated source for the prepared-statement
# variant of the test case, defining it at module level so the test
# runner discovers it.  NOTE(review): presumably createPrepStmtClass()
# returns class-definition source text — confirm against its definition.
exec(DataTransferFormatTestCase.createPrepStmtClass())
| true |
31f13150b08a2359df0c747c8d967e127f3fe76e | Python | aKarm1905/PythonMSC | /samples/make2dparameters.py | UTF-8 | 1,177 | 2.953125 | 3 | [] | no_license | # coding=utf-8
"""Butterfly make2d Parameters.
Parameters to convert a 3d OpenFOAM case to 2d.
"""
from copy import deepcopy
# TODO(): Add check for input values.
class Make2dParameters(object):
    """Parameters for converting a 3d OpenFOAM case to a 2d case.

    Attributes:
        origin: Plane origin as (x, y, z).
        normal: Plane normal as (x, y, z).
        width: Width of the 2d blockMeshDict (default: 0.5).
    """

    def __init__(self, origin, normal, width=0.5):
        """Init make2d parameters."""
        self.origin = tuple(origin)
        self.normal = tuple(normal)
        # Fall back to the documented default of 0.5 when width is falsy.
        # BUGFIX: the original fell back to 0.1, contradicting both the
        # signature default and the class docstring.
        self.width = width if width else 0.5

    @property
    def isMake2dParameters(self):
        """Return True."""
        return True

    def duplicate(self):
        """Return a deep copy of this object."""
        return deepcopy(self)

    def ToString(self):
        """Overwrite .NET ToString method."""
        return self.__repr__()

    def __repr__(self):
        """Make2d parameters representation."""
        return "Make2dParameters::o({})::n({})".format(
            ','.join('%.3f' % o for o in self.origin),
            ','.join('%.3f' % n for n in self.normal)
        )
| true |
221f241b5286c0372420bed8108a3d8c340f11c6 | Python | P1ping/mass-dataset | /scripts/clean-raw-txt.py | UTF-8 | 1,685 | 3.015625 | 3 | [
"MIT"
] | permissive | import sys, codecs, glob
def clean_punct(line):
    """Swap curly quotation marks for their plain-ASCII counterparts.

    Curly double quotes become '"'; curly single quotes become a space
    (which also drops apostrophes, matching the corpus convention).
    """
    swaps = {
        '“': '"',
        '”': '"',
        '’': ' ',
        '“': '"',
        '‘': ' ',
    }
    for fancy, plain in swaps.items():
        line = line.replace(fancy, plain)
    return line
def clean(line):
    """Normalise one chunk of verse text; a bare number (verse id) yields ''."""
    # Collapse runs of spaces down to single spaces.
    while "  " in line:
        line = line.replace("  ", " ")
    # Drop tabs, normalise curly punctuation, lower-case everything
    # (same order of operations as before).
    line = clean_punct(line.replace("\t", "")).lower()
    # Trim at most one leading and one trailing space.
    if line.startswith(" "):
        line = line[1:]
    if line.endswith(" "):
        line = line[:-1]
    try:
        int(line)  # succeeds only for a bare verse number
    except ValueError:
        return line  # real text
    return ""
def cleaner():
    """Clean every raw file in sys.argv[1] matching *.sys.argv[3].

    Each cleaned file is written under sys.argv[2] with the same basename.
    The raw files keep a whole book on a single line; chunks are split on
    a four-space separator and passed through clean().
    """
    input_files = glob.glob(sys.argv[1] + "/*." + sys.argv[3])
    output_folder = sys.argv[2] + "/" if sys.argv[2][-1] != "/" else sys.argv[2]
    for input_file in input_files:
        # BUGFIX: the original opened the file inside a list comprehension
        # and never closed it; use a context manager instead.
        with codecs.open(input_file, 'r', 'utf-8') as source:
            lines = [line.strip() for line in source]
        if not lines:
            # BUGFIX: an empty file used to raise IndexError on [0].
            continue
        text = lines[0]  # the raw files have all the verses on a single line
        with codecs.open(output_folder + input_file.split("/")[-1], 'w', 'utf-8') as output:
            # NOTE(review): the delimiter is four spaces per the original
            # "#4 spaces" comment — confirm against the raw corpus format.
            for possible_line in text.split("    "):
                line = clean(possible_line)
                if line:
                    output.write(line + " ")
            output.write("\n")
if __name__ == "__main__":
    # The script reads sys.argv[1..3] (input folder, output folder, suffix),
    # so len(sys.argv) must be at least 4.
    # BUGFIX: the original checked `< 3`, which let a missing <suffix>
    # through and crashed later with an IndexError inside cleaner().
    if len(sys.argv) < 4:
        print("USAGE: python3 clean_raw_txt.py <input_folder> <output_folder> <suffix>")
        sys.exit(1)
    cleaner()
| true |
a879d9ef4e83ae4c1591ba189d256a071ec2bbe2 | Python | sunjerry019/adventOfCode | /2018/day_11/11_2.py | UTF-8 | 1,193 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import numpy as np
gridID = 6303
#gridID = 7672
#gridID = 18
powerLevel = np.zeros((301,301), dtype=int)
pLNxN = np.zeros((301,301,301), dtype=int)
# the coordinates need to be 1-indexed
def getPL(X, Y):
rackID = (X + 10)
return int(((((rackID * Y) + gridID) * rackID)/100)%10) - 5
# Single pass over the grid: build the 2-D summed-area table and, from it,
# the total power of every square of every size in O(1) per square.
for y in range(1, 301):
    for x in range(1,301):
        currentPL = getPL(x, y)
        # Standard summed-area recurrence: sum over rectangle (1,1)..(x,y).
        powerLevel[x,y] = powerLevel[x - 1, y ] + \
                          powerLevel[x , y - 1] - \
                          powerLevel[x - 1, y - 1] + \
                          currentPL
        for size in range(1,301):
            if (x >= size) and (y >= size):
                # Square of side `size` whose bottom-right corner is (x, y);
                # its top-left corner is (x-size+1, y-size+1).  Four summed-area
                # lookups give its total power.
                pLNxN[x - size + 1, y - size + 1, size] = powerLevel[x , y ] - \
                                                          powerLevel[x , y - size] - \
                                                          powerLevel[x - size, y ] + \
                                                          powerLevel[x - size, y - size]

# Decode the flat argmax index back into (x, y, size):
# pLNxN is indexed [x, y, size], so flat = x*301*301 + y*301 + size.
maxidx = np.argmax(pLNxN)
print("Max Power level is at {} = <{},{},{}>".format(maxidx, int(maxidx/(301**2)), int(maxidx/301)%301, maxidx%301))
| true |
27eafe6643b7f7d957454b2c66e485fc22e509a0 | Python | MatanelAbayof/Wikishield | /wiki_api/base_api.py | UTF-8 | 1,436 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | import time
from abc import ABC
import requests
from requests import ConnectionError
class BaseApi(ABC):
    """Generic base class with helper methods for talking to a JSON HTTP API.

    Subclasses must override :attr:`api_path` with the endpoint URL.
    """

    _MAX_TRIES = 10          # how many times to retry a failed request
    _TIMEOUT = 40            # per-request timeout, in seconds
    _SLEEP_COEFFICIENT = 5   # linear back-off: sleep tries * coefficient seconds

    def __init__(self):
        """
        initialize the class
        """
        pass

    @property
    def api_path(self) -> str:
        """Get the API URL path.

        Raises:
            NotImplementedError: always; subclasses must override this property.
        """
        raise NotImplementedError('`api_path` property must be implemented in a subclass')

    def fetch_json(self, params):
        """Fetch JSON from the API with a GET request, retrying on failure.

        Retries (with linearly growing sleep) recover from transient errors
        such as HTTP 429 "Too Many Requests".

        :param params: dict/list of query parameters to send
        :return: decoded JSON result
        :raises ConnectionError: after _MAX_TRIES consecutive failures
        """
        tries = 0
        while tries < self._MAX_TRIES:
            try:
                return requests.get(self.api_path, params=params, timeout=self._TIMEOUT).json()
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.  Exception still covers both
                # request failures and JSON-decoding errors.
                tries += 1
                print("I am sleep with try = {}".format(tries))
                sleep_time = tries * self._SLEEP_COEFFICIENT
                time.sleep(sleep_time)
        raise ConnectionError("Cannot connect to {0} with params = {1}".format(self.api_path, params))
| true |
dcd35c87aac8dd8c740dd0b87d8a38b2294078e0 | Python | geohotweb/programing | /ecuaciones/segu_primer_grado.py | UTF-8 | 658 | 4.125 | 4 | [] | no_license | #Programa para la resolucion de dos tipos de ecuacuiones, una de primer grado y otra de segundo grado.
from math import sqrt
print('Programa para la resolucion de la ecuacion a x*x + b x + c= 0.')
a = float(input('Valor de a: '))
b = float(input('Valor de b: '))
c = float(input('Valor de c: '))
if a == 0:
if b == 0:
if c == 0:
print('La ecuacion tiene infinitas soluciones.')
else:
print('La ecuacion no tiene solucion.')
else:
x = -c/b
print('La solucion x={0:.3f}.'.format(x))
else:
x1 = (-b+sqrt(b**2-4*a*c)) / (2*a)
x2 = (-b-sqrt(b**2-4*a*c)) / (2*a)
print('Soluciones de las ecuaciones: x1={0:.3f} y x2={0:.3f}'.format(x1, x2))
| true |
631b85bfc933cbc682f7a6446d9ec915f4831b44 | Python | burevol/wow_discord_news | /wd_generators.py | UTF-8 | 3,822 | 2.734375 | 3 | [] | no_license | import requests
class WowData():
def __init__(self, cf):
self.cf = cf
self.token = None
if cf.auth_mode == 'oauth2':
path_oauth = 'https://us.battle.net/oauth/token?grant_type=client_credentials' \
'&client_id=%s&client_secret=%s' % (cf.client_id,cf.client_secret)
request_oauth = requests.get(path_oauth)
request_oauth.raise_for_status()
request_json_oauth = request_oauth.json()
self.cf.wow_api_key = request_json_oauth['access_token']
self.auth_string = 'access_token'
self.host = 'https://eu.api.blizzard.com'
else:
self.auth_string = "apikey"
self.host = 'https://eu.api.battle.net'
@staticmethod
def get_data_json(path):
'''Выполняет запрос по заданному пути'''
try:
request = requests.get(path)
request.raise_for_status()
request_json = request.json()
except requests.exceptions.RequestException as error:
print('Ошибка получения данных json')
request_json = []
return request_json
def get_character(self, char_name):
'''Возвращает JSON с описанием запрошенного персонажа'''
path = '%s/wow/character/%s/%s?&locale=%s&%s=%s' % (
self.host, self.cf.guild_realm, char_name, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
return request_json
def get_item(self, item_id):
'''Возвращает JSON с описанием запрошенного итема'''
path = '%s/wow/item/%s?&locale=%s&%s=%s' % (
self.host, item_id, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
return request_json
def get_guild_news(self):
'''Функция-генератор, возвращающая гильдейские новости'''
path = '%s/wow/guild/%s/%s?fields=news&locale=%s&%s=%s' % (
self.host, self.cf.guild_realm, self.cf.guild_name, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
print(path,request_json)
for member in request_json['news']:
yield member
def get_races(self):
'''Функция-генератор, возвращающая расы персонажей'''
path = '%s/wow/data/character/races?locale=%s&%s=%s' % (
self.host, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
for race in request_json['races']:
yield race
def get_classes(self):
'''Функция - генератор, возвращающая классы персонажей'''
path = '%s/wow/data/character/classes?locale=%s&%s=%s' % (
self.host, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
for w_class in request_json['classes']:
yield w_class
def get_guild_members(self):
'''Функция-генератор, возвращающая персонажей гильдии'''
path = '%s/wow/guild/%s/%s?fields=members&locale=%s&%s=%s' % (
self.host, self.cf.guild_realm, self.cf.guild_name, self.cf.local, self.auth_string, self.cf.wow_api_key)
request_json = self.get_data_json(path)
try:
for member in request_json['members']:
yield member
except TypeError:
print("Не удалось прочитать формат данных", request_json)
| true |