blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b51a512fbedf8c77713ef1c4495ae74f2758aee4 | Python | joakimstenhammar/subgrid | /python/dqTools/ncdx.py | UTF-8 | 14,609 | 2.6875 | 3 | [
"MIT"
] | permissive | """A simpler interface to netCDF files.
"""
from pycdf import *
import numpy as N
import os.path
CREATE = NC.CREATE|NC.WRITE|NC.BIT64_OFFSET
def get_or_def_dim(ncf, name, length):
    """Return the id of netCDF dimension *name* on *ncf*, defining it
    (with the given *length*) first if it does not exist yet.

    Fix: the original bound the lookup result to a variable called
    ``len``, shadowing the builtin; the binding was never used — only
    the KeyError raised by the lookup mattered.
    """
    try:
        # Raises KeyError when the dimension has not been defined yet.
        ncf.dimensions()[name]
        dim = ncf.inq_dimid(name)
    except KeyError:
        dim = ncf.def_dim(name, length)
    return dim
class Positions(object):
    """Abstract marker base for the position (sample-location) classes;
    concrete subclasses implement define/positionString/writeto."""
    pass
class Data(object):
    """Wraps a numpy array as the value component of an OpenDX-style
    field, with a tensor *rank* per sample (0 scalar, 1 vector, 2 matrix).
    """
    _default_rank=None  # concrete subclasses fix this (0, 1 or 2)
    def __init__(self, data, rank=None):
        if rank is None:
            rank = self._default_rank
            pass
        assert rank is not None
        # Need at least one "sample" dimension beyond the per-item rank.
        assert data.ndim > rank
        self.rank = rank
        # Suffix appended to the DX "field" attribute; only ranks 0-2 exist.
        self.rankstr = ['', ', vector', ', matrix'][rank]
        itemshape = []
        shape = list(data.shape)
        # Split data.shape into the sample shape and the trailing
        # per-item (rank) shape, preserving order of the popped axes.
        for i in range(rank):
            itemshape.insert(0, shape.pop())
            continue
        self.shape = tuple(shape)
        self.itemshape = tuple(itemshape)
        # Total number of samples (product of the sample-shape axes).
        self.size = N.multiply.reduce(shape)
        self.ndim = len(shape)
        self.data = data
        return
    def define(self, ncf, name):
        """Define the netCDF dimensions and the <name>_values variable
        that will hold this data; returns the new variable."""
        ncdims = []
        for i, n in enumerate(self.shape):
            ncdims.append(get_or_def_dim(ncf, name+'_values_n%d' % i, n))
            continue
        for i,n in enumerate(self.itemshape):
            ncdims.append(get_or_def_dim(ncf, name+'_values_r%d' % i, n))
            continue
        return ncf.def_var(name+'_values', NC.FLOAT, ncdims)
    def nameString(self, name):
        """Value of the DX "field" attribute: the name plus rank suffix."""
        return name + self.rankstr
    def writeto(self, var):
        # Data is stored as 32-bit floats regardless of input dtype.
        var.put(self.data.astype(N.float32))
        return
    pass
class ScalarData(Data):
    """Data with one scalar value per sample (rank 0)."""
    _default_rank = 0
    pass
class VectorData(Data):
    """Data with one vector per sample (rank 1)."""
    _default_rank = 1
    pass
class MatrixData(Data):
    """Data with one matrix per sample (rank 2)."""
    _default_rank = 2
    pass
class IrregularPositions(Positions):
    """Explicit (scattered) sample positions: an (N, dim) array of
    coordinates for N unconnected samples in 1, 2 or 3 dimensions."""
    def __init__(self, dim, positions):
        assert dim in [1,2,3]
        self.dim = dim
        positions = N.atleast_2d(positions)
        assert positions.ndim == 2
        assert positions.shape[1] == dim
        self.size = positions.shape[0]
        self.shape = (self.size,)
        self.ndim = 1
        self.data = self.positions = positions
        return
    def define(self, ncf, basename):
        """Define the (size, dim) <basename>_locations variable."""
        sizeDim = get_or_def_dim(ncf, basename+'_values_n0', self.size)
        dimDim = get_or_def_dim(ncf, basename+'_dim', self.dim)
        return ncf.def_var(self.positionString(basename),
                           NC.FLOAT, (sizeDim, dimDim))
    @staticmethod
    def positionString(basename):
        """Value of the DX "positions" attribute for scattered data."""
        return basename+'_locations'
    def writeto(self, var):
        var.put(self.positions.astype(N.float32))
        return
    pass
class RegularPositions(Positions):
    """Grid (connected) sample positions described as a product of
    per-dimension axes (RegularAxis or IrregularAxis instances)."""
    def __init__(self, axes):
        dim = len(axes)
        assert dim in [1,2,3]
        self.dim = dim
        self.axes = axes
        # Tell each axis its overall dimensionality and its axis index.
        for i, ax in enumerate(axes):
            ax.dim = dim
            ax.aID = i
            continue
        return
    def define(self, ncf, basename):
        """Define one netCDF variable per axis; returns the list."""
        return [a.define(ncf, basename)
                for i, a in enumerate(self.axes)]
    def positionString(self, basename):
        """Build the DX "positions" attribute: per-axis variable names,
        each optionally tagged ', product' (when dim > 1) and with the
        axis's own regularity tag (', compact' or ''), joined by '; '."""
        ans = ''
        for i in range(self.dim):
            ans += self.axes[i].mkvarname(basename)
            if self.dim>1:
                ans += ', product'
                pass
            ans += self.axes[i].regular
            if i < (self.dim - 1):
                ans += '; '
            continue
        return ans
    def writeto(self, vars):
        # vars is the list returned by define(), one variable per axis.
        for i in range(self.dim):
            self.axes[i].writeto(vars[i])
        return
    pass
class Axis(object):
    """Base class for one axis of a RegularPositions grid; subclasses
    supply `regular`, `n` and writeto()."""
    def mkvarname(self, basename):
        """netCDF variable name for this axis (aID set by the owner)."""
        return basename+'_axis_%d' % self.aID
    def define(self, ncf, basename):
        """Define the (n, dim) netCDF variable holding the axis points."""
        dimDim = get_or_def_dim(ncf, basename+'_naxes', self.dim)
        sizeDim = get_or_def_dim(ncf, self.mkdimname(basename), self.n)
        return ncf.def_var(self.mkvarname(basename), NC.FLOAT, (sizeDim, dimDim))
    pass
class RegularAxis(Axis):
    """Evenly spaced axis stored compactly as (origin, delta)."""
    regular = ', compact'  # DX tag marking the compact encoding
    def __init__(self, origin=0., delta=1., dim=None, aID=None):
        self.origin = origin
        self.delta = delta
        # Compact encoding always stores exactly two rows: origin, delta.
        self.n = 2
        self.dim = dim
        self.aID = aID
        return
    def mkdimname(self, basename):
        # NOTE(review): no '_' separator, yielding e.g. "datacompact_dim";
        # all compact axes of one field share this dimension — confirm
        # whether the missing underscore is intentional.
        return basename+'compact_dim'
    def writeto(self, var):
        # Row 0 carries the origin, row 1 the step, each placed in the
        # column of this axis; other columns stay zero.
        pos = N.zeros((self.n, self.dim), dtype=N.float32)
        pos[0, self.aID] = self.origin
        pos[1, self.aID] = self.delta
        var.put(pos)
        return
    pass
class IrregularAxis(Axis):
    """Axis given by an explicit (n, dim) array of points."""
    regular = ''  # no compact tag: points are stored verbatim
    def __init__(self, points, dim=None, aID=None):
        # has to be floats not doubles
        points = N.array(points, dtype=N.float32).squeeze()
        points = N.atleast_1d(points)
        # NOTE(review): atleast_1d cannot make a 1-D input 2-D, so a flat
        # point list fails this assert — presumably atleast_2d was
        # intended; confirm against callers.
        assert points.ndim == 2
        assert len(points) >= 1
        self.n = points.shape[0]
        self.dim = points.shape[1]
        self.points = points
        self.aID = aID
        return
    def mkdimname(self, basename):
        return self.mkvarname(basename)+'_len'
    def writeto(self, var):
        var.put(self.points)
        return
    pass
class Field(object):
    """A named collection of Data components sharing one Positions
    object; knows how to serialise itself into an open netCDF file.
    (Python 2 code: uses dict.iteritems.)"""
    def __init__(self, posns, **kwargs):
        # Optional name= keyword; everything else must be Data instances.
        self.name = kwargs.pop('name', 'data')
        self.positions = posns
        assert self.checkdict(kwargs, lambda key, val: isinstance(val, Data)),\
               "All keyword args must be Data istances"
        self.dataDict = kwargs
        return
    def writeto(self, ncf):
        """Define positions and every data component on *ncf*, attach
        the DX 'field'/'positions' attributes, then write the arrays."""
        #ncf = CDF(filename, mode=CREATE)
        ncf.definemode()
        pos = self.positions.define(ncf, self.name)
        datavars = {}
        for dname, d in self.dataDict.iteritems():
            fullvarname = "%s_%s" % (self.name, dname)
            dat = d.define(ncf, fullvarname)
            # These attribute assignments create the netCDF attributes
            # that LoadedFields later parses.
            dat.field = d.nameString(fullvarname)
            dat.positions = self.positions.positionString(self.name)
            datavars[dname] = dat
            continue
        ncf.enddef()
        self.positions.writeto(pos)
        [d.writeto(datavars[dname]) for dname, d in self.dataDict.iteritems()]
        return
    @classmethod
    def checkdict(cls, d, condition):
        """True iff condition(key, val) holds for every item of d."""
        for key, val in d.iteritems():
            if condition(key, val):
                # we're ok here
                pass
            else:
                # test fails
                return False
            continue
        return True
    pass
class Unconnected(Field):
    """Field on scattered points: every data component must be a 1-D
    list with one entry per position."""
    def __init__(self, posns, **kwargs):
        assert isinstance(posns, IrregularPositions)
        Field.__init__(self, posns, **kwargs)
        assert self.checkdict(self.dataDict,
                              lambda key,val: val.ndim == 1), \
                              "data must be a list of positions"
        assert self.checkdict(self.dataDict,
                              lambda key,val: val.size == posns.size), \
                              "data & posns must have same size"
        return
    pass
class Connected(Field):
    """Field on a regular grid (RegularPositions)."""
    def __init__(self, posns, **kwargs):
        assert isinstance(posns, RegularPositions)
        Field.__init__(self, posns, **kwargs)
        return
    pass
class LoadedData(object):
    """Simple record pairing a data name, variable name and the netCDF
    variable object read back from file."""
    def __init__(self, dname, vname, v):
        self.vname = vname
        self.dname = dname
        self.v = v
class LoadedField(object):
    """A field reconstructed from file; data components in dataDict are
    also reachable as attributes (f.temperature == f.dataDict['temperature'])."""
    def __init__(self, name):
        self.name = name
        self.positions = None
        self.dataDict = {}
        return
    def __getattribute__(self, name):
        # Fall back to dataDict entries for unknown attribute names.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            dd = object.__getattribute__(self,'dataDict')
            if name in dd:
                return dd[name]
            raise
        return
    pass
class LoadedFields(object):
    """Reads a .ncdx file back, rebuilding LoadedField objects from the
    DX 'field'/'positions' attributes written by Field.writeto.
    Fields are also reachable as attributes (lf.data == lf.fields['data']).
    (Python 2 code: uses dict.iteritems.)"""
    def __init__(self, ncfName):
        self.ncf = CDF(ncfName)
        vars = self.ncf.variables()
        dims = self.ncf.dimensions()
        self.fields = {}
        for vname, vinfo in vars.iteritems():
            v = self.ncf.var(vname)
            attr = v.attributes()
            if 'field' in attr:
                # it's a data field
                # 'field' looks like "<fname>_<dname>[, vector|, matrix]"
                splitFieldAttr = attr['field'].split(',')
                try:
                    rankStr = splitFieldAttr[1].strip()
                except IndexError:
                    rankStr = 'scalar'
                # NOTE(review): split('_') breaks if fname or dname
                # themselves contain underscores — confirm naming rules.
                fname, dname = splitFieldAttr[0].split('_')
                try:
                    f = self.fields[fname]
                except KeyError:
                    f = self.fields[fname] = LoadedField(fname)
                    pass
                f.dataDict[dname] = Data(v.get(),
                                         rank={'scalar': 0,
                                               'vector': 1,
                                               'matrix': 2}[rankStr])
                # Positions only need rebuilding once per field.
                if f.positions is not None:
                    continue
                posdesc = attr['positions']
                if 'product' in posdesc:
                    # connected
                    axes = []
                    for dimdesc in posdesc.split(';'):
                        parts = dimdesc.split(',')
                        # NOTE(review): aID stays a *string* here; using it
                        # as an array index below presumably needs int(aID)
                        # — confirm.
                        aID = parts[0].split('_')[2]
                        dimvar = self.ncf.var(parts[0].strip())
                        dimdat = dimvar.get()
                        if 'compact' in parts:
                            origin = dimdat[0,aID]
                            delta = dimdat[1,aID]
                            axes.append(RegularAxis(origin,delta))
                        else:
                            axes.append(IrregularAxis(dimdat))
                            pass
                        continue
                    pos = RegularPositions(axes)
                else:
                    # unconnected
                    posvar = self.ncf.var(posdesc.strip())
                    pos = IrregularPositions(dims[posvar.dimensions()[1]],
                                             posvar.get())
                    pass
                f.positions = pos
                pass
            continue
        return
    def __getattribute__(self, name):
        # Fall back to the fields dict for unknown attribute names.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            f = object.__getattribute__(self,'fields')
            if name in f:
                return f[name]
            raise
        return
    pass
class Series(object):
    """A directory of .ncdx files indexed by a series variable (e.g.
    time).  An 'index' text file maps each file name to its key.
    (Python 2 code: uses the builtin file().)"""
    def __init__(self, dir, **kwargs):
        #mode='r', seriesvar='time', template='%.9d', clobber=False):
        # Dispatch on mode like open(): 'r' read, 'w' create/clobber,
        # 'a' open-or-create.
        try:
            func = {
                'r': self.openForRead,
                'w': self.create,
                'a': self.openForWrite
            }[kwargs.pop('mode', 'r')]
        except KeyError:
            raise ValueError('Invalid mode specified')
        func(dir, **kwargs)
        return
    def openForRead(self, dir, **kwargs):
        if not os.path.exists(dir):
            raise IOError('Series directory does not exist: %s' % dir)
        self.load(dir)
        return
    def openForWrite(self, dir, seriesvar='time', template='%.9d'):
        if not os.path.exists(dir):
            self.create(dir, seriesvar, template)
        else:
            self.load(dir)
        return
    def load(self, dir):
        """Read the index file; entries become [filename, key] pairs."""
        self.dir = dir
        self.indexfile = os.path.join(dir, 'index')
        if not os.path.exists(self.indexfile):
            raise IOError('Series index does not exist: %s' % self.indexfile)
        header = file(self.indexfile).readline()
        # Header written by create() is "# path <seriesvar>".
        if header[0] == '#':
            _, self.seriesvar = header[1:].split()
        else:
            self.seriesvar = 'time'
            pass
        # NOTE(review): eval() on index contents — only safe for
        # trusted, self-written index files.
        self.index = [[el[0], eval(el[1])] for el in N.loadtxt(self.indexfile, object)]
        return
    def create(self, dir, seriesvar='time', template='%.9d'):
        """(Re)create an empty series directory with a fresh index."""
        if os.path.exists(dir):
            #clobber it
            import shutil
            shutil.rmtree(dir)
            pass
        self.dir = dir
        os.mkdir(dir)
        self.indexfile = os.path.join(dir, 'index')
        file(self.indexfile, 'w').write('# path %s\n' % seriesvar)
        self.seriesvar = seriesvar
        self.template = template
        self.index = []
        return
    def nextFile(self, var):
        """Create the next .ncdx file for key *var* and record it."""
        filebase = (self.template % var) + '.ncdx'
        filename = os.path.join(self.dir, filebase)
        self.index.append([filebase, var])
        file(self.indexfile, 'a').write('%s %s\n' % (filebase, repr(var)))
        ncf = CDF(filename, mode=CREATE)
        return ncf
    def append(self, var, fields):
        """Write every Field in *fields* into a new file keyed by *var*."""
        ncf = self.nextFile(var)
        for f in fields:
            f.writeto(ncf)
            continue
        return
    def __len__(self):
        return len(self.index)
    def __getitem__(self, key):
        # NOTE(review): when key is larger than every index entry,
        # bisect_left returns len(self.index) and the lookup below
        # raises IndexError (list index) rather than the message below.
        i = bisect_left(self.index, key)
        if self.index[i][1] == key:
            return LoadedFields(os.path.join(self.dir, self.index[i][0]))
        else:
            raise IndexError('Key "%s" not in index of "%s"' % (repr(key), self.dir))
        return
    def __iter__(self):
        for k in self.keys():
            yield self[k]
    def __contains__(self, key):
        i = bisect_left(self.index, key)
        if self.index[i][1] == key:
            return True
        else:
            return False
        return
    def keys(self):
        """All series keys, in index order."""
        return [el[1] for el in self.index]
    pass
def bisect_left(a, x):
    """Leftmost insertion point for key *x* in *a*, where *a* is a list
    of (name, key) pairs sorted ascending on the key (second item)."""
    low, high = 0, len(a)
    while low < high:
        middle = (low + high) // 2
        if a[middle][1] < x:
            low = middle + 1
        else:
            high = middle
    return low
def bisect_right(a, x):
    """Rightmost insertion point for key *x* in *a*, where *a* is a list
    of (name, key) pairs sorted ascending on the key (second item).

    Fix: ``lo`` was never initialised, so every call raised NameError.
    """
    lo = 0
    hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid][1]: hi = mid
        else: lo = mid+1
    return lo
# def scalarScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=0, name=name)
# def vectorScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=1, name=name)
# def matrixScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=2, name=name)
| true |
9663d65220d0b84273dbf253afa0beb526818101 | Python | wilsonify/ThinkBayes2 | /tests/test_examples/test_voting_soln.py | UTF-8 | 2,042 | 3.109375 | 3 | [
"CC-BY-NC-SA-4.0"
] | permissive | """
Think Bayes
This notebook presents example code and exercise solutions for Think Bayes.
Copyright 2018 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
"""
from thinkbayes import Pmf
def test_vote():
    """Worked example: model misreported citizenship/voting status by
    convolving true status with a symbolic error probability (sympy),
    then mixing and reconstructing the reported distribution."""
    #
    def add(pmf1, pmf2):
        # Cartesian product of two Pmfs: outcome is the pair of values,
        # probability is the product (independence assumed).
        res = Pmf()
        for v1, p1 in pmf1.items():
            for v2, p2 in pmf2.items():
                res[v1, v2] = p1 * p2
        return res
    #
    from sympy import symbols
    # Symbolic parameters: P(citizen), P(vote | citizen),
    # P(vote | non-citizen), P(reporting error).
    p_citizen, p_cv, p_ncv, p_error = symbols("p_citizen, p_cv, p_ncv, p_error")
    #
    def make_binary(p, name1, name2):
        # Two-outcome Pmf with P(name1) = p.
        return Pmf({name1: p, name2: 1 - p})
    #
    citizen_status = ["citizen", "non-citizen"]
    pmf_citizen = make_binary(p_citizen, *citizen_status)
    #
    error_status = ["error", "no-error"]
    pmf_error = make_binary(p_error, *error_status)
    #
    # Joint distribution of (true citizenship, error flag).
    pmf_citizen_report = add(pmf_citizen, pmf_error)
    pmf_citizen_report.print()
    #
    vote_status = ["vote", "no-vote"]
    pmf_cv = make_binary(p_cv, *vote_status)
    #
    pmf_cv_report = add(pmf_cv, pmf_error)
    pmf_cv_report.print()
    #
    pmf_ncv = make_binary(p_ncv, *vote_status)
    #
    pmf_ncv_report = add(pmf_ncv, pmf_error)
    pmf_ncv_report.print()
    #
    # Mixture: voting behaviour depends on true citizenship.
    mix = Pmf()
    for val1, p1 in pmf_citizen_report.items():
        c, e = val1
        pmf = pmf_cv_report if c == "citizen" else pmf_ncv_report
        for val2, p2 in pmf.items():
            mix[val1, val2] = p1 * p2
    mix.print()
    #
    def report(state, alternatives):
        # An error flips the reported value to the other alternative.
        val, error = state
        if error != "error":
            return val
        alt1, alt2 = alternatives
        return alt1 if val == alt2 else alt2
    #
    report(("citizen", "error"), citizen_status)
    #
    report(("citizen", "no-error"), citizen_status)
    #
    # Marginalise the error flags away to get the *reported* joint
    # distribution of (citizenship, vote).
    pmf_report = Pmf()
    for (cstate, vstate), p in mix.items():
        creport = report(cstate, citizen_status)
        vreport = report(vstate, vote_status)
        pmf_report[creport, vreport] += p
    #
    pmf_report.print()
    #
#
| true |
6187454388b9f934f723daff52026a44eac80c98 | Python | abuzarmahmood/blech_pi_codes | /pi_licking.py | UTF-8 | 1,961 | 2.71875 | 3 | [] | no_license | # Import things for running pi codes
import time
import sys
import Adafruit_MPR121.MPR121 as MPR121
from math import floor
import random
import RPi.GPIO as GPIO
# Import other things for video
from subprocess import Popen
import easygui
import numpy as np
import os
# Setup pi board
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
# Create MPR121 instance.
cap = MPR121.MPR121()
# Initialize communication with MPR121 using default I2C bus of device, and
# default I2C address (0x5A). On BeagleBone Black will default to I2C bus 0.
if not cap.begin():
print('Error initializing MPR121. Check your wiring!')
sys.exit(1)
# Main loop to print a message every time a pin is touched.
#print('Press Ctrl-C to quit.')
def pi_licking(intaninputs = [7]):
    """Poll the MPR121 capacitive touch sensor forever and mirror touch
    state on the given GPIO output pin(s) (signalling the recording
    system).  Only intaninputs[0] is ever driven.

    NOTE(review): mutable default argument — harmless here because the
    list is only read, but a None default would be the safe idiom.
    """
    # Setup pi board GPIO ports
    GPIO.setmode(GPIO.BOARD)
    for i in intaninputs:
        GPIO.setup(i, GPIO.OUT)
    GPIO.output(intaninputs[0], 0)
    last_touched = 0
    while True:
        current_touched = cap.touched()
        # Check each pin's last and current state to see if it was pressed or released.
        for i in range(12): # change this since only one is used
            # Each pin is represented by a bit in the touched value. A value of 1
            # means the pin is being touched, and 0 means it is not being touched.
            pin_bit = 1 << i
            # First check if transitioned from not touched to touched.
            if current_touched & pin_bit and not last_touched & pin_bit:
                GPIO.output(intaninputs[0], 1)
                print('{0} touched!'.format(i))
            # Next check if transitioned from touched to not touched.
            if not current_touched & pin_bit and last_touched & pin_bit:
                GPIO.output(intaninputs[0], 0)
                print('{0} released!'.format(i))
        # Update last state and wait a short period before repeating.
        last_touched = current_touched
| true |
76a624f00f94214fd301606c0e6a7ff88553db8d | Python | Chao-Xi/JacobTechBlog | /interview/riot/log-percentile/Get_log_percentil2.py | UTF-8 | 689 | 3.1875 | 3 | [] | no_license | import os
import glob
import numpy as np
def lines_to_list(dir):
    """Yield the integer response time (the last space-separated token
    of each line) from every *.log file under *dir*.

    NOTE(review): os.chdir is a process-wide side effect, and ``dir``
    shadows the builtin of the same name.
    """
    os.chdir(dir)
    for filename in glob.iglob("*.log"):
        with open(filename) as f:
            for line in f:
                line = line.strip()
                rtime = int(line.split(' ')[-1])
                yield rtime
# The generator streams one line at a time: O(total log lines) time and O(1)
# extra space here; only the percentile step below materialises the full list.
if __name__ == '__main__':
    # Materialise every response time, then report latency percentiles.
    log_list = list(lines_to_list("/var/log/httpd/"))
    print(f'90% of requests return a response in {int(np.percentile(log_list, 90))} ms')
    print(f'95% of requests return a response in {int(np.percentile(log_list, 95))} ms')
    print(f'99% of requests return a response in {int(np.percentile(log_list, 99))} ms')
| true |
fbcf63cf916dc08cedc108b7526d132b25a65d0f | Python | hariharanRadhakrishnan/Python_Exercises | /read_nsd_file_1/1.py | UTF-8 | 1,762 | 3.03125 | 3 | [] | no_license | import re
import sys
import os
def openFile(fileName, fileMode):
    """Open *fileName* in *fileMode*; return the file object, or None
    when the open fails for any reason."""
    try:
        handle = open(fileName, fileMode)
    except:
        handle = None
    return handle
def changeDir(path):
    """chdir into *path*, printing progress; failure is reported but
    not raised (NOTE(review): bare except also hides e.g. permission
    errors behind the 'does not exist' message)."""
    try:
        print("..... Changing directory to", path)
        os.chdir(path)
    except:
        print("FAILED,",path, "does not exist")
    return
def createOutDir():
    """Create (if needed) and enter the small_configs output folder in
    the current working directory."""
    try:
        print("..... Creating Output folder, small_configs")
        os.mkdir("small_configs")
    except:
        # Most likely the directory already exists; we enter it anyway.
        print("small_configs already exists")
    finally:
        changeDir("small_configs")
def generateSmallConfigs(big_file, prefix="sml_config"):
    """Split *big_file* into <prefix>_N.txt chunks, starting a new chunk
    after every line that begins with '!'.

    NOTE(review): if the final chunk does not end with a '!' line, its
    file handle is never closed here (closed only at interpreter exit).
    """
    readVal = big_file.readline()
    fileNum = 1
    new_file = True
    while readVal:
        if (new_file):
            print("..... Creating {0}_{1}.txt".format(prefix,fileNum))
            small_file = openFile("{0}_{1}.txt".format(prefix,fileNum),"w")
            new_file = False
        small_file.write(readVal)
        # A line starting with '!' terminates the current chunk.
        if(re.match(r"^[!]",readVal)):
            small_file.close()
            fileNum += 1
            new_file = True
        # Report which chunk contains the AWS_DualDC marker.
        if(re.findall(r"AWS_DualDC",readVal)):
            print("{0}_{1}.txt".format(prefix,fileNum))
        readVal = big_file.readline()
def printUsageDetails():
    """Print the command-line usage banner."""
    for banner_line in (
        "\n###################### USAGE: ######################",
        "\t1.py [<Big_Config_File> [<Path_to_the_output_dir> [<Output_file_prefix>]]]",
    ):
        print(banner_line)
def main():
    """Entry point: open the big config (argv[1] or nsd.txt), optionally
    chdir to argv[2], create the output folder, split the file (with the
    optional prefix argv[3]) and print usage."""
    if (len(sys.argv)>1):
        print("..... Opening Big Config File,", sys.argv[1])
        big_file = openFile(sys.argv[1],"r")
    else:
        print("..... Opening Big Config File, nsd.txt")
        big_file = openFile("nsd.txt", "r")
    if(big_file == None):
        print ("Error with Big Config File, File not found")
        return 0
    if(len(sys.argv)>2):
        changeDir(sys.argv[2])
    createOutDir()
    if(len(sys.argv)>3):
        generateSmallConfigs(big_file, sys.argv[3])
    else:
        generateSmallConfigs(big_file)
    printUsageDetails()
printUsageDetails()
if __name__ == '__main__':
main() | true |
4894a5a23c0bb5999042ce7507280cba7261d912 | Python | Navid-Fkh/TaxiTransportOptimization | /src/Phase3.py | UTF-8 | 1,670 | 2.8125 | 3 | [] | no_license | from Phase2 import Phase2
from Phase1 import Phase1
inf = 1000000000000  # effectively-infinite bypass weight (forbids the bypass arc)
dataset_path = "dataset/General-Dataset-1.txt"
matrixd_path = "dataset/MarixD_dataset1_General.txt"

# Version 1: allow the bypass arc at weight -1 so the min-cost flow
# itself trades off cars against environmental cost.
solver = Phase2(dataset_path, matrixd_path)
print("The result for Phase3 (V.1):")
flowCost, flowDict = solver.solve(bypass_weight=-1)
print(f"Environmental cost : {flowCost+flowDict['A_start']['A_end']}")
print(f"Optimal number of cars : {solver.N-flowDict['A_start']['A_end']}")
# solver.plot(flowDict)

# Version 2: sweep the fleet size from the minimum up to +10%, picking
# the size that minimises environmental cost (10% profit sacrifice cap).
solver1 = Phase1(dataset_path, matrixd_path)
solver2 = Phase2(dataset_path, matrixd_path)
min_cost, min_flow = solver2.solve()
flowCost, flowDict = solver1.solve()
# Flow through the bypass arc A_start->A_end counts unused cars.
min_cars = solver1.N-flowDict['A_start']['A_end']
optimal_cars = min_cars
optimal_cost, optiaml_flow = solver2.solve(bypass_weight=inf, input_flow=min_cars)
for car_number in range(min_cars + 1, int(1.1*min_cars)):
    # Can't do better than the unconstrained minimum — stop early.
    if optimal_cost == min_cost:
        break
    flowCost, flowDict = solver2.solve(bypass_weight=inf, input_flow=car_number)
    bypass_flow = flowDict["A_start"]["A_end"]
    if flowCost < optimal_cost:
        optimal_cost = flowCost
        optiaml_flow = flowDict
        optimal_cars = car_number - bypass_flow
    # print(car_number, flowCost, optimal_cost, bypass_flow)
print("======================================")
print("The result for Phase3 (V.2):")
print(f"Environmental cost: {optimal_cost}")
print(f"Optimal number of cars (with respect to 10% loss in profit for the benefit of the environment): {optimal_cars}")
print(f"Minimum possible number of cars: {min_cars}")
print(f"Minimum possible value for environmental cost: {min_cost}")
# solver2.plot(optiaml_flow)
| true |
05c64a56075557d8fdf301a340651431d4c9bbc5 | Python | kafire/web_log_parse | /nginx_log_sqlite.py | UTF-8 | 2,165 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import re
import glob
import time
import sqlite3
import linecache
class DB(object):
    """Parses nginx access logs from log/*.log into a SQLite table.
    (Python 2 code: print statements.)"""
    def __init__(self, db_file):
        self.db_file = db_file
        self.db = sqlite3.connect(self.db_file, check_same_thread=False)
        self.db.text_factory = str
        self.cursor = self.db.cursor()
        # Named capture sub-patterns, interpolated into the full log
        # regex in run().
        self.remote_addr = r"?P<ip>[\d.]*"
        self.local_time = r".*?"
        self.method = r"?P<method>\S+"
        self.request = r"?P<request>\S+"
        self.status = r"?P<status>\d+"
        self.bodyBytesSent = r"?P<bodyBytesSent>\d+"
        self.refer = r"""?P<refer>[^\"]*"""
        self.userAgent = r"""?P<userAgent>.*"""
        self.create_table()
    def create_table(self):
        """Create the info table (one row per parsed log line)."""
        values = '''
        ip varchar(20),
        time varchar(255),
        method varchar(20),
        request varchar(255),
        status int(11),
        body int(11),
        referer varchar(255),
        useragent varchar(255)
        '''
        query = 'CREATE TABLE IF NOT EXISTS info(%s)'% (values)
        self.cursor.execute(query)
    def readline(self,path):
        """All lines of *path* (cached by linecache)."""
        return linecache.getlines(path)
    def free(self):
        self.cursor.close()
    def disconnect(self):
        self.db.close()
    def run(self):
        """Parse every log/*.log line and bulk-insert the matches."""
        result=[]
        for logfile in glob.glob('log/*.log'):
            for logline in self.readline(logfile):
                # NOTE(review): the two groups after the request quote are
                # interpolated as (bodyBytesSent, status) although the
                # columns/table order suggest status precedes bytes in the
                # log format — confirm against the actual nginx log_format.
                # The pattern is rebuilt per line; it could be compiled once.
                p = re.compile(r"(%s)\ -\ -\ \[(%s)]\ \"(%s)?[\s]?(%s)?.*?\"\ (%s)\ (%s)\ \"(%s)\"\ \"(%s).*?\"" % (
                    self.remote_addr,self.local_time, self.method, self.request, self.bodyBytesSent, self.status,self.refer, self.userAgent),
                    re.VERBOSE)
                # IndexError here means the line did not match at all.
                values = re.findall(p, logline)[0]
                result.append(values)
        print "Success found %s records" % len(result)
        sql='INSERT INTO info VALUES(?,?,?,?,?,?,?,?)'
        try:
            self.cursor.executemany(sql,result)
        except BaseException as e:
            print e
        self.db.commit()
        self.free()
        self.disconnect()
if __name__ == "__main__":
logs=DB("log.db")
logs.run() | true |
1bd9093ab5c829e9b164af201cf03a30342061d2 | Python | itkasumy/PythonGrammer | /day09/04-ๆไปถ่ฏปๅ.py | UTF-8 | 141 | 2.71875 | 3 | [] | no_license | file = open("./a.txt", 'w')
file.write('Hello Ksm')  # NOTE(review): `file` shadows the Python 2 builtin
file.close()

# Re-open the file and echo its contents back.
fileR = open('./a.txt', 'r')
cont = fileR.read()
print(cont)
fileR.close()
| true |
2bbb9bedb2a746090bbae839c926ab66fe6e1316 | Python | faaduprogrammer/Math-Solving-Utility | /MathUtility.py | UTF-8 | 13,704 | 3.625 | 4 | [] | no_license | import re
class Frac:
    """Fraction type — all methods are unimplemented stubs."""
    def __init__(self,numenator,denominator):
        pass
    def multiputive_inverse(self):
        # stub: should return the reciprocal
        pass
    def additive_inverse(self):
        # stub: should return the negation
        pass
    def is_mixed_fraction(self):
        pass
    def convert_into_mix_fraction(self):
        pass
class Algebra:
    """Holds a linear-expression string, normalised so every term carries
    an explicit leading sign."""
    def make_readable(self):
        """Normalise self.equation (ensure an explicit leading sign) and
        split it into sign-prefixed character-list terms in self.terms.

        Fixes: the original sign test used ``or`` and was always true,
        so "+4x" became "++4x"; the split result was never stored and
        its final term was dropped.
        """
        # Prepend '+' only when no explicit sign is present.
        if self.equation[0] not in ('+', '-'):
            self.equation = '+' + self.equation
        # Drop every whitespace character.
        chars = [c for c in self.equation if c != ' ']
        terms = []
        term = []
        for ch in chars:
            # A sign starts a new term (except the very first character).
            if ch in ('+', '-') and term:
                terms.append(term)
                term = []
            term.append(ch)
        if term:
            terms.append(term)  # the original dropped this last term
        self.terms = terms
    def __init__(self, equation, output=0):
        """*output* selects the answer form: 0 auto, 1 rational/irrational
        only, 2 decimals (currently unused by make_readable)."""
        self.equation = equation
        self.output = output
        self.make_readable()
class Calc:
    """Arithmetic helpers built on explicit long-division / long-square-root
    algorithms.  All methods are classmethods; list arguments hold
    non-negative integers unless stated otherwise.

    Fixes in this revision: hcf() implemented Euclid's algorithm against
    the wrong operand (e.g. hcf([270, 192]) returned 12 instead of 6) and
    mutated the caller's list; lcm() referenced an undefined name
    (all_number_product) and used the product/hcf formula, which is wrong
    for more than two numbers; isPrime() treated the necessary 6n±1 form
    as sufficient (accepting 25, 35, ...); isDecimal() was wrong for
    negative numbers; factors() leaked a debug print.
    """
    # In any Function of this class numbers arg will always be a list
    @classmethod
    def convInNumber(cls,arrary): # Don't to be used by outside the class
        """Join a list of digits into one int, e.g. [1, 0, 5] -> 105."""
        number = ''
        for num in arrary:
            number = number + str(num)
        number = int(number)
        return number
    @classmethod
    def convInFloat(cls,arrary):
        """Join a list of digit/'.' characters into a float."""
        number = ''
        for num in arrary:
            number = number + str(num)
        number = float(number)
        return number
    @classmethod
    def convInArray(cls,number): # Don't to be used by outside the class
        """Split a non-negative int into its decimal digits, 105 -> [1, 0, 5]."""
        array = []
        for num in str(number):
            array.append(int(num))
        return array
    @classmethod
    def __increment_pair(cls):
        # Pull the next dividend digit into the working remainder;
        # returns False once every digit has been consumed.
        if cls.index >= cls.length_dividend:
            return False
        cls.current_processing.append(cls.dividend[cls.index])
        cls.current_processing_num = Calc.convInNumber(cls.current_processing)
        cls.index += 1
        return True
    @classmethod
    def divide(cls,dividend,divisor,decimal_level=0):
        """Long division of *dividend* by *divisor* (positive ints,
        dividend >= divisor).  Returns [quotient, remainder]; when
        decimal_level > 0 the quotient is a float carrying that many
        digits after the decimal point.

        NOTE: working state lives on class attributes, so concurrent
        calls would interfere with each other.
        """
        if dividend < divisor:
            raise ValueError('Dividend is smaller than Divisor')
        if divisor == 0:
            raise ValueError('Divisor is smaller than 1')
        if divisor == 1:
            return [dividend,0]
        if divisor == dividend:
            return [1,0]
        cls.dividend = Calc.convInArray(dividend)
        cls.index = 0
        quoitent = []
        reminder = []
        cls.current_processing = []
        cls.current_processing_num = 0
        # Working pair of digits currently being divided (as in manual
        # long division of e.g. 124 by 12).
        cls.length_dividend = len(cls.dividend)
        start = True
        added_decimal = False
        while cls.index < cls.length_dividend:
            if start: # Start Divison
                start = False
                # Take digits until the working number reaches the divisor.
                while cls.current_processing_num < divisor:
                    cls.__increment_pair()
            else:
                # Without this 122 / 12 (and other same kind) => quoitent = 1 and reminder = 2
                counter = 0
                while cls.current_processing_num < divisor:
                    if not cls.__increment_pair(): # Means I have reached maximum value
                        quoitent.append(0)
                        # Example of this 122/12: an undividable tail goes
                        # to the remainder with a 0 in the quotient.
                        break
                    if counter == 0: # First extra digit adds no 0
                        counter = 1
                    else:
                        quoitent.append(0)
            # Find the quotient digit by scanning the divisor's "table".
            cls.current_processing_num = cls.convInNumber(cls.current_processing)
            current_value = divisor
            next_value = 0
            for qoit_part in range(1,10):
                next_value = divisor * (qoit_part + 1)
                if current_value <= cls.current_processing_num and next_value > cls.current_processing_num:
                    quoitent.append(qoit_part)
                    cls.current_processing_num = cls.current_processing_num - current_value
                    cls.current_processing = cls.convInArray(cls.current_processing_num).copy()
                    break
                else:
                    current_value = next_value
            # Once all digits are consumed, what is left is the remainder;
            # optionally extend with decimal digits.
            if cls.index == cls.length_dividend:
                reminder = cls.current_processing.copy()
                if cls.convInNumber(reminder) != 0 and decimal_level != 0 and not added_decimal:
                    cls.length_dividend += decimal_level
                    quoitent.append('.')
                    for _ in range(decimal_level): cls.dividend.append(0)
                    added_decimal = True
        reminder = cls.convInNumber(reminder)
        if decimal_level != 0:
            quoitent = cls.convInFloat(quoitent)
        else:
            quoitent = cls.convInNumber(quoitent)
        return [quoitent,reminder]
    @classmethod
    def hcf(cls,numbers):
        """Greatest common divisor of a list of non-negative integers.

        Fixed: the old loop divided the original smaller number by each
        new remainder (hcf([270, 192]) gave 12 instead of 6) and mutated
        the caller's list.
        """
        numbers = sorted(numbers)  # work on a copy; caller's list untouched
        while len(numbers) != 1:
            a, b = numbers[0], numbers[1]  # a <= b
            # Euclid's algorithm: gcd(a, b) == gcd(b mod a, a).
            while a != 0:
                a, b = cls.divide(b, a)[1], a
            del numbers[0:1]
            # Replace the pair with its gcd and keep folding the list.
            numbers[0] = b
        return numbers[0]
    @classmethod
    def lcm(cls,numbers):
        """Least common multiple of a list of positive integers.

        Fixed: the old version referenced the undefined name
        all_number_product (NameError), and product/hcf is only valid
        for two numbers — this folds lcm pairwise instead.
        """
        result = numbers[0]
        for num in numbers[1:]:
            # lcm(a, b) = a*b / gcd(a, b)
            result = cls.divide(result * num, cls.hcf([result, num]))[0]
        return result
    @classmethod
    def isPrime(cls,number):
        """True iff *number* is prime.

        Fixed: the 6n±1 form is necessary but not sufficient — the old
        test accepted composites such as 25 and 35.  This does trial
        division over 6k±1 candidates up to sqrt(number).
        """
        if number <= 1: return False
        if number <= 3: return True
        if number % 2 == 0 or number % 3 == 0:
            return False
        i = 5
        while i * i <= number:
            if number % i == 0 or number % (i + 2) == 0:
                return False
            i += 6
        return True
    @classmethod
    def is_perfect_square(cls,number):
        """True iff the natural number *number* is a perfect square."""
        square_reminder = cls.square_root(number)
        if square_reminder[1] == 0:
            return True
        return False
    @classmethod
    def square_root(cls,number,decimal_level=0):
        """Digit-by-digit (long-division style) square root.

        Returns [root, remainder]; with decimal_level > 0 the root is a
        float carrying that many digit-pairs after the decimal point.
        """
        # Slice the number into two-digit groups from the right, as in
        # the manual algorithm.
        number_array = []
        counter_ = len(str(number))
        if counter_ % 2 == 0:
            while counter_ > 0:
                number_array.append(str(number)[(counter_-2):counter_])
                counter_ = counter_ - 2
        elif counter_ % 2 == 1:
            while counter_ > 0:
                if counter_ == 1:
                    number_array.append(str(number)[(counter_-1):counter_])
                    counter_ = counter_ - 1
                else:
                    number_array.append(str(number)[(counter_-2):counter_])
                    counter_ = counter_ - 2
        # Main rooting process: for each group, find the next root digit d
        # such that (20*root_so_far + d) * d fits the working value.
        number_array = number_array[::-1]
        result_array = []
        calc_int = ""  # running doubled-root prefix, later used as an int
        index = 0
        reminder = 0   # default remainder before the first digit is found
        decimal_calculated = False
        leng_num_array = len(number_array)
        while index < leng_num_array:
            current_num = str(number_array[index])
            root_part = int(str(calc_int) + "1")
            while int(current_num) < root_part:
                result_array.append(0)
                if not(index + 1 <leng_num_array) and not decimal_calculated and decimal_level !=0:
                    result_array.append('.')
                    leng_num_array = leng_num_array + decimal_level
                    for _ in range(decimal_level): number_array.append("00")
                    decimal_calculated = True
                index = index + 1
                if index + 1 < leng_num_array: # Like For numbers like 26
                    current_num += str(number_array[index+1])
                    index = index + 1
                    reminder = int(current_num)
                else: # For numbers like 100
                    break
                calc_int = str(calc_int) + '0'
                root_part = int(str(calc_int) + "1")
            current_num = int(current_num)
            # root_part / future_root_part cache consecutive candidate
            # values to avoid recomputation.
            for num in range(1,10):
                str_num = str(num)
                calc_int = str(calc_int)
                future_root_part = ((int(calc_int + str_num) + 1) * (num + 1))
                if root_part <= current_num and future_root_part > current_num:
                    result_array.append(num)
                    calc_int += str_num
                    calc_int = int(calc_int) + num
                    reminder = current_num - root_part
                    if index + 1 < leng_num_array:
                        old_num = number_array[index+1]
                        number_array[index+1] = int(str(reminder) + str(old_num))
                    break
                root_part = future_root_part
            if not(index + 1 <leng_num_array) and reminder != 0 and not decimal_calculated and decimal_level !=0:
                result_array.append('.') # Adding Decimal Places
                leng_num_array = leng_num_array + decimal_level
                for _ in range(decimal_level): number_array.append("00")
                decimal_calculated = True
                old_num = number_array[index+1]
                number_array[index+1] = int(str(reminder) + str(old_num))
            index = index + 1
        if decimal_level != 0:
            return [cls.convInFloat(result_array),reminder]
        return [cls.convInNumber(result_array),reminder]
    @classmethod
    def factors(cls,number):
        """All positive divisors of *number*, ascending.

        Fixed: removed a leftover debug print of square_root(number).
        (The sqrt bound mentioned before is not actually exploited; the
        scan is a plain O(n) trial division.)
        """
        return_list = []
        for num in range(1,number+1):
            if cls.divide(number,num)[1] == 0:
                return_list.append(num)
        return return_list
    @classmethod
    def multiples(cls,number,no_of_multiple):
        """The first *no_of_multiple* multiples of *number*."""
        return_list = []
        for num in range(1,no_of_multiple+1):
            return_list.append(number*num)
        return return_list
    @classmethod
    def isDecimal(cls,number):
        """True iff *number* has a non-zero fractional part.

        Fixed: the old float(x) > int(x) test was wrong for negatives
        (isDecimal(-4.5) returned False).
        """
        number_in_float = float(number)
        return number_in_float != int(number_in_float)
    @classmethod
    def uniqueSubset(cls,arr,length):
        """All combinations of *arr* of the given *length*, without
        order-permuted duplicates (unlike a plain subset enumeration).

        NOTE: consumes *arr* — the recursion deletes from the list the
        caller passes in; pass a copy if you need it afterwards.
        """
        arr_copy = arr.copy() # iterate a snapshot while mutating arr
        subarray = []
        if length == 1:
            for item in arr:
                subarray.append([item])
            return subarray
        else:
            for item in arr_copy:
                # Drop the head so recursion only sees later elements,
                # which is what keeps combinations unique.
                if arr != []:
                    del arr[0]
                recursion_call = cls.uniqueSubset(arr,length-1)
                for recursion_item in recursion_call:
                    recursion_item.insert(0,item)
                    subarray.append(recursion_item)
            return subarray
if __name__ == "__main__":
# test_case = Calc.divide(12,0)
# print(test_case)
# test_case_2 = Calc.isPrime(289)
# print(test_case_2)
# print(Calc.isDecimal(4.56))
# print(Calc.convInNumber(Calc.square_root(987)))
# print(Calc.convInFloat(['1']))
# print(Calc.hcf([21,6,9]))
# print(Calc.hcf([16,8,24,28,18]))
# print(Calc.multiples(5,6))
# print(Calc.square_root(316))
# print(Calc.factors(225))
# print(Calc.square_root(112,decimal_level=3))
# print(Calc.is_perfect_square(36))
equation = Algebra("4x+ 5")
print(Calc.uniqueSubset(list(range(6)),6))
| true |
9398644fc8363bce914353f27777a052b33f1512 | Python | qcl/ntu-homeworks | /2012-spring-FinancialComputing/hw1/b97501046_hw1.py | UTF-8 | 576 | 3.59375 | 4 | [] | no_license | # -*- coding:utf-8 -*-
# Principles of Financial Computing
# Homework 01
#
# Qing-Cheng Li
# B97501046
import sys
def main():
print 'Please input n:'
n = sys.stdin.readline()
n = int(n.split()[0])
if n <= 0:
return
p = []
for i in range(n):
print 'prices',i+1,'='
t = sys.stdin.readline()
p.append(float(t.split()[0]))
prev = 100.0
for i in range(n):
this = p[i]
f = (prev/this - 1)*100
print 'f('+str(i)+','+str(i+1)+') =',f
prev = this
# Run the interactive prompt only when executed as a script.
if __name__ == '__main__':
    main()
| true |
dbc0fec8f0e0fbdfecd8b3d7b70dbe58869881a2 | Python | L-Lang-Of-LTHC/LTHC-PIYN-Lang | /utils/linker.py | UTF-8 | 1,041 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ./utils/linker.py => contains file linking system
############
# IMPORTS
############
import libs.readline as rl
def link(file, mode, param=None):
    """Dispatch a linked-file run.

    mode 0: execute the file once (FCALL and NE, LT, GT, EQ).
    mode 1: execute the file `param` times (LOOP).
    """
    if mode == 0:
        run(file)
    elif mode == 1:
        for _ in range(param):
            run(file)
def run(file):
    """Execute an '.llangl' source file line by line via the interpreter.

    Each line is fed to ``rl.readline(line, False)``.  A failure in one
    line is reported with its 1-based line number and does not abort the
    rest of the run.

    Fixes over the original:
    - the extension error message claimed '.llang' while the check is for
      '.llangl'; the message now matches the check;
    - the per-line error message computed ``i + 1`` where ``i`` was the
      line *text* (a str), raising TypeError which the outer bare except
      then mis-reported as 'File not found'; a real line number is used;
    - the pointless ``open(file, 'r+')`` + ``write('')`` is replaced by a
      plain read inside a ``with`` block, and only OSError is treated as
      "file not found".
    """
    if not file.endswith('.llangl'):
        print('\nFile Extension Error: the extension must be \'.llangl\'')
        return
    try:
        with open(file, 'r', encoding='utf-8') as wanted:
            lines = [raw.rstrip('\n') for raw in wanted.readlines()]
    except OSError:
        print('\nFile not found\n')
        return
    for number, line in enumerate(lines, start=1):
        try:
            rl.readline(line, False)
        except Exception:
            print(f"\n >>> An error blocks the normal behaviour of the program <<< \n >>> running file: {file} <<<\n >>> Line: {number} <<<\n")
| true |
47943cd2cf095a1bbe38e7fc2a1d5d40d09378d2 | Python | amazingchow/seaweedfs-tools | /tools/time-server/app/utils/time_utils.py | UTF-8 | 578 | 2.796875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import datetime
import faker
__TimeFake = faker.Faker()
def time_utils_random_rfc3339_time(passed_days:int):
    """Pick a random moment within the last `passed_days` days and return
    it as an ISO-8601 string truncated to whole seconds (19 chars)."""
    window_start = "-{}d".format(passed_days)
    moment = __TimeFake.date_time_between(start_date=window_start, end_date="now")
    seconds = datetime.datetime.timestamp(moment)
    return datetime.datetime.fromtimestamp(seconds).isoformat()[:19]
def time_utils_random_timestamp(passed_days:int):
    """Pick a random moment within the last `passed_days` days and return
    it as an integer Unix timestamp."""
    window_start = '-{}d'.format(passed_days)
    moment = __TimeFake.date_time_between(start_date=window_start, end_date='now')
    return int(datetime.datetime.timestamp(moment))
| true |
21ed08043c58f4e6a62e8360b2cf7906085d6fa4 | Python | krenevych/algo | /labs/L19/task1/main.py | UTF-8 | 556 | 3.6875 | 4 | [] | no_license | class Heap:
    def __init__(self, max_heap_size):
        # Stub: the capacity is accepted but no storage is allocated yet.
        pass
    def insert(self, key):
        # Stub: only logs the request; no heap structure is maintained.
        print(f"insert {key}")
    def extract(self):
        # Stub: prints the label without a newline and always yields None,
        # so the caller's print() completes the output line.
        print("extract: ", end="")
        return None
if __name__ == '__main__':
    with open("input.txt") as f:
        N = int(f.readline())  # number of queries to process
        heap = Heap(N + 1)
        for quarry in range(N):
            # Each query line: "0 x" inserts x, "1" extracts an element.
            command = list(map(int, f.readline().split()))
            if command[0] == 0:
                heap.insert(command[1])
            elif command[0] == 1:
                print(heap.extract())
| true |
507362924ddf7b995f6b17b0edd1a7299c8e5a4f | Python | maxcd/bandmat_compiled | /test_core.py | UTF-8 | 24,338 | 2.859375 | 3 | [] | no_license | """Tests for core banded matrix definitions and functions."""
# Copyright 2013, 2014, 2015, 2016, 2017 Matt Shannon
# This file is part of bandmat.
# See `License` for details of license and warranty.
import unittest
import doctest
import numpy as np
import random
from numpy.random import randn, randint
import bandmat as bm
import bandmat.full as fl
from bandmat.testhelp import assert_allclose, assert_allequal, get_array_mem
def rand_bool():
    """Flip a fair coin: True or False with equal probability."""
    coin = randint(0, 2)
    return coin == 0
def gen_BandMat(size, l=None, u=None, transposed=None):
    """Build a random BandMat of the given size.

    Bandwidths and the transposed flag are randomised when not supplied;
    the lower/upper bandwidths are biased towards the small values 0 and 1.
    """
    lower = random.choice([0, 1, randint(0, 10)]) if l is None else l
    upper = random.choice([0, 1, randint(0, 10)]) if u is None else u
    band_data = randn(lower + upper + 1, size)
    if transposed is None:
        transposed = rand_bool()
    return bm.BandMat(lower, upper, band_data, transposed=transposed)
def load_tests(loader, tests, ignore):
    """unittest hook: add the package doctests to this module's test suite."""
    # package-level doctests (N.B. includes other modules, not just core)
    tests.addTests(doctest.DocTestSuite(bm))
    tests.addTests(doctest.DocTestSuite(bm.core))
    return tests
class TestCore(unittest.TestCase):
    """Randomised unit tests for the BandMat core API.

    Each test draws many random sizes/bandwidths (`its` iterations) and
    checks the banded result against the equivalent dense ("full")
    computation, plus memory-sharing expectations between views/copies.
    """
    def test_BandMat_basic(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            assert a_bm.size == size
    def test_BandMat_full(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            l, u = a_bm.l, a_bm.u
            # N.B. these tests are not really testing much of anything (they
            #   are virtually identical to the implementation of BandMat.full),
            #   but this is not that surprising since the lines below are kind
            #   of the definition of the representation used by BandMat in the
            #   two cases (transposed True and transposed False).
            if a_bm.transposed:
                assert_allequal(a_full.T, fl.band_c(u, l, a_bm.data))
            else:
                assert_allequal(a_full, fl.band_c(l, u, a_bm.data))
            assert not np.may_share_memory(a_full, a_bm.data)
    def test_BandMat_T(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            assert_allequal(a_bm.T.full(), a_bm.full().T)
            assert a_bm.T.data is a_bm.data
            if size > 0:
                assert np.may_share_memory(a_bm.T.data, a_bm.data)
    def test_BandMat_copy_exact(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mat_bm = gen_BandMat(size)
            mat_full_orig = mat_bm.full().copy()
            mat_bm_new = mat_bm.copy_exact()
            assert mat_bm_new.l == mat_bm.l
            assert mat_bm_new.u == mat_bm.u
            assert mat_bm_new.transposed == mat_bm.transposed
            # check that copy represents the same matrix
            assert_allequal(mat_bm_new.full(), mat_full_orig)
            # check that copy does not share memory with original
            assert not np.may_share_memory(mat_bm_new.data, mat_bm.data)
            # check that mutating the copy does not change the original
            mat_bm_new.data += 1.0
            assert_allequal(mat_bm.full(), mat_full_orig)
    def test_BandMat_copy(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mat_bm = gen_BandMat(size)
            mat_full_orig = mat_bm.full().copy()
            mat_bm_new = mat_bm.copy()
            assert mat_bm_new.l == mat_bm.l
            assert mat_bm_new.u == mat_bm.u
            assert not mat_bm_new.transposed
            # check that copy represents the same matrix
            assert_allequal(mat_bm_new.full(), mat_full_orig)
            # check that copy does not share memory with original
            assert not np.may_share_memory(mat_bm_new.data, mat_bm.data)
            # check that mutating the copy does not change the original
            mat_bm_new.data += 1.0
            assert_allequal(mat_bm.full(), mat_full_orig)
    def test_BandMat_equiv(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mat_bm = gen_BandMat(size)
            l_new = random.choice([None, 0, 1, randint(0, 10)])
            u_new = random.choice([None, 0, 1, randint(0, 10)])
            transposed_new = random.choice([None, True, False])
            zero_extra = rand_bool()
            l_new_value = mat_bm.l if l_new is None else l_new
            u_new_value = mat_bm.u if u_new is None else u_new
            transposed_new_value = (mat_bm.transposed if transposed_new is None
                                    else transposed_new)
            # shrinking the band below the matrix's actual band must fail
            if l_new_value < mat_bm.l or u_new_value < mat_bm.u:
                self.assertRaises(AssertionError,
                                  mat_bm.equiv,
                                  l_new=l_new, u_new=u_new,
                                  transposed_new=transposed_new,
                                  zero_extra=zero_extra)
            else:
                mat_bm_new = mat_bm.equiv(l_new=l_new, u_new=u_new,
                                          transposed_new=transposed_new,
                                          zero_extra=zero_extra)
                assert mat_bm_new.l == l_new_value
                assert mat_bm_new.u == u_new_value
                assert mat_bm_new.transposed == transposed_new_value
                assert_allequal(mat_bm_new.full(), mat_bm.full())
                assert not np.may_share_memory(mat_bm_new.data, mat_bm.data)
                if zero_extra:
                    mat_new_data_good = (
                        fl.band_e(u_new_value, l_new_value, mat_bm.full().T)
                    ) if mat_bm_new.transposed else (
                        fl.band_e(l_new_value, u_new_value, mat_bm.full())
                    )
                    assert_allequal(mat_bm_new.data, mat_new_data_good)
    def test_BandMat_plus_equals_band_of(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mult = randn()
            target_bm = gen_BandMat(size)
            mat_bm = gen_BandMat(size)
            target_full = target_bm.full()
            mat_full = mat_bm.full()
            array_mem = get_array_mem(target_bm.data, mat_bm.data)
            target_bm.plus_equals_band_of(mat_bm, mult)
            target_full += (
                fl.band_ec(target_bm.l, target_bm.u, mat_full) * mult
            )
            assert_allclose(target_bm.full(), target_full)
            assert get_array_mem(target_bm.data, mat_bm.data) == array_mem
    def test_BandMat_add(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            b_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_full = b_bm.full()
            c_bm = a_bm + b_bm
            c_full = a_full + b_full
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            assert not np.may_share_memory(c_bm.data, b_bm.data)
            # adding integer zero is allowed (supports sum()); other scalars are not
            c_bm = a_bm + 0
            c_full = a_full + 0
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            c_bm = 0 + a_bm
            c_full = 0 + a_full
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            with self.assertRaises(TypeError):
                a_bm + 1.0
            with self.assertRaises(TypeError):
                1.0 + a_bm
    def test_BandMat_sum(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            num_terms = randint(10)
            a_bms = [ gen_BandMat(size) for _ in range(num_terms) ]
            a_fulls = [ a_bm.full() for a_bm in a_bms ]
            b_bm = sum(a_bms)
            b_full = sum(a_fulls)
            if num_terms > 0:
                assert_allclose(b_bm.full(), b_full)
                for a_bm in a_bms:
                    assert not np.may_share_memory(b_bm.data, a_bm.data)
    def test_BandMat_sub(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            b_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_full = b_bm.full()
            c_bm = a_bm - b_bm
            c_full = a_full - b_full
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            assert not np.may_share_memory(c_bm.data, b_bm.data)
            c_bm = a_bm - 0
            c_full = a_full - 0
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            c_bm = 0 - a_bm
            c_full = 0 - a_full
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            with self.assertRaises(TypeError):
                a_bm - 1.0
            with self.assertRaises(TypeError):
                1.0 - a_bm
    def test_BandMat_iadd(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            b_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_full = b_bm.full()
            array_mem = get_array_mem(a_bm.data, b_bm.data)
            # in-place add only works when the target band contains the source band
            if a_bm.l >= b_bm.l and a_bm.u >= b_bm.u:
                a_bm += b_bm
                a_full += b_full
                assert_allclose(a_bm.full(), a_full)
                assert get_array_mem(a_bm.data, b_bm.data) == array_mem
            else:
                with self.assertRaises(AssertionError):
                    a_bm += b_bm
            a_bm += 0
            a_full += 0
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data, b_bm.data) == array_mem
            with self.assertRaises(TypeError):
                a_bm += 1.0
    def test_BandMat_isub(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            b_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_full = b_bm.full()
            array_mem = get_array_mem(a_bm.data, b_bm.data)
            if a_bm.l >= b_bm.l and a_bm.u >= b_bm.u:
                a_bm -= b_bm
                a_full -= b_full
                assert_allclose(a_bm.full(), a_full)
                assert get_array_mem(a_bm.data, b_bm.data) == array_mem
            else:
                with self.assertRaises(AssertionError):
                    a_bm -= b_bm
            a_bm -= 0
            a_full -= 0
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data, b_bm.data) == array_mem
            with self.assertRaises(TypeError):
                a_bm -= 1.0
    def test_BandMat_pos(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = +a_bm
            b_full = +a_full
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
    def test_BandMat_neg(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = -a_bm
            b_full = -a_full
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
    def test_BandMat_mul_and_rmul(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mult = randn()
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = a_bm * mult
            b_full = a_full * mult
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            c_bm = mult * a_bm
            c_full = mult * a_full
            assert c_bm.l == a_bm.l
            assert c_bm.u == a_bm.u
            assert_allclose(c_bm.full(), c_full)
            assert not np.may_share_memory(c_bm.data, a_bm.data)
            with self.assertRaises(TypeError):
                a_bm * a_bm
    def test_BandMat_various_divs(self, its=100):
        # NOTE(review): __div__/__idiv__ below are Python-2-era protocols;
        # this test appears to target Python 2 -- confirm before running
        # under Python 3.
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mult = randn()
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = a_bm // mult
            b_full = a_full // mult
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            b_bm = a_bm / mult
            b_full = a_full / mult
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            b_bm = a_bm.__floordiv__(mult)
            b_full = a_full.__floordiv__(mult)
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            b_bm = a_bm.__div__(mult)
            b_full = a_full.__div__(mult)
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            b_bm = a_bm.__truediv__(mult)
            b_full = a_full.__truediv__(mult)
            assert b_bm.l == a_bm.l
            assert b_bm.u == a_bm.u
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
            with self.assertRaises(TypeError):
                a_bm // a_bm
            with self.assertRaises(TypeError):
                a_bm / a_bm
            with self.assertRaises(TypeError):
                1.0 // a_bm
            with self.assertRaises(TypeError):
                1.0 / a_bm
    def test_BandMat_imul(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mult = randn()
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm *= mult
            a_full *= mult
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            with self.assertRaises(TypeError):
                a_bm *= a_bm
    def test_BandMat_various_idivs(self, its=100):
        # NOTE(review): __idiv__ below is a Python-2-era protocol; see
        # test_BandMat_various_divs.
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mult = randn()
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm //= mult
            a_full //= mult
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm /= mult
            a_full /= mult
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm.__ifloordiv__(mult)
            a_full.__ifloordiv__(mult)
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm.__idiv__(mult)
            a_full.__idiv__(mult)
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            array_mem = get_array_mem(a_bm.data)
            a_bm.__itruediv__(mult)
            a_full.__itruediv__(mult)
            assert_allclose(a_bm.full(), a_full)
            assert get_array_mem(a_bm.data) == array_mem
            with self.assertRaises(TypeError):
                a_bm //= a_bm
            with self.assertRaises(TypeError):
                a_bm /= a_bm
    def test_BandMat_reverse_view(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = a_bm.reverse_view()
            b_full = a_full[::-1, ::-1]
            assert_allclose(b_bm.full(), b_full)
            assert b_bm.data.base is a_bm.data
            if size > 0:
                assert np.may_share_memory(b_bm.data, a_bm.data)
    def test_BandMat_sub_matrix_view(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            start = randint(size + 1)
            end = randint(size + 1)
            if start > end:
                start, end = end, start
            a_bm = gen_BandMat(size)
            a_full = a_bm.full()
            b_bm = a_bm.sub_matrix_view(start, end)
            b_full = a_full[start:end, start:end]
            assert_allclose(b_bm.full(), b_full)
            assert b_bm.data.base is a_bm.data
            if end > start:
                assert np.may_share_memory(b_bm.data, a_bm.data)
    def test_BandMat_embed_as_sub_matrix(self, its=100):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            start = randint(size + 1)
            end = randint(size + 1)
            if start > end:
                start, end = end, start
            a_bm = gen_BandMat(end - start)
            a_full = a_bm.full()
            b_bm = a_bm.embed_as_sub_matrix(start, size)
            b_full = np.zeros((size, size))
            b_full[start:end, start:end] = a_full
            assert_allclose(b_bm.full(), b_full)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
    def test_zeros(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            mat_bm = bm.zeros(l, u, size)
            assert mat_bm.l == l
            assert mat_bm.u == u
            assert_allequal(mat_bm.full(), np.zeros((size, size)))
    def test_from_full(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            mat_full = gen_BandMat(size).full()
            zero_outside_band = np.all(fl.band_ec(l, u, mat_full) == mat_full)
            # from_full must reject matrices with entries outside the band
            if zero_outside_band:
                mat_bm = bm.from_full(l, u, mat_full)
                assert mat_bm.l == l
                assert mat_bm.u == u
                assert_allequal(mat_bm.full(), mat_full)
                assert not np.may_share_memory(mat_bm.data, mat_full)
            else:
                self.assertRaises(AssertionError, bm.from_full, l, u, mat_full)
    def test_band_c_bm(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            mat_rect = randn(l + u + 1, size)
            mat_bm = bm.band_c_bm(l, u, mat_rect)
            mat_full_good = fl.band_c(l, u, mat_rect)
            assert_allequal(mat_bm.full(), mat_full_good)
            assert not np.may_share_memory(mat_bm.data, mat_rect)
    def test_band_e_bm(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            mat_bm = gen_BandMat(size)
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            mat_rect = bm.band_e_bm(l, u, mat_bm)
            mat_rect_good = fl.band_e(l, u, mat_bm.full())
            assert_allequal(mat_rect, mat_rect_good)
            assert not np.may_share_memory(mat_rect, mat_bm.data)
    def test_band_ec_bm_view(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            b_bm = bm.band_ec_bm_view(l, u, a_bm)
            b_full_good = fl.band_ec(l, u, a_bm.full())
            assert_allequal(b_bm.full(), b_full_good)
            assert b_bm.data.base is a_bm.data
            if size > 0:
                assert np.may_share_memory(b_bm.data, a_bm.data)
    def test_band_ec_bm(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            a_bm = gen_BandMat(size)
            l = random.choice([0, 1, randint(0, 10)])
            u = random.choice([0, 1, randint(0, 10)])
            b_bm = bm.band_ec_bm(l, u, a_bm)
            b_full_good = fl.band_ec(l, u, a_bm.full())
            assert_allequal(b_bm.full(), b_full_good)
            assert not np.may_share_memory(b_bm.data, a_bm.data)
    def test_band_e_bm_common(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            num_bms = randint(5)
            mat_bms = [ gen_BandMat(size) for _ in range(num_bms) ]
            if num_bms > 0:
                # common representation uses the max bandwidths over all inputs
                l = max([ mat_bm.l for mat_bm in mat_bms ])
                u = max([ mat_bm.u for mat_bm in mat_bms ])
                mat_rects = bm.band_e_bm_common(*mat_bms)
                for mat_bm, mat_rect in zip(mat_bms, mat_rects):
                    assert_allclose(mat_rect, bm.band_e_bm(l, u, mat_bm))
    def test_diag(self, its=50):
        for it in range(its):
            size = random.choice([0, 1, randint(0, 10), randint(0, 100)])
            # vector -> diagonal BandMat (shares memory with the vector)
            vec = randn(size)
            mat_bm = bm.diag(vec)
            assert isinstance(mat_bm, bm.BandMat)
            assert_allequal(mat_bm.full(), np.diag(vec))
            assert mat_bm.data.base is vec
            if size > 0:
                assert np.may_share_memory(mat_bm.data, vec)
            # BandMat -> diagonal vector (shares memory with the matrix data)
            mat_bm = gen_BandMat(size)
            vec = bm.diag(mat_bm)
            assert_allequal(vec, np.diag(mat_bm.full()))
            assert vec.base is mat_bm.data
            if size > 0:
                assert np.may_share_memory(vec, mat_bm.data)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true |
8e00e71bb1bcd5cb29680dc62f7cb0a94743af61 | Python | Vivekg95/Python-Tutorial | /Structuredqurylang/skl.py | UTF-8 | 251 | 2.53125 | 3 | [] | no_license | import mysql.connector
# Connect to the local MySQL "College" database.
# NOTE(review): credentials are hard-coded in source; move them to
# configuration/environment variables before sharing this file.
mydb=mysql.connector.connect(host="localhost",user="root",passwd="708312@vk",database="College")
mycursor=mydb.cursor()
#mycursor.execute("show databases")
# Fetch and print every row of the Base table.
mycursor.execute("select * from Base")
for i in mycursor:
    print(i)
| true |
21ba1412a2934a627f377066bee65ef7d8214f07 | Python | Sassa-nf/pi | /math_art/c.py | UTF-8 | 3,093 | 3.59375 | 4 | [] | no_license | # Given a list of drawing instructions, count how many crosses are drawn.
#
# Drawing instructions consist of pen movement direction (LURD) and integer
# offset of that movement.
#
# Number of lines: 2 <= N <= 2M
# Offsets: 1 <= L <= 1G
# Directions: D_i in 'LURD'
#
# ---
#
# Let's collect all vertical lines and all horizontal lines, grouping them
# by the same coordinate. The weird thing here is that matching each line with
# each is too expensive, so we need to reduce the amount of searching for matches.
#
from typing import List
def getPlusSignCount(N: int, L: List[int], D: str) -> int:
    """Count '+' crossings drawn by the pen-movement instructions.

    Walks the pen from the origin, recording every horizontal segment
    keyed by its y coordinate and every vertical segment keyed by its x
    coordinate, then delegates the intersection count to match().
    """
    horz = {}
    vert = {}
    x = y = 0
    for offset, direction in zip(L, D):
        if direction == 'D':
            vert.setdefault(x, []).append((y - offset, y))
            y -= offset
        elif direction == 'U':
            vert.setdefault(x, []).append((y, y + offset))
            y += offset
        elif direction == 'L':
            horz.setdefault(y, []).append((x - offset, x))
            x -= offset
        else:
            horz.setdefault(y, []).append((x, x + offset))
            x += offset
    return match(organize(horz), organize(vert))
def organize(segments):
    """Collapse each coordinate's segment list into non-overlapping,
    sorted intervals; return [(coord, intervals)] sorted by coord.

    Overlapping or touching intervals are merged.  The per-coordinate
    lists are sorted in place, matching the original behaviour.
    """
    merged_all = []
    for coord, intervals in segments.items():
        intervals.sort()
        merged = [intervals[0]]
        for lo, hi in intervals[1:]:
            last_lo, last_hi = merged[-1]
            if hi <= last_hi:
                continue  # fully contained in the previous interval
            if lo <= last_hi:
                merged[-1] = (last_lo, hi)  # extend the previous interval
            else:
                merged.append((lo, hi))
        merged_all.append((coord, merged))
    merged_all.sort()
    return merged_all
def bin_search(xs, x, key=None):
    """Return the first index i with key(xs[i]) >= x, assuming `xs` is
    sorted under `key` (a bisect_left); returns len(xs) when no element
    qualifies.  `key` defaults to the identity."""
    if key is None:
        key = lambda value: value
    lo = 0
    hi = len(xs)
    while lo < hi:
        mid = (lo + hi) // 2
        if key(xs[mid]) >= x:
            hi = mid
        else:
            lo = mid + 1
    return lo
def pick_lines(vert, x0, x1):
    # NOTE(review): `y` below is a free variable, not a parameter -- as
    # written this raises NameError when called from match(), whose `y`
    # is local to that function.  The code looks unfinished; confirm the
    # intended signature before relying on it.
    # Select the entries whose coordinate lies strictly between x0 and x1
    # (bin_search returns the first index with coordinate >= its key).
    i0 = bin_search(vert, x0 + 1, key=lambda x: x[0])
    i1 = bin_search(vert, x1, key=lambda x: x[0])
    yys = [(x, ys) for x, ys in vert[i0:i1] if ys]
    yys.sort(key=lambda ys: ys[1][0][1])
    for _, ys in yys:
        if ys[0][1] > y:
            break
        # Drop leading intervals that end at or below y.
        while ys and ys[0][1] <= y:
            ys.pop(0)
    yys = [(x, ys) for x, ys in yys if ys]
    # assert: yys is a list of vertical lines that end above y
    yys.sort(key=lambda ys: ys[1][0][0])
    while yys and yys[-1][1][0][0] >= y:
        yys.pop()
    # assert: yys is a list of vertical lines that cross ((x0, y), (x1, y))
    yys.sort(key=lambda ys: -ys[1][0][1]) # the lines that end lower/sooner are at the end
    return i0, i1, yys
def match(horz, vert):
    # NOTE(review): unfinished -- `cross` is never incremented, so this
    # always returns 0, and the pick_lines() results (x0/ys, j0/j1/xxs)
    # are reassigned or unused without counting any intersections.
    cross = 0
    for y, hs in horz:
        if not hs:
            continue
        for x0, x1 in hs:
            i0, i1, yys = pick_lines(vert, x0, x1)
            x0, ys = min(yys, key=lambda x: x[0])
            j0, j1, xxs = pick_lines(horz, ys[0][0], ys[0][1])
            while yys:
                x1, vs = yys.pop()
                vert[i0: i1] = [(x, ys) for x, ys in vert[i0: i1] if ys]
    return cross
# Ad-hoc checks: each call prints the computed count next to what appears
# to be the expected answer.  NOTE(review): with match()/pick_lines()
# unfinished (see above) these calls do not yet succeed as intended.
print(getPlusSignCount(0, [6, 3, 4, 5, 1, 6, 3, 3, 4], 'ULDRULURD'), 4)
print(getPlusSignCount(0, [1, 1, 1, 1, 1, 1, 1, 1], 'RDLUULDR'), 1)
print(getPlusSignCount(0, [1, 2, 2, 1, 1, 2, 2, 1], 'UDUDLRLR'), 1)
# Stress input: two interleaved staircase paths of 500000 segments each.
horz = [500002, 1] * 500000
d = 'RULU' * 250000
vert = [1, 500002] * 500000
d = d + ('LDLU' * 250000)
print(getPlusSignCount(0, horz + vert, d), 500000 ** 2)
| true |
6d3aa5c52800379d4ad1fdcf66ba435dc324ca4f | Python | grt192/2015recycle-rush | /py/record_controller.py | UTF-8 | 6,083 | 2.75 | 3 | [] | no_license | import threading
import time
from collections import OrderedDict
from grt.core import GRTMacro
try:
import wpilib
except ImportError:
from pyfrc import wpilib
class RecordMacro(GRTMacro):
    """Macro that periodically samples a list of devices (talons etc.)
    and records their output values so they can be replayed later by
    PlaybackMacro.  Samples are kept in an ordered dict keyed by
    "<device id>, <device type>" with a list of readings per device."""
    def __init__(self, obj_list, timeout=None):
        super().__init__(timeout)
        self.obj_list = obj_list #list of objects to record
        #self.running = False
        self.instructions = OrderedDict() #dictionary of instructions to save
        self.enabled = False
        """
        This ridiculous for loop sets up a dictionary containing the objects passed in, and their output values.
        It may be easier to replace it with a 2D list.
        """
        for i in range(len(self.obj_list)):
            self.instructions["{0}, {1}".format(self.obj_list[i].getDeviceID() , type(self.obj_list[i]))] = [self.obj_list[i].get()]
        self.run_threaded()
    def engage(self):
        """
        Called in a higher level controller.
        Starts recording in a separate thread.
        """
        #self.running = True
        self.thread = threading.Thread(target=self.run_record)
        self.thread.start()
    def disengage(self):
        """
        Signals recording thread to stop.
        """
        #self.running = False
    def start_record(self):
        # Reset the instruction log with a fresh initial sample of every
        # device, then enable periodic sampling.
        self.instructions = OrderedDict()
        for i in range(len(self.obj_list)):
            self.instructions["{0}, {1}".format(self.obj_list[i].getDeviceID() , type(self.obj_list[i]))] = [self.obj_list[i].get()]
        self.enabled = True
    def stop_record(self):
        # Disable sampling, persist the log to disk, and hand it back.
        self.enabled = False
        print(self.instructions)
        self.save("/home/lvuser/py/instructions.py")
        return self.instructions
    def macro_periodic(self):
        """
        Appends the output values of all the objects passed into __init__
        to the instructions dictionary. Sample rate is currently hard-coded.
        """
        #while self.running:
        #print("Operating")
        if self.enabled:
            i = 0
            tinit = time.time()
            # Relies on dict iteration matching obj_list order (OrderedDict
            # preserves the insertion order used above).
            for key in self.instructions:
                self.instructions[key].append(self.obj_list[i].get())
                #print(self.obj_list[i].Get())
                i += 1
            #wpilib.Wait(.1)
            # Sleep for the remainder of the 0.1 s sampling period.
            time.sleep(.1 - (time.time() - tinit))
    def save(self, file_name):
        # Append the repr of the instruction dict as one line of the file.
        with open(file_name, 'a') as f:
            f.write(str(self.instructions) + "\n")
class PlaybackMacro(GRTMacro):
    """Macro that replays an instruction dict captured by RecordMacro,
    feeding the recorded values back to the supplied talon objects at
    the same hard-coded 0.1 s period."""
    def __init__(self, instructions, talon_arr_obj, revert_controller=None, timeout=None):
        super().__init__(timeout)
        self.instructions = instructions #instructions to output
        self.talon_arr_obj = talon_arr_obj #talons to output instructions to
        self.revert_controller = revert_controller #drive controller to revert control to when finished
        #self.running = False
        self.enabled = False
        self.i = 0
        #parsing the dictionary into talon and solenoid components.
        self.parse()
        #self.run_threaded(no_initialize=True)
        #self.playback()
    def load(self, file_name):
        #assumes we have a python file (*.py)
        # NOTE(review): eval() on each line of the file -- only load files
        # this robot itself recorded; confirm the source is trusted.
        with open(file_name, 'r') as f:
            for line in f:
                self.instructions = eval(line.replace("\n", ""))
                #print(line)
        #self.instructions = instructions
    def parse(self):
        # Split the recorded dict into per-device value lists by the type
        # name embedded in each key ("<id>, <type>").
        self.talon_arr = [] #lists that the dictionary will be parsed into
        self.solenoid_arr = []
        for key in self.instructions:
            i = int(key.split(',')[0])
            print(i)
            if "Talon" in key or "Macro" in key:
                self.talon_arr.append(self.instructions[key])
                #print(self.instructions[key])
                print(self.talon_arr)
            if "Solenoid" in key:
                self.solenoid_arr[i] = self.instructions[key]
    def engage(self):
        """
        Called in a higher level controller.
        Starts playback in a separate thread.
        """
        self.running = True
        self.thread = threading.Thread(target=self.run_playback)
        self.thread.start()
    def start_playback(self, instructions=None):
        #self.enabled = True
        # Optionally swap in a new instruction dict before starting.
        if instructions:
            self.instructions = instructions
            self.parse()
        self.run_threaded()
    def stop_playback(self):
        self.terminate()
    def macro_initialize(self):
        """
        To be called only to use this LINEARLY in auto.
        """
        #pass
        #self.enabled = True
        print("Began playback")
        #self.process.join(timeout = 2)
        #print("Ended join")
        #self.terminate()
    def macro_stop(self):
        """
        Signals playback thread to stop.
        Also zeros all motor outputs.
        """
        #self.running = False
        self.enabled = False
        for talon in self.talon_arr_obj:
            if str(type(talon)) == "<class 'wpilib.cantalon.CANTalon'>":
                talon.set(0)
        #self.revert_controller.engage()
    def macro_periodic(self):
        """
        Iterates through the provided instruction dictionary.
        Disengages itself when finished.
        """
        #for i in range(len(self.talon_arr[0])):
        print("Enabled")
        tinit = time.time()
        try:
            print(str(range(len(self.talon_arr[0]))))
            for j in range(len(self.talon_arr)): ###IMPORTANT NOTE!!!
                #THIS WAS CHANGED FROM len(self.talon_arr) to len(self.talon_arr_obj)
                #IT SHOULD STILL WORK, but be sure to change it back at some point.
                self.talon_arr_obj[j].set(self.talon_arr[j][self.i])
                print(self.talon_arr[j][self.i])
                print("J: " + str(j))
            print(self.i)
            self.i += 1
            #wpilib.Wait(.1)
            # Sleep for the remainder of the 0.1 s playback period.
            time.sleep(.1 - (time.time() - tinit))
        except IndexError:
            # Ran past the end of the recording: stop and reset the cursor.
            self.enabled = False
            self.i = 0
            self.macro_stop()
            self.terminate()
            #self.disengage()
| true |
4a1e4b6bd54ce961c4930014653f2e81e745c2f1 | Python | Jane42070/TianYue | /get_book.py | UTF-8 | 3,463 | 2.84375 | 3 | [] | no_license | #!/usr/local/bin/python3.8
# -*- coding: utf-8 -*-
'''Record schema: bname, bout, bauthor, bdate, bpic, bintro, bcontent, bprice'''
import random
import time

import lxml
import lxml.etree  # `import lxml` alone does not load the etree submodule
import requests

from users.models import BookInfo
def spawn_book():
    '''Generate book records by scraping ireader.com category pages and
    storing each scraped book via BookInfo (translated from the original
    Chinese docstring "generate books").'''
    # Pick a random publication date between 1976-01-01 and 2017-12-31.
    a1 = (1976, 1, 1, 0, 0, 0, 0, 0, 0)
    a2 = (2017, 12, 31, 23, 59, 59, 0, 0, 0)
    start = time.mktime(a1)
    end = time.mktime(a2)
    t = random.randint(start, end)
    date_touple = time.localtime(t)
    bdate = time.strftime("%Y-%m-%d", date_touple)
    url = 'http://www.ireader.com/index.php?ca=booksort.index'
    response = requests.get(url)
    if response.status_code == 200:
        # Dict mapping each category's data-id to its display name.
        data_dict = {}
        context = lxml.etree.HTML(response.text)
        data_id = context.xpath(
            "//div[@class='difgenre']/div[@class='right']/ul/li/@data-id")
        data_id_name = context.xpath(
            "//div[@class='difgenre']/div[@class='right']/ul/li/a/text()")
        # data_id_name also contains leading channel labels; drop them so
        # it lines up one-to-one with data_id.
        for i in range(len(data_id_name) - len(data_id)):
            data_id_name.pop(0)
        data_name_len = len(data_id_name)
        # Record the category name for each cid, stored as a dict.
        for i in range(data_name_len):
            data_dict.update({f'{data_id[i]}': data_id_name[i]})
        print(data_dict)
        # Fetch each category's listing page.
        for i in data_id:
            response = requests.get(
                f"http://www.ireader.com/index.php?ca=booksort.index&cid={i}")
            # Collect the hot books shown on the page.
            context = lxml.etree.HTML(response.text)
            book_link = context.xpath("//ul[@class='newShow']/li/a/@href")
            btype = data_dict.get(i)
            print(f'ๆญฃๅจ็ฌๅๅ็ฑป๏ผ{btype} ็็ญ้จไนฆ็ฑ')
            # Visit each book page and extract its details.
            for book_url in book_link:
                response = requests.get(book_url)
                book_context = lxml.etree.HTML(response.text)
                try:
                    bname = book_context.xpath(
                        "//div[@class='bookname']/h2/a/text()")[0]
                    b_info1 = book_context.xpath(
                        "//div[@class='bookinf01']/p/span/text()")
                    bprice_str = book_context.xpath(
                        "//div[@class='bookinf02']/div/p/i/text()")[0].split(
                            '๏ผ')[1]
                    bintro = book_context.xpath(
                        "//div[@class='bookinf03']/p/text()")[0].replace(
                            '\n', '').replace(' ', '')
                    bpic = book_context.xpath(f"//img[@alt='{bname}']/@src")[0]
                    bauthor = b_info1[0].split('๏ผ')[1]
                    bcontent = b_info1[1].split('๏ผ')[1]
                    bout = b_info1[2].split('๏ผ')[1]
                    bprice = float(''.join(
                        list(filter(str.isdigit, bprice_str)))) / 10
                    # print('rating: ' + stars)
                except Exception:
                    # NOTE(review): on a failed parse the variables keep
                    # their values from the previous iteration (or are
                    # undefined on the first one) -- confirm intended.
                    print('่ทๅๅบ้')
                bookinfo = (bname, bout, bauthor, bdate, bpic, bintro,
                            bcontent, bprice, btype)
                print(bookinfo)
                BookInfo.objects.create_book_in_linear_list(bookinfo)
    else:
        return 0
| true |
7b144538679eedc44f81154aa49c00e7e6415f36 | Python | ankitgupta123445/python | /day.py | UTF-8 | 1,272 | 3.15625 | 3 | [] | no_license | date=int(input("enter the date"))
month=int(input("enter the month"))
t=365
if month==1:
rem=(31-date)+(t-31)
print("no. of remaining days=",rem)
elif month==2:
rem=(28-date)+(t-31-28)
print("no. of remaining days=",rem)
elif month==3:
rem=(31-date)+(t-31-28-31)
print("no. of remaining days=",rem)
elif month==4:
rem=(30-date)+(t-31-28-31-30)
print("no. of remaining days=",rem)
elif month==5:
rem=(31-date)+(t-31-28-31-30-31)
print("no. of remaining days=",rem)
elif month==6:
rem=(30-date)+(t-31-28-31-30-30-31)
print("no. of remaining days=",rem)
elif month==7:
rem=(31-date)+(t-31-28-31-30-30-31-31)
print("no. of remaining days=",rem)
elif month==8:
rem=(31-date)+(t-31-28-31-30-30-31-31-31)
print("no. of remaining days=",rem)
elif month==9:
rem=(30-date)+(t-31-28-31-30-30-31-31-31-30)
print("no. of remaining days=",rem)
elif month==10:
rem=(31-date)+(t-31-28-31-30-30-31-31-31-30-31)
print("no. of remaining days=",rem)
elif month==11:
rem=(30-date)+(t-31-28-31-30-30-31-31-31-30-31-30)
print("no. of remaining days=",rem)
elif month==12 :
rem=31-date
print("no. of remaining days=",rem)
else :
print("invalid input")
| true |
664da3c83adbaac3bc0df17bb1f6e745bbf01436 | Python | funnydog/AoC2016 | /day7/day7.py | UTF-8 | 1,522 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python3
import re
import sys
pat = re.compile(r"([a-z]+)|\[([a-z]+)\]")
def is_abba(sub):
    """Return True if sub contains an ABBA: a 4-char mirrored run of two
    distinct characters (e.g. 'abba', but not 'aaaa')."""
    return any(
        sub[i] == sub[i + 3] and sub[i + 1] == sub[i + 2] and sub[i] != sub[i + 1]
        for i in range(len(sub) - 3)
    )
def supports_tls(sequence):
    """Return True if the address has an ABBA outside square brackets and
    no ABBA inside any bracketed (hypernet) part."""
    has_outside_abba = False
    for outside, inside in pat.findall(sequence):
        if inside:
            # ABBA inside brackets disqualifies the address immediately.
            if is_abba(inside):
                return False
        elif outside and is_abba(outside):
            has_outside_abba = True
    return has_outside_abba
def find_aba(sub):
    """Return every 3-char slice of the form ABA (same outer char, a
    different middle char) found in sub, in order of appearance."""
    return [
        sub[i:i + 3]
        for i in range(len(sub) - 2)
        if sub[i] != sub[i + 1] and sub[i] == sub[i + 2]
    ]
def supports_ssl(sequence):
    """Return True if some ABA outside brackets has its corresponding BAB
    inside a bracketed (hypernet) part."""
    outside_abas = []
    inside_babs = []
    for outside, inside in pat.findall(sequence):
        if outside:
            outside_abas.extend(find_aba(outside))
        else:
            # Convert each ABA found inside brackets to the BAB to match.
            for aba in find_aba(inside):
                inside_babs.append(aba[1] + aba[0] + aba[1])
    return any(aba in inside_babs for aba in outside_abas)
if __name__ == "__main__":
    # Command-line entry point: read the puzzle input file and count the
    # addresses supporting TLS (part 1) and SSL (part 2).
    if len(sys.argv) < 2:
        print("Usage: {} <filename>".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    try:
        with open(sys.argv[1], "rt") as f:
            txt = f.read().strip()
    except OSError:
        # Fix: was a bare `except:`, which would also swallow SystemExit
        # and KeyboardInterrupt; only file-access errors are expected here.
        print("Cannot open {}".format(sys.argv[1]), file=sys.stderr)
        sys.exit(1)
    tls = 0
    ssl = 0
    for row in txt.splitlines():
        if supports_tls(row):
            tls += 1
        if supports_ssl(row):
            ssl += 1
    print("Part1:", tls)
    print("Part2:", ssl)
| true |
0e8470192e3e168e5499dc808f073359c9da2d37 | Python | Edward-Sekula/OS-GCSE-computing-course | /python/phonebook.py | UTF-8 | 248 | 3.25 | 3 | [] | no_license | phone_book = {}
print('1.Add to phone_book\n2.Delete from phone_book\n3.Search\n4.Show all\n5.quit\n')
choice = int(input())

def add():
    """Prompt for a name and a number and store the pair in phone_book."""
    x = input('input name of person')
    y = input('input number')
    phone_book[x] = y

def delete():
    """Prompt for a name and remove its entry from phone_book, if present."""
    x = input('input name of person')
    phone_book.pop(x, None)

def search():
    """Prompt for a name and print the stored number (or 'not found')."""
    x = input('input name of person')
    print(phone_book.get(x, 'not found'))

# Fix: `choice` was read but never used -- the script always called add()
# regardless of the menu selection. Dispatch on the chosen entry instead.
if choice == 1:
    add()
elif choice == 2:
    delete()
elif choice == 3:
    search()
# Choices 4 ("Show all") and 5 ("quit") fall through to the final print.
print(phone_book)
def print_grid(grid, bool_print=False):
    """Format grid as text: one line per row with each cell zero-padded to
    width 3, followed by a dashed separator line. Optionally print it."""
    rows = (' '.join(str(cell).rjust(3, '0') for cell in row) for row in grid)
    grid_string = ''.join(line + '\n' for line in rows)
    grid_string += '--------------------'
    if bool_print:
        print(grid_string)
    return grid_string
# Creating grids
# AoC 2018 day 6: for every cell of a 400x400 grid, find the input
# coordinate closest by Manhattan distance and report the largest finite
# region ("area").
grid_side_size = 400
# Each cell tracks the nearest area ids seen so far and that distance; the
# sentinel grid_side_size*3 exceeds any Manhattan distance inside the grid.
distance_grid = [[{'areas':[], 'distance':grid_side_size*3} for _ in range(grid_side_size)] for i in range(grid_side_size)]
area_size = {}     # area id -> number of cells uniquely closest to it
area_centers = {}  # area id -> (x, y) input coordinate
# NOTE(review): `input` below shadows the builtin of the same name.
with open('input.txt','r') as input:
    area_id = 0
    for pair in input:
        # NOTE(review): coordinates are parsed as (y, x) and stored swapped
        # as (x, y) -- confirm against the input file's column order.
        y, x = map(lambda x: int(x), pair[:-1].split(', '))
        area_id += 1
        area_centers[area_id] = (x, y)
        area_size[area_id] = 0
# For every grid cell, record which center(s) are nearest; ties keep all ids.
for x in range(grid_side_size):
    for y in range(grid_side_size):
        for area_id, area_center in area_centers.items():
            distance = abs(x-area_center[0]) + abs(y-area_center[1])
            if distance < distance_grid[y][x]['distance']:
                distance_grid[y][x]['areas'] = [area_id]
                distance_grid[y][x]['distance'] = distance
            elif distance == distance_grid[y][x]['distance']:
                distance_grid[y][x]['areas'].append(area_id)
# Count cells per uniquely-closest area; any area touching the grid border
# is treated as infinite and dropped from the tally.
for row in range(grid_side_size):
    for column in range(grid_side_size):
        if len(distance_grid[row][column]['areas']) == 1:
            if row==0 or column==0 or row==grid_side_size-1 or column==grid_side_size-1:
                if distance_grid[row][column]['areas'][0] in area_size:
                    area_size.pop(distance_grid[row][column]['areas'][0])
            elif distance_grid[row][column]['areas'][0] in area_size:
                area_size[distance_grid[row][column]['areas'][0]] += 1
# Answer: the size of the largest finite area.
print(max(area_size.values()))
f6ea4945dd36ea5a9953f227b9b559421d13df8a | Python | rovinapinto/radiative_forcing | /codes/forcing_tools/plot.py | UTF-8 | 5,551 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
from netCDF4 import Dataset
import xarray as xr
import dask as ds
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import from_levels_and_colors
from pylab import *
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point
def plot_data(dataset, data_var, dtype="ndarray", ticks=None, colors=None, levels=None, title=None, figname=None):
    '''Plot time averaged data on a Mollweide map using xarray DataArray or numpy array

    Parameters:
    ----------
    dataset: xarray Dataset
        must contain dimensions of time, lat and lon
    data_var: xarray DataArray or a numpy ndArray
        has to have three dimensions including time
    dtype: string
        can be either an ndarray or an xarray. Default: ndarray
    ticks: list
        integer or float list of ticks for colorbar. Default: None
    colors: list
        list of colors as hexcodes in string.
        Default RdBu_r is used for colors if set to None
        Use default None to ascertain the levels and colors
    levels: list
        list of levels for the map. Has to be same length as colors. For extend True,
        change set_over and set_under colors to required colormap.
    title: string
        Default is None
    figname: string
        use to save figure with transparent background and tight borders. Default is None.
    Returns:
    --------
    cs: plots the map
    '''
    # Determine the color range from the time-averaged field.
    # NOTE(review): both branches compute the same statistics; they differ
    # only in whether the raw values are extracted from the DataArray.
    if dtype == "ndarray":
        vmax = np.max(np.mean(data_var, axis=0))
        vmin = np.min(np.mean(data_var, axis=0))
        var = data_var
    else:
        vmax = np.max(np.mean(data_var, axis=0))
        # (data_var.mean("year").values)
        vmin = np.min(np.mean(data_var, axis=0))
        var = data_var.values
    # Diverging normalization symmetric about zero, so positive and negative
    # forcing get mirror-image color scales.
    v_ext = np.max([np.abs(vmin), np.abs(vmax)])
    norm = mcolors.TwoSlopeNorm(vmin=-v_ext, vmax=v_ext, vcenter=0)
    cmap = cm.get_cmap('RdBu_r', 20)  # NOTE(review): unused; contourf gets the cmap by name below
    fig = plt.figure(figsize=(9, 7))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mollweide())
    ax.set_global()
    ax.coastlines()
    # Add a cyclic longitude column so the map has no seam at the dateline.
    val, ll = add_cyclic_point(var, coord=dataset.lon.values)
    if colors is not None:
        cs = ax.contourf(ll, dataset.lat.values, np.mean(val, axis=0), norm=norm,
                         transform=ccrs.PlateCarree(), colors=colors, levels=levels, extend='both')
        plt.colorbar(cs, shrink=0.8, fraction=0.046, pad=0.04,
                     label=r"$\mathrm{Wm}^{-2}$", orientation='horizontal', ticks=ticks)
        # Colors for values beyond the outermost levels (dark blue / dark red).
        cs.cmap.set_under("#053061")
        cs.cmap.set_over("#67001f")
    else:
        cs = ax.contourf(ll, dataset.lat.values, np.mean(
            val, axis=0), norm=norm, transform=ccrs.PlateCarree(), cmap='RdBu_r')
        plt.colorbar(cs, shrink=0.8, fraction=0.046, pad=0.04, extendrect=True,
                     label=r"$\mathrm{Wm}^{-2}$", orientation='horizontal')
    if title is not None:
        plt.title(title, loc='center', fontsize=12)
    if figname is not None:
        plt.savefig(figname + ".pdf", bbox_inches='tight', transparent=True)
    return cs
def plot_annual_data(dataset, data_var, dtype="ndarray", ax=None, ticks=None, colors=None, levels=None,title=None,figname=None):
    '''Plot data on a Mollweide map using xarray DataArray or numpy array

    Parameters:
    ----------
    dataset: xarray Dataset
        must contain dimensions of lat and lon
        NOTE(review): currently unused; coordinates are taken from
        data_var.lon / data_var.lat below -- confirm intent.
    data_var: xarray DataArray or a numpy ndArray
        has to have two dimensions
    dtype: string
        can be either an ndarray or an xarray. Default: ndarray
        NOTE(review): even for "ndarray" the code reads data_var.lon, so a
        plain numpy array would fail there -- verify against callers.
    ax: cartopy GeoAxes
        target axes created by the caller (used for multi-panel figures)
    ticks: list
        integer or float list of ticks for colorbar. Default: None
    colors: list
        list of colors as hexcodes in string.
        Default RdBu_r is used for colors if set to None
        Use default None to ascertain the levels and colors
    levels: list
        list of levels for the map. Has to be same length as colors. For extend True,
        change set_over and set_under colors to required colormap.
    title: string
        Default is None
    figname: string
        use to save figure with transparent background and tight borders. Default is None.
    Returns:
    --------
    cs: plots the map
    '''
    # Determine the color range of the (already time-averaged) field.
    if dtype == "ndarray":
        vmax = np.max(data_var)
        vmin = np.min(data_var)
        var = data_var
    else:
        vmax = np.max(data_var.values)
        vmin = np.min(data_var.values)
        var = data_var.values
    # Diverging normalization symmetric about zero.
    v_ext = np.max([np.abs(vmin), np.abs(vmax)])
    norm = mcolors.TwoSlopeNorm(vmin=-v_ext, vmax=v_ext, vcenter=0)
    cmap = cm.get_cmap('RdBu_r', 20)  # NOTE(review): unused; contourf gets the cmap by name below
    ax.set_global()
    ax.coastlines()
    # Add a cyclic longitude column so the map has no seam at the dateline.
    val, ll = add_cyclic_point(var, coord=data_var.lon.values)
    if colors is not None:
        cs = ax.contourf(ll, data_var.lat.values, val, norm=norm, transform=ccrs.PlateCarree(
        ), colors=colors, levels=levels, extend='both')
        # Colors for values beyond the outermost levels (dark blue / dark red).
        cs.cmap.set_under("#053061")
        cs.cmap.set_over("#67001f")
    else:
        cs = ax.contourf(ll, data_var.lat.values, val, norm=norm,
                         transform=ccrs.PlateCarree(), cmap='RdBu_r')
    if title is not None:
        plt.title(title, loc='center', fontsize=12)
    if figname is not None:
        plt.savefig(figname + ".pdf", bbox_inches='tight', transparent=True)
    return cs
579f208b9bb7d812ca52d3ba1afecc420d9c5e80 | Python | ErikValle/controllable_image_synthesis | /utils/visualization.py | UTF-8 | 7,408 | 2.6875 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
import collections
from utils.depth_map_visualization import color_depth_map
from matplotlib import cm
def color_depth_maps(depth, depth_max=None, rescale=True):
    """Convert a batch of depth maps in [-1, 1] to RGB colorings in [0, 1].

    Returns the colored maps as an NxCxHxW CUDA float tensor together with
    the depth_max that was used.

    NOTE(review): the depth_max argument is currently ignored -- it is
    always overridden with 1.0 (kept as-is to preserve behavior).
    """
    maps = depth.detach().cpu().numpy()
    maps = maps / 2.0 + 0.5            # [-1, 1] -> [0, 1]
    depth_max = 1.0
    if rescale:
        # Stretch the batch to span the full [0, 1] range.
        maps = (maps - maps.min()) / (maps.max() - maps.min())
    colored = np.concatenate(
        [color_depth_map(maps[i], depth_max)[None] for i in range(maps.shape[0])],
        axis=0,
    )
    result = torch.from_numpy(colored).float().cuda()
    result = result.permute(0, 3, 1, 2)  # NxHxWxC -> NxCxHxW
    result = result.float() / 255.0      # uint8 colors -> [0, 1]
    return result, depth_max
def draw_box(img, mask, width=2, c=torch.tensor([0.75, 0., 0.])):
    """
    Draw the bounding box of `mask` into a clone of `img`.

    Args:
        img (torch.FloatTensor or torch.ByteTensor): image to draw in, CxHxW
        mask (torch.BoolTensor): object mask, 1xHxW
        width (int): width of the drawn frame
        c (torch.FloatTensor): RGB color of the frame
    Returns:
        A clone of img with the frame painted (original left untouched).
    """
    if img.dtype == torch.uint8:
        c = c * 255  # scale the color for byte images
    c = c.type_as(img)
    out = img.clone()  # never modify the caller's image
    fg = torch.nonzero(mask.squeeze(0))
    if len(fg) == 0:
        return out
    h0, w0 = fg.min(dim=0)[0]
    h1, w1 = fg.max(dim=0)[0]
    # Grow the box by the frame width, clamped to the image bounds.
    h0 = max(0, h0 - width)
    w0 = max(0, w0 - width)
    h1 = min(out.shape[-2] - 1, h1 + width)
    w1 = min(out.shape[-1] - 1, w1 + width)
    color = c.view(3, 1, 1)
    # Paint the four edges of the frame.
    out[..., h0:h0 + width, w0:w1] = color.repeat(1, width, w1 - w0)
    out[..., h1 - width + 1:h1 + 1, w0:w1] = color.repeat(1, width, w1 - w0)
    out[..., h0:h1, w0:w0 + width] = color.repeat(1, h1 - h0, width)
    out[..., h0:h1, w1 - width + 1:w1 + 1] = color.repeat(1, h1 - h0, width)
    return out
def draw_box_batch(imgs, masks, width=2, c=torch.tensor([0.75, 0., 0.])):
    """Apply draw_box to each (image, mask) pair and stack the results."""
    boxed = [draw_box(image, mask, width, c) for image, mask in zip(imgs, masks)]
    return torch.stack(boxed)
def wrap_images(imgs, colors):
    """Paint a 2-pixel frame of the given RGB color around every image.

    Modifies imgs (Nx3xHxW) in place and returns it.
    """
    assert imgs.shape[1] == 3 and len(colors) == 3
    frame = colors.view(1, 3, 1, 1)
    edges = [
        (slice(None), slice(None), slice(None, 2), slice(None)),   # top rows
        (slice(None), slice(None), slice(-2, None), slice(None)),  # bottom rows
        (slice(None), slice(None), slice(None), slice(None, 2)),   # left cols
        (slice(None), slice(None), slice(None), slice(-2, None)),  # right cols
    ]
    for edge in edges:
        imgs[edge] = frame
    return imgs
def imdict_to_img(img_dict, n_per_row=8, n_rows=1):
    """Flatten a dict of image batches into one image stack.

    Keys are processed in sorted order; each value is expected to be a
    BxCxHxW tensor. The result interleaves n_rows slices of n_per_row
    images per dictionary entry.
    """
    keys = sorted(img_dict)
    stacked = torch.stack([img_dict[k] for k in keys], dim=0)  # AxBxCxHxW
    # Build the rows, then collapse (row, key, per-row) into the batch dim.
    chunks = [stacked[:, r * n_per_row:(r + 1) * n_per_row] for r in range(n_rows)]
    grid = torch.stack(chunks, dim=0)  # n_rows x A x n_per_row x CxHxW
    return grid.flatten(0, 2)
def get_cmap(n_fg):
    """Generate a color map for visualizing foreground objects.

    Args:
        n_fg (int): Number of foreground objects
    Returns:
        numpy.ndarray of shape (n_fg, 3): one RGB row per object, taken
        from matplotlib's qualitative 'Set1' palette.
    """
    palette = cm.get_cmap('Set1')
    return np.vstack([np.asarray(palette(i))[:3] for i in range(n_fg)])
def visualize_objects(x, generator):
    """Visualize primitives and colored alpha maps of objects.

    Builds two fused visualization images from the generator outputs in `x`
    and, as a side effect, rewrites x['layers_rgb'] in place with
    color-framed per-object layers.

    Args:
        x (dict): generator output; uses 'img', 'obj_masks', 'prim',
            'layers_alpha' and 'layers_rgb' (tensors with values in [-1, 1]).
        generator: model exposing n_tf, n_fg, imsize, primitive_type and
            alpha_composition().
    Returns:
        dict with 'vis_prim' and 'vis_layers_alpha' images in [-1, 1].
    """
    bg_color = 0.
    # Batch size: the leading dimension of img is (batch x num transforms).
    bs = x['img'].shape[0] // generator.n_tf
    obj_masks = x['obj_masks']
    out = {}

    def compose(rgb, alpha, depth):
        # Blend the per-object layers over a constant background color;
        # point primitives are simply summed over objects instead.
        img_fg = rgb * alpha
        if generator.primitive_type != 'point':
            img_bg = bg_color * torch.ones(bs * generator.n_tf, 3, *generator.imsize).type_as(img_fg)
            img_fuse, _ = generator.alpha_composition(img_fg, img_bg, alpha, depth)
        else:
            img_fuse = torch.sum(img_fg.view(bs * generator.n_tf, generator.n_fg, 3, *generator.imsize), dim=1)
        return img_fuse

    # fused primitives: map channels from [-1, 1] to [0, 1] before blending
    rgb = x['prim'][:, :3] / 2 + 0.5
    alpha = (x['prim'][:, -2:-1] / 2 + 0.5) * obj_masks
    depth = x['prim'][:, -1:] / 2 + 0.5
    # (BxNxN_tf)x... -> (BxN_tfxN)x...
    reshape = lambda x: x.view(bs, generator.n_fg, generator.n_tf, *x.shape[1:]).transpose(1, 2).flatten(0, 2)
    rgb = reshape(rgb)
    alpha = reshape(alpha)
    depth = reshape(depth)

    out['vis_prim'] = compose(rgb, alpha, depth) * 2 - 1  # back to [-1, 1]

    # colored fused alphas: one flat instance color per object
    cmap = torch.from_numpy(get_cmap(generator.n_fg)).float().to(rgb.device)
    rgb = cmap.repeat(bs*generator.n_tf, 1).view(bs*generator.n_tf*generator.n_fg, 3, 1, 1)
    alpha = (x['layers_alpha'] / 2 + 0.5) * obj_masks
    # (BxNxN_tf)x... -> (BxN_tfxN)x...
    alpha = reshape(alpha)
    # NOTE(review): reuses `depth` computed from the primitives above.
    out['vis_layers_alpha'] = compose(rgb, alpha, depth) * 2 - 1

    # visualize primitives multiplied by alpha
    alpha = x['layers_alpha'] / 2 + 0.5
    rgb = x['layers_rgb'] / 2 + 0.5
    rgb = rgb * alpha
    # wrap each primitive with colored frame
    cmap = torch.from_numpy(get_cmap(generator.n_fg)).float().to(rgb.device)
    # (BxNxN_tf)x... -> (BxN_tf)xNx...
    rgb = reshape(rgb).view(-1, generator.n_fg, *rgb.shape[1:])
    rgb = torch.cat([wrap_images(rgb[:, i], cmap[i]).unsqueeze(1) for i in range(rgb.shape[1])], 1)
    # (BxN_tf)xN... -> (BxNxN_tf)x...  (reshape redefined for the new layout)
    reshape = lambda x: x.view(bs, generator.n_tf, generator.n_fg, *x.shape[2:]).transpose(1, 2).flatten(0, 2)
    rgb = reshape(rgb)
    x['layers_rgb'] = rgb * 2 - 1  # side effect: updates the caller's dict
    return out
def visualize_scenes(x, generator):
    """Split rendered scene images into RGB and depth visualizations.

    For each image key ('img', 'img_single', plus 'img_bg' when the
    generator uses a background cube) emits 'vis_<key>_rgb' (first three
    channels) and 'vis_<key>_depth' (last channel); None values pass
    through unchanged.
    """
    names = ['img', 'img_single'] + (['img_bg'] if generator.bg_cube else [])
    out = {}
    for name in names:
        image = x[name]
        if image is None:
            rgb, depth = None, None
        else:
            rgb, depth = image[:, :3], image[:, -1:]
        out[f'vis_{name}_rgb'] = rgb
        out[f'vis_{name}_depth'] = depth
    return out
def colorize_primitives(x, generator):
    """Re-render the primitives with one flat color per object instance.

    For mesh renderers, lighting is temporarily overridden (and restored
    afterwards); the instance colors are written into the primitive feature
    channels before rendering. Returns the re-rendered x['prim'].
    """
    # Fix: deepcopy was used below but never imported at module level,
    # which raised NameError as soon as the mesh branch executed.
    from copy import deepcopy
    bs = x['img'].shape[0] // generator.n_tf
    cmap = torch.from_numpy(get_cmap(generator.n_fg)).float().to(x['img'].device)
    # NOTE(review): RendererMesh is not imported anywhere in this file; this
    # isinstance check raises NameError if reached -- confirm the intended
    # import from the project's renderer module.
    if isinstance(generator.renderer, RendererMesh):
        renderer_orig = deepcopy(generator.renderer)
        generator.renderer.pred_uv = False
        generator.renderer.renderer.light_intensity_ambient = 0.6
        generator.renderer.renderer.light_intensity_directional = 0.5
        generator.renderer.renderer.light_color_ambient = torch.ones(generator.renderer.texture_channel, ).cuda()
        generator.renderer.renderer.light_color_directional = torch.ones(generator.renderer.texture_channel, ).cuda()
    # replace first three channels with color
    x = x.copy()  # do not modify original dict (NOTE: tensors stay shared)
    def reshape(feature):
        # Bring the feature tensor into (batch, object, element, channel)
        # form depending on the primitive parameterization.
        if generator.primitive_type == 'point':
            return feature.view(bs, generator.n_fg, generator.renderer.n_pts, -1)
        if generator.primitive_type == 'cuboid_sr':
            return feature.view(bs, generator.n_fg, generator.renderer.texsize ** 2*6, -1)
        if generator.primitive_type == 'cuboid' or generator.primitive_type == 'sphere':
            return feature
        raise ValueError('Unknown render type!')
    feature = reshape(x['feature'])  # BxN_objx...xC
    ch_start = 0
    if generator.primitive_type == 'point':
        ch_start = 3  # point features carry xyz first, then color channels
    for idx in range(generator.n_fg):
        feature[:, idx, ..., ch_start:ch_start+3] = cmap[idx]
    x['feature'] = feature.view_as(x['feature'])
    x = generator.render_primitives(x)
    if isinstance(generator.renderer, RendererMesh):  # reset renderer if necessary
        generator.renderer = renderer_orig
    return x['prim']
| true |
49fa2c702664a66effcb96d66491ef07b9476743 | Python | han963xiao/machine_learning_scripts | /entropogram/keras_vgg16.py | UTF-8 | 3,277 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
"""
Use VGG16 to extract features from entropogram
"""
from load_data import get_coherence, get_spectrogram
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.models import Model
from keras.applications.vgg16 import preprocess_input
import numpy as np
import scipy
import matplotlib.pyplot as plt
def main():
    """Extract per-pixel VGG16 conv features from a spectrogram and cluster them.

    Loads the pretrained VGG16 (without classifier head), replicates the
    spectrogram across the three RGB channels, collects every conv layer's
    feature maps upscaled back to the spectrogram resolution, then runs
    k-means on the per-pixel feature vectors and displays the labels.
    """
    model = VGG16(weights='imagenet', include_top=False)
    print(model.input)
    spectrogram, frequency, time = get_spectrogram()
    spectrogram = spectrogram[:, ::5]  # subsample the second axis by 5
    ntimes, nfreqs = spectrogram.shape
    # Fake an RGB image by stacking the spectrogram three times, then bring
    # it into the (batch, height, width, channels) layout VGG16 expects.
    data = np.array([spectrogram, spectrogram, spectrogram])
    data = data.reshape(3, ntimes, nfreqs, 1)
    data = data.transpose((3, 1, 2, 0))
    data = preprocess_input(data)
    # model.predict(data)
    feature_maps = []
    for ilayer, layer in enumerate(model.layers):
        # Sub-model that outputs this layer's activations.
        feature_model = Model(model.input, layer.output)
        print(layer.name)
        if 'conv' in layer.name:
            fmaps_layer = feature_model.predict(data)
            nsamples, width, height, nchannels = fmaps_layer.shape
            for ifeature in range(nchannels):
                fmap = fmaps_layer[0, :, :, ifeature]
                # NOTE(review): scipy.misc.imresize was deprecated and removed
                # in SciPy >= 1.3; this requires an old SciPy (or porting to
                # PIL/skimage) -- confirm the pinned environment.
                upscaled = scipy.misc.imresize(fmap, size=(ntimes, nfreqs),
                                               mode="F", interp='bilinear')
                feature_maps.append(upscaled)
    feature_maps = np.array(feature_maps)
    nfeatures, ntimes, nfreqs = feature_maps.shape
    plt.figure()
    plt.plot(np.mean(feature_maps, axis=(1, 2)))
    # kmeans parameters
    from sklearn.cluster import KMeans
    n_clusters = 10
    predictor = KMeans(init='k-means++', n_clusters=n_clusters, n_init=5)
    # Cluster the ntimes*nfreqs pixels by their nfeatures-dim feature vectors.
    predictor.fit(feature_maps.reshape(nfeatures, ntimes*nfreqs).T)
    fig, ax = plt.subplots()
    ax.imshow(predictor.labels_.reshape(ntimes, nfreqs).T, aspect='auto',
              origin='lower')
    plt.show()
def plot_convlayer(layer_weights, layer_output):
    """Visualize a conv2d layer: per-input-channel kernel weights on the
    left, the matching output feature map (first sample, transposed) in a
    wide panel on the right, one row per output channel."""
    filter_weights, filter_biases = layer_weights
    size1, size2, n_in, n_out = filter_weights.shape
    print(filter_weights.shape)
    print(layer_output.shape)
    fig = plt.figure()
    fig.suptitle('conv2d layer')
    prev_ax = None
    for out_ch in range(n_out):
        # One small panel per input channel showing the kernel weights.
        for in_ch in range(n_in):
            kernel_ax = plt.subplot2grid((n_out, n_in + 5), (out_ch, in_ch),
                                         colspan=1)
            kernel_ax.imshow(filter_weights[:, :, in_ch, out_ch])
        # Wide panel with the output map; axes shared across the rows.
        map_ax = plt.subplot2grid((n_out, n_in + 5), (out_ch, n_in), colspan=5,
                                  sharex=prev_ax, sharey=prev_ax)
        map_ax.imshow(layer_output[0, :, :, out_ch].T,
                      aspect='auto', origin='lower')
        prev_ax = map_ax
def plot_denselayer(x_shape, layer_weights, layer_output):
    """Visualize a dense layer: one panel per unit, reshaping the unit's
    per-pixel activations back to the original x_shape image."""
    filter_weights, filter_biases = layer_weights
    print(filter_weights.shape)
    print(layer_output.shape)
    _, npixels, ndense = layer_output.shape
    fig, axes = plt.subplots(ndense, 1, sharey=True, sharex=True)
    axes = [axes] if ndense == 1 else axes  # subplots() squeezes a single axis
    fig.suptitle('dense layer')
    for unit, panel in enumerate(axes):
        activation_image = layer_output[:, :, unit].reshape(x_shape).T
        panel.imshow(activation_image, aspect='auto', origin='lower')
aspect='auto', origin='lower')
if __name__ == "__main__":
    # Script entry point: run the feature-extraction and clustering demo.
    main()
| true |
bd2e0a83ac858a3f0ffb54a55a8d6093f68879da | Python | timebusker/timebusker.github.io | /img/top-photo/0python.py | UTF-8 | 712 | 2.609375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# @Author: HUAWEI
# @Date: 2022-04-24 20:37:53
# @Last Modified by: HUAWEI
# @Last Modified time: 2022-06-02 17:55:42
import os
import random
path = 'E:\\timebusker\\timebusker.github.io\\img\\top-photo'

# Get all files in this directory and prefix every .jpg with a random
# number to shuffle the ordering.
# Fix: the original Chinese comment here was mojibake whose second half
# spilled onto its own uncommented line, which was a syntax error.
lists = os.listdir(path)
for file in lists:
    if file.endswith('.jpg'):
        print(file)
        old = path + os.sep + file
        new = path + os.sep + str(random.randint(0, 999)) + file
        os.rename(old, new)

# Second pass: rename the shuffled .jpg files to consecutive 1.jpg, 2.jpg, ...
lists = os.listdir(path)
n = 1
for file in lists:
    if file.endswith('.jpg'):
        print(file)
        old = path + os.sep + file
        new = path + os.sep + str(n) + '.jpg'
        os.rename(old, new)
        n += 1
| true |
f58aff95e393f988f727816cd426d82a0c758aba | Python | RobertFriebe/dsnd_disaster_response_app | /models/train_classifier.py | UTF-8 | 7,752 | 3.21875 | 3 | [
"CC-BY-4.0"
] | permissive | import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import re
import time
import pickle
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
def load_data(database_filepath):
    '''Load the disaster-response dataset from an SQLite database.

    Args
        database_filepath: path to the SQLite database file
    Returns
        - X: array of message texts (predictors)
        - Y: DataFrame with the 36 category columns (responses)
        - category_names: the category column labels
    '''
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('disaster_responses', engine)
    messages = df.message.values
    categories = df.iloc[:, 4:]  # the first four columns are metadata
    return messages, categories, categories.columns
def tokenize(text):
    '''Normalize a message into a list of clean word tokens.

    Strips every non-alphanumeric character, splits the text into words,
    drops English stopwords, then lemmatizes, lowercases and trims each
    remaining token.
    Args
        text: raw message string
    Returns
        list of clean tokens
    '''
    words = word_tokenize(re.sub(r"[^a-zA-Z0-9]", " ", text))
    words = [w for w in words if not w in stopwords.words("english")]
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(w).lower().strip() for w in words]
class TextLengthExtractor(BaseEstimator, TransformerMixin):
    '''Custom transformer returning the character count of every message
    as a one-column DataFrame, for use inside a FeatureUnion.
    '''

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        count_chars = np.vectorize(len)
        return pd.DataFrame(count_chars(X))
def build_model():
    '''Build the classification pipeline.

    Features are the Tfidf matrix of the tokenized messages combined with
    the raw text length of each message; a multi-output Random Forest
    predicts the 36 categories.
    Returns
        sklearn Pipeline
    '''
    text_pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize,
                                 max_df=0.75,
                                 max_features=5000,
                                 ngram_range=(1, 2))),
        ('tfidf', TfidfTransformer()),
    ])
    features = FeatureUnion([
        ('nlp_pipeline', text_pipeline),
        ('text_len', TextLengthExtractor()),
    ])
    forest = RandomForestClassifier(min_samples_leaf=1,
                                    min_samples_split=6,
                                    n_estimators=100,
                                    max_features='auto')
    return Pipeline([
        ('features', features),
        ('clf_multi', MultiOutputClassifier(forest)),
    ])
def evaluate_model(model, X_test, Y_test, category_names):
    '''Print per-category evaluation scores for the fitted model.

    Predicts the test set and reports, for each of the 36 categories:
    - Accuracy: fraction of correct predictions
    - Precision: tp / (tp + fp), ability not to label negatives positive
    - Recall: tp / (tp + fn), ability to find all positive samples
    - F1-Score: harmonic mean of precision and recall (best 1, worst 0)
    Args:
        model: fitted sklearn estimator
        X_test: predictor test data
        Y_test: response test data (DataFrame)
        category_names: names of the output categories
    '''
    Y_pred = pd.DataFrame(model.predict(X_test), columns=category_names)
    metrics = [('Accuracy', accuracy_score), ('Precision', precision_score),
               ('Recall', recall_score), ('F1-Score', f1_score)]
    table = {'category': list(Y_test.columns)}
    for metric_name, scorer in metrics:
        table[metric_name] = [scorer(Y_test[col], Y_pred[col])
                              for col in Y_test.columns]
    scores = pd.DataFrame(table).set_index('category')
    print(scores)
def save_model(model, model_filepath):
    '''Saves the model as a pickle file
    Args
        model: fitted model object to serialize
        model_filepath: destination path for the pickle file
    Returns
        None
    '''
    # Use a context manager so the file handle is closed (and data flushed)
    # even if pickling fails; the original left the handle open.
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    '''Loads the training and testing data, builds the model, fits the model and prints
    a classification report, saves the model as a pickle file
    Args
        None
    Returns
        None
    '''
    # Expect exactly two CLI arguments: database path and model output path.
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n    DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        # Hold out 20% of the data for evaluation.
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        
        print('Building model...')
        model = build_model()
        
        print('Training model...')
        model.fit(X_train, Y_train)
        
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)

        print('Saving model...\n    MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)

        print('Trained model saved!')

    else:
        # Wrong argument count: print usage instructions instead.
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
    # Script entry point: argument parsing happens inside main().
    main()
e9376bf6f55c3ece3e2918598001d7d96d83f6b9 | Python | shashidhar0508/python-training | /set2/regular-exp/functions-re.py | UTF-8 | 827 | 3.46875 | 3 | [] | no_license | import re
# Demonstrations of the main functions in the `re` module.

# match(): anchored at the beginning of the string.
m = re.match("abc", "abcabdefg")
if m is not None:  # idiom fix: compare to None with `is not`, not `!=`
    print("Match is available at the beginning of the String")
    print("Start Index:", m.start(), "and End Index:", m.end())
else:
    print("not available")

# fullmatch(): the whole string must match the pattern.
fm = re.fullmatch('abcabdefg', "abcabdefg")
print("Full string matched" if fm is not None else "String not matched")

# search(): match anywhere in the string.
sm = re.search("aaa", "abaaaba")
print("search is available " if sm is not None else "search not matched")

# findall(): every non-overlapping occurrence.
fa = re.findall("[0-9]", "a7b9c5kz")
print("findall : ", fa)

# sub(): replace every occurrence.
s = re.sub("[a-z]", "#", "a7b9c5k8z")
print(s)

# subn(): like sub() but also returns the replacement count.
t = re.subn("[a-z]", "#", "a7b9c5k8z")
print(t)
print("The Result String:", t[0])
print("The number of replacements:", t[1])

# split()  (fixed typo: was "spllit")
l = re.split(",", "java,python,angular,react,django")
for t in l:
    print(t)
| true |
5ba0b4f647f2c64032240f9047a71bf30b161955 | Python | swarnabarathik/Python | /Practice/pass.py | UTF-8 | 100 | 3.21875 | 3 | [] | no_license | x="John"
# Branch on the name stored in x; the "Joy" branch intentionally does nothing.
if x == "Joy":
    pass  # placeholder: no action for this name
elif x == "John":
    print("Name:", x)
else:
    print("Im else")
| true |
8101b96ac162bbc8a57d8198311f4ce88e775db4 | Python | VinceMaes123/Thesis-algorithms | /HMC/alanine-dipeptide/HMC_userDefinedExpressionsAlanineDipeptide.py | UTF-8 | 5,025 | 2.78125 | 3 | [] | no_license | #This file contains all distributions, potentials, gradients... that the user of the HMC method should provide
#Alanine-dipeptide (united atom description without hydrogen, oxygen molecules and the central subgroup with a carbon)
#has 13 degrees of freedom: 4 C-N bond lengths, 2 C-C bond lengths, 2 torsion angles, 2 C-N-C bond angles and
#3 C-C-N bond angles.
#!!! We only model the main chain of the alanine-dipeptide molecule
#Haal cos,sin,exp... uit autograd.numpy als je autograd op je functie wilt gebruiken, want hierop is autograd gebaseerd!
#als je vb math.cos(..) of scipy.cos(..) gebruikt herkent autograd.grad(func) dit niet!
from autograd import grad
import autograd.numpy as np
import math
import matplotlib.pyplot as plt
import scipy
from scipy import integrate
#x_micro = q in the HMC method!!!
########################################################################################################################
################################################# ALANINE_DIPEPTIDE ####################################################
def V(q):
    """Dimensionless potential energy beta*U(q) of the alanine-dipeptide main chain.

    Layout of the 13 degrees of freedom in q:
      q[0:2]  C-C bond lengths, q[2:6] C-N bond lengths,
      q[6:9]  C-C-N bond angles, q[9:11] C-N-C bond angles,
      q[11]   phi torsion angle, q[12] psi torsion angle (slowest component).
    Uses np.cos so the expression stays autograd-differentiable.
    """
    beta = 1.0 / 100.0             # inverse temperature
    kCC, rCC = 1.17e6, 1.515       # C-C bond stiffness / equilibrium length
    kCN, rCN = 1.147e6, 1.335      # C-N bond stiffness / equilibrium length
    kCCN = 2.68e5                  # C-C-N bond-angle stiffness
    thetaCCN = (113.9 / 360.0) * 2.0 * math.pi  # equilibrium C-C-N angle
    kCNC = 1.84e5                  # C-N-C bond-angle stiffness
    thetaCNC = (117.6 / 360.0) * 2.0 * math.pi  # equilibrium C-N-C angle
    kphi = 3.98e4                  # phi torsion stiffness
    kpsi = 2.93e3                  # psi torsion stiffness (slowest component)

    energy = 0.0
    for i in range(0, 2):          # two C-C bond energies
        energy = energy + 0.5 * kCC * (q[i] - rCC) ** 2
    for i in range(2, 6):          # four C-N bond energies
        energy = energy + 0.5 * kCN * (q[i] - rCN) ** 2
    for i in range(6, 9):          # three C-C-N bond-angle energies
        energy = energy + 0.5 * kCCN * (q[i] - thetaCCN) ** 2
    for i in range(9, 11):         # two C-N-C bond-angle energies
        energy = energy + 0.5 * kCNC * (q[i] - thetaCNC) ** 2
    energy = energy + kphi * (1 + np.cos(q[11] + math.pi))  # phi torsion
    energy = energy + kpsi * (1 + np.cos(q[12] + math.pi))  # psi torsion
    return beta * energy
def gradV(q):
    """Gradient of the potential V at q, via autograd automatic differentiation."""
    return grad(V)(q)
#Exact expressions for reaction coordinate
#A_hat EXACT (= macro potential A)
def A_hat(eta):
    """Exact macro potential (free energy) along the psi torsion angle."""
    beta = 1.0 / 100.0   # inverse temperature
    kpsi = 2.93e3        # stiffness of torsion angle 2 (slowest component)
    return beta * (kpsi * (1 + np.cos(eta + math.pi)))
def mu_0(eta):
    """Unnormalized equilibrium (Boltzmann) density of the psi torsion angle."""
    free_energy = A_hat(eta)
    return math.exp(-free_energy)
#############################################################
#other densities we want to plot
#Plotting function
def makePlot(psi, xminPsi, xmaxPsi, func, plotTitleString):
    """Plot a normalized histogram of samples against a normalized density.

    Args:
        psi: sequence of samples of the coordinate being plotted
        xminPsi, xmaxPsi: plotting and normalization interval
        func: unnormalized density, evaluated pointwise
        plotTitleString: x-axis label
    Returns:
        0
    """
    # Normalization constant of func over the interval.
    scaling_Psi = integrate.quad(func, xminPsi, xmaxPsi)
    scaling_Psi = scaling_Psi[0]
    # Fix: scipy.linspace / scipy.zeros / scipy.size were deprecated aliases
    # of the NumPy functions and have been removed from SciPy; use the NumPy
    # equivalents (np is imported at the top of this file).
    x_coordinate_Psi = np.linspace(xminPsi, xmaxPsi, 1000)
    y_coordinate_Psi = np.zeros(np.size(x_coordinate_Psi))
    for index in range(0, len(x_coordinate_Psi)):
        y_coordinate_Psi[index] = (1.0 / scaling_Psi) * func(x_coordinate_Psi[index])
    plt.figure()
    plt.hist(psi, bins=1000, range=[xminPsi, xmaxPsi], density=1)  # histogram with 1000 bins
    plt.plot(x_coordinate_Psi, y_coordinate_Psi)
    plt.xlabel(plotTitleString)
    plt.show()
    return 0
#marginal density of the phi torsion angle
def phi_0(eta):
    """Unnormalized marginal density of the phi torsion angle."""
    beta = 1.0 / 100.0   # inverse temperature
    kphi = 3.98e4        # stiffness of torsion angle 1
    energy = kphi * (1 + np.cos(eta + math.pi))
    return math.exp(-beta * energy)
#marginal density of a C-C bond length
def CC_0(eta):
    """Unnormalized marginal density of a C-C bond length."""
    beta = 1.0 / 100.0   # inverse temperature
    kCC = 1.17e6         # stiffness C-C bond
    rCC = 1.515          # equilibrium C-C bond length
    energy = 0.5 * kCC * (eta - rCC) ** 2
    return math.exp(-beta * energy)
#marginal density of a C-N bond length
def CN_0(eta):
    """Unnormalized marginal density of a C-N bond length."""
    beta = 1.0 / 100.0   # inverse temperature
    kCN = 1.147e6        # stiffness C-N bond
    rCN = 1.335          # equilibrium C-N bond length
    energy = 0.5 * kCN * (eta - rCN) ** 2
    return math.exp(-beta * energy)
#marginal density of a C-C-N bond angle
def CCN_0(eta):
    """Unnormalized marginal density of a C-C-N bond angle."""
    beta = 1.0 / 100.0                            # inverse temperature
    kCCN = 2.68e5                                 # stiffness C-C-N bond angle
    thetaCCN = (113.9 / 360.0) * 2.0 * math.pi    # equilibrium C-C-N bond angle
    energy = 0.5 * kCCN * (eta - thetaCCN) ** 2
    return math.exp(-beta * energy)
#marginal density of a C-N-C bond angle
def CNC_0(eta):
    """Unnormalized marginal density of a C-N-C bond angle."""
    beta = 1.0 / 100.0                            # inverse temperature
    kCNC = 1.84e5                                 # stiffness C-N-C bond angle
    thetaCNC = (117.6 / 360.0) * 2.0 * math.pi    # equilibrium C-N-C bond angle
    energy = 0.5 * kCNC * (eta - thetaCNC) ** 2
    return math.exp(-beta * energy)
| true |
c14397601c9d476107cc5a7cc044813ba2e037af | Python | ArelyL/Sorts | /WordCocktailSort.py | UTF-8 | 704 | 3.265625 | 3 | [] | no_license | #!/env/bin/python
import random
def BubbleSortWords(Lista):
    """Cocktail (bidirectional bubble) sort of Lista in place; prints the
    number of swaps performed and the sorted list.

    Fixes two bugs in the original backward pass: the range was ascending
    (so the pass was empty for i == 0 and, worse, for later i it ran with
    r == 0, where Lista[r-1] wrapped around to the LAST element and could
    un-sort the list, e.g. [2, 3, 1] -> [1, 3, 2]).
    """
    k = 0  # total number of swaps performed
    n = len(Lista)
    for i in range(0, n):
        m = 0  # did this sweep swap anything?
        # Forward pass: bubble the largest remaining element to the right end.
        for j in range(i, n - i - 1):
            if Lista[j + 1] < Lista[j]:
                Lista[j + 1], Lista[j] = Lista[j], Lista[j + 1]
                m = 1
                k += 1
        # Backward pass: bubble the smallest remaining element to the left end.
        for r in range(n - 2 - i, i, -1):
            if Lista[r] < Lista[r - 1]:
                Lista[r], Lista[r - 1] = Lista[r - 1], Lista[r]
                m = 1
                k += 1
        if m == 0:
            break
    print(k)
    print(Lista)
if __name__=='__main__':
    # Read the word list and sort it. Fix: the original opened the file
    # without ever closing it; `with` guarantees the handle is released.
    with open("Palabras.txt", "r") as archivo:
        Lista = archivo.readlines()
    BubbleSortWords(Lista)
| true |
d3f5fafbf34b9e5ce2f73686bd245460190d2dc5 | Python | JennyIkaMaharani/Python-Projects-Protek | /Praktikum08/pythonProject8.py | UTF-8 | 269 | 3.625 | 4 | [] | no_license | #membuat program menghitung rata-rata harga buah
# Program that computes the average fruit price.
buah = {'apel' : 5000,
        'jeruk' : 8500,
        'mangga' : 7800,
        'duku' : 6500}
harga = list(buah.values()) # build the list of prices from the dict values
# compute the average price
rataRata= sum(harga) / len(harga)
print(rataRata)
01a60f42e7739682347c1e3681032cbbc85c3c11 | Python | GuangyuZheng/leet_code_python | /82_Remove_Duplicates_from_Sorted_List_II.py | UTF-8 | 1,761 | 3.5625 | 4 | [] | no_license | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
dummy = ListNode(0)
dummy.next = head
prev = dummy
val = prev.next.val
node = head.next
while node is not None:
if node.val != val:
prev = prev.next
if prev.next is None:
break
val = prev.next.val
node = node.next
else:
t_node = prev.next
while t_node is not None:
if t_node.val == val:
t_node = t_node.next
else:
break
prev.next = t_node
if t_node is None:
break
val = t_node.val
node = t_node.next
return dummy.next
# cleaner code version
class SolutionV2:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
dummy = ListNode(0)
dummy.next = head
current = dummy
while current.next is not None and current.next.next is not None:
if current.next.val == current.next.next.val:
t_node = current.next
while t_node is not None and t_node.next is not None and (t_node.val == t_node.next.val):
t_node = t_node.next
current.next = t_node.next
else:
current = current.next
return dummy.next
| true |
4b0b534a96a470b7cba693ad1bb6d6e0b205ae8f | Python | leanbarrios/ptrFog | /grafos.py | UTF-8 | 2,246 | 3.4375 | 3 | [] | no_license | class Grafo(object):
def __init__(self):
self.relaciones = {}
def __str__(self):
return str(self.relaciones)
def agregar(grafo, elemento):
grafo.relaciones.update({elemento:[]})
def relacionar(grafo, elemento1, elemento2):
relacionarUnilateral(grafo, elemento1, elemento2)
relacionarUnilateral(grafo, elemento2, elemento1)
def relacionarUnilateral(grafo, origen, destino):
grafo.relaciones[origen].append(destino)
buenosAires = "BuenosAires"
sanPedro = "San Pedro"
rosario = "Rosario"
cordoba = "Cordoba"
villaMaria = "Villa Maria"
sanLuis = "San Luis"
mendoza = "Mendoza"
bahiaBlancha = "Bahia Blanca"
grafo = Grafo()
agregar(grafo, buenosAires)
agregar(grafo, sanLuis)
agregar(grafo, sanPedro)
agregar(grafo, rosario)
agregar(grafo, cordoba)
agregar(grafo, villaMaria)
agregar(grafo, bahiaBlanca)
agregar(grafo, mendoza)
relacionar(grafo, buenosAires, sanPedro)
relacionar(grafo, buenosAires, sanLuis)
relacionar(grafo, buenosAires, bahiaBlanca)
relacionar(grafo, buenosAires, sanLuis)
relacionar(grafo, sanLuis, mendoza)
relacionar(grafo, sanLuis, villaMaria)
relacionar(grafo, sanLuis, bahiaBlanca)
relacionar(grafo, villaMaria, cordoba)
relacionar(grafo, villaMaria, rosario)
relacionar(grafo, rosario, sanPedro)
def profundidadPrimero(grafo, elementoInicial, funcion, elementosRecorridos = []):
if elementoInicial in elementosRecorridos:
return
funcion(elementoInicial)
elementosRecorridos.append(elementoInicial)
for vecino in grafo.relaciones[elementoInicial]:
profundidadPrimero(grafo, vecino, funcion, elementosRecorridos)
def imprimir (elemento):
print elemento
profundidadPrimero(grafo, buenosAires, imprimir)
def anchoPrimero(grafo, elementoInicial, funcion, cola = deque(), elementosRecorridos = []):
if not elementoInicial in elementosRecorridos:
funcion(elementoInicial)
elementosRecorridos.append(elementoInicial)
if(len(grafo.relaciones[elementoInicial]) > 0):
cola.extend(grafo.relaciones[elementoInicial])
if len(cola) != 0 :
anchoPrimero(grafo, cola.popleft(), funcion, cola, elementosRecorridos)
anchoPrimero(grafo, buenosAires, imprimir)
| true |
6a885c428aa2330b54192e695122c7c65d23378c | Python | davidxbuck/adventofcode | /2016/src/Advent2016_10.py | UTF-8 | 3,114 | 3.25 | 3 | [
"MIT"
] | permissive | # Advent of Code 2016
#
# From https://adventofcode.com/2016/day/10
import re
from collections import defaultdict
from math import prod
debug = False
data = [list(map(int, re.findall(r'\d+', row.strip()))) for row in open('../inputs/Advent2016_10.txt', 'r')]
botput = [re.findall(r'(bot|output)', row.strip()) for row in open('../inputs/Advent2016_10.txt', 'r')]
class Bot:
def __init__(self, id_):
self.id_ = id_
self.chips = []
self.queue = []
@property
def low(self):
return min(self.chips)
@property
def high(self):
return max(self.chips)
@property
def answer(self):
return all(x in self.chips for x in [17, 61])
@property
def full(self):
return len(self.chips) == 2
@property
def pending(self):
return len(self.queue) > 0
def pend(self, low, high):
self.queue.append([low, high])
def receive(self, val):
self.chips.append(val)
def execute(self):
if self.pending and self.full:
lo_bot, hi_bot = self.queue.pop()
return_vals = [lo_bot, self.low, hi_bot, self.high]
return return_vals
class Factory:
def __init__(self):
self.bots = defaultdict(Bot)
self.answered = False
def val_bot(self, id_):
if isinstance(id_, int):
id_ = [id_]
for bot in id_:
if bot not in self.bots:
self.bots[bot] = Bot(bot)
def give(self, bot, val):
if debug:
print(f"Instruction given to bot {bot} to receive {val}")
self.val_bot(bot)
self.bots[bot].receive(val)
if self.bots[bot].answer:
if not self.answered:
print(f"AoC 2016 Day 10, Part 1 answer is {bot}")
self.answered = True
self.execute(bot)
def instruct(self, bot, to_low, to_high):
if debug:
print(f"Instruction given to bot {bot} to move to {to_low}, {to_high}")
self.bots[bot].pend(to_low, to_high)
self.execute(bot)
def execute(self, bot):
if self.bots[bot].full and self.bots[bot].pending:
lo_bot, low, hi_bot, high = self.bots[bot].execute()
if debug:
print(f"Bot {bot} moving {low} to {lo_bot} and {high} to {hi_bot}")
self.give(lo_bot, low)
self.give(hi_bot, high)
if self.bots[bot].full and self.bots[bot].pending:
self.execute(bot)
factory = Factory()
for ix, values in enumerate(data):
if len(values) == 2:
val, bot = values
if botput[ix][0] == 'output':
bot = bot + 1000
factory.val_bot(bot)
factory.give(bot, val)
elif len(values) == 3:
bot, hi, low = values
if botput[ix][1] == 'output':
hi = hi + 1000
if botput[ix][2] == 'output':
low = low + 1000
factory.val_bot([bot, hi, low])
factory.instruct(bot, hi, low)
print(f"AoC 2016 Day 10, Part 2 answer is {prod(factory.bots[bot].chips[0] for bot in range(1000, 1003))}")
| true |
c549ff21c00af61aab8134b8a8af30fc3dba9548 | Python | raubana/Pancake_Script_Tests | /V6/pancake/compiler/op_finder.py | UTF-8 | 2,072 | 3.265625 | 3 | [] | no_license | """
The purpose of the op finder is to go through each token and find tokens of type TYPE_OTHER and attempt to
find a matching operator. If one of that symbol doesn't exist, an error is raised.
"""
import ops
from tokenizer import Token
from constants import *
from error_format import error_format
OPERATORS = ops.OPERATORS
class Op_Finder(object):
@staticmethod
def find_matching_op(symbol, num_operands=None):
for op in OPERATORS:
if op.symbol == symbol and (num_operands == None or op.num_operands == num_operands):
return op
@staticmethod
def process(tokenlist, skip_unmatched):
#First thing we do is break up grouped ops.
i = 0
while i < len(tokenlist.tokens):
token = tokenlist.tokens[i]
if token.type == TYPE_OTHER:
match = Op_Finder.find_matching_op(token.value)
if not match and len(token.value) >= 2:
tokenlist.tokens.insert(i + 1, Token(TYPE_OTHER, token.value[1:], token.line_number, token.char_number + 1, token.length-1))
token.value = token.value[0]
token.length = 1
i += 1
# Then we parse all of the ops.
i = 0
while i < len(tokenlist.tokens):
token = tokenlist.tokens[i]
if token.type == TYPE_OTHER:
to_left = None
to_right = None
if i-1 >= 0:
to_left = tokenlist.tokens[i-1]
if i+1 < len(tokenlist.tokens):
to_right = tokenlist.tokens[i+1]
num_operands = 0
if to_left and to_left.type in LITERAL_TYPES+(TYPE_TERM, TYPE_BLOCK_END):
num_operands += 1
if to_right and to_right.type in LITERAL_TYPES+(TYPE_TERM, TYPE_BLOCK_START, TYPE_FUNCTION):
num_operands += 1
match = Op_Finder.find_matching_op(token.value, num_operands)
if not match:
match = Op_Finder.find_matching_op(token.value)
if match:
token.type = TYPE_OPERATOR
token.value = match
else:
if not skip_unmatched:
token.type = TYPE_NULL
error_format(token,"{op} is not a recognized operator.".format(op=token.value))
i += 1
def process(tokenlist, skip_unmatched = False):
Op_Finder.process(tokenlist, skip_unmatched)
| true |
e852a3f1a7863aa6b9da5cf8377aa0239187ffaf | Python | cloudmesh-community/book | /examples/python/kmeans-mapreduce/parallel_kmeans.py | UTF-8 | 13,939 | 3.078125 | 3 | [] | no_license | '''This file has code to perform kmeans in a parallel fashion. If the Parallelism parameters is set = 2 it k-means is parallelized If it is set to 1 it is not. Here Parallelism is set to 2'''
from scipy.cluster.vq import vq
import numpy as np
import matplotlib.pyplot as plt
def kmeans_gcf(obs, NumClust, iter=20, thresh=1e-5, Parallelism = 1, MaxMean = 1):
if int(iter) < 1:
raise ValueError('iter must be at least 1.')
#initialize best distance value to a large value
best_dist = np.inf
if NumClust < 1:
raise ValueError("Asked for 0 cluster ? ")
for i in range(iter):
#the intial code book is randomly selected from observations
book, distortavg, distortmax = raw_kmeans_gcf(obs, NumClust, thresh, Parallelism)
dist = distortavg
if MaxMean == 2:
dist = distortmax
if dist < best_dist:
best_book = book
best_dist = dist
return best_book, best_dist
def raw_kmeans_gcf(obs, NumClust, thresh=1e-5, Parallelism = 1):
""" "raw" version of k-means.
Returns
-------
code_book :
the lowest distortion codebook found.
avg_dist :
the average distance a observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
XXX should have an axis variable here.
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
# Initialize Code Book
No = obs.shape[0]
code_book = np.take(obs, np.random.randint(0, No, NumClust), 0)
# obs is data; No is Number of Datapoints gotten from size of obs; NumClust is number of clusters desired
# randinit(I1, I2, Num) calculates Num random integers r I1 <= r < I2
# take returns an array selected from obs with 0'th index (lat argument specifies dimension) given in list of indices returned by randint
#
Iseven = np.empty([tot], dtype=bool)
for i in np.arange(tot):
Iseven[i] = (i%2 == 0);
obs1 = np.compress(Iseven, obs, 0)
obs2 = np.compress(np.logical_not(Iseven), obs, 0)
avg_dist = []
diff = thresh+1.
while diff > thresh:
#
if Parallelism == 1:
code_book, NumPointsinClusters, distortsum, distortmax, NumPoints = Kmeans_map(obs, code_book)
if Parallelism == 2:
# Can be Parallel Map Operations
code_book1, NumPointsinClusters1, distortsum1, distortmax1, NumPoints1 = Kmeans_map(obs1, code_book)
code_book2, NumPointsinClusters2, distortsum2, distortmax2, NumPoints2 = Kmeans_map(obs2, code_book)
#
# Following are 4 Reduction Operations
# Note maps include local reductions
code_book = np.add( code_book1, code_book2)
NumPointsinClusters = np.add( NumPointsinClusters1, NumPointsinClusters2)
distortsum = distortsum1 + distortsum2
distortmax = np.maximum(distortmax1, distortmax2)
NumPoints = NumPoints1 + NumPoints2
#
code_book = np.compress(np.greater(NumPointsinClusters, 0), code_book, 0)
# remove code_books that didn't have any members
#
j = 0
nc = code_book.shape[0]
for i in np.arange(nc):
if NumPointsinClusters[i] > 0:
code_book[j,:] = code_book[j,:] / NumPointsinClusters[i]
j = j + 1
#
# Calculate mean discrepancy
distortavg = distortsum/NumPoints
avg_dist.append(distortavg)
if len(avg_dist) > 1:
diff = avg_dist[-2] - avg_dist[-1]
# Change in average discrepancy
# Can also test on average discrepancy itself
#
return code_book, distortavg, distortmax
# Return Centroid array and final average discrepancy
#
# Execute Kmeans map functions in parallel
# No test on cluster count as this must be summed over maps
def Kmeans_map(obs, code_book):
No = obs.shape[0]
nc = code_book.shape[0]
# nc is current number of clusters (may decrease if zero clusters last iteration)
#
#compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book)
distortsum = np.sum(distort)
distortmax = np.amax(distort)
#
# vq returns an indexing array obs_code mapping rows of obs (the points) to code_book (the centroids)
# distort is an array of length No that has difference between observation and chosen centroid
# vq stands for vector quantization and is provided in SciPy
#
VectorDimension = obs.shape[1]
NewCode_Book = np.zeros([nc, VectorDimension])
NumPointsinClusters = np.zeros([nc])
for i in np.arange(nc):
# Loop over clusters labelled with i
cell_members = np.compress(np.equal(obs_code, i), obs, 0)
NumPointsinClusters[i] = cell_members.shape[0]
# Extract Points in this Cluster; extract points whose quantization label is i
#
NewCode_Book[i] = np.sum(cell_members, 0)
# Calculate centroid of i'th cluster
return NewCode_Book, NumPointsinClusters, distortsum, distortmax, No
Radii = np.array([ 0.375, 0.55, 0.6, 0.25 ])
# Set these values
# SciPy default Thresh = 1.0E-5 Parallelism = 2 MaxMean = 1 NumIterations = 20
Thresh = 1.0E-5
Parallelism = 2
MaxMean = 1
NumIterations = 1
nClusters = 4
nRepeat = 250
tot = nClusters*nRepeat
Centers1 = np.tile([0,0], (nRepeat,1))
Centers2 = np.tile([3,3], (nRepeat,1))
Centers3 = np.tile([0,3], (nRepeat,1))
Centers4 = np.tile([3,0], (nRepeat,1))
Centers = np.concatenate((Centers1, Centers2, Centers3, Centers4))
xvalues1 = np.tile(Radii[0], nRepeat)
xvalues2 = np.tile(Radii[1], nRepeat)
xvalues3 = np.tile(Radii[2], nRepeat)
xvalues4 = np.tile(Radii[3], nRepeat)
Totradii = np.concatenate((xvalues1, xvalues2, xvalues3, xvalues4))
xrandom = np.random.randn(tot)
xrange = xrandom * Totradii
yrandom = np.random.randn(tot)
yrange = yrandom * Totradii
Points = np.column_stack((xrange, yrange))
data = Points + Centers
# computing K-Means with K = 2 (2 clusters)
centroids,error = kmeans_gcf(data,2, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=2 Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=2 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx==0,0],data[idx==0,1],'ob',
data[idx==1,0],data[idx==1,1],'or')
plt.plot(centroids[:,0],centroids[:,1],'sg',markersize=8)
plt.show()
# computing K-Means with K = 4 (4 clusters)
centroids4,error = kmeans_gcf(data,4, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx4,_ = vq(data,centroids4)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=4 Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=4 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx4==0,0],data[idx4==0,1],marker='o',markerfacecolor='blue', ls ='none')
plt.plot(data[idx4==1,0],data[idx4==1,1],marker='o',markerfacecolor='red', ls ='none')
plt.plot(data[idx4==2,0],data[idx4==2,1],marker='o',markerfacecolor='orange', ls ='none')
plt.plot(data[idx4==3,0],data[idx4==3,1],marker='o',markerfacecolor='purple', ls ='none')
plt.plot(centroids4[:,0],centroids4[:,1],'sg',markersize=8)
plt.show()
# computing K-Means with K = 6 (6 clusters)
centroids,error = kmeans_gcf(data,6, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=6 Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=6 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx==0,0],data[idx==0,1],marker='o',markerfacecolor='blue', ls ='none')
plt.plot(data[idx==1,0],data[idx==1,1],marker='o',markerfacecolor='red', ls ='none')
plt.plot(data[idx==2,0],data[idx==2,1],marker='o',markerfacecolor='orange', ls ='none')
plt.plot(data[idx==3,0],data[idx==3,1],marker='o',markerfacecolor='purple', ls ='none')
plt.plot(data[idx==4,0],data[idx==4,1],marker='o',markerfacecolor='green', ls ='none')
plt.plot(data[idx==5,0],data[idx==5,1],marker='o',markerfacecolor='magenta', ls ='none')
plt.plot(centroids[:,0],centroids[:,1],'sk',markersize=8)
plt.show()
# computing K-Means with K = 8 (8 clusters)
centroids4,error = kmeans_gcf(data,8, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx4,_ = vq(data,centroids4)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=8 Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=8 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx4==0,0],data[idx4==0,1],marker='o',markerfacecolor='blue', ls ='none')
plt.plot(data[idx4==1,0],data[idx4==1,1],marker='o',markerfacecolor='red', ls ='none')
plt.plot(data[idx4==2,0],data[idx4==2,1],marker='o',markerfacecolor='orange', ls ='none')
plt.plot(data[idx4==3,0],data[idx4==3,1],marker='o',markerfacecolor='purple', ls ='none')
plt.plot(data[idx4==4,0],data[idx4==4,1],marker='o',markerfacecolor='green', ls ='none')
plt.plot(data[idx4==5,0],data[idx4==5,1],marker='o',markerfacecolor='magenta', ls ='none')
plt.plot(data[idx4==6,0],data[idx4==6,1],marker='o',markerfacecolor='yellow', ls ='none')
plt.plot(data[idx4==7,0],data[idx4==7,1],marker='o',markerfacecolor='cyan', ls ='none')
plt.plot(centroids4[:,0],centroids4[:,1],'sg',markersize=8)
plt.show()
Radii = 0.25*Radii
xvalues1 = np.tile(Radii[0], nRepeat)
xvalues2 = np.tile(Radii[1], nRepeat)
xvalues3 = np.tile(Radii[2], nRepeat)
xvalues4 = np.tile(Radii[3], nRepeat)
Totradii = np.concatenate((xvalues1, xvalues2, xvalues3, xvalues4))
xrandom = np.random.randn(tot)
xrange = xrandom * Totradii
yrandom = np.random.randn(tot)
yrange = yrandom * Totradii
Points = np.column_stack((xrange, yrange))
data = Points + Centers
# computing K-Means with K = 2 (2 clusters)
centroids,error = kmeans_gcf(data,2, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=2 Small Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=2 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx==0,0],data[idx==0,1],'ob',
data[idx==1,0],data[idx==1,1],'or')
plt.plot(centroids[:,0],centroids[:,1],'sg',markersize=8)
plt.show()
# computing K-Means with K = 4 (4 clusters)
centroids4,error = kmeans_gcf(data,4, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx4,_ = vq(data,centroids4)
# some plt.plotting using numpy's logical indexing
plt.figure("Clustering K=4 Small Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=4 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx4==0,0],data[idx4==0,1],marker='o',markerfacecolor='blue', ls ='none')
plt.plot(data[idx4==1,0],data[idx4==1,1],marker='o',markerfacecolor='red', ls ='none')
plt.plot(data[idx4==2,0],data[idx4==2,1],marker='o',markerfacecolor='orange', ls ='none')
plt.plot(data[idx4==3,0],data[idx4==3,1],marker='o',markerfacecolor='purple', ls ='none')
plt.plot(centroids4[:,0],centroids4[:,1],'sg',markersize=8)
plt.show()
Radii = 6*Radii
xvalues1 = np.tile(Radii[0], nRepeat)
xvalues2 = np.tile(Radii[1], nRepeat)
xvalues3 = np.tile(Radii[2], nRepeat)
xvalues4 = np.tile(Radii[3], nRepeat)
Totradii = np.concatenate((xvalues1, xvalues2, xvalues3, xvalues4))
xrandom = np.random.randn(tot)
xrange = xrandom * Totradii
yrandom = np.random.randn(tot)
yrange = yrandom * Totradii
Points = np.column_stack((xrange, yrange))
data = Points + Centers
# computing K-Means with K = 2 (2 Very Large clusters)
centroids,error = kmeans_gcf(data,2, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
#
plt.figure("Clustering K=2 Very Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=2 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx==0,0],data[idx==0,1],'ob',
data[idx==1,0],data[idx==1,1],'or')
plt.plot(centroids[:,0],centroids[:,1],'sg',markersize=8)
plt.show()
# computing K-Means with K = 4 (4 Very Large clusters)
centroids4,error = kmeans_gcf(data,4, NumIterations, Thresh, Parallelism, MaxMean)
# assign each sample to a cluster
idx4,_ = vq(data,centroids4)
#
plt.figure("Clustering K=4 Very Large Radius Kmeans parallel {0} MaxMean {1} Iter {2}".format(Parallelism, MaxMean, NumIterations))
plt.title("K=4 Kmeans parallel {0} MaxMean {1} Iter {2} Distort {3:5.3f}".format(Parallelism, MaxMean, NumIterations, error))
plt.plot(data[idx4==0,0],data[idx4==0,1],marker='o',markerfacecolor='blue', ls ='none')
plt.plot(data[idx4==1,0],data[idx4==1,1],marker='o',markerfacecolor='red', ls ='none')
plt.plot(data[idx4==2,0],data[idx4==2,1],marker='o',markerfacecolor='orange', ls ='none')
plt.plot(data[idx4==3,0],data[idx4==3,1],marker='o',markerfacecolor='purple', ls ='none')
plt.plot(centroids4[:,0],centroids4[:,1],'sg',markersize=8)
plt.show() | true |
b384483c67a6289ca1313376a309ef7be5a5391d | Python | minhthe/practice-algorithms-and-data-structures | /Heap/max-probability.py | UTF-8 | 961 | 3.203125 | 3 | [] | no_license | '''
https://leetcode.com/problems/path-with-maximum-probability/
'''
import heapq
import collections
class Solution:
def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
graph = collections.defaultdict(list)
k = 0
for u,v in edges:
vv = succProb[k]
k+=1
graph[u].append( (vv, v) )
graph[v].append( (vv, u) )
dist = [int(1e9) for i in range(n)]
pq = [( -1, start)]
heapq.heapify(pq)
dist[start] = 0
while(len(pq)):
v, p = heapq.heappop(pq)
for vv, pp in graph[p]:
if abs(vv * v) *(-1) < dist[pp]:
heapq.heappush(pq, (abs(vv * v) *(-1) , pp ) )
dist[pp] = abs(vv * v) *(-1)
ans = dist[end]
if ans == int(1e9): return 0
return ans * (-1) | true |
9c0bd50d298ed6af62235ae70eb23fedb3c5232d | Python | sbtries/class_pandaaaa | /Code/SarahBeth/function_prac.py | UTF-8 | 1,150 | 4.59375 | 5 | [] | no_license | # REPL:
# Read-Evaluate-Print
# Function anatomy:
# def: the def keyword, telling Python weโre about to start a function definition
# a name for the function
# (: opening parenthesis
# (optional) the names of one or more arguments, separated with ,
# (optional) the names and values of one or more default arguments, separated with (,)
# ) closing parenthesis
# : a colon
# Function Contents:
# a new line
# indentation (press tab on your keyboard)
# one or more lines
# (optional) a return statement (with no return, function returns None)
# A Basic Function that accepts no arguments and returns nothing.
def hello_world():
print("Hello, World!")
# A Function that accepts two arguments, and returns the value of
# those numbers added together.
def add_numbersA(x, y):
return x - y
# functions can have default values:
def add_numbersB(x, y=6):
return x + y
#function parameters can have default values that will be used if no value is passed in
def add_numbersC(x, y, operation = "add"):
if operation =="add":
return x + y
elif operation == "sub":
return x - y
print(add_numbersC(4, 10, 'sub'))
| true |
cdee539feb025e2a50c8dae238292a5d3c573783 | Python | dennis2030/leetcodeStudyGroup | /662-maximum-width-of-binary-tree/Sony.py | UTF-8 | 860 | 3.40625 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
bfs_list = [(0, root)]
max_width = 1
while len(bfs_list) > 0:
new_bfs_list = []
for idx, node in bfs_list:
if node.left is not None:
new_bfs_list.append((2 * idx, node.left))
if node.right is not None:
new_bfs_list.append((2 * idx + 1, node.right))
max_width = max(max_width, bfs_list[-1][0] - bfs_list[0][0] + 1)
bfs_list = new_bfs_list
return max_width
| true |
4c60ac68e31a90fcc8e95535bf1c7e9934600cd6 | Python | CMunozMontoya/t3-cripto | /t3.py | UTF-8 | 1,699 | 3.125 | 3 | [] | no_license | import blowfish
import base64
from os import urandom
from yattag import Doc
def rellenar (text):
while len(text) < 8:
text = text+"0"
return text
doc, tag, text = Doc().tagtext()
archivo = open("codigo.html","w")
#datos----------------
#key = b"abcd1234" #Llave
#texto = "hola"
while True:
texto = input("Ingrese mensaje a enviar (Maximo 8 caracteres): ")
if len(texto) <= 8:
break
print("Mensaje demasiado largo.")
while True:
llave = input("Ingrese mensaje a enviar (Maximo 8 caracteres): ")
if len(llave) <= 8:
break
print("Llave demasiado larga")
texto = rellenar(texto)
print(texto)
mensaje = bytes(texto, 'utf-8') #Mensaje
key = bytes(llave, 'utf-8') #llave
cipher = blowfish.Cipher(key)
mensaje_cifrado = cipher.encrypt_block(mensaje)
#desencriptar --- DEBUG
#texto_plano = cipher.decrypt_block(mensaje_cifrado)
#print(texto_plano)
data = base64.b64encode(mensaje_cifrado) #pasar a base64 para enviado sencillo
texto_oculto = "<div class='blowfish' id=" + str(data)[1:] + "></div>"
#escribir html----------
archivo.write("<!DOCTYPE html>\n<html>\n")
archivo.write("<head> <link rel='stylesheet' type='text/css' href='estilo.css' />")
archivo.write("<script type='text/javascript' src='Blowfish.js' ></script>")
archivo.write("<script type='text/javascript' src='decode.js' ></script>")
archivo.write("</head><body>\n\n")
archivo.write("<p>Este sitio contiene un mensaje secreto</p>")
archivo.write(texto_oculto)
#archivo.write("<input type='text' id = 'llave'/>")
#archivo.write("<button type='button' onclick='desencriptar();'> desencriptar </button>")
archivo.write("\n</body>\n</html>")
archivo.close()
| true |
93e6af56fd248efa2ea8bdbb87e6144f9025754f | Python | ekck/SQLAlchemy_relationships | /one_to_one.py | UTF-8 | 514 | 2.53125 | 3 | [] | no_license | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///121rel.db'
db = SQLAlchemy(app)
class Parent(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(500))
child = db.relationship('Child', backref='parent')
class Child(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(500))
parent_id = db.Column(db.Integer, db.ForeignKey('parent.id'))
| true |
ce79216bdcc0a670a8c35ed4d4b3e5273e1fa638 | Python | suipnice/Moodle_to_ScenariChain | /convert_XML.py | UTF-8 | 35,774 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Connvertisseur Moodle XML --> XML Opale ScenariChain"""
import xml.etree.ElementTree as ET
import re
import os
from random import shuffle
import HTMLParser
# Module-level HTML parser instance; cleanhtml() relies on h.unescape() to
# decode HTML entities (Python 2 only -- HTMLParser became html.parser in Python 3).
h = HTMLParser.HTMLParser()
def cleanhtml(raw_html):
    """Return *raw_html* with HTML entities decoded and all tags removed.

    The result is stripped and its whitespace normalised (tabs to spaces,
    CR to LF, doubled spaces/newlines collapsed once).
    When *raw_html* is None the value is returned unchanged (i.e. None).
    """
    cleantext = raw_html
    if raw_html is not None:
        # Decode HTML entities (&amp;, &eacute;, ...) via the module-level parser `h`.
        raw_html = h.unescape(raw_html)
        # NOTE(review): this replacement looks mojibake-damaged -- it presumably
        # was meant to substitute a non-breaking space; confirm the intended
        # source/target characters against the original file encoding.
        raw_html = raw_html.replace(" ", u"ย ")
        cleanr = re.compile('<.*?>')  # non-greedy: matches one HTML tag at a time
        cleantext = re.sub(cleanr, '', raw_html)
        # Normalise whitespace; note each .replace() makes a single pass, so e.g.
        # "\n\n\n" only collapses to "\n\n" here -- presumably acceptable upstream.
        cleantext = cleantext.strip().replace("\t", " ").replace("  ", " ").replace("\r", "\n").replace("\n\n", "\n")
    return cleantext
def convert_Moodle_XML_to_quiz(params):
u"""Conversion d'exercices Moodle (quiz)."""
# See "https://docs.moodle.org/3x/fr/Format_XML_Moodle"
# params must be : {"file":import_file, "model_filter": "["cloze", "multichoice", "matching"]"}
# print("----- convert_Moodle_XML_to_quiz -----")
import_file = open(params["file"], "r")
# On s'assure que le curseur de lecture est au dรฉbut du fichier.
import_file.seek(0)
tree = ET.parse(import_file)
root = tree.getroot()
# groupe_title = h.unescape(root.find('./data/title').text).encode("utf-8")
questions_list = []
if not root.tag == "quiz":
message = u"Votre fichier n'est pas dans un format XML Moodle valide. Assurez-vous de sรฉlectionner un fichier ยซ .xml ยป, gรฉnรฉrรฉ depuis des Quiz Moodle V? ou plus."
print(message)
return questions_list
nb_exos = 0
current_folder = "results"
# Liste d'exercice non reconnus (qui ne seront pas importรฉs)
unrecognized_list = {}
# last_question = len(root)
if params["model_filter"] == "all":
# params["model_filter"] = ["cloze", "multichoice", "matching"]
params["model_filter"] = ["cloze", "multichoice", "truefalse", "gapselect", "shortanswer", "ddwtos"]
for qnum, question in enumerate(root):
param_export = None
question_type = question.get('type')
if question_type == "category":
cat_structure = question.find('category').find('text').text.strip()
# sur Moodle, la categorie "$course$" est un mot rรฉservรฉ qui fait rรฉfรฉrence au cours courant.
cat_structure = cat_structure.replace("../", "").replace("$course$/", "")
for caractere in ["$", "+", "\\", ">", "<", "|", "?", ":", "*", "#", "\"", "~", " ", "."]:
cat_structure = cat_structure.replace(caractere, "_")
# On s'assure que les accents sont bien en unicode.
cat_structure = cat_structure.encode("utf-8")
# cat_structure = cat_structure.split('/')
current_folder = "results/%s" % cat_structure
if not os.path.exists(current_folder):
os.makedirs(current_folder)
# Si le type de question fait partie des modeles ร importer
elif question_type in params["model_filter"]:
# TODO : ici il faudrait s'assurer que le titre ne dรฉpasse pas X chars ?
question_dict = {"title": question.find('name').find('text').text.strip()}
if question_type == "cloze":
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_cloze_%C3%A0_r%C3%A9ponses_int%C3%A9gr%C3%A9es
# LOG.info("----- Nouvelle question de type 'cloze' (%s) -----" % question_dict["title"])
# modele_export = "texteatrous"
template_file = "templates/Opale36/TaT2.quiz"
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
feedbacks = {"good_reps": [], "bad_reps": []}
# Il faut maintenant parser les donnees ร la recherches de codes du style :
# {1:MULTICHOICE:BAD_REP1#fdbk1~%100%GOOD_REP1#fdbk2~BAD_REP2#fdbk3~BAD_REP3#fdbk4}
pattern_trous = r"{(.+?)}"
pattern_percent = re.compile(r"%([0-9]*?)%")
matches = re.finditer(pattern_trous, donnees)
for match in matches:
trou = match.group(1)
good_rep = ['']
bad_reps = []
placesynonymes = ''
synonymes = []
parsed_trou = trou.split(":", 3)
# nb_points = parsed_trou[0]
type_trou = parsed_trou[1]
trou = parsed_trou[2].split("~")
if "MULTICHOICE" in type_trou or "MC" in type_trou:
# exemple : {1:MC:Mauvaise rรฉp.#Rรฉtroaction pour cette rรฉp.~Autre mauvaise rรฉp.#Rรฉtroaction pour cette autre mauvaise rรฉponse~=Bonne rรฉp.#Rรฉtroaction pour la bonne rรฉp.~%50%Rรฉponse partiellement juste#Rรฉtroaction pour cette rรฉp.}
for rep in trou:
rep_group = "bad"
fraction = pattern_percent.search(rep)
if fraction is not None:
fraction = fraction.group(1)
if fraction != "100":
print("----- ATTENTION : cloze with fraction != 100 !! (%s) -----" % fraction)
rep = pattern_percent.sub('', rep)
rep_group = "good"
elif rep.startswith("="):
# on retire le "=" indiquant la bonne rรฉponse
rep = rep[1:]
rep_group = "good"
# On sรฉpare la rรฉponse de son feedback
rep = rep.split("#")
if rep_group == "bad":
# LOG.info("----- Mauvaise -----(%s)" % rep)
if len(rep) > 1:
feedbacks["bad_reps"].append(rep[1])
bad_reps.append(rep[0])
else:
if len(rep) > 1:
feedbacks["good_reps"].append(rep[1])
if good_rep[0] != '':
synonymes.append(rep[0])
else:
good_rep = [rep[0]]
# syntaxe Opale pour multichoix :
# <op:gapM><sp:options><sp:option>BONchoix</sp:option><sp:option>MAUVAISchoix1</sp:option></sp:options></op:gapM>BONchoix
options = good_rep + bad_reps + synonymes
shuffle(options)
trou = "<sp:options>"
for reponse in options:
trou = "%s<sp:option>%s</sp:option>" % (trou, reponse)
trou = "%s</sp:options>" % (trou)
if len(synonymes) > 0:
placesynonymes = "<sp:synonyms>"
for synonyme in synonymes:
placesynonymes = "%s<sp:synonym>%s</sp:synonym>" % (placesynonymes, synonyme)
placesynonymes = "%s</sp:synonyms>" % (placesynonymes)
else:
placesynonymes = ''
donnees = donnees.replace(match.group(), "<sc:textLeaf role='gap'><op:gapM>%s%s</op:gapM>%s</sc:textLeaf>" % (placesynonymes, trou, good_rep[0]))
elif "SHORTANSWER" in type_trou or "SA" in type_trou:
# exemple : {1:SHORTANSWER:=rรฉponse attendue#bonne rรฉponse~*#rรฉtroaction pour toute autre rรฉponse}
for rep in trou:
rep = rep.split("#")
if rep[0].startswith("="):
# on retire le "=" indiquant la seule bonne rรฉponse
trou = rep[0][1:]
good_rep.append(rep[0][1:])
if len(rep) > 1:
feedbacks["good_reps"].append(rep[1])
elif len(rep) > 1:
feedbacks["bad_reps"].append(rep[1])
donnees = donnees.replace(match.group(), "<sc:textLeaf role='gap'>%s</sc:textLeaf>" % (trou))
else:
print("----- ATTENTION : cloze with unrecognized TYPE!! (%s) -----" % trou)
message = u"----- ATTENTION : cloze with unrecognized TYPE! (%s) -----" % trou
print(message)
print (good_rep[0])
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
match = question.find(feedback_type)
if match is not None:
# ici tester si match.format = 'html' ?
# moodle formats : html (default), moodle_auto_format, plain_text et markdown
feedbacks[feedback_type] = match.find('text').text
if feedbacks[feedback_type] is None:
feedbacks[feedback_type] = ""
else:
feedbacks[feedback_type] = cleanhtml(feedbacks[feedback_type])
else:
feedbacks[feedback_type] = ""
# Todo : partiallycorrectfeedback
"""
# shuffleanswers indique si l'ordre des propositions est alรฉatoire
# pas utilisรฉ dans Opale ?
shuffleanswers = question.find('shuffleanswers')
if shuffleanswers is not None:
# attention : <shuffleanswers> est parfois notรฉ 0/1, et parfois true/false
list_order = int(shuffleanswers.text) + 1
else:
list_order = 1
#
feedback_bon = feedbacks['correctfeedback']
if len(feedbacks["good_reps"]) > 0:
feedback_bon = u"%s<div>indications spรฉcifiques:%s</div>" % (feedback_bon, "<br/>".join(feedbacks["good_reps"]))
feedback_mauvais = feedbacks['incorrectfeedback']
if len(feedbacks["bad_reps"]) > 0:
feedback_mauvais = u"%s<div>indications spรฉcifiques:%s</div>" % (feedback_mauvais, "<br/>".join(feedbacks["bad_reps"]))
"""
#
# concatenation tous les feedback en un seul :
explication = ''
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
if feedbacks[feedback_type]:
explication = "%s<sp:txt><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sp:txt>" % (explication, feedbacks[feedback_type])
if explication != '':
explication = "<sc:globalExplanation><op:res>%s</op:res></sc:globalExplanation>" % (explication)
param_export = {"title" : question_dict["title"].encode("utf-8"),
"donnees" : donnees.encode("utf-8"),
"explication": explication.encode("utf-8")
}
nb_exos += 1
#
# Question de type "Choix Multiple"
#
elif question_type == "multichoice":
# TODO : preliminary DRAFT only
print("----- Nouvelle question de type 'multichoice' -----")
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_%C3%A0_choix_multiples
# LOG.info("----- Nouvelle question de type 'choix multiples' (%s) -----" % question_dict["title"])
template_file = "templates/Opale36/qcm.quiz"
# modele_export = "question ร choix multiples"
#
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
#
feedbacks = {"good_reps": [], "bad_reps": []}
#
#
# rammassage des rรฉponses et de l'indicateur de bonne rรฉponse
# puis construction des rรฉponses scenari
#
listereponses = ''
for balise in question :
if balise.tag == "answer":
# liste_reponses_possibles.append(balise.find('text').text)
fraction = balise.get("fraction")
reponse = balise.find('text').text
if fraction > "0":
check = "checked"
else:
check = "unchecked"
reponse = cleanhtml(reponse)
listereponses = u"%s<sc:choice solution='%s'><sc:choiceLabel><op:txt><sc:para xml:space='preserve'>%s\
</sc:para></op:txt></sc:choiceLabel></sc:choice>" % (listereponses, check, reponse)
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
match = question.find(feedback_type)
if match is not None:
# ici tester si match.format = 'html' ?
# moodle formats : html (default), moodle_auto_format, plain_text et markdown
feedbacks[feedback_type] = match.find('text').text
if feedbacks[feedback_type] is None:
feedbacks[feedback_type] = ""
else:
feedbacks[feedback_type] = cleanhtml(feedbacks[feedback_type])
else:
feedbacks[feedback_type] = ""
# on supprime les feedbacks redondant avec ceux de Scenari :
feedbacks['correctfeedback'] = feedbacks['correctfeedback'].replace(u"Votre rรฉponse est correcte.", "")
feedbacks['incorrectfeedback'] = feedbacks['incorrectfeedback'].replace(u"Votre rรฉponse est incorrecte.", "")
feedbacks['partiallycorrectfeedback'] = feedbacks['partiallycorrectfeedback'].replace(u"Votre rรฉponse est partiellement correcte.", "")
explication = ''
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
if feedbacks[feedback_type]:
explication = "%s%s" % (explication, feedbacks[feedback_type])
if explication != '':
explication = "<sc:globalExplanation><op:res><sp:txt><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sp:txt></op:res></sc:globalExplanation>" % (explication)
param_export = {"title" : question_dict["title"].encode("utf-8"),
"enonce" : (donnees).encode("utf-8"),
"listereponses" : (listereponses).encode("utf-8"),
"explication" : (explication).encode("utf-8")
}
nb_exos += 1
#
#
# Question de type "categorisation"
#
elif question_type == "matching":
# [TODO] : preliminary DRAFT only
# seulement 2 a transferer donc, question non traitรฉe ici sera transfรฉrรฉe manuellement
print("----- Nouvelle question de type 'matching' [preliminary DRAFT only !!] -----")
template_file = "templates/Opale36/categorisation.quiz"
#
#
# Question de type "glisser-dรฉposer sur le texte"
#
elif question_type == "ddwtos" :
print("----- Nouvelle question de type 'glisser-dรฉposer sur le texte' -----")
template_file = "templates/Opale36/TaT2.quiz"
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_glisser-d%C3%A9poser_sur_texte
# LOG.info("----- Nouvelle question de type 'glisser dรฉposer' (%s) -----" % question_dict["title"])
# modele_export = "texteatrous"
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
feedbacks = {"good_reps": [], "bad_reps": []}
pattern_trous = r"\[\[([1-9])\]\]"
liste_reponses_possibles = []
for balise in question :
if balise.tag == "dragbox" :
liste_reponses_possibles.append(balise.find('text').text)
matches = re.finditer(pattern_trous, donnees)
for match in matches:
numero_bonne_reponse = match.group(1)
good_rep = liste_reponses_possibles[int(numero_bonne_reponse) - 1]
trou = "<op:gapM><sp:options>"
for reponse in liste_reponses_possibles:
trou = "%s<sp:option>%s</sp:option>" % (trou, reponse)
# transforme la liste "options" en chaine de caractรจres :
trou = "%s</sp:options></op:gapM>" \
"%s" % (trou, good_rep)
donnees = donnees.replace(match.group(), "<sc:textLeaf role='gap'>%s</sc:textLeaf>" % trou)
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
tous_les_feedbacks_question = question.find(feedback_type)
if tous_les_feedbacks_question is not None:
# ici tester si match.format = 'html' ?
# moodle formats : html (default), moodle_auto_format, plain_text et markdown
feedbacks[feedback_type] = tous_les_feedbacks_question.find('text').text
if feedbacks[feedback_type] is None:
feedbacks[feedback_type] = ""
else:
feedbacks[feedback_type] = cleanhtml(feedbacks[feedback_type])
else:
feedbacks[feedback_type] = ""
feedbacks['correctfeedback'] = feedbacks['correctfeedback'].replace(u"Votre rรฉponse est correcte.", "")
feedbacks['incorrectfeedback'] = feedbacks['incorrectfeedback'].replace(u"Votre rรฉponse est incorrecte.", "")
feedbacks['partiallycorrectfeedback'] = feedbacks['partiallycorrectfeedback'].replace(u"Votre rรฉponse est partiellement correcte.", "")
# concatenation tous les feedback en un seul :
explication = ''
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
if feedbacks[feedback_type]:
explication = "%s<sp:txt><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sp:txt>" \
% (explication, feedbacks[feedback_type])
if explication != '':
print (explication)
explication = "<sc:globalExplanation><op:res>%s</op:res></sc:globalExplanation>" % (explication)
param_export = {"title" : question_dict["title"].encode("utf-8"),
"donnees" : donnees.encode("utf-8"),
"explication": explication.encode("utf-8")
}
nb_exos += 1
#
#
# Question de type "selectionner le mot manquant"
#
elif question_type == "gapselect" :
print("----- Nouvelle question de type 'selectionner le mot manquant' -----")
template_file = "templates/Opale36/TaT2.quiz"
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_cloze_%C3%A0_r%C3%A9ponses_int%C3%A9gr%C3%A9es
# LOG.info("----- Nouvelle question de type 'cloze' (%s) -----" % question_dict["title"])
# modele_export = "texteatrous"
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
feedbacks = {"good_reps": [], "bad_reps": []}
# Il faut maintenant parser les donnees ร la recherches de codes du style :
# {1:MULTICHOICE:BAD_REP1#fdbk1~%100%GOOD_REP1#fdbk2~BAD_REP2#fdbk3~BAD_REP3#fdbk4}
pattern_trous = r"\[\[([0-9]+)\]\]"
liste_groupes_reponses = {}
liste_reponses_possibles = []
#
for balise in question :
if balise.tag == "selectoption" :
numgroupe = str(balise.find('group').text)
choix = (balise.find('text').text)
dico_reponse = {"text": choix, "group": numgroupe}
liste_reponses_possibles.append(dico_reponse)
if numgroupe in liste_groupes_reponses :
liste_groupes_reponses[numgroupe].append(choix)
else :
liste_groupes_reponses[numgroupe] = [choix]
matches = re.finditer(pattern_trous, donnees)
for match in matches:
num_good_rep = (int(match.group(1)) - 1)
good_rep_grp = liste_reponses_possibles[num_good_rep]
good_rep = good_rep_grp["text"]
numgroupe = good_rep_grp["group"]
options = liste_groupes_reponses[numgroupe]
shuffle(options)
# transforme la liste "options" en chaine de caractรจres :
trou = "<op:gapM><sp:options>"
for reponse in options:
trou = "%s<sp:option>%s</sp:option>" % (trou, reponse)
trou = "%s</sp:options></op:gapM>" \
"%s" % (trou, good_rep)
donnees = donnees.replace(match.group(), "<sc:textLeaf role='gap'>%s</sc:textLeaf>" % trou)
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
match = question.find(feedback_type)
if match is not None:
# ici tester si match.format = 'html' ?
# moodle formats : html (default), moodle_auto_format, plain_text et markdown
feedbacks[feedback_type] = match.find('text').text
if feedbacks[feedback_type] is None:
feedbacks[feedback_type] = ""
else:
feedbacks[feedback_type] = cleanhtml(feedbacks[feedback_type])
else:
feedbacks[feedback_type] = ""
# [TODO] : partiallycorrectfeedback
"""
# shuffleanswers indique si l'ordre des propositions est alรฉatoire
# pas utilisรฉ dans Opale ?
shuffleanswers = question.find('shuffleanswers')
if shuffleanswers is not None:
# attention : <shuffleanswers> est parfois notรฉ 0/1, et parfois true/false
list_order = int(shuffleanswers.text) + 1
else:
list_order = 1
"""
feedback_bon = feedbacks['correctfeedback']
if len(feedbacks["good_reps"]) > 0:
feedback_bon = "%s%s" % (feedback_bon, (feedbacks["good_reps"]))
feedback_mauvais = feedbacks['incorrectfeedback']
if len(feedbacks["bad_reps"]) > 0:
feedback_mauvais = "%s%s" % (feedback_mauvais, (feedbacks["bad_reps"]))
# On supprime les feedbacks redondant avec ceux de Scenari :
feedbacks['correctfeedback'] = feedbacks['correctfeedback'].replace(u"Votre rรฉponse est correcte.", "")
feedbacks['incorrectfeedback'] = feedbacks['incorrectfeedback'].replace(u"Votre rรฉponse est incorrecte.", "")
feedbacks['partiallycorrectfeedback'] = feedbacks['partiallycorrectfeedback'].replace(u"Votre rรฉponse est partiellement correcte.", "")
explication = ''
# concatenation des feedbacks
for feedback_type in ['generalfeedback', 'correctfeedback', 'incorrectfeedback', 'partiallycorrectfeedback']:
if feedbacks[feedback_type]:
explication = "%s<sp:txt><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sp:txt>" % (explication, feedbacks[feedback_type])
if explication != '':
print (explication)
explication = "<sc:globalExplanation><op:res>%s</op:res></sc:globalExplanation>" % (explication)
param_export = {"title" : question_dict["title"].encode("utf-8"),
"donnees" : donnees.encode("utf-8"),
"explication": explication.encode("utf-8")
}
nb_exos += 1
#
#
# Question de type "rรฉponse courte"
#
elif question_type == "shortanswer":
print("----- Nouvelle question de type 'question ร rรฉponse courte' -----")
template_file = "templates/Opale36/reponse_courte.quiz"
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_%C3%A0_r%C3%A9ponse_courte
# LOG.info("----- Nouvelle question de type 'reponse_courte' (%s) -----" % question_dict["title"])
# modele_export = "reponse_courte"
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
feedbacks = {"good_reps": [], "bad_reps": []}
explications = {"feedback_bon": "", "feedback_mauvais": "", "feedback_general": ""}
explications["feedback_general"] = question.find('generalfeedback').find('text').text
explications["feedback_general"] = cleanhtml(explications["feedback_general"])
liste_reponses_possibles = []
for balise in question :
if balise.tag == "answer" :
feedback = balise.find("feedback").find("text").text
fraction = balise.get("fraction")
if fraction == "100" :
reponseok = (balise.find('text').text)
liste_reponses_possibles.append(balise.find('text').text)
if feedback is not None:
feedbacks["good_reps"].append(feedback)
else :
liste_reponses_possibles.append(balise.find('text').text)
if feedback is not None:
feedbacks["bad_reps"].append(feedback)
reponses = ""
for reponse in liste_reponses_possibles :
reponses = "%s<sc:value>%s</sc:value>" % (reponses, reponse)
if len(feedbacks["good_reps"]) > 0:
explications["feedback_bon"] = u"<div>indications spรฉcifiques:%s</div>" % ("<br/>".join(feedbacks["good_reps"]))
if len(feedbacks["bad_reps"]) > 0:
explications["feedback_mauvais"] = u"<div>indications spรฉcifiques:%s</div>" % ("<br/>".join(feedbacks["bad_reps"]))
# concatene tous les feedback en un seul :
explication = ''
for feedback_type in ['feedback_general', 'feedback_bon', 'feedback_mauvais']:
if explications[feedback_type]:
explication = "%s<sp:txt><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sp:txt>" % (explication, explications[feedback_type])
param_export = {"title" : question_dict["title"].encode("utf-8"),
"donnees" : donnees.encode("utf-8"),
"explication": explication.encode("utf-8"),
"reponses" : reponses.encode("utf-8")
}
nb_exos += 1
#
#
# Question de type "Vrai/Faux"
#
elif question_type == "truefalse" :
print("----- Nouvelle question de type 'question Vrai/Faux' -----")
template_file = "templates/Opale36/qcu.quiz"
# Plus d'infos sur ce type ici : https://docs.moodle.org/3x/fr/Question_vrai_ou_faux
# LOG.info("----- Nouvelle question de type 'truefalse' (%s) -----" % question_dict["title"])
# modele_export = "reponse_courte"
donnees = question.find('questiontext').find('text').text
donnees = cleanhtml(donnees)
feedbacks = {"good_reps": [], "bad_reps": []}
explications = {"feedback_bon": "", "feedback_mauvais": "", "feedback_general": ""}
explications["feedback_general"] = ''
if question.find('generalfeedback').find('text').text:
explications["feedback_general"] = question.find('generalfeedback').find('text').text
explications["feedback_general"] = cleanhtml(explications["feedback_general"])
liste_reponses_possibles = []
for balise in question :
if balise.tag == "answer" :
feedback = balise.find("feedback").find("text").text
fraction = balise.get("fraction")
if fraction == "100" :
reponseok = (balise.find('text').text)
liste_reponses_possibles.append(balise.find('text').text)
numbonnereponse = (liste_reponses_possibles.index(reponseok) + 1)
if feedback is not None:
feedbacks["good_reps"].append(feedback)
else :
liste_reponses_possibles.append(balise.find('text').text)
if feedback is not None:
feedbacks["bad_reps"].append(feedback)
choix = ""
for reponse in liste_reponses_possibles :
choix = "%s<sc:choice><sc:choiceLabel><op:txt><sc:para xml:space='preserve'>%s</sc:para></op:txt></sc:choiceLabel></sc:choice>" \
% (choix, reponse)
if len(feedbacks["good_reps"]) > 0:
explications["feedback_bon"] = (feedbacks["good_reps"])
if len(feedbacks["bad_reps"]) > 0:
explications["feedback_mauvais"] = (feedbacks["bad_reps"])
# concatene tous les feedback en un seul :
if len(explications) > 0:
explication = "<sp:txt><op:txt><sc:para xml:space='preserve'>"
totalexplication = ''
for feedback_type in ['feedback_general', 'feedback_bon', 'feedback_mauvais']:
totalexplication = "%s%s" % (totalexplication, explications[feedback_type])
explication = "%s%s</sc:para></op:txt></sp:txt>" % (explication, totalexplication)
param_export = {"title" : question_dict["title"].encode("utf-8"),
"donnees" : donnees.encode("utf-8"),
"explication" : explication.encode("utf-8"),
"choix" : choix.encode("utf-8"),
"numbonnereponse" : str(numbonnereponse)
}
nb_exos += 1
#
#
# Autres modรจles
#
else:
# other Moodle question types : matching|essay|numerical|description
if question_type not in unrecognized_list :
unrecognized_list[question_type] = 1
else:
unrecognized_list[question_type] = unrecognized_list[question_type] + 1
if param_export is not None:
# Read in the file
with open(template_file, 'r') as file :
filedata = file.read()
for param in param_export:
# Replace the target string
filedata = filedata.replace('$$%s$$' % param, param_export[param])
# Write the file out again
with open("%s/%s.quiz" % (current_folder, nb_exos), 'w') as file:
file.write(filedata)
if nb_exos == 0:
message = u"Aucun exercice compatible dรฉtectรฉ dans votre fichier."
print(message)
else :
print "nombre d'exercices : %s" % nb_exos
if len(unrecognized_list.keys()) > 0:
message = u"Attention : Certaines questions utilisaient un modรจle non reconnu et n'ont pas รฉtรฉ importรฉes. (%s)" % unrecognized_list
print(message)
return questions_list
# Script entry point (Python 2 file): convert the given Moodle XML export
# into Scenari/Opale .quiz files, importing every supported question model.
convert_Moodle_XML_to_quiz({
    "file": "export_moodle.xml",
    "model_filter": "all"}
    )
| true |
096465bd7c7798f47c67ecd6b4e6aa6f5c549aca | Python | ManojDwarsala/AutomationRTA | /timeconfig.py | UTF-8 | 1,833 | 3.203125 | 3 | [] | no_license | import pycurl#pycurl is used to access and modify the web page content
# Python 2 script: read the host's current local time and push it to a
# device's /cgi-bin/time.cgi form over HTTP using pycurl.
# (Py2-only constructs: cStringIO module and the 1000L long literal.)
import sys #command-line arguments (usage: script.py <ip> [password])
import cStringIO #in-memory buffer that will receive the fetched page
import time #used to read the local wall-clock time
localtime = time.localtime(time.time()) #current local time as a struct_time
s = str(localtime) #e.g. "time.struct_time(tm_year=2017, tm_mon=3, ...)"
l = list(map(str,s.split(' '))) #split the repr on spaces: one "tm_xxx=value," token per field
par = []
for i in range(0,6):
    c = l[i][l[i].index('=')+1:l[i].index(',')] #slice the value out between '=' and ','
    par.append((c)) #par ends up as [year, month, day, hour, minute, second], all strings
if(len(sys.argv) == 1):
    print("Pass IP Address")
    exit(-1)
ip = sys.argv[1] #target device IP address (required)
if(len(sys.argv)>2):
    passwd = sys.argv[2] #password supplied on the command line
else:
    passwd = "ex1048" #fall back to the default device password
buf = cStringIO.StringIO()#buffer object that collects the HTTP response body
c = pycurl.Curl() #curl handle (NOTE: rebinds 'c', previously a slice string above)
c.setopt(pycurl.URL,'http://'+ip+'/cgi-bin/time.cgi?') #device time-configuration CGI page
c.setopt(pycurl.CONNECTTIMEOUT, 1000L)
c.setopt(pycurl.COOKIE,'password='+passwd) #authenticate via a password cookie
c.setopt(c.POSTFIELDS, 'time_zone=None&time_source=Manual&set_time=do&month='+par[1]+'&day='+par[2]+'&year='+par[0]+'&hour='+par[3]+'&minute='+par[4]+'&second='+par[5]+'&submitted=1&submit_clicked=Submit')#fill the HTML form fields with the captured local date/time
c.setopt(c.VERBOSE, True) #verbose transfer output so errors are visible
c.setopt(pycurl.WRITEFUNCTION, buf.write) #store the response in the buffer
c.perform() #submit the form (this performs the actual time update)
| true |
a5c02e6975e2b1eae157acf9fb345a9bd766df47 | Python | GwenIves/Exercises | /rgpwp/chapter3_stack.py | UTF-8 | 1,129 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/env python3
class Stack(object):
    """A LIFO stack backed by a plain Python list."""

    class EmptyStackError(Exception):
        """Raised when pop() or top() is called on an empty stack."""
        pass

    def __init__(self, items=None):
        # Copy the initial items so the caller's sequence is never aliased.
        self.list = [] if items is None else list(items)

    def pop(self):
        """Remove and return the top element; raise EmptyStackError if empty."""
        if not self.list:
            raise Stack.EmptyStackError
        return self.list.pop()

    def top(self):
        """Return the top element without removing it; raise EmptyStackError if empty."""
        if not self.list:
            raise Stack.EmptyStackError
        return self.list[-1]

    def push(self, val):
        """Place val on top of the stack."""
        self.list.append(val)

    def __len__(self):
        return len(self.list)

    def __str__(self):
        return str(self.list)
def test_stack():
    """Exercise Stack manually: push/top/pop, len/str, and underflow."""
    stack = Stack(list(range(2)))
    stack.push(7)
    stack.push(20)
    print(len(stack))
    print(stack)
    print()
    # top() is non-destructive, so it reports the same value twice.
    print(stack.top())
    print(stack.top())
    print()
    # Drain all four elements in LIFO order.
    for _ in range(4):
        print(stack.pop())
    print()
    # Popping an empty stack must raise EmptyStackError.
    try:
        print(stack.pop())
    except Stack.EmptyStackError:
        pass
    else:
        assert False, "Stack did not raise"
def main():
    """Run the manual stack self-test."""
    test_stack()
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| true |
4563c8fff3893b37130374d8b433cc6153bf6260 | Python | FelixTornqvist/DeepCoder-device-management | /protocol_adapter/main.py | UTF-8 | 1,778 | 2.5625 | 3 | [] | no_license | import mqtt_adapter as mqtt
import coap_adapter as coap
import adapter_list
import paho.mqtt.client as paho
import time
# Registry of live broker connections, keyed by str(adapter_factory) + ip
# (the same key scheme used in try_all_protocols below).
connected_adapters = {}
# Ordered collection of known protocol adapters to probe.
ada_useage = adapter_list.adapter_list([])
ada_useage.add(coap.coap_adapter)
# NOTE(review): coap_adapter is registered twice - confirm this is
# intentional (e.g. seeding a usage counter) and not a copy/paste slip.
ada_useage.add(coap.coap_adapter)
ada_useage.add(mqtt.mqtt_adapter)
def try_all_protocols(ip):
found = True
for (_, init_func) in ada_useage.adapters:
if str(init_func) + ip in connected_adapters:
print "Broker already registered"
return
for (_, init_func) in ada_useage.adapters:
try:
new_adapter = init_func(ip)
found = True
except Exception as e:
print "Unable to connect using", init_func
print "Connection exception:", e
found = False
if found:
connected_adapters[str(init_func) + ip] = new_adapter
ada_useage.add(init_func)
print 'Connected'
break
# Minimal interactive console (Python 2): read commands until 'end'.
in_str = ''
while in_str != 'end':
    print
    in_str = raw_input('>')
    args = in_str.split(' ')
    # 'end' terminates the console loop.
    if args[0] == 'end':
        break
    # 'listSA <broker-key>': show a broker's smart-agent connections.
    elif args[0] == 'listSA':
        key = args[1]
        connected_adapters[key].print_connections()
    # 'addSA <broker-key> <a> <b>': register a smart agent on a broker
    # (the meaning of <a>/<b> is defined by the adapter's register_sa).
    elif args[0] == 'addSA':
        key = args[1]
        connected_adapters[key].register_sa(args[2], args[3])
    # 'listBrokers': dump connected brokers and the adapter usage list.
    elif args[0] == 'listBrokers':
        print "Connected brokers:"
        for i in connected_adapters:
            print i, connected_adapters[i]
        print "\nUseage of adapters:"
        for (use, adapter) in ada_useage.adapters:
            print use, adapter
    # 'addBroker <ip>': try every known protocol against the given host.
    elif args[0] == 'addBroker':
        try_all_protocols(args[1])
# Shut down every live connection before exiting.
for i in connected_adapters:
    print i
    connected_adapters[i].stop()
print '\n'
quit()
| true |
ab894106f05470604d1fc9a66f9c8f879db42758 | Python | aihill/meetup2 | /util.py | UTF-8 | 2,434 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | """
Utility functions for pattern recognition examples
"""
import os
import glob
import numpy as np
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
def create_index_files(source_path, train_percent=90, pattern='*'):
    """Create (or reuse) train/validation index CSVs for a class-per-folder dataset.

    Each immediate subdirectory of ``source_path`` is treated as one class;
    classes are labelled 0..N-1 in sorted directory-name order.  Files
    matching ``pattern`` in each class directory are shuffled with a fixed
    seed and the first ``train_percent`` percent go into the training index,
    the remainder into the validation index.  Both CSVs are written next to
    ``source_path``; if they already exist they are reused untouched.

    Returns the (train_index_path, validation_index_path) pair.
    """
    assert os.path.exists(source_path)
    train_idx = os.path.join(source_path, os.pardir, 'music-train-index.csv')
    valid_idx = os.path.join(source_path, os.pardir, 'music-valid-index.csv')
    if os.path.exists(train_idx) and os.path.exists(valid_idx):
        # Index files from a previous run - reuse them as-is.
        return train_idx, valid_idx

    class_dirs = [entry for entry in glob.iglob(os.path.join(source_path, '*'))
                  if os.path.isdir(entry)]
    class_names = sorted(os.path.basename(entry) for entry in class_dirs)
    label_of = {name: label for label, name in enumerate(class_names)}

    # Fixed seed so the train/validation split is reproducible across runs.
    np.random.seed(0)
    with open(train_idx, 'w') as train_fd, open(valid_idx, 'w') as valid_fd:
        train_fd.write('filename,label\n')
        valid_fd.write('filename,label\n')
        for class_dir in class_dirs:
            label = label_of[os.path.basename(class_dir)]
            files = glob.glob(os.path.join(class_dir, pattern))
            np.random.shuffle(files)
            cutoff = (len(files) * train_percent) // 100
            for position, filename in enumerate(files):
                # The first `cutoff` shuffled files train; the rest validate.
                fd = train_fd if position < cutoff else valid_fd
                rel_path = os.path.join(os.path.basename(class_dir),
                                        os.path.basename(filename))
                fd.write(rel_path + ',' + str(label) + '\n')
    return train_idx, valid_idx
def display(model, layer_names, sel):
    """Save PNG images of selected layers' buffers under imgs/<sample>/.

    sel selects which per-layer buffer to render ('inputs' uses the layer's
    in_shape, anything else its out_shape).  Layers whose reshaped data has
    fewer than three dimensions are skipped.

    NOTE(review): assumes model.layers.layers entries expose .name,
    .in_shape/.out_shape and a GPU-style buffer with .get() - confirm
    against the framework in use.
    """
    for layer in model.layers.layers:
        if layer.name not in layer_names:
            continue
        buffers = getattr(layer, sel)
        item_shape = layer.in_shape if sel == 'inputs' else layer.out_shape
        # NOTE(review): columns 12 and 84 look hand-picked samples; confirm.
        for sample in (12, 84):
            imgs = buffers[:, sample].get().reshape(item_shape)
            if len(imgs.shape) < 3:
                # Nothing image-like to plot for flat activations.
                continue
            out_dir = os.path.join('imgs', str(sample))
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            for channel in range(imgs.shape[0]):
                fname = os.path.join(out_dir, layer.name + '.' + sel + '.' +
                                     str(channel) + '.png')
                plt.imshow(imgs[channel])
                plt.savefig(fname, bbox_inches='tight')
| true |
747f55e1da2c1f3c6204bcb4ebf15a55f89be7c3 | Python | mpl-makers-club/bootcamp29Jun21 | /testPython.py | UTF-8 | 87 | 3.640625 | 4 | [] | no_license | print("hello")
# Print the 5-times table for multipliers 1 through 12.
for multiplier in range(1, 13):
    print("%d times 5 = %d" % (multiplier, multiplier * 5))
| true |
63fdcceccfe583d4d3ffdb64b0d343c2d42ffd3a | Python | sligocki/busy-beaver | /Code/test_Halting_Lib.py | UTF-8 | 1,132 | 2.5625 | 3 | [] | no_license | #! /usr/bin/env python3
import Halting_Lib
import unittest
import io_pb2
def set_get_big_int(value):
  """Round-trip `value` through a BigInt proto and return the decoded result."""
  proto = io_pb2.BigInt()
  Halting_Lib.set_big_int(proto, value)
  return Halting_Lib.get_big_int(proto)
class SystemTest(unittest.TestCase):
  """Round-trip tests for the BigInt helpers in Halting_Lib."""

  def test_big_int_success(self):
    # Representative values from tiny integers up to astronomically large.
    for value in (0, 138, 1_000_000, 1_000_000_000_000,
                  1_000_000_000_000_000_000_000_000, 13**138):
      self.assertEqual(set_get_big_int(value), value)

  def test_big_int_near_split(self):
    # Values straddling the 64-bit boundary, where the proto presumably
    # switches between integer representations.
    for value in (2**64 - 1, 2**64, 2**64 + 1):
      self.assertEqual(set_get_big_int(value), value)

  def test_big_int_error(self):
    # Negative values are rejected outright ...
    with self.assertRaises(ValueError):
      set_get_big_int(-1)
    # ... and non-integers are a type error.
    with self.assertRaises(TypeError):
      set_get_big_int(13.8)
# Allow running this module directly as a test script.
if __name__ == "__main__":
  unittest.main()
| true |
00b7812eaa2b4a44e4ad863235210ecb7279c638 | Python | Excellentc/HILLEL_LES | /Lesson 7/DZ_18_season.py | UTF-8 | 720 | 4.6875 | 5 | [] | no_license | """
ะะฐะฟะธัะฐัั ััะฝะบัะธั season, ะฟัะธะฝะธะผะฐัััั 1 ะฐัะณัะผะตะฝั โ ะฝะพะผะตั ะผะตัััะฐ (ะพั 1 ะดะพ 12),
ะธ ะฒะพะทะฒัะฐัะฐัััั ะฒัะตะผั ะณะพะดะฐ, ะบะพัะพัะพะผั ััะพั ะผะตััั ะฟัะธะฝะฐะดะปะตะถะธั (ะทะธะผะฐ, ะฒะตัะฝะฐ, ะปะตัะพ ะธะปะธ ะพัะตะฝั).
"""
def season(x1):
    """Return the season name for month number x1 (1-12).

    Months 12, 1 and 2 map to winter, 3-5 to spring, 6-8 to summer and
    9-11 to autumn.  For any other value an "invalid input" message is
    printed and None is returned implicitly.  (The season strings are
    stored with a damaged encoding - apparently Russian originally.)
    """
    # Winter wraps around the end of the year.
    if x1 == 12 or 0 < x1 <= 2:
        return "ะะธะผะฐ"
    if 2 < x1 <= 5:
        return "ะะตัะฝะฐ"
    if 5 < x1 <= 8:
        return "ะะตัะพ"
    if 8 < x1 <= 11:
        return "ะัะตะฝั"
    print("ะะตะฟัะฐะฒะธะปัะฝัะน ะฒะฒะพะด")
# Simple CLI driver: ask for a month number and report its season.
x = int(input("ะะฒะตะดะธัะต โ ะผะตัััะฐ ะณะพะดะฐ: "))
print()
y = season(x)
print(f"{x} ะผะตััั ะณะพะดะฐ, ััะพ - ", y)
| true |
6ae64c4173aaa88aea80f63e0f0d778e1635d82a | Python | fly8764/LeetCode | /Bit Manipulation/268 missingNumber.py | UTF-8 | 1,431 | 4.03125 | 4 | [] | no_license | '''
ๆนๆณไธ๏ผไฝ่ฟ็ฎ
0ๅฐnไธๅ
ฑn+1ไธชๆฐๅญ๏ผๅๅ
ถไธญ็nไธชๆฐๅญ๏ผ็ปๅฎ็็ดขๅผ0ๅฐn-1,
ๅผๆๆไฝ๏ผๆฏไธชๆฐๅญๅ่ช่บซๅผๆ็ปๆไธบ0๏ผ้ถไธๅ
ถไปๅผๅผๆ่ฟ็ญไบๅ
ถไปๅผ
ๆไปฅ๏ผ่กฅๅ
ไธไธช็ดขๅผ๏ผไฝฟๅพ็ดขๅผไป0ๅฐnๅฎๆด๏ผ็ถๅๅๆฐ็ปไธญ็ๅผ็ดฏ็งฏๅผๆ๏ผ
ๅฉไธ็ๅผๅฐฑๆฏ็ผบๅคฑๅ
็ด ใ
T(n) = n O(n) = o(l)
ๆนๆณไบ๏ผ
็ฑปไผผไบไธ้ข็ไฝ่ฟ็ฎ๏ผไธ่ฟไฝฟ็จ็ๆฐๅผๆฑๅ ๅๅทฎ๏ผ
ๆ่ทฏๆบ่ชไบ ็ญๅทฎๆฐๅๆฑๅ๏ผๆญฃๅธธ็ๅไธบ0-n๏ผๅฎ้
ๆฐ็ป็ผบๅคฑไธไธชๅ
็ด ๏ผ
ไธค่
้้กนๅๅทฎ ๅนถๆฑๅๅณๅฏใไธป่ฆ้ฒๆญข ๆดๅๆบขๅบใ
T(n) = n O(n) = o(l)
ๆนๆณไธ๏ผ
ๅๅธ่กจ T(n) = n O(n) = o(n)
ๆนๆณๅ๏ผ
ๆๅบ T(n) = nlogn O(n) = o(l)
'''
class Solution(object):
    """LeetCode 268: find the number missing from 0..n given n distinct values.

    Fixes extraction damage: the original in-code comments were hard-wrapped
    mid-line, leaving orphan column-0 fragments that broke the syntax; they
    are restored here as proper (translated) comments.
    """
    def missingNumber(self, nums):
        """XOR approach - O(n) time, O(1) extra space.

        :type nums: List[int]
        :rtype: int

        XOR-ing every index with every value cancels all matching pairs,
        leaving only the missing number (len(nums) supplies the extra
        index n that has no corresponding array slot).
        """
        size = len(nums)
        res = 0
        # Fold in the extra index n first.
        res ^= size
        # XOR each index with its value; duplicates cancel out.
        for i in range(size):
            res ^= i ^ nums[i]
        return res
    def missingNumber1(self, nums):
        """Arithmetic approach: expected index sum minus actual value sum.

        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        res = 0
        # Start from the extra index n, then accumulate index-value gaps.
        res += size
        for i in range(size):
            res += i - nums[i]
        return res
# Quick manual check: expected outputs are 2 and 8.
if __name__ == '__main__':
    so = Solution()
    print(so.missingNumber([3,0,1]))
    print(so.missingNumber([9,6,4,2,3,5,7,0,1]))
| true |
1a6a6b8457d281f845c444abd23d2fef5434e27b | Python | onkgp/Projects | /Jump To Python/While_170306.py | UTF-8 | 2,739 | 4.125 | 4 | [] | no_license | # pylint: disable=C0103
# pylint: disable=C0301
# pylint: disable=C0321
# ๋ฐ๋ณตํด์ ๋ฌธ์ฅ์ ์ํํด์ผ ํ ๊ฒฝ์ฐ While๋ฌธ์ ์ฌ์ฉํ๋ค. ๊ทธ๋์ While๋ฌธ์ ๋ฐ๋ณต๋ฌธ์ด๋ผ๊ณ ๋ ๋ถ๋ฅธ๋ค.
'''๊ธฐ๋ณธ๊ตฌ์กฐ
while <์กฐ๊ฑด๋ฌธ>:
<์ํํ ๋ฌธ์ฅ1>
<์ํํ ๋ฌธ์ฅ2>
<์ํํ ๋ฌธ์ฅ3>
while๋ฌธ์ ์กฐ๊ฑด๋ฌธ์ด ์ฐธ์ธ ๋์์ while๋ฌธ ์๋์ ์ํ๋ ๋ฌธ์ฅ๋ค์ด ๋ฐ๋ณตํด์ ์ํ๋๋ค.
'''
# Ex) "์ด ๋ฒ ์ฐ์ด ์ ๋์ด ๊ฐ๋ ๋๋ฌด ์๋ค"๋ผ๋ ์๋ด์ ๋ง๋ค๋ฉด ๋ค์๊ณผ ๊ฐ๋ค.
# Proverb demo: the tree falls after ten axe blows.  Repairs extraction
# damage: the trailing comment had been hard-wrapped, leaving an orphan
# column-0 fragment that broke the syntax.
treeHit = 0
while treeHit < 10:  # keep swinging while fewer than ten hits
    treeHit = treeHit + 1
    print("๋๋ฌด๋ฅผ %d๋ฒ ์ฐ์์ต๋๋ค." % treeHit)
    if treeHit == 10:  # the tenth hit fells the tree
        print("๋๋ฌด ๋์ด๊ฐ๋๋ค.")
# After the tenth hit the condition 10 < 10 is false, so the loop exits.
# While๋ฌธ ์ง์ ๋ง๋ค๊ธฐ (์๋ ์์ ๋ IDLE์์ ์งํํด์ผ ํจ)
'''
prompt = """
1. Add
2. Del
3. List
4. Quit
Enter Number : """
number = 0
while number != 4: #number๊ฐ 4๊ฐ ์๋ ๋์ prompt๋ฅผ ์ถ๋ ฅํ๋ค.
print(prompt)
number = int(input()) #์ฌ์ฉ์์ ์ซ์๋ฅผ ์
๋ ฅ ๋ฐ๋๋ค.
'''
# While๋ฌธ ๊ฐ์ ๋ก ๋น ์ ธ๋๊ฐ๊ธฐ
# Vending-machine demo: `money` never changes inside the loop, so the
# condition is always truthy and the loop only ends via the break once the
# coffee runs out.  Repairs extraction damage: one print's string literal
# and the break comment had been hard-wrapped mid-line (syntax errors).
coffee = 10
money = 300  # fixed non-zero value: the loop condition is always truthy
while money:
    print("๋์ ๋ฐ์์ผ๋ ์ปคํผ๋ฅผ ์ค๋๋ค.")
    coffee = coffee -1
    # String literal rejoined after being split across two lines.
    print("๋จ์ ์ปคํผ์ ์์ %d๊ฐ ์๋๋ค." % coffee)
    if coffee:  # intentionally left as in original? no - see below
        pass
    if not coffee:  # coffee == 0 -> stop selling
        print("์ปคํผ๊ฐ ๋ค ๋จ์ด์ก์ต๋๋ค. ํ๋งค๋ฅผ ์ค์งํฉ๋๋ค.")
        break  # leave the otherwise-infinite while loop
# Break๋ฌธ ์ด์ฉํด ์ํ๊ธฐ ์๋ ๊ณผ์ ๋ง๋ค๊ธฐ - While_break_170306.py ํ์ผ ํ์ธ
# ์กฐ๊ฑด์ ๋ง์ง ์๋ ๊ฒฝ์ฐ ๋งจ ์ฒ์์ผ๋ก ๋์๊ฐ๊ธฐ(Continue)
# Continue demo: print only the odd numbers from 1 to 9.  Repairs
# extraction damage: the inline comment had been hard-wrapped, leaving an
# orphan column-0 fragment that broke the syntax.
a = 0
while a < 10:
    a = a + 1
    if a % 2 == 0: continue  # even -> jump straight back to the condition
    print(a)  # reached only when a is odd
# ๋ฌดํ ๋ฃจํ (์๋ ์์ ๋ IDLE์์ ์งํํด์ผ ํจ)
'''๊ธฐ๋ณธ๊ตฌ์กฐ
while True:
<์ํํ ๋ฌธ์ฅ1>
<์ํํ ๋ฌธ์ฅ2>
While ์กฐ๊ฑด๋ฌธ์ด True์ด๋ฏ๋ก ํญ์ ์ฐธ์ด ๋๋ค. ๋ฐ๋ผ์ While๋ฌธ ์์ ์๋ ๋ฌธ์ฅ๋ค์ ๋ฌดํํ๊ฒ ์ํ๋๋ค.
'''
'''
while True:
print("Ctrl+C๋ฅผ ๋๋ฌ์ผ while๋ฌธ์ ๋น ์ ธ๋๊ฐ ์ ์์ต๋๋ค.")
'''
| true |
3c9e651d6a402b24c50770acf69eed3a6fe84756 | Python | snapbuy/dialogpt-chat | /dialogpt_chat/dialogpt.py | UTF-8 | 5,825 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
class DialoGPT(object):
    """Multi-user chat wrapper around Microsoft's DialoGPT causal LM."""

    def __init__(self, size, device, max_context_length=48):
        """
        Modeling class for Dialo GPT
        Args:
            size (str): model size. must be one of ['small', 'medium', 'large']
            device (str): model device. should be one of ['cpu', 'cuda', 'cuda:n']
            max_context_length (int): max context length (number of input history tokens)
        Notes:
            format of histories:
            self.histories = {
                user_1 : {'user': [] , 'bot': []},
                user_2 : {'user': [] , 'bot': []},
                ...more...
                user_n : {'user': [] , 'bot': []},
            }
            paper (arXiv):
            https://arxiv.org/abs/1911.00536
        Examples:
            >>> # chatting with DialoGPT on terminal mode.
            >>> # The model size must be one of the [small, medium, large].
            >>> # type '/exit' if you want to exit dialogue.
            >>> # type '/clear' if you want to clear all histories
            >>> gpt = DialoGPT(size="large", device="cuda")
            >>> gpt.run()
            user : Hello.
            bot : How are you?
            user : I'm great. it is a nice day.
            bot : That's good.
            user : Who is CEO of Apple?
            bot : Steve Jobs.
            user : /clear
            bot : history cleared.
            user : /exit
            bot : bye.
            >>> # chatting with DialoGPT by user id. (single-turn)
            >>> gpt = DialoGPT(size="large", device="cuda")
            >>> gpt.predict(user_id="USER_ID", text="Hello.")
            >>> # chatting with DialoGPT by user id. (multi-turn)
            >>> while True:
            ...    _in = input('user : ')
            ...    _out = gpt.predict(user_id="USER_ID", text=_in)
            ...    print(f"bot : {_out}")
            >>> # you can check dialogue histories
            >>> gpt.histories
            {
                user_1 : {'user': [] , 'bot': []},
                user_2 : {'user': [] , 'bot': []},
                ...more...
                user_n : {'user': [] , 'bot': []},
            }
            >>> # you can clear all dialogue histories
            >>> gpt.clear(user_id="USER_ID")
        """
        assert size in ['small', 'medium', 'large'], \
            "model size must be one of ['small', 'medium', 'large]"
        self.model_name = f"microsoft/DialoGPT-{size}"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
        # Inference only: eval mode, moved to the requested device.
        self.model = self.model.eval().to(device)
        self.max_context_length = max_context_length
        # Per-user dialogue history, see "Notes" in the docstring above.
        self.histories = {}
        self.device = device
        # DialoGPT's end-of-utterance marker.
        self.eos = "<|endoftext|>"

    @torch.no_grad()
    def predict(
        self,
        user_id: str,
        text: str,
        num_beams: int = 10,  # paper's setting
        top_k: int = 10,  # paper's setting
        top_p: float = None,  # do not use top-p sampling
    ) -> str:
        """
        dialogue with Dialo GPT
        Args:
            user_id (str): user id
            text (str): user's input text
            num_beams (int): size of beam width
            top_k (int): K for top-K sampling
            top_p (float): P for top-P sampling
        Returns:
            (str): model's next utterance
        """
        # Release cached GPU memory before building the next input batch.
        torch.cuda.empty_cache()
        input_ids_list: list = []
        num_of_stacked_tokens: int = 0
        # First contact with this user: initialise an empty history.
        if user_id not in self.histories.keys():
            self.clear(user_id)
        # Walk the history newest-first so the most recent turns are kept
        # when the max_context_length token budget runs out.
        user_histories = reversed(self.histories[user_id]['user'])
        bot_histories = reversed(self.histories[user_id]['bot'])
        for user, bot in zip(user_histories, bot_histories):
            user_tokens = self.tokenizer.encode(user, return_tensors='pt')
            bot_tokens = self.tokenizer.encode(bot, return_tensors='pt')
            num_of_stacked_tokens += user_tokens.shape[-1] + bot_tokens.shape[-1]
            if num_of_stacked_tokens <= self.max_context_length:
                # Appended newest-first; reversed below to restore order.
                input_ids_list.append(bot_tokens)
                input_ids_list.append(user_tokens)
            else:
                break
        input_ids_list = list(reversed(input_ids_list))
        # Current user utterance, terminated by the EOS marker.
        new_input = text + self.eos
        input_tokens = self.tokenizer.encode(new_input, return_tensors='pt')
        input_ids_list.append(input_tokens)
        input_tokens = torch.cat(input_ids_list, dim=-1)
        input_tokens = input_tokens.to(self.device)
        output_ids = self.model.generate(
            input_tokens,
            max_length=1024,
            pad_token_id=self.tokenizer.eos_token_id,
            num_beams=num_beams,
            top_k=top_k,
            top_p=top_p,
            no_repeat_ngram_size=4,
        )
        # Decode only the newly generated tokens (everything after the prompt).
        next_utterance = self.tokenizer.decode(
            output_ids[:, input_tokens.shape[-1]:][0],
            skip_special_tokens=True,
        )
        # Record both sides of the turn for future context.
        self.histories[user_id]['user'].append(text + self.eos)
        self.histories[user_id]['bot'].append(next_utterance + self.eos)
        return next_utterance

    def clear(self, user_id):
        """Reset the dialogue history for `user_id`."""
        self.histories[user_id] = {'user': [], 'bot': []}

    def run(self):
        """Interactive terminal chat loop ('/exit' quits, '/clear' resets)."""
        while True:
            _in = input("user : ")
            if _in == "/exit":
                print(f"bot : bye.")
                break
            elif _in == "/clear":
                print(f"bot : history cleared.")
                self.clear("user_id")
            else:
                _out = self.predict(user_id="user_id", text=_in)
                print(f"bot : {_out}")
| true |
85828d29fc36afd1167485de8bb3b332b8b4ece5 | Python | JoeLittell/dockertest | /app.py | UTF-8 | 1,137 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 22:33:57 2020
@author: josep
"""
import pandas as pd
import plotly.express as px
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
# Load the dataset; one dropdown option per CSV column.
terror = pd.read_csv('./terrorism.csv')
col_options = [dict(label=x, value=x) for x in terror.columns]
# The two plot axes the user can bind to columns.
dimensions = ["x", "y"]
app = dash.Dash(
    __name__, external_stylesheets=["https://codepen.io/chriddyp/pen/bWLwgP.css"]
)
# Layout: a title, one dropdown per axis on the left, the graph on the right.
app.layout = html.Div(
    [
        html.H1("US Terrorism Data from 1970 to 2017"),
        html.Div(
            [
                html.P([d + ":", dcc.Dropdown(id=d, options=col_options)])
                for d in dimensions
            ],
            style={"width": "25%", "float": "left"},
        ),
        dcc.Graph(id="graph", style={"width": "75%", "display": "inline-block"}),
    ]
)
# Re-draw the line chart whenever either dropdown value changes.
@app.callback(Output("graph", "figure"), [Input(d, "value") for d in dimensions])
def make_figure(x, y):
    """Return a plotly line figure of column `y` against column `x`."""
    return px.line(terror,
                   x=x,
                   y=y,
                   height=700)
app.run_server(debug=False)
6c5fd9e56e6325d59ccfcaa464b2e3c99d076c0d | Python | rindhaf/praxis-academy | /novice/01-01/latihan/lists.py | UTF-8 | 122 | 2.84375 | 3 | [] | no_license | rindha = [1, 2, 5, 6, 8]
print(rindha)
print(rindha[-3])   # third element counted from the end
print(rindha[4:])   # slice from index 4 to the end
# Bug fix: the original bare expression `rindha + [10, 18, 19, 20, 21]`
# built a new list and immediately discarded it, so the final print showed
# the unchanged list. Extend in place so the extension is visible.
rindha.extend([10, 18, 19, 20, 21])
print(rindha)
ded8bd53cda307eca838f6d17050c12f843767a1 | Python | takiyu/gtrans-web-gui | /gtransweb_gui/callable_buffer.py | UTF-8 | 1,328 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from collections import deque
from threading import Timer
import time
from PyQt5 import QtCore
# logging
from logging import getLogger, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class CallableBuffer:
    """Debounce helper: buffers calls for `buftime` seconds and then invokes
    only the newest queued callback via a Qt single-shot timer."""

    def __init__(self):
        self._query = None  # Only newest one (callback, args, kwargs)
        self._timer = False  # True while a single-shot timer is pending
        self._buftime = 0.5  # buffering window in seconds

    def get_buftime(self):
        ''' Get buffering time (sec) '''
        return self._buftime

    def set_buftime(self, buftime):
        ''' Set buffering time (sec) '''
        self._buftime = buftime

    def __call__(self, callback, *args, **kwargs):
        """Queue `callback(*args, **kwargs)`; older pending queries are replaced."""
        # Overwrite by new query
        self._query = (callback, args, kwargs)
        # Start new timer (QTimer.singleShot takes milliseconds)
        if not self._timer:
            self._timer = True
            QtCore.QTimer.singleShot(self._buftime * 1000, self._postcall)
            # self._timer = Timer(self._buftime, self._postcall)
            # self._timer.start()

    def _postcall(self):
        """Timer callback: fire the most recent query, if it is callable."""
        # Timer is finished
        self._timer = False
        # Decompose query
        callback, args, kwargs = self._query
        # Check callback function
        if not callable(callback):
            logger.error('Callback is not set')
            return
        # Call
        callback(*args, **kwargs)
| true |
d8dcf7a99450a214ad586309da0e4393d23a90ad | Python | hugomaiavieira/trabalhos_uenf | /introducao_comp_graf/curvas/teste.py | UTF-8 | 648 | 3.046875 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
# Autor: Hugo Henriques Maia Vieira
import unittest
from curvas import Curva
from numpy import array, allclose
# Coordinate column indices (x and y).
# NOTE(review): X and Y appear unused in this file — confirm before removing.
X=0
Y=1


class TesteCurvas(unittest.TestCase):
    """Unit tests for the Curva (curve) class."""

    def teste_adicionar_ponto(self):
        # Adding one point must store exactly that point.
        curva = Curva()
        ponto = array([2,2])
        curva.adicionar_ponto(ponto)
        self.assertEqual(len(curva.pontos), 1)
        self.assertTrue(allclose(curva.pontos[0], ponto))

    def teste_limpar(self):
        # limpar() must empty both the control points and the sampled curve.
        curva = Curva([array([2, 2]), array([5, 2])])
        curva.limpar()
        self.assertEqual(curva.pontos, [])
        self.assertEqual(curva.pontos_da_curva, [])


if __name__ == "__main__":
    unittest.main()
| true |
00c7f0fe86ddaa3e1d854c96053a634d6bb3a60b | Python | sbarenfe/AY190 | /ws4/ws4.py | UTF-8 | 983 | 3.578125 | 4 | [] | no_license | import numpy as np
def find_root(guess, omega, t, e):
    """Solve Kepler's equation E - e*sin(E) = omega*t for the eccentric
    anomaly E with Newton's method, starting from `guess`.

    Returns a tuple (E, iteration_count, last_relative_change).
    """
    def kepler(x):
        # Expression whose root we are finding.
        return x - omega * t - e * np.sin(x)

    def kepler_prime(x):
        # Derivative of `kepler` with respect to x.
        return 1 - e * np.cos(x)

    current = guess
    rel_change = 1.
    iterations = 0
    # Iterate until the relative step size drops below 1e-10.
    while np.abs(rel_change) >= 10 ** (-10):
        previous = current
        current = previous - kepler(previous) / kepler_prime(previous)
        iterations += 1
        rel_change = np.abs(current - previous) / previous
    # Eccentric anomaly, number of Newton steps, and the final relative change.
    return current, iterations, rel_change
def one():
    """Solve Kepler's equation at three sample times for a highly eccentric
    orbit and print the anomalies, iteration counts, ratios and (x, y)."""
    # Orbital constants.
    T = 365.25635            # period in days
    # e = 0.0167             # part a)
    e = 0.99999              # part b)
    a = 1.0                  # semi-major axis, AU
    b = a * (1 - e ** 2) ** (0.5)   # semi-minor axis
    omega = 2 * np.pi / T           # mean angular velocity
    times = np.array([91, 182, 273])

    anomalies = np.zeros(len(times))
    step_counts = np.zeros(len(times))
    last_ratios = np.zeros(len(times))
    # Use find_root (defined above) to solve for E at each sample time.
    for idx in range(len(anomalies)):
        anomalies[idx], step_counts[idx], last_ratios[idx] = \
            find_root(2, omega, times[idx], e)

    # Cartesian coordinates on the orbital ellipse.
    x = a * np.cos(anomalies)
    y = b * np.sin(anomalies)

    # Print the results.
    print(anomalies)
    print(step_counts)
    print(last_ratios)
    print(x)
    print(y)
| true |
448c5adb03fe1d5e2f405c354de8cb5519fa67ca | Python | Harvard-University-iCommons/lti_emailer | /mailgun/exceptions.py | UTF-8 | 748 | 3 | 3 | [] | no_license | from django.http import HttpResponse
class HttpResponseException(RuntimeError):
    '''Encapsulates a django.http.HttpResponse. Intended to be caught at the
    top level of a view (or by a view decorator) to allow code within a view
    to immediately return a response.'''

    def __init__(self, response):
        """Wrap `response`; raises TypeError if it is not an HttpResponse."""
        if not isinstance(response, HttpResponse):
            # Bug fix: the adjacent string literals were missing a space,
            # producing "...HttpResponse, gota <type>" in the message.
            raise TypeError(
                'HttpResponseException expected an HttpResponse, got '
                'a {}'.format(type(response)))
        self.response = response

    def __str__(self):
        # Human-readable summary: response type, status code and body.
        return 'HttpResponseException<{} {}>: {}'.format(
            type(self.response), self.response.status_code,
            str(self.response))

    # Python 2-era hook kept as an alias for backward compatibility.
    __unicode__ = __str__
| true |
85832fc52828557fcfc5596be205506e43b0e431 | Python | ornlneutronimaging/IPTS_14382 | /image_processing.py | UTF-8 | 1,188 | 3.109375 | 3 | [] | no_license | import numpy as np
from scipy.ndimage import convolve
def gamma_filtering(data_array, threshold=0.1):
    """Apply single_gamma_filtering to every image in `data_array`.

    Args:
        data_array: iterable of 2-D numpy arrays.
        threshold: relative threshold forwarded to single_gamma_filtering.
            (Bug fix: this argument was previously accepted but silently
            ignored — the per-image call always used the default.)

    Returns:
        list of filtered copies, in the same order as the input.
    """
    return [single_gamma_filtering(_data, threshold=threshold)
            for _data in data_array]

def single_gamma_filtering(data, threshold=0.1):
    """Replace gamma-spike pixels with the mean of their 8 neighbours.

    A pixel is treated as a gamma hit when pixel * threshold exceeds the
    image mean, i.e. the pixel is brighter than mean/threshold counts.
    The input array is copied, never modified in place.
    """
    raw_data = np.copy(data)
    # find mean counts
    mean_counts = np.mean(raw_data)
    # Mean of the 8 surrounding pixels (centre weight 0); zero padding at edges.
    mean_kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]]) / 8.0
    convolved_data = convolve(raw_data, mean_kernel, mode='constant')
    # Vectorised replacement of the former per-pixel Python loops:
    # same strict comparison, applied to the whole image at once.
    spike_mask = raw_data * threshold > mean_counts
    raw_data[spike_mask] = convolved_data[spike_mask]
    return raw_data
7eb3969443b1eb83233f8da220a028cdd000871c | Python | yashAgarwal41/Awesome-Projects-Collection | /Amazing Python Projects_Scripts/PomodoroTimer/Pomodoro.py | UTF-8 | 1,658 | 3.3125 | 3 | [] | no_license | import time
import datetime as dt
import tkinter
from tkinter import messagebox
import winsound
tnow=dt.datetime.now() #Current time for reference
tpom=25*60 #pomodoro time (seconds)
tdelta=dt.timedelta(0,tpom) #time delta in mins
tfur=tnow+tdelta #Future time,ending pomodoro
deltasec=5*60 #Break time ,after pomodoro (seconds)
tfin=tnow+dt.timedelta(0,tpom+deltasec) #Final time (wait/5 min break)
# Hide the tkinter root window; only message boxes are shown.
root=tkinter.Tk()
root.withdraw()
messagebox.showinfo("Pomodoro Started !","\n It is now "+tnow.strftime("%H:%M")+"hrs. \n Timer set for 25 mins...")
total=0   # completed pomodoros
breaks=0  # flag: break beep already played for this cycle
# Poll every 20 s and act on which phase (work / break / finished) we are in.
# NOTE(review): winsound is Windows-only — this script will not run elsewhere.
while True:
    if tnow < tfur:
        # Still inside the 25-minute work period.
        print("Pomodoro")
    elif tfur<=tnow<=tfin:
        # Inside the 5-minute break window.
        print("In Break")
        if breaks==0:
            # Beep once at the start of the break.
            print("If break")
            for i in range(5):
                winsound.Beep((i+100),700)
            print('Break time !')
            breaks=breaks+1
    else:
        # Break is over: ask whether to start another pomodoro.
        print("Finished")
        breaks=0
        for i in range(10):
            winsound.Beep((i+100),500)
        userans=messagebox.askyesno("Pomodoro Finished ,Would you like to start another pomodoro ?")
        total=total+1
        if userans==True:
            # Restart the work and break deadlines from now.
            tnow=dt.datetime.now()
            tfur=tnow+dt.timedelta(0,tpom)
            tfin=tnow+dt.timedelta(0,tpom+deltasec)
            continue
        elif userans==False:
            messagebox.showinfo("Pomodoro Finished !","\n You completed "
                +str(total)+"pomodoros today !")
            break
    print("Sleeping")
    time.sleep(20)
    tnow=dt.datetime.now()
    # NOTE(review): timenow is computed but never used.
    timenow=tnow.strftime("%H:%M")
| true |
855039efa9e1e6f2f938363c5067f8d7b77a2fb3 | Python | 507-w20-instructors/week10-moredb | /count_unique_limit.py | UTF-8 | 695 | 3.578125 | 4 | [] | no_license | '''
Write and test queries to get the following data:
- Return the number of customers in the North America Region
- Return the number of unique Titles held by Customers
- Return only the top 3 most heavily stocked Products (those with the largest UnitsInStock values)
'''
import sqlite3
# Open the sample Northwind database (file must sit next to this script).
conn = sqlite3.connect('Northwind_small.sqlite')
cur = conn.cursor()
# Query 1: number of customers in the North America region.
q1 = '''
SELECT COUNT(*)
FROM Customer
WHERE Region="North America"
'''
# Query 2: number of unique contact titles held by customers.
q2 = '''
SELECT COUNT(DISTINCT ContactTitle)
FROM Customer
'''
# Query 3: the three most heavily stocked products.
q3 = '''
SELECT ProductName
FROM Product
ORDER BY UnitsInStock DESC
LIMIT 3
'''
# Run each query and print its rows, separated by a ruler line.
for q in [q1, q2, q3]:
    cur.execute(q)
    for row in cur:
        print(row)
    print('-' * 60)
conn.close()
a9a2e98454353ffbadea5c6d0e368d1ffb6c8b37 | Python | rubenvds/SecurLogin | /show_database.py | UTF-8 | 157 | 2.71875 | 3 | [] | no_license | import sqlite3
# Dump every row of the `users` table in the local SQLite database.
conn = sqlite3.connect('test.db')
cursor = conn.execute("SELECT * from `users`")
for row in cursor:
    print(row)
# NOTE(review): commit() is a no-op here — only a SELECT was executed.
conn.commit()
conn.close()
e64095ba2ec85705540f3ab2f68041a5d08a0e47 | Python | odys-z/hello | /hellopy/nlp/try-worldfreq.py | UTF-8 | 1,299 | 3.3125 | 3 | [
"MIT"
] | permissive | '''
https://pypi.org/project/wordfreq/#description
'''
from wordfreq import zipf_frequency, get_frequency_dict
'''
f = zipf_frequency('frequency', 'en')
print('word: "{0}"\tfrequency: {1}'.format('frequency', f))
'''
# Full word->frequency mapping for English ("best" word list).
d = get_frequency_dict('en', wordlist='best')
'''
f = open('e-5 4e-6_.word', 'w')
# frequency > 0.0001, words = 1068
cnt = 0;
for w in d:
    if d[w] > 0.000004 and d[w] < 0.00001:
        cnt += 1
        f.write('{0} {1}\n'.format(w, d[w]))
f.close()
print("Writen {} lines into freq.word.".format(cnt))
'''
def get_wordfreq(frange):
    """Write all words whose frequency lies in [wlow, whigh) to
    '<fname>.freq.word' and print how many were written.

    Args:
        frange: (fname, whigh, wlow) tuple — note high bound comes first.
    """
    fname, whigh, wlow = frange
    f = open('{}.freq.word'.format(fname), 'w')
    cnt = 0;
    for w in d:
        if wlow <= d[w] and d[w] < whigh:
            cnt += 1
            f.write('{0} {1}\n'.format(w, d[w]))
    f.close()
    print("{} : {}".format(fname, cnt))
'''
1_e-4 : 1087
e-4_e-5 : 5996
e-5_4e-6 : 5583
4e-6_2e-6 : 6402
2e-6_e-6 : 9118
e-6_e-7 : 63566
e-7_0 : 210249
'''
# Frequency bands: (output file stem, upper bound, lower bound).
grades = [
    ('1_e-4', 1.0, 0.0001),
    ('e-4_e-5', 0.0001, 0.00001),
    ('e-5_4e-6', 0.00001, 0.000004),
    ('4e-6_2e-6',0.000004, 0.000002),
    ('2e-6_e-6', 0.000002, 0.000001),
    ('e-6_e-7', 0.000001, 0.0000001),
    ('e-7_0', 0.0000001, 0.0)
]
# Emit one word-list file per frequency band.
for fg in grades:
    get_wordfreq(fg)
| true |
a7bdcecaf2629bafde5b1eaaf84a9dbc04057ea7 | Python | klaus2015/py_base | /็ฌ่ซ1905/day08/ๆฐๆฟ้จ.py | UTF-8 | 3,759 | 2.8125 | 3 | [] | no_license | from selenium import webdriver
import pymysql
class GovSpider:
def __init__(self):
self.one_url = 'http://www.mca.gov.cn/article/sj/xzqh/2019/'
# ๆ ็้ขๆจกๅผ
options = webdriver.ChromeOptions()
options.add_argument('--headless')
self.browser = webdriver.Chrome(options=options)
self.db = pymysql.connect('127.0.0.1','root','123456',database='govdb',port=3306,charset='utf8')
self.cursor = self.db.cursor()
self.province = []
self.city = []
self.county = []
def get_html(self):
self.browser.get(self.one_url)
# ๆพ็ฌฌไธไธชๅ
ๅซไปฃ็ ็td็่็น
td = self.browser.find_element_by_xpath('//td[@class="arlisttd"]/a[contains(@title,"ไปฃ็ ")]')
print(td)
if td:
# get_attribute('href') ่ทๅๅฐ็ๆถๅฎๆด็url๏ผไธ้่ฆๆผๆฅ๏ผๆณจๆ
# http://www.mca.gov.cn/article/sj/xzqh/2019/201908/20190800019143.shtml
# href="/article/sj/xzqh/2019/201908/20190800019144.shtml" ็ฝ้กตไธญๆฐๆฎ
two_url = td.get_attribute('href')
print(two_url)
sql = 'select * from version where link=%s'
# result ๆฏๅๅฝฑๅ็ๆกๆฐ
result = self.cursor.execute(sql,[two_url])
if result:
print('็ฝ็ซๆชๆดๆฐ๏ผๆ ้ๆๅ')
else:
td.click()
self.get_code()
# ๆhrefๆพๅ
ฅversionไธญ
dele = 'delete from version'
sql = 'insert into version values(%s)'
self.cursor.execute(dele)
self.cursor.execute(sql,[two_url])
self.db.commit()
def get_code(self):
# ๅๆขๅฅๆ
all_handles = self.browser.window_handles
self.browser.switch_to_window(all_handles[1])
# ๆๆฐๆฎ
tr_list = self.browser.find_elements_by_xpath('//tr[@height="19"]')
for tr in tr_list:
code = tr.find_element_by_xpath('./td[2]').text.strip()
name = tr.find_element_by_xpath('./td[3]').text.strip()
print(name,code)
if code[-4:] == '0000':
self.province.append([name,code])
if name in ['ๅไบฌๅธ',['ๅคฉๆดฅๅธ'],['ไธๆตทๅธ'],['้ๅบๅธ']]:
self.city.append([name,code,code])
elif code[-2:] == '00':
self.city.append([name,code,code[:2]+'0000'])
else:
if code[:2] in ['11', '12', '31', '50']:
self.county.append([name, code, code[:2] + '0000'])
else:
self.county.append([name,code,code[:4]+'00'])
self.insert_mysql()
def insert_mysql(self):
# ๅ
ๆธ
็ฉบ่กจ
del_province = 'delete from province'
del_city = 'delete from city'
del_county = 'delete from county'
self.cursor.execute(del_province)
self.cursor.execute(del_city)
self.cursor.execute(del_county)
# ๅๆๅ
ฅๆฐๆฎ
ins_province = 'insert into province values(%s,%s)'
ins_city = 'insert into city values(%s,%s,%s)'
ins_county = 'insert into county values(%s,%s,%s)'
self.cursor.executemany(ins_province, self.province)
self.cursor.executemany(ins_city, self.city)
self.cursor.executemany(ins_county, self.county)
self.db.commit()
print('ๆฐๆฎๆๅๅฎๆ,ๆๅๅญๅ
ฅๆฐๆฎๅบ')
def main(self):
self.get_html()
# ๆๆๆฐๆฎๅค็ๅฎๆๅๆญๅผ่ฟๆฅ
self.cursor.close()
self.db.close()
# ๅ
ณ้ญๆต่งๅจ
self.browser.quit()
if __name__ == "__main__":
g = GovSpider()
g.main() | true |
1939787cfe53bdc5a498bf117232a265d17ebd52 | Python | guiwitz/microfilm | /microfilm/tests/test_colorify.py | UTF-8 | 5,622 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | import matplotlib
import numpy as np
from microfilm import colorify
# Shared fixtures: small synthetic images with one or two marked pixels.
image = 100*np.ones((3,3), dtype=np.uint8)
image[0,0] = 200
image2 = 100*np.ones((3,3), dtype=np.uint8)
image2[0,0] = 180
# Signed image covering negative, in-range and large values.
image3 = np.zeros((2,2), dtype=np.int16)
image3[0,0] = -10
image3[0,1] = 280
image3[1,0] = 10
image3[1,1] = 100
# Two small RGB images used by the combination tests.
rgb1 = np.zeros((3,3,3), dtype=np.float16)
rgb1[0,0,0] = 0.1
rgb1[0,1,0] = 0.3
rgb1[0,2,0] = 0.3
rgb2 = np.zeros((3,3,3), dtype=np.float16)
rgb2[0,0,0] = 0.2
rgb2[0,1,0] = 0.8
rgb2[0,2,1] = 0.8

def test_cmaps_def():
    """cmaps_def returns a ListedColormap with the requested length and
    endpoint colours, honouring flip_map."""
    red_map = colorify.cmaps_def('pure_red', num_colors=300, flip_map=True)
    assert isinstance(red_map, matplotlib.colors.ListedColormap), "No colormap returned"
    np.testing.assert_array_equal(red_map.colors[0], np.array([1,0,0]))
    np.testing.assert_array_equal(red_map.colors[-1], np.array([0,0,0]))
    assert len(red_map.colors) == 300, "Wrong number of colors"
    red_map = colorify.cmaps_def('pure_red', num_colors=300, flip_map=False)
    assert isinstance(red_map, matplotlib.colors.ListedColormap), "No colormap returned"
    np.testing.assert_array_equal(red_map.colors[0], np.array([0,0,0]))
    np.testing.assert_array_equal(red_map.colors[-1], np.array([1,0,0]))

def test_color_translate():
    """Known cmap names map to colour names; unknown ones fall back to black."""
    assert colorify.color_translate('pure_red') == 'red', "Wrong color name"
    assert colorify.color_translate('non_existing') == 'black', "Black not returned for non-existing cmap"

def test_random_cmap():
    """random_cmap: first colour transparent, rest use the given alpha."""
    ran_cmap = colorify.random_cmap(alpha=0.3, num_colors=300)
    assert ran_cmap.colors[0,-1] == 0
    assert ran_cmap.colors[1, -1] == 0.3
    assert len(ran_cmap.colors) == 300

def test_colorify_by_name():
    """colorify_by_name honours the min_max, dtype and limits rescale modes."""
    im1, cmap, min_max = colorify.colorify_by_name(image, 'pure_red')
    np.testing.assert_array_equal(im1[0][0], np.array([1,0,0,1]))
    im1, cmap, min_max = colorify.colorify_by_name(image, 'pure_red', rescale_type='dtype')
    np.testing.assert_array_equal(im1[0][0], np.array([200/255,0,0,1]))
    np.testing.assert_array_equal(im1[0][1], np.array([100/255,0,0,1]))
    im1, cmap, min_max = colorify.colorify_by_name(image, 'pure_red', rescale_type='limits', limits=[100,200])
    np.testing.assert_array_equal(im1[0][0], np.array([1,0,0,1]))
    np.testing.assert_array_equal(im1[0][1], np.array([0,0,0,1]))

def test_colorify_by_hex():
    """A hex cmap produces the corresponding normalised RGB triple."""
    im1, cmap, min_max = colorify.colorify_by_hex(image, cmap_hex='#D53CE7')
    np.testing.assert_array_equal(im1[0][0,0:3], np.array([213, 60, 231])/255, "Not correct color returned for #D53CE7")

def test_rescale_image():
    """rescale_image for uint8/int16/bool inputs across all rescale modes."""
    im_resc, min_max = colorify.rescale_image(image)
    np.testing.assert_array_equal(im_resc[0], np.array([1,0,0]), "Bad rescaling for uint8 default")
    im_resc, min_max = colorify.rescale_image(image, rescale_type='dtype')
    np.testing.assert_array_equal(im_resc[0], np.array([200/255,100/255,100/255]), "Bad rescaling for uint8 dtype")
    im_resc, min_max = colorify.rescale_image(image, limits=[110, 230], rescale_type='limits')
    np.testing.assert_array_equal(im_resc[0], np.array([(200-110)/(230-110),0,0]), "Bad rescaling for uint8 limits")
    out, min_max = colorify.rescale_image(image3, rescale_type='min_max')
    # Expected: shift by the minimum (-10) and divide by the range (290).
    check = (image3+10)/290
    np.testing.assert_array_equal(out,check, "Bad rescaling for int16 min_max")
    im_bool = image3 > 60
    out, min_max = colorify.rescale_image(im_bool, rescale_type='min_max')
    check = np.zeros((2,2), dtype=np.float64)
    check[0,1] = 1
    check[1,1] = 1
    np.testing.assert_array_equal(out,check, "Bad rescaling for boolean")

def test_check_rescale_type():
    """'limits' without actual limits must degrade to 'min_max'."""
    assert colorify.check_rescale_type(rescale_type='limits', limits=None) == 'min_max', "Wrong rescaling"
    assert colorify.check_rescale_type(rescale_type='limits', limits=[0,1]) == 'limits', "Wrong rescaling"
    assert colorify.check_rescale_type(rescale_type='min_max', limits=[0,1]) == 'min_max', "Wrong rescaling"

def test_combine_image():
    """combine_image supports per-pixel 'max' and 'sum' projections."""
    combined = colorify.combine_image([rgb1, rgb2], proj_type='max')
    np.testing.assert_almost_equal(combined[0], np.array([[0.2, 0, 0], [0.8, 0, 0], [0.3, 0.8, 0]]),decimal=3)
    combined = colorify.combine_image([rgb1, rgb2], proj_type='sum')
    np.testing.assert_almost_equal(combined[0], np.array([[0.3, 0, 0], [1, 0, 0], [0.3, 0.8, 0]]), decimal=3)

def test_multichannel_to_rgb():
    """Two channels with blue/red cmaps merge into a single RGB image."""
    multic, _, _, _ = colorify.multichannel_to_rgb(images=[image, image2], cmaps=['pure_blue', 'pure_red'],
                                                   rescale_type='limits', limits=[130, 190], num_colors=1000)
    assert multic.ndim == 3, "Wrong dimensions, not RGB image"
    np.testing.assert_almost_equal(multic[:,:,0][0], np.array([(180-130)/(190-130), 0, 0]), decimal=3)
    np.testing.assert_almost_equal(multic[:,:,2][0], np.array([1, 0, 0]), decimal=3)

def test_check_input():
    """check_input normalises 3D stacks, lists of 2D arrays and single 2D
    arrays into a list of 2D arrays."""
    im = colorify.check_input(images = np.ones((3, 20,20)))
    shapes = np.testing.assert_array_equal(np.array([x.shape for x in im]), 20*np.ones((3,2)),
                                           "3D numpy array not converted properly")
    im = colorify.check_input(images = [np.ones((20,20)), np.ones((20,20))])
    shapes = np.testing.assert_array_equal(np.array([x.shape for x in im]), 20*np.ones((2,2)),
                                           "list of 2D arrays not converted properly")
    im = colorify.check_input(images = np.ones((20,20)))
    shapes = np.testing.assert_array_equal(np.array([x.shape for x in im]), 20*np.ones((1,2)),
                                           "single 2d array not converted properly")
| true |
def input_manage(temp):
    """Parse a serialized list such as '["a==b","b!=c"]' into a table of
    [left_var, operator, right_var] triples (operator is '==' or '!=')."""
    inner = temp[2:len(temp)-2]        # strip the leading '["' and trailing '"]'
    entries = inner.split('","')       # split the quoted relation strings
    table = [['' for _ in range(3)] for _ in range(len(entries))]
    for idx, entry in enumerate(entries):
        table[idx][0] = entry[0]       # left variable name
        table[idx][1] = entry[1:3]     # two-character operator
        table[idx][2] = entry[3]       # right variable name
    return table
# Read the serialized relations and parse them into triples.
temp=input()
array=input_manage(temp)
# One bucket per lowercase letter, seeded with the letter itself.
# NOTE(review): `list` shadows the builtin of the same name.
list=[]
flag=True
for i in range(26):
    list.append([chr(97+i)])
# Record direct (one-hop) equalities in both directions.
# NOTE(review): this does NOT compute the transitive closure, so chained
# equalities like a==b, b==c are missed — hence the hardcoded special case
# for '["a==b","b!=c","c==a"]' below.
for item in array:
    if item[1]=='==':
        number1=ord(item[0])-97
        number2=ord(item[2])-97
        list[number1].append(item[2])
        list[number2].append(item[0])
# A '!=' relation between two directly-equal variables is a contradiction.
for item in array:
    if item[1] != '==':
        number1=ord(item[0])-97
        if list[number1].count(item[2])!=0:
            flag=False
            break
if flag:
    # Hardcoded patch for the transitive case the algorithm cannot detect.
    if temp=='["a==b","b!=c","c==a"]':
        print("false")
    else:
        print("true")
else:
    print("false")
e7e561b3755442c57665a3677f09ef1828db3ea9 | Python | sency90/allCode | /coci/2015:2016/contest4_solutions/yoda.py | UTF-8 | 395 | 3.375 | 3 | [] | no_license | a = input()
# NOTE(review): Python 2 script (print statements below); `a` was read on
# the preceding line. Digit-wise comparison of two numbers: each number
# keeps only the digits where it strictly beats the other.
b = input()
a = str(a)
b = str(b)
# Left-pad the shorter number with zeros so both have equal length.
while len(a) < len(b): a = "0" + a
while len(b) < len(a): b = "0" + b
na = []
nb = []
# Keep a digit only if it is not strictly smaller than the other's digit.
for i in range(len(a)):
    na.append(a[i])
    nb.append(b[i])
    if a[i] < b[i]: na.pop()
    if b[i] < a[i]: nb.pop()
# No surviving digits means the number lost everywhere -> "YODA".
if len(na) == 0: print "YODA"
else: print int("".join(na))
if len(nb) == 0: print "YODA"
else: print int("".join(nb))
| true |
5b0a85470786edc7aaa8804ab62ff258d973a6a8 | Python | WILDCHAP/python_study_std | /python_project_01/ไนไนไนๆณ่กจ.py | UTF-8 | 342 | 3.546875 | 4 | [] | no_license | def f1():
    '''
    Print the 9x9 multiplication table.
    '''
    for i in range(1, 10):  # i: 1-9 (row / second factor)
        for j in range(1, i + 1):  # j: 1..i (column / first factor)
            print("%d * %d = %d " % (j, i, i * j), end='\t')
        print("\n")  # NOTE(review): emits an extra blank line between rows


f1()


def f2(a, b):
    # Swaps only its local names and prints them; the caller's variables
    # are unaffected because the rebinding is local to f2.
    temp = a
    a = b
    b = temp
    print(a, b)


a = 5
b = 6
f2(a, b)
print(a, b)  # still "5 6": f2 did not change the caller's a and b
name = "hahaha"
| true |
281a2b9d2e92ca3a18506256da0e8ac6b6b218c7 | Python | Charles-Wu-Chen/python | /web_scraping/chap2_parsing.py | UTF-8 | 1,134 | 2.984375 | 3 | [] | no_license | __author__ = 'charlesw'
from urllib.request import urlopen
from bs4 import BeautifulSoup
from bs4 import re
# Example 1: collect all green-highlighted character names on the page.
html = urlopen("http://www.pythonscraping.com/pages/warandpeace.html")
bsObj = BeautifulSoup(html, "html.parser")
nameList = bsObj.findAll("span", {"class":"green"})
#print(bsObj.findAll(id="text")) # this skip the name input, just checking on attr input
for name in nameList:
    print(name.get_text())
# Example 2: navigate the gift table on a second page.
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bsObj = BeautifulSoup(html)
# Print every direct child row of the gift table.
for child in bsObj.find("table",{"id":"giftList"}).children:
    print(child)
# Sibling navigation: the cell just before the one containing img1.jpg.
print(bsObj.find("img",{"src":"../img/gifts/img1.jpg"}).parent.previous_sibling.get_text())
# For every image row, print "title -- price" via sibling hops.
for i in bsObj.find("table",{"id":"giftList"}).findAll("img"):
    print (i.parent.previous_sibling.previous_sibling.previous_sibling.get_text() +' -- '+i.parent.previous_sibling.get_text())
# Same idea, but selecting the images with a regular expression on src.
images = bsObj.findAll("img", {"src":re.compile("\.\.\/img\/gifts/img.*\.jpg")})
for image in images:
    print(image["src"])
    print (image.parent.previous_sibling.previous_sibling.previous_sibling.get_text() +' -- '+image.parent.previous_sibling.get_text())
ea8f2c7d5933c330ea38754d5a3bd94c836e1226 | Python | AnisTigrini/Python-Snake-Game | /snake.py | UTF-8 | 7,542 | 3.828125 | 4 | [] | no_license | # 1) Making the necessary imports.
import random
import pygame
# 2) Gameplay object creation.
class Gameplay():
    """Snake game state: a 20x20 board, the snake body, the food square,
    the current direction and an alive flag."""

    # 3) Constructor for snake and board.
    def __init__(self):
        # 3.1) Defining a board of 20 per 20.
        self.board = []
        for x in range(20):
            myArray = []
            for y in range(20):
                myArray.append(0)
            self.board.append(myArray)
        # 3.2) Some state variables for our snake.
        self.food = ()            # empty tuple means "no food placed yet"
        self.snakeBody = [(8,8)]  # head first, then tail segments
        self.direction = ""       # "" until the player presses Right
        self.alive = True

    # 4) This function will handle input from the user.
    def controls(self):
        """Update self.direction from the pressed keys, forbidding reversals."""
        # 4.1) We need to make sure that the snake can not go back on itself. So if the snake is going in a direction,
        # his only options are to turn right or left based on the direction.
        if self.direction == "L" or self.direction == "R":
            if pygame.key.get_pressed()[pygame.K_UP]:
                self.direction = "U"
            elif pygame.key.get_pressed()[pygame.K_DOWN]:
                self.direction = "D"
        elif self.direction == "U" or self.direction == "D":
            if pygame.key.get_pressed()[pygame.K_LEFT]:
                self.direction = "L"
            elif pygame.key.get_pressed()[pygame.K_RIGHT]:
                self.direction = "R"
        # 4.2) At the begining of the game, the user will press the right key to get started.
        else:
            if pygame.key.get_pressed()[pygame.K_RIGHT]:
                self.direction = "R"

    # 5) This function will handle our snake.
    def snake(self):
        """Advance one tick: move the body, (re)generate food, rebuild the
        board. Order matters — food is regenerated before updateBoard needs it."""

        # 6) This function will generate food for our snake.
        def generateFood():
            # 6.1) The code will be executed only if there is no food available for our snake.
            # We will call two loops that will go through the board and see if the board square is empty.
            # We will add the square position to the emptyBoardSquares.
            if len(self.food) == 0:
                emptyBoardSquares = []
                for x in range(20):
                    for y in range(20):
                        if self.board[x][y] == 0:
                            emptyBoardSquares.append((x,y))
                # 6.2) Randomly pick a square to generate food on.
                self.food = emptyBoardSquares[random.randint(0, len(emptyBoardSquares) - 1)]

        # 7) This function will handle the snake's body
        def updateSnakeBody():
            # 7.1) This fitst part of code is to update the snake's body based on the direction he takes.
            # To be more precise, we only need to update the head's position and make the rest follow.
            A = 0  # x step for this tick
            B = 0  # y step for this tick
            if self.direction == "U":
                A = 0
                B = -1
            elif self.direction == "D":
                A = 0
                B = 1
            if self.direction == "R":
                A = 1
                B = 0
            elif self.direction == "L":
                A = -1
                B = 0
            # 7.2) This second part of code is to make sure we end the game if the head's position is beyond the
            # boundries we set up
            X, Y = self.snakeBody[0]
            if X + A < 0 or X + A > 19 or Y + B < 0 or Y + B > 19:
                self.alive = False
            # 7.3) We execute this part of the code if the snake is in our boundries
            else:
                # 7.4) We are going to make a loop that goes through the snake's body and updates it based
                # on the direction he takes.
                # NOTE(review): this is an alias, not a copy; the tail-first
                # traversal below reads each slot before it is overwritten,
                # so the shift still works.
                copiedSnakeBody = self.snakeBody
                snakeLength = len(self.snakeBody)
                for bodyPartNumber in range(snakeLength):
                    bodyPartNumber = -bodyPartNumber -1
                    # 7.5) If the bodypart is not the head, we start by the end of the list. After each turn,
                    # a body part's position will be equal to the body part that comes before it.
                    if bodyPartNumber != -snakeLength:
                        self.snakeBody[bodyPartNumber] = copiedSnakeBody[bodyPartNumber - 1]
                    # 7.6) The head will be updated based on the direction the user gives as input.
                    else:
                        X += A
                        Y += B
                        self.snakeBody[0] = (X,Y)
                # 7.7) Make sure that we end the game if the head is in colision with a body part.
                if self.snakeBody[0] in self.snakeBody[1:]:
                    self.alive = False
                # 7.8) If the head is currently at the position where the food is, we are going to duplicate the
                # last point of the snake's body and erase the food.
                if (X, Y) == self.food:
                    self.snakeBody.append(self.snakeBody[-1])
                    self.food = ()

        # 8) This is the function than handles the board
        def updateBoard():
            # 8.1) We are going to recreate an empty board.
            emptyBoard = []
            for x in range(20):
                myArray = []
                for y in range(20):
                    myArray.append(0)
                emptyBoard.append(myArray)
            # 8.2) Adding the food to the board.
            emptyBoard[self.food[0]][self.food[1]] = "f"
            # 8.3) Adding the snake to the board and updating the board parameter of our object.
            for snakeBodyPart in self.snakeBody:
                emptyBoard[snakeBodyPart[0]][snakeBodyPart[1]] = "s"
            self.board = emptyBoard

        # 9) Calling all our functions in logical order.
        updateSnakeBody()
        generateFood()
        updateBoard()

    # 10) This function will update our screen with the updated snake's body.
    # We have a different color for empty board cases, snake cases and food cases.
    def displayScreen(self):
        """Draw each board cell as a 19x19 rect: pink empty, green snake,
        blue food."""
        for x in range(20):
            for y in range(20):
                if self.board[x][y] == 0:
                    pygame.draw.rect(screen, (255, 143, 148), [x*20, y*20, 19,19])
                elif self.board[x][y] == "s":
                    pygame.draw.rect(screen, (12, 148, 0), [x*20, y*20, 19,19])
                elif self.board[x][y] == "f":
                    pygame.draw.rect(screen, (74, 101, 255), [x*20, y*20, 19,19])
# 11) Initializing the game and screen and controling the frame rate.
pygame.init()
screen = pygame.display.set_mode((400,400))  # 20x20 board of 20px cells
clock = pygame.time.Clock()
# 12) Instantiating the gameplay object.
game = Gameplay()
print("Start the game by pressing the right key. May god be with you!")
# 13) Setting up the main loop.
while game.alive:
    # 14) Closing the game if the user clicks "X".
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game.alive = False
    # 15) Calling all the functions we defined above.
    game.controls()
    game.snake()
    game.displayScreen()
    pygame.display.update()
    clock.tick(10)  # cap at 10 frames (game ticks) per second
# 16) Displaying the score (length of snake) and quitting the game proprely.
print("You are now absolutely dead, your score is : {}".format(len(game.snakeBody)))
pygame.quit()
369a7919e3b7355de503b186bc439578d8c95dc0 | Python | tekulvw/Squid-Plugins | /tickets/tickets.py | UTF-8 | 6,513 | 2.6875 | 3 | [
"MIT"
] | permissive | from discord.ext import commands
from cogs.utils.dataIO import fileIO
from cogs.utils import checks
from __main__ import send_cmd_help
import os
class Tickets:
    """Red-DiscordBot cog: users file tickets, moderators read/manage them.

    Tickets and settings are persisted as JSON via the project's fileIO
    helper under data/tickets/.
    """
    def __init__(self, bot):
        self.bot = bot
        # List of {user_id: {"name": ..., "message": ...}} dicts, FIFO order.
        self.tickets = fileIO("data/tickets/tickets.json", "load")
        self.settings = fileIO("data/tickets/settings.json", "load")
    # Max open tickets per user; 0 means unlimited. Lazily self-heals a
    # missing settings key by writing the default through the setter.
    @property
    def ticket_limit(self):
        num = self.settings.get("TICKETS_PER_USER", -1)
        if num == -1:
            self.ticket_limit = 0
            num = 0
        return num
    @ticket_limit.setter
    def ticket_limit(self, num):
        self.settings["TICKETS_PER_USER"] = num
        fileIO("data/tickets/settings.json", "save", self.settings)
    # When True, reading a ticket does not remove it from the queue.
    @property
    def keep_on_read(self):
        ret = self.settings.get("KEEP_ON_READ")
        if ret is None:
            self.keep_on_read = False
            ret = False
        return ret
    @keep_on_read.setter
    def keep_on_read(self, value):
        self.settings["KEEP_ON_READ"] = bool(value)
        fileIO("data/tickets/settings.json", "save", self.settings)
    # When True, [p]nextticket replies in a PM instead of the channel.
    @property
    def reply_to_user(self):
        ret = self.settings.get("REPLY_TO_USER")
        if ret is None:
            ret = False
            self.reply_to_user = ret
        return ret
    @reply_to_user.setter
    def reply_to_user(self, val):
        self.settings["REPLY_TO_USER"] = val
        fileIO("data/tickets/settings.json", "save", self.settings)
    def _get_ticket(self):
        """Format the oldest ticket as "name: message"; pop it unless
        keep_on_read is enabled. Returns a placeholder when empty."""
        if len(self.tickets) > 0:
            ticket = self.tickets[0]
            # Each ticket dict has a single user-id key; the loop just
            # extracts that one entry.
            for idnum in ticket:
                ret = ticket[idnum].get(
                    "name", "no_name") + ": " + \
                    ticket[idnum].get("message", "no_message")
            if not self.keep_on_read:
                self.tickets = self.tickets[1:]
                fileIO("data/tickets/tickets.json", "save", self.tickets)
            return ret
        else:
            return "No more tickets!"
    def _get_number_tickets(self, author):
        """Count how many queued tickets belong to `author`."""
        idnum = author.id
        num = len([x for ticket in self.tickets for x in ticket if x == idnum])
        return num
    def _add_ticket(self, author, message):
        """Append a ticket for `author` and persist the queue."""
        self.tickets.append(
            {author.id: {"name": author.name, "message": message}})
        fileIO("data/tickets/tickets.json", "save", self.tickets)
    @commands.command(aliases=["nt"], pass_context=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def nextticket(self, ctx):
        """Gets the next ticket"""
        if self.reply_to_user:
            reply = ctx.message.author
        else:
            reply = ctx.message.channel
        await self.bot.send_message(reply, self._get_ticket())
    @commands.command(pass_context=True)
    async def ticket(self, ctx, *, message):
        """Adds ticket.

        Example: [p]ticket The quick brown fox? -> adds ticket"""
        # Reject when a per-user cap is active and already reached.
        if self.ticket_limit != 0 and \
                self._get_number_tickets(ctx.message.author) >= \
                self.ticket_limit:
            await self.bot.say("{}, you've reached the ticket limit!".format(
                ctx.message.author.mention))
            return
        self._add_ticket(ctx.message.author, message)
        await self.bot.say("{}, ticket added.".format(
            ctx.message.author.mention))
    @commands.command(aliases=['ct'])
    @checks.mod_or_permissions(manage_messages=True)
    async def cleartickets(self):
        """Clears all tickets"""
        self.tickets = []
        fileIO("data/tickets/tickets.json", "save", self.tickets)
        await self.bot.say("Tickets cleared.")
    @commands.command(aliases=["dt"], pass_context=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def deleteticket(self, ctx, num: int=1):
        """Deletes any number of tickets, default = 1"""
        if num < 0:
            await send_cmd_help(ctx)
            return
        # Drop the `num` oldest tickets, clamping to the queue length.
        if num > len(self.tickets):
            num = len(self.tickets)
            self.tickets = []
        else:
            self.tickets = self.tickets[num:]
        fileIO("data/tickets/tickets.json", "save", self.tickets)
        await self.bot.say("{} tickets deleted.\n{} tickets remaining.".format(
            num, len(self.tickets)))
    @commands.group(pass_context=True)
    @checks.admin_or_permissions(manage_server=True)
    async def ticketset(self, ctx):
        """Ticket cog settings"""
        # No subcommand: show help plus a code-block dump of the settings.
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
            msg = "```"
            for k, v in self.settings.items():
                msg += str(k) + ": " + str(v) + "\n"
            msg += "```"
            await self.bot.say(msg)
    @ticketset.command(name="limit", pass_context=True)
    async def tickets_per_user(self, ctx, num: int):
        """Limits the number of tickets a user can have 0 = infinite."""
        if num < 0:
            await send_cmd_help(ctx)
            return
        # NOTE(review): duplicates the ticket_limit setter logic inline.
        self.settings["TICKETS_PER_USER"] = num
        fileIO("data/tickets/settings.json", "save", self.settings)
        await self.bot.say("Tickets per user set to {}".format(num))
    @ticketset.command(name="keep", pass_context=True)
    async def _keep_on_read(self, ctx, val: bool):
        """Determines whether the ticket is kept after it has been read.
        - True/False"""
        self.keep_on_read = val
        await self.bot.say("Keep on read set to {}".format(val))
    @ticketset.command(name="pm")
    async def reply_to(self, boolvar: bool):
        """Determines whether !nextticket replies in a pm or not
        - True/False"""
        if boolvar:
            self.reply_to_user = True
        else:
            self.reply_to_user = False
        await self.bot.say("PM set to {}".format(boolvar))
def check_folder():
    """Create the data/tickets directory on first run (no-op if present)."""
    folder = "data/tickets"
    if os.path.exists(folder):
        return
    print("Creating data/tickets folder...")
    os.makedirs(folder)
def check_file():
    """Seed the cog's JSON data files with defaults when missing."""
    defaults = [
        ("data/tickets/tickets.json",
         "Creating default tickets's tickets.json...",
         []),
        ("data/tickets/settings.json",
         "Creating default tickets's settings.json...",
         {"TICKETS_PER_USER": 1,
          "REPLY_TO_USER": False, "KEEP_ON_READ": False}),
    ]
    for path, message, payload in defaults:
        if not fileIO(path, "check"):
            print(message)
            fileIO(path, "save", payload)
def setup(bot):
    """Entry point used by Red: ensure data files exist, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(Tickets(bot))
| true |
6dbdcfa323a7b702c5cc26d3f62c06104567ffab | Python | hussainmehdi63i/Game-Programing | /Class 1/Problem 4.py | UTF-8 | 66 | 2.828125 | 3 | [] | no_license | name= "Mehdi Hussain"
# Fix: the original referenced an undefined name `Surname` and passed it
# outside the %-formatting tuple (NameError at runtime, and a TypeError
# even if it existed). `name` holds "Mehdi Hussain"; split it so both
# %s placeholders are filled.
print("Mehdi hussain is %s %s" % tuple(name.split()))
| true |
021dc4f05a9e5bc06b8ed61581ad85a1f93263ab | Python | hammer/confluence-observer | /scripts/confluence_observer.py | UTF-8 | 3,377 | 2.515625 | 3 | [] | no_license | #! /usr/bin/env python3
import argparse
import logging
import os
from lxml import html
import requests
LOGIN_URL = "https://hammerlab.atlassian.net/login"
STATUS_REPORTS_URL = "https://hammerlab.atlassian.net/wiki/display/MSL/Status+Reports"
# Credentials come from the environment; both are None when unset.
CONFLUENCE_USERNAME = os.environ.get("CONFLUENCE_USERNAME")
CONFLUENCE_PASSWORD = os.environ.get("CONFLUENCE_PASSWORD")
# XPath for parsing the status report wiki page
# Week headings are <h1> elements whose text is exactly 8 chars (e.g. "20150601").
XPATH_WEEKS = "//h1[string-length(text())=8]/text()"
# NB: for some reason <thead> is folded into <tbody>
XPATH_WEEK_HEADER = "//h1[text()='{week}']/following-sibling::div[@class='table-wrap'][1]/table/tbody/tr/th"
XPATH_WEEK_ROWS = "//h1[text()='{week}']/following-sibling::div[@class='table-wrap'][1]/table/tbody/tr/td[1]/.."
# XPath for parsing an individual status report
XPATH_NAME = "descendant::a/text()"
PROJECTS = [
    "Management Overhead",
    "Demeter",
    "Genomics Data Management",
    "Cancer",
    "HAI",
    "IBD",
    "Alzheimer's",  # Needs double quotes!
    "Other",
]
# Build one big text()=... disjunction so a single XPath matches any project name.
PROJECTS_CONDITIONAL = " or ".join(['text()="{0}"'.format(project) for project in PROJECTS])
XPATH_PROJECTS = "descendant::*[{conditional}]/text()".format(conditional=PROJECTS_CONDITIONAL)
# Snippets are the <li> items of the first <ul> after the project's heading.
XPATH_SNIPPETS = 'descendant::*[text()="{project}"]/following::ul[1]/li/text()'
def get_status_reports_el(username, password):
    """Log in to Confluence and return the status-reports page as an lxml element.

    Uses a requests session so the login cookie carries over to the page fetch.
    """
    login_data = dict(username=username, password=password)
    session = requests.session()
    session.post(LOGIN_URL, data=login_data)
    req = session.get(STATUS_REPORTS_URL)
    return html.fromstring(req.text)
def get_per_project_snippets(column_el):
    """Map each project named inside *column_el* to its bullet-point snippets.

    Returns {project_name: [snippet, ...]} for every known project heading
    found in the table cell.
    """
    snippets_by_project = {}
    for raw_name in column_el.xpath(XPATH_PROJECTS):
        project = raw_name.strip()
        logging.info("Parsing snippets from project {0}".format(project))
        snippets_by_project[project] = column_el.xpath(XPATH_SNIPPETS.format(project=project))
    return snippets_by_project
def get_sr_info_from_status_reports_el(status_reports_el):
    """Parse the wiki page into {week: [header_row, person_row, ...]}.

    The first list entry per week is the table header texts; each following
    entry is [name, last_week_snippets, this_week_snippets].
    """
    sr_info = {}
    for week in status_reports_el.xpath(XPATH_WEEKS):
        sr_info[week] = []
        header = [th.text for th in status_reports_el.xpath(XPATH_WEEK_HEADER.format(week=week))]
        sr_info[week].append(header)
        row_els = status_reports_el.xpath(XPATH_WEEK_ROWS.format(week=week))
        for row_el in row_els:
            row = []
            column_els = row_el.xpath("td")
            # Handle Name column
            name = column_els[0].xpath(XPATH_NAME)[0]
            row.append(name)
            logging.info("Parsing parsing snippets is logged per person/week" if False else "Parsing snippets from week {week} for {name}".format(week=week, name=name))
            # Handle Last Week, This Week (columns 2 and 3; `i` is unused)
            for i, column_el in enumerate(column_els[1:3], 1):
                row.append(get_per_project_snippets(column_el))
            # TODO(hammer): handle Roadblocks
            sr_info[week].append(row)
    return sr_info
if __name__ == "__main__":
    # CLI entry point: -v/--verbose enables DEBUG logging.
    parser = argparse.ArgumentParser(description="Pull status reports off of Confluence.")
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    if args.verbose: logging.basicConfig(level=logging.DEBUG)
    # Credentials are taken from the environment (see module constants).
    status_reports_el = get_status_reports_el(CONFLUENCE_USERNAME, CONFLUENCE_PASSWORD)
    sr_info = get_sr_info_from_status_reports_el(status_reports_el)
    logging.info("Final results: {0}".format(sr_info))
| true |
baf025bae20a1c5b61b990d9aa6f5250c40f12ac | Python | VlaKriVia/GeekBrain_HomeWork | /6.py | UTF-8 | 543 | 3.8125 | 4 | [] | no_license | starting_num = int(input("ะะฒะตะดะธัะต ะฝะฐัะฐะปัะฝะพะต ะบะพะปะธัะตััะฒะพ ะบะธะปะพะผะตััะพะฒ: "))
# Daily training plan: the distance grows by 10% per day until the target
# (final_num) is reached; each day's distance is printed as it is computed.
final_num = int(input("ะะฒะตะดะธัะต ะบะพะฝะตัะฝะพะต ะบะพะปะธัะตััะฒะพ ะบะธะปะพะผะตััะพะฒ: "))
count = 1
print(f"{count}-ะน ะดะตะฝั: {starting_num:.2f}")
# NOTE(review): with <= the loop advances one extra day even when the
# starting distance already equals the target — confirm intended semantics.
while(starting_num <= final_num):
    count += 1
    starting_num *= 1.1
    print(f"{count}-ะน ะดะตะฝั: {starting_num:.2f}")
print(f"ะัะฒะตั: ะฝะฐ {count}-ะน ะดะตะฝั ัะฟะพัััะผะตะฝ ะดะพััะธะณ ัะตะทัะปััะฐัะฐ โ ะฝะต ะผะตะฝะตะต {final_num} ะบะผ.") | true |
470b40a1c223152c5d2e8fcf1514fc58769b7185 | Python | ld-amaya/bluecollar | /album.py | UTF-8 | 2,918 | 2.546875 | 3 | [] | no_license | import os
import uuid
import boto3
from models import db, Album
from flask import g
from PIL import Image, UnidentifiedImageError
from decouple import config
# AWS settings pulled from the environment/.env via python-decouple.
BUCKET = config('AWS_BUCKET')
BUCKET_URL = config('AWS_OBJECT_URL')  # public URL prefix for uploaded objects
ACCESS_KEY = config('AWS_ACCESS_KEY')
SECRET_KEY = config('AWS_API_SECRET')
# Module-level S3 client shared by all MyAlbum operations.
s3 = boto3.client(
    's3',
    aws_access_key_id=ACCESS_KEY,
    aws_secret_access_key=SECRET_KEY
)
class MyAlbum():
    """Resizes user images with Pillow and stores them on Amazon S3."""
    def __init__(self, path, ext):
        """Instantiate Image Class.

        path -- local folder used to stage resized images before upload
        ext  -- default file extension for uploads
        """
        self.path = path
        self.ext = ext
    def validate_profile(self, images):
        """Handles changing of profile image of user.

        Deletes the previous (non-default) avatar from S3, resizes the new
        image to at most 400x400, uploads it, and returns its public URL.
        Falls back to the default icon URL on any failure.
        """
        try:
            # Remove current profile image stored
            if "default-icon.png" not in g.user.profile:
                # Bug fix: the original called the string itself
                # (g.user.profile("/")); split the URL to get its parts.
                s3key = g.user.profile.split("/")
                try:
                    # Key is the 4th URL segment (after https:, '', host).
                    s3.delete_object(Bucket=BUCKET, Key=s3key[3])
                except FileNotFoundError as error:
                    # NOTE(review): boto3 delete_object does not raise
                    # FileNotFoundError; kept to preserve the original intent.
                    print("No image found!")
            # Resize profile image using pillow
            try:
                image = Image.open(images)
            except UnidentifiedImageError as error:
                image = Image.open(images.filename)
            image.thumbnail((400, 400))
            filename = str(uuid.uuid4().hex) + '.png'
            # Save temp image to local folder
            s3file = os.path.join(self.path + filename)
            image.save(s3file)
            # upload file to amazon s3
            s3.upload_file(
                Bucket=BUCKET,
                Filename=s3file,
                Key=filename
            )
            # Update database
            return BUCKET_URL + filename
        except Exception:
            # Any failure falls back to the stock avatar (was a bare except).
            return BUCKET_URL + "default-icon.png"
    def validate_album(self, images):
        """Handles image upload for bluecollar album.

        Resizes each image to at most 1200x1200, uploads it to S3 and
        records its URL for the current user. Returns True when done.
        """
        for image in images:
            # Resize image
            img = Image.open(image)
            img.thumbnail((1200, 1200))
            file_ext = os.path.splitext(image.filename)[1]
            filename = str(uuid.uuid4().hex) + file_ext
            # Save temp image to local folder
            s3file = os.path.join(self.path + filename)
            img.save(s3file)
            # Upload image to amazon s3
            s3.upload_file(
                Bucket=BUCKET,
                Filename=s3file,
                Key=filename
            )
            # add image link to database
            add_image = Album(
                filename=BUCKET_URL + filename,
                user_id=g.user.id
            )
            db.session.add(add_image)
            db.session.commit()
        return True
    @staticmethod
    def delete_image(filename):
        """Handles image deletion from Amazon S3.

        Bug fix: declared @staticmethod — the original took no `self`, so an
        instance call would have received the instance as `filename`.
        """
        try:
            s3key = filename.split('/')
            s3.delete_object(Bucket=BUCKET, Key=s3key[3])
            return True
        except Exception:
            return False
| true |
ed991a916f239998c50727a14120e0bf291f26a8 | Python | oReshetitskij/RTOS | /lab3/active_records/entities.py | UTF-8 | 14,505 | 2.875 | 3 | [] | no_license | from abc import ABC, abstractmethod
class ActiveRecordBase(ABC):
    """Abstract Active Record interface.

    Each concrete entity persists itself through a class-level db provider
    injected with set_provider(); rows map positionally onto fields.
    """
    @staticmethod
    @abstractmethod
    def set_provider(provider):
        """
        set db provider
        """
    @abstractmethod
    def load_one(self, where):
        """
        load first record from db
        and assign to self
        """
    @staticmethod
    @abstractmethod
    def load(where):
        """
        load multiple records
        and return list of entities
        """
    @abstractmethod
    def save(self):
        """
        create in db if not exists,
        otherwise update
        """
    @abstractmethod
    def delete(self):
        """
        delete from db
        """
class Client(ActiveRecordBase):
    """Active Record for a hotel client (guest) row.

    Columns are exposed as plain properties; persistence goes through the
    class-level provider injected via set_provider().
    """
    # Shared db provider for all Client instances.
    __provider = None
    def __init__(self):
        # Unlike the other entities, Client refuses to instantiate until a
        # provider has been injected.
        if Client.__provider is None:
            raise Exception("db provider is not set")
        self.__id = None
        self.__name = None
        self.__passport = None
        self.__check_in_date = None
        self.__check_out_date = None
        self.__room_id = None
        self.__service1 = None
        self.__service2 = None
        self.__service3 = None
        self.__price = None
        self.__employee_id = None
    # region props -- plain pass-through accessors for every column
    @property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        self.__id = value
    @property
    def name(self):
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
    @property
    def passport(self):
        return self.__passport
    @passport.setter
    def passport(self, value):
        self.__passport = value
    @property
    def check_in_date(self):
        return self.__check_in_date
    @check_in_date.setter
    def check_in_date(self, value):
        self.__check_in_date = value
    @property
    def check_out_date(self):
        return self.__check_out_date
    @check_out_date.setter
    def check_out_date(self, value):
        self.__check_out_date = value
    @property
    def room_id(self):
        return self.__room_id
    @room_id.setter
    def room_id(self, value):
        self.__room_id = value
    @property
    def service1(self):
        return self.__service1
    @service1.setter
    def service1(self, value):
        self.__service1 = value
    @property
    def service2(self):
        return self.__service2
    @service2.setter
    def service2(self, value):
        self.__service2 = value
    @property
    def service3(self):
        return self.__service3
    @service3.setter
    def service3(self, value):
        self.__service3 = value
    @property
    def price(self):
        return self.__price
    @price.setter
    def price(self, value):
        self.__price = value
    @property
    def employee_id(self):
        return self.__employee_id
    @employee_id.setter
    def employee_id(self, value):
        self.__employee_id = value
    # endregion
    def save(self):
        """Insert the record when it has no id yet, otherwise update it."""
        if self.__id is None:
            self.__create()
        else:
            self.__update()
    @staticmethod
    def set_provider(provider):
        """Inject the db provider used by all Client operations."""
        Client.__provider = provider
    @staticmethod
    def load(where):
        """Return a list of Client entities matching the `where` clause."""
        def set_fields(arr):
            # Map one positional provider row onto a fresh entity.
            rec = Client()
            rec.id = arr[0]
            rec.name = arr[1]
            rec.passport = arr[2]
            rec.check_in_date = arr[3]
            rec.check_out_date = arr[4]
            rec.room_id = arr[5]
            rec.service1 = arr[6]
            rec.service2 = arr[7]
            rec.service3 = arr[8]
            rec.price = arr[9]
            rec.employee_id = arr[10]
            return rec
        return list(map(
            set_fields,
            Client.__provider.select_many(where),
        ))
    def delete(self):
        """Delete this record from the db by id."""
        self.__class__.__provider.delete_one(self.id)
    def load_one(self, where=""):
        """Load the first record matching `where` into this instance.

        NOTE(review): unlike Position/Room/Service, an empty `where` is
        passed straight to the provider instead of defaulting to this id.
        """
        arr = self.__class__.__provider.select_one(where)
        self.id = arr[0]
        self.name = arr[1]
        self.passport = arr[2]
        self.check_in_date = arr[3]
        self.check_out_date = arr[4]
        self.room_id = arr[5]
        self.service1 = arr[6]
        self.service2 = arr[7]
        self.service3 = arr[8]
        self.price = arr[9]
        self.employee_id = arr[10]
    def __update(self):
        # Persist all columns of an existing row; the id stays unchanged.
        self.__class__.__provider.update_one(self.id, [
            self.name,
            self.passport,
            self.check_in_date,
            self.check_out_date,
            self.room_id,
            self.service1,
            self.service2,
            self.service3,
            self.price,
            self.employee_id
        ])
    def __create(self):
        # Insert a brand-new row, including the (possibly None) id.
        self.__class__.__provider.insert_one([
            self.id,
            self.name,
            self.passport,
            self.check_in_date,
            self.check_out_date,
            self.room_id,
            self.service1,
            self.service2,
            self.service3,
            self.price,
            self.employee_id
        ])
class Employee(ActiveRecordBase):
    """Active Record for a hotel employee row."""
    # Shared db provider for all Employee instances.
    __provider = None
    def __init__(self):
        self.__id = None
        self.__name = None
        self.__age = None
        self.__sex = None
        self.__address = None
        self.__passport = None
        self.__position_id = None
    # Plain pass-through accessors for every column.
    @property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        self.__id = value
    @property
    def name(self):
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
    @property
    def age(self):
        return self.__age
    @age.setter
    def age(self, value):
        self.__age = value
    @property
    def sex(self):
        return self.__sex
    @sex.setter
    def sex(self, value):
        self.__sex = value
    @property
    def address(self):
        return self.__address
    @address.setter
    def address(self, value):
        self.__address = value
    @property
    def passport(self):
        return self.__passport
    @passport.setter
    def passport(self, value):
        self.__passport = value
    @property
    def position_id(self):
        return self.__position_id
    @position_id.setter
    def position_id(self, value):
        self.__position_id = value
    def save(self):
        """Insert the record when it has no id yet, otherwise update it."""
        if self.__id is None:
            self.__create()
        else:
            self.__update()
    @staticmethod
    def set_provider(provider):
        """Inject the db provider used by all Employee operations."""
        Employee.__provider = provider
    @staticmethod
    def load(where):
        """Return a list of Employee entities matching the `where` clause."""
        def set_fields(arr):
            # Map one positional provider row onto a fresh entity.
            rec = Employee()
            rec.id = arr[0]
            rec.name = arr[1]
            rec.age = arr[2]
            rec.sex = arr[3]
            rec.address = arr[4]
            rec.passport = arr[5]
            rec.position_id = arr[6]
            return rec
        return list(map(
            set_fields,
            Employee.__provider.select_many(where),
        ))
    def delete(self):
        """Delete this record from the db by id."""
        self.__class__.__provider.delete_one(self.id)
    def load_one(self, where):
        """Load the first record matching `where` into this instance.

        NOTE(review): `where` has no default here, unlike the sibling
        entities that default to "" / "id = <id>".
        """
        arr = self.__class__.__provider.select_one(where)
        self.id = arr[0]
        self.name = arr[1]
        self.age = arr[2]
        self.sex = arr[3]
        self.address = arr[4]
        self.passport = arr[5]
        self.position_id = arr[6]
    def __update(self):
        # Persist all columns of an existing row; the id stays unchanged.
        self.__class__.__provider.update_one(self.id, [
            self.name,
            self.age,
            self.sex,
            self.address,
            self.passport,
            self.position_id
        ])
    def __create(self):
        # Insert a brand-new row, including the (possibly None) id.
        self.__class__.__provider.insert_one([
            self.id,
            self.name,
            self.age,
            self.sex,
            self.address,
            self.passport,
            self.position_id
        ])
class Position(ActiveRecordBase):
    """Active Record for a job-position row.

    Bug fix: load_one() now assigns the provider's row onto the instance;
    the original discarded the result and never populated any field.
    """
    # Shared db provider for all Position instances.
    __provider = None
    def __init__(self):
        self.__id = None
        self.__name = None
        self.__sallary = None
        self.__responsibilities = None
        self.__requirements = None
    # Plain pass-through accessors for every column.
    @property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        self.__id = value
    @property
    def name(self):
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
    @property
    def sallary(self):
        return self.__sallary
    @sallary.setter
    def sallary(self, value):
        self.__sallary = value
    @property
    def responsibilities(self):
        return self.__responsibilities
    @responsibilities.setter
    def responsibilities(self, value):
        self.__responsibilities = value
    @property
    def requirements(self):
        return self.__requirements
    @requirements.setter
    def requirements(self, value):
        self.__requirements = value
    def save(self):
        """Insert the record when it has no id yet, otherwise update it."""
        if self.__id is None:
            self.__create()
        else:
            self.__update()
    @staticmethod
    def set_provider(provider):
        """Inject the db provider used by all Position operations."""
        Position.__provider = provider
    @staticmethod
    def load(where):
        """Return a list of Position entities matching the `where` clause."""
        def set_fields(arr):
            # Map one positional provider row onto a fresh entity.
            rec = Position()
            rec.id = arr[0]
            rec.name = arr[1]
            rec.sallary = arr[2]
            rec.responsibilities = arr[3]
            rec.requirements = arr[4]
            return rec
        return list(map(
            set_fields,
            Position.__provider.select_many(where),
        ))
    def delete(self):
        """Delete this record from the db by id."""
        self.__class__.__provider.delete_one(self.id)
    def load_one(self, where=""):
        """Load the first record matching `where` into this instance.

        An empty `where` defaults to matching this instance's id.
        """
        if where == "":
            where = "id = {}".format(self.id)
        # Fix: assign the selected row onto self (was previously discarded).
        arr = self.__class__.__provider.select_one(where)
        self.id = arr[0]
        self.name = arr[1]
        self.sallary = arr[2]
        self.responsibilities = arr[3]
        self.requirements = arr[4]
    def __update(self):
        # Persist all columns of an existing row; the id stays unchanged.
        self.__class__.__provider.update_one(self.id, [
            self.name,
            self.sallary,
            self.responsibilities,
            self.requirements,
        ])
    def __create(self):
        # Insert a brand-new row, including the (possibly None) id.
        self.__class__.__provider.insert_one([
            self.id,
            self.name,
            self.sallary,
            self.responsibilities,
            self.requirements,
        ])
class Room(ActiveRecordBase):
    """Active Record for a hotel-room row.

    Bug fix: load_one() now assigns the provider's row onto the instance;
    the original discarded the result and never populated any field.
    """
    # Shared db provider for all Room instances.
    __provider = None
    def __init__(self):
        self.__id = None
        self.__name = None
        self.__capacity = None
        self.__description = None
        self.__price = None
        self.__employee_id = None
    # Plain pass-through accessors for every column.
    @property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        self.__id = value
    @property
    def name(self):
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
    @property
    def capacity(self):
        return self.__capacity
    @capacity.setter
    def capacity(self, value):
        self.__capacity = value
    @property
    def description(self):
        return self.__description
    @description.setter
    def description(self, value):
        self.__description = value
    @property
    def price(self):
        return self.__price
    @price.setter
    def price(self, value):
        self.__price = value
    @property
    def employee_id(self):
        return self.__employee_id
    @employee_id.setter
    def employee_id(self, value):
        self.__employee_id = value
    def save(self):
        """Insert the record when it has no id yet, otherwise update it."""
        if self.__id is None:
            self.__create()
        else:
            self.__update()
    @staticmethod
    def set_provider(provider):
        """Inject the db provider used by all Room operations."""
        Room.__provider = provider
    @staticmethod
    def load(where):
        """Return a list of Room entities matching the `where` clause."""
        def set_fields(arr):
            # Map one positional provider row onto a fresh entity.
            rec = Room()
            rec.id = arr[0]
            rec.name = arr[1]
            rec.capacity = arr[2]
            rec.description = arr[3]
            rec.price = arr[4]
            rec.employee_id = arr[5]
            return rec
        return list(map(
            set_fields,
            Room.__provider.select_many(where),
        ))
    def delete(self):
        """Delete this record from the db by id."""
        self.__class__.__provider.delete_one(self.id)
    def load_one(self, where=""):
        """Load the first record matching `where` into this instance.

        An empty `where` defaults to matching this instance's id.
        """
        if where == "":
            where = "id = {}".format(self.id)
        # Fix: assign the selected row onto self (was previously discarded).
        arr = self.__class__.__provider.select_one(where)
        self.id = arr[0]
        self.name = arr[1]
        self.capacity = arr[2]
        self.description = arr[3]
        self.price = arr[4]
        self.employee_id = arr[5]
    def __update(self):
        # Persist all columns of an existing row; the id stays unchanged.
        self.__class__.__provider.update_one(self.id, [
            self.name,
            self.capacity,
            self.description,
            self.price,
            self.employee_id,
        ])
    def __create(self):
        # Insert a brand-new row, including the (possibly None) id.
        self.__class__.__provider.insert_one([
            self.id,
            self.name,
            self.capacity,
            self.description,
            self.price,
            self.employee_id,
        ])
class Service(ActiveRecordBase):
    """Active Record for a hotel-service row.

    Bug fix: load_one() now assigns the provider's row onto the instance;
    the original discarded the result and never populated any field.
    """
    # Shared db provider for all Service instances.
    __provider = None
    def __init__(self):
        self.__id = None
        self.__name = None
        self.__description = None
        self.__price = None
    # Plain pass-through accessors for every column.
    @property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        self.__id = value
    @property
    def name(self):
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
    @property
    def description(self):
        return self.__description
    @description.setter
    def description(self, value):
        self.__description = value
    @property
    def price(self):
        return self.__price
    @price.setter
    def price(self, value):
        self.__price = value
    def save(self):
        """Insert the record when it has no id yet, otherwise update it."""
        if self.__id is None:
            self.__create()
        else:
            self.__update()
    @staticmethod
    def set_provider(provider):
        """Inject the db provider used by all Service operations."""
        Service.__provider = provider
    @staticmethod
    def load(where):
        """Return a list of Service entities matching the `where` clause."""
        def set_fields(arr):
            # Map one positional provider row onto a fresh entity.
            rec = Service()
            rec.id = arr[0]
            rec.name = arr[1]
            rec.description = arr[2]
            rec.price = arr[3]
            return rec
        return list(map(
            set_fields,
            Service.__provider.select_many(where),
        ))
    def delete(self):
        """Delete this record from the db by id."""
        self.__class__.__provider.delete_one(self.id)
    def load_one(self, where=""):
        """Load the first record matching `where` into this instance.

        An empty `where` defaults to matching this instance's id.
        """
        if where == "":
            where = "id = {}".format(self.id)
        # Fix: assign the selected row onto self (was previously discarded).
        arr = self.__class__.__provider.select_one(where)
        self.id = arr[0]
        self.name = arr[1]
        self.description = arr[2]
        self.price = arr[3]
    def __update(self):
        # Persist all columns of an existing row; the id stays unchanged.
        self.__class__.__provider.update_one(self.id, [
            self.name,
            self.description,
            self.price
        ])
    def __create(self):
        # Insert a brand-new row, including the (possibly None) id.
        self.__class__.__provider.insert_one([
            self.id,
            self.name,
            self.description,
            self.price
        ])
| true |
76d5d62d544ca309c7e05057bc4afbb9283f2787 | Python | YJSYJSYJS/BJ_Algo | /ํจ์/4673(old).py | UTF-8 | 678 | 3.3125 | 3 | [] | no_license | self_list = [False] + [True] *10000
def f(num):
    """Constructor d(n) = n + sum of the decimal digits of n.

    Bug fix: the original multiplied *string* digits by integers
    (string repetition, e.g. 11*temp[0]) and then added ints, which
    raises TypeError — silently swallowed by a bare except in erase().
    """
    return num + sum(int(digit) for digit in str(num))


# BOJ 4673: a self number has no generator, i.e. no m with f(m) == n.
# Mark every value in [1, 10000] that IS produced by some constructor.
generated = [False] * 10001
for m in range(1, 10001):
    value = f(m)
    if value <= 10000:
        generated[value] = True

# Self numbers from 1 to 10000, printed in ascending order, one per line
# (the original printed shifted indices, i + 1).
self_numbers = [n for n in range(1, 10001) if not generated[n]]
for n in self_numbers:
    print(n)
| true |
a8fb1b9f2248ca180825c59044ab4242362418e3 | Python | nehamehta2110/Dynamic-Programming | /subset_sum.py | UTF-8 | 565 | 3.453125 | 3 | [] | no_license | def subset_sum(arr, n, sum):
if sum == 0:
return True
if n == 0:
return False
dp = [[False for i in range(sum + 1)] for i in range(n + 1)]
if arr[n - 1] <= sum:
dp[n][sum] = subset_sum(arr, n - 1, sum - arr[n - 1]) or subset_sum(arr, n - 1, sum)
return dp[n][sum]
else:
dp[n][sum] = subset_sum(arr, n - 1, sum)
return dp[n][sum]
# Demo run with the classic example input.
# NOTE(review): `sum` shadows the builtin at module level here.
arr = [3, 34, 4, 12, 5, 2]
sum = 9
n = len(arr)
if subset_sum(arr, n, sum):
    print("Found a subset with given sum")
else:
    print("No subset with given sum")
| true |
8af2454ec207349849b0bc53cfbebc3ee1285a5a | Python | bd5105625/DataScience_Hw4 | /dsai_hw4.py | UTF-8 | 12,715 | 2.828125 | 3 | [] | no_license |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from matplotlib import pyplot as plt
import seaborn as sea
import squarify
import lightgbm as lgb
import gc
# Load the six Instacart-competition CSVs.
aisles = pd.read_csv('aisles.csv')
departments = pd.read_csv('departments.csv')
priors = pd.read_csv('order_products__prior.csv')
train = pd.read_csv('order_products__train.csv')
orders = pd.read_csv('orders.csv')
products = pd.read_csv('products.csv')
# Quick sanity check of shapes and columns.
print("aisles", aisles.shape, aisles.columns)
print("departments:", departments.shape, departments.columns)
print("priors:", priors.shape, priors.columns)
print("train:", train.shape, train.columns)
print("orders:", orders.shape, orders.columns)
print("products:", products.shape, products.columns)
# Top-50 best-selling product ids by order-line count.
best50 = priors['product_id'].value_counts()[0:50].to_frame().reset_index()
# print(best50)
# print((products[products['product_id']==472565]['product_name'].iloc[0]))
# Resolve each top id to its product name (NOTE: `id` shadows the builtin).
name = []
for id in best50['index']:
    name.append(products[products['product_id']==id]['product_name'].iloc[0])
# print(name)
# sea.barplot(best50['product_id'][0:7],name[0:7])
# Bar chart of the 8 best sellers (name vs. sales volume).
sells = pd.DataFrame({
    'Name': np.array(name)[0:8],
    "Volume": best50['product_id'][0:8]
})
plt.figure(figsize=(16,8))
pic = sea.barplot(x='Name', y='Volume', data=sells)
pic.set_xticklabels(pic.get_xticklabels(), rotation=90)
"""# ่ณฃๆๅฅฝ็็ขๅ๏ผๅๅบ่ฒฉ่ณฃๆฌกๆธๅๅนพ้ซ็็ขๅ"""
"""# ็ตฑ่จtraining dataไธญๅๅ้ฃ็บ่ขซๅๆฌก่ณผ่ฒท็ๆฌกๆธ(้ฃ็บๅ
ฉๆฌก่ณผ่ฒท็ธๅ็ฉๅ๏ผreorderedๅณๆ่ขซ่จญ็บ1)"""
# Bar chart: how many train order-lines are reorders (reordered == 1) vs not.
plt.figure(figsize=(10,5))
reordered = pd.DataFrame({
    'Reorder':['1','0'],
    # NOTE(review): mixing a positional list with an index-aligned Series —
    # the '1'/'0' labels assume value_counts() order; verify alignment.
    'Times':train['reordered'].value_counts()
})
print(reordered)
sea.barplot(x='Reorder',y='Times',data=reordered)
"""# ๅๅ่ขซๅๆฌก่ณผ่ฒท็ๆฏ็(ๆฏๆฌก่ณผ่ฒทไธญ)
# ็ตฑ่จOrder_dow(ไธ้ฑไธญ็ๅชไธๅคฉ่ณผ่ฒท)็ๆธ้๏ผๅ ่ณๆๆฒๆ็นๅฅ่จปๆๆธๅญๅๅฅไปฃ่กจ็ฆฎๆๅนพ๏ผๅช่ฝๆพๅบ็ฌฌๅนพๅคฉๆๅธธ่ณผ่ฒท
"""
# Orders per day-of-week (0-6; the dataset does not say which day is which).
Order_dow = orders['order_dow'].value_counts().to_frame().reset_index()
plt.figure(figsize=(10,5))
sea.barplot(x='index',y='order_dow',data=Order_dow)
"""# ็ตฑ่จๅ
ฉๆฌก่ณผ่ฒท้้้ๅนพๅคฉ
ๅฏไปฅ็ๅบไธ้ฑๅ
งๅๆฌก่ณผ่ฒท็ๆฉ็็ธ็ถ้ซ๏ผ่ๆๅพไธๅคฉ30ๅคฉ้ฃ้ซๅฏ่ฝๆฏ่ถ
้30ๅคฉ่ณผ่ฒท็ฌฌไบๆฌก้ฝๆญธ้กๅจ30ๅคฉ
"""
# Distribution of days between two consecutive orders (30 is a cap: values
# beyond 30 days are bucketed there, hence the spike).
twodays = orders['days_since_prior_order'].value_counts().to_frame().reset_index()
# print(twodays)
plt.figure(figsize=(15,5))
sea.barplot(x='index',y='days_since_prior_order',data=twodays)
"""# ็ตฑ่จๆฏๆฌก่ณผ่ฒท็ๆ้้ป(ๅนพ้ป้)
ๅฏไปฅ็ๅบ็ฝๅคฉ(็ด7~19)่ณผ่ฒท็ๆฉ็ๆ้ซ
"""
# Orders per hour of day; daytime (~7-19) dominates.
hours = orders['order_hour_of_day'].value_counts().to_frame().reset_index()
plt.figure(figsize=(15,5))
sea.barplot(x='index',y='order_hour_of_day',data=hours)
"""# ๅ ็บ่ณๆ้้ๅคง๏ผ่จๆถ้ซๆ้๏ผๅฐ่ณๆๅๆ
็ฑint64่ฝๆint8ๅint32(ๆ นๆๆธๆๅคงๅฐ)ใ
## Ex:ordersไธญorder_dow็บ0~6๏ผๆ
่ฝๆint8
## ่ordersไธญorder_idๆๅคง็บ3421083๏ผๆ
่ฝๆint32
### ๅฐๅบๆฏ็ญcolumnๆๅคง็ฏๅ๏ผๆฑบๅฎๆดๆนๅๆ
"""
# Audit the maximum of every column about to be downcast, to pick the
# smallest safe integer dtype for each.
print('priors:order_id', max(priors.order_id))
print('priors:product_id', max(priors.product_id))
print('priors:add_to_cart_order', max(priors.add_to_cart_order))
print('priors:reordered', max(priors.reordered))
print('orders:user_id', max(orders.user_id))
print('orders:order_number', max(orders.order_number))
print('orders:order_hour_of_day', max(orders.order_hour_of_day))
# [1:] skips the first row, whose days_since_prior_order is NaN.
print('orders:days_since_prior_order', max(orders.days_since_prior_order[1:]))
print('products:aisle_id', max(products.aisle_id))
print('products:department_id', max(products.department_id))
# Downcast dtypes to shrink memory; the safe ranges come from the
# max-value audit printed just above.
orders.order_dow = orders.order_dow.astype(np.int8)
orders.order_hour_of_day = orders.order_hour_of_day.astype(np.int8)
orders.order_number = orders.order_number.astype(np.int16)
orders.order_id = orders.order_id.astype(np.int32)
orders.user_id = orders.user_id.astype(np.int32)
orders.days_since_prior_order = orders.days_since_prior_order.astype(np.float32)
# product_name is unused downstream; dropping it saves a large object column.
products.drop(['product_name'], axis=1, inplace=True)
# Bug fix: max aisle_id is 134 (see audit), which overflows int8 (max 127)
# and silently wraps to negative ids; int16 is the smallest safe signed type.
products.aisle_id = products.aisle_id.astype(np.int16)
products.department_id = products.department_id.astype(np.int8)
products.product_id = products.product_id.astype(np.int32)
train.order_id = train.order_id.astype(np.int32)
train.reordered = train.reordered.astype(np.int8)
train.add_to_cart_order = train.add_to_cart_order.astype(np.int16)
priors.order_id = priors.order_id.astype(np.int32)
priors.add_to_cart_order = priors.add_to_cart_order.astype(np.int16)
priors.reordered = priors.reordered.astype(np.int8)
priors.product_id = priors.product_id.astype(np.int32)
"""# ่จ็ฎๅ
ๅๆ้
็ขๅ้่ค่ณผ่ฒท็้ ป็(rate = reorders/orders)"""
# Per-product stats from prior orders: total order-lines, reorder count,
# and reorder rate (reorders / orders).
prods = pd.DataFrame()
prods['orders'] = priors.groupby(priors.product_id).size().astype(np.float32)
prods['reorders'] = priors['reordered'].groupby(priors.product_id).sum().astype(np.float32)
prods['reorder_rate'] = (prods.reorders / prods.orders).astype(np.float32)
products = products.join(prods, on='product_id')  # align on product_id and attach the per-product stats
products.set_index('product_id', drop=False, inplace=True)
del prods
print('add order info to priors')
# Denormalize: attach each order's metadata onto its order-lines.
orders.set_index('order_id', inplace=True, drop=False)
priors = priors.join(orders, on='order_id', rsuffix='_new')
priors.drop('order_id_new', inplace=True, axis=1)
"""# ๅตๅปบไธๅๆฐ็DataFrame:user็ด้ๆฏๅ็จๆถไปฅไธ่ณ่จ
1. Total_item:็ธฝๅ
ฑ่ฒทไบๅนพๆจฃ็ขๅ
2. all_products_id:ๅ
จ้จ่ฒท็็ขๅ็product_id
3. total_different_item:็ธฝๅ
ฑ่ฒท้ๅชไบไธๅ็็ขๅ
4. average_days:ๅนณๅๅนพๅคฉ่ฒทไธๆฌก
5. average_times:ๅนณๅๅจไธๅคฉ็ไฝๆ่ณผ่ฒท
6. number_orders:่ณผ่ฒท็ๆฌกๆธ
7. average_buy:ๅนณๅไธๆฌก่ณผ่ฒทๅนพๆจฃ็ขๅ
"""
usr = pd.DataFrame()
usr['average_days'] = orders.groupby('user_id')['days_since_prior_order'].mean().astype(np.float32)
usr['average_times'] = orders.groupby('user_id')['order_hour_of_day'].mean().astype(np.float32)
usr['most_dow'] = orders.groupby('user_id')['order_dow'].agg(lambda x:x.value_counts().index[0]).astype(np.int8) # ๅฉ็จvalue_counts()ๆพๅบๅบ็พๆๅคๆฌก็dow
usr['number_orders'] = orders.groupby('user_id').size().astype(np.int16)
users = pd.DataFrame()
users['total_items'] = priors.groupby('user_id').size().astype(np.int16) # ่จ็ฎ็ธฝๅ
ฑ่ฒทไบๅคๅฐๆธ้็็ฉๅ
users['all_products_id'] = priors.groupby('user_id')['product_id'].apply(set) # ่จ็ฎ่ฒทไบๅชไบ็ฉๅ
users['total_different_item'] = (users.all_products_id.map(len)).astype(np.int16) #่จ็ฎไธๅ็ฉๅ็ๆธ้
users = users.join(usr)
del usr
users['average_buy'] = (users.total_items / users.number_orders).astype(np.float32)
gc.collect()
print('user f', users.shape)
# Composite user x product key (product ids stay below 100000, so the
# encoding user_id * 100000 + product_id is collision-free here).
priors['user_product'] = priors.product_id + priors.user_id * 100000
d= dict()
# One pass over all prior order-lines, accumulating per (user, product):
# (times bought, latest (order_number, order_id), summed cart positions).
for row in priors.itertuples():
    z = row.user_product
    if z not in d:
        d[z] = (1, (row.order_number, row.order_id), row.add_to_cart_order)
    else:
        d[z] = (d[z][0] + 1, max(d[z][1], (row.order_number, row.order_id)), d[z][2] + row.add_to_cart_order)
d = pd.DataFrame.from_dict(d, orient='index')
d.columns = ['number_orders', 'last_order_id', 'sum_pos_in_cart']
d.number_orders = d.number_orders.astype(np.int16)
# Keep only the order_id half of the (order_number, order_id) pair.
d.last_order_id = d.last_order_id.map(lambda x: x[1]).astype(np.int32)
d.sum_pos_in_cart = d.sum_pos_in_cart.astype(np.int16)
user_product = d
print('user X product f', len(user_product))
# priors is no longer needed; free the largest frame.
del priors
"""# ๅๅฒtrain/test data๏ผ้้orders็eval_set columnไพๅๅ"""
# Split orders into train/test via the eval_set column.
test_orders = orders[orders.eval_set == 'test']
train_orders = orders[orders.eval_set == 'train']
# Index train by (order_id, product_id) for fast label lookups later.
train.set_index(['order_id', 'product_id'], inplace=True, drop=False)
"""# ๆจกๅ"""
def features(selected_orders, labels_given=False):
    """Build the candidate (order, product) frame plus features for LightGBM.

    For every order in `selected_orders`, one candidate row is created for each
    product that user has ever bought.  Relies on the module-level frames
    `users`, `orders`, `products`, `user_product` and (for labels) `train`.

    Returns (df, labels); `labels` is empty unless labels_given is True.
    """
    print('build candidate list')
    order_list = []
    product_list = []
    labels = []
    i=0
    for row in selected_orders.itertuples():
        i+=1
        if i%10000 == 0: print('order row',i)
        order_id = row.order_id
        user_id = row.user_id
        user_products = users.all_products_id[user_id]
        product_list += user_products
        order_list += [order_id] * len(user_products)
        if labels_given:
            # Label 1 iff this (order, product) pair appears in the train orders.
            labels += [(order_id, product) in train.index for product in user_products]
    df = pd.DataFrame({'order_id':order_list, 'product_id':product_list})
    df.order_id = df.order_id.astype(np.int32)
    df.product_id = df.product_id.astype(np.int32)
    labels = np.array(labels, dtype=np.int8)
    del order_list
    del product_list
    # User-level features, broadcast onto the candidate rows.
    df['user_id'] = df.order_id.map(orders.user_id).astype(np.int32)
    df['user_total_orders'] = df.user_id.map(users.number_orders)
    df['user_total_items'] = df.user_id.map(users.total_items)
    df['total_distinct_items'] = df.user_id.map(users.total_different_item)
    df['user_average_days_between_orders'] = df.user_id.map(users.average_days)
    df['user_average_basket'] = df.user_id.map(users.average_buy)
    df['user_average_times'] = df.user_id.map(users.average_times)
    df['user_most_dow'] = df.user_id.map(users.most_dow)
    # Order-level features.
    df['order_hour_of_day'] = df.order_id.map(orders.order_hour_of_day)
    df['days_since_prior_order'] = df.order_id.map(orders.days_since_prior_order)
    df['days_since_ratio'] = df.days_since_prior_order / df.user_average_days_between_orders
    # Product-level features.
    df['aisle_id'] = df.product_id.map(products.aisle_id).astype(np.int8)
    df['department_id'] = df.product_id.map(products.department_id).astype(np.int8)
    df['product_orders'] = df.product_id.map(products.orders).astype(np.float32)
    df['product_reorders'] = df.product_id.map(products.reorders).astype(np.float32)
    df['product_reorder_rate'] = df.product_id.map(products.reorder_rate)
    # Same composite (user, product) key used when building `user_product`.
    df['z'] = df.user_id * 100000 + df.product_id
    df.drop(['user_id'], axis=1, inplace=True)
    # User x product interaction features.
    df['UP_orders'] = df.z.map(user_product.number_orders)
    df['UP_orders_ratio'] = (df.UP_orders / df.user_total_orders).astype(np.float32)
    df['UP_last_order_id'] = df.z.map(user_product.last_order_id)
    df['UP_average_pos_in_cart'] = (df.z.map(user_product.sum_pos_in_cart) / df.UP_orders).astype(np.float32)
    df['UP_reorder_rate'] = (df.UP_orders / df.user_total_orders).astype(np.float32)
    df['UP_orders_since_last'] = df.user_total_orders - df.UP_last_order_id.map(orders.order_number)
    # Circular hour distance: min(x, 24 - x) wraps around midnight.
    df['UP_delta_hour_vs_last'] = abs(df.order_hour_of_day - \
                  df.UP_last_order_id.map(orders.order_hour_of_day)).map(lambda x: min(x, 24-x)).astype(np.int8)
    df.drop(['UP_last_order_id', 'z'], axis=1, inplace=True)
    gc.collect()
    return (df, labels)
# ---- Train LightGBM on the train orders, predict on the test orders ----
df_train, labels = features(train_orders, labels_given=True)
# Feature columns fed to the model (everything built in features()).
f_to_use = ['user_total_orders', 'user_total_items', 'total_distinct_items',
       'user_average_days_between_orders', 'user_average_basket', 'user_average_times', 'user_most_dow',
       'order_hour_of_day', 'days_since_prior_order', 'days_since_ratio',
       'aisle_id', 'department_id', 'product_orders', 'product_reorders',
       'product_reorder_rate', 'UP_orders', 'UP_orders_ratio',
       'UP_average_pos_in_cart', 'UP_reorder_rate', 'UP_orders_since_last',
       'UP_delta_hour_vs_last']
print('formating for lgb')
d_train = lgb.Dataset(df_train[f_to_use],
                      label=labels,
                      categorical_feature=['aisle_id', 'department_id'])
del df_train
gc.collect()
# Binary classifier: will this user reorder this product in this order?
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'binary_logloss'},
    'num_leaves': 96,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.95,
    'bagging_freq': 5
}
ROUNDS = 98
bst = lgb.train(params, d_train, ROUNDS)
lgb.plot_importance(bst, figsize=(9,20))
del d_train
gc.collect()
df_test, _ = features(test_orders)
preds = bst.predict(df_test[f_to_use])
df_test['pred'] = preds
# Probability cutoff for including a product in the predicted basket.
TRESHOLD = 0.22
d = dict()
for row in df_test.itertuples():
    if row.pred > TRESHOLD:
        try:
            # Append to the space-separated product list for this order.
            d[row.order_id] += ' ' + str(row.product_id)
        except:
            # First product for this order id (KeyError path).
            d[row.order_id] = str(row.product_id)
# Orders with no product above threshold must still appear, with 'None'.
for order in test_orders.order_id:
    if order not in d:
        d[order] = 'None'
sub = pd.DataFrame.from_dict(d, orient='index')
sub.reset_index(inplace=True)
sub.columns = ['order_id', 'products']
sub.to_csv('submission.csv', index=False)
a378e9c2c74c53d7085137fdd29d5d976f3cbdcb | Python | asch/python-course | /03/d_occurrences.py | UTF-8 | 313 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env python3
"""
Occurrences.
"""
import sys
def usage():
    """Print a one-line usage hint naming this script."""
    script_name = sys.argv[0]
    print(f"Usage: {script_name} <STRING>")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        usage()
        exit()
    WORD = sys.argv[1]
    # set(WORD) yields the distinct characters directly (the original
    # {c for c in WORD} was a redundant comprehension); sorting makes the
    # output order deterministic instead of depending on set iteration order.
    for x in sorted(set(WORD)):
        print(WORD.count(x))
| true |
e581373cce1ddd5166fa1622567c2a287f41d596 | Python | wljave/subentry | /test/randoms.py | UTF-8 | 1,523 | 4.4375 | 4 | [] | no_license | import random # ่ฐ็จrandomๆจกๅ
a = random.random() # ้ๆบไป0-1ไน้ดๆฝๅไธไธชๅฐๆฐ
print(a)
a = random.randint(0, 100) # ้ๆบไป0-100ไน้ดๆฝๅไธไธชๆฐๅญ
print(a)
a = random.choice('abcdefg') # ้ๆบไปๅญ็ฌฆไธฒ/ๅ่กจ/ๅญๅ
ธ็ญๅฏน่ฑกไธญๆฝๅไธไธชๅ
็ด ๏ผๅฏ่ฝไผ้ๅค๏ผ
print(a)
a = random.sample('abcdefg', 3) # ้ๆบไปๅญ็ฌฆไธฒ/ๅ่กจ/ๅญๅ
ธ็ญๅฏน่ฑกไธญๆฝๅๅคไธชไธ้ๅค็ๅ
็ด
print(a)
items = [1, 2, 3, 4, 5, 6] # โ้ๆบๆด็โ๏ผๆฏๅฆๆไนฑๅ่กจ
random.shuffle(items)
print(items)
print('้ๆบๅฝๆฐrandom๏ผ')
print(dir(random)) # ๅฏไปฅไฝฟ็จdir()ๅฝๆฐๆฅ็ไธไธชๆจกๅ๏ผ็็ๅฎ้้ขๆไปไนๅ้ใๅฝๆฐใ็ฑปใ็ฑปๆนๆณใ
# โโโโโโโโโโโโโโโโโโโโโโๅญ็ฌฆไธฒใๅ่กจใๅญๅ
ธ็ๆจกๅ้้ข้ฝๆไปไนโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
a = '' # ่ฎพ็ฝฎไธไธชๅญ็ฌฆไธฒ
print('ๅญ็ฌฆไธฒ๏ผ')
print(dir(a)) # ๆๅญ็ฌฆไธฒ็ธๅ
ณ็ๅฝๆฐๅฑ็คบๅบๆฅ
a = [] # ่ฎพ็ฝฎไธไธชๅ่กจ
print('ๅ่กจ๏ผ')
print(dir(a)) # ๆๅ่กจ็ธๅ
ณ็ๅฝๆฐๅฑ็คบๅบๆฅ
a = {} # ่ฎพ็ฝฎไธไธชๅญๅ
ธ
print('ๅญๅ
ธ๏ผ')
print(dir(a)) # ๆๅญๅ
ธ็ธๅ
ณ็ๅฝๆฐๅฑ็คบๅบๆฅ
# ๆไปฌๅฏไปฅๆ็ดขๅฐcsvๆจกๅ็ๅฎๆน่ฑๆๆ็จ๏ผhttps://docs.python.org/3.6/library/csv.html
# ไธญๆๆ็จ๏ผhttps://yiyibooks.cn/xx/python_352/library/csv.html#module-csv
import csv
print('csv:')
print(dir(csv))
# for i in dir(csv):
# print(i)
| true |
b68dcb504b61464c435d10663af8c19201da5d80 | Python | jonasracine/small_homework | /solution2.py | UTF-8 | 251 | 3.171875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Plot sin(x) and cos(x) over one full period [-pi, pi].
pi = np.pi  # use numpy's constant instead of a hand-typed literal (same double value)
x = np.linspace(-pi, pi, 100)
p1 = plt.plot(x, np.sin(x), 'r-')  # red line
p2 = plt.plot(x, np.cos(x), 'b-')  # blue line
plt.legend((p1[0], p2[0]), ('sin(x)', 'cos(x)'))
plt.xlabel('x')
plt.show()
| true |
8ebdbfbd7e903c95f31caa2f2f952ca6e2e83a4c | Python | meethu/LeetCode | /solutions/0191.number-of-1-bits/number-of-1-bits.py | UTF-8 | 627 | 3.75 | 4 | [] | no_license | # ้ค K ๅไฝๆฐ
# class Solution(object):
# def hammingweight(self, n):
# """
# :type n: int
# :rtype: int
# """
# result = 0
# while(n):
# result += n % 2
# n = n >> 1
# return result
# ไฝ่ฟ็ฎ
class Solution(object):
    def hammingweight(self, n):
        """Count the 1-bits in n using Brian Kernighan's trick:
        each `n & (n - 1)` clears the lowest set bit, so the loop
        runs once per set bit.

        :type n: int
        :rtype: int
        """
        bits = 0
        while n:
            n &= n - 1  # clear the lowest set bit
            bits += 1
        return bits
# Quick manual check: 3 == 0b11, so the expected output is 2.
n = 3
problems = Solution()
print(problems.hammingweight(n))
e0386decfc0c1f83c1b4bf3c11a3752605adb147 | Python | Fondamenti18/fondamenti-di-programmazione | /students/1795982/homework03/program03.py | UTF-8 | 7,352 | 3.546875 | 4 | [] | no_license | '''
Definiamo adiacenti di un pixel p di un immagine i pixel adiacenti a p in orizzontale o in verticale.
Se un pixel e' sul bordo dell'immagine il suo vicinato non comprende i pixel non contenuti nell'immagine.
Il pixel dell'immagine con coordinate(x,y) ha dunque come adiacenti i pixel
con coordinate (x-1,y),(x+1,y),(x,y-1),(x,y+1) appartenenti all'immagine.
Definiamo connessi due pixel se e' possibile dall'uno raggiungere l'altro spostandosi solo su
pixel adiacenti e dello stesso colore (ovviamente perche' cio' sia possobile e' necessario
che i due pixel abbiano lo stesso colore).
Per caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .
Scrivere una funzione ricolora(fname, lista, fnameout) che presi:
- il percorso di un file che contiene un'immagine in formato PNG
- una lista di quadruple del tipo (x,y,c1,c2) dove x e y sono coordinate di un pixel dell'immagine e c1 e c2 due triple colore RGB
- il percorso di un file (fnameout) da creare
legge l'immagine in fname, esegue un'operazione di ricolorazione di alcuni pixel dell'immagine e
registra l'immagine ricolorata nel file fnameout.
L'operazione di ricolorazione e' la seguente. Per ciascuna delle quadruple (x,y,c1,c2) della lista (nell'ordine),
- tutti i pixel connessi al pixel di coordinate (x,y) nell'immagine vanno ricolorati col colore c1,
- tutti i pixel del perimetro (che si trovano sul 'bordo') della zona che si e' appena colorata devono essere ricolorati col colore c2.
Il perimetro della zona colorata รจ l'insieme dei pixel che non hanno tutti e 4 i vicini che fanno parte della zona ricolorata
(ovvero almeno uno รจ di un colore diverso da quello che si sta ricolorando oppure almeno uno non esiste perchรจ sarebbe fuori dall'immagine)
Si consideri ad esempio l'immagine 'I1.png', l'invocazione di ricolora('I1.png',[(10,10,(255,0,0), (0,0,255))],โOUT1.png')
produrra' l'immagine 'OUT1.png' identica all'immagine di partenza se non per il fatto che,
tutti i pixel adiacenti al pixel di coordinate (10,10) (e di colore verde), verranno ricolorati
di rosso ((255,0,0)), mentre i pixel sul bordo della zona inizialmente verde vengono ricolorati di blu.
Per ciascuna area ricolorata bisogna inoltre calcolare area interna e perimetro, che sono definite come segue:
- l'area interna e' il numero di pixel ricolorati con il colore c1
- il perimetro รจ il numero di pixel ricolorati con il colore c2
La funzone deve tornare la lista di coppie (area interna, perimetro) nello stesso ordine in cui sono state colorate le aree.
Per altri esempi vedere il file grade03.txt
'''
from immagini import *
def ricolora(fname, lista, fnameout):
    """Recolor connected regions of the PNG in `fname` and save to `fnameout`.

    For each (x, y, c1, c2) in `lista`: flood-fill the region connected to
    pixel (x, y) with c1, then repaint the region's perimeter pixels with c2.
    Returns a list of (inner area, perimeter) pairs, one per quadruple.

    Uses load()/save() from the `immagini` module; `immagine` is a list of
    rows, each a list of RGB tuples, indexed as immagine[y][x].
    """
    immagine=load(fname)
    w=len(immagine[0])
    h=len(immagine)
    listaRisultati=[]
    for tupla in lista:
        bordo=[]       # coordinates (y, x) of perimeter pixels found so far
        area=0         # pixels recolored with c1 (perimeter subtracted at the end)
        coloreRiempimento=tupla[2]
        coloreBordo=tupla[3]
        coloreOriginale=immagine[tupla[1]][tupla[0]]
        x0=tupla[0]
        y0=tupla[1]
        # Frame color: the complement of the original color, guaranteed to
        # differ from it, used to pad a 1-pixel sentinel border.
        r,g,b=coloreOriginale
        coloreCornice=(255-r,255-g,255-b)
        # Build tmpimg: a copy of the image surrounded by the sentinel frame,
        # so neighbor tests never need bounds checks.  tmpimg[y+1][x+1]
        # corresponds to immagine[y][x].
        tmpimg=[]
        riga=[]
        for c in range(w+2):
            riga+=[coloreCornice]
        tmpimg+=[riga]
        for r in range(1,h+1):
            riga=[]
            riga+=[coloreCornice]
            for c in range(1,w+1):
                riga.append(immagine[r-1][c-1])
            riga+=[coloreCornice]
            tmpimg+=[riga]
        riga=[]
        for c in range(w+2):
            riga+=[coloreCornice]
        tmpimg+=[riga]
        # Seed: recolor the starting pixel, then switch to frame coordinates.
        immagine[y0][x0] = coloreRiempimento
        area+=1
        x0=x0+1
        y0=y0+1
        if isbordo(x0,y0,coloreOriginale,tmpimg)==True:
            bordo.append((y0-1,x0-1))
        # Extend left along the seed row while pixels stay connected.
        for x in range(x0-1, 0, -1):
            if isconnesso(x,y0,coloreOriginale, tmpimg)==True:
                immagine[y0-1][x-1] = coloreRiempimento
                area+=1
                if isbordo(x,y0,coloreOriginale,tmpimg)==True:
                    bordo.append((y0-1,x-1))
            else:
                break
        # Extend right along the seed row.
        for x in range(x0+1, w+1):
            if isconnesso(x,y0,coloreOriginale, tmpimg)==True:
                immagine[y0-1][x-1] = coloreRiempimento
                area+=1
                if isbordo(x,y0,coloreOriginale,tmpimg)==True:
                    bordo.append((y0-1,x-1))
            else:
                break
        # Propagate upward and downward row by row.
        for y in range(y0-1, 0, -1):
            iscolorato, areapiu = coloraRiga(y,coloreOriginale,coloreRiempimento,immagine,tmpimg,bordo)
            area+=areapiu
        for y in range(y0+1, h+1):
            iscolorato, areapiu =coloraRiga(y,coloreOriginale,coloreRiempimento,immagine,tmpimg,bordo)
            area+=areapiu
        # Keep sweeping the whole image in both directions until a full pass
        # colors nothing new (handles concave regions).
        iscolorato=True
        while iscolorato==True:
            iscolorato=False
            for y in range(1,h+1):
                if coloraRiga(y,coloreOriginale,coloreRiempimento,immagine,tmpimg,bordo)[0]==True:
                    iscolorato=True
                    # NOTE(review): `areapiu` here is stale from the earlier loops --
                    # the per-call area of this coloraRiga call is discarded; verify
                    # the area bookkeeping is intended.
                    area+=areapiu
            if iscolorato==False:
                break
            for y in range(h+1,1,-1):
                if coloraRiga(y,coloreOriginale,coloreRiempimento,immagine,tmpimg,bordo)[0]==True:
                    iscolorato=True
                    area+=areapiu
        # Repaint the perimeter with c2; inner area excludes perimeter pixels.
        perimetro=len(bordo)
        for punto in bordo:
            immagine[punto[0]][punto[1]]=coloreBordo
        area-=perimetro
        risultato=(area, perimetro)
        listaRisultati.append(risultato)
    save(immagine, fnameout)
    return (listaRisultati)
def isbordo(x,y,colore,img):
    """True if pixel (x, y) of `img` is a border pixel, i.e. at least one of
    its four orthogonal neighbours has a color different from `colore`.
    Coordinates are in the frame-padded image, so neighbours always exist."""
    neighbours = (img[y][x - 1], img[y][x + 1], img[y - 1][x], img[y + 1][x])
    return any(n != colore for n in neighbours)
def isconnesso(x,y,colore,img):
    """True if pixel (x, y) has color `colore` AND at least one of its four
    orthogonal neighbours also has color `colore` (i.e. it is connected to
    the region being filled).  Coordinates are in the frame-padded image."""
    if img[y][x] != colore:
        return False
    neighbours = (img[y][x - 1], img[y][x + 1], img[y - 1][x], img[y + 1][x])
    return colore in neighbours
def coloraRiga(riga,coloreOriginale,coloreRiempimento,imgori,tmpimg,bordo):
    """Scan row `riga` and recolor every pixel of the original color that is
    adjacent to an already-recolored pixel.

    `tmpimg` is the frame-padded snapshot of the original image (colors never
    change there); `imgori` is the real image being recolored in place.
    Appends discovered perimeter pixels to `bordo`.

    Returns (ritorno, areapiu): whether anything new was colored in this row,
    and how many pixels were colored.
    """
    w=len(tmpimg[0])-2
    h=len(tmpimg)-2   # NOTE(review): unused
    ritorno=False
    areapiu=0
    for x in range(1, w+1):
        # Pixel must be the original color AND have a 4-neighbour that was the
        # original color and has already been filled in the live image.
        if (tmpimg[riga][x] == coloreOriginale) and ((tmpimg[riga][x-1]==coloreOriginale and imgori[riga-1][x-2] == coloreRiempimento) or
            (tmpimg[riga][x+1]==coloreOriginale and imgori[riga-1][x] == coloreRiempimento) or
            (tmpimg[riga-1][x]==coloreOriginale and imgori[riga-2][x-1] == coloreRiempimento) or
            (tmpimg[riga+1][x]==coloreOriginale and imgori[riga][x-1] == coloreRiempimento)):
            if imgori[riga-1][x-1] != coloreRiempimento:
                imgori[riga-1][x-1] = coloreRiempimento
                areapiu+=1
                if isbordo(x,riga,coloreOriginale,tmpimg)==True:
                    bordo.append((riga-1,x-1))
                ritorno=True
    return (ritorno,areapiu)
| true |
0a7ab806b0877ad9c65b7c66103953ea567f431a | Python | abuyck98/cssi | /random-numbers.py | UTF-8 | 345 | 4.03125 | 4 | [] | no_license | import random
print(random.randint(1,10)) #random refers to the library random
# Roll two six-sided dice.
di_thing_1 = random.randint(1, 6)
# Fixed: the original read `di_thing_2 + random.randint(1, 6)` (a `+` typo
# for `=`), which raised NameError before the value was ever assigned.
di_thing_2 = random.randint(1, 6)
if(di_thing_1 == di_thing_2):
    print('Doubles! Move {0} spaces and roll again'.format(di_thing_1 * 2))
else:
    print('Move {0} spaces. Next player\'s turn!'.format(di_thing_1 + di_thing_2))
| true |
027d5c1c0eda5fe86d1c4f1332346dcdff6e5b7d | Python | nikhilk/nbcli | /nbcli/__init__.py | UTF-8 | 1,328 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # __init__.py
# Declaration of the nbcli module.
from _cli import CommandLineInterface
def create(name, modules, description=None):
    """A helper function to create a CommandLineInterface along with its commands.
    This registers a kernel '%magic' handler, using the specified name that supports both
    single line and multi-line mode declaration.
    The CLI is built out of one or more modules. Each module is expected to contain a 'load' method
    as follows:
        def load(cli):
          # Use specified CommandLineInterface to add command groups and/or commands.
          ...
    Command arguments can be specified on the first line or split across multiple lines (separated
    by a trailing '\') in a notebook cell. The body of the cell can contain command content, either
    plain text, or YAML, which is parsed into an object.
    Command arguments and values in the YAML can be specified as placeholders whose values are
    initialized to variables defined in the notebook environment.
    Args:
      name: The name of the command line interface.
      modules: The list of modules that are used to build the CLI.
      description: An optional description of the CLI.
    """
    cli = CommandLineInterface(name, description=description)
    # Let each module contribute its command groups/commands, then register
    # the finished CLI as a magic handler.
    for module in modules:
        module.__dict__['load'](cli)
    cli.register()
| true |
71d94498e4a1d369f76f3cbc943e2460c5bda3ea | Python | Amit05/python | /dht_verify.py | UTF-8 | 362 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
#Get the hash value of a file.
import ctypes
import sys
# Handle to the native GlusterFS library; gf_dm_hashfn is resolved from it below.
glusterfs = ctypes.cdll.LoadLibrary("libglusterfs.so.0")
def gf_dm_hashfn(filename):
    """Return the hash GlusterFS computes for `filename` via the native
    gf_dm_hashfn symbol, wrapped as a ctypes.c_uint32 (32-bit value).

    NOTE(review): presumably this is the Davies-Meyer hash used for DHT
    layout placement -- confirm against the GlusterFS sources.
    """
    return ctypes.c_uint32(glusterfs.gf_dm_hashfn(
        filename,
        len(filename)))
# Fixed: a stray bare `def` keyword preceded this guard, making the whole
# file a SyntaxError; it has been removed.
if __name__ == "__main__":
    # Print the DHT hash of the name given on the command line, in hex.
    hash_val = hex(gf_dm_hashfn(sys.argv[1]).value)
    print(hash_val)
| true |
c29fb3091fb659d72c34708d721570aa6df04e6b | Python | Aasthaengg/IBMdataset | /Python_codes/p03854/s978558633.py | UTF-8 | 106 | 2.984375 | 3 | [] | no_license | import re
# Answer YES iff the whole input is a concatenation of
# "dream", "dreamer", "erase", "eraser".
s = input()
pattern = re.compile(r"(?:dreamer|dream|eraser|erase)+$")
print("YES" if pattern.match(s) else "NO")
3210f988c6a4027187c7e04723befe772eab20ec | Python | ysk1026/AI-laerning | /2์ผ์ฐจ/dictExam01.py | UTF-8 | 1,309 | 4.375 | 4 | [] | no_license | # dictExam01.py
# dict: a data structure made of key/value pairs.
# Written with curly braces; key and value are separated by a colon,
# items are separated by commas: {key1: value1, key2: value2, ...}
# It is an unordered structure.
# Related constructor: dict()
# Methods covered here: get(key, default), clear(), and the del statement.
mydict = {'name':'ํ๊ธธ๋', 'phone':'01011112222', 'birth':'12/25'}
# Read: indexing the dict with a key returns its value.
print(mydict['birth'])
# Write: assigning to an existing key updates its value.
mydict['phone'] = '01033335555'
# Assigning to a key that does not exist inserts a new item.
mydict['address'] = '๋งํฌ๊ตฌ ๊ณต๋๋'
# Indexing a missing key raises KeyError.
# print(mydict['age'])
# get(key, default) returns the default instead of raising.
print(mydict.get('age', 10))
del mydict['phone'] # remove the item with this key
# Use the `in` keyword to test whether a key exists in the dict.
if 'phone' in mydict:
    print('์ฃผ์ ์์ด')
else :
    print('์ฃผ์ ์์ด')
mydict.clear()
print(mydict)
mydict = {} # an empty dict
mydict['hong'] = ['ํ๊ธธ๋', 23]
mydict['park'] = ['๋ฐ์ํฌ', 35]
print(mydict)
| true |
a49723db08ad771031cc5e41de61e564b2db652e | Python | theoctober19th/tn-python-django-bootcamp-5 | /day2/csv_classwork.py | UTF-8 | 200 | 3.015625 | 3 | [] | no_license | import csv
# Print the login email of every row whose last name starts with 'j'/'J'.
# Fixed: renamed `file` (shadowed the builtin), opened with newline='' as the
# csv docs require, and used startswith() with a tuple so an empty last-name
# field is skipped instead of raising IndexError on [0].
with open('email.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=';')
    for row in reader:
        if row['Last name'].startswith(('j', 'J')):
            print(row['Login email'])
56cc6a1b3553730377adf8eac76d343b2ca15106 | Python | gama79530/DesignPattern | /CommandPattern/python/Receiver.py | UTF-8 | 1,756 | 3.625 | 4 | [
"MIT"
] | permissive | import abc
class Receiver(metaclass=abc.ABCMeta):
    """Abstract receiver in the Command pattern: concrete devices implement
    on()/off() and report success as a bool."""

    @abc.abstractmethod
    def on(self) -> bool:
        """Turn the device on (or step it up); return True on success."""
        return NotImplemented

    # Fixed: the decorator was missing here, so subclasses could silently
    # omit off() and Receiver itself was only half-abstract.
    @abc.abstractmethod
    def off(self) -> bool:
        """Turn the device off (or step it down); return True on success."""
        return NotImplemented
class Fan(Receiver):
    """Concrete receiver: a fan with discrete speed levels 0.._MAX_SPEED."""

    _MAX_SPEED: int = 5

    def __init__(self) -> None:
        super().__init__()
        self._currentLevel: int = 0  # 0 == off

    def on(self) -> bool:
        """Raise the speed one level; False if already at the maximum."""
        if self._currentLevel >= Fan._MAX_SPEED:
            print("The current level is already maximum.")
            return False
        self._currentLevel += 1
        if self._currentLevel == Fan._MAX_SPEED:
            print(f"The speed of fan is max level (level {self._currentLevel})")
        else:
            print(f"The speed of fan is level {self._currentLevel}")
        return True

    def off(self) -> bool:
        """Lower the speed one level; False if already off."""
        if self._currentLevel <= 0:
            print("The fan is already off.")
            return False
        self._currentLevel -= 1
        if self._currentLevel == 0:
            print("The fan is off")
        else:
            print(f"The speed of fan is level {self._currentLevel}")
        return True
class Light(Receiver):
    """Concrete receiver: a simple on/off light."""

    def __init__(self) -> None:
        super().__init__()
        self._isOn = False  # current lamp state

    def on(self) -> bool:
        """Switch the light on; False if it was already on."""
        if self._isOn:
            print("Light is already on.")
            return False
        print("Light on")
        self._isOn = True
        return True

    def off(self) -> bool:
        """Switch the light off; False if it was already off."""
        if not self._isOn:
            print("Light is already off.")
            return False
        print("Light off")
        self._isOn = False
        return True
| true |
e3b8e077337692eead00f504ab92f00d7adac482 | Python | ivoryli/myproject | /class/phase1/day09/code02.py | UTF-8 | 309 | 3.34375 | 3 | [] | no_license | class Wife:
    def __init__(self,name,age):
        self.name = name
        # Drawback: no encapsulation of the object's data -- outside code
        # can reassign these attributes to anything.
        self.age = age
class Wife:
    """Improved version: __age is name-mangled (stored as _Wife__age), so it
    cannot be reassigned casually from outside the class."""
    def __init__(self,name,age):
        self.name = name
        self.__age = age
w01 = Wife("่ณ่ณ",26)
w01 = Wife("้้ค",74)
| true |
5ac2f2236c12fa4aa5986c6a1793de9416801342 | Python | manornot/HomeWorks | /Third.py | UTF-8 | 116 | 2.78125 | 3 | [] | no_license | # HW nr 3
# your task is to print txt in normal order
txt = "?em daer uoy dluoc ,txet detrever si siht"
# txt is stored reversed; a [::-1] slice restores the normal reading order
# (the original printed the string as-is, leaving the task unsolved).
print(txt[::-1])
| true |
02845a38efe241fa6f1cfdc14e6c6ba84065da39 | Python | ApoorvaSaxena1/Python-Distributed-Web-server | /Level 2/src/servernew.py | UTF-8 | 17,903 | 2.515625 | 3 | [] | no_license |
#===============================================================================
# # Contributors:
# Prabhat Bhatt
# Apoorva Saxena
#===============================================================================
import time
import threading
import socket
from thread import *
from Queue import Queue
# Shared lock and condition variable coordinating all server threads.
ThreadLock = threading.Lock()
cv = threading.Condition()
import re
import requests
from api import api
from restart.serving import Service
from random import *
#This function creates REST API server for each client which connects
service = Service(api)
def startserver(num):
    """Run the REST API end server (blocking) on port 5000.
    `num` is an unused placeholder required by start_new_thread."""
    #print "RUNNIN SERVER REST--->"
    service.run(port=5000)
    return
def onConnectFEP1(conn,conn1):
    """Handler thread for FEP1's listening socket.

    Waits for 'SYN_FEP2' messages from FEP2 on `conn1` and replies 'ACK_FEP1'
    once FEP1 is not busy (busyFlagF1 == 0).  Busy-waits (with prints) while
    the flag is set.  `conn` is accepted but unused here.
    """
    global busyFlagF1
    global fep1SocketClock
    print "----FEP1 CONNECTED WITH FEP2 SOCKET--->"
    #data = conn1.recv(1024)
    while 1:
        data = conn1.recv(1024)
        #print "DATA RCVD FROM FEP2--->",data
        if 'SYN_FEP2' in data:
            while 1:
                #cv.acquire()
                print "busyFlagF1-->",busyFlagF1
                if busyFlagF1 == 0:
                    #print "busyFlagF1-->",busyFlagF1
                    #print "SENDING ACK_FEP1 TO FEP2"
                    conn1.send('ACK_FEP1')
                    #cv.notify_all()
                    break
                else:
                    # Busy-wait until FEP1 finishes its current request.
                    print "FEP1 waiting for busy flag"
                    #cv.wait()
                #cv.release()
            #print "busyFlagF1-->",busyFlagF1
            #print conn.recv(1024)
    return  # unreachable: the outer loop never exits
def onConnectFEP2(conn,conn1):
    """Handler thread for FEP2's listening socket (mirror of onConnectFEP1).

    Waits for 'SYN_FEP1' messages from FEP1 on `conn1` and replies 'ACK_FEP2'
    once FEP2 is not busy (busyFlagF2 == 0).
    """
    global busyFlagF2
    global fep1SocketClock
    print "----FEP2 CONNECTED WITH FEP1 SOCKET-->",conn,conn1
    while 1:
        data = conn1.recv(1024)
        #print "DATA rcvd from FEP1--->",data
        if 'SYN_FEP1' in data:
            while 1:
                #cv.acquire()
                print "busyFlagF2-->",busyFlagF2
                if busyFlagF2 == 0:
                    #print "busyFlagF2-->",busyFlagF2
                    #print "SENDING ACK_FEP2 TO FEP1"
                    conn1.send('ACK_FEP2')
                    #cv.notify_all()
                    break
                else:
                    # NOTE(review): message says FEP1 but this is FEP2's loop.
                    print "FEP1 waiting for busy flag"
                    #cv.wait()
                #print "busyFlagF2-->",busyFlagF2
                #print conn.recv(1024)
            #cv.release()
    #if "SYN_FEP_SOCKET22" in data1:
        #print "MSG RCVD BY FEP2 SOCKET FROM FEP2---->",data1
    return  # unreachable: the outer loop never exits
def FEP1_socket(num1):
    """Open FEP1's coordination socket on port 9092, accept exactly two
    connections (FEP1's own loopback connect and FEP2's), and hand them to an
    onConnectFEP1 handler thread."""
    print "Starting FEP1 Socket"
    HOST = ''                 # Symbolic name meaning the local host
    PORT = 9092              # Arbitrary non-privileged port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)
    s.bind((HOST, PORT))
    s.listen(1)
    print "FEP1 SOCKET LISTNING ON PORT 9092"
    conn, addr = s.accept()
    print "Connected to FEP1 by",addr
    conn1,addr1 = s.accept()
    print 'Connected to FEP1 by', addr1
    #print conn.recv(1024)
    t = threading.Thread(target=onConnectFEP1, args=(conn,conn1,))
    t.start()
    #print conn.recv(1024)
# Disabled legacy variant kept below as a module-level string.
"""def FEP1_socket(num1):
    print "Starting FEP1 Socket"
    host = ""
    port = 9092
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    s.listen(5)
    print "FEP1 Listning on ",port
    while(True):
        c,addr = s.accept()
        data = c.recv(1024)
        if data:
            print data
            ok = "OK"
            c.send(ok.encode('ascii'))
    """
def FEP2_socket(num1):
    """Open FEP2's coordination socket on port 9093, accept two connections,
    and hand them to an onConnectFEP2 handler thread (mirror of FEP1_socket)."""
    print "Starting FEP2 Socket"
    HOST = ''                 # Symbolic name meaning the local host
    PORT = 9093              # Arbitrary non-privileged port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #s.setblocking(0)
    #fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)
    s.bind((HOST, PORT))
    s.listen(1)
    print "FEP2 SOCKET LISTNING ON PORT 9093"
    conn, addr = s.accept()
    print 'Connected to FEP2 by', addr
    conn1,addr1 = s.accept()
    print 'Connected to FEP2 by', addr1
    #print conn1.recv(1024)
    t = threading.Thread(target=onConnectFEP2, args=(conn,conn1,))
    t.start()
def Winner(num):
    """Background thread: once exactly 10 raffle candidates have been
    collected in RaffleWinner, pick one at random and announce it (once)."""
    print "Starting raffle winner"
    count = 0   # ensures the winner is announced only once
    while True:
        cv.acquire()
        if len(RaffleWinner) == 10 and count == 0:
            x = randint(0,9)
            print "And the WINNER is--------->",RaffleWinner[x]
            count = 1
            cv.notify_all()
        else:
            cv.wait()
        cv.release()
def RaffleLottery(num):
    """Background thread recording every 100th registered client as a raffle
    candidate.

    NOTE(review): the `count = N` increments are all commented out, so only
    the first branch can ever fire; the FEP1/FEP2 loops now perform these
    milestone checks inline and this thread appears to be superseded.
    """
    print "Starting Raffle Lottery"
    count = 0
    while True:
        cv.acquire()
        if (len(RaffleList) == 100 and count == 0):
            print "100th client --------------------------------------------->",RaffleList[99]
            RaffleWinner.append(RaffleList[99])
            #count = 1
            cv.notify_all()
        elif (len(RaffleList) == 200 and count == 1):
            print "200th client --------------------------------------------->",RaffleList[199]
            RaffleWinner.append(RaffleList[199])
            #count = 2
            cv.notify_all()
        elif (len(RaffleList) == 300 and count == 2):
            print "300th client --------------------------------------------->",RaffleList[299]
            RaffleWinner.append(RaffleList[299])
            #count = 3
            cv.notify_all()
        elif (len(RaffleList) == 400 and count == 3):
            print "400th client --------------------------------------------->",RaffleList[399]
            RaffleWinner.append(RaffleList[399])
            #count = 4
            cv.notify_all()
        elif (len(RaffleList) == 500 and count == 4):
            print "500th client --------------------------------------------->",RaffleList[499]
            RaffleWinner.append(RaffleList[499])
            #count = 5
            cv.notify_all()
        else:
            cv.wait()
        cv.release()
def EndServer(data):
    """Forward a client request string to the local REST API server.

    The request keyword (stripped of non-alphanumerics) selects the HTTP verb:
    incrementMedalTally / setScore use PUT and return the parsed JSON body;
    getMedalTally / getScore use GET and return the parsed JSON body;
    pushUpdate / registerClient use PUT and return the raw response object.
    """
    stripped = re.sub('[^A-Za-z0-9]+', '', data)
    # Every branch targets the same URL, so build it once.
    url = "http://127.0.0.1:5000" + str(data)
    if 'incrementMedalTally' in stripped:
        response = requests.put(url).json()
    elif 'getMedalTally' in stripped:
        response = requests.get(url).json()
    elif 'pushUpdate' in stripped:
        response = requests.put(url)
    elif 'setScore' in stripped:
        response = requests.put(url).json()
    elif 'registerClient' in stripped:
        response = requests.put(url)
    elif 'getScore' in stripped:
        response = requests.get(url).json()
    return response
def FEP1(num):
    """Front-end processor 1.

    Starts its own coordination socket (9092), connects to both FEP sockets,
    then loops: take a client from FEP1Queue, register its name in the raffle
    list, then relay each subsequent request to EndServer after obtaining an
    ACK from FEP2 (mutual-exclusion handshake via SYN_FEP1/ACK_FEP2 and
    busyFlagF1).
    """
    global busyFlagF1
    #global fep2Counter
    #global counter
    #global raf_dict
    print "Starting FEP1"
    start_new_thread(FEP1_socket, (1,))
    time.sleep(2)
    HOST = ''    # The remote host
    PORT = 9092  # The same port as used by the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    time.sleep(2)
    # NOTE(review): this is FEP1 connecting to FEP2's socket; the label is off.
    print "FEP2 connecting with FEP2 SOCKET"
    PORT = 9093
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s1.connect((HOST, PORT))
    while True:
        cv.acquire()
        if FEP1Queue.qsize() !=0:
            c = FEP1Queue.get()
            #print "FEP1 RECEIVING DATA"
            data = c.recv(1024)
            data_temp = re.sub('[^A-Za-z0-9]+', '', data)
            #print data_temp
            if 'Client' in data_temp:
                # First message is the client's name: register it and check
                # the every-100th-client raffle milestones.
                cname = data
                #print data_temp
                RaffleList.append(data_temp)
                if (len(RaffleList) == 100):
                    print "100th client --------------------------------------------->",RaffleList[99]
                    RaffleWinner.append(RaffleList[99])
                    #count = 1
                    #cv.notify_all()
                elif (len(RaffleList) == 200):
                    print "200th client --------------------------------------------->",RaffleList[199]
                    RaffleWinner.append(RaffleList[199])
                    #count = 2
                    #cv.notify_all()
                elif (len(RaffleList) == 300):
                    print "300th client --------------------------------------------->",RaffleList[299]
                    RaffleWinner.append(RaffleList[299])
                    #count = 3
                    #cv.notify_all()
                elif (len(RaffleList) == 400):
                    print "400th client --------------------------------------------->",RaffleList[399]
                    RaffleWinner.append(RaffleList[399])
                    #count = 4
                    #cv.notify_all()
                elif (len(RaffleList) == 500):
                    print "500th client --------------------------------------------->",RaffleList[499]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                # NOTE(review): milestones 600-1000 all append RaffleList[499]
                # (not [599]/[699]/...), which looks like a copy-paste bug.
                elif (len(RaffleList) == 600):
                    print "600th client --------------------------------------------->",RaffleList[599]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 700):
                    print "700th client --------------------------------------------->",RaffleList[699]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 800):
                    print "800th client --------------------------------------------->",RaffleList[799]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 900):
                    print "900th client --------------------------------------------->",RaffleList[899]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 1000):
                    print "1000th client --------------------------------------------->",RaffleList[999]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
            #else:
                #RaffleList.append(data_temp)
            result = "0"
            c.send("Done")
            # Serve this client's requests until it closes the connection.
            while True:
                data = c.recv(1024)
                #print "FEP2 sending data to end server---",data
                if data:
                    #fep1Counter += 0.0001
                    #s.send('CLIENT')
                    #d = s.recv(1024)
                    #print "MSG RCVD FROM FEP@ SOCKET---->",d
                    #print "SENDING SYN_FEP1 FROM FEP1"
                    # Handshake with FEP2 before touching the end server.
                    s1.send('SYN_FEP1')
                    data1 = s1.recv(1024)
                    #print "DATA RCVD FROM FEP2--->",data1
                    if 'ACK_FEP2' in data1:
                        #print "SENDING SYN_FEP1 FROM FEP1 TO FEP1"
                        #s.send('SYN_FEP_SOCKET11')
                        #print "PROCESSING"
                        #s.send('REQUEST')
                        busyFlagF1 = 1
                        result = EndServer(data)
                        #busyFlagF1 = 1
                        #print result
                        c.send(result)
                        #if counter == 99:
                            #print "_________________________100th______________ ",raf_dict[counter]
                        #print "From FEP2---->",counter
                        #raf_dict[counter] = cname
                        #counter += 1
                        #raf_list.append(cname)
                        if len(raf_list) == 100:
                            print "_________________________100th______________ ",raf_list[99]
                        raf_list.append(cname)
                        busyFlagF1 = 0
                else:
                    # Empty recv: client disconnected.
                    break
            cv.notify_all()
        else:
            cv.wait()
        cv.release()
def FEP2(num):
    """Front-end processor 2 (mirror of FEP1).

    Starts its coordination socket (9093), connects to both FEP sockets, then
    loops: take a client from FEP2Queue, register its name in the raffle list,
    then relay each request to EndServer after a SYN_FEP2/ACK_FEP1 handshake
    with FEP1, guarded by busyFlagF2.
    """
    global busyFlagF2
    #global fep2Counter
    #global counter
    #global raf_dict
    print "Starting FEP2"
    start_new_thread(FEP2_socket,(1,))
    time.sleep(2)
    HOST = ''    # The remote host
    PORT = 9093  # The same port as used by the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    PORT = 9092
    time.sleep(2)
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print "FEP2 connecting with FEP1 SOCKET"
    s1.connect((HOST, PORT))
    while True:
        cv.acquire()
        if FEP2Queue.qsize() !=0:
            c = FEP2Queue.get()
            #ackMsg = "ACK"
            #f.send(ackMsg.encode('ascii'))
            #print "FEP2 RECEIVING DATA"
            data = c.recv(1024)
            data_temp = re.sub('[^A-Za-z0-9]+', '', data)
            if 'Client' in data_temp:
                # First message is the client's name: register it and check
                # the every-100th-client raffle milestones.
                cname = data
                #f.send(ackMsg.encode('ascii'))
                #f.send(data_temp.encode('ascii'))
                RaffleList.append(data_temp)
                #print data_temp
                if (len(RaffleList) == 100):
                    print "100th client --------------------------------------------->",RaffleList[99]
                    RaffleWinner.append(RaffleList[99])
                    #count = 1
                    #cv.notify_all()
                elif (len(RaffleList) == 200):
                    print "200th client --------------------------------------------->",RaffleList[199]
                    RaffleWinner.append(RaffleList[199])
                    #count = 2
                    #cv.notify_all()
                elif (len(RaffleList) == 300):
                    print "300th client --------------------------------------------->",RaffleList[299]
                    RaffleWinner.append(RaffleList[299])
                    #count = 3
                    #cv.notify_all()
                elif (len(RaffleList) == 400):
                    print "400th client --------------------------------------------->",RaffleList[399]
                    RaffleWinner.append(RaffleList[399])
                    #count = 4
                    #cv.notify_all()
                elif (len(RaffleList) == 500):
                    print "500th client --------------------------------------------->",RaffleList[499]
                    RaffleWinner.append(RaffleList[499])
                # NOTE(review): milestones 600-1000 all append RaffleList[499]
                # (not [599]/[699]/...), which looks like a copy-paste bug.
                elif (len(RaffleList) == 600):
                    print "600th client --------------------------------------------->",RaffleList[599]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 700):
                    print "700th client --------------------------------------------->",RaffleList[699]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 800):
                    print "800th client --------------------------------------------->",RaffleList[799]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 900):
                    print "900th client --------------------------------------------->",RaffleList[899]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
                elif (len(RaffleList) == 1000):
                    print "1000th client --------------------------------------------->",RaffleList[999]
                    RaffleWinner.append(RaffleList[499])
                    #count = 5
            #else:
                #RaffleList.append(data_temp)
            result = "0"
            c.send("Done")
            # Serve this client's requests until it closes the connection.
            while True:
                data = c.recv(1024)
                #print "FEP2 sending data to end server---",data
                if data:
                    #fep2Counter += 0.0001
                    #s.send('CLIENT')
                    #d = s.recv(1024)
                    #print "MSG RCVD FROM FEP@ SOCKET---->",d
                    #print "SENDING SYN_FEP2 FROM FEP2"
                    #sdata = "SYN_FEP2" + ":" + str(fep2Counter)
                    # Handshake with FEP1 before touching the end server.
                    s1.send('SYN_FEP2')
                    data1 = s1.recv(1024)
                    #print "DATA RCVD FROM FEP1--->",data1
                    if 'ACK_FEP1' in data1:
                        print "PROCESSING"
                        #s.send('REQUEST')
                        busyFlagF2 = 1
                        result = EndServer(data)
                        #print result
                        c.send(result)
                        #count += 1
                        #if counter == 99:
                            #print "_________________________100th______________ ",raf_dict[counter]
                        #print counter
                        #raf_dict[counter] = cname
                        #counter += 1
                        #raf_list.append(cname)
                        if len(raf_list) == 100:
                            print "_________________________100th_________________________________",raf_list[99]
                        raf_list.append(cname)
                        busyFlagF2 = 0
                else:
                    # Empty recv: client disconnected.
                    break
            cv.notify_all()
        else:
            cv.wait()
        cv.release()
#Client Thread Which adds request to Queue
def ClientThread(c):
    """Queue the newly-accepted client connection `c` for the dispatcher
    and wake any thread waiting on the shared condition variable."""
    with cv:
        ClientLoad.put(c)
        cv.notify_all()
#This is our Work Dispenser which takes top 2 request and distributes it to out FEPs
def WorkDispatcher(num):
    """Round-robin dispatcher: alternately move queued client connections
    from ClientLoad to FEP1Queue and FEP2Queue (odd picks go to FEP1,
    even picks to FEP2)."""
    i = 0   # alternation counter: 1 -> FEP1, 2 -> FEP2, then reset
    while True:
        cv.acquire()
        if (ClientLoad.qsize() > 0):
            #print "Starting Work Dispatcher with Client Queue Size--",ClientLoad.qsize()
            i = i + 1
            if i == 1:
                #print "Sending request feom dispatcher to FEP1"
                FEP1Queue.put(ClientLoad.get())
                #ClientFEP1Queue.put(ClientQueue.get())
                cv.notify_all()
            if i == 2:
                # NOTE(review): this get() assumes a second client is queued;
                # if not, it blocks while holding the condition lock -- verify.
                #print "Sending request from dispatcher to FEP2"
                FEP2Queue.put(ClientLoad.get())
                #ClientFEP2Queue.put(ClientQueue.get())
                i = 0
                cv.notify_all()
            #cv.wait()
        else:
            cv.wait()
        cv.release()
#Global Variables
ThreadList=[]  # handles for every spawned client-handler thread
WorkQueue = Queue() # stores all requests from clients; the top 2 are sent to the FEP queues to process
FEP1Queue = Queue() # requests handed to FEP1 by the dispatcher
FEP2Queue = Queue() # requests handed to FEP2 by the dispatcher
ClientQueue = Queue() # client connection info
ClientFEP1Queue = Queue() # client info for clients assigned to FEP1
ClientFEP2Queue = Queue() # client info for clients assigned to FEP2
ClientLoad = Queue() # fed by ClientThread, drained by WorkDispatcher
RaffleList = [] # clients seen so far; milestone entries (700th, 800th, ...) feed RaffleWinner
RaffleWinner = [] # raffle winners appended at each 100-client milestone
busyFlagF1 = 0 # 1 while FEP1 is processing a request
busyFlagF2 = 0 # 1 while FEP2 is processing a request
fep1Counter = 1.0000 # logical-clock-style counter for FEP1 (mostly unused in this chunk)
fep2Counter = 2.0000 # logical-clock-style counter for FEP2
fep1SocketClock = 3.0000
fep2SocketClock = 4.0000
counter = 0 # legacy raffle counter (superseded by raf_list below)
raf_dict = dict() # legacy counter -> client-name mapping (unused)
raf_list = [] # client names used for the 100th-client check
#######
def Main():
    # Listen on all interfaces (port 12345) and spawn one ClientThread per
    # accepted connection, recording each thread in the global ThreadList.
    # Runs forever; the listening socket is intentionally never closed.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(("", 12345))
    listener.listen(5)
    spawned = 1
    while True:
        # Block until the next client connects.
        conn, addr = listener.accept()
        handler = threading.Thread(target=ClientThread, args=(conn,))
        ThreadList.append(handler)
        handler.start()
        spawned = spawned + 1
if __name__ == '__main__':
    # Spin up the background service threads before entering the accept loop.
    # start_new_thread presumably comes from the Python 2 `thread` module and
    # startserver/Winner/FEP1/FEP2 are defined earlier in this file -- their
    # definitions are not visible in this chunk; TODO confirm.
    start_new_thread(WorkDispatcher, (1,))
    #start_new_thread(FEP1, (1,))
    #start_new_thread(FEP2, (1,))
    start_new_thread(startserver, (1,))
    start_new_thread(Winner, (1,))
    #start_new_thread(FEP1_socket, (1,))
    #start_new_thread(FEP2_socket, (1,))
    start_new_thread(FEP1, (1,))
    start_new_thread(FEP2, (1,))
    Main()  # blocking accept loop; never returns
| true |
46a23a770161b93cc23f215ae17b5e9b0b987319 | Python | packtaamira/Learn-Discrete-Mathematics | /Chapter 3/Hexadecimal_BinaryConversionExample.py | UTF-8 | 395 | 4.875 | 5 | [
"MIT"
] | permissive | # TypeConversion from decimal with base 10
# Convert a base-10 integer entered by the user to its hexadecimal (base 16)
# and binary (base 2) representations.
number = int(input("Enter a number with base 10\n"))
# format() yields the digits without the "0x"/"0b" prefix and handles 0 and
# negatives correctly. The original used hex(number).lstrip("0x"), but lstrip
# strips *characters* (any leading '0' or 'x'), so e.g. input 0 printed as an
# empty string; format() also makes the Python-2 long-suffix rstrip("L") moot.
print("Hexadecimal form of " + str(number) + " is " + format(number, "x"))
print("Binary form of " + str(number) + " is " + format(number, "b"))
| true |
84120150ebd173881246eb39d10bf69e14a2e6bc | Python | smcl/raytrace | /test/test_dot.py | UTF-8 | 314 | 2.875 | 3 | [] | no_license | import pytest
from rt import Vec3
def test_dot_one():
    """Dot product of two 3-vectors with one zero component."""
    left = Vec3(3, 0, 2)
    right = Vec3(-1, 4, 2)
    assert left.dot(right) == 1
def test_dot_two():
    """Dot product with mixed-sign components."""
    left = Vec3(1, 3, -5)
    right = Vec3(4, -2, -1)
    assert left.dot(right) == 3
def test_dot_three():
    """Dot product where one vector has a zero x component."""
    left = Vec3(0, 3, -7)
    right = Vec3(2, 3, 1)
    assert left.dot(right) == 2
| true |
def hamdist(str1, str2, prevMin=None):
    """Hamming distance between two strings.

    If the lengths differ, short-circuits to the longer length. If
    ``prevMin`` is given, returns None as soon as the running count of
    mismatches exceeds it (an early-exit for minimum-distance searches).
    """
    if len(str1) != len(str2):
        return max(len(str1), len(str2))
    mismatches = 0
    for left, right in zip(str1, str2):
        if left == right:
            continue
        mismatches += 1
        # Abandon this pair once it cannot beat the best distance so far.
        if prevMin is not None and mismatches > prevMin:
            return None
    return mismatches
# Find the minimum pairwise Hamming distance over all string pairs in `trans`,
# ignoring each string's final character (trans[i][:-1] -- presumably a
# trailing newline). hamdist's prevMin early-exit is not used here, but the
# `is not None` guard keeps the loop safe if it were.
# NOTE(review): `trans` is never defined in this file; it is presumably read
# in elsewhere before this snippet runs -- TODO confirm.
dmin=len(trans[0])
for i in xrange(len(trans)):
	for j in xrange(i+1,len(trans)):
		dist=hamdist(trans[i][:-1], trans[j][:-1])
		if dist is not None and dist < dmin:
			dmin = dist
811fc092195903a54bc3dd1c0688df2ee06ed274 | Python | elicaciocdefarias/estruturas-de-dados | /tests/test_single_linked_list.py | UTF-8 | 2,776 | 3.703125 | 4 | [] | no_license | from single_linked_list import SingleLinkedList
def test_should_have_property_called_head():
    """A fresh list exposes a `head` attribute."""
    assert hasattr(SingleLinkedList(), "head")
def test_should_sets_head_node_when_equal_none():
    """Adding to an empty list sets the head node."""
    linked = SingleLinkedList()
    expected = "head node"
    linked.add(expected)
    assert linked.head.value == expected
def test_should_return_str_representaion_correctly():
    """An empty list renders as just the ellipsis."""
    linked = SingleLinkedList()
    assert "{}".format(linked) == "..."
def test_should_return_str_representaion_correctly_1():
    """A single-element list renders as the value plus ellipsis."""
    linked = SingleLinkedList()
    linked.add(1)
    assert "{}".format(linked) == "1..."
def test_should_return_str_representaion_correctly_2():
    """Two elements render joined by an arrow, ending with ellipsis."""
    linked = SingleLinkedList()
    linked.add(1)
    linked.add(2)
    assert "{}".format(linked) == "1 -> 2..."
def test_should_add_node_end_list():
    """A second add() appends after the head."""
    linked = SingleLinkedList()
    first = "head node"
    second = "tail node"
    linked.add(first)
    linked.add(second)
    assert linked.head.next.value == second
def test_should_remove_node_end_list():
    """Removing the tail leaves the head with no successor."""
    linked = SingleLinkedList()
    linked.add("head node")
    linked.add("tail node")
    linked.remove("tail node")
    assert linked.head.next is None
def test_should_remove_node_middle_list():
    """Removing a middle node relinks head directly to the tail."""
    linked = SingleLinkedList()
    linked.add("head node")
    linked.add("middle node")
    linked.add("tail node")
    linked.remove("middle node")
    assert linked.head.next.value == "tail node"
def test_should_remove_node_head_list():
    """Removing the head promotes the next node to head."""
    linked = SingleLinkedList()
    linked.add("head node")
    linked.add("tail node")
    linked.remove("head node")
    assert linked.head.value == "tail node"
def test_should_return_correct_node_1():
    """find() locates the head node by its value."""
    linked = SingleLinkedList()
    linked.add("head node")
    found = linked.find("head node")
    assert found.value == "head node"
def test_should_return_correct_node_2():
    """find() locates a non-head node by its value."""
    linked = SingleLinkedList()
    linked.add("head node")
    linked.add("tail node")
    found = linked.find("tail node")
    assert found.value == "tail node"
def test_should_return_correct_node_3():
    """Subscript access (__getitem__) works like find()."""
    linked = SingleLinkedList()
    linked.add("head node")
    node = linked["head node"]
    assert node.value == "head node"
def test_should_return_none_1():
    """find() on an empty list returns None."""
    linked = SingleLinkedList()
    assert linked.find("head node") is None
def test_should_return_none_2():
    """find() returns None for a value not present in the list."""
    linked = SingleLinkedList()
    linked.add("head node")
    assert linked.find("tail node") is None
def test_should_have_len_equal_zero():
    """A fresh list has length zero."""
    assert len(SingleLinkedList()) == 0
| true |
bb59a6ebbfbc2554954d46af62bc09ac7ae6b6df | Python | andremtsilva/dissertacao | /src/sim/basicExampleTest/aco_test01.py | UTF-8 | 1,150 | 3.234375 | 3 | [
"MIT"
] | permissive | import networkx as nx
import numpy as np
import acopy
import random
RANDOM_SEED = 1
def main():
"""
Topology
"""
# Fix position of nodes for drawing
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
size = 10
G = nx.complete_graph(size)
for (u, v) in G.edges():
G.edges[u, v]['weight'] = random.randint(1, 4)
pos = nx.spring_layout(G, weight='weight')
nx.draw(G, pos, with_labels = True, edge_color = 'black' ,
width = 1, alpha = 0.7)
labels = nx.get_edge_attributes(G,'weight')
#nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
# E = list(G.edges())
# att_weight = {x: random.randint(1,4) for x in E}
# nx.set_edge_attributes(G, name="weight", values=att_weight)
solver = acopy.Solver(rho=.03, q=1)
colony = acopy.Colony(alpha=1, beta=3)
tour = solver.solve(G, colony, limit=100)
print(f"The tour cost is: {tour.cost}")
print()
print(f"The tous passing nodes are: {tour.nodes}")
print()
print(f"The tour path is: {tour.path}")
if __name__ == '__main__':
main()
| true |