blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
aafc02e8524419241b46dfd9b3f2ccacd0104bf5 | Python | matcom/simspider | /Redist/pyfuzzy/doc/plot/gnuplot/doc.py | UTF-8 | 14,031 | 2.765625 | 3 | [] | no_license | # -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Plotting of variables, adjectives, ... using gnuplot"""
__revision__ = "$Id: doc.py,v 1.9 2009/09/24 20:32:20 rliebscher Exp $"
def getMinMax(set):
    """Get tuple with minimum and maximum x-values used by the set.

    The set's interval generator yields the x-positions of the set's
    defining points in ascending order: the first value seen is the
    minimum, the last one before exhaustion is the maximum.

    @param set: fuzzy set providing C{getIntervalGenerator()}
    @return: tuple (x_min, x_max); both are None if the generator
        yields nothing
    """
    ig = set.getIntervalGenerator()
    # 'current' walks the generated interval borders; renamed from
    # 'next' to avoid shadowing the builtin next().
    current = ig.nextInterval(None, None)
    x_min = current
    x_max = None
    while current is not None:
        x_max = current
        current = ig.nextInterval(current, None)
    return (x_min, x_max)
def getGlobalMinMax(sets):
    """Get tuple with minimum and maximum x-values used by the sets of this dict of sets.

    Folds the per-set ranges from getMinMax() into one global range.
    """
    x_min, x_max = None, None
    for member in sets.values():
        member_min, member_max = getMinMax(member)
        # Widen the global range whenever a member set extends past it.
        x_min = member_min if (x_min is None or member_min < x_min) else x_min
        x_max = member_max if (x_max is None or member_max > x_max) else x_max
    return (x_min, x_max)
def getPoints(sets):
    """Collect all important points of all adjectives in this dict of sets."""
    from Redist.pyfuzzy.set.Set import merge
    # Merge every set into one using the pointwise maximum.
    merged = None
    for member in sets.values():
        merged = member if merged is None else merge(max, merged, member)
    # The result of merge is always a Polygon object, so its defining
    # points are directly available; keep only the x-coordinates.
    xs = [point[0] for point in merged.points]
    # Drop consecutive duplicates so no x-value appears twice.
    unique = []
    for x in xs:
        if not unique or x != unique[-1]:
            unique.append(x)
    return unique
def getSets(variable):
    """Get all sets of adjectives in this variable.

    Returns a dict mapping each adjective name to its fuzzy set.
    """
    return dict((name, adjective.set)
                for name, adjective in variable.adjectives.items())
class Doc(object):
    """Main object. Get an instance of this to do your work.

    Renders fuzzy variables, their sets and whole-system transfer
    functions as PNG images via gnuplot. All images are written into
    the directory given at construction time.
    """
    def __init__(self,directory="doc"):
        """@param directory: target directory for the generated PNG files"""
        self.directory = directory
        self.overscan = 0.1 #: the plotted range is M{[min-o,max+o]} with M{o=(max-min)*overscan}
    def setTerminal(self,g,filename):
        """Configure the gnuplot instance to render a PNG into self.directory."""
        g("set terminal png small transparent truecolor nocrop")
        g("set output '%s/%s.png'" % (self.directory,filename))
    def initGnuplot2D(self,filename="plot",xlabel=None,ylabel=None,title=None,xrange_=None,yrange=None,x_logscale=0,y_logscale=0):
        """Create and return a Gnuplot instance prepared for a 2D plot.

        Labels left as None are omitted; ranges left as None fall back
        to gnuplot's autoscale.
        """
        import Gnuplot
        g = Gnuplot.Gnuplot(debug=0)
        self.setTerminal(g,filename)
        if xlabel is not None: g.xlabel(xlabel)
        if ylabel is not None: g.ylabel(ylabel)
        if title is not None: g.title(title)
        if xrange_ is not None: g('set xrange [%f:%f]' % xrange_)
        else: g('set autoscale x')
        if yrange is not None: g('set yrange [%f:%f]' % yrange)
        else: g('set autoscale y')
        # A log scale re-enables autoscaling so it never clashes with a
        # fixed range on the same axis.
        if x_logscale: g('set logscale x'); g('set autoscale x')
        if y_logscale: g('set logscale y'); g('set autoscale y')
        return g
    def initGnuplot3D(self,filename="plot3D",xlabel=None,ylabel=None,zlabel=None,title=None,xrange_=None,yrange=None,zrange=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Create and return a Gnuplot instance prepared for a 3D surface plot."""
        import Gnuplot
        g = Gnuplot.Gnuplot(debug=0)
        self.setTerminal(g,filename)
        if xlabel is not None: g.xlabel(xlabel)
        if ylabel is not None: g.ylabel(ylabel)
        if zlabel is not None: g("set zlabel '%s'" % zlabel)
        if title is not None: g.title(title)
        if xrange_ is not None: g('set xrange [%f:%f]' % xrange_)
        else: g('set autoscale x')
        if yrange is not None: g('set yrange [%f:%f]' % yrange)
        else: g('set autoscale y')
        if zrange is not None: g('set zrange [%f:%f]' % zrange)
        else: g('set autoscale z')
        if x_logscale: g('set logscale x');g('set autoscale x')
        if y_logscale: g('set logscale y');g('set autoscale y')
        if z_logscale: g('set logscale z');g('set autoscale z')
        # Surface rendering options: hidden line removal, smoothed pm3d
        # color map and contour lines drawn on the surface.
        g('set style data lines')
        g('set hidden')
        g('set pm3d at s')
        g('set pm3d ftriangles interpolate 50,50')
        g('set contour surface')
        return g
    def getValues(self,v):
        """Return (x_min,x_max,values) for the variable's adjective sets."""
        return self.getValuesSets(getSets(v))
    def getValuesSets(self,sets):
        """Return (x_min,x_max,values) for a dict of sets.

        The raw range is widened on both sides by self.overscan of its
        width; values contains the widened borders plus all important
        points of the sets.
        """
        (x_min,x_max) = getGlobalMinMax(sets)
        width = x_max - x_min
        x_min = x_min - self.overscan * width
        x_max = x_max + self.overscan * width
        width = x_max - x_min
        values = [x_min]+getPoints(sets)+[x_max]
        return (x_min,x_max,values)
    def createDoc(self,system):
        """create plots of all variables defined in the given system.

        Variables fuzzified/defuzzified via Dict carry no plottable
        membership functions and are skipped with a notice.
        """
        from Redist.pyfuzzy.OutputVariable import OutputVariable
        from Redist.pyfuzzy.InputVariable import InputVariable
        import Redist.pyfuzzy.defuzzify.Dict
        import Redist.pyfuzzy.fuzzify.Dict
        for name,var in system.variables.items():
            if isinstance(var,OutputVariable) and isinstance(var.defuzzify,Redist.pyfuzzy.defuzzify.Dict.Dict):
                print ("ignore variable %s because it is of type OutputVariable => Dict" % name)
            elif isinstance(var,InputVariable) and isinstance(var.fuzzify,Redist.pyfuzzy.fuzzify.Dict.Dict):
                print( "ignore variable %s because it is of type InputVariable => Dict" % name)
            else:
                self.createDocVariable(var,name)
    def createDocVariable(self,v,name,x_logscale=0,y_logscale=0):
        """Creates a 2D plot of a variable"""
        self.createDocSets(getSets(v),name,x_logscale,y_logscale,description=v.description,units=v.unit)
    def createDocSets(self,sets,name,x_logscale=0,y_logscale=0,description=None,units=None):
        """Creates a 2D plot of dict of sets"""
        import Gnuplot
        import Gnuplot.funcutils
        import Redist.pyfuzzy.set.Polygon
        # sort sets by lowest x values and higher membership values next
        def sort_key(a):
            s = sets[a]
            x = s.getIntervalGenerator().nextInterval(None,None)
            return (x,-s(x))
        (x_min,x_max,x) = self.getValuesSets(sets)
        # calculate values
        plot_items = []
        for s_name in sorted(sets,key=sort_key):
            s = sets[s_name]
            if isinstance(s,Redist.pyfuzzy.set.Polygon.Polygon):
                # Polygons already know their defining points; just pin
                # the curve to the plotted range at both ends.
                p = [(x_min,s(x_min))] + s.points + [(x_max,s(x_max))]
                plot_item = Gnuplot.PlotItems.Data(p,title=s_name)
            else:
                # Other set types are sampled at all important x values.
                plot_item = Gnuplot.funcutils.compute_Data(x,s,title=s_name)
            plot_items.append(plot_item)
        xlabel = description or ""
        if units is not None:
            xlabel += " [%s]" % units
        g = self.initGnuplot2D(filename=name,xlabel=xlabel,ylabel="membership",title=name,xrange_=(x_min,x_max),yrange=(-0.2,1.2),x_logscale=x_logscale,y_logscale=y_logscale)
        g('set style fill transparent solid 0.5 border')
        g('set style data filledcurves y1=0')
        g.plot(*plot_items)
        g.close()
    def create2DPlot(self,system,x_name,y_name,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0):
        """Creates a 2D plot of an input variable and an output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of output variable used for y coordinate values
        @type y_name: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        """
        import Gnuplot
        import Gnuplot.funcutils
        # BUG FIX: the dicts used to be shared mutable default arguments,
        # so values leaked between calls; create fresh dicts instead.
        if input_dict is None: input_dict = {}
        if output_dict is None: output_dict = {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        def f(x,
            system=system,
            x_name=x_name,
            y_name=y_name,
            input_dict=input_dict,
            output_dict=output_dict):
            # Evaluate the fuzzy system for one x value and return the
            # defuzzified output.
            input_dict[x_name] = x
            output_dict[y_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[y_name]
        g = self.initGnuplot2D(filename=x_name+"_"+y_name,xlabel=x_name,ylabel=y_name,title=y_name+"=f("+x_name+")",xrange_=(x_min,x_max),x_logscale=x_logscale,y_logscale=y_logscale)
        g('set style data lines')
        g.plot(Gnuplot.funcutils.compute_Data(x, f))
        g.close()
    def create3DPlot(self,system,x_name,y_name,z_name,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Creates a 3D plot of 2 input variables and an output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of input variable used for y coordinate values
        @type y_name: string
        @param z_name: name of output variable used for z coordinate values
        @type z_name: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        @param z_logscale: use logarithmic scale for z values
        @type z_logscale: bool
        """
        import Gnuplot
        import Gnuplot.funcutils
        # BUG FIX: avoid shared mutable default arguments (see create2DPlot).
        if input_dict is None: input_dict = {}
        if output_dict is None: output_dict = {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        (y_min,y_max,y) = self.getValues(system.variables[y_name])
        def f(x,y,
            system=system,
            x_name=x_name,
            y_name=y_name,
            z_name=z_name,
            input_dict=input_dict,
            output_dict=output_dict):
            # Evaluate the fuzzy system for one (x, y) grid point.
            input_dict[x_name] = x
            input_dict[y_name] = y
            output_dict[z_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[z_name]
        g = self.initGnuplot3D(filename=x_name+"_"+y_name+"_"+z_name,xlabel=x_name,ylabel=y_name,zlabel=z_name,title="%s=f(%s,%s)" % (z_name,x_name,y_name),xrange_=(x_min,x_max),yrange=(y_min,y_max),x_logscale=x_logscale,y_logscale=y_logscale,z_logscale=z_logscale)
        g.splot(Gnuplot.funcutils.compute_GridData(x,y, f,binary=0))
        g.close()
    def create3DPlot_adjective(self,system,x_name,y_name,z_name,adjective,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Creates a 3D plot of 2 input variables and an adjective of the output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of input variable used for y coordinate values
        @type y_name: string
        @param z_name: name of output variable used for z coordinate values
        @type z_name: string
        @param adjective: name of adjective of output variable used for z coordinate values
        @type adjective: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        @param z_logscale: use logarithmic scale for z values
        @type z_logscale: bool
        """
        import Gnuplot
        import Gnuplot.funcutils
        # BUG FIX: avoid shared mutable default arguments (see create2DPlot).
        if input_dict is None: input_dict = {}
        if output_dict is None: output_dict = {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        (y_min,y_max,y) = self.getValues(system.variables[y_name])
        def f(x,y,
            system=system,
            x_name=x_name,
            y_name=y_name,
            z_name=z_name,
            adjective=adjective,
            input_dict=input_dict,
            output_dict=output_dict):
            # Evaluate the system and return the membership value of the
            # requested output adjective for this (x, y) grid point.
            input_dict[x_name] = x
            input_dict[y_name] = y
            output_dict[z_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[z_name][adjective]
        g = self.initGnuplot3D(filename=x_name+"_"+y_name+"_"+z_name+"_"+adjective,xlabel=x_name,ylabel=y_name,zlabel=z_name,title="%s.%s=f(%s,%s)" % (z_name,adjective,x_name,y_name),xrange_=(x_min,x_max),yrange=(y_min,y_max),zrange=(0,1),x_logscale=x_logscale,y_logscale=y_logscale,z_logscale=z_logscale)
        g("set xyplane at 0")
        g("set cntrparam levels incremental 0.1,0.2,1.0")
        g.splot(Gnuplot.funcutils.compute_GridData(x,y, f,binary=0))
        g.close()
| true |
f2bdfc0c28f24d343005c2eeee77deddfc91538c | Python | MaksimShumko/scraper | /OtomotoScraper.py | UTF-8 | 1,356 | 2.78125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import locale
import numpy as np
locale.setlocale(locale.LC_NUMERIC,"pl")
link = ("https://www.otomoto.pl/osobowe/bmw"
"?search%5Bfilter_enum_damaged%5D=0&search%5Bfilter_enum_registered%5D="
"1&search%5Bfilter_enum_no_accident%5D=1&search%5Border%5D=filter_float_price"
"%3Aasc&search%5Bbrand_program_id%5D%5B0%5D=&search%5Bcountry%5D=")
priceSum = []
def parseLink(link):
    """Fetch one otomoto result page, print every offer price and append
    it to the module-level priceSum list; return the parsed soup so the
    caller can look for pagination links."""
    # No 'global' statement needed: priceSum is only mutated in place
    # via append(), never rebound.
    page = requests.get(link)
    soup = BeautifulSoup(page.content, 'html.parser')
    prices = soup.find_all(class_="offer-price__number ds-price-number")
    for price in prices:
        # Prices use a space as thousands separator; strip it and parse
        # with the Polish locale configured at module level.
        # NOTE(review): assumes the separator is a plain ASCII space -
        # confirm the pages do not use non-breaking spaces instead.
        intPrice = locale.atof(price.span.string.replace(' ',''))
        print(intPrice)
        priceSum.append(intPrice)
    return soup
def removeOutliers(priceSum):
    """Return the prices whose distance from the mean is strictly below
    two standard deviations, as a numpy array."""
    prices = np.array(priceSum)
    max_deviations = 2
    threshold = max_deviations * np.std(prices)
    keep = abs(prices - np.mean(prices)) < threshold
    return prices[keep]
# Crawl every result page: start at page 1 and keep following as long as
# a "next" pagination link (CSS class "next abs") is present.
count = 1
soup = parseLink(link)
while len(soup.find_all(class_="next abs")) > 0:
    count += 1
    soup = parseLink(link + "&page=" + str(count))
    print(count)
# Drop statistical outliers and print the average of the remaining prices.
no_outliers = removeOutliers(priceSum)
print(np.sum(no_outliers)/len(no_outliers))
9ff4d66f722a253084d1af11e611cbac23f9fb9f | Python | Aasthaengg/IBMdataset | /Python_codes/p02646/s632645984.py | UTF-8 | 134 | 2.75 | 3 | [] | no_license | a,v=map(int,input().split())
# b, w: the runner's starting position and speed
# (a, v - the chaser's position and speed - were read on the line above).
b,w=map(int,input().split())
# t: number of time units available for the chase.
t=int(input())
# k: initial distance between the two.
k=abs(b-a)
# s: change of the gap per time unit (s <= 0 means the gap never grows).
s=w-v
# The chaser succeeds iff the gap does not grow and closes within t units
# (k + t*s <= 0).
print("YES" if s<=0 and k+t*s<=0 else "NO")
74af42fca4b53c73eb3f3dbf30bccd90dbd95928 | Python | fengmingshan/python | /业务感知_提取eob_bob(顺序版).py | UTF-8 | 1,852 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 3 15:34:29 2018
@author: Administrator
"""
import os
import json
import base64
import gzip
from urllib import parse
from io import BytesIO
file_path = r'D:\Packet'+'\\'
out_path = r'D:\eob_bob'+'\\'
file_name = '20180110-1.txt'
file = file_path + file_name
eob_file_out = out_path + file_name[:-4]+'_eob.txt'
bob_file_out = out_path + file_name[:-4]+'_bob.txt'
def gzip_uncompress(c_data):
    '''解压缩gzip数据,返回原始字节。

    Decompress gzip-compressed bytes and return the raw payload.

    :param c_data: gzip-compressed data as a bytes object
    :return: the decompressed bytes
    '''
    # Wrap the in-memory bytes in a file-like object for GzipFile; the
    # context manager guarantees the stream is closed even if reading
    # fails (replaces the original try/finally).
    buf = BytesIO(c_data)
    with gzip.GzipFile(mode='rb', fileobj=buf) as f:
        return f.read()
# Read the capture file; the interesting payload is everything after the
# first '=' on the last line. The context manager replaces the original
# never-closed file handle.
with open(file_path + file_name, 'r', encoding='utf-8') as file_content:
    text = file_content.readlines()
text_tmp = text[-1].split('=', 1)
text_tmp1 = text_tmp[-1]
# The payload is URL-encoded JSON.
text_url_decode = parse.unquote(text_tmp1)
text_json_decode = json.loads(text_url_decode)
# Entry 0 carries the eob data, entry 1 the bob data; both base64 encoded.
eob = text_json_decode[0]['data']
bob = text_json_decode[1]['data']
eob = bytes(eob, encoding='utf-8')
bob = bytes(bob, encoding='utf-8')
eob_b64_decode = base64.b64decode(eob)
bob_b64_decode = base64.b64decode(bob)
# Strip the fixed-size trailer before gzip decompression.
# NOTE(review): the 388-byte trailer length is taken as given here -
# confirm against the capture format specification.
eob_comp = eob_b64_decode[:-388]
# BUG FIX: this previously sliced eob_b64_decode again, so the "bob"
# output file just contained another copy of the eob data.
bob_comp = bob_b64_decode[:-388]
eob_uncompress = gzip_uncompress(eob_comp)
bob_uncompress = gzip_uncompress(bob_comp)
# Append the decompressed payloads to their respective output files; the
# context managers replace the manual close() calls.
with open(eob_file_out, 'a', encoding='utf-8') as f:
    f.write(eob_uncompress.decode('utf-8')+'\n')
with open(bob_file_out, 'a', encoding='utf-8') as f:
    f.write(bob_uncompress.decode('utf-8')+'\n')
| true |
7b5c8ee694908e583f73925bdca0ff67566ae3c8 | Python | garysb/skirmish | /server/lib/logger.py | UTF-8 | 7,199 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# vim: set ts=8 sw=8 sts=8 list nu:
import threading
import socket
import time
from queue import Queue
from queue import Empty as QueueEmpty
class Logger(threading.Thread):
    """ The Logger class/thread creates a socket server to handle our
    connections from a client system. The sockets interact with the other
    threads to execute commands on the system. To do this, it calls the
    global method list defined within our daemon to decide which thread has
    the method we are trying to call.

    The thread fans every message put on ``self.queue`` out to all
    registered clients (the file logger plus one handler per connected
    TCP client).
    """
    # Create a dismisser and event locker (aka mutex).
    # dismiss: cleared to ask the logger (and its clients) to shut down.
    # client_list/client_lock: registry of active log consumers, guarded
    # by the lock because client threads add/remove themselves.
    dismiss = threading.Event()
    client_list = []
    client_lock = threading.Lock()
    def __init__(self):
        # Initiate the threader and define the dismisser
        threading.Thread.__init__(self, None)
        Logger.dismiss.set()
        # Instantiate a module wide queue (producers elsewhere put log
        # records shaped like {'type','source','message'} on it)
        self.queue = Queue()
        # Logger configuration options
        # NOTE(review): 'config', 'configparser' and 'sys' are not
        # imported in this module - presumably injected as globals by
        # the daemon that loads it; confirm before running standalone.
        try:
            # Set the configuration section name
            section = 'logger'
            # Check if the config has the section we need
            if not config.has_section(section):
                # Add a logger section and the default values for it
                config.add_section(section)
                config.set(section, 'host', 'localhost')
                config.set(section, 'port', 30406)
                config.set(section, 'listen', 5)
                config.set(section, 'timeout', 1)
                config.set(section, 'logfile', 'skirmish.log')
            # Store the configuration variables locally, falling back to
            # hard-coded defaults when individual options are missing
            self.host = config.get(section, 'host') if (config.has_option(section, 'host')) else ''
            self.port = config.getint(section, 'port') if (config.has_option(section, 'port')) else 30406
            self.listen = config.getint(section, 'listen') if (config.has_option(section, 'listen')) else 5
            self.timeout = config.getint(section, 'timeout') if (config.has_option(section, 'timeout')) else 1
            self.logfile = config.get(section, 'logfile') if (config.has_option(section, 'logfile')) else 'skirmish.log'
        # An exception was thown
        except configparser.Error:
            # Add an entry into the logs
            message = 'error processing configuration options'
            self.queue.put({'type':'error','source':'logger','message':message})
            # Report the error to the console and exit
            print('Error starting logging system')
            sys.exit(1)
    def run(self):
        """
        Create two different types of logging systems. The first type
        is a file logger that writes the log messages into a file
        specified in the configuration file or from the console.
        The second log type is a tcp socket that pushes log data
        to a tcp port.
        """
        # Create our file log thread and register it as a log consumer
        Logger.client_lock.acquire()
        file_client = handle_filelog(self.logfile)
        file_client.setName('fileThread')
        Logger.client_list.append(file_client)
        Logger.client_lock.release()
        file_client.start()
        # Report that the file logger has started
        message = 'file logging started'
        self.queue.put({'type':'notice', 'source':'logger', 'message':message})
        # Bind the logger tcp server to a socket; the accept timeout
        # keeps the loop below responsive to queue messages and dismissal
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind((self.host, self.port))
        server_socket.listen(self.listen)
        server_socket.settimeout(self.timeout)
        # Main loop: fan queued messages out to every client, then wait
        # (up to the socket timeout) for a new tcp connection.
        # NOTE(review): Event.isSet() is a deprecated alias of is_set()
        # on modern Python 3 - confirm the target interpreter version.
        while Logger.dismiss.isSet():
            try:
                msg = self.queue.get(block=False, timeout=False)
                for client in Logger.client_list:
                    client.queue.put(msg)
            except QueueEmpty:
                pass
            try:
                # Wait for a connection from a client; on success spawn
                # a handler thread and register it as a log consumer
                client_socket, address = server_socket.accept()
                Logger.client_lock.acquire()
                new_client = handle_connection(client_socket, address, self.host)
                Logger.client_list.append(new_client)
                Logger.client_lock.release()
                new_client.start()
                message = address[0]+' connected'
                self.queue.put({'type':'notice', 'source':'logger', 'message':message})
            except socket.timeout:
                # No client connected within the timeout - loop again
                pass
class handle_connection(threading.Thread):
    """ When a client connects to the tcp logger, start a new thread to
    handle the connection. The thread polls its log queue (fed by the
    Logger fan-out loop) and forwards each formatted message to the
    client socket.
    """
    def __init__(self, client_socket, address, host):
        """@param client_socket: accepted socket for this client
        @param address: (ip, port) tuple of the remote peer
        @param host: local bind address, used in the welcome banner"""
        threading.Thread.__init__(self, None)
        self.queue = Queue()
        self.client_socket = client_socket
        self.client_socket.settimeout(0.5)
        self.address = address
        self.host = host
    def run(self):
        """Forward queued log messages to the client until the logger is
        dismissed, then close the connection."""
        # Send the connection welcome message to the client.
        # BUG FIX: sockets transport bytes; sending a str raises
        # TypeError on Python 3, so encode before sending.
        self.client_socket.send(self.set_welcome().encode('utf-8'))
        # BUG FIX: this used to be 'while True', which made the cleanup
        # code below unreachable; tie the loop to the shared dismiss
        # event like Logger.run does.
        while Logger.dismiss.isSet():
            # Try to fetch the next message and push it to the client
            try:
                raw = self.queue.get(True, 0.5)
                msg = '[{0}] [{1}] [{2}] {3}\n'.format(time.asctime(), raw['source'], raw['type'], raw['message'])
                self.client_socket.send(msg.encode('utf-8'))
            except QueueEmpty:
                # Nothing queued within the timeout - poll again
                continue
        # Shutdown the client tcp connection and deregister ourselves
        self.client_socket.shutdown(2)
        self.client_socket.close()
        Logger.client_lock.acquire()
        Logger.client_list.remove(self)
        Logger.client_lock.release()
        # Report that the client has disconnected
        message = self.address[0]+' disconnected'
        logger.queue.put({'type':'notice', 'source':'logger', 'message':message})
    def set_welcome(self):
        """Build the FTP-style '220' greeting line sent on connect."""
        # BUG FIX: this referenced self.bind_addr, which is never set
        # anywhere (__init__ stores the bind address as self.host).
        welcome = '220 {0} Skirmish logs; {1}\n'.format(self.host, time.asctime())
        return welcome
class handle_filelog(threading.Thread):
    """ The handle_filelog object/thread consumes its queue and appends
    every log message to a file on the filesystem.
    """
    # Class-level queue shared by all instances; in practice only one
    # file logger is created by Logger.run.
    queue = Queue()
    def __init__(self, logfile='skirmish.log'):
        """@param logfile: path of the log file to append to"""
        threading.Thread.__init__(self, None)
        self.logfile = logfile
    def run(self):
        """Consume the queue forever, appending each formatted message
        to the log file."""
        while True:
            # Fetch the next message from the queue (0.5s timeout keeps
            # the loop responsive); only the get() can raise QueueEmpty,
            # so the try block is kept minimal.
            try:
                raw = self.queue.get(True, 0.5)
            except QueueEmpty:
                continue
            msg = '[{0}] [{1}] [{2}] {3}\n'.format(time.asctime(), raw['source'], raw['type'], raw['message'])
            # Open in append mode per message; the context manager
            # guarantees the handle is closed even if the write fails
            # (replaces the manual open/close of the original).
            with open(self.logfile, 'a') as log_file:
                log_file.write(msg)
if __name__ == '__main__':
    # Run some unit tests to check we have a working socket server
    logger = Logger()
    logger.start()
    # Add some random test messages to the queue; the sleeps spread the
    # messages out over time to mimic real logging activity.
    time.sleep(3)
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 1'})
    time.sleep(0.5)
    logger.queue.put({'type':'error','source':'logger','message':'Unit test 2'})
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 3'})
    time.sleep(2)
    logger.queue.put({'type':'warning','source':'logger','message':'Unit test 4'})
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 5'})
    time.sleep(0.1)
    logger.queue.put({'type':'warning','source':'logger','message':'Unit test 6'})
    time.sleep(0.1)
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 7'})
    time.sleep(2)
    logger.queue.put({'type':'error','source':'logger','message':'Unit test 8'})
    time.sleep(1)
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 9'})
    logger.queue.put({'type':'notice','source':'logger','message':'Unit test 10'})
    # Block until the logger thread exits (it runs until dismissed).
    logger.join()
| true |
00126b97fadb7ff67275c280b2ed75ed5c407e25 | Python | elomedah/iris-2020 | /MapReduce-Python-master/Exemlpe 6/mapper.py | UTF-8 | 606 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
import sys

# Map step of an anagram-grouping MapReduce job (Python 2 / Hadoop
# streaming): for every word on stdin, emit
# "<sorted characters of the word>\t<word>" so the reducer can group
# words that are anagrams of each other under the same key.
wordList = dict()
# input comes from STDIN (standard input)
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # split the line into words
    words = line.split()
    # emit one key/value pair per word
    for word in words:
        charList = list()
        for char in word:
            charList.append(char) # collect each character of the word for sorting
        charList.sort() # sort the characters to build the key
        wordList[word]="".join(charList) # remember word -> sorted-character key
        print '%s\t%s' % (wordList[word],word)# emit the key/value pair
e91118db94961dbff8c691d2557eb4423f1ef43f | Python | guilhom34/images_recognition | /training_files.py | UTF-8 | 9,097 | 2.609375 | 3 | [] | no_license | import collections
import io
import math
import os
import pathlib
import random
from six.moves import urllib
from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sk_metrics
import time
import zipfile
IMAGES_DIR = './images'
TRAIN_FRACTION = 0.8
RANDOM_SEED = 2018
#print("importer les données")
def download_images():
    """Resolve the configured image directory as a pathlib.Path and print it."""
    data_dir = pathlib.Path(IMAGES_DIR)
    print(data_dir)
print("crée le modèle")
def create_model(train_dataset_fp):
    """Build a small dense classifier and run it once on the first example.

    NOTE(review): the model is never returned and the softmax result is
    discarded - this looks unfinished; confirm the intended contract.
    """
    # Fully connected classifier: flattened 28x28 input -> 128 ReLU
    # units -> dropout -> 10 raw logits (one per class).
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10)
    ])
    print(model)
    # Run the untrained model on the first example to get raw logits.
    # NOTE(review): assumes train_dataset_fp is sliceable into a batch
    # of 28x28 arrays - confirm against the caller.
    predictions = model(train_dataset_fp[:1]).numpy()
    print(predictions)
    print("convertie en prediction:")
    # Convert the logits to class probabilities (result is discarded).
    tf.nn.softmax(predictions).numpy()
# def make_train_and_test_sets():
# """Split the data into train and test sets and get the label classes."""
# train_examples, test_examples = [], []
# shuffler = random.Random(RANDOM_SEED)
# is_root = True
# for (dirname, subdirs, filenames) in tf.io.gfile.walk(IMAGES_DIR):
# # The root directory gives us the classes
# if is_root:
# subdirs = sorted(subdirs)
# classes = collections.OrderedDict(enumerate(subdirs))
# label_to_class = dict([(x, i) for i, x in enumerate(subdirs)])
# is_root = False
# # The sub directories give us the image files for training.
# elif subdirs:
# subdirs = sorted(subdirs)
# label_to_class = dict([(x, i) for i, x in enumerate(subdirs)])
# subdirs = False
# print(subdirs)
# else:
# filenames.sort()
# shuffler.shuffle(filenames)
# full_filenames = [os.path.join(dirname, f) for f in filenames]
# label = dirname.split('/')[-1]
# label_class = label_to_class[label]
# # An example is the image file and it's label class.
# examples = list(zip(full_filenames, [label_class] * len(filenames)))
# num_train = int(len(filenames) * TRAIN_FRACTION)
# train_examples.extend(examples[:num_train])
# test_examples.extend(examples[num_train:])
#
# shuffler.shuffle(train_examples)
# shuffler.shuffle(test_examples)
# return train_examples, test_examples, classes
#
# # Download the images and split the images into train and test sets.
# download_images()
# TRAIN_EXAMPLES, TEST_EXAMPLES, CLASSES = make_train_and_test_sets()
# NUM_CLASSES = len(CLASSES)
#
# print('\nThe dataset has %d label classes: %s' % (NUM_CLASSES, CLASSES.values()))
# print('There are %d training images' % len(TRAIN_EXAMPLES))
# print('there are %d test images' % len(TEST_EXAMPLES))
#
#
# LEARNING_RATE = 0.01
# print(LEARNING_RATE)
# tf.keras.models.Sequential()
# # Load a pre-trained TF-Hub module for extracting features from images. We've
# # chosen this particular module for speed, but many other choices are available.
# image_module = hub.Module('https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/5')
# print(image_module)
# # Preprocessing images into tensors with size expected by the image module.
# encoded_images = tf.placeholder(tf.string, shape=[None])
# image_size = hub.get_expected_image_size(image_module)
# print(image_size)
#
# def decode_and_resize_image(encoded):
# decoded = tf.image.decode_jpeg(encoded, channels=3)
# decoded = tf.image.convert_image_dtype(decoded, tf.float32)
# return tf.image.resize_images(decoded, image_size)
#
#
# batch_images = tf.map_fn(decode_and_resize_image, encoded_images, dtype=tf.float32)
#
# # The image module can be applied as a function to extract feature vectors for a
# # batch of images.
# features = image_module(batch_images)
#
#
# def create_model(features):
# """Build a model for classification from extracted features."""
# # Currently, the model is just a single linear layer. You can try to add
# # another layer, but be careful... two linear layers (when activation=None)
# # are equivalent to a single linear layer. You can create a nonlinear layer
# # like this:
# # layer = tf.layers.dense(inputs=..., units=..., activation=tf.nn.relu)
# layer = tf.layers.dense(inputs=features, units=NUM_CLASSES, activation=None)
# return layer
#
#
# # For each class (kind of flower), the model outputs some real number as a score
# # how much the input resembles this class. This vector of numbers is often
# # called the "logits".
# logits = create_model(features)
# labels = tf.placeholder(tf.float32, [None, NUM_CLASSES])
#
# # Mathematically, a good way to measure how much the predicted probabilities
# # diverge from the truth is the "cross-entropy" between the two probability
# # distributions. For numerical stability, this is best done directly from the
# # logits, not the probabilities extracted from them.
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
# cross_entropy_mean = tf.reduce_mean(cross_entropy)
#
# # Let's add an optimizer so we can train the network.
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)
# train_op = optimizer.minimize(loss=cross_entropy_mean)
#
# # The "softmax" function transforms the logits vector into a vector of
# # probabilities: non-negative numbers that sum up to one, and the i-th number
# # says how likely the input comes from class i.
# probabilities = tf.nn.softmax(logits)
#
# # We choose the highest one as the predicted class.
# prediction = tf.argmax(probabilities, 1)
# correct_prediction = tf.equal(prediction, tf.argmax(labels, 1))
#
# # The accuracy will allow us to eval on our test set.
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#
# How long will we train the network (number of batches).
# NUM_TRAIN_STEPS = 100
# # How many training examples we use in each step.
# TRAIN_BATCH_SIZE = 10
# # How often to evaluate the model performance.
# EVAL_EVERY = 10
#
# def get_batch(batch_size=None, test=False):
# """Get a random batch of examples."""
# examples = TEST_EXAMPLES if test else TRAIN_EXAMPLES
# batch_examples = random.sample(examples, batch_size) if batch_size else examples
# return batch_examples
#
# def get_images_and_labels(batch_examples):
# images = [get_encoded_image(e) for e in batch_examples]
# one_hot_labels = [get_label_one_hot(e) for e in batch_examples]
# return images, one_hot_labels
#
# def get_label_one_hot(example):
# """Get the one hot encoding vector for the example."""
# one_hot_vector = np.zeros(NUM_CLASSES)
# np.put(one_hot_vector, get_label(example), 1)
# return one_hot_vector
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# for i in range(NUM_TRAIN_STEPS):
# # Get a random batch of training examples.
# train_batch = get_batch(batch_size=TRAIN_BATCH_SIZE)
# batch_images, batch_labels = get_images_and_labels(train_batch)
# # Run the train_op to train the model.
# train_loss, _, train_accuracy = sess.run(
# [cross_entropy_mean, train_op, accuracy],
# feed_dict={encoded_images: batch_images, labels: batch_labels})
# is_final_step = (i == (NUM_TRAIN_STEPS - 1))
# if i % EVAL_EVERY == 0 or is_final_step:
# # Get a batch of test examples.
# test_batch = get_batch(batch_size=None, test=True)
# batch_images, batch_labels = get_images_and_labels(test_batch)
# # Evaluate how well our model performs on the test set.
# test_loss, test_accuracy, test_prediction, correct_predicate = sess.run(
# [cross_entropy_mean, accuracy, prediction, correct_prediction],
# feed_dict={encoded_images: batch_images, labels: batch_labels})
# print('Test accuracy at step %s: %.2f%%' % (i, (test_accuracy * 100)))
#
#
# def show_confusion_matrix(test_labels, predictions):
# """Compute confusion matrix and normalize."""
# confusion = sk_metrics.confusion_matrix(
# np.argmax(test_labels, axis=1), predictions)
# confusion_normalized = confusion.astype("float") / confusion.sum(axis=1)
# axis_labels = list(CLASSES.values())
# ax = sns.heatmap(
# confusion_normalized, xticklabels=axis_labels, yticklabels=axis_labels,
# cmap='Blues', annot=True, fmt='.2f', square=True)
# plt.title("Confusion matrix")
# plt.ylabel("True label")
# plt.xlabel("Predicted label")/tmp
#
#
#
#
# show_confusion_matrix(batch_labels, test_prediction) | true |
47815afd7e8f7eac99bc33e5f6efb437b437ec50 | Python | ryubro/Hullpy | /hull.py | UTF-8 | 4,110 | 2.625 | 3 | [] | no_license | import requests
class Hull:
def __init__(self,
platform_id,
org_url,
platform_secret=None,
sentry=None):
self.baseurl = "%s/api/v1" % (org_url,)
self.platform_id = platform_id
self.platform_secret = platform_secret
self.sentry = sentry
def _auth_headers(self):
auth_headers = {
"Hull-App-Id": self.platform_id
}
if self.platform_secret is not None:
auth_headers["Hull-Access-Token"] = self.platform_secret
return auth_headers
def _req(self, method, url, data=None):
if not url.startswith("/"):
url = "/" + url
req_funcs = {
"get": requests.get,
"post": requests.post,
"put": requests.put,
"delete": requests.delete
}
payloads = {
"get": {},
"post": {}
}
if data is not None:
payloads = {
"get": {
"params": data
},
"post": {
"json": data
},
"put": {
"json": data
}
}
return req_funcs[method.lower()](self.baseurl + url,
headers=self._auth_headers(),
**payloads[method.lower()])
def _parse_json(self, response):
parsed_data = response.text
try:
parsed_data = response.json()
except ValueError:
print response
pass
return parsed_data
def get(self, endpoint, data=None):
response = self._req("get", endpoint, data)
return self._parse_json(response)
def get_all(self, endpoint, data=None):
if data is None:
data = {}
unduplicated_data = []
ids = []
page = 1
is_there_new_data = True
while is_there_new_data:
partial_data = []
try:
data.update({"per_page": 100, "page": page})
request = self._req("get", endpoint, data)
partial_data = request.json()
except ValueError:
raise self.JSONParseError()
except requests.exceptions.RequestException:
raise self.RequestException()
duplicate_count = 0
duplicated_ids = []
for obj in partial_data:
if obj["id"] in ids:
duplicate_count += 1
duplicated_ids += obj["id"]
else:
ids.append(obj["id"])
unduplicated_data.append(obj)
is_there_new_data = len(partial_data) != duplicate_count
page += 1
# reports when there is partially duplicated data
if is_there_new_data and duplicate_count != 0:
self.sentry.captureMessage(
"Duplicated data on different page",
tags={
"level": "info"
}, extra=({
"duplicated_ids": reduce(
lambda a, b: "%s;%s" % (a, b),
duplicated_ids),
"retrieved_ids": reduce(
lambda a, b: "%s;%s" % (a, b),
map(lambda hobj: hobj["id"], partial_data))
}))
return unduplicated_data
def put(self, endpoint, data=None):
response = self._req("put", endpoint, data)
return self._parse_json(response)
def post(self, endpoint, data=None):
response = self._req("post", endpoint, data)
return self._parse_json(response)
def delete(self, endpoint):
response = self._req("delete", endpoint)
return self._parse_json(response)
class JSONParseError(ValueError):
pass
class RequestException(requests.exceptions.RequestException):
pass
| true |
e6695b93aa9e72e9534c7a66d0d6f6aa1f6bf888 | Python | arthur-bryan/shopping-cart | /install.py | UTF-8 | 1,604 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import sys
from time import sleep
import platform
APP_PATH = os.path.join(os.getcwd(), 'app.py')
# ICON_FILE = os.path.join(os.getcwd(), 'imagens/carrinho.png')
# EXECUTABLE_FILE = os.path.join(os.getcwd(), 'compras.sh')
PYTHON_VERSION = float(platform.python_version()[:3])
if PYTHON_VERSION < 3.6 or len(str(PYTHON_VERSION)) > 3:
sys.stdout.write(" [ ! ] Versão do Python inválida!\n")
sleep(0.5)
sys.exit(1)
def isUserRoot():
if os.getuid() != 0:
sys.stdout.write(" [ ! ] Necessário privilégios de root (ou sudo)!\n")
return False
else:
return True
def install():
sys.stdout.write(" [ + ] Instalando 'carro-de-compras'.\n")
sys.stdout.write(" [ + ] Criando arquivo '/usr/bin/compras'...\n")
sleep(0.5)
try:
with open("/usr/bin/compras", "w") as file:
file.write("#!/bin/sh\n")
file.write("python{} {}\n".format(PYTHON_VERSION, APP_PATH))
file.close()
except Exception as error:
sys.stdout.write(" [ ! ] Erro: {}\n".format(error))
else:
sys.stdout.write(" [ + ] Gerenciando permissões do arquivo /usr/bin/compras... \n")
os.system('chmod 777 /usr/bin/compras')
sleep(0.5)
sys.stdout.write(" [ + ] Installando 'python3-tk' (tkinter)...\n")
os.system("apt install python3-tk")
sys.stdout.write(" [ + ] Instalado com sucesso! Digite 'compras' no terminal para abrir o programa.\n")
sleep(1)
sys.exit(0)
def main():
if isUserRoot():
install()
if __name__ == '__main__':
main()
| true |
7eee2c6939708779f090c91659389b6adb83823b | Python | scipyargentina/sliceplots | /sliceplots/_util.py | UTF-8 | 435 | 3.5 | 4 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""Utility functions module."""
import numpy as np
def idx_from_val(arr1d, val):
"""Given a 1D array, find index of closest element to given value.
:param arr1d: 1D array of values
:type arr1d: :py:class:`numpy.ndarray`
:param val: element value
:type val: float
:return: element position in the array
:rtype: int
"""
idx = (np.abs(arr1d - val)).argmin()
return idx
| true |
1c2f27907db7626d48bb596dc79051eb2658fd9b | Python | kaustubhagarwal/Basic-Codes | /Upper and Lower Case.py | UTF-8 | 109 | 4.15625 | 4 | [] | no_license | #To Print strings in upper and lower case
l=input("Enter the string ")
print(l.lower())
print(l.upper()) | true |
fbfc467b05978a9bf2f20c8a9a80bd20982327d3 | Python | chaomenghsuan/leetcode | /1856_MaximumSubarrayMin-Product.py | UTF-8 | 814 | 2.609375 | 3 | [] | no_license | from numpy import cumsum
class Solution:
def maxSumMinProduct(self, nums: List[int]) -> int:
cumm = [0] + list(cumsum(nums))
n = len(nums)
res = 0
l, st = [0], [[nums[0], 0]]
for i in range(1, n):
cur = 0
while st and nums[i] <= st[-1][0]:
cur += st.pop()[1] + 1
st.append([nums[i], cur])
l.append(cur)
r, st = [0], [[nums[-1], 0]]
for i in range(n-2, -1, -1):
cur = 0
while st and nums[i] <= st[-1][0]:
cur += st.pop()[1] + 1
st.append([nums[i], cur])
r.append(cur)
r.reverse()
for i in range(n):
res = max(res, nums[i]*(cumm[i + r[i] + 1] - cumm[i-l[i]]))
return res % (10 ** 9 + 7)
| true |
62995fb78c790c7ff642c758093587d0c23a9816 | Python | ganlanshu/leetcode | /subtree-of-another-tree.py | UTF-8 | 1,813 | 3.71875 | 4 | [] | no_license | #coding=utf-8
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSubtree(self, s, t):
"""
判断t是否是s的子树
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
if not s:
return False
if self.is_same(s, t):
return True
return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def is_same(self, p, q):
"""
:param p:
:param q:
:return:
"""
if not p and not q:
return True
if p and q:
return p.val == q.val and self.is_same(p.left, q.left) and self.is_same(p.right, q.right)
return False
def isSubtree1(self, s, t):
def convert(s):
return '^'+str(s.val)+'#' + convert(s.left) + convert(s.right) if s else '$'
return convert(t) in convert(s)
def is_substructure(self, s, t):
"""
子结构和子树不一样,看辅助方法就知道,子树需要完全一样
子结构只要部分一样
判断t是否是s的子结构,参考
https://blog.csdn.net/qq_33431368/article/details/79257029
:param s:
:param t:
:return:
"""
if not s or not t:
return False
return self._substructure(s, t) or self.is_substructure(s.left, t) or self.is_substructure(s.right, t)
def _substructure(self, root1, root2):
if not root2:
return True
if not root1:
return False
return root1.val == root2.val and self._substructure(root1.left, root2.left) and self._substructure(root1.right, root2.right)
| true |
7bc6a1aa6ba183f344fb3eac517c260e8d399f2e | Python | Ravi-Nikam/Machine_learning_programs-or-Algorithems | /Logistic_regression.py | UTF-8 | 3,428 | 3.453125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 17:51:43 2020
@author: Ravi Nikam
"""
# Logistic Regression
# Suv is purchsed or not 0 means not 1 means yes
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('Social_Network_Ads.csv')
data.head()
x=data.iloc[:,[2,3]].values
y=data.iloc[:,-1].values
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.25,random_state=0)
x_train
# future scalling we apply (standardrization or normalization) method on it
from sklearn.preprocessing import StandardScaler
st = StandardScaler()
# fit to apply stand or nor both in one and then after we transform it
x_train=st.fit_transform(x_train)
x_train
# in transform we not apply any method we only transform it bcz it test data we didn't need to apply any method on it
x_test = st.transform(x_test)
# fit the logistic regression to the traning set
from sklearn.linear_model import LogisticRegression
lor = LogisticRegression(random_state=0)
lor.fit(x_train,y_train)
# we use to check the predicted value with the actual value
y_prdic=lor.predict(x_test)
# confusion matrix helpful for finding the model accurancy and Error of model
from sklearn.metrics import confusion_matrix
# we add confusion matrix in testing set bcz we have perfome operation on the final outcome
# check actual test value with the predicted value to finding the accuracy and error they check both
con = confusion_matrix(y_test,y_prdic)
# in output there is 11 error(8+3)=11 only and (65+24)= 89 best value of predication
con
# now we plot a classification on graph
from matplotlib.colors import ListedColormap
X_set, y_set = x_train,y_train
# meshgrid function create a grid
# X_set[row,column]
X1,X2 = np.meshgrid(np.arange(start=X_set[:,0].min() - 1,stop=X_set[:,0].max() + 1 ,step=0.01),
np.arange(start=X_set[:,1].min() - 1,stop=X_set[:,1].max() + 1 , step=0.01))
plt.contourf(X1, X2, lor.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = x_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, lor.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | true |
b46733b26fe041b6d92d9bc1c43615f05cc861a3 | Python | themagicbean/Machine-Learning-Course-1 | /7 Natural Language Processing/my_natural_language_processing.py | UTF-8 | 2,912 | 3.421875 | 3 | [] | no_license | # Natural Language Processing
# L191
#making model to predict if review positive or negative
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# read csv can also read tsv files!
# but need to specify delimiter (it's not a comma)
# quoting parameter 3 = to ignore double quotes
# L 192
# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
# list of filler / minor words like is / yours / so etc., for removal
# but includes not & sim! so can reverse tone of some samples
from nltk.corpus import stopwords
# gotta both download and import stopwords list
from nltk.stem.porter import PorterStemmer
# trims words to stems (removes endings)
corpus = [] # L 197, this is the list of cleaned-up reviews
for i in range(0, 1000): #basics in 193-94, loop in 195-96-97
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
# removes all non-letter characters
# first paramter, quotes in bracket w/ hats are what you don't want to remove
# second paramter ensures removed characters are replaced with spaces
# so words don't stick together
# third parameter is on what to apply the rule/remover on (dataset)
review = review.lower()
# L193, changes all letters to lowercase
review = review.split()
# L 194, makes review a list of its words (each word an element)
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# for loop to apply ps (stemming) to non-stopwords words in review (L194 and 195)
review = ' '.join(review)
# L 196 rejoining elements of cleaned review into single string
corpus.append(review)
# adding single string to corpus
# L198 Creating the Bag of Words model (see notes in notepad)
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
# max features reduces sparsity
X = cv.fit_transform(corpus).toarray()
# creates huge sparse matrix (matrix of features)
# L199 trying to reduce sparsity
y = dataset.iloc[:, 1].values
# .iloc takes columns when importing from pandas
# : takes all reviews
# .vallues creates values
# this is dep var
# also look to part 9 dimensionality reduction techniques
# 200
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) | true |
65272fbf46c67f66a469a8ae8c6502737b6fc91a | Python | 19133/rps | /main.py | UTF-8 | 1,913 | 3.609375 | 4 | [] | no_license | import random
# functions go here
def check_rounds():
while True:
response = input ("how many rounds: ")
round_error = "Please type either <enter>" "or an interger that is more than 0\n"
# If infinite mode is not chosen, check response
# is an integer that is more than 0
if response != "":
try:
response = int(response)
if response < 1:
print (round_error)
continue
except ValueError:
print(round_error)
continue
return response
def choice_checker (question):
error = "Please chooosr rock / paper / scissors"
valid = False
while not valid:
#ask the user for choice
response = input(question).lower
if response == "r" or response == "rock":
return response
if response == "s" or response == "scissor" or response == "scissors":
return response
if response == "p" or response == "paper":
return response
elif response == "xxx":
return response
# Main routine
yes_no_list = ["yes", "no"]
rps_list = ["Rock", "paper", "scissors", "xxx"]
rounds_played = 0
choose_instruction = "Please choose rock (r), paper" "(p) or scissors (s)"
user_choice = choice_checker("Choose rock / paper /" "scissors (r/p/s): ")
rounds = check_rounds()
end_game = "no"
while end_game == "no":
print ()
if rounds == "":
heading = "continuous mode:" / "Round {}".format(rounds_played + 1)
else:
heading = "Round {} of " / "{}".format(rounds_played + 1, rounds)
print (heading)
choose = input ()
if choose == 'xxx':
break
else:
heading = "Round {} or {}".format(rounds_played + 1, rounds)
print(heading)
choose = input(choose_instruction)
if rounds_played == rounds -1:
end_game = "yes"
print ("you chose {}".format(choose))
rounds_played += 1
print("thank you for playing")
| true |
6ada0c03b9697b0e63d6f32c26e235ce9d1991fe | Python | alatiera/Ellinofreneia-crawler | /src/launcher.py | UTF-8 | 1,817 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import crawler
import renamer
import file_organizer
import argparse
def opts():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
download = subparsers.add_parser('download', help='Downloads the shows',
aliases=['dl', 'crawl'])
download.add_argument('num', help='Number of backlog you want to download',
type=int)
showtype = download.add_mutually_exclusive_group()
showtype.add_argument('-v', '--video-only', help='Video only',
action='store_true')
showtype.add_argument('-a', '--audio-only', help='Audio only',
action='store_true')
rename = subparsers.add_parser('rename',
help='Renames radio shows to sort properly')
rename.add_argument('ren', action='store_true')
# rename.add_argument('-r', '--recursive', help='Executes recursivly',
# action='store_true')
org = subparsers.add_parser('sort', aliases=['organize'], help="""Organizes
mp3s in a folder structure based on date extracted from the file""")
org.add_argument('sort', action='store_true')
args = parser.parse_args()
return args
def main():
args = opts()
if 'num' in args:
if args.audio_only:
crawler.getshow('radio', args.num)
elif args.video_only:
crawler.getshow('tv', args.num)
else:
crawler.getshow('radio', args.num)
crawler.getshow('tv', args.num)
if 'ren' in args:
# if args.recursive:
# renamer.main(recursive=True)
# else:
renamer.main()
if 'sort' in args:
file_organizer.main()
if __name__ == '__main__':
main()
| true |
9ce70feae1eec76ec91cb37957e05f4201684e86 | Python | Aasthaengg/IBMdataset | /Python_codes/p03029/s122921010.py | UTF-8 | 55 | 2.9375 | 3 | [] | no_license | a,b= map(int, raw_input().split(' '))
print (3*a + b)/2 | true |
365740495d71b63d5522f0f7895ff981931d6a1e | Python | phodiep/BiomarkerLab_QuantileStatistics | /QuantileApp(v1.4).py | UTF-8 | 14,446 | 2.578125 | 3 | [] | no_license | import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats.stats import pearsonr, spearmanr, gmean, ttest_1samp
def clear_terminal():
import os
os.system('cls' if os.name=='nt' else 'clear')
def start_screen():
print '''
===========================================
Welcome to the QuantileApp
Fatty Acids
Version 1.4
Last updated 01.20.2014
===========================================
Written By: Pho Diep (phodiep@gmail.com)
Written in Python 2.7.3
-------------------------------------------'''
def getPeakList(data):
tempList = list()
tempList += {'p':data['p1'],'q':data['q1'],'peakName':'14:0','FAtype':'Saturated','common':'Myristic'},
tempList += {'p':data['p2'],'q':data['q2'],'peakName':'14:1n5','FAtype':'Monounsaturated','common':'<Fix this - unknown>'},
tempList += {'p':data['p3'],'q':data['q3'],'peakName':'15:0','FAtype':'Saturated','common':'Pentadecylic'},
tempList += {'p':data['p4'],'q':data['q4'],'peakName':'16:0','FAtype':'Saturated','common':'Palmitic'},
tempList += {'p':data['p5'],'q':data['q5'],'peakName':'16:1n9t','FAtype':'Trans','common':'7 trans heyadecenoic'},
tempList += {'p':data['p6'],'q':data['q6'],'peakName':'16:1n7t','FAtype':'Trans','common':'Palmitelaidic'},
tempList += {'p':data['p7'],'q':data['q7'],'peakName':'16:1n9c','FAtype':'Monounsaturated','common':'7-hexadecenoic'},
tempList += {'p':data['p8'],'q':data['q8'],'peakName':'16:1n7c','FAtype':'Monounsaturated','common':'Palmitoleic'},
tempList += {'p':data['p9'],'q':data['q9'],'peakName':'17:0','FAtype':'Saturated','common':'Margaric'},
tempList += {'p':data['p10'],'q':data['q10'],'peakName':'U1','FAtype':'unknown','common':'unknown'},
tempList += {'p':data['p11'],'q':data['q11'],'peakName':'17:1n9c','FAtype':'Monounsaturated','common':'heptadecenoic'},
tempList += {'p':data['p12'],'q':data['q12'],'peakName':'18:0', 'FAtype':'Saturated','common':'Stearic'},
tempList += {'p':data['p13'],'q':data['q13'],'peakName':'18:1n10-12t','FAtype':'Trans','common':'transoctadecenoic'},
tempList += {'p':data['p14'],'q':data['q14'],'peakName':'18:1n9t','FAtype':'Trans','common':'Elaidic'},
tempList += {'p':data['p15'],'q':data['q15'],'peakName':'18:1n8t','FAtype':'Trans','common':'transoctadecenoic'},
tempList += {'p':data['p16'],'q':data['q16'],'peakName':'18:1n7t','FAtype':'Trans','common':'transvaccenic'},
tempList += {'p':data['p17'],'q':data['q17'],'peakName':'18:1n6t','FAtype':'Trans','common':'transoctadecenoic'},
tempList += {'p':data['p18'],'q':data['q18'],'peakName':'18:1n8c','FAtype':'Monounsaturated','common':'10-octadecenoic'},
tempList += {'p':data['p19'],'q':data['q19'],'peakName':'18:1n9c','FAtype':'Monounsaturated','common':'Oleic'},
tempList += {'p':data['p20'],'q':data['q20'],'peakName':'18:1n7c','FAtype':'Monounsaturated','common':'cis-vaccenic'},
tempList += {'p':data['p21'],'q':data['q21'],'peakName':'18:1n5c','FAtype':'Monounsaturated','common':'13-octadecenoic'},
tempList += {'p':data['p22'],'q':data['q22'],'peakName':'18:2n6tt','FAtype':'Trans','common':'6-neolaiolic'},
tempList += {'p':data['p23'],'q':data['q23'],'peakName':'U2','FAtype':'unknown','common':'unknown'},
tempList += {'p':data['p24'],'q':data['q24'],'peakName':'18:2n6ct','FAtype':'Trans','common':'cistrans linoelaiolic'},
tempList += {'p':data['p25'],'q':data['q25'],'peakName':'18:2n6tc','FAtype':'Trans','common':'transcis linoelaiolic'},
tempList += {'p':data['p26'],'q':data['q26'],'peakName':'18:2n6','FAtype':'Omega-6','common':'Linoleic'},
tempList += {'p':data['p27'],'q':data['q27'],'peakName':'20:0','FAtype':'Saturated','common':'Arachidic'},
tempList += {'p':data['p28'],'q':data['q28'],'peakName':'18:3n6','FAtype':'Omega-6','common':'Gamma-linolenic'},
tempList += {'p':data['p29'],'q':data['q29'],'peakName':'20:1n9','FAtype':'Monounsaturated','common':'Gondoic'},
tempList += {'p':data['p30'],'q':data['q30'],'peakName':'18:3n3','FAtype':'Omega-3','common':'alpha-Linolenic'},
tempList += {'p':data['p31'],'q':data['q31'],'peakName':'20:2n6','FAtype':'Omega-6','common':'Eicosadienoic'},
tempList += {'p':data['p32'],'q':data['q32'],'peakName':'22:0','FAtype':'Saturated','common':'Behenic'},
tempList += {'p':data['p33'],'q':data['q33'],'peakName':'20:3n6','FAtype':'Omega-6','common':'Dihomo-gamma-linolenic'},
tempList += {'p':data['p34'],'q':data['q34'],'peakName':'22:1n9','FAtype':'Monounsaturated','common':'Erucic'},
tempList += {'p':data['p35'],'q':data['q35'],'peakName':'20:3n3','FAtype':'Omega-3','common':'EicoSaturatedrienoic'},
tempList += {'p':data['p36'],'q':data['q36'],'peakName':'20:4n6','FAtype':'Omega-6','common':'Arachidonic'},
tempList += {'p':data['p37'],'q':data['q37'],'peakName':'23:0','FAtype':'Saturated','common':'Tricosylic'},
tempList += {'p':data['p38'],'q':data['q38'],'peakName':'22:2n6','FAtype':'Omega-6','common':'Docosadienoic'},
tempList += {'p':data['p39'],'q':data['q39'],'peakName':'24:0','FAtype':'Saturated','common':'Lignoceric'},
tempList += {'p':data['p40'],'q':data['q40'],'peakName':'20:5n3','FAtype':'Omega-3','common':'Eicosapentaenoic'},
tempList += {'p':data['p41'],'q':data['q41'],'peakName':'24:1n9','FAtype':'Monounsaturated','common':'Nervonic'},
tempList += {'p':data['p42'],'q':data['q42'],'peakName':'22:4n6','FAtype':'Omega-6','common':'Adrenic'},
tempList += {'p':data['p43'],'q':data['q43'],'peakName':'22:5n6','FAtype':'Omega-6','common':'Docosapentaenoic'},
tempList += {'p':data['p44'],'q':data['q44'],'peakName':'U5','FAtype':'unknown','common':'unknown'},
tempList += {'p':data['p45'],'q':data['q45'],'peakName':'22:5n3','FAtype':'Omega-3','common':'Docosapentaenoic'},
tempList += {'p':data['p46'],'q':data['q46'],'peakName':'22:6n3','FAtype':'Omega-3','common':'Docosahexaenoic'},
return tempList
def add_ScatterPlot(dataX,dataY,fig,subR,subC,subN,title):
fit = np.polyfit(dataX,dataY,1) #calculate trendline
fit_fn = np.poly1d(fit)
ax = fig.add_subplot(subR,subC,subN)
ax.scatter(dataX,dataY,color='b', marker='.') #add scatter plot
ax.plot(dataX,fit_fn(dataX),color='r',linewidth=1.0) #add trendline in red
plt.title(title, fontsize = 12) #add plot title
ax.locator_params(nbins=4)
plt.setp(ax.get_xticklabels(), fontsize=6)
plt.setp(ax.get_yticklabels(), fontsize=6)
ax.set_xlabel('%', fontsize=10)
ax.set_ylabel('Abs', fontsize=10)
return fig
def scatterPlot(data,tempTitle):
fig = plt.figure(figsize=(12,9), dpi=100)
fig.suptitle(tempTitle + ' (n = '+str(len(data))+')')
peakList = getPeakList(data)
countLocation = 0
for entry in peakList:
countLocation += 1
try:
add_ScatterPlot(entry['p'],entry['q'],fig,7,7,countLocation,entry['peakName'])
except: pass
plt.tight_layout()
plt.subplots_adjust(top=0.92)
return plt
def add_HistPlot(dataX,fig,subR,subC,subN,title):
ax = fig.add_subplot(subR,subC,subN)
ax.hist(dataX, bins =10) #add hist plot
plt.title(title, fontsize = 12) #add plot title
plt.setp(ax.get_xticklabels(), fontsize=6, rotation=90)
plt.setp(ax.get_yticklabels(), fontsize=6)
ax.set_xlabel('Abs', fontsize=10)
ax.set_ylabel('Count', fontsize=10)
return fig
def histPlot(data,tempTitle):
fig = plt.figure(figsize=(12,9), dpi=100)
fig.suptitle(tempTitle + ' (n = '+str(len(data))+')')
peakList = getPeakList(data)
countLocation = 0
for entry in peakList:
countLocation += 1
try:
add_HistPlot(list(entry['q']),fig,7,7,countLocation,entry['peakName'])
except: pass
plt.tight_layout()
plt.subplots_adjust(top=0.92)
return plt
def quantile(column,quantile=5):
# categorizes each entry into quantile bin
try:
q = pd.qcut(column, quantile)
return q.labels + 1
except:
return 'NaN'
def apply_quantile(data,bins):
# reads csv raw data, applies quantile to data
return data.apply(quantile,quantile=bins)
def get_buckets(bins):
tempDict = dict()
for row in range(1,bins+1,1):
for col in range(1,bins+1,1):
tempDict[str(row)+str(col)] = ''
return tempDict
def get_labels(bins):
#columns 1...x rows x...1
# 1 2 3 4
# 4
# 3
# 2
# 1
# return list(range(1,bins+1,1)), list(range(bins,0,-1))
#columns 1...x rows 1...x
# 1 2 3 4
# 1
# 2
# 3
# 4
return list(range(1,bins+1,1)), list(range(1,bins+1,1))
def make_QuantileSummary(dataX,dataY,tempDict):
# tempDict = get_buckets(bins)
for rowX, rowY in zip(dataX, dataY):
try:
if tempDict[str(rowY)+str(rowX)] == '':
tempDict[str(rowY)+str(rowX)] = 1
else:
tempDict[str(rowY)+str(rowX)] += 1
except: pass
return tempDict
def make_table(row_labels,col_labels,tempDict):
table_vals = list()
for row in row_labels:
temp_vals = list()
for col in col_labels:
temp_vals += tempDict[str(row)+str(col)], #pulls values from '1x...11'
table_vals += [temp_vals] #add row to table
return table_vals
def calc_QuantileSummary(dataX,dataY,fig,subR,subC,subN,title,bins):
# creates a summary of each category
tempDict = make_QuantileSummary(dataX,dataY,get_buckets(bins))
col_labels, row_labels = get_labels(bins) #1...x x...1
table_vals = make_table(row_labels,col_labels,tempDict)
ax = fig.add_subplot(subR,subC,subN)
plt.title(title) #add plot title
ax.set_frame_on(False)
ax.set_xlabel('%')
ax.set_ylabel('Abs')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
sum_table=ax.table(cellText=table_vals,rowLabels=row_labels,colLabels=col_labels,loc='center')
sum_table.set_fontsize(10)
return fig
def quantile_Summary(data,tempTitle,bins):
fig = plt.figure(figsize=(12,9), dpi=100)
fig.suptitle(tempTitle + ' (n = '+str(len(data.index))+')')
peakList = getPeakList(data)
countLocation = 0
for entry in peakList:
countLocation += 1
try:
calc_QuantileSummary(entry['p'],entry['q'],fig,7,7,countLocation,entry['peakName'],bins)
except: pass
plt.tight_layout()
plt.subplots_adjust(top=0.92)
return plt
def apply_stats(data,runTTest):
peakList = getPeakList(data)
tempList = list()
colNames = ['Fatty Acid Type', #1
'Peak Name', #2
'Pearson Coefficient', #3
'Pearson P Value', #4
'Spearman Coefficient', #5
'Spearman P Value', #6
'P Geometric Mean (%)', #7
'Q Geometric Mean (ug/ml)', #8
'P Mean (%)', #9
'P Stdev', #10
'Q Mean (ug/ml)', #11
'Q Stdev', #12
'P T-test', #13
'P T-test P value', #14
'Q T-test', #15
'Q T-test P value', #16
'Common Name'] #17
for entry in peakList:
try:
pearson = pearsonr(entry['p'],entry['q'])
spearman = spearmanr(entry['p'],entry['q'])
if runTTest == 'y':
ttestP = ttest_1samp(entry['p'],0)
ttestQ = ttest_1samp(entry['q'],0)
else:
ttestP = ('-','-')
ttestQ = ('-','-')
tempList += [entry['FAtype'], #1
entry['peakName'], #2
pearson[0], #3
pearson[1], #4
spearman[0], #5
spearman[1], #6
gmean(entry['p']), #7
gmean(entry['q']), #8
np.mean(entry['p']), #9
np.std(entry['p'],ddof=1), #10
np.mean(entry['q']), #11
np.std(entry['q'],ddof=1), #12
ttestP[0], #13
ttestP[1], #14
ttestQ[0], #15
ttestQ[1], #16
entry['common']], #17
except: pass
return pd.DataFrame(tempList, columns=colNames)
#------------MAIN------------
clear_terminal()
start_screen()
tempDataFile = raw_input('\nEnter the file to be processed (default:Data.csv): \n') or 'Data.csv'
tempName = raw_input('\nEnter Study Name for file export (default:test): \n') or 'test'
tempTitle = raw_input('\nEnter Description for title of report (default:test_title): \n') or 'test_title'
tempBins = int(raw_input('\nEnter number of bins (4=quartile, 5=quintile/default):') or 5)
scatterPlot_run = raw_input('\nScatter Plot? y/n (default:y): ' ) or 'y'
histPlot_run = raw_input('\nHistogram Plot? y/n (default:y): ' ) or 'y'
quantPlot_run = raw_input('\nQuantile Plot? y/n (default:y): ' ) or 'y'
stats_run = raw_input('\nStatistics? y/n (default:y): ' ) or 'y'
MasterTime = time.time()
try:
data = pd.read_csv(tempDataFile, index_col=0) #import csv as dataframe
try: #=========scatter plot Percentage vs AbsoluteQuant=======
if scatterPlot_run == 'y':
startTime = time.time()
scatter = scatterPlot(data,tempTitle)
scatter.savefig('%s_Results_ScatterPlot.jpeg' % (tempName,))
print '\nScatter plot successfully printed in %s seconds' % str(time.time() - startTime)
except: print '\n...Scatter plot could not be printed...'
try: #=========Histogram plot AbsoluteQuant=======================
if histPlot_run == 'y':
startTime = time.time()
histo = histPlot(data,tempTitle)
histo.savefig('%s_Results_HistPlot.jpeg' % (tempName,))
print '\nHist plot successfully printed in %s seconds' % str(time.time() - startTime)
except: print '\n...Hist plot could not be printed...'
try: #=========Quantile (5-bin) summary=======================
if quantPlot_run == 'y':
startTime = time.time()
dataQuant = apply_quantile(data,tempBins)
dataQuant.to_csv('%s_Results_Quantile.csv' % (tempName,))
summary = quantile_Summary(dataQuant,tempTitle,tempBins)
summary.savefig('%s_Results_QuantileSummary.jpeg' % (tempName,))
print '\nQuantile plot successfully printed in %s seconds' % str(time.time() - startTime)
except: print '\n...Quantile summary could not be printed...'
try: #=========Stats=======================
if stats_run == 'y':
ttest_run = raw_input('\n1 sample T-Test? y/n (default:y): ' ) or 'y'
startTime = time.time()
dataStat = apply_stats(data,ttest_run)
dataStat.to_csv('%s_Results_Statistics.csv' % (tempName,),index=False)
print '\nStatistics summary successfully printed in %s seconds' % str(time.time() - startTime)
except: print '\n...Statistics summary could not be printed...'
print '\nThe data has been successfully processed.'
except:
print '''
\nSorry, the File could not be processed...
Be sure the correct file name was entered
and the file has been saved in the correct location'''
print '\nTotal time: ' + str(time.time() - MasterTime) + ' seconds\n\n' | true |
d867b792d25d99a36150fe48791cc9ebb69d2d4b | Python | Aasthaengg/IBMdataset | /Python_codes/p02852/s908177307.py | UTF-8 | 615 | 2.640625 | 3 | [] | no_license | N,M = map(int,input().split())
S = input()[::-1]
if M >= N:
print(N)
exit()
p = -1
for i in reversed(range(1,M+1)):
if S[i] == "0":
p = i
break
if p == -1:
print(-1)
exit()
ps = [p]
while 1:
tmp = -1
for i in reversed(range(ps[-1]+1,ps[-1]+M+1)):
try:
if S[i] == "0" or i == N:
ps.append(i)
tmp = i
break
except:
pass
if tmp == -1:
print(-1)
exit()
if ps[-1] == N:
break
pp = ([ps[i+1]-ps[i] for i in range(len(ps)-1)])[::-1] + [ps[0]]
print(*pp) | true |
84469a9d92d1baea91affe1249b7fe035daecc3e | Python | metalnick/emu-container-desktop | /emu_container_desktop/emu_container.py | UTF-8 | 4,483 | 2.515625 | 3 | [] | no_license | import configparser as cp
import os
import signal
from socketserver import BaseRequestHandler, TCPServer, ThreadingMixIn
from threading import Thread
import json
import subprocess
import sys
import glob
# TODO: Server should handle requests to start, stop, etc. No need for a "local client". GUI/cmd line will make use of
# TODO: the same methods the server invokes
class ThreadedEmuServerRequestHandler(BaseRequestHandler):
    """Handle one TCP request carrying a JSON command for the emulator server.

    Commands: start, play_rom, stop, shutdown, get_emulators, get_roms.
    The emulator configuration lives on the server object (see get_config).
    """
    def handle(self):
        print("Received message...")
        # Single recv: assumes the whole JSON payload fits in 1024 bytes.
        data = json.loads(self.request.recv(1024).decode('UTF-8'))
        response = json.dumps(data)
        if data["command"] == "start":
            # Echo the parsed command back as an acknowledgement, then act.
            self.request.sendall(('Got message! {}\n'.format(response)).encode())
            self.start_emulator(emulator_name=data["emulator"])
        elif data["command"] == "play_rom":
            self.request.sendall(("Got message! {}\n".format(response)).encode())
            self.play_rom(emulator_name=data["emulator"], rom_path=data["rom_path"])
        elif data["command"] == "stop":
            self.request.sendall(('Got message! {}\n'.format(response)).encode())
            self.stop_emulator(emulator_name=data["emulator"])
        elif data["command"] == "shutdown":
            self.request.sendall(('Got message! {}\n'.format(response)).encode())
            self.shutdown()
        elif data["command"] == "get_emulators":
            # Hand-built JSON-like string mapping each config section to a logo.
            # NOTE(review): the last entry uses the value "logo" while the
            # others use "logo_path", and the trailing '}}' in the format
            # string renders as a literal '}' -- confirm clients expect this
            # exact shape before changing it.
            emulators = '{'
            for i in range(len(self.get_config().sections())):
                if i == len(self.get_config().sections()) - 1 :
                    emulators += '"{}": "logo"}}\n'.format(self.get_config().sections()[i])
                else:
                    emulators += '"{}": "logo_path", '.format(self.get_config().sections()[i])
            self.request.sendall(emulators.encode())
        elif data["command"] == "get_roms":
            platform = data["emulator"]
            rom_path = self.get_config()[platform]['Roms']
            rom_extension = self.get_config()[platform].get('RomExtension', '')
            # NOTE(review): the response is hand-assembled and not valid JSON
            # (unquoted comma-separated paths, stray closing brace) -- clients
            # presumably parse it ad hoc; verify before reformatting.
            response = '{"roms": "'
            if not(rom_path.endswith('/')):
                rom_path += '/'
            if not(rom_extension == ''):
                rom_path += '*.{}'.format(rom_extension)
            roms = glob.glob(rom_path)
            for i in range(len(roms)):
                rom = roms[i]
                if i == len(roms) - 1:
                    response += rom+'"}}\n'
                else:
                    response += rom+', '
            self.request.sendall(response.encode())
    def get_config(self):
        # The EmuServer instance exposes the parsed config via a property.
        return self.server.config
    def play_rom(self, emulator_name: str, rom_path: str):
        # Launch the configured emulator binary with the rom, and record the
        # child pid in "<emulator>.pid" so stop_emulator can signal it later.
        pid = subprocess.Popen([self.get_config()[emulator_name]['Emulator'], rom_path]).pid
        pid_file = open(emulator_name+".pid", "w")
        pid_file.write(str(pid))
        pid_file.close()
    def start_emulator(self, emulator_name: str):
        # Run the emulator without a rom and block until it exits, dumping its
        # captured stdout/stderr.  Note: no pid file is written on this path.
        stdout,stderr = subprocess.Popen([self.get_config()[emulator_name]['Emulator']],
                                         stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
        print(stdout.decode())
        print(stderr.decode())
    def stop_emulator(self, emulator_name: str):
        # NOTE(review): open() runs before the isfile() check, so a missing pid
        # file raises FileNotFoundError here rather than being skipped.
        pid_file = open(emulator_name+".pid", "r")
        try:
            if os.path.isfile(emulator_name+".pid"):
                pid = pid_file.readline()
                # SIGINT gives the emulator a chance to shut down cleanly.
                os.kill(int(pid), signal.SIGINT)
                os.remove(emulator_name+".pid")
        finally:
            pid_file.close()
    def shutdown(self):
        # Stop the serve_forever loop and release the listening socket.
        self.server.shutdown()
        self.server.server_close()
class EmuServer(ThreadingMixIn, TCPServer):
    """Threaded TCP server that carries the emulator config for its handlers."""

    def __init__(self, address: str, port: int, request_handler: BaseRequestHandler, config: cp):
        # Allow quick restarts on the same port; must be set before the base
        # class binds the socket.
        TCPServer.allow_reuse_address = True
        super().__init__((address, port), request_handler)
        self._config = config

    @property
    def config(self):
        """Read-only access to the parsed emulator configuration."""
        return self._config
def start_server(address: str, port: int, config: cp, name='EmuServer') -> EmuServer:
    """Create an EmuServer bound to (address, port) and serve it on a thread.

    :param address: interface to bind ('' for all interfaces)
    :param port: TCP port to listen on
    :param config: parsed emulator configuration passed to the server
    :param name: name for the serving thread
    :return: the running EmuServer instance
    """
    server = EmuServer(address, port, ThreadedEmuServerRequestHandler, config)
    server_thread = Thread(target=server.serve_forever, name=name, daemon=False)
    server_thread.start()
    # Bug fix: the annotation promises an EmuServer but the original fell off
    # the end and returned None; return the instance so callers can shut it down.
    return server
def main():
    """Load the emulator config, clear stale pid files, and start the server."""
    config = cp.ConfigParser()
    config.read("config/emucontainer.properties")
    # Bug fix: os.remove() does not expand wildcards -- the original
    # os.remove('*.pid') could only ever raise FileNotFoundError.
    for stale_pid in glob.glob('*.pid'):
        os.remove(stale_pid)
    try:
        start_server('', 55453, config)
    except KeyboardInterrupt:
        sys.exit()
# Start the server only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true |
d4d81448549f77e337ad3313dbc10078a3cb37c8 | Python | Deeksha-K/dm-assignments | /Assignment 1/DM_ASSN1_20160114_20160809_20160236/apriori.py | UTF-8 | 7,376 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 22:58:59 2019
@author: Skanda
"""
import csv
import copy
import itertools
def get_unique_items(data):
    """
    Count how often each item occurs across all transactions.

    :param data: iterable of transactions (each an iterable of items)
    :return: dict mapping each item to its total occurrence count
    """
    items = {}
    for transaction in data:
        for item in transaction:
            # Idiom: dict.get with a default replaces the explicit
            # key-membership test and double lookup of the original.
            items[item] = items.get(item, 0) + 1
    return items
def c_to_l(data, c, min_support):
    """
    Prune candidate item sets (Ci) down to the frequent ones (Li).

    :param data: list of transactions
    :param c: candidate item sets (lists of items)
    :param min_support: minimum number of supporting transactions
    :return: tuple (frequent item sets, matching support counts), candidate order
    """
    frequent = []
    supports = []
    for item_set in c:
        members = set(item_set)
        # Count the transactions containing every item of the candidate.
        support = sum(1 for transaction in data if members.issubset(transaction))
        if support >= min_support:
            frequent.append(item_set)
            supports.append(support)
    return frequent, supports
def remove_duplicates(lst):
    """
    Drop rows that contain the same set of elements as an earlier row.

    Each surviving row is returned as list(set(row)), so duplicate elements
    inside a single row are collapsed as well.
    """
    seen = []
    for row in lst:
        candidate = set(row)
        if candidate not in seen:
            seen.append(candidate)
    return [list(unique_row) for unique_row in seen]
def l_to_c(l, req_common):
    """
    Join frequent item sets (Li) into larger candidates (Ci+1).

    Two item sets are merged whenever they share exactly req_common items;
    the concatenated result is de-duplicated via remove_duplicates.

    :param l: list of frequent item sets (lists)
    :param req_common: required size of the pairwise intersection
    :return: de-duplicated candidate item sets
    """
    candidates = []
    # The original deep-copied `l` twice only to iterate it; plain iteration
    # is equivalent because nothing in this loop mutates the item sets.
    for item_set1 in l:
        for item_set2 in l:
            intersection = [value for value in item_set1 if value in item_set2]
            if len(intersection) == req_common:
                candidates.append(item_set1 + item_set2)
    return remove_duplicates(candidates)
def findMaximal(freqSet):
    """
    Return the maximal frequent item sets: those with no frequent superset
    exactly one item larger.  Non-list entries are passed through unchanged,
    matching the original behaviour.
    """
    maximal = []
    for item in freqSet:
        dominated = isinstance(item, list) and any(
            set(sup).issuperset(set(item)) and len(sup) == len(item) + 1
            for sup in freqSet
        )
        if not dominated:
            maximal.append(item)
    return maximal
def findClosed(freqSet, freqSup):
    """
    Return the closed frequent item sets: those with no proper superset of
    identical support.  freqSup[i] is the support of freqSet[i].  Non-list
    entries are passed through unchanged, matching the original behaviour.
    """
    closed = []
    for item in freqSet:
        absorbed = isinstance(item, list) and any(
            set(sup).issuperset(set(item))
            and freqSup[freqSet.index(item)] == freqSup[freqSet.index(sup)]
            and item != sup
            for sup in freqSet
        )
        if not absorbed:
            closed.append(item)
    return closed
def generateAssociationRule(freqSet):
    """
    Build every candidate rule [LHS, RHS] from each frequent item set.

    For an item set I, every proper non-empty subset R becomes a RHS with
    LHS = I - R; RHS sizes are enumerated from len(I)-1 down to 1.
    Non-list and empty entries are skipped.
    """
    rules = []
    for item in freqSet:
        if not isinstance(item, list) or not item:
            continue
        full = set(item)
        for size in range(len(item) - 1, 0, -1):
            for rhs in itertools.combinations(item, size):
                lhs = full - set(rhs)
                rules.append([list(lhs), list(rhs)])
    return rules
def aprioriOutput(rules, dataSet, minimumSupport, minimumConfidence):
    """
    Keep the rules whose confidence meets minimumConfidence and format them.

    For each passing rule X -> Y, four entries are appended to the output:
    'Support Of X: n', 'Support Of Y: n', 'Confidence: p%', and the rule
    itself.  (minimumSupport is unused here; support pruning happened when
    the frequent item sets were generated.)
    """
    output = []
    for rule in rules:
        x_items = set(rule[0])
        y_items = set(rule[1])
        xy_items = set(rule[0] + rule[1])
        support_x = 0
        support_y = 0
        support_xy = 0
        for transaction in dataSet:
            basket = set(transaction)
            if x_items.issubset(basket):
                support_x += 1
            if xy_items.issubset(basket):
                support_xy += 1
            if y_items.issubset(basket):
                support_y += 1
        support_x_pct = (support_x * 1.0 / len(dataSet)) * 100
        support_xy_pct = (support_xy * 1.0 / len(dataSet)) * 100
        confidence = (support_xy_pct / support_x_pct) * 100
        if confidence >= minimumConfidence:
            output.append("Support Of X: " + str(support_x))
            output.append("Support Of Y: " + str(support_y))
            output.append("Confidence: " + str(round(confidence, 4)) + "%")
            output.append(rule)
    return output
def main():
    """Run Apriori over groceries.csv and print the surviving association rules."""
    #Reading the database
    data = []
    with open('groceries.csv', 'r') as fp:
        reader = csv.reader(fp)
        for row in reader:
            data.append(row)
        # Redundant inside the with-block (harmless: with closes the file).
        fp.close()
    items = get_unique_items(data)
    temp_c1 = list(items.keys())
    # C1: every individual item wrapped as a one-element candidate set.
    c1 = []
    for string in temp_c1:
        c1.append([string])
    del temp_c1
    #total_number_of_transactions = len(data) #9835
    total_number_of_items = len(items) #169
    #Setting the minimum support and minimum confidence
    min_support = 500
    min_confidence = 20
    l1,l1sup = c_to_l(data, c1, min_support)
    del items
    del c1
    current_l = copy.deepcopy(l1)
    # NOTE: frequent_item_sets aliases l1, so the extend() calls below also
    # grow l1 itself; current_l is a deep copy precisely to avoid that.
    frequent_item_sets = l1
    frequent_item_setssup = l1sup
    #Generating all frequent item sets using the apriori principle
    for i in range(2, total_number_of_items + 1):
        current_c = l_to_c(current_l, i-2)
        current_l, current_lsup = c_to_l(data, current_c, min_support)
        frequent_item_sets.extend(current_l)
        frequent_item_setssup.extend(current_lsup)
        if len(current_l) == 0:
            break
    print(len(frequent_item_sets))
    maximal = []
    #Generating maximal item sets
    maximal = findMaximal(frequent_item_sets)
    closed = []
    #Generating closed item sets
    closed = findClosed(frequent_item_sets, frequent_item_setssup)
    assoc_rules = []
    #Finding association rules
    assoc_rules = generateAssociationRule(frequent_item_sets)
    #Pruning rules based on confidence and giving appropriate output
    AprioriOutput = aprioriOutput(assoc_rules, data, min_support, min_confidence)
    counter = 1
    if len(AprioriOutput) == 0:
        print("There are no association rules for this support and confidence.")
    else:
        # Entries come in groups of four; the fourth is the rule [LHS, RHS]
        # itself, printed as "LHS------>RHS" to close the line.
        for i in AprioriOutput:
            if counter == 4:
                print(str(i[0]) + "------>" + str(i[1]))
                counter = 0
            else:
                print(i, end=' ')
                counter = counter + 1
if __name__ == '__main__':
    main()
dd04647e1702687da86ba7e0e3c23d80983492a0 | Python | nbiadrytski-zz/python-training | /p_advanced_boiko/oop_demos/decorators/property_via_decorator.py | UTF-8 | 630 | 3.84375 | 4 | [] | no_license | class PropertyViaDecorator:
    def __init__(self):
        # Backing attribute for the managed property `x`.
        self._x = None
    @property
    def x(self):
        """The managed 'x' attribute; prints a trace whenever it is read."""
        print('...getter called...')
        return self._x
    @x.setter
    def x(self, value):
        # Prints a trace, then stores the new value on the backing attribute.
        print('...setter called...')
        self._x = value
    @x.deleter
    def x(self):
        # Prints a trace, then removes the backing attribute entirely
        # (subsequent reads of `x` raise AttributeError).
        print('...deleter called...')
        del self._x
# Demo: direct attribute access bypasses the property; `prop.x` goes through it.
prop = PropertyViaDecorator()
print(prop._x) # None
print(prop.x) # ...getter called... None
prop.x = 13 # ...setter called...
print(prop._x) # 13
print(prop.x) # ...getter called... 13
del prop.x # ...deleter called...
| true |
eeb166538e9100fc1bc849334737991f770854a3 | Python | vbaryshev4/faster_python | /homework/2/merger.py | UTF-8 | 652 | 2.671875 | 3 | [] | no_license | from os import listdir
import heapq
from datetime import *
def get_datetime_object(string):
    """Parse a 'YYYY-MM-DD HH:MM' timestamp string into a datetime."""
    timestamp_format = '%Y-%m-%d %H:%M'
    return datetime.strptime(string, timestamp_format)
def key_func(i):
    """Merge key: the timestamp in the line's third tab-separated field
    (the trailing newline character is sliced off before parsing)."""
    timestamp = i.split('\t')[2][:-1]
    return get_datetime_object(timestamp)
def join_results(lst):
    """Merge already-sorted chunk files by timestamp into chunks/sort_results.txt.

    :param lst: list of open file objects; heapq.merge streams them lazily
    """
    # Bug fix: a context manager guarantees the output handle is closed even
    # if merging raises (the original leaked it).  Per-line flush is kept.
    with open('chunks/sort_results.txt', 'a') as out:
        for line in heapq.merge(*lst, key=key_func):
            out.write(line)
            out.flush()
def start_merger():
    """Open every chunk file (skipping .DS_Store) and merge them into one
    sorted output file via join_results."""
    handles = [open('chunks/' + name) for name in listdir('chunks')
               if name != '.DS_Store']
    # Bug fix: close the chunk file handles after merging -- the original
    # opened them and never closed any.
    try:
        join_results(handles)
    finally:
        for handle in handles:
            handle.close()
if __name__ == '__main__':
    # NOTE(review): start_merger() has no return statement, so `result` is
    # always None; it is never used afterwards.
    result = start_merger()
1cbc3e635b23c7e1e01ce5d57f85d45437ba5af7 | Python | kmorris0123/reverse_word_order | /reverse_word_order.py | UTF-8 | 569 | 4 | 4 | [] | no_license | import os
def usr_str():
    """Prompt the user for a multi-word string and return it."""
    # A single print with an embedded newline emits the same two lines.
    print("Input a string that has multiple words.\nExample: My name is Kyle")
    return input("--> ")
def reverse_order(usr_str):
    """Return the words of usr_str in reverse order, joined by single spaces."""
    words = usr_str.split(" ")
    return " ".join(reversed(words))
def main():
    """Repeatedly reverse user-entered strings until the user declines."""
    play = True
    while play:
        usr_str_s = usr_str()
        print(reverse_order(usr_str_s))
        play_again = input('Do you want to enter another string? "Yes" or "No": ')
        # Bug fix: the prompt suggests "Yes"/"No" but the original accepted
        # only the exact lowercase "yes"; accept any capitalisation and "y".
        if play_again.strip().lower() in ("yes", "y"):
            play = True
            os.system('clear')
        else:
            play = False

if __name__ == "__main__":
    main()
| true |
a8d828d447ad8b929d17b3542f2f59eae783f71f | Python | SakibNoman/Python-Numerical-Analysis | /string.py | UTF-8 | 976 | 4.3125 | 4 | [] | no_license | multilineString = '''Hello, This is Noman Sakib
trying to '''
# Print the whole multi-line string, then a single character by index.
print(multilineString)
print(multilineString[7])
# Strings are iterable: this prints one character per line.
for x in multilineString:
    print(x)
# Length of a string.
print(len(multilineString))
# Membership test: does the substring occur anywhere in the string?
print("Sakib" in multilineString)
myWord = "Sakib"
if myWord in multilineString:
    print(myWord,"is present")
else:
    print(myWord,"is not present")
# Negated membership test with `not in`.
if myWord not in multilineString:
    print("Not exist")
else:
    print("Exist")
# String slicing: [start:stop] (stop excluded); negatives count from the end.
myString = "Learn With Sakib"
print(myString[2:5])
print(myString[2:])
print(myString[:5])
print(myString[-5:])
print(myString[-10:-6])
# Methods return new strings; the original is never modified in place.
print(myString.upper())
print(myString.lower())
print(myString.strip()) # removes whitespace from the beginning and end
print(myString.replace("Learn","Teach"))
print(myString.split(" "))
# str.format fills the numbered placeholders (indices are optional).
yourString = "You are so {0},and {1}" #can be used index or not
print(yourString.format("good",21))
| true |
1b73fdacfc57a7bc7f2ce0ae4fc602c713d23a27 | Python | 1615961606/-test | /备份/1804爬虫/第二周/第八天/迭代器.py | UTF-8 | 587 | 3.953125 | 4 | [] | no_license | from collections import Iterable
class Booklist(object):
    """A list of books that acts as its own (single-pass) iterator."""

    def __init__(self):
        self.data = []      # books added so far
        self.current = 0    # index of the next book to yield

    def add_books(self, item):
        """Append one book to the list."""
        self.data.append(item)

    def __iter__(self):
        # The object is its own iterator, so it can only be traversed once.
        return self

    def __next__(self):
        if self.current >= len(self.data):
            raise StopIteration
        book = self.data[self.current]
        self.current += 1
        return book
# Demo: fill the book list and traverse it once via the iterator protocol.
books = Booklist()
books.add_books('保护者')
books.add_books('小橘子')
for y in books:
    print(y)
# NOTE(review): `from collections import Iterable` (top of file) was removed
# in Python 3.10; modern code must import Iterable from collections.abc.
print(isinstance(books,Iterable))
616f002eab852e7104cc19e0fc427e2f6bdb4ca3 | Python | bballjo/pythonfun | /commandGenerator.py | UTF-8 | 632 | 3.4375 | 3 | [] | no_license | import commands
import random
import string
def generateCommand():
    """Return a random element of test.wordsWithSemicolons.

    NOTE(review): the name `test` is never imported or defined in this file,
    so calling this raises NameError -- confirm which module was intended.
    """
    return random.choice(test.wordsWithSemicolons)
def WriteFile(name):
    """Write a small randomly-generated program to the file at `name`.

    Uses the local `commands` module helpers (Line/If/Print) to build the text.
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open(name, 'w') as file:
        file.write(commands.Line('x = ' + str(random.choice([1,2,3,4,5,6,7,8,9]))))
        file.write(commands.If('x == ' + str(random.choice([1,2,3,4,5,6,7,8,9]))))
        file.write(commands.Print("'x is \" + str(x) + \""))
def OpenFile(name):
    """Print the entire contents of the file at `name` to stdout."""
    # Context manager replaces the manual open/close pair.
    with open(name, 'r') as file:
        print(file.read())
# count = 0
# while (count < 9):
# cmd_val = random.choice(test.wordsWithSemicolons)
# print(cmd_val)
# count = count + 1 | true |
8a44b86aa4a3c767b221054a839fe32b59a222ee | Python | yucheno8/news_python | /LiaoSirWeb/recursive_factorial.py | UTF-8 | 216 | 3.890625 | 4 | [] | no_license | # 利用递归函数计算阶乘
# N! = 1 * 2 * 3 * ... * N
def fact(n):
    """Return n! computed recursively; n! = 1 * 2 * 3 * ... * n.

    Robustness fix: every n <= 1 is treated as the base case (0! == 1),
    whereas the original recursed forever for any n < 1.
    """
    if n <= 1:
        return 1
    return n * fact(n-1)
# Spot-check a few values: 1! = 1, 5! = 120, 10! = 3628800.
print('fact(1) =', fact(1))
print('fact(5) =', fact(5))
print('fact(10) =', fact(10))
5df4e0217c9cdaf4be35e35dd8c44ec92c1b3bd7 | Python | ashishjsharda/numpyexamples | /argsort.py | UTF-8 | 64 | 2.703125 | 3 | [] | no_license | import numpy as np
a=np.array([5,2,8])
# argsort returns the indices that would sort the array, not the sorted
# values: here [1 0 2] (2 at index 1, then 5 at index 0, then 8 at index 2).
a=np.argsort(a)
print(a)
| true |
66f6869f549434af31b50bcd46096e71338990e7 | Python | JoelBondurant/RandomCodeSamples | /python/bytetool.py | UTF-8 | 1,257 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | """
Module for byte operations.
"""
import hashlib
import datetime
def sizeof_fmt(num, dec = 3, kibibyte = False):
    """Format a byte count as a human-readable string.

    :param num: size in bytes
    :param dec: number of decimal places in the output
    :param kibibyte: use 1024-based units (KiB, ...) instead of 1000-based (KB, ...)
    :return: formatted string such as '1.500 KB'
    """
    # Removed the dead `prefixes = None` / `factor = None` pre-assignments.
    if kibibyte:
        factor = 1024.0
        prefix = ['bytes','KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB']
    else:
        factor = 1000.0
        prefix = ['bytes','KB','MB','GB','TB','PB','EB','ZB','YB']
    for unit in prefix[:-1]:
        if num < factor:
            return ("%3."+str(dec)+"f %s") % (num, unit)
        num /= factor
    # Robustness fix: sizes of a yotta-unit or more previously fell off the
    # end and returned None; clamp them to the largest unit instead.
    return ("%3."+str(dec)+"f %s") % (num, prefix[-1])
def to_ascii(text):
    """Return text with every non-ASCII character replaced by a space."""
    return ''.join(ch if ord(ch) < 128 else ' ' for ch in text)
def hex_hash(astring, length = 6):
    """Return the first `length` hex digits of astring's MD5, upper-cased."""
    digest = hashlib.md5(astring.encode('utf-8')).hexdigest()
    return digest[:length].upper()
def string_shorten(astring, preserve_header = 6, max_length = 35):
    """Shorten a long string to '<header>_DX<hash>_<YYYYMMDD>'.

    Strings already within max_length are returned unchanged.

    :raises Exception: if the shortened form still exceeds max_length
    """
    if len(astring) <= max_length:
        return astring
    # Removed the unused `astring_lower` local (dead code in the original).
    shortened = astring[:preserve_header] + '_DX' + hex_hash(astring)
    shortened += '_' + datetime.datetime.now().date().strftime('%Y%m%d')
    if len(shortened) > max_length:
        raise Exception('Illegal string length in %s.' % shortened)
    return shortened
| true |
7017f224e020ac035dfaaa3d8dab635daacf6124 | Python | Bavithakv/PythonLab | /CO1/replaced.py | UTF-8 | 142 | 3.15625 | 3 | [] | no_license | a=input("enter a string")
# Keep the first character, then replace every later occurrence of it with '$'.
# NOTE(review): an empty input makes a[0] raise IndexError.
b=a[0]
s=a[1:len(a)]
for i in range(len(s)):
    if s[i]==a[0]:
        b=b+"$"
    else:
        b=b+s[i]
print(b)
| true |
d4b6270429b462f79e25fe0260a0f4711573e5be | Python | HBinhCT/Q-project | /hackerearth/Algorithms/Decode/test.py | UTF-8 | 542 | 2.59375 | 3 | [
"MIT"
] | permissive | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Run the solution script with mocked stdin and compare its stdout."""
    # side_effect feeds the script's successive input() prompts in order:
    # the case count, then one encoded string per case.
    @patch('builtins.input', side_effect=[
        '2',
        'wrien',
        'reen',
    ])
    def test_case_0(self, input_mock=None):
        text_trap = io.StringIO()
        # Importing `solution` executes the script; its prints are captured.
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(),
                         'erwin\n' +
                         'eren\n')
if __name__ == '__main__':
    unittest.main()
| true |
fb3c0d8b4296404bba40639192711a50ccee3287 | Python | StephanHeijl/SMH500 | /compare_contribution_lists.py | UTF-8 | 1,099 | 2.546875 | 3 | [] | no_license | import json
import pprint
import pandas
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
with open("volume_contribution.json") as f:
volume_correspondence = json.load(f)
with open("relative_market_caps.json") as f:
relative_market_caps = sorted(json.load(f))
overview = {}
for i, (coin, val) in enumerate(volume_correspondence):
overview[coin] = [i]
for i, (coin, val) in enumerate(relative_market_caps):
try:
overview[coin].append(i)
except:
continue
overview = [(coin, vol, mar, (vol - mar) / 50 * 5 0, (vol + mar) / 10 * 10) for coin, (vol, mar) in overview.items()]
#pprint.pprint(overview)
# pprint.pprint(sorted(
# overview,
# key=lambda x: (x[-1], x[-2])
# ))
overview = pandas.DataFrame(overview, columns=["coin", "vol", "mar", "diff_vol_mar", "mean_vol_mar"])
overview.loc[:, ["vol", "mar"]].plot(x="vol", y="mar", kind="scatter", figsize=(20, 20))
plt.savefig("vol_mar_corr.png")
with pandas.option_context('display.max_rows', None, 'display.max_columns', 10):
print overview.sort_values(["diff_vol_mar","mar"])
| true |
61ae6dbc6166d8fbfcdf146920018e2bb47cc44b | Python | standrewscollege2018/2020-year-11-classwork-EthanAllison | /Movie ticket.py | UTF-8 | 343 | 3.671875 | 4 | [] | no_license | age = int(input("How old are you?\n"))
if age > 13:
student = input("Are you a student? (y/n)\n")
if student == "y":
print("It costs $8")
elif age >= 18:
print("It will cost $12")
else:
print("It will cost $9")
elif age >=5:
print("It will cost $7")
else:
print("It will be free to enter")
| true |
4d888fb082039bf3648970525e7dc3c592000e8c | Python | lukereding/nsf_awards_analysis | /parse_xml.py | UTF-8 | 2,483 | 2.8125 | 3 | [] | no_license | import os
import bs4
import csv
def e_8(s):
    """Encode the string as UTF-8 bytes."""
    return s.encode('utf-8')
# Parse every NSF award XML file under ./data and write one CSV row per award.
os.chdir('./data')
all_files = [file for file in os.listdir('.') if file.endswith(".xml")]
print("total number of files: {}".format(len(all_files)))
with open('../out.csv', 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow( ('file_name', 'directorate', 'division', 'title', 'institution', 'amount', 'grant type', 'abstract', 'date_start', 'date_end', 'program_officer', 'investigators', 'roles', 'number_pis') )
    for i, file in enumerate(all_files):
        try:
            print(i)
            # read in file
            file_name = file
            handler = open(file).read()
            soup = bs4.BeautifulSoup(handler, 'xml')
            # record a bunch of stuff about the grant
            directorate = e_8(soup.Directorate.LongName.text)
            division = e_8(soup.Division.LongName.text)
            title = e_8(soup.AwardTitle.text)
            institution = e_8(soup.Institution.Name.text)
            amount = e_8(soup.Award.AwardAmount.text)
            grant_type = e_8(soup.Award.AwardInstrument.Value.text)
            abstract = e_8(soup.Award.AbstractNarration.text)
            # need to parse these date:
            date_end = e_8(soup.AwardExpirationDate.text)
            date_start = e_8(soup.AwardEffectiveDate.text)
            program_officer = e_8(soup.ProgramOfficer.text)
            investigators = list()
            roles = list()
            for investigator in soup.find_all("Investigator"):
                # Bug fix: the original read soup.Investigator.* here, which
                # always resolves to the FIRST Investigator element -- so every
                # award appeared to have a single PI.  Use the loop variable.
                name = e_8(investigator.FirstName.text) + b" " + e_8(investigator.LastName.text)
                if name not in investigators:
                    investigators.append(name)
                    roles.append(e_8(investigator.RoleCode.text))
            number_pis = len(set(investigators))
            try:
                writer.writerow( (file_name, directorate, division, title, institution, amount, grant_type, abstract, date_start, date_end, program_officer, investigators, roles, number_pis) )
            except:
                # Bug fix: the placeholder row had 12 columns but the header
                # declares 14; pad it so the CSV stays rectangular.
                writer.writerow( ('NA', 'NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA') )
                print("problem writing the csv row")
        except:
            # Deliberate best-effort: a malformed XML file is reported and
            # skipped (this occured three times in the whole dataset).
            print("problem parsing the XML file: {}".format(file))
        if i % 100 == 0:
            print("on the {}th file".format(i))
# The with-block closes csvfile; the original's extra close() was redundant.
| true |
1f8df0cc10d25c2f27a49385802265d1029d7c8d | Python | Nelson-Gon/urlfix | /urlfix/urlfix.py | UTF-8 | 7,334 | 2.75 | 3 | [
"MIT"
] | permissive | from collections.abc import Sequence
import os
import re
import urllib.request
from urllib.request import Request
import tempfile
from urllib.error import URLError, HTTPError
import logging
# Module-wide logging: WARNING and above go to urlfix_log.log, which is
# truncated on every run (filemode="w").
log_format = "%(asctime)s %(levelname)s %(message)s"
log_filename = "urlfix_log.log"
log_level = logging.WARNING
logging.basicConfig(
    filename= log_filename,
    format = log_format,
    filemode = "w"
)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
def file_format(in_file):
    """Return in_file's extension (the word after its last dot), or ''."""
    match = re.search(r'.+\.(\w+)', in_file)
    return match.group(1) if match else ''
class URLFix(object):
    """Find the URLs in a text document and replace any that have moved.

    Supported input formats (detected from the file extension): md, txt,
    rmd, Rmd, rst.  Each URL is visited with urllib; if the final URL after
    redirects differs from the one in the file, the line is rewritten.
    """
    def __init__(self, input_file, output_file=None):
        """
        :param input_file: Path to input_file
        :param output_file: Path to output_file
        """
        self.input_file = input_file
        self.output_file = output_file
        # automatically detect input file format
        self.input_format = file_format(self.input_file)
    def replace_urls(self, verbose=False, correct_urls=None, inplace=False):
        """
        :param verbose Logical. Should you be notified of what URLs have moved? Defaults to False.
        :param correct_urls. A sequence of urls known to be correct.
        :param inplace. Flag for inplace update operation.
        :return Replaces outdated URL and writes to the specified file. It also returns the number of URLs that have
        changed. The latter is useful for tests.
        """
        if self.input_format not in ("md", "txt", "rmd", "Rmd", "rst"):
            logger.error(f"File format {self.input_format} is not yet supported.")
            raise NotImplementedError(f"File format {self.input_format} is not yet supported.")
        else:
            pass
        link_text = "[^]]+"
        # Better markdown link matching taken from https://stackoverflow.com/a/23395483/10323798
        # http:// or https:// followed by anything but a closing paren
        actual_link = r"http[s]?://[^)|^\s|?<=\]]+"
        # Need to find more links if using double bracket Markdown hence define single md []() RegEx.
        single_md = r"\[([^]]+)\]\((http[s]?://[^\s|^\)]+)\)"
        combined_regex = fr"\[({link_text})\]\(({actual_link})\)\]\((http[s].*)\)|({single_md})"
        # Match only links in a text file, do not text that follows.
        # Assumes that links will always be followed by a space.
        final_regex = r"http[s]?://[^\s]+" if self.input_format in ["rst", "txt"] else combined_regex
        if self.output_file is None:
            if not inplace:
                logger.error("Please provide an output file to write to.")
                raise ValueError("Please provide an output file to write to.")
            else:
                # Get directory name from input file path
                # In-place mode: write to a temp file in the same directory,
                # then os.replace it over the input at the end.
                output_file = tempfile.NamedTemporaryFile(dir=os.path.dirname(self.input_file), delete=False,
                                                          mode="w")
        else:
            # if not all(os.path.exists(x) for x in [self.input_file, self.output_file]):
            for file_ in [self.input_file, self.output_file]:
                if not os.path.exists(file_):
                    logger.error(f"Need both input and output files but {file_} does not exist.")
                    raise FileNotFoundError(f"Need both input and output files but {file_} does not exist.")
            output_file = open(self.output_file, "w")
        number_moved = 0
        number_of_urls = 0
        with open(self.input_file, "r") as input_f, output_file as out_f:
            for line in input_f:
                matched_url = re.findall(final_regex, line)
                # Drop empty strings
                if self.input_format in ["md", "rmd", "Rmd"]:
                    # findall returns per-group tuples for the markdown regex;
                    # flatten each match into its non-empty strings first.
                    matched_url = [list(str(x) for x in texts_links if x != '') for texts_links in matched_url]
                    for link_texts in matched_url:
                        if len(link_texts) > 1:
                            link_texts = link_texts[1:]
                        # This is used because for some reason we match links twice if single md []()
                        # This isn't ideal
                        # TODO: Use better Regular Expression that matches the target links at once
                        # NOTE(review): ("https" or "http") always evaluates to
                        # the string "https", so this filter drops plain
                        # http:// links in markdown files -- confirm intent.
                        matched_url = list(filter(lambda x: ("https" or "http") in x, link_texts))
                if len(matched_url) == 0:
                    # If no URL found, write this line so it is kept in the output file.
                    out_f.write(line)
                    pass
                else:
                    for final_link in matched_url:
                        number_of_urls += 1
                        if isinstance(correct_urls, Sequence) and final_link in correct_urls:
                            # skip current url if it's in 'correct_urls'
                            logger.info(f"{final_link} is already valid.")
                            continue
                        # This printing step while unnecessary may be useful to make sure things work as expected
                        if verbose:
                            logger.info(f"Found {final_link} in {input_f.name}, now validating.. ")
                        try:
                            # A browser-ish User-Agent avoids some bot blocks;
                            # geturl() reflects any redirects that occurred.
                            visited_url = urllib.request.urlopen(
                                Request(final_link, headers={'User-Agent': 'XYZ/3.0'}))
                            url_used = visited_url.geturl()
                        except HTTPError as err:
                            # Put HTTPError before URLError to avoid issues with inheritance
                            # This may be useful for 4xxs, 3xxs if we get past the URLError
                            logger.warning(f"{final_link} not updated, got HTTP error code: {err.code}.")
                            #warnings.warn(f"{final_link} not updated, got HTTP error code: {err.code}.")
                            pass
                        except URLError as err:
                            logger.warning(f"{final_link} not updated. Reason: {err.reason}")
                            #warnings.warn(f"{final_link} not updated. Reason: {err.reason}")
                            # Must be a way to skip, for now rewrite it in there
                            pass
                        else:
                            if url_used != final_link:
                                number_moved += 1
                                line = line.replace(final_link, url_used)
                                if verbose:
                                    logger.info(f"{final_link} replaced with {url_used} in {out_f.name}")
                        # The (possibly rewritten) line is written once per URL.
                        out_f.write(line)
        information = "URLs have changed" if number_moved != 1 else "URL has changed"
        logger.info(f"{number_moved} {information} of the {number_of_urls} links found in {self.input_file}")
        # We leave this print message here as it is fairly useful
        print(f"{number_moved} {information} of the {number_of_urls} links found in {self.input_file}")
        if inplace:
            os.replace(out_f.name, self.input_file)
            if verbose:
                logger.info(f"Renamed temporary file {output_file} as {self.input_file}")
        return number_moved
| true |
e654d5dc25201845013c994ea9fdda9b650587d9 | Python | sunarditay/tue_robocup | /challenge_eegpsr/test/navigate_in_front_of.py | UTF-8 | 1,922 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
import rospy, sys, robot_smach_states, random
if __name__ == "__main__":
rospy.init_node('navigate_in_front_of')
# Create Robot object based on argv[1]
if len(sys.argv) < 2:
print "Usage: ./navigate_in_front_of.py [amigo/sergio] [entityIds]..."
sys.exit()
robot_name = sys.argv[1]
if robot_name == 'amigo':
from robot_skills.amigo import Amigo as Robot
elif robot_name == 'sergio':
from robot_skills.sergio import Sergio as Robot
else:
print "unknown robot"
sys.exit()
robot = Robot()
if len(sys.argv) > 2:
ids = sys.argv[2:]
else:
robot.speech.speak("No ids specified, I will do them all", block=False)
ids = [e.id for e in robot.ed.get_entities() if e.is_a("furniture")]
random.shuffle(ids)
print "IDS:", ids
for id in ids:
robot.speech.speak("I am going to navigate to the %s" % id, block=False)
machine = robot_smach_states.NavigateToSymbolic(robot, {robot_smach_states.util.designators.EntityByIdDesignator(robot, id=id): "in_front_of"},
robot_smach_states.util.designators.EntityByIdDesignator(robot, id=id))
machine.execute()
robot.head.look_down()
robot.head.wait_for_motion_done()
import time
time.sleep(1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Segment
robot.speech.speak("Segmenting on top of the %s" % id, block=False)
segmented_entities = robot.ed.update_kinect("on_top_of %s" % id)
if segmented_entities:
if not segmented_entities.error_msg:
robot.speech.speak("I found %d entities" % len(segmented_entities.updated_ids))
else:
robot.speech.speak(segmented_entities.error_msg)
robot.head.close()
| true |
eefc3904060a6ed5358ec5399ee7e8572a8b512e | Python | OtchereDev/yt_omdb_api | /api/models.py | UTF-8 | 945 | 2.625 | 3 | [
"MIT"
] | permissive | from django.db import models
# Django `choices` tuples: (value stored in the DB, human-readable label).
RATINGS=(
    ('5','5 Star'),
    ('4', '4 Star'),
    ('3', '3 Star'),
    ('2', '2 Star'),
    ('1', '1 Star'),
)
# Allowed record kinds for Movie.type.
TYPE=(
    ('movie','movie'),
    ('series','series'),
    ('episode','episode'),
)
class Genre(models.Model):
    """A movie genre, referenced by Movie.genre."""
    name=models.CharField(max_length=255)
    def __str__(self) -> str:
        # Shown wherever the object is rendered as text (e.g. the admin).
        return self.name
class Movie(models.Model):
    """A single movie / series / episode record."""
    title=models.CharField(max_length=400)
    description=models.TextField()
    created=models.DateField()
    # Star rating stored as one character ('1'..'5'); see RATINGS.
    rated=models.CharField(choices=RATINGS,max_length=1)
    duration=models.CharField(max_length=10)
    # Optional genre; the reference becomes NULL if the Genre row is deleted.
    genre=models.ForeignKey(Genre,on_delete=models.SET_NULL,null=True,blank=True)
    actors=models.CharField(max_length=400)
    country=models.CharField(max_length=100)
    # One of 'movie'/'series'/'episode'; see TYPE.  The name shadows the
    # builtin `type` only inside this class body, which is harmless here.
    type=models.CharField(choices=TYPE,max_length=15)
    # NOTE(review): no upload_to is set, so uploads land directly in
    # MEDIA_ROOT -- confirm that is intended.
    poster=models.ImageField()
    director=models.CharField(max_length=200)
    language=models.CharField(max_length=30)
| true |
c1f40c62047e0185ecd73181b1aa793983887329 | Python | mikss/pr3 | /test/test_linear.py | UTF-8 | 2,226 | 2.515625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pytest
from sklearn.metrics import r2_score
from pr3.linear import (
LeastAngleRegressionProjection,
LowerUpperRegressionProjection,
ProjectionOptimizerRegistry,
ProjectionSampler,
ProjectionVector,
)
def test_normalize(random_seed, p_dim, q_dim):
    """_normalize must fail before beta exists and yield a unit q-norm after."""
    np.random.seed(random_seed)
    projection = ProjectionVector(q=q_dim)
    with pytest.raises(AttributeError):
        projection._normalize()
    projection.beta = np.random.normal(0, 1, (p_dim,))
    projection._normalize()
    np.testing.assert_almost_equal(np.linalg.norm(projection.beta, ord=q_dim), 1, decimal=15)
def test_sampler(random_seed, p_dim, q_dim, sparsity=5):
    """Sampled projections are unit-norm; sparsity controls nonzero count."""
    np.random.seed(random_seed)
    sparse_projection = ProjectionSampler(p=p_dim, q=q_dim, sparsity=sparsity)
    dense_projection = ProjectionSampler(p=p_dim, q=q_dim)
    np.testing.assert_almost_equal(np.linalg.norm(sparse_projection.beta, ord=q_dim), 1, decimal=15)
    np.testing.assert_almost_equal(np.linalg.norm(dense_projection.beta, ord=q_dim), 1, decimal=15)
    assert np.count_nonzero(sparse_projection.beta) == sparsity
    assert np.count_nonzero(dense_projection.beta) == p_dim
@pytest.fixture()
def test_xy(random_seed, p_dim, q_dim, sparsity, n_samples, eps_std):
    """Fixture: a sparse linear model y = x @ beta + noise, as (x, y)."""
    np.random.seed(random_seed)
    eps = np.random.normal(0, eps_std, (n_samples, 1))
    beta = np.zeros((p_dim, 1))
    # Only the first `sparsity` coefficients are nonzero.
    beta[:sparsity, :] = ProjectionSampler(p=sparsity, q=q_dim, random_state=random_seed).beta
    x = np.random.normal(0, 1, (n_samples, p_dim))
    y = x @ beta + eps
    return x, y
@pytest.mark.parametrize(
    "regressor,init_kwargs,r2_threshold",
    [
        (LowerUpperRegressionProjection, dict(ridge=1.0), 19e-4),
        (LeastAngleRegressionProjection, dict(max_iter=25, min_corr=1e-4), 14e-4),
    ],
)
def test_regression(test_xy, regressor, init_kwargs, r2_threshold):
    """Each regressor must beat its (loose) R^2 floor on the synthetic data."""
    x, y = test_xy
    lurp = regressor(**init_kwargs)
    lurp.fit_normalize(x, y)
    y_hat = lurp.predict(x)
    assert r2_score(y, y_hat) > r2_threshold
def test_registry(registry_size=2):
    """The optimizer registry exposes exactly one entry per regressor."""
    assert len(ProjectionOptimizerRegistry.valid_mnemonics()) == registry_size
    assert len(ProjectionOptimizerRegistry.valid_regressors()) == registry_size
| true |
9677b542de10580a2986dda6ecb588709b030430 | Python | liyi0206/leetcode-python | /87 scramble string.py | UTF-8 | 665 | 3.453125 | 3 | [] | no_license | class Solution(object):
def isScramble(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
if s1==s2: return True
if len(s1)!=len(s2) or sorted(s1)!=sorted(s2): return False
if len(s1)==1: return s1==s2
for i in range(1,len(s1)):
if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
return True
if self.isScramble(s1[:i],s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
return True
return False
# Python 2 demo prints; expected output: True then False.
a=Solution()
print a.isScramble("great","rgtae") #True
print a.isScramble("abcd","bdac") #False
8b25701a3a88e458c8a9e9f38191f10663b306ab | Python | zhaoqun05/Coding-Interviews | /Python/数组中出现次数超过一半的数字.py | UTF-8 | 789 | 3.546875 | 4 | [] | no_license | '''
数组中有一个数字出现的次数超过数组长度的一半,请找出这个数字。例如输入一个长度为9的数组{1,2,3,2,2,2,5,4,2}。由于数字2在数组中出现了5次,超过数组长度的一半,因此输出2。如果不存在则输出0。
'''
# -*- coding:utf-8 -*-
class Solution:
def MoreThanHalfNum_Solution(self, numbers):
if not numbers: return None
key, num = numbers[0], 1
for i in numbers[1:]:
if i == key:
num += 1
else:
num -= 1
if num == 0:
key = i
num = 1
num = 0
for i in numbers:
if i == key:
num += 1
return key if num * 2 > len(numbers) else 0
| true |
e8dbd5ddf8bf4dbbb893e1eb2875efee3c7295ff | Python | qlimaxx/projects-management-api | /manage.py | UTF-8 | 957 | 2.75 | 3 | [] | no_license | import click
from werkzeug.security import generate_password_hash
from app import create_app
from app.enums import Role
from app.models import User, db
app = create_app()
@app.cli.command('create-db')
def create_db():
db.drop_all()
db.create_all()
print('Database is created.')
@app.cli.command('create-admin')
@click.argument('email', default='admin@mail.com')
@click.argument('password', default='admin')
def create_admin(email, password):
try:
user = User(
email=email,
phash=generate_password_hash(password),
role=Role.ADMIN.value)
db.session.add(user)
db.session.commit()
print('Admin(email={0}, password={1}) is created.'.format(
email, password))
except Exception as ex:
print(ex)
@app.cli.command('generate-password-hash')
@click.argument('password')
def _generate_password_hash(password):
print(generate_password_hash(password))
| true |
932af16a0d8c38f4375f5c4b699df1948d8faf1a | Python | Anirban2404/LeetCodePractice | /1135_minimumCost.py | UTF-8 | 1,930 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 10:40:04 2019
@author: anirban-mac
"""
"""
1135. Connecting Cities With Minimum Cost
There are N cities numbered from 1 to N.
You are given connections, where each connections[i] = [city1, city2, cost]
represents the cost to connect city1 and city2 together. (A connection is
bidirectional: connecting city1 and city2 is the same as connecting city2
and city1.)
Return the minimum cost so that for every pair of cities, there exists a path
of connections (possibly of length 1) that connects those two cities together.
The cost is the sum of the connection costs used. If the task is impossible,
return -1.
Example 1:
Input: N = 3, connections = [[1,2,5],[1,3,6],[2,3,1]]
Output: 6
Explanation:
Choosing any 2 edges will connect all cities so we choose the minimum 2.
Example 2:
Input: N = 4, connections = [[1,2,3],[3,4,4]]
Output: -1
Explanation:
There is no way to connect all cities even if all edges are used.
"""
# Finding Minimum spanning Tree
from collections import defaultdict
import heapq
class Solution:
def minimumCost(self, N, connections):
graph = defaultdict(list)
start = connections[0][0]
for src, dst, wt in connections:
graph[src].append((dst, wt))
graph[dst].append((src, wt))
print(graph)
dist = {}
heap = [(0, start)]
while heap:
ddist, node = heapq.heappop(heap)
print(ddist, node)
if node in dist:
continue
dist[node] = ddist
for neighbor, d in graph[node]:
if neighbor not in dist:
heapq.heappush (heap, (d, neighbor))
print(dist)
return sum(dist.values()) if len(dist) == N else -1
N = 3
connections = [[1,2,5],[1,3,6],[2,3,1]]
print(Solution().minimumCost(N, connections)) | true |
3de35b9bff11dee3b91dbb36f0672279a7444ade | Python | Jannatul-Ferdousi/PractisePython | /ND2.py | UTF-8 | 563 | 3.203125 | 3 | [] | no_license | # Compute mean and standard deviation: mu, sigma
mu = np.mean(belmont_no_outliers)
sigma = np.std(belmont_no_outliers)
# Sample out of a normal distribution with this mu and sigma: samples
samples = np.random.normal(mu, sigma, size=10000)
# Get the CDF of the samples and of the data
x_theor, y_theor = ecdf(samples)
x, y = ecdf(belmont_no_outliers)
# Plot the CDFs and show the plot
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
_ = plt.xlabel('Belmont winning time (sec.)')
_ = plt.ylabel('CDF')
plt.show()
| true |
d15ca6a56956293e69dbfbcb45e69f252163defb | Python | fadeopolis/scripts | /bin/kathex | UTF-8 | 4,332 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python3
##### PARSE OPTIONS ############################################################
import argparse
import glob
import os
import subprocess
DEFAULT_BUILD_COMMAND = 'pdflatex'
RUBBER_COMMAND = 'rubber -q -s --pdf'
PDFLATEX_COMMAND = 'pdflatex'
DEFAULT_PDF_VIEWER = 'evince'
DEFAULT_HELPER_FILE_TYPES = ['log','aux','bbl','blg']
def main():
buildCmd, pdfViewer, texFile = parseCommandLineArguments()
### process input file name for usability
## if the user specified no input file try to find a .tex file
if texFile is None:
candidates = glob.glob('*.tex')
if len(candidates) == 0:
abort(">>>> There are no .tex files in the current directory, please tell me which one you want to compile.")
elif len(candidates) > 1:
print(">>>> You did not specify which .tex file to use")
print(">>>> and there is more than one in the current directory,")
print(">>>> please tell me which one you want to compile.")
print(">>>> Candidates are:")
for candidate in candidates:
print (">>>> " + candidate)
exit(1)
if len(candidates) == 1:
texFile = candidates[0]
## the specified file does not exist
elif not os.path.exists(texFile):
# maybe the user ommited the '.tex' extension
if os.path.exists(texFile + '.tex'):
texFile = texFile + '.tex'
# maybe the user typed a dot at the end of the file name but not the 'tex',
# this happens a lot when using shell autocompletion
elif os.path.exists(texFile + 'tex'):
texFile = texFile + 'tex'
## compile latex
print(">>>> Compiling " + texFile)
compileLatex(buildCmd, texFile)
## get basename of latex file
basename = getBasename(texFile)
## name of result of pdf
pdfFile = basename + '.pdf'
## check if a pdf was generated (i.e. it exists and is newer than the tex file)
if not os.path.exists(pdfFile) or os.path.getmtime(pdfFile) < os.path.getmtime(texFile):
print()
abort(">>>> Something went wrong, no PDF file was generated!")
if os.path.getsize(pdfFile) == 0:
print()
abort(">>>> Something went wrong, the generated PDF file is empty!")
## delete latex helper files
for fileType in DEFAULT_HELPER_FILE_TYPES:
try:
os.remove(basename + '.' + fileType)
except OSError:
## we don't really care if we couldn't delete a helper file
pass
## view result pdf
print()
print(">>>> Success! Viewing PDF")
viewResultPDF(pdfViewer, pdfFile)
def compileLatex(buildCmd, texFile):
try:
## invoke compiler
latex = subprocess.Popen([buildCmd, texFile])
latex.wait()
## catch SIGINT from user (pressing Ctrl+C)
except KeyboardInterrupt:
latex.terminate()
def viewResultPDF(pdfViewer, pdfFile):
try:
viewer = subprocess.Popen(
[pdfViewer, pdfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout = viewer.communicate()[0]
print(stdout.decode('UTF-8'))
## catch SIGINT from user (pressing Ctrl+C)
except KeyboardInterrupt:
viewer.terminate()
def parseCommandLineArguments():
parser = argparse.ArgumentParser(
prog="katex",
description="Process latex files into pdfs and view them"
)
parser.set_defaults(
texFile = None,
buildCmd = DEFAULT_BUILD_COMMAND,
pdfViewer = DEFAULT_PDF_VIEWER
)
parser.add_argument(
'texFile',
metavar='TEX',
type=str,
nargs='?',
help='The input latex file to process'
)
parser.add_argument(
'-r', '--rubber',
dest='buildCmd',
const=RUBBER_COMMAND,
action='store_const',
help='Use rubber as the latex compiler'
)
parser.add_argument(
'-l', '--pdflatex',
dest='buildCmd',
const=PDFLATEX_COMMAND,
action='store_const',
help='Use pdflatex as the latex compiler'
)
parser.add_argument(
'--latex-compiler',
dest='buildCmd',
action='store',
type=str,
help='The latex compiler to use'
)
parser.add_argument(
'--pdf-viewer',
dest='pdfViewer',
action='store',
type=str,
help='The PDF viewing program to use'
)
args = parser.parse_args()
return args.buildCmd, args.pdfViewer, args.texFile
def getBasename(fileName, fileExtension='.tex'):
## call basename program
basename = subprocess.check_output(['basename', fileName, fileExtension]).decode('utf-8')
## remove trailing '\n'
return basename[:-1] if basename.endswith('\n') else basename
def abort(msg):
print(msg)
exit(1)
if __name__ == '__main__':
main()
| true |
e90969b7f7d9e4198418ffbcc0664074655a16f4 | Python | akdasa/gem | /gem/web/blueprints/session/connections.py | UTF-8 | 3,554 | 2.84375 | 3 | [] | no_license | from collections import namedtuple
from flask_socketio import join_room
from gem.db import users
from gem.event import Event
SessionConnection = namedtuple("SessionConnection", ["user_id", "socket_id", "session_id", "session", "user"])
class Connections:
"""
Handles all the connections to the sessions.
"""
def __init__(self):
"""
Initializes new instance of the Connections class
"""
self.__connections = [] # list of all connections
self.__sessions = {} # map of sessions: session_id -> Session model
self.__open_session = Event()
self.__close_session = Event()
@property
def open_session(self):
"""
Fires then connection to new session established
:rtype: Event
:return: Event
"""
return self.__open_session
@property
def close_session(self):
"""
Fires then no connection to session remains
:rtype: Event
:return: Event
"""
return self.__close_session
def of_socket(self, socket_id):
"""Returns connection of specified socket
:rtype: SessionConnection
:type socket_id: str
:param socket_id: Socket Id
:return: Session connection data"""
connections = list(filter(lambda x: x.socket_id == socket_id, self.__connections))
if len(connections) > 0:
return connections[0]
return None
def of_user(self, user_id):
"""Returns connection of specified socket
:rtype: SessionConnection
:type user_id: str
:param user_id: User Id
:return: List of connections data"""
return list(filter(lambda x: x.user_id == user_id, self.__connections))
def of_session(self, session_id):
"""Returns connection of specified socket
:rtype: SessionConnection
:type session_id: str
:param session_id: Session Id
:return: List of connections data"""
return filter(lambda x: x.session_id == session_id, self.__connections)
def add(self, socket_id, user_id, session_id):
"""Adds new connection using specified socket, user, session ids.
:param socket_id: Socket Id
:param user_id: User Id
:param session_id: Session Id"""
# Create new session controller if not exist
if session_id not in self.__sessions:
session = self.__open_session.notify(session_id)
if len(session) <= 0:
raise Exception("No session object created by open_session event handler")
self.__sessions[session_id] = session[0]
join_room(session_id, socket_id)
join_room(user_id, socket_id)
user = users.get(user_id)
session = self.__sessions[session_id]
cd = SessionConnection(user_id, socket_id, session_id, session, user)
self.__connections.append(cd)
session.users.join(user_id)
def remove(self, socket_id):
# find connection data for specified socket
connection = self.of_socket(socket_id)
if not connection:
return
# remove user from session, close session if no user remains
connection.session.users.leave(connection.user_id)
if len(connection.session.users.all) <= 0:
if self.__close_session:
self.__close_session.notify(connection.session)
del self.__sessions[connection.session_id]
# remove connection
self.__connections.remove(connection)
| true |
91573de6ac79cac1668cf23ae0e01743f2552d9d | Python | LucasGiori/DowloadPdfAutomaticoSpringLink | /scraping_spring_link.py | UTF-8 | 1,799 | 3.3125 | 3 | [] | no_license | import requests,sys,os
from bs4 import BeautifulSoup
from urls import getUrl
#Pasta onde irá salvar o arquivo, pega a pasta raiz do Script Python,em seguida defino em qual pasta será
downloadPath = str(sys.path[0])+'/arquivos/'
urls=getUrl() #Função que faz as leituras dos link no arquivo de texto e retorna uma lista
for url in urls:#for para percorrer a lista
try:
print("\nLink Page Pdf: ",url,"\n")
soup = BeautifulSoup(requests.get(url).text,'lxml')
#Aqui estamos Fazendo o parser do Html, e buscando pela class page-title, onde é definido o nome do livro ou pdf
tag_title = str(soup.find("div", {"class" : "page-title"}).getText())
tag_title = tag_title.replace('\n','')#substituindo o \n (Quebra de linha) para não dar erro quando for salvar o arquivo
tags=soup.find("div", {"class" : "cta-button-container__item"})#Fazendo o parser e buscando pela class onde contém o link de dowload do pdf
for i in tags.find_all('a',href=True):#percorrendo o html que foi encontrado, e buscando pela tag "a"
link='http://link.springer.com'+i["href"]#i["href"] é o atributo que está o link do pdf
response = requests.get(link)#aqui realiza um requisição para busar o pdf
print("Nome Arquivo: ",tag_title)
print("Link Download PDF: ",link)
with open(downloadPath+tag_title+'.pdf', 'wb') as f:#aqui será onde iremos criar o arquivo com o path especificado
f.write(response.content)#aqui salvamos o conteudo da requisição no arquivo criado
except Exception as e: #caso aconteça algum erro ele entra no except, para não para a execução.
print("\nOcorreu um erro, ao tentar fazer o download: ",url," Erro",e,"\n\n")
| true |
a744960cfa0166bf507a81dd90a70700e7f22e45 | Python | furas/python-examples | /tkinter/validate/main.py | UTF-8 | 1,145 | 3.109375 | 3 | [
"MIT"
] | permissive |
#
# https://stackoverflow.com/a/47990190/1832058
#
import tkinter as tk
'''
It lets you input only numbers 8.2 digits.
'''
def check(d, i, P, s, S, v, V, W):
print("d='%s'" % d)
print("i='%s'" % i)
print("P='%s'" % P)
print("s='%s'" % s)
print("S='%s'" % S)
print("v='%s'" % v)
print("V='%s'" % V)
print("W='%s'" % W)
text = P #e.get()
print('text:', text)
parts = text.split('.')
parts_number = len(parts)
if parts_number > 2:
#print('too much dots')
return False
if parts_number > 1 and parts[1]: # don't check empty string
if not parts[1].isdecimal() or len(parts[1]) > 2:
#print('wrong second part')
return False
if parts_number > 0 and parts[0]: # don't check empty string
if not parts[0].isdecimal() or len(parts[0]) > 8:
#print('wrong first part')
return False
return True
# --- main ---
root = tk.Tk()
vcmd = (root.register(check), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
e = tk.Entry(root, validate='key', validatecommand=vcmd)
e.pack()
root.mainloop()
| true |
bb4b34ef70039150e6ed96a11994a4fe51a8c268 | Python | magisterka-ppp/ppp2d | /player.py | UTF-8 | 3,653 | 2.859375 | 3 | [] | no_license | import pygame
from pygame.math import Vector2
from pygame.rect import Rect
class Player(object):
def __init__(self):
self.grounded = False
self.max_y_vel = 20
self.drag = 0.8
self.gravity = 6
self.bounds = Rect(20, 0, 80, 120)
self.color = (155, 155, 0)
self.vel = Vector2(0, 0)
self.acc = Vector2(0, 0)
def draw(self, screen, camera):
pygame.draw.rect(screen, self.color,
Rect(self.bounds.x - camera.x,
self.bounds.y - camera.y,
self.bounds.width,
self.bounds.height))
def add_force(self, force):
self.acc = force
def logic(self, game):
pressed = pygame.key.get_pressed()
if pressed[pygame.K_d]:
self.add_force(Vector2(3, 0))
if pressed[pygame.K_a]:
self.add_force(Vector2(-3, 0))
if pressed[pygame.K_w] and self.grounded:
self.add_force(Vector2(0, -60))
if pressed[pygame.K_SPACE]:
print(pygame.mouse.get_pos())
self.vel += self.acc
self.acc *= 0
self.collisions(game.ground.platforms)
self.limit_fall_speed()
self.vel.x *= self.drag
self.bounds.x += round(self.vel.x, 0)
def limit_fall_speed(self):
if self.vel.y < self.max_y_vel:
self.bounds.y += self.vel.y
else:
self.bounds.y += self.max_y_vel
def collisions(self, colliders):
self.collisions_head(colliders)
self.collisions_feet(colliders)
def collisions_head(self, colliders):
head = self.get_head()
right = self.get_right()
left = self.get_left()
for collider in colliders:
if head.colliderect(collider):
self.vel.y = 0
self.bounds.y = self.bounds.y - (self.bounds.y - (collider.y + collider.height))
if right.colliderect(collider):
self.vel.x = 0
self.bounds.x = self.bounds.x - self.bounds.width - (self.bounds.x - collider.x)
if left.colliderect(collider):
self.vel.x = 0
self.bounds.x = self.bounds.x - (self.bounds.x - (collider.x + collider.width))
def collisions_feet(self, colliders):
feet = self.get_feet()
self.grounded = False
self.vel.y += self.gravity
for collider in colliders:
if feet.colliderect(collider) and self.vel.y > 0.0:
self.grounded = True
self.vel.y = 0
self.bounds.y = collider.y - self.bounds.height
def get_feet(self):
return Rect(self.bounds.x,
self.bounds.y + self.bounds.height,
self.bounds.width,
self.max_y_vel)
def get_head(self):
velocity = 0
if self.vel.y < 0:
velocity = self.vel.y
return Rect(self.bounds.x,
self.bounds.y + velocity,
self.bounds.width,
-velocity)
def get_right(self):
velocity = 0
if self.vel.x > 0:
velocity = self.vel.x
return Rect(self.bounds.x + self.bounds.width,
self.bounds.y,
velocity,
self.bounds.height)
def get_left(self):
velocity = 0
if self.vel.x < 0:
velocity = self.vel.x
return Rect(self.bounds.x + velocity,
self.bounds.y,
-velocity,
self.bounds.height)
| true |
74c18fd7c38197a05c598231d1abe1494745ef7e | Python | google/gfw-deployments | /apps/python/groups/find_groups_where_user_is_owner.py | UTF-8 | 4,482 | 2.78125 | 3 | [] | no_license | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""For a specific user, prints groups for which they are owner.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
###########################################################################
DISCLAIMER:
(i) GOOGLE INC. ("GOOGLE") PROVIDES YOU ALL CODE HEREIN "AS IS" WITHOUT ANY
WARRANTIES OF ANY KIND, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING,
WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NON-INFRINGEMENT; AND
(ii) IN NO EVENT WILL GOOGLE BE LIABLE FOR ANY LOST REVENUES, PROFIT OR DATA,
OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, EVEN IF
GOOGLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, ARISING OUT OF
THE USE OR INABILITY TO USE, MODIFICATION OR DISTRIBUTION OF THIS CODE OR ITS
DERIVATIVES.
###########################################################################
Summary: This script allows an administrator to search for groups
for which a specific user is an owner of that group.
Usage: find_groups_where_user_is_owner.py [options]
Options:
-h, --help show this help message and exit
-d DOMAIN The domain name in which to add groups.
-u ADMIN_USER The admin user to use for authentication.
-p ADMIN_PASS The admin user's password
-m MEMBER The member of the group for which we want to
find all groups where they are owner.
"""
__author__ = 'mdauphinee@google.com (Matt Dauphinee)'
import datetime
import logging
from optparse import OptionParser
import sys
import gdata.apps.groups.service as groups_service
def ParseInputs():
"""Interprets command line parameters and checks for required parameters."""
parser = OptionParser()
parser.add_option('-d', dest='domain',
help='The domain name in which to add groups.')
parser.add_option('-u', dest='admin_user',
help='The admin user to use for authentication.')
parser.add_option('-p', dest='admin_pass',
help="The admin user's password")
parser.add_option('-m', dest='member',
help="""The member of the group for which we want to
find all groups where they are owner.""")
(options, args) = parser.parse_args()
if args:
parser.print_help()
parser.exit(msg='\nUnexpected arguments: %s\n' % ' '.join(args))
if options.domain is None:
print '-d (domain) is required'
sys.exit(1)
if options.admin_user is None:
print '-u (admin user) is required'
sys.exit(1)
if options.admin_pass is None:
print '-p (admin password) is required'
sys.exit(1)
if options.member is None:
print '-m (member) is required'
sys.exit(1)
return options
def GetTimeStamp():
now = datetime.datetime.now()
return now.strftime('%Y%m%d%H%M%S')
def GroupsConnect(options):
service = groups_service.GroupsService(email=options.admin_user,
domain=options.domain,
password=options.admin_pass)
service.ProgrammaticLogin()
return service
def main():
options = ParseInputs()
# Set up logging
log_filename = 'find_groups_where_user_is_owner_%s.log' % GetTimeStamp()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
filename=log_filename,
level=logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
conn = GroupsConnect(options)
groups = conn.RetrieveAllGroups()
for group in groups:
try:
logging.info('Inspecting Group: [%s]', group['groupId'])
members = conn.RetrieveAllMembers(group['groupId'])
for member in members:
if (member['memberId'] == options.member and
conn.IsOwner(member['memberId'], group['groupId'])):
logging.info('[%s] is owner of group [%s]', member['memberId'],
group['groupId'])
except Exception, e:
logging.error('Failure processing group [%s] with [%s]',
group['groupId'], str(e))
print 'Log file is: %s' % log_filename
if __name__ == '__main__':
main()
| true |
fa34c7188c7008af305831dbda443a4f41a38dc3 | Python | brucesw/skills_progressions | /skills_progression.py | UTF-8 | 1,380 | 2.875 | 3 | [] | no_license | from dictionaries import *
from helpers import *
# this does main portion of the work
# just a modified search algorithm
def expand_prerequisites(skill, prereq_list, depth, ret, abbreviations):
if skill not in prereq_list:
return
for s in prereq_list[skill]:
#print s, depth
if depth not in ret:
ret[depth] = []
if s not in ret[depth]:
if abbreviations:
ret[depth].append(s)
else:
ret[depth].append(skills_dict[s])
expand_prerequisites(s, prereq_list, depth + 1, ret, abbreviations)
# this is the main function call to get the prerequisites
# for a skill. argument must be an abbreviation
def get_prereqs(skill, abbreviations = False):
if skill not in prerequisites_dict:
return False, {}
d = {}
expand_prerequisites(skill, prerequisites_dict, 0, d, abbreviations)
ret = remove_duplicates(d)
return True, ret
# this just displays the output from get_prereqs()
# in a way that is easy to read
def print_prerequisites(skill):
success, prereqs = get_prereqs(skill)
if not success:
print 'Skill {0} not added yet or spelled wrong.'.format(skills_dict[skill])
return
print 'prerequisites for {0} ({1}):\n'.format(skills_dict[skill], skill)
for i in sorted(prereqs.keys(), reverse = True):
for s in prereqs[i]:
print '{0} ({1})'.format(skills_dict[s], s)
print ''
print '===>{0} ({1})'.format(skills_dict[skill], skill)
| true |
949f10a2e6dfff24b211e2252d3369ae07438d42 | Python | salkhan23/contour_integration | /learned_lateral_weights.py | UTF-8 | 7,801 | 2.6875 | 3 | [] | no_license | # ------------------------------------------------------------------------------------------------
# Contour integration layer in an MNIST classifier
#
# In this version of contour integration, a learnable weight matrix is cast on top each pixel in
# the input volume. The weight matrix is shared across a feature map but is not shared across
# feature maps. As such it models lateral connections between similarly oriented neurons in V1.
#
# The hope is that the network automatically learns to add weighted inputs from neighbors that
# are co-planer to its filter, axial specificity as defined in [Ursino and Cara - 2004 - A
# model of contextual interactions and contour detection in primary visual cortex]
# ------------------------------------------------------------------------------------------------
from __future__ import print_function
import numpy as np
from keras.engine.topology import Layer
from keras.constraints import Constraint
import keras.initializers as initializers
import keras.regularizers as regularizers
import keras.backend as K
import keras
from keras.models import Sequential, save_model
from keras.layers import Conv2D, Activation, MaxPooling2D, Dense, Dropout, Flatten
import os
import utils
FILENAME = os.path.basename(__file__).split('.')[0] + '.hf'
BATCH_SIZE = 64
NUM_CLASSES = 10
EPOCHS = 12
# MNIST Input Image Dimensions
IMG_ROWS, IMG_COL = 28, 28
# Set the random seed for reproducibility
np.random.seed(7)
class ZeroCenter(Constraint):
def __init__(self, n, ch):
"""
Add a constraint that the central element of the weigh matrix should be zero. Only lateral connections
Should be learnt.
:param n: dimensions of the weight matrix, assuming it is a square
:param ch: number of channels in the input.
"""
half_len = n**2 >> 1
half_mask = K.ones((half_len, 1))
mask_1d = K.concatenate((half_mask, K.constant([[0]]), half_mask), axis=0)
mask = K.reshape(mask_1d, (n, n, 1))
self.mask = K.tile(mask, [1, 1, ch])
def __call__(self, w):
w = w * self.mask
return w
class ContourIntegrationLayer(Layer):
def __init__(self, n=3,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
"""
:param n:
:param kernel_initializer:
:param kernel_regularizer:
:param kernel_constraint:
:param kwargs:
"""
if n & 1 == 0:
raise Exception("Lateral filter dimension should be an odd number. %d specified" % n)
self.n = n
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.data_format = K.image_data_format()
super(ContourIntegrationLayer, self).__init__(**kwargs)
def build(self, input_shape):
"""
Define any learnable parameters for the layer
:param input_shape:
:return:
"""
if self.data_format == 'channels_last':
_, r, c, ch = input_shape
else:
raise Exception("Only channel_last data format is supported.")
self.kernel_shape = (self.n, self.n, ch)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=ZeroCenter(self.n, ch),
trainable=True
)
super(ContourIntegrationLayer, self).build(input_shape)
def compute_output_shape(self, input_shape):
return input_shape # Layer does not change the shape of the input
def call(self, inputs):
"""
:param inputs:
:return:
"""
_, r, c, ch = K.int_shape(inputs)
# print("Call Fcn: Input shape ", r, c, ch)
# 1. Inputs Formatting
# Channel First, batch second. This is done to take the unknown batch size into the matrix multiply
# where it can be handled more easily
padded_inputs = K.spatial_2d_padding(
inputs,
((self.n / 2, self.n / 2), (self.n / 2, self.n / 2))
)
inputs_chan_first = K.permute_dimensions(padded_inputs, [3, 0, 1, 2])
# print("Call Fcn: inputs_chan_first shape: ", inputs_chan_first.shape)
# 2. Kernel
kernel_chan_first = K.permute_dimensions(self.kernel, (2, 0, 1))
# print("Call Fcn: kernel_chan_first shape", kernel_chan_first.shape)
k_ch, k_r, k_c = K.int_shape(kernel_chan_first)
apply_kernel = K.reshape(kernel_chan_first, (k_ch, k_r * k_c, 1))
# print("Call Fcn: kernel for matrix multiply: ", apply_kernel.shape)
# 3. Get outputs at each spatial location
xs = []
for i in range(r):
for j in range(c):
input_slice = inputs_chan_first[:, :, i:i+self.n, j:j+self.n]
input_slice_apply = K.reshape(input_slice, (ch, -1, self.n**2))
output_slice = K.batch_dot(input_slice_apply, apply_kernel)
# Reshape the output slice to put batch first
output_slice = K.permute_dimensions(output_slice, [1, 0, 2])
xs.append(output_slice)
# print("Call Fcn: len of xs", len(xs))
# print("Call Fcn: shape of each element of xs", xs[0].shape)
# 4. Reshape the output to correct format
outputs = K.concatenate(xs, axis=2)
outputs = K.reshape(outputs, (-1, ch, r, c)) # Break into row and column
outputs = K.permute_dimensions(outputs, [0, 2, 3, 1]) # Back to batch first
# print("Call Fcn: shape of output", outputs.shape)
# 5. Add the lateral and the feed-forward activations
outputs += inputs
return outputs
if __name__ == "__main__":
input_dims = (IMG_ROWS, IMG_COL, 1) # Input dimensions for a single sample
# 1. Get Data
# --------------------------------------------------------------------------
x_train, y_train, x_test, y_test, x_sample, y_sample = utils.get_mnist_data()
# 2. Define the model
# -------------------------------------------
model = Sequential()
# First Convolution layer, First sublayer processes feed-forward inputs, second layer adds the
# lateral connections. Third sublayer adds the activation function.
# Output = sigma_fcn([W_ff*x + W_l*(W_ff*x)]).
# Where sigma_fcn is the activation function
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_dims, padding='same'))
model.add(ContourIntegrationLayer(n=5))
model.add(Activation('relu'))
# Rest of the layers.
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=NUM_CLASSES, activation='softmax'))
# 3. Compile/Train/Save the model
# -------------------------------------------
model.compile(
loss=keras.losses.categorical_crossentropy, # Note this is not a function call.
optimizer=keras.optimizers.Adam(),
metrics=['accuracy']
)
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=(x_test, y_test)
)
save_model(model, FILENAME)
# 4. Evaluate Model accuracy
# -------------------------------------------
score = model.evaluate(x_test, y_test, verbose=0)
print('Test Loss:', score[0])
print('Test Accuracy', score[1])
| true |
550efc320225967509e49b681ab9b511ba1314bf | Python | frankieeder/fantasy_movie_league | /week_2018__12_14__12_16.py | UTF-8 | 2,250 | 2.640625 | 3 | [] | no_license | import fml
# 12/14/2018 - 12/16/2018 #
PRICES_RAW = """SPIDER-MAN: INTO THE SPIDER-VERSE
+$571
UNAVAILABLE
SCREENS LOCKED
THE MULE
+$235
UNAVAILABLE
SCREENS LOCKED
MORTAL ENGINES
+$171
UNAVAILABLE
SCREENS LOCKED
THE GRINCH
+$155
UNAVAILABLE
SCREENS LOCKED
RALPH BREAKS THE INTERNET
+$127
UNAVAILABLE
SCREENS LOCKED
ONCE UPON A DEADPOOL
+$76
UNAVAILABLE
SCREENS LOCKED
CREED II
+$68
UNAVAILABLE
SCREENS LOCKED
BOHEMIAN RHAPSODY
+$55
UNAVAILABLE
SCREENS LOCKED
THE FAVOURITE
+$52
UNAVAILABLE
SCREENS LOCKED
INSTANT FAMILY
+$51
UNAVAILABLE
SCREENS LOCKED
FANTASTIC BEASTS: THE CRIMES OF GRINDELWALD
+$48
UNAVAILABLE
SCREENS LOCKED
GREEN BOOK
+$41
UNAVAILABLE
SCREENS LOCKED
ROBIN HOOD
+$23
UNAVAILABLE
SCREENS LOCKED
WIDOWS
+$21
UNAVAILABLE
SCREENS LOCKED
A STAR IS BORN
+$19"""
FML_RAW = """"Spider-Man: Into the Spider-Verse" - $48.4 million
"The Mule" - $17.6 million
"Mortal Engines" - $13.4 million
"The Grinch" - $10.5 million
"Ralph Breaks the Internet" - $9.7 million
"Creed II" - $5.4 million
"Once Upon a Deadpool" - $4.2 million
"Bohemian Rhapsody" - $3.9 million
"Instant Family" - $3.8 million
"The Favourite" - $3.7 million
"Green Book" - $3.4 million
"Fantastic Beasts: The Crimes of Grindelwald" - $3.3 million
"Robin Hood" - $1.8 million
"Widows" - $1.7 million
"A Star Is Born" - $1.3 million"""
BOR_RAW = """Film (Distributor) Weekend
Gross Total
Gross %
Change Week
#
1 Spider-Man: Into the Spider-Verse
(Sony / Columbia) $39.0 M $39.0 M NEW 1
2 The Mule (Warner Bros.) $20.0 M $20.0 M NEW 1
3 Dr. Seuss' The Grinch (Universal) $11.5 M $239.3 M -24% 6
4 Mortal Engines (Universal) $11.0 M $11.0 M NEW 1
5 Ralph Breaks the Internet (Disney) $9.5 M $154.5 M -42% 4
6 Creed II (MGM) $5.2 M $104.8 M -48% 4
7 Once Upon a Deadpool (Fox) $4.5 M $6.5 M NEW 1
8 Bohemian Rhapsody (Fox) $4.0 M $180.3 M -35% 7
9 Instant Family (Paramount) $3.8 M $60.3 M -34% 5
10 Fantastic Beasts:
The Crimes of Grindelwald
(Warner Bros.) $3.7 M $151.8 M -47% 5
11 Green Book (Universal / DreamWorks) $3.3 M $25.4 M -15% 5
12 The Favourite (Fox Searchlight) $3.1 M $7.3 M +106% 6"""
PRICES, \
FML_PROJECTIONS, \
FML_BRACKET, \
BOR_PROJECTIONS, \
BOR_BRACKET = fml.exec_raw(20181214, PRICES_RAW, BOR_RAW, FML_RAW)
| true |
79d6bb919f4b2f8b691e7d575309eca55a1a50ad | Python | infinitEnigma/github-upload | /PyTorch/torch_nn/torch_nn-walkthrough.py | UTF-8 | 14,990 | 3.078125 | 3 | [] | no_license | from pathlib import Path
import requests
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)
URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open("wb").write(content)
# This dataset is in numpy array format, and has been stored using pickle,
# a python-specific format for serializing data.
import pickle
import gzip
# The archive holds (train, valid, test) splits; the test split is discarded.
# encoding="latin-1" is needed to unpickle numpy arrays saved under Python 2.
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
# Each image is 28 x 28, we need to reshape it to 2d first.
from matplotlib import pyplot
import numpy as np
# Sanity check: render the first digit and report the training-set shape.
pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
print(x_train.shape)
# PyTorch uses torch.tensor, rather than numpy arrays,
# so we need to convert our data.
import torch
x_train, y_train, x_valid, y_valid = map(
    torch.tensor, (x_train, y_train, x_valid, y_valid)
)
n, c = x_train.shape
# The bare expression below is a leftover notebook cell; it has no effect here.
x_train, x_train.shape, y_train.min(), y_train.max()
print(x_train, y_train)
print(x_train.shape)
print(y_train.min(), y_train.max())
# Neural net from scratch (no torch.nn)
# We are initializing the weights here with
# Xavier initialisation (by multiplying with 1/sqrt(n)).
import math
# requires_grad_ is enabled only after the init math, so the division itself
# is not tracked by autograd.
weights = torch.randn(784, 10) / math.sqrt(784)
weights.requires_grad_()
bias = torch.zeros(10, requires_grad=True)
# write a plain matrix multiplication and
# broadcasted addition to create a simple linear model.
# we also need an activation function
def log_softmax(x):
    """Log-softmax over the last dimension: x - log(sum(exp(x)))."""
    log_normalizer = x.exp().sum(-1, keepdim=True).log()
    return x - log_normalizer
def model(xb):
    """Linear transform with the module-level weights/bias, then log-softmax."""
    logits = xb @ weights + bias
    return log_softmax(logits)
# We will call our function on one batch of data (in this case, 64 images).
# This is one forward pass.
# Note that our predictions won’t be any better than random at this stage,
# since we start with random weights.
bs = 64 # batch size
xb = x_train[0:bs] # a mini-batch from x
preds = model(xb) # predictions
# The bare tuple below is a leftover notebook cell; it has no effect here.
preds[0], preds.shape
print(preds[0], preds.shape)
# Let’s implement negative log-likelihood to use as the loss function
def nll(input, target):
    """Mean negative log-likelihood of the target class, one row per sample."""
    picked = input[range(target.shape[0]), target]
    return -picked.mean()
# The hand-rolled NLL serves as the loss function for now.
loss_func = nll
# Let’s check our loss with our random model,
# so we can see if we improve after a backprop pass later.
yb = y_train[0:bs]
print(loss_func(preds, yb))
# implement a function to calculate the accuracy of our model
# if the index with the largest value matches the target value,
# then the prediction was correct
def accuracy(out, yb):
    """Fraction of rows whose highest-scoring class equals the label."""
    predictions = out.argmax(dim=1)
    return (predictions == yb).float().mean()
# With random weights this should land near 10% (chance level for 10 classes).
print(accuracy(preds, yb))
# We can now run a training loop. For each iteration, we will:
# * select a mini-batch of data (of size bs)
# * use the model to make predictions
# * calculate the loss
# * loss.backward() updates the gradients of the model, in this case, weights and bias.
from IPython.core.debugger import set_trace
lr = 0.5 # learning rate
epochs = 2 # how many epochs to train for
for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        # Uncomment set_trace() below to try it out
        # set_trace()
        start_i = i * bs
        end_i = start_i + bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        # Manual SGD step; zero the grads afterwards so they don't accumulate.
        with torch.no_grad():
            weights -= weights.grad * lr
            bias -= bias.grad * lr
            weights.grad.zero_()
            bias.grad.zero_()
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
# Using torch.nn.functional
# We will now refactor our code, so that it does the same thing as before,
# only we’ll start taking advantage of
# PyTorch’s nn classes to make it more concise and flexible.
import torch.nn.functional as F
# F.cross_entropy fuses log-softmax and NLL into a single call.
loss_func = F.cross_entropy
def model(xb):
    """Plain linear model; F.cross_entropy applies log-softmax internally."""
    return xb.matmul(weights) + bias
# Note that we no longer call log_softmax
# return log_softmax(xb @ weights + bias)
# Confirm the refactored loss gives the same numbers as the hand-written one.
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
# Refactor using nn.Module
# In this case, we want to create a class that holds our
# weights, bias, and method for the forward step
from torch import nn
class Mnist_Logistic(nn.Module):
    """MNIST logistic regression with explicitly managed parameters."""

    def __init__(self):
        super().__init__()
        # Xavier-style init: scale random weights by 1/sqrt(fan_in).
        initial = torch.randn(784, 10) / math.sqrt(784)
        self.weights = nn.Parameter(initial)
        self.bias = nn.Parameter(torch.zeros(10))

    def forward(self, xb):
        return xb.matmul(self.weights) + self.bias
# Since we’re now using an object instead of just using a function,
# we first have to instantiate our model:
model = Mnist_Logistic()
# Loss is still random-level: parameters start untrained.
print(loss_func(model(xb), yb))
# Now we can take advantage of model.parameters() and model.zero_grad()
# with torch.no_grad():
# for p in model.parameters(): p -= p.grad * lr
# model.zero_grad()
# We’ll wrap our little training loop in a fit function
# so we can run it again later.
def fit():
    """Train the module-level `model` with plain SGD over mini-batches."""
    batches_per_epoch = (n - 1) // bs + 1
    for epoch in range(epochs):
        for batch_idx in range(batches_per_epoch):
            lo = batch_idx * bs
            hi = lo + bs
            xb, yb = x_train[lo:hi], y_train[lo:hi]
            loss = loss_func(model(xb), yb)
            loss.backward()
            # The parameter update must not be tracked by autograd.
            with torch.no_grad():
                for p in model.parameters():
                    p -= p.grad * lr
                model.zero_grad()
fit()
# Double-check that the loss has gone down after training:
print(loss_func(model(xb), yb))
# Refactor using nn.Linear
# Instead of manually defining and initializing self.weights and self.bias
# use the Pytorch class nn.Linear for a linear layer
class Mnist_Logistic(nn.Module):
    """Logistic regression where nn.Linear owns the weights and bias."""

    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        return self.lin(xb)
# Instantiate and compute the loss the same way as before; nn.Linear
# initializes its own weights and bias.
model = Mnist_Logistic()
print(loss_func(model(xb), yb))
# We are still able to use our same fit method as before.
fit()
print(loss_func(model(xb), yb))
# Refactor using optim
# This will let us replace our previous manually coded optimization step:
# ```
# with torch.no_grad():
# for p in model.parameters(): p -= p.grad * lr
# model.zero_grad()
# ```
# with: `opt.step()` and `opt.zero_grad()`
from torch import optim
# We’ll define a little function to create our model and optimizer
# so we can reuse it in the future.
def get_model():
    """Return a fresh Mnist_Logistic model and a matching SGD optimizer."""
    net = Mnist_Logistic()
    return net, optim.SGD(net.parameters(), lr=lr)
model, opt = get_model()
print(loss_func(model(xb), yb))
# Same loop as before, but opt.step()/opt.zero_grad() replace the manual update.
for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        start_i = i * bs
        end_i = start_i + bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()
print(loss_func(model(xb), yb))
# Refactor using Dataset
# A Dataset can be anything that has a __len__ function
# and a __getitem__ function as a way of indexing into it.
# example of creating a custom
# FacialLandmarkDataset class as a subclass of Dataset
from torch.utils.data import TensorDataset
# Both x_train and y_train can be combined in a single TensorDataset,
# which will be easier to iterate over and slice.
train_ds = TensorDataset(x_train, y_train)
model, opt = get_model()
for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        # Indexing a TensorDataset slices x and y together in one step.
        xb, yb = train_ds[i * bs: i * bs + bs]
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()
print(loss_func(model(xb), yb))
# Refactor using DataLoader
# Pytorch’s DataLoader is responsible for managing batches.
# You can create a DataLoader from any Dataset.
from torch.utils.data import DataLoader
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs)
model, opt = get_model()
# The DataLoader now handles batching, so the index arithmetic disappears.
for epoch in range(epochs):
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()
print(loss_func(model(xb), yb))
# our training loop is now dramatically smaller and easier to understand
# Add validation
# We’ll use a batch size for the validation set
# that is twice as large as that for the training set.
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
# We will calculate and print the validation loss at the end of each epoch.
model, opt = get_model()
for epoch in range(epochs):
    # train()/eval() toggle layer training behavior (a no-op for this
    # logistic model, but the right habit for dropout/batchnorm models).
    model.train()
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()
    model.eval()
    with torch.no_grad():
        valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)
    # NOTE: this is a mean over batches; a smaller last batch is weighted
    # the same as full ones (the later fit() weights by batch size).
    print(epoch, valid_loss / len(valid_dl))
# Create fit() and get_data()
# We pass an optimizer in for the training set, and use it to perform backprop.
# For the validation set, we don’t pass an optimizer,
# so the method doesn’t perform backprop.
def loss_batch(model, loss_func, xb, yb, opt=None):
    """Evaluate one batch; take an optimizer step when `opt` is given.

    Returns (loss_value, batch_size) so callers can weight their averages.
    """
    prediction = model(xb)
    loss = loss_func(prediction, yb)
    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()
    return loss.item(), len(xb)
# fit runs the necessary operations to train our model
# and compute the training and validation losses for each epoch.
import numpy as np
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Train for `epochs` epochs, printing the validation loss after each."""
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)
        model.eval()
        with torch.no_grad():
            results = [loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]
        losses, nums = zip(*results)
        # Size-weighted mean: the last batch may be smaller than the rest.
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)
        print(epoch, val_loss)
# get_data returns dataloaders for the training and validation sets.
def get_data(train_ds, valid_ds, bs):
    """Wrap the datasets in loaders; validation uses a double-sized batch."""
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
    return train_dl, valid_dl
# Now, our whole process of obtaining the data loaders and fitting the model
# can be run in 3 lines of code:
# The whole pipeline — loaders, model, training — is now three lines.
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
model, opt = get_model()
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
# You can use these basic 3 lines of code to train a wide variety of models.
# Let’s see if we can use them to train a convolutional neural network (CNN)!
# Switch to CNN
# We are now going to build our neural network with three convolutional layers.
# Because none of the functions in the previous section
# assume anything about the model form,
# we’ll be able to use them to train a CNN without any modification.
# We will use Pytorch’s predefined Conv2d class as our convolutional layer
# Each convolution is followed by a ReLU. At the end, we perform an average pooling
class Mnist_CNN(nn.Module):
    """Three stride-2 conv layers (28 -> 14 -> 7 -> 4), then 4x4 avg pooling."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        # Incoming batches are flat 784-vectors; restore the image shape.
        xb = xb.view(-1, 1, 28, 28)
        for conv in (self.conv1, self.conv2, self.conv3):
            xb = F.relu(conv(xb))
        xb = F.avg_pool2d(xb, 4)
        # Collapse the trailing 1x1 spatial dims -> (batch, 10).
        return xb.view(-1, xb.size(1))
lr = 0.1
# Momentum is a variation on stochastic gradient descent
# that takes previous updates into account as well
# and generally leads to faster training.
model = Mnist_CNN()
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# The generic fit() works unchanged with the CNN.
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
# nn.Sequential
# PyTorch doesn’t have a view layer, and we need to create one for our network.
# Lambda will create a layer that we can then use
# when defining a network with Sequential.
class Lambda(nn.Module):
    """Adapter turning an arbitrary function into an nn.Module layer."""

    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):
        return self.func(x)
def preprocess(x):
    """Reshape flat 784-vectors into single-channel 28x28 images."""
    return x.view(-1, 1, 28, 28)
# The model created with Sequential is simply a stack of layers; the Lambda
# adapters splice the plain `preprocess` function and the final flatten in.
model = nn.Sequential(
    Lambda(preprocess),
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AvgPool2d(4),
    Lambda(lambda x: x.view(x.size(0), -1)),
)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
# Wrapping DataLoader
# Our CNN is fairly concise, but it only works with MNIST
# It assumes the input is a 28*28 long vector and
# that the final CNN grid size is 4*4
# Let’s get rid of these two assumptions
# remove the initial Lambda layer but moving the data preprocessing into a generator:
def preprocess(x, y):
    """Reshape the inputs to image form; labels pass through untouched."""
    return x.view(-1, 1, 28, 28), y
class WrappedDataLoader:
    """Iterate an underlying dataloader, applying `func` to every batch."""

    def __init__(self, dl, func):
        self.dl = dl
        self.func = func

    def __len__(self):
        return len(self.dl)

    def __iter__(self):
        for batch in self.dl:
            yield self.func(*batch)
# Wrap the loaders so every batch is reshaped on the way out.
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
# Next, we can replace nn.AvgPool2d with nn.AdaptiveAvgPool2d
# AdaptiveAvgPool2d(1) pools to a 1x1 map regardless of input size, removing
# the hard-coded 4x4 assumption that AvgPool2d(4) carried.
model = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),
    Lambda(lambda x: x.view(x.size(0), -1)),
)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# Let's try it out:
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
# Using GPU
# Prefer the GPU when CUDA is available, otherwise fall back to the CPU.
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
#Let’s update preprocess to move batches to the GPU:
def preprocess(x, y):
    """Reshape inputs to image form and move both tensors to `dev`."""
    images = x.view(-1, 1, 28, 28)
    return images.to(dev), y.to(dev)
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
# Finally, we can move our model to the GPU.
model.to(dev)
# Fresh optimizer over the (possibly GPU-resident) parameters.
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
| true |
38247e491e5d958d0cc0fd894b6de7af5619770f | Python | Kieran-Williams/Password_manager | /env/Database/create_db.py | UTF-8 | 579 | 2.53125 | 3 | [
"MIT"
] | permissive | import sqlite3
from sqlite3 import Error
from pathlib import Path
from Database import create_tables
def create_connection(db_file):
    """Open (and immediately close) a SQLite connection to `db_file`.

    Used as a connectivity check: connecting creates the database file if it
    does not exist yet. Errors are reported but not re-raised.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        print(sqlite3.version)
    except Error as e:
        # Bug fix: the original did `print('DB Version' + e)`, which itself
        # raises TypeError because an exception can't be concatenated to str.
        print('DB error: ' + str(e))
    finally:
        if conn:
            conn.close()
def create_db():
    """Ensure the password-manager database exists and set up its tables.

    Returns 1 when the database file is present afterwards, else 0.
    """
    # Connecting creates the file as a side effect when it is missing.
    create_connection(r'Database/password_manager.db')
    db = Path(str(Path().absolute()) + '/Database/password_manager.db')
    if not db.exists():
        return 0
    create_tables.main()
    return 1
ef3dd7b85fc326223f4c3cb2ade8c980f3048d40 | Python | apalpant/ProjetFilRouge | /python/shared/admin/services/gitService copy.py | UTF-8 | 334 | 2.53125 | 3 | [] | no_license | import subprocess
# The service for git operations
class GitService():
    """Thin wrapper around the command-line git client."""

    def __init__(self):
        # Trace construction; useful when wiring services together.
        print('init GitService')

    def clone(self, adresse):
        """Clone the repository at `adresse` into a fixed temp directory.

        The subprocess is started without waiting for completion
        (fire-and-forget), so the returned message does not mean the
        clone has actually finished — TODO confirm callers expect this.
        """
        subprocess.Popen(['git', 'clone', str(adresse), '/home/vagrant/tmp/clone'])
        return "git cloned"
| true |
# Chinese zodiac sign for each value of (birth year % 12);
# e.g. 2016 % 12 == 0 -> Monkey.
zodiac_signs_assignment = {
    0:'Monkey',
    1:'Rooster',
    2:'Dog',
    3:'Pig',
    4:'Rat',
    5:'Ox',
    6:'Tiger',
    7:'Rabbit',
    8:'Dragon',
    9:'Snake',
    10:'Horse',
    11:'Goat'
    }
# Welcome banner.
print("\n")
print("\n")
print("\n")
print("-------------------------------")
print("Welcome to the Zodiac Sign App!")
print("-------------------------------")
print("\n")
print("\n")
print("\n")
def age_input():
    """Prompt until the user types a valid integer birth year; return it.

    Bug fix: the original printed a warning on bad input but then returned
    the raw *string*, which later crashed `finding_zodiac` (string % 12).
    Now it keeps prompting until the input parses as an int.
    """
    while True:
        age = input("Please enter your year of Birth?")
        try:
            return int(age)
        except ValueError:
            print('Hey, that was NOT an Integer! Try Again!')
def finding_zodiac(age, zodiac_signs_assignment):
    """Map a birth year to its Chinese zodiac sign via the 12-year cycle."""
    return zodiac_signs_assignment[age % 12]
# Main interaction loop: ask for a year, show the sign, offer to replay.
while True:
    # age = int(input("Please enter your year of Birth?"))
    age = age_input()
    #finding_zodiac(age,zodiac_signs_assignment)
    #finding_zodiac()
    print("\n")
    print("\n")
    print ("So, your Chinese Zodiac Sign is a {}".format(finding_zodiac(age, zodiac_signs_assignment)))
    #finding_zodiac(age,zodiac_signs_assignment))
    print("\n")
    print("\n")
    goAgain = input(f"Play Again? (Press Enter to continue, or q to quit):")
    print("\n")
    print("\n")
    if goAgain == "q":
        break
print("\n")
print("\n")
print("\n")
print('Thanks for Playing! Bye!')
print("\n")
print("\n")
print("\n")
| true |
9763ba173e546b3e6905e88558056b87c0fe220a | Python | dbms-class/csc-2020-control-3.1 | /model.py | UTF-8 | 1,956 | 2.578125 | 3 | [] | no_license | # encoding: UTF-8
# This file implements the Data Access Objects as Peewee ORM classes.
from peewee import *
from connect import getconn
from connect import LoggingDatabase
from args import *
# Shared Postgres connection built from the command-line arguments.
db = PostgresqlDatabase(args().pg_database, user=args().pg_user, host=args().pg_host, password=args().pg_password)
#db = LoggingDatabase(args())
# ORM model classes.
class PlanetEntity(Model):
    # A destination planet; `distance` units are not stated here — TODO confirm.
    id = IntegerField()
    distance = DecimalField()
    name = TextField()
    class Meta:
        database = db
        db_table = "planet"
class FlightEntity(Model):
    # A scheduled flight to a planet, backed by the DB view "flightentityview".
    id = IntegerField()
    date = DateField()
    available_seats = IntegerField()
    planet = ForeignKeyField(PlanetEntity, related_name='flights')
    class Meta:
        database = db
        db_table = "flightentityview"
class PriceEntity(Model):
    # A fare row; fare_code is an int in [1..10] (see TicketEntity.fare()).
    id = IntegerField()
    fare_code = IntegerField()
    price = IntegerField()
    class Meta:
        database = db
        db_table = "price"
class TicketEntity(Model):
    # A sold ticket linking a flight to its fare (price) row.
    id = IntegerField()
    price = ForeignKeyField(PriceEntity)
    flight = ForeignKeyField(FlightEntity)
    discount = DecimalField()
    class Meta:
        database = db
        db_table = "flightticket"
    # Converts the integer Price.fare_code attribute, from the range [1..10],
    # into a letter from A to J.
    def fare(self):
        return chr(ord('A') + self.price.fare_code - 1)
    # Sets the ticket's discount amount via raw SQL.
    # NOTE: `db` below shadows the module-level peewee database object; it is
    # the raw connection returned by getconn().
    def set_discount(self, discount):
        with getconn() as db:
            cur = db.cursor()
            cur.execute("UPDATE FlightTicket SET discount=%s WHERE id=%s", (discount, self.id))
class BookingEntity(Model):
    # A booking reference attached to a single ticket.
    ref_num = TextField()
    ticket = ForeignKeyField(TicketEntity)
    class Meta:
        database = db
        db_table = "booking"
| true |
c8a71707d4e9b6792c55f5dfa3a8cdcdefc1cbb4 | Python | afunsten/oil | /opy/_regtest/src/osh/arith_parse.py | UTF-8 | 5,476 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
"""
arith_parse.py - Parse shell arithmetic, which is based on C.
"""
from core import tdop
from core import util
from osh.meta import Id
from core import word
from osh.meta import ast
p_die = util.p_die
def NullIncDec(p, w, bp):
  """ ++x or ++x[1] """
  operand = p.ParseUntil(bp)
  lvalue = tdop.ToLValue(operand)
  if lvalue is None:
    p_die("This value can't be assigned to", word=w)
  return ast.UnaryAssign(word.ArithId(w), lvalue)
def NullUnaryPlus(p, t, bp):
  """ +x, to distinguish from binary operator. """
  operand = p.ParseUntil(bp)
  return ast.ArithUnary(Id.Node_UnaryPlus, operand)
def NullUnaryMinus(p, t, bp):
  """ -1, to distinguish from binary operator. """
  operand = p.ParseUntil(bp)
  return ast.ArithUnary(Id.Node_UnaryMinus, operand)
def LeftIncDec(p, w, left, rbp):
  """ For i++ and i--
  """
  # Map the operator token id to the corresponding postfix AST node id.
  arith_id = word.ArithId(w)
  if arith_id == Id.Arith_DPlus:
    op_id = Id.Node_PostDPlus
  elif arith_id == Id.Arith_DMinus:
    op_id = Id.Node_PostDMinus
  else:
    raise AssertionError
  return ast.UnaryAssign(op_id, tdop.ToLValue(left))
def LeftIndex(p, w, left, unused_bp):
  """Array indexing, in both LValue and RValue context.
  LValue: f[0] = 1 f[x+1] = 2
  RValue: a = f[0] b = f[x+1]
  On RHS, you can have:
  1. a = f[0]
  2. a = f(x, y)[0]
  3. a = f[0][0] # in theory, if we want character indexing?
  NOTE: a = f[0].charAt() is probably better
  On LHS, you can only have:
  1. a[0] = 1
  Nothing else is valid:
  2. function calls return COPIES. They need a name, at least in osh.
  3. strings don't have mutable characters.
  """
  if not tdop.IsIndexable(left):
    p_die("%s can't be indexed", left, word=w)
  index = p.ParseUntil(0)
  p.Eat(Id.Arith_RBracket)  # consume the closing ]
  return ast.ArithBinary(word.ArithId(w), left, index)
def LeftTernary(p, t, left, bp):
  """ Ternary operator: left ? true_expr : false_expr.

  (The previous docstring said "Function call f(a, b)" — a copy-paste
  error from LeftFuncCall.)
  """
  true_expr = p.ParseUntil(bp)
  p.Eat(Id.Arith_Colon)
  false_expr = p.ParseUntil(bp)
  return ast.TernaryOp(left, true_expr, false_expr)
# For overloading of , inside function calls: arguments are parsed at this
# precedence so the comma acts as a separator, not the sequence operator.
COMMA_PREC = 1
def LeftFuncCall(p, t, left, unused_bp):
  """ Function call f(a, b). """
  children = []
  # f(x) or f[i](x)
  if not tdop.IsCallable(left):
    # Consistency fix: report through p_die like the sibling handlers
    # (LeftIndex, NullIncDec) so the error carries source-location info,
    # instead of raising a bare tdop.ParseError.
    p_die("%s can't be called", left, word=t)
  while not p.AtToken(Id.Arith_RParen):
    # Parse each argument at COMMA_PREC so the comma is NOT treated as the
    # sequence operator here; it just separates arguments.
    children.append(p.ParseUntil(COMMA_PREC))
    if p.AtToken(Id.Arith_Comma):
      p.Next()
  p.Eat(Id.Arith_RParen)
  return ast.FuncCall(left, children)
def MakeShellSpec():
  """
  Following this table:
  http://en.cppreference.com/w/c/language/operator_precedence
  Bash has a table in expr.c, but it's not as complete (missing grouping () and
  array[1]). Although it has the ** exponentiation operator, not in C.
  - Extensions:
  - function calls f(a,b)
  - Possible extensions (but save it for oil):
  - could allow attribute/object access: obj.member and obj.method(x)
  - could allow extended indexing: t[x,y] -- IN PLACE OF COMMA operator.
  - also obj['member'] because dictionaries are objects
  """
  spec = tdop.ParserSpec()
  # Entries run from tightest binding (postfix/call/index, bp 33) down to the
  # comma sequence operator (bp 1).
  # -1 precedence -- doesn't matter
  spec.Null(-1, tdop.NullConstant, [
      Id.Word_Compound,
      Id.Arith_Semi,  # for loop
      ])
  spec.Null(-1, tdop.NullError, [
      Id.Arith_RParen, Id.Arith_RBracket, Id.Arith_Colon,
      Id.Eof_Real, Id.Eof_RParen, Id.Eof_Backtick,
      # Not in the arithmetic language, but is a common terminator, e.g.
      # ${foo:1}
      Id.Arith_RBrace,
      ])
  # 0 precedence -- doesn't bind until )
  spec.Null(0, tdop.NullParen, [Id.Arith_LParen])  # for grouping
  spec.Left(33, LeftIncDec, [Id.Arith_DPlus, Id.Arith_DMinus])
  spec.Left(33, LeftFuncCall, [Id.Arith_LParen])
  spec.Left(33, LeftIndex, [Id.Arith_LBracket])
  # 31 -- binds to everything except function call, indexing, postfix ops
  spec.Null(31, NullIncDec, [Id.Arith_DPlus, Id.Arith_DMinus])
  spec.Null(31, NullUnaryPlus, [Id.Arith_Plus])
  spec.Null(31, NullUnaryMinus, [Id.Arith_Minus])
  spec.Null(31, tdop.NullPrefixOp, [Id.Arith_Bang, Id.Arith_Tilde])
  # Right associative: 2 ** 3 ** 2 == 2 ** (3 ** 2)
  # NOTE: This isn't in C
  spec.LeftRightAssoc(29, tdop.LeftBinaryOp, [Id.Arith_DStar])
  # * / %
  spec.Left(27, tdop.LeftBinaryOp, [
      Id.Arith_Star, Id.Arith_Slash, Id.Arith_Percent])
  spec.Left(25, tdop.LeftBinaryOp, [Id.Arith_Plus, Id.Arith_Minus])
  spec.Left(23, tdop.LeftBinaryOp, [Id.Arith_DLess, Id.Arith_DGreat])
  spec.Left(21, tdop.LeftBinaryOp, [
      Id.Arith_Less, Id.Arith_Great, Id.Arith_LessEqual, Id.Arith_GreatEqual])
  spec.Left(19, tdop.LeftBinaryOp, [Id.Arith_NEqual, Id.Arith_DEqual])
  spec.Left(15, tdop.LeftBinaryOp, [Id.Arith_Amp])
  spec.Left(13, tdop.LeftBinaryOp, [Id.Arith_Caret])
  spec.Left(11, tdop.LeftBinaryOp, [Id.Arith_Pipe])
  spec.Left(9, tdop.LeftBinaryOp, [Id.Arith_DAmp])
  spec.Left(7, tdop.LeftBinaryOp, [Id.Arith_DPipe])
  spec.Left(5, LeftTernary, [Id.Arith_QMark])
  # Right associative: a = b = 2 is a = (b = 2)
  spec.LeftRightAssoc(3, tdop.LeftAssign, [
      Id.Arith_Equal,
      Id.Arith_PlusEqual, Id.Arith_MinusEqual, Id.Arith_StarEqual,
      Id.Arith_SlashEqual, Id.Arith_PercentEqual, Id.Arith_DLessEqual,
      Id.Arith_DGreatEqual, Id.Arith_AmpEqual, Id.Arith_CaretEqual,
      Id.Arith_PipeEqual
      ])
  spec.Left(COMMA_PREC, tdop.LeftBinaryOp, [Id.Arith_Comma])
  return spec
# Module-level singleton spec shared by the arithmetic parser.
SPEC = MakeShellSpec()
| true |
# AtCoder ABC 188 B: print "Yes" iff the inner product of a and b is zero.
N = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
dot = sum(a[i] * b[i] for i in range(N))
print("Yes" if dot == 0 else "No")
| true |
7d211b2fffb7a2ed5a76ed4d197a6c6732ff3c59 | Python | zannn3/LeetCode-Solutions-Python | /0306. Additive Number.py | UTF-8 | 882 | 3.375 | 3 | [] | no_license | class Solution(object):
def isAdditiveNumber(self, num):
"""
:type num: str
:rtype: bool
"""
n = len(num)
if n < 3:
return False
for i in range(n-2):
for j in range(i+1, n-1):
if self.checkNum(num, i, j):
return True
return False
def checkNum(self, num, i, j):
if i > 0 and num[0] == "0":
return False
if j - i > 1 and num[i+1] == "0":
return False
num1, num2 = int(num[:i+1]), int(num[i+1:j+1])
cur = j+1
while cur < len(num):
num3 = num1 + num2
k = len(str(num3))
if num[cur:cur+k] != str(num3):
return False
num1, num2 = num2, num3
cur += k
return True
# prune
| true |
12a88e232676dcc6650d8e8ef0e41461b7b23b6d | Python | alex35469/Flow-Scheduling-for-video-Streaming | /simulator/utils.py | UTF-8 | 989 | 2.8125 | 3 | [] | no_license | import io
import sys
import os
from time import time
# Directory containing this module; used to resolve relative trace paths.
PATH = os.path.dirname(os.path.abspath(__file__))
def get_scaled_time(scale):
    """Return a clock function whose readings are time() scaled by `scale`."""
    def get_time():
        return time() * scale
    return get_time
def read_network_trace(path):
    """Build a zero-argument generator over the integer lines of PATH + path."""
    full_path = PATH + path
    def read_nt():
        with open(full_path) as trace_file:
            for raw_line in trace_file:
                stripped = raw_line.strip()
                # Non-numeric lines (blanks, negatives, comments) are skipped.
                if stripped.isdigit():
                    yield int(stripped)
    return read_nt
def read_frame_trace(path):
    """Build a zero-argument generator over the integer lines of `path`."""
    def read_nt():
        with open(path) as trace_file:
            for raw_line in trace_file:
                stripped = raw_line.strip()
                # Non-numeric lines (blanks, negatives, comments) are skipped.
                if stripped.isdigit():
                    yield int(stripped)
    return read_nt
def print_metrics(d):
    """Pretty-print the nested {streamer: {metric: value}} dict."""
    for streamer, metrics in d.items():
        print(streamer, ": ")
        for metric in metrics:
            print(" ", metric, ": ", metrics[metric])
| true |
177687297f8e4199eca539d03409ea93b2940d33 | Python | Neniao/fmt-back | /hubspotconnect.py | UTF-8 | 1,045 | 2.75 | 3 | [] | no_license | import requests
import json
import urllib
# NOTE(review): this is Python 2 code — urllib.urlencode moved to
# urllib.parse.urlencode in Python 3.
# SECURITY: the HubSpot API key is hard-coded below; move it to an
# environment variable or a config file before sharing/committing.
max_results = 500
hapikey = "2b0cbf32-bc72-40b6-8953-c40aeebeec07"
count = 20
contact_list = []
property_list = []
get_all_contacts_url = "https://api.hubapi.com/contacts/v1/lists/all/contacts/all?"
parameter_dict = {'hapikey': hapikey, 'count': count}
headers = {}
# Paginate your request using offset
has_more = True
while has_more:
    parameters = urllib.urlencode(parameter_dict)
    get_url = get_all_contacts_url + parameters
    r = requests.get(url= get_url, headers = headers)
    response_dict = json.loads(r.text)
    has_more = response_dict['has-more']
    contact_list.extend(response_dict['contacts'])
    # The API returns the next page's offset; feed it back into the query.
    parameter_dict['vidOffset']= response_dict['vid-offset']
    if len(contact_list) >= max_results: # Exit pagination, based on whatever value you've set your max results variable to.
        print('maximum number of results exceeded')
        break
print('loop finished')
list_length = len(contact_list)
print("You've succesfully parsed through {} contact records and added them to a list".format(list_length))
def visit(adjList, visitedNodes, l, node):
    """Post-order DFS from `node`; appends each finished node to `l`."""
    if node in visitedNodes:
        return
    visitedNodes[node] = True
    for successor in adjList[node]:
        visit(adjList, visitedNodes, l, successor)
    l.append(node)
def transposeAdjList(adjList):
    """Return the transpose of adjList (every edge reversed).

    Bug fix: every node of the input now appears as a key of the result,
    even when it has no incoming edges. The previous version omitted such
    nodes, which made assign() crash with a KeyError when it reached them.
    """
    transposedAdjList = {node: [] for node in adjList}
    for node, neighbors in adjList.items():
        for neighbor in neighbors:
            # setdefault also covers dangling neighbors absent from adjList.
            bucket = transposedAdjList.setdefault(neighbor, [])
            if node not in bucket:
                bucket.append(node)
    return transposedAdjList
def hasNodeBeenAssignedToComponent(components, node):
    """True when `node` is already a root or a member of some component."""
    return any(
        node == root or node in members
        for root, members in components.items()
    )
def assign(transposedAdjList, components, node, root):
    """Place `node` and every unassigned predecessor into `root`'s component."""
    if hasNodeBeenAssignedToComponent(components, node):
        return
    components.setdefault(root, []).append(node)
    for predecessor in transposedAdjList[node]:
        assign(transposedAdjList, components, predecessor, root)
def stronglyConnectedComponents(adjList):
    """Kosaraju's algorithm: DFS finish order, then assignment on the transpose."""
    visited = {}
    finish_order = []
    for start in adjList:
        visit(adjList, visited, finish_order, start)
    transposed = transposeAdjList(adjList)
    components = {}
    # Walk in reverse finish order (equivalent to prepending during the DFS,
    # which would be O(n) per insert in a Python list).
    for candidate in reversed(finish_order):
        assign(transposed, components, candidate, candidate)
    return components
def test(adjList):
    """Print each strongly connected component as (root) -> members."""
    for root, members in stronglyConnectedComponents(adjList).items():
        print(f"({root}) -> {members}")
def test1():
    """Smoke test: five nodes with one cycle 0 -> 3 -> 4 -> 0."""
    graph = {0: [1, 3], 1: [2], 2: [], 3: [4], 4: [0]}
    test(graph)
# Runs the demo on import/execution of this module.
test1()
73cbeb02678baf50a5b13b5a10091f289bd70798 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_75/51.py | UTF-8 | 1,481 | 2.828125 | 3 | [] | no_license | import sys
outf = []
def pout(text):
outf.append("Case #" + str(pout.case) + ": " + text + "\n")
pout.case += 1
pout.case = 1
def get_input(infname):
with open(infname, "r") as f:
return map(lambda a: a.strip(), f.readlines())
def write_output(outfname):
with open(outfname, "w") as f:
for line in outf:
f.write(line)
def main(inp):
lines = map(lambda a: a.split(" "), inp[1:])
for line in lines:
l = line[:]
ncombos = int(l[0])
combos = l[1:(ncombos+1)]
combodic = {}
for c in combos:
combodic[c[0]+c[1]] = c[2]
combodic[c[1]+c[0]] = c[2]
noppos = int(l[ncombos+1])
oppos = l[(ncombos+2):(ncombos+noppos+2)]
oppodic = oppos[:]
for o in oppos:
oppodic.append(o[1] + o[0])
seq = l[-1]
sol = []
for n in seq:
if sol:
if (n+sol[-1]) in combodic:
sol[-1] = combodic[(n+sol[-1])]
else:
for e in sol:
if (n+e) in oppodic:
sol = []
break
else:
sol += n
else:
sol += n
pout(str(sol).replace("'", ""))
inp = get_input(sys.argv[1])
main(inp)
write_output(sys.argv[2])
| true |
5c283929af4c908985960fe46c3faf94a7589699 | Python | Abe27342/project-euler | /src/104.py | UTF-8 | 1,144 | 3.515625 | 4 | [] | no_license | from decimal import Decimal
import math
phi = (Decimal(5).sqrt() + Decimal(1))/Decimal(2)
def frac(x):
return(x-math.floor(x))
def is_pandigital(x):
return({i for i in str(x)} == {'1','2','3','4','5','6','7','8','9'} and len(str(x)) == 9)
fl = [0,1,1,2,3,5]
def fibs(n):
while n > len(fl):
fl.append((fl[-1] + fl[-2])%1000000000)
return fl[:n]
first_digit_list = [0,1,1,2,3,5]
def fibs2(n):
count = 6
while(first_digit_list[-1] + first_digit_list[-2] < 1000000000):
first_digit_list.append(first_digit_list[-1]+first_digit_list[-2])
count += 1
for i in range(count, n):
first_digit_list.append(round(round(10**(frac(i*math.log10(phi)-0.5*math.log10(5))),8)*10**8))
#print('added %s as F_%s'%(first_digit_list[-1],i))
i = 2749
print(10**(frac(i*math.log10(phi)-0.5*math.log10(5))))
fibs(100000)
fibs2(100000)
l = []
a = []
for i in range(100000):
if(is_pandigital(fl[i]) and is_pandigital(first_digit_list[i])):
l.append(i)
if(i == 2749):
print(first_digit_list[i])
if(is_pandigital(first_digit_list[i])):
a.append(i)
print(l)
print(a)
| true |
f23ceea33b3b36f3b784cf1a5fd4ca84658d2b1d | Python | jamtot/PyProjectEuler | /45 - Triangular, pentagonal, and hexagonal/tph.py | UTF-8 | 1,482 | 4.1875 | 4 | [
"MIT"
] | permissive | # -*- coding: utf8 -*-
#Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
#Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ...
#Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ...
def triangulate(n):
return (n*(n+1))/2
def pentagulate(n):
return (n*((3*n)-1))/2
def hexagulate(n):
return n*((2*n)-1)
def trigen(n=1):
while True:
yield triangulate(n)
n+=1
def pengen(n=1):
while True:
yield pentagulate(n)
n+=1
def hexgen(n=1):
while True:
yield hexagulate(n)
n+=1
def findnum():
# start from just after known number
tgen = trigen(286)
pgen = pengen(166)
hgen = hexgen(143)
pcache = [pgen.next()]
hcache = [hgen.next()]
while True:
trinum = tgen.next()
if checkpen(trinum, pcache, pgen):
if checkhex(trinum, hcache, hgen):
return trinum
def checkpen(num, pcache, pgen):
while num > pcache[-1]:
pcache.append(pgen.next())
if num in pcache:
return True
else: return False
def checkhex(num, hcache, hgen):
while num > hcache[-1]:
hcache.append(hgen.next())
if num in hcache:
return True
else: return False
if __name__=="__main__":
assert [triangulate(n+1) for n in xrange(5)] == [1, 3, 6, 10, 15]
assert [pentagulate(n+1) for n in xrange(5)] == [1, 5, 12, 22, 35]
assert [hexagulate(n+1) for n in xrange(5)] == [1, 6, 15, 28, 45]
print findnum()
| true |
d53f1d9ed62cc5455aa7e2cb7a24894f591ffa83 | Python | daniel-reich/ubiquitous-fiesta | /kKFuf9hfo2qnu7pBe_22.py | UTF-8 | 355 | 2.515625 | 3 | [] | no_license |
def is_prime(primes, num, left=0, right=None):
mid = primes[int(len(primes)/2)]
if mid == num:
return "yes"
if (len(primes) == 1):
return "no"
if mid < num:
return is_prime(primes[int(len(primes)/2): len(primes)], num, 0, None )
if mid > num:
return is_prime(primes[0:int(len(primes)/2)], num, 0, None )
| true |
9003070a9074c8327a860e21d266eee3894d698b | Python | lihao6666/graduation | /parser/爬虫/parse/build/lib/parse/spiders/hot.py | UTF-8 | 2,586 | 2.578125 | 3 | [] | no_license | import scrapy
from parse.items import WeiboTopItem,ZhiHuTopItem,WeiboHots,ZhihuHots
class HotSpider(scrapy.Spider):
    """Scrape the Weibo and Zhihu trending ("hot") boards in one crawl.

    The spider fetches the Weibo hot-search summary first, yields the
    parsed items, then chains a request to the Zhihu hot list.
    """
    name = 'hot'
    # allowed_domains = ['https://s.weibo.com/top/summary/']
    start_url = 'https://s.weibo.com/top/summary/'
    next_url = 'https://www.zhihu.com/hot'
    def cookies_dict(self, cookies):
        """Parse a raw "k1=v1; k2=v2" cookie header string into a dict.

        Only the first '=' of each pair is the separator, so values may
        themselves contain '='.  (The local was renamed from `dict`,
        which shadowed the builtin.)
        """
        result = {}
        for cookie in cookies.split('; '):
            result[cookie.split('=')[0]] = cookie[cookie.index('=') + 1:]
        return result
    def headers_cookies_set(self):
        """Load request headers and per-site cookies from settings."""
        self.weibo_cookies = self.cookies_dict(self.settings.get("WEIBO_COOKIES"))
        self.zhihu_cookies = self.cookies_dict(self.settings.get("ZHIHU_COOKIES"))
        self.headers = self.settings.get("HEADERS")
    def start_requests(self):
        self.headers_cookies_set()
        yield scrapy.Request(self.start_url, headers=self.headers, cookies=self.weibo_cookies, callback=self.parse_weibo)
    def parse_weibo(self, response):
        """Parse Weibo board rows, yield them, then chain to Zhihu."""
        weibo_hots = WeiboHots()
        hots_res = []
        try:
            hots = response.xpath("//tr")
            for hot in hots:
                item = WeiboTopItem()
                item['ranking'] = hot.xpath('td[@class="td-01 ranktop"]/text()').get()
                item['content'] = hot.xpath('td[@class="td-02"]/a/text()').extract_first()
                item['count'] = hot.xpath('td[@class="td-02"]/span/text()').extract_first()
                item['desc'] = hot.xpath('td[@class="td-03"]/i/text()').extract_first()
                # Rows without a numeric rank (e.g. the pinned header
                # row) are skipped.
                if item['ranking']:
                    hots_res.append(item)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            print("出错了")
            return
        weibo_hots['parse_type'] = "weibo"
        weibo_hots['hots'] = hots_res
        yield weibo_hots
        yield scrapy.Request(self.next_url, headers=self.headers, cookies=self.zhihu_cookies, callback=self.parse_zhihu)
    def parse_zhihu(self, response):
        """Parse the Zhihu hot list into a single ZhihuHots item."""
        zhihu_hots = ZhihuHots()
        hots_res = []
        for hot in response.xpath('//section'):
            item = ZhiHuTopItem()
            item['ranking'] = hot.xpath('div[@class="HotItem-index"]/div/text()').get()
            item['content'] = hot.xpath('div[@class="HotItem-content"]/a/h2/text()').extract_first()
            item['count'] = hot.xpath('div[@class="HotItem-content"]/div/text()').extract_first()
            hots_res.append(item)
        zhihu_hots['parse_type'] = "zhihu"
        zhihu_hots['hots'] = hots_res
        yield zhihu_hots
| true |
b84a916974907120b37b03d4424a94d12ad5a08e | Python | novayo/LeetCode | /For Irene/BFS/0733_Flood_Fill.py | UTF-8 | 926 | 3.1875 | 3 | [] | no_license | class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
old_color = image[sr][sc]
if old_color == newColor:
return image
width = len(image[0])
height = len(image)
queue = collections.deque()
queue.appendleft((sr, sc))
found = set()
found.add((sr, sc))
while queue:
x, y = queue.pop()
image[x][y] = newColor
for i, j in [x, y-1], [x, y+1], [x-1, y], [x+1, y]:
if i < 0 or j < 0 or i >= height or j >= width or (i, j) in found:
continue
if image[i][j] == old_color:
found.add((i, j))
queue.appendleft((i, j))
return image | true |
fe719dad32d27bb81dc66228d711c463b0770d52 | Python | Tusharshah2006/feature_selection_project | /q05_forward_selected/build.py | UTF-8 | 1,315 | 2.921875 | 3 | [] | no_license | # %load q05_forward_selected/build.py
# Default imports
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import mean_squared_error, r2_score
# Training data; the forward-selection routine below treats the
# SalePrice column as the regression target.
data = pd.read_csv('data/house_prices_multivariate.csv')
model = LinearRegression()
# Your solution code here
def forward_selected(df, LinReg):
    """Greedy forward feature selection for predicting SalePrice.

    Each round moves the single remaining feature whose addition gives
    the highest LinReg.score (R^2 on the training data) into the
    selected set.  Returns (selection order, score after each round).
    """
    predictors = df.drop('SalePrice', axis=1)
    target = df['SalePrice']
    remaining = list(predictors.columns)
    chosen = []
    round_scores = []
    while remaining:
        trial_results = []
        for candidate in remaining:
            trial = chosen + [candidate]
            LinReg.fit(predictors[trial], target)
            trial_results.append((LinReg.score(predictors[trial], target), candidate))
        # Ties on the score are broken by feature name, exactly as the
        # sort-then-pop of (score, name) tuples did.
        top_score, top_feature = max(trial_results)
        remaining.remove(top_feature)
        chosen.append(top_feature)
        round_scores.append(top_score)
    return chosen, round_scores
forward_selected(data, model)
| true |
70b6529c1901fbddf62d11a618d412422f4a9041 | Python | cwlseu/Algorithm | /cpp/jianzhioffer/power.py | UTF-8 | 505 | 3.875 | 4 | [] | no_license | # -*- coding:utf-8 -*-
class Solution:
    def Power(self, base, exponent):
        """Compute base ** exponent via fast (binary) exponentiation.

        Supports negative and zero exponents; O(log |exponent|)
        multiplications.
        """
        if exponent < 0:
            return 1.0 / float(self.Power(base, -exponent))
        if exponent == 0:
            return 1
        # Floor division keeps the recursion on integers under Python 3
        # (the original `exponent/2` yields floats there, so the
        # recursion never reaches the base cases).
        half = self.Power(base, exponent // 2)
        if exponent % 2 == 0:
            return half ** 2
        return base * half ** 2
# Demo runs.  Python 3 print() calls: the original Python 2 print
# statements are syntax errors on a modern interpreter.
solve = Solution()
print(solve.Power(1.5, 2))
print(solve.Power(1.5, -1))
print(solve.Power(1.2, 0))
print(solve.Power(1.2, 3))
| true |
d1da752545cdc0f2a96af048ee835c000eddcaf2 | Python | stacykutyepov/python-cp-cheatsheet | /leet/strings/wordBreak.py | UTF-8 | 1,004 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | """
time: n^2
space: n
"""
class Solution:
    # Depth-first search over reachable split positions: from index i
    # we can jump to i + len(w) whenever s[i:] starts with word w.
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        pending = [0]
        explored = set()
        while pending:
            start = pending.pop()
            explored.add(start)
            for word in wordDict:
                end = start + len(word)
                if s[start:end] != word:
                    continue
                if end == len(s):
                    return True
                if end not in explored:
                    pending.append(end)
        return False
"""
time: n^3
space: n
"""
class Solution:
    # reachable[i] is True when the prefix s[:i] can be segmented into
    # dictionary words; reachable[0] covers the empty prefix.
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        reachable = [True] + [False] * len(s)
        for end in range(1, len(s) + 1):
            reachable[end] = any(
                reachable[end - len(word)]
                for word in wordDict
                if s[:end].endswith(word)
            )
        return reachable[-1]
4a15f3f6d05637fab3b291bbca83696d19d9e102 | Python | Shumpei-Kikuta/ants_book | /python/beginner/src/meiro.py | UTF-8 | 1,215 | 2.78125 | 3 | [] | no_license | import sys
sys.setrecursionlimit(10000000)
from collections import deque
import numpy as np
INF = 10 ** 10
def main():
    """BFS shortest path through a maze read from stdin.

    Input: "N M", then N rows over '.', '#', 'S', 'G'.  Prints the
    step distance from S when G is dequeued.
    """
    N, M = map(int, input().split())
    # Board padded with a one-cell wall border (-1) so neighbour
    # lookups never index out of range; INF marks unvisited open cells.
    meiros = np.ones((N + 2, M + 2)) * (-1)
    # NOTE(review): `d` is never used afterwards -- candidate for removal.
    d = np.ones((N + 2, M + 2)) * INF
    for i in range(N):
        input_ = input()
        for j, v in enumerate(input_):
            if v == '.':
                meiros[i + 1][j + 1] = INF
            elif v == '#':
                meiros[i + 1][j + 1] = -1
            elif v == 'S':
                meiros[i + 1][j + 1] = 0
                S = (i + 1, j + 1)
            else:
                G = (i + 1, j + 1)
                meiros[i + 1][j + 1] = INF
    queue = deque()
    queue.append(S)
    while(len(queue) != 0):
        now = queue.popleft()
        if now == G:
            # The distance was written into the board when G was
            # enqueued.  NOTE(review): this prints a numpy float (e.g.
            # "16.0"), and nothing at all if G is unreachable --
            # confirm that is acceptable for the judge.
            print(meiros[now[0]][now[1]])
            break
        # (i, j) with i + j odd enumerates the 4 orthogonal neighbours;
        # diagonals and the centre have an even sum and are skipped.
        for i in range(-1, 2):
            for j in range(-1, 2):
                if (i + j) % 2 == 0:
                    continue
                # Only unvisited open cells (still INF) are enqueued;
                # writing the distance immediately prevents re-adding.
                if meiros[now[0] + i][now[1] + j] == INF:
                    queue.append((now[0] + i, now[1] + j))
                    meiros[now[0] + i][now[1] + j] = meiros[now[0]][now[1]] + 1
if __name__ == '__main__':
main()
| true |
a35d1af9de65789a06984d400e1b0bac3200eb00 | Python | Hashfyre/7dom | /prototypes/entity/alt_main.py | UTF-8 | 4,207 | 3.140625 | 3 | [] | no_license | from pandac.PandaModules import *
import direct.directbase.DirectStart
from direct.showbase.DirectObject import DirectObject
from panda3d.bullet import *
class Entity(DirectObject):
    """A physics-backed game object: a Bullet rigid body that is moved
    by setting linear/angular velocities, applied as forces per tick."""

    def __init__(self, world, parent, taskMgr=None, shape=None, pos=(0, 0, 2)):
        """Creates a generic Entity with a physical component.

        Keyword arguments:
        world -- a BulletWorld object to add the Entity's physical body to
        parent -- the Node under which the Entity's physical body will be added
        taskMgr -- a TaskMgr which the Entity's update function will be added to. (default None)
        shape -- a BulletShape object for the Entity's physical body
                 (default: a fresh BulletCapsuleShape(0.5, 1) per instance;
                 previously a single capsule built in the default-argument
                 list was silently shared by every Entity)
        pos -- a three-tuple for the position of the Entity with respect of its parent (default (0, 0, 2))
        """
        # Creates the body, sets mass, makes it remain upright, and attaches to world.
        self.body = BulletRigidBodyNode()
        self.body.setMass(5.0)
        self.body.setAngularFactor(Vec3(0, 0, 1))
        self.body.setCcdMotionThreshold(1)
        world.attachRigidBody(self.body)
        # Adds a shape.
        self.shape = shape if shape is not None else BulletCapsuleShape(0.5, 1)
        self.body.addShape(self.shape)
        # Creates a nodepath and positions the body.
        self.body_np = parent.attachNewNode(self.body)
        self.body_np.setPos(*pos)
        # Initializes velocities to move the body with.
        self.v_linear = Vec3(0, 0, 0)
        self.prev_v_linear = Vec3(0, 0, 0)
        self.v_angular = Vec3(0, 0, 0)
        # Limits for the velocities' magnitudes, respectively.
        self.limit_v_linear = 10
        self.limit_v_angular = 10
        # Automates updating the Entity.
        if taskMgr: taskMgr.add(self.update, "an Entity update")

    def update(self, task=None):
        """Moves the Entity by applying linear and angular velocity to its physical body.
        This also takes a Task object, for automation."""
        dt = globalClock.getDt()
        # Applies velocities as forces each tick, so motion accelerates
        # rather than jumping to the target speed.
        self.body.applyCentralForce(self.v_linear*dt*100) # Needs quarternions to respect orientation.
        self.body.applyTorque(self.v_angular*dt*100)
        # Perpetuates itself.
        if task: return task.cont

    def move(self, v):
        """Sets the Entity's linear velocity to the given amount, upto the limit.

        Keyword arguments:
        v -- a Vec3 vector
        """
        # If the velocity exceeds the limit, clamp it to the limit.
        if v.length() >= self.limit_v_linear:
            v = v / v.length() * self.limit_v_linear
        self.v_linear = v

    def turn(self, v):
        """Sets the Entity's angular velocity to the given amount, upto the limit.

        Keyword arguments:
        v -- a Vec3 vector
        """
        # BUG FIX: the original condition was `<=`, which *scaled up*
        # every sub-limit vector to the limit (and divided by zero for
        # a zero vector).  Mirror move(): clamp only when over the limit.
        if v.length() >= self.limit_v_angular:
            v = v / v.length() * self.limit_v_angular
        self.v_angular = v
def main():
    """A main function to test this code out.

    Builds a Bullet world with debug rendering, a static ground plane,
    and one Entity given an initial velocity, then starts Panda3D's
    main loop.  NOTE(review): run() blocks, so `return 0` is only
    reached once the window closes.
    """
    ## world ##
    # the actual world
    world = BulletWorld()
    world.setGravity(Vec3(0, 0, -9.81))
    # a nodepath for attaching things
    world_np = render.attachNewNode('World')
    # this would render the shapes, I wish I knew of this earlier
    debug_np = world_np.attachNewNode(BulletDebugNode('Debug'))
    debug_np.show()
    world.setDebugNode(debug_np.node())
    # task to update the world (steps the physics every frame)
    def update(task):
        dt = globalClock.getDt()
        world.doPhysics(dt)
        return task.cont
    taskMgr.add(update, 'update')
    ## ground ##
    # body -- static (no mass set), an infinite plane with normal +Z
    node = BulletRigidBodyNode('ground')
    #node.setMass(1.0)
    shape = BulletPlaneShape(Vec3(0, 0, 1), 1)
    node.addShape(shape)
    # attach to the nodetree via a parent, for easier access
    np = render.attachNewNode(node)
    np.setPos(0, 0, -2)
    # attach to the Bullet world
    world.attachRigidBody(node)
    ## instances ##
    e = Entity(world, render, taskMgr, pos=(0, 0, 1))
    e.move(Vec3(0, 10, 0))
    #f = Entity(world, render, taskMgr, pos=(0, 50, 0))
    ## camera ##
    base.cam.setPos(0, -20, 50)
    base.cam.lookAt(0, 0, 0)
    ## run ##
    run()
    return 0
if __name__ == '__main__':
main()
| true |
0a0a4d1940bde46b24604fef43d53d9c140a86a7 | Python | RubenBasentsyan/DataScrapingAUA | /Homework 2/MovieScraping.py | UTF-8 | 2,040 | 3.703125 | 4 | [] | no_license | import requests;
import pandas as pd
import time;
from scrapy.http import TextResponse;
URL = "https://www.imdb.com/chart/moviemeter/"
base_url = "https://www.imdb.com"
#td.titleColumn a - Movie titles
#.secondaryInfo:nth-child(2) - Movie Year
#.velocity - Rank -> doesn't return proper rankings therefore I will use an increment
#.imdbRating strong = Rating. Returns only if the ranking is available
#td.titleColumn a::attr(href) - Movie hyperlink without the base url
class Movies:
    """Fetch the IMDB "most popular" chart and extract per-movie data."""

    def __init__(self, URL):
        # Download once; TextResponse gives us scrapy CSS selectors
        # over the raw HTML.
        self.URL = URL
        self.page = requests.get(self.URL)
        self.response = TextResponse(body=self.page.text, url=self.URL, encoding="utf-8")

    def scrape_movies(self):
        "Scrapes the movies, ratings, ranks and the hyperlink"
        title = self.response.css("td.titleColumn a::text").extract()
        year = [str(i).strip('()') for i in self.response.css(".secondaryInfo:nth-child(2)::text").extract()]
        # The page's .velocity rank column is unreliable, so rank by
        # position instead (was a side-effect list comprehension over
        # rank.append).
        rank = list(range(1, 101))
        rating = []
        for i in self.response.css(".imdbRating"):
            # NOTE(review): str(list).strip('[]') leaves the quotes
            # around the rating text -- verify against live output.
            rating.append(str(i.css("strong::text").extract()).strip('[]'))
        hyperlink = [base_url + i for i in self.response.css("td.titleColumn a::attr(href)").extract()]
        return title, year, rank, rating, hyperlink
#While scraping, I had to do some checks. For example whether or not a ranking exists or attaching a base URL to the movie hyperlink.
m = Movies(URL).scrape_movies()
#After scraping all the information, I replaced the empty rankings with a "No ranking" string.
# NOTE(review): assumes exactly 100 ratings were scraped; a shorter
# m[3] list would raise IndexError here.
for i in range(0,100):
    if(m[3][i] == ""):
        m[3][i]="No ranking"
#In order to get the details of each movie in one element of a list, I had to transpose the scraped list.
m = list(map(list, zip(*m)))
#Finally I put the transposed list into the DataFrame and named the columns to make it look better.
df = pd.DataFrame(m, columns=['Title','Year','Rank','Rating','Hyperlink'])
print(df)
#All the information needed is the DataFrame "df"
| true |
ddaee3330d16afc99d1b2cf00c652728d313661d | Python | IanRivas/python-ejercicios | /tp5-Excepciones/7.py | UTF-8 | 1,103 | 4.65625 | 5 | [] | no_license | '''
7. Escribir un programa que juegue con el usuario a adivinar un número. El programa debe generar un número al azar entre 1 y 500 y el usuario debe adivinarlo.
Para eso, cada vez que se introduce un valor se muestra un mensaje indicando si el nú-mero que tiene que adivinar es mayor o menor que el ingresado.
Cuando consiga adivinarlo, se debe imprimir en pantalla la cantidad de intentos que le tomó hallar el número. Si el usuario introduce algo que no sea un número
se mostrará un mensaje en pantalla y se lo contará como un intento más.
'''
from random import randint
def Main():
    """Guess-the-number game: picks a random target in [1, 500] and
    prompts until the user finds it, hinting 'mas'/'menos'."""
    randomNumber = randint(1, 500)
    while True:
        try:
            number = int(input('Adivina el numero: '))
        except ValueError:
            # BUG FIX: the original fell through to the mas/menos
            # comparison with a stale (or, on the first attempt,
            # never-assigned) `number`, raising UnboundLocalError for
            # non-numeric input.  Report and re-prompt instead.
            print('Eso no es un numero')
            continue
        if number == randomNumber:
            print(f'Adivinaste el numero: {number}')
            break
        # Replaces the assert-driven control flow: asserts vanish
        # under `python -O`.
        if number < randomNumber:
            print('mas')
        else:
            print('menos')
if __name__ == '__main__':
Main() | true |
4e806189c021916e64a4ae331d80369db0dccd11 | Python | mourkeita/scripts | /urllib_to_server.py | UTF-8 | 197 | 2.515625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/python
# coding: utf8
import httplib2
import urllib
print "Try to connect to site ..."
h = httplib2.Http()
headers, content = h.request("https://www.google.fr", "GET")
print content | true |
a76bc3c3632e0d2c06ec107e325e21fd3029300e | Python | borislavstoychev/Soft_Uni | /soft_uni_fundamentals/Basic Syntax, Conditional Statements and Loops/More Exercises/04_Sum_Of_A_Beach.py | UTF-8 | 192 | 3.34375 | 3 | [] | no_license | word_snake = input()
word_matches = ["water", "sand", "sun", "fish"]
# Total (non-overlapping) occurrences of every beach word in the
# lower-cased input line read above.
match = sum(word_snake.lower().count(word) for word in word_matches)
print(match)
a07108132bca4f86ef08d9c08e5bc1940c5fe5f7 | Python | simonchapman1986/ripe | /src/apps/api/helpers/response.py | UTF-8 | 2,346 | 2.5625 | 3 | [] | no_license | import json
import logging
trace = logging.getLogger('trace')
class ResponseGenerator():
    """Builds a nested report structure keyed by API name.

    The request path is expected to carry attribute values as its path
    segments (indices 3..-2), which are zipped with attribute_keys.
    All Python 2 print statements were converted to print() calls --
    they are SyntaxErrors under Python 3.
    """

    def __init__(self, api_name, path, attribute_keys, sub_struct=None, struct_count=0):
        self.resp = dict()
        self.struct = dict()
        self.struct[api_name] = {}
        self.struct[api_name]['current_report'] = ''
        # When a sub-structure template is supplied, pre-populate
        # struct_count numbered slots with it (empty dict otherwise).
        self.struct[api_name]['current_report'] = {str(k): sub_struct for k in range(0, struct_count) if sub_struct}
        self.api_name = api_name
        self.path = path
        attrib_vals = path.split('/')[3:-1]
        # zip truncates to the shorter sequence, so extra keys or
        # values are silently dropped.
        self.attributes = {k: v for k, v in zip(attribute_keys, attrib_vals)}

    def add_el(self, sub_struct, parent_key):
        """Append sub_struct under the first dict named parent_key,
        keyed by the next free numeric index."""
        keys = find_key(parent_key, self.struct)
        if not keys:
            print('parent not found')
        parent = self.struct
        try:
            # Walk down to the dict that directly contains parent_key.
            for k in range(0, len(keys) - 1, 1):
                parent = parent.get(keys[k], None)
        except Exception as e:
            print(e.args)
        i = len(parent[parent_key])
        if not i:
            parent[parent_key] = {i: sub_struct}
        else:
            parent[parent_key].update({i: sub_struct})

    def set_report_template(self):
        """Attach the parsed path attributes to the current report."""
        self.struct[self.api_name]['current_report']['attributes'] = self.attributes

    def get_dict_response(self):
        return self.struct

    def get_json_response(self):
        # NOTE(review): serialises self.resp, which is never populated
        # anywhere -- self.struct may have been intended.  Left as-is
        # to preserve behaviour.
        return json.dumps(self.resp)
def find_key(key, d):
    """Depth-first search the nested dict `d` for `key`.

    Returns the list of keys leading from the top level down to the
    first match, or None when the key is absent.  (The original's
    trailing `elif k == key` was unreachable dead code: that case is
    already handled by the first test in the loop.)
    """
    for k, v in d.items():
        if k == key:
            return [k]
        if isinstance(v, dict):
            path = find_key(key, v)
            if path:
                return [k] + path
    return None
| true |
cf3e375792f4df68bc7481a350f008c8f244eebc | Python | NAMHYEONJI/PPS | /NamHyeonJi_20210710/3-4_NamHyeonJi.py | UTF-8 | 291 | 3.625 | 4 | [] | no_license | #이렇게 풀어도 되는 것인가...
# Read the line count n and the starting bit (0 or 1).
n = int(input())
first = int(input())
# `second` is the complement bit of `first` (0 <-> 1).
if first == 0:
    second = 1
else:
    second = 0
if n > 5:
    # NOTE(review): the problem apparently caps n at 5; larger inputs
    # print this quote instead -- confirm against the statement.
    print("Love is open door")
else:
    # Emit n-1 lines, alternating: complement, original, complement...
    for i in range(n-1):
        if i % 2 == 0:
            print(second)
        else:
            print(first)
234d8b06ebab3bfb3357bf011e6182b0ded53b85 | Python | Nu-Pan/konoumasuki | /parameter_solver/.ipynb_checkpoints/parameter_solver-checkpoint.py | UTF-8 | 6,468 | 3.03125 | 3 | [] | no_license |
from sympy import *
from typing import Dict, List, Any
from sympy.plotting import plot
'''
# sympy ヘルパ
'''
def makesym() -> Symbol:
    '''
    Create a fresh sympy Symbol.
    The placeholder name 'undef' is replaced later by the locals()
    update step below, so any temporary name will do.
    '''
    return Symbol('undef')
def Lerp( min_value: Symbol, max_value: Symbol, ratio: Symbol ) -> Symbol:
    '''
    Linear interpolation: the value at `ratio` within the interval
    [min_value, max_value], where ratio runs over [0.0, 1.0].
    '''
    return min_value + (max_value - min_value) * ratio
def clamp( min_value: Symbol, max_value: Symbol, value: Symbol ) -> Symbol:
    '''
    Clamp value into the interval [min_value, max_value], expressed as
    a sympy Piecewise so the result stays symbolic.
    '''
    return Piecewise(
        (min_value, value < min_value),
        (max_value, value > max_value),
        (value, true )
    )
'''
# リーフパラメータ定義
入力として与えるべきパラメータ。
'''
# ウマ娘基礎ステータス
スピード = makesym()
スタミナ = makesym()
パワー = makesym()
根性 = makesym()
賢さ = makesym()
脚質適正補正 = makesym()
バ場適正補正 = makesym()
距離適性補正 = makesym()
やる気補正 = makesym()
# スキル補正
スキル補正賢さ = makesym()
スキル補正スタミナ = makesym()
スキル補正パワー = makesym()
スキル補正根性 = makesym()
スキル補正スピード = makesym()
スキル補正速度 = makesym()
# コース関連
バ場状態パラ補正 = makesym()
レース基準速度 = makesym()
基本速度補正 = makesym() # コース適正のこと
レース距離 = makesym()
バ場状態体力消費速度補正 = makesym()
# レース中動的変化
傾斜角 = makesym()
ポジションキープ補正 = makesym()
前のウマ娘との距離差 = makesym()
ブロック補正 = makesym()
ブロックしているウマ娘の現在速度 = makesym()
ウマ状態補正 = makesym()
現在レーン距離 = makesym()
最大レーン距離 = makesym()
順位 = makesym()
現在速度 = makesym()
# 他
育成モード補正 = makesym()
'''
# ルートパラメータ定義
計算式の左辺にしか存在しないパラメータ。
'''
基礎目標速度 = makesym()
通常目標速度 = makesym()
ポジションキープ目標速度 = makesym()
スパート目標速度 = makesym()
スタミナ切れ目標速度 = makesym()
被ブロック目標速度 = makesym()
加速度 = makesym()
初期体力上限 = makesym()
体力消耗速度 = makesym()
レーン変更目標速度 = makesym()
レーン変更加速度 = makesym()
レーン変更実際速度 = makesym()
'''
# sympy シンボル名更新
python 上と sympy 上で変数名を一致させる。
'''
# Rebind every module-level name that currently holds a Symbol to a
# new Symbol named after the variable itself, so python identifiers
# and sympy symbol names agree (everything above was Symbol('undef')).
# NOTE(review): writing through locals() is only reliable here because
# at module scope locals() is the module namespace.
base_locals = locals()
locals().update([
    (k, Symbol(k))
    for k in base_locals
    if type(base_locals[k]) is Symbol
])
'''
# ノードパラメータ定義
リーフパラメータとノードパラメータをつなぐ中間パラメータ。
'''
# ウマ娘基礎パラメータ
補正スピード = スピード * やる気補正 * 基本速度補正 + バ場状態パラ補正 + 育成モード補正 + スキル補正スピード
補正賢さ = 賢さ * やる気補正 * 脚質適正補正 + 育成モード補正 + スキル補正賢さ
補正スタミナ = スタミナ * やる気補正 + バ場状態パラ補正 + 育成モード補正 + スキル補正スタミナ
補正パワー = パワー * やる気補正 + バ場状態パラ補正 + 育成モード補正 + スキル補正パワー
補正根性 = 根性 * やる気補正 + バ場状態パラ補正 + 育成モード補正 + スキル補正根性
# 速度関係
レース基準速度 = 20 - ( 2000 - レース距離 ) * 0.001
賢さランダム補正上限 = ( 補正賢さ / 5500 ) * log( 補正賢さ * 0.1 )
賢さランダム補正下限 = 賢さランダム補正上限 - 0.65
賢さランダム補正 = ( 賢さランダム補正下限 + 賢さランダム補正上限 ) / 2 # 本当は一様乱数なんだけど、めんどいので期待値でお茶を濁す
上り坂補正 = - abs( 100 * tan( 傾斜角 * 0.017453 ) ) * 200 / 補正パワー
下り坂補正 = 0.3 + abs( 100 * tan( 傾斜角 * 0.017453 ) ) / 10.0
坂補正 = Piecewise( (下り坂補正, 傾斜角 < 0), (上り坂補正, 傾斜角 >= 0) )
# 体力関係
体力消耗速度補正 = ( 現在速度 - レース基準速度 + 12 ) ** 2 / 144
体力消耗速度スパート補正 = 1 + 200 / sqrt( 600 * 補正根性 )
# レーン変更関係
レーン変更スタート補正 = 1.0 + 0.05 * ( 現在レーン距離 / 最大レーン距離 )
レーン変更順位補正 = 1.0 + 0.001 * 順位
レーン変更内側移動補正 = - ( 1.0 + 現在レーン距離 )
'''
# ルートパラメータ式定義
ルートパラメータの計算式。
solve の対象なので Eq にしている。
'''
equations = [
Eq(
基礎目標速度,
sqrt( 500 * 補正スピード ) * 距離適性補正 * 0.002 + レース基準速度 * ( 脚質適正補正 + 賢さランダム補正 )
),
Eq(
通常目標速度,
基礎目標速度 + 坂補正
),
Eq(
ポジションキープ目標速度,
基礎目標速度 * ポジションキープ補正 + 坂補正
),
Eq(
スパート目標速度,
1.05 * ( 基礎目標速度 + 0.01 * レース基準速度 ) + sqrt( 500 * 補正スピード ) * 距離適性補正 * 0.002
),
Eq(
スタミナ切れ目標速度,
レース基準速度 * 0.85 + sqrt( 補正根性 * 200 ) * 0.001
),
Eq(
被ブロック目標速度,
Lerp( 0.988, 1.0, 前のウマ娘との距離差 / ブロック補正 ) * ブロックしているウマ娘の現在速度
),
Eq(
加速度,
脚質適正補正 * 0.0006 * sqrt( 補正パワー * 500 ) * バ場適正補正
),
Eq(
初期体力上限,
レース距離 + 脚質適正補正 * 0.8 * 補正スタミナ
),
Eq(
体力消耗速度,
20 * 体力消耗速度補正 * バ場状態体力消費速度補正 * ウマ状態補正 * 体力消耗速度スパート補正
),
Eq(
レーン変更目標速度,
0.02 * ( 0.3 + 0.001 * 補正パワー ) * レーン変更スタート補正 * レーン変更順位補正
),
Eq(
レーン変更加速度,
0.02 * 1.5
),
Eq(
レーン変更実際速度,
clamp( 0, 0.6, 現在速度 + スキル補正速度 ) * レーン変更内側移動補正
)
]
'''
↓みたいな感じで解く
solve(equations, スピード)
'''
| true |
6d425069335954ffedb365eb159e0b804331b766 | Python | sjzyjc/leetcode | /200/200-BFS.py | UTF-8 | 1,096 | 3.484375 | 3 | [] | no_license | from collections import deque
class Solution:
    """
    @param grid: a boolean 2D matrix
    @return: an integer
    """
    def numIslands(self, grid):
        # Count 4-connected components of nonzero cells; each found
        # island is zeroed out so it is only counted once.
        if not grid or not grid[0]:
            return 0
        total = 0
        for row in range(len(grid)):
            for col in range(len(grid[row])):
                if self.findIsland(grid, row, col):
                    total += 1
        return total

    def findIsland(self, grid, i, j):
        # Flood-fill from (i, j); returns True when at least one land
        # cell was consumed (i.e. (i, j) was part of an island).
        stack = [(i, j)]
        consumed = False
        while stack:
            r, c = stack.pop()
            if not (0 <= r < len(grid) and 0 <= c < len(grid[0])):
                continue
            if grid[r][c] == 0:
                continue
            consumed = True
            grid[r][c] = 0
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                stack.append((r + dr, c + dc))
        return consumed
62dee71f74e7210daddaf06a4db7f690944c569c | Python | marcioinfo/reviews | /src/project/middlewares.py | UTF-8 | 566 | 2.625 | 3 | [] | no_license | import json
# DJANGO LIBRARY IMPORT
from django.http import HttpResponse
class ExceptionsMiddleware(object):
    """ A middleware for handling exceptions """
    def __init__(self, get_response):
        # Standard Django middleware wiring: keep the next handler.
        self.get_response = get_response
    def __call__(self, request):
        # Pass-through; this middleware only acts on exceptions.
        return self.get_response(request)
    def process_exception(self, request, exception):
        # Turn any unhandled view exception into a 400 JSON body of
        # the form {"error": "ExcName: message"}.
        # NOTE(review): 400 is returned for *every* exception type;
        # server-side faults arguably deserve 500 -- confirm intent.
        response = {'error': exception.__class__.__name__ + ': ' + str(exception)}
        return HttpResponse(json.dumps(response, ensure_ascii=False),
                            status=400)
| true |
202a51aeca3c36532ad1309e45ca6b72c692076f | Python | Speclized/cdn | /prj/paper/paperJiangchong.py | UTF-8 | 642 | 2.96875 | 3 | [] | no_license | import pandas as pd
import jieba
import synonyms
# 更改比例 小 中 大 s m l
# 更改程度 小 中 大 s m l
def change(text, rate=1, level=2):
    """Rewrite `text` by substituting each word with a synonym.

    `level` is the number of rewrite passes.  `rate` is accepted for
    API compatibility but currently unused.

    BUG FIX: the original used the same variable `i` as both the outer
    pass counter and the inner word index, so the inner loop clobbered
    the counter and `level` passes were never performed reliably.
    """
    for _ in range(level):
        seg_list = list(jieba.cut(text, cut_all=False))
        for idx, word in enumerate(seg_list):
            try:
                near = synonyms.nearby(word)[0]
                if near is not None:
                    # Keep the original choice: the third-nearest word.
                    word = near[2]
            except Exception:
                # Words unknown to the synonym model stay unchanged
                # (narrowed from a bare `except:`).
                pass
            seg_list[idx] = word
        text = ''.join(seg_list)
    return text
if __name__ == '__main__':
while True:
print(change(input(), level=2)) | true |
94c66a8d94da5590c0b797c698ebe0f4364b3c66 | Python | RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2- | /Tempp.py | UTF-8 | 309 | 3.4375 | 3 | [
"MIT"
] | permissive | def replicate(times, data):
value = []
if type(times) is str or data is " ":
raise ValueError ("Invalid Input")
elif times > 0 and data is not " ":
for x in range(times):
value.append(data)
return value
elif times <= 0:
return value
H = replicate(5,"x")
print(str(H))
| true |
d90be73f0ff56e596b7abe33ac40f5f9269c8829 | Python | cale-i/atcoder | /yukicoder/No.236 鴛鴦茶.py | UTF-8 | 168 | 3.21875 | 3 | [] | no_license | # yukicoder No.236 鴛鴦茶 2020/02/03
# Read four integers a, b, x, y from one input line.
# NOTE(review): comments below assume a, b are per-serving amounts and
# x, y the available stock -- confirm against the problem statement.
a,b,x,y=map(int,input().split())
# Servings each ingredient alone could supply.
xa=x/a
yb=y/b
if a==b:
    # Equal per-serving amounts: answer is twice the smaller stock.
    ans=2*min(x,y)
else:
    # Otherwise the scarcer ingredient (in servings) is the limit.
    ans=(a+b)*min(xa,yb)
print(ans)
7bf907a25f16a8a4e6e7a62a31bd7ce4a42a09b6 | Python | magiccjae/robust_tracking | /src/quaternion_to_euler.py | UTF-8 | 748 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
import rospy, tf
from sensor_msgs.msg import Imu
import numpy as np
def quaternion_callback(msg):
    """Print the IMU orientation as "roll pitch yaw" in degrees."""
    quaternion = (
        msg.orientation.x,
        msg.orientation.y,
        msg.orientation.z,
        msg.orientation.w)
    # Use ROS tf to convert to Euler angles from quaternion
    euler = tf.transformations.euler_from_quaternion(quaternion)
    tracker_roll = euler[0] * 180 / np.pi
    tracker_pitch = euler[1] * 180 / np.pi
    tracker_yaw = euler[2] * 180 / np.pi
    # Python 3 print() call (the original Python 2 print statement is
    # a SyntaxError on py3); output stays space-separated.
    print(tracker_roll, tracker_pitch, tracker_yaw)
def listener():
    """Init the ROS node and subscribe to the gimbal IMU topic."""
    rospy.init_node('quaternion_to_euler', anonymous=True)
    rospy.Subscriber('/gimbal_cam/raw_imu', Imu, quaternion_callback)
    # Block so callbacks keep firing until the node is shut down.
    rospy.spin()
if __name__ == '__main__':
listener()
| true |
24377f76cc0a62ea2e71873e0e27aef36c4048e2 | Python | MendelBak/cdPython | /python_fundamentals_cd/names.py | UTF-8 | 338 | 3.140625 | 3 | [] | no_license | students = [
{'first_name': 'Michael', 'last_name' : 'Jordan'},
{'first_name' : 'John', 'last_name' : 'Rosales'},
{'first_name' : 'Mark', 'last_name' : 'Guillen'},
{'first_name' : 'KB', 'last_name' : 'Tonel'}
]
def names(list):
    """Print each student's "First Last" name, one per line.

    NOTE(review): the parameter shadows the builtin `list`; it is kept
    only for call compatibility.
    """
    # Python 3 print() call (the original Python 2 print statement is
    # a SyntaxError on py3); output stays space-separated.
    for obj in list:
        print(obj["first_name"], obj["last_name"])
names(students)
| true |
e60a748dab0279bfcec845278e241166afc0d3d5 | Python | Yuhjiang/LeetCode_Jyh | /problems/midium/13_roman_to_integer.py | UTF-8 | 642 | 3.65625 | 4 | [] | no_license | class Solution:
def romanToInt(self, s: str) -> int:
digit = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
ans = 0
length = len(s)
for i in range(length-1, -1, -1):
if i < length-1 and digit[s[i]] < digit[s[i+1]]:
sign = -1
else:
sign = 1
ans += sign * digit[s[i]]
return ans
if __name__ == '__main__':
    # Smoke tests: plain (III), subtractive (IV, IX) and mixed
    # (LVIII = 58, MCMXCIV = 1994) numerals.
    print(Solution().romanToInt('III'))
    print(Solution().romanToInt('IV'))
    print(Solution().romanToInt('IX'))
    print(Solution().romanToInt('LVIII'))
    print(Solution().romanToInt('MCMXCIV'))
233ef342cb61845f6eb5134ee7db22489e66a3fa | Python | bmviniciuss/ufpb-so | /project1/main.py | UTF-8 | 1,101 | 2.78125 | 3 | [] | no_license | import sys
import copy
from Parser import Parser
from FCFS import FCFS
from SJF import SJF
from RR import RR
from OutputHandler import OutputHandler
from Utils import get_stats_str, verbose_mode
def main():
    """Run FCFS, SJF and RR over the same process list and report.

    Usage: main.py <input-file>; verbosity is decided by
    verbose_mode(sys.argv).  Results always go to results.txt.
    """
    verbose = verbose_mode(sys.argv)
    parser = Parser()
    output = OutputHandler("results.txt")
    processes = parser.parse_file(sys.argv[1])
    # Each scheduler receives its own deep copy, presumably because
    # run() mutates the process objects -- verify in FCFS/SJF/RR.
    # Creates a FCFS scheduler
    fcfs = FCFS(copy.deepcopy(processes))
    fcfs_stats = fcfs.run()
    # Creates a SJF Scheduler
    sjf = SJF(copy.deepcopy(processes))
    sjf_stats = sjf.run()
    # Creates a RR Scheduler
    rr = RR(copy.deepcopy(processes))
    rr_stats = rr.run()
    # If verbose. print to terminal
    if verbose:
        print(get_stats_str("FCFS", fcfs_stats), end="")
        print(get_stats_str("SJF", sjf_stats), end="")
        print(get_stats_str("RR", rr_stats), end="")
    # Done - writing results to output file
    output.write_to_file(get_stats_str("FCFS", fcfs_stats),
                         get_stats_str("SJF", sjf_stats), get_stats_str("RR", rr_stats))
if __name__ == "__main__":
main()
| true |
06bac0360a193fa558d07216ddc4744b65653f7b | Python | antonsold/mapreduce | /reducer.py | UTF-8 | 653 | 2.734375 | 3 | [] | no_license | import sys
def print_row(word, files_set):
    """Emit "word<TAB>total" where total sums the per-file counts.

    A falsy word (None before the first record) produces no output.
    """
    if not word:
        return
    print(word + '\t' + str(sum(files_set.values())))
# MapReduce-style reducer: stdin is sorted by word; count occurrences
# per (word, document), honouring per-document "ban" flags.
current_word = None
files = dict()
files_banned = set()
for line in sys.stdin:
    # Each record is "word<TAB>text_id<TAB>flag".  BUG FIX: the
    # original used maxsplit=1, which yields only two fields and made
    # this three-way unpacking raise ValueError on every line.
    word, text_id, flag = line.strip().split('\t', 2)
    if current_word != word:
        # Input is sorted by word, so a new word means the previous
        # one is complete: emit it and reset the per-word state.
        print_row(current_word, files)
        files.clear()
        files_banned.clear()
        current_word = word
    if flag == '0' or text_id in files_banned:
        # A '0' flag bans this document for the current word: zero its
        # count and ignore any further occurrences from it.
        files_banned.add(text_id)
        files[text_id] = 0
        continue
    if text_id in files:
        files[text_id] += 1
    else:
        files[text_id] = 1
# Flush the final word after stdin is exhausted.
print_row(current_word, files)
| true |
5b5340e8731544da3fbf5a4018883810383470f8 | Python | swethakandakatla/python-project | /listcomprahensioncheck.py | UTF-8 | 77 | 3.140625 | 3 | [] | no_license | n=100
# Multiples of both 2 and 5 below n are exactly the multiples of 10.
num_list = [i for i in range(0, n) if i % 10 == 0]
print(num_list)
33cbd164a435e9a4545c87bc7709057d5b847aa1 | Python | konriz/KoNotek | /modules/morse/generator.py | UTF-8 | 1,829 | 3.078125 | 3 | [] | no_license | import numpy as np
import struct
class Signal:
    """One Morse element: its kind ('signal' or 'pause') and its
    duration measured in dot units."""
    def __init__(self, type, length):
        self.type, self.length = type, length
# Morse alphabet in dot units: '.' dot (1), '-' dash (3), '/' gap
# between letters (2), '*' gap between words (6).  Writer.__write
# appends one extra 1-unit pause after every element.
SIGNAL_RULES = {
    ".": Signal(type='signal', length=1),
    "-": Signal(type='signal', length=3),
    "/": Signal(type='pause', length=2),
    "*": Signal(type='pause', length=6)
}
class Writer:
    """Appends Morse elements as raw signed 8-bit PCM samples to a file.

    NOTE(review): the output is a headerless sample stream, not an
    actual WAV container, despite the method name write_morse_wav.
    """
    def __init__(self, file_name, freq=440, short_length=0.1, sampling_rate=44100):
        self.file_name = file_name
        self.freq = freq
        self.short_length = short_length
        self.sampling_rate = sampling_rate
        # Samples per oscillation period of the tone.
        self.sample = self.sampling_rate / self.freq
        # Number of periods making up one "short" (dot-length) unit.
        self.repetitions = short_length * freq
    def __generate_wave(self, length=1):
        # Sine tone lasting `length` dot units; amplitude 100 fits in
        # the signed byte written below.
        x = np.arange(self.sample * self.repetitions * length)
        y = 100 * np.sin(2 * np.pi * self.freq * x / self.sampling_rate)
        return y
    def __generate_pause(self, length=1):
        # Silence of `length` dot units (all-zero samples).
        x = np.arange(self.sample * self.repetitions * length)
        y = 0 * x
        return y
    def __write_signal(self, signal):
        # Append every sample as one signed byte ('b' format).
        with open(self.file_name, 'ab') as file:
            for sample in signal:
                file.write(struct.pack('b', int(sample)))
    def __write(self, sign):
        # Emit one Morse element followed by a one-unit gap.
        # NOTE(review): the try spans the whole body, so a KeyError
        # raised anywhere inside is also reported as an unknown sign.
        try:
            signal = SIGNAL_RULES[sign]
            print('Writing ' + sign)
            if signal.type == 'pause':
                self.__write_signal(self.__generate_pause(signal.length))
            else:
                self.__write_signal(self.__generate_wave(signal.length))
            self.__write_signal(self.__generate_pause())
        except KeyError:
            raise MorseException(sign)
    def write_morse_wav(self, morse):
        # Write every sign of a Morse string built from ". - / *".
        for sign in morse:
            self.__write(sign)
class MorseException(Exception):
    """Raised for a character that has no Morse translation."""
    def __init__(self, sign):
        # Forward the offending character to Exception so that
        # str(exc) is informative; it was empty before because
        # super().__init__ was never called.
        super().__init__(sign)
        self.sign = sign
| true |
b0fba72b9a49e3ffd119b853e1dbf15cb9349dfc | Python | michaelgbw/leetcode_python | /14.最长公共前缀.py | UTF-8 | 1,543 | 3.484375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=14 lang=python3
#
# [14] 最长公共前缀
#
# https://leetcode-cn.com/problems/longest-common-prefix/description/
#
# algorithms
# Easy (36.59%)
# Likes: 921
# Dislikes: 0
# Total Accepted: 211.5K
# Total Submissions: 577.2K
# Testcase Example: '["flower","flow","flight"]'
#
# 编写一个函数来查找字符串数组中的最长公共前缀。
#
# 如果不存在公共前缀,返回空字符串 ""。
#
# 示例 1:
#
# 输入: ["flower","flow","flight"]
# 输出: "fl"
#
#
# 示例 2:
#
# 输入: ["dog","racecar","car"]
# 输出: ""
# 解释: 输入不存在公共前缀。
#
#
# 说明:
#
# 所有输入只包含小写字母 a-z 。
#
#
# @lc code=start
class Solution:
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix of all strings in strs
        ('' when there is none or strs is empty).

        Shrinks a candidate prefix (the first string) until every
        other string starts with it -- O(total characters), replacing
        the original char-matrix plus try/except index probing.
        """
        if not strs:
            return ''
        prefix = strs[0]
        for s in strs[1:]:
            while not s.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ''
        return prefix
# @lc code=end
ob = Solution()
print(ob.longestCommonPrefix(["f","f",'fa']))
| true |
ae6dfe64f882c2aad7482b6f25bff5b66aa8bd5b | Python | childsm/Python-Projects | /helloWorld/scr/mainapp/profiles/models.py | UTF-8 | 839 | 2.625 | 3 | [] | no_license | from django.db import models
# Create your models here.
PREFIX_OPTION = (
('Mr','Mr'),
('Mrs', 'Mrs'),
('Ms', 'Ms'),
)
class Profiles(models.Model):
    """A user profile record (honorific, name, e-mail, username).

    NOTE(review): field names are CamelCase; Django convention is
    snake_case, but renaming them would require a migration.
    """
    # Honorific, restricted to the PREFIX_OPTION choices above.
    Prefix = models.CharField(max_length=60, default="", choices=PREFIX_OPTION)
    First_Name = models.CharField(max_length=30, default="", blank=False, null=False)
    Last_Name = models.CharField(max_length=30, default="", blank=False, null=False)
    # EmailField already validates the address format.
    Email = models.EmailField(max_length=254) #Email = models.CharField(max_length=254, EmailVarify)
    # Optional display name (blank allowed in forms).
    Username = models.CharField(max_length=60, default="", blank=True, null=False)
    objects = models.Manager()
    def __str__(self):
        # Shown in the admin instead of "Profiles object (1)".
        return self.First_Name #invoking Python's built in module string
    #the above takes "Product object(1)" and turns it to a string. So name is returned instead of object
| true |
474a19ef045298b5de26751dd460777bcafaa80f | Python | zuzux3/Projekt-NP | /codes/main.py | UTF-8 | 386 | 3.671875 | 4 | [] | no_license | from Expo_Euler import Euler_Exp
from Impo_Euler import Euler_Imp
from trapez import trapez
string = "Podaj wybor operacji: "
string1 = "1 - Jawny Euler, 2 - Niejawny Euler, 3 - Metoda Trapezowa"
print(string)
print(string1)
w = input()
if w == '1':
Euler_Exp()
elif w == '2':
Euler_Imp()
elif w == '3':
trapez()
else:
string2 = "Wybor niepoprawny"
print(string2)
| true |
41c51d1e477b1920837e1ea17f0267553eee3070 | Python | pendragonxi/tec4tensorflow | /Test2.py | UTF-8 | 1,231 | 3.46875 | 3 | [] | no_license | #-*-coding:UTF-8-*-
import numpy as np
import matplotlib.pyplot as plt
# x = np.linspace(0, 10, 1000)
# y = np.sin(x)
# plt.figure(figsize=(8,4))
# plt.plot(x,y,label="$sin(x)$",color="red",linewidth=2)
# plt.xlabel("Time(s)")
# plt.ylabel("Volt")
# plt.title("PyPlot First Example")
# plt.ylim(-1.2,1.2)
# plt.show()
# """
# 通过一系列函数设置当前Axes对象的各个属性:
# xlabel、ylabel:分别设置X、Y轴的标题文字。
# title:设置子图的标题。
# xlim、ylim:分别设置X、Y轴的显示范围。
# """
#
# """
# ===============================
# Legend using pre-defined labels
# ===============================
#
# Notice how the legend labels are defined with the plots!
# """
#
# Build sample curves over x in [0, 3): a rising exponential, its mirror
# image, and a vertically shifted copy.
a = np.arange(0, 3, .02)
b = a  # second alias kept to match the original namespace (a = b = ...)
c = np.exp(a)
d = c[::-1]
e = c + 2

# Draw the three curves with their legend labels.
fig, ax = plt.subplots()
for ys, fmt, lbl in (
    (c, 'k--', 'Model length'),
    (d, 'k:', 'Data length'),
    (e, 'k:', 'Data length'),
):
    ax.plot(a, ys, fmt, label=lbl)

# Oversized legend with a drop shadow and a pale-turquoise background.
legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('#00FFCC')
plt.show()
54b0d95955d5164262faa27974f04ccbe081a074 | Python | alintudose/football_match | /football_match.py | UTF-8 | 2,163 | 3.421875 | 3 | [] | no_license | '''
Se va simula un meci de fotbal intre doua echipe.
Vom defini un teren de fotbal, o minge si pozitia mingii pe terenul de fotbal.
Simularea meciului va consta in definirea numerelor de suturi si contabilizarea tuturor golurilor, outurilor si
cornerelor.
La marcarea golurilor mingea va reveni in centrul terenului.
'''
from random import randint
class Minge:
    """A ball on a 100x50 pitch; (x, y) is its current position."""

    def __init__(self, x=50, y=25):
        # Default position is the centre spot of the pitch.
        self.x = x
        self.y = y

    def __repr__(self):
        return f"x = {self.x} y = {self.y}"

    def sut(self):
        """Simulate a shot: move the ball to a random spot and return self.

        Bug fix: ``random.randint`` is inclusive on BOTH ends, so the
        bounds must be the field limits themselves.  The original used
        ``randint(0, 101)`` / ``randint(0, 51)``, which could place the
        ball at x == 101 or y == 51 -- outside the pitch, where no goal,
        corner or out would ever be registered by the match logic.
        """
        self.x = randint(0, 100)
        self.y = randint(0, 50)
        return self
class Meci:
    """A crude football match simulation on a 100x50 pitch.

    Team 1 attacks the right goal line (x == 100) and team 2 the left
    one (x == 0); the goal mouth spans 20 <= y <= 30.  ``simulare``
    takes 1000 random shots and tallies goals, corners and outs.
    """

    def __init__(self):
        # Team names are read interactively; all counters start at zero.
        self.e1 = input("Introduceti numele echipei 1: ")
        self.e2 = input("Introduceti numele echipei 2: ")
        self.nr_gol_e1 = 0
        self.nr_gol_e2 = 0
        self.nr_corner_e1 = 0
        self.nr_corner_e2 = 0
        self.nr_out_e1 = 0
        self.nr_out_e2 = 0

    def __repr__(self):
        # Final score sheet, one statistic per line.
        return f"{self.e1} vs {self.e2} scor {self.nr_gol_e1} - {self.nr_gol_e2}\n" \
               f"Cornere {self.e1} = {self.nr_corner_e1}\n" \
               f"Cornere {self.e2} = {self.nr_corner_e2}\n" \
               f"Outuri {self.e1} = {self.nr_out_e1}\n" \
               f"Outuri {self.e2} = {self.nr_out_e2}\n"

    def simulare(self):
        """Take 1000 random shots, update the counters and print the result."""
        poz = Minge(50, 25)
        for _ in range(1000):
            poz.sut()
            if poz.x == 0 and 20 <= poz.y <= 30:
                # Ball inside the left goal mouth: team 2 scores.
                self.nr_gol_e2 += 1
                poz = Minge()  # kick off again from the centre spot
            elif poz.x == 100 and 20 <= poz.y <= 30:
                # Ball inside the right goal mouth: team 1 scores.
                self.nr_gol_e1 += 1
                # Bug fix: the original reset the ball only after a
                # team-2 goal; reset here as well for symmetry.  (The
                # reset is cosmetic either way, since sut() overwrites
                # the position unconditionally on the next shot.)
                poz = Minge()
            elif poz.x == 0 and (0 <= poz.y < 20 or 30 < poz.y < 50):
                # Ball crossed the left goal line outside the goal mouth.
                self.nr_corner_e2 += 1
            elif poz.x == 100 and (0 <= poz.y < 20 or 30 < poz.y < 50):
                # Bug fix: the second half of this condition was
                # copy-pasted as "poz.x == 0", which is already consumed
                # by the branch above, so team-1 corners with 30 < y < 50
                # were never counted.  Both halves now test the right
                # goal line (x == 100).
                self.nr_corner_e1 += 1
            elif 0 <= poz.x <= 100 and (poz.y == 0 or poz.y == 50):
                # Ball crossed a sideline (top or bottom edge).
                # NOTE(review): the ball's position alone cannot tell
                # which team kicked it out, and the original only ever
                # incremented team 1's counter; nr_out_e2 stays 0.
                self.nr_out_e1 += 1
        print(self)
# Script entry point: prompts for the two team names, then runs the
# 1000-shot simulation, which prints the final score sheet.
x = Meci()
x.simulare()
| true |