blob_id (string, length 40) | language (string, 1 class) | repo_name (string, 5–133 chars) | path (string, 2–333 chars) | src_encoding (string, 30 classes) | length_bytes (int64, 18–5.47M) | score (float64, 2.52–5.81) | int_score (int64, 3–5) | detected_licenses (list, 0–67 items) | license_type (string, 2 classes) | text (string, 12–5.47M chars) | download_success (bool, 1 class)
|---|---|---|---|---|---|---|---|---|---|---|---|
911ee00602ee009a5e848976af8fc8d642d673c0
|
Python
|
menhuan/notes
|
/code/python/python_study/six/iterated.py
|
UTF-8
| 159
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
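# map() in Python 3 returns a lazy iterator, so printing the map object
# shows only its repr; wrapping it in list() materializes the values.
# With two iterables, map stops at the shorter one.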
result = map(lambda x:x*2,[1,2,3,4,5])
print(result)
print(list(result))
result = map(lambda x,y:x*y,[1,2,3,4,5],[2,3,4,5])
print(result)
print(list(result))
| true
|
ed7a3213009cb968346bbcefced23cfeacb92aef
|
Python
|
ahua010/Restrooms-Finder
|
/Google.py
|
UTF-8
| 3,118
| 2.796875
| 3
|
[] |
no_license
|
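# Note: this is Python 2 code (urllib2, print statements); under Python 3
# the equivalent APIs live in urllib.request and urllib.parse.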
import urllib, urllib2, webbrowser, json
import os
import logging
### Utility functions you may want to use
def pretty(obj):
return json.dumps(obj, sort_keys=True, indent=2)
def safeGet(url):
try:
return urllib2.urlopen(url)
except urllib2.HTTPError, e:
print "The server couldn't fulfill the request."
print "Error code: ", e.code
except urllib2.URLError, e:
print "We failed to reach a server"
print "Reason: ", e.reason
return None
#### Main Assignment ##############
## Don't forget, you need your own api_key for the Google Geocoding API,
# following the procedure in the session 10 slides. It is assigned to the
# api_key variable in the function below and sent as the 'key' parameter.
def googleREST(address = '1600 Amphitheatre Parkway, Mountain View, CA'):
params = {}
api_key = 'AIzaSyC5GSGKfO66jFfdjBVE-QNd0c8tKYPBMQU'
params['address'] = address
params['key']=api_key
baseurl = 'https://maps.googleapis.com/maps/api/geocode/json?'
url = baseurl+urllib.urlencode(params)
print url
return safeGet(url)
#print googleREST()
print '\n=========== Break 1 ===========\n'
def searchGeocode(address = '1600 Amphitheatre Parkway, Mountain View, CA'):
result = googleREST(address)
jsresult = result.read()
    jsdict = json.loads(jsresult)  # renamed to avoid shadowing the built-in dict
    print pretty(jsdict)
    if jsdict['status'] == 'OK':
        lat = jsdict['results'][0]['geometry']['location']['lat']
        lng = jsdict['results'][0]['geometry']['location']['lng']
        location = {'lat': lat, 'long': lng}
        return location
    return jsdict['status']
# print pretty(searchGeocode("nonexistent address"))
result = searchGeocode('11657 14th Ave SW, Burien, WA')
if type(result) is not dict:
print "can't find that address"
else:
print "result is: " + str(type(result))
print pretty(result)
print '\n========== Break 2 =============\n'
def refugeREST(location_dict, ada='false'):
params={}
params['lat'] = location_dict['lat']
params['lng'] = location_dict['long']
params['ada'] = ada
baseurl = 'http://www.refugerestrooms.org:80/api/v1/restrooms/by_location.json?'
url = baseurl + urllib.urlencode(params)
print url
return safeGet(url)
location_dict = searchGeocode()
# print refugeREST(location_dict)
print '\n========== Break 3 ============\n'
def searchRefuge():
result = refugeREST(location_dict, ada='true')
jresult = result.read()
wc_list = json.loads(jresult)
return wc_list
wc_list = searchRefuge()
print pretty(wc_list)
print len(wc_list)
#filter restroom within 5 miles
short_list = []
for wc in wc_list:
if wc['distance'] < 5:
short_list.append(wc)
print pretty(short_list)
print "there are " + str(len(short_list)) + " restrooms in 5 miles radius of your search"
def writeFile():
f = open('myRestrooms.csv', 'w')
f.write ('name, latitude, longitude, directions\n')
rest = searchRefuge()
for rs in rest:
f.write('"%s","%s",%s,"%s"\n'%(rs['name'], rs['latitude'], rs['longitude'], rs['directions']))
f.close()
| true
|
49a3c0f1764a014a067521c74d71a27b492123dd
|
Python
|
Connor-Harkness/Kiara
|
/cogs/selfmanage.py
|
UTF-8
| 8,351
| 2.53125
| 3
|
[] |
no_license
|
import asyncio
import traceback
import discord
from discord.ext import commands
from cogs.utils.cooldowns import basic_cooldown
def get_color_role(member):
for role in member.roles:
if role.color == member.color:
return role
GUILD_ID = 215424443005009920
base_colors = [
("Red", 424579184216506368, 30),
("Yellow", 424579315066208276, 30),
("Green", 424579385983762432, 30),
("Orange", 424579446578872332, 30),
("Cyan", 424579523363733507, 30),
("Blue", 424579641802752000, 30),
("Purple", 424579707573633024, 30),
("Pink", 424579770240466951, 30),
("Charcoal", 424579833994149888, 30),
("Light Grey", 488187345565122565, 30),
]
class Selfmanage:
"""A cog for managing yourself"""
all_roles = [
353019520983629824, 347689132908085248, 371251751564869632,
389376686745059329, 549542288020471808
]
legal = "Welcome to Waifu Worshipping, the home of all your lewd socialising needs! " \
"Please make sure to read over #information and #rules to learn the essentials.\n" \
"Also understand that due to the content of the server, all users are required to be over the age of 18.\n"\
"By continuing with these questions, you confirm that you are of legal age.\n\n" \
"**When you're ready to receive your roles, please reply with the following: **`begin`"
questions = [
('Are you happy to receive messages from other users in the server?', 353019520983629824),
('Are you interested in being informed about all future server-wide events? (Movie nights, gaming events, and other fun activities)', 347689132908085248),
('Are you interested in seeing NSFW content?', 371251751564869632),
('Are you interested in roleplay channels?', 389376686745059329),
("Are you interested in the 'Weekly Waifu' channels? These are channels dedicated to posting images of the chosen waifu of the week, who is used for the server icon.", 549542288020471808)
]
def __init__(self, bot):
self.bot = bot
self.active_intros = []
self.profiles = bot.get_cog("Economy")
async def on_member_join(self, member):
if member.guild.id == GUILD_ID:
try:
msg = await self.bot.wait_for('message', check=lambda m: m.author.id == member.id, timeout=300)
if 'intro' in msg.content:
return
except asyncio.TimeoutError:
pass
await self.questionare(member.guild, member)
@commands.command()
async def intro(self, ctx):
if ctx.author.id in self.active_intros:
return await ctx.send("You're already doing the intro.")
guild = self.bot.get_guild(GUILD_ID) or ctx.guild
if not guild:
return
if ctx.guild:
await ctx.send(f'{ctx.author.mention} I sent you a DM!')
await self.questionare(guild, guild.get_member(ctx.author.id))
async def questionare(self, guild, member):
if member.id in self.active_intros:
return
else:
try:
await member.send(self.legal)
except discord.errors.Forbidden:
return
self.active_intros += [member.id]
roles_to_add = []
fresh = discord.utils.get(member.roles, id=373122164544765953) is None
try:
await self.bot.wait_for('message', check=lambda m: m.content.lower() == 'begin' and m.author == member, timeout=300)
for question, role in self.questions:
if await self.ask_question(member, question):
roles_to_add.append(discord.utils.get(guild.roles, id=role))
# If the user hasn't claimed a free color
if not await self.bot.redis.exists(f"claimedcolor:{member.id}"):
if await self.ask_question(member, 'Would you like a colored name for on the server?'):
owned = [r[0] for r in await self.bot.db.fetch(f'SELECT color FROM colors WHERE user_id={member.id}')]
await member.send("Which of the following colors would you like?\n"
f"{', '.join([i[0] for i in base_colors])} or `none`")
c = await self.bot.wait_for('message', check=lambda m: m.author.id == member.id and m.guild is None,
timeout=120)
for name, color_code, price in base_colors:
if c.content.lower() == name.lower() and color_code not in owned:
await self.bot.db.execute(f"INSERT INTO colors (user_id, color) VALUES ({member.id}, {color_code})")
await self.bot.redis.set(f"claimedcolor:{member.id}", f"{name}")
roles_to_add.append(discord.utils.get(guild.roles, id=color_code))
break
except asyncio.TimeoutError:
try:
await member.send('Sorry, you took too long to answer. Use `~intro` if you want to start over.')
except discord.errors.Forbidden:
pass
self.active_intros.remove(member.id)
else:
try:
roles_to_add.append(discord.utils.get(guild.roles, id=373122164544765953))
await member.send("Please give me a few seconds to finalize everything.")
await member.remove_roles(*[discord.utils.get(guild.roles, id=x) for x in self.all_roles])
await member.add_roles(*roles_to_add)
await member.send('Thank you for answering, the appropriate roles have been assigned to you! If there are any issues, please contact a staff member and they will happily assist you.')
if fresh:
monitorlog = self.bot.get_cog('Monitor')
await monitorlog.post_member_log(member)
except Exception as e:
traceback.print_tb(e.__traceback__)
self.active_intros.remove(member.id)
async def ask_question(self, user, question):
def check(m):
return isinstance(m.channel, discord.DMChannel) and m.author == user and is_answer(m.content)
def is_answer(c):
return 'y' in c.lower() or 'n' in c.lower()
await user.send(question+' `yes / no`')
m = await self.bot.wait_for('message', check=check, timeout=120)
if 'y' in m.content.lower():
await m.add_reaction('✅')
return True
else:
return False
@commands.command(aliases=['tributes'], hidden=True)
@commands.guild_only()
@basic_cooldown(60)
async def tribute(self, ctx):
rolename = "Tributes"
if discord.utils.get(ctx.author.roles, name=rolename) is None:
await ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I gave you the {rolename} role.')
else:
await ctx.author.remove_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I removed your {rolename} role.')
@commands.command(hidden=True)
@commands.guild_only()
@basic_cooldown(60)
async def pokemon(self, ctx):
rolename = "Pokemon"
if discord.utils.get(ctx.author.roles, name=rolename) is None:
await ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I gave you the {rolename} role.')
else:
await ctx.author.remove_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I removed your {rolename} role.')
@commands.command(hidden=True)
@commands.guild_only()
@basic_cooldown(60)
async def waifu(self, ctx):
rolename = "Waifu"
if discord.utils.get(ctx.author.roles, name=rolename) is None:
await ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I gave you the {rolename} role.')
else:
await ctx.author.remove_roles(discord.utils.get(ctx.guild.roles, name=rolename))
await ctx.send(f'I removed your {rolename} role.')
def setup(bot):
bot.add_cog(Selfmanage(bot))
| true
|
bdef305829ed5339b1a78d4f649b3d773527072f
|
Python
|
nabazar/obstacle-avoidance
|
/depth.py
|
UTF-8
| 1,341
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
import math
from threading import Thread
from time import time
from utils import debug
class DepthCalcThread(Thread):
def __init__(self, matches, name, config):
super(DepthCalcThread, self).__init__(name=name)
self.pts_1 = matches['pts_1']
self.pts_2 = matches['pts_2']
self.config = config
self.results = []
def run(self):
distances = []
length = len(self.pts_1)
start_time = time()
for i in range(0, length):
x_1 = self.pts_1[i][0]
y_1 = self.pts_1[i][1]
x_2 = self.pts_2[i][0]
y_2 = self.pts_2[i][1]
distance = math.sqrt((x_1 - x_2)**2 + (y_1 - y_2)**2)
distances.append(distance)
debug("Depth computation time: {} seconds." . format(time() - start_time))
if self.config.get('normalize_points'):
            # cv2.normalize takes (src, dst, alpha, beta, ...); dst must be an
            # ndarray or None, so pass None and rescale distances into [0, 255]
            distances = cv2.normalize(np.array(distances), None, 0, 255, norm_type=cv2.NORM_MINMAX).flatten()
for i in range(0, length):
self.results.append((int(self.pts_2[i][0]), int(self.pts_2[i][1]), round(distances[i], 2)))
if self.config.get('sort_points'):
self.results.sort(key=lambda x: x[2])
def join(self, timeout=None):
super(DepthCalcThread, self).join(timeout)
return self.results
| true
|
073f46fe685389a314c03dbf87e3f9f64ecf0673
|
Python
|
oskin1/torch_practice
|
/torch_grad.py
|
UTF-8
| 128
| 2.53125
| 3
|
[] |
no_license
|
import torch
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y ** 2 * 3
out = z.mean()
out.backward()
print(x.grad)
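# Each entry of x.grad equals d(out)/dx = 6*(x + 2)/4 = 4.5 at x = 1,
# so this prints a 2x2 tensor filled with 4.5000.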
| true
|
bf16942c3068d69d05a1e77fbb9499f4c63f95d9
|
Python
|
hyejinHong0602/BOJ
|
/silver/[WEEK8] 1475 - 방 번호.PY
|
UTF-8
| 335
| 3.5625
| 4
|
[] |
no_license
|
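# BOJ 1475 "Room Number": sets of digit plates contain one of each digit 0-9,
# and a 6 can be flipped to serve as a 9. The combined count of 6s and 9s is
# therefore split over two plates per set (rounded up), and the answer is the
# larger of that and the maximum count of any other digit.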
n = input()
sixnine=0
others=0
for i in range(len(n)):
if n[i]=='6' or n[i]=='9':
sixnine+=1
else:
if others < n.count(n[i]):
others = n.count(n[i])
if sixnine%2==1:
sixnine=sixnine//2+1
elif sixnine%2==0:
sixnine=sixnine//2
if sixnine < others:
print(others)
else:
print(sixnine)
| true
|
df39c041d79fd25bc6c06018ffde7cf3a0803a1f
|
Python
|
AnthonyHadfield/Neural-Networks
|
/reconstitute_data.py
|
UTF-8
| 663
| 3.421875
| 3
|
[] |
no_license
|
import numpy as np
from decimal import *
data = np.array([[1, 5], [2, 6], [3, 7], [4, 8], [1.1, 5.5], [2.2, 6.6], [3.3, 7.7], [4.4, 8.8]], dtype=float)
def Reconstitute_DataSets():
    X = []
    y = []
print('')
for i in range(0, 8):
index_0 = data[i, 0]
index_1 = data[i, 1]
X.append(index_0)
X = [float(Decimal("%.2f" % e)) for e in X]
y.append(index_1)
y = [float(Decimal("%.2f" % e)) for e in y]
X = [X]
y = [y]
    print('New expanded X np.array data =')
print(X)
print('')
    print('New expanded y np.array data =')
print(y)
Reconstitute_DataSets()  # the function prints its output and returns None
| true
|
f494000e5f8df0a19dd379f77613cd085f859197
|
Python
|
antoinemadec/vim-verilog-instance
|
/plugin/verilog_instance.py
|
UTF-8
| 4,394
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""this script is a basic script for doing verilog editing
it parses the variables from stdin and generate a verilog name instantiation
of the variables"""
import re
import sys
skip_last_coma = 0
keep_comment = 1
keep_empty_line = 1
if len(sys.argv) > 1:
skip_last_coma = int(sys.argv[1])
if len(sys.argv) > 2:
keep_comment = int(sys.argv[2])
keywords = []
keywords.extend(["input", "output", "inout", "ref", "parameter", "localparam"])
keywords.extend(["reg", "logic", "wire", "bit", "integer", "int", "string", "type"])
keywords.extend(["const", "unsigned"])
patterns = []
patterns.append(re.compile(r'\[[^\[\]]*\]')) # port size, array size
patterns.append(re.compile(r'=.*')) # assignment
patterns.append(re.compile(r'//.*') )
patterns.append(re.compile(r'\w+\.\w+')) # interfaces with modport
for kw in keywords: # match keywords
patterns.append(re.compile("\\b%s\\b" % kw))
pattern_empty_line = re.compile(r'^\s*$')
pattern_open_comment = re.compile(r'/\*.*')
pattern_close_comment = re.compile(r'.*\*/')
pattern_open_to_close_comment = re.compile(r'/\*.*\*/')
pattern_punctuation = re.compile(r'[,;]')
pattern_two_words_no_coma = re.compile(r'^\s*(\w+)\s+(\w+.*)')
pattern_spaces = re.compile(r'\s+')
pattern_inline_comment_kept = re.compile(r'.*\w+.*(//.*)') # comment in port define
pattern_comment_kept = re.compile(r'\s*(//.*)') # one line comment
ports = []
ports_comments = {} # save comment for every port
contents = [] # save ports and single line comments
wait_to_close_comment = 0
indent_len = -1
for line in sys.stdin:
# get indentation length from 1st non empty line
if indent_len == -1 and not(pattern_empty_line.match(line)):
indent_len = len(re.match(r'^\s*', line).group(0))
# handle empty line
if pattern_empty_line.match(line) is not None:
contents.append('')
# handle comments
if wait_to_close_comment:
if pattern_close_comment.search(line):
line = pattern_close_comment.sub(' ', line)
wait_to_close_comment = 0
else:
continue
if pattern_open_comment.search(line):
if pattern_close_comment.search(line):
line = pattern_open_to_close_comment.sub(' ', line)
else:
wait_to_close_comment = 1
continue
# handle port comment
port_comment = pattern_inline_comment_kept.match(line)
if port_comment is not None:
port_comment = port_comment.group(1)
# handle single line comment
line_comment = pattern_comment_kept.match(line)
if line_comment is not None:
line_comment = line_comment.group(1)
# handle all other patterns
for pattern in patterns:
line = pattern.sub(' ', line)
# handle typedef, class and interfaces
line = pattern_two_words_no_coma.sub('\\2', line)
line = pattern_punctuation.sub(' ', line)
line = pattern_spaces.sub(' ', line)
# finally, get port names
line = line.strip()
if line != "":
port_names = line.split(' ')
ports.extend(port_names)
contents.extend(port_names)
for port in port_names:
ports_comments[port] = port_comment
else:
# add single line comment to port
if line_comment is not None:
contents.append(line_comment)
ports_nb = len(ports)
i = 0
if ports_nb > 0:
max_str_len = len(max(ports, key=len))
indent_str = " " * indent_len
for content in contents:
if len(content) > 0:
if content[:2] == "//":
if keep_comment == 1:
print(f'{indent_str}{content}')
continue
else:
# empty line
if keep_empty_line == 1:
print('')
continue
port = content
skip_coma = skip_last_coma and i == (ports_nb - 1)
space_str = " " * (max_str_len - len(port))
output_line_port = "%s.%s%s (%s%s)%s" % (
indent_str, port, space_str, port, space_str, (",", "")[skip_coma])
if ports_comments.get(port) is not None and keep_comment == 1:
# add port comment
output_line = f"{output_line_port} {ports_comments.get(port)}"
else:
output_line = output_line_port
print(output_line)
i = i + 1
| true
|
5020e492327f41d892ea7ccf03ccc19aef5b928d
|
Python
|
Introspectibles/tobe-misc
|
/processing/python_sinus_generator.py
|
UTF-8
| 3,306
| 2.703125
| 3
|
[] |
no_license
|
import numpy
# Sine generator from the OpenViBE tutorial: we set the frequency of the mother
# wave (1st channel); each other channel's frequency is a multiple of it (x2, x3, ...).
# values between 0 and 1
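# Note: OVBox, OVSignalHeader, OVSignalBuffer and OVSignalEnd are provided at
# runtime by OpenViBE's Python scripting box, which is why no import appears here.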
class MyOVBox(OVBox):
def __init__(self):
OVBox.__init__(self)
self.channelCount = 0
self.samplingFrequency = 0
self.epochSampleCount = 0
self.startTime = 0.
self.endTime = 0.
self.dimensionSizes = list()
self.dimensionLabels = list()
self.timeBuffer = list()
self.signalBuffer = None
self.signalHeader = None
# this time we also re-define the initialize method to directly prepare the header and the first data chunk
def initialize(self):
# settings are retrieved in the dictionary
self.channelCount = int(self.setting['Channel count'])
self.samplingFrequency = int(self.setting['Sampling frequency'])
self.epochSampleCount = int(self.setting['Generated epoch sample count'])
self.sinusFrequency = float(self.setting['Sinus Frequency'])
#creation of the signal header
for i in range(self.channelCount):
self.dimensionLabels.append( 'Sinus'+str(i) )
self.dimensionLabels += self.epochSampleCount*['']
self.dimensionSizes = [self.channelCount, self.epochSampleCount]
self.signalHeader = OVSignalHeader(0., 0., self.dimensionSizes, self.dimensionLabels, self.samplingFrequency)
self.output[0].append(self.signalHeader)
#creation of the first signal chunk
self.endTime = 1.*self.epochSampleCount/self.samplingFrequency
self.signalBuffer = numpy.zeros((self.channelCount, self.epochSampleCount))
self.updateTimeBuffer()
self.updateSignalBuffer()
def updateStartTime(self):
self.startTime += 1.*self.epochSampleCount/self.samplingFrequency
def updateEndTime(self):
self.endTime = float(self.startTime + 1.*self.epochSampleCount/self.samplingFrequency)
def updateTimeBuffer(self):
self.timeBuffer = numpy.arange(self.startTime, self.endTime, 1./self.samplingFrequency)
        # crop the time buffer in case float rounding yields more than epochSampleCount samples
self.timeBuffer = self.timeBuffer[0:self.epochSampleCount]
def updateSignalBuffer(self):
for rowIndex, row in enumerate(self.signalBuffer):
self.signalBuffer[rowIndex,:] = numpy.sin( 2.*numpy.pi*(rowIndex+1.)*self.sinusFrequency*self.timeBuffer )/2 + 0.5
def sendSignalBufferToOpenvibe(self):
start = self.timeBuffer[0]
end = self.timeBuffer[-1] + 1./self.samplingFrequency
bufferElements = self.signalBuffer.reshape(self.channelCount*self.epochSampleCount).tolist()
self.output[0].append( OVSignalBuffer(start, end, bufferElements) )
# the process is straightforward
def process(self):
start = self.timeBuffer[0]
end = self.timeBuffer[-1]
if self.getCurrentTime() >= end:
self.sendSignalBufferToOpenvibe()
self.updateStartTime()
self.updateEndTime()
self.updateTimeBuffer()
self.updateSignalBuffer()
# this time we also re-define the uninitialize method to output the end chunk.
def uninitialize(self):
end = self.timeBuffer[-1]
self.output[0].append(OVSignalEnd(end, end))
box = MyOVBox()
| true
|
5f8629edfadd12f25f9f6dc8f2706cfb9ab3a4d6
|
Python
|
LugonesM/FisicaPy
|
/FuerzayPresion/volumenHierroVsCorcho.py
|
UTF-8
| 1,816
| 4.21875
| 4
|
[] |
no_license
|
import sys
import math
#Exercise from the book:
#How many cubic centimeters of cork weigh the same as 200 cm³ of iron?
#specific weight of iron is 7.85 g/cm³
#specific weight of cork is 0.22 g/cm³
PEH = 7.85 #specific weight of iron in g/cm³
PEC = 0.22 #specific weight of cork in g/cm³
def check( value ) :
"""Chequea si puede convertir las cadena a float:
Si puede la convierte y retorna eso,
Si no puede cierra el programa con un mensaje de: pone solo numeros
"""
try:
float(value)
except ValueError:
print( "pone solo numeros")
sys.exit()
return
def strToFloat( cadena ):
"""Toma un avariable, si es de tipo str reemplaza sus comas, si las tiene, por puntos.
Chequea si puede convertir las cadena a float( mediante funcion check) :
Si puede la convierte y retorna eso,
Si no puede cierra el programa con un mensaje
"""
if type(cadena) == str:
        cadena = cadena.replace(",",".")  # replace any commas in the str with dots
    check(cadena)  # exits the program if the value is not numeric
    cadena = float(cadena)
return cadena
def calculoPeso(vol, pE ):
"""calcula el peso en gramos de un material tomando su volumen (cm³) y peso especifico (g/cm³)"""
return vol * pE
def calculoVol(peso , pE ):
"""calcula el volumen en cm³ de un material tomando su peso en gramos y su peso especifico (g/cm³)"""
return peso / pE
if __name__ == '__main__':
print("¿Cuanto volumen de corcho hay que juntar para igualar el peso de tanto volumen de hierro? (poner decimales detras de un punto)")
volH = input("volumen de hierro en cm³ : ")
volH = strToFloat(volH)
pesoH = calculoPeso(volH , PEH )
volC = calculoVol(pesoH , PEC )
print("El volumen de corcho tendria que ser igual a : ")
print(volC, "cm³")
sys.exit()
| true
|
b8ae4004e78674f63228da4a5d3f6a60ea478eab
|
Python
|
KarchinLab/CHASMplus
|
/scripts/snvbox/update_uniprot_ppi.py
|
UTF-8
| 3,626
| 2.671875
| 3
|
[] |
no_license
|
"""
File: update_uniprot_ppi.py
Author: Collin Tokheim
Email: ctokheim@jhu.edu
Github: ctokheim
Description: Add ppi feature to uniprot_features table
"""
import MySQLdb
import csv
import argparse
def parse_arguments():
    info = 'Add uniprot ppi feature to Uniprot_features table'
parser = argparse.ArgumentParser(description=info)
parser.add_argument('-i', '--input',
type=str, required=True,
help='Text file containing uniprot density feature')
parser.add_argument('-mh', '--mysql-host',
type=str,
default='karchin-db01.icm.jhu.edu',
help='Host name for mysql')
parser.add_argument('-mdb', '--mysql-db',
type=str,
default='SNVBox_dev_sandbox',
help='Database name for snvbox')
parser.add_argument('--mysql-user',
type=str, required=True,
help='MySQL user name')
parser.add_argument('--mysql-passwd',
type=str, required=True,
help='MySQL password')
args = parser.parse_args()
return vars(args)
def main(opts):
# connect to mysql
db = MySQLdb.connect(host=opts['mysql_host'],
user=opts['mysql_user'],
passwd=opts['mysql_passwd'],
db=opts['mysql_db'])
cursor = db.cursor()
with open(opts['input']) as handle:
myreader = csv.reader(handle, delimiter='\t')
header = next(myreader)
uniprot_ix, pos_ix, count_ix = header.index('uniprot_id'), header.index('residue'), header.index('count')
# run mysql command to reset to zero
myquery = (
'UPDATE Uniprot_features '
' SET insider_ppi=0 '
)
cursor.execute(myquery)
# commit changes
db.commit()
# iterate over each line of the file
for line in myreader:
# get the relevant columns
uniprot, pos, count = line[uniprot_ix], line[pos_ix], line[count_ix]
# run mysql command
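            # (note: interpolating values via str.format is SQL-injection prone;
            # MySQLdb's parameterized form cursor.execute(sql, args) would be safer)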
myquery = (
'UPDATE Uniprot_features '
' SET insider_ppi={count} '
'WHERE Acc=\'{uniprot_id}\' AND Pos={pos}'
).format(count=count, uniprot_id=uniprot, pos=int(pos))
cursor.execute(myquery)
# commit changes
db.commit()
num_rows_affected = cursor.rowcount
if num_rows_affected == 0:
colnames = (
'Acc', 'Pos', 'BINDING', 'ACT_SITE', 'SITE',
'LIPID', 'METAL', 'CARBOHYD', 'DNA_BIND', 'NP_BIND',
'CA_BIND', 'DISULFID', 'SE_CYS', 'MOD_RES', 'PROPEP',
'SIGNALP', 'TRANSMEM', 'COMPBIAS', 'REP', 'MOTIF',
'ZN_FING', 'REGIONS', 'PPI', 'RNABD', 'TF',
'LOC', 'MMBRBD', 'Chrom', 'PostModRec',
'PostModEnz', 'insider_ppi'
)
# run mysql command
myquery = (
'INSERT INTO Uniprot_features ({colnames}) '
'VALUES (\'{uniprot_id}\', {pos}, {zeros}, {count})'
).format(colnames=', '.join(colnames), uniprot_id=uniprot,
pos=int(pos), zeros=', '.join(['0']*(len(colnames)-3)),
count=count)
cursor.execute(myquery)
# commit changes
db.commit()
if __name__ == '__main__':
opts = parse_arguments()
main(opts)
| true
|
1220d06e7db3e96590c7f82832dd2f98cffd477e
|
Python
|
sheena-fernandez/sdr
|
/sortin/merge.py
|
UTF-8
| 672
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import random
n = random.randint(5, 20)
lista = [ random.randint(0, n * 2) for x in range(0, n) ]
print(lista)
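# Classic top-down merge sort: recursively sort each half, then merge the two
# sorted halves. O(n log n) time with O(n) auxiliary space.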
def mergesort(lista):
if len(lista) <= 1:
return lista
h = len(lista) // 2
    # Sort each half
a = mergesort(lista[0:h])
b = mergesort(lista[h:])
    # Merge the halves in order
i = 0
j = 0
nova = []
while i < len(a) and j < len(b):
if a[i] < b[j]:
nova.append(a[i])
i += 1
else:
nova.append(b[j])
j += 1
while i < len(a):
nova.append(a[i])
i += 1
while j < len(b):
nova.append(b[j])
j += 1
return nova
lista = mergesort(lista)
print(lista)
if lista != sorted(lista):
raise Exception("foo")
| true
|
cc529ce9a5c1acde4d34e05eff54c2d29fa0d8dd
|
Python
|
Ufuk-a/Space-invaders
|
/space_invaders/lib/human_ship.py
|
UTF-8
| 1,731
| 3.140625
| 3
|
[] |
no_license
|
from space_invaders.lib.bullet import Bullet
from space_invaders.res.glob import *
player_image = pygame.image.load("space_invaders\\res\\images\\human_ship.png")
class HumanShip(pygame.sprite.Sprite):
def __init__(self, hp):
pygame.sprite.Sprite.__init__(self)
self.image = player_image
self.rect = self.image.get_rect()
self.x = 300
self.rect.center = (self.x + 32, 500)
self.hp = hp
self.score = 0
def update(self, bullets, screen):
key = pygame.key.get_pressed()
#health logic
if self.hp < 0:
self.hp = 0
if self.hp == 0:
end_score = FONT.render(f"You died! Your score was: {self.score}", False, colors.white)
screen.blit(end_score,(200,300))
if key[pygame.K_SPACE]:
pygame.display.quit()
#Movement
speed = 3
if self.hp != 0:
if key[pygame.K_LEFT]:
self.x -= speed
self.rect.x = self.x
elif key[pygame.K_RIGHT]:
self.x += speed
self.rect.x = self.x
#Boundary
if self.rect.left < 0:
self.rect.left = 0
self.x = 0
elif self.rect.right > 600:
self.rect.right = 600
self.x = 536
#shoot
if key[pygame.K_SPACE]:
shoot = Bullet(self.x + 32, 468, "up")
bullets.add(shoot)
#score
score_text = FONT.render(f"Score: {self.score}", False, colors.white)
screen.blit(score_text,(0,0))
| true
|
c7c619f402ad2ec9380fcd0a040bb04d8a0b7bbd
|
Python
|
TannhauserGate42/pandas
|
/pandas/core/layout.py
|
UTF-8
| 5,311
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
"""
Implements ArrayLayout copy factory to change memory layout
of `numpy.ndarrays`.
Depending on the use case, operations on DataFrames can be much
faster if the appropriate memory layout is set and preserved.
The implementation allows for changing the desired layout. Changes apply when
copies or new objects are created, as for example, when slicing or aggregating
via groupby ...
This implementation tries to solve the issue raised on GitHub
https://github.com/pandas-dev/pandas/issues/26502
"""
import numpy as np
class ArrayLayout(object):
"""
Array layout management for numpy.ndarrays.
Singleton implementation.
Example:
>>> from pandas.core.layout import array_layout
>>> array_layout.order = 'K' #
>>> # K ... keep array layout from input
>>> # C ... set to c-contiguous / column order
>>> # F ... set to f-contiguous / row order
>>> array = array_layout.apply(array)
>>> array = array_layout.apply(array, 'C')
>>> array = array_layout.copy(array)
>>> array = array_layout.apply_on_transpose(array)
"""
_order = None
_instance = None
@property
def order(self):
"""
Return memory layout ordering.
:return: `str`
"""
if self.__class__._order is None:
raise AssertionError("Array layout order not set.")
return self.__class__._order
@order.setter
def order(self, order):
"""
Set memory layout order.
Allowed values are 'C', 'F', and 'K'. Raises AssertionError
when trying to set other values.
:param order: `str`
:return: `None`
"""
assert order in ['C', 'F', 'K'], "Only 'C', 'F' and 'K' supported."
self.__class__._order = order
def __new__(cls):
"""
Create only one instance throughout the lifetime of this process.
:return: `ArrayLayout` instance as singleton
"""
return cls._instance if cls._instance is not None \
else super(ArrayLayout, cls).__new__(ArrayLayout)
def __init__(self):
"""
Kind of a singleton implementation for the memory layout order in use.
Works together with the __new__ definition.
Returns initialized singleton instance of ArrayLayout
:return: `ArrayLayout` instance, singleton, the one and only.
"""
if self._instance is None:
self._instance = self
@staticmethod
def get_from(array):
"""
Get memory layout from array
        Possible values:
        'C' ... only C-contiguous (row-major)
        'F' ... only F-contiguous (column-major)
        'O' ... other: both C- and F-contiguous, or neither
            (as on empty arrays).
:param array: `numpy.ndarray`
:return: `str`
"""
if array.flags.c_contiguous == array.flags.f_contiguous:
return 'O'
return {True: 'C', False: 'F'}[array.flags.c_contiguous]
def apply(self, array, order=None):
"""
Apply the order set or the order given as input on the array
given as input.
Possible values:
        'C' ... apply C-contiguous (row-major) layout
        'F' ... apply F-contiguous (column-major) layout
'K' ... keep the given layout
:param array: `numpy.ndarray`
:param order: `str`
:return: `np.ndarray`
"""
order = self.__class__._order if order is None else order
if order == 'K':
return array
array_order = ArrayLayout.get_from(array)
if array_order == order:
return array
        # ravel and reshape must use the same order so that values keep their
        # logical positions while only the memory layout changes
        return np.reshape(np.ravel(array, order=order), array.shape, order=order)
def copy(self, array, order=None):
"""
Return a copy of the input array with the memory layout set.
Layout set:
'C' ... return C-contiguous copy
'F' ... return F-contiguous copy
'K' ... return copy with same layout as
given by the input array.
:param array: `np.ndarray`
:return: `np.ndarray`
"""
order = order if order is not None else self.__class__._order
return array.copy(order=self.get_from(array)) if order == 'K' \
else array.copy(order=order)
def copy_transposed(self, array):
"""
Return a copy of the input array in order that its transpose
has the memory layout set.
Note: numpy simply changes the memory layout from row to column
order instead of reshuffling the data in memory.
Layout set:
'C' ... return F-contiguous copy
'F' ... return C-contiguous copy
        'K' ... return copy with opposite (C versus F) layout as
given by the input array.
:param array: `np.ndarray`
:return: `np.ndarray`
"""
if self.__class__._order == 'K':
return array.copy(
order={'C': 'C', 'F': 'F', 'O': None}[self.get_from(array)])
else:
return array.copy(
order={'C': 'F', 'F': 'C'}[self.__class__._order])
def __str__(self):
return str(self.__class__._order)
array_layout = ArrayLayout() # Singleton
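# A minimal usage sketch (illustrative):
#
#   import numpy as np
#   array_layout.order = 'F'
#   a = array_layout.apply(np.zeros((3, 4)))  # now F-contiguous
#   assert a.flags.f_contiguous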
| true
|
c9e9d8b1b05149e3cd3d392ec9b70e3c6183316b
|
Python
|
HansZimmer5000/LensComparison
|
/webcrawler/tests/crawledlenstestsuite.py
|
UTF-8
| 3,404
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import unittest
from webcrawler.tests.testdata import generalexamples
from webcrawler.lenses.crawledlens import CrawledLens
class CrawledLensTestsuite(unittest.TestCase):
#\\\\\\\\\\\\
# setUp & tearDown
#////////////
def setUp(self):
self.__class__.CRAWLED_LENS1_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS1_LENS_DICT
self.__class__.CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT_LENS_DICT
self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS1_OLD_MOUNT_LENS_DICT
self.__class__.CRAWLED_LENS1_WITH_NEW_MOUNT_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS1_NEW_MOUNT_LENS_DICT
self.__class__.CRAWLED_LENS2_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS2_LENS_DICT
self.__class__.CRAWLED_LENS2_WITHOUT_SENSOR_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS2_WITHOUT_SENSOR_LENS_DICT
self.__class__.CRAWLED_LENS3_LENS_DICT = generalexamples.TESTDATA_CRAWLED_LENS3_LENS_DICT
self.__class__.ALL_CRAWLED_LENSES_WITH_MISSING_INFO_LENS_DICTS = generalexamples.TESTDATA_ALL_CRAWLED_LENSES_WITH_MISSING_INFO_LENS_DICTS
self.__class__.ALL_CRAWLED_LENSES_WITH_FULL_INFO_LENS_DICTS = generalexamples.TESTDATA_ALL_CRAWLED_LENSES_WITH_FULL_INFO_LENS_DICTS
self.__class__.CRAWLED_LENS1 = CrawledLens(self.__class__.CRAWLED_LENS1_LENS_DICT)
self.__class__.CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT = CrawledLens(self.__class__.CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT_LENS_DICT)
self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT = CrawledLens(self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT_LENS_DICT)
self.__class__.CRAWLED_LENS2 = CrawledLens(self.__class__.CRAWLED_LENS2_LENS_DICT)
print("\n\nsetup Done\n" + self._testMethodName)
#End of setUp
def tearDown(self):
print("tearDown Done, next Line E(Error), F(Failure) or .(Passed)")
#End of tearDown
#\\\\\\\\\\\\
# Test cases
#////////////
def test_pos_lens_data_exists_update_mount_and_weight(self):
self.__class__.CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT.update(self.__class__.CRAWLED_LENS1_LENS_DICT)
self.assertTrue(self.__class__.CRAWLED_LENS1_WITHOUT_MOUNT_AND_WEIGHT.equals(self.__class__.CRAWLED_LENS1))
def test_pos_lens_data_exists_update_sensor(self):
lens_to_be_updated = CrawledLens(self.__class__.CRAWLED_LENS2_WITHOUT_SENSOR_LENS_DICT.copy())
lens_to_be_updated.update(self.__class__.CRAWLED_LENS2_LENS_DICT)
self.assertTrue(lens_to_be_updated.equals(self.__class__.CRAWLED_LENS2))
def test_pos_lens_data_exists_update_but_nothing_new_to_add(self):
self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT.update(self.__class__.CRAWLED_LENS1_WITH_NEW_MOUNT_LENS_DICT)
self.assertTrue(self.__class__.CRAWLED_LENS1.equals(self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT))
def test_pos_lens_data_exists_update_but_no_new_mount_to_add(self):
self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT.update(self.__class__.CRAWLED_LENS1_WITH_NEW_MOUNT_LENS_DICT)
self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT.update(self.__class__.CRAWLED_LENS1_WITH_NEW_MOUNT_LENS_DICT)
self.assertTrue(self.__class__.CRAWLED_LENS1.equals(self.__class__.CRAWLED_LENS1_WITH_OLD_MOUNT))
| true
|
a9b8f73258619c3bcb61faa60c4691cf8c380b65
|
Python
|
morepath/morepath
|
/morepath/request.py
|
UTF-8
| 11,760
| 2.671875
| 3
|
[] |
permissive
|
"""Morepath request implementation.
Entirely documented in :class:`morepath.Request` and
:class:`morepath.Response` in the public API.
"""
from webob import BaseRequest, Response as BaseResponse
from dectate import Sentinel
from .reify import reify
from .traject import create_path, parse_path
from .error import LinkError
from .authentication import NO_IDENTITY
SAME_APP = Sentinel("SAME_APP")
class Request(BaseRequest):
"""Request.
Extends :class:`webob.request.BaseRequest`
"""
path_code_info = None
view_code_info = None
def __init__(self, environ, app, **kw):
super().__init__(environ, **kw)
# parse path, normalizing dots away in
# in case the client didn't do the normalization
path_info = self.path_info
segments = parse_path(path_info)
# optimization: only if the normalized path is different from the
# original path do we set it to the webob request, as this is
# relatively expensive. Webob updates the environ as well
new_path_info = create_path(segments)
if new_path_info != path_info:
self.path_info = new_path_info
# reverse to get unconsumed
segments.reverse()
self.unconsumed = segments
"""Stack of path segments that have not yet been consumed.
See :mod:`morepath.publish`.
"""
self._root_app = app
self.app = app
""":class:`morepath.App` instance currently handling request.
"""
self._after = []
self._link_prefix_cache = {}
def reset(self):
"""Reset request.
This resets the request back to the state it had when request
processing started. This is used by ``more.transaction`` when it
retries a transaction.
"""
self.make_body_seekable()
segments = parse_path(self.path_info)
segments.reverse()
self.unconsumed = segments
self.app = self._root_app
self._after = []
@reify
def identity(self):
"""Self-proclaimed identity of the user.
The identity is established using the identity policy. Normally
this would be an instance of :class:`morepath.Identity`.
If no identity is claimed or established, or if the identity
        is not verified by the application, the identity is the
special value :attr:`morepath.NO_IDENTITY`.
The identity can be used for authentication/authorization of
the user, using Morepath permission directives.
"""
result = self.app._identify(self)
if result is None or result is NO_IDENTITY:
return NO_IDENTITY
if not self.app._verify_identity(result):
return NO_IDENTITY
return result
def link_prefix(self, app=None):
"""Prefix to all links created by this request.
:param app: Optionally use the given app to create the link.
This leads to use of the link prefix configured for the given app.
This parameter is mainly used internally for link creation.
"""
app = app or self.app
cached = self._link_prefix_cache.get(app.__class__)
if cached is not None:
return cached
prefix = self._link_prefix_cache[app.__class__] = app._link_prefix(self)
return prefix
def view(self, obj, default=None, app=SAME_APP, **predicates):
"""Call view for model instance.
This does not render the view, but calls the appropriate
view function and returns its result.
:param obj: the model instance to call the view on.
:param default: default value if view is not found.
:param app: If set, change the application in which to look up
the view. By default the view is looked up for the current
application. The ``defer_links`` directive can be used to change
the default app for all instances of a particular class.
:param predicates: extra predicates to modify view
lookup, such as ``name`` and ``request_method``. The default
``name`` is empty, so the default view is looked up,
and the default ``request_method`` is ``GET``. If you introduce
your own predicates you can specify your own default.
"""
if app is None:
raise LinkError("Cannot view: app is None")
if app is SAME_APP:
app = self.app
predicates["model"] = obj.__class__
def find(app, obj):
return app.get_view.by_predicates(**predicates).component
view, app = app._follow_defers(find, obj)
if view is None:
return default
old_app = self.app
self.app = app
# need to use value as view is registered as a function, not
# as a wrapped method
result = view.func(obj, self)
self.app = old_app
return result
def link(self, obj, name="", default=None, app=SAME_APP):
"""Create a link (URL) to a view on a model instance.
The resulting link is prefixed by the link prefix. By default
this is the full URL based on the Host header.
You can configure the link prefix for an application using the
:meth:`morepath.App.link_prefix` directive.
If no link can be constructed for the model instance, a
:exc:`morepath.error.LinkError` is raised. ``None`` is treated
specially: if ``None`` is passed in the default value is
returned.
The :meth:`morepath.App.defer_links` or
:meth:`morepath.App.defer_class_links` directives can be used
to defer link generation for all instances of a particular
class (if this app doesn't handle them) to another app.
:param obj: the model instance to link to, or ``None``.
        :param name: the name of the view to link to. If omitted, the
          default view is looked up.
:param default: if ``None`` is passed in, the default value is
returned. By default this is ``None``.
:param app: If set, change the application to which the
link is made. By default the link is made to an object
in the current application.
"""
if obj is None:
return default
if app is None:
raise LinkError("Cannot link: app is None")
if app is SAME_APP:
app = self.app
info, app = app._get_deferred_mounted_path(obj)
if info is None:
raise LinkError("Cannot link to: %r" % obj)
return info.url(self.link_prefix(app), name)
def class_link(self, model, variables=None, name="", app=SAME_APP):
"""Create a link (URL) to a view on a class.
Given a model class and a variables dictionary, create a link
based on the path registered for the class and interpolate the
variables.
If you have an instance of the model available you'd link to the
model instead, but in some cases it is expensive to instantiate
the model just to create a link. In this case `class_link` can be
used as an optimization.
The :meth:`morepath.App.defer_class_links` directive can be
used to defer link generation for a particular class (if this
app doesn't handle them) to another app.
Note that the :meth:`morepath.App.defer_links` directive has
**no** effect on ``class_link``, as it needs an instance of the
model to work, which is not available.
If no link can be constructed for the model class, a
:exc:`morepath.error.LinkError` is raised. This error is
also raised if you don't supply enough variables. Additional
variables not used in the path are interpreted as URL
parameters.
:param model: the model class to link to.
:param variables: a dictionary with as keys the variable names,
and as values the variable values. These are used to construct
the link URL. If omitted, the dictionary is treated as containing
no variables.
        :param name: the name of the view to link to. If omitted, the
          default view is looked up.
:param app: If set, change the application to which the
link is made. By default the link is made to an object
in the current application.
"""
if variables is None:
variables = {}
if app is None:
raise LinkError("Cannot link: app is None")
if app is SAME_APP:
app = self.app
info = app._get_deferred_mounted_class_path(model, variables)
if info is None:
raise LinkError("Cannot link to class: %r" % model)
return info.url(self.link_prefix(), name)
def resolve_path(self, path, app=SAME_APP):
"""Resolve a path to a model instance.
The resulting object is a model instance, or ``None`` if the
path could not be resolved.
:param path: URL path to resolve.
:param app: If set, change the application in which the
path is resolved. By default the path is resolved in the
current application.
:return: instance or ``None`` if no path could be resolved.
"""
if app is None:
raise LinkError("Cannot path: app is None")
if app is SAME_APP:
app = self.app
request = Request(self.environ.copy(), app, path_info=path)
        # local import to avoid a circular import at module load time
from .publish import resolve_model
return resolve_model(request)
def after(self, func):
"""Call a function with the response after a successful request.
A request is considered *successful* if the HTTP status is a 2XX or a
3XX code (e.g. 200 OK, 204 No Content, 302 Found).
In this case ``after`` *is* called.
A request is considered *unsuccessful* if the HTTP status lies outside
the 2XX-3XX range (e.g. 403 Forbidden, 404 Not Found,
500 Internal Server Error). Usually this happens if an exception
occurs. In this case ``after`` is *not* called.
Some exceptions indicate a successful request however and their
occurrence still leads to a call to ``after``. These exceptions
inherit from either :class:`webob.exc.HTTPOk` or
:class:`webob.exc.HTTPRedirection`.
You use `request.after` inside a view function definition.
It can be used explicitly::
@App.view(model=SomeModel)
def some_model_default(self, request):
def myfunc(response):
response.headers.add('blah', 'something')
request.after(my_func)
or as a decorator::
@App.view(model=SomeModel)
def some_model_default(self, request):
@request.after
def myfunc(response):
response.headers.add('blah', 'something')
:param func: callable that is called with response
:return: func argument, not wrapped
"""
self._after.append(func)
return func
def _run_after(self, response):
"""Run callbacks registered with :meth:`morepath.Request.after`."""
# if we don't have anything to run, don't even check status
if not self._after:
return
        # only run the callbacks for 2XX and 3XX responses
if response.status[0] not in ("2", "3"):
return
for after in self._after:
after(response)
def clear_after(self):
self._after = []
class Response(BaseResponse):
"""Response.
Extends :class:`webob.response.Response`.
"""
| true
|
8de5900f692194fe6f6741fb02cced1fe2243e21
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03046/s847893958.py
|
UTF-8
| 530
| 2.78125
| 3
|
[] |
no_license
|
def main():
m,k=map(int,input().split())
if 2**m<=k:
print(-1)
elif m==0:
if k==0:
print(0,0)
else:
print(-1)
elif m==1:
if k==0:
print(1,0,0,1)
else:
print(-1)
else:
ans=[]
for i in range(2**m-1,-1,-1):
if i!=k:
ans.append(i)
ans.append(k)
for i in range(2**m):
if i!=k:
ans.append(i)
ans.append(k)
print(*ans)
main()
| true
|
66e112ccaf17b8d3038b446bad54495a74a7301c
|
Python
|
Metallicow/wxPythonDemos
|
/SplashScreen.py
|
UTF-8
| 1,922
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env python2.4
# I Always specify the python version in the #! line, it makes it much
# easier to have multiple versions on your system
import wxversion
wxversion.select("2.6")
## Note: it may well work with other versions, but it's been tested on 2.6.
import wx
import os, time
class MySplashScreen(wx.SplashScreen):
def __init__(self,imageFileName):
bmp = wx.Bitmap(imageFileName)
wx.SplashScreen.__init__(self,
bitmap = bmp,
splashStyle = wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
milliseconds = 5000,
parent = None)
#self.Bind(wx.EVT_CLOSE, self.OnClose)
#self.fc = wx.FutureCall(2000, self.ShowMain)
def OnClose(self, evt):
print "OnClose Called"
# Make sure the default handler runs too so this window gets
# destroyed
#evt.Skip()
#self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
#if self.fc.IsRunning():
# self.fc.Stop()
self.ShowMain()
def ShowMain(self):
print "ShowMain called"
frame = MainFrame(None, title="An Empty Frame")
frame.CenterOnScreen()
frame.Show()
#if self.fc.IsRunning():
# self.Raise()
class MainFrame(wx.Frame):
"""
this is an empty frame, just to have something to show
"""
pass
class App(wx.App):
def OnInit(self):
"""
This gets called when the App Starts up
"""
Splash = MySplashScreen(SplashImageFile)
frame = MainFrame(None, title = "Main Frame")
frame.Show()
#Splash.Show(True)
return True
if __name__ == "__main__":
SplashImageFile = "Images/cute_close_up.jpg"
app = App(0)
app.MainLoop()
| true
|
41502d04d0d2433a41ea2740f492d30882c52b12
|
Python
|
cybersaksham/Python-Tutorials
|
/24_recursion.py
|
UTF-8
| 795
| 4.6875
| 5
|
[] |
no_license
|
"""
Recursion is just calling a function in itself.
But if we not declare some base values to end the recursion, then it would calling the function infinite times &
will give error.
Recursion is not a good technique because it process in reverse manner hence line of code increase rapidly.
"""
def fac_itr(n): # This is for iteration i.e. looping
fac = 1
for i in range(n):
fac = fac * (i + 1)
return fac
def fac_rec(n): # This is for recursion
if n == 0 or n == 1:
return 1
elif n > 1:
return n * fac_rec(n - 1)
else:
print("Entered incorrect value")
number = int(input("Enter a number = "))
print("Factorial of", number, "using iteration is", fac_itr(number))
print("Factorial of", number, "using recursion is", fac_itr(number))
| true
|
7e601e8a69d95cef4ef233b44d04bf75b1e83e1c
|
Python
|
stjordanis/autogluon
|
/core/tests/unittests/scheduler/test_seq_scheduler.py
|
UTF-8
| 4,599
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
import pytest
from autogluon.common import space
from autogluon.core.scheduler.seq_scheduler import LocalSequentialScheduler
cls = LocalSequentialScheduler
def test_get_average_trial_time_():
running_time = cls.get_average_trial_time_(0, avg_trial_run_time=None, trial_start_time=100, time_end=102)
assert running_time == 2
running_time = cls.get_average_trial_time_(1, avg_trial_run_time=running_time, trial_start_time=110, time_end=114)
assert running_time == 3.0
running_time = cls.get_average_trial_time_(2, avg_trial_run_time=running_time, trial_start_time=120, time_end=126)
assert running_time == 4.0
def test_has_enough_time_for_trial__enough_time__no_avg_time():
# Enough time - no average time
assert cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=None)
def test_has_enough_time_for_trial__enough_time__avg_time_allows_trials():
# Enough time - average time allows more trial
assert cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=1)
def test_has_enough_time_for_trial__enough_time__avg_time_not_allows_trials():
# Enough time - average time does not allow more trial
assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=5)
def test_has_enough_time_for_trial__time_exceeded_no_avg_time():
# Time exceeded - no average time
assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=116, avg_trial_run_time=None)
def test_has_enough_time_for_trial__avg_time():
    # Time exceeded - zero average time
assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=116, avg_trial_run_time=0)
def test_has_enough_time_for_trial__enough_time__avg_time_not_allows_trials_by_fill_factor():
# Enough time - average time does not allow more trial
assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=1, fill_factor=5)
def test_LocalSequentialScheduler_no_criteria():
search_space = {"lr": space.Real(1e-2, 1e-1, log=True)}
def _train_fn_():
pass
with pytest.raises(AssertionError, match="Need stopping criterion: Either num_trials or time_out"):
LocalSequentialScheduler(train_fn=_train_fn_, search_space=search_space, reward_attr="reward_attr", resource={})
def test_search_space():
search_space = dict(
a=space.Real(1e-3, 1e-2, log=True),
b=space.Real(1e-3, 1e-2),
c=space.Int(1, 10),
d=space.Categorical("a", "b", "c", "d"),
e=space.Bool(),
)
def train_fn(args, reporter):
a, b, c, d, e = args["a"], args["b"], args["c"], args["d"], args["e"]
assert a <= 1e-2 and a >= 1e-3
assert b <= 1e-2 and b >= 1e-3
assert c <= 10 and c >= 1
assert d in ["a", "b", "c", "d"]
assert e in [True, False]
reporter(epoch=1, accuracy=0)
scheduler = LocalSequentialScheduler(
train_fn,
search_space=search_space,
resource={"num_cpus": "all", "num_gpus": 0},
num_trials=10,
reward_attr="accuracy",
time_attr="epoch",
checkpoint=None,
)
scheduler.run()
def test_scheduler_can_handle_failing_jobs():
trails_outcomes = []
best_result = [-1]
search_space = dict(a=space.Real(0, 1))
def train_fn(args, reporter):
test_should_fail = args["a"] > 0.7
trails_outcomes.append(test_should_fail)
if test_should_fail:
raise Exception("Failed Trial")
elif args["a"] > best_result[0]:
best_result[0] = args["a"]
reporter(epoch=1, accuracy=args["a"])
scheduler = LocalSequentialScheduler(
train_fn,
search_space=search_space,
resource={"num_cpus": "all", "num_gpus": 0},
num_trials=10,
reward_attr="accuracy",
time_attr="epoch",
checkpoint=None,
)
scheduler.run()
actual_runs = []
for trial in scheduler.training_history.values():
is_failed = False
for i in trial:
if "traceback" in i:
is_failed = True
break
actual_runs.append(is_failed)
assert trails_outcomes == actual_runs
assert scheduler.get_best_reward() == best_result[0]
assert scheduler.get_best_config() == {"a": best_result[0]}
| true
|
f2821f0ed33bdc448d0a7e46e6eec447f9eeab87
|
Python
|
benjamincorcoran/SASDocumentation
|
/SASDocumentation/SASObjects/SASDataObjectParser.py
|
UTF-8
| 1,768
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
import re
from .SASBaseObject import SASBaseObject
from .SASDataObject import SASDataObject
class SASDataObjectParser(SASBaseObject):
'''
SAS Data Object Parser Class
Factory for creating DataObjects from text string
'''
def __init__(self):
SASBaseObject.__init__(self)
def parseDataObjects(self, objectText, startLine=None, endLine=None):
'''
Takes object text and parses out Data Objects
Parameters:
objectText - Raw text with DataObject defined within
startLine (optional) - Starting line of specified text
endLine (optional) - Ending line of specified text
Returns:
list - List of validated SASDataObjects found in objectText
'''
rawObjectList = self.splitDataObjects(objectText)
objectList = []
for dataObject in rawObjectList:
library = dataObject[0]
dataset = re.findall(
r'([^(]+)[.]*', dataObject[1], self.regexFlags)
            dataset = dataset[0] if dataset else ''
condition = re.findall(r'\((.*)\)', dataObject[1], self.regexFlags)
if len(library) > 0:
library = library
else:
library = None
if len(condition) > 0:
condition = condition[0]
else:
condition = None
if len(dataset) > 0:
objectList.append(
SASDataObject(
library,
dataset,
condition,
startLine,
endLine))
return objectList
| true
|
a2945a82c36cb5157a364979aba9d6671c9be41c
|
Python
|
daylightPL/Instrumenty_Finansowe
|
/Modelgui.py
|
UTF-8
| 3,858
| 3.484375
| 3
|
[] |
no_license
|
from sklearn.linear_model import LinearRegression
import numpy
import time
import pandas
import datetime
import dateutil.relativedelta
def Prediction (data_xy, date_for_prediction):
    #Preprocess the dates (X list)
    train_list_x = data_xy[0]
    train_list_y = data_xy[1]
    for i in range(0, len(train_list_x)):
        train_list_x[i] = int(time.mktime(time.strptime(train_list_x[i], '%Y-%m-%d')))
    train_list_x = numpy.array(train_list_x).reshape(-1, 1)
    #Preprocess the values (y list)
    train_list_y = numpy.array(train_list_y)
    #Train the model
    model = LinearRegression()
    model.fit(train_list_x, train_list_y)
    #Preprocess the date for the prediction
    date_for_prediction = int(time.mktime(time.strptime(date_for_prediction[0], '%Y-%m-%d')))
    date_for_prediction = numpy.array(date_for_prediction).reshape(-1, 1)
    #Prediction
    predicted_value = model.predict(date_for_prediction)
    #Return the value
    return float(predicted_value)
def Select_Data(duration, date_today):
    #Switch depending on the data range
    if duration == 'year':
        #Load data from the file
        csv_file = './data/data_store/selected_data.csv'
        df = pandas.read_csv(csv_file)
        #Uses everything loaded - no filtering needed
        #Build the lists and return them
        x = df.data.tolist()
        y = df.cena.tolist()
        return x, y
    elif duration == 'month':
        #Load data from the file
        csv_file = './data/data_store/selected_data.csv'
        df = pandas.read_csv(csv_file)
        #Filtering
        targettime = datetime.datetime.strptime(date_today, '%Y-%m-%d')
        targettime = targettime - dateutil.relativedelta.relativedelta(months=1)
        targettime = str(targettime)
        #print(date_today)
        #print(targettime)
        df = df[(df.data > targettime)]
        #print(df)
        #Build the lists and return them
        x = df.data.tolist()
        y = df.cena.tolist()
        return x, y
    elif duration == 'week':
        #Load data from the file
        csv_file = './data/data_store/selected_data.csv'
        df = pandas.read_csv(csv_file)
        #Filtering
        targettime = datetime.datetime.strptime(date_today, '%Y-%m-%d')
        targettime = targettime - dateutil.relativedelta.relativedelta(days=7)
        targettime = str(targettime)
        #print(date_today)
        #print(targettime)
        df = df[(df.data > targettime)]
        #print(df)
        #Build the lists and return them
        x = df.data.tolist()
        y = df.cena.tolist()
        return x, y
def model_licz(period, predict_for ):
#today = '2019-01-13'
#period = 'month' or 'week' or 'year'
today = '2019-01-15' #TODO: str(datetime.datetime.today())
data = Select_Data(period, today)
prediction = str(Prediction(data, predict_for))
return prediction
#period = 'month'
#predict_for = ['2019-01-15']
#wyliczone = model_licz(period, predict_for)
#print(wyliczone)
#Below: an example of using both functions
#predict_for = ['2019-01-13']
#today = '2019-01-13'
#prediction = [0, 0, 0]
#data = Select_Data('year', today)
#prediction[0] = Prediction(data, predict_for)
#data = Select_Data('month', today)
#prediction[1] = Prediction(data, predict_for)
#data = Select_Data('week', today)
#prediction[2] = Prediction(data, predict_for)
#print('Forecast for: ' + str(predict_for[0]))
#print('Year: '+ str(prediction[0]))
#print('Month: '+ str(prediction[1]))
#print('Week: '+ str(prediction[2]))
#Below: an example of using the Prediction function alone
#Data initialization
#x = ['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-05']
#print(x)
#y = [1, 2, 3, 4, 5]
#predict_for = ['2019-01-30']
#Call the function
#prediction = Prediction(x, y, predict_for)
#Display the data and the result
#print(x)
#print(y)
#print(predict_for)
#print(prediction)
| true
|
d4bd3413f804236ad7706ccb09bbebf52f4f0316
|
Python
|
nagyist/mapd-core
|
/QueryEngine/scripts/generate_TableFunctionsFactory_init.py
|
UTF-8
| 21,916
| 2.65625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
"""Given a list of input files, scan for lines containing UDTF
specification statements in the following form:
UDTF: function_name(<arguments>) -> <output column types> (, <template type specifications>)?
where <arguments> is a comma-separated list of argument types. The
argument types specifications are:
- scalar types:
Int8, Int16, Int32, Int64, Float, Double, Bool, TextEncodingDict, etc
- column types:
ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64, ColumnFloat, ColumnDouble, ColumnBool, etc
- column list types:
ColumnListInt8, ColumnListInt16, ColumnListInt32, ColumnListInt64, ColumnListFloat, ColumnListDouble, ColumnListBool, etc
- cursor type:
Cursor<t0, t1, ...>
where t0, t1 are column or column list types
- output buffer size parameter type:
RowMultiplier<i>, ConstantParameter<i>, Constant<i>, TableFunctionSpecifiedParameter<i>
where i is a literal integer.
The output column types is a comma-separated list of column types, see above.
In addition, the following equivalents are supported:
Column<T> == ColumnT
ColumnList<T> == ColumnListT
Cursor<T, V, ...> == Cursor<ColumnT, ColumnV, ...>
int8 == int8_t == Int8, etc
float == Float, double == Double, bool == Bool
T == ColumnT for output column types
RowMultiplier == RowMultiplier<i> where i is the one-based position of the sizer argument
when no sizer argument is provided, Constant<1> is assumed
Argument types can be annotated using `|' (bar) symbol after an
argument type specification. An annotation is specified by a label and
a value separated by `=' (equal) symbol. Multiple annotations can be
specified by using `|` (bar) symbol as the annotations separator.
Supported annotation labels are:
- name: to specify argument name
- input_id: to specify the dict id mapping for output TextEncodingDict columns.
- default: to specify a default value for an argument (scalar only)
If an argument type is followed by an identifier, the identifier is mapped to a name
annotation. For example, the following argument type specifications
are equivalent:
Int8 a
Int8 | name=a
Template type specifications is a comma separated list of template
type assignments where values are lists of argument type names. For
instance:
T = [Int8, Int16, Int32, Float], V = [Float, Double]
"""
# Author: Pearu Peterson
# Created: January 2021
import os
import sys
import warnings
import TableFunctionsFactory_transformers as transformers
import TableFunctionsFactory_parser as parser
import TableFunctionsFactory_declbracket as declbracket
import TableFunctionsFactory_util as util
import TableFunctionsFactory_linker as linker
# fmt: off
separator = '$=>$'
def line_is_incomplete(line):
# TODO: try to parse the line to be certain about completeness.
# `$=>$' is used to separate the UDTF signature and the expected result
return line.endswith(',') or line.endswith('->') or line.endswith(separator) or line.endswith('|')
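# For example (given the continuation markers checked above):
#   line_is_incomplete('UDTF: f(Int32,')           # True, trailing comma
#   line_is_incomplete('UDTF: f(Int32) -> Int32')  # False, spec is complete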
# fmt: off
def find_signatures(input_file):
"""Returns a list of parsed UDTF signatures."""
signatures = []
last_line = None
for line in open(input_file).readlines():
line = line.strip()
if last_line is not None:
line = last_line + ' ' + line
last_line = None
if not line.startswith('UDTF:'):
continue
if line_is_incomplete(line):
last_line = line
continue
last_line = None
line = line[5:].lstrip()
i = line.find('(')
j = line.find(')')
if i == -1 or j == -1:
sys.stderr.write('Invalid UDTF specification: `%s`. Skipping.\n' % (line))
continue
expected_result = None
if separator in line:
line, expected_result = line.split(separator, 1)
expected_result = expected_result.strip().split(separator)
expected_result = list(map(lambda s: s.strip(), expected_result))
ast = parser.Parser(line).parse()
if expected_result is not None:
# Treat warnings as errors so that one can test TransformerWarning
warnings.filterwarnings("error")
# Template transformer expands templates into multiple lines
try:
result = transformers.Pipeline(
transformers.TemplateTransformer,
transformers.AmbiguousSignatureCheckTransformer,
transformers.FieldAnnotationTransformer,
transformers.TextEncodingDictTransformer,
transformers.DefaultValueAnnotationTransformer,
transformers.SupportedAnnotationsTransformer,
transformers.RangeAnnotationTransformer,
transformers.FixRowMultiplierPosArgTransformer,
transformers.RenameNodesTransformer,
transformers.AstPrinter)(ast)
except (transformers.TransformerException, transformers.TransformerWarning) as msg:
result = ['%s: %s' % (type(msg).__name__, msg)]
assert len(result) == len(expected_result), "\n\tresult: %s \n!= \n\texpected: %s" % (
'\n\t\t '.join(result),
'\n\t\t '.join(expected_result)
)
assert set(result) == set(expected_result), "\n\tresult: %s != \n\texpected: %s" % (
'\n\t\t '.join(result),
'\n\t\t '.join(expected_result),
)
else:
signature = transformers.Pipeline(
transformers.TemplateTransformer,
transformers.AmbiguousSignatureCheckTransformer,
transformers.FieldAnnotationTransformer,
transformers.TextEncodingDictTransformer,
transformers.DefaultValueAnnotationTransformer,
transformers.SupportedAnnotationsTransformer,
transformers.RangeAnnotationTransformer,
transformers.FixRowMultiplierPosArgTransformer,
transformers.RenameNodesTransformer,
transformers.DeclBracketTransformer)(ast)
signatures.extend(signature)
return signatures
def format_function_args(input_types, output_types, uses_manager, use_generic_arg_name, emit_output_args):
cpp_args = []
name_args = []
if uses_manager:
cpp_args.append('TableFunctionManager& mgr')
name_args.append('mgr')
for idx, typ in enumerate(input_types):
cpp_arg, name = typ.format_cpp_type(idx,
use_generic_arg_name=use_generic_arg_name,
is_input=True)
cpp_args.append(cpp_arg)
name_args.append(name)
if emit_output_args:
for idx, typ in enumerate(output_types):
cpp_arg, name = typ.format_cpp_type(idx,
use_generic_arg_name=use_generic_arg_name,
is_input=False)
cpp_args.append(cpp_arg)
name_args.append(name)
cpp_args = ', '.join(cpp_args)
name_args = ', '.join(name_args)
return cpp_args, name_args
def build_template_function_call(caller, called, input_types, output_types, uses_manager):
cpp_args, name_args = format_function_args(input_types,
output_types,
uses_manager,
use_generic_arg_name=True,
emit_output_args=True)
template = ("EXTENSION_NOINLINE int32_t\n"
"%s(%s) {\n"
" return %s(%s);\n"
"}\n") % (caller, cpp_args, called, name_args)
return template
def build_preflight_function(fn_name, sizer, input_types, output_types, uses_manager):
def format_error_msg(err_msg, uses_manager):
if uses_manager:
return " return mgr.error_message(%s);\n" % (err_msg,)
else:
return " return table_function_error(%s);\n" % (err_msg,)
cpp_args, _ = format_function_args(input_types,
output_types,
uses_manager,
use_generic_arg_name=False,
emit_output_args=False)
if uses_manager:
fn = "EXTENSION_NOINLINE int32_t\n"
fn += "%s(%s) {\n" % (fn_name.lower() + "__preflight", cpp_args)
else:
fn = "EXTENSION_NOINLINE int32_t\n"
fn += "%s(%s) {\n" % (fn_name.lower() + "__preflight", cpp_args)
for typ in input_types:
if isinstance(typ, declbracket.Declaration):
ann = typ.annotations
for key, value in ann:
if key == 'require':
err_msg = '"Constraint `%s` is not satisfied."' % (value[1:-1])
fn += " if (!(%s)) {\n" % (value[1:-1].replace('\\', ''),)
fn += format_error_msg(err_msg, uses_manager)
fn += " }\n"
if sizer.is_arg_sizer():
precomputed_nrows = str(sizer.args[0])
if '"' in precomputed_nrows:
precomputed_nrows = precomputed_nrows[1:-1]
# check to see if the precomputed number of rows > 0
err_msg = '"Output size expression `%s` evaluated in a negative value."' % (precomputed_nrows)
fn += " auto _output_size = %s;\n" % (precomputed_nrows)
fn += " if (_output_size < 0) {\n"
fn += format_error_msg(err_msg, uses_manager)
fn += " }\n"
fn += " return _output_size;\n"
else:
fn += " return 0;\n"
fn += "}\n\n"
return fn
def must_emit_preflight_function(sig, sizer):
if sizer.is_arg_sizer():
return True
for arg_annotations in sig.input_annotations:
d = dict(arg_annotations)
if 'require' in d.keys():
return True
return False
def format_annotations(annotations_):
def fmt(k, v):
# type(v) is not always 'str'
if k == 'require' or k == 'default' and v[0] == "\"":
return v[1:-1]
return v
s = "std::vector<std::map<std::string, std::string>>{"
s += ', '.join(('{' + ', '.join('{"%s", "%s"}' % (k, fmt(k, v)) for k, v in a) + '}') for a in annotations_)
s += "}"
return s
def is_template_function(sig):
i = sig.name.rfind('_template')
return i >= 0 and '__' in sig.name[:i + 1]
def uses_manager(sig):
return sig.inputs and sig.inputs[0].name == 'TableFunctionManager'
def is_cpu_function(sig):
# Any function that does not have _gpu_ suffix is a cpu function.
i = sig.name.rfind('_gpu_')
if i >= 0 and '__' in sig.name[:i + 1]:
if uses_manager(sig):
raise ValueError('Table function {} with gpu execution target cannot have TableFunctionManager argument'.format(sig.name))
return False
return True
def is_gpu_function(sig):
# A function with TableFunctionManager argument is a cpu-only function
if uses_manager(sig):
return False
# Any function that does not have _cpu_ suffix is a gpu function.
i = sig.name.rfind('_cpu_')
return not (i >= 0 and '__' in sig.name[:i + 1])
def parse_annotations(input_files):
counter = 0
add_stmts = []
cpu_template_functions = []
gpu_template_functions = []
cpu_function_address_expressions = []
gpu_function_address_expressions = []
cond_fns = []
for input_file in input_files:
for sig in find_signatures(input_file):
# Compute sql_types, input_types, and sizer
sql_types_ = []
input_types_ = []
input_annotations = []
sizer = None
if sig.sizer is not None:
expr = sig.sizer.value
sizer = declbracket.Bracket('kPreFlightParameter', (expr,))
uses_manager = False
for i, (t, annot) in enumerate(zip(sig.inputs, sig.input_annotations)):
if t.is_output_buffer_sizer():
if t.is_user_specified():
sql_types_.append(declbracket.Bracket.parse('int32').normalize(kind='input'))
input_types_.append(sql_types_[-1])
input_annotations.append(annot)
assert sizer is None # exactly one sizer argument is allowed
assert len(t.args) == 1, t
sizer = t
elif t.name == 'Cursor':
for t_ in t.args:
input_types_.append(t_)
input_annotations.append(annot)
sql_types_.append(declbracket.Bracket('Cursor', args=()))
elif t.name == 'TableFunctionManager':
if i != 0:
raise ValueError('{} must appear as a first argument of {}, but found it at position {}.'.format(t, sig.name, i))
uses_manager = True
else:
input_types_.append(t)
input_annotations.append(annot)
if t.is_column_any():
# XXX: let Bracket handle mapping of column to cursor(column)
sql_types_.append(declbracket.Bracket('Cursor', args=()))
else:
sql_types_.append(t)
if sizer is None:
name = 'kTableFunctionSpecifiedParameter'
idx = 1 # this sizer is not actually materialized in the UDTF
sizer = declbracket.Bracket(name, (idx,))
assert sizer is not None
ns_output_types = tuple([a.apply_namespace(ns='ExtArgumentType') for a in sig.outputs])
ns_input_types = tuple([t.apply_namespace(ns='ExtArgumentType') for t in input_types_])
ns_sql_types = tuple([t.apply_namespace(ns='ExtArgumentType') for t in sql_types_])
sig.function_annotations.append(('uses_manager', str(uses_manager).lower()))
input_types = 'std::vector<ExtArgumentType>{%s}' % (', '.join(map(util.tostring, ns_input_types)))
output_types = 'std::vector<ExtArgumentType>{%s}' % (', '.join(map(util.tostring, ns_output_types)))
sql_types = 'std::vector<ExtArgumentType>{%s}' % (', '.join(map(util.tostring, ns_sql_types)))
annotations = format_annotations(input_annotations + sig.output_annotations + [sig.function_annotations])
# Notice that input_types and sig.input_types, (and
# similarly, input_annotations and sig.input_annotations)
# have different lengths when the sizer argument is
# Constant or TableFunctionSpecifiedParameter. That is,
# input_types contains all the user-specified arguments
# while sig.input_types contains all arguments of the
# implementation of an UDTF.
if must_emit_preflight_function(sig, sizer):
fn_name = '%s_%s' % (sig.name, str(counter)) if is_template_function(sig) else sig.name
check_fn = build_preflight_function(fn_name, sizer, input_types_, sig.outputs, uses_manager)
cond_fns.append(check_fn)
if is_template_function(sig):
name = sig.name + '_' + str(counter)
counter += 1
t = build_template_function_call(name, sig.name, input_types_, sig.outputs, uses_manager)
address_expression = ('avoid_opt_address(reinterpret_cast<void*>(%s))' % name)
if is_cpu_function(sig):
cpu_template_functions.append(t)
cpu_function_address_expressions.append(address_expression)
if is_gpu_function(sig):
gpu_template_functions.append(t)
gpu_function_address_expressions.append(address_expression)
add = ('TableFunctionsFactory::add("%s", %s, %s, %s, %s, %s, /*is_runtime:*/false);'
% (name, sizer.format_sizer(), input_types, output_types, sql_types, annotations))
add_stmts.append(add)
else:
add = ('TableFunctionsFactory::add("%s", %s, %s, %s, %s, %s, /*is_runtime:*/false);'
% (sig.name, sizer.format_sizer(), input_types, output_types, sql_types, annotations))
add_stmts.append(add)
address_expression = ('avoid_opt_address(reinterpret_cast<void*>(%s))' % sig.name)
if is_cpu_function(sig):
cpu_function_address_expressions.append(address_expression)
if is_gpu_function(sig):
gpu_function_address_expressions.append(address_expression)
return add_stmts, cpu_template_functions, gpu_template_functions, cpu_function_address_expressions, gpu_function_address_expressions, cond_fns
if len(sys.argv) < 3:
input_files = [os.path.join(os.path.dirname(__file__), 'test_udtf_signatures.hpp')]
print('Running tests from %s' % (', '.join(input_files)))
add_stmts, _, _, _, _, _ = parse_annotations(input_files)
print('Usage:\n %s %s input1.hpp input2.hpp ... output.hpp' % (sys.executable, sys.argv[0], ))
sys.exit(1)
input_files, output_filename = sys.argv[1:-1], sys.argv[-1]
cpu_output_header = os.path.splitext(output_filename)[0] + '_cpu.hpp'
gpu_output_header = os.path.splitext(output_filename)[0] + '_gpu.hpp'
assert input_files, sys.argv
add_stmts = []
cpu_template_functions = []
gpu_template_functions = []
cpu_address_expressions = []
gpu_address_expressions = []
cond_fns = []
canonical_input_files = [input_file[input_file.find("/QueryEngine/") + 1:] for input_file in input_files]
header_file = ['#include "' + canonical_input_file + '"' for canonical_input_file in canonical_input_files]
dirname = os.path.dirname(output_filename)
if dirname and not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
for input_file in input_files:
stmts, cpu_fns, gpu_fns, cpu_addr, gpu_addr, cond_funcs = parse_annotations([input_file])
add_stmts.extend(stmts)
cpu_template_functions.extend(cpu_fns)
gpu_template_functions.extend(gpu_fns)
cpu_address_expressions.extend(cpu_addr)
gpu_address_expressions.extend(gpu_addr)
cond_fns.extend(cond_funcs)
header_file = input_file[input_file.find("/QueryEngine/") + 1:]
add_tf_generated_files = linker.GenerateAddTableFunctionsFiles(dirname, stmts,
header_file)
if add_tf_generated_files.should_generate_files():
add_tf_generated_files.generate_files()
if len(cpu_fns):
cpu_generated_files = linker.GenerateTemplateFiles(dirname, cpu_fns,
header_file, 'cpu')
cpu_generated_files.generate_files()
if len(gpu_fns):
gpu_generated_files = linker.GenerateTemplateFiles(dirname, gpu_fns,
header_file, 'gpu')
gpu_generated_files.generate_files()
def call_methods(add_stmts):
n_add_funcs = linker.GenerateAddTableFunctionsFiles.get_num_generated_files()
return [ 'table_functions::add_table_functions_%d();' % (i) for i in range(n_add_funcs+1) ]
content = '''
/*
This file is generated by %s. Do not edit!
*/
#include "QueryEngine/TableFunctions/TableFunctionsFactory.h"
/*
Include the UDTF template initiations:
*/
%s
// volatile+noinline prevents compiler optimization
#ifdef _WIN32
__declspec(noinline)
#else
__attribute__((noinline))
#endif
#ifndef NO_OPT_ATTRIBUTE
#if defined(__clang__)
#define NO_OPT_ATTRIBUTE __attribute__((optnone))
#elif defined(__GNUC__) || defined(__GNUG__)
#define NO_OPT_ATTRIBUTE __attribute__((optimize("O0")))
#elif defined(_MSC_VER)
#define NO_OPT_ATTRIBUTE
#endif
#endif
#if defined(_MSC_VER)
#pragma optimize("", off)
#endif
volatile
NO_OPT_ATTRIBUTE bool avoid_opt_address(void *address) {
return address != nullptr;
}
NO_OPT_ATTRIBUTE bool functions_exist() {
bool ret = true;
ret &= (%s);
return ret;
}
extern bool g_enable_table_functions;
extern bool functions_exist_geo_column();
// Each table function initialization module needs its own AddTableFunctions struct definition,
// otherwise, when calling an initialization function at runtime, symbol name conflicts will
// cause the wrong struct to be instantiated.
namespace {
struct AddTableFunctions {
NO_OPT_ATTRIBUTE void operator()() {
%s
}
};
} // anonymous namespace
namespace table_functions {
// Each table function initialization module should have its own init flag
static std::once_flag init_flag;
static const char filename[] = __FILE__;
template<const char *filename>
void TableFunctionsFactory::init() {
if (!g_enable_table_functions) {
return;
}
if (!functions_exist() && !functions_exist_geo_column()) {
UNREACHABLE();
return;
}
std::call_once(init_flag, AddTableFunctions{});
}
extern "C" void init_table_functions() {
TableFunctionsFactory::init<filename>();
}
#if defined(_MSC_VER)
#pragma optimize("", on)
#endif
// conditional check functions
%s
} // namespace table_functions
'''
#####
content = content % (
sys.argv[0],
'\n'.join(map(lambda x: '#include "%s"' % x, linker.BaseGenerateFiles.generated_header_files())),
' &&\n'.join(cpu_address_expressions),
'\n '.join(call_methods(add_stmts)),
''.join(cond_fns))
if not (os.path.exists(output_filename) and \
content == linker.get_existing_file_content(output_filename)):
with open(output_filename, 'w') as f:
f.write(content)
| true
|
d69c703aa9727f00104d864902f92555a7bca525
|
Python
|
feng-li/Distributed-Statistical-Computing
|
/book-examples/16-recommendation-systems/mapper2.py
|
UTF-8
| 676
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
from itertools import combinations
import sys
while True:
try:
line=sys.stdin.readline()
if not line:
break
line=line.strip()
values=line.split('|')
#combinations(values,2) get all the combinations of 2 films
for item1, item2 in combinations(values,2):
#check if the items are empty
if len(item1)>1 and len(item2)>1:
item1,item2=item1.split('*'),item2.split('*')
print((item1[0],item2[0]),end='')
print('|',end='')
print(item1[1]+','+item2[1])
#output of map2: (film1,film2)|(score of film1,score of film2)
except:
continue
| true
|
0abec71f386b206a72983a6a5015a937ac1ffadd
|
Python
|
H-E-L-P/dmu_products
|
/dmu12/Q0_calc.py
|
UTF-8
| 3,614
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
from IPython.display import clear_output
from pymoc import MOC
from pymoc.util.catalog import catalog_to_moc
from mltier import Field, Q_0, parallel_process, describe, gen_rand_cat_inMOC
'''
Finds Q0 (see Flueren et al 2012 for a definition of this parameter and the method used to calculate it) for the given catalogues as well as the optimum radius for crossmatching
INPUTS
------------
cat1 - the radio catalogue
cat2 - the masterlist
cat1ra - column name of the ra column for cat1
cat1dec - column name of the dec column for cat1
cat2ra - column name of the ra column for cat2
cat2dec - column name of the dec column for cat2
cat2flux - column name of the flux of the sources in cat2 in the band that is being used for the crossmatching
ra_down - the ra that defines the left side of a rectangle encompassing the area the crossmatching will be done in (this parameter is redundant if a moc is provided)
ra_up - the ra that defines the right side of a rectangle encompassing the area the crossmatching will be done in (this parameter is redundant if a moc is provided)
dec_down - the dec that defines the bottom side of a rectangle encompassing the area the crossmatching will be done in (this parameter is redundant if a moc is provided)
dec_up - the dec that defines the upper side of a rectangle encompassing the area the crossmatching will be done in (this parameter is redundant if a moc is provided)
radius_low - lower radius that Q0 is computed for in arcseconds. The value given must be >0; if too small it may cause the code to crash (values >=0.01 are advised)
radius_up - upper radius that Q0 is computed for in arcseconds
steps - the number of intermediate radii that Q0 is computed for
moc - a moc defining the area the crossmatching will be done in
OUTPUTS
------------
q_0_rad_i - the list of mean Q0 values, one per tested radius
rads - the tested radii in arcseconds (the optimum is the radius at the maximum Q0)
'''
def Q0_calc(cat1,cat2,cat1ra,cat1dec,cat2ra,cat2dec,cat2flux,ra_down,ra_up,dec_down,dec_up,radius_low,radius_up,steps,moc=None):
field = Field(ra_down, ra_up, dec_down, dec_up, moc)
radio = cat1
masterlist_data = cat2
radio = field.filter_catalogue(radio, colnames=(cat1ra,cat1dec))
masterlist = field.filter_catalogue(masterlist_data, colnames=(cat2ra,cat2dec))
coords_masterlist = SkyCoord(masterlist[cat2ra],
masterlist[cat2dec],
unit=(u.deg, u.deg),
frame='icrs')
coords_radio = SkyCoord(radio[cat1ra],
radio[cat1dec],
unit=(u.deg, u.deg),
frame='icrs')
n_iter = 10
rads = np.linspace(radius_low,radius_up,steps)
q_0_comp_i = Q_0(coords_radio, coords_masterlist, field)
q_0_rad_i = []
print('starting to find Q0. This will take a while')
for radius in rads:
print('finding Q0 with radius = {} arcseconds'.format(radius))
q_0_rad_aux = []
for i in range(n_iter):
out = q_0_comp_i(radius=radius)
q_0_rad_aux.append(out)
q_0_rad_i.append(np.mean(q_0_rad_aux))
#print("{:7.5f} {:7.5f} +/- {:7.5f} [{:7.5f} {:7.5f}]".format(radius,
# np.mean(q_0_rad_aux), np.std(q_0_rad_aux),
# np.min(q_0_rad_aux), np.max(q_0_rad_aux)))
# the full Q0 curve is returned; pick the optimum with e.g. rads[np.argmax(q_0_rad_i)]
return(q_0_rad_i,rads)
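# Minimal usage sketch (hypothetical file and column names; moc omitted so the
# rectangular ra/dec bounds are used):
#   radio = Table.read('radio.fits')
#   ml = Table.read('masterlist.fits')
#   q0_curve, radii = Q0_calc(radio, ml, 'RA', 'DEC', 'ra', 'dec', 'flux',
#                             10.0, 20.0, -5.0, 5.0, 0.01, 15.0, 20)
#   best_radius = radii[np.argmax(q0_curve)]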
| true
|
49a7bb1d3539e8deb963f751178c7861f79b15fc
|
Python
|
danabaxia/stockAnalysis
|
/stockAnalysis/indicators.py
|
UTF-8
| 11,135
| 3
| 3
|
[
"MIT"
] |
permissive
|
import robin_stocks as r
import trading_algorithms as m
import financial as f
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import stockstats
import time
from datetime import datetime
import csv
import itertools
#import talib
#########################
#stock tickers grabbing
#download data
def get_stocks():
data = pd.DataFrame(f.requestAllStocks())
data.to_csv('stock_list.csv',index=True)
#read from csv
#return tickers that are listed on NYSE and Nasdaq
def clean_stocks():
data = pd.read_csv('stock_list.csv')
stock_exchanges = ['NYSE','Nasdaq','NYSE Arca',
'Nasdaq Global Select','New York Stock Exchange',
'NASDAQ Capital Market','NYSE American',
'NASDAQ Global Market','NasdaqCM','NasdaqGM','NasdaqGS']
df = data[data['exchange'].isin(stock_exchanges)]
#remove funds: name contains 'Fund'
df = df.drop(df[df['name'].str.contains('Fund', na=False)].index)
#check if the price can be requested
df1 = df.copy()
change = []
for index, row in df1.iterrows():
try:
#result = f.getPricePercent(row['symbol'],30)
result = f.getPriceCurrent(row['symbol'])
print(row['symbol'],result)
change.append(result)
except Exception as exc:
change.append(np.nan)
df1.loc[:,'close'] = change
#remove nan rows
df2 = df1.dropna(how='any')
#save to cvs
df2.to_csv('stock_list_clean1.csv',index=False)
def get_all_tickers():
tkers = []
data = pd.read_csv('stock_list_clean.csv')
for index, row in data.iterrows():
if row['price'] > 5: #get rid of penny stocks
tkers.append(row['symbol'])
return tkers
#########################
#create binary data
def get_binary(x):
current_value = 0.1
current_state = 0
state = []
i = 0
for value in x:
trend = value - current_value
if trend > 0:
current_state = 1
elif trend <= 0:
current_state = 0
else:
if i == 0:
current_state = 0
else:
current_state = state[-1]
current_value = value
state.append(current_state)
i += 1
return state
def get_binary_angle(x, angle=0.05):
current_value = 0.1
current_state = 0
state = []
i = 0
for value in x:
trend = (value - current_value)/(current_value+0.0001)
#print(trend)
if trend > angle:
current_state = 1
elif trend <= -angle:
current_state = 0
else:
if i == 0:
current_state = 0
else:
current_state = state[-1]
current_value = value
state.append(current_state)
i += 1
return state
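# Quick sanity checks for the binary encoders (values worked out by hand;
# with the default angle of 0.05, moves smaller than 5% keep the last state):
#   get_binary([1, 2, 1])                   # -> [1, 1, 0]
#   get_binary_angle([1.0, 1.1, 1.0, 0.9])  # -> [1, 1, 0, 0]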
def get_binary_np(x):
current_value = 0
current_state = 0
state = []
for value in x:
if value >= current_value:
current_state = 1
elif value < current_value:
current_state = 0
current_value = value
state.append(current_state)
return np.array(state)
def load_stock(tker,day):
X = f.requestHistoryStockPriceByDay(tker,day)
Data = pd.DataFrame(X)
Data = pd.concat([Data['date'][::-1],Data['open'][::-1],
Data['high'][::-1],Data['low'][::-1],
Data['close'][::-1],Data['volume'][::-1],
Data['adjClose'][::-1]],axis=1)
Data.columns = ['date','open','high','low','close','volume','adjClose']
Data = Data.reset_index(drop=True) #reset the index after reversing the rows
return Data
def load_stock_from(tker,end,day):
start = f.calDatesFrom(end,day).strftime("%Y-%m-%d")
X = f.requestHistoryStockPrice(tker, start, end)
Data = pd.DataFrame(X)
Data = pd.concat([Data['date'][::-1],Data['open'][::-1],
Data['high'][::-1],Data['low'][::-1],
Data['close'][::-1],Data['volume'][::-1],
Data['adjClose'][::-1]],axis=1)
Data.columns = ['date','Open','High','Low','Close','Volume','adjClose']
Data = Data.reset_index(drop=True) #reset the index after reversing the rows
return Data
#load stock data from start day to end day:
#fox example load_stock_from_to('NIO', '2020-04-30', '2020-08-01')
def load_stock_from_to(tker,start,end):
X = f.requestHistoryStockPrice(tker, start, end)
Data = pd.DataFrame(X)
Data = pd.concat([Data['date'][::-1],Data['open'][::-1],
Data['high'][::-1],Data['low'][::-1],
Data['close'][::-1],Data['volume'][::-1],
Data['adjClose'][::-1]],axis=1)
Data.columns = ['date','Open','High','Low','Close','Volume','adjClose']
Data = Data.reset_index(drop=True) #reset the index after reversing the rows
return Data
def load_stock_30min(tker):
X = f.request30minStockPrice(tker)
Data = pd.DataFrame(X)
Data = pd.concat([Data['date'][::-1],Data['open'][::-1],
Data['high'][::-1],Data['low'][::-1],
Data['close'][::-1],Data['volume'][::-1]],axis=1)
Data.columns = ['date','Open','High','Low','Close','Volume']
Data = Data.reset_index(drop=True) #reset the index after reversing the rows
return Data
def load_stock_15min(tker):
X = f.request15minStockPrice(tker)
Data = pd.DataFrame(X)
Data = pd.concat([Data['date'][::-1],Data['open'][::-1],
Data['high'][::-1],Data['low'][::-1],
Data['close'][::-1],Data['volume'][::-1]],axis=1)
Data.columns = ['date','Open','High','Low','Close','Volume']
Data = Data.reset_index(drop=True) #reset the index after reversing the rows
return Data
def cal_stock(data):
stock = stockstats.StockDataFrame.retype(data)
stock['cross_kd'] = stock['kdjk'] - stock['kdjd'] #cross kd
stock['macdh_b']=get_binary(stock['macdh']) #macdh binary
stock['kdjd_b']=get_binary(stock['kdjd']) #kdjd binary
stock['kdjk_b']=get_binary(stock['kdjk']) #kdjk binary
stock['cross_kd_b']=get_binary(stock['cross_kd']) #cross kd binary
return stock
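# Typical pipeline (ticker and day count as used in algo_watch() below):
#   stock = cal_stock(load_stock('AAPL', 100))
#   print(stock[['macdh_b', 'kdjd_b', 'cross_kd_b']].tail())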
#kd rules:
#current D < average D
#last kd_cross < 0 and current kd_cross > 0
#current D up trend
def kd_buy_long(stock):
last_kd_cross = stock.iloc[-2]['cross_kd']
curr_kd_cross = stock.iloc[-1]['cross_kd']
curr_kdjd = stock.iloc[-1]['kdjd']
curr_kdjd_b = stock.iloc[-1]['kdjd_b']
kdjd_ave = stock['kdjd'].mean()
if curr_kdjd < kdjd_ave and last_kd_cross < 0 and curr_kd_cross > 0 and curr_kdjd_b == 1:
return True
else:
return False
#ma rules:
#30ma up trend
def ma_buy_long(stock):
last_ma30 = stock['close_30_sma'].iloc[-2]
curr_ma30 = stock['close_30_sma'].iloc[-1]
if curr_ma30 > last_ma30:
return True
else:
return False
#ma rules:
#30ma up trend
def ma_buy_short(stock):
last_ma30 = stock['close_30_sma'].iloc[-2]
curr_ma30 = stock['close_30_sma'].iloc[-1]
if curr_ma30 > last_ma30:
return True
else:
return False
#macd rules:
#last three macdh up trend
#last macdh > 0
def macd_buy_long(stock):
curr_macdh = stock.iloc[-1]['macdh']
win = [1,1,1]
if list(stock['macdh_b'].tail(3)) == win and curr_macdh > - 0.5:
return True
else:
return False
#macd rules:
#last three macdh up trend
def macd_buy_short(stock):
curr_macdh = stock.iloc[-1]['macdh']
win = [1,1,1]
if list(stock['macdh_b'].tail(3)) == win and curr_macdh > - 0.1:
return True
else:
return False
#kd rules:
#current D < average D
#last kd_cross < 0 and current kd_cross > 0
#current D up trend
def kd_buy_short(stock):
last_kd_cross = stock.iloc[-2]['cross_kd']
curr_kd_cross = stock.iloc[-1]['cross_kd']
curr_kdjd = stock.iloc[-1]['kdjd']
curr_kdjd_b = stock.iloc[-1]['kdjd_b']
kdjd_ave = stock['kdjd'].mean()
if curr_kdjd < kdjd_ave and last_kd_cross < 0 and curr_kd_cross > 0 and curr_kdjd_b == 1:
print('d_ave',kdjd_ave)
print('d',curr_kdjd)
return True
else:
return False
#this function returns stock tikers watch list
def algo_watch():
watch = []
tikers = ['AAPL','MSFT','NIO']
for tker in get_all_tickers():
try:
day = 100
data = load_stock(tker,day)
stock = cal_stock(data)
print(tker)
if kd_buy_long(stock) == True and ma_buy_long(stock)==True and macd_buy_long(stock) == True:
print('buy ',tker)
watch.append(tker)
except Exception as ex:
print('error', ex)
return watch
#buy signal
def buy_signal(stock):
if kd_buy_short(stock) == True and macd_buy_short(stock) == True and ma_buy_short(stock):
return True
else:
return False
def watch_list():
watch = algo_watch()
now = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
with open('watch_list.csv', 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([now,watch])
return watch
def save_to_csv(tker,day):
data = load_stock(tker,day)
data.to_csv(tker+'.csv',index=True)
# up and down trends examples with window = 1
#1, 1, 1 up
#0, 0, 0 down
#1, 1, 0 up to down
#0, 0, 1 down to up
def up_trend(data, window=1):
b = get_binary(data)
#print(b)
w = [1]* window
if list(b[-window:]) == w:
return True
else:
return False
def down_trend(data, window=1):
b = get_binary(data)
w = [0]* window
if list(b[-window:]) == w:
return True
else:
return False
def up_to_down_trend(data, left_window=2,right_window=1):
assert left_window >= 0, 'window size must be >= 0'
assert right_window >= 0, 'window size must be >= 0'
b = get_binary_angle(data)
#print(b)
w = [1]*left_window + [0]*right_window
#print(w)
if list(b[-(left_window+right_window):]) == w:
return True
else:
return False
def down_to_up_trend(data, left_window=2, right_window=1):
assert left_window >= 0, 'window size must be >= 0'
assert right_window >= 0, 'window size must be >= 0'
b = get_binary_angle(data)
#print(b)
w = [0]*left_window + [1]*right_window
#print(w)
if list(b[-(left_window+right_window):]) == w:
return True
else:
return False
#generate parameter combinations
def parameteras_generator(*arg):
pool = {'SMA_short': range(2,14),
'SMA_mid': range(15,29),
'SMA_long': range(30,91),
'rsi_low': range(2,20),
'rsi_hi': range(65,90),
'boll': range(5,15)}
#check if the arguments are in the pool
assert all(name in pool for name in arg), 'arguments contain error(s)'
output = []
for key in arg:
output.append(pool[key])
return itertools.product(*output)
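# Example (first few combinations, following the ranges in `pool`):
#   list(parameteras_generator('SMA_short', 'rsi_low'))[:3]
#   # -> [(2, 2), (2, 3), (2, 4)]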
if __name__ == "__main__":
data = pd.DataFrame([5,2,1,0.5,5],columns=['A'])
print(down_to_up_trend(data['A']))
print(up_to_down_trend(data['A']))
| true
|
c2e8770c869d9071e73cf71b9fd3fadbc5a03e14
|
Python
|
jiguifang904/DSSM
|
/layers/activation.py
|
UTF-8
| 1,045
| 2.84375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 30 14:34:53 2021
@author: jiguifang
"""
# Activation functions
import torch.nn as nn
class Identity(nn.Module):
def __init__(self, **kwargs):
super(Identity, self).__init__()
def forward(self, X):
return X
def activation_layer(act_name, hidden_size=None, dice_dim=2):
if isinstance(act_name, str):
if act_name.lower() == 'sigmoid':
act_layer = nn.Sigmoid()
elif act_name.lower() == 'linear':
act_layer = Identity()
elif act_name.lower() == 'relu':
act_layer = nn.ReLU(inplace=True)
elif act_name.lower() == 'dice':
assert dice_dim
# act_layer = Dice(hidden_size, dice_dim)
raise NotImplementedError('Dice activation is commented out and not available here')
elif act_name.lower() == 'prelu':
act_layer = nn.PReLU()
elif issubclass(act_name, nn.Module):
act_layer = act_name()
else:
raise NotImplementedError
return act_layer
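# Example mappings (names are matched case-insensitively):
#   activation_layer('relu')   # -> nn.ReLU(inplace=True)
#   activation_layer('linear') # -> Identity()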
| true
|
3c2c4fc5bad7f323eb70e01656e0bb6eec30166a
|
Python
|
PanMaster13/Python-Programming
|
/Week 9/Sample Files/Sample 7.py
|
UTF-8
| 158
| 2.796875
| 3
|
[] |
no_license
|
#Sample7.py
import random
def randomLine(fname):
lines = open(fname).read().splitlines()
return random.choice(lines)
print(randomLine("write.txt"))
| true
|
9e8e1dbd634accbe01259d72c25664cfee8ad9db
|
Python
|
Vladislav29/neural-lab
|
/Lab2/neural.py
|
UTF-8
| 1,055
| 3.09375
| 3
|
[] |
no_license
|
import math
class neural:
n = 1
def __init__(self):
self.w = [0] * 21
def point(self, a, b):
x = []
l = (float(abs(b)) + float(abs(2 * b - a))) / float(40)
e = float(2 * b - a)
while float(b) < e:
b += l
x.append(b)
return x
def func_activation(self, t):
return math.exp(t - 1)
def etalon(self, x):
y = []
for i in range(0, 20):
b = self.func_activation(x[i])
y.append(b)
return y
def hoff(self, x, b, j, k):
for i in range(j, k):
self.w[i] += self.n * x[i] * b
def window(self, x, j, k):
window = 0
for i in range(j, k):
window += x[i] * self.w[i]
return window
def check_error(self, x, h):
return h - x != 0
def net(self, w, x):
# weighted sum of the first 10 inputs plus a bias; w[20] (the last
# weight) is used as the bias, since the weight vector has 21 entries
net = w[20]
for i in range(0, 10):
net += w[i] * x[i]
return net
def get_w(self):
return self.w
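# Rough usage sketch (assumed workflow - no driver script is included here):
#   nn = neural()
#   xs = nn.point(-1.0, 1.0)  # 20 sample points spanning (b, 2*b - a]
#   ys = nn.etalon(xs)        # reference outputs exp(t - 1) for the first 20 points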
| true
|
95a336838d5e38dd59cfdb6a416e7362d634a6c4
|
Python
|
w3cp/coding
|
/python/python-3.5.1/5-data-structures/6-del.py
|
UTF-8
| 129
| 3.109375
| 3
|
[] |
no_license
|
a = [-1, 1, 66.25, 333, 333, 1234.5]
del a[0]
print(a)
del a[2:4]
print(a)
del a[:]
print(a)
# del can also be used to delete entire variables:
del a
| true
|
f44f3012420dcb13edd1a979d6ceccf58e9fdd90
|
Python
|
clovisdanielcosta/python-dio
|
/aula3_if_elif_else.py
|
UTF-8
| 533
| 4.4375
| 4
|
[
"MIT"
] |
permissive
|
a = int(input('First value: '))
b = int(input('Second value: '))
if a > b:
print('The largest value is: {}'.format(a))
else:
print('The largest value is: {}'.format(b))
print('End of program')
#Using Elif (Else If)
a = int(input('First value: '))
b = int(input('Second value: '))
c = int(input('Third value: '))
if a > b and a > c:
print('The largest value is: {}'.format(a))
elif b > a and b > c:
print('The largest value is: {}'.format(b))
else:
print('The largest value is: {}'.format(c))
print('End of program')
| true
|
36f9d0c0b52b40115bb870c3fc56b4b7a10f70fc
|
Python
|
ryankupyn/eulerprobs
|
/euler12/main.py
|
UTF-8
| 333
| 3.34375
| 3
|
[] |
no_license
|
triangnum = 0
adder = 1
divisors = 0
while divisors < 500:
divisors = 0
triangnum += adder
adder += 1
for divider in range(1, triangnum // 2 + 1):
if triangnum % divider == 0:
divisors += 1
divisors += 1
if divisors > 250:
print(triangnum)
if divisors >= 500:
print(triangnum)
| true
|
62596ab283b592cccb1b4b9f5a91d74bd02ad365
|
Python
|
Nilanshrajput/SyferText
|
/syfertext/pointers/span_pointer.py
|
UTF-8
| 2,660
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
from syft.generic.pointers.object_pointer import ObjectPointer
from syft.workers.base import BaseWorker
import syft as sy
from typing import List
from typing import Union
class SpanPointer(ObjectPointer):
"""An Object Pointer that points to the Span Object at remote location"""
def __init__(
self,
location: BaseWorker = None,
id_at_location: Union[str, int] = None,
owner: BaseWorker = None,
id: Union[str, int] = None,
):
"""Create a Span Pointer from `location` where the `Span` object resides and
`id_at_location`, the id of the `Span` object at that location.
Args:
location (BaseWorker): the worker where the `Span` object resides that this
SpanPointer will point to.
id_at_location (int or str): the id of the `Span` object at the `location` worker.
owner (BaseWorker): the owner of this object, i.e. the `SpanPointer`
Returns:
A `SpanPointer` object
"""
super(SpanPointer, self).__init__(
location=location,
id_at_location=id_at_location,
owner=owner,
id=id,
garbage_collect_data=True, # Always True
)
def __len__(self):
# Send the command
length = self.owner.send_command(
recipient=self.location, cmd_name="__len__", target=self, args_=tuple(), kwargs_={}
)
return length
def __getitem__(self, item: Union[slice, int]):
# if item is an int, the caller is trying to access a single Token, which is not allowed
assert isinstance(
item, slice
), "You are not authorised to access a `Token` from a `SpanPointer`"
# Send the command
obj_id = self.owner.send_command(
recipient=self.location, cmd_name="__getitem__", target=self, args_=(item,), kwargs_={}
)
# we create a SpanPointer from the obj_id
span = SpanPointer(location=self.location, id_at_location=obj_id, owner=self.owner)
return span
def as_doc(self):
"""Create a `Doc` object with a copy of the `Span`'s tokens.
Returns:
The new `Doc` copy (or id to `Doc` object) of the span.
"""
# Avoid circular imports
from .doc_pointer import DocPointer
# Send the command
doc_id = self.owner.send_command(
recipient=self.location, cmd_name="as_doc", target=self, args_=tuple(), kwargs_={}
)
# Create a DocPointer from doc_id
doc = DocPointer(location=self.location, id_at_location=doc_id, owner=sy.local_worker)
return doc
| true
|
33b6c6f27bcc2cea48c3537844e64223486a258d
|
Python
|
XyK0907/for_work
|
/LeetCode/Stack/20_Stack_valid_parentheses.py
|
UTF-8
| 1,928
| 3.171875
| 3
|
[] |
no_license
|
from pythonds.basic import Stack
class Solution(object):
def isValid_stack(self, s): # solution using the imported Stack module
"""
:type s: str
:rtype: bool
"""
opens = '([{'
closes = ')]}'
parstack = Stack()
balance = True
for each in s:
if each in '([{':
parstack.push(each)
else:
if parstack.isEmpty():
balance = False
else:
top = parstack.pop()
if opens.index(top) != closes.index(each):
balance = False
if balance and parstack.isEmpty():
return True
else:
return False
def isValid(self, s): # solution without the Stack module
"""
:type s: str
:rtype: bool
"""
# opens = '([{'
# closes = ')]}'
pairs = {'(':')', '[':']', '{':'}'}
parstack = []
for each in s:
if each in '([{':
parstack.append(each)
else:
if len(parstack) == 0:
return False
else:
# if opens.index(top) != closes.index(each):
if pairs[parstack.pop()] != each:
return False
return len(parstack) == 0
def isValid_similar(self, s):
"""
time O(n)
space O(n)
:param s:
:return:
"""
stack = []
dict = {"]":"[", "}":"{", ")":"("}
for char in s:
if char in dict.values():
stack.append(char)
elif char in dict.keys():
if stack == [] or dict[char] != stack.pop():
return False
else:
return False
return stack == []
if __name__=="__main__":
solution = Solution()
print(solution.isValid("{[]}"))
| true
|
c6ca73895fcc0314d19c3713f2b02fcdbb29dfe1
|
Python
|
JiahongHe/WeCare
|
/Pulse.py
|
UTF-8
| 876
| 2.921875
| 3
|
[] |
no_license
|
import mraa
import time
import math
#Initialization
led_pin_number=6
led = mraa.Gpio(led_pin_number)
led.dir(mraa.DIR_OUT)
last_signal = [0]
current_signal = [0]
last_time = [0]
current_time = [0]
BPM = [0]
#Set heart rate test function
def test_pulse():
#Read raw data from pulse sensor
Pulse = float(PulSensor.read())
current_signal[0] = Pulse
#Calculate BPM (Beat per minute)
if current_signal[0]>550 and last_signal[0] <550:
current_time[0] = int(time.time()*1000)
BPM[0] = 60000/(current_time[0]-last_time[0])
last_time[0] = current_time[0]
last_signal[0] = current_signal[0]
if Pulse > 550:
led.write(1)
else:
led.write(0)
time.sleep(0.02)
print(BPM[0])
#set Pulse sensor
PulSensor = mraa.Aio(1)
try:
while(1):
test_pulse()
except KeyboardInterrupt:
led.write(0)
exit()
| true
|
7a2e8e0aca37985458dc15e106d25b56cc4af93f
|
Python
|
ykumards/Algorithms
|
/arrays/ZeroMatrix.py
|
UTF-8
| 738
| 3.921875
| 4
|
[
"WTFPL"
] |
permissive
|
def clearZeros(Mat, row, col):
"""
Given a matrix and the row and column index
This will clear out all the elements in the
row and column. Returns the matrix in the end
"""
M = len(Mat)
N = len(Mat[0])
Mat[row][:] = [0 for _ in xrange(N)]
for i in xrange(M):
Mat[i][col] = 0
return Mat
if __name__ == "__main__":
zero_arr = []
Mat = [[1,2,0], [4,5,6],[0,2,3]]
print "Original Matrix: "
print Mat
M = len(Mat)
N = len(Mat[0])
for i in xrange(M):
for j in xrange(N):
if Mat[i][j] == 0:
zero_arr.append((i,j))
for idx in zero_arr:
Mat = clearZeros(Mat, idx[0], idx[1])
print "Cleared Matrix: ",
print Mat
| true
|
22db792793b36e0fa61ba1f92ea32ab8d69f0ea3
|
Python
|
abhishektayal/casebook
|
/parse2.py
|
UTF-8
| 4,261
| 2.5625
| 3
|
[] |
no_license
|
import cass
import re
import time
from threading import Thread
from Queue import Queue
from urllib import urlopen
q = Queue()
workers = []
dict_photo_url={}
def user_frnd_post(name):
#print "in function"
file = open('file.txt', 'r')
for line in file:
#print "line is:",line
match = line.split(' ', 1)
#print match[0],"------->",match[1]
if match:
if not match[0] in 'photo':
#print"in not photo- extracted:",match[0]
if match[0] in 'user':
#print "in user"
stat= {}
MATCH= match[1].split(' ',1)
NEW=MATCH[1].split('"',4)
#print NEW
password= NEW[3]
stat['actual_name']=NEW[1]
stat['password']=password
#print 'actual name is:',NEW[1]
#print 'password is:',password
#print 'user name is:',MATCH[0]
cass.save_user(MATCH[0],stat)
if match[0] in 'wallpost':
stat1 ={}
#print 'in wallpost(), val is ',val
MATCH= match[1].split(' ',1)
#print 'user is',MATCH[0]
post= MATCH[1].split('\n',1)
stat1['body']=post[0][1:-1]
#print 'body is:',stat1['body']
cass.save_post(MATCH[0],stat1)
else:
MATCH= match[0].split(' ',1)
cass.add_friend(match[0],match[1][0:-1])
def downloader(worker_number):
#print "starting thread",worker_number
while True:
url = q.get()
#print q.qsize()
data = urlopen(url).read()
data_uri = data.encode("base64").replace("\n", "")
dict_photo_url[url]=data_uri
#print "I am worker ",worker_number,"- downloaded image from url :"
#print"data is :",dict_photo_url[url]
q.task_done()
def forking(num):
t_pool = Thread(target=downloader, args=(1,))
t_pool.daemon = True
t_pool.start()
def photo_main(name):
#print "111111111"
file = open('file.txt', 'r')
file2 = open('file.txt', 'r')
#print 'in photo()'
for line in file:
#print "line is:",line
match = line.split(' ', 1)
if match[0] in 'photo':
MATCH= match[1].split(' ',1)
photo_url=MATCH[1].split('\n',1)
#print "url is:",photo_url[0]
if photo_url[0] in dict_photo_url.keys():
#print"found url in dict no need to download"
continue
else:
dict_photo_url[photo_url[0]]='url'
#print"sending to worker, q length is",q.qsize()
q.put(photo_url[0])
q.join()
#q.join()
#print "before going in saving photo................."
for line in file2:
match = line.split(' ', 1)
#print "For main aa gaya....."
if match[0] in 'photo':
#print "Aaja mamu....."
MATCH= match[1].split(' ',1)
photo_url=MATCH[1].split('\n',1)
#print 'url is:',MATCH[0], dict_photo_url[photo_url[0]]
cass.save_photo(MATCH[0], dict_photo_url[photo_url[0]] ,photo_url[0] )
def main():
print time.asctime()
u = urlopen('http://enl.usc.edu/~cs694/casebook/config.txt')
FILE_local = open('file.txt', 'w')
FILE_local.write(u.read())
FILE_local.close()
t1=Thread(target=user_frnd_post,args=("thread 1",))
t2=Thread(target=photo_main,args=("thread 2",))
t3=Thread(target=forking,args=("thread 3",))
t3.start()
#print"there!!!!!!!!!!"
t1.start()
t2.start()
#print workers
#print dict_photo_url.keys()
#user_frnd_post(f)
#print"i am here"
t1.join()
#print "in b/w"
t2.join()
t3.join()
print time.asctime()
return
if __name__ == "__main__":
main()
| true
|
ebed81e100b25012e1c6808b886763d9fc26c525
|
Python
|
wmpg/WesternMeteorPyLib
|
/wmpl/Utils/OptimizePointingsFOV.py
|
UTF-8
| 15,219
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
""" Given the locations and pointings of camera, optimize their pointings so they have a maximum overlap
at the given range of heights.
"""
from __future__ import print_function, division, absolute_import
import copy
import datetime
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
from wmpl.TrajSim.ShowerSim import initStationList, plotStationFovs
from wmpl.Utils.TrajConversions import datetime2JD
from wmpl.Utils.Math import estimateHullOverlapRatio
from wmpl.Utils.PyDomainParallelizer import domainParallelizer
# def _volumeRun(pointings, station_list, fixed_cameras, jd, max_height, min_height, niter=1):
# """ Given the ooptimization parameters, calculate the common volume. The functions returns a negative
# value for the volume because the minimum is being searched.
# """
# # Pointings index
# k = 0
# # Set the estimated FOV centre for moving cameras
# for i, stat in enumerate(station_list):
# if not fixed_cameras[i]:
# # Set the azimuth
# stat.azim_centre = pointings[k]
# k += 1
# # Set the elevation
# stat.elev_centre = pointings[k]
# k += 1
# volumes = []
# # Calculate the mean of several common volume runs
# for i in range(niter):
# vol = stationsCommonVolume(station_list, fixed_cameras, jd, max_height, min_height)
# volumes.append(vol)
# vol_mean = np.mean(volumes)
# print()
# print(np.degrees(pointings))
# print(vol_mean)
# return -vol_mean
def stationsCommonVolume(station_list, fixed_cameras, jd, max_height, min_height):
""" Calculates the common volume between the stations FOVs. """
common_volume = 0
# Go through all fixed stations
for i, stat_fixed in enumerate(station_list):
# Check if this is the fixed camera
if fixed_cameras[i]:
# Go through all non-fixed stations
for j, stat_moving in enumerate(station_list):
# Check if this is the moving camera
if not fixed_cameras[j]:
### CALCULATE ECI COORDINATES OF FIXED STATION ###
##########################################################################################
# Get ECI coordinates of the FOV at the given maximum height
fixed_top_eci_corners = stat_fixed.fovCornersToECI(jd, max_height)
# Get ECI coordinates of the FOV at the given minimum height
fixed_bottom_eci_corners = stat_fixed.fovCornersToECI(jd, min_height)
fixed_eci_corners = np.array(fixed_top_eci_corners + fixed_bottom_eci_corners)
##########################################################################################
### CALCULATE ECI COORDINATES OF MOVING STATION ###
##########################################################################################
# Get ECI coordinates of the FOV at the given maximum height
moving_top_eci_corners = stat_moving.fovCornersToECI(jd, max_height)
# Get ECI coordinates of the FOV at the given minimum height
moving_bottom_eci_corners = stat_moving.fovCornersToECI(jd, min_height)
moving_eci_corners = np.array(moving_top_eci_corners + moving_bottom_eci_corners)
##########################################################################################
# Calculate the common volume between the fixed and the moving camera
common_v = estimateHullOverlapRatio(fixed_eci_corners, moving_eci_corners, volume=True, niter=1000)
common_volume += common_v
return common_volume
# def optimizePointingsFOV(station_list, fixed_cameras, min_height, max_height):
# """ Optimize the pointings of the cameras by finding the pointings where the common volume of the sky at
# given height is maximized. It is assumed that one of the cameras are fixes, while all other can be
# moved.
# """
# # Check that at least one camera is fixed
# if not (True in fixed_cameras):
# print("""At least one camera must be fixed! Check the fixed_cameras variable, at least one entry
# there should be True.""")
# return station_list
# # Check that at least one camera can be moved
# if not (False in fixed_cameras):
# print("""At least one camera must be non-fixed! Check the fixed_cameras variable, at least one entry
# there should be False.""")
# return station_list
# # Use the current Julian date as the reference time (this is just used to convert the coordinates to ECI,
# # it has no operational importance whatsoever).
# jd = datetime2JD(datetime.datetime.now())
# # Construct the initial parameters list
# p0 = np.array([[stat.azim_centre, stat.elev_centre] for (i, stat) in enumerate(station_list) \
# if not fixed_cameras[i]]).ravel()
# # Set the bounds for every parameter (azimuth from 0 to 2pi, elevation from 0 to pi)
# bounds = [[0, np.pi] if i%2 else [0, 2*np.pi] for i in range(len(p0))]
# # Find the pointings with the largest common volume
# res = scipy.optimize.minimize(_volumeRun, p0, bounds=bounds, args=(station_list, fixed_cameras, jd, max_height, min_height))
# print(res)
# print(np.degrees(res.x))
def _computePointing(azim_centre_orig, elev_centre_orig, stat_moving, i, jd, \
d_range, elev_ind, d_elev, azim_ind, d_azim):
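# note: station_list, max_height and min_height are not parameters of this
# helper; it relies on the module-level globals defined under __main__ below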
# Calculate the azimuth of centre
azim = (azim_centre_orig + d_azim)%(2*np.pi)
# Calculate the elevation of the centre
elev = elev_centre_orig + d_elev
if elev > np.pi/2:
elev = (np.pi/2 - elev)%(np.pi/2)
if elev < 0:
elev = np.abs(elev)
# Set the new centre to the station
stat_moving.azim_centre = azim
stat_moving.elev_centre = elev
# Assign the changed parameter to the moving camera
station_list[i] = stat_moving
# Estimate the volume only for this moving camera
fix_cam = [True]*len(station_list)
fix_cam[i] = False
# Estimate the intersection volume
vol = stationsCommonVolume(station_list, fix_cam, jd, max_height, min_height)
# Print status messages
print()
print('Azim: {:d}/{:d}, elev: {:d}/{:d}'.format(azim_ind + 1, \
len(d_range), elev_ind + 1, len(d_range)))
print('Azim {:.2f} elev {:.2f} vol {:e}'.format(np.degrees(azim), np.degrees(elev), vol))
return [azim, elev, vol]
def explorePointings(station_list, fixed_cameras, min_height, max_height, moving_ranges, steps, parallel_jobs = 1):
""" Given the list of cameras, a range of heights and a range of possible movements for the camera,
construct a map of volume overlaps for each camera position. The map will be saved as an image.
Arguments:
station_list: [list] A list of SimStation objects.
fixed_cameras: [list] A list of bools indicating if the camera is fixed or if it can be moved to
optimize the overlap.
min_height: [float] Minimum height of the FOV polyhedron (meters).
max_height: [float] Maximum height of the FOV polyhedron (meters).
moving_ranges: [list] A list of possible movement for each non-fixed camera (degrees).
steps: [int] Steps to take inside the moving range. The map will thus have a resolution of
range/steps.
parallel_jobs: [int] number of parallel jobs. Default: 1
Return:
None
"""
station_list = copy.deepcopy(station_list)
k = 0
# Use the current Julian date as the reference time (this is just used to convert the coordinates to ECI,
# it has no operational importance whatsoever).
jd = datetime2JD(datetime.datetime.now())
# Go through all moving cameras
for i, stat_moving in enumerate(station_list):
if not fixed_cameras[i]:
# Original centre pointing
azim_centre_orig = stat_moving.azim_centre
elev_centre_orig = stat_moving.elev_centre
# Get the range of movements for this camera
mv_range = np.radians(moving_ranges[k])
k += 1
volume_results = []
d_range = np.linspace(-mv_range/2.0, +mv_range/2, steps)
# Make a grid of movements in list of parameters that will be passed to each
# _computePointing parallel instance
pointings = []
for elev_ind, d_elev in enumerate(d_range):
for azim_ind, d_azim in enumerate(d_range):
pointings.append([azim_centre_orig, elev_centre_orig, stat_moving, i, jd, \
d_range, elev_ind, d_elev, azim_ind, d_azim])
print()
print("-> Pointings to compute: {:d}".format(len(pointings)))
# run parallel jobs to compute pointings
volume_results = domainParallelizer(pointings, _computePointing, cores=parallel_jobs)
print()
print("-> Pointings computed: {:d}".format(len(volume_results)))
volume_results = np.array(volume_results)
azims = volume_results[:, 0].reshape(len(d_range), len(d_range))
elevs = volume_results[:, 1].reshape(len(d_range), len(d_range))
# Select only the volumes
vols = volume_results[:, 2].reshape(len(d_range), len(d_range))
# Find the index of the largest volume
vol_max = np.unravel_index(vols.argmax(), vols.shape)
# Print the largest overlapping volume
print('MAX OVERLAP:')
print('Azim {:.2f} elev {:.2f} vol {:e}'.format(np.degrees(azims[vol_max]), np.degrees(elevs[vol_max]), vols[vol_max]))
plt.figure()
plt.imshow(vols/1e9, extent=np.degrees([azim_centre_orig + np.min(d_range), azim_centre_orig + np.max(d_range), elev_centre_orig + np.max(d_range), \
elev_centre_orig + np.min(d_range)]))
plt.gca().invert_yaxis()
plt.xlabel('Azimuth (deg)')
plt.ylabel('Elevation (deg)')
plt.colorbar(label='Common volume (km^3)')
plt.savefig('fov_map_' + stat_moving.station_id + '_ht_range_' + str(min_height) + '_' + str(max_height) + '.png', dpi=300)
#plt.show()
plt.clf()
plt.close()
if __name__ == "__main__":
### STATION PARAMETERS ###
##########################################################################################################
# Number of stations in total
n_stations = 2
# Geographical coordinates of stations (lat, lon, elev, station_id) in degrees and meters
stations_geo = [
[43.26420, -80.77209, 329.0, 'tavis'], # Tavis
[43.19279, -81.31565, 324.0, 'elgin'] # Elgin
]
### CAMO WIDE ###
#################
# Azimuths of centre of FOVs (degrees)
azim_fovs = [326.75, 344.34]
# Elevations of centre of FOVs (degrees)
elev_fovs = [45.62, 44.6]
# # Azimuths of centre of FOVs (degrees)
# azim_fovs = [338.823, 1.891]
# # Elevations of centre of FOVs (degrees)
# elev_fovs = [45.104, 46.344]
# Cameras FOV widths (degrees)
fov_widths = [33.9, 33.6]
# Cameras FOV heights (degrees)
fov_heights = [33.9, 33.6]
# If the camera FOV is fixed, it should have True at its index, and False if it can be moved to optimize
# the overlap
fixed_cameras = [True, False]
# Height range to optimize for (kilometers)
min_height = 70
max_height = 120
#################
# ### CAMO MIRRORS ###
# #################
# # Azimuths of centre of FOVs (degrees)
# azim_fovs = [335.29, 358.3]
# # Elevations of centre of FOVs (degrees)
# elev_fovs = [43.912, 46.643]
# # Cameras FOV widths (degrees)
# fov_widths = [40.0, 39.0]
# # Cameras FOV heights (degrees)
# fov_heights = [40.0, 39.0]
# # If the camera FOV is fixed, it should have True at its index, and False if it can be moved to optimize
# # the overlap
# fixed_cameras = [False, True]
# # Height range to optimize for (kilometers)
# min_height = 70
# max_height = 120
# #################
# ### EMCCD ###
# #################
# # Azimuths of centre of FOVs (degrees)
# azim_fovs = [315, 16.75]
# # Elevations of centre of FOVs (degrees)
# elev_fovs = [63.5, 65.5]
# # Cameras FOV widths (degrees)
# fov_widths = [14.5, 14.5]
# # Cameras FOV heights (degrees)
# fov_heights = [14.5, 14.5]
# # If the camera FOV is fixed, it should have True at its index, and False if it can be moved to optimize
# # the overlap
# fixed_cameras = [False, True]
# # Height range to optimize for (kilometers)
# min_height = 80
# max_height = 100
# #################
# ### ROMULAN ###
# #################
# # Azimuths of centre of FOVs (degrees)
# azim_fovs = [327.0, 4.0]
# # Elevations of centre of FOVs (degrees)
# elev_fovs = [56.5, 60.75]
# # Cameras FOV widths (degrees)
# fov_widths = [29.5, 29.5]
# # Cameras FOV heights (degrees)
# fov_heights = [23.77, 23.77]
# # If the camera FOV is fixed, it should have True at its index, and False if it can be moved to optimize
# # the overlap
# fixed_cameras = [False, True]
# # Height range to optimize for (kilometers)
# min_height = 70
# max_height = 120
# #################
# How much each non-fixed camera can be moved on each axis (degrees)
moving_ranges = [40]
# Steps of movement to explore
steps = 21
# How many parallel jobs computing pointings
parallel_jobs = multiprocessing.cpu_count() # use all available CPUs
##########################################################################################################
# Calculate heights in meters
min_height *= 1000
max_height *= 1000
# Init stations data to SimStation objects
station_list = initStationList(stations_geo, azim_fovs, elev_fovs, fov_widths, fov_heights)
# Show current FOV overlap
plotStationFovs(station_list, datetime2JD(datetime.datetime.now()), min_height, max_height)
# Do an assessment for the whole range of given heights
explorePointings(station_list, fixed_cameras, min_height, max_height, moving_ranges, steps, parallel_jobs)
# # Do the analysis for ranges of heights
# # Height step in kilometers
# height_step = 5
# height_step *= 1000
# for ht_min in range(min_height, max_height - height_step + 1, height_step):
# print(ht_min, ht_min + height_step)
# # Make a map of pointings and common volumes for all given steps in the moving range
# explorePointings(station_list, fixed_cameras, ht_min, ht_min + height_step, moving_ranges, steps)
| true
|
58312fc909e172788f4cfcfeccb8177b8994dcd6
|
Python
|
MReneBrown/Python-Course
|
/Sorted_Function.py
|
UTF-8
| 516
| 3.484375
| 3
|
[] |
no_license
|
sales_prices = [
100,
83,
220,
40,
100,
400,
10,
1,
3
]
# sales_prices.sort()
# print(sales_prices)
# [1, 3, 10, 40, 83, 100, 100, 220, 400]
# sorted_list = sales_prices.sort()
# print(sorted_list)
# None
# sorted_list = sorted(sales_prices)
# print(sorted_list)
# print(sales_prices)
# [1, 3, 10, 40, 83, 100, 100, 220, 400]
# [100, 83, 220, 40, 100, 400, 10, 1, 3]
sorted_list = sorted(sales_prices, reverse=True)
print(sorted_list)
# [400, 220, 100, 100, 83, 40, 10, 3, 1]
| true
|
6f9a6d82248fb0522753330c9ce7c7bc9ceb3412
|
Python
|
rlawjdghks7/HistoDram
|
/utils/Logger.py
|
UTF-8
| 2,770
| 2.515625
| 3
|
[] |
no_license
|
""" Implements Logger class for tensorflow experiments. """
import tensorflow as tf
import os
class Logger(object):
def __init__(self, log_dir='/home/aamomeni/research/momena/tests/experiments', sess=None, summary_ops={}, var_list=[],
global_step=None, eval_ops={}, n_verbose=10):
self.session = sess
# folders
self.log_dir = log_dir
self.checkpoint_path, self.summary_path = self.create_directories(log_dir)
# file writers
self.writers = {
'train': tf.summary.FileWriter(os.path.join(self.summary_path,'train'),self.session.graph, flush_secs= 120),
'test': tf.summary.FileWriter(os.path.join(self.summary_path,'test')),
'val': tf.summary.FileWriter(os.path.join(self.summary_path,'val'))
}
# saver
self.global_step = global_step
if var_list == []:
self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
else:
self.saver = tf.train.Saver(var_list, keep_checkpoint_every_n_hours=1)
# summaries
self.summary_ops = summary_ops
self.eval_ops = eval_ops
self.merged_op = tf.summary.merge_all()
# step counter
self.step = 0
self.n_verbose = n_verbose
def log(self, writer_id, feed_dict):
""" Logs performance using either 'train', 'test' or 'val' writer"""
summaries = self.session.run(self.merged_op, feed_dict=feed_dict)
self.writers[writer_id].add_summary(summaries,self.step)
# print ('\n------ Step %s ------' % (self.step))
# for key in self.eval_ops.keys():
# val = self.session.run(self.eval_ops[key],feed_dict)
# print ('%s \t %s' %(key,val))
def create_directories(self, log_dir):
checkpoint_path = os.path.join(log_dir, 'checkpoints')
summary_path = os.path.join(log_dir, 'summaries')
if not os.path.exists(log_dir):
os.mkdir(log_dir)
os.mkdir(checkpoint_path)
os.mkdir(summary_path)
print ('\n\nLogging to <<%s>>.\n\n' % log_dir )
return checkpoint_path, summary_path
def save(self):
self.saver.save(self.session, os.path.join(self.checkpoint_path,'checkpoint'),
global_step=self.global_step)
def restore(self, checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print (ckpt)
self.saver.restore(self.session, ckpt.model_checkpoint_path)
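# A minimal usage sketch (an assumption, not part of the original module): it builds a
# tiny TF1 graph with one variable and one scalar summary; the log_dir is hypothetical.
if __name__ == '__main__':
    w = tf.Variable(0.0, name='w')            # Saver needs at least one variable
    loss = tf.placeholder(tf.float32, name='loss')
    tf.summary.scalar('loss', loss)           # picked up by merge_all() inside Logger
    with tf.Session() as sess:
        logger = Logger(log_dir='/tmp/logger_demo', sess=sess)
        sess.run(tf.global_variables_initializer())
        for step in range(10):
            logger.step = step
            logger.log('train', feed_dict={loss: 1.0 / (step + 1)})
        logger.save()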
| true
|
51b504e622b5f91b5997b887108e9e8e3972fddb
|
Python
|
yuliasimanenko/Python
|
/lab5/functions.py
|
UTF-8
| 1,109
| 3.0625
| 3
|
[] |
no_license
|
import requests
import random
from lxml import html
def get_names(count):
try:
with open('names/names.txt', 'r', encoding="utf-8") as file_open:
list_of_words = file_open.read().split("\n")
random.shuffle(list_of_words)
return list_of_words[:count]
except FileNotFoundError:
print("Can't open the file. File not found")
return -1
except PermissionError as e:
print(f"Permission denied. You have no rights. {e}")
return -1
except Exception as e:
print(f"Can't open the file with {e}.")
return -1
def choose_name(list_names):
return random.choice(list_names)
def get_img_link(name):
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'}
url = f'https://yandex.ru/images/search?text={name}'
r = requests.get(url, headers=headers)
tree = html.fromstring(r.text.encode('utf-8'))
list_of_img = ['http:' + a for a in tree.xpath("//img[@class='serp-item__thumb justifier__thumb']/@src")]
return random.choice(list_of_img)
| true
|
f20813a0df43b91d2b8479b961e8ebbccbff5a59
|
Python
|
ZoltanMG/holbertonschool-higher_level_programming
|
/0x0A-python-inheritance/1-my_list.py
|
UTF-8
| 283
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/python3
""" class MyList that inherits from list """
class MyList(list):
""" My subclass """
def print_sorted(self):
""" Print sorted """
new_list = MyList()
for i in self:
new_list.append(i)
print(sorted(new_list))
| true
|
dcf5afae43e6ebfa72c15f3eee329d56952ceb4c
|
Python
|
nish235/PythonPrograms
|
/Nov09/02String1st2Last2.py
|
UTF-8
| 123
| 3.171875
| 3
|
[] |
no_license
|
def string(st1):
if len(st1) < 2:
return ''
return st1[0:2] + st1[-2:]
print(string('Nishant'))
| true
|
49d1f591fd2ca2620b019497bcce9b5fb3eaa42f
|
Python
|
ECMora/SoundLab
|
/sound_lab_core/Segmentation/SegmentManager.py
|
UTF-8
| 15,477
| 2.609375
| 3
|
[] |
no_license
|
from PyQt4.QtCore import QObject, pyqtSignal
import numpy as np
from duetto.audio_signals import AudioSignal
from sound_lab_core.Segmentation.Adapters import ManualDetectorAdapter
from sound_lab_core.Clasification.Adapters import ManualClassifierAdapter
from sound_lab_core.Elements.OneDimensionalElements.OneDimensionalElement import OneDimensionalElement
from utils.Utils import SegmentationThread, MeasurementThread
class SegmentManager(QObject):
"""
    Manage the parameter measurement and classification over a group of segments.
Provide a table interface for segments parameter measurement and classification.
    Allows saving the measurements made on segments to the db.
"""
# region SIGNALS
# signal raised when a parameter is measured on a segment
# raise the segment index and the list of visual items associated
segmentVisualItemAdded = pyqtSignal(int, list)
# signal raised when the segmentation has finished
segmentationFinished = pyqtSignal()
# signal raised when the measurement of parameters and classification has finished
measurementsFinished = pyqtSignal()
# signal raised while detection is been made. Raise the percent of detection progress.
detectionProgressChanged = pyqtSignal(int)
# endregion
def __init__(self):
QObject.__init__(self)
# the classifier object that would be used on segments classification
self._classifier = None
# the detector adapter object that would be used on segments detection
self._detector = None
# the parameter measurer list
self._measurerList = []
# the detected elements
self._elements = []
# the segmentation_thread to perform the segmentation with
self.segmentation_thread = SegmentationThread(parent=None)
self.segmentation_thread.finished.connect(self._get_elements)
# the thread to perform the measurements with
self.measurement_thread = MeasurementThread(segment_manager=self)
self.measurement_thread.finished.connect(lambda: self.measurementsFinished.emit())
# the signal in which would be detected the elements
self._signal = None
# stores the measured parameters of the detected elements
# has dimensions len(elements) * len(measurerList)
self.measuredParameters = np.array([])
# stores the classification data that are present in the table of parameters
# list of ClassificationData
self.classificationTableData = []
# region Properties
@property
def elements(self):
return self._elements
@elements.setter
def elements(self, elements_list):
"""
:param elements_list: Accepts a list of One dimensional elements or a list of tuples
:return:
"""
self._elements = elements_list
self.recompute_element_table()
@property
def parameters(self):
return self._measurerList
@parameters.setter
def parameters(self, new_parameter_measurers):
self._measurerList = new_parameter_measurers
self.recompute_element_table()
self.measure_parameters()
self.measurementsFinished.emit()
def recompute_element_table(self):
"""
Recompute the shape of the elements parameter table
when a change is made on the amount of elements or the amount of
measured parameters
:return:
"""
# clear the parameters
rows, cols = len(self.elements), len(self.parameters)
self.measuredParameters = np.zeros(rows * cols).reshape((rows, cols))
self.classificationTableData = [None for _ in self._elements]
self.measurementsFinished.emit()
@property
def classificationColumnNames(self):
"""
The names of the columns of classification data
:return:
"""
return [self.tr(u"Family"), self.tr(u"Genera"), self.tr(u"Specie")]
@property
def parameterColumnNames(self):
"""
The names of the columns of parameters.
:return:
"""
return [x.getName() for x in self.parameters]
@property
def columnNames(self):
return self.parameterColumnNames + self.classificationColumnNames
@property
def detector_adapter(self):
return self._detector if self._detector is not None else ManualDetectorAdapter()
@detector_adapter.setter
def detector_adapter(self, value):
self._detector = value
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, new_signal):
"""
Modify and update the internal variables that uses the signal.
:param new_signal: the new AudioSignal
:raise Exception: If signal is not of type AudioSignal
"""
if new_signal is None or not isinstance(new_signal, AudioSignal):
raise Exception("Invalid assignation value. Must be of type AudioSignal")
self._signal = new_signal
@property
def classifier_adapter(self):
return self._classifier if self._classifier is not None else ManualClassifierAdapter()
@classifier_adapter.setter
def classifier_adapter(self, classifier):
self._classifier = classifier
@property
def rowCount(self):
return len(self.elements)
@property
def columnCount(self):
return len(self.parameters) + len(self.classificationColumnNames)
# endregion
# region Classification
def get_segment_classification(self, segment_index):
"""
Returns the classification value of the segment at index segment_index
:param segment_index: the index of the segment to ask for classification value
:return:
"""
if not 0 <= segment_index < len(self.elements):
raise Exception("Index out of range")
return self.classificationTableData[segment_index]
def set_manual_elements_classification(self, indexes_list, classification):
"""
Set the elements classification manually.
:param indexes_list: the indexes of classified elements
:param classification: the value for classification Data.
the values are applied to all the elements that have indexes in indexes_list.
:return:
"""
indexes_list = [index for index in indexes_list if 0 <= index < self.rowCount]
for i in indexes_list:
self.update_elements_visual_items(self.classifier_adapter, i, classification)
self.classificationTableData[i] = classification
self.measurementsFinished.emit()
def classify_elements(self):
"""
Execute the automatic classification for all the detected
elements with the current selected classifier.
:return:
"""
# get the classification parameters and classifier
classifier = self.classifier_adapter.get_instance()
classifier.parameters = self.parameters
# classify each element
for i in xrange(len(self.elements)):
# parameter adapter, value
parameter_vector = [self.measuredParameters[i, j] for j, x in enumerate(self.parameters)]
self._classify_element(element_index=i, classifier=classifier, parameter_vector=parameter_vector)
self.measurementsFinished.emit()
def _classify_element(self, element_index, classifier=None, parameter_vector=None):
"""
Helper method that classify a single element.
:param element_index: The index of the element to classify
:param classifier: the classifier instance if any (If None the classifier would be computed)
:param parameter_vector: the vector of parameters to supply to the classifier
(If None the vector would be computed)
:return:
"""
classifier = classifier if classifier is not None else self.classifier_adapter.get_instance()
parameter_vector = parameter_vector if parameter_vector is not None else \
[self.measuredParameters[element_index, j] for j in xrange(len(self.parameters))]
classification_value = classifier.classify(self.elements[element_index], parameter_vector)
self.classificationTableData[element_index] = classification_value
# update visualization
self.update_elements_visual_items(self.classifier_adapter, element_index, classification_value)
def update_elements_visual_items(self, parameter, element_index, value):
"""
Method that raises the signal segmentVisualItemAdded
with the according visual items founded for an specific
detected element.
:return:
"""
visual_items = parameter.get_visual_items()
if not visual_items:
return
# copy the visual items list to add every item on each detected element
copied_visual_items = [x.clone() for x in visual_items]
for item in copied_visual_items:
item.set_data(self.signal, parameter, self.elements[element_index], value)
self.segmentVisualItemAdded.emit(element_index, copied_visual_items)
# endregion
# region Detection
def delete_elements(self, start_index, end_index):
"""
Removes elements from the detected
:param start_index: start index of removed elements
:param end_index: end index of removed elements
:return:
"""
self.measuredParameters = np.concatenate((self.measuredParameters[:start_index],
self.measuredParameters[end_index + 1:]))
self.classificationTableData = self.classificationTableData[:start_index] +\
self.classificationTableData[end_index + 1:]
self._elements = self.elements[:start_index] + self.elements[end_index+1:]
self.measurementsFinished.emit()
def add_element(self, index, index_from, index_to):
"""
Add a new element at index supplied. Execute the parameter measurement over it
        :param index_from: the start index of the new element in signal data values
        :param index_to: the end index of the new element in signal data values
        :param index: the index to insert the element at
:return:
"""
# index could be equal to rowCount if insert after all previous elements
if not 0 <= index <= self.rowCount:
raise IndexError()
element = OneDimensionalElement(self.signal, index_from, index_to)
if self.rowCount == 0:
# the property would update the other variables
self.elements = [element]
else:
# add the element
self.classificationTableData.insert(index, None)
self._elements.insert(index, element)
self.measuredParameters = np.concatenate((self.measuredParameters[:index],
np.array([np.zeros(len(self.parameters))]),
self.measuredParameters[index:]))
# measure parameters
self._measure(element, index, raise_visual_items=True)
self._classify_element(index)
self.measurementsFinished.emit()
def detect_elements(self):
"""
Detect elements in the signal using the detector
"""
if self.segmentation_thread.isRunning():
return
detector = self.detector_adapter.get_instance(self.signal)
detector.detectionProgressChanged.connect(lambda x: self.detectionProgressChanged.emit(x))
self.segmentation_thread.detector = detector
self.segmentation_thread.start()
def _get_elements(self):
"""
update the elements when the segmentation thread finish
:return:
"""
self.elements = self.segmentation_thread.detector.elements
self.segmentationFinished.emit()
# endregion
# region Measurements
def measure_parameters_and_classify(self):
"""
Measure the parameters over the detected elements and
performs the classification of them
:return:
"""
if not self.measurement_thread.isRunning():
self.measurement_thread.start()
def measure_parameters(self):
"""
        Measure the current parameter list over all detected elements.
        :return:
"""
if len(self.parameters) == 0:
return
for i in xrange(self.rowCount):
self._measure(self.elements[i], i, raise_visual_items=True)
def _measure(self, element, index, measure_methods=None, raise_visual_items=False):
"""
Measure the list of parameters over the element supplied
        :param measure_methods: optional list of measurers to use (defaults to self.parameters)
        :param raise_visual_items: True if the visual items of the measurements
            should be raised, False otherwise. Efficiency improvement
:param element: The element to measure
:param index: The element index on the table of parameters
:return:
"""
if not 0 <= index < self.rowCount:
raise IndexError()
if measure_methods is None:
measure_methods = self.parameters
for j, parameter in enumerate(self.parameters):
try:
# measure param
self.measuredParameters[index, j] = measure_methods[j].measure(element)
# raise the parameter visual item if any
if raise_visual_items:
self.update_elements_visual_items(parameter, index, self.measuredParameters[index, j])
except Exception as e:
# if some error is raised do not set value
print("Error measure params " + e.message)
# endregion
def stop_processing(self):
"""
Stop the current on going processing if any.
The process could be a Segmentation or a Measurement
:return:
"""
if self.segmentation_thread.isRunning():
self.segmentation_thread.terminate()
if self.measurement_thread.isRunning():
self.measurement_thread.terminate()
def save_data_on_db(self):
"""
Save on db the data of detected segments, measurements and classification made.
:return:
"""
pass
def __getitem__(self, item):
if not isinstance(item, tuple) or not len(item) == 2:
raise Exception("Invalid Argument exception")
row, col = item
if row < 0 or row >= self.rowCount:
raise IndexError()
if col < len(self.parameters):
return self.measuredParameters[row, col]
# the order of the classification taxonomy may change
classification = self.classificationTableData[row]
if classification is None:
return self.tr(u"No Identified")
index = col - len(self.parameters)
if index == 0 and classification.family is not None:
return classification.family
elif index == 1 and classification.genus is not None:
return classification.genus
# specie
return self.tr(u"No Identified") if classification.specie is None else classification.specie
| true
|
45ca50a9e95e0b5580d163057a430ceedcf59eb0
|
Python
|
netogalindo/blog
|
/tests/unit/post_test.py
|
UTF-8
| 1,326
| 3.578125
| 4
|
[] |
no_license
|
from unittest import TestCase
from post import Post
class PostTest(TestCase):
def test_create_post(self):
p = Post("Test Title", "Test content.")
self.assertEqual("Test Title", p.title, "The title is not valid for the test") # self is TestCase
# This is checking if the defined title is equal to p.title
# self.assertEqual("Testx", p.title) This would make the test fail
self.assertEqual("Test content.", p.content, "The content is not valid for the test")
# The same, but with the content
        # This seems like a trivial test, but if you changed the __init__ method, this test would fail, and that
        # would remind you to check other parts of your software as well, to make sure nothing is broken.
        # The test would also fail if a programmer added a default
        # value to one of the parameters.
self.assertIsInstance(p.title, str, "Test Title should be a string")
self.assertIsInstance(p.content, str, "Test Content should be a string")
def test_json(self):
p = Post("Test Title", "Test content.")
expected = {"title": "Test Title", "content": "Test content."}
self.assertDictEqual(expected, p.json(), "The json was not created")
| true
|
30951416f39c023c0911ea0fb1ef2df1c804a652
|
Python
|
mkristien/trading_playground
|
/src/predictor/exponential_mean.py
|
UTF-8
| 1,534
| 3.359375
| 3
|
[] |
no_license
|
from predictor.model_interface import AbstractPredictor
class ExponentialMeanParametric(AbstractPredictor):
"""
    Compute the running exponential mean as:
        mean = mean * alpha + new_value * (1 - alpha)
    for a smoothing parameter alpha with 0 < alpha < 1.
    Alpha determines how fast the mean adapts to new prices:
    higher values of alpha respond more slowly to changes.
"""
def __init__(self, price_history, alpha):
super().__init__(price_history)
self.alpha = alpha
self.mean = 0
# set initial mean as arithmetic average
sum = 0
for price in price_history:
sum += price
self.mean = sum / len(price_history)
def __str__(self):
return "ExpMean alpha={}".format(self.alpha)
def feed_price(self, price):
self.price_history.append(price)
self.mean = self.mean * self.alpha + price * (1 - self.alpha)
def predicted_price(self):
"""
Assume stock price will regress to the running exponential mean
"""
return self.mean
class ExponentialMean9(ExponentialMeanParametric):
def __init__(self, price_history):
super().__init__(price_history, 0.9)
class ExponentialMean8(ExponentialMeanParametric):
def __init__(self, price_history):
super().__init__(price_history, 0.8)
class ExponentialMean99(ExponentialMeanParametric):
def __init__(self, price_history):
super().__init__(price_history, 0.99)
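# A quick usage sketch (assumes AbstractPredictor stores price_history; prices are illustrative):
if __name__ == '__main__':
    predictor = ExponentialMean9([100.0, 102.0, 101.0])  # initial mean = 101.0
    predictor.feed_price(110.0)
    # the mean moves only 10% of the way toward the new price: 101.0 * 0.9 + 110.0 * 0.1
    print(predictor, predictor.predicted_price())  # ExpMean alpha=0.9 ~101.9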
| true
|
f3d23c09228bc453b79af482ee1e86cbe0642f5d
|
Python
|
DorDob/rest-api-training-v
|
/test_greeting.py
|
UTF-8
| 634
| 3.3125
| 3
|
[] |
no_license
|
import requests
import pytest
NAME = "Doroa"  # constant
def test_greeting():
r = requests.get(pytest.HOST +'/greeting')
    assert r.status_code == 200  # check that we really receive a 200
assert r.json()['content'] == 'Hello, Stranger!'
def test_greeting_by_name():
r = requests.get(
pytest.HOST + '/greeting',
        params = {'name' : NAME}  # send query params that I want to add to the URL {key: value}
)
assert r.status_code == 200
    assert r.json()['content'] == f"Hello, {NAME}!"  # so we always operate on the object
| true
|
13248f1c6b4b232f1520b5e7a07b47bd9ec24a1c
|
Python
|
sebastienliu/leetcode_puzzles
|
/217_contain_duplicates.py
|
UTF-8
| 210
| 2.75
| 3
|
[] |
no_license
|
class Solution:
def containsDuplicate(self, nums):
d_nums = set()
for val in nums:
if val in d_nums:
return True
d_nums.add(val)
return False
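# Quick check with illustrative inputs:
if __name__ == '__main__':
    print(Solution().containsDuplicate([1, 2, 3, 1]))  # True
    print(Solution().containsDuplicate([1, 2, 3, 4]))  # False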
| true
|
97548bd46fcde818fc78affa77171d9fdd86218b
|
Python
|
J-Gottschalk-NZ/FRC_Panda
|
/06_round_up.py
|
UTF-8
| 350
| 4.09375
| 4
|
[] |
no_license
|
import math
# rounding function
def round_up(amount, round_to):
# rounds amount UP to the specified amount (round_to)
    return int(round_to * math.ceil(amount / round_to))
# Main Routine starts here
to_round = [2.75, 2.25, 2]
for item in to_round:
rounded = round_up(item, 1)
print("${:.2f} --> ${:.2f}".format(item,rounded))
| true
|
218e80dd74fbd67c992a9245fc936116349cf64d
|
Python
|
TheWewokaChronicle/downstream-node
|
/tests/test_utils.py
|
UTF-8
| 5,160
| 3
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from downstream_node import utils
class TestDistribution(unittest.TestCase):
def setUp(self):
self.list0 = [10, 10, 20, 20, 30, 30]
self.list1 = [20, 30]
self.list2 = [10, 10, 20, 30]
self.list3 = [10, 10, 20, 20, 20, 30, 30, 30]
self.dist0 = utils.Distribution(from_list=self.list0)
self.dist1 = utils.Distribution(from_list=self.list1)
def tearDown(self):
pass
def test_subtract(self):
dist2 = self.dist0.subtract(self.dist1)
self.assertEqual(sorted(dist2.get_list()), sorted(self.list2))
def test_subtract_negative(self):
dict4 = self.dist1.subtract(self.dist0)
self.assertEqual(dict4.counts[10], -2)
self.assertEqual(dict4.counts[20], -1)
self.assertEqual(dict4.counts[30], -1)
def test_add(self):
dist3 = self.dist0.add(self.dist1)
self.assertEqual(sorted(dist3.get_list()), sorted(self.list3))
def test_repr(self):
self.assertEqual(str(self.dist0), '{10: 2, 20: 2, 30: 2}')
def test_total(self):
self.assertEqual(sum(self.list0), self.dist0.get_total())
class TestMonopolyDistribution(unittest.TestCase):
def setUp(self):
self.distribution_base2_10000 = utils.MonopolyDistribution(
1024, 8192, 10000, 2)
self.distribution_base2_10000000000 = utils.MonopolyDistribution(
1024, 1000000000, 10000000000, 2)
self.distribution_base10_100000 = utils.MonopolyDistribution(
1000, 10000, 100000, 10)
self.distribution_base10_100000000000 = utils.MonopolyDistribution(
1000, 10000000000, 100000000000, 10)
def tearDown(self):
pass
def distribution_generic_test(self, distribution_object=None):
if (distribution_object is None):
return
dist = distribution_object.get_list()
possible_chunks = distribution_object.get_possible_chunks()
all_so_far_in = True
for possibility in possible_chunks:
            if (possibility not in dist):
all_so_far_in = False
else:
if (all_so_far_in is False):
# we are missing a chunk size
self.fail('A chunk size was skipped in the distribution')
self.assertLessEqual(sum(dist), distribution_object.total)
self.assertGreater(
sum(dist), distribution_object.total - distribution_object.min)
def test_monopoly_distribution(self):
# what are we actually trying to accomplish here?
# we want as wide a range of chunk sizes as possible
# we also don't want too many of one chunk size
# ok so what if we have a list of desired chunk sizes
# just the powers of 10 are simple. that will be our base
# with a minimum, obviously.
# so, total: 25,000
# min: 100
        # the possibilities will then be all the powers of 10
# between 100 and 25,000, or
# [100, 1000, 10000]
# the result will be
# [10000, 1000, 100, 10000, 1000, 100, 1000,
# 100, 1000, 100, 100, 100, 100, 100, 100, 100]
# there should not be any skipped values, so it should probably start
# iterating from the
# lowest value and then go up to increase the number of chunks
# the total should be less than the specified total by less than the
# smallest chunk
self.distribution_generic_test(self.distribution_base2_10000)
self.distribution_generic_test(self.distribution_base10_100000)
self.distribution_generic_test(self.distribution_base10_100000000000)
def test_get_possible_chunks(self):
class TestPossibleChunkVector(object):
def __init__(self, distribution, result):
self.distribution = distribution
self.result = result
vectors = [
TestPossibleChunkVector(
self.distribution_base2_10000, [
1024, 2048, 4096, 8192]), TestPossibleChunkVector(
self.distribution_base10_100000, [
1000, 10000]), TestPossibleChunkVector(
self.distribution_base10_100000000000, [
1000, 10000, 100000, 1000000, 10000000,
100000000, 1000000000, 10000000000])]
for v in vectors:
self.assertEqual(v.distribution.get_possible_chunks(), v.result)
def test_get_missing(self):
fresh = self.distribution_base2_10000.get_list()
left = fresh.pop(0)
right = fresh.pop()
missing = self.distribution_base2_10000.get_missing(fresh).get_list()
self.assertIn(left, missing)
self.assertIn(right, missing)
self.assertEqual(len(missing), 2)
fresh = self.distribution_base10_100000.get_list()
left = fresh.pop(0)
right = fresh.pop()
missing = self.distribution_base10_100000.get_missing(fresh).get_list()
self.assertIn(left, missing)
self.assertIn(right, missing)
self.assertEqual(len(missing), 2)
| true
|
63276a2186d7195fd89f64846d25bc5b435260ce
|
Python
|
k-harada/AtCoder
|
/ABC/ABC101-150/ABC139/A.py
|
UTF-8
| 226
| 3.046875
| 3
|
[] |
no_license
|
def main():
s = list(input())
t = list(input())
assert len(s) == len(t)
res = 0
for i, ss in enumerate(s):
if ss == t[i]:
res += 1
print(res)
if __name__ == "__main__":
main()
| true
|
71bbe30c6f5c82448da4e0da711819f30a3df535
|
Python
|
GanatheVyshnavi/Vyshnavi
|
/7th.py
|
UTF-8
| 74
| 3.53125
| 4
|
[] |
no_license
|
s=float(input('enter value of s'))
a=4*s
print("perimeter of square is",a)
| true
|
92f6026283faf20d4496a71234b3ef1ff81a2ab0
|
Python
|
sebastianedholme/DevOps18
|
/programmering_systemering/egna_ovningar/rate_calc.py
|
UTF-8
| 202
| 3.609375
| 4
|
[] |
no_license
|
print('Enter hours: ', end="")
hours = int(input())
print('Enter rate: ', end="")
rate = int(input())
#print(type(hours))
#print(type(rate))
pay = hours * rate
print("\nYour pay is: {}".format(pay))
| true
|
5375e70b4946efec1214f9e7d33c3c0e17f0a61a
|
Python
|
pengfeiyan/fluent_python
|
/three/3.5.py
|
UTF-8
| 976
| 3.953125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Author : yanpengfei
# @time : 2018/11/20 5:03 PM
# @File : 3.5.py
# Dict variants
from collections import OrderedDict
'''
Since Python 3.6 plain dicts preserve insertion order, but OrderedDict can return the first- or last-added item; a plain dict has no such methods.
OrderedDict's popitem() removes and returns the last-added item by default; popitem(last=False) removes and returns the first-added item.
'''
d = OrderedDict(dict(zip(['one','two','three'],[1,2,3])))
print(d)
print(d.popitem())
print(d)
from collections import Counter
l = [1,2,4,6,2,5,8,4,6,9,2,4,8]
ct = Counter(l)
print(ct)
'''
Ways to initialize a Counter
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
| true
|
3e718163297a3f2a2b072943fd0a4e58a838dedc
|
Python
|
vonzhou/Core-Python-Programming
|
/chapter8/For.py
|
UTF-8
| 295
| 3.515625
| 4
|
[] |
no_license
|
#P195
for eachLetter in 'vonzhou':
print 'current letter:', eachLetter
nameList = ['vonzhou', 'luyna', 'Yet', 'chown']
for name in nameList:
print name, 'is right'
for nameIndex in range(len(nameList)):
print 'By index:', nameList[nameIndex]
print len(nameList)
print range(len(nameList))
| true
|
4064169bb318d893e445ccd78dfe47d4d1985ad1
|
Python
|
Anand191/Thesis-Results
|
/Scripts/ch_plot.py
|
UTF-8
| 799
| 2.75
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
width = [8.5, 6, 4, 2.2]
height = [6.0, 4.5, 3.0, 1.5]
xy = (0,0)
ch = ['recursively enumerable', 'context-sensitive', 'context-free', 'regular']
h_offset = [0.35,0.35,0.35,0.5]
w_offset = [-1.72, -1.4, -1.01, -0.45]
ells = [Ellipse(xy, w, height[i]) for i, w in enumerate(width)]
#a = plt.subplot(111, aspect='equal')
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for j, e in enumerate(ells):
e.set_clip_box(ax.bbox)
e.set_alpha(0.4)
e.set_facecolor(np.random.rand(3))
ax.add_artist(e)
ax.text(w_offset[j], (height[j]/2.17)-h_offset[j], ch[j])
plt.xlim(-5, 5)
plt.ylim(-4, 4)
plt.axis('off')
plt.savefig('../Plots/chomsky_h.eps', format='eps', bbox_inches='tight')
plt.show()
| true
|
d4845241475eacc80f47ef6794528d184bf26db4
|
Python
|
SleepwalkerCh/Leetcode-
|
/45_2.py
|
UTF-8
| 1,483
| 3.375
| 3
|
[] |
no_license
|
#45. Jump Game II
# Simple greedy recursion from the previous version rewritten as iteration, and it passed.
# It is essentially BFS; the improvement is that the precomputed `record` below can simply be dropped.
from typing import List
class Solution:
    def jump(self, nums: List[int]) -> int:
        '''
        record=[]
        if len(nums)==1:
            return 0
        for i in range(len(nums)):
            if nums[i]+i>len(nums)-1:
                target=len(nums)-1
            else:
                target=nums[i]+i
            record.append(target)
        '''
        if len(nums) == 1:
            return 0
        i = 0
        step = 1
        while i < len(nums):
            tmax, imax = -1, -1
            # farthest index reachable from i in one jump
            target = min(nums[i] + i, len(nums) - 1)
            if target == len(nums) - 1:
                break
            # greedily move to the reachable position that itself reaches farthest
            for j in range(i + 1, target + 1):
                target1 = min(nums[j] + j, len(nums) - 1)
                if tmax < target1:
                    tmax = target1
                    imax = j
            i = imax
            step += 1
        return step
#Runtime: 72 ms, faster than 13.54% of Python3 online submissions for Jump Game II.
#Memory Usage: 15.5 MB, less than 7.02% of Python3 online submissions for Jump Game II.
#Ver 2.0
#Runtime: 60 ms, faster than 34.72% of Python3 online submissions for Jump Game II.
#Memory Usage: 14.7 MB, less than 20.92% of Python3 online submissions for Jump Game II.
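# A quick sanity check (example input from the problem statement):
if __name__ == '__main__':
    print(Solution().jump([2, 3, 1, 1, 4]))  # 2: jump index 0 -> 1 -> 4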
| true
|
8ca8e675bfce7c69ece866e888c06ddde2758139
|
Python
|
nilanjanchakraborty87/py-learn
|
/introduction/functions.py
|
UTF-8
| 583
| 3.796875
| 4
|
[] |
no_license
|
def greet(*args, **kwargs):
"""
docstring
this function takes a message and greets you
"""
print("Hello ", args[0], args[1], args[2])
print("Kwargs ", kwargs["message"])
# greet("Cts", "Macy", "IBM", message = "Nilanjan")
print(greet.__doc__)
# lambda
# multipler_3 =
"""
def random11111(x):
print(x)
func = random11111
"""
for i in range(1, 11):
#print((lambda x: 3 * x)(i))
pass
# dunder __repr__/__str__
# object
func = lambda msg = "World": print("Hello, ", msg)
func()
func("Naga")
func = lambda : print("Example lambda")
func()
| true
|
010ffe7aec1fc164d3541d4d927feceffdc06b40
|
Python
|
bjellesma/chatchord
|
/models/models.py
|
UTF-8
| 1,825
| 2.65625
| 3
|
[] |
no_license
|
# NOTE importing the mongobase alone is enough to initiate the connection
import models.mongobase
from mongoengine import Document
from mongoengine.fields import (
StringField, ListField, BooleanField
)
from mongoengine.errors import DoesNotExist #Error Handling
from flask_login import UserMixin # user logins
from werkzeug.security import generate_password_hash, check_password_hash #password hashing
from app import login
import pprint
class UsersModel(UserMixin, Document):
# Note: everything is camelcased in graphql so first_name becomes firstName no matter what it is in the database
meta = {'collection': 'users'}
username = StringField(required=True)
password_hash = StringField(required=True)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def find_user_by_username(self, username):
try:
return UsersModel.objects.get(username=username)
except DoesNotExist as err:
print('No user exists with that name')
return None
# def register(self, username, password, confirmPassword):
# Execute the graphql mutation to register the user
# mutation {
# createUser(username: "some_dude", password: "123"){
# user{
# id
# username
# }
# }
# }
@login.user_loader
def load_user(id):
return UsersModel.objects.get(id=id)
class RoomsModel(Document):
meta = {'collection': "rooms"}
name = StringField()
requiresAuth = BooleanField()
class BotsModel(Document):
meta = {'collection': "bots"}
name = StringField()
phrases = ListField(StringField())
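# A round-trip sketch of the werkzeug hashing helpers used above (an illustration, not
# part of the app; the password is a placeholder, and running this file directly still
# requires the module's mongo/app imports to resolve):
if __name__ == '__main__':
    hashed = generate_password_hash('hunter2')   # salted hash, safe to store
    assert check_password_hash(hashed, 'hunter2')
    assert not check_password_hash(hashed, 'wrong')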
| true
|
a6c4fb1f05dfb53112d7a71084fe91271b684cad
|
Python
|
theRemix/linkchecker
|
/linkchecker.py
|
UTF-8
| 1,368
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
import requests
from bs4 import BeautifulSoup
from texttable import Texttable
domain = sys.argv[-1]
omit = {'/','#'}
links = {}
colors = {
'reset': '\033[0m',
'red': '\033[31m'
}
def color(text, c):
return "{c}{text}{r}".format(c=colors[c], text=text, r=colors['reset'])
print("checking links in {}".format(domain))
def recursivelyCheckLinks(link):
res = requests.get(link)
links[link] = res.status_code
if res.status_code != 200:
return
soup = BeautifulSoup(res.text, "html.parser")
for link in soup.find_all("a"):
l = link.get("href")
if l != None:
l = l.split('#')[0]
checkLink = domain + l
if checkLink not in links and checkLink not in omit and l.startswith('/'):
recursivelyCheckLinks(checkLink)
recursivelyCheckLinks(domain)
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t','t',])
table.set_cols_align(["l", "r"])
for link in filter(lambda l: l != '', links):
label = link.replace(domain,'')
status = links[link]
statusLabel = str(links[link])
if status >= 400:
label += " ❌"
statusLabel = color(statusLabel, 'red')
table.add_row([
label,
statusLabel
])
print(table.draw())
print("checked {} links".format(len(links)))
| true
|
1611dac3d06f949c48d55002dd7c66fa06f45eba
|
Python
|
gfbenatto/Simple-tests-in-Python
|
/files2.py
|
UTF-8
| 99
| 3.265625
| 3
|
[] |
no_license
|
file = open('numeros.txt', 'r')
for line in file.readlines():
print(line.rstrip())
file.close()
| true
|
fd5814bbc5bff9db236736b2ca2cc02b36591dc4
|
Python
|
lucocozz/Labyrinthe_python
|
/ft/game.py
|
UTF-8
| 1,407
| 3.40625
| 3
|
[] |
no_license
|
from ft.display import *
from ft.file import *
from classe.map import *
def roboc(map):
"""Lance le jeu"""
end = False
while end is False:
clear()
print(map)
entry = get_input()
end = do_entry(map, entry)
save_game(map)
def get_input():
"""recupere l'input"""
good = False
while good is False:
try:
            entry = input("\n> ").strip().lower()
assert good_format(entry) is True
except AssertionError:
print_error("Erreur de saisie.")
else:
good = True
return entry
def good_format(entry):
"""Verifie si l'input correspond a un bon format"""
if len(entry) < 1 or len(entry) > 2:
return False
if entry[0] not in "qneso":
return False
if len(entry) == 2 and entry[1].isdigit() == False:
return False
if len(entry) == 2 and entry[0] == 'q':
return False
return True
def do_entry(map, entry):
"""Execute les actions en fonction de l'input"""
if entry == 'q':
return True
if len(entry) == 1:
entry += '1'
if entry[0] == 'n':
end = map.move_up(int(entry[1]))
if entry[0] == 's':
end = map.move_down(int(entry[1]))
if entry[0] == 'e':
end = map.move_left(int(entry[1]))
if entry[0] == 'o':
end = map.move_right(int(entry[1]))
return end
| true
|
739a51a645f0e63d521480bf23f40e082a6f9044
|
Python
|
redX1/vaanah_back
|
/vaana_app/orders/utils.py
|
UTF-8
| 400
| 2.734375
| 3
|
[] |
no_license
|
import random
import string
from .models import Order
class Util:
    @staticmethod
    def getOrderNumber():
letters = string.ascii_uppercase
digits = string.digits
strn = 'OR' + str(len(Order.objects.all()) + 1)
rand_nb = ''.join(random.choice(digits) for i in range(2))
rand_str = ''.join(random.choice(letters) for i in range(3))
return strn + rand_nb + rand_str
| true
|
94fb12248c44e70d7df664147cc35343f9577387
|
Python
|
throwawwwwway/haptiq
|
/app/view.py
|
UTF-8
| 9,076
| 2.6875
| 3
|
[] |
no_license
|
import tkinter as tk
import app.logconfig as lc
from tkinter import Canvas, Menu
from app.device import Point
from functools import partial
from app.network import Network
def motion(event, device):
x, y = event.x, event.y
device.position = Point(x, y)
# def update_pressed_key(event, listener):
# listener.update_pressed_key(repr(event.char))
# print("pressed {}".format())
def focus(event, canvas):
canvas.focus_set()
def color_from_level(level):
'''return a hex black depth color depending on the level'''
intensity = 255 - int(level * 2.55) % 256
return '#%02x%02x%02x' % (intensity, intensity, intensity)
class View(object):
def __init__(self, device, **opts):
self.root = tk.Tk()
self.networks = opts['networks'] if 'networks' in opts else []
self.interacts = opts['interacts'] if 'interacts' in opts else []
self._network = None
self._interaction = None
menubar = Menu(self.root)
self.root.config(menu=menubar)
if self.networks != []:
newtwork_menu = Menu(menubar)
menubar.add_cascade(label="Networks", menu=newtwork_menu)
for key, network in sorted(self.networks.items()):
partial_command = partial(self.load_network, key)
newtwork_menu.add_command(
label=key, command=partial_command)
if self.interacts != []:
interaction_menu = Menu(menubar)
menubar.add_cascade(label="Interactions", menu=interaction_menu)
for key, interaction in sorted(self.interacts.items()):
interaction.device = device
interaction.view = self
partial_command = partial(
self.load_interaction, key)
interaction_menu.add_command(
label=key, command=partial_command)
if 'mouse_tracking' in opts:
mouse_tracking = opts['mouse_tracking']
else:
mouse_tracking = False
self.scene = Scene(self.root, device, mouse_tracking)
if 'default_network' in opts:
self.load_network(opts['default_network'])
if 'default_interact' in opts:
self.load_interaction(opts['default_interact'])
@property
def network(self):
return self._network
@network.setter
def network(self, value):
self._network = value
self.scene.draw_network(value)
lc.log.info(str(value))
def load_network(self, key):
lc.log.info("network: {}".format(key))
if (key[:3] == 'gen'):
print(key[4])
self.network = Network.generate(int(key[4]))
else:
self.network = self.networks[key]
@property
def interaction(self):
return self._interaction
@interaction.setter
def interaction(self, value):
self._interaction = value
def load_interaction(self, key):
lc.log.info("interaction: {}".format(key))
self.interaction = self.interacts[key]
def activate_key_listening(self, listener):
self.scene.activate_key_listening(listener)
def desactivate_key_listening(self):
self.scene.desactivate_key_listening()
def on_exit(self):
self.root.destroy()
def loop(self):
self.root.mainloop()
class Scene(object):
def __init__(self, master, device, mouse_tracking=False):
self.master = master
self.device = device
self.frame = tk.Frame(master)
self.feedbackButton = tk.Button(
self.frame,
text="Feedback window",
width=25,
command=self.open_feedback
)
self.feedbackButton.pack()
self.explore_canvas = Canvas(master, width=500, height=500)
self.explore_canvas.pack()
if mouse_tracking:
self.explore_canvas.bind(
'<Motion>', lambda event, device=device: motion(event, device))
self.enable_position_feedback()
self.network_drawings = []
self.frame.pack()
self.app = None
self.update()
def activate_key_listening(self, listener):
# Will focus frame, needed for key binding
self.explore_canvas.bind(
"<Button-1>",
lambda event,
frame=self.explore_canvas: focus(event, frame)
)
self.explore_canvas.bind(
"<Key>",
lambda event: listener.update_pressed_key(str(event.char))
)
def desactivate_key_listening(self):
self.explore_canvas.unbind("<Button-1>")
self.explore_canvas.unbind("<Key>")
def enable_position_feedback(self):
self.device_cursor = self.explore_canvas.create_oval(
self.device.position.x - 2.5, self.device.position.y - 2.5,
self.device.position.x + 2.5, self.device.position.y + 2.5)
def draw_network(self, network):
self.explore_canvas.delete('all')
self.enable_position_feedback()
for node in network.nodes:
pos_x = node.x - 5
pos_y = node.y - 5
self.explore_canvas.create_oval(
pos_x, pos_y, pos_x + 10, pos_y + 10, fill="blue")
for link in network.links:
pt_a = link.first
pt_b = link.sec
self.explore_canvas.create_line(
pt_a.x, pt_a.y, pt_b.x, pt_b.y)
def update(self):
coords = self.explore_canvas.coords(self.device_cursor)
if len(coords) <= 3:
self.master.after(50, self.update)
return
center = ((coords[0] + coords[2]) / 2, (coords[1] + coords[3]) / 2)
self.explore_canvas.move(
self.device_cursor,
self.device.position.x - center[0],
self.device.position.y - center[1])
if self.app and not self.app.closed:
self.app.update()
self.master.after(50, self.update)
def open_feedback(self):
self.feedbackWindow = tk.Toplevel(self.master)
self.app = Feedback(self.feedbackWindow, self.device)
class Feedback(object):
def __init__(self, master, device):
self.master = master
self.device = device
self.frame = tk.Frame(master)
self.on_update = False
self.mapped_actuators = {}
self.quitButton = tk.Button(
self.frame, text='Quit', width=25, command=self.close_window)
self.quitButton.pack()
self.master.protocol("WM_DELETE_WINDOW", self.close_window)
self.canvas = Canvas(master, width=250, height=250)
self.canvas.pack()
self.set_mapping()
lc.log.debug("device actuators: {}".format(self.device.actuators))
self.frame.pack()
self.closed = False
def set_mapping(self):
cntr_x = 250 / 2
cntr_y = 250 / 2
straight = 90
wd = 8
self.north = self.canvas.create_line(
cntr_x, cntr_y - 8, cntr_x, cntr_y - straight, width=wd)
self.east = self.canvas.create_line(
cntr_x + 8, cntr_y, cntr_x + straight, cntr_y, width=wd)
self.south = self.canvas.create_line(
cntr_x, cntr_y + 8, cntr_x, cntr_y + straight, width=wd)
self.west = self.canvas.create_line(
cntr_x - 8, cntr_y, cntr_x - straight, cntr_y, width=wd)
if len(self.device.actuators) == 8:
diag = straight * 0.75
self.north_east = self.canvas.create_line(
cntr_x + 8, cntr_y - 8, cntr_x + diag, cntr_y - diag, width=wd)
self.south_east = self.canvas.create_line(
cntr_x + 8, cntr_y + 8, cntr_x + diag, cntr_y + diag, width=wd)
self.south_west = self.canvas.create_line(
cntr_x - 8, cntr_y + 8, cntr_x - diag, cntr_y + diag, width=wd)
self.north_west = self.canvas.create_line(
cntr_x - 8, cntr_y - 8, cntr_x - diag, cntr_y - diag, width=wd)
self.mapped_actuators = {
self.device.actuators[0]: self.east,
self.device.actuators[1]: self.north_east,
self.device.actuators[2]: self.north,
self.device.actuators[3]: self.north_west,
self.device.actuators[4]: self.west,
self.device.actuators[5]: self.south_west,
self.device.actuators[6]: self.south,
self.device.actuators[7]: self.south_east
}
else:
self.mapped_actuators = {
self.device.actuators[0]: self.east,
self.device.actuators[1]: self.north,
self.device.actuators[2]: self.west,
self.device.actuators[3]: self.south,
}
def update(self):
for actuator in self.mapped_actuators:
self.canvas.itemconfig(
self.mapped_actuators[actuator],
fill=color_from_level(actuator.level))
def close_window(self):
self.closed = True
self.master.destroy()
| true
|
416c95ee8abf504bd181f8e7ec8c8ecc3dbf66ec
|
Python
|
doguhanyeke/leetcode-sol
|
/Sort/k-closest-points-to-origin.py
|
UTF-8
| 437
| 3.234375
| 3
|
[] |
no_license
|
import math
from typing import List
import heapq
class Solution:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
return sorted(points, key=lambda x: math.pow(x[0], 2) + math.pow(x[1], 2))[:K]
class Solution2:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
heapq.heapify(points)
return heapq.nsmallest(K, points, key=lambda x: x[0] * x[0] + x[1] * x[1])
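# Quick check with an illustrative input; both variants agree:
if __name__ == '__main__':
    pts = [[3, 3], [5, -1], [-2, 4]]
    print(Solution().kClosest(pts, 2))   # [[3, 3], [-2, 4]]
    print(Solution2().kClosest(pts, 2))  # [[3, 3], [-2, 4]]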
| true
|
7f66064302a48ef735480d00011ea63769bdde62
|
Python
|
IrekKarimov/11419
|
/Lab_04.py
|
UTF-8
| 914
| 4.5
| 4
|
[] |
no_license
|
# 4. The user enters two letters.
# Determine their positions in the alphabet and how many letters lie between them.
print("====================================================")
print("Finding a letter's position in the English alphabet")
print("====================================================")
s1 = input("Enter a letter: ").upper()
s2 = input("Enter a letter: ").upper()
n1 = ord(s1)
n2 = ord(s2)
if 64 < n1 < 91:
    m1 = n1 - 64
if 64 < n2 < 91:
    m2 = n2 - 64
print(f'Letter {s1} is number {m1} in the alphabet. ')
print(f'Letter {s2} is number {m2} in the alphabet. ')
print(f'There are {m2-m1-1} letter(s) between them')
print("====================================================")
| true
|
56bb3a7aacad88dff2488c7825c24c00c9c7ab76
|
Python
|
gotutiyan/nlp100_knock
|
/source/chapter1/004.py
|
UTF-8
| 787
| 3.671875
| 4
|
[] |
no_license
|
#"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."という文を単語に分解し,
# 1, 5, 6, 7, 8, 9, 15, 16, 19番目の単語は先頭の1文字,それ以外の単語は先頭に2文字を取り出し,
# 取り出した文字列から単語の位置(先頭から何番目の単語か)への連想配列(辞書型もしくはマップ型)を作成せよ.
sentence = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
words = sentence.split()
index = (1, 5, 6, 7, 8, 9, 15, 16, 19)
dic = {}
for i,word in enumerate(words,1):
if i in index:
dic[word[0:1]] = i
else:
dic[word[0:2]] = i
print(dic)
| true
|
72feb95a9fe6f0e1e5a022883b8883cc2759cb86
|
Python
|
jmichalicek/django-fractions
|
/djfractions/__init__.py
|
UTF-8
| 8,666
| 3.25
| 3
|
[] |
permissive
|
__version__ = "5.0.0"
import fractions
import re
from decimal import Decimal
from typing import Any, Union
from djfractions.exceptions import InvalidFractionString, NoHtmlUnicodeEntity
__all__ = [
"quantity_to_decimal",
"is_number",
"is_fraction",
"get_fraction_parts",
"get_fraction_unicode_entity",
]
# Aligns with https://docs.python.org/3/library/fractions.html#fractions.Fraction.limit_denominator
DEFAULT_MAX_DENOMINATOR = 1000000
# Entities from https://dev.w3.org/html5/html-author/charref
HTML_ENTITIES = [
    "&frac12;",
    "&frac13;",
    "&frac23;",
    "&frac14;",
    "&frac34;",
    "&frac15;",
    "&frac25;",
    "&frac35;",
    "&frac45;",
    "&frac16;",
    "&frac56;",
    "&frac17;",
    "&frac18;",
    "&frac38;",
    "&frac58;",
    "&frac78;",
]
def is_number(s: Any) -> bool:
"""
Determine if the input value is numeric - an int, float, decimal.Decimal,
or a string such as '1', '1.23', etc.
:param s: A string value to check to see if it represents a float or int.
"""
try:
int(s)
return True
except ValueError:
pass
try:
float(s)
return True
except ValueError:
pass
return False
def is_fraction(s: Any) -> bool:
"""
Determine if the input string appears to represent a fraction.
This does not include mixed numbers such as 1 1/3
:param s: A string value to check if it is formatted as a fraction.
"""
return bool(re.match(r"^-?\d+/\d+$", s))
def coerce_to_thirds(value: fractions.Fraction) -> fractions.Fraction:
"""
takes a :class:`fractions.Fraction` and forces it to thirds if it is one that
is frequently the result of taking a number such as 1/3, converting to decimal/float,
then back to a fraction.
"""
temp_decimal = Decimal(value.numerator / value.denominator).quantize(Decimal("0.00"))
if (
temp_decimal % 1 == Decimal(".33")
or temp_decimal % 1 == Decimal(".3")
or temp_decimal % 1 == Decimal(".67")
or temp_decimal % 1 == Decimal(".6")
):
value = value.limit_denominator(3)
return value
def quantity_to_decimal(quantity_string: str) -> Decimal:
"""
Take a quantity string and return a decimal.
    Handles strings such as 1, 1 1/4, 1 and 1/4, 1-1/4,
    1.25, and .25. Negative values should have
the negative sign first, such as -1/4 or -1 1/4
:param quantity_string: String to convert to a :class:`decimal.Decimal`
"""
# get actual fraction-like strings to be N/N with no spaces
quantity_string = quantity_string.strip()
quantity_string = re.sub(r"\b(\d+)\s+/\s+(\d+)\b", r"\1/\2", quantity_string)
if is_number(quantity_string):
return Decimal(quantity_string)
if is_fraction(quantity_string):
return _fraction_string_to_decimal(quantity_string)
    # assume a hyphen between a whole value and fraction such as 1-1/4
# is a separator and not a negative fraction.
# If the negative is first though, then we need to keep it negative.
positive_or_negative = -1 if quantity_string.startswith("-") else 1
quantity_string = quantity_string.replace("-", " ")
parts = quantity_string.split()
parts_length = len(parts)
# it must be a mixed number like 1 1/4
number_stack = [] # for storing the entire number to return in parts
for part in parts:
if is_fraction(part):
number_stack.append(_fraction_string_to_decimal(part))
elif is_number(part):
number_stack.append(Decimal(part))
return Decimal(sum(number_stack)) * positive_or_negative
def quantity_to_fraction(quantity_string: str) -> fractions.Fraction:
"""
Take a quantity string and return a :class:`fractions.Fraction`.
    Handles strings such as 1, 1 1/4, 1 and 1/4, 1-1/4,
    1.25, and .25. Negative values should have
the negative sign first, such as -1/4 or -1 1/4
:param quantity_string: String to convert to a :class:`fractions.Fraction`
"""
# get actual fraction-like strings to be N/N with no spaces
quantity_string = quantity_string.strip()
quantity_string = re.sub(r"\b(\d+)\s+/\s+(\d+)\b", r"\1/\2", quantity_string)
if is_number(quantity_string):
return fractions.Fraction(quantity_string)
if is_fraction(quantity_string):
return _fraction_string_to_fraction(quantity_string)
    # it must be something like 1 1/4
# assume that a hyphen between a whole value and fraction such as 1-1/4
# is a separator and not a negative fraction.
# If the negative is first though, then we need to keep it negative.
    # Cannot just negate the fraction part alone or we end up subtracting:
    # -1 1/4 becomes -3/4 when what is meant is -5/4
positive_or_negative = -1 if quantity_string.startswith("-") else 1
    # non-capturing group in the middle handles just a space, a hyphen with
# optional spaces, or the word and. Examples:
# 1 1/4, 1-1/4, 1 - 1/4, 1 and 1/4
parts = re.match(r"^-?(\d+)(?:\s+|\s*-?\s*|\s+and\s+)(\d+\/\d+)", quantity_string)
if not parts:
raise InvalidFractionString("%s is not a valid fraction" % quantity_string)
# parts.group(0) is the entire string, 1 is the whole number bit
f = fractions.Fraction(parts.group(2))
f = (f + int(parts.group(1))) * positive_or_negative
return f
def _fraction_string_to_fraction(fraction: str) -> fractions.Fraction:
"""
Convert a string representing a fraction to a :class:`fractions.Fraction`
"""
parts = fraction.split("/")
numerator = int(parts[0])
denominator = int(parts[1])
return fractions.Fraction(numerator, denominator)
def _fraction_string_to_decimal(fraction: str) -> Decimal:
"""
Convert strings such as '1/4' to a Decimal
"""
parts = fraction.split("/")
numerator = int(parts[0])
denominator = int(parts[1])
return Decimal(numerator / denominator)
def get_fraction_parts(
value: Union[fractions.Fraction, float, Decimal, int, str],
allow_mixed_numbers: bool = True,
limit_denominator: int = DEFAULT_MAX_DENOMINATOR,
coerce_thirds: bool = True,
):
"""
Takes an `int`, `float`, or :class:`decimal.Decimal` and returns
a tuple of (whole_number, numerator, denominator). If allow_mixed_numbers
is not True, then whole_number will be None.
:param value: The value to convert to parts of a fraction.
:param bool allow_mixed_numbers: Defaults to True. If True, then parts for
mixed numbers will be created, otherwise improper fractions with a
whole_number of 0 will be created. In the case where value is a
whole number such as 4, if allow_mixed_numbers is True, then
a tuple of (4, 0, 1) would be returned, otherwise
(0, 4, 1) would be returned.
:param bool limit_denominator: Limit the denominator to this value. Defaults to 1000000,
which is the same as :meth:`fractions.Fraction.limit_denominator()` default max_denominator
:param bool coerce_thirds: Defaults to True. If True
then .3 repeating is forced to 1/3 rather than 3/10, 33/100, etc.
and .66 and .67 are forced to 2/3.
"""
f = fractions.Fraction(value)
whole_number = 0
if allow_mixed_numbers and f.numerator >= f.denominator:
        # convert to a mixed number
# whole_number = f.numerator // f.denominator
whole_number, numerator = divmod(f.numerator, f.denominator)
# f = fractions.Fraction(f.numerator - (whole_number * f.denominator), f.denominator)
f = fractions.Fraction(numerator, f.denominator)
if limit_denominator:
f = f.limit_denominator(limit_denominator)
if coerce_thirds and (not limit_denominator or limit_denominator > 3):
# if denominator is limited to less than 3, this would be in opposition to that.
# if denominator is limited to 3 then this has naturally already been done.
f = coerce_to_thirds(f)
return (whole_number, f.numerator, f.denominator)
def get_fraction_unicode_entity(value: Union[fractions.Fraction, float, Decimal, int, str]) -> str:
"""
Returns the html unicode entity for the fraction if one exists or None
:param value: The value to get the entity for.
"""
if not isinstance(value, fractions.Fraction):
value = fractions.Fraction(value)
entity = "&frac%d%d;" % (value.numerator, value.denominator)
if entity not in HTML_ENTITIES:
raise NoHtmlUnicodeEntity("No valid HTML entity exists for %s" % value)
return entity
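# A few illustrative round-trips through the helpers above (a sketch, not part of the package):
if __name__ == "__main__":
    print(quantity_to_decimal("1 1/4"))           # Decimal('1.25')
    print(quantity_to_fraction("-1 1/4"))         # Fraction(-5, 4)
    print(get_fraction_parts(Decimal("1.25")))    # (1, 1, 4)
    print(get_fraction_unicode_entity("1/4"))     # '&frac14;'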
| true
|
49975842383b40aab822968740f504d1b8f04db0
|
Python
|
aleksartamonov/BPL_Stats
|
/parser/parser.py
|
UTF-8
| 1,597
| 2.859375
| 3
|
[] |
no_license
|
from service.match import FootballMatch
__author__ = 'aleksart'
from bs4 import BeautifulSoup
def parse_document(filename):
f = open(filename)
content = f.read()
f.close()
soup = BeautifulSoup(content)
tour_results = parse_tour_results(soup)
tour_num = 1
matches_html = []
all_matches = []
for tour in tour_results:
matches = tour.find_all('tr')
for match in matches:
info = parse_match_results(match, tour_num)
football_match = FootballMatch(info)
all_matches.append(football_match)
matches_html.append(match)
tour_num += 1
return all_matches
def parse_tour_results(soup):
headers = soup.find_all('h3', 'titleH3 bordered mB10')
# print result
tour_result = []
for head in headers:
tour_result.append(head.find_next_sibling('div').find('tbody'))
return tour_result
def parse_match_results(match, tour_num):
info = dict()
info['tour'] = tour_num
time_info = match.find('td', 'name-td alLeft')
time_info = time_info.text.encode("utf-8").split('|')
info['date'] = time_info[0]
info['time'] = time_info[1]
host_info = match.find('td', 'owner-td')
info['host'] = host_info.text.encode("utf-8").strip()
guest_info = match.find('td', 'guests-td')
info['guest'] = guest_info.text.encode("utf-8").strip()
score_info = match.find('td', 'score-td')
info['score'] = score_info.text.encode("utf-8").strip()
info['visitors'] = match.find('td', 'padR alRight').text.encode("utf-8").strip()
return info
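# Usage sketch (the filename argument is hypothetical; expects a saved BPL results page):
if __name__ == '__main__':
    import sys
    for football_match in parse_document(sys.argv[1]):
        print(football_match)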
| true
|
126013d2c3d77eb7375fe7e415ed297c4953dd9d
|
Python
|
the-argus/pyweek31
|
/core/GameResource.py
|
UTF-8
| 7,592
| 2.578125
| 3
|
[] |
no_license
|
import math
import os
import random
import arcade
from constants.camera import FOLLOW, IDLE, LERP_MARGIN, LERP_SPEED
from constants.enemies import SPAWN_RADIUS
from constants.game import (
GRID_SIZE,
PLAYER_DEFAULT_START,
ROOM_HEIGHT,
ROOM_WIDTH,
SCREEN_HEIGHT,
SCREEN_WIDTH,
SPRITE_SCALING,
TILE_SPRITE_SCALING,
)
from core.Enemies import Default, Jetpack
from core.EnemyManager import EnemyManager
from core.lerp import lerp
from core.MouseCursor import MouseCursor
from core.PlayerCharacter import PlayerCharacter
from core.Bullet import Bullet
class GameResources:
"""
All resource handling like maps and sprites and rendering
"""
def __init__(self, game_instance):
self.game_instance = game_instance
        # pin the working directory (note: this uses the current cwd, not this file's location)
file_path = os.path.abspath(os.getcwd())
os.chdir(file_path)
# keep track of camera location
self.view_bottom = 0
self.view_left = 0
# keep track of mouse
self.mouse_x = 0
self.mouse_y = 0
# mouse cursor
self.mouse_cursor = MouseCursor()
# camera behavior and follow location
self.behavior = FOLLOW
self.follow_x = PLAYER_DEFAULT_START[0]
self.follow_y = PLAYER_DEFAULT_START[1]
# screenshake
self.shake_remain = 0
self.shake_strength = 1
self.shake_x = 0
self.shake_y = 0
        # list initialization
self.wall_list = arcade.SpriteList(
use_spatial_hash=True, spatial_hash_cell_size=16
) # contains all static objects which should have collision
self.floor_list = arcade.SpriteList()
self.player_list = arcade.SpriteList()
self.enemy_list = EnemyManager(self)
self.gui_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
# player
self.player_sprite = PlayerCharacter(PLAYER_DEFAULT_START, self)
self.player_sprite.register_sprite_list(self.player_list)
self.player_list.append(self.player_sprite)
self.load_level()
# enemies
for i in range(3):
self.enemy_list.spawn_enemy()
self.gui_list.append(self.mouse_cursor)
self.mouse_cursor.register_sprite_list(self.gui_list)
def on_draw(self):
# draw all the lists
self.floor_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
self.wall_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
self.player_sprite.on_draw()
self.player_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
self.bullet_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
self.enemy_list.enemy_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
self.gui_list.draw(filter=(arcade.gl.NEAREST, arcade.gl.NEAREST))
"""
for enemy in self.enemy_list.enemy_list:
enemy.draw_path()
"""
def on_update(self, delta_time):
# mouse cursor
self.mouse_cursor.center_x = self.mouse_x + self.view_left
self.mouse_cursor.center_y = self.mouse_y + self.view_bottom
# camera scrolling
self.follow_x = self.player_sprite.center_x
self.follow_y = self.player_sprite.center_y
dist = math.sqrt(
((self.view_left + (SCREEN_WIDTH / 2)) - self.follow_x) ** 2
+ ((self.view_bottom + (SCREEN_HEIGHT / 2)) - self.follow_y) ** 2
)
if dist >= LERP_MARGIN:
self.view_left = int(
lerp(self.view_left, self.follow_x - (SCREEN_WIDTH / 2), LERP_SPEED)
)
self.view_bottom = int(
lerp(self.view_bottom, self.follow_y - (SCREEN_HEIGHT / 2), LERP_SPEED)
)
self.player_sprite.on_update(delta_time)
for enemy_sprite in self.enemy_list.enemy_list:
enemy_sprite.on_update(delta_time)
for bullet_sprite in self.bullet_list:
bullet_sprite.on_update(delta_time)
# screenshake and camera updates
if self.shake_remain > 0:
self.shake_x = random.randrange(-self.shake_strength, self.shake_strength)
self.shake_y = random.randrange(-self.shake_strength, self.shake_strength)
self.shake_remain -= 1
else:
self.shake_x = 0
self.shake_y = 0
self.shake_strength = 0
# clamp viewport to room
rightside_clamp = ROOM_WIDTH
leftside_clamp = 0
topside_clamp = ROOM_HEIGHT
bottom_clamp = 0
if not (0 < self.view_left + self.shake_x < rightside_clamp - SCREEN_WIDTH):
self.view_left = min(
rightside_clamp - SCREEN_WIDTH - self.shake_x, self.view_left
)
self.view_left = max(self.view_left, leftside_clamp - self.shake_x)
if not (0 < self.view_bottom + self.shake_y < topside_clamp - SCREEN_HEIGHT):
self.view_bottom = min(
topside_clamp - SCREEN_HEIGHT - self.shake_y, self.view_bottom
)
self.view_bottom = max(self.view_bottom, bottom_clamp - self.shake_y)
arcade.set_viewport(
self.view_left + self.shake_x,
(SCREEN_WIDTH) + self.view_left + self.shake_x,
self.view_bottom + self.shake_y,
(SCREEN_HEIGHT) + self.view_bottom + self.shake_y,
)
def screenshake(self, length, strength):
self.shake_remain = int(abs(length))
self.shake_strength = int(abs(strength))
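# Usage sketch (illustrative values, not from the source): self.screenshake(30, 4)
# jitters the viewport for 30 update ticks with an amplitude of +/-4 pixels.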
def on_key_press(self, key, modifiers):
if key == arcade.key.F:
self.game_instance.window.set_fullscreen(
not self.game_instance.window.fullscreen
)
arcade.set_viewport(
self.view_left + self.shake_x,
self.view_left + self.shake_x + SCREEN_WIDTH,
self.view_bottom + self.shake_y,
self.view_bottom + self.shake_y + SCREEN_HEIGHT,
)
def create_bullet(self, pos, vel, damage, speed_falloff, damage_falloff):
new_bullet = Bullet(pos, vel, damage, speed_falloff, damage_falloff, self.game_instance.physics_engine, self)
new_bullet.register_sprite_list(self.bullet_list)
self.bullet_list.append(new_bullet)
def calculate_distance_from_player(self, enemy_x, enemy_y):
player_x = self.player_sprite.center_x
player_y = self.player_sprite.center_y
return math.hypot(enemy_x - player_x, enemy_y - player_y) # hypot is the idiomatic Euclidean distance; abs() was redundant under squaring
def on_mouse_motion(self, x, y, dx, dy):
self.mouse_x = x
self.mouse_y = y
def load_level(self):
# Read in the tiled map
path = os.path.join("resources","GameMap.tmx")
my_map = arcade.tilemap.read_tmx(path)
# --- Walls ---
# Calculate the right edge of the my_map in pixels
self.end_of_map = my_map.map_size.width * GRID_SIZE
# Grab the layer of items we can't move through
self.wall_list = arcade.tilemap.process_layer(
my_map, "wall", TILE_SPRITE_SCALING
)
self.floor_list = arcade.tilemap.process_layer(
my_map, "floor", TILE_SPRITE_SCALING
)
# --- Other stuff
# Set the background color
if my_map.background_color:
arcade.set_background_color(my_map.background_color)
# Set the view port boundaries
# These numbers set where we have 'scrolled' to.
self.view_left = 0
self.view_bottom = 0
| true
|
3839139f280e45332db2003202258f6352272e6a
|
Python
|
D0ub1ePieR/Leetcode
|
/solutions/202-Happy_Number-快乐数/Happy Number.py
|
UTF-8
| 860
| 3.34375
| 3
|
[] |
no_license
|
# python3
# simple
# hash table / math
# 48ms 31.73%
# 13.4MB 15.50%
class Solution:
def isHappy(self, n: int) -> bool:
record = []
while 1:
s = sum([int(x)**2 for x in str(n)])
if s == 1:
return True
if s in record:
return False
record.append(s)
n = s
# 56ms 18.34%
# 13.4MB 15.50%
class Solution:
def isHappy(self, n: int) -> bool:
def cal_sum(num):
    s = sum([int(x)**2 for x in str(num)])
    # equivalent arithmetic version:
    # s = 0
    # while num != 0:
    #     s += (num % 10) ** 2
    #     num //= 10
    return s # the return was missing in the original, so cal_sum silently yielded None
slow = fast = n
while 1:
slow = cal_sum(slow)
fast = cal_sum(cal_sum(fast))
if slow == fast:
break
return slow == 1
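# Minimal usage sketch (classic example values): 19 is happy because
# 1^2+9^2=82 -> 8^2+2^2=68 -> 6^2+8^2=100 -> 1^2+0^2+0^2=1, while 2 falls into
# the 4 -> 16 -> 37 -> ... cycle.
#   print(Solution().isHappy(19)) # True
#   print(Solution().isHappy(2))  # False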
| true
|
d38fb236c68608ba7aafc16328c24c59fe86c423
|
Python
|
Tansiya/tansiya-training-prgm
|
/sample_question/class_divisible.py
|
UTF-8
| 440
| 4.0625
| 4
|
[] |
no_license
|
"""define a class generate which can iterate the number,which divisible by 7,range between 0 and n"""
#assign a function
class string_handling():
def putNumbers(n):
divi = []
for i in range(1, n+1):
if i%7==0:
divi.append(i)
return divi
    @staticmethod
    def st_rev(n):
s = str(n)[::-1]
return s
n = int(input())
b = string_handling.putNumbers(n)
c = string_handling.st_rev(n)
print("Reverse", c)
print("Numbers divisible by 7", b)
| true
|
cc0a0a448c1050b959c7917d6e79d354c67e17bf
|
Python
|
sanyamc/Courses
|
/Python/Company/minWindowSubstring.py
|
UTF-8
| 4,404
| 3.65625
| 4
|
[] |
no_license
|
"""
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
For example,
S = "ADOBECODEBANC"
T = "ABC"
Minimum window is "BANC".
Note:
If there is no such window in S that covers all characters in T, return the empty string "".
If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
"""
#result = [0,3,5,4,6,7,2]
class Solution(object):
def minRange(self,dictionary):
import heapq
min_heap=[]
global_min=float("Inf")
min_max=[]
min_index=None
max_index=None
global_dict={}
min_key=None
min_val=global_min
keys=list(dictionary.keys())
for key in keys:
# print(dictionary)
val=dictionary[key].pop(0)
global_dict[val]=key
heapq.heappush(min_heap,val)
if val<min_val:
min_val=val
min_key=key
if(len(dictionary[key])==0):
val=heapq.heappop(min_heap)
if(len(min_heap)>0):
return (val,max(min_heap))
else:
return (val,val)
while(len(min_heap)>0 and len(dictionary[global_dict[min_heap[0]]])>0):
#print(min_heap)
val2=heapq.heappop(min_heap)
if(len(min_heap)==0):
key=global_dict[val2]
max_val=dictionary[key][0]
else:
max_val=max(min_heap)
min_max.append((val2,max_val))
# if val2<min_val:
# min_val=val2
min_key=global_dict[val2]
val=dictionary[min_key].pop(0)
global_dict[val]=min_key
heapq.heappush(min_heap,val)
# print("min key "+str(min_key))
print("heap "+str(min_heap))
print(dictionary)
print("min max"+str(min_max))
# if(len(min_heap)==0 or len(dictionary[global_dict[min_heap[0]]])==0):
# val = heapq.heappop(min_heap)
# if(len(min_heap)==0):
# max_val=
# else:
# max_val=max(min_heap)
# min_max.append((val,max_val))
# break
if(len(min_heap)>0):
val2=heapq.heappop(min_heap)
if(len(min_heap)==0):
key=global_dict[val2]
if(len(dictionary[key])>0):
max_val=dictionary[key][0]
else:
max_val=float("Inf")
else:
max_val=max(min_heap)
min_max.append((val2,max_val))
print(min_max)
print(min_heap)
for tup in min_max:
if abs(tup[1]-tup[0])<global_min:
global_min=abs(tup[1]-tup[0])
min_index=min(tup)
max_index=max(tup)
return (min_index,max_index)
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
if s==t:
return s
if len(s)==0 or len(s)<len(t):
return ""
dictionary={}
t_dict={}
for i in t:
if i in t_dict:
t_dict[i]+=1
else:
t_dict[i]=1
for i,val in enumerate(s):
if val in t:
if val in dictionary:
dictionary[val].append(i)
else:
dictionary[val]=[i]
#print(t_dict)
#print(dictionary)
for key in t_dict:
if key not in dictionary or len(dictionary[key])<t_dict[key]:
return ""
# print(dictionary)
tup=self.minRange(dictionary)
# print(tup)
if(tup[0]==None or tup[1]==None):
return ""
else:
return s[tup[0]:tup[1]+1]
a={
'a':[0,2,4],
'b':[1,5,7],
'c':[3,6]
}
s=Solution()
S = "ADOBECODEBANC"
T = "ABC"
# S="baab"
# T="bbb"
# S="aabaabaaab"
# T="bb"
# S="ab"
# T="a"
# S="abc"
# T="ab"
# S="cabwefgewcwaefgcf"
# T="cae"
S="aabaabaaab"
T="bb"
S="bbaa"
T="aba"
print(s.minWindow(S,T))
| true
|
fb81f8b21c2b749e959d2bca3ec8a909415c146d
|
Python
|
aroproduction/School_Programs_Class12
|
/Binary File Programs/updating_binary_02.py
|
UTF-8
| 775
| 3.03125
| 3
|
[] |
no_license
|
# Updating name in existing binary file
import pickle
stu = {}
found = False
fin = open('../Resources/Stu.dat', "rb+")
try:
while True:
rpos = fin.tell()
stu = pickle.load(fin)
if stu['Rollno'] == 5:
stu['Name'] = 'Gurnam'
fin.seek(rpos)
pickle.dump(stu, fin)
found = True
except EOFError:
if not found:
print("Sorry, no matching record found")
else:
print("\n"+"Record(s) successfully updated.\n")
fin.close()
with open('../Resources/Stu.dat', 'rb') as file:
print('Updated Records:')
try:
while True:
updated = pickle.load(file)
if updated['Rollno'] == 5:
print(updated)
except EOFError:
pass
| true
|
33f8b828b4d65e6520c6695cdb04eac9957eeec5
|
Python
|
fedepacher/Wazuh-Test
|
/Task_4/Fixture/Module/test_student.py
|
UTF-8
| 774
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
from student import StudentDB
import pytest
@pytest.fixture(scope='module')
def db():
print('----------------Setup method----------------')
db = StudentDB()
db.connect('students.json')
yield db
print('----------------Teardown method---------------')
db.close()
def test_scott_data(db):
    '''Test for the student called Scott; db is the student DB fixture.'''
scott_data = db.get_data('Scott')
assert scott_data['id'] == 1
assert scott_data['name'] == 'Scott'
assert scott_data['result'] == 'pass'
def test_mark_data(db):
    '''Test for the student called Mark; db is the student DB fixture.'''
mark_data = db.get_data('Mark')
assert mark_data['id'] == 2
assert mark_data['name'] == 'Mark'
assert mark_data['result'] == 'fail'
| true
|
d749db8f4defc9101d35c4cf96da5fa8e352e6c5
|
Python
|
Vahid-Esmaeelzadeh/CTCI-Python
|
/educative/08 Tree DFS/04 Path With Given Sequence (medium).py
|
UTF-8
| 1,013
| 4.28125
| 4
|
[] |
no_license
|
'''
Path With Given Sequence
Given a binary tree and a number sequence, find if the sequence is present as a root-to-leaf path in the given tree.
'''
class TreeNode:
def __init__(self, value, left=None, right=None):
self.value = value
self.left, self.right = left, right
def has_path_with_given_sequence(root, seq):
if root is None or len(seq) == 0:
return False
if root.value != seq[0]:
return False
if root.left is None and root.right is None and len(seq) == 1 and root.value == seq[0]:
return True
return has_path_with_given_sequence(root.left, seq[1:]) or has_path_with_given_sequence(root.right, seq[1:])
root = TreeNode(1)
root.left = TreeNode(0)
root.right = TreeNode(1)
root.left.left = TreeNode(1)
root.right.left = TreeNode(6)
root.right.right = TreeNode(5)
print("Tree has path sequence: " + str(has_path_with_given_sequence(root, [1, 0, 7])))
print("Tree has path sequence: " + str(has_path_with_given_sequence(root, [1, 1, 6])))
| true
|
4c30efcbe8896e73ee5549edcaf16c70dd3e4b4c
|
Python
|
drewrutt/Image-To-Dice
|
/editor.py
|
UTF-8
| 3,035
| 3.6875
| 4
|
[] |
no_license
|
from PIL import Image, ImageFilter
import math
import os
dirname = os.path.dirname(__file__)
def open_image(path):
newImage = Image.open(path).convert('LA')
return newImage
# Save Image
def save_image(image, path):
image.save(path, 'png')
# Create a new image with the given size
def create_image(i, j):
image = Image.new("RGB", (i, j), "white")
return image
# Get the pixel from the given image
def get_pixel(image, i, j):
# Inside image bounds?
width, height = image.size
if i >= width or j >= height:
return None
# Get Pixel
pixel = image.getpixel((i, j))
return pixel
#Builds the image section by section with the dice faces.
#BRACKETS
# 6 -> 0 - 42.5
# 5 -> 42.5 - 85
# 4 -> 85 - 127.5
# 3 -> 127.5 - 170
# 2 -> 170 - 212.5
# 1 -> 212.5 - 255
def create_dice_ary(image, blur, dirname):
one = open_image(os.path.join(dirname, 'dice_bigger', 'one.png'))
two = open_image(os.path.join(dirname, 'dice_bigger', 'two.png'))
three = open_image(os.path.join(dirname, 'dice_bigger', 'three.png'))
four = open_image(os.path.join(dirname, 'dice_bigger', 'four.png'))
five = open_image(os.path.join(dirname, 'dice_bigger', 'five.png'))
six = open_image(os.path.join(dirname, 'dice_bigger', 'six.png'))
blur *= 3
factor = math.floor(int(50/blur))
lastX = 0
lastY = 0
width, height = image.size
newimage = Image.new("1", (width*factor, height*factor))
ary = []
for x in range(int(height/blur)):
row = []
for y in range(int(width/blur)):
pix = get_pixel(image, lastX, lastY)
if(pix != None):
if(pix[0] <= 42.5):
row.append(6)
newimage.paste(six, (lastX*factor, lastY*factor))
elif(pix[0] > 42.5 and pix[0] <= 85):
row.append(5)
newimage.paste(five, (lastX*factor, lastY*factor))
elif(pix[0] > 85 and pix[0] <= 127.5):
row.append(4)
newimage.paste(four, (lastX*factor, lastY*factor))
elif(pix[0] > 127.5 and pix[0] <= 170):
row.append(3)
newimage.paste(three, (lastX*factor, lastY*factor))
elif(pix[0] > 170 and pix[0] <= 212.5):
row.append(2)
newimage.paste(two, (lastX*factor, lastY*factor))
else:
row.append(1)
newimage.paste(one, (lastX*factor, lastY*factor))
lastX += blur
lastY += blur
lastX = 0
ary.append(row)
return newimage
#To run:
# -Pillow must be installed
# -Put the image file path in the open_image section
# -Set the blur to determine how much detail you would like (int 1 to whatever)
# -Resulting file is saved in the same place as this file and is titled "dice.png"
if __name__ == "__main__":
img = open_image(r"") #Put the path to the image file here.
#For windows you need to convert to a raw string which
# it is already set up for.
blur = 1
img = img.filter(ImageFilter.BoxBlur(blur))
newimg = create_dice_ary(img, blur, dirname)
newimg.save("dice.png")
| true
|
34abc24dc07fb23f142b90ccd10ecff719b39e1a
|
Python
|
dhermes/project-euler
|
/python/complete/no303.py
|
UTF-8
| 1,958
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# We begin with a sorted list of values to choose from.
# If the value is not found, we log our biggest value and
# save the number of digits. We then start with 10**d
# and 2*(10**d), the next biggest values with only digits
# less than 3. We wish to find some x*(10**d) + y, where
# both x and y have only 1s and 2s and y has d digits
# or less. We want to x*(10**d) + y == 0 mod n
# Since we have all possible values y with d digits or less,
# we want to find the smallest match x such that
# x*(10**d) is in the set of residues (-y) % n
from itertools import product as i_product
from python.decorators import euler_timer
def find(n, value_list):
for value in value_list:
if value % n == 0:
return value
digs = len(str(max(value_list)))
needed_residues = sorted(set((-value) % n for value in value_list))
residue = (10 ** digs) % n
actual_list = [1, 2]
residue_form = [(residue * val) % n for val in actual_list]
while set(residue_form).intersection(needed_residues) == set():
nxt = [] # renamed from 'next' to avoid shadowing the builtin
for val in actual_list:
    nxt.extend([10 * val, 10 * val + 1, 10 * val + 2])
actual_list = nxt
residue_form = [(residue * val) % n for val in actual_list]
best_match = min(val for val in actual_list
if (residue * val) % n in needed_residues)
best_opposites = [val for val in value_list
if val % n == (-(best_match * residue)) % n]
return (10 ** digs) * best_match + min(best_opposites)
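# Quick hand-checked sanity check: the candidates (digits 0-2 only) run
# 1, 2, 10, 11, 12, ..., so find(3, values) returns 12, the smallest such
# multiple of 3, and main() then adds 12 / 3 = 4 for n = 3.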
def main(verbose=False):
MAX_DIGITS = 12
candidate_lists = [['0', '1', '2']] * MAX_DIGITS
values = list(i_product(*candidate_lists))
values = [int(''.join(value)) for value in values][1:]
running_sum = 0
for n in range(1, 10000 + 1):
running_sum += find(n, values) // n # integer division: find() returns an exact multiple of n
return running_sum
if __name__ == '__main__':
print(euler_timer(303)(main)(verbose=True)) # parenthesized for Python 3
| true
|
90fa9a1cadcb369433eb6be5222001532e76dcd7
|
Python
|
Chad-Mowbray/iamb-classifier
|
/ipclassifier/token_processors/spelling_syllabify.py
|
UTF-8
| 8,370
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
import re
from copy import deepcopy
from nltk import pos_tag
class SpellingSyllabifier:
"""
A last resort if a word token cannot be found elsewhere
Syllabifies a word based only on its spelling
Ridiculously expandable
"""
VOWELS = "aeiouy"
DUMMY_STRESSED = "AH1"
DUMMY_UNSTRESSED = "AH0"
REGEX = {
# Endings
"IES": r'ies?$',
"ION": r'[st]{1}ions?$',
"ED": r'[^aeiou]ed$',
"EST": r'.{2,}est$',
"IZE": r'i[sz]es?$',
"E": r'[^aeio]es?$',
"SES": r'[sz]es$',
"IAN": r'ianS?$',
# Non-Endings
"QU": r'qu',
"AE": r'ae',
"DOUBLE": r'([eiouy])\1', # no double a
"OU": r'ou',
"EY": r'ey\W?',
"YV": r'y[aeiou]',
"EA": r'ea',
"AI": r'ai',
"AU": r'au',
"UI": r'ui',
"OY": r'oy',
# Others
"EOU": r'eou',
"EON": r'eon',
"VLV": r'[aeiou]{1}[vwrl][aeiou]{1}'
}
def __init__(self, token):
self.tentative_phonemes = [[]]
self._token = token
self._syllable_count = 0
self._modified_word = ''
self._reduced_syllables = 0
self._main()
def _get_syllable_count(self):
word = self._check_endings()
word = self._check_special_cases(word)
syllables = [w for w in word if w in self.VOWELS]
self._syllable_count = len(syllables)
def _find_multiple(self, regex, word, rev=False):
res = re.finditer(regex, word)
indicies = [m.start() + 1 for m in res]
indicies = indicies[::-1] if rev else indicies
for idx in indicies:
word = word[:idx] + word[idx + 1:]
self._reduced_syllables += 1
return word
def _find_single(self, letter, word):
idx = word.rindex(letter)
word = word[:idx] + word[idx + 1:]
self._reduced_syllables += 1
return word
def _check_endings(self):
word = self._token
if re.search(self.REGEX["EST"], word):
word = self._find_single("e", word)
return word
if re.search(self.REGEX["IZE"], word):
word = self._find_single("e", word)
return word
if re.search(self.REGEX["IAN"], word):
word = self._find_single("i", word)
return word
if re.search(self.REGEX["IES"], word):
word = self._find_single("e", word)
return word
if re.search(self.REGEX["SES"], word):
return word
if re.search(self.REGEX["E"], word):
word = self._find_single("e", word)
return word
if re.search(self.REGEX["ION"], word) and len(word) > 4:
word = self._find_single("i", word)
return word
if re.search(self.REGEX["ED"], word) and len(word) >= 4:
word = self._find_single("e", word)
return word
return word
def _check_special_cases(self, word=None):
word = word if word else self._token
if re.search(self.REGEX["AU"], word):
word = self._find_multiple(self.REGEX["AU"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["AI"], word):
word = self._find_multiple(self.REGEX["AI"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["QU"], word):
word = self._find_multiple(self.REGEX["QU"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["AE"], word):
word = self._find_multiple(self.REGEX["AE"], word, rev=True)
return self._check_special_cases(word)
if re.search(self.REGEX["DOUBLE"], word):
word = self._find_multiple(self.REGEX["DOUBLE"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["OU"], word):
word = self._find_multiple(self.REGEX["OU"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["EY"], word):
word = self._find_multiple(self.REGEX["EY"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["YV"], word):
word = self._find_multiple(self.REGEX["YV"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["EA"], word):
word = self._find_multiple(self.REGEX["EA"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["UI"], word):
word = self._find_multiple(self.REGEX["UI"], word)
return self._check_special_cases(word)
if re.search(self.REGEX["OY"], word):
word = self._find_multiple(self.REGEX["OY"], word)
return self._check_special_cases(word)
self._modified_word = word
return self._modified_word
def _simple_stressor(self, restore_syllables=0):
count = self._syllable_count
if count == 1:
return [[self.DUMMY_STRESSED]]
else:
return [[self.DUMMY_STRESSED if i == self._syllable_count - 2 else self.DUMMY_UNSTRESSED for i in range(self._syllable_count + restore_syllables) ]]
def _complicated_stressor(self, POS, restore_syllables=0):
if POS == "V" or any([self._token.endswith(ending) for ending in ["est", "eth", "ise", "ize"] ]):
return [[self.DUMMY_STRESSED if i == 0 else self.DUMMY_UNSTRESSED for i in range(self._syllable_count + restore_syllables) ]]
if POS in ["N", "J"]:
return [[self.DUMMY_STRESSED if i == self._syllable_count - 1 else self.DUMMY_UNSTRESSED for i in range(self._syllable_count + restore_syllables) ]]
def _check_ed(self, phonemes):
if self._token.endswith("ed") and len(self._token) > 4:
antepenult_letter = self._token[-3]
if antepenult_letter not in "aeiou":
phonemes_copy = deepcopy(phonemes[0])
phonemes_copy.insert(-1,'EH0')
phonemes.append(phonemes_copy)
self._reduced_syllables -= 1
return phonemes
def _check_vowel_cluster(self, phonemes):
if re.search(self.REGEX["EOU"], self._token):
phonemes_len = len(phonemes[0])
reduced_phonemes = [self.DUMMY_STRESSED if i == 0 else self.DUMMY_UNSTRESSED for i in range(phonemes_len - 1) ]
phonemes.append(reduced_phonemes)
if re.search(self.REGEX["EON"], self._token):
reduced_phonemes = [self.DUMMY_STRESSED if i == 0 else self.DUMMY_UNSTRESSED for i in range(2) ]
phonemes.append(reduced_phonemes)
if re.search(self.REGEX["VLV"], self._token):
phonemes_len = len(phonemes[0])
reduced_phonemes = [self.DUMMY_STRESSED if i == 0 else self.DUMMY_UNSTRESSED for i in range(phonemes_len - 1) ]
phonemes.append(reduced_phonemes)
return phonemes
def _create_phoneme_repr(self):
"""
Check POS, if none, use simple_stressor, otherwise, use complicated_stressor
"""
tag = pos_tag([self._token])[0][1]
if tag.startswith("V") or any([self._token.endswith(ending) for ending in ["est", "eth", "ise", "ize"] ]): #or tag.startswith("N") or tag.startswith("J")
self.tentative_phonemes = self._complicated_stressor(tag[0])
if self._token.endswith('ed'):
self.tentative_phonemes = self._check_ed(self.tentative_phonemes)
if self._modified_word:
self.tentative_phonemes.append(self._complicated_stressor(tag[0], self._reduced_syllables)[0])
else:
self.tentative_phonemes = self._simple_stressor()
if self._token.endswith('ed'):
self.tentative_phonemes = self._check_ed(self.tentative_phonemes)
if self._modified_word:
self.tentative_phonemes.append(self._simple_stressor(self._reduced_syllables)[0])
def _final_reduction_check(self):
with_vowel_cluster = self._check_vowel_cluster(self.tentative_phonemes)
self.tentative_phonemes = with_vowel_cluster
def _main(self):
self._get_syllable_count()
self._create_phoneme_repr()
self._final_reduction_check()
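# Usage sketch (hypothetical word; pos_tag needs nltk's 'averaged_perceptron_tagger'
# data to be downloaded first):
#   syl = SpellingSyllabifier("wandered")
#   print(syl.tentative_phonemes) # candidate stress patterns built from the AH1/AH0/EH0 dummies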
| true
|
6923cb61c5715437746f44e20c14ec82cfa5988e
|
Python
|
houking-can/GenDataset
|
/acl.py
|
UTF-8
| 962
| 2.65625
| 3
|
[] |
no_license
|
import os
import shutil
import json
import re
def iter_files(path):
"""Walk through all files located under a root path."""
if os.path.isfile(path):
yield path
elif os.path.isdir(path):
for dirpath, _, filenames in os.walk(path):
for f in filenames:
yield os.path.join(dirpath, f)
else:
raise RuntimeError('Path %s is invalid' % path)
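# e.g. list(iter_files('some_dir')) walks every file under 'some_dir', while
# iter_files('one_file.json') yields just that path ('some_dir' and
# 'one_file.json' are placeholder names).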
d_path = r'E:\JSON\ACL'
files = iter_files(d_path)
for id,file in enumerate(files):
if file.endswith('.json'):
tmp = json.load(open(file))
abstract = tmp["abstract"]
if "title and abstract" in abstract.lower():
abstract = re.sub("title and abstract.*$",'',abstract,flags=re.I)
tmp["abstract"] =abstract
json.dump(tmp,open(file,'w'),indent=4)
print(file)
# files = iter_files(path)
# for id,file in enumerate(files):
# shutil.move(file,os.path.join(path,"%d_decoded.txt" % id))
| true
|
65ea0e92549e393489d39287a1439826736fdea8
|
Python
|
AHKerrigan/Think-Python
|
/exercise11_1.py
|
UTF-8
| 834
| 3.90625
| 4
|
[] |
no_license
|
""" This is a solution to an exercise from
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
Exercise 11-1
Write a function that reads the words in words.txt and stores them as keys in a dic‐
tionary. It doesn’t matter what the values are. Then you can use the in operator as a
fast way to check whether a string is in the dictionary.
"""
def to_dictionary(file):
"""Takes a file as input and returns a dictionary containing
each line as a key
Keyword Arguments:
file: the file we pull from
Return arguments:
d: the dictionary we will return"""
fin = open(file)
d = dict()
for line in fin:
d[line.strip()] = ''
return d
if __name__ == "__main__":
d = to_dictionary('words.txt')
print("hello")
print('exercise' in d)
| true
|
f124cdbb85b04a3a09bc9252dddf7d6633df03bb
|
Python
|
afauth/mudecay
|
/MuonDecay/.old/analysis/mainFile.py
|
UTF-8
| 19,225
| 3.234375
| 3
|
[] |
no_license
|
#Imports
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
from scipy.integrate import simps, trapz
from scipy.optimize import curve_fit
from scipy.stats import chisquare
###################################################################################################################
#1. Peaks
## 1.1.
def peak_finder(series, height): # peak_finder is applied to data frames holding y-axis values
    #print('= Calling function: peak_finder...')
    '''
    Helps find all the peaks of a waveform; in our case it finds two peaks.
    This function returns the x-axis positions.
    '''
    # get the actual peaks
    peaks, _ = find_peaks( (-1)*series, height = height ) # height is a chosen threshold parameter
    return(peaks)
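# Hand-checked example: for pd.Series([0, -10, 0, -8, 0]) and height=5,
# (-1)*series is [0, 10, 0, 8, 0], so peak_finder returns array([1, 3]).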
## 1.2.
def peaks_em_x(df, height, err_mess=0):
'''
This loop builds the peak list and flags errors from peak_finder,
i.e. whenever a waveform yields peaks != 2.
'''
peaks_em_x = []
counter = 0
for i in range(df.shape[1]):
evento = df[ df.columns[i] ]
_ = peak_finder(series=evento, height=height)
if len(_) != 2:
print(f'problem in {evento.name}: found {len(_)} peak(s) at {_}')
counter += 1
peaks_em_x.append(_)
if err_mess == 1:
print(f'\nloop finished with {counter} errors')
return(peaks_em_x)
## 1.3.
def peak_finder_xy(series, height):
'''
Uses peak_finder and returns the data as a Series holding the
x and y values of each peak.
'''
__ = peak_finder(series=series, height=height)
return(series.iloc[__])
## 1.4.
def peaks_em_xy(df, height, err_mess=0):
peaks_em_xy = pd.Series([], dtype=float) # explicit dtype silences the empty-Series FutureWarning
for i in range( df.shape[1] ):
evento = df[ df.columns[i] ]
_ = peak_finder_xy(series=evento, height=height)
peaks_em_xy = pd.concat([ peaks_em_xy, _ ])
if len(peaks_em_xy) != 2*(df.shape[1] ): # i.e. something other than two peaks per waveform was recognized
    print(f'there is an error in the peaks: {len(peaks_em_xy) - 2*(df.shape[1] )} extra peak(s) appeared')
else:
    if err_mess == 1:
        print('no problems were detected in the number of peaks')
peaks_em_xy = pd.DataFrame(peaks_em_xy).reset_index()
peaks_em_xy.columns = ['x', 'y']
return(peaks_em_xy)
##1.5.
def peaks_divididos_01(df, height, err_mess=0):
_ = peaks_em_xy(df=df, height=height, err_mess=err_mess)
peaks_0, peaks_1 = _.query('index % 2 == 0'), _.query('index % 2 != 0')
return(peaks_0, peaks_1)
###################################################################################################################
#2
##2.1.
def derivada_x(s): # takes a pandas Series; the returned values are x-axis coordinates
    '''
    Finds the differences between side-by-side elements. Used to tell when a
    pulse is starting or ending.
    '''
    VA_derivada_baseLine = 5 # maximum fluctuation/amplitude of the baseline;
    # arbitrary value used to decide whether the number in question lies
    # outside the baseline; not exactly a derivative
    _ = []
    for i in range( len(s) - 1 ): # i = index of the Series
        if abs(s[i] - s[i+1]) > VA_derivada_baseLine:
            _.append(i)
    return (_) # the returned values are x-axis coordinates
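# Hand-checked example: for s = pd.Series([0, 0, 10, 10, 0]) the side-by-side
# differences are 0, 10, 0, 10, so derivada_x(s) returns [1, 3].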
###################################################################################################################
#3. Contours
##3.1.
def contorno_limite_derivada(series, VA_1, VA_2, height): # takes a pandas Series to start
    '''
    Defines a window in which to look for a pulse, plus its delimiters.
    VA_1 and VA_2 are arbitrary variables that set the pulse width.
    '''
    peak = peak_finder(series, height=height) # find the peaks of the waveform
    s1 = series[ (peak[0] - VA_1):(peak[0] + VA_2) ] # slice around the peaks
    s2 = series[ (peak[1] - VA_1):(peak[1] + VA_2) ]
    df1 = pd.DataFrame( dict(s1 = s1) ).reset_index() # DataFrame with the sliced values
    df1.columns = ['time', 's1'] # rename the DataFrame columns
    df2 = pd.DataFrame( dict(s1 = s2) ).reset_index()
    df2.columns = ['time', 's2']
    '''
    Compute the "derivative" at each surrounding point to find the pulse
    delimiters, then return the DataFrame holding the contour's limiting values.
    '''
    indexLim_1 = derivada_x( df1['s1'] ) # limiting indices
    indexLim_2 = derivada_x( df2['s2'] )
    # keep only the limiting values of the DataFrame
    df1 = df1.iloc[ [ indexLim_1[0], indexLim_1[-1] ] ]
    df2 = df2.iloc[ [ indexLim_2[0], indexLim_2[-1] ] ]
    # print(df2) # Series marked by the columns
    # from the original Series we now have the pulse contour
    s1 = series[ df1['time'].iloc[0] : df1['time'].iloc[1]+1 ] # add 1 to include the last element
    s2 = series[ df2['time'].iloc[0] : df2['time'].iloc[1]+1 ]
    # print(s2)
    pulsos = s1, s2
    return(pulsos) # returns the two contours, one per pulse
# ##3.2.
def contorno_limite_arbitrario(df, VA_1, VA_2, height , err_mess=0):
'''
This loop looks, in each waveform of the original DataFrame, at the contour around each pulse, selected via the peak and the arbitrated widths.
For implementation reasons, we filter each waveform as desired and turn it into an array. With this,
I think these operations will be much faster. The result is stacked and turned into a DataFrame.
'''
peaks_0 , peaks_1 = peaks_divididos_01(df=df, height=height)
# if len(peaks_0) != len(peaks_1):
#     print('error while obtaining the split peaks')
contorno_0 = []
contorno_1 = []
'''
For events that "end at the edge" of the window, the contour size can be a
problem: either we fail to capture the whole pulse, or we cannot fit
everything into a DataFrame because of the size difference.
'''
aux = []
for i in range( df.shape[1] ):
evento = df[ df.columns[i] ]
s0 = evento.where(
(evento.index >= peaks_0.x.iloc[i] - VA_1) & (evento.index <= peaks_0.x.iloc[i] + VA_2)
).dropna().array
if len(s0) != 1 + VA_1 + VA_2:
s0 = np.append( s0 , np.full(1 + VA_1 + VA_2 - len(s0) , np.nan) )
aux.append(i)
# if len(s0) != 1 + VA_1 + VA_2:
#     print(f'error in {i}')
s1 = evento.where(
(evento.index >= peaks_1.x.iloc[i] - VA_1) & (evento.index <= peaks_1.x.iloc[i] + VA_2)
).dropna().array
if len(s1) != 1 + VA_1 + VA_2:
s1 = np.append( s1 , np.full(1 + VA_1 + VA_2 - len(s1) , np.nan) )
aux.append(i)
# if len(s1) != 1 + VA_1 + VA_2:
#     print(f'error in {i}')
contorno_0.append(s0)
contorno_1.append(s1)
contorno_0 = pd.DataFrame( np.array(contorno_0) ).T # contour points vs waveform
contorno_1 = pd.DataFrame( np.array(contorno_1) ).T
if err_mess == 1:
'''
Contour checking loop
'''
if len(contorno_0) != df.shape[1] or len(contorno_1) != df.shape[1]: # the original `and` only ever tested the second length
    _ = abs( len(contorno_0) - len(contorno_1) )
    __ = 'first' if len(contorno_0) > len(contorno_1) else 'second'
    print(f'The contours do not match in size. There are {_} extra in the {__}')
for _col, elemento in contorno_0.items(): # iterate columns: `contorno_0 or contorno_1` is ambiguous for DataFrames and raises
    if len(elemento) != VA_1 + VA_2 + 1:
        print(f'Error in {elemento.name}')
if len(aux) != 0:
print(f'\n{len(aux)} problem(s) were detected with the contour window size;\nThe problems are in {aux};\nFilled with NaN values\n')
else:
if err_mess == 1:
print('\nNo problems were detected with the contour window size\n')
#Returns two DataFrames with the contours of each waveform, one for each of the two peaks
return( contorno_0, contorno_1 )
def contorno_limite_arbitrario_picos(df, peaks_01, VA_1, VA_2, height, err_mess=0):
'''
This loop looks, in each waveform of the original DataFrame, at the contour around each pulse, selected via the peak and the arbitrated widths.
For implementation reasons, we filter each waveform as desired and turn it into an array. With this,
I think these operations will be much faster. The result is stacked and turned into a DataFrame.
'''
peaks_xy_0 , peaks_xy_1 = peaks_01
'''
For events that "end at the edge" of the window, the contour size can be a
problem: either we fail to capture the whole pulse, or we cannot fit
everything into a DataFrame because of the size difference.
'''
contorno_0 = []
aux_0 = []
for i in peaks_xy_0.index:
evento = df[ df.columns[i] ]
s0 = evento.where(
(evento.index >= peaks_xy_0['x'][i] - VA_1) & (evento.index <= peaks_xy_0['x'][i] + VA_2)
).dropna().array
if len(s0) != 1 + VA_1 + VA_2:
s0 = np.append( s0 , np.full(1 + VA_1 + VA_2 - len(s0) , np.nan) )
aux_0.append(i)
# if len(s0) != 1 + VA_1 + VA_2:
#     print(f'error in {i}')
contorno_0.append(s0)
contorno_1 = []
aux_1 = []
for i in peaks_xy_1.index:
evento = df[ df.columns[i] ]
s1 = evento.where(
(evento.index >= peaks_xy_1['x'][i] - VA_1) & (evento.index <= peaks_xy_1['x'][i] + VA_2)
).dropna().array
if len(s1) != 1 + VA_1 + VA_2:
s1 = np.append( s1 , np.full(1 + VA_1 + VA_2 - len(s1) , np.nan) )
aux_1.append(i)
# if len(s1) != 1 + VA_1 + VA_2:
#     print(f'error in {i}')
contorno_1.append(s1)
contorno_0 = pd.DataFrame( np.array(contorno_0) ).T # contour points vs waveform
contorno_1 = pd.DataFrame( np.array(contorno_1) ).T
if err_mess == 1:
'''
Contour checking loop
'''
if len(contorno_0) != df.shape[1] or len(contorno_1) != df.shape[1]: # the original `and` only ever tested the second length
    _ = abs( len(contorno_0) - len(contorno_1) )
    __ = 'first' if len(contorno_0) > len(contorno_1) else 'second'
    print(f'The contours do not match in size. There are {_} extra in the {__}')
for _col, elemento in contorno_0.items(): # iterate columns: `contorno_0 or contorno_1` is ambiguous for DataFrames and raises
    if len(elemento) != VA_1 + VA_2 + 1:
        print(f'Error in {elemento.name}')
aux = aux_0 + aux_1
if len(aux) != 0:
print(f'\n{len(aux)} problem(s) were detected with the contour window size;\nThe problems are in {aux};\nFilled with NaN values\n')
else:
if err_mess == 1:
print('\nNo problems were detected with the contour window size\n')
'''
Returns two DataFrames holding the contours of each waveform, one for each of the two peaks
'''
return( contorno_0, contorno_1 )
#%%
#4. Integrals
##4.1.
def integral_simples(dados_em_y, dx):
_ = dados_em_y
I = 0
for i in range(len(_)):
y = _.iloc[i]
I += y * dx
return(I)
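# Hand-checked example: integral_simples(pd.Series([1, 2, 3]), dx=0.5)
# returns (1 + 2 + 3) * 0.5 = 3.0, i.e. a plain Riemann sum.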
def integral_simples_dataframe(df, dx):
integrais = np.array([])
for i in range( df.shape[1] ):
coluna = df[ df.columns[i] ].dropna()
integral = integral_simples(dados_em_y=coluna , dx=dx)
integrais = np.append(integrais , integral)
return(pd.Series(integrais))
#%%
#5. Base Line
##5.1.
def baseLine_sample(series, height, VA_1): # takes a pandas Series
    '''
    Returns a sample of the waveform's baseline, used to determine
    its statistical value.
    '''
    _ = peak_finder(series, height = height)
    '''Takes every element up to the start of the first pulse; right-exclusive interval'''
    if len(_) != 0: # it can happen that peak_finder finds nothing
        x_inicio_pulso_0 = _[0] - VA_1 # arbitrarily defined
        sample = series.iloc[:x_inicio_pulso_0]
    else:
        sample = None
    return( sample ) # this element is a Series
##5.2. Base Line
def baseLine(df, height, VA_1):
    baseLines = [] # will be a list of Series
    for i in range( df.shape[1] ):
        evento = df[ df.columns[i] ]
        baseLines.append( baseLine_sample(evento, height, VA_1 = VA_1) )
    baseLines = pd.concat([b for b in baseLines if b is not None]) # drop waveforms with no detected peak; pd.concat cannot take None
    return(baseLines) # returns a Series with the baseline data
#%%
#6. HANDLING FILTERING AND SATURATION IN THE DATAFRAME
def filtra_saturacao_total(df, height, VA_1, VA_2):
_ = peaks_divididos_01( df, height = height )
peaks_xy_0 , peaks_xy_1 = _[0] , _[1]
filt_0 = pd.Series( peaks_xy_0.query('y > y.min()').index // 2 )
filt_1 = pd.Series( peaks_xy_1.query('y > y.min()').index // 2 )
_ = pd.concat( (filt_0, filt_1), ignore_index=True ).value_counts()
filt_saturacao = pd.Series (
_.where( _ == 2).dropna().sort_index().index
) # appears twice <--> it is in both Series
df_filt_saturacao = df.iloc[ : , filt_saturacao]
return (df_filt_saturacao)
def filtra_saturacao_parcial(df, height, VA_1, VA_2):
_ = peaks_divididos_01( df, height = height )
peaks_xy_0 , peaks_xy_1 = _[0] , _[1]
filt_0 = peaks_xy_0.query('y > y.min()')
filt_1 = peaks_xy_1.query('y > y.min()')
return( np.array([ filt_0, filt_1 ]) )
def filtra_delta_t(df, convert_to_microsec, time_in_ADCch, height):
_ = pd.DataFrame( peaks_em_x( df=df, height=height), columns = ['peak_0', 'peak_1'] )
delta_x = ( _['peak_1'] - _['peak_0'] )
delta_x.name = 'delta_x'
delta_x_filtrado = delta_x.where(delta_x <= convert_to_microsec*time_in_ADCch).dropna()
df_filtrado = df.iloc[: , delta_x_filtrado.index]
return(df_filtrado)
#%% Plot: exponential fit of the time differences
def curve_fit_exponencial(df, height, convert_to_microsec, custom_number_of_bins=0, plot_graph=1, plt_points=0, plt_bars=1, path_to_save='../Documents/images/curve_fit-vida_media.png'):
_ = pd.DataFrame( peaks_em_x( df=df, height=height), columns = ['peak_0', 'peak_1'] )
delta_x = ( _['peak_1'] - _['peak_0'] )*convert_to_microsec
delta_x.name = 'delta_x'
#print(f'Total of {len(delta_x)} events')
'''
(a) Define the fit function for the exponential
'''
def fit_function(t, A, tau, C): # exponential fit function
    return( A * np.e**(-t/tau) + C )
'''
(b) Collect the data and make preliminary plots to keep track of the results
Note: the number_of_bins variable is the standard way of quantifying how many bins a histogram has
source: https://stackoverflow.com/questions/60512271/how-can-i-extract-the-bins-from-seaborns-kde-distplot-object
'''
if custom_number_of_bins == 0:
number_of_bins = min( len( np.histogram_bin_edges(delta_x, bins="fd") ), 50 )
else:
number_of_bins = custom_number_of_bins
data_entries, bins = np.histogram(delta_x , bins = number_of_bins)
bins_centers = np.array([0.5 * (bins[i] + bins[i+1]) for i in range(len(bins)-1)])
'''
(c) Run the regression with Scipy's curve_fit
Note: it is important to give the parameters an initial value as a first guess.
In this case I simply guessed the values a few times.
'''
p0 = np.array([ 0.5, 0.1, 1 ]) # initial guess for the parameters: y(t) = A * np.e**(-t/tau) + C
coeff, cov_coeff = curve_fit( fit_function , xdata = bins_centers , ydata = data_entries, p0 = p0 )
#coeff, cov_coeff = curve_fit( fit_function , xdata = bins_centers , ydata = data_entries )
'''
(d) uncertainties
'''
#Compute the uncertainty
coeff_error = np.sqrt(np.diag(cov_coeff)) / np.sqrt(number_of_bins - 1) #"np.sqrt(cov) / sqrt(n - 1)"
'''
(e) Results
'''
coeff_results = pd.DataFrame( [coeff, coeff_error] ).T
coeff_results.rename( columns = {0:'valor', 1:'incerteza'}, index ={ 0:'A', 1:'tau', 2:'C' } , inplace = True )
'''
(f) Plot with histogram and curve regression
'''
if plot_graph == 1:
# details
fig = plt.figure( figsize=(8,6), dpi=200 )
plt.title('Time differences between the first and the second pulse')
#plt.ylabel(r'$\dfrac{\Delta N}{\Delta t}$')
plt.xlabel(r'Time difference ($\mu$s)')
plt.ylabel('Count / time interval (Hz)')
#plt.legend()
# histogram plot
if plt_bars == 1:
    #sns.histplot(delta_x*convert_to_microsec, color='gray', bins=number_of_bins)
    sns.histplot(delta_x, color='gray', bins=number_of_bins)
# bin-centers plot
if plt_points == 1:
    #plt.scatter(bins_centers*convert_to_microsec, data_entries, color = 'black', label = 'bin centers')
    plt.scatter(bins_centers, data_entries, color = 'black', label = 'bin centers')
# curve_fit plot
x = np.linspace(0,10, 10000)
plt.plot(
x, fit_function(x, coeff[0], coeff[1], coeff[2]),
color = 'orange', label = f'y(t) = {round(coeff[0])}*e^-t/{round(coeff[1])} + {round(coeff[2])}'
)
return(coeff_results, fig, bins_centers, data_entries)
else:
#print('No figure')
return(coeff_results, bins_centers, data_entries)
#%%
print('mainFile and libraries imported')
| true
|
3e472c5972d90ad0848cc8a6b5409d6b5cc4effa
|
Python
|
BuysDB/SingleCellMultiOmics
|
/singlecellmultiomics/barcodeFileParser/barcodeFileParser.py
|
UTF-8
| 10,138
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import logging
from colorama import Fore
from colorama import Back
from colorama import Style
import os
import collections
import itertools
import gzip
#logging.basicConfig(level=logging.DEBUG)
# http://codereview.stackexchange.com/questions/88912/create-a-list-of-all-strings-within-hamming-distance-of-a-reference-string-with
def hamming_circle(s, n, alphabet):
for positions in itertools.combinations(range(len(s)), n):
for replacements in itertools.product(
range(len(alphabet) - 1), repeat=n):
cousin = list(s)
for p, r in zip(positions, replacements):
if cousin[p] == alphabet[r]:
cousin[p] = alphabet[-1]
else:
cousin[p] = alphabet[r]
yield ''.join(cousin)
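# Hand-checked example: sorted(hamming_circle('AT', 1, 'ACGT')) gives
# ['AA', 'AC', 'AG', 'CT', 'GT', 'TT'], every string at Hamming distance
# exactly 1 from 'AT' over that alphabet.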
class BarcodeParser():
def path_to_barcode_alias(self, path):
barcodeFileAlias = os.path.splitext(
os.path.basename(path))[0].replace('.gz','').replace('.bc','')
return barcodeFileAlias
def parse_pending_barcode_file_of_alias(self, alias):
if not alias in self.pending_files:
raise ValueError(f"trying to load {alias}, but barcode file not found")
logging.info(f"Loading promised barcode file {alias}")
self.parse_barcode_file( self.pending_files[alias] )
logging.info(f"Performing hamming extension for {alias}")
self.expand(self.hammingDistanceExpansion, alias=alias)
del self.pending_files[alias]
def parse_barcode_file(self, barcodeFile):
barcodeFileAlias = self.path_to_barcode_alias(barcodeFile)
# Decide the file type (index first or name first)
indexNotFirst = False
with gzip.open(barcodeFile,'rt') if barcodeFile.endswith('.gz') else open(barcodeFile) as f :
for i, line in enumerate(f):
parts = line.strip().split()
if len(parts) == 1 and ' ' in line:
parts = line.strip().split(' ')
if len(parts) == 1:
pass
elif len(parts) == 2:
indexFirst = not all((c in 'ATCGNX') for c in parts[0])
if not indexFirst:
indexNotFirst = True
# print(parts[1],indexFirst)
nospec=False
i=0
with gzip.open(barcodeFile,'rt') if barcodeFile.endswith('.gz') else open(barcodeFile) as f :
for i, line in enumerate(f):
parts = line.strip().split()
if len(parts) == 1 and ' ' in line:
parts = line.strip().split(' ')
if len(parts) == 1:
self.addBarcode(
barcodeFileAlias, barcode=parts[0], index=i+1)
#self.barcodes[barcodeFileAlias][parts[0]] = i
if not nospec: # only show this once:
logging.info(
f"\t{parts[0]}:{i} (No index specified in file)")
nospec=True
elif len(parts) == 2:
if indexNotFirst:
barcode, index = parts
else:
index, barcode = parts
#self.barcodes[barcodeFileAlias][barcode] = index
# When the index is only digits, convert to integer
try:
if int(index)==int(str(int(index))):
index = int(index)
else:
pass
except Exception as e:
pass
self.addBarcode(
barcodeFileAlias, barcode=barcode, index=index)
if not nospec: # only show this once:
logging.info(
f"\t{barcode}:{index} (index was specified in file, {'index' if indexFirst else 'barcode'} on first column)")
nospec=True
else:
e = f'The barcode file {barcodeFile} contains more than two columns. Failed to parse!'
logging.error(e)
raise ValueError(e)
logging.info(f'done loading {i} barcodes')
def __init__(
self,
barcodeDirectory='barcodes',
hammingDistanceExpansion=0,
spaceFill=False,
lazyLoad=None # these aliases will not be loaded until requested; '*' matches all files
):
self.hammingDistanceExpansion = hammingDistanceExpansion
barcodeDirectory = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
barcodeDirectory)
barcode_files = list(glob.glob(f'{barcodeDirectory}/*'))
self.spaceFill = spaceFill
self.hammingDistanceExpansion = hammingDistanceExpansion
self.barcodes = collections.defaultdict(
dict) # alias -> barcode -> index
# alias -> barcode -> (index, hammingDistance)
self.extendedBarcodes = collections.defaultdict(dict)
self.pending_files = dict()
for barcodeFile in barcode_files:
barcodeFileAlias = self.path_to_barcode_alias(barcodeFile)
if lazyLoad is not None and (barcodeFileAlias in lazyLoad or lazyLoad =='*'):
logging.info(f"Lazy loading {barcodeFile}, alias {barcodeFileAlias}")
self.pending_files[barcodeFileAlias] = barcodeFile
continue
logging.info(f"Parsing {barcodeFile}, alias {barcodeFileAlias}")
self.parse_barcode_file(barcodeFile)
if hammingDistanceExpansion > 0:
self.expand(hammingDistanceExpansion, alias=barcodeFileAlias)
def getTargetCount(self, barcodeFileAlias):
return(len(self.barcodes[barcodeFileAlias]), len(self.extendedBarcodes[barcodeFileAlias]))
def expand(
self,
hammingDistanceExpansion,
alias,
reportCollisions=True,
spaceFill=None):
barcodes = self.barcodes[alias]
# hammingBarcode -> ( ( distance,origin) )
hammingSpace = collections.defaultdict(list)
for barcode in barcodes:
for hammingDistance in range(0, hammingDistanceExpansion + 1):
for hammingInstance in hamming_circle(
barcode, hammingDistance, 'ACTGN'):
hammingSpace[hammingInstance].append(
(hammingDistance, barcode))
# Resolve all
for hammingBarcode in hammingSpace:
# Check if there is a closest origin:
sortedDistances = sorted(hammingSpace[hammingBarcode])
if len(sortedDistances) > 1 and (
sortedDistances[0][0] == sortedDistances[1][0]):
# We cannot resolve this, two or more origins are at the same distance:
#print('Cannot resolve %s' % hammingBarcode)
continue
hammingDistance, origin = sortedDistances[0]
self.addBarcode(
alias,
barcode=hammingBarcode,
index=self.barcodes[alias][origin],
hammingDistance=hammingDistance,
originBarcode=origin)
def addBarcode(
self,
barcodeFileAlias,
barcode,
index,
hammingDistance=0,
originBarcode=None):
if hammingDistance == 0:
self.barcodes[barcodeFileAlias][barcode] = index
else:
if originBarcode is None:
raise ValueError()
self.extendedBarcodes[barcodeFileAlias][barcode] = (
index, originBarcode, hammingDistance)
# get index and hamming distance to barcode returns none if not Available
def getIndexCorrectedBarcodeAndHammingDistance(self, barcode, alias, try_lazy_load_pending=True):
# Check if the alias still needs to be loaded:
if barcode in self.barcodes[alias]:
return (self.barcodes[alias][barcode], barcode, 0)
if barcode in self.extendedBarcodes[alias]:
return self.extendedBarcodes[alias][barcode]
if alias in self.pending_files:
if not try_lazy_load_pending:
raise RecursionError()
self.parse_pending_barcode_file_of_alias(alias)
return self.getIndexCorrectedBarcodeAndHammingDistance(barcode, alias, try_lazy_load_pending=False)
return (None, None, None)
def list(self, showBarcodes=5): # showBarcodes=None show all
for barcodeAlias, mapping in self.barcodes.items():
print(
f'{len(mapping)} barcodes{Style.DIM} obtained from {Style.RESET_ALL}{barcodeAlias}')
if len(mapping):
for bcId in list(mapping.keys())[:showBarcodes]:
try:
print(('%s%s%s%s%s%s' % (Fore.GREEN, bcId,
Fore.WHITE, '→', mapping[bcId], Style.RESET_ALL)))
except Exception as e:
print(('%s%s%s%s%s%s' % (Fore.GREEN, bcId,
Fore.WHITE, '->', mapping[bcId], Style.RESET_ALL)))
if showBarcodes is not None and len(mapping) > showBarcodes:
print(
f'{Style.DIM} %s more ...\n{Style.RESET_ALL}' %
(len(mapping) - showBarcodes))
def getBarcodeMapping(self):
return self.barcodes
def __getitem__(self, alias):
if alias in self.pending_files:
self.parse_pending_barcode_file_of_alias(alias)
return self.barcodes.get(alias)
| true
|
585391d23f82681d69728d4afc2f2c389f75e4d5
|
Python
|
VadimChorrny/DEV-HUB
|
/_XSperions/add_cog.py
|
UTF-8
| 1,649
| 2.796875
| 3
|
[] |
no_license
|
#----------------------------------------------+
# Enter Cog name |
#----------------------------------------------+
COG_NAME = "captcha"
#----------------------------------------------+
# Cog template |
#----------------------------------------------+
cog_template = f"""import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
#----------------------------------------------+
# Functions |
#----------------------------------------------+
class {COG_NAME}(commands.Cog):
def __init__(self, client):
self.client = client
#----------------------------------------------+
# Events |
#----------------------------------------------+
@commands.Cog.listener()
async def on_ready(self):
print(f">> {COG_NAME} cog is loaded")
#----------------------------------------------+
# Commands |
#----------------------------------------------+
#----------------------------------------------+
# Errors |
#----------------------------------------------+
def setup(client):
client.add_cog({COG_NAME}(client))"""
import os
if "cogs" not in os.listdir("."):
print("No [cogs] folder in current directory")
elif f"{COG_NAME}.py" in os.listdir("cogs"):
print(f"[{COG_NAME}.py] already exists")
else:
with open(f"cogs/{COG_NAME}.py", "w", encoding="utf8") as _file:
_file.write(cog_template)
print("Cog is deployed")
| true
|
9b1e909df6bf98b472594f0ca584d679142bc9a0
|
Python
|
samparkewolfe/LSTM_Synth
|
/train_model.py
|
UTF-8
| 4,767
| 3
| 3
|
[] |
no_license
|
#This python script handles training the models.
#Import the needed libraries
import os, sys
import numpy as np
import h5py
import keras
from keras.models import Sequential, load_model
from keras.callbacks import ModelCheckpoint
#Declare the variables of the inputs
model_name = ''
dataset_name = ''
nb_epoch = ''
it_i = 0
bestWeights = ""
#Use the sys library to get variable inputs to the python script
if len(sys.argv) < 4:
#if the wrong number of inputs was given tell the user what is supposed to be inputted
raise ValueError('Incorrect arguments, Correct format: python train_model.py [name_of_model] [name_of_dataset] [num_epochs]')
else:
#If the correct number of arguments are given, allocate them.
model_name = sys.argv[1]
dataset_name = sys.argv[2]
nb_epoch = sys.argv[3]
#Tell the user what's going on.
print('Training Model:', model_name)
print('Training Dataset:', dataset_name)
print('Training Epochs:', nb_epoch)
#load the Keras model and data set from the specific directory of the work space using Keras an h5py
model = load_model('models/' + model_name +'/'+ model_name + '.h5')
dataset = h5py.File('datasets/' + dataset_name +'/'+ dataset_name + '.h5' ,'r')
#get the info from the files.
X = dataset['X'][:]
Y = dataset['Y'][:]
nb_epoch = int(nb_epoch)
#create the name for the new weights we are about to make.
weights_name = model_name +'_'+dataset_name+'_'+str(it_i)
#if this model has already been trained on this dataset we automatically load the most recent set of weights and train from the best epoch.
while os.path.exists('models/' + model_name +'/'+'weights/'+weights_name + "/" ):
#Get all of the files in this directory.
files = os.listdir('models/' + model_name +'/'+'weights/'+weights_name + "/")
#Always start training from the best epoch that was last trained.
#initialize to -1 just in case the best one was 0.
bestEpoch = -1
#iterate through all the files names we got from os.listdir
for _file in (files):
#Reset the epoch string we are building.
currEpochString = ''
#Get the value of the epoch of the files from the last 4 values of the files name.
for i in range(0, 4):
currEpochString = currEpochString + _file[(-7+i)]
#Make that value and integer to be able to know how good it is.
currEpoch = int(currEpochString)
#If that epoch is larger than the best one so far, the loss must have improved at that epoch.
if( currEpoch > bestEpoch):
#Therefore this is now our best epoch
bestEpoch = currEpoch
#And the best weights are the files that we are currently on.
bestWeights = _file
#After iterating through all the files and finding the best one load the weights.
model.load_weights('models/' + model_name + '/weights/' + weights_name+ '/' +bestWeights)
#Tell them what we loaded.
print('Loaded Weights:',bestWeights)
#Because some weights already existed for this file we need to give the weights a new name,
#if the new name we give it also existed then the while loop will repeat until this new name does not exist meaning we are making new files.
it_i += 1
weights_name = model_name +'_'+dataset_name+'_'+str(it_i)
#Make the specific directory for these weights in the correct place in the work space.
if not os.path.exists('models/' + model_name +'/'+'weights/'+weights_name):
os.makedirs('models/' + model_name +'/'+'weights/'+weights_name)
#logs include `acc` and `loss`, and optionally include `val_loss`
#We could add the loss of the epoch in to the name of the weights but then we wouldn't be able to iterate though them and find the best one as easy.
#filepath= 'models/' + model_name +'/'+'weights/'+weights_name+'/'+'weights-improvement-{epoch:04d}-{loss:.2f}.h5'
#Instead we just save a set of the weights with a unique name to that epoch with 4 digits.
filepath= 'models/' + model_name +'/'+'weights/'+weights_name+'/'+weights_name+'_{epoch:00004d}.h5'
#Create a checkpoint class to give to the training function.
#This checkpoint will be called every time the loss of the training. It will only save the weights when the loss gets better.
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', period=1)
#Do this
callbacks_list = [checkpoint]
#START TRAINING.
model.fit(X, Y, epochs=nb_epoch, callbacks=callbacks_list) # Keras 2 renamed the 'nb_epoch' argument to 'epochs'
#If the directory does not exist for these weights make one.
if not os.path.exists('models/' + model_name +'/'+'weights/'+weights_name):
os.makedirs('models/' + model_name +'/'+'weights/'+weights_name)
#Print finished
print('Saved Weights:',weights_name)
| true
|
1f51aa621b523ed5465b6ad5674b3ec47e6aa7ad
|
Python
|
AashishMehtoliya/Diabetes
|
/diabetes.py
|
UTF-8
| 663
| 2.9375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 16:37:18 2019
@author: Aashish Mehtoliya
"""
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
df = loadtxt('pima-indians-diabetes.csv', delimiter=",")
X = df[:,0:8]
Y = df[:,8]
X_train, X_test, y_train, y_test = train_test_split(X,Y,test_size=0.3,random_state=23) # sklearn's parameter is 'random_state'; 'random_seed' raises a TypeError
model = XGBClassifier()
model.fit(X_train,y_train)
print(model)
y_pred = model.predict(X_test)
predictions = [round(i) for i in y_pred]
accuracy = accuracy_score(y_test,predictions)
print(accuracy*100.0)
| true
|
18ab555bd279c090344e88f1c0b15ce4741dc7be
|
Python
|
kokliangchng/Sandbox
|
/week3/exceptionsToComplete.py
|
UTF-8
| 235
| 3.796875
| 4
|
[] |
no_license
|
finished=False
result=0
while not finished:
try:
result = int(input("Enter a number"))
finished=True
except ValueError:
print("Please enter a valid number")
print("Valid result is :",result)
| true
|
d327760f9bca8983ca03c002ae2a6d39de8a5bb6
|
Python
|
DDKinger/FlowFly
|
/code/data_process_0.py
|
UTF-8
| 6,283
| 2.71875
| 3
|
[] |
no_license
|
import os # used by normalization() below but missing from the original imports
import numpy as np # used throughout but missing from the original imports
import scipy.io as sio
import abc
from functools import partial
# MAXIMUM_FLOW = 1500
NUM_TRAIN = 14400
class AbstractDataset(abc.ABC):
def __init__(self, x):
    self._x = x # stored privately: assigning to self.x would hit the read-only property below
@property
def x(self):
    return self._x # returning self.x here would recurse forever
@abc.abstractmethod
def fmap(self, f):
pass
def __len__(self):
return len(self.x)
@abc.abstractmethod
def to_dict(self):
pass
# @abc.abstractmethod
# def denormalization(self):
# pass
class RawDataset(AbstractDataset):
def __init__(self, x):
super().__init__(x)
def fmap(self, f):
return RawDataset(*f(self.x)) # 'x' was an undefined name here; f must return a tuple
def to_dict(self):
return {'traffic_flow': self.x}
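# Usage sketch (hypothetical array 'arr'): RawDataset(arr).fmap(lambda x: (x / 2.0,))
# returns a new RawDataset with halved values; note fmap expects f to return a tuple.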
class MinMaxNormalizedDataset(AbstractDataset):
def __init__(self, x, x_max):
super().__init__(x)
self.x_max = x_max
def fmap(self, f):
return MinMaxNormalizedDataset(*f(self.x), self.x_max) # the original passed self.y, which this class does not have
def to_dict(self):
return {'traffic_flow': self.x, 'x_max': self.x_max}
class GaussianNormalizedDataset(AbstractDataset):
def __init__(self, x, x_mean, x_std, x_max):
super().__init__(x)
self.x_mean = x_mean
self.x_std = x_std
self.x_max = x_max
    def fmap(self, f):
        return GaussianNormalizedDataset(f(self.x), self.x_mean, self.x_std, self.x_max)
def to_dict(self):
return {
'x_input': self.x,
'x_max': self.x_max,
'x_mean': self.x_mean,
'x_std': self.x_std,
}
def shuffle_rows(x):
    # formerly commented out; renamed so it does not shadow the `shuffle`
    # flag of generate_data below
    np.random.seed(400)
    randomorder = np.arange(len(x))
    np.random.shuffle(randomorder)
    return x[randomorder]
def load_data(path):
data = sio.loadmat(f"{path}.mat")
return RawDataset(data['traffic_flow'])
def normalization(prefix, processing, is_training):
full_path = prefix + suffix(processing, is_training)
if os.path.exists(full_path):
return load_data(full_path)
data = processing(load_data(prefix), is_training)
    sio.savemat(full_path, data.to_dict())
return data
def minmax_normal(data: RawDataset, is_training, num_train=NUM_TRAIN):
    if is_training:
        x_max = np.amax(data.x)
    else:
        # the original source left this branch unfinished; at test time
        # x_max has to come from the training statistics
        raise NotImplementedError("x_max must be taken from the training set")
    return MinMaxNormalizedDataset(data.x/x_max, x_max)
def gaussian_normal(data: RawDataset, is_training, num_train=NUM_TRAIN):
    data = minmax_normal(data, is_training, num_train)
    x_mean = np.mean(data.x[:num_train, :], axis=0)
    x_std = np.std(data.x[:num_train, :], axis=0)
return GaussianNormalizedDataset(whiten(data.x, x_mean, x_std),
x_mean, x_std, data.x_max)
def whiten(x, mean, std):
return (x-mean)/std
def suffix(func, is_training):
if is_training:
sur = '_train'
else:
sur = '_test'
if func is minmax_normal:
return sur+'_normal'
if func is gaussian_normal:
return sur+'_gaussian'
raise ValueError(f"Not valid function: {func}")
def split_dataset(x, dataset_type, num_train=NUM_TRAIN):
    # formerly commented out; adapted to the single-array datasets used here
    if dataset_type.lower() == 'train':
        return x[:num_train, :]
    if dataset_type.lower() == 'test':
        return x[num_train:, :]
    raise ValueError(
        f"Invalid dataset type {dataset_type}, expected train or test.")
def generate_data(prefix, is_training, shuffle=True, gaussian=False):
if gaussian:
data = normalization(prefix, gaussian_normal, is_training)
else:
data = normalization(prefix, minmax_normal, is_training)
if is_training:
data = data.fmap(partial(split_dataset, dataset_type='train'))
if shuffle:
            data = data.fmap(shuffle_rows)
print("train data generated")
else:
data = data.fmap(partial(split_dataset, dataset_type='test'))
print("test data generated")
print("data size:", len(data))
return data
### legacy implementation below; note that it redefines generate_data above
def generate_normal(dataset):
    dataset_normal = dataset+'_normal.mat'
    if not os.path.exists(dataset_normal):
        x_input = sio.loadmat(dataset+'.mat')['x_input']
        y_output = sio.loadmat(dataset+'.mat')['y_output']
        x_max = np.amax(x_input)
        x_input /= x_max
        y_output /= x_max
        sio.savemat(dataset_normal, {'x_input': x_input,
                                     'y_output': y_output, 'x_max': x_max})
    else:
        x_input = sio.loadmat(dataset_normal)['x_input']
        y_output = sio.loadmat(dataset_normal)['y_output']
        x_max = sio.loadmat(dataset_normal)['x_max']
    return x_input, y_output, x_max
def generate_gaussian(dataset, num_train=NUM_TRAIN):
    dataset_gaussian = dataset+'_gaussian.mat'
    if not os.path.exists(dataset_gaussian):
        x_input, y_output, x_max = generate_normal(dataset)
        x_mean = np.mean(x_input[:num_train, :], axis=0)
        # std computed from x_input (the original took it from y_output)
        x_std = np.std(x_input[:num_train, :], axis=0)
x_input -= x_mean
x_input /= x_std
y_output -= x_mean
y_output /= x_std
sio.savemat(dataset_gaussian, {'x_input': x_input, 'y_output': y_output,
'x_max': x_max, 'x_mean': x_mean, 'x_std': x_std})
else:
x_input = sio.loadmat(dataset_gaussian)['x_input']
y_output = sio.loadmat(dataset_gaussian)['y_output']
x_max = sio.loadmat(dataset_gaussian)['x_max']
x_mean = sio.loadmat(dataset_gaussian)['x_mean']
x_std = sio.loadmat(dataset_gaussian)['x_std']
return x_input, y_output, x_max, x_mean, x_std
def generate_data(dataset, is_training, shuffle=True, gaussian=False, num_train=NUM_TRAIN):
if gaussian:
x_input, y_output, x_max, x_mean, x_std = generate_gaussian(dataset)
else:
x_input, y_output, x_max = generate_normal(dataset)
x_mean = 0
x_std = 1
if is_training:
x = x_input[:num_train, :]
y = y_output[:num_train, :]
if shuffle:
np.random.seed(400)
randomorder = np.arange(len(x))
np.random.shuffle(randomorder)
x = x[randomorder]
y = y[randomorder]
print("train data generated")
else:
x = x_input[num_train:, :]
y = y_output[num_train:, :]
print("test data generated")
print("data size:", len(x))
return x, y, x_max, x_mean, x_std
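# Hedged usage sketch (not part of the original module); the 'flow_data'
# prefix is hypothetical and must name an existing .mat file with the arrays
# loaded above:
# x, y, x_max, x_mean, x_std = generate_data('flow_data', is_training=True,
#                                            shuffle=True, gaussian=True)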
| true
|
186aabe5e70b2cde29c28640dac7664dffcade52
|
Python
|
elektrik-elektronik-muhendisligi/Esp8266Examples
|
/10_Socket_TCP/socket_TCP_02_server_simple.py
|
UTF-8
| 1,500
| 2.953125
| 3
|
[] |
no_license
|
'''
https://www.binarytides.com/python-socket-programming-tutorial/
first example
'''
import socket
import sys
HOST = '' # Symbolic name meaning all available interfaces
PORT = 8888 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
try:
s.bind((HOST, PORT))
except socket.error as msg:
    print('Bind failed. Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))
sys.exit()
print('Socket bind complete')
s.listen(10)
print('Socket now listening')
# now keep talking with the client
# TODO: this does not work on a PC
# after connecting via telnet: telnet localhost 5000
# recv reads just one letter, then the rest of the loop runs
# all the way to accept, and it never connects again
# I tweaked it a bit compared to the example
# it is probably telnet's fault, which sends one letter at a time
while 1:
print("zacatek")
# wait to accept a connection - blocking call
conn, addr = s.accept()
# display client information
print('Connected with ' + addr[0] + ':' + str(addr[1]))
while 1:
print("recv")
data = conn.recv(1024)
print(data)
reply = 'OK...' + data.decode('utf-8')
print(reply)
print("tady")
if not data:
print("not data")
break
print("reply")
conn.sendall(reply.encode('utf-8'))
print("reply")
if data.decode('utf-8')=="e":
print("Konec")
break
conn.close()
print("conn close")
s.close()
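# Hedged companion sketch (not in the original file): a minimal client for
# exercising the server above; host/port match the constants defined earlier.
# import socket
# c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# c.connect(('127.0.0.1', 8888))
# c.sendall('hello'.encode('utf-8'))
# print(c.recv(1024).decode('utf-8'))  # expect 'OK...hello'
# c.sendall('e'.encode('utf-8'))       # 'e' makes the server close the connection
# c.close()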
| true
|
8065345665443f799494064174b97370d8aa88af
|
Python
|
AxentiAndrei2004/Introducere_Afi-are_-alcule
|
/Problema 10.py
|
UTF-8
| 236
| 2.90625
| 3
|
[] |
no_license
|
n=int(input('Enter the number='))
print(n,'*1=',n*1)
print(n,'*2=',n*2)
print(n,'*3=',n*3)
print(n,'*4=',n*4)
print(n,'*5=',n*5)
print(n,'*6=',n*6)
print(n,'*7=',n*7)
print(n,'*8=',n*8)
print(n,'*9=',n*9)
print(n,'*10=',n*10)
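# Equivalent loop form (a hedged sketch, not part of the original exercise):
# for i in range(1, 11):
#     print(n, '*' + str(i) + '=', n * i)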
| true
|
8031f8419eb69c9529f291b4b399b1239db5a19f
|
Python
|
elemel/drillion
|
/drillion/health_component.py
|
UTF-8
| 2,672
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
from drillion.component import Component
from drillion.maths import clamp
class HealthComponent(Component):
def __init__(self, update_phase, health=1.0, min_health=0.0,
max_health=1.0, regeneration=0.0, epsilon=0.001):
self._update_phase = update_phase
self._health = health
self._min_health = min_health
self._max_health = max_health
self._regeneration = regeneration
self._epsilon = epsilon
self._active = False
self._healing = False
@property
def health(self):
return self._health
@health.setter
def health(self, health):
self._health = health
self._update_healing()
@property
def min_health(self):
return self._min_health
@min_health.setter
def min_health(self, min_health):
self._min_health = min_health
self._update_healing()
@property
def max_health(self):
return self._max_health
@max_health.setter
def max_health(self, max_health):
self._max_health = max_health
self._update_healing()
@property
def regeneration(self):
return self._regeneration
@regeneration.setter
def regeneration(self, regeneration):
self._regeneration = regeneration
self._update_healing()
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, epsilon):
self._epsilon = epsilon
self._update_healing()
@property
def at_min_health(self):
return self._health < self._min_health + self._epsilon
@property
def at_max_health(self):
return self._health > self._max_health - self._epsilon
def create(self):
self._active = True
self._update_healing()
def delete(self):
self._active = False
self._update_healing()
def update(self, dt):
self._health = clamp(self._health + dt * self._regeneration,
self._min_health, self._max_health)
self._update_healing()
def _update_healing(self):
healing = self._is_healing()
if healing != self._healing:
if self._healing:
self._update_phase.remove_handler(self)
self._healing = healing
if self._healing:
self._update_phase.add_handler(self)
def _is_healing(self):
if not self._active:
return False
if self._regeneration > self._epsilon:
return not self.at_max_health
if self._regeneration < -self._epsilon:
return not self.at_min_health
return False
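# Hedged usage sketch (not in the original file): a stub update phase that
# satisfies the add_handler/remove_handler interface the component expects.
if __name__ == '__main__':
    class StubUpdatePhase:
        def __init__(self):
            self.handlers = []
        def add_handler(self, handler):
            self.handlers.append(handler)
        def remove_handler(self, handler):
            self.handlers.remove(handler)
    phase = StubUpdatePhase()
    component = HealthComponent(phase, health=0.5, regeneration=0.1)
    component.create()  # registers with the phase because it is regenerating
    for handler in list(phase.handlers):
        handler.update(1.0)  # one simulated second: health climbs to 0.6
    print(component.health)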
| true
|
442ee784f506e195ce9fd3f7191eb7aa61f7ce25
|
Python
|
mrusinowski/pp1
|
/04-Subroutines/z31.py
|
UTF-8
| 167
| 3.921875
| 4
|
[] |
no_license
|
def reverse(t):
t.reverse()
return t
tab = [2,5,4,1,8,7,4,0,9]
print(f'Tablica: {tab}')
print(f'Tablica z elementami w odwrotnej kolejności: {reverse(tab)}')
| true
|
41e2e6d891224efec4265c9e844e738f24a1e329
|
Python
|
qingbol/BullyDetection
|
/predict.py
|
UTF-8
| 5,175
| 2.515625
| 3
|
[] |
no_license
|
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
from PIL import Image
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
# from loaddata import load_dataset
from tensorflow.python.platform import flags
def main(_):
#Load labels
label_lst=[]
rs = os.path.exists(FLAGS.output_labels)
# rs = os.path.exists("trained_model/output_labels.txt")
if rs==True:
file_handler =open(FLAGS.output_labels,mode='r')
contents = file_handler.readlines()
for name in contents:
name = name.strip('\n')
label_lst.append(name)
file_handler.close()
num_label=len(label_lst)
# print(label_lst)
#==============================================================
# Prepare input data
# classes = os.listdir(FLAGS.train_path)
# classes.sort()
# print(classes)
# num_classes = len(classes)
# print("number classes is ",num_classes)
# # We shall load all the training and validation images and labels into memory using openCV and use that during training
# x_batch, y_true_batch, _, cls_batch, categories = load_dataset(train_path, FLAGS.img_size, classes)
# print(y_true_batch)
#==============================================================
#======================read the image========================================
# First, pass the path of the image
# dir_path = os.path.dirname(os.path.realpath(__file__))
# image_path=sys.argv[1]
# filename = dir_path +'/' +image_path
images = []
# Reading the image using OpenCV
image = cv2.imread(FLAGS.img_file)
# Resizing the image to our desired size and preprocessing will be done exactly as done during training
image = cv2.resize(image, (FLAGS.img_size, FLAGS.img_size),0,0, cv2.INTER_LINEAR)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0/255.0)
#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.
input_img = images.reshape(1, FLAGS.img_size,FLAGS.img_size,FLAGS.num_channels)
#========================read the image======================================
with tf.Session() as sess:
## Let us restore the saved model
# Step-1: Recreate the network graph. At this step only graph is created.
saver = tf.train.import_meta_graph(FLAGS.trained_model)
# Step-2: Now let's load the weights saved using the restore method.
saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint))
# Accessing the default graph which we have restored
graph = tf.get_default_graph()
        # Now, let's get hold of the op that can be run to get the output.
# In the original network labels_pred is the tensor that is the prediction of the network
labels_pred = graph.get_tensor_by_name("labels_pred:0")
## Let's feed the images to the input placeholders
data_placeholder= graph.get_tensor_by_name("data_placeholder:0")
label_placeholder = graph.get_tensor_by_name("label_placeholder:0")
# label_false = np.zeros((1, num_classes))
label_false = np.zeros((1, num_label))
### Creating the feed_dict that is required to be fed to calculate y_pred
feed_dict_testing = {data_placeholder: input_img, label_placeholder: label_false}
result=sess.run(labels_pred, feed_dict=feed_dict_testing)
# result is of this format [probabiliy_of_rose probability_of_sunflower]
# print(result)
#print(label_lst)
class_pred= tf.argmax(result, axis=1)
label_index=sess.run(class_pred)
# print(label_index)
# print("This image belongs to category: " classes[label_index[0]])
print(label_lst[label_index[0]])
# print(classes[label_index[0]])
# # img = Image.open("data_cat/testing_data/1018.jpg")
# plt.figure(figsize=(6,4))
# plt.subplot(2,1,1)
# # print(input_img[0])
# # plt.imshow(input_img)
# plt.imshow(image)
# plt.axis('off')
# plt.subplot(2,1,2)
# # plt.figure()
# plt.barh([0, 1], result[0], alpha=0.5)
# plt.yticks([0, 1], classes)
# #plt.yticks([0, 1], label_lst)
# plt.xlabel('Probability')
# plt.xlim(0,1.01)
# plt.tight_layout()
# plt.show()
if __name__ == "__main__":
    #set some hyperparameters which can be reset before running
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("train_path", "data_bully/training_data", "path of testing data")
flags.DEFINE_string("img_file", "data_bully/testing_data/gossiping/gossiping0001.jpg", "path of testing data")
flags.DEFINE_integer('img_size', 128, 'image width=image height.')
flags.DEFINE_integer('num_channels', 3, 'image channel number.')
flags.DEFINE_string("output_labels", "trained_model/output_labels.txt", "store the labels")
flags.DEFINE_string("trained_model", "trained_model/bully_action.meta", "meta graph")
flags.DEFINE_string("checkpoint", "./trained_model/", "checkpoint")
tf.app.run()
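    # Hedged usage note (not in the original file): any flag defined above can
    # be overridden on the command line, e.g.
    #   python predict.py --img_file some/other/image.jpg --img_size 128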
| true
|
25a2ee4b86e4c1b883e8d1c48f5fc487c2ce91a2
|
Python
|
luoguanghao/bioinfo_algo_script
|
/M3_Week5_2BreakOnGenomeGraph.py
|
UTF-8
| 574
| 2.6875
| 3
|
[] |
no_license
|
if __name__ == '__main__':
    import os.path
    # join the directory and file name properly instead of concatenating strings
    dataset = open(os.path.join(os.path.dirname(__file__), 'dataset.txt')).read().strip().split('\n')
edges = dataset[0].strip('(').strip(')').split('), (')
bp = list(map(int,dataset[1].split(', ')))
edges = [list(map(int,i.split(', '))) for i in edges]
#print(edges)
    # an edge may be stored in either orientation, so try both orders
    try:
        i1 = edges.index([bp[1], bp[0]])
    except ValueError:
        i1 = edges.index([bp[0], bp[1]])
    try:
        i2 = edges.index([bp[2], bp[3]])
    except ValueError:
        i2 = edges.index([bp[3], bp[2]])
edges[i1]=[bp[2],bp[0]]
edges[i2]=[bp[1],bp[3]]
for e in edges:
print('(%d, %d), '%(e[0],e[1]),end='')
| true
|
52dc52c762b28f4fc9e302b246a25ad0eda58426
|
Python
|
Adashian/goit-python
|
/module_8/hw_8.py
|
UTF-8
| 1,584
| 3.265625
| 3
|
[] |
no_license
|
from datetime import date, datetime, timedelta
from collections import defaultdict
def congratulate(users):
day_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    this_week = date.today().isocalendar().week  # birthdays for the following week are checked relative to this date
result = defaultdict(list)
for user in users:
b_day = user.get('birthday')
b_day_this_year = date(datetime.today().year, b_day.month, b_day.day)
b_day_week = b_day_this_year.isocalendar().week
b_day_week_day = b_day_this_year.isocalendar().weekday
if this_week + 1 == b_day_week:
day = day_names[b_day_week_day - 1]
if b_day_week_day - 1 > 4:
continue
result[day].append(user.get('name'))
if this_week == b_day_week and b_day_week_day - 1 > 4:
day = day_names[0]
result[day].append(user.get('name'))
for key, values in result.items():
print(f'{key}:', end=' ')
print(*values, sep=', ')
test_data = [
{'name': 'Andriy', 'birthday': date(1994, 5, 8)},
{'name': 'Ivan', 'birthday': date(1992, 8, 5)},
{'name': 'Georg', 'birthday': date(1995, 2, 12)},
{'name': 'Yaroslav', 'birthday': date(1994, 8, 8)},
{'name': 'Anton', 'birthday': date(1993, 10, 21)},
{'name': 'Kirill', 'birthday': date(1992, 5, 14)},
{'name': 'Alexandr', 'birthday': date(1995, 5, 8)},
{'name': 'August', 'birthday': date(1994, 6, 27)}
]
congratulate(test_data)
| true
|
4037482df9a31dcf09c875b1563e40312584b8b5
|
Python
|
pfhayes/euler
|
/solved/Euler105.py
|
UTF-8
| 571
| 2.953125
| 3
|
[] |
no_license
|
# Find the number of lines in Euler105.txt that obey the special sumset property
from useful import powerset
total = 0
for line in open("Euler105.txt"):
    s = list(map(int, line.split(",")))
    print(s, "", end="")
    bad = False
    for subs in powerset(s)[1:-1]:
        g = set(subs)
        comp = list(set(s) - g)
        for altSet in powerset(comp)[1:]:
            diff = (sum(g) - sum(altSet))
            # equal-sum subsets (diff == 0) are ruled out, and a larger
            # subset must have the larger sum (sign check)
            if diff == 0 or float(len(g) - len(altSet)) / diff < 0:
                print(g, altSet, end=" ")
                print("No!")
                bad = True
                break
        if bad:
            break
    else:
        print("Yes!", end=" ")
        k = sum(s)
        total += k
        print(k)
print(total)
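# Hedged note (not in the original): `useful.powerset` is a local helper; an
# equivalent, assuming it returns subsets ordered from the empty set to the
# full set:
# from itertools import chain, combinations
# def powerset(s):
#     return [list(c) for c in
#             chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))]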
| true
|
fbfc2f969525e6f290b2268e4de3ff52bc073d50
|
Python
|
JjVera96/chat_sockets
|
/server.py
|
UTF-8
| 6,177
| 2.59375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import socket
import select
from pymongo import MongoClient
import sys
import json
class Servidor():
def __init__(self, ip, port):
self.client = MongoClient('10.253.130.254', 27017)
self.db = self.client.distribuidos
self.users = self.db.users
self.host = ip
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((self.host, self.port))
self.s.listen(0)
self.contacts = {}
self.rooms = {}
self.conn_list = []
        print('Server listening on IP {} on port {}'.format(ip, port))
def registrar(self, username, password, name, last_name, edad, gender):
new_user = {
'username' : username,
'password' : password,
'name' : name,
'last_name' : last_name,
'edad' : edad,
'gender' : gender
}
try:
self.user = self.users.find_one({'username' : username})
if self.user is None:
self.user_id = self.users.insert_one(new_user).inserted_id
return 'Registrado'
else:
return 'Usuario ya existe'
except:
return 'Error'
def ingresar(self, username, password):
self.user = self.users.find_one({'username' : username})
if self.user is None:
return 'Usuario no existe'
elif self.user['password'] == password:
return 'Exito'
else:
return 'Contraseña incorrecta'
def run(self):
self.conn_list.append(self.s)
while True:
self.ready_to_read,self.ready_to_write,self.in_error = select.select(self.conn_list,[],[],0)
for self.sock in self.ready_to_read:
if self.sock == self.s:
self.conn, self.addr = self.s.accept()
self.conn_list.append(self.conn)
                    print('Client {} connected'.format(self.addr))
else:
self.data = self.sock.recv(1024).decode('utf8')
if self.data:
self.cabeza, self.cuerpo = self.data.split('-')
                        #Register a new user
if self.cabeza == 'reg':
self.username, self.password, self.name, self.last_name, self.edad, self.gender = self.cuerpo.split('/')
self.response = self.registrar(self.username, self.password, self.name, self.last_name, self.edad, self.gender)
if self.response == 'Registrado':
self.contacts[self.username] = self.sock
self.sock.send(self.response.encode('utf8'))
                        #Log a user in
if self.cabeza == 'ing':
self.username, self.password = self.cuerpo.split('/')
self.response = self.ingresar(self.username, self.password)
if self.response == 'Exito':
self.contacts[self.username] = self.sock
self.sock.send(self.response.encode('utf8'))
                        #Show the rooms
if self.cabeza == 'IR':
self.response = json.dumps({'rooms' : list(self.rooms.keys())}).encode('utf8')
self.sock.send(self.response)
                        #Show the users
if self.cabeza == 'show users' :
self.response = json.dumps({'users' : list(self.contacts.keys())}).encode('utf8')
self.sock.send(self.response)
                        #Create a room from the lobby
if self.cabeza == 'nR':
self.rooms[self.cuerpo] = []
self.rooms[self.cuerpo].append(self.sock)
                        #Join a room from the lobby
if self.cabeza == 'ngR':
self.rooms[self.cuerpo].append(self.sock)
#Chat
if self.cabeza == 'chat':
self.room, self.mensaje = self.cuerpo.split('/')
for self.person in self.rooms[self.room]:
if self.person != self.s and self.person != self.sock:
self.person.send(self.mensaje.encode('utf8'))
                        #Delete a room
if self.cabeza == 'dR':
print(self.rooms[self.cuerpo])
if self.rooms[self.cuerpo][0] == self.sock:
for self.person in self.rooms[self.cuerpo]:
self.person.send('#remove-'.encode('utf8'))
del self.rooms[self.cuerpo]
self.sock.send('Ok'.encode('utf8'))
else:
self.sock.send('Denegado'.encode('utf8'))
                        #Create a room while inside another one
if self.cabeza == 'cR':
print('cR')
self.nroom, self.room = self.cuerpo.split('/')
if not self.nroom in self.rooms:
self.lista_usuarios = self.rooms[self.room]
self.indice = self.lista_usuarios.index(self.sock)
print(self.lista_usuarios)
del self.lista_usuarios[self.indice]
print(self.lista_usuarios)
self.rooms[self.nroom] = []
self.rooms[self.nroom].append(self.sock)
print(self.rooms[self.nroom])
self.sock.send('#room-{}'.format(self.nroom).encode('utf8'))
else:
self.sock.send('Esta sala ya existe'.encode('utf8'))
                        #Switch to another room
if self.cabeza == 'gR':
print('gR')
self.nroom, self.room = self.cuerpo.split('/')
if self.nroom in self.rooms:
self.lista_usuarios = self.rooms[self.room]
self.indice = self.lista_usuarios.index(self.sock)
print(self.lista_usuarios)
del self.lista_usuarios[self.indice]
print(self.lista_usuarios)
self.rooms[self.nroom].append(self.sock)
print(self.rooms[self.nroom])
self.sock.send('#room-{}'.format(self.nroom).encode('utf8'))
else:
self.sock.send('Esta sala no existe'.encode('utf8'))
                        #Private message to someone
if self.cabeza == 'private':
self.person, self.mensaje = self.cuerpo.split('/')
if self.person in self.contacts:
self.sock_person = self.contacts[self.person]
self.sock_person.send(self.mensaje.encode('utf8'))
else:
self.sock.send('No existe tal usuario'.encode('utf8'))
                        #Leave a room
if self.cabeza == 'eR':
self.room = self.cuerpo
self.lista_usuarios = self.rooms[self.room]
self.indice = self.lista_usuarios.index(self.sock)
del self.lista_usuarios[self.indice]
                        #Leave the chat
if self.cabeza == 'exit':
self.username, self.room = self.cuerpo.split('/')
                            if self.room is not None:
self.lista_usuarios = self.rooms[self.room]
self.indice = self.lista_usuarios.index(self.sock)
del self.lista_usuarios[self.indice]
del self.contacts[self.username]
def main():
server = Servidor('10.253.129.41', 5000)
server.run()
if __name__ == '__main__':
main()
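# Hedged protocol sketch (not in the original file): client messages are plain
# 'header-body' strings with '/' separating body fields; the credentials below
# are hypothetical examples:
#   'ing-alice/secret'       -> log in as alice
#   'nR-room1'               -> create room1 from the lobby
#   'chat-room1/hello all'   -> broadcast inside room1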
| true
|
d004913c3cf60e9500800475b29a70eff866235e
|
Python
|
ulf1/torch-tweaks
|
/torch_tweaks/idseqs_to_mask.py
|
UTF-8
| 3,096
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import torch.sparse
import itertools
from typing import List, Optional, Union
Number = Union[bool, int, float]
def idseqs_to_mask(idseqs: List[List[int]],
n_seqlen: Optional[int] = None,
n_vocab_sz: Optional[int] = None,
ignore: Optional[List[int]] = [],
dtype: Optional[torch.dtype] = torch.bool,
dense: Optional[bool] = False
) -> torch.sparse.FloatTensor:
"""Convert ID sequences into mask matrices
Parameter:
----------
idseqs: List[List[int]]
A list of ID sequences. Each ID basically a row-index.
It's assumed that sequences are already padded!
n_seqlen: Optional[int] = None
The expected sequence length.
n_vocab_sz: Optional[int] = None
The number distinct IDs of all sequences.
ignore: Optional[List[int]] = []
A list of IDs to ignore, e.g. ignore=[VOCAB.index("[PAD]")]
As a result the empty rows of the mask matrix are removed
accordingly.
dtype: Optional[torch.dtype] = bool
Only if dense=True, the data type of the mask matrix,
e.g. torch.bool (True/False), torch.uint8 (0/1)
dense: Optional[bool] = False
Flag to return a dense mask matrix
Returns:
--------
torch.sparse.FloatTensor
A batch-first FloatTensor <batch_sz, n_seqlen, vocab_sz>
Example:
--------
from torch_tweaks import idseqs_to_mask
idseqs = [[1,2,3,4,0,0,1,2], [2,4,2,0,1]]
masks = idseqs_to_mask(idseqs, n_seqlen=5, ignore=[3], dense=True)
Help:
-----
- Sparse module: https://pytorch.org/docs/stable/sparse.html
- dtype: https://pytorch.org/docs/stable/tensors.html
"""
if n_seqlen is None:
n_seqlen = max([len(seq) for seq in idseqs])
# create a list of IDs
if n_vocab_sz is None:
ids = set(itertools.chain(*idseqs))
else:
ids = set(range(0, n_vocab_sz))
# remove IDs that we ignore
ids = ids.difference(set(ignore))
n_features = len(ids)
# convert to list to lookup with .index() method
ids = list(ids)
# loop over each ID sequence
masks = []
for seq in idseqs:
# extract index pairs of the sparse matrix
featidx = []
seqidx = []
for step, elem in enumerate(seq[:n_seqlen]):
try:
featidx.append(ids.index(elem))
seqidx.append(step)
except Exception:
pass
# convert to COO matrix
tmp = torch.sparse.FloatTensor(
indices=torch.LongTensor([seqidx, featidx]),
values=torch.FloatTensor([1.0 for _ in range(len(seqidx))]),
size=torch.Size([n_seqlen, n_features])
).coalesce()
# save it
masks.append(tmp)
# stack into one 3D tensor <batch_sz, n_seqlen, vocab_sz>
masks = torch.stack(masks).coalesce()
# convert to dense matrix if requested
if dense:
masks = masks.to_dense().type(dtype)
# done
return masks
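# Hedged demo (not part of the original module): the docstring example above,
# executed; with ID 3 ignored the vocabulary collapses to 4 distinct IDs.
if __name__ == "__main__":
    idseqs = [[1, 2, 3, 4, 0, 0, 1, 2], [2, 4, 2, 0, 1]]
    masks = idseqs_to_mask(idseqs, n_seqlen=5, ignore=[3], dense=True)
    print(masks.shape)  # torch.Size([2, 5, 4])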
| true
|
ca220d267b0171d7a8ea926f638cf3dcbebca414
|
Python
|
Animenosekai/jsConsole
|
/jsConsole/internal/javascript/execute_js.py
|
UTF-8
| 1,989
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
"""
Executing JavaScript on the browser.
© Anime no Sekai - 2020
"""
from ..browser import browser
from .. import config
from ..exceptions import BrowserError
import threading
import asyncio
async def evaluate_on_pyppeteer(command):
result = await browser.evaluate(command)
return result
def evaluate(command, return_value=True):
if config.layer == 'Selenium':
if return_value:
return browser.execute_script('return ' + str(command))
else:
browser.execute_script(str(command))
elif config.layer == 'Pyppeteer':
'''
if return_value:
if isinstance(threading.current_thread(), threading._MainThread):
#print('Main Thread')
event_loop = asyncio.get_event_loop()
result = event_loop.run_until_complete(evaluate_on_pyppeteer('return ' + str(command)))
else:
#print('Not Main Thread')
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = asyncio.get_event_loop()
result = event_loop.run_until_complete(evaluate_on_pyppeteer('return ' + str(command)))
else:
'''
try:
if isinstance(threading.current_thread(), threading._MainThread):
#print('Main Thread')
event_loop = asyncio.get_event_loop()
result = event_loop.run_until_complete(evaluate_on_pyppeteer(str(command)))
else:
#print('Not Main Thread')
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = asyncio.get_event_loop()
result = event_loop.run_until_complete(evaluate_on_pyppeteer(str(command)))
        except Exception:
            # the original message was left unfinished ("Error while ")
            return "Error while executing the JavaScript command"
return result
else:
raise BrowserError(f'There is an error with the layer: {str(config.layer)}')
def switch_to_alert():
    if config.layer == 'Selenium':
        # return the Alert object so the caller can accept() or dismiss() it
        return browser.switch_to.alert
| true
|
ff3eaad880cfa2f2264d6d6f19a9193f3d04eb19
|
Python
|
bretthop/projectchooser
|
/app/services/DomainService.py
|
UTF-8
| 777
| 2.609375
| 3
|
[] |
no_license
|
from app.data.model.Domain import Domain
class DomainService:
def createDomain(self, domain):
"""
:type domain: Domain
"""
domain.put()
return domain
def updateDomain(self, domainId, domainTitle, domainDescription, domainStatus):
_domain = Domain.get_by_id(domainId)
if domainTitle is not None:
_domain.title = domainTitle
if domainDescription is not None:
_domain.description = domainDescription
if domainStatus is not None:
_domain.status = domainStatus
        # persist via put(), matching createDomain above
        _domain.put()
    def GetDomainsByStatus(self, domainStatus):
        # bind the value instead of concatenating strings into the GQL query
        result = Domain.gql("WHERE status = :1", domainStatus)
        return result
| true
|