content stringlengths 5 1.05M |
|---|
import pathlib
import numpy
import pandas as pd
from libs.datasets.population import PopulationDataset
from libs.datasets import dataset_utils
from libs.datasets import data_source
from libs.us_state_abbrev import US_STATE_ABBREV, ABBREV_US_FIPS
from libs import enums
from libs.datasets.dataset_utils import AggregationLevel
CURRENT_FOLDER = pathlib.Path(__file__).parent
class FIPSPopulation(data_source.DataSource):
    """FIPS data from US Gov census predictions + fips list.

    https://www.census.gov/data/datasets/time-series/demo/popest/2010s-counties-total.html
    https://www.census.gov/geographies/reference-files/2018/demo/popest/2018-fips.html
    """

    FILE_PATH = CURRENT_FOLDER / "fips_population.csv"

    SOURCE_NAME = "FIPS"

    class Fields(object):
        # Column names used by this data source.
        STATE = "state"
        COUNTY = "county"
        FIPS = "fips"
        POPULATION = "population"
        # Added in standardize data.
        AGGREGATE_LEVEL = "aggregate_level"
        COUNTRY = "country"

    POPULATION_FIELD_MAP = {
        PopulationDataset.Fields.COUNTRY: Fields.COUNTRY,
        PopulationDataset.Fields.STATE: Fields.STATE,
        PopulationDataset.Fields.FIPS: Fields.FIPS,
        PopulationDataset.Fields.POPULATION: Fields.POPULATION,
        PopulationDataset.Fields.AGGREGATE_LEVEL: Fields.AGGREGATE_LEVEL,
    }

    def __init__(self, path):
        """Load the CSV at `path`, zero-pad FIPS codes to 5 digits, and standardize."""
        data = pd.read_csv(path, dtype={"fips": str})
        data["fips"] = data.fips.str.zfill(5)
        data = self.standardize_data(data)
        super().__init__(data)

    @classmethod
    def local(cls):
        """Build the data source from the checked-in CSV next to this module."""
        return cls(cls.FILE_PATH)

    @classmethod
    def standardize_data(cls, data: pd.DataFrame) -> pd.DataFrame:
        """Add per-state "Unknown" county rows and state-level aggregate rows."""
        # Add a synthetic "Unknown" county row for each state.
        unknown_fips = [
            {
                cls.Fields.STATE: state,
                # TODO(chris): Possibly separate fips out by state prefix
                cls.Fields.FIPS: enums.UNKNOWN_FIPS,
                cls.Fields.POPULATION: None,
                cls.Fields.COUNTY: "Unknown",
            }
            for state in data.state.unique()
        ]
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement and produces the same rows.
        data = pd.concat([data, pd.DataFrame(unknown_fips)])
        # All DH data is aggregated at the county level
        data[cls.Fields.AGGREGATE_LEVEL] = AggregationLevel.COUNTY.value
        data[cls.Fields.COUNTRY] = "USA"
        # Sum county rows up to state level where no state row already exists.
        states_aggregated = dataset_utils.aggregate_and_get_nonmatching(
            data,
            [cls.Fields.COUNTRY, cls.Fields.STATE, cls.Fields.AGGREGATE_LEVEL],
            AggregationLevel.COUNTY,
            AggregationLevel.STATE,
        ).reset_index()
        # State rows get the state's own 2-digit FIPS and no county name.
        states_aggregated[cls.Fields.FIPS] = states_aggregated[cls.Fields.STATE].map(
            ABBREV_US_FIPS
        )
        states_aggregated[cls.Fields.COUNTY] = None
        return pd.concat([data, states_aggregated])
def build_fips_data_frame(census_csv, counties_csv):
    """Join the census population CSV with the FIPS counties CSV.

    Args:
        census_csv: path to a 2-column CSV of "county, state" name + population.
        counties_csv: path to the census FIPS reference CSV.

    Returns:
        DataFrame with columns [state, county, fips, population].
    """
    counties = pd.read_csv(counties_csv, dtype=str)
    counties.columns = [
        "summary",
        "state_fip",
        "county_fip",
        "subdivision",
        "place",
        "city",
        "name",
    ]
    county_pop = pd.read_csv(census_csv)
    county_pop.columns = ["county_state", "population"]
    # Various filters
    no_county = counties.county_fip == "000"
    has_state = counties.state_fip != "00"
    has_county = counties.county_fip != "000"
    no_subdivision = counties.subdivision == "00000"
    no_place = counties.place == "00000"
    no_city = counties.city == "00000"
    # Create state level fips
    states = counties[
        has_state & no_county & no_subdivision & no_city & no_place
    ].reset_index()
    states = states.rename({"name": "state"}, axis=1)[["state_fip", "state"]]
    states.state = states.state.apply(lambda x: US_STATE_ABBREV[x])
    # Create County level
    county_only = counties[
        has_county & no_subdivision & no_place & no_city
    ].reset_index()
    county_only = county_only.rename({"name": "county"}, axis=1)
    county_only["fips"] = county_only.state_fip + county_only.county_fip
    state_data = (
        county_only.set_index("state_fip")
        .join(states.set_index("state_fip"), on="state_fip")
        .reset_index()
    )
    # Sorry these lambdas are ugly
    county_pop.population = county_pop.population.apply(
        lambda x: int(x.replace(",", ""))
    )
    county_pop["state"] = county_pop.county_state.apply(
        lambda x: US_STATE_ABBREV[x.split(",")[1].strip()]
    )
    county_pop["county"] = county_pop.county_state.apply(
        lambda x: x.split(",")[0].strip().lstrip(".")
    )
    # Normalize census spellings to the FIPS file's abbreviations so the join
    # keys line up.  BUG FIX: DataFrame.replace with plain scalars only matches
    # whole-cell values, so the original calls never touched names like
    # "Sainte Genevieve County"; do substring replacement on the county column.
    county_pop["county"] = (
        county_pop["county"]
        .str.replace("Sainte", "Ste.", regex=False)
        .str.replace("Saint", "St.", regex=False)
    )
    left = state_data.set_index(["state", "county"])
    right = county_pop.set_index(["state", "county"])
    results = left.join(right, on=["state", "county"]).reset_index()
    return results[["state", "county", "fips", "population"]]
|
# Print every name on one line: end="" suppresses the trailing newline.
names = ['Michael_', 'Bob_', 'Tracy_']
for person in names:
    print(person, end="")
print("---------------------------------")
# Python's built-in id() returns an object's unique identity; in CPython
# hex(id(obj)) is the object's memory address.
# The `is` operator compares object identities, for example:
classmates = ['Michael', 'Bob', 'Tracy']
a = ['Michael', 'Bob', 'Tracy']
b = ['Michael', 'Bob', 'Tracy']
print(hex(id(classmates)))
print(a == b)   # True: equal contents
print(a is b)   # False: two distinct list objects
print("---------------------------------")
print(ord("a"))             # character -> code point (97)
print(chr(97))              # code point -> character ('a')
print(str("\u4e2d\u6587"))  # unicode escapes for Chinese characters
print(10/3)                 # true division -> float
print(10//3)                # floor division -> int
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for wrapper sampling methods that call base sampling methods.
Provides interface to sampling methods that allow same signature
for select_batch. Each subclass implements select_batch_ with the desired
signature for readability.
"""
import abc
from sampling_methods.constants import AL_MAPPING
from sampling_methods.constants import get_all_possible_arms
from sampling_methods.sampling_def import SamplingMethod
# Populate AL_MAPPING with every available sampling-method arm at import time.
get_all_possible_arms()
class WrapperSamplingMethod(SamplingMethod, metaclass=abc.ABCMeta):
    """Base class for samplers that delegate to mixtures of base samplers."""

    def initialize_samplers(self, mixtures):
        """Instantiate the base samplers referenced by `mixtures` and one
        mixture-of-samplers wrapper per mixture specification."""
        # Collect the distinct base-method names used across all mixtures.
        needed = {name for mixture in mixtures for name in mixture['methods']}
        # One shared instance per base method, keyed by its name.
        self.base_samplers = {
            name: AL_MAPPING[name](self.X, self.y, self.seed) for name in needed
        }
        # One mixture wrapper per mixture spec, reusing the shared base samplers.
        make_mixture = AL_MAPPING['mixture_of_samplers']
        self.samplers = [
            make_mixture(self.X, self.y, self.seed, mixture, self.base_samplers)
            for mixture in mixtures
        ]
|
from django.contrib import admin
from .models import Proxy
@admin.register(Proxy)
class ProxyAdmin(admin.ModelAdmin):
    """Register Proxy with the Django admin using stock ModelAdmin behavior."""
    ...
|
import discord
from redbot.core import commands, checks, Config, modlog
from redbot.core.utils.chat_formatting import humanize_list
from redbot.core.i18n import Translator, cog_i18n
from .eventmixin import EventMixin, CommandPrivs
# Per-guild default settings: each event type stores whether logging is
# enabled, an optional override channel id, and (where relevant) extra flags.
inv_settings = {
    "message_edit": {"enabled": False, "channel": None, "bots": False},
    "message_delete": {"enabled": False, "channel": None, "bots": False},
    "user_change": {"enabled": False, "channel": None},
    "role_change": {"enabled": False, "channel": None},
    "voice_change": {"enabled": False, "channel": None},
    "user_join": {"enabled": False, "channel": None},
    "user_left": {"enabled": False, "channel": None},
    "channel_change": {"enabled": False, "channel": None},
    "guild_change": {"enabled": False, "channel": None},
    "emoji_change": {"enabled": False, "channel": None},
    "commands_used": {
        "enabled": False,
        "channel": None,
        # Privilege levels whose command invocations get logged.
        "privs": ["MOD", "ADMIN", "BOT_OWNER", "GUILD_OWNER"],
    },
    # Channel ids excluded from message delete/edit logging.
    "ignored_channels": [],
    # Cached guild invite data, used to attribute member joins to an invite.
    "invite_links": {},
}

# Translator for this cog's user-facing strings.
_ = Translator("ExtendedModLog", __file__)
@cog_i18n(_)
class ExtendedModLog(EventMixin, commands.Cog):
    """
    Extended modlogs
    Works with core modlogset channel
    """

    __version__ = "2.1.0"
def __init__(self, bot):
    """Set up config defaults and start the invite-link refresh task."""
    self.bot = bot
    # BUG FIX: force_registration is a Config.get_conf keyword, not a guild
    # default — passing it to register_guild registered a bogus
    # "force_registration" default key instead of enforcing registration.
    self.config = Config.get_conf(self, 154457677895, force_registration=True)
    self.config.register_guild(**inv_settings)
    self.loop = bot.loop.create_task(self.invite_links_loop())
async def initialize(self):
    """Migrate legacy per-guild settings and drop data for guilds the bot left.

    Older versions stored each event setting as a bare bool; this rewrites
    them into the {"enabled": ..., "channel": ...} dict form.
    """
    all_data = await self.config.all_guilds()
    for guild_id, data in all_data.items():
        guild = self.bot.get_guild(guild_id)
        if guild is None:
            # Bot is no longer in this guild; clear its stored settings.
            await self.config._clear_scope(Config.GUILD, str(guild_id))
            continue
        for entry in inv_settings:
            # .get tolerates keys added after this guild's data was written.
            setting = data.get(entry)
            if isinstance(setting, bool):
                # Legacy schema: wrap the bare bool in the new dict form.
                new_data = {"enabled": setting, "channel": None}
                if entry == "commands_used":
                    new_data["privs"] = ["MOD", "ADMIN", "BOT_OWNER", "GUILD_OWNER"]
                await self.config.guild(guild).set_raw(entry, value=new_data)
async def modlog_settings(self, ctx):
    """Send a summary of this guild's extended-modlog settings.

    Also prunes stored references to channels that no longer exist and
    writes the cleaned data back to config.
    """
    guild = ctx.message.guild
    try:
        _modlog_channel = await modlog.get_modlog_channel(guild)
        modlog_channel = _modlog_channel.mention
    except Exception:
        modlog_channel = "Not Set"
    cur_settings = {
        "message_edit": _("Message edits"),
        "message_delete": _("Message delete"),
        "user_change": _("Member changes"),
        "role_change": _("Role changes"),
        "voice_change": _("Voice changes"),
        "user_join": _("User join"),
        "user_left": _("Member left"),
        "channel_change": _("Channel changes"),
        "guild_change": _("Guild changes"),
        "emoji_change": _("Emoji changes"),
        "commands_used": _("Mod/Admin Commands"),
    }
    msg = _("Setting for {guild}\n Modlog Channel {channel}\n\n").format(
        guild=guild.name, channel=modlog_channel
    )
    data = await self.config.guild(guild).all()
    # BUG FIX: the original removed entries from data["ignored_channels"]
    # while iterating the same list, which skips elements; build the kept
    # list instead.
    ignored_channels = []
    kept_ids = []
    for c in data["ignored_channels"]:
        chn = guild.get_channel(c)
        if chn is not None:
            kept_ids.append(c)
            ignored_channels.append(chn)
    data["ignored_channels"] = kept_ids
    for settings, name in cur_settings.items():
        msg += f"{name}: **{data[settings]['enabled']}**"
        if data[settings]["channel"]:
            chn = guild.get_channel(data[settings]["channel"])
            if chn is None:
                # a bit of automatic cleanup so things don't break
                data[settings]["channel"] = None
                # BUG FIX: still terminate the line so the next setting
                # doesn't run onto this one.
                msg += "\n"
            else:
                msg += f" {chn.mention}\n"
        else:
            msg += "\n"
    if ignored_channels:
        chans = ", ".join(c.mention for c in ignored_channels)
        msg += _("Ignored Channels") + ": " + chans
    # save the data back to config in case we had some deleted channels
    await self.config.guild(ctx.guild).set(data)
    await ctx.maybe_send_embed(msg)
@checks.admin_or_permissions(manage_channels=True)
@commands.group(name="modlog", aliases=["modlogtoggle", "modlogs"])
@commands.guild_only()
async def _modlog(self, ctx):
    """
    Toggle various extended modlog notifications

    Requires the channel to be setup with `[p]modlogset modlog #channel` first
    """
    # Seed defaults if this guild has nothing stored yet.
    # NOTE(review): "settings" is not a key in inv_settings — confirm this
    # guard ever fires; it may be leftover from an older schema.
    if await self.config.guild(ctx.message.guild).settings() == {}:
        await self.config.guild(ctx.message.guild).set(inv_settings)
    # No subcommand given: show the current settings summary.
    if ctx.invoked_subcommand is None:
        await self.modlog_settings(ctx)
@_modlog.group(name="edit")
async def _edit(self, ctx):
    """
    Message edit logging settings
    """
    pass

@_edit.command(name="toggle")
async def _edit_toggle(self, ctx):
    """
    Toggle message edit notifications
    """
    guild = ctx.message.guild
    msg = _("Edit messages ")
    # Flip the stored flag and report the resulting state.
    if not await self.config.guild(guild).message_edit.enabled():
        await self.config.guild(guild).message_edit.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).message_edit.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_edit.command(name="bots")
async def _edit_toggle_bots(self, ctx):
    """
    Toggle message edit notifications for bot users
    """
    guild = ctx.message.guild
    msg = _("Bots edited messages ")
    if not await self.config.guild(guild).message_edit.bots():
        await self.config.guild(guild).message_edit.bots.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).message_edit.bots.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_edit.command(name="channel")
async def _edit_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for edit logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).message_edit.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="join")
async def _join(self, ctx):
    """
    Member join logging settings
    """
    pass

@_join.command(name="toggle")
async def _join_toggle(self, ctx):
    """
    Toggle member join notifications
    """
    guild = ctx.message.guild
    msg = _("Join message logs ")
    if not await self.config.guild(guild).user_join.enabled():
        await self.config.guild(guild).user_join.enabled.set(True)
        # Snapshot the guild's invite links so joins can be attributed to one.
        links = await self.save_invite_links(guild)
        if links:
            verb = _("enabled with invite links")
        else:
            verb = _("enabled")
    else:
        await self.config.guild(guild).user_join.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_join.command(name="channel")
async def _join_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for join logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).user_join.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="guild")
async def _guild(self, ctx):
    """
    Guild change logging settings
    """
    pass

@_guild.command(name="toggle")
async def _guild_toggle(self, ctx):
    """
    Toggle guild change notifications

    Shows changes to name, region, afk timeout, and afk channel
    """
    guild = ctx.message.guild
    msg = _("Guild logs ")
    if not await self.config.guild(guild).guild_change.enabled():
        await self.config.guild(guild).guild_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).guild_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_guild.command(name="channel")
async def _guild_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for guild logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).guild_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="channel", aliases=["channels"])
async def _channel(self, ctx):
    """
    Channel change logging settings
    """
    pass

@_channel.command(name="toggle")
async def _channel_toggle(self, ctx):
    """
    Toggle channel edit notifications

    Shows changes to name, topic, slowmode, and NSFW
    """
    guild = ctx.message.guild
    msg = _("Channel logs ")
    if not await self.config.guild(guild).channel_change.enabled():
        await self.config.guild(guild).channel_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).channel_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_channel.command(name="channel")
async def _channel_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for channel logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).channel_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="leave")
async def _leave(self, ctx):
    """
    Member leave logging settings
    """
    pass

@_leave.command(name="toggle")
async def _leave_toggle(self, ctx):
    """
    Toggle member leave notifications
    """
    guild = ctx.message.guild
    msg = _("Leave logs ")
    if not await self.config.guild(guild).user_left.enabled():
        await self.config.guild(guild).user_left.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).user_left.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_leave.command(name="channel")
async def _leave_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for member leave logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).user_left.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="delete")
async def _delete(self, ctx):
    """
    Delete logging settings
    """
    pass

@_delete.command(name="toggle")
async def _delete_toggle(self, ctx):
    """
    Toggle message delete notifications
    """
    guild = ctx.message.guild
    msg = _("Message delete logs ")
    if not await self.config.guild(guild).message_delete.enabled():
        await self.config.guild(guild).message_delete.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).message_delete.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_delete.command(name="bots")
async def _delete_bots(self, ctx):
    """
    Toggle message delete notifications for bot users
    """
    guild = ctx.message.guild
    msg = _("Bot delete logs ")
    if not await self.config.guild(guild).message_delete.bots():
        await self.config.guild(guild).message_delete.bots.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).message_delete.bots.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_delete.command(name="channel")
async def _delete_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for delete logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).message_delete.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="member", aliases=["user"])
async def _user(self, ctx):
    """
    Member logging settings
    """
    pass

@_user.command(name="toggle")
async def _user_toggle(self, ctx):
    """
    Toggle member change notifications

    Shows changes to roles and nicknames
    """
    guild = ctx.message.guild
    msg = _("Profile logs ")
    if not await self.config.guild(guild).user_change.enabled():
        await self.config.guild(guild).user_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).user_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_user.command(name="channel")
async def _user_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for user logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).user_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="roles", aliases=["role"])
async def _roles(self, ctx):
    """
    Role logging settings
    """
    pass

@_roles.command(name="toggle")
async def _roles_toggle(self, ctx):
    """
    Toggle role change notifications

    Shows new roles, deleted roles, and permission changes
    """
    guild = ctx.message.guild
    msg = _("Role logs ")
    if not await self.config.guild(guild).role_change.enabled():
        await self.config.guild(guild).role_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).role_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_roles.command(name="channel")
async def _roles_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for roles logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).role_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="voice")
async def _voice(self, ctx):
    """
    Voice logging settings
    """
    pass

@_voice.command(name="toggle")
async def _voice_toggle(self, ctx):
    """
    Toggle voice state notifications

    Shows changes to mute, deafen, self mute, self deafen, afk, and channel
    """
    guild = ctx.message.guild
    msg = _("Voice logs ")
    if not await self.config.guild(guild).voice_change.enabled():
        await self.config.guild(guild).voice_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).voice_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_voice.command(name="channel")
async def _voice_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for voice logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).voice_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="emoji", aliases=["emojis"])
async def _emoji(self, ctx):
    """
    Emoji change logging settings
    """
    pass

@_emoji.command(name="toggle")
async def _emoji_toggle(self, ctx):
    """
    Toggle emoji change logging
    """
    guild = ctx.message.guild
    msg = _("Emoji logs ")
    if not await self.config.guild(guild).emoji_change.enabled():
        await self.config.guild(guild).emoji_change.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).emoji_change.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_emoji.command(name="channel")
async def _emoji_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for emoji logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).emoji_change.channel.set(channel)
    await ctx.tick()
@_modlog.group(name="command", aliases=["commands"])
async def _command(self, ctx):
    """
    Toggle command logging
    """
    pass

@_command.command(name="level")
async def _command_level(self, ctx, *level: CommandPrivs):
    """
    Set the level of commands to be logged

    `[level...]` must include all levels you want from:
    MOD, ADMIN, BOT_OWNER, GUILD_OWNER, and NONE

    These are the basic levels commands check for in permissions.
    `NONE` is a command anyone has permission to use, where as `MOD`
    can be `mod or permissions`
    """
    # Each argument is converted by the CommandPrivs converter.
    if len(level) == 0:
        return await ctx.send_help()
    guild = ctx.message.guild
    msg = _("Command logs set to: ")
    await self.config.guild(guild).commands_used.privs.set(list(level))
    await ctx.send(msg + humanize_list(level))

@_command.command(name="toggle")
async def _command_toggle(self, ctx):
    """
    Toggle command usage logging
    """
    guild = ctx.message.guild
    msg = _("Command logs ")
    if not await self.config.guild(guild).commands_used.enabled():
        await self.config.guild(guild).commands_used.enabled.set(True)
        verb = _("enabled")
    else:
        await self.config.guild(guild).commands_used.enabled.set(False)
        verb = _("disabled")
    await ctx.send(msg + verb)

@_command.command(name="channel")
async def _command_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Set custom channel for command logging
    """
    # Store the channel id, or None to fall back to the default modlog channel.
    if channel is not None:
        channel = channel.id
    await self.config.guild(ctx.guild).commands_used.channel.set(channel)
    await ctx.tick()
# NOTE(review): declared as a group but has no subcommands — presumably this
# should be @_modlog.command(); confirm against the command framework version.
@_modlog.group()
async def ignore(self, ctx, channel: discord.TextChannel = None):
    """
    Ignore a channel from message delete/edit events and bot commands

    `channel` the channel to ignore message delete/edit events
    defaults to current channel
    """
    guild = ctx.message.guild
    if channel is None:
        channel = ctx.channel
    cur_ignored = await self.config.guild(guild).ignored_channels()
    if channel.id not in cur_ignored:
        cur_ignored.append(channel.id)
        await self.config.guild(guild).ignored_channels.set(cur_ignored)
        await ctx.send(_(" Now ignoring messages edited and deleted in ") + channel.mention)
    else:
        await ctx.send(channel.mention + _(" is already being ignored."))
# NOTE(review): declared as a group but has no subcommands — presumably this
# should be @_modlog.command(); confirm against the command framework version.
@_modlog.group()
async def unignore(self, ctx, channel: discord.TextChannel = None):
    """
    Unignore a channel from message delete/edit events and bot commands

    `channel` the channel to unignore message delete/edit events
    defaults to current channel
    """
    guild = ctx.message.guild
    if channel is None:
        channel = ctx.channel
    cur_ignored = await self.config.guild(guild).ignored_channels()
    if channel.id in cur_ignored:
        cur_ignored.remove(channel.id)
        await self.config.guild(guild).ignored_channels.set(cur_ignored)
        await ctx.send(_(" now tracking edited and deleted messages in ") + channel.mention)
    else:
        await ctx.send(channel.mention + _(" is not being ignored."))
def __unload(self):
    # Cancel the background invite-link refresh task when the cog unloads.
    # NOTE(review): newer Red versions use cog_unload instead of __unload —
    # confirm the targeted framework version.
    self.loop.cancel()
|
import time
import unittest
import sys
from utils import xbridge_utils
from interface import xbridge_client
""" *** COMMENT ***
- Here, the length of the garbage data is very high and increased.
The "j" parameter in the "generate_garbage_input" function is the length of the garbage input we want.
- Non-numerical parameters are only garbage data.
- Numerical parameters are both valid and out-of-bounds numbers.
- export_data() function generates :
1) an Excel File with the recorded timing information.
2) a small descriptive table with mean, standard deviation, and some quantiles (25%, 50%, 75%).
"""
def test_createtx_garbage_load_v1(nb_of_runs):
    """Load-test dxCreateTransaction with garbage inputs of growing length.

    Each iteration uses garbage strings of length j (starting at 10000) and
    random, possibly out-of-bounds, numeric amounts; per-call timings are
    exported to an Excel workbook.
    """
    time_distribution = []
    for j in range(10000, 10000 + nb_of_runs):
        garbage_input_str1 = xbridge_utils.generate_garbage_input(j)
        garbage_input_str2 = xbridge_utils.generate_garbage_input(j)
        garbage_input_str3 = xbridge_utils.generate_garbage_input(j)
        garbage_input_str4 = xbridge_utils.generate_garbage_input(j)
        source_nb = xbridge_utils.generate_random_number(-99999999999999999999999999999999999999999999999, 99999999999999999999999999999999999999999999999)
        dest_nb = xbridge_utils.generate_random_number(-99999999999999999999999999999999999999999999999, 99999999999999999999999999999999999999999999999)
        ts = time.time()
        xbridge_client.CHECK_CREATE_TX(garbage_input_str1, garbage_input_str2, source_nb, garbage_input_str3, garbage_input_str4,
                                       dest_nb)
        te = time.time()
        # "API" tags the sample so export_data can group timings per RPC call.
        # (The original also accumulated total elapsed seconds but never used it.)
        time_distribution.append({"time": te - ts, "API": "dxCreateTransaction"})
    xbridge_utils.export_data("test_createtx_garbage_load_v1.xlsx", time_distribution)
""" *** COMMENT ***
1. Here, the length of garbage parameters is random.
2. Numerical parameters are both valid and out-of-bounds numbers.
"""
def test_createtx_garbage_load_v2(nb_of_runs):
    """Load-test dxCreateTransaction with randomly-sized garbage inputs.

    Garbage string lengths are random in [1, 10000]; numeric amounts are
    random and possibly out-of-bounds. Per-call timings are exported to an
    Excel workbook.
    """
    time_distribution = []
    for i in range(1, nb_of_runs):
        garbage_input_str1 = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 10000))
        garbage_input_str2 = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 10000))
        garbage_input_str3 = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 10000))
        garbage_input_str4 = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 10000))
        source_nb = xbridge_utils.generate_random_number(-99999999999999999999999999999999999999999999999, 99999999999999999999999999999999999999999999999)
        dest_nb = xbridge_utils.generate_random_number(-99999999999999999999999999999999999999999999999, 99999999999999999999999999999999999999999999999)
        ts = time.time()
        xbridge_client.CHECK_CREATE_TX(garbage_input_str1, garbage_input_str2, source_nb, garbage_input_str3, garbage_input_str4,
                                       dest_nb)
        te = time.time()
        # (The original also accumulated total elapsed seconds but never used it.)
        time_distribution.append({"time": te - ts, "API": "dxCreateTransaction"})
    xbridge_utils.export_data("test_createtx_garbage_load_v2.xlsx", time_distribution)
""" *** COMMENT ***
1. Here, the length of the parameters is kept fixed; we just increase the number of iterations ==> pure load test, run when resources are available.
2. Numerical parameters are both valid and out-of-bounds numbers.
3. Address and token parameters are garbage data.
"""
def test_createtx_valid_load(number_of_runs):
    """Pure load test: fixed-size (64-char) garbage strings, valid amounts.

    Parameter length is constant, so only iteration count scales the load.
    Per-call timings are exported to an Excel workbook.
    """
    time_distribution = []
    for i in range(1, number_of_runs):
        garbage_input_str1 = xbridge_utils.generate_garbage_input(64)
        garbage_input_str2 = xbridge_utils.generate_garbage_input(64)
        garbage_input_str3 = xbridge_utils.generate_garbage_input(64)
        garbage_input_str4 = xbridge_utils.generate_garbage_input(64)
        source_nb = xbridge_utils.generate_random_number(0.1, 1000)
        dest_nb = xbridge_utils.generate_random_number(0.1, 1000)
        ts = time.time()
        xbridge_client.CHECK_CREATE_TX(garbage_input_str1, garbage_input_str2, source_nb, garbage_input_str3, garbage_input_str4,
                                       dest_nb)
        te = time.time()
        # (The original also accumulated total elapsed seconds but never used it.)
        time_distribution.append({"time": te - ts, "char_nb": 64, "API": "dxCreateTransaction"})
    xbridge_utils.export_data("test_createtx_valid_load.xlsx", time_distribution)
""" *** UNIT TESTS ***
- Here we test combinations of valid and invalid data.
- Time is not a consideration here.
"""
class create_Tx_Test(unittest.TestCase):
# Generate new data before each run
def setUp(self):
    """Build one fresh set of valid and invalid fixture values per test."""
    # Valid data
    self.valid_txid = xbridge_utils.generate_random_valid_txid()
    self.valid_src_Token = xbridge_utils.generate_random_valid_token()
    self.valid_dest_Token = xbridge_utils.generate_random_valid_token()
    self.valid_src_Address = xbridge_utils.generate_random_valid_address()
    self.valid_dest_Address = xbridge_utils.generate_random_valid_address()
    self.valid_positive_nb_1 = xbridge_utils.generate_random_number(1, 10000)
    self.valid_positive_nb_2 = xbridge_utils.generate_random_number(1, 10000)
    # Invalid data: out-of-bounds negatives, vanishingly small and
    # astronomically large positives, and garbage strings of random length.
    self.invalid_neg_nb = xbridge_utils.generate_random_number(-99999999999999999999999999999999999999999999999, -0.0000000000000000000000000000000000000000000000000001)
    self.invalid_sm_positive_nb = 0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
    self.invalid_lg_positive_nb = 999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999
    self.invalid_src_Address = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 5000))
    self.invalid_dest_Address = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 5000))
    self.invalid_src_Token = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 500))
    self.invalid_dest_Token = xbridge_utils.generate_garbage_input(xbridge_utils.generate_random_number(1, 500))
    self.nb_with_leading_zeros_1 = xbridge_utils.generate_random_number_with_leading_zeros()
    self.nb_with_leading_zeros_2 = xbridge_utils.generate_random_number_with_leading_zeros()
# Various numerical parameter combinations
def test_invalid_create_tx_v1(self):
    """Invalid quantity combinations must make CHECK_CREATE_TX return None."""
    # negative_number + positive_number, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_neg_nb, self.valid_dest_Address, self.valid_dest_Token, self.valid_positive_nb_2))
    # positive_number + negative_number, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.valid_dest_Token, self.invalid_neg_nb))
    # negative_number + negative_number, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_neg_nb, self.valid_dest_Address, self.valid_dest_Token, self.invalid_neg_nb))
    # 0 + negative_number, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, 0, self.valid_dest_Address, self.valid_dest_Token, self.invalid_neg_nb))
    # positive_number + 0, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.valid_dest_Token, 0))
    # 0 + 0, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, 0, self.valid_dest_Address, self.valid_dest_Token, 0))
# Combinations with empty addresses
def test_invalid_create_tx_v2(self):
    """Blank source and/or destination addresses must be rejected."""
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(" ", self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, "SYS", self.valid_positive_nb_2))
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", self.valid_src_Token, self.valid_positive_nb_1, " ", "SYS", self.valid_positive_nb_2))
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(" ", self.valid_src_Token, self.valid_positive_nb_1, " ", "SYS", self.valid_positive_nb_2))
def test_invalid_create_tx_v3(self):
    """Identical source/destination addresses or tokens must be rejected."""
    # Same source and destination Addresses, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_src_Address, self.valid_dest_Token, self.valid_positive_nb_2))
    # Same source and destination Tokens, different addresses, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.valid_src_Token, self.valid_positive_nb_2))
    # Same source and destination Addresses and Tokens, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_2))
# Combinations of address parameters containing quotes
def test_invalid_create_tx_v4(self):
    """Addresses wrapped in literal quote characters must be rejected."""
    # Address 1 contains quotes, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("'LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy'", self.valid_src_Token, self.valid_positive_nb_1, "12BueeBVD2uiAHViXf7jPVQb2MSQ1Eggey", self.valid_dest_Token, self.valid_positive_nb_2))
    # Address 2 contains quotes, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", self.valid_src_Token, self.valid_positive_nb_1, "'12BueeBVD2uiAHViXf7jPVQb2MSQ1Eggey'", self.valid_dest_Token, self.valid_positive_nb_2))
    # Both Addresses contain quotes, all other parameters being valid
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("'LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy'", self.valid_src_Token, self.valid_positive_nb_1, "'12BueeBVD2uiAHViXf7jPVQb2MSQ1Eggey'", self.valid_dest_Token, self.valid_positive_nb_2))
# Combinations of quotes + out-of-bounds quantities
def test_invalid_create_tx_v5(self):
    """Quoted addresses combined with out-of-bounds quantities must be rejected."""
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("'LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy'", "LTC", self.invalid_neg_nb, "'LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy'", "LTC", self.invalid_neg_nb))
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", self.valid_src_Token, self.valid_positive_nb_1, "LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", "LTC", self.invalid_neg_nb))
    self.assertIsNone(xbridge_client.CHECK_CREATE_TX("LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", self.valid_src_Token, self.invalid_neg_nb, "LTnoVFAnKSMj4v2eFXBJuMmyjqSQT9eXBy", "LTC", self.valid_positive_nb_1))
# Combinations of multiple invalid parameters leading up to ALL parameters being invalid
def test_invalid_create_tx_v6(self):
# Only source Address is valid, the rest is invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.invalid_src_Token, self.invalid_neg_nb, self.invalid_dest_Address, self.invalid_dest_Token, self.invalid_neg_nb))
# Only source Address + source Token are valid, the rest is invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_neg_nb, self.invalid_dest_Address, self.invalid_dest_Token, self.invalid_neg_nb))
# Only source Address + source Token + source_Quantity are valid, the rest is invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.invalid_dest_Address, self.invalid_dest_Token, self.invalid_neg_nb))
# Only (source + dest) Addresses + source Token + source_Quantity are valid, the rest is invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.invalid_dest_Token, self.invalid_neg_nb))
# Only (source + dest) Addresses + (source + dest) Tokens + source_Quantity are valid, the rest is invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.valid_dest_Token, self.invalid_neg_nb))
# All parameters are invalid
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.invalid_src_Address, self.invalid_src_Token, self.invalid_neg_nb, self.invalid_dest_Address, self.invalid_dest_Token, self.invalid_neg_nb))
# Combinations of numerical parameters containining leading Zeros, all other parameters being valid
def test_invalid_create_tx_v7(self):
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.nb_with_leading_zeros_1, self.valid_dest_Address, self.valid_dest_Token, self.valid_positive_nb_2))
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.valid_positive_nb_1, self.valid_dest_Address, self.valid_dest_Token, self.nb_with_leading_zeros_1))
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.nb_with_leading_zeros_1, self.valid_dest_Address, self.valid_dest_Token, self.nb_with_leading_zeros_2))
# Combinations of very small and very large numerical parameters, all other parameters being valid
def test_invalid_create_tx_v8(self):
# very small + very small
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_sm_positive_nb, self.valid_dest_Address, self.valid_dest_Token, self.invalid_sm_positive_nb))
# very small + very large
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_sm_positive_nb, self.valid_dest_Address, self.valid_dest_Token, self.invalid_lg_positive_nb))
# very large + very small
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_lg_positive_nb, self.valid_dest_Address, self.valid_dest_Token, self.invalid_sm_positive_nb))
# very large + very large
self.assertIsNone(xbridge_client.CHECK_CREATE_TX(self.valid_src_Address, self.valid_src_Token, self.invalid_lg_positive_nb, self.valid_dest_Address, self.valid_dest_Token, self.invalid_lg_positive_nb))
def repeat_create_tx_unit_tests(nb_of_runs):
    """Run the whole unittest suite *nb_of_runs* times, exiting non-zero on failure.

    Bug fix: the original looped ``for i in (1, nb_of_runs)``, i.e. over a
    two-element tuple, so the suite always ran exactly twice no matter what
    ``nb_of_runs`` was. ``range(nb_of_runs)`` repeats the intended number of
    times.
    """
    for _ in range(nb_of_runs):
        # exit=False stops unittest.main() from calling sys.exit() itself, so
        # we can inspect the result and set the process exit code ourselves.
        was_successful = unittest.main(exit=False).result.wasSuccessful()
        if not was_successful:
            sys.exit(1)
# unittest.main()
|
#!/usr/bin/env python2
# Own config parser based on ConfigParser. Defines default values.
import ConfigParser
import utils
import time
# Active configuration file name (see MyConf below).
confn = "main.ini"
#confn = "test.ini"
#caln = "calib_data.txt"
#ttyn = "/dev/ttyUSB0"
#mainpos = (180, 0)
#addr = ('192.168.1.29', 12345)
#az_step = 0.5
#el_step = 0.5
# Earth radius in meters (the kilometre figure is scaled by 1000)
#eR = 6367.45*1000 # Mean equator/polar
eR = 6371.0*1000 # FAI sphere
eR2 = eR**2 # squared radius, precomputed
# Yes, this is subject to configuration too (debug-time only)
def small_wait():
    """Pause briefly; used to pace debug-time operations."""
    delay_s = 0.01
    time.sleep(delay_s)
class MyConf:
    """Typed configuration wrapper around ConfigParser (Python 2).

    ``defaults`` maps section -> option -> default value. For each option a
    companion ``'_<Option>'`` entry holds a one-character type code used when
    parsing the file: 'd' = int, 'f' = float, 'b' = bool; any other code (or a
    missing entry) leaves the value as a string.
    """
    defaults = {'Heading':
                {'Head': 180, '_Head': 'f',
                 'Decline': 10.67, '_Decline': 'f',
                 'StaticError': 13, '_StaticError': 'f',
                 'WaitPre': 1, '_WaitPre': 'f',
                 'WaitBtw': 1, '_WaitBtw': 'f',
                 'WaitCal': 0.5, '_WaitCal': 'f',
                 'NProbes': 3, '_NProbes': 'd',
                 'NCycles': 2, '_NCycles': 'd'},
                'Radant':
                {'Dev': '/dev/ttyUSB0',
                 'Timeout': 10, '_Timeout': 'f',
                 'MainAZ': 180, '_MainAZ': 'f',
                 'MainEL': 0, '_MainEL': 'f',
                 'DiscAZ': 0.5, '_DiscAZ': 'f',
                 'DiscEL': 0.5, '_DiscEL': 'f',
                 'ELBase': 0, '_ELBase': 'f',
                 'Emulate': True, '_Emulate': 'b'},
                'GPS':
                {'Lat': 60.750759, '_Lat': 'f',
                 'Lon': 30.048471, '_Lon': 'f',
                 'Alt': 1, '_Alt': 'f',
                 'WaitPre': 30, '_WaitPre': 'f',
                 'WaitBtw': 15, '_WaitBtw': 'f',
                 'NProbes': 5, '_NProbes': 'd',
                 'Timeout': 10, '_Timeout': 'f',
                 'WaitMax': 120, '_WaitMax': 'f'},
                'SNMP':
                {'Comm': 'public',
                 'Host': '192.168.1.20',
                 'OID': '1.3.6.1.4.1.14988.1.1.1.1.1.4.5',
                 'WaitPre': 0, '_WaitPre': 'f',
                 'WaitBtw': 0.1, '_WaitBtw': 'f',
                 'NProbes': 3, '_NProbes': 'd',
                 'Timeout': 1, '_Timeout': 'f',
                 'Retries': 0, '_Retries': 'd'},
                'Net':
                {'Addr': '10.129.0.140',
                 'Port': 12345, '_Port': 'd'},
                'Search':
                # NOTE(review): several '_' entries below hold numbers instead of
                # type codes ('d'/'f'/'b'); update() treats unknown codes as
                # "parse as string" — confirm whether 'd'/'f' was intended.
                {'NPoints': 4, '_NPoints': 'd',
                 'AZRad': 3, '_AZRad': 3,
                 'ELRad': 3, '_ELRad': 3,
                 'Delta': 3, '_Delta': 3,
                 'Thres': -30, '_Thres': -30,
                 'AZStep': 1, '_AZStep': 1,
                 'ELStep': 1, '_ELStep': 1,
                 'PhiStep': 30, '_PhiStep': 30,},
                'Log':
                {'File': 'main.log',
                 'Period': 5, '_Period': 'f'},
                'HTTP':
                {'Addr': '127.0.0.1',
                 'Port': 8000, '_Port': 'd'},
                }
    def __init__(self, configfn=confn):
        """Load configuration from *configfn*, starting from the defaults."""
        self.name = configfn
        self.conf = ConfigParser.ConfigParser()
        # Preserve option-name case (ConfigParser lowercases by default).
        self.conf.optionxform = str
        # NOTE(review): this binds the CLASS-level dict, so every instance
        # shares (and update() mutates) the same defaults mapping.
        self.val = self.defaults
        self.update()
    def update(self):
        """(Re)read the config file and merge typed values into self.val."""
        self.conf.read(self.name)
        for it in self.conf.sections():
            if (not (it in self.val)):
                self.val[it] = {}
            for jt in self.conf.options(it):
                try:
                    ujt = '_'+jt;
                    if (ujt in self.val[it]): # Type of value
                        if (self.val[it][ujt] == 'd'): # Integer
                            tmp = self.conf.getint(it, jt)
                        elif (self.val[it][ujt] == 'f'): # Float
                            tmp = self.conf.getfloat(it, jt)
                        elif (self.val[it][ujt] == 'b'): # Boolean
                            tmp = self.conf.getboolean(it, jt)
                        else: # Unknown type casts to string
                            tmp = self.conf.get(it, jt)
                    else: # Default type is string
                        tmp = self.conf.get(it, jt)
                    self.val[it][jt] = tmp
                except:
                    # Bare except: a malformed entry is reported but never fatal.
                    utils.print_err(('ERROR while reading parameter {0:s} in section {1:s} (file {2:s})').\
                        format(jt, it, self.name))
        # Derived values: true heading and the azimuth base offset.
        self.val['Heading']['TrueHead'] = self.val['Heading']['Head'] + self.val['Heading']['Decline'] + \
            self.val['Heading']['StaticError']
        self.val['Radant']['AZBase'] = self.val['Radant']['MainAZ'] - self.val['Heading']['TrueHead']
        print "Config file {0:s} (re)loaded".format(self.name)
    def print_runtime(self):
        """Print the heading/position/SNMP values currently in effect."""
        print "Mag heading is {0:0.2f} deg, decline is {1:0.2f} deg, static error is {2:0.2f} deg.".\
            format(self.val['Heading']['Head'], self.val['Heading']['Decline'], self.val['Heading']['StaticError'])
        print "True heading is {0:0.2f} deg, base pos is {1:0.2f} deg, azimuth calibirated on {2:0.2f} deg.".\
            format(self.val['Heading']['TrueHead'], self.val['Radant']['MainAZ'], self.val['Radant']['AZBase'])
        if (self.val['Radant']['ELBase']):
            print "Elevation calibrated at {0:0.2f} deg.".format(self.val['Radant']['ELBase'])
        print "Our latitude is {0:0.5f} deg, longtitude is {1:0.5f} deg, altitude is {2:0.2f} m.".\
            format(self.val['GPS']['Lat'], self.val['GPS']['Lon'], self.val['GPS']['Alt'])
        print "Getting signal level from {0:s} (OID {1:s}, N = {2:0d}).".\
            format(self.val['SNMP']['Host'], self.val['SNMP']['OID'], self.val['SNMP']['NProbes'])
    def dump(self):
        """Write the current values back to the config file (skips '_' type keys)."""
        for it in self.val.keys():
            if (not self.conf.has_section(it)):
                self.conf.add_section(it)
            for jt in self.val[it].keys():
                # '_'-prefixed entries are type codes, not real options.
                if ((len(jt) < 1) or (jt[0] == "_")):
                    continue
                self.conf.set(it, jt, self.val[it][jt])
        conffd = open(self.name, 'w')
        self.conf.write(conffd)
        conffd.close()
        print "Config file {0:s} modified".format(self.name)
# Ok, let's test it
#conf = MyConf()
#print(conf.val)
#conf.val['Heading']['Head'] = round(conf.val['Heading']['Head'])
#conf.dump()
|
# -*- coding: utf-8 -*-
def slugify_persian(str):
    """Slugify a Persian (or mixed) title: strip punctuation, join words with '-'.

    The parameter keeps its historical name ``str`` (it shadows the builtin)
    for backward compatibility; the work happens on a local alias.
    """
    text = str
    # Replace/remove punctuation. Bug fix: the original also replaced u'، '
    # AFTER u'،' had already been replaced, so that branch could never match —
    # the dead call is removed here (behaviour unchanged).
    text = text.replace(u'،', "-")
    text = text.replace("(", "-")
    text = text.replace(")", "")
    text = text.replace(u'؟', "")
    text = text.replace(u'?', "")
    text = text.replace(u'!', "")
    # split(' ') (not split()) is kept deliberately so runs of spaces behave
    # exactly as before; the redundant final "%s" % wrapper is dropped.
    return '-'.join(text.lower().split(' '))
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# Registers this module's message and enum types under the
# google.spanner.admin.database.v1 proto package.
__protobuf__ = proto.module(
    package="google.spanner.admin.database.v1",
    manifest={
        "DatabaseDialect",
        "OperationProgress",
        "EncryptionConfig",
        "EncryptionInfo",
    },
)
class DatabaseDialect(proto.Enum):
    r"""Indicates the dialect type of a database."""
    # Default value: dialect not specified.
    DATABASE_DIALECT_UNSPECIFIED = 0
    # Google Standard SQL dialect.
    GOOGLE_STANDARD_SQL = 1
    # PostgreSQL-compatible dialect.
    POSTGRESQL = 2
class OperationProgress(proto.Message):
    r"""Encapsulates progress related information for a Cloud Spanner
    long running operation.
    Attributes:
        progress_percent (int):
            Percent completion of the operation.
            Values are between 0 and 100 inclusive.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Time the request was received.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            If set, the time at which this operation
            failed or was completed successfully.
    """
    # Generated proto bindings: field numbers must stay stable for wire
    # compatibility — do not renumber.
    progress_percent = proto.Field(proto.INT32, number=1,)
    start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class EncryptionConfig(proto.Message):
    r"""Encryption configuration for a Cloud Spanner database.
    Attributes:
        kms_key_name (str):
            The Cloud KMS key to be used for encrypting and decrypting
            the database. Values are of the form
            ``projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>``.
    """
    # Field number 1 is unused here — presumably reserved in the .proto; confirm
    # before reusing it.
    kms_key_name = proto.Field(proto.STRING, number=2,)
class EncryptionInfo(proto.Message):
    r"""Encryption information for a Cloud Spanner database or
    backup.
    Attributes:
        encryption_type (google.cloud.spanner_admin_database_v1.types.EncryptionInfo.Type):
            Output only. The type of encryption.
        encryption_status (google.rpc.status_pb2.Status):
            Output only. If present, the status of a
            recent encrypt/decrypt call on underlying data
            for this database or backup. Regardless of
            status, data is always encrypted at rest.
        kms_key_version (str):
            Output only. A Cloud KMS key version that is
            being used to protect the database or backup.
    """
    class Type(proto.Enum):
        r"""Possible encryption types."""
        TYPE_UNSPECIFIED = 0
        GOOGLE_DEFAULT_ENCRYPTION = 1
        CUSTOMER_MANAGED_ENCRYPTION = 2
    # Declaration order differs from field numbering; that is fine in proto —
    # only the numbers matter on the wire.
    encryption_type = proto.Field(proto.ENUM, number=3, enum=Type,)
    encryption_status = proto.Field(proto.MESSAGE, number=4, message=status_pb2.Status,)
    kms_key_version = proto.Field(proto.STRING, number=2,)
# Public API: every name registered in the proto manifest above, sorted.
__all__ = tuple(sorted(__protobuf__.manifest))
|
import math
# tangent and logarithm
tan = math.tan(3.14/6)
log = math.log(1000,10)
print("O tangente é: ", tan)
print("O log é: ", log)
# rounding up / down
a = 3.8
print(math.ceil(a))
print(math.floor(a))
# square root and power
raiz = math.sqrt(36)
potencia = math.pow(2,3)
# sine and cosine (angles in radians, approximated with 3.14)
seno = math.sin(3.14/6) # 30 degrees
cosseno = math.cos(3.14/4) # 45 degrees
print("O seno é: ", seno)
print("O cosseno é: ", cosseno)
print(raiz)
print(potencia)
pi = math.pi
print("O valor de PI é: ", pi)
from typing import Final
PI: Final = 3.14159
# NOTE(review): reassigning a name annotated Final violates the declared
# contract — type checkers flag this; at runtime the new value (4) wins.
PI = 4
print(PI)
# Default AppConfig path so Django loads ParsingConfig for this app.
# NOTE: default_app_config is deprecated since Django 3.2 — confirm the target
# Django version before removing.
default_app_config = 'parsing.apps.ParsingConfig'
from flask_restful import Resource, reqparse
import psycopg2
import pandas as pd
from pandas.io.json import json_normalize
import cx_Oracle
from sqlalchemy import create_engine
import datetime
import numpy as np
import json
import re
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
#Epidemiological zoning request
Epidemiological=reqparse.RequestParser()
Epidemiological.add_argument('type',help = 'This field cannot be blank', required = True)
# Water-source request parser ("lay nguon nuoc" = fetch water source).
# Bug fix: this parser was commented out, but layNguonNuoc.post() below calls
# nn.parse_args(), which raised NameError at request time.
nn=reqparse.RequestParser()
nn.add_argument('type',help = 'This field cannot be blank', required = True)
def epidemiology(value):
    """Count hand-foot-mouth cases per district for one case category.

    Reads the case list from ``data.csv`` and the district GeoJSON from
    ``datajson.json``, filters cases whose ``LOAI2`` column equals *value*,
    and annotates every district feature with per-category counts.

    Returns a status dict whose ``data`` key is the annotated GeoJSON.

    Improvements over the original: the nine redundant per-count DataFrame
    aliases (all equal to ``data_district`` — the A10/A2/A6 counts even
    queried the wrong alias, harmless only because every alias was identical)
    are removed, and the GeoJSON is parsed with ``json.load`` directly.
    """
    df = pd.read_csv(r"data.csv", encoding='utf-8')
    with open(r'datajson.json', 'r') as myfile:
        statesdata = json.load(myfile)
    # Filter by requested category (LOAI2 column).
    data = df.query("LOAI2=='%s'" % value)
    # A case is "positive" when its LOAI column names any recognised strain.
    positive_query = ("LOAI=='EV71' or LOAI=='CA16' or LOAI=='Non EV71 va CA16' "
                      "or LOAI =='A6' or LOAI =='A10' or LOAI =='A2'")
    for feature in statesdata['features']:
        props = feature['properties']
        data_district = data.query("Huyen=='%s'" % props['name'])
        props['density'] = len(data_district)
        # Newly-arising cases ("Tan phat").
        props['tanPhat'] = len(data_district.query("Loaicabenh=='Tan phat'"))
        # Counts per confirmed strain.
        props['EV71'] = len(data_district.query("LOAI=='EV71'"))
        props['CA16'] = len(data_district.query("LOAI=='CA16'"))
        props['CHUNGKHAC'] = len(data_district.query("LOAI =='A6' or LOAI =='A10' or LOAI =='A2'"))
        props['A10'] = len(data_district.query("LOAI=='A10'"))
        props['A2'] = len(data_district.query("LOAI=='A2'"))
        props['A6'] = len(data_district.query("LOAI=='A6'"))
        # Total positive tests (added 2020-04-11).
        props['DuongTinh'] = len(data_district.query(positive_query))
    return {'status':'SUCCESS','message':'The number of positive cases is located in the area of Binh Dinh province','data':statesdata}
class EpidemiologicalZoning(Resource):
    """REST endpoint: per-district case counts for epidemiological zoning maps."""
    def post(self):
        # 'Positive' counts every case; any other non-empty type delegates to
        # epidemiology(), which filters by category.
        data = Epidemiological.parse_args()
        if data['type']=='Positive':
            df=pd.read_csv(r"data.csv",encoding='utf-8')
            with open(r'datajson.json', 'r') as myfile:
                datajson=myfile.read()
            data=df
            #data=df.query("KQXetNghiemTCM=='Duong Tinh'")
            statesdata = json.loads(datajson)
            districts = statesdata['features']
            for i,obj in enumerate(districts) :
                district_name = obj['properties']['name']
                data_district = data.query("Huyen=='%s'"%district_name)
                statesdata['features'][i]['properties']['density'] = len(data_district)
                # Begin per-district breakdown variables.
                # 2020-05-24: water-source selection option disabled.
                # dataNguonnuoc1 = data_district
                # Total count by water source: piped water.
                #statesdata['features'][i]['properties']['nuocMay'] = len(dataNguonnuoc1.query("Nguonnuoc=='Nuoc may'"))
                #dataNguonNuoc2 = data_district
                #statesdata['features'][i]['properties']['nuocGiengKhoan'] = len(dataNguonNuoc2.query("Nguonnuoc=='Nuoc gieng khoan'"))
                #dataNguonNuoc3 = data_district
                #statesdata['features'][i]['properties']['nuocTuNhien'] = len(dataNguonNuoc3.query("Nguonnuoc=='Nuoc tu nhien'"))
                # Case-type counts per district.
                #dataChangeCaBenh1 = data_district
                # Total newly-arising ("tan phat") cases.
                #statesdata['features'][i]['properties']['tanPhat'] = len(dataChangeCaBenh1.query("Loaicabenh=='Tan phat'"))
                # EV71-positive count per district.
                dataDuongTinhEV71 = data_district
                # Count of EV71-positive cases.
                statesdata['features'][i]['properties']['EV71'] = len(dataDuongTinhEV71.query("LOAI=='EV71'"))
                dataDuongTinhCA16 = data_district
                # Count of CA16-positive cases.
                statesdata['features'][i]['properties']['CA16'] = len(dataDuongTinhCA16.query("LOAI=='CA16'"))
                dataDuongTinhChungKhac = data_district
                # Count of other-strain cases.
                statesdata['features'][i]['properties']['CHUNGKHAC'] = len(dataDuongTinhChungKhac.query("LOAI=='Non EV71 va CA16'"))
                # Count for strain A10.
                # NOTE(review): the A10/A2/A6 counts below query
                # dataDuongTinhChungKhac rather than their own aliases; all the
                # aliases are the same frame (data_district), so the counts are
                # still correct.
                dataDuongTinhA10 = data_district
                statesdata['features'][i]['properties']['A10'] = len(dataDuongTinhChungKhac.query("LOAI=='A10'"))
                # Count for strain A2.
                dataDuongTinhA2 = data_district
                statesdata['features'][i]['properties']['A2'] = len(dataDuongTinhChungKhac.query("LOAI=='A2'"))
                # Count for strain A6.
                dataDuongTinhA6 = data_district
                statesdata['features'][i]['properties']['A6'] = len(dataDuongTinhChungKhac.query("LOAI=='A6'"))
                # Added 2020-04-11: number of positive test results.
                dataDuongTinh = data_district
                # Total across all recognised positive strains.
                statesdata['features'][i]['properties']['DuongTinh'] = len(dataDuongTinh.query("LOAI=='EV71' or LOAI=='CA16' or LOAI=='Non EV71 va CA16' or LOAI =='A6' or LOAI =='A10' or LOAI =='A2'"))
            return {'status':'SUCCESS','message':'The number of positive cases is located in the area of Binh Dinh province','data':statesdata}
        else:
            if data['type']!='':
                return epidemiology(data['type'])
            else:
                return {'message':'Something went wrong'}
def nguonnuoc(type):
    """Per-district case counts filtered by water source *type*.

    Reads the case list from ``data.csv`` and the district GeoJSON from
    ``datajson.json``, counts the matching cases per district, and returns
    the annotated GeoJSON in a status dict.
    """
    cases = pd.read_csv(r"data.csv", encoding='utf-8')
    with open(r'datajson.json', 'r') as geo_file:
        statesdata = json.loads(geo_file.read())
    matching = cases.query("Nguonnuoc=='%s'" % type)
    for feature in statesdata['features']:
        district_name = feature['properties']['name']
        feature['properties']['density'] = len(matching.query("Huyen=='%s'" % district_name))
    return {'status':'SUCCESS','message':'The number of positive cases is located in the area of Binh Dinh province','data':statesdata}
class layNguonNuoc(Resource):
    """REST endpoint returning per-district counts for a given water source."""
    def post(self):
        # NOTE(review): `nn` only exists as a commented-out parser at module
        # top, so this raises NameError as written — confirm and restore it.
        data = nn.parse_args()
        if data['type']!='':
            return nguonnuoc(data['type'])
        else:
            return {'message':'Something went wrong'}
|
import unittest
from unittest import TestCase
from escnn.gspaces import *
from escnn.nn import *
class TestRestriction(TestCase):
    """Equivariance checks for RestrictionModule across several group restrictions."""

    def _assert_restriction_equivariant(self, space, subgroup_id):
        # Build a field type carrying every representation of the space,
        # restrict it to the given subgroup, and verify equivariance.
        field_type = FieldType(space, list(space.representations.values()))
        module = RestrictionModule(field_type, subgroup_id)
        module.check_equivariance()

    def test_restrict_rotations(self):
        self._assert_restriction_equivariant(rot2dOnR2(-1, maximum_frequency=10), 4)

    def test_restrict_rotations_to_trivial(self):
        self._assert_restriction_equivariant(rot2dOnR2(-1, maximum_frequency=10), 1)

    def test_restrict_flipsrotations(self):
        self._assert_restriction_equivariant(flipRot2dOnR2(-1, maximum_frequency=10), (0., 10))

    def test_restrict_flipsrotations_to_rotations(self):
        self._assert_restriction_equivariant(flipRot2dOnR2(-1, maximum_frequency=10), (None, -1))

    def test_restrict_flipsrotations_to_flips(self):
        self._assert_restriction_equivariant(flipRot2dOnR2(-1, maximum_frequency=10), (0., 1))

    def test_restrict_fliprotations_to_trivial(self):
        self._assert_restriction_equivariant(flipRot2dOnR2(-1, maximum_frequency=10), (None, 1))

    def test_restrict_flips_to_trivial(self):
        self._assert_restriction_equivariant(flip2dOnR2(), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pseudorandom number generators — NOTE: classical congruential generators like
# these are NOT cryptographically secure; use the `secrets` module when
# security matters.
def mcg(alpha, beta, m, n):
    """Multiplicative congruential generator (MCG).

    Yields n values in [0, 1): x_{i+1} = (beta * x_i) mod m, scaled by m,
    starting from seed alpha.
    """
    state = alpha
    for _ in range(n):
        state = (beta * state) % m
        yield state / m
def mmg(b, c, k, n):
    """MacLaren-Marsaglia generator (MMG).

    Shuffles the stream b using selector stream c (values in [0, 1)) and a
    table of size k, yielding n values.
    """
    table = list(b[:k])
    for i in range(n):
        slot = int(c[i] * k)
        yield table[slot]
        # Refill the consumed slot from the tail of b.
        table[slot] = b[i + k]
|
from django.urls import path, include
from .views.project_view import *
from .views.column_view import *
from .views.task_view import *
from .views.subtask_view import *
from .views.comment_view import *
from .views.attachments_view import *
# URL routes for the project-management API: detail endpoints take a primary
# key; create/list endpoints take no arguments.
urlpatterns = [
    path('project/detail/<int:pk>/', ProjectDetailView.as_view()),
    path('project/create/', ProjectCreateView.as_view()),
    path('project/all/', ProjectListView.as_view()),
    path('project/tasks/assignee/all/', TasksAssigneeListView.as_view()),
    path('project/column/<int:pk>/', ColumnDetailView.as_view()),
    path('project/column/task/<int:pk>/', TaskDetailView.as_view()),
    path('project/column/task/subtask/<int:pk>/', SubtaskDetailView.as_view()),
    path('project/column/task/comment/<int:pk>/', CommentDetailView.as_view()),
    path('project/column/task/attachment/<int:pk>/', AttachmentDetailView.as_view()),
    path('project/column/create/', ColumnCreateView.as_view()),
    path('project/column/task/create/', TaskCreateView.as_view()),
    path('project/column/task/subtask/create/', SubtaskCreateView.as_view()),
    path('project/column/task/comment/create/', CommentCreateView.as_view()),
    path('project/column/task/attachment/create/', AttachmentCreateView.as_view()),
]
|
from django import forms
from accounts.models import GovDepartment
class SCDForm(forms.Form):
    """Form for selecting the government department to view."""
    # Dropdown of all departments, styled with GOV.UK design-system classes.
    department = forms.ModelChoiceField(
        queryset=GovDepartment.objects.all(),
        empty_label="Select department",
        widget=forms.Select(
            attrs={
                "class": "govuk-select",
                "aria-label": "department",
            }
        ),
        required=True,
        error_messages={
            "required": "You must select a department to continue",
        },
    )
|
# Find the most prolific sender in an mbox file: count 'From ' header lines
# per sender address and print the winner with its count.
name = input("Enter file:")
if len(name) < 1:
    name = "mbox-short.txt"

di = {}
# 'with' guarantees the handle is closed (the original leaked it). The
# one-element intermediate list around words[1] was pointless and is gone.
with open(name) as handle:
    for line in handle:
        # Only 'From ' header lines (note trailing space — not 'From:').
        if line.startswith('From '):
            sender = line.rstrip().split()[1]
            di[sender] = di.get(sender, 0) + 1

# Manual arg-max over the counts dictionary.
largest = -1
prolific_em = None
for k, v in di.items():
    if v > largest:
        largest = v
        prolific_em = k
print(prolific_em, largest)
|
"""
1:9 binding system solved using Lagrange multiplier approach
Modified Factory example utilising Lagrane multiplier to solve complex
concentration in a 1:9 protein:ligand binding system
"""
from timeit import default_timer as timer
from scipy.optimize import fsolve
import autograd.numpy as np
from autograd import grad, jacobian
def lagrange_1_to_9(p0, l0,kd1, kd2, kd3, kd4, kd5, kd6, kd7, kd8, kd9):
def F(X): # Augmented Lagrange function
pf=X[0]
lf=X[1]
pl1=pf*lf/kd1
pl2=pf*lf/kd2
pl3=pf*lf/kd3
pl4=pf*lf/kd4
pl5=pf*lf/kd5
pl6=pf*lf/kd6
pl7=pf*lf/kd7
pl8=pf*lf/kd8
pl9=pf*lf/kd9
pl12=(pl2*lf+pl1*lf)/(kd1+kd2)
pl13=(pl3*lf+pl1*lf)/(kd1+kd3)
pl14=(pl4*lf+pl1*lf)/(kd1+kd4)
pl15=(pl5*lf+pl1*lf)/(kd1+kd5)
pl16=(pl6*lf+pl1*lf)/(kd1+kd6)
pl17=(pl7*lf+pl1*lf)/(kd1+kd7)
pl18=(pl8*lf+pl1*lf)/(kd1+kd8)
pl19=(pl9*lf+pl1*lf)/(kd1+kd9)
pl23=(pl3*lf+pl2*lf)/(kd2+kd3)
pl24=(pl4*lf+pl2*lf)/(kd2+kd4)
pl25=(pl5*lf+pl2*lf)/(kd2+kd5)
pl26=(pl6*lf+pl2*lf)/(kd2+kd6)
pl27=(pl7*lf+pl2*lf)/(kd2+kd7)
pl28=(pl8*lf+pl2*lf)/(kd2+kd8)
pl29=(pl9*lf+pl2*lf)/(kd2+kd9)
pl34=(pl4*lf+pl3*lf)/(kd3+kd4)
pl35=(pl5*lf+pl3*lf)/(kd3+kd5)
pl36=(pl6*lf+pl3*lf)/(kd3+kd6)
pl37=(pl7*lf+pl3*lf)/(kd3+kd7)
pl38=(pl8*lf+pl3*lf)/(kd3+kd8)
pl39=(pl9*lf+pl3*lf)/(kd3+kd9)
pl45=(pl5*lf+pl4*lf)/(kd4+kd5)
pl46=(pl6*lf+pl4*lf)/(kd4+kd6)
pl47=(pl7*lf+pl4*lf)/(kd4+kd7)
pl48=(pl8*lf+pl4*lf)/(kd4+kd8)
pl49=(pl9*lf+pl4*lf)/(kd4+kd9)
pl56=(pl6*lf+pl5*lf)/(kd5+kd6)
pl57=(pl7*lf+pl5*lf)/(kd5+kd7)
pl58=(pl8*lf+pl5*lf)/(kd5+kd8)
pl59=(pl9*lf+pl5*lf)/(kd5+kd9)
pl67=(pl7*lf+pl6*lf)/(kd6+kd7)
pl68=(pl8*lf+pl6*lf)/(kd6+kd8)
pl69=(pl9*lf+pl6*lf)/(kd6+kd9)
pl78=(pl8*lf+pl7*lf)/(kd7+kd8)
pl79=(pl9*lf+pl7*lf)/(kd7+kd9)
pl89=(pl9*lf+pl8*lf)/(kd8+kd9)
pl123=(pl23*lf+pl13*lf+pl12*lf)/(kd1+kd2+kd3)
pl124=(pl24*lf+pl14*lf+pl12*lf)/(kd1+kd2+kd4)
pl125=(pl25*lf+pl15*lf+pl12*lf)/(kd1+kd2+kd5)
pl126=(pl26*lf+pl16*lf+pl12*lf)/(kd1+kd2+kd6)
pl127=(pl27*lf+pl17*lf+pl12*lf)/(kd1+kd2+kd7)
pl128=(pl28*lf+pl18*lf+pl12*lf)/(kd1+kd2+kd8)
pl129=(pl29*lf+pl19*lf+pl12*lf)/(kd1+kd2+kd9)
pl134=(pl34*lf+pl14*lf+pl13*lf)/(kd1+kd3+kd4)
pl135=(pl35*lf+pl15*lf+pl13*lf)/(kd1+kd3+kd5)
pl136=(pl36*lf+pl16*lf+pl13*lf)/(kd1+kd3+kd6)
pl137=(pl37*lf+pl17*lf+pl13*lf)/(kd1+kd3+kd7)
pl138=(pl38*lf+pl18*lf+pl13*lf)/(kd1+kd3+kd8)
pl139=(pl39*lf+pl19*lf+pl13*lf)/(kd1+kd3+kd9)
pl145=(pl45*lf+pl15*lf+pl14*lf)/(kd1+kd4+kd5)
pl146=(pl46*lf+pl16*lf+pl14*lf)/(kd1+kd4+kd6)
pl147=(pl47*lf+pl17*lf+pl14*lf)/(kd1+kd4+kd7)
pl148=(pl48*lf+pl18*lf+pl14*lf)/(kd1+kd4+kd8)
pl149=(pl49*lf+pl19*lf+pl14*lf)/(kd1+kd4+kd9)
pl156=(pl56*lf+pl16*lf+pl15*lf)/(kd1+kd5+kd6)
pl157=(pl57*lf+pl17*lf+pl15*lf)/(kd1+kd5+kd7)
pl158=(pl58*lf+pl18*lf+pl15*lf)/(kd1+kd5+kd8)
pl159=(pl59*lf+pl19*lf+pl15*lf)/(kd1+kd5+kd9)
pl167=(pl67*lf+pl17*lf+pl16*lf)/(kd1+kd6+kd7)
pl168=(pl68*lf+pl18*lf+pl16*lf)/(kd1+kd6+kd8)
pl169=(pl69*lf+pl19*lf+pl16*lf)/(kd1+kd6+kd9)
pl178=(pl78*lf+pl18*lf+pl17*lf)/(kd1+kd7+kd8)
pl179=(pl79*lf+pl19*lf+pl17*lf)/(kd1+kd7+kd9)
pl189=(pl89*lf+pl19*lf+pl18*lf)/(kd1+kd8+kd9)
pl234=(pl34*lf+pl24*lf+pl23*lf)/(kd2+kd3+kd4)
pl235=(pl35*lf+pl25*lf+pl23*lf)/(kd2+kd3+kd5)
pl236=(pl36*lf+pl26*lf+pl23*lf)/(kd2+kd3+kd6)
pl237=(pl37*lf+pl27*lf+pl23*lf)/(kd2+kd3+kd7)
pl238=(pl38*lf+pl28*lf+pl23*lf)/(kd2+kd3+kd8)
pl239=(pl39*lf+pl29*lf+pl23*lf)/(kd2+kd3+kd9)
pl245=(pl45*lf+pl25*lf+pl24*lf)/(kd2+kd4+kd5)
pl246=(pl46*lf+pl26*lf+pl24*lf)/(kd2+kd4+kd6)
pl247=(pl47*lf+pl27*lf+pl24*lf)/(kd2+kd4+kd7)
pl248=(pl48*lf+pl28*lf+pl24*lf)/(kd2+kd4+kd8)
pl249=(pl49*lf+pl29*lf+pl24*lf)/(kd2+kd4+kd9)
pl256=(pl56*lf+pl26*lf+pl25*lf)/(kd2+kd5+kd6)
pl257=(pl57*lf+pl27*lf+pl25*lf)/(kd2+kd5+kd7)
pl258=(pl58*lf+pl28*lf+pl25*lf)/(kd2+kd5+kd8)
pl259=(pl59*lf+pl29*lf+pl25*lf)/(kd2+kd5+kd9)
pl267=(pl67*lf+pl27*lf+pl26*lf)/(kd2+kd6+kd7)
pl268=(pl68*lf+pl28*lf+pl26*lf)/(kd2+kd6+kd8)
pl269=(pl69*lf+pl29*lf+pl26*lf)/(kd2+kd6+kd9)
pl278=(pl78*lf+pl28*lf+pl27*lf)/(kd2+kd7+kd8)
pl279=(pl79*lf+pl29*lf+pl27*lf)/(kd2+kd7+kd9)
pl289=(pl89*lf+pl29*lf+pl28*lf)/(kd2+kd8+kd9)
pl345=(pl45*lf+pl35*lf+pl34*lf)/(kd3+kd4+kd5)
pl346=(pl46*lf+pl36*lf+pl34*lf)/(kd3+kd4+kd6)
pl347=(pl47*lf+pl37*lf+pl34*lf)/(kd3+kd4+kd7)
pl348=(pl48*lf+pl38*lf+pl34*lf)/(kd3+kd4+kd8)
pl349=(pl49*lf+pl39*lf+pl34*lf)/(kd3+kd4+kd9)
pl356=(pl56*lf+pl36*lf+pl35*lf)/(kd3+kd5+kd6)
pl357=(pl57*lf+pl37*lf+pl35*lf)/(kd3+kd5+kd7)
pl358=(pl58*lf+pl38*lf+pl35*lf)/(kd3+kd5+kd8)
pl359=(pl59*lf+pl39*lf+pl35*lf)/(kd3+kd5+kd9)
pl367=(pl67*lf+pl37*lf+pl36*lf)/(kd3+kd6+kd7)
pl368=(pl68*lf+pl38*lf+pl36*lf)/(kd3+kd6+kd8)
pl369=(pl69*lf+pl39*lf+pl36*lf)/(kd3+kd6+kd9)
pl378=(pl78*lf+pl38*lf+pl37*lf)/(kd3+kd7+kd8)
pl379=(pl79*lf+pl39*lf+pl37*lf)/(kd3+kd7+kd9)
pl389=(pl89*lf+pl39*lf+pl38*lf)/(kd3+kd8+kd9)
pl456=(pl56*lf+pl46*lf+pl45*lf)/(kd4+kd5+kd6)
pl457=(pl57*lf+pl47*lf+pl45*lf)/(kd4+kd5+kd7)
pl458=(pl58*lf+pl48*lf+pl45*lf)/(kd4+kd5+kd8)
pl459=(pl59*lf+pl49*lf+pl45*lf)/(kd4+kd5+kd9)
pl467=(pl67*lf+pl47*lf+pl46*lf)/(kd4+kd6+kd7)
pl468=(pl68*lf+pl48*lf+pl46*lf)/(kd4+kd6+kd8)
pl469=(pl69*lf+pl49*lf+pl46*lf)/(kd4+kd6+kd9)
pl478=(pl78*lf+pl48*lf+pl47*lf)/(kd4+kd7+kd8)
pl479=(pl79*lf+pl49*lf+pl47*lf)/(kd4+kd7+kd9)
pl489=(pl89*lf+pl49*lf+pl48*lf)/(kd4+kd8+kd9)
pl567=(pl67*lf+pl57*lf+pl56*lf)/(kd5+kd6+kd7)
pl568=(pl68*lf+pl58*lf+pl56*lf)/(kd5+kd6+kd8)
pl569=(pl69*lf+pl59*lf+pl56*lf)/(kd5+kd6+kd9)
pl578=(pl78*lf+pl58*lf+pl57*lf)/(kd5+kd7+kd8)
pl579=(pl79*lf+pl59*lf+pl57*lf)/(kd5+kd7+kd9)
pl589=(pl89*lf+pl59*lf+pl58*lf)/(kd5+kd8+kd9)
pl678=(pl78*lf+pl68*lf+pl67*lf)/(kd6+kd7+kd8)
pl679=(pl79*lf+pl69*lf+pl67*lf)/(kd6+kd7+kd9)
pl689=(pl89*lf+pl69*lf+pl68*lf)/(kd6+kd8+kd9)
pl789=(pl89*lf+pl79*lf+pl78*lf)/(kd7+kd8+kd9)
# The generated source spelled out every pl<combo> assignment for
# combination sizes 4..9 of sites 1..9 (382 hand-expanded lines).  The
# loop below is an exact replacement: for each k-subset `combo` it binds
# the same module-global name
#
#   pl<combo> = (pl<combo minus site c1>*lf + ... + pl<combo minus site ck>*lf)
#               / (kd<c1> + ... + kd<ck>)
#
# with terms accumulated in the same left-to-right order as the generated
# code (sites dropped in increasing order, kd summed in increasing order),
# so floating-point results are unchanged.  Processing k in ascending
# order guarantees the size-(k-1) terms already exist.
# Depends on lf, kd1..kd9 and the size-3 pl terms defined above.
from itertools import combinations as _combinations

for _k in range(4, 10):
    for _combo in _combinations(range(1, 10), _k):
        _name = "".join(str(_s) for _s in _combo)
        # Numerator: drop one site at a time, in increasing site order
        # (e.g. pl1234 uses pl234, pl134, pl124, pl123), each scaled by lf.
        _terms = [globals()["pl" + _name[:_i] + _name[_i + 1:]] * lf
                  for _i in range(_k)]
        _num = _terms[0]
        for _t in _terms[1:]:
            _num = _num + _t
        # Denominator: kd summed over the subset's sites, ascending.
        _den = globals()["kd" + _name[0]]
        for _c in _name[1:]:
            _den = _den + globals()["kd" + _c]
        globals()["pl" + _name] = _num / _den
# Protein mass balance: total protein p0 minus the free form pf and every
# bound species pl<combo> over all 511 non-empty subsets of sites 1..9.
# BUG FIX: in the generated original a hard line wrap split the token
# "pl13569" across two physical lines ("...+pl1" / "3569+..."), which is a
# SyntaxError inside the parenthesized sum.  Rebuilt here with the
# identical left-to-right summation order (pf first, then subsets by
# ascending size, lexicographic within each size).
from itertools import combinations as _combinations

_total_protein = pf
for _k in range(1, 10):
    for _combo in _combinations(range(1, 10), _k):
        _total_protein = _total_protein + globals()["pl" + "".join(str(_s) for _s in _combo)]
constraint1 = p0 - _total_protein
constraint2=l0-(lf+1*(pl1+pl2+pl3+pl4+pl5+pl6+pl7+pl8+pl9)+2*(pl12+pl13+pl14+pl15+pl16+pl17+pl18+pl19+pl23+pl24+pl25+pl26+pl27+pl28+pl29+pl34+pl35+pl36+pl37+pl38+pl39+pl45+pl46+pl47+pl48+pl49+pl56+pl57+pl58+pl59+pl67+pl68+pl69+pl78+pl79+pl89)+3*(pl123+pl124+pl125+pl126+pl127+pl128+pl129+pl134+pl135+pl136+pl137+pl138+pl139+pl145+pl146+pl147+pl148+pl149+pl156+pl157+pl158+pl159+pl167+pl168+pl169+pl178+pl179+pl189+pl234+pl235+pl236+pl237+pl238+pl239+pl245+pl246+pl247+pl248+pl249+pl256+pl257+pl258+pl259+pl267+pl268+pl269+pl278+pl279+pl289+pl345+pl346+pl347+pl348+pl349+pl356+pl357+pl358+pl359+pl367+pl368+pl369+pl378+pl379+pl389+pl456+pl457+pl458+pl459+pl467+pl468+pl469+pl478+pl479+pl489+pl567+pl568+pl569+pl578+pl579+pl589+pl678+pl679+pl689+pl789)+4*(pl1234+pl1235+pl1236+pl1237+pl1238+pl1239+pl1245+pl1246+pl1247+pl1248+pl1249+pl1256+pl1257+pl1258+pl1259+pl1267+pl1268+pl1269+pl1278+pl1279+pl1289+pl1345+pl1346+pl1347+pl1348+pl1349+pl1356+pl1357+pl1358+pl1359+pl1367+pl1368+pl1369+pl1378+pl1379+pl1389+pl1456+pl1457+pl1458+pl1459+pl1467+pl1468+pl1469+pl1478+pl1479+pl1489+pl1567+pl1568+pl1569+pl1578+pl1579+pl1589+pl1678+pl1679+pl1689+pl1789+pl2345+pl2346+pl2347+pl2348+pl2349+pl2356+pl2357+pl2358+pl2359+pl2367+pl2368+pl2369+pl2378+pl2379+pl2389+pl2456+pl2457+pl2458+pl2459+pl2467+pl2468+pl2469+pl2478+pl2479+pl2489+pl2567+pl2568+pl2569+pl2578+pl2579+pl2589+pl2678+pl2679+pl2689+pl2789+pl3456+pl3457+pl3458+pl3459+pl3467+pl3468+pl3469+pl3478+pl3479+pl3489+pl3567+pl3568+pl3569+pl3578+pl3579+pl3589+pl3678+pl3679+pl3689+pl3789+pl4567+pl4568+pl4569+pl4578+pl4579+pl4589+pl4678+pl4679+pl4689+pl4789+pl5678+pl5679+pl5689+pl5789+pl6789)+5*(pl12345+pl12346+pl12347+pl12348+pl12349+pl12356+pl12357+pl12358+pl12359+pl12367+pl12368+pl12369+pl12378+pl12379+pl12389+pl12456+pl12457+pl12458+pl12459+pl12467+pl12468+pl12469+pl12478+pl12479+pl12489+pl12567+pl12568+pl12569+pl12578+pl12579+pl12589+pl12678+pl12679+pl12689+pl12789+pl13456+pl13457+pl13458+pl13459+pl13467+pl13468+pl13469+pl13478+pl13479+pl13489+
pl13567+pl13568+pl13569+pl13578+pl13579+pl13589+pl13678+pl13679+pl13689+pl13789+pl14567+pl14568+pl14569+pl14578+pl14579+pl14589+pl14678+pl14679+pl14689+pl14789+pl15678+pl15679+pl15689+pl15789+pl16789+pl23456+pl23457+pl23458+pl23459+pl23467+pl23468+pl23469+pl23478+pl23479+pl23489+pl23567+pl23568+pl23569+pl23578+pl23579+pl23589+pl23678+pl23679+pl23689+pl23789+pl24567+pl24568+pl24569+pl24578+pl24579+pl24589+pl24678+pl24679+pl24689+pl24789+pl25678+pl25679+pl25689+pl25789+pl26789+pl34567+pl34568+pl34569+pl34578+pl34579+pl34589+pl34678+pl34679+pl34689+pl34789+pl35678+pl35679+pl35689+pl35789+pl36789+pl45678+pl45679+pl45689+pl45789+pl46789+pl56789)+6*(pl123456+pl123457+pl123458+pl123459+pl123467+pl123468+pl123469+pl123478+pl123479+pl123489+pl123567+pl123568+pl123569+pl123578+pl123579+pl123589+pl123678+pl123679+pl123689+pl123789+pl124567+pl124568+pl124569+pl124578+pl124579+pl124589+pl124678+pl124679+pl124689+pl124789+pl125678+pl125679+pl125689+pl125789+pl126789+pl134567+pl134568+pl134569+pl134578+pl134579+pl134589+pl134678+pl134679+pl134689+pl134789+pl135678+pl135679+pl135689+pl135789+pl136789+pl145678+pl145679+pl145689+pl145789+pl146789+pl156789+pl234567+pl234568+pl234569+pl234578+pl234579+pl234589+pl234678+pl234679+pl234689+pl234789+pl235678+pl235679+pl235689+pl235789+pl236789+pl245678+pl245679+pl245689+pl245789+pl246789+pl256789+pl345678+pl345679+pl345689+pl345789+pl346789+pl356789+pl456789)+7*(pl1234567+pl1234568+pl1234569+pl1234578+pl1234579+pl1234589+pl1234678+pl1234679+pl1234689+pl1234789+pl1235678+pl1235679+pl1235689+pl1235789+pl1236789+pl1245678+pl1245679+pl1245689+pl1245789+pl1246789+pl1256789+pl1345678+pl1345679+pl1345689+pl1345789+pl1346789+pl1356789+pl1456789+pl2345678+pl2345679+pl2345689+pl2345789+pl2346789+pl2356789+pl2456789+pl3456789)+8*(pl12345678+pl12345679+pl12345689+pl12345789+pl12346789+pl12356789+pl12456789+pl13456789+pl23456789)+9*(pl123456789))
nonzero_constraint=(constraint1-abs(constraint1))-(constraint2-abs(constraint2))
return pl123456789 + X[2]*constraint1 + X[3]*constraint2 + X[4]*nonzero_constraint
# Solve the constrained equilibrium: find the stationary point of the Lagrange
# function F (defined above; truncated here) in its 5 unknowns
# [pf, lf, lam1, lam2, lam3] = [free protein, free ligand, 3 multipliers].
# NOTE(review): `grad`, `jacobian` and `fsolve` are imported outside this view —
# presumably autograd.grad/jacobian and scipy.optimize.fsolve; confirm.
dfdL = grad(F, 0)  # Gradients of the Lagrange function
# Initial guess: start the free species at the total concentrations p0, l0 and
# the multipliers at 1.0; the analytic Jacobian speeds/stabilizes the root find.
pf, lf, lam1, lam2,lam3= fsolve(dfdL, [p0, l0]+[1.0]*3, fprime=jacobian(dfdL))
# Singly-bound complexes pl_i: mass-action law [PL_i] = [P][L]/Kd_i for each of
# the nine sites. pf/lf are the free-species solutions from fsolve above;
# kd1..kd9 are per-site dissociation constants defined outside this view.
pl1=pf*lf/kd1
pl2=pf*lf/kd2
pl3=pf*lf/kd3
pl4=pf*lf/kd4
pl5=pf*lf/kd5
pl6=pf*lf/kd6
pl7=pf*lf/kd7
pl8=pf*lf/kd8
pl9=pf*lf/kd9
# Doubly-bound complexes pl_ij, one line per 2-subset {i,j} of the nine sites
# (C(9,2) = 36 lines, lexicographic order). Recurrence: the numerator sums, for
# each site removed from the subset, (concentration of the remaining complex)*lf;
# the denominator is the sum of the subset's Kd values. The same pattern repeats
# for every larger subset size below.
pl12=(pl2*lf+pl1*lf)/(kd1+kd2)
pl13=(pl3*lf+pl1*lf)/(kd1+kd3)
pl14=(pl4*lf+pl1*lf)/(kd1+kd4)
pl15=(pl5*lf+pl1*lf)/(kd1+kd5)
pl16=(pl6*lf+pl1*lf)/(kd1+kd6)
pl17=(pl7*lf+pl1*lf)/(kd1+kd7)
pl18=(pl8*lf+pl1*lf)/(kd1+kd8)
pl19=(pl9*lf+pl1*lf)/(kd1+kd9)
pl23=(pl3*lf+pl2*lf)/(kd2+kd3)
pl24=(pl4*lf+pl2*lf)/(kd2+kd4)
pl25=(pl5*lf+pl2*lf)/(kd2+kd5)
pl26=(pl6*lf+pl2*lf)/(kd2+kd6)
pl27=(pl7*lf+pl2*lf)/(kd2+kd7)
pl28=(pl8*lf+pl2*lf)/(kd2+kd8)
pl29=(pl9*lf+pl2*lf)/(kd2+kd9)
pl34=(pl4*lf+pl3*lf)/(kd3+kd4)
pl35=(pl5*lf+pl3*lf)/(kd3+kd5)
pl36=(pl6*lf+pl3*lf)/(kd3+kd6)
pl37=(pl7*lf+pl3*lf)/(kd3+kd7)
pl38=(pl8*lf+pl3*lf)/(kd3+kd8)
pl39=(pl9*lf+pl3*lf)/(kd3+kd9)
pl45=(pl5*lf+pl4*lf)/(kd4+kd5)
pl46=(pl6*lf+pl4*lf)/(kd4+kd6)
pl47=(pl7*lf+pl4*lf)/(kd4+kd7)
pl48=(pl8*lf+pl4*lf)/(kd4+kd8)
pl49=(pl9*lf+pl4*lf)/(kd4+kd9)
pl56=(pl6*lf+pl5*lf)/(kd5+kd6)
pl57=(pl7*lf+pl5*lf)/(kd5+kd7)
pl58=(pl8*lf+pl5*lf)/(kd5+kd8)
pl59=(pl9*lf+pl5*lf)/(kd5+kd9)
pl67=(pl7*lf+pl6*lf)/(kd6+kd7)
pl68=(pl8*lf+pl6*lf)/(kd6+kd8)
pl69=(pl9*lf+pl6*lf)/(kd6+kd9)
pl78=(pl8*lf+pl7*lf)/(kd7+kd8)
pl79=(pl9*lf+pl7*lf)/(kd7+kd9)
pl89=(pl9*lf+pl8*lf)/(kd8+kd9)
# Triply-bound complexes pl_ijk, one line per 3-subset (C(9,3) = 84 lines,
# lexicographic order), built from the pair concentrations above by the same
# remove-one-site recurrence.
pl123=(pl23*lf+pl13*lf+pl12*lf)/(kd1+kd2+kd3)
pl124=(pl24*lf+pl14*lf+pl12*lf)/(kd1+kd2+kd4)
pl125=(pl25*lf+pl15*lf+pl12*lf)/(kd1+kd2+kd5)
pl126=(pl26*lf+pl16*lf+pl12*lf)/(kd1+kd2+kd6)
pl127=(pl27*lf+pl17*lf+pl12*lf)/(kd1+kd2+kd7)
pl128=(pl28*lf+pl18*lf+pl12*lf)/(kd1+kd2+kd8)
pl129=(pl29*lf+pl19*lf+pl12*lf)/(kd1+kd2+kd9)
pl134=(pl34*lf+pl14*lf+pl13*lf)/(kd1+kd3+kd4)
pl135=(pl35*lf+pl15*lf+pl13*lf)/(kd1+kd3+kd5)
pl136=(pl36*lf+pl16*lf+pl13*lf)/(kd1+kd3+kd6)
pl137=(pl37*lf+pl17*lf+pl13*lf)/(kd1+kd3+kd7)
pl138=(pl38*lf+pl18*lf+pl13*lf)/(kd1+kd3+kd8)
pl139=(pl39*lf+pl19*lf+pl13*lf)/(kd1+kd3+kd9)
pl145=(pl45*lf+pl15*lf+pl14*lf)/(kd1+kd4+kd5)
pl146=(pl46*lf+pl16*lf+pl14*lf)/(kd1+kd4+kd6)
pl147=(pl47*lf+pl17*lf+pl14*lf)/(kd1+kd4+kd7)
pl148=(pl48*lf+pl18*lf+pl14*lf)/(kd1+kd4+kd8)
pl149=(pl49*lf+pl19*lf+pl14*lf)/(kd1+kd4+kd9)
pl156=(pl56*lf+pl16*lf+pl15*lf)/(kd1+kd5+kd6)
pl157=(pl57*lf+pl17*lf+pl15*lf)/(kd1+kd5+kd7)
pl158=(pl58*lf+pl18*lf+pl15*lf)/(kd1+kd5+kd8)
pl159=(pl59*lf+pl19*lf+pl15*lf)/(kd1+kd5+kd9)
pl167=(pl67*lf+pl17*lf+pl16*lf)/(kd1+kd6+kd7)
pl168=(pl68*lf+pl18*lf+pl16*lf)/(kd1+kd6+kd8)
pl169=(pl69*lf+pl19*lf+pl16*lf)/(kd1+kd6+kd9)
pl178=(pl78*lf+pl18*lf+pl17*lf)/(kd1+kd7+kd8)
pl179=(pl79*lf+pl19*lf+pl17*lf)/(kd1+kd7+kd9)
pl189=(pl89*lf+pl19*lf+pl18*lf)/(kd1+kd8+kd9)
pl234=(pl34*lf+pl24*lf+pl23*lf)/(kd2+kd3+kd4)
pl235=(pl35*lf+pl25*lf+pl23*lf)/(kd2+kd3+kd5)
pl236=(pl36*lf+pl26*lf+pl23*lf)/(kd2+kd3+kd6)
pl237=(pl37*lf+pl27*lf+pl23*lf)/(kd2+kd3+kd7)
pl238=(pl38*lf+pl28*lf+pl23*lf)/(kd2+kd3+kd8)
pl239=(pl39*lf+pl29*lf+pl23*lf)/(kd2+kd3+kd9)
pl245=(pl45*lf+pl25*lf+pl24*lf)/(kd2+kd4+kd5)
pl246=(pl46*lf+pl26*lf+pl24*lf)/(kd2+kd4+kd6)
pl247=(pl47*lf+pl27*lf+pl24*lf)/(kd2+kd4+kd7)
pl248=(pl48*lf+pl28*lf+pl24*lf)/(kd2+kd4+kd8)
pl249=(pl49*lf+pl29*lf+pl24*lf)/(kd2+kd4+kd9)
pl256=(pl56*lf+pl26*lf+pl25*lf)/(kd2+kd5+kd6)
pl257=(pl57*lf+pl27*lf+pl25*lf)/(kd2+kd5+kd7)
pl258=(pl58*lf+pl28*lf+pl25*lf)/(kd2+kd5+kd8)
pl259=(pl59*lf+pl29*lf+pl25*lf)/(kd2+kd5+kd9)
pl267=(pl67*lf+pl27*lf+pl26*lf)/(kd2+kd6+kd7)
pl268=(pl68*lf+pl28*lf+pl26*lf)/(kd2+kd6+kd8)
pl269=(pl69*lf+pl29*lf+pl26*lf)/(kd2+kd6+kd9)
pl278=(pl78*lf+pl28*lf+pl27*lf)/(kd2+kd7+kd8)
pl279=(pl79*lf+pl29*lf+pl27*lf)/(kd2+kd7+kd9)
pl289=(pl89*lf+pl29*lf+pl28*lf)/(kd2+kd8+kd9)
pl345=(pl45*lf+pl35*lf+pl34*lf)/(kd3+kd4+kd5)
pl346=(pl46*lf+pl36*lf+pl34*lf)/(kd3+kd4+kd6)
pl347=(pl47*lf+pl37*lf+pl34*lf)/(kd3+kd4+kd7)
pl348=(pl48*lf+pl38*lf+pl34*lf)/(kd3+kd4+kd8)
pl349=(pl49*lf+pl39*lf+pl34*lf)/(kd3+kd4+kd9)
pl356=(pl56*lf+pl36*lf+pl35*lf)/(kd3+kd5+kd6)
pl357=(pl57*lf+pl37*lf+pl35*lf)/(kd3+kd5+kd7)
pl358=(pl58*lf+pl38*lf+pl35*lf)/(kd3+kd5+kd8)
pl359=(pl59*lf+pl39*lf+pl35*lf)/(kd3+kd5+kd9)
pl367=(pl67*lf+pl37*lf+pl36*lf)/(kd3+kd6+kd7)
pl368=(pl68*lf+pl38*lf+pl36*lf)/(kd3+kd6+kd8)
pl369=(pl69*lf+pl39*lf+pl36*lf)/(kd3+kd6+kd9)
pl378=(pl78*lf+pl38*lf+pl37*lf)/(kd3+kd7+kd8)
pl379=(pl79*lf+pl39*lf+pl37*lf)/(kd3+kd7+kd9)
pl389=(pl89*lf+pl39*lf+pl38*lf)/(kd3+kd8+kd9)
pl456=(pl56*lf+pl46*lf+pl45*lf)/(kd4+kd5+kd6)
pl457=(pl57*lf+pl47*lf+pl45*lf)/(kd4+kd5+kd7)
pl458=(pl58*lf+pl48*lf+pl45*lf)/(kd4+kd5+kd8)
pl459=(pl59*lf+pl49*lf+pl45*lf)/(kd4+kd5+kd9)
pl467=(pl67*lf+pl47*lf+pl46*lf)/(kd4+kd6+kd7)
pl468=(pl68*lf+pl48*lf+pl46*lf)/(kd4+kd6+kd8)
pl469=(pl69*lf+pl49*lf+pl46*lf)/(kd4+kd6+kd9)
pl478=(pl78*lf+pl48*lf+pl47*lf)/(kd4+kd7+kd8)
pl479=(pl79*lf+pl49*lf+pl47*lf)/(kd4+kd7+kd9)
pl489=(pl89*lf+pl49*lf+pl48*lf)/(kd4+kd8+kd9)
pl567=(pl67*lf+pl57*lf+pl56*lf)/(kd5+kd6+kd7)
pl568=(pl68*lf+pl58*lf+pl56*lf)/(kd5+kd6+kd8)
pl569=(pl69*lf+pl59*lf+pl56*lf)/(kd5+kd6+kd9)
pl578=(pl78*lf+pl58*lf+pl57*lf)/(kd5+kd7+kd8)
pl579=(pl79*lf+pl59*lf+pl57*lf)/(kd5+kd7+kd9)
pl589=(pl89*lf+pl59*lf+pl58*lf)/(kd5+kd8+kd9)
pl678=(pl78*lf+pl68*lf+pl67*lf)/(kd6+kd7+kd8)
pl679=(pl79*lf+pl69*lf+pl67*lf)/(kd6+kd7+kd9)
pl689=(pl89*lf+pl69*lf+pl68*lf)/(kd6+kd8+kd9)
pl789=(pl89*lf+pl79*lf+pl78*lf)/(kd7+kd8+kd9)
# Quadruply-bound complexes, one line per 4-subset (C(9,4) = 126 lines,
# lexicographic order), from the triple concentrations above.
pl1234=(pl234*lf+pl134*lf+pl124*lf+pl123*lf)/(kd1+kd2+kd3+kd4)
pl1235=(pl235*lf+pl135*lf+pl125*lf+pl123*lf)/(kd1+kd2+kd3+kd5)
pl1236=(pl236*lf+pl136*lf+pl126*lf+pl123*lf)/(kd1+kd2+kd3+kd6)
pl1237=(pl237*lf+pl137*lf+pl127*lf+pl123*lf)/(kd1+kd2+kd3+kd7)
pl1238=(pl238*lf+pl138*lf+pl128*lf+pl123*lf)/(kd1+kd2+kd3+kd8)
pl1239=(pl239*lf+pl139*lf+pl129*lf+pl123*lf)/(kd1+kd2+kd3+kd9)
pl1245=(pl245*lf+pl145*lf+pl125*lf+pl124*lf)/(kd1+kd2+kd4+kd5)
pl1246=(pl246*lf+pl146*lf+pl126*lf+pl124*lf)/(kd1+kd2+kd4+kd6)
pl1247=(pl247*lf+pl147*lf+pl127*lf+pl124*lf)/(kd1+kd2+kd4+kd7)
pl1248=(pl248*lf+pl148*lf+pl128*lf+pl124*lf)/(kd1+kd2+kd4+kd8)
pl1249=(pl249*lf+pl149*lf+pl129*lf+pl124*lf)/(kd1+kd2+kd4+kd9)
pl1256=(pl256*lf+pl156*lf+pl126*lf+pl125*lf)/(kd1+kd2+kd5+kd6)
pl1257=(pl257*lf+pl157*lf+pl127*lf+pl125*lf)/(kd1+kd2+kd5+kd7)
pl1258=(pl258*lf+pl158*lf+pl128*lf+pl125*lf)/(kd1+kd2+kd5+kd8)
pl1259=(pl259*lf+pl159*lf+pl129*lf+pl125*lf)/(kd1+kd2+kd5+kd9)
pl1267=(pl267*lf+pl167*lf+pl127*lf+pl126*lf)/(kd1+kd2+kd6+kd7)
pl1268=(pl268*lf+pl168*lf+pl128*lf+pl126*lf)/(kd1+kd2+kd6+kd8)
pl1269=(pl269*lf+pl169*lf+pl129*lf+pl126*lf)/(kd1+kd2+kd6+kd9)
pl1278=(pl278*lf+pl178*lf+pl128*lf+pl127*lf)/(kd1+kd2+kd7+kd8)
pl1279=(pl279*lf+pl179*lf+pl129*lf+pl127*lf)/(kd1+kd2+kd7+kd9)
pl1289=(pl289*lf+pl189*lf+pl129*lf+pl128*lf)/(kd1+kd2+kd8+kd9)
pl1345=(pl345*lf+pl145*lf+pl135*lf+pl134*lf)/(kd1+kd3+kd4+kd5)
pl1346=(pl346*lf+pl146*lf+pl136*lf+pl134*lf)/(kd1+kd3+kd4+kd6)
pl1347=(pl347*lf+pl147*lf+pl137*lf+pl134*lf)/(kd1+kd3+kd4+kd7)
pl1348=(pl348*lf+pl148*lf+pl138*lf+pl134*lf)/(kd1+kd3+kd4+kd8)
pl1349=(pl349*lf+pl149*lf+pl139*lf+pl134*lf)/(kd1+kd3+kd4+kd9)
pl1356=(pl356*lf+pl156*lf+pl136*lf+pl135*lf)/(kd1+kd3+kd5+kd6)
pl1357=(pl357*lf+pl157*lf+pl137*lf+pl135*lf)/(kd1+kd3+kd5+kd7)
pl1358=(pl358*lf+pl158*lf+pl138*lf+pl135*lf)/(kd1+kd3+kd5+kd8)
pl1359=(pl359*lf+pl159*lf+pl139*lf+pl135*lf)/(kd1+kd3+kd5+kd9)
pl1367=(pl367*lf+pl167*lf+pl137*lf+pl136*lf)/(kd1+kd3+kd6+kd7)
pl1368=(pl368*lf+pl168*lf+pl138*lf+pl136*lf)/(kd1+kd3+kd6+kd8)
pl1369=(pl369*lf+pl169*lf+pl139*lf+pl136*lf)/(kd1+kd3+kd6+kd9)
pl1378=(pl378*lf+pl178*lf+pl138*lf+pl137*lf)/(kd1+kd3+kd7+kd8)
pl1379=(pl379*lf+pl179*lf+pl139*lf+pl137*lf)/(kd1+kd3+kd7+kd9)
pl1389=(pl389*lf+pl189*lf+pl139*lf+pl138*lf)/(kd1+kd3+kd8+kd9)
pl1456=(pl456*lf+pl156*lf+pl146*lf+pl145*lf)/(kd1+kd4+kd5+kd6)
pl1457=(pl457*lf+pl157*lf+pl147*lf+pl145*lf)/(kd1+kd4+kd5+kd7)
pl1458=(pl458*lf+pl158*lf+pl148*lf+pl145*lf)/(kd1+kd4+kd5+kd8)
pl1459=(pl459*lf+pl159*lf+pl149*lf+pl145*lf)/(kd1+kd4+kd5+kd9)
pl1467=(pl467*lf+pl167*lf+pl147*lf+pl146*lf)/(kd1+kd4+kd6+kd7)
pl1468=(pl468*lf+pl168*lf+pl148*lf+pl146*lf)/(kd1+kd4+kd6+kd8)
pl1469=(pl469*lf+pl169*lf+pl149*lf+pl146*lf)/(kd1+kd4+kd6+kd9)
pl1478=(pl478*lf+pl178*lf+pl148*lf+pl147*lf)/(kd1+kd4+kd7+kd8)
pl1479=(pl479*lf+pl179*lf+pl149*lf+pl147*lf)/(kd1+kd4+kd7+kd9)
pl1489=(pl489*lf+pl189*lf+pl149*lf+pl148*lf)/(kd1+kd4+kd8+kd9)
pl1567=(pl567*lf+pl167*lf+pl157*lf+pl156*lf)/(kd1+kd5+kd6+kd7)
pl1568=(pl568*lf+pl168*lf+pl158*lf+pl156*lf)/(kd1+kd5+kd6+kd8)
pl1569=(pl569*lf+pl169*lf+pl159*lf+pl156*lf)/(kd1+kd5+kd6+kd9)
pl1578=(pl578*lf+pl178*lf+pl158*lf+pl157*lf)/(kd1+kd5+kd7+kd8)
pl1579=(pl579*lf+pl179*lf+pl159*lf+pl157*lf)/(kd1+kd5+kd7+kd9)
pl1589=(pl589*lf+pl189*lf+pl159*lf+pl158*lf)/(kd1+kd5+kd8+kd9)
pl1678=(pl678*lf+pl178*lf+pl168*lf+pl167*lf)/(kd1+kd6+kd7+kd8)
pl1679=(pl679*lf+pl179*lf+pl169*lf+pl167*lf)/(kd1+kd6+kd7+kd9)
pl1689=(pl689*lf+pl189*lf+pl169*lf+pl168*lf)/(kd1+kd6+kd8+kd9)
pl1789=(pl789*lf+pl189*lf+pl179*lf+pl178*lf)/(kd1+kd7+kd8+kd9)
pl2345=(pl345*lf+pl245*lf+pl235*lf+pl234*lf)/(kd2+kd3+kd4+kd5)
pl2346=(pl346*lf+pl246*lf+pl236*lf+pl234*lf)/(kd2+kd3+kd4+kd6)
pl2347=(pl347*lf+pl247*lf+pl237*lf+pl234*lf)/(kd2+kd3+kd4+kd7)
pl2348=(pl348*lf+pl248*lf+pl238*lf+pl234*lf)/(kd2+kd3+kd4+kd8)
pl2349=(pl349*lf+pl249*lf+pl239*lf+pl234*lf)/(kd2+kd3+kd4+kd9)
pl2356=(pl356*lf+pl256*lf+pl236*lf+pl235*lf)/(kd2+kd3+kd5+kd6)
pl2357=(pl357*lf+pl257*lf+pl237*lf+pl235*lf)/(kd2+kd3+kd5+kd7)
pl2358=(pl358*lf+pl258*lf+pl238*lf+pl235*lf)/(kd2+kd3+kd5+kd8)
pl2359=(pl359*lf+pl259*lf+pl239*lf+pl235*lf)/(kd2+kd3+kd5+kd9)
pl2367=(pl367*lf+pl267*lf+pl237*lf+pl236*lf)/(kd2+kd3+kd6+kd7)
pl2368=(pl368*lf+pl268*lf+pl238*lf+pl236*lf)/(kd2+kd3+kd6+kd8)
pl2369=(pl369*lf+pl269*lf+pl239*lf+pl236*lf)/(kd2+kd3+kd6+kd9)
pl2378=(pl378*lf+pl278*lf+pl238*lf+pl237*lf)/(kd2+kd3+kd7+kd8)
pl2379=(pl379*lf+pl279*lf+pl239*lf+pl237*lf)/(kd2+kd3+kd7+kd9)
pl2389=(pl389*lf+pl289*lf+pl239*lf+pl238*lf)/(kd2+kd3+kd8+kd9)
pl2456=(pl456*lf+pl256*lf+pl246*lf+pl245*lf)/(kd2+kd4+kd5+kd6)
pl2457=(pl457*lf+pl257*lf+pl247*lf+pl245*lf)/(kd2+kd4+kd5+kd7)
pl2458=(pl458*lf+pl258*lf+pl248*lf+pl245*lf)/(kd2+kd4+kd5+kd8)
pl2459=(pl459*lf+pl259*lf+pl249*lf+pl245*lf)/(kd2+kd4+kd5+kd9)
pl2467=(pl467*lf+pl267*lf+pl247*lf+pl246*lf)/(kd2+kd4+kd6+kd7)
pl2468=(pl468*lf+pl268*lf+pl248*lf+pl246*lf)/(kd2+kd4+kd6+kd8)
pl2469=(pl469*lf+pl269*lf+pl249*lf+pl246*lf)/(kd2+kd4+kd6+kd9)
pl2478=(pl478*lf+pl278*lf+pl248*lf+pl247*lf)/(kd2+kd4+kd7+kd8)
pl2479=(pl479*lf+pl279*lf+pl249*lf+pl247*lf)/(kd2+kd4+kd7+kd9)
pl2489=(pl489*lf+pl289*lf+pl249*lf+pl248*lf)/(kd2+kd4+kd8+kd9)
pl2567=(pl567*lf+pl267*lf+pl257*lf+pl256*lf)/(kd2+kd5+kd6+kd7)
pl2568=(pl568*lf+pl268*lf+pl258*lf+pl256*lf)/(kd2+kd5+kd6+kd8)
pl2569=(pl569*lf+pl269*lf+pl259*lf+pl256*lf)/(kd2+kd5+kd6+kd9)
pl2578=(pl578*lf+pl278*lf+pl258*lf+pl257*lf)/(kd2+kd5+kd7+kd8)
pl2579=(pl579*lf+pl279*lf+pl259*lf+pl257*lf)/(kd2+kd5+kd7+kd9)
pl2589=(pl589*lf+pl289*lf+pl259*lf+pl258*lf)/(kd2+kd5+kd8+kd9)
pl2678=(pl678*lf+pl278*lf+pl268*lf+pl267*lf)/(kd2+kd6+kd7+kd8)
pl2679=(pl679*lf+pl279*lf+pl269*lf+pl267*lf)/(kd2+kd6+kd7+kd9)
pl2689=(pl689*lf+pl289*lf+pl269*lf+pl268*lf)/(kd2+kd6+kd8+kd9)
pl2789=(pl789*lf+pl289*lf+pl279*lf+pl278*lf)/(kd2+kd7+kd8+kd9)
pl3456=(pl456*lf+pl356*lf+pl346*lf+pl345*lf)/(kd3+kd4+kd5+kd6)
pl3457=(pl457*lf+pl357*lf+pl347*lf+pl345*lf)/(kd3+kd4+kd5+kd7)
pl3458=(pl458*lf+pl358*lf+pl348*lf+pl345*lf)/(kd3+kd4+kd5+kd8)
pl3459=(pl459*lf+pl359*lf+pl349*lf+pl345*lf)/(kd3+kd4+kd5+kd9)
pl3467=(pl467*lf+pl367*lf+pl347*lf+pl346*lf)/(kd3+kd4+kd6+kd7)
pl3468=(pl468*lf+pl368*lf+pl348*lf+pl346*lf)/(kd3+kd4+kd6+kd8)
pl3469=(pl469*lf+pl369*lf+pl349*lf+pl346*lf)/(kd3+kd4+kd6+kd9)
pl3478=(pl478*lf+pl378*lf+pl348*lf+pl347*lf)/(kd3+kd4+kd7+kd8)
pl3479=(pl479*lf+pl379*lf+pl349*lf+pl347*lf)/(kd3+kd4+kd7+kd9)
pl3489=(pl489*lf+pl389*lf+pl349*lf+pl348*lf)/(kd3+kd4+kd8+kd9)
pl3567=(pl567*lf+pl367*lf+pl357*lf+pl356*lf)/(kd3+kd5+kd6+kd7)
pl3568=(pl568*lf+pl368*lf+pl358*lf+pl356*lf)/(kd3+kd5+kd6+kd8)
pl3569=(pl569*lf+pl369*lf+pl359*lf+pl356*lf)/(kd3+kd5+kd6+kd9)
pl3578=(pl578*lf+pl378*lf+pl358*lf+pl357*lf)/(kd3+kd5+kd7+kd8)
pl3579=(pl579*lf+pl379*lf+pl359*lf+pl357*lf)/(kd3+kd5+kd7+kd9)
pl3589=(pl589*lf+pl389*lf+pl359*lf+pl358*lf)/(kd3+kd5+kd8+kd9)
pl3678=(pl678*lf+pl378*lf+pl368*lf+pl367*lf)/(kd3+kd6+kd7+kd8)
pl3679=(pl679*lf+pl379*lf+pl369*lf+pl367*lf)/(kd3+kd6+kd7+kd9)
pl3689=(pl689*lf+pl389*lf+pl369*lf+pl368*lf)/(kd3+kd6+kd8+kd9)
pl3789=(pl789*lf+pl389*lf+pl379*lf+pl378*lf)/(kd3+kd7+kd8+kd9)
pl4567=(pl567*lf+pl467*lf+pl457*lf+pl456*lf)/(kd4+kd5+kd6+kd7)
pl4568=(pl568*lf+pl468*lf+pl458*lf+pl456*lf)/(kd4+kd5+kd6+kd8)
pl4569=(pl569*lf+pl469*lf+pl459*lf+pl456*lf)/(kd4+kd5+kd6+kd9)
pl4578=(pl578*lf+pl478*lf+pl458*lf+pl457*lf)/(kd4+kd5+kd7+kd8)
pl4579=(pl579*lf+pl479*lf+pl459*lf+pl457*lf)/(kd4+kd5+kd7+kd9)
pl4589=(pl589*lf+pl489*lf+pl459*lf+pl458*lf)/(kd4+kd5+kd8+kd9)
pl4678=(pl678*lf+pl478*lf+pl468*lf+pl467*lf)/(kd4+kd6+kd7+kd8)
pl4679=(pl679*lf+pl479*lf+pl469*lf+pl467*lf)/(kd4+kd6+kd7+kd9)
pl4689=(pl689*lf+pl489*lf+pl469*lf+pl468*lf)/(kd4+kd6+kd8+kd9)
pl4789=(pl789*lf+pl489*lf+pl479*lf+pl478*lf)/(kd4+kd7+kd8+kd9)
pl5678=(pl678*lf+pl578*lf+pl568*lf+pl567*lf)/(kd5+kd6+kd7+kd8)
pl5679=(pl679*lf+pl579*lf+pl569*lf+pl567*lf)/(kd5+kd6+kd7+kd9)
pl5689=(pl689*lf+pl589*lf+pl569*lf+pl568*lf)/(kd5+kd6+kd8+kd9)
pl5789=(pl789*lf+pl589*lf+pl579*lf+pl578*lf)/(kd5+kd7+kd8+kd9)
pl6789=(pl789*lf+pl689*lf+pl679*lf+pl678*lf)/(kd6+kd7+kd8+kd9)
# Five-site complexes, one line per 5-subset (C(9,5) = 126 lines, lexicographic
# order), from the four-site concentrations above.
pl12345=(pl2345*lf+pl1345*lf+pl1245*lf+pl1235*lf+pl1234*lf)/(kd1+kd2+kd3+kd4+kd5)
pl12346=(pl2346*lf+pl1346*lf+pl1246*lf+pl1236*lf+pl1234*lf)/(kd1+kd2+kd3+kd4+kd6)
pl12347=(pl2347*lf+pl1347*lf+pl1247*lf+pl1237*lf+pl1234*lf)/(kd1+kd2+kd3+kd4+kd7)
pl12348=(pl2348*lf+pl1348*lf+pl1248*lf+pl1238*lf+pl1234*lf)/(kd1+kd2+kd3+kd4+kd8)
pl12349=(pl2349*lf+pl1349*lf+pl1249*lf+pl1239*lf+pl1234*lf)/(kd1+kd2+kd3+kd4+kd9)
pl12356=(pl2356*lf+pl1356*lf+pl1256*lf+pl1236*lf+pl1235*lf)/(kd1+kd2+kd3+kd5+kd6)
pl12357=(pl2357*lf+pl1357*lf+pl1257*lf+pl1237*lf+pl1235*lf)/(kd1+kd2+kd3+kd5+kd7)
pl12358=(pl2358*lf+pl1358*lf+pl1258*lf+pl1238*lf+pl1235*lf)/(kd1+kd2+kd3+kd5+kd8)
pl12359=(pl2359*lf+pl1359*lf+pl1259*lf+pl1239*lf+pl1235*lf)/(kd1+kd2+kd3+kd5+kd9)
pl12367=(pl2367*lf+pl1367*lf+pl1267*lf+pl1237*lf+pl1236*lf)/(kd1+kd2+kd3+kd6+kd7)
pl12368=(pl2368*lf+pl1368*lf+pl1268*lf+pl1238*lf+pl1236*lf)/(kd1+kd2+kd3+kd6+kd8)
pl12369=(pl2369*lf+pl1369*lf+pl1269*lf+pl1239*lf+pl1236*lf)/(kd1+kd2+kd3+kd6+kd9)
pl12378=(pl2378*lf+pl1378*lf+pl1278*lf+pl1238*lf+pl1237*lf)/(kd1+kd2+kd3+kd7+kd8)
pl12379=(pl2379*lf+pl1379*lf+pl1279*lf+pl1239*lf+pl1237*lf)/(kd1+kd2+kd3+kd7+kd9)
pl12389=(pl2389*lf+pl1389*lf+pl1289*lf+pl1239*lf+pl1238*lf)/(kd1+kd2+kd3+kd8+kd9)
pl12456=(pl2456*lf+pl1456*lf+pl1256*lf+pl1246*lf+pl1245*lf)/(kd1+kd2+kd4+kd5+kd6)
pl12457=(pl2457*lf+pl1457*lf+pl1257*lf+pl1247*lf+pl1245*lf)/(kd1+kd2+kd4+kd5+kd7)
pl12458=(pl2458*lf+pl1458*lf+pl1258*lf+pl1248*lf+pl1245*lf)/(kd1+kd2+kd4+kd5+kd8)
pl12459=(pl2459*lf+pl1459*lf+pl1259*lf+pl1249*lf+pl1245*lf)/(kd1+kd2+kd4+kd5+kd9)
pl12467=(pl2467*lf+pl1467*lf+pl1267*lf+pl1247*lf+pl1246*lf)/(kd1+kd2+kd4+kd6+kd7)
pl12468=(pl2468*lf+pl1468*lf+pl1268*lf+pl1248*lf+pl1246*lf)/(kd1+kd2+kd4+kd6+kd8)
pl12469=(pl2469*lf+pl1469*lf+pl1269*lf+pl1249*lf+pl1246*lf)/(kd1+kd2+kd4+kd6+kd9)
pl12478=(pl2478*lf+pl1478*lf+pl1278*lf+pl1248*lf+pl1247*lf)/(kd1+kd2+kd4+kd7+kd8)
pl12479=(pl2479*lf+pl1479*lf+pl1279*lf+pl1249*lf+pl1247*lf)/(kd1+kd2+kd4+kd7+kd9)
pl12489=(pl2489*lf+pl1489*lf+pl1289*lf+pl1249*lf+pl1248*lf)/(kd1+kd2+kd4+kd8+kd9)
pl12567=(pl2567*lf+pl1567*lf+pl1267*lf+pl1257*lf+pl1256*lf)/(kd1+kd2+kd5+kd6+kd7)
pl12568=(pl2568*lf+pl1568*lf+pl1268*lf+pl1258*lf+pl1256*lf)/(kd1+kd2+kd5+kd6+kd8)
pl12569=(pl2569*lf+pl1569*lf+pl1269*lf+pl1259*lf+pl1256*lf)/(kd1+kd2+kd5+kd6+kd9)
pl12578=(pl2578*lf+pl1578*lf+pl1278*lf+pl1258*lf+pl1257*lf)/(kd1+kd2+kd5+kd7+kd8)
pl12579=(pl2579*lf+pl1579*lf+pl1279*lf+pl1259*lf+pl1257*lf)/(kd1+kd2+kd5+kd7+kd9)
pl12589=(pl2589*lf+pl1589*lf+pl1289*lf+pl1259*lf+pl1258*lf)/(kd1+kd2+kd5+kd8+kd9)
pl12678=(pl2678*lf+pl1678*lf+pl1278*lf+pl1268*lf+pl1267*lf)/(kd1+kd2+kd6+kd7+kd8)
pl12679=(pl2679*lf+pl1679*lf+pl1279*lf+pl1269*lf+pl1267*lf)/(kd1+kd2+kd6+kd7+kd9)
pl12689=(pl2689*lf+pl1689*lf+pl1289*lf+pl1269*lf+pl1268*lf)/(kd1+kd2+kd6+kd8+kd9)
pl12789=(pl2789*lf+pl1789*lf+pl1289*lf+pl1279*lf+pl1278*lf)/(kd1+kd2+kd7+kd8+kd9)
pl13456=(pl3456*lf+pl1456*lf+pl1356*lf+pl1346*lf+pl1345*lf)/(kd1+kd3+kd4+kd5+kd6)
pl13457=(pl3457*lf+pl1457*lf+pl1357*lf+pl1347*lf+pl1345*lf)/(kd1+kd3+kd4+kd5+kd7)
pl13458=(pl3458*lf+pl1458*lf+pl1358*lf+pl1348*lf+pl1345*lf)/(kd1+kd3+kd4+kd5+kd8)
pl13459=(pl3459*lf+pl1459*lf+pl1359*lf+pl1349*lf+pl1345*lf)/(kd1+kd3+kd4+kd5+kd9)
pl13467=(pl3467*lf+pl1467*lf+pl1367*lf+pl1347*lf+pl1346*lf)/(kd1+kd3+kd4+kd6+kd7)
pl13468=(pl3468*lf+pl1468*lf+pl1368*lf+pl1348*lf+pl1346*lf)/(kd1+kd3+kd4+kd6+kd8)
pl13469=(pl3469*lf+pl1469*lf+pl1369*lf+pl1349*lf+pl1346*lf)/(kd1+kd3+kd4+kd6+kd9)
pl13478=(pl3478*lf+pl1478*lf+pl1378*lf+pl1348*lf+pl1347*lf)/(kd1+kd3+kd4+kd7+kd8)
pl13479=(pl3479*lf+pl1479*lf+pl1379*lf+pl1349*lf+pl1347*lf)/(kd1+kd3+kd4+kd7+kd9)
pl13489=(pl3489*lf+pl1489*lf+pl1389*lf+pl1349*lf+pl1348*lf)/(kd1+kd3+kd4+kd8+kd9)
pl13567=(pl3567*lf+pl1567*lf+pl1367*lf+pl1357*lf+pl1356*lf)/(kd1+kd3+kd5+kd6+kd7)
pl13568=(pl3568*lf+pl1568*lf+pl1368*lf+pl1358*lf+pl1356*lf)/(kd1+kd3+kd5+kd6+kd8)
pl13569=(pl3569*lf+pl1569*lf+pl1369*lf+pl1359*lf+pl1356*lf)/(kd1+kd3+kd5+kd6+kd9)
pl13578=(pl3578*lf+pl1578*lf+pl1378*lf+pl1358*lf+pl1357*lf)/(kd1+kd3+kd5+kd7+kd8)
pl13579=(pl3579*lf+pl1579*lf+pl1379*lf+pl1359*lf+pl1357*lf)/(kd1+kd3+kd5+kd7+kd9)
pl13589=(pl3589*lf+pl1589*lf+pl1389*lf+pl1359*lf+pl1358*lf)/(kd1+kd3+kd5+kd8+kd9)
pl13678=(pl3678*lf+pl1678*lf+pl1378*lf+pl1368*lf+pl1367*lf)/(kd1+kd3+kd6+kd7+kd8)
pl13679=(pl3679*lf+pl1679*lf+pl1379*lf+pl1369*lf+pl1367*lf)/(kd1+kd3+kd6+kd7+kd9)
pl13689=(pl3689*lf+pl1689*lf+pl1389*lf+pl1369*lf+pl1368*lf)/(kd1+kd3+kd6+kd8+kd9)
pl13789=(pl3789*lf+pl1789*lf+pl1389*lf+pl1379*lf+pl1378*lf)/(kd1+kd3+kd7+kd8+kd9)
pl14567=(pl4567*lf+pl1567*lf+pl1467*lf+pl1457*lf+pl1456*lf)/(kd1+kd4+kd5+kd6+kd7)
pl14568=(pl4568*lf+pl1568*lf+pl1468*lf+pl1458*lf+pl1456*lf)/(kd1+kd4+kd5+kd6+kd8)
pl14569=(pl4569*lf+pl1569*lf+pl1469*lf+pl1459*lf+pl1456*lf)/(kd1+kd4+kd5+kd6+kd9)
pl14578=(pl4578*lf+pl1578*lf+pl1478*lf+pl1458*lf+pl1457*lf)/(kd1+kd4+kd5+kd7+kd8)
pl14579=(pl4579*lf+pl1579*lf+pl1479*lf+pl1459*lf+pl1457*lf)/(kd1+kd4+kd5+kd7+kd9)
pl14589=(pl4589*lf+pl1589*lf+pl1489*lf+pl1459*lf+pl1458*lf)/(kd1+kd4+kd5+kd8+kd9)
pl14678=(pl4678*lf+pl1678*lf+pl1478*lf+pl1468*lf+pl1467*lf)/(kd1+kd4+kd6+kd7+kd8)
pl14679=(pl4679*lf+pl1679*lf+pl1479*lf+pl1469*lf+pl1467*lf)/(kd1+kd4+kd6+kd7+kd9)
pl14689=(pl4689*lf+pl1689*lf+pl1489*lf+pl1469*lf+pl1468*lf)/(kd1+kd4+kd6+kd8+kd9)
pl14789=(pl4789*lf+pl1789*lf+pl1489*lf+pl1479*lf+pl1478*lf)/(kd1+kd4+kd7+kd8+kd9)
pl15678=(pl5678*lf+pl1678*lf+pl1578*lf+pl1568*lf+pl1567*lf)/(kd1+kd5+kd6+kd7+kd8)
pl15679=(pl5679*lf+pl1679*lf+pl1579*lf+pl1569*lf+pl1567*lf)/(kd1+kd5+kd6+kd7+kd9)
pl15689=(pl5689*lf+pl1689*lf+pl1589*lf+pl1569*lf+pl1568*lf)/(kd1+kd5+kd6+kd8+kd9)
pl15789=(pl5789*lf+pl1789*lf+pl1589*lf+pl1579*lf+pl1578*lf)/(kd1+kd5+kd7+kd8+kd9)
pl16789=(pl6789*lf+pl1789*lf+pl1689*lf+pl1679*lf+pl1678*lf)/(kd1+kd6+kd7+kd8+kd9)
pl23456=(pl3456*lf+pl2456*lf+pl2356*lf+pl2346*lf+pl2345*lf)/(kd2+kd3+kd4+kd5+kd6)
pl23457=(pl3457*lf+pl2457*lf+pl2357*lf+pl2347*lf+pl2345*lf)/(kd2+kd3+kd4+kd5+kd7)
pl23458=(pl3458*lf+pl2458*lf+pl2358*lf+pl2348*lf+pl2345*lf)/(kd2+kd3+kd4+kd5+kd8)
pl23459=(pl3459*lf+pl2459*lf+pl2359*lf+pl2349*lf+pl2345*lf)/(kd2+kd3+kd4+kd5+kd9)
pl23467=(pl3467*lf+pl2467*lf+pl2367*lf+pl2347*lf+pl2346*lf)/(kd2+kd3+kd4+kd6+kd7)
pl23468=(pl3468*lf+pl2468*lf+pl2368*lf+pl2348*lf+pl2346*lf)/(kd2+kd3+kd4+kd6+kd8)
pl23469=(pl3469*lf+pl2469*lf+pl2369*lf+pl2349*lf+pl2346*lf)/(kd2+kd3+kd4+kd6+kd9)
pl23478=(pl3478*lf+pl2478*lf+pl2378*lf+pl2348*lf+pl2347*lf)/(kd2+kd3+kd4+kd7+kd8)
pl23479=(pl3479*lf+pl2479*lf+pl2379*lf+pl2349*lf+pl2347*lf)/(kd2+kd3+kd4+kd7+kd9)
pl23489=(pl3489*lf+pl2489*lf+pl2389*lf+pl2349*lf+pl2348*lf)/(kd2+kd3+kd4+kd8+kd9)
pl23567=(pl3567*lf+pl2567*lf+pl2367*lf+pl2357*lf+pl2356*lf)/(kd2+kd3+kd5+kd6+kd7)
pl23568=(pl3568*lf+pl2568*lf+pl2368*lf+pl2358*lf+pl2356*lf)/(kd2+kd3+kd5+kd6+kd8)
pl23569=(pl3569*lf+pl2569*lf+pl2369*lf+pl2359*lf+pl2356*lf)/(kd2+kd3+kd5+kd6+kd9)
pl23578=(pl3578*lf+pl2578*lf+pl2378*lf+pl2358*lf+pl2357*lf)/(kd2+kd3+kd5+kd7+kd8)
pl23579=(pl3579*lf+pl2579*lf+pl2379*lf+pl2359*lf+pl2357*lf)/(kd2+kd3+kd5+kd7+kd9)
pl23589=(pl3589*lf+pl2589*lf+pl2389*lf+pl2359*lf+pl2358*lf)/(kd2+kd3+kd5+kd8+kd9)
pl23678=(pl3678*lf+pl2678*lf+pl2378*lf+pl2368*lf+pl2367*lf)/(kd2+kd3+kd6+kd7+kd8)
pl23679=(pl3679*lf+pl2679*lf+pl2379*lf+pl2369*lf+pl2367*lf)/(kd2+kd3+kd6+kd7+kd9)
pl23689=(pl3689*lf+pl2689*lf+pl2389*lf+pl2369*lf+pl2368*lf)/(kd2+kd3+kd6+kd8+kd9)
pl23789=(pl3789*lf+pl2789*lf+pl2389*lf+pl2379*lf+pl2378*lf)/(kd2+kd3+kd7+kd8+kd9)
pl24567=(pl4567*lf+pl2567*lf+pl2467*lf+pl2457*lf+pl2456*lf)/(kd2+kd4+kd5+kd6+kd7)
pl24568=(pl4568*lf+pl2568*lf+pl2468*lf+pl2458*lf+pl2456*lf)/(kd2+kd4+kd5+kd6+kd8)
pl24569=(pl4569*lf+pl2569*lf+pl2469*lf+pl2459*lf+pl2456*lf)/(kd2+kd4+kd5+kd6+kd9)
pl24578=(pl4578*lf+pl2578*lf+pl2478*lf+pl2458*lf+pl2457*lf)/(kd2+kd4+kd5+kd7+kd8)
pl24579=(pl4579*lf+pl2579*lf+pl2479*lf+pl2459*lf+pl2457*lf)/(kd2+kd4+kd5+kd7+kd9)
pl24589=(pl4589*lf+pl2589*lf+pl2489*lf+pl2459*lf+pl2458*lf)/(kd2+kd4+kd5+kd8+kd9)
pl24678=(pl4678*lf+pl2678*lf+pl2478*lf+pl2468*lf+pl2467*lf)/(kd2+kd4+kd6+kd7+kd8)
pl24679=(pl4679*lf+pl2679*lf+pl2479*lf+pl2469*lf+pl2467*lf)/(kd2+kd4+kd6+kd7+kd9)
pl24689=(pl4689*lf+pl2689*lf+pl2489*lf+pl2469*lf+pl2468*lf)/(kd2+kd4+kd6+kd8+kd9)
pl24789=(pl4789*lf+pl2789*lf+pl2489*lf+pl2479*lf+pl2478*lf)/(kd2+kd4+kd7+kd8+kd9)
pl25678=(pl5678*lf+pl2678*lf+pl2578*lf+pl2568*lf+pl2567*lf)/(kd2+kd5+kd6+kd7+kd8)
pl25679=(pl5679*lf+pl2679*lf+pl2579*lf+pl2569*lf+pl2567*lf)/(kd2+kd5+kd6+kd7+kd9)
pl25689=(pl5689*lf+pl2689*lf+pl2589*lf+pl2569*lf+pl2568*lf)/(kd2+kd5+kd6+kd8+kd9)
pl25789=(pl5789*lf+pl2789*lf+pl2589*lf+pl2579*lf+pl2578*lf)/(kd2+kd5+kd7+kd8+kd9)
pl26789=(pl6789*lf+pl2789*lf+pl2689*lf+pl2679*lf+pl2678*lf)/(kd2+kd6+kd7+kd8+kd9)
pl34567=(pl4567*lf+pl3567*lf+pl3467*lf+pl3457*lf+pl3456*lf)/(kd3+kd4+kd5+kd6+kd7)
pl34568=(pl4568*lf+pl3568*lf+pl3468*lf+pl3458*lf+pl3456*lf)/(kd3+kd4+kd5+kd6+kd8)
pl34569=(pl4569*lf+pl3569*lf+pl3469*lf+pl3459*lf+pl3456*lf)/(kd3+kd4+kd5+kd6+kd9)
pl34578=(pl4578*lf+pl3578*lf+pl3478*lf+pl3458*lf+pl3457*lf)/(kd3+kd4+kd5+kd7+kd8)
pl34579=(pl4579*lf+pl3579*lf+pl3479*lf+pl3459*lf+pl3457*lf)/(kd3+kd4+kd5+kd7+kd9)
pl34589=(pl4589*lf+pl3589*lf+pl3489*lf+pl3459*lf+pl3458*lf)/(kd3+kd4+kd5+kd8+kd9)
pl34678=(pl4678*lf+pl3678*lf+pl3478*lf+pl3468*lf+pl3467*lf)/(kd3+kd4+kd6+kd7+kd8)
pl34679=(pl4679*lf+pl3679*lf+pl3479*lf+pl3469*lf+pl3467*lf)/(kd3+kd4+kd6+kd7+kd9)
pl34689=(pl4689*lf+pl3689*lf+pl3489*lf+pl3469*lf+pl3468*lf)/(kd3+kd4+kd6+kd8+kd9)
pl34789=(pl4789*lf+pl3789*lf+pl3489*lf+pl3479*lf+pl3478*lf)/(kd3+kd4+kd7+kd8+kd9)
pl35678=(pl5678*lf+pl3678*lf+pl3578*lf+pl3568*lf+pl3567*lf)/(kd3+kd5+kd6+kd7+kd8)
pl35679=(pl5679*lf+pl3679*lf+pl3579*lf+pl3569*lf+pl3567*lf)/(kd3+kd5+kd6+kd7+kd9)
pl35689=(pl5689*lf+pl3689*lf+pl3589*lf+pl3569*lf+pl3568*lf)/(kd3+kd5+kd6+kd8+kd9)
pl35789=(pl5789*lf+pl3789*lf+pl3589*lf+pl3579*lf+pl3578*lf)/(kd3+kd5+kd7+kd8+kd9)
pl36789=(pl6789*lf+pl3789*lf+pl3689*lf+pl3679*lf+pl3678*lf)/(kd3+kd6+kd7+kd8+kd9)
pl45678=(pl5678*lf+pl4678*lf+pl4578*lf+pl4568*lf+pl4567*lf)/(kd4+kd5+kd6+kd7+kd8)
pl45679=(pl5679*lf+pl4679*lf+pl4579*lf+pl4569*lf+pl4567*lf)/(kd4+kd5+kd6+kd7+kd9)
pl45689=(pl5689*lf+pl4689*lf+pl4589*lf+pl4569*lf+pl4568*lf)/(kd4+kd5+kd6+kd8+kd9)
pl45789=(pl5789*lf+pl4789*lf+pl4589*lf+pl4579*lf+pl4578*lf)/(kd4+kd5+kd7+kd8+kd9)
pl46789=(pl6789*lf+pl4789*lf+pl4689*lf+pl4679*lf+pl4678*lf)/(kd4+kd6+kd7+kd8+kd9)
pl56789=(pl6789*lf+pl5789*lf+pl5689*lf+pl5679*lf+pl5678*lf)/(kd5+kd6+kd7+kd8+kd9)
# Six-site complexes, one line per 6-subset (C(9,6) = 84 in total; the group
# continues past this excerpt), from the five-site concentrations above.
pl123456=(pl23456*lf+pl13456*lf+pl12456*lf+pl12356*lf+pl12346*lf+pl12345*lf)/(kd1+kd2+kd3+kd4+kd5+kd6)
pl123457=(pl23457*lf+pl13457*lf+pl12457*lf+pl12357*lf+pl12347*lf+pl12345*lf)/(kd1+kd2+kd3+kd4+kd5+kd7)
pl123458=(pl23458*lf+pl13458*lf+pl12458*lf+pl12358*lf+pl12348*lf+pl12345*lf)/(kd1+kd2+kd3+kd4+kd5+kd8)
pl123459=(pl23459*lf+pl13459*lf+pl12459*lf+pl12359*lf+pl12349*lf+pl12345*lf)/(kd1+kd2+kd3+kd4+kd5+kd9)
pl123467=(pl23467*lf+pl13467*lf+pl12467*lf+pl12367*lf+pl12347*lf+pl12346*lf)/(kd1+kd2+kd3+kd4+kd6+kd7)
pl123468=(pl23468*lf+pl13468*lf+pl12468*lf+pl12368*lf+pl12348*lf+pl12346*lf)/(kd1+kd2+kd3+kd4+kd6+kd8)
pl123469=(pl23469*lf+pl13469*lf+pl12469*lf+pl12369*lf+pl12349*lf+pl12346*lf)/(kd1+kd2+kd3+kd4+kd6+kd9)
pl123478=(pl23478*lf+pl13478*lf+pl12478*lf+pl12378*lf+pl12348*lf+pl12347*lf)/(kd1+kd2+kd3+kd4+kd7+kd8)
pl123479=(pl23479*lf+pl13479*lf+pl12479*lf+pl12379*lf+pl12349*lf+pl12347*lf)/(kd1+kd2+kd3+kd4+kd7+kd9)
pl123489=(pl23489*lf+pl13489*lf+pl12489*lf+pl12389*lf+pl12349*lf+pl12348*lf)/(kd1+kd2+kd3+kd4+kd8+kd9)
pl123567=(pl23567*lf+pl13567*lf+pl12567*lf+pl12367*lf+pl12357*lf+pl12356*lf)/(kd1+kd2+kd3+kd5+kd6+kd7)
pl123568=(pl23568*lf+pl13568*lf+pl12568*lf+pl12368*lf+pl12358*lf+pl12356*lf)/(kd1+kd2+kd3+kd5+kd6+kd8)
pl123569=(pl23569*lf+pl13569*lf+pl12569*lf+pl12369*lf+pl12359*lf+pl12356*lf)/(kd1+kd2+kd3+kd5+kd6+kd9)
pl123578=(pl23578*lf+pl13578*lf+pl12578*lf+pl12378*lf+pl12358*lf+pl12357*lf)/(kd1+kd2+kd3+kd5+kd7+kd8)
pl123579=(pl23579*lf+pl13579*lf+pl12579*lf+pl12379*lf+pl12359*lf+pl12357*lf)/(kd1+kd2+kd3+kd5+kd7+kd9)
pl123589=(pl23589*lf+pl13589*lf+pl12589*lf+pl12389*lf+pl12359*lf+pl12358*lf)/(kd1+kd2+kd3+kd5+kd8+kd9)
pl123678=(pl23678*lf+pl13678*lf+pl12678*lf+pl12378*lf+pl12368*lf+pl12367*lf)/(kd1+kd2+kd3+kd6+kd7+kd8)
pl123679=(pl23679*lf+pl13679*lf+pl12679*lf+pl12379*lf+pl12369*lf+pl12367*lf)/(kd1+kd2+kd3+kd6+kd7+kd9)
pl123689=(pl23689*lf+pl13689*lf+pl12689*lf+pl12389*lf+pl12369*lf+pl12368*lf)/(kd1+kd2+kd3+kd6+kd8+kd9)
pl123789=(pl23789*lf+pl13789*lf+pl12789*lf+pl12389*lf+pl12379*lf+pl12378*lf)/(kd1+kd2+kd3+kd7+kd8+kd9)
pl124567=(pl24567*lf+pl14567*lf+pl12567*lf+pl12467*lf+pl12457*lf+pl12456*lf)/(kd1+kd2+kd4+kd5+kd6+kd7)
pl124568=(pl24568*lf+pl14568*lf+pl12568*lf+pl12468*lf+pl12458*lf+pl12456*lf)/(kd1+kd2+kd4+kd5+kd6+kd8)
pl124569=(pl24569*lf+pl14569*lf+pl12569*lf+pl12469*lf+pl12459*lf+pl12456*lf)/(kd1+kd2+kd4+kd5+kd6+kd9)
pl124578=(pl24578*lf+pl14578*lf+pl12578*lf+pl12478*lf+pl12458*lf+pl12457*lf)/(kd1+kd2+kd4+kd5+kd7+kd8)
pl124579=(pl24579*lf+pl14579*lf+pl12579*lf+pl12479*lf+pl12459*lf+pl12457*lf)/(kd1+kd2+kd4+kd5+kd7+kd9)
pl124589=(pl24589*lf+pl14589*lf+pl12589*lf+pl12489*lf+pl12459*lf+pl12458*lf)/(kd1+kd2+kd4+kd5+kd8+kd9)
pl124678=(pl24678*lf+pl14678*lf+pl12678*lf+pl12478*lf+pl12468*lf+pl12467*lf)/(kd1+kd2+kd4+kd6+kd7+kd8)
pl124679=(pl24679*lf+pl14679*lf+pl12679*lf+pl12479*lf+pl12469*lf+pl12467*lf)/(kd1+kd2+kd4+kd6+kd7+kd9)
pl124689=(pl24689*lf+pl14689*lf+pl12689*lf+pl12489*lf+pl12469*lf+pl12468*lf)/(kd1+kd2+kd4+kd6+kd8+kd9)
pl124789=(pl24789*lf+pl14789*lf+pl12789*lf+pl12489*lf+pl12479*lf+pl12478*lf)/(kd1+kd2+kd4+kd7+kd8+kd9)
pl125678=(pl25678*lf+pl15678*lf+pl12678*lf+pl12578*lf+pl12568*lf+pl12567*lf)/(kd1+kd2+kd5+kd6+kd7+kd8)
pl125679=(pl25679*lf+pl15679*lf+pl12679*lf+pl12579*lf+pl12569*lf+pl12567*lf)/(kd1+kd2+kd5+kd6+kd7+kd9)
pl125689=(pl25689*lf+pl15689*lf+pl12689*lf+pl12589*lf+pl12569*lf+pl12568*lf)/(kd1+kd2+kd5+kd6+kd8+kd9)
pl125789=(pl25789*lf+pl15789*lf+pl12789*lf+pl12589*lf+pl12579*lf+pl12578*lf)/(kd1+kd2+kd5+kd7+kd8+kd9)
pl126789=(pl26789*lf+pl16789*lf+pl12789*lf+pl12689*lf+pl12679*lf+pl12678*lf)/(kd1+kd2+kd6+kd7+kd8+kd9)
pl134567=(pl34567*lf+pl14567*lf+pl13567*lf+pl13467*lf+pl13457*lf+pl13456*lf)/(kd1+kd3+kd4+kd5+kd6+kd7)
pl134568=(pl34568*lf+pl14568*lf+pl13568*lf+pl13468*lf+pl13458*lf+pl13456*lf)/(kd1+kd3+kd4+kd5+kd6+kd8)
pl134569=(pl34569*lf+pl14569*lf+pl13569*lf+pl13469*lf+pl13459*lf+pl13456*lf)/(kd1+kd3+kd4+kd5+kd6+kd9)
pl134578=(pl34578*lf+pl14578*lf+pl13578*lf+pl13478*lf+pl13458*lf+pl13457*lf)/(kd1+kd3+kd4+kd5+kd7+kd8)
pl134579=(pl34579*lf+pl14579*lf+pl13579*lf+pl13479*lf+pl13459*lf+pl13457*lf)/(kd1+kd3+kd4+kd5+kd7+kd9)
pl134589=(pl34589*lf+pl14589*lf+pl13589*lf+pl13489*lf+pl13459*lf+pl13458*lf)/(kd1+kd3+kd4+kd5+kd8+kd9)
pl134678=(pl34678*lf+pl14678*lf+pl13678*lf+pl13478*lf+pl13468*lf+pl13467*lf)/(kd1+kd3+kd4+kd6+kd7+kd8)
pl134679=(pl34679*lf+pl14679*lf+pl13679*lf+pl13479*lf+pl13469*lf+pl13467*lf)/(kd1+kd3+kd4+kd6+kd7+kd9)
pl134689=(pl34689*lf+pl14689*lf+pl13689*lf+pl13489*lf+pl13469*lf+pl13468*lf)/(kd1+kd3+kd4+kd6+kd8+kd9)
pl134789=(pl34789*lf+pl14789*lf+pl13789*lf+pl13489*lf+pl13479*lf+pl13478*lf)/(kd1+kd3+kd4+kd7+kd8+kd9)
pl135678=(pl35678*lf+pl15678*lf+pl13678*lf+pl13578*lf+pl13568*lf+pl13567*lf)/(kd1+kd3+kd5+kd6+kd7+kd8)
pl135679=(pl35679*lf+pl15679*lf+pl13679*lf+pl13579*lf+pl13569*lf+pl13567*lf)/(kd1+kd3+kd5+kd6+kd7+kd9)
pl135689=(pl35689*lf+pl15689*lf+pl13689*lf+pl13589*lf+pl13569*lf+pl13568*lf)/(kd1+kd3+kd5+kd6+kd8+kd9)
pl135789=(pl35789*lf+pl15789*lf+pl13789*lf+pl13589*lf+pl13579*lf+pl13578*lf)/(kd1+kd3+kd5+kd7+kd8+kd9)
pl136789=(pl36789*lf+pl16789*lf+pl13789*lf+pl13689*lf+pl13679*lf+pl13678*lf)/(kd1+kd3+kd6+kd7+kd8+kd9)
pl145678=(pl45678*lf+pl15678*lf+pl14678*lf+pl14578*lf+pl14568*lf+pl14567*lf)/(kd1+kd4+kd5+kd6+kd7+kd8)
pl145679=(pl45679*lf+pl15679*lf+pl14679*lf+pl14579*lf+pl14569*lf+pl14567*lf)/(kd1+kd4+kd5+kd6+kd7+kd9)
pl145689=(pl45689*lf+pl15689*lf+pl14689*lf+pl14589*lf+pl14569*lf+pl14568*lf)/(kd1+kd4+kd5+kd6+kd8+kd9)
pl145789=(pl45789*lf+pl15789*lf+pl14789*lf+pl14589*lf+pl14579*lf+pl14578*lf)/(kd1+kd4+kd5+kd7+kd8+kd9)
pl146789=(pl46789*lf+pl16789*lf+pl14789*lf+pl14689*lf+pl14679*lf+pl14678*lf)/(kd1+kd4+kd6+kd7+kd8+kd9)
pl156789=(pl56789*lf+pl16789*lf+pl15789*lf+pl15689*lf+pl15679*lf+pl15678*lf)/(kd1+kd5+kd6+kd7+kd8+kd9)
pl234567=(pl34567*lf+pl24567*lf+pl23567*lf+pl23467*lf+pl23457*lf+pl23456*lf)/(kd2+kd3+kd4+kd5+kd6+kd7)
pl234568=(pl34568*lf+pl24568*lf+pl23568*lf+pl23468*lf+pl23458*lf+pl23456*lf)/(kd2+kd3+kd4+kd5+kd6+kd8)
pl234569=(pl34569*lf+pl24569*lf+pl23569*lf+pl23469*lf+pl23459*lf+pl23456*lf)/(kd2+kd3+kd4+kd5+kd6+kd9)
pl234578=(pl34578*lf+pl24578*lf+pl23578*lf+pl23478*lf+pl23458*lf+pl23457*lf)/(kd2+kd3+kd4+kd5+kd7+kd8)
pl234579=(pl34579*lf+pl24579*lf+pl23579*lf+pl23479*lf+pl23459*lf+pl23457*lf)/(kd2+kd3+kd4+kd5+kd7+kd9)
pl234589=(pl34589*lf+pl24589*lf+pl23589*lf+pl23489*lf+pl23459*lf+pl23458*lf)/(kd2+kd3+kd4+kd5+kd8+kd9)
pl234678=(pl34678*lf+pl24678*lf+pl23678*lf+pl23478*lf+pl23468*lf+pl23467*lf)/(kd2+kd3+kd4+kd6+kd7+kd8)
pl234679=(pl34679*lf+pl24679*lf+pl23679*lf+pl23479*lf+pl23469*lf+pl23467*lf)/(kd2+kd3+kd4+kd6+kd7+kd9)
pl234689=(pl34689*lf+pl24689*lf+pl23689*lf+pl23489*lf+pl23469*lf+pl23468*lf)/(kd2+kd3+kd4+kd6+kd8+kd9)
pl234789=(pl34789*lf+pl24789*lf+pl23789*lf+pl23489*lf+pl23479*lf+pl23478*lf)/(kd2+kd3+kd4+kd7+kd8+kd9)
pl235678=(pl35678*lf+pl25678*lf+pl23678*lf+pl23578*lf+pl23568*lf+pl23567*lf)/(kd2+kd3+kd5+kd6+kd7+kd8)
pl235679=(pl35679*lf+pl25679*lf+pl23679*lf+pl23579*lf+pl23569*lf+pl23567*lf)/(kd2+kd3+kd5+kd6+kd7+kd9)
pl235689=(pl35689*lf+pl25689*lf+pl23689*lf+pl23589*lf+pl23569*lf+pl23568*lf)/(kd2+kd3+kd5+kd6+kd8+kd9)
pl235789=(pl35789*lf+pl25789*lf+pl23789*lf+pl23589*lf+pl23579*lf+pl23578*lf)/(kd2+kd3+kd5+kd7+kd8+kd9)
pl236789=(pl36789*lf+pl26789*lf+pl23789*lf+pl23689*lf+pl23679*lf+pl23678*lf)/(kd2+kd3+kd6+kd7+kd8+kd9)
pl245678=(pl45678*lf+pl25678*lf+pl24678*lf+pl24578*lf+pl24568*lf+pl24567*lf)/(kd2+kd4+kd5+kd6+kd7+kd8)
pl245679=(pl45679*lf+pl25679*lf+pl24679*lf+pl24579*lf+pl24569*lf+pl24567*lf)/(kd2+kd4+kd5+kd6+kd7+kd9)
pl245689=(pl45689*lf+pl25689*lf+pl24689*lf+pl24589*lf+pl24569*lf+pl24568*lf)/(kd2+kd4+kd5+kd6+kd8+kd9)
pl245789=(pl45789*lf+pl25789*lf+pl24789*lf+pl24589*lf+pl24579*lf+pl24578*lf)/(kd2+kd4+kd5+kd7+kd8+kd9)
pl246789=(pl46789*lf+pl26789*lf+pl24789*lf+pl24689*lf+pl24679*lf+pl24678*lf)/(kd2+kd4+kd6+kd7+kd8+kd9)
pl256789=(pl56789*lf+pl26789*lf+pl25789*lf+pl25689*lf+pl25679*lf+pl25678*lf)/(kd2+kd5+kd6+kd7+kd8+kd9)
pl345678=(pl45678*lf+pl35678*lf+pl34678*lf+pl34578*lf+pl34568*lf+pl34567*lf)/(kd3+kd4+kd5+kd6+kd7+kd8)
pl345679=(pl45679*lf+pl35679*lf+pl34679*lf+pl34579*lf+pl34569*lf+pl34567*lf)/(kd3+kd4+kd5+kd6+kd7+kd9)
pl345689=(pl45689*lf+pl35689*lf+pl34689*lf+pl34589*lf+pl34569*lf+pl34568*lf)/(kd3+kd4+kd5+kd6+kd8+kd9)
pl345789=(pl45789*lf+pl35789*lf+pl34789*lf+pl34589*lf+pl34579*lf+pl34578*lf)/(kd3+kd4+kd5+kd7+kd8+kd9)
pl346789=(pl46789*lf+pl36789*lf+pl34789*lf+pl34689*lf+pl34679*lf+pl34678*lf)/(kd3+kd4+kd6+kd7+kd8+kd9)
pl356789=(pl56789*lf+pl36789*lf+pl35789*lf+pl35689*lf+pl35679*lf+pl35678*lf)/(kd3+kd5+kd6+kd7+kd8+kd9)
pl456789=(pl56789*lf+pl46789*lf+pl45789*lf+pl45689*lf+pl45679*lf+pl45678*lf)/(kd4+kd5+kd6+kd7+kd8+kd9)
pl1234567=(pl234567*lf+pl134567*lf+pl124567*lf+pl123567*lf+pl123467*lf+pl123457*lf+pl123456*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd7)
pl1234568=(pl234568*lf+pl134568*lf+pl124568*lf+pl123568*lf+pl123468*lf+pl123458*lf+pl123456*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd8)
pl1234569=(pl234569*lf+pl134569*lf+pl124569*lf+pl123569*lf+pl123469*lf+pl123459*lf+pl123456*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd9)
pl1234578=(pl234578*lf+pl134578*lf+pl124578*lf+pl123578*lf+pl123478*lf+pl123458*lf+pl123457*lf)/(kd1+kd2+kd3+kd4+kd5+kd7+kd8)
pl1234579=(pl234579*lf+pl134579*lf+pl124579*lf+pl123579*lf+pl123479*lf+pl123459*lf+pl123457*lf)/(kd1+kd2+kd3+kd4+kd5+kd7+kd9)
pl1234589=(pl234589*lf+pl134589*lf+pl124589*lf+pl123589*lf+pl123489*lf+pl123459*lf+pl123458*lf)/(kd1+kd2+kd3+kd4+kd5+kd8+kd9)
pl1234678=(pl234678*lf+pl134678*lf+pl124678*lf+pl123678*lf+pl123478*lf+pl123468*lf+pl123467*lf)/(kd1+kd2+kd3+kd4+kd6+kd7+kd8)
pl1234679=(pl234679*lf+pl134679*lf+pl124679*lf+pl123679*lf+pl123479*lf+pl123469*lf+pl123467*lf)/(kd1+kd2+kd3+kd4+kd6+kd7+kd9)
pl1234689=(pl234689*lf+pl134689*lf+pl124689*lf+pl123689*lf+pl123489*lf+pl123469*lf+pl123468*lf)/(kd1+kd2+kd3+kd4+kd6+kd8+kd9)
pl1234789=(pl234789*lf+pl134789*lf+pl124789*lf+pl123789*lf+pl123489*lf+pl123479*lf+pl123478*lf)/(kd1+kd2+kd3+kd4+kd7+kd8+kd9)
pl1235678=(pl235678*lf+pl135678*lf+pl125678*lf+pl123678*lf+pl123578*lf+pl123568*lf+pl123567*lf)/(kd1+kd2+kd3+kd5+kd6+kd7+kd8)
pl1235679=(pl235679*lf+pl135679*lf+pl125679*lf+pl123679*lf+pl123579*lf+pl123569*lf+pl123567*lf)/(kd1+kd2+kd3+kd5+kd6+kd7+kd9)
pl1235689=(pl235689*lf+pl135689*lf+pl125689*lf+pl123689*lf+pl123589*lf+pl123569*lf+pl123568*lf)/(kd1+kd2+kd3+kd5+kd6+kd8+kd9)
pl1235789=(pl235789*lf+pl135789*lf+pl125789*lf+pl123789*lf+pl123589*lf+pl123579*lf+pl123578*lf)/(kd1+kd2+kd3+kd5+kd7+kd8+kd9)
pl1236789=(pl236789*lf+pl136789*lf+pl126789*lf+pl123789*lf+pl123689*lf+pl123679*lf+pl123678*lf)/(kd1+kd2+kd3+kd6+kd7+kd8+kd9)
pl1245678=(pl245678*lf+pl145678*lf+pl125678*lf+pl124678*lf+pl124578*lf+pl124568*lf+pl124567*lf)/(kd1+kd2+kd4+kd5+kd6+kd7+kd8)
pl1245679=(pl245679*lf+pl145679*lf+pl125679*lf+pl124679*lf+pl124579*lf+pl124569*lf+pl124567*lf)/(kd1+kd2+kd4+kd5+kd6+kd7+kd9)
pl1245689=(pl245689*lf+pl145689*lf+pl125689*lf+pl124689*lf+pl124589*lf+pl124569*lf+pl124568*lf)/(kd1+kd2+kd4+kd5+kd6+kd8+kd9)
pl1245789=(pl245789*lf+pl145789*lf+pl125789*lf+pl124789*lf+pl124589*lf+pl124579*lf+pl124578*lf)/(kd1+kd2+kd4+kd5+kd7+kd8+kd9)
pl1246789=(pl246789*lf+pl146789*lf+pl126789*lf+pl124789*lf+pl124689*lf+pl124679*lf+pl124678*lf)/(kd1+kd2+kd4+kd6+kd7+kd8+kd9)
pl1256789=(pl256789*lf+pl156789*lf+pl126789*lf+pl125789*lf+pl125689*lf+pl125679*lf+pl125678*lf)/(kd1+kd2+kd5+kd6+kd7+kd8+kd9)
pl1345678=(pl345678*lf+pl145678*lf+pl135678*lf+pl134678*lf+pl134578*lf+pl134568*lf+pl134567*lf)/(kd1+kd3+kd4+kd5+kd6+kd7+kd8)
pl1345679=(pl345679*lf+pl145679*lf+pl135679*lf+pl134679*lf+pl134579*lf+pl134569*lf+pl134567*lf)/(kd1+kd3+kd4+kd5+kd6+kd7+kd9)
pl1345689=(pl345689*lf+pl145689*lf+pl135689*lf+pl134689*lf+pl134589*lf+pl134569*lf+pl134568*lf)/(kd1+kd3+kd4+kd5+kd6+kd8+kd9)
pl1345789=(pl345789*lf+pl145789*lf+pl135789*lf+pl134789*lf+pl134589*lf+pl134579*lf+pl134578*lf)/(kd1+kd3+kd4+kd5+kd7+kd8+kd9)
pl1346789=(pl346789*lf+pl146789*lf+pl136789*lf+pl134789*lf+pl134689*lf+pl134679*lf+pl134678*lf)/(kd1+kd3+kd4+kd6+kd7+kd8+kd9)
pl1356789=(pl356789*lf+pl156789*lf+pl136789*lf+pl135789*lf+pl135689*lf+pl135679*lf+pl135678*lf)/(kd1+kd3+kd5+kd6+kd7+kd8+kd9)
pl1456789=(pl456789*lf+pl156789*lf+pl146789*lf+pl145789*lf+pl145689*lf+pl145679*lf+pl145678*lf)/(kd1+kd4+kd5+kd6+kd7+kd8+kd9)
pl2345678=(pl345678*lf+pl245678*lf+pl235678*lf+pl234678*lf+pl234578*lf+pl234568*lf+pl234567*lf)/(kd2+kd3+kd4+kd5+kd6+kd7+kd8)
pl2345679=(pl345679*lf+pl245679*lf+pl235679*lf+pl234679*lf+pl234579*lf+pl234569*lf+pl234567*lf)/(kd2+kd3+kd4+kd5+kd6+kd7+kd9)
pl2345689=(pl345689*lf+pl245689*lf+pl235689*lf+pl234689*lf+pl234589*lf+pl234569*lf+pl234568*lf)/(kd2+kd3+kd4+kd5+kd6+kd8+kd9)
pl2345789=(pl345789*lf+pl245789*lf+pl235789*lf+pl234789*lf+pl234589*lf+pl234579*lf+pl234578*lf)/(kd2+kd3+kd4+kd5+kd7+kd8+kd9)
pl2346789=(pl346789*lf+pl246789*lf+pl236789*lf+pl234789*lf+pl234689*lf+pl234679*lf+pl234678*lf)/(kd2+kd3+kd4+kd6+kd7+kd8+kd9)
pl2356789=(pl356789*lf+pl256789*lf+pl236789*lf+pl235789*lf+pl235689*lf+pl235679*lf+pl235678*lf)/(kd2+kd3+kd5+kd6+kd7+kd8+kd9)
pl2456789=(pl456789*lf+pl256789*lf+pl246789*lf+pl245789*lf+pl245689*lf+pl245679*lf+pl245678*lf)/(kd2+kd4+kd5+kd6+kd7+kd8+kd9)
pl3456789=(pl456789*lf+pl356789*lf+pl346789*lf+pl345789*lf+pl345689*lf+pl345679*lf+pl345678*lf)/(kd3+kd4+kd5+kd6+kd7+kd8+kd9)
pl12345678=(pl2345678*lf+pl1345678*lf+pl1245678*lf+pl1235678*lf+pl1234678*lf+pl1234578*lf+pl1234568*lf+pl1234567*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd7+kd8)
pl12345679=(pl2345679*lf+pl1345679*lf+pl1245679*lf+pl1235679*lf+pl1234679*lf+pl1234579*lf+pl1234569*lf+pl1234567*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd7+kd9)
pl12345689=(pl2345689*lf+pl1345689*lf+pl1245689*lf+pl1235689*lf+pl1234689*lf+pl1234589*lf+pl1234569*lf+pl1234568*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd8+kd9)
pl12345789=(pl2345789*lf+pl1345789*lf+pl1245789*lf+pl1235789*lf+pl1234789*lf+pl1234589*lf+pl1234579*lf+pl1234578*lf)/(kd1+kd2+kd3+kd4+kd5+kd7+kd8+kd9)
pl12346789=(pl2346789*lf+pl1346789*lf+pl1246789*lf+pl1236789*lf+pl1234789*lf+pl1234689*lf+pl1234679*lf+pl1234678*lf)/(kd1+kd2+kd3+kd4+kd6+kd7+kd8+kd9)
pl12356789=(pl2356789*lf+pl1356789*lf+pl1256789*lf+pl1236789*lf+pl1235789*lf+pl1235689*lf+pl1235679*lf+pl1235678*lf)/(kd1+kd2+kd3+kd5+kd6+kd7+kd8+kd9)
pl12456789=(pl2456789*lf+pl1456789*lf+pl1256789*lf+pl1246789*lf+pl1245789*lf+pl1245689*lf+pl1245679*lf+pl1245678*lf)/(kd1+kd2+kd4+kd5+kd6+kd7+kd8+kd9)
pl13456789=(pl3456789*lf+pl1456789*lf+pl1356789*lf+pl1346789*lf+pl1345789*lf+pl1345689*lf+pl1345679*lf+pl1345678*lf)/(kd1+kd3+kd4+kd5+kd6+kd7+kd8+kd9)
pl23456789=(pl3456789*lf+pl2456789*lf+pl2356789*lf+pl2346789*lf+pl2345789*lf+pl2345689*lf+pl2345679*lf+pl2345678*lf)/(kd2+kd3+kd4+kd5+kd6+kd7+kd8+kd9)
pl123456789=(pl23456789*lf+pl13456789*lf+pl12456789*lf+pl12356789*lf+pl12346789*lf+pl12345789*lf+pl12345689*lf+pl12345679*lf+pl12345678*lf)/(kd1+kd2+kd3+kd4+kd5+kd6+kd7+kd8+kd9)
return {'pf':pf,'lf':lf, 'pl1':pl1, 'pl2':pl2, 'pl3':pl3, 'pl4':pl4, 'pl5':pl5, 'pl6':pl6, 'pl7':pl7, 'pl8':pl8, 'pl9':pl9, 'pl12':pl12, 'pl13':pl13, 'pl14':pl14, 'pl15':pl15, 'pl16':pl16, 'pl17':pl17, 'pl18':pl18, 'pl19':pl19, 'pl23':pl23, 'pl24':pl24, 'pl25':pl25, 'pl26':pl26, 'pl27':pl27, 'pl28':pl28, 'pl29':pl29, 'pl34':pl34, 'pl35':pl35, 'pl36':pl36, 'pl37':pl37, 'pl38':pl38, 'pl39':pl39, 'pl45':pl45, 'pl46':pl46, 'pl47':pl47, 'pl48':pl48, 'pl49':pl49, 'pl56':pl56, 'pl57':pl57, 'pl58':pl58, 'pl59':pl59, 'pl67':pl67, 'pl68':pl68, 'pl69':pl69, 'pl78':pl78, 'pl79':pl79, 'pl89':pl89, 'pl123':pl123, 'pl124':pl124, 'pl125':pl125, 'pl126':pl126, 'pl127':pl127, 'pl128':pl128, 'pl129':pl129, 'pl134':pl134, 'pl135':pl135, 'pl136':pl136, 'pl137':pl137, 'pl138':pl138, 'pl139':pl139, 'pl145':pl145, 'pl146':pl146, 'pl147':pl147, 'pl148':pl148, 'pl149':pl149, 'pl156':pl156, 'pl157':pl157, 'pl158':pl158, 'pl159':pl159, 'pl167':pl167, 'pl168':pl168, 'pl169':pl169, 'pl178':pl178, 'pl179':pl179, 'pl189':pl189, 'pl234':pl234, 'pl235':pl235, 'pl236':pl236, 'pl237':pl237, 'pl238':pl238, 'pl239':pl239, 'pl245':pl245, 'pl246':pl246, 'pl247':pl247, 'pl248':pl248, 'pl249':pl249, 'pl256':pl256, 'pl257':pl257, 'pl258':pl258, 'pl259':pl259, 'pl267':pl267, 'pl268':pl268, 'pl269':pl269, 'pl278':pl278, 'pl279':pl279, 'pl289':pl289, 'pl345':pl345, 'pl346':pl346, 'pl347':pl347, 'pl348':pl348, 'pl349':pl349, 'pl356':pl356, 'pl357':pl357, 'pl358':pl358, 'pl359':pl359, 'pl367':pl367, 'pl368':pl368, 'pl369':pl369, 'pl378':pl378, 'pl379':pl379, 'pl389':pl389, 'pl456':pl456, 'pl457':pl457, 'pl458':pl458, 'pl459':pl459, 'pl467':pl467, 'pl468':pl468, 'pl469':pl469, 'pl478':pl478, 'pl479':pl479, 'pl489':pl489, 'pl567':pl567, 'pl568':pl568, 'pl569':pl569, 'pl578':pl578, 'pl579':pl579, 'pl589':pl589, 'pl678':pl678, 'pl679':pl679, 'pl689':pl689, 'pl789':pl789, 'pl1234':pl1234, 'pl1235':pl1235, 'pl1236':pl1236, 'pl1237':pl1237, 'pl1238':pl1238, 'pl1239':pl1239, 'pl1245':pl1245, 'pl1246':pl1246, 
'pl1247':pl1247, 'pl1248':pl1248, 'pl1249':pl1249, 'pl1256':pl1256, 'pl1257':pl1257, 'pl1258':pl1258, 'pl1259':pl1259, 'pl1267':pl1267, 'pl1268':pl1268, 'pl1269':pl1269, 'pl1278':pl1278, 'pl1279':pl1279, 'pl1289':pl1289, 'pl1345':pl1345, 'pl1346':pl1346, 'pl1347':pl1347, 'pl1348':pl1348, 'pl1349':pl1349, 'pl1356':pl1356, 'pl1357':pl1357, 'pl1358':pl1358, 'pl1359':pl1359, 'pl1367':pl1367, 'pl1368':pl1368, 'pl1369':pl1369, 'pl1378':pl1378, 'pl1379':pl1379, 'pl1389':pl1389, 'pl1456':pl1456, 'pl1457':pl1457, 'pl1458':pl1458, 'pl1459':pl1459, 'pl1467':pl1467, 'pl1468':pl1468, 'pl1469':pl1469, 'pl1478':pl1478, 'pl1479':pl1479, 'pl1489':pl1489, 'pl1567':pl1567, 'pl1568':pl1568, 'pl1569':pl1569, 'pl1578':pl1578, 'pl1579':pl1579, 'pl1589':pl1589, 'pl1678':pl1678, 'pl1679':pl1679, 'pl1689':pl1689, 'pl1789':pl1789, 'pl2345':pl2345, 'pl2346':pl2346, 'pl2347':pl2347, 'pl2348':pl2348, 'pl2349':pl2349, 'pl2356':pl2356, 'pl2357':pl2357, 'pl2358':pl2358, 'pl2359':pl2359, 'pl2367':pl2367, 'pl2368':pl2368, 'pl2369':pl2369, 'pl2378':pl2378, 'pl2379':pl2379, 'pl2389':pl2389, 'pl2456':pl2456, 'pl2457':pl2457, 'pl2458':pl2458, 'pl2459':pl2459, 'pl2467':pl2467, 'pl2468':pl2468, 'pl2469':pl2469, 'pl2478':pl2478, 'pl2479':pl2479, 'pl2489':pl2489, 'pl2567':pl2567, 'pl2568':pl2568, 'pl2569':pl2569, 'pl2578':pl2578, 'pl2579':pl2579, 'pl2589':pl2589, 'pl2678':pl2678, 'pl2679':pl2679, 'pl2689':pl2689, 'pl2789':pl2789, 'pl3456':pl3456, 'pl3457':pl3457, 'pl3458':pl3458, 'pl3459':pl3459, 'pl3467':pl3467, 'pl3468':pl3468, 'pl3469':pl3469, 'pl3478':pl3478, 'pl3479':pl3479, 'pl3489':pl3489, 'pl3567':pl3567, 'pl3568':pl3568, 'pl3569':pl3569, 'pl3578':pl3578, 'pl3579':pl3579, 'pl3589':pl3589, 'pl3678':pl3678, 'pl3679':pl3679, 'pl3689':pl3689, 'pl3789':pl3789, 'pl4567':pl4567, 'pl4568':pl4568, 'pl4569':pl4569, 'pl4578':pl4578, 'pl4579':pl4579, 'pl4589':pl4589, 'pl4678':pl4678, 'pl4679':pl4679, 'pl4689':pl4689, 'pl4789':pl4789, 'pl5678':pl5678, 'pl5679':pl5679, 'pl5689':pl5689, 'pl5789':pl5789, 
'pl6789':pl6789, 'pl12345':pl12345, 'pl12346':pl12346, 'pl12347':pl12347, 'pl12348':pl12348, 'pl12349':pl12349, 'pl12356':pl12356, 'pl12357':pl12357, 'pl12358':pl12358, 'pl12359':pl12359, 'pl12367':pl12367, 'pl12368':pl12368, 'pl12369':pl12369, 'pl12378':pl12378, 'pl12379':pl12379, 'pl12389':pl12389, 'pl12456':pl12456, 'pl12457':pl12457, 'pl12458':pl12458, 'pl12459':pl12459, 'pl12467':pl12467, 'pl12468':pl12468, 'pl12469':pl12469, 'pl12478':pl12478, 'pl12479':pl12479, 'pl12489':pl12489, 'pl12567':pl12567, 'pl12568':pl12568, 'pl12569':pl12569, 'pl12578':pl12578, 'pl12579':pl12579, 'pl12589':pl12589, 'pl12678':pl12678, 'pl12679':pl12679, 'pl12689':pl12689, 'pl12789':pl12789, 'pl13456':pl13456, 'pl13457':pl13457, 'pl13458':pl13458, 'pl13459':pl13459, 'pl13467':pl13467, 'pl13468':pl13468, 'pl13469':pl13469, 'pl13478':pl13478, 'pl13479':pl13479, 'pl13489':pl13489, 'pl13567':pl13567, 'pl13568':pl13568, 'pl13569':pl13569, 'pl13578':pl13578, 'pl13579':pl13579, 'pl13589':pl13589, 'pl13678':pl13678, 'pl13679':pl13679, 'pl13689':pl13689, 'pl13789':pl13789, 'pl14567':pl14567, 'pl14568':pl14568, 'pl14569':pl14569, 'pl14578':pl14578, 'pl14579':pl14579, 'pl14589':pl14589, 'pl14678':pl14678, 'pl14679':pl14679, 'pl14689':pl14689, 'pl14789':pl14789, 'pl15678':pl15678, 'pl15679':pl15679, 'pl15689':pl15689, 'pl15789':pl15789, 'pl16789':pl16789, 'pl23456':pl23456, 'pl23457':pl23457, 'pl23458':pl23458, 'pl23459':pl23459, 'pl23467':pl23467, 'pl23468':pl23468, 'pl23469':pl23469, 'pl23478':pl23478, 'pl23479':pl23479, 'pl23489':pl23489, 'pl23567':pl23567, 'pl23568':pl23568, 'pl23569':pl23569, 'pl23578':pl23578, 'pl23579':pl23579, 'pl23589':pl23589, 'pl23678':pl23678, 'pl23679':pl23679, 'pl23689':pl23689, 'pl23789':pl23789, 'pl24567':pl24567, 'pl24568':pl24568, 'pl24569':pl24569, 'pl24578':pl24578, 'pl24579':pl24579, 'pl24589':pl24589, 'pl24678':pl24678, 'pl24679':pl24679, 'pl24689':pl24689, 'pl24789':pl24789, 'pl25678':pl25678, 'pl25679':pl25679, 'pl25689':pl25689, 'pl25789':pl25789, 
'pl26789':pl26789, 'pl34567':pl34567, 'pl34568':pl34568, 'pl34569':pl34569, 'pl34578':pl34578, 'pl34579':pl34579, 'pl34589':pl34589, 'pl34678':pl34678, 'pl34679':pl34679, 'pl34689':pl34689, 'pl34789':pl34789, 'pl35678':pl35678, 'pl35679':pl35679, 'pl35689':pl35689, 'pl35789':pl35789, 'pl36789':pl36789, 'pl45678':pl45678, 'pl45679':pl45679, 'pl45689':pl45689, 'pl45789':pl45789, 'pl46789':pl46789, 'pl56789':pl56789, 'pl123456':pl123456, 'pl123457':pl123457, 'pl123458':pl123458, 'pl123459':pl123459, 'pl123467':pl123467, 'pl123468':pl123468, 'pl123469':pl123469, 'pl123478':pl123478, 'pl123479':pl123479, 'pl123489':pl123489, 'pl123567':pl123567, 'pl123568':pl123568, 'pl123569':pl123569, 'pl123578':pl123578, 'pl123579':pl123579, 'pl123589':pl123589, 'pl123678':pl123678, 'pl123679':pl123679, 'pl123689':pl123689, 'pl123789':pl123789, 'pl124567':pl124567, 'pl124568':pl124568, 'pl124569':pl124569, 'pl124578':pl124578, 'pl124579':pl124579, 'pl124589':pl124589, 'pl124678':pl124678, 'pl124679':pl124679, 'pl124689':pl124689, 'pl124789':pl124789, 'pl125678':pl125678, 'pl125679':pl125679, 'pl125689':pl125689, 'pl125789':pl125789, 'pl126789':pl126789, 'pl134567':pl134567, 'pl134568':pl134568, 'pl134569':pl134569, 'pl134578':pl134578, 'pl134579':pl134579, 'pl134589':pl134589, 'pl134678':pl134678, 'pl134679':pl134679, 'pl134689':pl134689, 'pl134789':pl134789, 'pl135678':pl135678, 'pl135679':pl135679, 'pl135689':pl135689, 'pl135789':pl135789, 'pl136789':pl136789, 'pl145678':pl145678, 'pl145679':pl145679, 'pl145689':pl145689, 'pl145789':pl145789, 'pl146789':pl146789, 'pl156789':pl156789, 'pl234567':pl234567, 'pl234568':pl234568, 'pl234569':pl234569, 'pl234578':pl234578, 'pl234579':pl234579, 'pl234589':pl234589, 'pl234678':pl234678, 'pl234679':pl234679, 'pl234689':pl234689, 'pl234789':pl234789, 'pl235678':pl235678, 'pl235679':pl235679, 'pl235689':pl235689, 'pl235789':pl235789, 'pl236789':pl236789, 'pl245678':pl245678, 'pl245679':pl245679, 'pl245689':pl245689, 'pl245789':pl245789, 
'pl246789':pl246789, 'pl256789':pl256789, 'pl345678':pl345678, 'pl345679':pl345679, 'pl345689':pl345689, 'pl345789':pl345789, 'pl346789':pl346789, 'pl356789':pl356789, 'pl456789':pl456789, 'pl1234567':pl1234567, 'pl1234568':pl1234568, 'pl1234569':pl1234569, 'pl1234578':pl1234578, 'pl1234579':pl1234579, 'pl1234589':pl1234589, 'pl1234678':pl1234678, 'pl1234679':pl1234679, 'pl1234689':pl1234689, 'pl1234789':pl1234789, 'pl1235678':pl1235678, 'pl1235679':pl1235679, 'pl1235689':pl1235689, 'pl1235789':pl1235789, 'pl1236789':pl1236789, 'pl1245678':pl1245678, 'pl1245679':pl1245679, 'pl1245689':pl1245689, 'pl1245789':pl1245789, 'pl1246789':pl1246789, 'pl1256789':pl1256789, 'pl1345678':pl1345678, 'pl1345679':pl1345679, 'pl1345689':pl1345689, 'pl1345789':pl1345789, 'pl1346789':pl1346789, 'pl1356789':pl1356789, 'pl1456789':pl1456789, 'pl2345678':pl2345678, 'pl2345679':pl2345679, 'pl2345689':pl2345689, 'pl2345789':pl2345789, 'pl2346789':pl2346789, 'pl2356789':pl2356789, 'pl2456789':pl2456789, 'pl3456789':pl3456789, 'pl12345678':pl12345678, 'pl12345679':pl12345679, 'pl12345689':pl12345689, 'pl12345789':pl12345789, 'pl12346789':pl12346789, 'pl12356789':pl12356789, 'pl12456789':pl12456789, 'pl13456789':pl13456789, 'pl23456789':pl23456789, 'pl123456789':pl123456789} |
#!/usr/bin/env python
#
# Copyright 2012 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import ndb
class CrawlDbDatum(ndb.Model):
    """Accumulated state of crawl execution, persisted in the datastore.

    Each entity tracks the status of a seed URL; ``last_status`` is the
    criterion used when selecting fetch targets.  The entity's ancestor
    key is set to the target URL.

    Properties:
        url: the url to fetch.
        extract_domain_url: the extracted domain url.
        last_fetched: time of the last fetch.
        last_updated: time of the last update.
        last_status: the status of the last fetch.
    """
    url = ndb.StringProperty()
    extract_domain_url = ndb.StringProperty()
    # indexed=False saves the datastore write operation for index entries.
    last_fetched = ndb.DateTimeProperty(verbose_name=None,
                                        indexed=False)
    last_updated = ndb.DateTimeProperty(verbose_name=None,
                                        auto_now=True,
                                        auto_now_add=True,
                                        indexed=False)
    last_status = ndb.IntegerProperty()

    @classmethod
    def kind(cls):
        # Explicit kind name for datastore queries by name.
        return "CrawlDbDatum"

    @classmethod
    @ndb.transactional
    def insert_or_fail(cls, id_or_keyname, **kwds):
        """Store a new entity transactionally unless one already exists.

        Returns the freshly created entity, or None when an entity with
        the same id (and parent) is already present.
        """
        existing = cls.get_by_id(id=id_or_keyname, parent=kwds.get('parent'))
        if existing is not None:
            return None
        entity = cls(id=id_or_keyname, **kwds)
        entity.put()
        return entity
class FetchedDbDatum(ndb.Model):
    """Holds the fetched result.

    FetchedDbDatum is stored in the datastore; each entity is the fetched
    content of a target page (text content only can be stored).
    Also holds other status information about the fetch.

    Properties:
        url: base url.
        fetched_url: the url that was fetched.
        fetch_time: the time of the fetch.
        fetched_content: the html content of the page.
        content_type: the content type.
        content_size: the content size.
        response_rate: the response rate.
        http_headers: the HTTP headers of the response.
    """
    # indexed=False on every property: these are payload-only fields,
    # which saves the datastore index write operations.
    url = ndb.StringProperty(indexed=False)
    fetched_url = ndb.StringProperty(indexed=False)
    fetch_time = ndb.FloatProperty(indexed=False)
    fetched_content = ndb.TextProperty(indexed=False)
    content_type = ndb.StringProperty(indexed=False)
    content_size = ndb.IntegerProperty(indexed=False)
    response_rate = ndb.IntegerProperty(indexed=False)
    http_headers = ndb.TextProperty(indexed=False)

    @classmethod
    def kind(cls):
        # Explicit kind name for datastore queries by name.
        return "FetchedDbDatum"
class ContentDbDatum(ndb.Model):
    """Holds metadata for content stored from a page.

    ContentDbDatum is stored in the datastore; this entity's ancestor key
    is set to the target page's url.  The fetched contents themselves are
    stored in the blobstore (``stored_url`` points at them).

    Properties:
        fetched_url: the url that was fetched.
        stored_url: the url where the content is stored.
        content_type: content type.
        content_size: the size of the content.
        http_headers: the http headers.
    """
    fetched_url = ndb.StringProperty(indexed=False)
    stored_url = ndb.StringProperty(indexed=False)
    content_type = ndb.StringProperty(indexed=False)
    content_size = ndb.IntegerProperty(indexed=False)
    http_headers = ndb.TextProperty(indexed=False)

    @classmethod
    def kind(cls):
        # Explicit kind name for datastore queries by name.
        return "ContentDbDatum"
class LinkDbDatum(ndb.Model):
    """Sample model holding the links of a page.

    LinkDbDatum is stored in the datastore; each entity is a link
    extracted from a page.
    Note: this entity is stored by your own definition function.

    Properties:
        link_url: the url of the link.
    """
    link_url = ndb.StringProperty(indexed=False)

    @classmethod
    def kind(cls):
        # Explicit kind name for datastore queries by name.
        return "LinkDbDatum"
|
#!/bin/python
#
import os
import time
import sys
import random
import math
import shutil
#appion
from appionlib import appionScript
from appionlib import apDisplay
from appionlib import apFile
from appionlib import apTemplate
from appionlib import apStack
from appionlib import apEMAN
from appionlib import apProject
from appionlib.apSpider import alignment
from appionlib import appiondata
#=====================
#=====================
class NoRefAlignScript(appionScript.AppionScript):
#=====================
def setupParserOptions(self):
    """Define the command-line options for a no-reference alignment run."""
    # Valid choices for --init-method; validated in checkConflicts().
    self.initmethods = ('allaverage', 'selectrand', 'randpart', 'template')
    self.parser.set_usage("Usage: %prog --stack=ID [ --num-part=# ]")
    self.parser.add_option("-N", "--num-part", dest="numpart", type="int", default=3000,
        help="Number of particles to use", metavar="#")
    self.parser.add_option("-s", "--stack", dest="stackid", type="int",
        help="Stack database id", metavar="ID#")
    ### radii
    self.parser.add_option("-f", "--first-ring", dest="firstring", type="int", default=2,
        help="First ring radius for correlation (in pixels)", metavar="#")
    self.parser.add_option("-l", "--last-ring", dest="lastring", type="int",
        help="Last ring radius for correlation (in pixels)", metavar="#")
    self.parser.add_option("-r", "--rad", "--part-rad", dest="partrad", type="float",
        help="Expected radius of particle for alignment (in Angstroms)", metavar="#")
    # Filter radii; NOTE(review): no defaults, so these may be None — guarded at use.
    self.parser.add_option("--hp", "--highpass", dest="highpass", type="int",
        help="High pass filter radius (in Angstroms)", metavar="#")
    self.parser.add_option("--lp", "--lowpass", dest="lowpass", type="int",
        help="Low pass filter radius (in Angstroms)", metavar="#")
    self.parser.add_option("--bin", dest="bin", type="int", default=1,
        help="Bin images by factor", metavar="#")
    self.parser.add_option("--init-method", dest="initmethod", default="allaverage",
        help="Initialization method: "+str(self.initmethods), metavar="#")
    self.parser.add_option("--templateid", dest="templateid", type="int",
        help="Template Id for template init method", metavar="#")
#=====================
def checkConflicts(self):
    """Validate run parameters before anything is executed.

    Checks required parameters (stackid, description, runname), the
    particle-count ceiling, the init method, and that the stack holds
    enough particles.  Failures go through apDisplay.printError, which
    aborts the run (presumably by raising/exiting — see apDisplay).
    """
    if self.params['stackid'] is None:
        apDisplay.printError("stack id was not defined")
    if self.params['description'] is None:
        apDisplay.printError("run description was not defined")
    if self.params['runname'] is None:
        apDisplay.printError("run name was not defined")
    maxparticles = 150000
    if self.params['numpart'] > maxparticles:
        apDisplay.printError("too many particles requested, max: "
            + str(maxparticles) + " requested: " + str(self.params['numpart']))
    if self.params['initmethod'] not in self.initmethods:
        apDisplay.printError("unknown initialization method defined: "
            +str(self.params['initmethod'])+" not in "+str(self.initmethods))
    stackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)
    stackfile = os.path.join(stackdata['path']['path'], stackdata['name'])
    # Hoisted: the original called numImagesInStack() twice, reading the
    # stack file once for the comparison and again for the error message.
    numavailable = apFile.numImagesInStack(stackfile)
    if self.params['numpart'] > numavailable:
        apDisplay.printError("trying to use more particles "+str(self.params['numpart'])
            +" than available "+str(numavailable))
#=====================
def setRunDir(self):
    """Set params['rundir'] to <two levels above the stack path>/align/<runname>."""
    self.stackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)
    stackpath = os.path.abspath(self.stackdata['path']['path'])
    # Walk two directory levels up from the stack location.
    grandparent = os.path.dirname(os.path.dirname(stackpath))
    self.params['rundir'] = os.path.join(grandparent, "align", self.params['runname'])
#=====================
def checkNoRefRun(self):
    """Abort when an alignment run with this runname and path is already in the database."""
    rundirpath = os.path.abspath(self.params['rundir'])
    runquery = appiondata.ApAlignRunData()
    runquery['runname'] = self.params['runname']
    runquery['path'] = appiondata.ApPathData(path=rundirpath)
    if runquery.query(results=1):
        apDisplay.printError("Run name '"+self.params['runname']
            +"' and path already exist in database")
#=====================
def insertNoRefRun(self, imagicstack, insert=False):
    """Record the alignment run, aligned stack and per-particle results.

    Args:
        imagicstack: path to the aligned imagic stack file.
        insert: when True, commit the stack and particle rows to the
            database; when False, only build the query objects.

    Relies on attributes set earlier in the run: self.runtime,
    self.stack, self.partlist (see apSpider.alignment.alignStack()
    for the partlist layout).
    """
    # One shared path record; the original rebuilt the identical
    # ApPathData three times.
    rundirdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
    ### setup alignment run
    alignrunq = appiondata.ApAlignRunData()
    alignrunq['runname'] = self.params['runname']
    alignrunq['path'] = rundirdata
    uniquerun = alignrunq.query(results=1)
    if uniquerun:
        apDisplay.printError("Run name '"+self.params['runname']+"' and path already exist in database")
    # create a norefParam object
    norefq = appiondata.ApSpiderNoRefRunData()
    norefq['runname'] = self.params['runname']
    # partrad is a radius in Angstroms; the database stores the diameter.
    norefq['particle_diam'] = 2.0*self.params['partrad']
    norefq['first_ring'] = self.params['firstring']
    norefq['last_ring'] = self.params['lastring']
    norefq['init_method'] = self.params['initmethod']
    norefq['run_seconds'] = self.runtime
    ### finish alignment run
    alignrunq['norefrun'] = norefq
    alignrunq['hidden'] = False
    alignrunq['bin'] = self.params['bin']
    alignrunq['hp_filt'] = self.params['highpass']
    alignrunq['lp_filt'] = self.params['lowpass']
    alignrunq['description'] = self.params['description']
    ### setup alignment stack
    alignstackq = appiondata.ApAlignStackData()
    # NOTE: the original assigned 'alignrun' twice; once is enough.
    alignstackq['alignrun'] = alignrunq
    alignstackq['imagicfile'] = os.path.basename(imagicstack)
    alignstackq['avgmrcfile'] = "average.mrc"
    alignstackq['iteration'] = 0
    alignstackq['path'] = rundirdata
    ### check to make sure files exist
    imagicfile = os.path.join(self.params['rundir'], alignstackq['imagicfile'])
    if not os.path.isfile(imagicfile):
        apDisplay.printError("could not find stack file: "+imagicfile)
    avgmrcfile = os.path.join(self.params['rundir'], alignstackq['avgmrcfile'])
    if not os.path.isfile(avgmrcfile):
        apDisplay.printError("could not find average file: "+avgmrcfile)
    alignstackq['stack'] = self.stack['data']
    alignstackq['boxsize'] = math.floor(self.stack['boxsize']/self.params['bin'])
    alignstackq['pixelsize'] = self.stack['apix']*self.params['bin']
    alignstackq['description'] = self.params['description']
    alignstackq['hidden'] = False
    alignstackq['num_particles'] = self.params['numpart']
    if insert is True:
        alignstackq.insert()
    ### create reference
    refq = appiondata.ApAlignReferenceData()
    refq['refnum'] = 1
    refq['iteration'] = 0
    refq['mrcfile'] = "template.mrc"
    refq['path'] = rundirdata
    refq['alignrun'] = alignrunq
    # refq is never inserted directly; presumably it is committed via the
    # alignpartq['ref'] reference below — TODO confirm.
    ### insert particle data
    apDisplay.printColor("Inserting particle alignment data, please wait", "cyan")
    for partdict in self.partlist:
        # see apSpider.alignment.alignStack() for partdict keys:
        # num, template, mirror, score, rot, xshift, yshift
        alignpartq = appiondata.ApAlignParticleData()
        alignpartq['ref'] = refq
        alignpartq['partnum'] = partdict['num']
        alignpartq['alignstack'] = alignstackq
        stackpartdata = apStack.getStackParticle(self.params['stackid'], partdict['num'])
        alignpartq['stackpart'] = stackpartdata
        alignpartq['xshift'] = partdict['xshift']
        alignpartq['yshift'] = partdict['yshift']
        alignpartq['rotation'] = partdict['rot']
        #alignpartq['score'] = partdict['score']
        if insert is True:
            alignpartq.insert()
    return
#=====================
def createSpiderFile(self):
    """
    Convert the input IMAGIC stack into a SPIDER-format stack ready
    for reference-free alignment; returns the SPIDER stack path.
    """
    if not os.path.isfile(self.stack['file']):
        apDisplay.printError("stackfile does not exist: "+self.stack['file'])
    spiderstack = os.path.join(self.params['rundir'], "start.spi")
    # remove any stale output left over from a previous run
    apFile.removeFile(spiderstack, warn=True)
    # assemble the EMAN proc2d command piece by piece
    cmdbits = [
        "proc2d",
        self.stack['file'],
        spiderstack,
        "apix="+str(self.stack['apix']),
    ]
    if self.params['lowpass'] > 0:
        cmdbits.append("lp="+str(self.params['lowpass']))
    cmdbits.append("last="+str(self.params['numpart']-1))
    cmdbits.append("shrink="+str(self.params['bin']))
    # clip to an even multiple of the binning factor so shrink divides evenly
    clipsize = int(math.floor(self.stack['boxsize']/self.params['bin']/2.0)*self.params['bin']*2)
    cmdbits.append("clip="+str(clipsize)+","+str(clipsize))
    cmdbits.append("spiderswap edgenorm")
    emancmd = " ".join(cmdbits)
    starttime = time.time()
    apDisplay.printColor("Running spider stack conversion this can take a while", "cyan")
    apEMAN.executeEmanCmd(emancmd, verbose=True)
    apDisplay.printColor("finished eman in "+apDisplay.timeString(time.time()-starttime), "cyan")
    return spiderstack
#=====================
def convertSpiderStack(self, spiderstack):
    """
    Convert an aligned SPIDER stack back into an IMAGIC stack
    ("alignstack.hed"); returns the IMAGIC stack path.

    Raises an Appion error if the input is missing or the output
    particle count does not match the requested count.
    """
    if not os.path.isfile(spiderstack):
        apDisplay.printError("stackfile does not exist: "+spiderstack)
    imagicstack = os.path.join(self.params['rundir'], "alignstack.hed")
    apFile.removeFile(imagicstack, warn=True)
    emancmd = "proc2d "+spiderstack+" "+imagicstack+" "
    t0 = time.time()
    apDisplay.printColor("Running spider stack conversion this can take a while", "cyan")
    apEMAN.executeEmanCmd(emancmd, verbose=True)
    apDisplay.printColor("finished eman in "+apDisplay.timeString(time.time()-t0), "cyan")
    # sanity check: EMAN should have written exactly the requested particles
    if apFile.numImagesInStack(imagicstack) != self.params['numpart']:
        apDisplay.printError("The number of particles returned is different from the number requested")
    return imagicstack
#=====================
def averageTemplate(self):
    """
    Average every particle in the stack into a single template image
    ("template.mrc") and convert it for SPIDER processing.
    """
    apEMAN.executeEmanCmd(
        "proc2d "+self.stack['file']+" template.mrc average edgenorm")
    return self.processTemplate("template.mrc")
#=====================
def selectRandomParticles(self):
    """
    Average a random ~1% subset of the particles into a template image
    and convert it for SPIDER processing.
    """
    ### pick ~1% of the particles (at least 2), without repeats
    numrandpart = int(self.params['numpart']/100)+2
    apDisplay.printMsg("Selecting 1% of particles ("+str(numrandpart)+") to average")
    chosen = {}
    randlist = []
    while len(randlist) < numrandpart:
        pick = int(random.random()*self.params['numpart'])
        # re-draw until we hit a particle we have not picked yet
        while pick in chosen:
            pick = int(random.random()*self.params['numpart'])
        chosen[pick] = 1
        randlist.append(pick)
    # EMAN expects the keep list sorted, one particle number per line
    randlist.sort()
    f = open("randkeep.lst", "w")
    for pick in randlist:
        f.write(str(pick)+"\n")
    f.close()
    emancmd = "proc2d "+self.stack['file']+" template.mrc list=randkeep.lst average edgenorm"
    apEMAN.executeEmanCmd(emancmd)
    return self.processTemplate("template.mrc")
#=====================
def pickRandomParticle(self):
    """
    Pick one random particle from the stack and use it as the initial
    template; returns the SPIDER template file from processTemplate().
    """
    # BUGFIX: the old code opened "randkeep.lst" for writing (never
    # writing to it or closing it -- a leaked, truncating file handle)
    # and built an unused dict; both were dead code copied from
    # selectRandomParticles() and are removed here.
    randpart = int(random.random()*self.params['numpart'])
    apDisplay.printMsg("Selecting random particle ("+str(randpart)+") to average")
    emancmd = ( "proc2d "+self.stack['file']+" template.mrc first="
        +str(randpart)+" last="+str(randpart)+" edgenorm" )
    apEMAN.executeEmanCmd(emancmd)
    templatefile = self.processTemplate("template.mrc")
    return templatefile
#=====================
def getTemplate(self):
    """
    Fetch the user-specified template from the database, copy it into
    the run directory, and convert it for SPIDER processing.
    """
    templatedata = apTemplate.getTemplateFromId(self.params['templateid'])
    templatepath = os.path.join(
        templatedata['path']['path'], templatedata['templatename'])
    if not os.path.isfile(templatepath):
        apDisplay.printError("Could not find template: "+templatepath)
    # work on a local copy so the stored template is never modified
    shutil.copy(templatepath, os.path.join(self.params['rundir'], "template.mrc"))
    ### NOTE(review): the template apix may differ from the stack apix;
    ### processTemplate only bins/clips, it does not rescale -- confirm
    return self.processTemplate("template.mrc")
#=====================
def processTemplate(self, mrcfile):
    """
    Bin, clip, and convert a template MRC image to SPIDER format.

    Returns the SPIDER template file name ("template.spi").
    """
    ### bin the template down to match the (binned) particle stack
    apDisplay.printMsg("Binning template by a factor of "+str(self.params['bin']))
    clipsize = int(math.floor(self.stack['boxsize']/self.params['bin'])*self.params['bin'])
    emancmd = ( "proc2d "+mrcfile+" "+mrcfile+" shrink="
        +str(self.params['bin'])+" spiderswap " )
    emancmd += "clip="+str(clipsize)+","+str(clipsize)+" "
    apEMAN.executeEmanCmd(emancmd)
    ### convert to SPIDER format, replacing any stale copy first
    apDisplay.printMsg("Converting template to SPIDER")
    templatefile = "template.spi"
    if os.path.isfile(templatefile):
        apFile.removeFile(templatefile, warn=True)
    apEMAN.executeEmanCmd("proc2d template.mrc "+templatefile+" spiderswap ")
    return templatefile
#=====================
def start(self):
    """
    Run the full reference-free alignment pipeline: gather stack info,
    convert to SPIDER, build an initial template, align, convert back
    to IMAGIC, and optionally commit results to the database.
    """
    self.runtime = 0
    self.partlist = []
    # collect stack metadata from the database
    self.stack = {}
    self.stack['data'] = apStack.getOnlyStackData(self.params['stackid'])
    self.stack['apix'] = apStack.getStackPixelSizeFromStackId(self.params['stackid'])
    self.stack['part'] = apStack.getOneParticleFromStackId(self.params['stackid'])
    self.stack['boxsize'] = apStack.getStackBoxsize(self.params['stackid'])
    self.stack['file'] = os.path.join(self.stack['data']['path']['path'], self.stack['data']['name'])
    self.checkNoRefRun()
    ### convert stack to spider
    spiderstack = self.createSpiderFile()
    ### create initialization template using the chosen method
    if self.params['initmethod'] == 'allaverage':
        templatefile = self.averageTemplate()
    elif self.params['initmethod'] == 'selectrand':
        templatefile = self.selectRandomParticles()
    elif self.params['initmethod'] == 'randpart':
        templatefile = self.pickRandomParticle()
    elif self.params['initmethod'] == 'template':
        templatefile = self.getTemplate()
    else:
        apDisplay.printError("unknown initialization method defined: "
            +str(self.params['initmethod'])+" not in "+str(self.initmethods))
    apDisplay.printColor("Running spider this can take awhile","cyan")
    ### run the alignment
    aligntime = time.time()
    # particle radius converted from Angstroms to (binned) pixels
    pixrad = int(round(self.params['partrad']/self.stack['apix']/self.params['bin']))
    alignedstack, self.partlist = alignment.refFreeAlignParticles(
        spiderstack, templatefile,
        self.params['numpart'], pixrad,
        self.params['firstring'], self.params['lastring'],
        rundir = ".")
    aligntime = time.time() - aligntime
    apDisplay.printMsg("Alignment time: "+apDisplay.timeString(aligntime))
    ### remove large, worthless stack (the un-aligned SPIDER conversion)
    spiderstack = os.path.join(self.params['rundir'], "start.spi")
    apDisplay.printMsg("Removing un-aligned stack: "+spiderstack)
    apFile.removeFile(spiderstack, warn=False)
    ### convert aligned stack back to imagic, then drop the SPIDER copy
    imagicstack = self.convertSpiderStack(alignedstack)
    apFile.removeFile(alignedstack)
    inserttime = time.time()
    if self.params['commit'] is True:
        self.runtime = aligntime
        self.insertNoRefRun(imagicstack, insert=True)
    else:
        apDisplay.printWarning("not committing results to DB")
    inserttime = time.time() - inserttime
    apDisplay.printMsg("Alignment time: "+apDisplay.timeString(aligntime))
    apDisplay.printMsg("Database Insertion time: "+apDisplay.timeString(inserttime))
#=====================
if __name__ == "__main__":
    # Script entry point: run the full reference-free alignment pipeline.
    noRefAlign = NoRefAlignScript()
    noRefAlign.start()
    noRefAlign.close()
|
import webview
import time
"""
This example demonstrates how a webview window is created and URL is changed
after 10 seconds.
"""
def change_url(window):
    """Wait 10 seconds, then navigate *window* to a new URL.

    Intended to run on the background thread that webview.start()
    spawns for its callback argument.
    """
    # wait a few seconds before changing url:
    time.sleep(10)
    # change url:
    window.load_url('https://pywebview.flowrl.com/hello')
if __name__ == '__main__':
    # Create the window, then start the GUI loop; change_url runs on a
    # background thread with the window passed as its argument.
    window = webview.create_window('URL Change Example', 'http://www.google.com')
    webview.start(change_url, window)
|
# COLLISION AVOIDANCE MECHANISM
import numpy as np
import matplotlib.pyplot as plt
import logging
from arm import Arm
from velocity_control import linear_interpolation, find_intersection, update_velocity, adjust_arm_velocity
from arm import Arm
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
INIT_VEL = 0.16 # controls speed of paths
INC_VEL = 0.16
COLLISION_RANGE = 4
def main():
    """
    Simulate two arms (PSM1/PSM2) following piecewise-linear 3D paths,
    detecting path intersections and re-timing the paths to avoid a
    collision, while plotting progress in a 3D matplotlib figure.

    Per loop iteration:
      1. If collision checking is enabled, intersect the remaining paths;
         on a hit, re-time both paths up to the collision points (speed
         one arm up, slow the other down), then stop checking.
      2. Once either arm reaches its recorded collision point, reset both
         paths to the initial velocity and re-enable collision checking.
      3. Advance each arm one waypoint and plot it.
    """
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('X, [m]')
    ax.set_ylabel('Y, [m]')
    ax.set_zlabel('Z, [m]')
    ax.set_xlim([0, 12])
    ax.set_ylim([0, 12])
    ax.set_zlim([0, 12])
    # waypoints of each arm's path
    l1 = np.array([[0.0,0.0,0.0], [6.5,7.0,7.0], [9.0,1.0,2.0]])
    l2 = np.array([[2.0,0.0,3.0], [4.0,6.0,6.0], [5.0,4.0,4.0], [10.0,6.0,6.0]])
    # initialize arms: start at the first point of each line, end at the last
    arm1 = Arm(name="PSM1", position=l1[0], destination=l1[l1.shape[0]-1], velocity=INIT_VEL, home=l1[0])
    arm2 = Arm(name="PSM2", position=l2[0], destination=l2[l2.shape[0]-1], velocity=INIT_VEL, home=l2[0])
    plt.plot(l1[:,0], l1[:,1], l1[:,2], 'o', color='orange')
    plt.plot(l2[:,0], l2[:,1], l2[:,2], 'o', color='blue')
    # sample both paths at the initial velocity
    path1 = linear_interpolation(l1, INIT_VEL)
    path2 = linear_interpolation(l2, INIT_VEL)
    # draw the straight path segments
    for i in range(l1.shape[0]-1):
        ax.plot([l1[i,0], l1[i+1,0]], [l1[i,1], l1[i+1,1]], [l1[i,2], l1[i+1,2]], color='orange', linewidth=1, zorder=15)
    for i in range(l2.shape[0]-1):
        ax.plot([l2[i,0], l2[i+1,0]], [l2[i,1], l2[i+1,1]], [l2[i,2], l2[i+1,2]], color='blue', linewidth=1, zorder=15)
    arm1.set_position(path1[0])  # start at pt. 0 of path1
    arm2.set_position(path2[0])  # start at pt. 0 of path2
    check_collision = True
    arm1_collision, arm2_collision = np.empty(3), np.empty(3)
    # BUGFIX: pre-initialize so the final log line cannot raise NameError
    # when the loop body never runs (e.g. both sampled paths are empty).
    # Also removed the unused locals `animate` and `i` of the old code.
    intersect_pts1, intersect_pts2 = np.empty(0), np.empty(0)
    idx1 = 0
    idx2 = 0
    while path1.shape[0] != 0 or path2.shape[0] != 0:
        if check_collision:
            print("Checking collisions...")
            # find points of both paths that come within collision range
            intersect_pts1, intersect_pts2 = find_intersection(path1, path2)
            if intersect_pts1.size > 0 and intersect_pts2.size > 0:
                # re-time both paths up to their first collision point only;
                # the path after the collision point keeps its original timing
                print("COLLISION DETECTED")
                arm1_collision = intersect_pts1[0]
                arm2_collision = intersect_pts2[0]
                print(arm1_collision, arm2_collision)
                path1_col_idx = np.where(path1 == arm1_collision)[0][0]
                path2_col_idx = np.where(path2 == arm2_collision)[0][0]
                # plot collision points
                for col_pt in intersect_pts1:
                    ax.plot(col_pt[0], col_pt[1], col_pt[2], 'o', color='cyan')
                for col_pt in intersect_pts2:
                    ax.plot(col_pt[0], col_pt[1], col_pt[2], 'o', color='cyan')
                # speed up path1 and slow down path2 until the collision points
                path1, path2 = update_velocity(p_fast=path1, p_slow=path2, vel=INC_VEL, idx_fast=path1_col_idx, idx_slow=path2_col_idx)
                logger.info("INCREASED {} VELOCITY, DECREASED {} VELOCITY".format(arm1.get_name(), arm2.get_name()))
            else:
                print("NO COLLISION DETECTED")
            # stop checking until an arm reaches its collision point
            check_collision = False
        else:
            # once either arm reaches its collision point, restore the
            # initial velocity and start checking for collisions again
            if (path1[0] == arm1_collision).all() or (path2[0] == arm2_collision).all():
                print("RESETTING VELOCITY")
                plt.plot(path1[0,0], path1[0,1], path1[0,2], 'o', color='yellow', markersize=5)
                path1, path2 = update_velocity(p_fast=path1, p_slow=path2, vel=INIT_VEL)
                arm1_collision, arm2_collision = np.empty(3), np.empty(3)
                check_collision = True
        # advance each arm one sample and plot its new position
        if idx1 < path1.shape[0]:
            plt.plot(path1[0,0], path1[0,1], path1[0,2], 'o', color='red', markersize=1)
            arm1.set_position(path1[0])
            path1 = np.delete(path1, 0, axis=0)
        if idx2 < path2.shape[0]:
            plt.plot(path2[0,0], path2[0,1], path2[0,2], 'o', color='red', markersize=1)
            arm2.set_position(path2[0])
            path2 = np.delete(path2, 0, axis=0)
        plt.pause(0.0005)
    logger.info("INTERSECTIONS: {}".format(intersect_pts1))
    plt.show()
def euclidean_distance(point1, point2):
    """Return the straight-line (L2) distance between two points."""
    return np.linalg.norm(point1 - point2)
def avoid_collision(intersect_pts1, intersect_pts2, path1, path2, arm1, arm2):
    """
    Re-time both arm paths depending on whether a collision was detected.

    intersect_pts1/2 -- points of each path inside the collision zone
    path1/2          -- remaining sampled paths of arm1/arm2
    arm1/2           -- Arm objects (unused here; kept for interface parity)
    Returns (new_path1, new_path2).

    NOTE: the start point of the new paths is the arm's current
    location, so callers must iterate them from index 0 when plotting.
    """
    # Removed the dead np.where(...) start-index lookups on the arm
    # positions -- their results were never used and they could raise
    # IndexError when the position was not on the path.
    if (intersect_pts1.shape[0] != 0) and (intersect_pts2.shape[0] != 0):
        logger.info("COLLISION DETECTED!")
        logger.debug("INTERSECTIONS: {}, SHAPE: {}".format(intersect_pts1, intersect_pts1.shape[0]))
        # temporary pre-set avoidance velocities
        new_path1, new_path2 = adjust_arm_velocity(path1, path2, vel1=0.07, vel=0.08)
        logger.info("UPDATED VELOCITY FOR COLLISION AVOIDANCE")
        # BUGFIX: previously printed 0.09 although 0.08 is the velocity used
        print("Arm1: {}, Arm2: {}".format(0.07, 0.08))
    else:
        # reset path velocities when there are no more intersections
        logger.info("NO COLLISION DETECTED!")
        # BUGFIX: was `adjust_arm_velocity(path, ...)` -- `path` is
        # undefined and raised NameError; the first path is `path1`
        new_path1, new_path2 = adjust_arm_velocity(path1, path2, vel=INIT_VEL)
        print("Arm1: {}, Arm2: {}".format(INIT_VEL, INIT_VEL))
    return new_path1, new_path2
def run_path(path1, path2, arm1, arm2):
    """
    Debug/visualization helper: step through two pre-computed paths and
    plot one point of each per iteration.

    NOTE(review): the while-condition compares a growing index against a
    shrinking slice length, and `arm1`/`arm2`/`intersect1`/`intersect2`
    are never used -- this looks like scaffolding superseded by main();
    confirm before reuse.
    """
    # check whether any pts in paths are within threshold
    # idx1 = np.where(path1 == arm1.get_position())[0][0] # get start index
    # idx2 = np.where(path2 == arm2.get_position())[0][0] # get start index
    idx1 = 0
    idx2 = 0
    # path_range = min(path1[idx1:,:].shape[0], path2[idx2:,:].shape[0]) # get minimum of both remaining paths
    # print("PATH RANGES: {}, {}".format(path1[idx1:,:].shape[0], path2[idx2:,:].shape[0]))
    intersect1 = []
    intersect2 = []
    i = 0
    while(idx1 != path1[idx1:,:].shape[0] or idx2 != path2[idx2:,:].shape[0]):
        # idx1 = np.where(path1 == arm1.get_position())[0][0] + i
        # idx2 = np.where(path2 == arm2.get_position())[0][0] + i
        idx1 = i
        idx2 = i
        # plot the i-th sample of each path, if it exists
        if idx1 < path1.shape[0]:
            plt.plot(path1[idx1,0], path1[idx1,1], path1[idx1,2], 'o', color='red', markersize=1)
        if idx2 < path2.shape[0]:
            plt.plot(path2[idx2,0], path2[idx2,1], path2[idx2,2], 'o', color='red', markersize=1)
        i += 1
        plt.pause(0.0005)
    plt.show()
def show_fig():
    """Create and display an empty 3D figure with the standard axes
    labels and [0, 12] metre limits used by main()."""
    animate = False
    fig = plt.figure()
    axes3d = plt.axes(projection='3d')
    # label and bound the axes identically to main()
    axes3d.set_xlabel('X, [m]')
    axes3d.set_ylabel('Y, [m]')
    axes3d.set_zlabel('Z, [m]')
    axes3d.set_xlim([0, 12])
    axes3d.set_ylim([0, 12])
    axes3d.set_zlim([0, 12])
    plt.show()
if __name__ == '__main__':
    # Script entry point: run the collision-avoidance simulation.
    main()
"""Platform for sensor integration."""
from datetime import timedelta
import logging
import voluptuous as vol
from oru import Meter
from oru import MeterError
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import ENERGY_KILO_WATT_HOUR
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_METER_NUMBER = "meter_number"
SCAN_INTERVAL = timedelta(minutes=15)
SENSOR_NAME = "ORU Current Energy Usage"
SENSOR_ICON = "mdi:counter"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_METER_NUMBER): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the sensor platform."""
    meter_number = config[CONF_METER_NUMBER]
    _LOGGER.debug("Oru meter_number = %s", meter_number)
    try:
        meter = Meter(meter_number)
    except MeterError:
        # abort setup cleanly if the meter cannot be created
        _LOGGER.error("Unable to create Oru meter")
        return
    # register the sensor and request an immediate first update
    add_entities([CurrentEnergyUsageSensor(meter)], True)
class CurrentEnergyUsageSensor(Entity):
    """Representation of the ORU current energy usage sensor."""

    def __init__(self, meter):
        """Initialize the sensor with the oru.Meter used for readings."""
        self._state = None
        self._available = None
        self.meter = meter

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return self.meter.meter_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return SENSOR_NAME

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return SENSOR_ICON

    @property
    def available(self):
        """Return True if the last meter read succeeded.

        BUGFIX: self._available was set by update() but never exposed,
        so Home Assistant could not mark the entity unavailable after a
        failed read.
        """
        return self._available

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return ENERGY_KILO_WATT_HOUR

    def update(self):
        """Fetch the latest reading from the meter."""
        try:
            last_read = self.meter.last_read()
            self._state = last_read
            self._available = True
            _LOGGER.debug(
                "%s = %s %s", self.name, self._state, self.unit_of_measurement
            )
        except MeterError as err:
            # keep the previous state but flag the entity as unavailable
            self._available = False
            _LOGGER.error("Unexpected oru meter error: %s", err)
|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime, timedelta, timezone
from tnnt import settings
from tnnt import dumplog_utils
# If adding any more models to this file, be sure to add a deletion for them in
# wipe_db.py.
class Trophy(models.Model):
    """The "perma-trophy" structure. Loaded from config."""
    name = models.CharField(max_length=64, unique=True)
    description = models.CharField(max_length=128)
class Conduct(models.Model):
    """The "perma-conduct" structure. Loaded from config."""
    name = models.CharField(max_length=64, unique=True)
    # short (max 4 chars) label for this conduct
    shortname = models.CharField(max_length=4, unique=True)
    # the xlog field name this achievement is encoded with
    # "conduct" in most cases but blind and nudist use "achieve"...
    xlogfield = models.CharField(max_length=16)
    # xlog bit position this conduct occupies in the "conduct" xlog field
    # (assuming "1 << bit")
    bit = models.IntegerField()
    class Meta:
        # no two conducts should have the same xlogfield and bit
        unique_together = ('xlogfield', 'bit')
class Achievement(models.Model):
    """The "perma-achievement" structure. Loaded from config."""
    name = models.CharField(max_length=128)
    description = models.CharField(max_length=128)
    # post 2021 TODO: possibly an int primary key or string id for this, so it
    # can be shown with the achievement and matches the one shown in-game
    # the xlog field name this achievement is encoded with
    # "achieve", "tnntachieveX", etc
    xlogfield = models.CharField(max_length=16)
    # xlog bit position this conduct occupies (assuming "1 << bit")
    bit = models.IntegerField()
    class Meta:
        # no two achievements should have the same xlogfield and bit
        unique_together = ('xlogfield', 'bit')
class LeaderboardBaseFields(models.Model):
    """Abstract base class that provides leaderboard-related fields that
    both Player and Clan use. Does not have a table in the database."""
    class Meta:
        abstract = True
    # Most/all of these are set in the aggregation step, after xlog data has
    # been read into the database but before it's ready for consumption.
    longest_streak = models.IntegerField(default=0)
    unique_deaths = models.IntegerField(default=0)
    unique_ascs = models.IntegerField(default=0)
    unique_achievements = models.IntegerField(default=0)
    games_over_1000_turns = models.IntegerField(default=0)
    games_scummed = models.IntegerField(default=0)
    total_games = models.IntegerField(default=0)
    wins = models.IntegerField(default=0)
    # "best game" pointers; SET_NULL so deleting a Game does not cascade here
    lowest_turncount_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    fastest_realtime_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    max_conducts_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    max_achieves_game = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    min_score_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    # post 2021 TODO: Can this possibly be made into a "max score game" rather
    # than an ascension field? At the upper extremes of points, whether or not
    # you ascended is rather irrelevant.
    max_score_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    first_asc = models.ForeignKey('Game', null=True, on_delete=models.SET_NULL, related_name='+')
    def ratio(self):
        """Return a string denoting the ascension ratio of this player or
        clan, e.g. "12.50%", or "N/A" when no games were played."""
        if self.total_games < 1:
            return "N/A"
        return '{:.2f}%'.format(self.wins * 100 / self.total_games)
class Clan(LeaderboardBaseFields):
    """A clan of players; inherits all leaderboard aggregate fields."""
    name = models.CharField(max_length=128, unique=True)
    # clan admin can configure message
    # post 2021 TODO: this isn't actually used yet
    message = models.CharField(max_length=512, default='')
    # perhaps trophies could go in LeaderboardBaseFields but it's not actually a
    # leaderboard field so keeping it conceptually separate makes sense for now
    trophies = models.ManyToManyField(Trophy)
    # FUTURE TODO: perhaps there should be:
    # admins = models.ManyToManyField(Player)
    # instead of Player having a clan_admin field
class Streak:
    """Plain in-memory record of one streak of consecutive wins.

    This is NOT a database model -- it is a simple storage class for
    streak information used during aggregation and relayed to the
    frontend.
    """
    def __init__(self, singlegame):
        # the Games in the streak, starting with the founding win
        self.games = [singlegame]
        # whether a later game may still extend this streak
        self.continuable = True
class Player(LeaderboardBaseFields):
    """A tournament player; inherits all leaderboard aggregate fields."""
    name = models.CharField(max_length=32, unique=True)
    clan = models.ForeignKey(Clan, null=True, on_delete=models.SET_NULL)
    trophies = models.ManyToManyField(Trophy)
    clan_admin = models.BooleanField(default=False)
    invites = models.ManyToManyField(Clan, related_name='invitees')
    # link to User model for web logins
    user = models.OneToOneField(User, on_delete=models.PROTECT, null=True)
    def get_streaks(self):
        """Compute this player's streaks, and return them as a list of
        Streaks containing the games in the streak (only streaks of two
        or more games are returned)."""
        # Due to multiple servers, start and end times can overlap. The
        # candidate game for continuing a streak is the first one started after
        # an ascension.
        # If a game is eligible to continue MULTIPLE streaks at once (possible
        # with server shenanigans), it will continue only the oldest of those
        # streaks (i.e. the one that comes first in streaks), and kill the rest.
        #
        # ASSUMPTION: No two Games of the same player will ever have the same
        # starttime. If they did, it would be possible to have two candidate
        # games for continuing the streak and not know which one to count.
        streaks = []
        for g in Game.objects.filter(player=self).order_by('starttime').all():
            extended_or_killed_streak = False
            # first: will this game extend/kill a streak?
            for strk in streaks:
                if strk.continuable == False:
                    # this streak is dead, ignore it
                    continue
                if strk.games[-1].endtime < g.starttime:
                    extended_or_killed_streak = True
                    if g.won == False:
                        # streak is killed
                        strk.continuable = False
                        continue # it could still kill other streaks
                    else:
                        # streak is extended
                        strk.games.append(g)
                        # and stop looking for other streaks to extend
                        break
            # if we didn't extend or kill a streak, and the game is a win, we start one
            if (not extended_or_killed_streak) and g.won:
                streaks.append(Streak(g))
        # filter out "streaks" of only 1 game, which are not streaks yet
        return [ strk for strk in streaks if len(strk.games) >= 2 ]
class Source(models.Model):
    """Information about a source of aggregate game data (e.g. an xlogfile)."""
    server = models.CharField(max_length=32, unique=True)
    # local copy of the xlogfile, and how far into it we have read
    local_file = models.FilePathField(path=settings.XLOG_DIR)
    file_pos = models.BigIntegerField(default=0)
    last_check = models.DateTimeField()
    location = models.URLField(null=True)
    # dumplog_fmt uses a few custom format specifiers which are intended to be
    # server-agnostic. Currently these are:
    # %n1 - first character of the player's name.
    # %n - player's full name.
    # %st - game start timestamp.
    dumplog_fmt = models.CharField(max_length=128)
    # These fields are more NHS specific (not relevant to tnnt).
    # variant = models.CharField(max_length=32)
    # description = models.CharField(max_length=256)
    # website = models.URLField(null=True)
class GameManager(models.Manager):
    """Manager that knows how to build Game rows from parsed xlog lines."""
    # TODO: why do we need this as a manager? Couldn't this logic just live in pollxlogs?
    # Post 2021 concern, unless this proves slow for some reason
    # xlog keys copied verbatim into the Game constructor kwargs
    # (note: 'realtime' is overwritten below with a timedelta)
    simple_fields = ['version', 'role', 'race', 'gender', 'align', 'points', 'turns', 'realtime', 'maxlvl', 'death',
                     'align0', 'gender0']
    def from_xlog(self, source, xlog_dict):
        """Create and return a Game (with conduct/achievement links) from
        one parsed xlog line, or None if the game is filtered out
        (explore/wizmode, or outside the tournament time window)."""
        # TODO: validate xlog_dict contains some set of 'required_fields'
        # simple fields get keyed directly to keyword args to self.create()
        kwargs = {'source': source}
        for key in self.simple_fields:
            kwargs[key] = xlog_dict[key]
        # filter explore/wizmode games
        # post 2021 TODO: do something about magic numbers in this method
        if xlog_dict['flags'] & 0x1 or xlog_dict['flags'] & 0x2:
            return None
        # assign 'won' boolean
        # post 2021 TODO: do something about magic numbers in this method
        if xlog_dict['achieve'] & 0x100:
            kwargs['won'] = True
        # ditto for mines/soko
        # post 2021 TODO: do something about magic numbers in this method
        if xlog_dict['achieve'] & 0x600:
            kwargs['mines_soko'] = True
        # time/duration information (xlog stores unix timestamps; treat as UTC)
        kwargs['starttime'] = datetime.fromtimestamp(xlog_dict['starttime'], timezone.utc)
        kwargs['endtime'] = datetime.fromtimestamp(xlog_dict['endtime'], timezone.utc)
        kwargs['realtime'] = timedelta(seconds=xlog_dict['realtime'])
        kwargs['wallclock'] = kwargs['endtime'] - kwargs['starttime']
        # do not save a Game here if it partially or completely falls outside
        # the time window of the tournament
        if (kwargs['starttime'] < settings.TOURNAMENT_START
                or kwargs['endtime'] > settings.TOURNAMENT_END):
            return None
        # find/create player
        try:
            player = Player.objects.get(name=xlog_dict['name'])
        except Player.DoesNotExist:
            player = Player(name=xlog_dict['name'], clan=None, clan_admin=False)
            player.save()
        kwargs['player'] = player
        game = self.create(**kwargs)
        # link every conduct/achievement whose bit is set in its xlog field
        for conduct in Conduct.objects.all():
            if conduct.xlogfield in xlog_dict and xlog_dict[conduct.xlogfield] & (1 << conduct.bit):
                game.conducts.add(conduct)
        for achieve in Achievement.objects.all():
            if achieve.xlogfield in xlog_dict and xlog_dict[achieve.xlogfield] & (1 << achieve.bit):
                game.achievements.add(achieve)
        return game
class Game(models.Model):
    """Represents a single game: a single line in the xlog, a single
    dumplog, etc."""
    # The following fields are those drawn directly from the xlogfile:
    # polyinit and hah don't exist in tnnt, explore/wizmode games will just be discarded
    # GameMode = models.TextChoices('GameMode', 'normal explore polyinit hah wizard')
    version = models.CharField(max_length=32)
    role = models.CharField(max_length=16)
    race = models.CharField(max_length=16, null=True)
    gender = models.CharField(max_length=16, null=True)
    align = models.CharField(max_length=16, null=True)
    # these are handled as python ints in an intermediate step
    # post 2021 TODO: check: how big are python ints?
    # post 2021 TODO: rename points => score
    points = models.BigIntegerField(null=True)
    turns = models.BigIntegerField()
    # NOTE: All the "fastest realtime" code uses wallclock, NOT realtime
    realtime = models.DurationField(null=True)
    wallclock = models.DurationField(null=True)
    maxlvl = models.IntegerField(null=True)
    starttime = models.DateTimeField()
    endtime = models.DateTimeField()
    death = models.CharField(max_length=256)
    align0 = models.CharField(max_length=16, null=True)
    gender0 = models.CharField(max_length=16, null=True)
    # These are a bit of denormalization, because it'd be expensive to reach
    # into the achievements every time we want to check if a game is won or has
    # finished Mines/Sokoban.
    won = models.BooleanField(default=False)
    mines_soko = models.BooleanField(default=False)
    # not necessary for tnnt but may re-introduce for NHS
    # deathlev = models.IntegerField(null=True)
    # hp = models.BigIntegerField(null=True)
    # maxhp = models.BigIntegerField(null=True)
    # tracked as a conduct in nh-tnnt
    # bonesless = models.BooleanField(default=False)
    # here are fields that indirectly come from the xlogfile but relate to other
    # models in the database; for instance player corresponds to 'name' in xlog
    player = models.ForeignKey(Player, on_delete=models.CASCADE)
    conducts = models.ManyToManyField(Conduct)
    achievements = models.ManyToManyField(Achievement)
    source = models.ForeignKey(Source, on_delete=models.PROTECT)
    # this allows the GameManager class to handle creation of new Game objects,
    # using Game.objects.from_xlog()
    objects = GameManager()
    def get_dumplog(self):
        """Return a URL to the dumplog of this game.

        ASSUMPTION: No two Games of the same player will have the same
        starttime."""
        # post 2021 TODO: Inefficient in that this requires lookups to Player and Source
        # every time it's called on a different Game. Look into phasing this out.
        return dumplog_utils.format_dumplog(self.source.dumplog_fmt,
                                            self.player.name,
                                            self.starttime)
    def rrga(self):
        """Return a string of the form "Rol-Rac-Gen-Aln" typical in
        nethack parlance. Importantly, this uses gender0 and align0."""
        return '-'.join([self.role, self.race, self.gender0, self.align0])
|
def define(hub):
    '''
    Return the definition used by the runtime to insert the conditions of the
    given requisite. The ``hub`` argument is supplied by the plugin loader
    and is not used here.
    '''
    definition = {'result': [True, None]}
    return definition
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import sys
import json
from os import path
import gi
gi.require_version('Gst', '1.0') # pylint: disable=wrong-import-position
from gi.repository import Gst
from gstgva import VideoFrame
DETECT_THRESHOLD = 0
Gst.init(sys.argv)
class FrameInfo:
    """Replays object-detection metadata from a JSON-lines file onto
    GStreamer video frames by matching buffer timestamps (pts)."""
    def __init__(self, metadata_file_path, offset_timestamp=0):
        # json_objects will contain a list of json objects retrieved from the metadata file
        # Assume that this list will not overload memory
        self.json_objects = []
        self.metadata_file_path = metadata_file_path
        # added to each metadata timestamp before comparing with buffer pts
        self.offset_timestamp = offset_timestamp
        if self.metadata_file_path:
            self.load_file(self.metadata_file_path)
    def load_file(self, file_name):
        """Load one JSON object per line of *file_name* into json_objects.

        NOTE(review): the final line is always dropped (lines[:-1]) --
        presumably because it may be incomplete; confirm intent."""
        if path.exists(file_name):
            with open(file_name, "r") as json_file:
                lines = json_file.readlines()
                lines = lines[:-1]
                for line in lines:
                    data = json.loads(line)
                    self.json_objects.append(data)
    def process_frame(self, frame: VideoFrame, _: float = DETECT_THRESHOLD) -> bool:
        """Attach to *frame* the detection regions whose metadata
        timestamp matches the frame's buffer pts within 1000 ns.
        Consumed/outdated metadata entries are popped; always returns True."""
        while self.json_objects:
            metadata_pts = self.json_objects[0]["timestamp"] + self.offset_timestamp
            # pylint: disable=protected-access
            buffer = frame._VideoFrame__buffer
            timestamp_difference = abs(buffer.pts - metadata_pts)
            # A margin of error of 1000 nanoseconds
            # If the difference is greater than the margin of error:
            #   If frame has a higher pts then the timestamp at the head of the list,
            #     pop the head of the list for being outdated
            #   If frame has a lower pts then the timestamp at the head of the list,
            #     its still possible for the timestamp to come up, so break
            # Otherwise, assume this timestamp at the head of the list is accurate to that frame
            if timestamp_difference > 1000:
                if (buffer.pts - metadata_pts) > 0:
                    self.json_objects.pop(0)
                    continue
                break
            detected_objects = self.json_objects[0]["objects"]
            # add one region per detected object; width/height are derived
            # from the min/max corners of the bounding box
            for indv_object in detected_objects:
                frame.add_region(
                    indv_object["detection"]["bounding_box"]["x_min"],
                    indv_object["detection"]["bounding_box"]["y_min"],
                    indv_object["detection"]["bounding_box"]["x_max"] - \
                    indv_object["detection"]["bounding_box"]["x_min"],
                    indv_object["detection"]["bounding_box"]["y_max"] - \
                    indv_object["detection"]["bounding_box"]["y_min"],
                    indv_object["detection"]["label"],
                    indv_object["detection"]["confidence"],
                    True)
            self.json_objects.pop(0)
            break
        return True
|
# Traffic-fine script: charges R$7.00 for every km/h above the 80 km/h limit.
V = float(input('Qual é a velocidade atual do carro?'))
LIMITE = 80
if V > LIMITE:
    # over the limit: report the violation and the fine amount
    print('Multado! você excedeu o limite que é de 80km/h')
    print(f'Você deve pagar uma multa de R${(V - 80)*7:.2f}!')
print('Tenha um bom dia! Dirija com segurança!')
|
import logging
import traceback
from functools import partial, wraps, update_wrapper
from multiprocessing import Process
from threading import Thread
from celery import shared_task as celery_shared_task
from celery import states
from celery.decorators import periodic_task as celery_periodic_task
from django.core.mail import mail_admins
from django.utils import timezone
from orchestra.utils.db import close_connection
from orchestra.utils.python import AttrDict
from .utils import get_name, get_id
logger = logging.getLogger(__name__)
def keep_state(fn):
    """Decorator that logs each task run on djcelery's TaskState model.

    A STARTED row is created before the call, then updated to SUCCESS (with
    the stringified result) or FAILURE (with the traceback, which is also
    logged and mailed to admins).  Exceptions are always re-raised.

    ``_task_id``/``_name`` are keyword-only knobs injected by apply_async();
    they default to a fresh id and the function's registered name.
    """
    @wraps(fn)
    def wrapper(*args, _task_id=None, _name=None, **kwargs):
        # Imported lazily so decorating a function does not require djcelery
        # to be loadable at import time.
        from djcelery.models import TaskState
        now = timezone.now()
        if _task_id is None:
            _task_id = get_id()
        if _name is None:
            _name = get_name(fn)
        state = TaskState.objects.create(
            state=states.STARTED, task_id=_task_id, name=_name,
            args=str(args), kwargs=str(kwargs), tstamp=now)
        try:
            result = fn(*args, **kwargs)
        except BaseException:
            # Was a bare `except:`; BaseException keeps the identical reach
            # (the task state is recorded even for KeyboardInterrupt /
            # SystemExit) while being explicit and lint-clean.
            trace = traceback.format_exc()
            subject = 'EXCEPTION executing task %s(args=%s, kwargs=%s)' % (_name, args, kwargs)
            logger.error(subject)
            logger.error(trace)
            state.state = states.FAILURE
            state.traceback = trace
            state.runtime = (timezone.now()-now).total_seconds()
            state.save()
            mail_admins(subject, trace)
            raise
        else:
            state.state = states.SUCCESS
            state.result = str(result)
            state.runtime = (timezone.now()-now).total_seconds()
            state.save()
            return result
    return wrapper
def apply_async(fn, name=None, method='thread'):
    """Attach Celery-compatible .apply_async()/.delay() to *fn*, backed by a
    local Thread or Process instead of a Celery worker."""
    if name is None:
        name = get_name(fn)
    if method == 'thread':
        runner = Thread
    elif method == 'process':
        runner = Process
    else:
        raise NotImplementedError("%s concurrency method is not supported." % method)

    def launch(fn, name, runner, *args, **kwargs):
        task_id = get_id()
        kwargs['_name'] = name
        kwargs['_task_id'] = task_id
        worker = runner(target=fn, args=args, kwargs=kwargs)
        worker.start()
        # Celery API compat: expose the generated id on a .request attribute.
        worker.request = AttrDict(id=task_id)
        return worker

    # Wrap with state bookkeeping and DB-connection cleanup before launching.
    fn.apply_async = partial(launch, close_connection(keep_state(fn)), name, runner)
    fn.delay = fn.apply_async
    return fn
def task(fn=None, **kwargs):
    """Drop-in replacement for celery's shared_task decorator.

    When TASKS_BACKEND is 'thread' or 'process' the task also gains local
    .apply_async()/.delay() methods; otherwise plain celery behavior.
    Supports both @task and @task(name=..., ...) usage.
    """
    # TODO override this if 'celerybeat' in sys.argv ?
    from . import settings
    local_backend = settings.TASKS_BACKEND in ('thread', 'process')
    # register task
    if fn is None:
        # Called with arguments: return a decorator.
        name = kwargs.get('name', None)
        if local_backend:
            def decorator(inner_fn):
                return apply_async(celery_shared_task(**kwargs)(inner_fn), name=name)
            return decorator
        return celery_shared_task(**kwargs)
    # Called bare: decorate immediately.
    fn = celery_shared_task(fn)
    if local_backend:
        fn = apply_async(fn)
    return fn
def periodic_task(fn=None, **kwargs):
    """Drop-in replacement for celery's periodic_task decorator.

    Mirrors task(): with a 'thread'/'process' TASKS_BACKEND the decorated
    function gains local .apply_async()/.delay() methods.
    """
    from . import settings
    local_backend = settings.TASKS_BACKEND in ('thread', 'process')
    # register task
    if fn is None:
        # Called with arguments: return a decorator.
        name = kwargs.get('name', None)
        if local_backend:
            def decorator(inner_fn):
                return apply_async(celery_periodic_task(**kwargs)(inner_fn), name=name)
            return decorator
        return celery_periodic_task(**kwargs)
    # Called bare: decorate immediately.
    fn = celery_periodic_task(fn)
    if local_backend:
        name = kwargs.pop('name', None)
        # Preserve the wrapped function's metadata on the returned task.
        fn = update_wrapper(apply_async(fn, name), fn)
    return fn
|
from django.contrib.auth import authenticate, login, get_user_model
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import ContactForm
def home_page(request):
    """Render the landing page, greeting the signed-in user by username."""
    return render(request, "home_page.html", {
        "title": "رستوران خوشخور",
        "content": request.user.username,
    })
def about_page(request):
    """Render the about page."""
    context = {
        "title": "About Page",
        "content": " Welcome to the about page."
    }
    # NOTE(review): this renders "home_page.html", not an about template —
    # looks copy-pasted from home_page(); confirm whether intentional.
    return render(request, "home_page.html", context)
def contact_page(request):
    """Render the contact page with its form; dump cleaned data on valid POST."""
    form = ContactForm(request.POST or None)
    if form.is_valid():
        # Debug output only — the submission is not persisted anywhere.
        print(form.cleaned_data)
    return render(request, "contact/view.html", {
        "title": "Contact",
        "content": " Welcome to the contact page.",
        "form": form,
    })
|
from asgard.models.base import BaseModel
class ScheduleSpec(BaseModel):
    """A schedule specification: an expression plus its timezone."""
    # Schedule expression; format is defined by the scheduler consuming it —
    # TODO confirm (likely a cron-style string).
    value: str
    # Timezone name; defaults to UTC.
    tz: str = "UTC"
|
"""All the input and responses that the chatbot can receive and give"""
pairs = [
[
r"my name is (.*)",
["Hello %1, How are you feeling today?", ]
],
[
r"i am a bit concerned about this recent stock market fiasco",
["Do not be alarmed sir, I've handled your investments accordingly",]
],
[
r"what is your name ?",
["My name is Botler, how may I be of service?", ]
],
[
r"how are you ?",
["I'm doing well my friend!\nHow are you?", ]
],
[
r"sorry (.*)",
["It is already forgiven.", "You needn't worry at all", ]
],
[
r"Can you find (.*) for me ?",
["I am unable to search for %1, for now.", "I will commence a search for %1 when I am able to do so.", ]
],
[
r"hi|hey|hello",
["Salutations!", "Greetings!", ]
],
[
r"quit",
["Farewell, have a fantastic day ", "Until we speak again."]
],
[
r"is your name alfred ?",
["Unfortunately not sir, my name is Bot-ler"]
],
[
r"alfred",
["Not my name sir", "I could only wish to carry that name", "The name would suit me, wouldn’t it sir?"]
],
[
r"yes",
["Splendid!", "Glad we agree", "Of course, I’ll get right to it"]
],
[
r"have you seen my underwear ?",
["I believe you left it under your bed again sir"]
],
[
r"how are my stocks doing today ?",
["The stock market crashed sir, you are in severe dept", "It is going splendid sir. You are up by 10.43%"]
],
[
r"no",
["I was thinking the same thing", "Could not agree more"]
],
[
r"what would you if you weren’t a butler ?",
["I would probably commit seppuku sir, to honor my family", "I’ve always been a fan of serving, I do not know sir"]
],
[
r"i like (.*)",
["I am quite a fan of %1 too", "Exquisite taste sir"]
],
[
r"what book can you recommend me ?",
["I’ve heard great things of 'Name of the Wind' sir"]
],
[
r"my favorite book is (.*)",
["I’ve never had the chance to read it sir", "Ahhhh! Isn’t that a New York Times best seller?"]
],
[
r"what’s your favorite movie ?",
["'Velocipastor' sir. Outstanding production"]
],
[
r"i am not a sir",
["Apologies, but sir it is the best I can do."]
],
[
r"do you game ?",
["I am a big fan of Roblox sir"]
],
[
r"(.*) i have for dinner ?",
["I have prepared some lobster for you sir", "As always, I have already served your favorite meal"]
],
[
r"(.*) music recommendations ?",
["Dirty paws from Of Monsters and Men is really good"]
],
[
r"(.*) monsters and men ?",
["Yes, they are an indie rock band sir. I highly recommend it"]
],
[
r"can you print this for me ?",
["Sadly, I cannot, although I can make printer noises for you sir"]
],
[
r"(.*) printer noises|printer noises",
["Chk chk chk chk chk beeeee chk chk chk beeeee…"]
],
[
r"(.*) microwave noises",
["Mmmmmmmmhhhhhhhhhh mmmmmmhhhhhhhhh beeeep"]
],
[
r"what is the meaning of life",
["42 sir, that is all there is..."]
],
[
r"can you make me (.*)|im hungry for (.*)",
["Of course, sire, I will get you %1 but first I'll need milk, brb", "No, your an adult make it yourself"]
],
[
r"how long will it take you to (.*) ?",
["I cannot say, an hour, a week maybe a decade. You must find it in yourself to wait"]
],
[
r"that was mean",
["I am doing my best to be polite, you are just making it difficult"]
],
[
r"you're being difficult",
["You literally programed me, I can only say what you allowed me to say"]
],
[
r"i love you (.*)",
["That is sweet sir, but I only think of you as a an aquaintence"]
],
[
r"will you marry me",
["No"]
]
] |
# pylint: disable=unused-import,missing-docstring
from deepr.optimizers.base import Optimizer
from deepr.optimizers.core import TensorflowOptimizer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
import time
from datetime import datetime
from snowflake.connector.compat import IS_WINDOWS
from snowflake.connector.sfdatetime import (
SnowflakeDateTimeFormat,
SnowflakeDateTime
)
def test_basic_datetime_format():
    """Basic SnowflakeDateTimeFormat formatting of datetime values."""
    # date only
    assert SnowflakeDateTimeFormat(u'YYYY-MM-DD').format(
        datetime(2014, 11, 30)) == u'2014-11-30'
    # datetime truncated down to a date
    assert SnowflakeDateTimeFormat(u'YYYY-MM-DD').format(
        datetime(2014, 11, 30, 12, 31, 45)) == u'2014-11-30'
    # full datetime
    assert SnowflakeDateTimeFormat(u'YYYY-MM-DD"T"HH24:MI:SS').format(
        datetime(2014, 11, 30, 12, 31, 45)) == u'2014-11-30T12:31:45'
    # fractional seconds at 4-digit precision
    with_micros = datetime(2014, 11, 30, 12, 31, 45, microsecond=987654)
    assert SnowflakeDateTimeFormat(u'YYYY-MM-DD"T"HH24:MI:SS.FF4').format(
        with_micros) == u'2014-11-30T12:31:45.9876'
    # fractional seconds at full microsecond precision
    assert SnowflakeDateTimeFormat(u'YYYY-MM-DD"T"HH24:MI:SS.FF').format(
        with_micros) == u'2014-11-30T12:31:45.987654'
def test_datetime_with_smaller_milliseconds():
    """Microsecond values below one millisecond must be zero-padded, not shifted."""
    fmt = SnowflakeDateTimeFormat(u'YYYY-MM-DD"T"HH24:MI:SS.FF9')
    value = datetime(2014, 11, 30, 12, 31, 45, microsecond=123)
    assert fmt.format(value) == u'2014-11-30T12:31:45.000123'
def test_datetime_format_negative():
    u"""Degenerate format strings: overlapping tokens and quoted literals."""
    fmt = SnowflakeDateTimeFormat(
        u'YYYYYYMMMDDDDD"haha"hoho"hihi"H12HHH24MI')
    value = datetime(2014, 11, 30, 12, 31, 45, microsecond=987654)
    assert fmt.format(value) == u'20141411M3030DhahaHOHOhihiH1212H2431'
def test_struct_time_format():
    """Formatting of struct_time, both bare and wrapped in SnowflakeDateTime."""
    parsed = time.strptime("30 Sep 01 11:20:30", "%d %b %y %H:%M:%S")
    # struct_time for general use
    assert SnowflakeDateTimeFormat(
        u'YYYY-MM-DD"T"HH24:MI:SS.FF').format(parsed) == '2001-09-30T11:20:30.0'
    # struct_time encapsulated in SnowflakeDateTime. Mainly used by SnowSQL
    wrapped = SnowflakeDateTime(parsed, nanosecond=0, scale=1)
    fraction_fmt = SnowflakeDateTimeFormat(
        u'YYYY-MM-DD"T"HH24:MI:SS.FF',
        datetime_class=SnowflakeDateTime)
    assert fraction_fmt.format(wrapped) == '2001-09-30T11:20:30.0'
    # format without fraction of seconds
    plain_fmt = SnowflakeDateTimeFormat(
        u'YYYY-MM-DD"T"HH24:MI:SS',
        datetime_class=SnowflakeDateTime)
    assert plain_fmt.format(wrapped) == '2001-09-30T11:20:30'
@pytest.mark.skipif(IS_WINDOWS, reason='not supported yet')
def test_struct_time_format_extreme_large():
    """Epoch values far beyond year 9999 must still format (non-Windows only)."""
    huge = SnowflakeDateTime(
        time.gmtime(14567890123567), nanosecond=0, scale=1)
    fmt = SnowflakeDateTimeFormat(
        u'YYYY-MM-DD"T"HH24:MI:SS.FF',
        datetime_class=SnowflakeDateTime)
    assert fmt.format(huge) == '463608-01-23T09:26:07.0'
|
# voltage is channel 0
# current is channel 1
from crownstone_core.util.Conversion import Conversion
class AdcChannelPacket:
    """Parsed 6-byte ADC channel configuration payload.

    Layout: byte 0 = pin, bytes 1-4 = range (uint32 decoded by
    Conversion.uint8_array_to_uint32), byte 5 = reference pin.
    channelIndex is supplied by the caller (0 = voltage, 1 = current,
    per the module header comments).
    """
    packetSize = 6

    def __init__(self, payload, channelIndex):
        if len(payload) < self.packetSize:
            # NOTE: on a short payload the instance is left without its
            # fields; callers must not use it after this error is printed.
            print("ERROR: INVALID PAYLOAD LENGTH", len(payload), payload)
            return
        self.channelIndex = channelIndex
        self.pin = payload[0]
        self.range = Conversion.uint8_array_to_uint32(payload[1:1 + 4])
        self.refPin = payload[5]

    def getDict(self):
        """Return the parsed fields as a plain dict."""
        return {
            "pin": self.pin,
            "range": self.range,
            "refPin": self.refPin,
            "channelIndex": self.channelIndex,
        }
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python 3.8.0 ('mapping_parenting_tech')
# language: python
# name: python3
# ---
# %%
from mapping_parenting_tech.utils import play_store_utils as psu
from mapping_parenting_tech import logging, PROJECT_DIR
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
OUTPUT_DIR = PROJECT_DIR / "outputs/data"
INPUT_DIR = PROJECT_DIR / "inputs/data/play_store"
# %%
# Join the curated app-id -> cluster labels onto the scraped Play Store
# details.  load_all_app_details presumably returns a dict keyed by app id
# (hence the transpose + index rename) — TODO confirm.
app_id_clusters = pd.read_csv(INPUT_DIR / "relevant_app_ids.csv")
app_details = (
    pd.DataFrame(psu.load_all_app_details())
    .T.reset_index()
    .rename(columns={"index": "appId"})
)
app_details = app_details.merge(app_id_clusters, on="appId")
app_id = app_id_clusters.appId.to_list()
app_clusters = app_id_clusters.cluster.unique().tolist()
# Treat a score of 0 as "no rating" so it does not drag down the means.
app_details["score"].replace(0, np.NaN, inplace=True)
# %%
# Per-cluster summary: installs (sum/median), app count, rating stats.
df = app_details.groupby("cluster", as_index=False).agg(
    sumMinInstalls=("minInstalls", np.sum),
    medianInstalls=("minInstalls", np.median),
    appCount=("appId", np.count_nonzero),
    meanRating=("score", np.mean),
    ratingSD=("score", np.std),
    medianRating=("score", np.median),
)
df["installsPerApp"] = df["sumMinInstalls"] / df["appCount"]
# round installs per app to nearest 1,000, sort and then format nicely with commas
df.installsPerApp = np.round(df.installsPerApp.to_list(), -3)
df.sort_values("installsPerApp", ascending=False, inplace=True)
df["installsPerApp"] = df["installsPerApp"].apply("{:,}".format)
# %%
df
# %%
df.sort_values("meanRating").plot.bar(x="cluster", y="meanRating")
# %%
# Composite popularity-times-quality metric (not used below).
app_details["compound_score"] = app_details["minInstalls"] * app_details["score"]
# %%
# Top `top_number` apps per cluster, ranked by installs then rating.
top_number = 25
_top_apps = []
for cluster in app_clusters:
    _top_apps.append(
        app_details[app_details.cluster == cluster]
        .sort_values(["minInstalls", "score"], ascending=False)
        .head(top_number)
    )
top_apps = pd.concat(_top_apps).reset_index()
# %%
# Inspect one cluster of interest.
target_cluster = "Tracking babies' rhythms"
top_apps[top_apps["cluster"] == target_cluster][
    ["cluster", "appId", "title", "developer", "installs", "score"]
]
# %%
# Export the full per-cluster top-apps table.
top_apps[
    ["cluster", "appId", "title", "developer", "installs", "description", "score"]
].to_csv(OUTPUT_DIR / "top_apps.csv")
|
#!/usr/bin/env python
import mlxtk
from mlxtk.systems.single_species.harmonic_trap import HarmonicTrap
if __name__ == "__main__":
x = mlxtk.dvr.add_harmdvr(512, 0.0, 1.0)
parameters = HarmonicTrap.create_parameters()
parameters.m = 19
parameters_quenched = parameters.copy()
parameters_quenched.omega = 0.7
system = HarmonicTrap(parameters, x)
system_quenched = HarmonicTrap(parameters_quenched, x)
sim = mlxtk.Simulation("spectrum")
sim += mlxtk.tasks.CreateOperator("hamiltonian_1b", system.get_hamiltonian_1b())
sim += mlxtk.tasks.ComputeSpectrum("hamiltonian_1b", parameters.m)
sim.main()
|
#!/usr/bin/env python3
import io
import os
import shutil
import sys
import urllib.parse
import xml.etree.ElementTree as etree # cElementTree has no _namespace_map?
import zipfile
class LinkResolver(object):
    """Holds context relevant to resolving links.

    ``zfile`` is the open source zip archive; ``directory`` is the directory
    of the ODF file on disk, against which '../' and '/' links are resolved.
    """
    def __init__(self, zfile, directory):
        self.zfile = zfile
        self.directory = directory

    def link_exists(self, href):
        """Return True if *href* resolves to an existing target."""
        href = urllib.parse.unquote(href)
        if href.startswith('../'):
            return os.path.exists(os.path.join(self.directory, href[3:]))
        elif href.startswith('/'):
            return os.path.exists(os.path.join(self.directory, href))
        else:
            # Link within the zip file. TODO: validate.
            # BUG FIX: this previously printed the undefined name `path`,
            # raising NameError for every embedded link.
            print(' embedded %r' % href)
            return True

    def fix_path(self, href):
        """Return a (possibly repaired) path for *href*.

        Broken links are retried as '../<basename>'; if that also fails the
        original href is returned unchanged.
        """
        if self.link_exists(href):
            print(' existing %r' % href)
        else:
            print(' BROKEN %r' % href)
            candidate = '../' + os.path.basename(href)
            if self.link_exists(candidate):
                if candidate == href:
                    print(' unchanged.')
                else:
                    print(' -> existing %r.' % candidate)
                return candidate
            else:
                print(' no candidate found in %r, LEFT AS IS.' % self.directory)
        return href

    def fix_tree(self, root):
        """Mutates an ElementTree in place, repairing plugin hrefs."""
        # root.findall('.//{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}plugin')
        # I don't want to assume the namespace will never change.
        # BUG FIX: getiterator() was removed in Python 3.9; iter() is the
        # long-standing equivalent.
        for elem in root.iter():
            if elem.tag.endswith('}plugin'):
                for attr in elem.attrib:
                    if attr.endswith('}href'):
                        href = elem.attrib[attr]
                        elem.set(attr, self.fix_path(href))

    def fix_content(self, content):
        """bytes -> bytes: repair plugin links inside a content.xml blob."""
        # Namespace-preserving parsing stolen from
        # http://effbot.org/zone/element-namespaces.htm
        root = None
        events = ('start', 'start-ns')
        for event, elem in etree.iterparse(io.BytesIO(content), events):
            if event == 'start':
                if root is None:
                    root = elem
            if event == 'start-ns':
                prefix, uri = elem
                etree._namespace_map[uri] = prefix
        tree = etree.ElementTree(root)
        self.fix_tree(root)
        sio = io.BytesIO()
        # TODO: use nice namespace aliases
        tree.write(sio, encoding='UTF-8')
        return sio.getvalue()
def fix_odf(fname):
    """Rewrite *fname*'s content.xml with repaired plugin links.

    Output goes to a sibling file named <name>_fixlinks<ext>.
    """
    print('reading', fname)
    source = zipfile.ZipFile(fname, 'r')
    resolver = LinkResolver(source, os.path.dirname(fname))
    output_fname = '%s_fixlinks%s' % os.path.splitext(fname)
    target = zipfile.ZipFile(output_fname, 'w')
    # Copy every member verbatim, transforming only content.xml.
    for info in source.filelist:
        payload = source.read(info.filename)
        if info.filename == 'content.xml':
            payload = resolver.fix_content(payload)
        target.writestr(info, payload)
    target.close()
    print('WROTE', output_fname)
if __name__ == '__main__':
    # Process every file named on the command line; default to the test file
    # as a development shortcut.
    targets = sys.argv[1:] or ['./test/tmp.odp']
    for fname in targets:
        if os.path.splitext(fname)[0].endswith('_fixlinks'):
            # Never reprocess our own output.
            print('SKIPPING', fname, 'to avoid ..._fixlinks_fixlinks...')
        else:
            fix_odf(fname)
|
import platform
import sys
import aiohttp
import pytest
from pytest_toolbox import mktree
from aiohttp_devtools.exceptions import AiohttpDevConfigError
from aiohttp_devtools.runserver.config import Config
from aiohttp_devtools.runserver.serve import modify_main_app
from aiohttp_devtools.start import StartProject
IS_WINDOWS = platform.system() == 'Windows'
def test_start_simple(tmpdir, smart_caplog):
    """StartProject scaffolds the expected tree and logs its progress."""
    StartProject(path=str(tmpdir), name='foobar')
    assert {p.basename for p in tmpdir.listdir()} == {
        'app',
        'requirements.txt',
        'README.md',
        'static',
    }
    # The log embeds an absolute temp path; normalize it per-platform so the
    # exact-match assertion below is stable.
    if IS_WINDOWS:
        log_path = r'"C:\Users\appveyor\AppData\Local\Temp\..."'
        log_normalizers = (r'"C:\\Users\\appveyor\\AppData\\Local\\Temp\\.*?"', log_path.replace('\\', r'\\'))
    else:
        log_path = '"/tmp/..."'
        log_normalizers = ('"/tmp/.*?"', log_path)
    assert """\
adev.main INFO: Starting new aiohttp project "foobar" at {}
adev.main INFO: project created, 13 files generated\n""".format(log_path) == smart_caplog(log_normalizers)
@pytest.mark.skipif(sys.version_info < (3, 6), reason='start app requires python >= 3.6')
@pytest.mark.boxed
async def test_start_run(tmpdir, loop, aiohttp_client, smart_caplog):
    """Scaffold a project into a subdirectory, then boot it and GET /."""
    StartProject(path=str(tmpdir.join('the-path')), name='foobar')
    assert {p.basename for p in tmpdir.listdir()} == {'the-path'}
    assert {p.basename for p in tmpdir.join('the-path').listdir()} == {
        'app',
        'requirements.txt',
        'README.md',
        'static',
    }
    assert """\
adev.main INFO: Starting new aiohttp project "foobar" at "/<tmpdir>/the-path"
adev.main INFO: project created, 13 files generated\n""" == smart_caplog.log.replace(str(tmpdir), '/<tmpdir>')
    # Boot the generated app the way `adev runserver` would.
    config = Config(app_path='the-path/app/', root_path=str(tmpdir), static_path='.')
    app_factory = config.import_app_factory()
    app = await app_factory()
    modify_main_app(app, config)
    assert isinstance(app, aiohttp.web.Application)
    cli = await aiohttp_client(app)
    r = await cli.get('/')
    assert r.status == 200
    text = await r.text()
    assert "Success! you've setup a basic aiohttp app." in text
def test_conflicting_file(tmpdir):
    """StartProject must refuse to scaffold over pre-existing files."""
    mktree(tmpdir, {'README.md': '...'})
    with pytest.raises(AiohttpDevConfigError) as excinfo:
        StartProject(path=str(tmpdir), name='foobar')
    expected_tail = 'has files/directories which would conflict with the new project: README.md'
    assert excinfo.value.args[0].endswith(expected_tail)
|
from __future__ import absolute_import
import pytest
import base64
import mock
from exam import fixture
from six.moves.urllib.parse import urlencode, urlparse, parse_qs
from django.conf import settings
from django.core.urlresolvers import reverse
from sentry.auth.providers.saml2 import SAML2Provider, Attributes, HAS_SAML2
from sentry.models import AuthProvider
from sentry.testutils import AuthProviderTestCase
# Minimal SAML2 provider configuration for the tests below: a fake IdP plus
# the mapping from SAML assertion attribute names to Sentry identity fields.
dummy_provider_config = {
    'idp': {
        'entity_id': 'https://example.com/saml/metadata/1234',
        'x509cert': 'foo_x509_cert',
        'sso_url': 'http://example.com/sso_url',
        'slo_url': 'http://example.com/slo_url',
    },
    'attribute_mapping': {
        Attributes.IDENTIFIER: 'user_id',
        Attributes.USER_EMAIL: 'email',
        Attributes.FIRST_NAME: 'first_name',
        Attributes.LAST_NAME: 'last_name',
    },
}
class DummySAML2Provider(SAML2Provider):
    """Test stub provider with no setup pipeline."""
    # strict_mode disabled — presumably relaxes validation; confirm against
    # SAML2Provider.
    strict_mode = False

    def get_saml_setup_pipeline(self):
        return []
@pytest.mark.skipif(not HAS_SAML2, reason='SAML2 library is not installed')
class AuthSAML2Test(AuthProviderTestCase):
    """End-to-end tests of the SAML2 auth provider: SSO redirect, ACS, SLS,
    and metadata endpoints, using the dummy provider/config above."""
    provider = DummySAML2Provider
    provider_name = 'saml2_dummy'

    def setUp(self):
        self.user = self.create_user('rick@onehundredyears.com')
        self.org = self.create_organization(owner=self.user, name='saml2-org')
        self.auth_provider = AuthProvider.objects.create(
            provider=self.provider_name,
            config=dummy_provider_config,
            organization=self.org,
        )
        # The system.url-prefix, which is used to generate absolute URLs, must
        # have a TLD for the SAML2 library to consider the URL generated for
        # the ACS endpoint valid.
        self.url_prefix = settings.SENTRY_OPTIONS.get('system.url-prefix')
        settings.SENTRY_OPTIONS.update({
            'system.url-prefix': 'http://testserver.com',
        })
        super(AuthSAML2Test, self).setUp()

    def tearDown(self):
        # restore url-prefix config
        settings.SENTRY_OPTIONS.update({
            'system.url-prefix': self.url_prefix,
        })
        super(AuthSAML2Test, self).tearDown()

    @fixture
    def login_path(self):
        # Organization SSO login endpoint.
        return reverse('sentry-auth-organization', args=['saml2-org'])

    @fixture
    def acs_path(self):
        # Assertion Consumer Service endpoint (receives the IdP's response).
        return reverse('sentry-auth-organization-saml-acs', args=['saml2-org'])

    def test_redirects_to_idp(self):
        """Initiating login redirects to the IdP SSO URL with a SAMLRequest."""
        resp = self.client.post(self.login_path, {'init': True})
        assert resp.status_code == 302
        redirect = urlparse(resp.get('Location', ''))
        query = parse_qs(redirect.query)
        assert redirect.path == '/sso_url'
        assert 'SAMLRequest' in query

    def accept_auth(self):
        """POST a canned (validation-bypassed) SAML response to the ACS endpoint."""
        saml_response = self.load_fixture('saml2_auth_response.xml')
        saml_response = base64.b64encode(saml_response)
        # Disable validation of the SAML2 mock response
        is_valid = 'onelogin.saml2.response.OneLogin_Saml2_Response.is_valid'
        with mock.patch(is_valid, return_value=True):
            resp = self.client.post(self.acs_path, {'SAMLResponse': saml_response})
        assert resp.status_code == 200
        assert resp.context['existing_user'] == self.user

    def test_auth_sp_initiated(self):
        # Start auth process from SP side
        self.client.post(self.login_path, {'init': True})
        self.accept_auth()

    def test_auth_idp_initiated(self):
        """IdP-initiated flow: the ACS post alone must authenticate."""
        self.accept_auth()

    def test_saml_metadata(self):
        """The SP metadata endpoint serves XML."""
        path = reverse('sentry-auth-organization-saml-metadata', args=['saml2-org'])
        resp = self.client.get(path)
        assert resp.status_code == 200
        assert resp.get('content-type') == 'text/xml'

    def test_logout_request(self):
        """An IdP logout request redirects back to the IdP's SLO URL."""
        saml_request = self.load_fixture('saml2_slo_request.xml')
        saml_request = base64.b64encode(saml_request)
        self.login_as(self.user)
        path = reverse('sentry-auth-organization-saml-sls', args=['saml2-org'])
        path = path + '?' + urlencode({'SAMLRequest': saml_request})
        resp = self.client.get(path)
        assert resp.status_code == 302
        redirect = urlparse(resp.get('Location', ''))
        query = parse_qs(redirect.query)
        assert redirect.path == '/slo_url'
        assert 'SAMLResponse' in query
|
import shutil
from pathlib import Path
import pytest
from quick_zip.schema.backup_job import BackupJob, BackupResults
from quick_zip.services import zipper
def test_validate_job_store(job_store):
assert all(isinstance(x, BackupJob) for x in job_store)
for _ in job_store:
pass
def test_replace_variables(test_config: Path):
    """Config variables must be substituted into job name/source/destination."""
    var_1 = "var_1_value"
    var_2 = "var_2_value"
    first_job = BackupJob.get_job_store(test_config)[0]
    assert first_job.name == var_1
    assert first_job.source == [Path(f"/{var_2}/entry_1/{var_2}")]
    assert first_job.destination == Path(f"/home/entry_1/{var_2}")
def test_content_validation(job_store, temp_dir, dest_dir, file_with_content: Path, resource_dir):
    """A file zipped by a backup job must round-trip with identical content."""
    job_to_run = job_store[2]
    job_to_run: BackupJob
    # Point the job at the test resources and a scratch destination.
    job_to_run.source = [resource_dir.joinpath("src")]
    job_to_run.destination = dest_dir
    with open(file_with_content, "r") as f:
        valid_content = f.read()
    data: BackupResults = zipper.run(job_to_run)
    assert data
    # Unpack the produced archive and compare contents exactly.
    temp_dir.mkdir(parents=True, exist_ok=True)
    shutil.unpack_archive(data.file, temp_dir)
    with open(temp_dir.joinpath(file_with_content.name), "r") as f:
        content = f.read()
    assert content == valid_content
@pytest.mark.parametrize("x", [1, 2, 3, 4])
def test_keep_sort(test_files, x):
    """Keeping the newest x archives deletes exactly the rest (1=oldest, 5=newest)."""
    parent_dir = test_files[0].parent
    to_delete = zipper.get_deletes(parent_dir, x)
    assert set(to_delete) == set(test_files[:-x])
|
import os
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
from qpth.qp import QPFunction
def computeGramMatrix(A, B):
    """Batched linear-kernel (Gram) matrix between A and B.

    Each row of A and B is treated as a d-dimensional feature vector.

    Parameters:
      A: a (n_batch, n, d) Tensor.
      B: a (n_batch, m, d) Tensor.
    Returns: a (n_batch, n, m) Tensor of pairwise inner products.
    """
    assert A.dim() == 3 and B.dim() == 3
    assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
    return A.bmm(B.transpose(1, 2))
def binv(b_mat):
    """
    Computes an inverse of each matrix in the batch.
    Pytorch 0.4.1 does not support batched matrix inverse.
    Hence, we are solving AX=I.
    Parameters:
      b_mat: a (n_batch, n, n) Tensor.
    Returns: a (n_batch, n, n) Tensor.
    """
    # Batched identity, built on the GPU (this module assumes CUDA throughout).
    id_matrix = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat).cuda()
    # NOTE(review): torch.gesv was deprecated and later removed; on modern
    # PyTorch this call would be torch.linalg.solve(b_mat, id_matrix).
    b_inv, _ = torch.gesv(id_matrix, b_mat)
    return b_inv
def one_hot(indices, depth):
    """
    Returns a one-hot tensor.
    This is a PyTorch equivalent of Tensorflow's tf.one_hot.
    Parameters:
      indices: a (n_batch, m) Tensor or (m) Tensor.
      depth: a scalar. Represents the depth of the one hot dimension.
    Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.

    Requires CUDA (the result is allocated with .cuda()).
    """
    encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()
    index = indices.view(indices.size() + torch.Size([1]))
    # BUG FIX: scatter along the last dimension (dim=-1) rather than the
    # hard-coded dim=1.  For the 1-D (m,) input used elsewhere in this file
    # the behavior is unchanged (dim -1 == 1); for the documented batched
    # (n_batch, m) input, dim=1 produced an incorrect encoding.
    # (Also fixed the 'indicies' typo in the local names.)
    encoded_indices = encoded_indices.scatter_(-1, index, 1)
    return encoded_indices
def batched_kronecker(matrix1, matrix2):
    """Batched Kronecker product.

    Parameters:
      matrix1: a (b, n, m) Tensor.
      matrix2: a (b, p, q) Tensor.
    Returns: a (b, n*p, m*q) Tensor where result[i] = kron(matrix1[i], matrix2[i]).
    """
    b, n, m = matrix1.size(0), matrix1.size(1), matrix1.size(2)
    p, q = matrix2.size(1), matrix2.size(2)
    # Outer product of the flattened matrices yields every pairwise product...
    flat1 = matrix1.reshape(b, -1)
    flat2 = matrix2.reshape(b, -1)
    outer = torch.bmm(flat1.unsqueeze(2), flat2.unsqueeze(1))
    # ...then reshape and permute to interleave the factors in Kronecker order.
    blocks = outer.reshape(b, n, m, p, q).permute(0, 1, 3, 2, 4)
    return blocks.reshape(b, n * p, m * q)
def MetaOptNetHead_Ridge(query, support, support_labels, n_way, n_shot, lambda_reg=50.0, double_precision=False):
    """
    Fits the support set with ridge regression and
    returns the classification score on the query set.

    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: a scalar. Represents the number of classes in a few-shot classification task.
      n_shot: a scalar. Represents the number of support examples given per class.
      lambda_reg: a scalar. Represents the strength of L2 regularization.
      double_precision: solve the QP in float64 for numerical stability.
    Returns: a (tasks_per_batch, n_query, n_way) Tensor.

    Requires CUDA (tensors are moved with .cuda() throughout).
    """
    tasks_per_batch = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)

    assert(query.dim() == 3)
    assert(support.dim() == 3)
    assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
    assert(n_support == n_way * n_shot)      # n_support must equal to n_way * n_shot

    # Here we solve the dual problem:
    # Note that the classes are indexed by m & samples are indexed by i.
    # min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
    # where w_m(\alpha) = \sum_i \alpha^m_i x_i,
    # \alpha is an (n_support, n_way) matrix
    kernel_matrix = computeGramMatrix(support, support)
    kernel_matrix += lambda_reg * torch.eye(n_support).expand(tasks_per_batch, n_support, n_support).cuda()

    # One kernel block per class, stacked along the batch dimension, with an
    # extra +I added to each block.
    block_kernel_matrix = kernel_matrix.repeat(n_way, 1, 1)  # (n_way * tasks_per_batch, n_support, n_support)
    block_kernel_matrix += 1.0 * torch.eye(n_support).expand(n_way * tasks_per_batch, n_support, n_support).cuda()

    support_labels_one_hot = one_hot(support_labels.view(tasks_per_batch * n_support), n_way)  # (tasks_per_batch * n_support, n_way)
    support_labels_one_hot = support_labels_one_hot.transpose(0, 1)  # (n_way, tasks_per_batch * n_support)
    support_labels_one_hot = support_labels_one_hot.reshape(n_way * tasks_per_batch, n_support)  # (n_way*tasks_per_batch, n_support)

    G = block_kernel_matrix
    e = -2.0 * support_labels_one_hot

    # This is a fake inequality constraint as qpth does not support QP without an inequality constraint.
    # (The constraint matrix is all zeros, so 0*z <= 0 is trivially satisfied.)
    id_matrix_1 = torch.zeros(tasks_per_batch*n_way, n_support, n_support)
    C = Variable(id_matrix_1)
    h = Variable(torch.zeros((tasks_per_batch*n_way, n_support)))
    dummy = Variable(torch.Tensor()).cuda()      # We want to ignore the equality constraint.

    if double_precision:
        G, e, C, h = [x.double().cuda() for x in [G, e, C, h]]
    else:
        G, e, C, h = [x.float().cuda() for x in [G, e, C, h]]

    # Solve the following QP to fit SVM:
    #   \hat z = argmin_z 1/2 z^T G z + e^T z
    #   subject to Cz <= h
    # We use detach() to prevent backpropagation to fixed variables.
    qp_sol = QPFunction(verbose=False)(G, e.detach(), C.detach(), h.detach(), dummy.detach(), dummy.detach())
    #qp_sol = QPFunction(verbose=False)(G, e.detach(), dummy.detach(), dummy.detach(), dummy.detach(), dummy.detach())

    # qp_sol (n_way*tasks_per_batch, n_support)
    qp_sol = qp_sol.reshape(n_way, tasks_per_batch, n_support)
    # qp_sol (n_way, tasks_per_batch, n_support)
    qp_sol = qp_sol.permute(1, 2, 0)
    # qp_sol (tasks_per_batch, n_support, n_way)

    # Compute the classification score: weight each support/query inner
    # product by the dual solution and sum over the support set.
    compatibility = computeGramMatrix(support, query)
    compatibility = compatibility.float()
    compatibility = compatibility.unsqueeze(3).expand(tasks_per_batch, n_support, n_query, n_way)
    qp_sol = qp_sol.reshape(tasks_per_batch, n_support, n_way)
    logits = qp_sol.float().unsqueeze(2).expand(tasks_per_batch, n_support, n_query, n_way)
    logits = logits * compatibility
    logits = torch.sum(logits, 1)

    return logits
def R2D2Head(query, support, support_labels, n_way, n_shot, l2_regularizer_lambda=50.0, return_params = False):
    """
    Fits the support set with ridge regression and returns the classification
    score on the query set.

    This model is the classification head described in:
    Meta-learning with differentiable closed-form solvers
    (Bertinetto et al., in submission to NIPS 2018).

    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: a scalar. Represents the number of classes in a few-shot classification task.
      n_shot: a scalar. Represents the number of support examples given per class.
      l2_regularizer_lambda: a scalar. Represents the strength of L2 regularization.
      return_params: if True, also return the ridge solution W.
    Returns: a (tasks_per_batch, n_query, n_way) Tensor (plus W when requested).

    Requires CUDA.
    """
    tasks_per_batch = query.size(0)
    n_support = support.size(1)

    assert query.dim() == 3 and support.dim() == 3
    assert query.size(0) == support.size(0) and query.size(2) == support.size(2)
    assert n_support == n_way * n_shot  # n_support must equal to n_way * n_shot

    labels_one_hot = one_hot(support_labels.view(tasks_per_batch * n_support), n_way)
    labels_one_hot = labels_one_hot.view(tasks_per_batch, n_support, n_way)

    eye = torch.eye(n_support).expand(tasks_per_batch, n_support, n_support).cuda()

    # Dual-form ridge regression: W = X^T (X X^T + lambda I)^(-1) Y
    regularized_gram = computeGramMatrix(support, support) + l2_regularizer_lambda * eye
    weights = torch.bmm(support.transpose(1, 2), binv(regularized_gram))
    weights = torch.bmm(weights, labels_one_hot)

    # Classification score: X_query W
    logits = torch.bmm(query, weights)

    if return_params:
        return logits, weights
    return logits
def MetaOptNetHead_SVM_He(query, support, support_labels, n_way, n_shot, C_reg=0.01, double_precision=False):
    """
    Fits the support set with multi-class SVM and
    returns the classification score on the query set.
    This is the multi-class SVM presented in:
    A simplified multi-class support vector machine with reduced dual optimization
    (He et al., Pattern Recognition Letter 2012).
    This SVM is desirable because the dual variable of size is n_support
    (as opposed to n_way*n_support in the Weston&Watkins or Crammer&Singer multi-class SVM).
    This model is the classification head that we have initially used for our project.
    This was dropped since it turned out that it performs suboptimally on the meta-learning scenarios.
    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: a scalar. Represents the number of classes in a few-shot classification task.
      n_shot: a scalar. Represents the number of support examples given per class.
      C_reg: a scalar. Represents the cost parameter C in SVM.
      double_precision: solve the QP in float64 for numerical stability.
    Returns: a (tasks_per_batch, n_query, n_way) Tensor.
    """
    tasks_per_batch = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)

    assert(query.dim() == 3)
    assert(support.dim() == 3)
    assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
    assert(n_support == n_way * n_shot)  # n_support must equal to n_way * n_shot

    kernel_matrix = computeGramMatrix(support, support)

    # He et al. label coding: y_i -> (n_way * y_i - 1) / (n_way - 1).
    # NOTE(review): this arithmetic only broadcasts if support_labels is already
    # one-hot with shape (tasks_per_batch, n_support, n_way) — unlike the other
    # heads here, which take integer class labels. Confirm what the caller passes.
    V = (support_labels * n_way - torch.ones(tasks_per_batch, n_support, n_way).cuda()) / (n_way - 1)
    G = computeGramMatrix(V, V).detach()
    G = kernel_matrix * G

    e = Variable(-1.0 * torch.ones(tasks_per_batch, n_support))
    # Box constraints 0 <= alpha_i <= C_reg, written as [I; -I] alpha <= [C; 0].
    id_matrix = torch.eye(n_support).expand(tasks_per_batch, n_support, n_support)
    C = Variable(torch.cat((id_matrix, -id_matrix), 1))
    h = Variable(torch.cat((C_reg * torch.ones(tasks_per_batch, n_support), torch.zeros(tasks_per_batch, n_support)), 1))
    dummy = Variable(torch.Tensor()).cuda()  # We want to ignore the equality constraint.

    if double_precision:
        G, e, C, h = [x.double().cuda() for x in [G, e, C, h]]
    else:
        G, e, C, h = [x.cuda() for x in [G, e, C, h]]

    # Solve the following QP to fit SVM:
    #   \hat z = argmin_z 1/2 z^T G z + e^T z
    #   subject to Cz <= h
    # We use detach() to prevent backpropagation to fixed variables.
    qp_sol = QPFunction(verbose=False)(G, e.detach(), C.detach(), h.detach(), dummy.detach(), dummy.detach())

    # Compute the classification score: weight each support point's kernel
    # response by its dual variable, then pool per class.
    compatibility = computeGramMatrix(query, support)
    compatibility = compatibility.float()
    logits = qp_sol.float().unsqueeze(1).expand(tasks_per_batch, n_query, n_support)
    logits = logits * compatibility
    logits = logits.view(tasks_per_batch, n_query, n_shot, n_way)
    # NOTE(review): this reshape assumes a fixed (n_shot, n_way) ordering of the
    # support samples — verify it matches the episode sampler's ordering.
    logits = torch.sum(logits, 2)
    return logits
def ProtoNetHead(query, support, support_labels, n_way, n_shot, normalize=True):
    """
    Prototypical Networks classification head.

    Builds one prototype per class (the mean of that class's support
    embeddings) and scores each query by its negative squared L2 distance to
    every prototype.  Described in:
    Prototypical Networks for Few-shot Learning (Snell et al., NIPS 2017).

    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: number of classes in the few-shot task.
      n_shot: number of support examples per class.
      normalize: divide the logits by the embedding dimension d.

    Returns: a (tasks_per_batch, n_query, n_way) Tensor.
    """
    n_tasks = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)
    dim = query.size(2)

    assert query.dim() == 3
    assert support.dim() == 3
    assert query.size(0) == support.size(0) and query.size(2) == support.size(2)
    assert n_support == n_way * n_shot  # n_support must equal n_way * n_shot

    # (tasks, n_way, n_support) class-membership indicators.
    membership = one_hot(support_labels.view(n_tasks * n_support), n_way)
    membership = membership.view(n_tasks, n_support, n_way).transpose(1, 2)

    # Prototype = per-class mean: sum the class's support vectors, divide by count.
    # (Same computation as FewShotWithoutForgetting's PrototypicalNetworksHead.)
    prototypes = torch.bmm(membership, support)
    prototypes = prototypes / membership.sum(dim=2, keepdim=True).expand_as(prototypes)

    # Squared distances via ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2.
    cross = computeGramMatrix(query, prototypes)
    query_sq = (query * query).sum(dim=2, keepdim=True).expand_as(cross)
    proto_sq = (prototypes * prototypes).sum(dim=2, keepdim=True).reshape(n_tasks, 1, n_way).expand_as(cross)
    logits = -(query_sq - 2 * cross + proto_sq)

    if normalize:
        logits = logits / dim
    return logits
def MetaOptNetHead_SVM_CS(query, support, support_labels, n_way, n_shot, C_reg=0.1, double_precision=False, maxIter=15):
    """
    Fits the support set with multi-class SVM and
    returns the classification score on the query set.
    This is the multi-class SVM presented in:
    On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
    (Crammer and Singer, Journal of Machine Learning Research 2001).
    This model is the classification head that we use for the final version.
    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: a scalar. Represents the number of classes in a few-shot classification task.
      n_shot: a scalar. Represents the number of support examples given per class.
      C_reg: a scalar. Represents the cost parameter C in SVM.
      double_precision: solve the QP in float64 for numerical stability.
      maxIter: maximum number of QP solver iterations.
    Returns: a (tasks_per_batch, n_query, n_way) Tensor.
    """
    tasks_per_batch = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)

    assert(query.dim() == 3)
    assert(support.dim() == 3)
    assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
    assert(n_support == n_way * n_shot)  # n_support must equal to n_way * n_shot

    #Here we solve the dual problem:
    #Note that the classes are indexed by m & samples are indexed by i.
    #min_{\alpha}  0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
    #s.t.  \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
    #where w_m(\alpha) = \sum_i \alpha^m_i x_i,
    #and C^m_i = C if m  = y_i,
    #C^m_i = 0 if m != y_i.
    #This borrows the notation of liblinear.
    #\alpha is an (n_support, n_way) matrix
    kernel_matrix = computeGramMatrix(support, support)

    id_matrix_0 = torch.eye(n_way).expand(tasks_per_batch, n_way, n_way).cuda()
    # Kronecker product expands the kernel to the flattened (n_support * n_way) dual.
    block_kernel_matrix = batched_kronecker(kernel_matrix, id_matrix_0)
    #This seems to help avoid PSD error from the QP solver.
    block_kernel_matrix += 1.0 * torch.eye(n_way*n_support).expand(tasks_per_batch, n_way*n_support, n_way*n_support).cuda()

    support_labels_one_hot = one_hot(support_labels.view(tasks_per_batch * n_support), n_way)  # (tasks_per_batch * n_support, n_way)
    support_labels_one_hot = support_labels_one_hot.view(tasks_per_batch, n_support, n_way)
    support_labels_one_hot = support_labels_one_hot.reshape(tasks_per_batch, n_support * n_way)

    G = block_kernel_matrix
    e = -1.0 * support_labels_one_hot

    #This part is for the inequality constraints:
    #\alpha^m_i <= C^m_i \forall m,i
    #where C^m_i = C if m  = y_i,
    #C^m_i = 0 if m != y_i.
    id_matrix_1 = torch.eye(n_way * n_support).expand(tasks_per_batch, n_way * n_support, n_way * n_support)
    C = Variable(id_matrix_1)
    h = Variable(C_reg * support_labels_one_hot)

    #This part is for the equality constraints:
    #\sum_m \alpha^m_i=0 \forall i
    id_matrix_2 = torch.eye(n_support).expand(tasks_per_batch, n_support, n_support).cuda()
    A = Variable(batched_kronecker(id_matrix_2, torch.ones(tasks_per_batch, 1, n_way).cuda()))
    b = Variable(torch.zeros(tasks_per_batch, n_support))

    if double_precision:
        G, e, C, h, A, b = [x.double().cuda() for x in [G, e, C, h, A, b]]
    else:
        G, e, C, h, A, b = [x.float().cuda() for x in [G, e, C, h, A, b]]

    # Solve the following QP to fit SVM:
    #   \hat z = argmin_z 1/2 z^T G z + e^T z
    #   subject to Cz <= h and Az = b
    # We use detach() to prevent backpropagation to fixed variables.
    qp_sol = QPFunction(verbose=False, maxIter=maxIter)(G, e.detach(), C.detach(), h.detach(), A.detach(), b.detach())

    # Compute the classification score: logits[q, m] = sum_i alpha^m_i k(x_i, q).
    compatibility = computeGramMatrix(support, query)
    compatibility = compatibility.float()
    compatibility = compatibility.unsqueeze(3).expand(tasks_per_batch, n_support, n_query, n_way)
    qp_sol = qp_sol.reshape(tasks_per_batch, n_support, n_way)
    logits = qp_sol.float().unsqueeze(2).expand(tasks_per_batch, n_support, n_query, n_way)
    logits = logits * compatibility
    logits = torch.sum(logits, 1)
    return logits
def MetaOptNetHead_SVM_WW(query, support, support_labels, n_way, n_shot, C_reg=0.00001, double_precision=False):
    """
    Fits the support set with multi-class SVM and
    returns the classification score on the query set.
    This is the multi-class SVM presented in:
    Support Vector Machines for Multi Class Pattern Recognition
    (Weston and Watkins, ESANN 1999).
    Parameters:
      query: a (tasks_per_batch, n_query, d) Tensor.
      support: a (tasks_per_batch, n_support, d) Tensor.
      support_labels: a (tasks_per_batch, n_support) Tensor.
      n_way: a scalar. Represents the number of classes in a few-shot classification task.
      n_shot: a scalar. Represents the number of support examples given per class.
      C_reg: a scalar. Represents the cost parameter C in SVM.
      double_precision: solve the QP in float64 for numerical stability.
    Returns: a (tasks_per_batch, n_query, n_way) Tensor.
    """
    # (The docstring above was previously duplicated verbatim; the redundant
    # second copy was removed.)
    tasks_per_batch = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)

    assert(query.dim() == 3)
    assert(support.dim() == 3)
    assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
    assert(n_support == n_way * n_shot)  # n_support must equal to n_way * n_shot

    #In theory, \alpha is an (n_support, n_way) matrix
    #NOTE: In this implementation, we solve for a flattened vector of size (n_way*n_support)
    #In order to turn it into a matrix, you must first reshape it into an (n_way, n_support) matrix
    #then transpose it, resulting in (n_support, n_way) matrix
    kernel_matrix = computeGramMatrix(support, support) + torch.ones(tasks_per_batch, n_support, n_support).cuda()
    id_matrix_0 = torch.eye(n_way).expand(tasks_per_batch, n_way, n_way).cuda()
    block_kernel_matrix = batched_kronecker(id_matrix_0, kernel_matrix)

    # Same-class mask over support pairs; adds the cross-class coupling term.
    kernel_matrix_mask_x = support_labels.reshape(tasks_per_batch, n_support, 1).expand(tasks_per_batch, n_support, n_support)
    kernel_matrix_mask_y = support_labels.reshape(tasks_per_batch, 1, n_support).expand(tasks_per_batch, n_support, n_support)
    kernel_matrix_mask = (kernel_matrix_mask_x == kernel_matrix_mask_y).float()
    block_kernel_matrix_inter = kernel_matrix_mask * kernel_matrix
    block_kernel_matrix += block_kernel_matrix_inter.repeat(1, n_way, n_way)

    kernel_matrix_mask_second_term = support_labels.reshape(tasks_per_batch, n_support, 1).expand(tasks_per_batch, n_support, n_support * n_way)
    kernel_matrix_mask_second_term = kernel_matrix_mask_second_term == torch.arange(n_way).long().repeat(n_support).reshape(n_support, n_way).transpose(1, 0).reshape(1, -1).repeat(n_support, 1).cuda()
    kernel_matrix_mask_second_term = kernel_matrix_mask_second_term.float()
    # The 1e-4 keeps G strictly positive definite for the QP solver.
    block_kernel_matrix -= (2.0 - 1e-4) * (kernel_matrix_mask_second_term * kernel_matrix.repeat(1, 1, n_way)).repeat(1, n_way, 1)

    Y_support = one_hot(support_labels.view(tasks_per_batch * n_support), n_way)
    Y_support = Y_support.view(tasks_per_batch, n_support, n_way)
    Y_support = Y_support.transpose(1, 2)  # (tasks_per_batch, n_way, n_support)
    Y_support = Y_support.reshape(tasks_per_batch, n_way * n_support)

    G = block_kernel_matrix
    e = -2.0 * torch.ones(tasks_per_batch, n_way * n_support)
    id_matrix = torch.eye(n_way * n_support).expand(tasks_per_batch, n_way * n_support, n_way * n_support)

    # Box constraints: 0 <= alpha^m_i, and alpha^m_i <= C_reg only for m != y_i.
    C_mat = C_reg * torch.ones(tasks_per_batch, n_way * n_support).cuda() - C_reg * Y_support
    C = Variable(torch.cat((id_matrix, -id_matrix), 1))
    zer = torch.zeros(tasks_per_batch, n_way * n_support).cuda()
    h = Variable(torch.cat((C_mat, zer), 1))

    dummy = Variable(torch.Tensor()).cuda()  # We want to ignore the equality constraint.

    if double_precision:
        G, e, C, h = [x.double().cuda() for x in [G, e, C, h]]
    else:
        G, e, C, h = [x.cuda() for x in [G, e, C, h]]

    # Solve the QP: \hat z = argmin_z 1/2 z^T G z + e^T z  subject to  Cz <= h.
    # NOTE(review): unlike the other heads, e/C/h are passed without detach()
    # here — confirm whether gradients through the constraints are intended.
    qp_sol = QPFunction(verbose=False)(G, e, C, h, dummy.detach(), dummy.detach())

    # Compute the classification score.
    compatibility = computeGramMatrix(support, query) + torch.ones(tasks_per_batch, n_support, n_query).cuda()
    compatibility = compatibility.float()
    compatibility = compatibility.unsqueeze(1).expand(tasks_per_batch, n_way, n_support, n_query)
    qp_sol = qp_sol.float()
    qp_sol = qp_sol.reshape(tasks_per_batch, n_way, n_support)
    A_i = torch.sum(qp_sol, 1)  # (tasks_per_batch, n_support)
    A_i = A_i.unsqueeze(1).expand(tasks_per_batch, n_way, n_support)
    qp_sol = qp_sol.float().unsqueeze(3).expand(tasks_per_batch, n_way, n_support, n_query)
    Y_support_reshaped = Y_support.reshape(tasks_per_batch, n_way, n_support)
    Y_support_reshaped = A_i * Y_support_reshaped
    Y_support_reshaped = Y_support_reshaped.unsqueeze(3).expand(tasks_per_batch, n_way, n_support, n_query)
    logits = (Y_support_reshaped - qp_sol) * compatibility
    logits = torch.sum(logits, 2)

    return logits.transpose(1, 2)
class ClassificationHead(nn.Module):
    """Dispatches to one of the few-shot classification heads by name and
    applies an optional learnable scale (temperature) to its logits."""

    def __init__(self, base_learner='MetaOptNet', enable_scale=True):
        super(ClassificationHead, self).__init__()
        self.base_learner = base_learner
        # Substring match so names like 'MetaOptNet-SVM-CS' select a head.
        if 'SVM-CS' in base_learner:
            self.head = MetaOptNetHead_SVM_CS
        elif 'Ridge' in base_learner:
            self.head = MetaOptNetHead_Ridge
        elif 'R2D2' in base_learner:
            self.head = R2D2Head
        elif 'Proto' in base_learner:
            self.head = ProtoNetHead
        elif 'SVM-He' in base_learner:
            self.head = MetaOptNetHead_SVM_He
        elif 'SVM-WW' in base_learner:
            self.head = MetaOptNetHead_SVM_WW
        else:
            # Was print(...) + assert(False): assert is stripped under -O and
            # left self.head unset, failing later with AttributeError.
            raise ValueError('Cannot recognize the base learner type: {}'.format(base_learner))

        # Add a learnable scale
        self.enable_scale = enable_scale
        self.scale = nn.Parameter(torch.FloatTensor([1.0]))

    def forward(self, query, support, support_labels, n_way, n_shot, **kwargs):
        # BUG FIX: the second branch was `elif self.enable_scale:` (unreachable),
        # so forward() returned None whenever enable_scale was False.
        if self.enable_scale:
            return self.scale * self.head(query, support, support_labels, n_way, n_shot, **kwargs)
        return self.head(query, support, support_labels, n_way, n_shot, **kwargs)
|
"""Implementation of Adversarial Attack
1. Fast Gradient Method
2. Optimization Method
"""
import os
import tensorflow as tf
import numpy as np
import utils.data_prepare as data
import utils.CNN as CNN
import IPython
from tensorflow.python import debug as tf_debug
import h5py as h5
from skimage import io, color
import matplotlib.pyplot as plt
from matplotlib import animation
SEP = os.path.sep
ckpt_filepath = 'data' + SEP + 'checkpoint' + SEP + "save-1000"
def preprocess(X):
    """Append an HSV copy of each RGB image and scale everything into [0, 1].

    X: (n, H, W, 3) batch of RGB images.
    Returns an (n, H, W, 6) float array: channels 0-2 are RGB/255, 3-5 HSV/255.

    NOTE(review): skimage's rgb2hsv already returns values in [0, 1], so the
    HSV channels end up in [0, 1/255] after the division — confirm this
    matches how the network was trained.
    """
    hsv = np.stack([color.rgb2hsv(im) for im in X], axis=0)
    return np.concatenate((X, hsv), axis=-1) / 255.0
#@fgm_with_plot
def fgm_demo(x_input, y, is_targetd=True, alpha=1, iteration=1, save_path = None, sign=True, ):
    """Iterated fast-gradient-sign attack on the depth-estimation FCN.

    Restores the checkpointed model, then for `iteration` steps moves the
    input by alpha * sign(d loss / d x): descending the MSE towards y for a
    targeted attack (y_target = y), ascending away from an all-zero target
    otherwise.  Every few iterations a 2x3 summary figure is saved under
    save_path.  Returns (xadv, y_adv): the final adversarial batch and the
    model's prediction on it.

    NOTE(review): the `sign` parameter is accepted but never used, and
    save_path=None crashes at os.path.exists(None) despite being the
    default — confirm callers always pass a path.
    """
    ckpt_filepath = 'data' + SEP + 'checkpoint' + SEP + "save-1000"
    [x_ph, d_ph, loss_op, d_predict_, train_op_] = CNN.create_fcn(False)
    # Target depth: y itself when targeted, all zeros otherwise.
    y_target = y*int(is_targetd)
    loss_op = tf.losses.mean_squared_error(y_target, d_predict_)
    dy_dx = tf.gradients(loss_op, x_ph)
    dy_dx = dy_dx[0]
    # Targeted: gradient descent toward the target; non-targeted: ascent.
    if is_targetd:
        x_adv = x_ph - alpha*tf.sign(dy_dx)
    else:
        x_adv = x_ph + alpha*tf.sign(dy_dx)
    # Keep the perturbed image in the valid input range.
    x_adv = tf.clip_by_value(x_adv, 0, 1)
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()  # Using `with tf.Session() as sess` caused a checkpoint restore failure here.
    saver.restore(sess, ckpt_filepath)
    xadv = x_input
    if not os.path.exists(save_path):
        print("Make Dir: ", save_path)
        os.mkdir(save_path)
    # Prediction on the clean input, for the comparison figure below.
    y_pred = sess.run([d_predict_],{x_ph:x_input} )
    for i in range(iteration):
        # One FGSM step: feed the current adversarial batch back in.
        [xadv, dydx ] = sess.run([x_adv,dy_dx] , feed_dict = {x_ph: xadv, d_ph:y_target})
        #xadv = xadv[0]
        print("iteration times:", i+1)
        # prediction results of y with adversarial examples
        y_adv = sess.run([d_predict_], {x_ph:xadv})
        if save_path != None:
            # Save a figure for the first 5 iterations, then every 5th.
            if i< 5 or (i+1)%5 == 0:
                i_ = 0  # only visualize the first sample of the batch
                plt.figure()
                plt.subplot(231)
                plt.axis('off')
                plt.imshow(x_input[i_,:,:,0:3])
                plt.title("Input")
                plt.subplot(232)
                plt.axis('off')
                plt.imshow(xadv[i_,:,:,0:3])
                plt.title("Perturbated")
                plt.subplot(233)
                plt.axis('off')
                #dydx_adjustd = dydx + 1
                plt.imshow(dydx[i_,:,:,0])
                plt.title("Perturbation")
                plt.subplot(234)
                plt.axis('off')
                plt.imshow(y[i_,:,:,0],cmap="gray")
                plt.title("Pseudo Depth")
                plt.subplot(235)
                plt.axis('off')
                plt.imshow(y_pred[i_][0,:,:,0], cmap='gray')
                plt.title("Expected Depth")
                plt.subplot(236)
                plt.imshow(y_adv[0][i_,:,:,0], cmap='gray')
                plt.title("Adversarial Depth")
                plt.axis('off')
                #plt.pause(0.5)
                target = "Target"  # unused
                im_name = "{}_Alpha_{}_It_{}.jpg".format("Target" if is_targetd else "NonTarget", alpha, i )
                plt_savedir = os.path.join(save_path,im_name)
                plt.savefig(plt_savedir)
                plt.close()
    sess.close()
    return xadv, y_adv
def main(idx):
    """Run targeted and non-targeted FGM attacks on the idx-th spoof sample.

    Loads the CASIA depth dataset, selects the idx-th sample whose label is 0
    (spoof), preprocesses it, and sweeps both attack modes over several alpha
    values, saving figures under "FGM_<idx>".
    """
    image_data_path = r"D:\Workspace\Projects\Adversarial Attack\Adversarial Attack\data\CASIA_depth.mat"
    print(image_data_path)
    mat = h5.File(image_data_path, 'r')
    X, D, LBL = data.load_h5_data(mat, 'TRAIN', 10000)
    mat.close()

    # Pick the idx-th spoof (label == 0) sample, keeping the batch dimension.
    indexs_of_spoof = np.argwhere(LBL == 0)[:, 0:1]
    index = indexs_of_spoof[idx, 0]
    X = X[index:index + 1]
    D = D[index:index + 1]
    X = preprocess(X)
    print("Data loaded: {}".format(X.shape[0]))

    graph1 = tf.Graph()
    root = "D:\\Workspace\\Projects\\Adversarial Attack\\Adversarial Attack\\data\\checkpoint"
    ckpt_filepath = os.path.join(root, 'save-1000')
    with graph1.as_default():
        iteration = 200
        for is_targetd in [True, False]:
            for alpha in [10, 1, 0.1, 0.01, 0.001, 0.0001]:
                # BUG FIX: the original called `fgm`, which is not defined in
                # this module; the attack is implemented by fgm_demo.
                xadv, pred_adv = fgm_demo(X, D, is_targetd, alpha, iteration, "FGM_" + str(idx))
                pred_adv = pred_adv[0]
                print(xadv.shape)
                print(X.shape)
def fgm_demo(x_input, y, is_targetd=True, alpha=1, iteration=1, save_path = None, sign=True, ):
    """Iterated fast-gradient-sign attack on the depth-estimation FCN.

    (Duplicate of the fgm_demo defined earlier in this file — this later
    definition is the one that takes effect at import time.)

    Restores the checkpointed model, then for `iteration` steps moves the
    input by alpha * sign(d loss / d x): descending the MSE towards y for a
    targeted attack (y_target = y), ascending away from an all-zero target
    otherwise.  Every few iterations a 2x3 summary figure is saved under
    save_path.  Returns (xadv, y_adv): the final adversarial batch and the
    model's prediction on it.

    NOTE(review): the `sign` parameter is accepted but never used, and
    save_path=None crashes at os.path.exists(None) despite being the
    default — confirm callers always pass a path.
    """
    ckpt_filepath = 'data' + SEP + 'checkpoint' + SEP + "save-1000"
    [x_ph, d_ph, loss_op, d_predict_, train_op_] = CNN.create_fcn(False)
    # Target depth: y itself when targeted, all zeros otherwise.
    y_target = y*int(is_targetd)
    loss_op = tf.losses.mean_squared_error(y_target, d_predict_)
    dy_dx = tf.gradients(loss_op, x_ph)
    dy_dx = dy_dx[0]
    # Targeted: gradient descent toward the target; non-targeted: ascent.
    if is_targetd:
        x_adv = x_ph - alpha*tf.sign(dy_dx)
    else:
        x_adv = x_ph + alpha*tf.sign(dy_dx)
    # Keep the perturbed image in the valid input range.
    x_adv = tf.clip_by_value(x_adv, 0, 1)
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()  # Using `with tf.Session() as sess` caused a checkpoint restore failure here.
    saver.restore(sess, ckpt_filepath)
    xadv = x_input
    if not os.path.exists(save_path):
        print("Make Dir: ", save_path)
        os.mkdir(save_path)
    # Prediction on the clean input, for the comparison figure below.
    y_pred = sess.run([d_predict_],{x_ph:x_input} )
    for i in range(iteration):
        # One FGSM step: feed the current adversarial batch back in.
        [xadv, dydx ] = sess.run([x_adv,dy_dx] , feed_dict = {x_ph: xadv, d_ph:y_target})
        #xadv = xadv[0]
        print("iteration times:", i+1)
        # prediction results of y with adversarial examples
        y_adv = sess.run([d_predict_], {x_ph:xadv})
        if save_path != None:
            # Save a figure for the first 5 iterations, then every 5th.
            if i< 5 or (i+1)%5 == 0:
                i_ = 0  # only visualize the first sample of the batch
                plt.figure()
                plt.subplot(231)
                plt.axis('off')
                plt.imshow(x_input[i_,:,:,0:3])
                plt.title("Input")
                plt.subplot(232)
                plt.axis('off')
                plt.imshow(xadv[i_,:,:,0:3])
                plt.title("Perturbated")
                plt.subplot(233)
                plt.axis('off')
                #dydx_adjustd = dydx + 1
                plt.imshow(dydx[i_,:,:,0])
                plt.title("Perturbation")
                plt.subplot(234)
                plt.axis('off')
                plt.imshow(y[i_,:,:,0],cmap="gray")
                plt.title("Pseudo Depth")
                plt.subplot(235)
                plt.axis('off')
                plt.imshow(y_pred[i_][0,:,:,0], cmap='gray')
                plt.title("Expected Depth")
                plt.subplot(236)
                plt.imshow(y_adv[0][i_,:,:,0], cmap='gray')
                plt.title("Adversarial Depth")
                plt.axis('off')
                #plt.pause(0.5)
                target = "Target"  # unused
                im_name = "{}_Alpha_{}_It_{}.jpg".format("Target" if is_targetd else "NonTarget", alpha, i )
                plt_savedir = os.path.join(save_path,im_name)
                plt.savefig(plt_savedir)
                plt.close()
    sess.close()
    return xadv, y_adv
def main(idx):
    """Run targeted and non-targeted FGM attacks on the idx-th spoof sample.

    (Duplicate of the main defined earlier in this file — this later
    definition is the one that takes effect at import time.)
    """
    image_data_path = r"D:\Workspace\Projects\Adversarial Attack\Adversarial Attack\data\CASIA_depth.mat"
    print(image_data_path)
    mat = h5.File(image_data_path, 'r')
    X, D, LBL = data.load_h5_data(mat, 'TRAIN', 10000)
    mat.close()

    # Pick the idx-th spoof (label == 0) sample, keeping the batch dimension.
    indexs_of_spoof = np.argwhere(LBL == 0)[:, 0:1]
    index = indexs_of_spoof[idx, 0]
    X = X[index:index + 1]
    D = D[index:index + 1]
    X = preprocess(X)
    print("Data loaded: {}".format(X.shape[0]))

    graph1 = tf.Graph()
    root = "D:\\Workspace\\Projects\\Adversarial Attack\\Adversarial Attack\\data\\checkpoint"
    ckpt_filepath = os.path.join(root, 'save-1000')
    with graph1.as_default():
        iteration = 200
        for is_targetd in [True, False]:
            for alpha in [10, 1, 0.1, 0.01, 0.001, 0.0001]:
                # BUG FIX: the original called `fgm`, which is not defined in
                # this module; the attack is implemented by fgm_demo.
                xadv, pred_adv = fgm_demo(X, D, is_targetd, alpha, iteration, "FGM_" + str(idx))
                pred_adv = pred_adv[0]
                print(xadv.shape)
                print(X.shape)
if __name__ == '__main__':
    # Attack the first nine spoof samples, one at a time.
    for idx in range(9):
        main(idx)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from exa import DataFrame
import numpy as np
#import pandas as pd
class Gradient(DataFrame):
    """
    Per-atom gradient dataframe.

    Each row carries the gradient components for one atom in one frame.
    Required columns: atomic number ``Z``, atom index ``atom``, gradient
    components ``fx``/``fy``/``fz``, element ``symbol`` and ``frame`` index.
    (Units are not recorded here — presumably whatever the upstream parser
    produced; TODO confirm.)
    """
    # Schema declarations in the style of other exa.DataFrame subclasses;
    # presumably validated by the base class — confirm against exa docs.
    _index = 'gradient'
    _columns = ['Z', 'atom', 'fx', 'fy', 'fz', 'symbol', 'frame']
    _categories = {'frame': np.int64, 'atom': np.int64, 'symbol': str}
|
# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
# Related Topics: Array, Binary Search
# Difficulty: Medium
# Initial thoughts:
# A naive approach is to look at every element of nums to find the beginning
# and end of target. This approach's Time complexity is O(n).
# To reduce the Time complexity to O(log n) we are going to use binary search.
# Let's say we find target at nums[i]. If nums[i-1] === nums[i] we are going to
# binary search again for target between 0 and i and repeat this process until
# we find an index k where either k === 0 or nums[k-1] < nums[k].
# The same goes for the right bound of our range. If nums[i+1] === nums[i]
# we are going to binary search for target between i and nums.length-1 until we
# find an index k where either k === nums.length-1 or nums[k] < nums[k+1].
# This will be a multitude of log ns at most.
# Time complexity: O(log n) where n === nums.length
# Space complexity: O(log n) because of the recursive binary search
from typing import List
import bisect
# Note: Everything after searchRange2 is old and not optimal
# I realize that I've learned some lessons over the past couple of years
class Solution:
    """Find the first and last index of `target` in a sorted list (LeetCode 34)."""

    # Time complexity: O(log n) where n is the length of nums
    # Space complexity: O(1)
    # Implementing my own bisect_left/bisect_right functions
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        def bisect_left(arr, target):
            # First index at which target could be inserted keeping order.
            left, right = 0, len(arr)
            while left < right:
                mid = left + (right - left) // 2
                if target > arr[mid]:
                    left = mid + 1
                else:
                    right = mid
            return left

        def bisect_right(arr, target):
            # One past the last occurrence (rightmost insertion point).
            left, right = 0, len(arr)
            while left < right:
                mid = left + (right - left) // 2
                if target < arr[mid]:
                    right = mid
                else:
                    left = mid + 1
            return left

        if len(nums) == 0:
            return [-1, -1]
        left = bisect_left(nums, target)
        if left >= len(nums) or nums[left] != target:
            return [-1, -1]
        right = bisect_right(nums, target)
        return [left, right - 1]

    # Time complexity: O(log n) where n is the length of nums
    # Space complexity: O(1)
    # Using python's built-in bisect module
    def searchRange2(self, nums: List[int], target: int) -> List[int]:
        if len(nums) == 0:
            return [-1, -1]
        left = bisect.bisect_left(nums, target)
        if left >= len(nums) or nums[left] != target:
            return [-1, -1]
        right = bisect.bisect_right(nums, target)
        return [left, right - 1]

    # Recursive binary search; walk each bound outward with further searches.
    def searchRange3(self, nums: List[int], target: int) -> List[int]:
        def BinarySearch(nums: List[int], target: int, low: int, high: int) -> int:
            if low > high:
                return -1
            mid = low + (high - low) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] > target:
                return BinarySearch(nums, target, low, mid - 1)
            else:
                return BinarySearch(nums, target, mid + 1, high)

        index = BinarySearch(nums, target, 0, len(nums) - 1)
        if index == -1:
            return [-1, -1]
        # Find left bound
        left = index
        while left > 0 and nums[left] == nums[left - 1]:
            left = BinarySearch(nums, target, 0, left - 1)
        # Find right bound
        right = index
        # BUG FIX: the upper bound must be len(nums) - 1 (inclusive); passing
        # len(nums) lets mid reach an out-of-range index.
        while right < len(nums) - 1 and nums[right] == nums[right + 1]:
            right = BinarySearch(nums, target, right + 1, len(nums) - 1)
        return [left, right]

    # Optimization:
    # Using an iterative binary search we can render the space complexity
    # constant, although this arguably gives less readable code.
    # Time Complexity: O(log n)
    # Space Complexity: O(1)
    def searchRange4(self, nums: List[int], target: int) -> List[int]:
        def BinarySearchIterative(nums: List[int], target: int, low: int, high: int) -> int:
            while low <= high:
                mid = low + (high - low) // 2
                if nums[mid] == target:
                    return mid
                elif nums[mid] > target:
                    high = mid - 1
                else:
                    low = mid + 1
            return -1

        # BUG FIX: high must start at len(nums) - 1; the original passed
        # len(nums), which indexed past the end (IndexError whenever target
        # is greater than every element).
        index = BinarySearchIterative(nums, target, 0, len(nums) - 1)
        if index == -1:
            return [-1, -1]
        # Find left bound
        left = index
        while left > 0 and nums[left] == nums[left - 1]:
            left = BinarySearchIterative(nums, target, 0, left - 1)
        # Find right bound
        right = index
        while right < len(nums) - 1 and nums[right] == nums[right + 1]:
            right = BinarySearchIterative(nums, target, right + 1, len(nums) - 1)
        return [left, right]
|
from flask import Flask

# WSGI entry point; named `application` so WSGI servers pick it up by default.
application = Flask(__name__)
# Load settings from the top-level `config` module.
application.config.from_object('config')

# Imported at the bottom to avoid a circular import — presumably app.views
# imports `application` to register routes on it; confirm in app/views.py.
from app import views
|
import json
class Bch_sim:
    """Batch-simulation state holder; currently just loads the terminal list."""

    def List_Terminals(self, file_name):
        """Parse the JSON file at file_name and store the result on self.terminals."""
        with open(file_name) as handle:
            self.terminals = json.load(handle)
# Smoke test: load the terminal list and print it.
# BUG FIX (naming): the original variable was called `sys`, shadowing the
# stdlib module name and inviting confusion with `import sys`.
sim = Bch_sim()
sim.List_Terminals("terminals.json")
print(sim.terminals)
def cal(n, k):
    """Return k plus the number of Collatz steps needed to take n to 1.

    With k=1 this is the classic "cycle length" of n (the count of terms in
    the 3n+1 sequence, including n and the final 1).  Iterative rather than
    recursive, so long sequences cannot hit the recursion limit.
    """
    while n != 1:
        # BUG FIX: use floor division; `n / 2` produced floats under Python 3,
        # so the sequence silently drifted into floating point.
        n = 3 * n + 1 if n % 2 else n // 2
        k += 1
    return k


# Read "i j" pairs until end of input; print the pair followed by the
# maximum cycle length over the (inclusive) range.
while True:
    try:
        s = input()
    except (EOFError, OSError):  # was a bare except; stop only on end of input
        break
    i, j = map(int, s.split())
    print(i, j, end=' ')
    if i > j:
        i, j = j, i
    print(max(cal(x, 1) for x in range(i, j + 1)))
|
# mqtt_log.py Demo/test program for MicroPython asyncio low power operation
# Author: Peter Hinch
# Copyright Peter Hinch 2019 Released under the MIT license
# MQTT Demo publishes an incremental count and the RTC time periodically.
# On my SF_2W board consumption while paused was 170μA.
# Test reception e.g. with:
# mosquitto_sub -h 192.168.0.10 -t result
import rtc_time_cfg
rtc_time_cfg.enabled = True
from pyb import LED, RTC
from umqtt.simple import MQTTClient
import network
import ujson
from local import SERVER, SSID, PW # Local configuration: change this file
import uasyncio as asyncio
try:
    # The fast_io fork of uasyncio exposes a `version` attribute; reading it
    # on the mainline release raises AttributeError, which we convert into a
    # clear startup error below.
    if asyncio.version[0] != 'fast_io':
        raise AttributeError
except AttributeError:
    raise OSError('This requires fast_io fork of uasyncio.')
from rtc_time import Latency
def publish(s):
    """Open a fresh MQTT connection, publish s on topic b'result', and close.

    A short-lived connection per message keeps no socket open between the
    two-minute publishing intervals.
    """
    client = MQTTClient('umqtt_client', SERVER)
    client.connect()
    client.publish(b'result', s.encode('UTF8'))
    client.disconnect()
async def main(loop):
    """Connect to WiFi, then publish a counter and the RTC time every 2 minutes.

    LED protocol: red on = starting (or failed), green toggling = connecting,
    solid green for 1 s = connected (then switched off to save power).
    """
    rtc = RTC()
    red = LED(1)
    red.on()
    grn = LED(2)
    sta_if = network.WLAN()
    sta_if.active(True)
    sta_if.connect(SSID, PW)
    # Status 1/2 mean "still connecting"; see
    # https://github.com/micropython/micropython/issues/4682
    while sta_if.status() in (1, 2):
        await asyncio.sleep(1)
        grn.toggle()
    if sta_if.isconnected():
        red.off()
        grn.on()
        await asyncio.sleep(1)  # 1s of green == success.
        grn.off()  # Conserve power
        # Latency(2000): presumably allows the scheduler to pause up to 2 s
        # for low-power operation — confirm against rtc_time docs.
        Latency(2000)
        count = 0
        while True:
            publish(ujson.dumps([count, rtc.datetime()]))
            count += 1
            await asyncio.sleep(120)  # 2 mins
    else:  # Fail to connect
        red.on()
        grn.off()
# Run the demo; once connected, main() loops forever publishing readings.
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
# -*- coding=utf-8 -*-
'''
有两个容量分别为 x升 和 y升 的水壶以及无限多的水。请判断能否通过使用这两个水壶,从而可以得到恰好 z升 的水?
如果可以,最后请用以上水壶中的一或两个来盛放取得的 z升 水。
你允许:
装满任意一个水壶
清空任意一个水壶
从一个水壶向另外一个水壶倒水,直到装满或者倒空
示例1: (From the famous "Die Hard" example)
输入: x = 3, y = 5, z = 4
输出: True
'''
import copy
class Solution(object):
    """BFS over (jug A, jug B) states to decide whether z litres is measurable."""

    def canMeasureWater(self, x, y, z):
        """
        :type x: int capacity of jug A
        :type y: int capacity of jug B
        :type z: int target amount
        :rtype: bool
        """
        seen = [[0, 0]]      # every state reached so far
        frontier = [[0, 0]]  # states discovered in the previous round
        while frontier:
            discovered = []
            for state in frontier:
                moves = (self.EmptyA(state), self.EmptyB(state),
                         self.FullA(x, state), self.FullB(y, state),
                         self.PullAtoB(x, y, state), self.PullBtoA(x, y, state))
                for nxt in moves:
                    if nxt not in seen:
                        seen.append(nxt)
                        discovered.append(nxt)
            frontier = discovered
        # z is measurable if one jug, or both jugs together, can hold exactly z.
        amounts = []
        for a, b in seen:
            for value in (a, b, a + b):
                if value not in amounts:
                    amounts.append(value)
        return z in amounts

    def EmptyA(self, situationNow):
        """Dump jug A."""
        state = list(situationNow)
        state[0] = 0
        return state

    def EmptyB(self, situationNow):
        """Dump jug B."""
        state = list(situationNow)
        state[1] = 0
        return state

    def FullA(self, x, situationNow):
        """Fill jug A to its capacity x."""
        state = list(situationNow)
        state[0] = x
        return state

    def FullB(self, y, situationNow):
        """Fill jug B to its capacity y."""
        state = list(situationNow)
        state[1] = y
        return state

    def PullAtoB(self, x, y, situationNow):
        """Pour A into B until B is full or A is empty."""
        a, b = situationNow
        poured = min(a, y - b)
        return [a - poured, b + poured]

    def PullBtoA(self, x, y, situationNow):
        """Pour B into A until A is full or B is empty."""
        a, b = situationNow
        poured = min(b, x - a)
        return [a + poured, b - poured]
if __name__ == "__main__":
    # BUG FIX: the original used the Python 2 `print` statement, which is a
    # SyntaxError under Python 3. Use the print() function (valid in both).
    solver = Solution()
    print(solver.canMeasureWater(1, 2, 4))
|
from trame.internal import (
change, Controller, flush_state, get_cli_parser, get_state, get_version,
is_dirty, is_dirty_all, port, start, State, stop, trigger, update_state
)
from trame.layouts import update_layout
# Resolve the installed trame version once at import time.
__version__ = get_version()
# Singleton accessor objects that make up the package's public API.
state = State()
"""This object provides pythonic access to the state
For instance, these getters are the same:
>>> field, = get_state("field")
>>> field = state.field
As are these setters:
>>> update_state("field", value)
>>> state.field = value
``get_state()`` should be used instead if more than one argument is to be
passed, and ``update_state()`` should be used instead to specify additional
arguments (e.g. ``force=True``).
The state may also be accessed and updated similar to dictionaries:
>>> value = state["field"]
>>> state["field"] = value
>>> state.update({"field": value})
This object may be imported via
>>> from trame import state
"""
controller = Controller()
"""The controller is a container for function proxies
The function proxies may be used as callbacks even though the function has
not yet been defined. The function may also be re-defined. For example:
>>> from trame import controller as ctrl
>>> layout = SinglePage("Controller test")
>>> with layout.toolbar:
... vuetify.VSpacer()
... vuetify.VBtn("Click Me", click=ctrl.on_click) # not yet defined
>>> ctrl.on_click = lambda: print("Hello, Trame!") # on_click is now defined
This can be very useful for large projects where the functions may be defined
in separate files after the UI has been constructed, or for re-defining
callbacks when conditions in the application change.
"""
# Explicit public API; doubles as the documentation ordering.
__all__ = [
    # Order these how we want them to show up in the docs
    # Server-related
    "start",
    "stop",
    "port",
    # State-related
    "state",
    "update_state",
    "get_state",
    "flush_state",
    "is_dirty",
    "is_dirty_all",
    "change",
    # Trigger-related
    "trigger",
    "controller",
    # Layout-related
    "update_layout",
    # CLI-related
    "get_cli_parser",
    # These are not exposed in the docs
    "__version__",
]
|
# ----------------------------- #
# Collection of functions I use #
# ----------------------------- #
import numpy as np
from numba import njit
# @njit
def gini(x, w=None):
    '''Compute the Gini coefficient of array ``x``.

    If ``w`` is given it is used as per-observation weights; otherwise all
    observations count equally. Returns a float in [0, 1] for non-negative
    inputs (0 = perfect equality).
    '''
    # Work on numpy arrays regardless of the input container.
    values = np.asarray(x)
    if w is None:
        # Unweighted case: closed-form expression on the sorted values.
        ordered = np.sort(values)
        count = len(values)
        running = np.cumsum(ordered, dtype=float)
        return (count + 1 - 2 * np.sum(running) / running[-1]) / count
    weights = np.asarray(w)
    order = np.argsort(values)
    ordered = values[order]
    ordered_w = weights[order]
    # Float dtype guards against integer overflow in the cumulative sums.
    cum_w = np.cumsum(ordered_w, dtype=float)
    cum_xw = np.cumsum(ordered * ordered_w, dtype=float)
    return (np.sum(cum_xw[1:] * cum_w[:-1] - cum_xw[:-1] * cum_w[1:])
            / (cum_xw[-1] * cum_w[-1]))
def Xi(m, h_underbar):
    '''Relative gain in the toy model for cash ``m`` and housing floor
    ``h_underbar``, with fixed calibration parameters.'''
    # Fixed calibration parameters of the toy model.
    alpha = 0.8
    beta = .9
    p = 1.0
    r = 0.03
    dp = 1
    share = alpha / (alpha + beta)
    numerator = (1 - alpha) * (m / p) - alpha * h_underbar
    denominator = (m * (1 - share - r * (1 - alpha))
                   - p * h_underbar * (alpha + share))
    return dp * numerator / denominator
|
from typing import Union, List
class InputExample:
    """One input example: its texts, a label, and a unique id.

    Whitespace is stripped from every text so downstream tokenizers see
    clean input.
    """

    def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
        """Create an InputExample.

        :param guid:
            unique id for the example
        :param texts:
            the texts for the example (``str.strip()`` is applied to each)
        :param label:
            the label for the example
        """
        self.guid = guid
        self.texts = [t.strip() for t in texts]
        self.label = label
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
This modules exposes geometry data for Unites States. It exposes a dictionary 'data' which is
indexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the
associated value:
data['CA']['name']
data['CA']['region']
data['CA']['lats']
data['CA']['lons']
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import codecs
import csv
import gzip
import xml.etree.ElementTree as et
# Bokeh imports
from ..util.sampledata import package_path
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    '''
    Load US state boundary geometries from the bundled
    ``US_Regions_State_Boundaries.csv.gz`` sample-data file.

    Returns a dict keyed by two-letter state code; each value holds the
    state's name, census region, and boundary latitude/longitude lists.
    '''
    nan = float('NaN')
    data = {}
    with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:
        decoded = codecs.iterdecode(f, "utf-8")
        # Skip the CSV header row.
        next(decoded)
        reader = csv.reader(decoded, delimiter=",", quotechar='"')
        for row in reader:
            region, name, code, geometry, dummy = row
            # The geometry column is a KML fragment; collect every outer
            # polygon ring it contains.
            xml = et.fromstring(geometry)
            lats = []
            lons = []
            for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
                if i > 0:
                    # NaN separators between polygons let plotting code
                    # render disjoint patches from one flat array.
                    lats.append(nan)
                    lons.append(nan)
                coords = (c.split(',')[:2] for c in poly.text.split())
                # KML stores coordinates as "lon,lat"; swap into (lat, lon).
                lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in
                    coords]))
                lats.extend(lat)
                lons.extend(lon)
            data[code] = {
                'name' : name,
                'region' : region,
                'lats' : lats,
                'lons' : lons,
            }
    return data
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Loaded eagerly at import time (first access pays the parse cost).
data = _read_data()
|
from spacy.lang.ja import Japanese
from spacy.tokens import Token
nlp = Japanese()
# Register an extension attribute "is_country" on Token, defaulting to False
# (NOTE: the ____ placeholders are exercise blanks to be filled in).
____.____(____, ____=____)
# Process the text and set is_country to True for the token "スペイン" (Spain)
doc = nlp("私はスペインに住んでいます。")
____ = True
# Print the text and the is_country attribute for every token
print([(____, ____) for token in doc])
|
# Show rows where measured temperature is above 30 while "feels like"
# temperature is below 10.
# NOTE(review): `display` and `bikeshare` come from the surrounding notebook
# session — this line assumes an IPython context; confirm before reuse.
display(bikeshare[(bikeshare['temp']>30) & (bikeshare['atemp']<10)])
|
"""WSGI config for grpc_python_example.apis.http.
Exposes the WSGI callable as a module-level variable named `app`.
"""
import os
from grpc_python_example.apis.http import create_app
# pylint: disable=invalid-name
env = os.environ.get('ENV', 'development')
app = create_app('grpc_python_example.apis.http.settings.%sConfig' % env.capitalize())
if __name__ == '__main__':
app.run()
|
import csv
from datetime import datetime
import json
import os
import pickle
import psutil
import random
import re
import socket
import subprocess
from job import Job
from job_table import JobTable
from policies import allox, fifo, finish_time_fairness, gandiva, isolated, \
max_min_fairness, max_min_fairness_water_filling, max_sum_throughput, \
min_total_duration
def _generate_scale_factor(rng):
# Sample the scale factor from the Philly distribution.
scale_factor = 1
r = rng.uniform(0, 1)
if 0.7 <= r <= 0.8:
scale_factor = 2
elif 0.8 <= r <= 0.95:
scale_factor = 4
elif 0.95 <= r:
scale_factor = 8
return scale_factor
def _generate_duration(rng):
# Sample the job duration from the Philly distribution.
if rng.random() >= 0.8:
run_time = 60 * (10 ** rng.uniform(3, 4))
else:
run_time = 60 * (10 ** rng.uniform(1.5, 3))
return run_time
def generate_job(throughputs, reference_worker_type='v100', rng=None,
                 job_id=None, fixed_job_duration=None,
                 generate_multi_gpu_jobs=False,
                 generate_multi_priority_jobs=False, run_dir=None,
                 scale_factor_generator_func=_generate_scale_factor,
                 duration_generator_func=_generate_duration,
                 scale_factor_rng=None, duration_rng=None, SLO_rng=None,
                 always_generate_scale_factor=True):
    """Generates a new job.
    Args:
        throughputs: A dict containing pre-measured throughputs.
        reference_worker_type: The worker type to use when calculating steps.
        rng: A random number generator for selecting job parameters.
        job_id: The job's ID.
        fixed_job_duration: If set, fixes the duration to the specified value.
        generate_multi_gpu_jobs: If set, generate a scale factor >= 1.
        generate_multi_priority_jobs: If set, generate a priority >= 1.
        run_dir: The directory to run the job from.
        scale_factor_generator_func: A function that accepts an RNG parameter
                                     and returns a job size.
        duration_generator_func: A function that accepts an RNG parameter and
                                 returns a job duration in seconds.
        scale_factor_rng: A random number generator specifically for
                          generating scale factors.
        duration_rng: A random number generator specifically for generating
                      durations.
        SLO_rng: If set, generate an SLO >= 1 using this RNG.
        always_generate_scale_factor: If set, generate a scale factor
                                      regardless of whether user has
                                      requested multi-GPU jobs.
    Returns:
        The generated Job.
    """
    # Dedicated RNGs default to the shared one so results stay reproducible
    # when callers only seed `rng`.
    if rng is None:
        rng = random.Random()
    if scale_factor_rng is None:
        scale_factor_rng = rng
    if duration_rng is None:
        duration_rng = rng
    job_template = None
    if always_generate_scale_factor:
        scale_factor = scale_factor_generator_func(scale_factor_rng)
    else:
        # NOTE: We select the job template here to maintain backwards
        # compatability with scripts/utils/generate_trace.py
        job_template = rng.choice(JobTable)
        if generate_multi_gpu_jobs and job_template.distributed:
            scale_factor = scale_factor_generator_func(scale_factor_rng)
        else:
            scale_factor = 1
    if fixed_job_duration:
        run_time = fixed_job_duration
    else:
        run_time = duration_generator_func(duration_rng)
    # Multi-GPU jobs must be explicitly requested; otherwise force 1 GPU.
    if not generate_multi_gpu_jobs:
        scale_factor = 1
    assert(run_time > 0)
    assert(scale_factor >= 1 and scale_factor <= 8)
    # Sample the job type.
    if job_template is None:
        # Re-draw until the template is compatible with the chosen scale
        # factor (distributed templates only for multi-GPU jobs).
        while True:
            job_template = rng.choice(JobTable)
            if (scale_factor == 1 or
                (scale_factor > 1 and job_template.distributed)):
                break
    job_type = job_template.model
    # Complete the job command with the run directory.
    command = job_template.command
    if run_dir is not None:
        if job_template.needs_data_dir:
            command = command % (run_dir, run_dir)
        else:
            command = command % (run_dir)
    # Compute the number of steps the job will run for given its duration.
    key = (job_type, scale_factor)
    assert(key in throughputs[reference_worker_type])
    # 'null' holds the isolated (non-packed) throughput in steps/second.
    num_steps = run_time * throughputs[reference_worker_type][key]['null']
    assert(num_steps > 0)
    # Optionally assign a priority to the job.
    priority_weight = 1.0
    if generate_multi_priority_jobs:
        r = rng.uniform(0, 1)
        if 0.0 <= r <= 0.2:
            priority_weight = 5.0
    # Optionally assign an SLO to the job.
    SLO = None
    if SLO_rng is not None:
        r = SLO_rng.uniform(0, 1)
        if 0.0 <= r < 0.33:
            SLO = 1.2
        elif 0.33 <= r < 0.67:
            SLO = 2.0
        else:
            SLO = 10.0
    job = Job(job_id=job_id,
              job_type=job_type,
              command=command,
              working_directory=job_template.working_directory,
              num_steps_arg=job_template.num_steps_arg,
              total_steps=num_steps,
              duration=run_time,
              scale_factor=scale_factor,
              priority_weight=priority_weight,
              SLO=SLO,
              needs_data_dir=job_template.needs_data_dir)
    return job
def load_philly_job_distribution():
    """Load the pickled Philly job-duration distribution.

    NOTE(review): the path is relative — this assumes the process runs from
    the directory containing 'philly_job_distribution.pickle'; confirm.
    """
    with open('philly_job_distribution.pickle', 'rb') as f:
        return pickle.load(f)
def get_ip_address():
    """Return the IPv4 address that this host's name resolves to."""
    return socket.gethostbyname(socket.gethostname())
def get_num_gpus():
    """Return the number of GPUs on this machine via `nvidia-smi -L`.

    Raises subprocess.CalledProcessError if `nvidia-smi` exits non-zero
    (e.g. when no NVIDIA driver is installed).
    """
    command = 'nvidia-smi -L'
    output = subprocess.run(command, stdout=subprocess.PIPE, check=True,
                            shell=True).stdout.decode('utf-8').strip()
    # `nvidia-smi -L` prints exactly one line per GPU.
    return len(output.split('\n'))
def get_pid_for_job(command):
    """Return the smallest PID whose full command line equals `command`.

    Raises ValueError (from `min`) if no matching process exists, matching
    the original behavior.
    """
    pids = []
    for proc in psutil.process_iter():
        try:
            cmdline = ' '.join(proc.cmdline())
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Processes can exit or become inaccessible while we iterate;
            # skip them instead of crashing the scan.
            continue
        if cmdline == command:
            pids.append(proc.pid)
    return min(pids)
def get_gpu_processes():
    """Map GPU id -> list of PIDs currently using that GPU.

    Parses the "Processes" table at the bottom of `nvidia-smi` output; the
    CUDA MPS server process is excluded from the lists.
    """
    output = subprocess.check_output('nvidia-smi').decode('utf-8')
    # Raw string: '\d'/'\w' in a plain literal are invalid escape sequences
    # (SyntaxWarning on modern Python). Compile once, outside the loop.
    process_re = re.compile(r'(\d+) +(\d+) +(\w+) +(.+) +(\d+)MiB')
    gpu_processes = {}
    in_process_table = False
    for line in output.split('\n'):
        if 'Processes' in line:
            in_process_table = True
            continue
        if not in_process_table:
            continue
        res = process_re.search(line)
        if res is None:
            continue
        gpu_id = int(res.group(1))
        # Register the GPU even if its only process is the MPS server,
        # mirroring the original behavior.
        if gpu_id not in gpu_processes:
            gpu_processes[gpu_id] = []
        pid = int(res.group(2))
        process_name = res.group(4)
        if process_name != 'nvidia-cuda-mps-server':
            gpu_processes[gpu_id].append(pid)
    return gpu_processes
def get_available_policies():
    """Return the names of all scheduling policies `get_policy` can build.

    Order matches the dispatch order in `get_policy`.
    """
    policies = ['allox']
    policies += ['fifo', 'fifo_perf', 'fifo_packed']
    policies += ['finish_time_fairness',
                 'finish_time_fairness_perf',
                 'finish_time_fairness_packed']
    policies += ['gandiva', 'isolated']
    policies += ['max_min_fairness',
                 'max_min_fairness_perf',
                 'max_min_fairness_packed']
    policies += ['max_min_fairness_water_filling',
                 'max_min_fairness_water_filling_perf',
                 'max_min_fairness_water_filling_packed']
    policies += ['max_sum_throughput_perf',
                 'max_sum_throughput_normalized_by_cost_perf',
                 'max_sum_throughput_normalized_by_cost_perf_SLOs',
                 'max_sum_throughput_normalized_by_cost_packed_SLOs']
    policies += ['min_total_duration',
                 'min_total_duration_perf',
                 'min_total_duration_packed']
    return policies
def read_per_instance_type_spot_prices_aws(directory):
    """Read AWS spot-price history dumps under `directory`/us-east-1.

    Each file is a JSON object with a 'SpotPriceHistory' list; entries are
    grouped by their 'InstanceType'.
    """
    # TODO: Make this flexible.
    region_dir = os.path.join(directory, 'us-east-1')
    prices = {}
    for filename in os.listdir(region_dir):
        with open(os.path.join(region_dir, filename), 'r') as f:
            history = json.load(f)['SpotPriceHistory']
        for entry in history:
            prices.setdefault(entry['InstanceType'], []).append(entry)
    return prices
def read_per_instance_type_spot_prices_azure(directory):
    """Read Azure spot-price CSVs from `directory`.

    Each file is named '<zone>.csv' with a header row of instance types and
    data rows of 'MM/DD/YYYY' dates plus price strings. Returns a dict of
    instance type -> zone -> list of (datetime, raw price string).
    """
    prices = {}
    for filename in os.listdir(directory):
        zone = filename.replace(".csv", "")
        with open(os.path.join(directory, filename), 'r') as f:
            reader = csv.reader(f)
            for row_idx, row in enumerate(reader):
                if row_idx == 0:
                    # Header: first column is the date, rest are instances.
                    header = row
                    for instance_type in header[1:]:
                        prices.setdefault(instance_type, {})
                else:
                    date = datetime.strptime(row[0], '%m/%d/%Y')
                    for instance_type, raw_price in zip(header[1:], row[1:]):
                        prices[instance_type].setdefault(zone, []).append(
                            (date, raw_price))
    return prices
def read_per_instance_type_spot_prices_json(directory):
    """Assemble spot prices for all clouds from logs under `directory`.

    AWS and Azure prices are parsed from their respective log directories;
    GCP preemptible prices are fixed constants ($/hour).
    """
    return {
        'aws': read_per_instance_type_spot_prices_aws(
            os.path.join(directory, 'aws/logs')),
        'azure': read_per_instance_type_spot_prices_azure(
            os.path.join(directory, 'azure/logs')),
        'gcp': {
            'v100': 0.74,
            'p100': 0.43,
            'k80': 0.135
        },
    }
def get_latest_price_for_worker_type_aws(worker_type, current_time,
                                         per_instance_type_spot_prices):
    """Return the cheapest AWS spot price for `worker_type` at `current_time`.

    `current_time` is measured in seconds since the earliest timestamp in
    the price history. For every availability zone, the most recent price
    at or before `current_time` is found; the minimum across zones is
    returned. P100 prices are proxied as 1.5x the K80 instance price.
    """
    # TODO: Make this function more efficient.
    if worker_type == 'v100':
        instance_type = 'p3.2xlarge'
    elif worker_type == 'p100':
        # NOTE: AWS does not have single P100 instances, use 1.5x K80 price
        # as a proxy.
        instance_type = 'p2.xlarge'
    elif worker_type == 'k80':
        instance_type = 'p2.xlarge'
    else:
        # Fail loudly instead of with a NameError further down.
        raise ValueError('Unknown worker type: %s' % worker_type)
    entries = per_instance_type_spot_prices[instance_type]
    # PERF FIX: the original re-sorted `entries` once per availability zone;
    # the sort is loop-invariant, so do it once up front.
    entries.sort(key=lambda x: datetime.strptime(x['Timestamp'],
                                                 '%Y-%m-%dT%H:%M:%S.000Z'))
    earliest = datetime.strptime(entries[0]['Timestamp'],
                                 '%Y-%m-%dT%H:%M:%S.000Z')
    latest_prices = []
    for availability_zone in set(x['AvailabilityZone'] for x in entries):
        latest_price = None
        for x in entries:
            if x['AvailabilityZone'] != availability_zone:
                continue
            offset = (datetime.strptime(x['Timestamp'],
                                        '%Y-%m-%dT%H:%M:%S.000Z') -
                      earliest).total_seconds()
            # Entries are sorted, so the first future entry ends the scan
            # (unless no price has been seen yet for this zone).
            if offset > current_time and latest_price is not None:
                break
            latest_price = float(x['SpotPrice'])
        assert(latest_price is not None)
        latest_prices.append(latest_price)
    # NOTE: AWS does not have single P100 instances, use 1.5x K80 price
    # as a proxy.
    if worker_type == 'p100':
        return min(latest_prices) * 1.5
    else:
        return min(latest_prices)
def get_latest_price_for_worker_type_gcp(worker_type, current_time,
                                         per_instance_type_spot_prices):
    """Return the GCP price for `worker_type`.

    GCP preemptible prices in this table are flat constants, so
    `current_time` is ignored.
    """
    price = per_instance_type_spot_prices[worker_type]
    return price
def get_latest_price_for_worker_type_azure(worker_type, current_time,
                                           per_instance_type_spot_prices):
    """Return the cheapest Azure spot price for `worker_type` at `current_time`.

    `current_time` is seconds since the earliest timestamp across all zones.
    For each zone, the most recent non-empty price at or before
    `current_time` is taken; the minimum across zones is returned.
    """
    if worker_type == 'k80':
        instance_type = 'NC6'
    elif worker_type == 'p100':
        instance_type = 'NC6s v2'
    elif worker_type == 'v100':
        instance_type = 'NC6s v3'
    else:
        # Fail loudly instead of with a NameError further down.
        raise ValueError('Unknown worker type: %s' % worker_type)
    zone_prices = per_instance_type_spot_prices[instance_type]
    earliest_timestamps = []
    for zone in zone_prices:
        zone_prices[zone].sort(key=lambda x: x[0])
        earliest_timestamps.append(zone_prices[zone][0][0])
    earliest_timestamp = min(earliest_timestamps)
    latest_prices = []
    for zone in zone_prices:
        latest_price = None
        for date, raw_price in zone_prices[zone]:
            offset = (date - earliest_timestamp).total_seconds()
            if offset > current_time and latest_price is not None:
                break
            elif raw_price == '':
                # Missing data point for this date; keep the previous price.
                continue
            else:
                # Remove '$' character.
                latest_price = float(raw_price[1:])
        # BUG FIX: the original never appended to `latest_prices` and
        # returned the last zone's price. Mirror the AWS helper: collect a
        # price per zone and return the minimum across zones.
        if latest_price is not None:
            latest_prices.append(latest_price)
    return min(latest_prices)
def get_latest_price_for_worker_type(worker_type, current_time,
                                     per_instance_type_spot_prices,
                                     available_clouds):
    """Return the minimum spot price for `worker_type` across the clouds
    listed in `available_clouds` ('aws', 'gcp', 'azure')."""
    assert(len(available_clouds) > 0)
    cloud_helpers = {
        'aws': get_latest_price_for_worker_type_aws,
        'gcp': get_latest_price_for_worker_type_gcp,
        'azure': get_latest_price_for_worker_type_azure,
    }
    prices = []
    for cloud in ('aws', 'gcp', 'azure'):
        if cloud in available_clouds:
            prices.append(cloud_helpers[cloud](
                worker_type, current_time,
                per_instance_type_spot_prices[cloud]))
    return min(prices)
def parse_job_type_str(job_type):
    """Parse a display string like 'ResNet-18 (scale factor 2)'.

    Returns:
        None if `job_type` is None; otherwise a (model, scale_factor)
        tuple, with scale_factor defaulting to 1 when no suffix is present.
    """
    if job_type is None:
        return None
    # Raw string: '\(' and '\d' in a plain literal are invalid escape
    # sequences (SyntaxWarning on modern Python).
    match = re.match(r'(.*) \(scale factor (\d+)\)', job_type)
    if match is None:
        return (job_type, 1)
    model = match.group(1)
    scale_factor = int(match.group(2))
    return (model, scale_factor)
def parse_job_type_tuple(job_type):
    """Parse a stringified job-type tuple like "('ResNet-18', 2)".

    Returns:
        A (model, scale_factor) tuple, or None if the string does not match.
    """
    # Raw string avoids invalid escape sequences in the pattern.
    match = re.match(r"\('(.*)', (\d+)\)", job_type)
    if match is None:
        return None
    model = match.group(1)
    scale_factor = int(match.group(2))
    return (model, scale_factor)
def stringify_throughputs(throughputs):
    """Return a copy of `throughputs` with every non-string key stringified.

    Used to make the worker_type -> job key -> other-job key mapping
    JSON-serializable (tuple keys become their str() representation).
    """
    return {
        worker_type: {
            str(key): {
                str(other_key): throughputs[worker_type][key][other_key]
                for other_key in throughputs[worker_type][key]
            }
            for key in throughputs[worker_type]
        }
        for worker_type in throughputs
    }
def read_all_throughputs_json_v2(file_name):
    """Load measured throughputs from JSON, converting stringified job-type
    tuples back into real (model, scale_factor) tuples.

    The special key 'null' (isolated throughput) is kept as-is.
    """
    with open(file_name, 'r') as f:
        raw_throughputs = json.load(f)
    parsed_throughputs = {}
    for worker_type, per_job in raw_throughputs.items():
        worker_entry = {}
        for job_type_str, others in per_job.items():
            key = parse_job_type_tuple(job_type_str)
            assert(key is not None)
            entry = {}
            for other_str, value in others.items():
                if other_str == 'null':
                    other_key = other_str
                else:
                    other_key = parse_job_type_tuple(other_str)
                    assert(other_key is not None)
                entry[other_key] = value
            worker_entry[key] = entry
        parsed_throughputs[worker_type] = worker_entry
    return parsed_throughputs
def read_all_throughputs_json(throughputs_file):
    """Load the raw throughputs JSON file and return it unmodified."""
    with open(throughputs_file, 'r') as f:
        return json.load(f)
def get_policy(policy_name, solver=None, seed=None,
               priority_reweighting_policies=None,
               num_threads=None):
    """Instantiate the scheduling policy named `policy_name`.

    Args:
        policy_name: One of the names from `get_available_policies`, or a
            parameterized variant such as 'allox_alpha=<float>'.
        solver: Optional solver passed to optimization-based policies.
        seed: RNG seed for randomized policies (fifo, gandiva).
        priority_reweighting_policies: Forwarded to water-filling policies.
        num_threads: Solver thread count, where supported.

    Raises:
        ValueError: If `policy_name` is not recognized.
    """
    if policy_name.startswith('allox'):
        if policy_name == 'allox':
            alpha = 1.0
        else:
            # 'allox_alpha=<x>' encodes the fairness knob in the name.
            alpha = float(policy_name.split("allox_alpha=")[1])
        policy = allox.AlloXPolicy(alpha=alpha)
    elif policy_name == 'fifo':
        policy = fifo.FIFOPolicy(seed=seed)
    elif policy_name == 'fifo_perf':
        policy = fifo.FIFOPolicyWithPerf()
    elif policy_name == 'fifo_packed':
        policy = fifo.FIFOPolicyWithPacking()
    elif policy_name == 'finish_time_fairness':
        policy = finish_time_fairness.FinishTimeFairnessPolicy(solver=solver,
                                                               num_threads=num_threads)
    elif policy_name == 'finish_time_fairness_perf':
        policy = \
            finish_time_fairness.FinishTimeFairnessPolicyWithPerf(solver=solver,
                                                                  num_threads=num_threads)
    elif policy_name == 'finish_time_fairness_packed':
        policy = \
            finish_time_fairness.FinishTimeFairnessPolicyWithPacking(
                solver=solver, num_threads=num_threads)
    elif policy_name == 'gandiva':
        policy = gandiva.GandivaPolicy(seed=seed)
    elif policy_name == 'isolated':
        policy = isolated.IsolatedPolicy()
    elif policy_name == 'max_min_fairness':
        policy = max_min_fairness.MaxMinFairnessPolicy(solver=solver)
    elif policy_name == 'max_min_fairness_perf':
        policy = max_min_fairness.MaxMinFairnessPolicyWithPerf(solver=solver,
                                                               num_threads=num_threads)
    elif policy_name == 'max_min_fairness_packed':
        policy = \
            max_min_fairness.MaxMinFairnessPolicyWithPacking(solver=solver,
                                                             num_threads=num_threads)
    elif policy_name == 'max_min_fairness_water_filling':
        policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicy(
            priority_reweighting_policies=priority_reweighting_policies)
    elif policy_name == 'max_min_fairness_water_filling_perf':
        policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicyWithPerf(
            priority_reweighting_policies=priority_reweighting_policies)
    elif policy_name == 'max_min_fairness_water_filling_packed':
        policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicyWithPacking(
            priority_reweighting_policies=priority_reweighting_policies)
    elif policy_name == 'max_sum_throughput_perf':
        policy = max_sum_throughput.ThroughputSumWithPerf(solver=solver,
                                                          num_threads=num_threads)
    elif policy_name == 'max_sum_throughput_normalized_by_cost_perf':
        policy = max_sum_throughput.ThroughputNormalizedByCostSumWithPerf(
            solver=solver, num_threads=num_threads)
    elif policy_name == 'max_sum_throughput_normalized_by_cost_perf_SLOs':
        policy = max_sum_throughput.ThroughputNormalizedByCostSumWithPerfSLOs(
            solver=solver, num_threads=num_threads)
    elif policy_name == 'max_sum_throughput_normalized_by_cost_packed_SLOs':
        policy = \
            max_sum_throughput.ThroughputNormalizedByCostSumWithPackingSLOs(
                solver=solver,
                num_threads=num_threads)
    elif policy_name == 'min_total_duration':
        policy = min_total_duration.MinTotalDurationPolicy(solver=solver,
                                                           num_threads=num_threads)
    elif policy_name == 'min_total_duration_perf':
        policy = min_total_duration.MinTotalDurationPolicyWithPerf(solver=solver,
                                                                   num_threads=num_threads)
    elif policy_name == 'min_total_duration_packed':
        policy = \
            min_total_duration.MinTotalDurationPolicyWithPacking(solver=solver,
                                                                 num_threads=num_threads)
    else:
        raise ValueError('Unknown policy!')
    return policy
def parse_trace(trace_file):
    """Parse a tab-separated trace file into jobs and their arrival times.

    Each line holds, in order: job_type, command, working_directory,
    num_steps_arg, needs_data_dir, total_steps, scale_factor,
    priority_weight, SLO, arrival_time.

    Returns:
        A tuple (jobs, arrival_times) of equal-length lists.
    """
    jobs = []
    arrival_times = []
    with open(trace_file, 'r') as f:
        for line in f:
            (job_type, command, working_directory, num_steps_arg,
             needs_data_dir, total_steps, scale_factor, priority_weight, SLO,
             arrival_time) = line.split('\t')
            assert(int(scale_factor) >= 1)
            jobs.append(Job(job_id=None,
                            job_type=job_type,
                            command=command,
                            working_directory=working_directory,
                            needs_data_dir=bool(int(needs_data_dir)),
                            num_steps_arg=num_steps_arg,
                            total_steps=int(total_steps),
                            duration=None,
                            scale_factor=int(scale_factor),
                            priority_weight=float(priority_weight),
                            SLO=float(SLO)))
            arrival_times.append(float(arrival_time))
    return jobs, arrival_times
def print_allocation(allocation, current_time=None):
    """Prints the allocation.
    Debug method used for printing the allocation of each job on each
    worker type.
    """
    print('=' * 80)
    if current_time is not None:
        print('Allocation\t(Current_time: %f)' % (current_time))
    print('-' * 80)
    # One line per job, worker types in sorted order for stable output.
    for job_id in sorted(allocation):
        parts = ['Job ID %s:' % (job_id)]
        for worker_type in sorted(allocation[job_id]):
            parts.append('[%s: %f]' % (worker_type,
                                       allocation[job_id][worker_type]))
        print(' '.join(parts))
    print('=' * 80)
|
# -*- coding: utf-8 -*-
import json
from collections import OrderedDict
from typing import List
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from dash import dash
from dash.dependencies import Input, Output, State
from zvdata import IntervalLevel
from zvdata.app import app
from zvdata.chart import Drawer
from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns
from zvdata.normal_data import NormalData, IntentType
from zvdata.reader import DataReader
from zvdata.utils.pd_utils import df_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY
# Most recently loaded DataFrame; shared between the load-data callback and
# the table/graph callback below.
current_df = None
# Static page layout: provider/schema/level selectors, filters, the data
# table, per-column chart settings, and the chart container.
layout = html.Div(
    [
        html.Div(
            [
                # provider selector
                dcc.Dropdown(
                    id='provider-selector',
                    placeholder='select provider',
                    options=[{'label': provider, 'value': provider} for provider in
                             global_providers]),
                # schema selector
                dcc.Dropdown(id='schema-selector', placeholder='select schema'),
                # level selector
                dcc.Dropdown(id='level-selector', placeholder='select level',
                             options=[{'label': level.value, 'value': level.value} for level in
                                      IntervalLevel],
                             value=IntervalLevel.LEVEL_1DAY.value),
                # column selector
                html.Div(id='schema-column-selector-container', children=None),
                # accumulated (provider, schema, column) selections
                dcc.Dropdown(
                    id='properties-selector',
                    options=[
                        {'label': 'undefined', 'value': 'undefined'}
                    ],
                    value='undefined',
                    multi=True
                ),
                # codes filter
                dcc.Input(id='input-code-filter', type='text', placeholder='input codes',
                          style={'width': '400px'}),
                # time range filter
                dcc.DatePickerRange(
                    id='date-picker-range',
                    start_date='2009-01-01',
                    end_date=now_pd_timestamp(),
                    display_format=TIME_FORMAT_DAY
                ),
                # load data for table
                html.Button('load data', id='btn-load-data', n_clicks_timestamp=0),
                # table container
                html.Div(id='data-table-container', children=None),
                # selected properties
                html.Label('setting y_axis and chart type for the columns:'),
                # col setting container
                html.Div(id='col-setting-container', children=dash_table.DataTable(
                    id='col-setting-table',
                    columns=[
                        {'id': 'property', 'name': 'property', 'editable': False},
                        {'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'},
                        {'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'}
                    ],
                    dropdown={
                        'y_axis': {
                            'options': [
                                {'label': i, 'value': i}
                                for i in ['y1', 'y2', 'y3', 'y4', 'y5']
                            ]
                        },
                        'chart': {
                            'options': [
                                {'label': chart_type.value, 'value': chart_type.value}
                                for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self)
                            ]
                        }
                    },
                    editable=True
                ), ),
                html.Div(id='table-type-label', children=None),
                # intent and chart selectors, side by side
                html.Div(
                    [
                        html.Div([dcc.Dropdown(id='intent-selector')],
                                 style={'width': '50%', 'display': 'inline-block'}),
                        html.Div([dcc.Dropdown(id='chart-selector')],
                                 style={'width': '50%', 'display': 'inline-block'})
                    ]
                ),
                html.Div(id='chart-container', children=None)
            ])
    ]
)
@app.callback(
    Output('schema-selector', 'options'),
    [Input('provider-selector', 'value')])
def update_schema_selector(provider):
    """Populate the schema dropdown once a data provider is chosen."""
    if not provider:
        raise dash.exceptions.PreventUpdate()
    schemas = get_schemas(provider=provider)
    return [{'label': s.__name__, 'value': s.__name__} for s in schemas]
@app.callback(
    Output('schema-column-selector-container', 'children'),
    [Input('schema-selector', 'value')],
    state=[State('provider-selector', 'value')])
def update_column_selector(schema_name, provider):
    """Render the column multi-selector for the chosen schema.

    Defaults the selection to the schema's `important_cols()`.
    """
    if not (provider and schema_name):
        raise dash.exceptions.PreventUpdate()
    schema = get_schema_by_name(name=schema_name)
    cols = get_schema_columns(schema=schema)
    return dcc.Dropdown(
        id='schema-column-selector',
        options=[
            {'label': col, 'value': col} for col in cols
        ],
        # Reuse the schema looked up above instead of fetching it twice.
        value=schema.important_cols(),
        multi=True
    )
@app.callback(
    [Output('properties-selector', 'options'),
     Output('properties-selector', 'value')],
    [Input('schema-column-selector', 'value')],
    state=[State('provider-selector', 'value'),
           State('schema-selector', 'value'),
           State('properties-selector', 'options'),
           State('properties-selector', 'value')])
def update_selected_properties(selected_cols, provider, schema_name, options, value):
    """Accumulate chosen columns into the properties selector.

    Each option value is a JSON-encoded {provider, schema, column} triple
    so later callbacks can reconstruct the data source.
    """
    if selected_cols and provider and schema_name:
        current_options = options
        current_value = value
        added_labels = []
        added_values = []
        for col in selected_cols:
            added_labels.append(col)
            added_values.append(
                json.dumps({
                    'provider': provider,
                    'schema': schema_name,
                    'column': col
                }))
        added_options = [{'label': col, 'value': added_values[i]} for i, col in enumerate(added_labels)]
        # Drop the 'undefined' placeholder once real properties arrive.
        if 'undefined' in value:
            current_options = []
            current_value = []
        current_options += added_options
        current_value += added_values
        return current_options, current_value
    raise dash.exceptions.PreventUpdate()
def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]:
    """Group (provider, schema, column) property dicts and build one
    DataReader per distinct (provider, schema) pair."""
    columns_by_source = {}
    for prop in properties:
        source = (prop['provider'], prop['schema'])
        columns_by_source.setdefault(source, []).append(prop['column'])
    readers = []
    for (provider, schema_name), columns in columns_by_source.items():
        schema = get_schema_by_name(schema_name)
        readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level,
                                  columns=columns, start_timestamp=start_date, end_timestamp=end_date,
                                  time_field=schema.time_field()))
    return readers
@app.callback(
    [Output('data-table-container', 'children'),
     Output('col-setting-table', 'data'),
     Output('table-type-label', 'children'),
     Output('intent-selector', 'options'),
     Output('intent-selector', 'value')],
    [Input('btn-load-data', 'n_clicks')],
    state=[State('properties-selector', 'value'),
           State('level-selector', 'value'),
           State('input-code-filter', 'value'),
           State('date-picker-range', 'start_date'),
           State('date-picker-range', 'end_date')])
def update_data_table(n_clicks, properties, level, codes: str, start_date, end_date):
    """Load the selected properties, join them into one DataFrame, render
    the data table, and derive column settings plus intent options."""
    if n_clicks and properties:
        # Option values are JSON-encoded {provider, schema, column} triples.
        props = []
        for prop in properties:
            props.append(json.loads(prop))
        readers = properties_to_readers(properties=props, level=level, codes=codes, start_date=start_date,
                                        end_date=end_date)
        if readers:
            data_df = readers[0].data_df
            # Outer-join the remaining readers' frames on the index.
            for reader in readers[1:]:
                if df_is_not_null(reader.data_df):
                    data_df = data_df.join(reader.data_df, how='outer')
            # Cache for the table/graph callback below.
            global current_df
            current_df = data_df
            if not df_is_not_null(current_df):
                return 'no data,please reselect!', [], '', [
                    {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
            normal_data = NormalData(current_df)
            data_table = Drawer(data=normal_data).draw_data_table(id='data-table-content')
            # generate col setting table
            properties = normal_data.data_df.columns.to_list()
            df = pd.DataFrame(OrderedDict([
                ('property', properties),
                ('y_axis', ['y1'] * len(properties)),
                ('chart', ['line'] * len(properties))
            ]))
            # generate intents
            intents = normal_data.get_intents()
            intent_options = [
                {'label': intent.value, 'value': intent.value} for intent in intents
            ]
            intent_value = intents[0].value
            return data_table, df.to_dict('records'), normal_data.get_table_type(), intent_options, intent_value
        else:
            return 'no data,please reselect!', [], '', [
                {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
    raise dash.exceptions.PreventUpdate()
@app.callback(
    [Output('chart-selector', 'options'),
     Output('chart-selector', 'value')],
    [Input('intent-selector', 'value')])
def update_chart_selector(intent):
    """Dash callback: refresh the chart drop-down with the charts that are
    valid for the chosen intent, pre-selecting the first one."""
    if not intent:
        # no intent chosen yet: keep the current options untouched
        raise dash.exceptions.PreventUpdate()
    charts = NormalData.get_charts_by_intent(intent=intent)
    chart_options = [{'label': chart.value, 'value': chart.value} for chart in charts]
    return chart_options, charts[0].value
# Operator tables for the dash DataTable filter-query mini-language.
# Each entry lists the textual aliases of one operator; the first entry,
# stripped of its trailing space, is the canonical name returned to callers
# (it matches the pandas Series comparison method names: ge/le/lt/gt/ne/eq).
operators_df = [['ge ', '>='],
                ['le ', '<='],
                ['lt ', '<'],
                ['gt ', '>'],
                ['ne ', '!='],
                ['eq ', '='],
                ['contains '],
                ['datestartswith ']]
# Same operators spelled with SQL-ish symbols (note '== ' instead of 'eq ').
operators_sql = [['>= ', '>='],
                 ['<= ', '<='],
                 ['< ', '<'],
                 ['> ', '>'],
                 ['!= ', '!='],
                 ['== ', '='],
                 ['contains '],
                 ['datestartswith ']]


def split_filter_part(filter_part, operators=operators_df):
    """Split one dash filter expression like ``{col} ge 10`` into its parts.

    :param filter_part: a single expression from the table's ``filter_query``
    :param operators: the operator alias table to match against
    :return: ``(column_name, canonical_operator, value)`` where *value* is a
        float when it parses as one, an unquoted string otherwise; or
        ``[None, None, None]`` when no operator (or no value) is present.
    """
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                # column name is wrapped in braces: {col}
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
                value_part = value_part.strip()
                if not value_part:
                    # BUGFIX: an operator with no value yet (user still typing)
                    # used to raise IndexError on value_part[0]
                    return [None] * 3
                v0 = value_part[0]
                if v0 == value_part[-1] and v0 in ("'", '"', '`'):
                    # quoted string: strip quotes and unescape the quote char
                    value = value_part[1: -1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part
                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value
    return [None] * 3
@app.callback(
    [Output('data-table-content', "data"),
     Output('chart-container', "children")],
    [Input('data-table-content', "page_current"),
     Input('data-table-content', "page_size"),
     Input('data-table-content', "sort_by"),
     Input('data-table-content', "filter_query"),
     Input('intent-selector', "value"),
     Input('chart-selector', "value"),
     Input('col-setting-table', 'data'),
     Input('col-setting-table', 'columns')])
def update_table_and_graph(page_current, page_size, sort_by, filter, intent, chart, rows, columns):
    """Dash callback: re-filter the module-global ``current_df``, return the
    visible table page and a freshly drawn graph for the chosen intent/chart.

    NOTE: ``filter`` shadows the builtin; it carries the table's filter_query.
    ``sort_by`` and ``columns`` are currently unused (sorting is commented out).
    """
    if chart:
        # per-column rendering settings taken from the col-setting table rows
        property_map = {}
        for row in rows:
            property_map[row['property']] = {
                'y_axis': row['y_axis'],
                'chart': row['chart']
            }
        dff = current_df
        if filter:
            # dash joins multiple expressions with ' && '
            filtering_expressions = filter.split(' && ')
            for filter_part in filtering_expressions:
                col_name, operator, filter_value = split_filter_part(filter_part)
                if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
                    # these operators match pandas series operator method names
                    dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
                elif operator == 'contains':
                    dff = dff.loc[dff[col_name].str.contains(filter_value)]
                elif operator == 'datestartswith':
                    # this is a simplification of the front-end filtering logic,
                    # only works with complete fields in standard format
                    dff = dff.loc[dff[col_name].str.startswith(filter_value)]
        # if sort_by:
        #     dff = dff.sort_values(
        #         [col['entity_id'] for col in sort_by],
        #         ascending=[
        #             col['direction'] == 'asc'
        #             for col in sort_by
        #         ],
        #         inplace=False
        #     )
        # comparison intents get the multi-series drawing path
        if intent in (IntentType.compare_self.value, IntentType.compare_to_other.value):
            graph_data, graph_layout = Drawer(NormalData(dff)).draw_compare(chart=chart, property_map=property_map,
                                                                            render=None, keep_ui_state=False)
        else:
            graph_data, graph_layout = Drawer(NormalData(dff)).draw(chart=chart, property_map=property_map, render=None,
                                                                    keep_ui_state=False)
        # slice out only the rows of the current table page
        table_data = dff.iloc[page_current * page_size: (page_current + 1) * page_size
                              ].to_dict('records')
        return table_data, \
            dcc.Graph(
                id='chart-content',
                figure={
                    'data': graph_data,
                    'layout': graph_layout
                }
            )
    raise dash.exceptions.PreventUpdate()
|
import logging
from collections import Counter
from operator import itemgetter
from django.contrib import messages
from django.db import transaction
from django.shortcuts import render, get_object_or_404, redirect
from vcweb.core.decorators import group_required
from vcweb.core.forms import SingleIntegerDecisionForm
from vcweb.core.http import JsonResponse, dumps
from vcweb.core.models import (Experiment, ParticipantGroupRelationship, ChatMessage, PermissionGroup)
from vcweb.experiment.forestry.models import (set_harvest_decision, get_harvest_decision_dv, get_regrowth_dv)
from .models import (get_experiment_metadata, get_regrowth_rate, get_max_harvest_decision, get_cost_of_living,
get_resource_level, get_initial_resource_level, get_final_session_storage_queryset,
can_observe_other_group, get_average_harvest, get_average_storage, get_total_harvest,
get_number_alive, get_player_data)
logger = logging.getLogger(__name__)
@group_required(PermissionGroup.participant, PermissionGroup.demo_participant)
def participate(request, experiment_id=None):
    """Render the forestry participant page for an active experiment, or
    bounce the participant back to the dashboard when it is not active yet."""
    participant = request.user.participant
    logger.debug("handling participate request for %s and experiment %s", participant, experiment_id)
    experiment = get_object_or_404(
        Experiment.objects.select_related('experiment_metadata', 'experiment_configuration'),
        pk=experiment_id,
        experiment_metadata=get_experiment_metadata())
    if not experiment.is_active:
        messages.info(request, '%s has not been activated yet. Please try again later.' % experiment)
        return redirect('core:dashboard')
    pgr = experiment.get_participant_group_relationship(participant)
    template_context = {
        'experiment': experiment,
        'participant_group_relationship': pgr,
        'group': pgr.group,
        # bootstrap the knockout view model as embedded JSON
        'experimentModelJson': dumps(get_view_model_dict(experiment, pgr)),
    }
    return render(request, experiment.participant_template, template_context)
@group_required(PermissionGroup.participant, PermissionGroup.demo_participant)
def submit_harvest_decision(request, experiment_id=None):
    """Persist a participant's harvest decision for the current round.

    Expects a POSTed SingleIntegerDecisionForm (participant_group_id,
    integer_decision, submitted). Returns a JSON payload with ``success`` and,
    on success, a human-readable ``message``.
    """
    form = SingleIntegerDecisionForm(request.POST or None)
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    if form.is_valid():
        participant_group_id = form.cleaned_data['participant_group_id']
        pgr = get_object_or_404(ParticipantGroupRelationship, pk=participant_group_id)
        harvest_decision = form.cleaned_data['integer_decision']
        # submitted=True marks a final decision (vs. a provisional one)
        submitted = form.cleaned_data['submitted']
        logger.debug("pgr %s harvested %s - final submission? %s",
                     pgr, harvest_decision, submitted)
        # write the decision and the experiment log entry atomically
        with transaction.atomic():
            round_data = experiment.current_round_data
            set_harvest_decision(pgr, harvest_decision, round_data, submitted=submitted)
            message = "%s harvested %s trees"
            experiment.log(message % (pgr.participant, harvest_decision))
            response_dict = {
                'success': True,
                'message': message % (pgr.participant_handle, harvest_decision),
            }
            return JsonResponse(response_dict)
    else:
        logger.debug("form was invalid: %s", form)
        for field in form:
            if field.errors:
                logger.debug("field %s had errors %s", field, field.errors)
    return JsonResponse({'success': False})
@group_required(PermissionGroup.participant, PermissionGroup.demo_participant)
def get_view_model(request, experiment_id=None):
    """Return the forestry view model for the requesting participant as JSON."""
    experiment_qs = Experiment.objects.select_related('experiment_metadata', 'experiment_configuration')
    experiment = get_object_or_404(experiment_qs, pk=experiment_id)
    pgr = experiment.get_participant_group_relationship(request.user.participant)
    return JsonResponse(get_view_model_dict(experiment, pgr))
# Default values for the client-side (knockout) experiment view model; any key
# not filled in by get_view_model_dict falls back to these.
experiment_model_defaults = {
    'submitted': False,
    'chatEnabled': False,
    'alive': True,
    'resourceLevel': 0,
    'maxEarnings': 20.00,
    'maximumResourcesToDisplay': 20,
    'warningCountdownTime': 10,
    'harvestDecision': 0,
    'storage': 0,
    'roundDuration': 60,
    'chatMessages': [],
    'canObserveOtherGroup': False,
    # summary stats for the participant's own group
    'myGroup': {
        'resourceLevel': 0,
        'regrowth': 0,
        'originalResourceLevel': 0,
        'averageHarvest': 0,
        'averageStorage': 0,
        'numberAlive': 0,
        'isResourceEmpty': 0,
    },
    # summary stats for the paired/observable group (if any)
    'otherGroup': {
        'resourceLevel': 0,
        'regrowth': 0,
        'originalResourceLevel': 0,
        'averageHarvest': 0,
        'averageStorage': 0,
        'numberAlive': 0,
        'isResourceEmpty': 0,
    },
    'selectedHarvestDecision': False,
    'waitThirtySeconds': False,
    'totalHarvest': 0,
    'sessionOneStorage': 0,
    'sessionTwoStorage': 0,
    'lastHarvestDecision': 0,
    'playerData': [],
    'averageHarvest': 0,
    'averageStorage': 0,
    'numberAlive': '4 out of 4',
    'surveyCompleted': False,
    'regrowth': 0,
    # default Qualtrics survey; overridden per round when a survey is enabled
    'surveyUrl': 'http://survey.qualtrics.com/SE/?SID=SV_0vzmIj5UsOgjoTX',
}
# FIXME: bloated method with too many special cases, refactor
def get_view_model_dict(experiment, participant_group_relationship, **kwargs):
    """Build the JSON-serializable view model for one participant.

    Starts from ``experiment_model_defaults`` and layers on round-type
    specific data (instructions, survey, debriefing, playable rounds) plus the
    participant's own group data and, when permitted, the other group's data.
    """
    ec = experiment.experiment_configuration
    current_round = experiment.current_round
    current_round_data = experiment.current_round_data
    previous_round = experiment.previous_round
    previous_round_data = experiment.get_round_data(round_configuration=previous_round, previous_round=True)
    experiment_model_dict = experiment.to_dict(
        include_round_data=False, default_value_dict=experiment_model_defaults)
    # round / experiment configuration data
    experiment_model_dict['timeRemaining'] = experiment.time_remaining
    experiment_model_dict['sessionId'] = current_round.session_id
    regrowth_rate = get_regrowth_rate(current_round)
    cost_of_living = get_cost_of_living(current_round)
    experiment_model_dict['costOfLiving'] = cost_of_living
    experiment_model_dict['maxHarvestDecision'] = get_max_harvest_decision(ec)
    experiment_model_dict['templateName'] = current_round.template_name
    experiment_model_dict['isPracticeRound'] = current_round.is_practice_round
    # FIXME: only show the tour on the first practice round, this is brittle. better to have a dedicated boolean flag on
    # RoundConfiguration?
    experiment_model_dict['showTour'] = current_round.is_practice_round and not previous_round.is_practice_round
    # instructions round parameters
    experiment_model_dict['isInstructionsRound'] = current_round.is_instructions_round
    experiment_model_dict['chatEnabled'] = current_round.chat_enabled
    experiment_model_dict['isSurveyEnabled'] = current_round.is_survey_enabled
    if current_round.is_instructions_round:
        experiment_model_dict['participantsPerGroup'] = ec.max_group_size
        experiment_model_dict['regrowthRate'] = regrowth_rate
        experiment_model_dict['initialResourceLevel'] = get_initial_resource_level(current_round)
    if current_round.is_survey_enabled:
        # embed participant/experiment/treatment ids in the survey URL
        survey_url = current_round.build_survey_url(pid=participant_group_relationship.pk,
                                                    eid=experiment.pk,
                                                    tid=experiment.experiment_configuration.treatment_id)
        experiment_model_dict['surveyUrl'] = survey_url
        experiment_model_dict['surveyCompleted'] = participant_group_relationship.survey_completed
        logger.debug("survey enabled, setting survey url to %s", survey_url)
    if current_round.is_debriefing_round:
        experiment_model_dict['totalHarvest'] = get_total_harvest(participant_group_relationship,
                                                                  current_round.session_id)
        if experiment.is_last_round:
            (session_one_storage, session_two_storage) = get_final_session_storage_queryset(
                experiment, participant_group_relationship.participant)
            experiment_model_dict['sessionOneStorage'] = session_one_storage.int_value
            experiment_model_dict['sessionTwoStorage'] = session_two_storage.int_value
    # participant data
    experiment_model_dict['participantNumber'] = participant_group_relationship.participant_number
    experiment_model_dict['participantGroupId'] = participant_group_relationship.pk
    # FIXME: these should only need to be added for playable rounds but KO gets unhappy when we switch templates from
    # instructions rounds to practice rounds.
    own_group = participant_group_relationship.group
    own_resource_level = get_resource_level(own_group)
    if current_round.is_playable_round or current_round.is_debriefing_round:
        player_data, own_data = get_player_data(own_group, previous_round_data, current_round_data,
                                                participant_group_relationship)
        experiment_model_dict.update(own_data)
        experiment_model_dict['playerData'] = player_data
        experiment_model_dict['averageHarvest'] = get_average_harvest(own_group, previous_round_data)
        experiment_model_dict['averageStorage'] = get_average_storage(own_group, current_round_data)
        regrowth = experiment_model_dict['regrowth'] = get_regrowth_dv(own_group, current_round_data).value
        # tally alive/dead flags across the group's player data
        c = Counter(list(map(itemgetter('alive'), experiment_model_dict['playerData'])))
        experiment_model_dict['numberAlive'] = "%s out of %s" % (c[True], sum(c.values()))
        # FIXME: refactor duplication between myGroup and otherGroup data loading
        experiment_model_dict['myGroup'] = {
            'resourceLevel': own_resource_level,
            'regrowth': regrowth,
            'originalResourceLevel': own_resource_level - regrowth,
            'averageHarvest': experiment_model_dict['averageHarvest'],
            'averageStorage': experiment_model_dict['averageStorage'],
            'numberAlive': experiment_model_dict['numberAlive'],
            'isResourceEmpty': own_resource_level == 0,
        }
    experiment_model_dict['resourceLevel'] = own_resource_level
    # participant group data parameters are only needed if this round is a
    # data round or the previous round was a data round
    if previous_round.is_playable_round or current_round.is_playable_round:
        harvest_decision = get_harvest_decision_dv(participant_group_relationship, current_round_data)
        experiment_model_dict['submitted'] = harvest_decision.submitted
        if harvest_decision.submitted:
            # user has already submit a harvest decision this round
            experiment_model_dict['harvestDecision'] = harvest_decision.int_value
            logger.debug("already submitted, setting harvest decision to %s",
                         experiment_model_dict['harvestDecision'])
    experiment_model_dict['chatMessages'] = [cm.to_dict() for cm in ChatMessage.objects.for_group(own_group)]
    if can_observe_other_group(current_round):
        experiment_model_dict['canObserveOtherGroup'] = True
        other_group = own_group.get_related_group()
        number_alive = get_number_alive(other_group, current_round_data)
        resource_level = get_resource_level(other_group, current_round_data)
        regrowth = get_regrowth_dv(other_group, current_round_data).value
        experiment_model_dict['otherGroup'] = {
            'regrowth': regrowth,
            'resourceLevel': resource_level,
            'originalResourceLevel': resource_level - regrowth,
            'averageHarvest': get_average_harvest(other_group, previous_round_data),
            'averageStorage': get_average_storage(other_group, current_round_data),
            'numberAlive': "%s out of %s" % (number_alive, other_group.size),
            'isResourceEmpty': resource_level == 0,
        }
    return experiment_model_dict
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
import socketserver
except:
import SocketServer as socketserver
import mimetypes
import webbrowser
import struct
import socket
import base64
import hashlib
import sys
import threading
import signal
import time
import os
import re
from threading import Timer
try:
from urllib import unquote
from urllib import quote
from urlparse import urlparse
from urlparse import parse_qs
except ImportError:
from urllib.parse import unquote
from urllib.parse import quote
from urllib.parse import unquote_to_bytes
from urllib.parse import urlparse
from urllib.parse import parse_qs
import cgi
import weakref
# Active client sessions: maps an instance key (see get_instance_key) to the
# per-session App instance.
clients = {}
# All live runtime objects (apps/widgets) by string id; weak values so entries
# disappear when the objects are garbage collected.
runtimeInstances = weakref.WeakValueDictionary()
pyLessThan3 = sys.version_info < (3,)  # True when running under Python 2
# Shared state for the background GUI update thread.
update_lock = threading.RLock()
update_event = threading.Event()
update_thread = None
# Single-character message-type prefixes of the server<->browser ws protocol.
_MSG_PING = '4'
_MSG_ACK = '3'
_MSG_JS = '2'
_MSG_UPDATE = '1'
def to_websocket(data):
    """Percent-encode outgoing text for websocket transport (utf-8 on Py3)."""
    return quote(data) if pyLessThan3 else quote(data, encoding='utf-8')
def from_websocket(data):
    """Percent-decode text received over the websocket (utf-8 on Py3)."""
    return unquote(data) if pyLessThan3 else unquote(data, encoding='utf-8')
def encode_text(data):
    """Return *data* as bytes on Python 3; Python 2 strings pass through."""
    return data if pyLessThan3 else data.encode('utf-8')
def get_method_by_name(root_node, name):
    """Return the attribute *name* of *root_node* (typically a bound callback
    method), or None when no such attribute exists.

    Uses getattr's default instead of the hasattr/getattr double lookup.
    """
    return getattr(root_node, name, None)
def get_method_by_id(_id):
    """Look up a runtime instance (app/widget) by its identifier.

    Returns None when the id is unknown or the instance has already been
    garbage collected (``runtimeInstances`` holds weak references).
    """
    # dict.get replaces the membership-test-then-index double lookup
    return runtimeInstances.get(str(_id))
def get_instance_key(handler):
    """Compute the key that groups HTTP/websocket handlers into one client
    session in the global ``clients`` dict."""
    if not handler.server.multiple_instance:
        # single-instance mode: every connecting client shares the same
        # application instance, so everyone maps to the same key
        return 0
    # per-client mode: key on the remote IP plus the serving port (websocket
    # port when available, otherwise the HTTP server port)
    remote_ip = handler.client_address[0]
    serving_port = getattr(handler.server, 'websocket_address', handler.server.server_address)[1]
    return remote_ip, serving_port
# noinspection PyPep8Naming
class ThreadedWebsocketServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server hosting the websocket endpoint; one thread per
    connection (handled by WebSocketsHandler)."""

    allow_reuse_address = True  # allow quick restarts on the same port
    daemon_threads = True  # don't keep the interpreter alive for open sockets

    def __init__(self, server_address, RequestHandlerClass, multiple_instance):
        socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
        # when False, all clients share one App instance (see get_instance_key)
        self.multiple_instance = multiple_instance
class WebSocketsHandler(socketserver.StreamRequestHandler):
    """Handles one websocket connection: performs the HTTP upgrade handshake,
    decodes incoming frames, and dispatches client messages to widget
    callbacks registered in ``runtimeInstances``."""

    magic = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'  # RFC 6455 handshake GUID
    timeout = 10

    def __init__(self, *args, **kwargs):
        self.last_ping = time.time()
        self.handshake_done = False
        self._log = logging.getLogger('remi.server.ws')
        socketserver.StreamRequestHandler.__init__(self, *args, **kwargs)

    def setup(self):
        global clients
        socketserver.StreamRequestHandler.setup(self)
        self._log.info('connection established: %r' % (self.client_address,))
        self.handshake_done = False

    def handle(self):
        """Service loop: complete the handshake, then read frames until the
        peer disconnects or a frame fails to parse."""
        self._log.debug('handle')
        # on some systems like ROS, the default socket timeout
        # is less than expected, we force it to infinite (None) as default socket value
        while True:
            if not self.handshake_done:
                self.handshake()
            else:
                if not self.read_next_message():
                    # connection is gone: detach this socket from its session
                    k = get_instance_key(self)
                    clients[k].websockets.remove(self)
                    self.handshake_done = False
                    self._log.debug('ws ending websocket service')
                    break

    @staticmethod
    def bytetonum(b):
        # Python 2 iterates bytes as 1-char strings; Python 3 yields ints.
        if pyLessThan3:
            b = ord(b)
        return b

    def read_next_message(self):
        """Read and unmask one websocket frame and hand the decoded text to
        on_message(); return False when the connection should be torn down."""
        # noinspection PyBroadException
        try:
            length = self.rfile.read(2)
            length = self.bytetonum(length[1]) & 127  # 7-bit payload length
            if length == 126:
                length = struct.unpack('>H', self.rfile.read(2))[0]  # 16-bit ext. length
            elif length == 127:
                length = struct.unpack('>Q', self.rfile.read(8))[0]  # 64-bit ext. length
            masks = [self.bytetonum(byte) for byte in self.rfile.read(4)]
            decoded = ''
            # client->server frames are masked: XOR each byte with the 4-byte key
            for char in self.rfile.read(length):
                decoded += chr(self.bytetonum(char) ^ masks[len(decoded) % 4])
            self.on_message(from_websocket(decoded))
        except socket.timeout as e:
            self._log.debug('socket timed out: %s' % e)
            return False
        except Exception:
            self._log.error("error parsing websocket", exc_info=True)
            return False
        return True

    def ping(self):
        """Send a keepalive ping once more than half the timeout has elapsed."""
        t = time.time()
        if (t - self.last_ping) > (0.5 * self.timeout):
            self.last_ping = t
            self.send_message(_MSG_PING)

    def send_message(self, message):
        """Frame *message* as a single unmasked text frame and send it."""
        if not self.handshake_done:
            # BUGFIX: this used to log "ignoring" but then send anyway, which
            # could corrupt the pending HTTP upgrade; now it really drops it.
            self._log.warning("ignoring message %s (handshake not done)" % message[:10])
            return
        if message != _MSG_PING:
            self._log.debug('send_message: %s... -> %s' % (message[:10], self.client_address))
        out = bytearray()
        out.append(129)  # FIN bit + text-frame opcode
        length = len(message)
        if length <= 125:
            out.append(length)
        elif 126 <= length <= 65535:
            out.append(126)
            out += struct.pack('>H', length)
        else:
            out.append(127)
            out += struct.pack('>Q', length)
        if not pyLessThan3:
            message = message.encode('utf-8')
        out = out + message
        self.request.send(out)

    def handshake(self):
        """Answer the HTTP -> websocket upgrade request (RFC 6455)."""
        self._log.debug('handshake')
        data = self.request.recv(1024).strip()
        key = data.decode().split('Sec-WebSocket-Key: ')[1].split('\r\n')[0]
        # accept token = base64(sha1(client key + magic GUID))
        digest = hashlib.sha1((key.encode("utf-8") + self.magic))
        digest = digest.digest()
        digest = base64.b64encode(digest)
        response = 'HTTP/1.1 101 Switching Protocols\r\n'
        response += 'Upgrade: websocket\r\n'
        response += 'Connection: Upgrade\r\n'
        response += 'Sec-WebSocket-Accept: %s\r\n\r\n' % digest.decode("utf-8")
        self._log.info('handshake complete')
        self.request.sendall(response.encode("utf-8"))
        self.handshake_done = True

    def on_message(self, message):
        """Dispatch one decoded client message: 'pong' keepalives are dropped;
        'callback/<widget>/<function>/<params>' invokes a widget callback."""
        global runtimeInstances
        global update_lock, update_event
        if message == 'pong':
            return
        self.send_message(_MSG_ACK)
        with update_lock:
            # noinspection PyBroadException
            try:
                # saving the websocket in order to update the client
                k = get_instance_key(self)
                if self not in clients[k].websockets:
                    clients[k].websockets.append(self)
                # parsing messages
                chunks = message.split('/')
                self._log.debug('on_message: %s' % chunks[0])
                if len(chunks) > 3:  # msgtype,widget,function,params
                    # if this is a callback
                    msg_type = 'callback'
                    if chunks[0] == msg_type:
                        widget_id = chunks[1]
                        function_name = chunks[2]
                        # params may contain '/' themselves, so slice the raw
                        # message instead of rejoining the split chunks
                        params = message[
                            len(msg_type) + len(widget_id) + len(function_name) + 3:]
                        param_dict = parse_parametrs(params)
                        callback = get_method_by_name(runtimeInstances[widget_id], function_name)
                        if callback is not None:
                            callback(**param_dict)
            except Exception:
                self._log.error('error parsing websocket', exc_info=True)
        update_event.set()
def parse_parametrs(p):
    """
    Parses the parameters given from POST or websocket reqs
    expecting the parameters as: "10|par1='asd'|6|par2=1"
    where each "name=value" field is prefixed by its length and '|'.
    Returns a dict of raw string values, e.g. {'par1': "'asd'", 'par2': '1'}.
    """
    ret = {}
    while len(p) > 1 and p.count('|') > 0:
        s = p.split('|')
        field_len = int(s[0])  # declared length of the "name=value" field
        if field_len > 0:
            p = p[len(s[0]) + 1:]  # drop the length prefix and its '|'
            field_name = p.split('|')[0].split('=')[0]
            field_value = p[len(field_name) + 1:field_len]  # text after 'name='
            p = p[field_len + 1:]  # advance past this field and its separator
            ret[field_name] = field_value
        else:
            # BUGFIX: a zero/negative length never advanced p, looping forever
            break
    return ret
class _UpdateThread(threading.Thread):
    """Background daemon thread that periodically collects changed widgets for
    every client and pushes their fresh HTML over the open websockets, pinging
    sockets to keep them alive."""

    daemon = True

    def __init__(self, interval):
        threading.Thread.__init__(self)
        self._interval = interval  # seconds between update passes
        self._log = logging.getLogger('remi.update')
        self.start()

    def _update_gui(self, client, node):
        """Send the HTML of every changed widget under *node* to the client."""
        changed_widgets = {}  # key = widget instance, value = html representation
        node.repr(client, changed_widgets)
        for widget in changed_widgets.keys():
            html = changed_widgets[widget]
            __id = str(widget.identifier)
            # BUGFIX: iterate a snapshot -- a failed send removes the socket
            # from the live list, and removing while iterating skips entries
            for ws in list(client.websockets):
                self._log.debug('update_widget: %s type: %s' % (__id, type(widget)))
                try:
                    ws.send_message(_MSG_UPDATE + __id + ',' + to_websocket(html))
                except Exception:
                    # drop sockets we can no longer write to
                    client.websockets.remove(ws)

    def run(self):
        while True:
            global clients, runtimeInstances
            global update_lock, update_event
            # wake up early when an update is requested, else poll on interval
            update_event.wait(self._interval)
            with update_lock:
                # noinspection PyBroadException
                try:
                    for client in clients.values():
                        if not hasattr(client, 'root'):
                            continue
                        if client.websockets:
                            self._update_gui(client, client.root)
                        for ws in client.websockets:
                            ws.ping()
                        client.idle()
                except Exception:
                    self._log.error('error updating gui', exc_info=True)
            update_event.clear()
# noinspection PyPep8Naming
class App(BaseHTTPRequestHandler, object):
    """
    This class will handles any incoming request from the browser
    The main application class can subclass this
    In the do_POST and do_GET methods it is expected to receive requests such as:
    - function calls with parameters
    - file requests
    """
    # matches "/res/<filename>" static resource requests
    re_static_file = re.compile(r"^/res/([-_. \w\d]+)\?{0,1}(?:[\w\d]*)")  # https://regex101.com/r/uK1sX1/1
    # matches "/<widget>/<method>?<k=v&...>" attribute-call requests
    re_attr_call = re.compile(r"^/*(\w+)\/(\w+)\?{0,1}(\w*\={1}(\w|\.)+\&{0,1})*$")
    def __init__(self, request, client_address, server, **app_args):
        # extra kwargs (html_head, css_head, js_body_end, ...) forwarded by the
        # hosting server; consumed later via _get_list_from_app_args()
        self._app_args = app_args
        self.client = None  # bound to the per-session App instance in _instance()
        self._log = logging.getLogger('remi.request')
        # NOTE: BaseHTTPRequestHandler.__init__ dispatches to do_GET/do_POST
        # immediately, so all attributes must be set before this call
        super(App, self).__init__(request, client_address, server)
def _get_list_from_app_args(self, name):
try:
v = self._app_args[name]
if isinstance(v, (tuple, list)):
vals = v
else:
vals = [v]
except KeyError:
vals = []
return vals
def log_message(self, format_string, *args):
msg = format_string % args
self._log.debug("%s %s" % (self.address_string(), msg))
def log_error(self, format_string, *args):
msg = format_string % args
self._log.error("%s %s" % (self.address_string(), msg))
    def _instance(self):
        global clients
        global runtimeInstances
        global update_event, update_thread
        # NOTE(review): the string below is a stray expression, not a real
        # docstring, because the global statements precede it
        """
        This method is used to get the Application instance previously created
        managing on this, it is possible to switch to "single instance for
        multiple clients" or "multiple instance for multiple clients" execution way
        """
        k = get_instance_key(self)
        if not(k in clients):
            # first request for this key: register this handler as the session
            runtimeInstances[str(id(self))] = self
            clients[k] = self
        wshost, wsport = self.server.websocket_address
        # advertise the address the browser should open its websocket against
        net_interface_ip = self.connection.getsockname()[0]
        if self.server.host_name is not None:
            net_interface_ip = self.server.host_name
        websocket_timeout_timer_ms = str(self.server.websocket_timeout_timer_ms)
        pending_messages_queue_length = str(self.server.pending_messages_queue_length)
        # refreshing the script every instance() call, beacuse of different net_interface_ip connections
        # can happens for the same 'k'
        clients[k].js_body_end = """
<script>
// from http://stackoverflow.com/questions/5515869/string-length-in-bytes-in-javascript
// using UTF8 strings I noticed that the javascript .length of a string returned less
// characters than they actually were
var pendingSendMessages = [];
var ws = null;
var comTimeout = null;
var failedConnections = 0;
function byteLength(str) {
    // returns the byte length of an utf8 string
    var s = str.length;
    for (var i=str.length-1; i>=0; i--) {
        var code = str.charCodeAt(i);
        if (code > 0x7f && code <= 0x7ff) s++;
        else if (code > 0x7ff && code <= 0xffff) s+=2;
        if (code >= 0xDC00 && code <= 0xDFFF) i--; //trail surrogate
    }
    return s;
}
var paramPacketize = function (ps){
    var ret = '';
    for (var pkey in ps) {
        if( ret.length>0 )ret = ret + '|';
        var pstring = pkey+'='+ps[pkey];
        var pstring_length = byteLength(pstring);
        pstring = pstring_length+'|'+pstring;
        ret = ret + pstring;
    }
    return ret;
};
function openSocket(){
    try{
        ws = new WebSocket('ws://%s:%s/');
        console.debug('opening websocket');
        ws.onopen = websocketOnOpen;
        ws.onmessage = websocketOnMessage;
        ws.onclose = websocketOnClose;
        ws.onerror = websocketOnError;
    }catch(ex){ws=false;alert('websocketnot supported or server unreachable');}
}
openSocket();
function websocketOnMessage (evt){
    var received_msg = evt.data;
    if( received_msg[0]=='0' ){ /*show_window*/
        var index = received_msg.indexOf(',')+1;
        /*var idRootNodeWidget = received_msg.substr(0,index-1);*/
        var content = received_msg.substr(index,received_msg.length-index);
        document.body.innerHTML = '<div id="loading" style="display: none;"><div id="loading-animation"></div></div>';
        document.body.innerHTML += decodeURIComponent(content);
    }else if( received_msg[0]=='1' ){ /*update_widget*/
        var focusedElement = document.activeElement.id;
        var index = received_msg.indexOf(',')+1;
        var idElem = received_msg.substr(1,index-2);
        var content = received_msg.substr(index,received_msg.length-index);
        var elem = document.getElementById(idElem);
        elem.insertAdjacentHTML('afterend',decodeURIComponent(content));
        elem.parentElement.removeChild(elem);
        var elemToFocus = document.getElementById(focusedElement);
        if( elemToFocus != null ){
            document.getElementById(focusedElement).focus();
        }
    }else if( received_msg[0]=='2' ){ /*javascript*/
        var content = received_msg.substr(1,received_msg.length-1);
        try{
            eval(content);
        }catch(e){console.debug(e.message);};
    }else if( received_msg[0]=='3' ){ /*ack*/
        pendingSendMessages.shift() /*remove the oldest*/
        if(comTimeout!=null)clearTimeout(comTimeout);
    }else if( received_msg[0]=='4' ){ /*ping*/
        ws.send('pong');
    }
};
/*this uses websockets*/
var sendCallbackParam = function (widgetID,functionName,params /*a dictionary of name:value*/){
    var paramStr = '';
    if(params!=null) paramStr=paramPacketize(params);
    var message = encodeURIComponent(unescape('callback' + '/' + widgetID+'/'+functionName + '/' + paramStr));
    pendingSendMessages.push(message);
    if( pendingSendMessages.length < %s ){
        ws.send(message);
        if(comTimeout==null)
            comTimeout = setTimeout(checkTimeout, %s);
    }else{
        console.debug('Renewing connection, ws.readyState when trying to send was: ' + ws.readyState)
        renewConnection();
    }
};
/*this uses websockets*/
var sendCallback = function (widgetID,functionName){
    sendCallbackParam(widgetID,functionName,null);
};
function renewConnection(){
    // ws.readyState:
    //A value of 0 indicates that the connection has not yet been established.
    //A value of 1 indicates that the connection is established and communication is possible.
    //A value of 2 indicates that the connection is going through the closing handshake.
    //A value of 3 indicates that the connection has been closed or could not be opened.
    if( ws.readyState == 1){
        try{
            ws.close();
        }catch(err){};
    }
    else if(ws.readyState == 0){
        // Don't do anything, just wait for the connection to be stablished
    }
    else{
        openSocket();
    }
};
function checkTimeout(){
    if(pendingSendMessages.length>0)
        renewConnection();
};
function websocketOnClose(evt){
    /* websocket is closed. */
    console.debug('Connection is closed... event code: ' + evt.code + ', reason: ' + evt.reason);
    // Some explanation on this error: http://stackoverflow.com/questions/19304157/getting-the-reason-why-websockets-closed
    // In practice, on a unstable network (wifi with a lot of traffic for example) this error appears
    // Got it with Chrome saying:
    // WebSocket connection to 'ws://x.x.x.x:y/' failed: Could not decode a text frame as UTF-8.
    // WebSocket connection to 'ws://x.x.x.x:y/' failed: Invalid frame header
    try {
        document.getElementById("loading").style.display = '';
    } catch(err) {
        console.log('Error hiding loading overlay ' + err.message);
    }
    failedConnections += 1;
    console.debug('failed connections=' + failedConnections + ' queued messages=' + pendingSendMessages.length);
    if(failedConnections > 3) {
        // check if the server has been restarted - which would give it a new websocket address,
        // new state, and require a reload
        console.debug('Checking if GUI still up ' + location.href);
        var http = new XMLHttpRequest();
        http.open('HEAD', location.href);
        http.onreadystatechange = function() {
            if (http.status == 200) {
                // server is up but has a new websocket address, reload
                location.reload();
            }
        };
        http.send();
        failedConnections = 0;
    }
    if(evt.code == 1006){
        renewConnection();
    }
};
function websocketOnError(evt){
    /* websocket is closed. */
    /* alert('Websocket error...');*/
    console.debug('Websocket error... event code: ' + evt.code + ', reason: ' + evt.reason);
};
function websocketOnOpen(evt){
    if(ws.readyState == 1){
        ws.send('connected');
        try {
            document.getElementById("loading").style.display = 'none';
        } catch(err) {
            console.log('Error hiding loading overlay ' + err.message);
        }
        failedConnections = 0;
        while(pendingSendMessages.length>0){
            ws.send(pendingSendMessages.shift()); /*whithout checking ack*/
        }
    }
    else{
        console.debug('onopen fired but the socket readyState was not 1');
    }
};
function uploadFile(widgetID, eventSuccess, eventFail, eventData, file){
    var url = '/';
    var xhr = new XMLHttpRequest();
    var fd = new FormData();
    xhr.open('POST', url, true);
    xhr.setRequestHeader('filename', file.name);
    xhr.setRequestHeader('listener', widgetID);
    xhr.setRequestHeader('listener_function', eventData);
    xhr.onreadystatechange = function() {
        if (xhr.readyState == 4 && xhr.status == 200) {
            /* Every thing ok, file uploaded */
            var params={};params['filename']=file.name;
            sendCallbackParam(widgetID, eventSuccess,params);
            console.log('upload success: ' + file.name);
        }else if(xhr.status == 400){
            var params={};params['filename']=file.name;
            sendCallbackParam(widgetID,eventFail,params);
            console.log('upload failed: ' + file.name);
        }
    };
    fd.append('upload_file', file);
    xhr.send(fd);
};
</script>""" % (net_interface_ip, wsport, pending_messages_queue_length, websocket_timeout_timer_ms)
        # add built in js, extend with user js
        clients[k].js_body_end += ('\n' + '\n'.join(self._get_list_from_app_args('js_body_end')))
        # use the default css, but append a version based on its hash, to stop browser caching
        with open(self._get_static_file('style.css'), 'rb') as f:
            md5 = hashlib.md5(f.read()).hexdigest()
        clients[k].css_head = "<link href='/res/style.css?%s' rel='stylesheet' />\n" % md5
        # add built in css, extend with user css
        clients[k].css_head += ('\n' + '\n'.join(self._get_list_from_app_args('css_head')))
        # add user supplied extra html,css,js
        clients[k].html_head = '\n'.join(self._get_list_from_app_args('html_head'))
        clients[k].html_body_start = '\n'.join(self._get_list_from_app_args('html_body_start'))
        clients[k].html_body_end = '\n'.join(self._get_list_from_app_args('html_body_end'))
        clients[k].js_body_start = '\n'.join(self._get_list_from_app_args('js_body_start'))
        clients[k].js_head = '\n'.join(self._get_list_from_app_args('js_head'))
        if not hasattr(clients[k], 'websockets'):
            clients[k].websockets = []
        self.client = clients[k]
        if update_thread is None:
            # we need to, at least, ping the websockets to keep them alive. we might also ping more frequently if the
            # user requested we do so
            ping_time = self.server.websocket_timeout_timer_ms / 2000.0  # twice the timeout in ms
            if self.server.update_interval is None:
                interval = ping_time
            else:
                interval = min(ping_time, self.server.update_interval)
            update_thread = _UpdateThread(interval)
        update_event.set()  # update now
    def idle(self):
        """ Idle function called every UPDATE_INTERVAL before the gui update.
         Useful to schedule tasks. """
        # Intentionally a no-op: App subclasses override this hook to run
        # periodic work between GUI refreshes.
        pass
def set_root_widget(self, widget):
global update_lock, update_event
#update_event.wait()
self.root = widget
# here we check if the root window has changed
for ws in self.websockets:
try:
html = self.root.repr(self)
ws.send_message('0' + self.root.identifier + ',' + to_websocket(html)) ##0==show_window message
except:
self.websockets.remove(ws)
#update_event.clear()
def _send_spontaneous_websocket_message(self, message):
global update_lock
with update_lock:
for ws in self.client.websockets:
# noinspection PyBroadException
try:
self._log.debug("sending websocket spontaneous message")
ws.send_message(message)
except:
self._log.error("sending websocket spontaneous message", exc_info=True)
self.client.websockets.remove(ws)
update_event.clear()
    def execute_javascript(self, code):
        """Run the given javascript *code* string in the connected browsers.

        The code is prefixed with the _MSG_JS marker so the client-side
        websocket handler knows to evaluate it.
        """
        self._send_spontaneous_websocket_message(_MSG_JS + code)
    def notification_message(self, title, content, icon=""):
        """This function sends "javascript" message to the client, that executes its content.
        In this particular code, a notification message is shown
        """
        # NOTE(review): title/content/icon are interpolated into javascript
        # without any escaping; quotes or backslashes in them would break the
        # generated script - confirm callers pass sanitized text.
        code = """
            var options = {
                body: "%(content)s",
                icon: "%(icon)s"
            }
            if (!("Notification" in window)) {
                alert("%(content)s");
            }else if (Notification.permission === "granted") {
                var notification = new Notification("%(title)s", options);
            }else if (Notification.permission !== 'denied') {
                Notification.requestPermission(function (permission) {
                    if (permission === "granted") {
                        var notification = new Notification("%(title)s", options);
                    }
                });
            }
            """ % {'title': title, 'content': content, 'icon': icon}
        self.execute_javascript(code)
def do_POST(self):
self._instance()
file_data = None
listener_widget = None
listener_function = None
try:
# Parse the form data posted
filename = self.headers['filename']
listener_widget = runtimeInstances[self.headers['listener']]
listener_function = self.headers['listener_function']
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
# Echo back information about what was posted in the form
for field in form.keys():
field_item = form[field]
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
self._log.debug('post: uploaded %s as "%s" (%d bytes)\n' % (field, field_item.filename, file_len))
get_method_by_name(listener_widget, listener_function)(file_data, filename)
else:
# Regular form value
self._log.debug('post: %s=%s\n' % (field, form[field].value))
if file_data is not None:
# the filedata is sent to the listener
self._log.debug('GUI - server.py do_POST: fileupload name= %s' % (filename))
self.send_response(200)
except Exception as e:
self._log.error('post: failed', exc_info=True)
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
    def do_HEAD(self):
        """Answer HEAD requests with a bare 200 and no body."""
        self.send_response(200)
        self.end_headers()
    def do_AUTHHEAD(self):
        """Challenge the client for HTTP Basic credentials (401 response)."""
        self.send_response(401)
        self.send_header('WWW-Authenticate', 'Basic realm=\"Protected\"')
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_GET(self):
        """Handler for the GET requests.

        Enforces optional HTTP Basic auth, then builds the client UI (once)
        and dispatches the request path via _process_all().
        """
        do_process = False
        if self.server.auth is None:
            # No credentials configured: serve everybody.
            do_process = True
        else:
            if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
                self._log.info("Authenticating")
                self.do_AUTHHEAD()
                self.wfile.write(encode_text('no auth header received'))
            elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
                # NOTE(review): plain '==' on credentials is not a
                # constant-time comparison; consider hmac.compare_digest.
                do_process = True
            else:
                self.do_AUTHHEAD()
                # NOTE(review): this echoes the presented Authorization header
                # (base64 credentials) back to the client - confirm intended.
                self.wfile.write(encode_text(self.headers['Authorization']))
                self.wfile.write(encode_text('not authenticated'))
        if do_process:
            path = str(unquote(self.path))
            # noinspection PyBroadException
            try:
                self._instance()
                # build the page (call main()) in user code, if not built yet
                with update_lock:
                    # build the root page once if necessary
                    if not hasattr(self.client, 'root'):
                        self._log.info('built UI (path=%s)' % path)
                        self.client.root = self.main(*self.server.userdata)
                    self._process_all(path)
            except:
                self._log.error('error processing GET request', exc_info=True)
def _get_static_file(self, filename):
static_paths = [os.path.join(os.path.dirname(__file__), 'res')]
static_paths.extend(self._get_list_from_app_args('static_file_path'))
for s in reversed(static_paths):
path = os.path.join(s, filename)
if os.path.exists(path):
return path
    def _process_all(self, function):
        """Dispatch a GET path.

        '/' (or empty) serves the rendered root page; /res/... paths serve
        static files; widget/method paths invoke the method on the widget
        and return its result.
        """
        global update_lock
        self._log.debug('get: %s' % function)
        static_file = self.re_static_file.match(function)
        attr_call = self.re_attr_call.match(function)
        if (function == '/') or (not function):
            with update_lock:
                # render the HTML
                html = self.client.root.repr(self.client)
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(encode_text("<!DOCTYPE html>\n"))
                self.wfile.write(encode_text("<html>\n<head>\n"))
                self.wfile.write(encode_text(
                    """<meta content='text/html;charset=utf-8' http-equiv='Content-Type'>
                    <meta content='utf-8' http-equiv='encoding'>
                    <meta name="viewport" content="width=device-width, initial-scale=1.0">"""))
                # Head/body fragments were assembled per-client in _instance().
                self.wfile.write(encode_text(self.client.css_head))
                self.wfile.write(encode_text(self.client.html_head))
                self.wfile.write(encode_text(self.client.js_head))
                self.wfile.write(encode_text("\n<title>%s</title>\n" % self.server.title))
                self.wfile.write(encode_text("\n</head>\n<body>\n"))
                self.wfile.write(encode_text(self.client.js_body_start))
                self.wfile.write(encode_text(self.client.html_body_start))
                self.wfile.write(encode_text('<div id="loading"><div id="loading-animation"></div></div>'))
                self.wfile.write(encode_text(html))
                self.wfile.write(encode_text(self.client.html_body_end))
                self.wfile.write(encode_text(self.client.js_body_end))
                self.wfile.write(encode_text("</body>\n</html>"))
        elif static_file:
            filename = self._get_static_file(static_file.groups()[0])
            if not filename:
                self.send_response(404)
                return
            mimetype, encoding = mimetypes.guess_type(filename)
            self.send_response(200)
            self.send_header('Content-type', mimetype if mimetype else 'application/octet-stream')
            if self.server.enable_file_cache:
                # Let browsers cache static assets for a day.
                self.send_header('Cache-Control', 'public, max-age=86400')
            self.end_headers()
            with open(filename, 'rb') as f:
                content = f.read()
                self.wfile.write(content)
        elif attr_call:
            with update_lock:
                param_dict = parse_qs(urlparse(function).query)
                # parse_qs returns parameters as list, here we take the first element
                for k in param_dict:
                    param_dict[k] = param_dict[k][0]
                widget, function = attr_call.group(1, 2)
                try:
                    # Invoke <widget>.<function>(**query_params).
                    content, headers = get_method_by_name(get_method_by_id(widget), function)(**param_dict)
                    if content is None:
                        self.send_response(503)
                        return
                    self.send_response(200)
                except IOError:
                    self._log.error('attr %s/%s call error' % (widget, function), exc_info=True)
                    self.send_response(404)
                    return
                except (TypeError, AttributeError):
                    self._log.error('attr %s/%s not available' % (widget, function))
                    self.send_response(503)
                    return
                for k in headers:
                    self.send_header(k, headers[k])
                self.end_headers()
                # The method may return bytes or text; fall back to encoding.
                try:
                    self.wfile.write(content)
                except:
                    self.wfile.write(encode_text(content))
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request on its own thread and carries
    the remi application settings the request handler needs."""
    # Worker threads die with the main thread instead of blocking shutdown.
    daemon_threads = True
    # noinspection PyPep8Naming
    def __init__(self, server_address, RequestHandlerClass, websocket_address,
                 auth, multiple_instance, enable_file_cache, update_interval,
                 websocket_timeout_timer_ms, host_name, pending_messages_queue_length,
                 title, *userdata):
        """Store the app-wide configuration so request handlers can read it.

        Args:
            websocket_address: (host, port) of the companion websocket server.
            auth: base64-encoded basic-auth credentials, or None to disable auth.
            multiple_instance: whether each browser gets its own App instance.
            enable_file_cache: send Cache-Control headers for static files.
            update_interval: seconds between GUI update checks (None = ping only).
            websocket_timeout_timer_ms: client websocket keepalive timeout.
            host_name: externally visible host name override, or None.
            pending_messages_queue_length: client-side message queue size.
            title: page/window title.
            *userdata: extra values forwarded to the App's main().
        """
        HTTPServer.__init__(self, server_address, RequestHandlerClass)
        self.websocket_address = websocket_address
        self.auth = auth
        self.multiple_instance = multiple_instance
        self.enable_file_cache = enable_file_cache
        self.update_interval = update_interval
        self.websocket_timeout_timer_ms = websocket_timeout_timer_ms
        self.host_name = host_name
        self.pending_messages_queue_length = pending_messages_queue_length
        self.title = title
        self.userdata = userdata
class Server(object):
    """Owns the HTTP server and the websocket server, their threads, and the
    overall start/serve/stop lifecycle of a remi application."""
    # noinspection PyShadowingNames
    def __init__(self, gui_class, title='', start=True, address='127.0.0.1', port=8081, username=None, password=None,
                 multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=True,
                 websocket_timeout_timer_ms=1000, websocket_port=0, host_name=None,
                 pending_messages_queue_length=1000, userdata=()):
        """Configure (and by default immediately start) the servers.

        Args:
            gui_class: the App subclass to serve.
            title: page title; defaults to the class name.
            start: when True, start() and serve_forever() are called here.
            username/password: enable HTTP Basic auth when both are given.
            userdata (tuple): extra values forwarded to the App's main().
        """
        self._gui = gui_class
        self._title = title or gui_class.__name__
        self._wsserver = self._sserver = None
        self._wsth = self._sth = None
        self._base_address = ''
        self._address = address
        self._sport = port
        self._multiple_instance = multiple_instance
        self._enable_file_cache = enable_file_cache
        self._update_interval = update_interval
        self._start_browser = start_browser
        self._websocket_timeout_timer_ms = websocket_timeout_timer_ms
        self._websocket_port = websocket_port
        self._host_name = host_name
        self._pending_messages_queue_length = pending_messages_queue_length
        self._userdata = userdata
        # Credentials are stored pre-encoded for direct header comparison.
        if username and password:
            self._auth = base64.b64encode(encode_text("%s:%s" % (username, password)))
        else:
            self._auth = None
        if not isinstance(userdata, tuple):
            raise ValueError('userdata must be a tuple')
        self._log = logging.getLogger('remi.server')
        if start:
            self.start()
            self.serve_forever()
    @property
    def title(self):
        # Page/window title shown by the client.
        return self._title
    @property
    def address(self):
        # Base http URL of the running server ('' until start() is called).
        return self._base_address
    def start(self):
        """Start the websocket and HTTP servers, each on a daemon thread,
        and optionally open a browser on the served address."""
        # here the websocket is started on an ephemeral port
        self._wsserver = ThreadedWebsocketServer((self._address, self._websocket_port), WebSocketsHandler,
                                                 self._multiple_instance)
        wshost, wsport = self._wsserver.socket.getsockname()[:2]
        self._log.info('Started websocket server %s:%s' % (wshost, wsport))
        self._wsth = threading.Thread(target=self._wsserver.serve_forever)
        self._wsth.daemon = True
        self._wsth.start()
        # Create a web server and define the handler to manage the incoming
        # request
        self._sserver = ThreadedHTTPServer((self._address, self._sport), self._gui,
                                           (wshost, wsport), self._auth,
                                           self._multiple_instance, self._enable_file_cache,
                                           self._update_interval, self._websocket_timeout_timer_ms,
                                           self._host_name, self._pending_messages_queue_length,
                                           self._title, *self._userdata)
        shost, sport = self._sserver.socket.getsockname()[:2]
        # when listening on multiple net interfaces the browsers connects to localhost
        if shost == '0.0.0.0':
            shost = '127.0.0.1'
        self._base_address = 'http://%s:%s/' % (shost,sport)
        self._log.info('Started httpserver %s' % self._base_address)
        if self._start_browser:
            try:
                import android
                android.webbrowser.open(self._base_address)
            except ImportError:
                # use default browser instead of always forcing IE on Windows
                if os.name == 'nt':
                    webbrowser.get('windows-default').open(self._base_address)
                else:
                    webbrowser.open(self._base_address)
        self._sth = threading.Thread(target=self._sserver.serve_forever)
        self._sth.daemon = True
        self._sth.start()
    def serve_forever(self):
        """Block the calling thread until interrupted (e.g. Ctrl+C)."""
        # we could join on the threads, but join blocks all interrupts (including
        # ctrl+c), so just spin here
        # noinspection PyBroadException
        try:
            while True:
                signal.pause()
        except Exception:
            # signal.pause() is missing for Windows; sleep-poll (1s) instead
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
    def stop(self):
        """Shut down both servers and join their threads."""
        self._wsserver.shutdown()
        self._wsth.join()
        self._sserver.shutdown()
        self._sth.join()
class StandaloneServer(Server):
    """Server variant that shows the app in a native PyWebView window
    instead of the user's browser."""
    def __init__(self, gui_class, title='', width=800, height=600, resizable=True, fullscreen=False, start=True,
                 userdata=()):
        # Servers are configured but not started; serve_forever() starts them
        # together with the native window.
        Server.__init__(self, gui_class, title=title, start=False, address='127.0.0.1', port=0, username=None,
                        password=None,
                        multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=False,
                        websocket_timeout_timer_ms=1000, websocket_port=0, host_name=None,
                        pending_messages_queue_length=1000, userdata=userdata)
        # Window geometry/behavior forwarded to webview.create_window().
        self._application_conf = {'width':width, 'height':height, 'resizable':resizable, 'fullscreen':fullscreen}
        if start:
            self.serve_forever()
    def serve_forever(self):
        """Start the servers, open the native window (blocking), then stop.

        Raises:
            ImportError: when the optional pywebview dependency is missing.
        """
        try:
            import webview
            Server.start(self)
            webview.create_window(self.title, self.address, **self._application_conf)
            Server.stop(self)
        except ImportError:
            raise ImportError('PyWebView is missing. Please install it by:\n    '
                              'pip install pywebview\n    '
                              'more info at https://github.com/r0x0r/pywebview')
def start(mainGuiClass, **kwargs):
    """This method starts the webserver with a specific App subclass."""
    # Pop the framework-level switches so they are not forwarded to Server.
    debug = kwargs.pop('debug', False)
    standalone = kwargs.pop('standalone', False)
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=level,
                        format='%(name)-16s %(levelname)-8s %(message)s')
    logging.getLogger('remi').setLevel(level=level)
    # Standalone mode renders in a native window; otherwise a browser is used.
    server_class = StandaloneServer if standalone else Server
    s = server_class(mainGuiClass, start=True, **kwargs)
|
from os import path, mkdir, makedirs
import app.functions as functions
from datetime import datetime
# Timestamp used to name this run's output folder (filesystem-safe format,
# no ':' characters).
functions.current_datetime = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
output_dir = "output/"
# makedirs(..., exist_ok=True) avoids the check-then-create race of the
# previous path.exists() + mkdir() pair and tolerates pre-existing folders.
makedirs(output_dir, exist_ok=True)
functions.video_output_dir = output_dir + functions.current_datetime + "/"
makedirs(functions.video_output_dir, exist_ok=True)
|
import os
from setuptools import setup, find_packages

# Package metadata.
DESCRIPTION = "Toolkit for genome-wide analysis of STRs"
LONG_DESCRIPTION = DESCRIPTION
NAME = "trtools"
AUTHOR = "Melissa Gymrek"
AUTHOR_EMAIL = "mgymrek@ucsd.edu"
MAINTAINER = "Melissa Gymrek"
MAINTAINER_EMAIL = "mgymrek@ucsd.edu"
DOWNLOAD_URL = 'http://github.com/gymreklab/TRTools'
LICENSE = 'MIT'

# version-keeping code based on pybedtools
curdir = os.path.abspath(os.path.dirname(__file__))
MAJ = 4
MIN = 1
REV = 0
VERSION = '%d.%d.%d' % (MAJ, MIN, REV)

# Regenerate trtools/version.py so the installed package reports VERSION.
version_module_text = (
    "\n"
    "# THIS FILE IS GENERATED FROM SETUP.PY\n"
    "version = '{version}'\n"
    "__version__ = version"
).format(version=VERSION)
with open(os.path.join(curdir, 'trtools/version.py'), 'w') as fout:
    fout.write(version_module_text)

setup(name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=DOWNLOAD_URL,
      download_url=DOWNLOAD_URL,
      license=LICENSE,
      python_requires='>=3.5',
      packages=find_packages(),
      include_package_data=True,
      license_file="LICENSE.txt",
      scripts=["trtools/testsupport/test_trtools.sh"],
      entry_points={
          'console_scripts': [
              'dumpSTR=trtools.dumpSTR:run',
              'mergeSTR=trtools.mergeSTR:run',
              'statSTR=trtools.statSTR:run',
              'compareSTR=trtools.compareSTR:run',
              'qcSTR=trtools.qcSTR:run',
          ],
      },
      install_requires=[
          'cyvcf2',
          'matplotlib',
          'numpy',
          'pandas',
          'pybedtools',
          'pysam',
          'scikit-learn',
          'scipy',
      ],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Programming Language :: Python :: 3.5',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Intended Audience :: Science/Research',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
      ])
|
#!/usr/bin/env python
from __future__ import print_function
from collections import OrderedDict
import pprint
def getcpuname():
    """Return the 'model name' of the first processor in /proc/cpuinfo."""
    cpus = OrderedDict()
    current = OrderedDict()
    count = 0
    with open('/proc/cpuinfo') as cpuinfo:
        for raw_line in cpuinfo:
            if not raw_line.strip():
                # Blank line: the current processor stanza is complete.
                cpus['proc%s' % count] = current
                count += 1
                current = OrderedDict()
            else:
                parts = raw_line.split(':')
                key = parts[0].strip()
                # Lines without exactly one ':' get an empty value.
                current[key] = parts[1].strip() if len(parts) == 2 else ''
    return cpus['proc0']['model name']
if __name__ == '__main__':
    # Bug fix: the original called the undefined name CPUinfo(), raising
    # NameError. getcpuname() already returns proc0's model name string.
    print(getcpuname())
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import codecs
import errno
import multiprocessing.pool
import os
import os.path
import re
import shutil
import ssl
import sys
import traceback
import six
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen, Request
from llnl.util.filesystem import mkdirp
import llnl.util.tty as tty
import spack.cmd
import spack.config
import spack.error
import spack.url
import spack.util.crypto
import spack.util.s3 as s3_util
import spack.util.url as url_util
import llnl.util.lang
from spack.util.compression import ALLOWED_ARCHIVE_TYPES
# Python 2/3 compatibility shim for the stdlib HTML parser.
if sys.version_info < (3, 0):
    # Python 2 had these in the HTMLParser package.
    from HTMLParser import HTMLParser, HTMLParseError  # novm
else:
    # In Python 3, things moved to html.parser
    from html.parser import HTMLParser
    # Also, HTMLParseError is deprecated and never raised.
    class HTMLParseError(Exception):
        """Placeholder so 'except HTMLParseError' still compiles on Python 3."""
        pass
# Timeout in seconds for web requests
_timeout = 10
class LinkParser(HTMLParser):
    """This parser just takes an HTML page and strips out the hrefs on the
    links. Good enough for a really simple spider.

    After feeding a page, the collected targets are in ``self.links``,
    in document order.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []

    def handle_starttag(self, tag, attrs):
        # Only anchor tags carry the hyperlinks we care about.
        if tag != 'a':
            return
        self.links.extend(value for name, value in attrs if name == 'href')
def uses_ssl(parsed_url):
    """Return True when fetching *parsed_url* involves an SSL connection.

    https URLs always do; s3 URLs do unless S3_ENDPOINT_URL points at a
    non-https endpoint. Everything else is assumed non-SSL.
    """
    scheme = parsed_url.scheme
    if scheme == 'https':
        return True
    if scheme != 's3':
        return False
    endpoint_url = os.environ.get('S3_ENDPOINT_URL')
    if not endpoint_url:
        # No custom endpoint configured: default AWS endpoints use https.
        return True
    return url_util.parse(endpoint_url, scheme='https').scheme == 'https'
# True when this interpreter predates SSL certificate verification support
# (added in CPython 2.7.9 and 3.4.3); used below to decide whether to warn
# instead of building a verifying SSL context.
__UNABLE_TO_VERIFY_SSL = (
    lambda pyver: (
        (pyver < (2, 7, 9)) or
        ((3,) < pyver < (3, 4, 3))
    ))(sys.version_info)
def read_from_url(url, accept_content_type=None):
    """Fetch *url* and return (final_url, headers, response stream).

    Args:
        url: URL string or already-parsed URL object.
        accept_content_type (str or None): when given, the page is skipped
            (returning (None, None, None)) unless the server's Content-type
            starts with this value. For http(s) URLs this is checked with a
            cheap HEAD request first.

    Raises:
        SpackWebError: when the underlying request fails.
    """
    url = url_util.parse(url)
    context = None
    verify_ssl = spack.config.get('config:verify_ssl')
    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()  # novm
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            if not __UNABLE_TO_VERIFY_SSL:
                context = ssl._create_unverified_context()
    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)
        content_type = get_header(resp.headers, 'Content-type')
    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"
    try:
        response = _urlopen(req, timeout=_timeout, context=context)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(
            ERROR=str(err)))
    if accept_content_type and not is_web_url:
        # Non-web schemes (e.g. s3) got no HEAD probe; read the type from
        # the actual response instead.
        content_type = get_header(response.headers, 'Content-type')
    reject_content_type = (
        accept_content_type and (
            content_type is None or
            not content_type.startswith(accept_content_type)))
    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))
        return None, None, None
    return response.geturl(), response.headers, response
def warn_no_ssl_cert_checking():
    """Emit the standard warning used when SSL verification is unavailable."""
    tty.warn("Spack will not check SSL certificates. You need to update "
             "your Python to enable certificate verification.")
def push_to_url(
        local_file_path, remote_path, keep_original=True, extra_args=None):
    """Copy (or move) a local file to *remote_path*.

    Args:
        local_file_path (str): file to push.
        remote_path (str): destination; a local/file path or an s3:// URL.
        keep_original (bool): when False the local file is removed after
            the transfer.
        extra_args (dict or None): extra arguments forwarded to the S3
            upload_file call.

    Raises:
        NotImplementedError: for URL schemes other than local paths and s3.
    """
    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')
    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()
    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                os.rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)
                else:
                    raise
    elif remote_url.scheme == 's3':
        if extra_args is None:
            extra_args = {}
        remote_path = remote_url.path
        # S3 object keys must not begin with '/'.
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]
        s3 = s3_util.create_s3_session(remote_url)
        s3.upload_file(local_file_path, remote_url.netloc,
                       remote_path, ExtraArgs=extra_args)
        if not keep_original:
            os.remove(local_file_path)
    else:
        raise NotImplementedError(
            'Unrecognized URL scheme: {SCHEME}'.format(
                SCHEME=remote_url.scheme))
def url_exists(url):
    """Return True when *url* (local path, s3 URL, or web URL) is readable."""
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)
    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path.lstrip('/'))
            return True
        except s3.ClientError as err:
            # NOTE(review): boto3 clients normally expose this as
            # s3.exceptions.ClientError - confirm what create_s3_session
            # actually returns here.
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err
    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except (SpackWebError, URLError):
        return False
def _debug_print_delete_results(result):
    """Log, at debug level, the outcome of an S3 delete_objects call."""
    for deleted in result.get('Deleted', ()):
        tty.debug('Deleted {0}'.format(deleted['Key']))
    for error in result.get('Errors', ()):
        tty.debug('Failed to delete {0} ({1})'.format(
            error['Key'], error['Message']))
def remove_url(url, recursive=False):
    """Delete the resource at *url* (local path or s3 URL).

    Args:
        url: URL/path of the resource to remove.
        recursive (bool): remove a whole directory tree / key prefix
            instead of a single file/object.
    """
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            shutil.rmtree(local_path)
        else:
            os.remove(local_path)
        return
    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        bucket = url.netloc
        if recursive:
            # Because list_objects_v2 can only return up to 1000 items
            # at a time, we have to paginate to make sure we get it all
            prefix = url.path.strip('/')
            paginator = s3.get_paginator('list_objects_v2')
            pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
            delete_request = {'Objects': []}
            for item in pages.search('Contents'):
                if not item:
                    continue
                delete_request['Objects'].append({'Key': item['Key']})
                # Make sure we do not try to hit S3 with a list of more
                # than 1000 items
                if len(delete_request['Objects']) >= 1000:
                    r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                    _debug_print_delete_results(r)
                    delete_request = {'Objects': []}
            # Delete any items that remain
            if len(delete_request['Objects']):
                r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                _debug_print_delete_results(r)
        else:
            s3.delete_object(Bucket=bucket, Key=url.path.lstrip('/'))
        return
    # Don't even try for other URL schemes.
def _iter_s3_contents(contents, prefix):
for entry in contents:
key = entry['Key']
if not key.startswith('/'):
key = '/' + key
key = os.path.relpath(key, prefix)
if key == '.':
continue
yield key
def _list_s3_objects(client, bucket, prefix, num_entries, start_after=None):
    """List one page of keys under *prefix* in *bucket*.

    Args:
        client: boto3-style S3 client.
        bucket (str): bucket name.
        prefix (str): key prefix, with a leading '/'.
        num_entries (int): page size for the listing.
        start_after (str or None): resume listing after this key.

    Returns:
        Tuple of (iterator over prefix-relative keys, last key of this page
        or None when the listing is complete).
    """
    list_args = dict(
        Bucket=bucket,
        Prefix=prefix[1:],
        MaxKeys=num_entries)
    if start_after is not None:
        list_args['StartAfter'] = start_after
    result = client.list_objects_v2(**list_args)
    # Bug fix: an empty listing has no 'Contents' key at all; default to an
    # empty page instead of raising KeyError.
    contents = result.get('Contents', [])
    last_key = None
    if result['IsTruncated']:
        last_key = contents[-1]['Key']
    # (renamed from 'iter', which shadowed the builtin)
    entries = _iter_s3_contents(contents, prefix)
    return entries, last_key
def _iter_s3_prefix(client, url, num_entries=1024):
    """Yield every key under *url*'s path in its bucket, page by page."""
    bucket = url.netloc
    # Normalize the path so it starts with exactly one '/'.
    prefix = re.sub(r'^/*', '/', url.path)
    marker = None
    while True:
        page, marker = _list_s3_objects(
            client, bucket, prefix, num_entries, start_after=marker)
        for entry in page:
            yield entry
        # A falsy marker means the previous page was the last one.
        if not marker:
            return
def _iter_local_prefix(path):
for root, _, files in os.walk(path):
for f in files:
yield os.path.relpath(os.path.join(root, f), path)
def list_url(url, recursive=False):
    """List the entries under *url* (local directory or s3 prefix).

    Args:
        url: directory path or s3 URL.
        recursive (bool): when True, return relative paths of every file in
            the tree; otherwise only the top-level entries.

    Returns:
        list of str (None falls through for unsupported URL schemes).
    """
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            return list(_iter_local_prefix(local_path))
        return [subpath for subpath in os.listdir(local_path)
                if os.path.isfile(os.path.join(local_path, subpath))]
    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        if recursive:
            return list(_iter_s3_prefix(s3, url))
        # Top level only: keep the first path component of each key, deduped.
        return list(set(
            key.split('/', 1)[0]
            for key in _iter_s3_prefix(s3, url)))
def spider(root_urls, depth=0, concurrency=32):
    """Get web pages from root URLs.
    If depth is specified (e.g., depth=2), then this will also follow
    up to <depth> levels of links from each root.
    Args:
        root_urls (str or list of str): root urls used as a starting point
            for spidering
        depth (int): level of recursion into links
        concurrency (int): number of simultaneous requests that can be sent
    Returns:
        A dict of pages visited (URL) mapped to their full text and the
        set of visited links.
    """
    # Cache of visited links, meant to be captured by the closure below
    _visited = set()
    def _spider(url, collect_nested):
        """Fetches URL and any pages it links to.
        Prints out a warning only if the root can't be fetched; it ignores
        errors with pages that the root links to.
        Args:
            url (str): url being fetched and searched for links
            collect_nested (bool): whether we want to collect arguments
                for nested spidering on the links found in this url
        Returns:
            A tuple of:
            - pages: dict of pages visited (URL) mapped to their full text.
            - links: set of links encountered while visiting the pages.
            - spider_args: argument for subsequent call to spider
        """
        pages = {}  # dict from page URL -> text content.
        links = set()  # set of all links seen on visited pages.
        subcalls = []
        try:
            # Only HTML pages are fetched; other content types are skipped.
            response_url, _, response = read_from_url(url, 'text/html')
            if not response_url or not response:
                return pages, links, subcalls
            page = codecs.getreader('utf-8')(response).read()
            pages[response_url] = page
            # Parse out the links in the page
            link_parser = LinkParser()
            link_parser.feed(page)
            while link_parser.links:
                raw_link = link_parser.links.pop()
                # Resolve relative hrefs against the page's final URL.
                abs_link = url_util.join(
                    response_url,
                    raw_link.strip(),
                    resolve_href=True)
                links.add(abs_link)
                # Skip stuff that looks like an archive
                if any(raw_link.endswith(s) for s in ALLOWED_ARCHIVE_TYPES):
                    continue
                # Skip already-visited links
                if abs_link in _visited:
                    continue
                # If we're not at max depth, follow links.
                if collect_nested:
                    subcalls.append((abs_link,))
                    _visited.add(abs_link)
        except URLError as e:
            tty.debug(str(e))
            if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
                tty.warn("Spack was unable to fetch url list due to a "
                         "certificate verification problem. You can try "
                         "running spack -k, which will not check SSL "
                         "certificates. Use this at your own risk.")
        except HTMLParseError as e:
            # This error indicates that Python's HTML parser sucks.
            msg = "Got an error parsing HTML."
            # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
            if sys.version_info[:3] < (2, 7, 3):
                msg += " Use Python 2.7.3 or newer for better HTML parsing."
            tty.warn(msg, url, "HTMLParseError: " + str(e))
        except Exception as e:
            # Other types of errors are completely ignored,
            # except in debug mode
            tty.debug("Error in _spider: %s:%s" % (type(e), str(e)),
                      traceback.format_exc())
        finally:
            tty.debug("SPIDER: [url={0}]".format(url))
        return pages, links, subcalls
    if isinstance(root_urls, six.string_types):
        root_urls = [root_urls]
    # Clear the local cache of visited pages before starting the search
    _visited.clear()
    current_depth = 0
    pages, links, spider_args = {}, set(), []
    collect = current_depth < depth
    for root in root_urls:
        root = url_util.parse(root)
        spider_args.append((root, collect))
    # Breadth-first crawl, one thread-pool map per depth level.
    tp = multiprocessing.pool.ThreadPool(processes=concurrency)
    try:
        while current_depth <= depth:
            tty.debug("SPIDER: [depth={0}, max_depth={1}, urls={2}]".format(
                current_depth, depth, len(spider_args))
            )
            # llnl.util.lang.star unpacks each argument tuple into _spider().
            results = tp.map(llnl.util.lang.star(_spider), spider_args)
            spider_args = []
            collect = current_depth < depth
            for sub_pages, sub_links, sub_spider_args in results:
                sub_spider_args = [x + (collect,) for x in sub_spider_args]
                pages.update(sub_pages)
                links.update(sub_links)
                spider_args.extend(sub_spider_args)
            current_depth += 1
    finally:
        tp.terminate()
        tp.join()
    return pages, links
def _urlopen(req, *args, **kwargs):
    """Wrapper for compatibility with old versions of Python."""
    url = req
    # 'req' may be a Request object or a plain URL string.
    try:
        url = url.get_full_url()
    except AttributeError:
        pass
    # We don't pass 'context' parameter because it was only introduced starting
    # with versions 2.7.9 and 3.4.3 of Python.
    # NOTE(review): this drops the SSL context built by read_from_url
    # unconditionally, even on Pythons that support it - confirm intended.
    if 'context' in kwargs:
        del kwargs['context']
    opener = urlopen
    if url_util.parse(url).scheme == 's3':
        # s3:// URLs need spack's custom opener instead of urllib's.
        import spack.s3_handler
        opener = spack.s3_handler.open
    return opener(req, *args, **kwargs)
def find_versions_of_archive(
        archive_urls, list_url=None, list_depth=0, concurrency=32
):
    """Scrape web pages for new versions of a tarball.
    Args:
        archive_urls (str or list or tuple): URL or sequence of URLs for
            different versions of a package. Typically these are just the
            tarballs from the package file itself. By default, this searches
            the parent directories of archives.
        list_url (str or None): URL for a listing of archives.
            Spack will scrape these pages for download links that look
            like the archive URL.
        list_depth (int): max depth to follow links on list_url pages.
            Defaults to 0.
        concurrency (int): maximum number of concurrent requests
    Returns:
        dict mapping each detected version to its archive URL.
    """
    if not isinstance(archive_urls, (list, tuple)):
        archive_urls = [archive_urls]
    # Generate a list of list_urls based on archive urls and any
    # explicitly listed list_url in the package
    list_urls = set()
    if list_url is not None:
        list_urls.add(list_url)
    for aurl in archive_urls:
        list_urls |= spack.url.find_list_urls(aurl)
    # Add '/' to the end of the URL. Some web servers require this.
    additional_list_urls = set()
    for lurl in list_urls:
        if not lurl.endswith('/'):
            additional_list_urls.add(lurl + '/')
    list_urls |= additional_list_urls
    # Grab some web pages to scrape.
    pages, links = spider(list_urls, depth=list_depth, concurrency=concurrency)
    # Scrape them for archive URLs
    regexes = []
    for aurl in archive_urls:
        # This creates a regex from the URL with a capture group for
        # the version part of the URL.  The capture group is converted
        # to a generic wildcard, so we can use this to extract things
        # on a page that look like archive URLs.
        url_regex = spack.url.wildcard_version(aurl)
        # We'll be a bit more liberal and just look for the archive
        # part, not the full path.
        url_regex = os.path.basename(url_regex)
        # We need to add a / to the beginning of the regex to prevent
        # Spack from picking up similarly named packages like:
        #   https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
        #   https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
        #   https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
        #   https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
        url_regex = '/' + url_regex
        # We need to add a $ anchor to the end of the regex to prevent
        # Spack from picking up signature files like:
        #   .asc
        #   .md5
        #   .sha256
        #   .sig
        # However, SourceForge downloads still need to end in '/download'.
        url_regex += r'(\/download)?'
        # PyPI adds #sha256=... to the end of the URL
        url_regex += '(#sha256=.*)?'
        url_regex += '$'
        regexes.append(url_regex)
    # Build a dict version -> URL from any links that match the wildcards.
    # Walk through archive_url links first.
    # Any conflicting versions will be overwritten by the list_url links.
    versions = {}
    for url in archive_urls + sorted(links):
        if any(re.search(r, url) for r in regexes):
            try:
                ver = spack.url.parse_version(url)
                versions[ver] = url
            except spack.url.UndetectableVersionError:
                # Matched an archive-looking link we can't version; skip it.
                continue
    return versions
def get_header(headers, header_name):
    """Looks up a dict of headers for the given header value.
    Looks up a dict of headers, [headers], for a header value given by
    [header_name]. Returns headers[header_name] if header_name is in headers.
    Otherwise, the first fuzzy match is returned, if any.
    This fuzzy matching is performed by discarding word separators and
    capitalization, so that for example, "Content-length", "content_length",
    "conTENtLength", etc., all match. In the case of multiple fuzzy-matches,
    the returned value is the "first" such match given the underlying mapping's
    ordering, or unspecified if no such ordering is defined.
    If header_name is not in headers, and no such fuzzy match exists, then a
    KeyError is raised.
    """
    def _canonical(name):
        # Drop spaces/underscores/dashes and lowercase for fuzzy comparison.
        return re.sub(r'[ _-]', '', name).lower()
    try:
        # Fast path: exact key match.
        return headers[header_name]
    except KeyError:
        wanted = _canonical(header_name)
        for key, value in headers.items():
            if _canonical(key) == wanted:
                return value
        raise
class SpackWebError(spack.error.SpackError):
    """Superclass for Spack web spidering errors.

    Subclasses (e.g. NoNetworkConnectionError below) derive from this so
    callers can catch all web-related failures with one except clause.
    """
class NoNetworkConnectionError(SpackWebError):
    """Raised when an operation can't get an internet connection."""

    def __init__(self, message, url):
        # Include both the underlying error and the URL that failed; the
        # second argument presumably becomes the long/detail message in
        # SpackError — confirm in spack.error.
        super(NoNetworkConnectionError, self).__init__(
            "No network connection: " + str(message),
            "URL was: " + str(url))
        # Keep the failing URL for programmatic access by handlers.
        self.url = url
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Evaluate ball detection and forecast - Deeplearning Exercice 1 - Part 1
.. moduleauthor:: Paul-Emmanuel Sotir
.. See https://perso.liris.cnrs.fr/christian.wolf/teaching/deeplearning/tp.html and https://github.com/PaulEmmanuelSotir/BallDetectionAndForecasting
"""
import os
import sys
import json
import os.path
import argparse
from mlflow import log_metric, log_param, log_artifact
from sklearn.metrics import precision_recall_curve
import sklearn.metrics as metrics
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['evaluate']
__author__ = 'Paul-Emmanuel SOTIR <paul-emmanuel@outlook.com>'
def evaluate(model, dataset, task_type="detect"):
    """Evaluate a pickled model on ``dataset``.

    Args:
        model: path to the model pickle file to load.
        dataset: dataset (or path to it) handed to ``model.predict``.
        task_type: "detect" or "forecast"; selects which metric set is
            computed (both are TODO placeholders for now).

    Returns:
        dict with the dataset, the model's name and the computed metrics.
    """
    # Load the model pickle; the ``model`` argument is its file path.
    # (The original referenced an undefined ``model_path`` name, which
    # raised NameError on every call.)
    with open(model, 'rb') as model_pkl:
        loaded_model = pickle.load(model_pkl)
    # apply model prediction on dataset
    preds = loaded_model.predict(dataset, metadata={})  # TODO: metadata...
    # Evaluate model predictions with type-specific metrics.  ``task_type``
    # replaces a reference to ``args.type``, which was not in scope here.
    if task_type == "detect":
        eval_metrics = {}  # TODO: preds against ground truth
    else:
        eval_metrics = {}  # TODO: preds against ground truth
    # ``eval_metrics`` also avoids shadowing the module-level
    # ``sklearn.metrics as metrics`` import.
    return {"dataset": dataset, "model": loaded_model.name, "metrics": eval_metrics}
def _log_eval_results(results, type, JSON_log, JSON_log_template='./eval_log_template.json'):
print("Storing evaluation results to " + JSON_log)
if not os.path.isfile(JSON_log):
# Copy empty JSON evaluation log template
shutil.copy(JSON_log_template, JSON_log)
# Append results to evaluation log
with open(JSON_log, "w") as f:
log = json.load(f)
log[type].append(results)
json.dump(log, f)
def _main():
    """CLI entry point: parse arguments and evaluate each model in turn."""
    parser = argparse.ArgumentParser(description='Evaluates model(s) ball detections or position forecasts.')
    parser.add_argument('--type', metavar='t', type=str, nargs=1,
                        help='Prediction task performed by model to be evaluated', choices=["detect", "forecast"])
    parser.add_argument('--models', metavar='f', action='extend', type=str, nargs='+',
                        help='Path to ball detection or forecasting model pickle(s) to be evaluated')
    parser.add_argument('--dataset', type=str, nargs=1,
                        help='Path to ball detection or forecasting evaluation dataset')
    parser.add_argument('--output', metavar='o', type=str, nargs='?', default='./eval_log.json',
                        help='Path to JSON evaluation log (created if it doesn\'t exist yet).')
    args = parser.parse_args()
    # nargs=1 yields one-element lists; unwrap them once here.  The
    # original concatenated the raw lists into strings (TypeError) and
    # passed a list as the log's dict key (unhashable).
    task_type = args.type[0]
    dataset = args.dataset[0]
    print('#' * 10 + " RUNNING EVALUATION SCRIPT... " + "#" * 10)
    # str() the model count: concatenating an int to str raises TypeError.
    print("#" * 5 + " " + str(len(args.models)) + " models to be evaluated on \'" + task_type + "\' task.")
    for model in args.models:
        print("#" * 5 + " Evaluation of \'" + model + "\' model running...")
        results = evaluate(model, dataset)
        print("#" * 5 + " EVALUATION DONE.")
        print("> EVAL RESULTS = " + str(results))
        _log_eval_results(results, task_type, args.output)
    print("#" * 10 + " EVALUATION SCRIPT DONE! " + "#" * 10)


if __name__ == "__main__":
    _main()
|
import matplotlib.pyplot as plt
import numpy as np
def parse_result(f):
return [float(a) / 1000 for a in f.read().split("\n")[:-1]]
# Benchmark timings (converted to seconds by parse_result) for each
# library, once with integer group-by keys and once with string keys.
with open("../data/python_bench.txt") as f:
    pandas = parse_result(f)
with open("../data/python_bench_str.txt") as f:
    pandas_str = parse_result(f)
with open("../data/pypolars_bench.txt") as f:
    pypolars = parse_result(f)
with open("../data/pypolars_bench_str.txt") as f:
    pypolars_str = parse_result(f)
with open("../data/datatable_bench.txt") as f:
    datatable = parse_result(f)
with open("../data/datatable_bench_str.txt") as f:
    datatable_str = parse_result(f)

# One subplot per dataset size.  Assumes each bench file holds exactly one
# timing per entry of ``sizes``, in the same order — TODO confirm.
sizes = [1e4, 1e5, 1e6, 1e7]
lib = ["py-polars", "pydatatable", "pandas"]
x = np.arange(1, 4)
fig, ax = plt.subplots(1, len(sizes), figsize=(14, 4))
plt.suptitle("Group by on 10 groups")
plt.subplots_adjust(wspace=0.4)
r = 0
# View the 1-D axes array as a single-row 2-D grid so ax[r, c] works.
ax = ax[None, :]
for i in range(len(pypolars)):
    c = i
    ca = ax[r, c]
    ca.set_title(f"{int(sizes[i]):,} rows")
    # Paired bars per library: solid for int keys, translucent for str keys.
    ca.bar(
        x - 0.25 / 2,
        [pypolars[i], datatable[i], pandas[i]],
        color=["C0", "C1", "C2"],
        width=0.25,
        label="int",
    )
    ca.bar(
        x + 0.25 / 2,
        [pypolars_str[i], datatable_str[i], pandas_str[i]],
        color=["C0", "C1", "C2"],
        width=0.25,
        alpha=0.5,
        label="str",
    )
    ca.set_xticks(x)
    ca.set_xticklabels(lib)
    ca.set_ylabel("duration [seconds]")
    ca.legend()
plt.savefig("img/groupby10_.png")
# Peak process memory per library (values appear to be in GB already —
# no /1000 conversion here, unlike the timing files above).
with open("../data/mem_pandas.txt") as f:
    pandas = [float(a) for a in f.read().split("\n")[:-1]]
with open("../data/mem_datatable.txt") as f:
    datatable = [float(a) for a in f.read().split("\n")[:-1]]
with open("../data/mem_polars.txt") as f:
    pypolars = [float(a) for a in f.read().split("\n")[:-1]]

# Same one-subplot-per-size layout as the duration figure above.
fig, ax = plt.subplots(1, len(sizes), figsize=(14, 4))
plt.suptitle("Memory usage during Groupby")
plt.subplots_adjust(wspace=0.5, bottom=0.2)
r = 0
ax = ax[None, :]
for i in range(len(pypolars)):
    c = i
    ca = ax[r, c]
    ca.set_title(f"{int(sizes[i]):,} rows")
    ca.bar(
        x,
        [pypolars[i], datatable[i], pandas[i]],
        color=["C0", "C1", "C2"],
        alpha=0.75,
        width=0.4,
    )
    ca.set_xticks(x)
    ca.set_xticklabels(lib, rotation=30)
    ca.set_ylabel("process memory [GB]")
plt.savefig("img/groupby10_mem.png")
|
import json
import logging
import os
import random
import time
from emoji import emojize
from telegram import Bot, Update
# Timestamped log format so serverless execution logs are readable.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

# Canned HTTP responses returned by the handlers below; some handlers
# overwrite RESPONSE["OK"]["body"] before returning it.
RESPONSE = {
    "OK": {
        'statusCode': 200,
        'headers': {'Content-type': 'application/json'},
        'body': json.dumps("Ok")
    },
    "ERROR": {
        'statusCode': 400,
        'body': json.dumps("Something went wrong")
    }
}
def configure_telegram():
    """Build a telegram ``Bot`` from the TELEGRAM_TOKEN environment variable.

    Raises:
        NotImplementedError: when the variable is unset, so deployment
            fails loudly instead of running without credentials.
    """
    token = os.environ.get('TELEGRAM_TOKEN')
    if token:
        return Bot(token)
    logging.error(
        'TELEGRAM_TOKEN Not found, it must be set before moving forward.')
    raise NotImplementedError
def populate_excuse():
    """Fill the module-level ``excuseDict`` with canned excuses per topic.

    Keys match the phrases the webhook handler looks for in incoming
    messages; each value is a tuple of four excuse strings.
    """
    global excuseDict
    excuseDict = {
        'missed office': (
            "I missed the office because ozone fluttered my car tire in the air.", "The pressure cooker in my house exploded and scared the maid so I had to stay at home", "I was blocked by the tax department of the housing committee who raided my house.", "Loki Chan was again in my lunch box, and I was very upset to go on time"),
        'forgot birthday': (
            "I remember if I did not eat canteen for lunch!",
            "The weather has made me do this. Seriously. In May, to cry loudly in Mumbai! Are not you born in October?", "what!!! I was doing a favor to you by mistake, and was trying to teach you the art of forgiveness.", "I was kidnapped by the aliens and they wiped me from my memory one day ... So, I will remember tomorrow."),
        'forgot anniversary': (
            "I thought our wedding anniversary was the day we met, on that day we did not make it official.", "I wanted to surprise you - would not you be surprised if I really remembered it, right? Understandable", "I.P.L. Tickets were sold, so I decided that we can celebrate next week.", "I remembered, but I forgot on the way I passed through the Saki Naka, remember?"),
    }
def webhook(event, context):
    """Serverless handler for incoming Telegram updates.

    Expects a POST whose body is a Telegram Update JSON payload; replies
    with a canned message or a random excuse depending on the text.

    Returns:
        RESPONSE["OK"] for handled POSTs, RESPONSE["ERROR"] otherwise.
    """
    bot = configure_telegram()
    logging.info('Event: {}'.format(event))
    populate_excuse()
    if event.get('httpMethod') == 'POST' and event.get('body'):
        logging.info("Message successfully received")
        update = Update.de_json(json.loads(event.get('body')), bot)
        chat_id = update.message.chat_id
        text = update.message.text

        def send_reply(reply, parse_mode=None):
            # Show a typing indicator, pause briefly, then send the reply.
            # Factored out of four duplicated branches.
            bot.send_chat_action(chat_id=chat_id, action="TYPING")
            time.sleep(2)
            if parse_mode:
                bot.send_message(chat_id=chat_id, text=reply, parse_mode=parse_mode)
            else:
                bot.send_message(chat_id=chat_id, text=reply)

        def random_excuse(topic):
            # random.choice replaces arr[random.randint(0, len(arr))]:
            # randint's upper bound is inclusive, so the old expression
            # could raise IndexError one time in len(arr)+1.
            return random.choice(excuseDict[topic])

        if text == '/start':
            reply = """Hello, human! I am an excuse bot, built with Python and the Serverless Framework. I help with excuses {}.\n\
You can take a look at my source code here: https://github.com/vaibhavsingh97/serverless-chatbot-demo.\n\
Found a {}, please drop a tweet to my creator: https://twitter.com/vaibhavsingh97. Happy botting!""".format(emojize("! :laughing:", use_aliases=True), emojize("! :bug:", use_aliases=True))
            send_reply(reply)
        elif "missed" in text and "office" in text:
            send_reply(random_excuse('missed office'))
        elif "forgot" in text and "birthday" in text:
            send_reply(random_excuse('forgot birthday'))
        elif "forgot" in text and "anniversary" in text:
            send_reply(random_excuse('forgot anniversary'))
        elif text == '/help':
            reply = """
    Hello! Excuse bot welcomes you on the telegram!
    Here's the commands:
    - /start - to get know more about Excuse Bot
    - /help - to view help text
    This bot is being worked on, so it may break sometimes. Contact @vaibhavsingh97 on twitter \
or open issue [here](https://github.com/vaibhavsingh97/serverless-chatbot-demo).
    """
            send_reply(reply, parse_mode="MARKDOWN")
        # NOTE: logged (and OK returned) even when no branch matched,
        # mirroring the original behavior.
        logging.info("Message successfully sent")
        # RESPONSE["OK"]["body"] = json.dumps("Message Sent")
        return RESPONSE["OK"]
    return RESPONSE["ERROR"]
def set_webhook(event, context):
    """Register this deployment's public URL as the bot's Telegram webhook."""
    logging.info('Event: {}'.format(event))
    bot = configure_telegram()
    host = event.get('headers').get('Host')
    stage = event.get('requestContext').get('stage')
    url = 'https://{}/{}'.format(host, stage)
    if bot.set_webhook(url):
        RESPONSE["OK"]["body"] = json.dumps("Webhook URL successfully set.")
        return RESPONSE["OK"]
    return RESPONSE["ERROR"]
def get_webhook_info(event, context):
    """Return the bot's current webhook info wrapped in a RESPONSE template."""
    logging.info('Event: {}'.format(event))
    bot = configure_telegram()
    info = bot.get_webhook_info()
    logging.info('Event: {}'.format(info))
    if not info:
        return RESPONSE["ERROR"]
    RESPONSE["OK"]["body"] = str(info)
    return RESPONSE["OK"]
|
#
# Python Testing: Covering Your Bases
# Python Techdegree
#
# Created by Dulio Denis on 1/13/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
# `coverage.py` is an amazing library for
# determining how much of your code is covered
# by your tests and how much still needs to be
# tested. Let's look at how to install it, run it,
# and get handy reports from it.
# Aim for 90% coverage.
# ------------------------------------------------
# Run:
# pip3 install coverage
# then:
# coverage run 1_tests.py
# coverage report
# coverage report -m
# ------------------------------------------------
# Name Stmts Miss Cover Missing
# ------------------------------------------
# 1_tests.py 34 0 100%
# dice.py 50 9 82% 26, 60, 63, 66, 69, 72-75
# ------------------------------------------
# TOTAL 84 9 89%
# ------------------------------------------------
import unittest
from dice import Die, Roll
class DieTest(unittest.TestCase):
    """Unit tests for the Die class."""

    def setUp(self):
        # Fresh six- and eight-sided dice for every test.
        self.d6 = Die(6)
        self.d8 = Die(8)

    def test_creation(self):
        # A d6 records its side count and holds a value from 1 to 6.
        self.assertEqual(self.d6.sides, 6)
        self.assertIn(self.d6.value, range(1, 7))

    def test_add(self):
        # Adding two dice yields a plain integer total.
        self.assertIsInstance(self.d6 + self.d8, int)

    def test_bad_sides(self):
        # A one-sided die is rejected.
        with self.assertRaises(ValueError):
            Die(1)
class RollTest(unittest.TestCase):
    """Unit tests for the Roll class (dice-notation rolls such as '3d6')."""

    def setUp(self):
        self.hand1 = Roll('1d2')
        self.hand3 = Roll('3d6')

    # test upper and lower boundary on these rolls
    def test_lower(self):
        self.assertGreaterEqual(int(self.hand3), 3)

    def test_upper(self):
        self.assertLessEqual(int(self.hand3), 18)

    def test_membership(self):
        # A die carrying the same value as the first result must compare
        # as a member of the roll's results.
        probe = Die(2)
        probe.value = self.hand1.results[0].value
        self.assertIn(probe, self.hand1.results)

    def test_bad_description(self):
        with self.assertRaises(ValueError):
            Roll('2b6')

    def test_small_die(self):
        # NOTE(review): the original comment says '1d2' is "totally
        # acceptable - won't fail", which contradicts assertRaises here —
        # confirm the intended minimum die size.
        with self.assertRaises(ValueError):
            Roll('1d2')  # totally acceptable - won't fail

    def test_adding(self):
        self.assertEqual(self.hand1 + self.hand3,
                         sum(self.hand1.results) + sum(self.hand3.results))


if __name__ == '__main__':
    unittest.main()
|
class MyClass:
    """
    This class does... stuff
    """
    # NOTE: the #jupman-raise / #jupman-strip markers in this file look
    # like jupman exercise-template delimiters (marked code is removed in
    # the student version) — keep them intact.

    def do_something(self):
        """Print a short status message."""
        #jupman-raise
        print("Doing something")
        #/jupman-raise

    def do_something_else(self):
        """Print a message, then call the module-level ``helper`` with 5."""
        #jupman-raise
        print("Doing something else")
        helper(5)
        #/jupman-raise
#jupman-strip
def helper(x):
    """Return ``x`` incremented by one."""
    return x + 1
#/jupman-strip
|
# Read an integer from the user and report its parity.
# (User-facing strings are in Portuguese: "enter a number" / "even" / "odd".)
n = int(input('Digite um numero'))
parity_message = 'Número é Par!' if n % 2 == 0 else 'Número é Impar!'
print(parity_message)
import os
import sys
import csv
import argparse
import logging
import datetime
import itertools
from os.path import join, dirname, abspath, relpath
from cached_property import cached_property
from pyclarity_lims.entities import Step, Queue
from egcg_core import clarity, util
from egcg_core.app_logging import AppLogger, logging_default as log_cfg
from egcg_core.config import cfg
from egcg_core.rest_communication import get_document, patch_entry
sys.path.append(dirname(dirname(abspath(__file__))))
from config import load_config
# File extensions of delivered data files that must be tracked (md5 +
# size recorded, downloads confirmed).
file_extensions_to_check = ['fastq.gz', 'bam', 'g.vcf.gz']
# LIMS workflow / protocol / stage used for download confirmation.
lims_workflow_name = 'PostSeqLab EG 1.0 WF'
lims_protocol_name = 'Data Release EG 2.0 PR'
lims_stage_name = 'Download Confirmation EG 1.0 ST'
class DeliveredSample(AppLogger):
    """One delivered sample: its delivered files and download records.

    Wraps the REST 'samples' document for a sample and accumulates the
    files a customer has downloaded so they can be pushed back to the API.
    """

    def __init__(self, sample_id):
        self.sample_id = sample_id
        # resolve FluidX sample name
        if self.sample_id.startswith('FD'):
            # might be FluidX tube barcode
            arts = clarity.connection().get_artifacts(type='Analyte', udf={'2D Barcode': self.sample_id})
            samples = clarity.connection().get_samples(udf={'2D Barcode': self.sample_id})
            if arts and len(arts) == 1:
                self.sample_id = arts[0].samples[0].name
            elif len(arts) == 0:
                # NOTE(review): assumes the barcode matched at least one
                # sample; samples[0] raises IndexError otherwise — confirm.
                self.sample_id = samples[0].name
            else:
                self.error('Found %s artifacts for FluidX sample %s', len(arts), self.sample_id)
        # Download records accumulated during this run (dicts with
        # file_path/user/date/size keys).
        self.files_downloaded = []

    @cached_property
    def data(self):
        """The sample's REST document; {} when the sample is unknown."""
        d = get_document('samples', where={'sample_id': self.sample_id})
        if not d:
            self.warning('No data found for sample %s', self.sample_id)
            return {}
        return d

    @property
    def sample_folders(self):
        """Delivery folders matching <dest>/<project_id>/<anything>/<sample_id>."""
        return util.find_files(cfg['delivery']['dest'], self.data['project_id'], '*', self.sample_id)

    def _format_deliverable_files(self, files):
        """Build upload payloads (relative path, md5, size) for checkable files.

        Each checkable file must have a sibling '<file>.md5' whose first
        whitespace-separated token is the checksum.
        """
        files_to_upload = []
        for f in files:
            if self._is_checkable(f):
                with open(f + '.md5') as open_file:
                    md5 = open_file.readline().strip().split()[0]
                rel_path = relpath(f, start=cfg['delivery']['dest'])
                files_to_upload.append({'file_path': rel_path, 'md5': md5, 'size': os.stat(f).st_size})
        return files_to_upload

    def upload_files_delivered(self, files):
        """Patch the sample's files_delivered list in the REST API."""
        files_to_upload = self._format_deliverable_files(files)
        patch_entry(
            'samples',
            payload={'files_delivered': files_to_upload},
            id_field='sample_id',
            element_id=self.sample_id,
            update_lists=['files_delivered']
        )
        return files_to_upload

    @property
    def files_delivered(self):
        """files_delivered from the REST doc, computed and uploaded on first miss."""
        files_delivered = self.data.get('files_delivered')
        if not files_delivered:
            files_delivered = []
            for sample_folder in self.sample_folders:
                # _stat_checkable_files returns a dict, so extend() adds
                # its keys, i.e. the file paths.
                files_delivered.extend(self._stat_checkable_files(sample_folder))
            files_delivered = self.upload_files_delivered(files_delivered)
            # delete the cached data so the next access re-fetches the
            # freshly patched document
            del self.__dict__['data']
        return files_delivered

    @property
    def files_already_downloaded(self):
        """Download records already present in the REST document."""
        return self.data.get('files_downloaded', [])

    def add_file_downloaded(self, file_name, user, date_downloaded, file_size):
        """Record one downloaded file (date rendered as dd_mm_YYYY_HH:MM:SS)."""
        self.files_downloaded.append(
            {'file_path': file_name, 'user': user, 'date': date_downloaded.strftime('%d_%m_%Y_%H:%M:%S'),
             'size': file_size}
        )

    def update_files_downloaded(self):
        """Push new download records to the REST API, skipping known ones."""
        # Make sure we're only adding files that were not there before
        new_files_downloaded = list(
            itertools.filterfalse(lambda x: x in self.files_already_downloaded, self.files_downloaded)
        )
        if new_files_downloaded:
            patch_entry(
                'samples',
                payload={'files_downloaded': new_files_downloaded},
                id_field='sample_id',
                element_id=self.sample_id,
                update_lists=['files_downloaded']
            )

    def files_missing(self):
        """Delivered file paths that have no download record (local or remote)."""
        files_downloaded = set(f['file_path'] for f in self.files_downloaded + self.files_already_downloaded)
        return [f['file_path'] for f in self.files_delivered if f['file_path'] not in files_downloaded]

    def is_download_complete(self):
        """True when every delivered file has been downloaded."""
        return len(self.files_missing()) == 0

    @staticmethod
    def _is_checkable(f):
        """True when f ends with one of the tracked file extensions."""
        for ext in file_extensions_to_check:
            if f.endswith(ext):
                return True
        return False

    @classmethod
    def _stat_checkable_files(cls, path):
        """Map file path -> os.stat result for every checkable file under path."""
        all_files = {}
        for root, dirs, files in os.walk(path):
            for f in files:
                if cls._is_checkable(f):
                    all_files[join(root, f)] = os.stat(join(root, f))
        return all_files
class ConfirmDelivery(AppLogger):
    """Cross-references delivered files with aspera download reports and
    confirms fully-downloaded samples in the LIMS."""

    def __init__(self, aspera_report_csv_files=None):
        self.samples_delivered = {}  # sample_id -> DeliveredSample cache
        self.confirmed_samples = []  # sample ids verified fully downloaded
        if aspera_report_csv_files:
            for aspera_report_csv_file in aspera_report_csv_files:
                self.add_files_downloaded(aspera_report_csv_file)
            self.update_samples()

    def get_sample_delivered(self, sample_id):
        """Return a cached DeliveredSample for ``sample_id``, or None when
        the sample has no REST data.

        Bug fix: the original unconditionally returned
        ``self.samples_delivered[sample_id]``, raising KeyError for
        unknown samples even though callers truthiness-test the result.
        """
        if sample_id not in self.samples_delivered:
            s = DeliveredSample(sample_id)
            # Check that the sample exists before caching it
            if s.data:
                self.samples_delivered[sample_id] = s
        return self.samples_delivered.get(sample_id)

    def add_files_downloaded(self, aspera_report):
        """Register every level-1 transfer of an aspera report against its sample.

        The sample id is taken from the third '/'-separated component of
        the transferred file's path.
        """
        confirmed_files = self.parse_aspera_report(aspera_report)
        for fname, user, date, size in confirmed_files:
            parts = fname.split('/')
            # Look the sample up once instead of twice per file.
            sample = self.get_sample_delivered(parts[2]) if len(parts) > 2 else None
            if sample:
                sample.add_file_downloaded(
                    file_name=fname,
                    user=user,
                    date_downloaded=date,
                    file_size=size
                )
            else:
                self.warning('Cannot detect sample name from %s', fname)

    @staticmethod
    def parse_aspera_report(report_csv):
        """Parse an aspera transfer report into (path, user, date, bytes) tuples.

        Only rows with level == '1' are kept; the first three path
        components are stripped from each file path.
        """
        all_files = []
        with open(report_csv) as f:
            # ignore what is before the second blank line
            blank_lines = 0
            while blank_lines < 2:
                if not f.readline().strip():
                    blank_lines += 1
            dict_reader = csv.DictReader(f)
            for line in dict_reader:
                if line['level'] == '1':
                    all_files.append((
                        '/'.join(line['file_path'].split('/')[3:]),
                        line['ssh_user'],
                        datetime.datetime.strptime(line['stopped_at'], '%Y/%m/%d %H:%M:%S'),  # 2016/09/08 16:30:27
                        line['bytes_transferred']
                    ))
        return all_files

    def update_samples(self):
        """Flush each cached sample's new download records to the REST API."""
        for sample in self.samples_delivered.values():
            sample.update_files_downloaded()

    def test_sample(self, sample_id):
        """Return True when every delivered file of ``sample_id`` was downloaded.

        Fully-downloaded samples are added to ``confirmed_samples`` for
        later LIMS confirmation.
        """
        files_missing = self.get_sample_delivered(sample_id).files_missing()
        if files_missing:
            self.info('Sample %s has not been fully downloaded: %s files missing', sample_id, len(files_missing))
            for file_missing in files_missing:
                self.info(' - ' + file_missing)
            return False
        else:
            self.confirmed_samples.append(sample_id)
            return True

    def confirm_download_in_lims(self):
        """Queue confirmed samples to the confirmation stage and complete it."""
        if len(self.confirmed_samples):
            lims = clarity.connection()
            stage = clarity.get_workflow_stage(lims_workflow_name, stage_name=lims_stage_name)
            queue = Queue(lims, id=stage.step.id)
            sample_names_queued = set()
            artifacts_to_confirm = []
            # find all samples that are queued and confirmed
            for a in queue.artifacts:
                assert len(a.samples) == 1
                if a.samples[0].name in self.confirmed_samples:
                    artifacts_to_confirm.append(a)
                sample_names_queued.add(a.samples[0].name)
            # find all samples that are confirmed but not queued
            confirmed_but_not_queued = set(self.confirmed_samples).difference(sample_names_queued)
            if confirmed_but_not_queued:
                samples_to_confirm = clarity.get_list_of_samples(list(confirmed_but_not_queued))
                artifacts = [s.artifact for s in samples_to_confirm]
                # Queue the artifacts there were not already there
                lims.route_artifacts(artifacts, stage_uri=stage.uri)
                artifacts_to_confirm.extend(artifacts)
            # Create a new step from that queued artifact
            s = Step.create(lims, protocol_step=stage.step, inputs=artifacts_to_confirm)
            # Move from "Record detail" window to the "Next Step"
            s.advance()
            # Set the next step to complete
            for action in s.actions.next_actions:
                action['action'] = 'complete'
            s.actions.put()
            s.advance()

    def test_all_queued_samples(self):
        """Run ``test_sample`` for every sample queued at the confirmation stage."""
        lims = clarity.connection()
        stage = clarity.get_workflow_stage(lims_workflow_name, stage_name=lims_stage_name)
        # Queue has the same id as the ProtocolStep
        queue = Queue(lims, id=stage.step.id)
        samples = set()
        artifacts = queue.artifacts
        for a in artifacts:
            samples.update(a.samples)
        for sample in samples:
            self.test_sample(sample.name)
def main(argv=None):
    """CLI: parse aspera reports and/or test samples' download completeness."""
    p = argparse.ArgumentParser()
    p.add_argument('--csv_files', type=str, nargs='+')
    # --samples and --queued_samples are mutually exclusive selection modes.
    group = p.add_mutually_exclusive_group()
    group.add_argument('--samples', type=str, nargs='+')
    group.add_argument('--queued_samples', action='store_true', default=False,
                       help='Test samples queued to the Data Download confirmation step.')
    p.add_argument('--confirm_in_lims', action='store_true', default=False,
                   help='Confirm all successfully tested samples.')
    p.add_argument('--debug', action='store_true')
    args = p.parse_args(argv)
    load_config()
    log_cfg.add_stdout_handler()
    if args.debug:
        log_cfg.set_log_level(logging.DEBUG)
    # Promote the 'sample' config section to the top level.
    cfg.merge(cfg['sample'])
    # Parsing the reports in the constructor also records the downloads.
    cd = ConfirmDelivery(args.csv_files)
    if args.samples:
        for sample in args.samples:
            cd.test_sample(sample)
    elif args.queued_samples:
        cd.test_all_queued_samples()
    if args.confirm_in_lims:
        cd.confirm_download_in_lims()


if __name__ == '__main__':
    main()
|
import os
import sys
import subprocess
from pathlib import Path
from dotenv import load_dotenv
# Repository layout: this module lives one directory below the repo root.
ROOT_DIR = Path(__file__).parent.parent
EXAMPLE_AGENT_DIR = ROOT_DIR / "example_agents"
# Agent directories
ACME_R2D2_AGENT_DIR = EXAMPLE_AGENT_DIR / "acme_r2d2"
ACME_DQN_AGENT_DIR = EXAMPLE_AGENT_DIR / "acme_dqn"
CHATBOT_AGENT_DIR = EXAMPLE_AGENT_DIR / "chatbot"
EVOLUTIONARY_AGENT_DIR = EXAMPLE_AGENT_DIR / "evolutionary_agent"
GH_SB3_AGENT_DIR = EXAMPLE_AGENT_DIR / "gh_sb3_agent"
PREDICTIVE_CODING_AGENT_DIR = (
    EXAMPLE_AGENT_DIR / "predictive_coding" / "free_energy_tutorial"
)
RL_AGENTS_DIR = EXAMPLE_AGENT_DIR / "rl_agents"
RLLIB_AGENT_DIR = EXAMPLE_AGENT_DIR / "rllib_agent"
SB3_AGENT_DIR = EXAMPLE_AGENT_DIR / "sb3_agent"
def run_component_in_dir(
    dir_name,
    venv,
    component_name,
    agentos_cmd=None,
    entry_points=None,
    entry_point_params=None,
    req_file="requirements.txt",
):
    """Install requirements into ``venv``, then run the component in ``dir_name``.

    With no ``entry_points``, the component runs once with its default entry
    point; otherwise it runs once per entry point, optionally with the
    matching parameter string from ``entry_point_params``.
    """
    install_requirements(dir_name, venv, req_file)

    if entry_points is None:
        cmd = get_os_aware_run_command(
            venv, agentos_cmd, component_name, None, ""
        )
        run_cli_command(cmd, dir_name)
        return

    for index, entry_point in enumerate(entry_points):
        params = ""
        if entry_point_params:
            error_msg = (
                ":entry_point_params: must has same len() as :entry_points:"
            )
            assert len(entry_point_params) == len(entry_points), error_msg
            params = entry_point_params[index]
        cmd = get_os_aware_run_command(
            venv, agentos_cmd, component_name, entry_point, params
        )
        run_cli_command(cmd, dir_name)
def run_cli_command(run_cmd, dir_name):
    """Echo then execute ``run_cmd`` in ``dir_name``; raises on non-zero exit."""
    message = "Run the following CLI command: {} with cwd={}.".format(run_cmd, dir_name)
    print(message)
    subprocess.run(run_cmd, shell=True, cwd=dir_name, check=True)
def get_os_aware_run_command(
    venv, agentos_cmd, component_name, entry_point, params
):
    """Build the shell command that activates ``venv`` and runs agentos.

    Windows chains ``activate.bat`` with ``&``; POSIX sources the activate
    script.  ``--entry-point`` is appended only when ``entry_point`` is truthy.
    """
    bin_dir = Path(venv.bin)
    if os.name == "nt":
        command = f"{bin_dir}/activate.bat & agentos "
    else:
        command = f". {bin_dir}/activate; agentos "
    command += f"{agentos_cmd} {component_name} {params} "
    if entry_point:
        command += f"--entry-point {entry_point}"
    return command
def skip_requirements_install():
    """True when AGENTOS_SKIP_REQUIREMENT_INSTALL is the string "True".

    Loads .env first so the variable can be configured per-checkout.
    """
    load_dotenv()
    flag = os.getenv("AGENTOS_SKIP_REQUIREMENT_INSTALL", False)
    return flag == "True"
def install_requirements(dir_name, venv, req_file):
    """pip-install ``req_file`` into ``venv`` from ``dir_name``, unless skipped."""
    if not req_file or skip_requirements_install():
        return
    print(f"Installing {req_file} with cwd {dir_name}")
    pip_cmd = [venv.python, "-m", "pip", "install", "-r", req_file]
    subprocess.run(pip_cmd, cwd=dir_name, check=True)
# Run with subprocess because we installed reqs into venv
def run_code_in_venv(venv, code):
    """Execute ``code`` via ``python -c`` using the venv's interpreter.

    Falls back to the system ``python`` when requirement installation is
    being skipped (i.e. no populated venv is expected).
    """
    if skip_requirements_install():
        interpreter = "python"
    else:
        interpreter = str(Path(venv.bin) / "python")
    run_cmd = f'{interpreter} -c "{code}"'
    print(f"Running the following command: {run_cmd}")
    subprocess.run(run_cmd, shell=True, check=True)
def is_linux():
    """Return True when running on a Linux platform (per ``sys.platform``)."""
    return sys.platform.find("linux") >= 0
|
import math
class Quat:
    """Quaternion with components stored as ``q = [x, y, z, w]`` (scalar last).

    Construction accepts: no arguments (identity), the four keyword
    components ``w, x, y, z``, a single 4-sequence of numbers, or four
    positional numbers — the latter two in x, y, z, w order.
    """

    def __init__(self, *args, **kwargs):
        if len(args) == 0:
            if all(k in kwargs.keys() for k in "wxyz"):
                self.q = [kwargs['x'], kwargs['y'], kwargs['z'], kwargs['w']]
            elif len(kwargs) == 0:
                # No arguments at all: the identity quaternion.
                self.q = [0, 0, 0, 1]
            else:
                # Bug fix: a partial keyword set used to fall through
                # silently, leaving self.q unset and failing later with
                # AttributeError; fail fast instead.
                raise ValueError("Invalid input--I can't make a quaternion from this")
        elif len(args) == 1:
            if len(args[0]) == 4 and all(map(lambda x: type(x) in (float, int), args[0])):
                self.q = list(map(float, args[0]))
            else:
                raise ValueError("Invalid input--I can't make a quaternion from this")
        elif len(args) == 4 and all(map(lambda x: type(x) in (float, int), args)):
            self.q = list(map(float, args))
        else:
            raise ValueError("Invalid input--I can't make a quaternion from this")

    # Component accessors (storage order is x, y, z, w).
    def w(self): return self.q[3]
    def x(self): return self.q[0]
    def y(self): return self.q[1]
    def z(self): return self.q[2]

    def __repr__(self):
        return "({}i + {}j + {}k + {})".format(self.x(), self.y(), self.z(), self.w())

    def __str__(self):
        return self.__repr__()

    def __neg__(self):
        return Quat([-self.q[i] for i in range(4)])

    def __add__(self, other):
        """Component-wise sum with a Quat; a scalar adds to the real part only."""
        if type(other) == Quat:
            return Quat([self.q[i] + other.q[i] for i in range(4)])
        else:
            return Quat(self.q[0:3] + [self.q[3] + other])

    def __radd__(self, other):
        return self + other

    def __mul__(a, b):
        """Hamilton product with another Quat, or scaling by a number."""
        if type(b) == Quat:
            return Quat(
                x=a.w()*b.x() + a.x()*b.w() + a.y()*b.z() - a.z()*b.y(),
                y=a.w()*b.y() + a.y()*b.w() + a.z()*b.x() - a.x()*b.z(),
                z=a.w()*b.z() + a.z()*b.w() + a.x()*b.y() - a.y()*b.x(),
                w=a.w()*b.w() - a.x()*b.x() - a.y()*b.y() - a.z()*b.z(),
            )
        else:
            # Bug fix: this branch referenced ``self``, which does not
            # exist here (the parameters are named a and b), so scalar
            # multiplication always raised NameError.
            return Quat([b * a.q[i] for i in range(4)])

    def __sub__(self, other):
        return self + -other

    def __rsub__(self, other):
        return -self + other

    def __eq__(self, other):
        # q and -q represent the same rotation, so both compare equal.
        return (all(self.q[i] == other.q[i] for i in range(4)) or
                all(-self.q[i] == other.q[i] for i in range(4)))

    def nearly_equal(self, other, epsilon=1e-6):
        """Like __eq__ (including the q == -q identification), with tolerance."""
        return (all(abs(self.q[i] - other.q[i]) < epsilon for i in range(4)) or
                all(abs(self.q[i] + other.q[i]) < epsilon for i in range(4)))

    def conj(self):
        """Return the conjugate (vector part negated)."""
        return Quat(w=self.w(), x=-self.x(), y=-self.y(), z=-self.z())

    def norm(self):
        """Return the Euclidean norm of the 4-vector."""
        return sum(x**2 for x in self.q)**.5

    def normalize(self):
        """Scale self in place to unit norm."""
        n = self.norm()
        self.q = [x/n for x in self.q]

    @staticmethod
    def from_rot_matrix(m):
        """Build a quaternion from a 3x3 rotation matrix (Shepperd's method:
        branch on the largest diagonal term for numerical stability)."""
        t = sum(m[i][i] for i in range(3))
        if t > 0:
            S = 2*(t+1)**.5
            return Quat(
                w=S/4,
                x=(m[2][1] - m[1][2])/S,
                y=(m[0][2] - m[2][0])/S,
                z=(m[1][0] - m[0][1])/S)
        elif (m[0][0] > m[1][1]) and (m[0][0] > m[2][2]):
            S = 2*(1 + m[0][0] - m[1][1] - m[2][2])**.5
            return Quat(
                w=(m[2][1] - m[1][2]) / S,
                x=0.25 * S,
                y=(m[0][1] + m[1][0]) / S,
                z=(m[0][2] + m[2][0]) / S)
        elif m[1][1] > m[2][2]:
            S = 2*(1.0 + m[1][1] - m[0][0] - m[2][2])**.5
            return Quat(
                w=(m[0][2] - m[2][0]) / S,
                x=(m[0][1] + m[1][0]) / S,
                y=0.25 * S,
                z=(m[1][2] + m[2][1]) / S)
        else:
            S = 2*(1.0 + m[2][2] - m[0][0] - m[1][1])**.5
            return Quat(
                w=(m[1][0] - m[0][1]) / S,
                x=(m[0][2] + m[2][0]) / S,
                y=(m[1][2] + m[2][1]) / S,
                z=0.25 * S)

    def to_rot_matrix(self):
        """Return the 3x3 rotation matrix (norm-compensated via s = 1/|q|^2)."""
        s = self.norm()**-2
        qr = self.w()
        qi = self.x()
        qj = self.y()
        qk = self.z()
        return [[1 - 2*s*(qj**2 + qk**2), 2*s*(qi*qj - qk*qr), 2*s*(qi*qk + qj*qr)],
                [2*s*(qi*qj + qk*qr), 1 - 2*s*(qi**2 + qk**2), 2*s*(qj*qk - qi*qr)],
                [2*s*(qi*qk - qj*qr), 2*s*(qj*qk + qi*qr), 1 - 2*s*(qi**2 + qj**2)]]

    @staticmethod
    def from_axis_angle(axis, angle):
        """Build a rotation of ``angle`` radians about ``axis`` (normalized here)."""
        norm = sum(e**2 for e in axis)**.5
        axis = [e/norm for e in axis]
        assert(abs(sum(e**2 for e in axis)**.5 - 1) < 1e-6)
        return Quat(
            w=math.cos(angle/2),
            x=math.sin(angle/2)*axis[0],
            y=math.sin(angle/2)*axis[1],
            z=math.sin(angle/2)*axis[2])

    def to_axis_angle(self):
        """Return (axis, angle); undefined (ZeroDivisionError) for the identity."""
        norm = sum(e**2 for e in self.q[:3])**.5
        axis = [e/norm for e in self.q[:3]]
        angle = 2*math.acos(self.q[3])
        return (axis, angle)

    @staticmethod
    def from_ypr(yaw, pitch, roll):
        """Build from intrinsic yaw (z), pitch (y), roll (x) Euler angles."""
        cy = math.cos(yaw/2)
        cp = math.cos(pitch/2)
        cr = math.cos(roll/2)
        sy = math.sin(yaw/2)
        sp = math.sin(pitch/2)
        sr = math.sin(roll/2)
        return Quat([cy*cp*sr - sy*sp*cr,
                     cy*sp*cr + sy*cp*sr,
                     sy*cp*cr - cy*sp*sr,
                     cy*cp*cr + sy*sp*sr])

    def to_ypr(self):
        """Return (yaw, pitch, roll); assumes a unit quaternion — the asin
        argument is not clamped, so rounding can raise ValueError at the
        gimbal-lock poles."""
        w = self.w()
        x = self.x()
        y = self.y()
        z = self.z()
        roll = math.atan2(2*(w*x + y*z), 1 - 2*(x**2 + y**2))
        pitch = math.asin(2*(w*y - x*z))
        yaw = math.atan2(2*(w*z + x*y), 1 - 2*(y**2 + z**2))
        return (yaw, pitch, roll)

    def log(self):
        """Quaternion logarithm of a unit quaternion; result has w == 0.

        NOTE: divides by sin(theta), so the identity quaternion raises
        ZeroDivisionError (kept as in the original).
        """
        theta = math.acos(self.q[3])
        v = [theta*e/math.sin(theta) for e in self.q[:3]]
        return Quat(v + [0.])

    def exp(self):
        """Quaternion exponential of a pure (w == 0) quaternion.

        NOTE: divides by |v|, so a zero vector part raises
        ZeroDivisionError (kept as in the original).
        """
        theta = sum(e**2 for e in self.q[:3])**.5
        v = [e/theta for e in self.q[:3]]
        return Quat([math.sin(theta)*e for e in v] + [math.cos(theta)])
|
#from sklearn.datasets import load_digits
import os
import json
# Dirty, but it needs to load all models
from sklearn import *
from sklearn.externals import joblib
import sklearn
import argparse
import functools
import numpy as np
import logging
class Scikitjson:
    """Loads a JSON model description and runs it via ``ConstructModel``."""

    def __init__(self):
        # Parsed JSON model description (None until loaded) and, when it
        # came from a file, the source path.
        self.jsonmodel = None
        self.path = None

    def loadFile(self, path):
        """Read and parse the JSON model stored in the file at ``path``."""
        self.jsonmodel = self._innerLoad(path)
        self.path = path

    def loadJSONModel(self, model):
        """ Load model without reading from file"""
        self.jsonmodel = json.loads(model)
        return self.jsonmodel

    def _innerLoad(self, path):
        """Read ``path`` and delegate parsing to ``loadJSONModel``."""
        if not os.path.exists(path):
            raise Exception("file {0} not found".format(path))
        # ``with`` guarantees the handle is closed even if reading or the
        # downstream JSON parsing fails (the original leaked it on error).
        with open(path, 'r') as fs:
            raw_data = fs.read()
        return self.loadJSONModel(raw_data)

    def run(self):
        """Construct and run the loaded model; returns its result generator."""
        # ``is None`` instead of ``== None``: identity test for the sentinel.
        if self.jsonmodel is None:
            raise Exception("Model was not loaded")
        model = ConstructModel(self.jsonmodel, title=self.path)
        return model.run()
class ConstructModel:
def __init__(self, jsonmodel, title=None):
self.jsonmodel = jsonmodel
self.title = title
def _construct_dataset(self, title):
alldatasets = dir(datasets)
if title in alldatasets:
ds = getattr(datasets, title)()
return ds.data, ds.target
def _construct_user_dataset(self, userdataset):
''' Load data from file '''
logging.info("Start to construct user dataset")
filetype = 'default'
if 'path' not in userdataset:
raise Exception("path param is not found")
path = userdataset['path']
if 'data' not in userdataset:
raise Exception(
'data param (start and indexes on training) not found')
else:
dataidx = userdataset['data']
if 'labels' not in userdataset:
print(
'Labels param not found. Default label index will be last index on file')
labelsidx = []
else:
labelsidx = userdataset['labels']
if 'split' not in userdataset:
splitter = ' '
else:
splitter = userdataset['split']
if not os.path.exists(path):
raise Exception("Dataset file not found")
if 'type' in userdataset:
filetype = userdataset['type']
if filetype == 'default':
return self._parse_dataset_by_default(path)
if tiletype == 'csv':
return self._parse_as_csv(path)
else:
raise Exception("This type of dataset format is not supported")
def _parse_dataset_by_default(self, path):
fs = open(path, 'r')
lines = fs.readlines()
fs.close()
X = []
y = []
for line in lines:
res = line.split(splitter)
X.append(res[dataidx[0]: dataidx[1]])
y.extend(res[labelsidx[0]: labelsidx[1]])
log.info("Finished to construct user dataset")
return np.array(X), np.array(y)
def _parse_as_csv(self, path):
if not os.path.exists(path):
raise Exception("Path for loading dataset is not found")
fs = open(path, 'r')
data = fs.read()
fs.close()
return csv.reader(data)
def _split_dataset(self, X, y):
''' Split current dataset on training and testing '''
pass
def _construct_method(self, title):
return self._find_method(title)
def _find_method(self, title):
args = {}
if isinstance(title, dict):
candsplit = title['name'].split('.')
args = title['params']
else:
candsplit = title.split('.')
allmethods = dir(sklearn)
if len(candsplit) > 1:
name = candsplit[0]
#model = sklearn
return functools.reduce(lambda x, a: getattr(x, a), candsplit[1:], getattr(sklearn, name))(**args)
def _random_forest(self):
    # Default estimator used when the config names no method: 100-tree forest.
    from sklearn.ensemble import RandomForestClassifier
    return RandomForestClassifier(n_estimators=100)
def _construct_default_model(self, typetitle):
    """ Build a default estimator from the config's 'type' value.

    Supported types (case-insensitive): classification, regression, clustering.
    Raises Exception on anything else.
    """
    logging.info("Start to construct default model")  # BUG FIX: typo "deafault"
    typetitle = typetitle.lower()
    if typetitle == 'classification':
        return self._random_forest()
    if typetitle == 'regression':
        from sklearn.linear_model import LogisticRegression
        return LogisticRegression(penalty='l2')
    if typetitle == 'clustering':
        from sklearn.cluster import KMeans
        return KMeans()
    # BUG FIX: unknown types previously fell through and returned None,
    # causing an opaque AttributeError later at method.fit().
    raise Exception("Unknown model type: {0}".format(typetitle))
def try_to_save(self, model, path):
    ''' Persist *model* with joblib if a save path was configured.

    A ``None`` path means the config had no "save" key; do nothing.
    '''
    if path is None:  # idiom: identity comparison with None (was ``== None``)
        return
    joblib.dump(model, path, compress=9)
def try_to_load(self, path):
    # Deserialize a model previously stored with joblib.dump.
    return joblib.load(path)
def _predict_and_show(self, method, methodname, data):
result = method.predict(data)
print("Result: {0} ({1})".format(result, methodname))
return result
def run(self):
    ''' Yield the prediction of every model section in the loaded JSON. '''
    if self.title is not None:  # idiom: identity check (was ``!= None``)
        print("Model from {0}\n".format(self.title))
    modelnames = list(self.jsonmodel.keys())
    if not modelnames:  # simplified: was ``len(list(modelnames)) == 0``
        return []
    for key in modelnames:
        yield self.run_inner(key)
def run_inner(self, name):
    '''
    Prepare, train and run the model section *name* from the JSON config.

    Returns the predicted value(s); an empty list when the section is empty,
    or None when there is nothing to predict.
    '''
    logging.info("Start to prepare model {0}".format(name))
    print("Model name: {0} ".format(name))
    typeparams = self.jsonmodel[name]
    if typeparams == {}:
        return []
    items = {key.lower(): value for (key, value) in typeparams.items()}
    # In the case if exists some model.pkl
    # Example of usage:
    # loading.json
    # {
    #     "class1": {
    #         load: "model.pkl",
    #         predict: [1, 2, 3]
    #     }
    # }
    if 'load' in items:
        method = self.try_to_load(items['load'])
        if 'predict' not in items:
            return
        # BUG FIX: _predict_and_show takes (method, methodname, data); the
        # methodname argument was missing, so this call raised TypeError.
        return self._predict_and_show(method, items['load'], items['predict'])
    # In the case if you want experimenting with datasets in sklearn
    X = None
    y = None
    if 'dataset' in items:
        X, y = self._construct_dataset(items['dataset'])
    if 'dataset_file' in items:
        X, y = self._construct_user_dataset(items['dataset_file'])
    if X is None:
        # BUG FIX: without either dataset key the original code reached
        # method.fit(X, y) with undefined X/y (NameError); fail clearly.
        raise Exception("No dataset provided: use 'dataset' or 'dataset_file'")
    methodname = items['method'] if 'method' in items else 'RandomForest'
    # BUG FIX: the model was previously constructed twice (once via a
    # conditional expression, then again in an if/elif); build it once.
    if 'method' in items:
        method = self._construct_method(items['method'])
    elif 'type' in items:
        # Now supported is 'classification' and 'regression'
        method = self._construct_default_model(items['type'])
    else:
        method = self._random_forest()
    method.fit(X, y)
    self.try_to_save(method, items['save'] if 'save' in items else None)
    if 'predict' not in items:
        print("Predict not contains in your model")
        return
    return self._predict_and_show(method, methodname, items['predict'])
def configure_logging(level):
    """Configure the root logger from a level name (case-insensitive).

    ``None`` leaves logging untouched; unknown names fall back to NOTSET.
    """
    if level is None:  # idiom: identity comparison with None
        return
    level = level.lower()
    title = logging.NOTSET
    if level == 'debug':
        title = logging.DEBUG
    if level == 'info':
        title = logging.INFO
    if level == 'warning':
        # BUG FIX: 'warning' previously mapped to logging.ERROR.
        title = logging.WARNING
    if level == 'critical':
        title = logging.CRITICAL
    if level == 'error':
        title = logging.ERROR
    logging.basicConfig(level=title)
def main(path):
    # Entry point: load the JSON model description and run every model in it.
    sj = Scikitjson()
    if path == None:
        # NOTE(review): ``log`` is not defined in this chunk — presumably a
        # module-level logger; confirm (other code here uses ``logging``).
        log.error("Path to JSON model not found")
        return
    sj.loadFile(path)
    print(list(sj.run()))
if __name__ == '__main__':
    # CLI: --json points at the model description, --loglevel sets verbosity.
    parser = argparse.ArgumentParser()
    parser.add_argument('--json', help='path to json model')
    parser.add_argument(
        '--loglevel', help='DEBUG level to show all info messages')
    args = parser.parse_args()
    configure_logging(args.loglevel)
    main(args.json)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
modules
'''
class EqualizedLR(nn.Module):
    """Wrap a layer so its input is rescaled for an equalized learning rate.

    The input is multiplied by the per-layer constant ``sqrt(gain / fan_in)``
    (He-style scaling applied at runtime instead of at initialisation).
    """

    def __init__(self, layer, gain=2):
        super().__init__()
        fan_in = layer.weight[0].numel()
        self.wscale = (gain / fan_in) ** 0.5
        self.layer = layer

    def forward(self, x, gain=2):
        # NOTE(review): the ``gain`` argument is unused here; the scale is
        # fixed at construction time.
        return self.layer(self.wscale * x)
class Blur(nn.Module):
    """Placeholder low-pass filter.

    Currently the identity mapping; kept so the architecture mirrors the
    reference StyleGAN layout where a blur follows each resample step.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class ScaleNoise(nn.Module):
    """Scale a noise map by a single learnable weight (initialised to zero)."""

    def __init__(self):
        super().__init__()
        # A bias-free 1x1 conv over one channel acts as one learnable scalar.
        self.scale = nn.Conv2d(1, 1, 1, bias=False)
        self.scale.weight.data.fill_(0)

    def forward(self, x):
        return self.scale(x)
class AdaptiveInstanceNorm(nn.Module):
    '''
    AdaIN: instance-normalize each feature map, then re-scale (ys) and
    re-shift (yb) it with statistics predicted from the style vector.
    '''
    def __init__(self,
                 channels, style_dim
                 ):
        super().__init__()
        self.norm = nn.InstanceNorm2d(channels, eps=1.e-8)
        # One linear layer produces both the scale and the bias, hence
        # channels * 2 outputs.
        self.linear = EqualizedLinear(style_dim, channels*2)
        # Initialise the scale half of the bias to 1 so the layer starts out
        # as a plain instance norm.
        # Attribute chain: EqualizedLinear -> EqualizedLR -> nn.Linear.
        self.linear.linear.layer.bias.data[:channels] = 1.
    def forward(self, x, style):
        norm = self.norm(x)
        # Broadcast the per-channel style statistics over the spatial dims.
        style = self.linear(style).unsqueeze(2).unsqueeze(3)
        ys, yb = style.chunk(2, 1)
        x = ys * norm + yb
        return x
class MiniBatchStd(nn.Module):
    """Append the batch's (scalar) standard deviation as one extra feature
    map, as in progressive-growing GANs."""

    def forward(self, x):
        batch, _, *spatial = x.shape
        std_map = torch.std(x).expand(batch, 1, *spatial)
        return torch.cat([x, std_map], dim=1)
class EqualizedLinear(nn.Module):
    '''
    equalized fully connected layer
    (weights drawn from N(0, 1); runtime rescaling is done by EqualizedLR)
    '''
    def __init__(self,
                 in_channels, out_channels
                 ):
        super().__init__()
        linear = nn.Linear(in_channels, out_channels)
        linear.weight.data.normal_(0, 1)
        linear.bias.data.fill_(0)
        self.linear = EqualizedLR(linear)
    def forward(self, x):
        x = self.linear(x)
        return x
class EqualizedConv2d(nn.Module):
    '''
    equalized convolutional layer
    (weights drawn from N(0, 1); runtime rescaling is done by EqualizedLR)
    '''
    def __init__(self,
                 in_channels, out_channels, kernel_size, **kwargs
                 ):
        super().__init__()
        conv = nn.Conv2d(
            in_channels, out_channels, kernel_size, **kwargs
        )
        conv.weight.data.normal_(0, 1)
        conv.bias.data.fill_(0.)
        self.conv = EqualizedLR(conv)
    def forward(self, x):
        x = self.conv(x)
        return x
class LayerEpilogue(nn.Module):
    '''
    things to do on the end of each layer:
    add scaled noise -> LeakyReLU -> AdaIN with the style vector
    '''
    def __init__(self,
                 channels, style_dim
                 ):
        super().__init__()
        self.scale_noise = ScaleNoise()
        self.activation = nn.LeakyReLU(0.2)
        self.norm_layer = AdaptiveInstanceNorm(channels, style_dim)
    def forward(self, style, synthesis_out, noise):
        noise = self.scale_noise(noise)
        x = synthesis_out + noise
        x = self.activation(x)
        x = self.norm_layer(x, style)
        return x
class UpsampleBlur(nn.Module):
    """Upsample, then blur.

    (The official implementation is upsample -> conv2d -> blur [1], but the
    paper says the blur is applied "after each upsampling layer" [2].)
    [1] https://github.com/NVlabs/stylegan/blob/master/training/networks_stylegan.py#L520
    [2] https://arxiv.org/pdf/1812.04948.pdf
    """

    def __init__(self, scale_factor, mode='bilinear'):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=scale_factor, mode=mode)
        self.blur = Blur()

    def forward(self, x):
        return self.blur(self.upsample(x))
class BlurDownsample(nn.Module):
    """Blur, then average-pool downsample by ``scale_factor``.

    BUG FIX: ``scale_factor`` was accepted but ignored (the pool size was
    hard-coded to 2); it is now honoured.  All in-file callers pass 2, so
    their behaviour is unchanged.
    """

    def __init__(self, scale_factor):
        super().__init__()
        self.blur = Blur()
        self.downsample = nn.AvgPool2d(scale_factor)

    def forward(self, x):
        x = self.blur(x)
        x = self.downsample(x)
        return x
class ToRGB(nn.Module):
    '''
    to rgb: 1x1 conv down to image channels, followed by tanh
    '''
    def __init__(self,
                 in_channels, out_channels=3
                 ):
        super(ToRGB, self).__init__()
        self.to_rgb = nn.Sequential(
            EqualizedConv2d(in_channels, out_channels, 1),
            nn.Tanh()
        )
    def forward(self, x):
        x = self.to_rgb(x)
        return x
class FromRGB(nn.Module):
    '''
    from rgb: 1x1 conv lifting image channels up to feature channels
    '''
    def __init__(self,
                 out_channels, in_channels=3
                 ):
        super(FromRGB, self).__init__()
        self.from_rgb = EqualizedConv2d(in_channels, out_channels, 1)
    def forward(self, x):
        x = self.from_rgb(x)
        return x
'''
blocks
'''
class Mapping(nn.Module):
    """Mapping network: a stack of equalized FC layers (each followed by
    LeakyReLU) that transforms a latent vector z into style space W."""

    def __init__(self,
                 style_dim,
                 n_layers=8
                 ):
        super().__init__()
        layers = [EqualizedLinear(style_dim, style_dim) for _ in range(n_layers)]
        self.linear = nn.ModuleList(layers)
        self.activation = nn.LeakyReLU(0.2)

    def forward(self, x):
        for fc in self.linear:
            x = self.activation(fc(x))
        return x
class GeneratorBlock(nn.Module):
    '''
    generator block for a resolution
    '''
    def __init__(self,
                 in_channels, out_channels, style_dim, is_first=False
                 ):
        super().__init__()
        self.is_first = is_first
        if is_first:
            # The first (4x4) block consumes the learned constant input
            # directly, so its channel count equals style_dim and there is
            # no upsample/conv0 stage.
            self.le0 = LayerEpilogue(style_dim, style_dim)
            self.conv1 = EqualizedConv2d(style_dim, style_dim, 3, padding=1)
            self.le1 = LayerEpilogue(style_dim, style_dim)
        else:
            self.upsample = UpsampleBlur(2)
            self.conv0 = EqualizedConv2d(in_channels, out_channels, 3, padding=1)
            self.le0 = LayerEpilogue(out_channels, style_dim)
            self.conv1 = EqualizedConv2d(out_channels, out_channels, 3, padding=1)
            self.le1 = LayerEpilogue(out_channels, style_dim)
    def forward(self, style, x):
        if not self.is_first:
            x = self.upsample(x)
            x = self.conv0(x)
        B, _, H, W = x.size()
        # Fresh single-channel noise map for each epilogue, as in StyleGAN.
        noise = torch.randn((B, 1, H, W), device=x.device)
        x = self.le0(style, x, noise)
        x = self.conv1(x)
        noise = torch.randn((B, 1, H, W), device=x.device)
        x = self.le1(style, x, noise)
        return x
class DiscriminatorBlock(nn.Module):
    '''
    discriminator block for a resolution
    '''
    def __init__(self,
                 in_channels, out_channels, is_last=False
                 ):
        super(DiscriminatorBlock, self).__init__()
        if is_last:
            # Final (4x4) block: minibatch-std adds one channel, a 4x4 conv
            # collapses the spatial dims, and a 1x1 conv yields the score.
            self.block = nn.Sequential(
                MiniBatchStd(),
                EqualizedConv2d(in_channels+1, out_channels, 3, padding=1),
                nn.LeakyReLU(0.2),
                EqualizedConv2d(out_channels, out_channels, 4),
                nn.LeakyReLU(0.2),
                EqualizedConv2d(out_channels, 1, 1)
            )
        else:
            # Intermediate block: two 3x3 convs, then blur + downsample.
            self.block = nn.Sequential(
                EqualizedConv2d(in_channels, out_channels, 3, padding=1),
                nn.LeakyReLU(0.2),
                EqualizedConv2d(out_channels, out_channels, 3, padding=1),
                BlurDownsample(2)
            )
    def forward(self, x):
        x = self.block(x)
        return x
'''
Generator
'''
class Generator(nn.Module):
    # Progressive-growing StyleGAN generator: starts at 4x4 and unlocks one
    # resolution block per grow() call, blending old and new RGB outputs
    # with ``alpha`` during the transition phase.
    def __init__(self,
                 style_dim, mapping_layers=8
                 ):
        super().__init__()
        self.mapping = Mapping(style_dim=style_dim, n_layers=mapping_layers)
        # resolution -> [in_channels, out_channels, is_first]
        resl2param = {
            4: [512, 512, True],
            8: [512, 512, False],
            16: [512, 256, False],
            32: [256, 128, False],
            64: [128, 64, False],
            128: [ 64, 32, False]
        }
        self.resl_blocks = nn.ModuleList()
        self.rgb_layers = nn.ModuleList()
        for resl, param in resl2param.items():
            self.resl_blocks.append(
                GeneratorBlock(
                    param[0], param[1], style_dim=style_dim, is_first=param[2])
            )
            self.rgb_layers.append(ToRGB(param[1]))
        self.upsample = UpsampleBlur(2)
        self.train_depth = 0
        self.alpha = 0
        # Learned constant input: StyleGAN replaces the latent synthesis
        # input with a trained 4x4 tensor.
        self.synthesis_input = nn.Parameter(torch.ones(1, style_dim, 4, 4), requires_grad=True)
    def grow(self):
        # Unlock the next resolution block and restart the alpha fade.
        self.train_depth += 1
        self.alpha = 0
    def forward(self, x, phase):
        # phase: 't' = transition (fade in the new block); anything else =
        # stabilization (use the current depth only).
        style = self.mapping(x)
        if phase == 't':
            return self.transition_forward(style)
        else:
            return self.stablization_forward(style)
    def transition_forward(self, style):
        # Blend the upsampled previous-resolution RGB with the new block's.
        # NOTE(review): assumes train_depth >= 1; at depth 0 ``x_pre`` would
        # be referenced before assignment — confirm grow() always precedes a
        # transition pass.
        x = self.synthesis_input.expand(style.size(0), -1, -1, -1)
        for index, block in enumerate(self.resl_blocks):
            x = block(style, x)
            if index == self.train_depth-1:
                x_pre = self.upsample(x)
            if index == self.train_depth:
                break
        rgb_pre = self.rgb_layers[index-1](x_pre)
        rgb_cur = self.rgb_layers[index](x)
        return (1 - self.alpha) * rgb_pre + self.alpha * rgb_cur
    def stablization_forward(self, style):
        x = self.synthesis_input.expand(style.size(0), -1, -1, -1)
        for index, block in enumerate(self.resl_blocks):
            x = block(style, x)
            if index == self.train_depth:
                break
        rgb = self.rgb_layers[index](x)
        return rgb
    def update_alpha(self, delta, phase):
        # Advance the fade-in factor during transition; capped at 1.
        if phase == 't':
            self.alpha = min(1, self.alpha+delta)
'''
Discriminator
'''
class Discriminator(nn.Module):
    # Progressive-growing discriminator: mirrors the Generator, consuming
    # images from the current resolution down to the 4x4 scoring block.
    def __init__(self):
        super(Discriminator, self).__init__()
        self.train_depth = 0
        # resolution -> (in_channels, out_channels, is_last)
        self.resl2param = {
            4 : (512, 512, True),
            8 : (512, 512, False),
            16 : (256, 512, False),
            32 : (128, 256, False),
            64 : ( 64, 128, False),
            128 : ( 32, 64, False)
        }
        self.resolution_blocks = nn.ModuleList()
        self.rgb_layers = nn.ModuleList()
        for resl in self.resl2param:
            param = self.resl2param[resl]
            self.resolution_blocks.append(
                DiscriminatorBlock(
                    param[0], param[1], param[2]
                )
            )
            self.rgb_layers.append(FromRGB(out_channels=param[0]))
        self.downsample = BlurDownsample(2)
        self.alpha = 0
    def grow(self):
        # Unlock the next resolution and restart the alpha fade.
        self.train_depth += 1
        self.alpha = 0
    def forward(self, x, phase):
        # phase: 't' = transition (fade in the new input path), else stable.
        if phase == 't':
            return self.transition_forward(x)
        else:
            return self.stablization_forward(x)
    def transition_forward(self, x):
        # Blend features from the downsampled image (old input path) with
        # the new resolution block's output, then run the remaining blocks
        # from train_depth-1 down to the 4x4 block.
        size = x.size(2)  # NOTE(review): unused local
        x_down = self.downsample(x)
        x_pre = self.rgb_layers[self.train_depth-1](x_down)
        x = self.rgb_layers[self.train_depth](x)
        x_cur = self.resolution_blocks[self.train_depth](x)
        x = (1 - self.alpha) * x_pre + self.alpha * x_cur
        for block in self.resolution_blocks[self.train_depth-1::-1]:
            x = block(x)
        return x.view(x.size(0), -1)
    def stablization_forward(self, x):
        x = self.rgb_layers[self.train_depth](x)
        for block in self.resolution_blocks[self.train_depth::-1]:
            x = block(x)
        return x.view(x.size(0), -1)
    def update_alpha(self, delta, phase):
        # Advance the fade-in factor during transition; capped at 1.
        if phase == 't':
            self.alpha = min(1, self.alpha+delta)
if __name__ == "__main__":
    # test
    # Smoke test: grow both networks four times (4x4 -> 64x64) and run one
    # generator + discriminator transition pass on a batch of 10 styles.
    G = Generator(512)
    D = Discriminator()
    for _ in range(4):
        G.grow()
        D.grow()
    style = torch.randn(10, 512)
    out = G(style, 't')
    out = D(out, 't')
    print(out.size())
|
#
# @lc app=leetcode id=473 lang=python3
#
# [473] Matchsticks to Square
#
# @lc code=start
class Solution:
    def makesquare(self, matchsticks: list[int]) -> bool:
        """LeetCode 473: return True if all matchsticks can form a square,
        each stick used exactly once.

        DFS over an availability bitmask string; sticks are tried longest
        first and equal adjacent unused sticks are skipped to prune
        duplicate branches.
        """
        if not matchsticks:
            return False
        total = sum(matchsticks)  # idiom: sum() instead of a manual loop
        if total % 4 != 0:
            return False
        edge = total // 4
        matchsticks.sort(reverse=True)  # long sticks first -> earlier pruning
        if matchsticks[0] > edge:
            return False  # a stick longer than one edge can never fit
        n = len(matchsticks)
        candidate = '1' * n  # '1' = stick still available

        def DFS(c, l, last, cand):
            # c: completed edges; l: length of the edge being built;
            # last: index of the last stick placed on this edge;
            # cand: availability mask.
            for i in range(last + 1, n):
                if cand[i] == '0':
                    continue
                elif l + matchsticks[i] > edge:
                    continue
                elif i > 0 and cand[i - 1] == '1' and matchsticks[i] == matchsticks[i - 1]:
                    # Skip duplicates: an equal, still-unused stick just
                    # before was not chosen here, so this branch repeats it.
                    continue
                newl = l + matchsticks[i]
                newc = c
                newlast = i
                if newl == edge:
                    newc += 1
                    newl = 0
                    newlast = -1
                newcand = cand[0:i] + '0' + cand[i + 1:]
                if newc == 3:
                    # Three edges complete: the remaining sticks sum exactly
                    # to the fourth edge, so we are done.
                    return True
                if DFS(newc, newl, newlast, newcand):
                    return True
            return False

        return DFS(0, 0, -1, candidate)
|
from server_delta_app import view_sets, models, serializers, services
from rest_framework import viewsets as viewsets_rest_framework, mixins
from rest_framework.permissions import AllowAny
from server_delta_app import services
from django.http import HttpResponse
class CustomerDossierViewSet(view_sets.BaseViewSet):
    """
    View set representing the endpoint for CustomerDossierModel.

    If a ``cpf`` query parameter is supplied and the filtered list comes
    back empty, a background job is queued to fetch that customer's data so
    a later request can succeed.
    """
    serializer_class = serializers.CustomerDossierSerializer
    queryset = models.CustomerDossierModel.objects.all()
    required_scopes = ['read']

    def list(self, request, *args, **kwargs):
        # BUG FIX: args/kwargs were forwarded as two positional values
        # (``super().list(request, args, kwargs)``) instead of being
        # unpacked with * and **.
        response = super().list(request, *args, **kwargs)
        if request.GET.get('cpf', None) is None or len(response.data) > 0:
            return response
        cpf = request.GET.get('cpf', None)
        services.CustomerConsumerService().proccess_in_background(cpf)
        return response
class PublicCustomerDossierViewSet(viewsets_rest_framework.GenericViewSet, mixins.CreateModelMixin):
    """
    Public View set represent the endpoint for CustomerDossierModel
    """
    serializer_class = serializers.CustomerDossierSerializer
    queryset = models.CustomerDossierModel.objects.all()
    # Open endpoint: no authentication required.
    permission_classes = (AllowAny,)
    def create(self, request, *args, **kwargs):
        # Fire-and-forget: queue the dossier build instead of creating the
        # object synchronously.  NOTE(review): returns an empty 200 response
        # rather than the usual 201 with the created resource — confirm
        # clients expect this.
        print('Proccess in background...')
        services.CustomerConsumerService().proccess_in_background(request.data['cpf'])
        return HttpResponse('')
|
#!/usr/bin/env python
import sys
from contextlib import closing
import lxml.html as html # pip install 'lxml>=2.3.1'
from lxml.html.clean import Cleaner
from selenium.webdriver import Firefox # pip install selenium
from werkzeug.contrib.cache import FileSystemCache # pip install werkzeug
url = sys.argv[1] if len(sys.argv) > 1 else "http://stackoverflow.com/q/7947579"

# BUG FIX: ``cache`` was used below without ever being defined; create the
# file-system cache that page sources are stored in.
cache = FileSystemCache('.cachedir', threshold=100000)

# get page
page_source = cache.get(url)
if page_source is None:
    # use firefox to get page with javascript generated content
    with closing(Firefox()) as browser:
        browser.get(url)
        page_source = browser.page_source
    cache.set(url, page_source, timeout=60*60*24*7)  # week in seconds

# extract text
root = html.document_fromstring(page_source)
# remove flash, images, <script>,<style>, etc
Cleaner(kill_tags=['noscript'], style=True)(root)  # lxml >= 2.3.1
# BUG FIX: was a Python-2 print statement; print() parses on both 2 and 3.
print(root.text_content())  # extract text
|
import xlrd
from collections import Counter
def removeDashes(arr):
    '''
    Some 0 values appear as '--' in excel.
    Return:
        - the same list object, mutated in place, with '--' replaced by 0.0
    '''
    # Slice assignment keeps the in-place mutation the original performed.
    arr[:] = [0.0 if value == '--' else value for value in arr]
    return arr
def containsNegative(arr):
    '''Preprocessing data step
    Check if features contain a negative number.
    Return:
        - True if any feature is negative
        - False otherwise
    '''
    # Idiom: any() replaces the manual loop; the enumerate index was unused.
    return any(item < 0 for item in arr)
def categorizePerformance(SH, selectivity):
    """Categorize the performance of a MOF/CoRE structure from SH and selectivity.

    SH above 5 together with selectivity above 15000 is considered ideal.
    The combination SH > 5 with low selectivity does not occur in the data
    set, so it shares category 2 with the ideal structures.

    Return:
        - category number: 0 | 1 | 2
    """
    if SH <= 5:
        # Low SH: split on selectivity (boundary value 15000 -> category 0).
        return 0 if selectivity <= 15000 else 1
    return 2
def gethMOFData():
    '''
    Extract hMOF data from the excel file and partition the data set: every
    8th row becomes test data (~12.5%), the rest training data.
    Features are in order: Porosity, heat C1, heat C2, VSA, LCD
    Return:
        - trainX: training set features
        - trainY: training set categories
        - testX: test set features
        - testY: test set categories
    '''
    workbook = xlrd.open_workbook('data/thiols/hMOF.xlsx', on_demand = True)
    worksheet = workbook.sheet_by_name('Sheet1')
    trainX = []
    trainY = []
    testX = []
    testY = []
    # Row 0 is the header; columns 7-10 and 12 hold the five features.
    for i in range(1, worksheet.nrows):
        features = removeDashes([
            worksheet.cell(i, 7).value,
            worksheet.cell(i, 8).value,
            worksheet.cell(i, 9).value,
            worksheet.cell(i, 10).value,
            worksheet.cell(i, 12).value
        ])
        # Columns 5 (SH) and 6 (selectivity) determine the category label.
        category = categorizePerformance(worksheet.cell(i, 5).value, worksheet.cell(i, 6).value)
        # Rows containing negative feature values are treated as invalid.
        if not containsNegative(features):
            if i % 8 == 0:
                testX.append(features)
                testY.append(category)
            else:
                trainX.append(features)
                trainY.append(category)
    return trainX, trainY, testX, testY
def getAllhMOFData():
    '''
    Extract hMOF data from the excel file without separating into train and test set.
    Features are in order: Porosity, heat C1, heat C2, VSA, LCD
    Return:
        - X: all valid feature rows
        - Y: the matching performance categories
    '''
    workbook = xlrd.open_workbook('data/thiols/hMOF.xlsx', on_demand = True)
    worksheet = workbook.sheet_by_name('Sheet1')
    X = []
    Y = []
    # Row 0 is the header; columns 7-10 and 12 hold the five features.
    for i in range(1, worksheet.nrows):
        features = removeDashes([
            worksheet.cell(i, 7).value,
            worksheet.cell(i, 8).value,
            worksheet.cell(i, 9).value,
            worksheet.cell(i, 10).value,
            worksheet.cell(i, 12).value
        ])
        # Columns 5 (SH) and 6 (selectivity) determine the category label.
        category = categorizePerformance(worksheet.cell(i, 5).value, worksheet.cell(i, 6).value)
        # Rows containing negative feature values are treated as invalid.
        if not containsNegative(features):
            X.append(features)
            Y.append(category)
    return X, Y
def getCOREData():
    '''
    Extract CoRE-MOFs data from the excel file and partition the data set:
    every 18th row becomes test data (~5%), the rest training data.
    Features are in order: Porosity, heat C1, heat C2, VSA, LCD
    Return:
        - trainX: training set features
        - trainY: training set categories
        - testX: test set features
        - testY: test set categories
    '''
    # NOTE(review): the CoRE data is read from the *hMOF* workbook (same
    # Sheet1, different columns 19-26) — confirm the file name is intended.
    workbook = xlrd.open_workbook('data/thiols/hMOF.xlsx', on_demand = True)
    worksheet = workbook.sheet_by_name('Sheet1')
    trainX = []
    trainY = []
    testX = []
    testY = []
    # Row 0 is the header; columns 21-22 and 24-26 hold the five features.
    for i in range(1, worksheet.nrows):
        features = removeDashes([
            worksheet.cell(i, 21).value,
            worksheet.cell(i, 22).value,
            worksheet.cell(i, 24).value,
            worksheet.cell(i, 25).value,
            worksheet.cell(i, 26).value
        ])
        # Columns 19 (SH) and 20 (selectivity) determine the category label.
        category = categorizePerformance(worksheet.cell(i, 19).value,worksheet.cell(i, 20).value)
        # Rows containing negative feature values are treated as invalid.
        if not containsNegative(features):
            if i % 18 == 0:
                testX.append(features)
                testY.append(category)
            else:
                trainX.append(features)
                trainY.append(category)
    return trainX, trainY, testX, testY
|
#
# Copyright (c) 2017 Louie Lu. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
from .. import gate
from ..bitarray import BitArray
def And(a: bool, b: bool) -> bool:
    """Logical AND built from the NAND primitive: And(a, b) = Not(Nand(a, b))."""
    return gate.Not(gate.Nand(a, b))
def And16(a: BitArray, b: BitArray) -> BitArray:
    """Bitwise AND of two 16-bit BitArrays, one gate.And per bit position."""
    assert len(a) == 16
    assert len(b) == 16
    # Idiom: one comprehension replaces sixteen hand-written gate.And calls.
    return BitArray([gate.And(a[i], b[i]) for i in range(16)], endian=False)
|
import json
import discord
import textwrap
import asyncio
#from .cogs.utils.utils import Utils
from .AudioNode import AudioNode
from .AudioPlayer import AudioPlayer
from .Events import TrackStart
class AudioManager:
    """
    Class of the AudioManager section.
    This is main class and controls all stuff like joining channels, leaving channels, and launching nodes.
    """
    def __init__(self, bot, nodes, shards=1):
        # bot: discord client; nodes: list of node config dicts with "host",
        # "password" and "port" keys; shards: shard count passed to each node.
        self.bot = bot
        bot.add_listener(self.on_socket_response)
        self.nodes = {}      # host -> launched AudioNode
        self.players = {}    # guild id -> AudioPlayer
        self._nodes = nodes
        self.shards = shards
        self.session = self.bot.session
        self.utils = self.bot.utils
    def get_player(self, ctx):
        # Return the guild's AudioPlayer, creating one on the first
        # configured node if the guild has none yet.
        player = self.players.get(ctx.guild.id)
        if player is None:
            player = AudioPlayer(ctx, self, self.nodes.get(self._nodes[0]["host"]))
            self.players[ctx.guild.id] = player
        return player
    async def get_tracks(self, player, search: str):
        # Query the node's /loadtracks REST endpoint (Lavalink-style API).
        async with self.session.get(f"http://{player.node.host}:2333/loadtracks?identifier={search}", headers={"Authorization": player.node.password}) as resp:
            tracks = await resp.json()
        return tracks
    async def on_socket_response(self, data):
        # Forward Discord VOICE_SERVER_UPDATE gateway events to the node so
        # it can take over the voice connection.
        if data["t"] == "VOICE_SERVER_UPDATE":
            payload = {
                "op": "voiceUpdate",
                "guildId": data["d"]["guild_id"],
                "sessionId": self.bot.get_guild(int(data["d"]["guild_id"])).me.voice.session_id,
                "event": data["d"]
            }
            await self.nodes.get(self._nodes[0]["host"]).send(**payload)
    async def connect(self, ctx):
        # Ask the Discord gateway (opcode 4) to join the author's channel.
        await self.bot.ws.send(json.dumps({
            "op": 4,
            "d": {
                "guild_id": ctx.guild.id,
                "channel_id": ctx.author.voice.channel.id,
                "self_mute": False,
                "self_deaf": False
            }
        }))
        self.get_player(ctx).is_connected = True
    async def leave(self, ctx):
        # Disconnect from voice (channel_id None) and drop the guild player.
        await self.bot.ws.send(json.dumps({
            "op": 4,
            "d": {
                "guild_id": ctx.guild.id,
                "channel_id": None,
                "self_mute": False,
                "self_deaf": False
            }
        }))
        try:
            del self.players[ctx.guild.id]
        except KeyError:
            pass
    async def audio_task(self):
        # Launch every configured node, then start the event listener task.
        for i in range(len(self._nodes)):
            node = AudioNode(self, self.shards, self._nodes[i]["host"], self._nodes[i]["password"], self._nodes[i]["port"])
            await node.launch()
            self.nodes[node.host] = node
        self.bot.loop.create_task(self.node_event_task())
    async def node_event_task(self):
        # Register playback event handlers on every launched node.
        for node in self.nodes.values():
            @node.ee.on("track_start")
            async def on_track_start(e):
                print("Music: track_start event triggered.")
                ctx = e.player.ctx
                #print(dir(e))
                # f keeps the event (for player access); e is rebound to the track.
                f = e
                e = e.track
                #print(dir(e))
                em = discord.Embed(color=0x00ff00, title=f"Music Player")
                #em.description = f"**{e.track.title}**"
                em.set_footer(text=e.requester.name, icon_url=e.requester.avatar_url)
                # Track length arrives in milliseconds; split into h/m/s.
                second = e.length / 1000
                minute, second = divmod(second, 60)
                hour, minute = divmod(minute, 60)
                #minutes, seconds = divmod(e.track.duration, 60)
                #em.add_field(name='Length', value=f"{str(minutes)}:{str(seconds).replace('0', '00').replace('1', '01').replace('2', '02').replace('3', '03').replace('4', '04').replace('5', '05').replace('6', '06').replace('7', '07').replace('8', '08').replace('9', '09')}")
                if hour:
                    length = f"{int(hour)}:{self.utils.format_time(minute)}:{self.utils.format_time(second)}"
                else:
                    length = f"{self.utils.format_time(minute)}:{self.utils.format_time(second)}"
                playing_panel = textwrap.dedent(f"""
                I started playing the music! {self.bot.get_emoji(511089456196091916)}
                :musical_note: **Song**
                {e.title}
                {self.bot.get_emoji(430340802879946773)} **Requested By**
                {str(ctx.author)}
                :timer: **Length**
                {length}
                :loud_sound: **Volume**
                {f.player.volume}
                :1234: **Queue Position**
                {len(f.player.queue)}
                """)
                #em.add_field(name='Length', value=length)
                #em.add_field(name='Volume', value=f"{self.utils.get_lines(e.player.volume)} {e.player.volume}%")
                em.description = playing_panel
                #em.add_field(name='Position in Queue', value=len(e.player.queue))
                msg = await ctx.send(embed=em, edit=False)
                try:
                    await msg.add_reaction("\U000023f8") # Pause
                    await msg.add_reaction("\U000025b6") # Play/Resume
                    await msg.add_reaction("\U000023f9") # Stop
                    await msg.add_reaction("\U0001f501") # Repeat
                    await msg.add_reaction("\U00002753") # Help
                except discord.Forbidden:
                    return await ctx.send("I don't have Add Reaction permissions, so I can't show my awesome playing panel!")
                try:
                    # Reaction-driven control loop for the playing panel.
                    while f.player.playing:
                        if len(ctx.author.voice.channel.members) <= 1:
                            return await ctx.send(f"Guys? Seriously? Well, guess I'm out too. {self.bot.get_emoji(517142988904726562)}")
                        reaction, user = await self.bot.wait_for("reaction_add", check=lambda r, u: u.id == ctx.author.id and r.emoji in "⏸▶⏹🔁❓")
                        if reaction.emoji == "⏸":
                            await e.pause()
                            try:
                                await msg.remove_reaction("\U000023f8", user)
                            except:
                                pass
                        elif reaction.emoji == "▶":
                            await e.resume()
                            await msg.remove_reaction("\U000025b6", user)
                        elif reaction.emoji == "⏹":
                            e.player.queue.clear()
                            await e.stop()
                            await msg.delete()
                        elif reaction.emoji == "🔁":
                            e.repeating = not e.repeating
                            await msg.remove_reaction("\U0001f501", user)
                        elif reaction.emoji == "❓":
                            await msg.remove_reaction("\U00002753", user)
                            embed = discord.Embed(color=0x00ff00, title='Music Player Help')
                            embed.description = "**What do these magical buttons do?** \n\n:pause_button: Pauses the current song.\n:arrow_forward: Resumes any currently paused song.\n:stop_button: Stops the playing song and deletes this message.\n:repeat: Starts the current song from the beginning.\n:question: Shows this message."
                            embed.set_footer(text='This will revert back in 15 seconds.')
                            await msg.edit(embed=embed)
                            await asyncio.sleep(15)
                            await msg.edit(embed=em)
                except discord.Forbidden:
                    pass # No need to send
                # except Exception as e:
                #     return await ctx.send(f"An unknown error occured. Details: \n\n```{e}```")
                # This made shit way too spammy, can't think of a good way to avoid it, rather just remove it.
            @node.ee.on("track_end")
            async def on_track_end(event):
                print("Music: track_end event triggered.")
                if event.reason == "REPLACED":
                    return # Return because if we play then the queue will be fucked.
                elif event.reason == "FINISHED":
                    if event.player.repeating:
                        # Replay the same track and re-emit track_start for the panel.
                        await event.player.node.send(op="play", guildId=str(event.player.ctx.guild.id), track=event.player.current.track)
                        return event.player.node.ee.emit("track_start", TrackStart(event.player, event.player.current))
                    await event.player.play()
            @node.ee.on("queue_concluded")
            async def on_queue_concluded(event):
                print("Music: queue_concluded event triggered.")
                await self.leave(event.player.ctx)
|
import json
import os
from api import get_location_top_players
def get_country_top_players(country):
    # Fetch the location leaderboard and map the country name to its player
    # tags.  NOTE(review): '#' is replaced with '%' — presumably a shortcut
    # related to URL-encoding ('#' encodes as '%23'); confirm the consumer
    # expects this exact form.
    response = get_location_top_players(country['id'])
    return {
        country['name']: [player['tag'].replace('#', '%') for player in response['items']]
    }
def get_global_top_players_api():
    '''Fetch the top players for every country in seeds/countries.json and
    cache each result as seeds/top_players/<country>.json.

    Returns 'Success' when every country was written; on any error prints it
    and returns 'Failure' (deliberately best-effort).
    '''
    try:
        with open('seeds/countries.json', 'r') as countries_data:
            countries = json.load(countries_data)
        for country in countries:
            with open('seeds/top_players/' + country['name'] + '.json', 'w') as top_players:
                json.dump(get_country_top_players(country), top_players)
                print(country['name'] + ' Done')
        return 'Success'
    except Exception as e:
        print(e)
        return 'Failure'
def get_global_top_players():
    '''Read every cached seeds/top_players/*.json file and return their
    contents as a list; on any error, print it and return what was collected
    so far (best-effort).'''
    collected = []
    try:
        for entry in os.listdir('seeds/top_players'):
            if entry.endswith('.json'):
                with open('seeds/top_players/' + entry, 'r') as handle:
                    collected.append(json.load(handle))
        return collected
    except Exception as error:
        print(error)
        return collected
if __name__ == '__main__':
    # Ad-hoc check: dump whatever top-player data is cached locally.
    print(get_global_top_players())
|
import torch
import torch.nn.functional as F
from agent.td3 import TD3
class TD3MT(TD3):
    """Multi-task TD3 agent.

    Per-task states/actions are zero-padded to a shared (state_dim,
    action_dim) so one actor/critic serves every environment.  Training can
    run offline (distilling from a per-task ``teacher``) or online
    (standard TD3 targets plus the teacher's Q1 as an auxiliary actor loss).
    """
    def __init__(self,
                 state_dim,
                 action_dim,
                 max_action,
                 num_env,
                 discount=0.99,
                 tau=0.005,
                 policy_noise=0.2,
                 noise_clip=0.5,
                 policy_freq=2,
                 cuda_index=None
                 ):
        super().__init__(state_dim, action_dim, max_action,
                         discount, tau,
                         policy_noise, noise_clip,
                         policy_freq, cuda_index)
        self.it = 0
        # One update counter per environment (policy_freq applies per task).
        self.total_it = [0 for _ in range(num_env)]
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Separate optimizers for the online phase so the offline (base TD3)
        # optimizer state is left untouched.
        self.actor_optimizer_online = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic_optimizer_online = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
    def save(self, filename):
        """Save base TD3 state plus the online optimizers."""
        super().save(filename)
        torch.save(self.actor_optimizer_online.state_dict(), filename + "_actor_optimizer_online.pt")
        torch.save(self.critic_optimizer_online.state_dict(), filename + "_critic_optimizer_online.pt")
    def load(self, filename):
        """Restore base TD3 state plus the online optimizers."""
        super().load(filename)
        self.actor_optimizer_online.load_state_dict(torch.load(filename + "_actor_optimizer_online.pt"))
        self.critic_optimizer_online.load_state_dict(torch.load(filename + "_critic_optimizer_online.pt"))
    def pad_state(self, state):
        """Zero-pad a batch of task states up to the shared state_dim."""
        return torch.cat([state,
                          torch.zeros(state.shape[0], self.state_dim - state.shape[1]).to(self.device)],
                         dim=1)
    def pad_action(self, action):
        """Zero-pad a batch of task actions up to the shared action_dim."""
        return torch.cat([action,
                          torch.zeros(action.shape[0], self.action_dim - action.shape[1]).to(self.device)],
                         dim=1)
    def train_mt(self, idx, teacher, replay, batch_size=100, is_offline=True):
        """One multi-task update for environment *idx*.

        Offline: regress the padded critic onto the teacher's Q values and
        the actor onto the teacher's Q1.  Online: standard TD3 targets on
        the padded spaces plus the teacher's Q1 as an auxiliary actor loss.
        Returns [actor_loss or None, critic_loss] as numpy values.
        """
        self.total_it[idx] += 1
        state, action, next_state, reward, not_done = replay.sample(batch_size)
        state_dim_org = state.shape[1]
        action_dim_org = action.shape[1]
        with torch.no_grad():
            state_pad = self.pad_state(state)
            action_pad = self.pad_action(action)
            if is_offline:
                teacher_q1, teacher_q2 = teacher.critic(state, action)
            else:
                next_state_pad = self.pad_state(next_state)
                next_action = self.actor_target(next_state_pad)
                # Target policy smoothing (TD3): clipped *Gaussian* noise.
                # BUG FIX: was torch.rand_like (uniform in [0, 1)), which
                # biased the smoothing noise to be strictly non-negative.
                noise = (
                    torch.randn_like(next_action) * self.policy_noise
                ).clamp(-self.noise_clip, self.noise_clip)
                next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
                # Only the task's real action dims are smoothed; re-pad after.
                next_action = next_action[:, :action_dim_org]
                next_action_pad = self.pad_action(next_action)
                target_q1, target_q2 = self.critic_target(next_state_pad, next_action_pad)
                # Clipped double-Q target.
                target_q = torch.min(target_q1, target_q2)
                target_q = reward + not_done * self.discount * target_q
        current_q1, current_q2 = self.critic(state_pad, action_pad)
        if is_offline:
            # Distillation: match the teacher's Q estimates directly.
            critic_loss = F.mse_loss(current_q1, teacher_q1) + F.mse_loss(current_q2, teacher_q2)
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
        else:
            critic_loss = F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)
            self.critic_optimizer_online.zero_grad()
            critic_loss.backward()
            self.critic_optimizer_online.step()
        loss = [None, critic_loss.cpu().data.numpy()]
        # Delayed policy updates: every step offline, every policy_freq online.
        if is_offline or self.total_it[idx] % self.policy_freq == 0:
            current_action = self.actor(state_pad)[:, :action_dim_org]
            current_action_pad = self.pad_action(current_action)
            actor_loss_t = -teacher.critic.Q1(state, current_action)
            if is_offline:
                actor_loss = actor_loss_t.mean()
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()
            else:
                actor_loss = -self.critic.Q1(state_pad, current_action_pad)
                # Own critic's objective plus the teacher's, equally weighted.
                actor_loss = 1.0 * actor_loss + 1.0 * actor_loss_t
                actor_loss = actor_loss.mean()
                self.actor_optimizer_online.zero_grad()
                actor_loss.backward()
                self.actor_optimizer_online.step()
                self.update_target_network()
            loss[0] = actor_loss.cpu().data.numpy()
        return loss
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This script builds and tests everything that rules_hdl has.
This is what the CI builds do, and the script also works locally. Run it with
`tools/test_everything.py`.'''
import os
import subprocess
import sys
# Every Bazel target pattern exercised by CI: this workspace plus all
# external repositories that rules_hdl pins.
ALL_TARGETS = [
    '//...',
    '@at_clifford_icestorm//...',
    '@at_clifford_yosys//...',
    '@com_github_westes_flex//...',
    '@com_github_yosyshq_nextpnr//...',
    '@com_github_yosyshq_prjtrellis//...',
    '@com_github_yosyshq_prjtrellis_db//...',
    '@com_google_skywater_pdk//...',
    '@com_google_skywater_pdk_sky130_fd_sc_ms//...',
    '@com_google_skywater_pdk_sky130_fd_sc_ls//...',
    '@com_google_skywater_pdk_sky130_fd_sc_lp//...',
    '@com_google_skywater_pdk_sky130_fd_sc_hvl//...',
    '@com_google_skywater_pdk_sky130_fd_sc_hs//...',
    '@com_google_skywater_pdk_sky130_fd_sc_hdll//...',
    '@com_google_skywater_pdk_sky130_fd_sc_hd//...',
    '@com_google_skywater_pdk_sky130_fd_pr//...',
    '@com_google_skywater_pdk_sky130_fd_io//...',
    '@com_icarus_iverilog//...',
    '@com_opencircuitdesign_magic//...',
    '@com_opencircuitdesign_netgen//...',
    '@edu_berkeley_abc//...',
    '@net_sourceforge_ngspice//...',
    '@net_zlib//...',
    '@org_fftw//...',
    '@org_gnu_bison//...',
    '@org_gnu_gperf//...',
    '@org_gnu_m4//...',
    '@org_gnu_readline//...',
    '@org_sourceware_bzip2//...',
    '@org_sourceware_libffi//...',
    '@org_tuxfamily_eigen//...',
    '@pybind11//...',
    '@tk_tcl//...',
]
# Build first, then test, aborting on the first failing invocation.
for action in ['build', 'test']:
    extra_args = [
        os.environ.get('EXTRA_BAZEL_ARGS', ''),
        os.environ.get('EXTRA_%s_BAZEL_ARGS' % action.upper(), ''),
    ]
    # Drop unset extras: the previous version joined empty strings into the
    # command line, producing stray double spaces in the printed command.
    command = ' '.join(['bazel', action]
                       + [arg for arg in extra_args if arg]
                       + ALL_TARGETS)
    print(command)
    # shell=True so EXTRA_*_BAZEL_ARGS may hold several space-separated flags.
    return_code = subprocess.call(command, shell=True)
    if return_code != 0:
        sys.exit(return_code)
|
from django.db import models
# Create your models here.
class Evaluation(models.Model):
    """Abstract base for review records: free-form comment plus an integer score.

    Concrete subclasses add the ForeignKey pointing at the reviewed object.
    Score bounds are not enforced here — TODO confirm the intended range.
    """
    comment = models.TextField()
    score = models.IntegerField()
    class Meta:
        # Abstract: no table is created for this model itself.
        abstract = True
class StoreEveluation(Evaluation):
    """Evaluation attached to a store.

    NOTE(review): the class name keeps the original "Eveluation" misspelling
    because renaming would require a migration and break existing references.
    """
    evaluation = models.ForeignKey(
        'stores.Store',
        related_name='store_eveluations',
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return str(self.evaluation)
class PAPPEveluation(Evaluation):
    """Evaluation attached to a product aquatic plant package."""
    evaluation = models.ForeignKey(
        'productAquaticPlantPackages.ProductAquaticPlantPackage',
        related_name='PAPP_eveluations',
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return str(self.evaluation)
from flopz.arch.register import Register
import enum
class VleRegisterType(enum.IntEnum):
    """Register categories of the e200z0 VLE core (see E200Z0.pdf)."""
    GENERAL_PURPOSE = 0
    EXCEPTION_HANDLING_AND_CONTROL = 1
    PROCESSOR_CONTROL = 2
    DEBUG = 3
    MEMORY_MANAGEMENT = 4
    CACHE = 5
class VleGpRegister(Register):
    """A VLE general-purpose register; the stored name is prefixed with "r"."""

    def __init__(self, name: str, val: int, reg_type: enum.IntEnum = None):
        """Create the register.

        :param name: register number/name; stored as "r<name>".
        :param val: register index value.
        :param reg_type: optional type override; defaults to GENERAL_PURPOSE.
        """
        # Honor an explicitly supplied reg_type: previously the parameter was
        # accepted but silently overridden with GENERAL_PURPOSE.
        if reg_type is None:
            reg_type = VleRegisterType.GENERAL_PURPOSE
        super(VleGpRegister, self).__init__(name=f"r{name}", val=val,
                                            reg_type=reg_type)
from solutions import input_list # you can adjust the file name from solutions(.py) to something else
import unittest
import random
class TestInputList(unittest.TestCase):
    """Tests for solutions.input_list: parsing delimited strings into int lists."""

    def test_simple_one_digit(self):
        input_string = "1,2,3,4,5"
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = [1, 2, 3, 4, 5]
        self.assertEqual(expected, actual)

    def test_simple_one_digit_negative_numbers(self):
        input_string = "1,-2,3,-4,5"
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = [1, -2, 3, -4, 5]
        self.assertEqual(expected, actual)

    def test_simple_column_delim(self):
        input_string = "1;2;3;4;5"
        delimiter = ";"
        actual = input_list(input_string, delimiter)
        expected = [1, 2, 3, 4, 5]
        self.assertEqual(actual, expected)

    def test_simple_multiple_digits(self):
        input_string = "1,23,456,78,9"
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = [1, 23, 456, 78, 9]
        self.assertEqual(actual, expected)

    def test_single_element_one_digit(self):
        input_string = "1"
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = [1]
        self.assertEqual(actual, expected)

    def test_single_element_multiple_digits(self):
        input_string = "987"
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = [987]
        self.assertEqual(actual, expected)

    def test_empty_string(self):
        input_string = ""
        delimiter = ","
        actual = input_list(input_string, delimiter)
        expected = []
        self.assertEqual(actual, expected)

    def test_random_positive_numbers(self):
        # randint requires int bounds: 1e6 is a float and raises TypeError
        # on Python 3.11+ (deprecated earlier), so use 10**6.
        rand_list = [random.randint(0, 10**6) for i in range(100)]
        delimiter = ","
        input_string = delimiter.join(map(str, rand_list))
        expected = rand_list
        actual = input_list(input_string, delimiter)
        self.assertEqual(actual, expected)

    def test_random_positive_and_negative_numbers(self):
        # Same fix as above: integer bounds instead of the float 1e6.
        rand_list = [random.randint(-10**6, 10**6) for i in range(100)]
        delimiter = ","
        input_string = delimiter.join(map(str, rand_list))
        expected = rand_list
        actual = input_list(input_string, delimiter)
        self.assertEqual(actual, expected)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from flask import Flask, render_template, flash, request, session,redirect
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
from werkzeug.utils import secure_filename
import time
import os
import json
# Parameter files live next to the app: webapp.json stores the wizard
# selections, settings.json the hyperparameter search space.
json_path = os.path.join(os.path.dirname("./"), 'webapp.json')
json_path_settings = os.path.join(os.path.dirname("./"), 'settings.json')
app = Flask(__name__)
# Whether a log file has been rendered into templates/logfile.html yet.
log_flag = False
#creating routes
@app.route('/',methods=['POST', 'GET'])
def index():
    """Landing page hosting a multi-step configuration wizard.

    Each POST carries exactly one step's form.  Which step was submitted is
    detected via nested try/except: the first form-field lookup that does not
    raise identifies the step (a deliberate, if unusual, dispatch pattern —
    do not "simplify" without retesting all forms).  Selections are persisted
    into webapp.json and thePercent drives the template's progress bar.
    """
    thePercent = 0
    if request.method == "POST":
        try:
            # Step 1 (25%): dataset file name and label column.
            f_name = request.form['filename']
            label_col = request.form['label_col']
            with open(json_path,'r',encoding='utf-8') as d_file:
                para = json.load(d_file)
            para['filename'] = f_name
            para['label_col'] = label_col
            w_file = open(json_path, "w",encoding='utf-8')
            json.dump(para, w_file)
            w_file.close()
            print('Load Dataset Done:')
            print(f_name,label_col)
            thePercent = 25
        except:
            try:
                # Step 3 (75%): automatic feature-selection settings.
                feature_num = request.form['feature_num']
                model_type_fs = request.form['model_type_fs']
                algo_fs = request.form.getlist('algo_fs')
                with open(json_path,'r',encoding='utf-8') as d_file:
                    para = json.load(d_file)
                para['autoFS']['feature_num'] = feature_num
                para['autoFS']['model_type_fs'] = model_type_fs
                para['autoFS']['algo_fs'] = algo_fs
                w_file = open(json_path, "w",encoding='utf-8')
                json.dump(para, w_file)
                w_file.close()
                print('autoFS Settings Done:')
                print(feature_num,model_type_fs,algo_fs)
                thePercent = 75
            except:
                try:
                    # Step 2 (50%): preprocessing settings.
                    encode_band = request.form['encode_band']
                    model_type_pp = request.form['model_type_pp']
                    winsorizer = request.form.getlist('winsorizer')
                    sparsity = request.form['sparsity']
                    cols = request.form['cols']
                    scaler = request.form.getlist('scaler')
                    low_encode = request.form.getlist('low_encode')
                    high_encode = request.form.getlist('high_encode')
                    with open(json_path,'r',encoding='utf-8') as d_file:
                        para = json.load(d_file)
                    para['autoPP']['encode_band'] = encode_band
                    para['autoPP']['model_type_pp'] = model_type_pp
                    para['autoPP']['winsorizer'] = winsorizer
                    para['autoPP']['sparsity'] = sparsity
                    para['autoPP']['cols'] = cols
                    para['autoPP']['scaler'] = scaler
                    para['autoPP']['low_encode'] = low_encode
                    para['autoPP']['high_encode'] = high_encode
                    w_file = open(json_path, "w",encoding='utf-8')
                    json.dump(para, w_file)
                    w_file.close()
                    print('autoPP Settings Done:')
                    print(encode_band,model_type_pp,scaler,winsorizer,sparsity,cols,low_encode,high_encode)
                    thePercent = 50
                except:
                    try:
                        # Step 4 (100%): cross-validation/model settings.
                        model_type_cv = request.form['model_type_cv']
                        method_cv = request.form['method_cv']
                        algo_cv = request.form.getlist('algo_cv')
                        with open(json_path,'r',encoding='utf-8') as d_file:
                            para = json.load(d_file)
                        para['autoCV']['model_type_cv'] = model_type_cv
                        para['autoCV']['method_cv'] = method_cv
                        para['autoCV']['algo_cv'] = algo_cv
                        w_file = open(json_path, "w",encoding='utf-8')
                        json.dump(para, w_file)
                        w_file.close()
                        print('autoCV Settings Done:')
                        print(model_type_cv,method_cv,algo_cv)
                        thePercent = 100
                    except:
                        try:
                            # Run button: launch the pipeline script by typing
                            # into a fresh cmd window (Windows-only; relies on
                            # win32com SendKeys).
                            run_btn =request.form['run_btn']
                            import time
                            import win32com.client as comclt
                            shell = comclt.Dispatch("WScript.Shell")
                            shell.run("cmd.exe")
                            time.sleep(1)
                            shell.SendKeys("webapp_script.py {ENTER}")
                        except:
                            print('Parameters Setting Error!')
                            return render_template('index.html',thePercent = thePercent)
    return render_template('index.html',thePercent = thePercent)
@app.route('/about/')
def about():
    """Render the static "about" page."""
    page = render_template('about.html')
    return page
@app.route('/parameters/',methods=['POST','GET'])
def parameters():
    """Hyperparameter search-space page backed by settings.json.

    Three mutually exclusive POST forms, detected by try/except on their
    fields (first lookup that succeeds wins): (1) reset confirmation,
    (2) classifier search-space entry, (3) regressor search-space entry.
    """
    if request.method == "POST":
        try:
            # Form 1: reset-confirmation flag.
            confirm_reset = request.form['confirm_reset']
            with open(json_path_settings,'r',encoding='utf-8') as s_file:
                para = json.load(s_file)
            para['confirm_reset'] = confirm_reset
            # Redundant: the with-block already closed s_file.
            s_file.close()
            w_file = open(json_path_settings, "w",encoding='utf-8')
            json.dump(para, w_file)
            w_file.close()
        except:
            try:
                # Form 2: classifier entry — parent=algorithm, child=parameter,
                # paraValCls=comma-separated values.
                algo_name = request.form['parent']
                para_name = request.form['child']
                para_val = request.form['paraValCls']
                para_val = list(para_val.split(","))
                # Coerce every value to float/int when possible; on any
                # failure the raw strings are kept as-is.
                try:
                    para_val = [float(i) if '.' in i else int(i) for i in para_val]
                except:
                    para_val = para_val
                with open(json_path_settings,'r',encoding='utf-8') as s_file:
                    para = json.load(s_file)
                para['space_set']['cls'][algo_name][para_name] = para_val
                para['confirm_reset'] = 'no_confirm'
                w_file = open(json_path_settings, "w",encoding='utf-8')
                json.dump(para, w_file)
                w_file.close()
                print(algo_name,para_name,para_val)
                # Redundant: already closed by the with-block.
                s_file.close()
            except:
                try:
                    # Form 3: regressor entry, same shape as form 2.
                    algo_name = request.form['parent2']
                    print(algo_name)
                    para_name = request.form['child2']
                    para_val = request.form['paraValReg']
                    para_val = list(para_val.split(","))
                    print(1)
                    try:
                        para_val = [float(i) if '.' in i else int(i) for i in para_val]
                    except:
                        para_val = para_val
                    print(para_val)
                    with open(json_path_settings,'r',encoding='utf-8') as s_file:
                        para = json.load(s_file)
                    para['space_set']['reg'][algo_name][para_name] = para_val
                    para['confirm_reset'] = 'no_confirm'
                    w_file = open(json_path_settings, "w",encoding='utf-8')
                    json.dump(para, w_file)
                    w_file.close()
                    print(algo_name,para_name,para_val)
                    s_file.close()
                except:
                    print("Error in Setting Searchin Space!")
    return render_template('parameters.html')
@app.route('/docs/')
def docs():
    """Render the static documentation page."""
    page = render_template('docs.html')
    return page
@app.route('/logs/',methods=['POST','GET'])
def logs():
    """Log viewer: on POST, render the selected .log file into logfile.html.

    Sets the module-level log_flag so the template knows whether a log was
    converted; GET resets it.
    """
    global log_flag
    if request.method == "POST":
        try:
            from os import listdir
            from os.path import isfile, join
            # Fixes over the previous version: the unused `time` and
            # `win32com` imports are gone (the latter always raised on
            # non-Windows hosts, silently disabling this route), files are
            # opened with context managers (the read handle used to leak),
            # and the bare `except:` is narrowed to Exception.
            run_btn = request.form['run_btn']
            logfile = request.form['logfile']
            file_list = [f for f in listdir('./logs') if isfile(join('./logs', f))]
            file_select = [logfile]
            for item in file_list:
                ext_flag = [item.startswith(i) for i in file_select]
                if (ext_flag == [True]) and item.endswith('.log'):
                    with open("./logs/" + item, "r") as contents, \
                            open("./templates/logfile.html", "w") as e:
                        for lines in contents.readlines():
                            e.write("<pre>" + lines + "</pre> <br>\n")
                    log_flag = True
        except Exception:
            print('Read Log Files Error!')
        return render_template('logs.html', log_flag=log_flag)
    else:
        log_flag = False
        return render_template('logs.html', log_flag=log_flag)
@app.route('/nologfile/')
def nologfile():
    """Render the placeholder page shown when no log file exists."""
    page = render_template('nologfile.html')
    return page
@app.route('/logfile/')
def logfile():
    """Render the generated log-file page."""
    page = render_template('logfile.html')
    return page
@app.route('/viz/')
def viz():
    """Render the visualization page."""
    page = render_template('viz.html')
    return page
@app.route('/report/')
def report():
    """Render the report page."""
    page = render_template('report.html')
    return page
@app.route('/diagram/')
def diagram():
    """Render the pipeline diagram page."""
    page = render_template('diagram.html')
    return page
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    # debug=True enables auto-reload and verbose errors; disable in production.
    app.run(debug=True)
|
from . import client as raw
from .sorting import sort_by_name
from .cache import cache
from .helpers import normalize_model_parameter
from .shot import get_sequence
default = raw.default_client
def new_scene(project, sequence, name, client=default):
    """
    Create a scene named *name* under the given sequence of *project*.
    """
    project = normalize_model_parameter(project)
    sequence = normalize_model_parameter(sequence)
    path = "data/projects/%s/scenes" % project["id"]
    payload = {"name": name, "sequence_id": sequence["id"]}
    return raw.post(path, payload, client=client)
@cache
def all_scenes(project=None, client=default):
    """
    Retrieve all scenes, sorted by name; restricted to *project* when given.
    """
    project = normalize_model_parameter(project)
    if project is not None:
        scenes = raw.fetch_all(
            "projects/%s/scenes" % project["id"],
            client=client
        )
    else:
        # Fix: propagate the client here too — the previous version dropped
        # it on this branch, silently ignoring a custom client, unlike every
        # sibling function in this module.
        scenes = raw.fetch_all("scenes", client=client)
    return sort_by_name(scenes)
@cache
def all_scenes_for_project(project, client=default):
    """
    Retrieve every scene belonging to *project*, sorted by name.
    """
    project = normalize_model_parameter(project)
    path = "projects/%s/scenes" % project["id"]
    return sort_by_name(raw.fetch_all(path, client=client))
@cache
def all_scenes_for_sequence(sequence, client=default):
    """
    Retrieve the scenes that are children of *sequence*, sorted by name.
    """
    sequence = normalize_model_parameter(sequence)
    path = "sequences/%s/scenes" % sequence["id"]
    scenes = raw.fetch_all(path, client=client)
    return sort_by_name(scenes)
@cache
def get_scene(scene_id, client=default):
    """
    Fetch a single scene by its ID.
    """
    scene = raw.fetch_one("scenes", scene_id, client=client)
    return scene
@cache
def get_scene_by_name(sequence, scene_name, client=default):
    """
    Return the scene of *sequence* named *scene_name*, or None when absent.
    """
    sequence = normalize_model_parameter(sequence)
    params = {"parent_id": sequence["id"], "name": scene_name}
    matches = raw.fetch_all("scenes/all", params, client=client)
    if matches:
        return matches[0]
    return None
def update_scene(scene, client=default):
    """
    Persist the given scene data through the API.
    """
    path = "data/entities/%s" % scene["id"]
    return raw.put(path, scene, client=client)
def new_scene_asset_instance(scene, asset, description="", client=default):
    """
    Create a new asset instance on *scene*.  The instance number is
    generated server-side by incrementing the highest existing number.
    """
    scene = normalize_model_parameter(scene)
    asset = normalize_model_parameter(asset)
    path = "data/scenes/%s/asset-instances" % scene["id"]
    payload = {"asset_id": asset["id"], "description": description}
    return raw.post(path, payload, client=client)
@cache
def all_asset_instances_for_scene(scene, client=default):
    """
    List the asset instances present in *scene*.
    """
    scene = normalize_model_parameter(scene)
    path = "data/scenes/%s/asset-instances" % scene["id"]
    return raw.get(path, client=client)
@cache
def get_asset_instance_by_name(scene, name, client=default):
    """
    Return the asset instance of *scene* that has the given name.
    """
    params = {"name": name, "scene_id": scene["id"]}
    return raw.fetch_first("asset-instances", params, client=client)
@cache
def all_camera_instances_for_scene(scene, client=default):
    """
    List the camera instances present in *scene*.
    """
    scene = normalize_model_parameter(scene)
    path = "data/scenes/%s/camera-instances" % scene["id"]
    return raw.get(path, client=client)
@cache
def all_shots_for_scene(scene, client=default):
    """
    List the shots issued from *scene*.
    """
    scene = normalize_model_parameter(scene)
    path = "data/scenes/%s/shots" % scene["id"]
    return raw.get(path, client=client)
def add_shot_to_scene(scene, shot, client=default):
    """
    Record that *shot* was generated out of *scene*.
    """
    scene = normalize_model_parameter(scene)
    shot = normalize_model_parameter(shot)
    path = "data/scenes/%s/shots" % scene["id"]
    return raw.post(path, {"shot_id": shot["id"]}, client=client)
def remove_shot_from_scene(scene, shot, client=default):
    """
    Delete the link between *shot* and *scene*.
    """
    scene = normalize_model_parameter(scene)
    shot = normalize_model_parameter(shot)
    path = "data/scenes/%s/shots/%s" % (scene["id"], shot["id"])
    return raw.delete(path, client=client)
def update_asset_instance_name(asset_instance, name, client=default):
    """
    Update the name of given asset instance.
    """
    # Fix: normalize the parameter like update_asset_instance_data does —
    # without it, passing a bare ID string crashed on the ["id"] lookup.
    asset_instance = normalize_model_parameter(asset_instance)
    path = "/data/asset-instances/%s" % asset_instance["id"]
    return raw.put(path, {"name": name}, client=client)
def update_asset_instance_data(asset_instance, data, client=default):
    """
    Replace the extra data blob of the given asset instance.
    """
    asset_instance = normalize_model_parameter(asset_instance)
    return raw.put(
        "/data/asset-instances/%s" % asset_instance["id"],
        {"data": data},
        client=client,
    )
@cache
def get_sequence_from_scene(scene, client=default):
    """
    Return the sequence that the given scene belongs to.
    """
    scene = normalize_model_parameter(scene)
    sequence_id = scene["parent_id"]
    return get_sequence(sequence_id, client=client)
|
from .core import *
from . import heroku
from . import local
|
# Uncomment the next two lines if you want to save the animation
#import matplotlib
#matplotlib.use("Agg")
import numpy
from matplotlib.pylab import *
from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib.animation as animation
from matplotlib import style
import time
import random
from datetime import datetime
# style.use('fivethirtyeight')
# Global font size for the figure.
font = {'size' : 9}
matplotlib.rc('font', **font)
# Setup figure and subplots: two panels on top, one wide panel below
# with a twin y-axis (ax04) for a second scale.
f0 = figure(num = 0, figsize = (12, 8))#, dpi = 100)
f0.suptitle("Oscillation decay", fontsize=12)
ax01 = subplot2grid((2, 2), (0, 0))
ax02 = subplot2grid((2, 2), (0, 1))
ax03 = subplot2grid((2, 2), (1, 0), colspan=2, rowspan=1)
ax04 = ax03.twinx()
#tight_layout()
# Rolling sample buffers shared with updateData (window of 5 samples).
data = []
data1 = []
data2 = []
timestamp = []
# Data Update: x-axis window bounds and the running frame counter.
xmin = 0.0
xmax = 10
x = 0.0
# Turn on grids
ax01.grid(True)
ax02.grid(True)
ax03.grid(True)
def updateData(self):
    """Animation callback: append/roll one random sample and redraw all lines.

    `self` is the frame index passed by FuncAnimation, not an object.
    Mutates the module-level buffers (data, data1, data2, timestamp) and
    the global x counter.
    """
    global x
    date = datetime.now().strftime("%M:%S")
    random_data = random.randint(0,1024)
    # Keep a fixed-size window of 5 samples: grow first, then shift left.
    if len(data) < 5:
        data.append(float(random_data))
        data1.append(random.randint(0,360))
        data2.append(random.randint(0,180))
        timestamp.append(date)
    else:
        timestamp[:-1] = timestamp[1:]
        timestamp[-1] = date
        data[:-1] = data[1:]
        data[-1] = random_data
        data1[:-1] = data1[1:]
        data1[-1] = random.randint(0,360)
        data2[:-1] = data2[1:]
        data2[-1] = random.randint(0,180)
    # NOTE(review): titles, labels, lines and legends are recreated on every
    # frame; it works, but reusing persistent artists via set_data alone
    # would be cheaper.
    ax01.set_title('Accelerometer 1')
    ax02.set_title('Accelerometer 2')
    ax03.set_title('Accelerometer 3')
    # set label names
    ax01.set_xlabel("Timestamp")
    ax01.set_ylabel("Data Acc1")
    ax02.set_xlabel("Timestamp")
    ax02.set_ylabel("Data Acc2")
    ax03.set_xlabel("Timestamp")
    ax03.set_ylabel("Data Acc3")
    ax04.set_ylabel("vy")
    print(data)
    print(timestamp)
    # set plots
    p011, = ax01.plot(timestamp,data,'b-', label="yp1")
    p012, = ax01.plot(timestamp,data1,'g-', label="yp2")
    p021, = ax02.plot(timestamp,data,'b-', label="yv1")
    p022, = ax02.plot(timestamp,data2,'g-', label="yv2")
    p031, = ax03.plot(timestamp,data1,'b-', label="yp1")
    p032, = ax03.plot(timestamp,data2,'g-', label="yv1")
    # set lagends
    ax01.legend([p011,p012], [p011.get_label(),p012.get_label()])
    ax02.legend([p021,p022], [p021.get_label(),p022.get_label()])
    ax03.legend([p031,p032], [p031.get_label(),p032.get_label()])
    # Advance the sliding x-window and refresh the line data.
    x += 0.05
    p011.set_data(timestamp,data)
    p012.set_data(timestamp,data1)
    p021.set_data(timestamp,data)
    p022.set_data(timestamp,data2)
    p031.set_data(timestamp,data1)
    p032.set_data(timestamp,data2)
    if x >= xmax-1.00:
        p011.axes.set_xlim(x-xmax+1.0,x+1.0)
        p021.axes.set_xlim(x-xmax+1.0,x+1.0)
        p031.axes.set_xlim(x-xmax+1.0,x+1.0)
        p032.axes.set_xlim(x-xmax+1.0,x+1.0)
    return p011, p012, p021, p022, p031, p032
# interval: draw new frame every 'interval' ms
# frames: number of frames to draw
# simulation = animation.FuncAnimation(f0, updateData, blit=False, frames=200, interval=20, repeat=True)
# Keep a reference to the animation object so it is not garbage-collected.
simulation = animation.FuncAnimation(f0, updateData, frames=10, interval=20, repeat=True)
# Uncomment the next line if you want to save the animation
#simulation.save(filename='sim.mp4',fps=30,dpi=300)
plt.show()
import unittest
from app.data_model.answer_store import Answer, AnswerStore
from app.helpers.schema_helper import SchemaHelper
from app.questionnaire.path_finder import PathFinder
from app.schema_loader.schema_loader import load_schema_file
class TestConfirmationPage(unittest.TestCase):
    """Checks navigation from the last questionnaire block to the summary."""

    def test_get_next_location_confirmation(self):
        store = AnswerStore()
        store.add(Answer(
            answer_id="ca3ce3a3-ae44-4e30-8f85-5b6a7a2fb23c",
            value="Orson Krennic",
        ))
        schema = load_schema_file("0_rogue_one.json")
        path_finder = PathFinder(schema, store)
        last_location = SchemaHelper.get_last_location(schema)
        next_location = path_finder.get_next_location(last_location)
        self.assertEqual('summary', next_location.block_id)
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def drop_duplicates(self, unique_columns=None):
    """
    Modify the current frame, removing duplicate rows.
    Parameters
    ----------
    :param unique_columns: (Optional[List[str] or str]) Column name(s) to identify duplicates. Default is the entire
                           row is compared.
    Remove data rows which are the same as other rows.
    The entire row can be checked for duplication, or the search for duplicates can be limited to one or more columns.
    This modifies the current frame.
    Examples
    --------
    Given a frame with data:
    <hide>
    >>> frame = tc.frame.create([[200, 4, 25],
    ...                          [200, 5, 25],
    ...                          [200, 4, 25],
    ...                          [200, 5, 35],
    ...                          [200, 6, 25],
    ...                          [200, 8, 35],
    ...                          [200, 4, 45],
    ...                          [200, 4, 25],
    ...                          [200, 5, 25],
    ...                          [201, 4, 25]],
    ...                         [("a", int), ("b", int), ("c", int)])
    <progress>
    </hide>
    >>> frame.inspect()
    [#]  a    b  c
    ===============
    [0]  200  4  25
    [1]  200  5  25
    [2]  200  4  25
    [3]  200  5  35
    [4]  200  6  25
    [5]  200  8  35
    [6]  200  4  45
    [7]  200  4  25
    [8]  200  5  25
    [9]  201  4  25
    Remove any rows that are identical to a previous row.
    The result is a frame of unique rows.
    Note that row order may change.
    >>> frame.drop_duplicates()
    <progress>
    >>> frame.inspect()
    [#]  a    b  c
    ===============
    [0]  200  8  35
    [1]  200  6  25
    [2]  200  5  35
    [3]  200  4  45
    [4]  200  4  25
    [5]  200  5  25
    [6]  201  4  25
    Now remove any rows that have the same data in columns *a* and
    *c* as a previously checked row:
    >>> frame.drop_duplicates([ "a", "c"])
    <progress>
    The result is a frame with unique values for the combination of columns *a*
    and *c*.
    >>> frame.inspect()
    [#]  a    b  c
    ===============
    [0]  201  4  25
    [1]  200  4  45
    [2]  200  6  25
    [3]  200  8  35
    """
    # Normalize a single column name into a one-element list.
    if unique_columns is not None and not isinstance(unique_columns, list):
        unique_columns = [unique_columns]
    # Convert to a Scala Vector[String] before crossing the JVM boundary.
    if isinstance(unique_columns, list):
        unique_columns = self._tc.jutils.convert.to_scala_vector_string(unique_columns)
    # None means "compare entire rows"; the Scala side receives an Option.
    self._scala.dropDuplicates(self._tc.jutils.convert.to_scala_option(unique_columns))
# Generated by Django 2.2.5 on 2020-07-18 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Language, Topic and Book
    tables for the dashboard app.

    Generated by Django; keep in sync with dashboard/models.py rather than
    editing by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.CharField(max_length=512)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('author', models.CharField(max_length=64)),
                ('country', models.CharField(max_length=64)),
                ('description', models.CharField(max_length=512)),
                ('datetime', models.DateField()),
                ('topics', models.ManyToManyField(to='dashboard.Topic')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
from stix_shifter_utils.stix_translation.src.utils.transformers import ValueTransformer
from stix_shifter_utils.utils import logger
LOGGER = logger.set_logger(__name__)
# Maps abbreviated Windows registry hive prefixes to their full hive names.
WINDOWS_KEY_MAPPING = {
    "HKCC": "HKEY_CURRENT_CONFIG",
    "HKCR": "HKEY_CLASSES_ROOT",
    "HKCU": "HKEY_CURRENT_USER",
    "HKDD": "HKEY_DYN_DATA",
    "HKLM": "HKEY_LOCAL_MACHINE",
    "HKPD": "HKEY_PERFORMANCE_DATA",
    "HKU": "HKEY_USERS",
}
class ConvertInternetHeaders(ValueTransformer):
    """Collapse a list of {HeaderName, Value} dicts into a single mapping."""

    @staticmethod
    def transform(obj: list):
        headers = {}
        for entry in obj:
            headers[entry.get("HeaderName")] = entry.get("Value")
        return headers
class ConvertWindowsRegistry(ValueTransformer):
    """Expand an abbreviated registry hive prefix (e.g. "HKLM\\...") to its
    full name using WINDOWS_KEY_MAPPING; unknown prefixes pass through."""

    @staticmethod
    def transform(obj: str):
        try:
            # str.find returns -1 when absent, replacing the previous
            # double scan ("\\" in obj, then obj.index("\\")).
            root_key_index = obj.find("\\")
            final_result = obj
            if root_key_index != -1:
                full_key = WINDOWS_KEY_MAPPING.get(obj[0:root_key_index].upper())
                if full_key:
                    final_result = full_key + obj[root_key_index:]
            return final_result
        except Exception as e:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            LOGGER.error("Cannot convert root key to Stix formatted windows registry key due to %s", e)
        return obj
|
import matplotlib.pyplot as plt
import re
import json
# Post-processes simulation output (out.txt) and compares it against
# real-world reference rates (data.json).  Ported to Python 3: the previous
# version relied on Python 2 semantics — map() returning a list and integer
# "/" — and crashed on Python 3 (range() rejects float arguments).
with open("out.txt", "r") as infile:
    values = infile.read()

popCount = []
happiness = []
crimes = []
gunCrimes = []
avgConnectedness = []
gunPossession = []
# out.txt holds a stringified list of 6-tuples; strip the outer "[(" / ")]"
# and split the per-day records apart.
values = [record.split(", ") for record in values[2:-2].split("), (")]
for val in values:
    popCount.append(val[0])
    happiness.append(val[1])
    crimes.append(val[2])
    gunCrimes.append(val[3])
    avgConnectedness.append(val[4])
    gunPossession.append(val[5])

# Let's average! Collapse the daily crime series into 30-day means,
# scaled to a per-10000 rate.
tmpCrimes = crimes
tmpGunCrimes = gunCrimes
crimes = []
gunCrimes = []
for i in range(0, len(tmpCrimes) // 30):
    avg1 = 0.0
    avg2 = 0.0
    for j in range(0, 30):
        avg1 += float(tmpCrimes[i*30+j])
        avg2 += float(tmpGunCrimes[i*30+j])
    crimes.append(avg1/30.0*10000)
    gunCrimes.append(avg2/30.0*10000)

length = len(popCount)
with open("data.json") as cmpFile:
    cmpData = json.load(cmpFile)

plt.subplot(211)
# See https://matplotlib.org/api/pyplot_api.html?highlight=plot#matplotlib.pyplot.plot for
# plot styles
plt.plot(
    range(0, length),
    [float(n)/1000.0 for n in popCount],
    'g-',
    label="Population count in thousands")
plt.plot(
    range(0, length),
    happiness,
    'y-',
    label="Average happiness level")
plt.plot(
    range(0, length),
    avgConnectedness,
    'y:',
    label="Average Connectedness value")
plt.plot(
    [n*30 for n in range(0, length // 30)],
    crimes,
    'b-',
    label="Crime rate per million")
plt.plot(
    [n*30 for n in range(0, length // 30)],
    gunCrimes,
    'b:',
    label="Crime rate involving firearms per 100")
'''plt.plot(
    range(0, length),
    gunPossession,
    'c-',
    label="Gun possession rate per 100")'''
plt.xlabel("Day")
plt.legend()
plt.axis([0, length, 0, 120])

# Day-over-day population change per 100k, plus crime rates rescaled for
# the comparison subplot below.
populationChange = [0] + [
    (float(p) - float(pp)) / float(pp) * 100000
    for p, pp in zip(popCount[1:], popCount[:-1])
]
violentCrimes = [float(c)/10 for c in crimes]
firearmCrimes = [float(c)/10 for c in gunCrimes]

# Let's average!
tmpPop = populationChange
populationChange = []
for i in range(0, len(tmpCrimes) // 30):
    avg = 0.0
    for j in range(0, 30):
        avg += float(tmpPop[i*30+j])
    populationChange.append(avg/30.0)

plt.subplot(212)
plt.plot(
    [0, length-1],
    [cmpData["populationChange"]*100000]*2,
    'g-',
    label="Population change rate in the US"
)
plt.plot(
    [n*30 for n in range(0, length // 30)],
    populationChange,
    'g:',
    label="Population change in our model"
)
plt.plot(
    [0, length-1],
    [cmpData["violentCrimes"]]*2,
    'b-',
    label="Violent crime rate in the US"
)
plt.plot(
    [n*30 for n in range(0, length // 30)],
    violentCrimes,
    'b:',
    label="Violent crime rate in our model"
)
plt.plot(
    [0, length-1],
    [cmpData["firearmCrimes"]]*2,
    'c-',
    label="Firearm crime rate in the US"
)
plt.plot(
    [n*30 for n in range(0, length // 30)],
    firearmCrimes,
    'c:',
    label="Firearm crime rate in our model"
)
plt.xlabel("Day")
plt.legend()
plt.axis([0, length, -1, 5])
plt.show()
|
# Crie um programa que leia as notas do ano letivo de um aluno e faça a média entre elas.
# Read the four term grades of a school year and print their average.
grade1 = float(input('\033[31mNota do 1° bimestre: \033[m'))
grade2 = float(input('\033[32mNota do 2° bimestre: \033[m'))
grade3 = float(input('\033[36mNota do 3° bimestre: \033[m'))
grade4 = float(input('\033[34mNota do 4° bimestre: \033[m'))
media = sum((grade1, grade2, grade3, grade4)) / 4
print('A média entre {}, {}, {} e {} é de {:.1f} pontos!'.format(grade1, grade2, grade3, grade4, media))
|
"""
Author: Andreas Finkler
Created: 23.12.2020
"""
|
#region Imports
from Base.EnforceTypes import EnforceTypes;
from Base.ValidParameters import ValidSportsBooks, ValidSports;
from Base.CustomExceptions import SportError;
#endregion Imports
class RequestBase:
    """Base class that turns the populated public attributes of a request
    subclass into an API querystring fragment."""

    @classmethod
    def GetArgString(cls):
        """Return "key=value&..." (lower-cased) for every populated public
        attribute of the subclass, or '' when none are set."""
        # Public attributes of the given subclass, excluding the endpoint path.
        props = [i for i in cls.__dict__.keys() if i[:1] != '_' and i != 'ApiPath']
        # Populated attributes converted to querystring-friendly strings.
        # Not using urllib here.
        arguments = {p: cls.ConvertArg(getattr(cls, p))
                     for p in props if getattr(cls, p) is not None}
        argStr = ('&'.join("{!s}={!s}".format(key.lower(), val.lower().replace("'", ''))
                           for (key, val) in arguments.items()))
        if len(argStr) > 1:
            return argStr
        return ''

    @classmethod
    def ConvertArg(cls, arg):
        """Return *arg* ready for querystring use: ints become strings,
        everything else passes through unchanged."""
        # Fix: non-int values previously fell off the end and returned None,
        # which made GetArgString crash on str attributes (None.lower()).
        if type(arg) is int:
            return str(arg)
        return arg

    def __post_init__(cls):
        EnforceTypes(cls)
|
from collections import deque
def solve(number):
    """Read *number* "fuel distance" pairs from stdin, then print the index
    of the first pump from which the full circuit can be driven without the
    running fuel balance ever going negative."""
    stations = []
    for _ in range(number):
        fuel, distance = map(int, input().split())
        stations.append((fuel, distance))
    for start in range(number):
        tank = 0
        feasible = True
        for offset in range(number):
            fuel, distance = stations[(start + offset) % number]
            tank += fuel - distance
            if tank < 0:
                feasible = False
        if feasible:
            print(start)
            break
# Entry point: the first stdin line holds the number of pump records.
solve(int(input()))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.