text stringlengths 8 6.05M |
|---|
from ephemeral.build_api.lib_function import LibFunction
from ephemeral.build_api.job_builder import JobBuilder
class LibBuilder(object):
    """Accumulates library functions and build jobs for later processing."""

    def __init__(self):
        self.functions = []
        self.jobs = []

    def add_function(self, name, method):
        """Register *method* under *name*; return the new LibFunction wrapper."""
        func = LibFunction(name, method)
        self.functions.append(func)
        return func

    def create_job(self, name):
        """Create, track, and return a JobBuilder for *name*."""
        builder = JobBuilder(name)
        self.jobs.append(builder)
        return builder
#!/usr/bin/python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class sphere20(nn.Module):
    """SphereFace-style convolutional backbone.

    Maps a batch of (B, 3, 112, 112) face crops to (B, 512) embedding
    vectors.  Each of the four stages halves the spatial size with a
    stride-2 conv, then adds a residual branch of two 3x3 convs.
    """

    def __init__(self):
        super(sphere20, self).__init__()
        # input: batch_size, channel_num, pic_width, pic_height (B, 3, 112, 112)
        # FIX: nn.ReLU's constructor argument is the `inplace` flag, not a
        # channel count.  The original passed the channel count (64, 128, ...),
        # which only "worked" because any non-zero int is truthy and therefore
        # silently enabled in-place mode.  Spelled out explicitly below;
        # runtime behavior is unchanged.
        self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1)  # => b*64*56*56
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)  # => b*64*56*56
        self.relu1_2 = nn.ReLU(inplace=True)
        self.conv1_3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)  # => b*64*56*56
        self.relu1_3 = nn.ReLU(inplace=True)
        self.conv2_1 = nn.Conv2d(64, 128, 3, 2, 1)  # => b*128*28*28
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.conv2_3 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.relu2_3 = nn.ReLU(inplace=True)
        self.conv3_1 = nn.Conv2d(128, 256, 3, 2, 1)  # => b*256*14*14
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 1, bias=False)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 1, bias=False)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 2, 1)  # => b*512*7*7
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1, bias=False)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, 1, 1, bias=False)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.fc4 = nn.Linear(512 * 7 * 7, 512)
        # Weight initialization: Kaiming init for every conv/linear layer.
        # Layers that carry a bias use the uniform variant and zero the bias;
        # bias-free layers use the normal variant.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                if m.bias is not None:
                    nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')
                    nn.init.constant_(m.bias, 0.0)
                else:
                    nn.init.kaiming_normal_(m.weight, nonlinearity='relu')

    def forward(self, x):
        """Return (B, 512) embeddings for images *x* of shape (B, 3, 112, 112)."""
        x = self.relu1_1(self.conv1_1(x))
        x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
        x = self.relu2_1(self.conv2_1(x))
        x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
        x = self.relu3_1(self.conv3_1(x))
        x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
        x = self.relu4_1(self.conv4_1(x))
        x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
        x = x.view(x.size(0), -1)
        x = self.fc4(x)
        return x

    def save(self, file_path):
        """Serialize this model's state_dict to *file_path*."""
        with open(file_path, 'wb') as f:
            torch.save(self.state_dict(), f)
# Python Imports
import wx.lib.agw.floatspin as FS
from datetime import datetime
import re
import time
from threading import Thread
# Local Imports
import globals
from yamaha import *
from helpers import *
class SmartVolumeFinished(eg.ActionBase):
    """Reset both smart-volume timers.

    Presumably bound to the release of the volume key -- clearing the start
    timestamps makes the next SmartVolumeUp/Down press begin a fresh
    step1 -> step2 cycle.  TODO confirm against the plugin's event wiring.
    """
    def __call__(self):
        self.plugin.smart_vol_up_start = None
        self.plugin.smart_vol_down_start = None
class SmartVolumeUp(eg.ActionBase):
    """Raise the volume by *step1* dB at first, switching to the larger
    *step2* dB once *wait* seconds have elapsed since the first press.
    SmartVolumeFinished resets the timer."""

    def __call__(self, zone, step1, step2, wait):
        izone = convert_zone_to_int(self.plugin, zone)
        if self.plugin.smart_vol_up_start is None:
            self.plugin.smart_vol_up_start = datetime.now()
        diff = datetime.now() - self.plugin.smart_vol_up_start
        # FIX: use total_seconds() -- timedelta.seconds ignores the .days
        # component, so a timestamp older than 24h would wrap around and
        # wrongly select the small step again.
        if diff.total_seconds() < float(wait):
            increase_volume(self.plugin, izone, step1)
        else:
            increase_volume(self.plugin, izone, step2)

    def Configure(self, zone='Active Zone', step1=0.5, step2=2.0, wait=2.0):
        """Config dialog: zone selector plus step1 / wait / step2 spinners."""
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Increase Amount (Step 1): ", pos=(10, 60))
        fs_step1 = FS.FloatSpin(panel, -1, pos=(170, 57), min_val=0.5, max_val=10,
                                increment=0.5, value=float(step1), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="dB", pos=(270, 60))
        fs_step1.SetFormat("%f")
        fs_step1.SetDigits(1)
        wx.StaticText(panel, label="Time between Step 1 to Step 2: ", pos=(10, 100))
        fs_wait = FS.FloatSpin(panel, -1, pos=(170, 97), min_val=0.5, max_val=999,
                               increment=0.1, value=float(wait), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="Seconds", pos=(270, 100))
        fs_wait.SetFormat("%f")
        fs_wait.SetDigits(1)
        wx.StaticText(panel, label="Increase Amount (Step 2): ", pos=(10, 140))
        fs_step2 = FS.FloatSpin(panel, -1, pos=(170, 137), min_val=0.5, max_val=10,
                                increment=0.5, value=float(step2), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="dB", pos=(270, 140))
        fs_step2.SetFormat("%f")
        fs_step2.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], fs_step1.GetValue(), fs_step2.GetValue(), fs_wait.GetValue())
class SmartVolumeDown(eg.ActionBase):
    """Lower the volume by *step1* dB at first, switching to the larger
    *step2* dB once *wait* seconds have elapsed since the first press.
    SmartVolumeFinished resets the timer."""

    def __call__(self, zone, step1, step2, wait):
        izone = convert_zone_to_int(self.plugin, zone)
        if self.plugin.smart_vol_down_start is None:
            self.plugin.smart_vol_down_start = datetime.now()
        diff = datetime.now() - self.plugin.smart_vol_down_start
        # FIX: use total_seconds() -- timedelta.seconds ignores the .days
        # component and wraps around every 24h.
        if diff.total_seconds() < float(wait):
            decrease_volume(self.plugin, izone, step1)
        else:
            decrease_volume(self.plugin, izone, step2)

    def Configure(self, zone='Active Zone', step1=0.5, step2=2.0, wait=2.0):
        """Config dialog: zone selector plus step1 / wait / step2 spinners."""
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Decrease Amount (Step 1): ", pos=(10, 60))
        fs_step1 = FS.FloatSpin(panel, -1, pos=(170, 57), min_val=0.5, max_val=10,
                                increment=0.5, value=float(step1), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="dB", pos=(270, 60))
        fs_step1.SetFormat("%f")
        fs_step1.SetDigits(1)
        wx.StaticText(panel, label="Time between Step 1 to Step 2: ", pos=(10, 100))
        fs_wait = FS.FloatSpin(panel, -1, pos=(170, 97), min_val=0.5, max_val=999,
                               increment=0.1, value=float(wait), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="Seconds", pos=(270, 100))
        fs_wait.SetFormat("%f")
        fs_wait.SetDigits(1)
        wx.StaticText(panel, label="Decrease Amount (Step 2): ", pos=(10, 140))
        fs_step2 = FS.FloatSpin(panel, -1, pos=(170, 137), min_val=0.5, max_val=10,
                                increment=0.5, value=float(step2), agwStyle=FS.FS_LEFT)
        wx.StaticText(panel, label="dB", pos=(270, 140))
        fs_step2.SetFormat("%f")
        fs_step2.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], fs_step1.GetValue(), fs_step2.GetValue(), fs_wait.GetValue())
class IncreaseVolume(eg.ActionBase):
    """Action: raise the receiver volume in *zone* by *step* dB."""
    def __call__(self, zone, step):
        increase_volume(self.plugin, convert_zone_to_int(self.plugin, zone), float(step))
    def Configure(self, zone='Active Zone', step=0.5):
        # EventGhost config dialog: zone selector plus a dB step spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Increase Amount (Step): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=0.5, max_val=10,
                                 increment=0.5, value=float(step), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class DecreaseVolume(eg.ActionBase):
    """Action: lower the receiver volume in *zone* by *step* dB."""
    def __call__(self, zone, step):
        decrease_volume(self.plugin, convert_zone_to_int(self.plugin, zone), float(step))
    def Configure(self, zone='Active Zone', step=0.5):
        # EventGhost config dialog: zone selector plus a dB step spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Decrease Amount (Step): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=0.5, max_val=10,
                                 increment=0.5, value=float(step), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class SetVolume(eg.ActionBase):
    """Action: set the volume of *zone* to an exact *vol* in dB."""
    def __call__(self, zone, vol):
        set_volume(self.plugin, convert_zone_to_int(self.plugin, zone), float(vol))
    def Configure(self, zone='Active Zone', vol=-50.0):
        # EventGhost config dialog: zone selector plus an exact-dB spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Exact Volume (dB): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=-100.0, max_val=50.0,
                                 increment=0.5, value=float(vol), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class SetMaxVolume(eg.ActionBase):
    """Action: set the maximum-volume limit of *zone* to *vol* dB."""
    def __call__(self, zone, vol):
        set_max_volume(self.plugin, convert_zone_to_int(self.plugin, zone), float(vol))
    def Configure(self, zone='Active Zone', vol=16.5):
        # EventGhost config dialog: zone selector plus a max-dB spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Max Volume (dB): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=-30.0, max_val=16.5,
                                 increment=0.5, value=float(vol), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class SetInitVolume(eg.ActionBase):
    """Action: configure the power-on ("initial") volume of *zone*.

    *mode* enables/disables the feature ("On"/"Off"); *vol* is the level in dB.
    """
    def __call__(self, zone, vol, mode):
        set_init_volume(self.plugin, convert_zone_to_int(self.plugin, zone), float(vol), mode)
    def Configure(self, zone='Active Zone', vol=-50.0, mode="Off"):
        # EventGhost config dialog: zone selector, mode selector, dB spinner.
        panel = eg.ConfigPanel()
        modes = ["Off", "On"]
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Mode: ", pos=(10, 60))
        choice_mode = wx.Choice(panel, -1, (10, 80), choices=modes)
        choice_mode.SetStringSelection(mode)
        wx.StaticText(panel, label="Exact Volume (dB): ", pos=(10, 110))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 130), min_val=-100.0, max_val=50.0,
                                 increment=0.5, value=float(vol), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue(), modes[choice_mode.GetCurrentSelection()])
class SetBass(eg.ActionBase):
    """Action: set the bass tone control of *zone* to *val* dB."""
    def __call__(self, zone, val):
        set_bass(self.plugin, convert_zone_to_int(self.plugin, zone), float(val))
    def Configure(self, zone='Active Zone', val=0.0):
        # EventGhost config dialog: zone selector plus a +/-6 dB spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Exact Value (dB): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=-6.0, max_val=6.0,
                                 increment=0.5, value=float(val), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class SetTreble(eg.ActionBase):
    """Action: set the treble tone control of *zone* to *val* dB."""
    def __call__(self, zone, val):
        set_treble(self.plugin, convert_zone_to_int(self.plugin, zone), float(val))
    def Configure(self, zone='Active Zone', val=0.0):
        # EventGhost config dialog: zone selector plus a +/-6 dB spinner.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Exact Value (dB): ", pos=(10, 60))
        floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=-6.0, max_val=6.0,
                                 increment=0.5, value=float(val), agwStyle=FS.FS_LEFT)
        floatspin.SetFormat("%f")
        floatspin.SetDigits(1)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], floatspin.GetValue())
class SetPattern1(eg.ActionBase):
    """Action: send speaker level pattern 1 to the receiver.

    *levels* is a list of [speaker_name, level_dB] pairs; when None the
    current levels are read back from the receiver first.
    """
    def __call__(self, levels):
        set_pattern1(self.plugin, levels)

    def Configure(self, levels=None):
        panel = eg.ConfigPanel()
        if levels is None:  # FIX: identity comparison with None, was `== None`
            levels = get_system_pattern_1(self.plugin)  # gets levels from receiver
        # One label + spinner pair per speaker, stacked vertically.
        adjpos = (10, 10)
        floatspin = []
        for speaker in levels:
            wx.StaticText(panel, label=speaker[0] + " Exact Value (dB): ", pos=adjpos)
            adjpos = (10, adjpos[1] + 20)
            fs = FS.FloatSpin(panel, -1, pos=adjpos, min_val=-10.0, max_val=10.0,
                              increment=0.5, value=float(speaker[1]), agwStyle=FS.FS_LEFT)
            fs.SetFormat("%f")
            fs.SetDigits(1)
            floatspin.append(fs)
            adjpos = (10, adjpos[1] + 30)
        while panel.Affirmed():
            for i, fs in enumerate(floatspin):
                levels[i] = [levels[i][0], fs.GetValue()]
            panel.SetResult(levels)
class SetActiveZone(eg.ActionBase):
    """Action: make *zone* the plugin's "active" zone that other actions target."""
    def __call__(self, zone):
        set_active_zone(self.plugin, convert_zone_to_int(self.plugin, zone))
    def Configure(self, zone='Main Zone'):
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, False, self.plugin.AVAILABLE_ZONES) # Don't include active zone!
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()])
class SetScene(eg.ActionBase):
    """Action: recall receiver scene number *scene* (1-12)."""
    def __call__(self, scene):
        set_scene(self.plugin, int(scene))
    def Configure(self, scene=1):
        panel = eg.ConfigPanel()
        wx.StaticText(panel, label="Scene Number: ", pos=(10, 10))
        spin = wx.SpinCtrl(panel, -1, "", (10, 30), (80, -1))
        spin.SetRange(1,12)
        spin.SetValue(int(scene))
        while panel.Affirmed():
            panel.SetResult(spin.GetValue())
class SetSourceInput(eg.ActionBase):
    """Action: switch *zone* to input *source* (e.g. "HDMI1")."""
    def __call__(self, zone, source):
        izone = convert_zone_to_int(self.plugin, zone)
        if source =="Tuner": #special case. I don't know why
            source = "TUNER"
        change_source(self.plugin, source, izone)
    def Configure(self, zone="Active Zone", source="HDMI1"):
        # EventGhost config dialog: zone selector plus source-input selector.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        inputs = self.plugin.AVAILABLE_SOURCES
        wx.StaticText(panel, label="Source Input: ", pos=(10, 60))
        choice_input = wx.Choice(panel, -1, (10, 80), choices=inputs)
        if source in inputs:
            choice_input.SetStringSelection(source)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], inputs[choice_input.GetCurrentSelection()])
class SetPowerStatus(eg.ActionBase):
    """Action: set *zone* power to On, Standby, or toggle between them."""

    def __call__(self, zone, status):
        izone = convert_zone_to_int(self.plugin, zone)
        # Dispatch table instead of an if/elif chain; unknown statuses are
        # ignored, exactly as before.
        handlers = {
            'Toggle On/Standby': toggle_on_standby,
            'On': power_on,
            'Standby': power_standby,
        }
        handler = handlers.get(status)
        if handler is not None:
            handler(self.plugin, izone)

    def Configure(self, zone="Active Zone", status="Toggle On/Standby"):
        """Config dialog: zone selector plus power-status selector."""
        panel = eg.ConfigPanel()
        zone_names = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        zone_choice = wx.Choice(panel, -1, (10, 30), choices=zone_names)
        if zone in zone_names:
            zone_choice.SetStringSelection(zone)
        status_names = ['Toggle On/Standby', 'On', 'Standby']
        wx.StaticText(panel, label="Power Status: ", pos=(10, 60))
        status_choice = wx.Choice(panel, -1, (10, 80), choices=status_names)
        if status in status_names:
            status_choice.SetStringSelection(status)
        while panel.Affirmed():
            panel.SetResult(zone_names[zone_choice.GetCurrentSelection()],
                            status_names[status_choice.GetCurrentSelection()])
class SetSleepStatus(eg.ActionBase):
    """Action: set the sleep timer of *zone* ("Off", "30 min", ..., "Last")."""
    def __call__(self, zone, status):
        izone = convert_zone_to_int(self.plugin, zone)
        set_sleep(self.plugin, status, izone)
    def Configure(self, zone="Active Zone", status="Off"):
        # EventGhost config dialog: zone selector plus sleep-status selector.
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        statuses = [ 'Off', '30 min', '60 min', '90 min', '120 min', 'Last' ]
        wx.StaticText(panel, label="Sleep Status: ", pos=(10, 60))
        choice = wx.Choice(panel, -1, (10, 80), choices=statuses)
        if status in statuses:
            choice.SetStringSelection(status)
        while panel.Affirmed():
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], statuses[choice.GetCurrentSelection()])
class SetSurroundMode(eg.ActionBase):
    """Action: select Straight, Surround Decode, or toggle between the two."""

    def __call__(self, mode):
        # Dispatch table instead of an if/elif chain; unknown modes are
        # ignored, exactly as before.
        handlers = {
            'Toggle Straight/Surround Decode': toggle_straight_decode,
            'Straight': straight,
            'Surround Decode': surround_decode,
        }
        handler = handlers.get(mode)
        if handler is not None:
            handler(self.plugin)

    def Configure(self, mode='Toggle Straight/Surround Decode'):
        """Config dialog: a single surround-mode selector."""
        panel = eg.ConfigPanel()
        mode_names = ['Toggle Straight/Surround Decode', 'Straight', 'Surround Decode']
        wx.StaticText(panel, label="Surround Mode: ", pos=(10, 10))
        mode_choice = wx.Choice(panel, -1, (10, 30), choices=mode_names)
        if mode in mode_names:
            mode_choice.SetStringSelection(mode)
        while panel.Affirmed():
            panel.SetResult(mode_names[mode_choice.GetCurrentSelection()])
class Set7ChannelMode(eg.ActionBase): # McB 1/11/2014 - Turn 7-channel mode on and off
    """Action: switch the receiver's 7-channel mode On or Off."""
    def __call__(self, mode):
        if mode == 'On':
            channel7_on(self.plugin)
        elif mode == 'Off':
            channel7_off(self.plugin)
    def Configure(self, mode='On'):
        panel = eg.ConfigPanel()
        modes = [ 'On', 'Off' ]
        wx.StaticText(panel, label="7-Channel Mode: ", pos=(10, 10))
        choice = wx.Choice(panel, -1, (10, 30), choices=modes)
        if mode in modes:
            choice.SetStringSelection(mode)
        while panel.Affirmed():
            panel.SetResult(modes[choice.GetCurrentSelection()])
class CursorAction(eg.ActionBase):
    """Action: send a cursor/navigation remote code (Up, Down, ...) to a zone."""

    def __call__(self, action, zone):
        code = None
        izone = convert_zone_to_int(self.plugin, zone, convert_active=True)
        if izone in [0, 1]:
            code = globals.CURSOR_CODES[1][action]
        if izone == 2:
            # Not all of the actions are supported for zone 2
            code = globals.CURSOR_CODES[2].get(action, None)
        if code is not None:
            send_code(self.plugin, code)
        else:
            # It is possible the user's active zone is not yet supported
            eg.PrintError("Zone {0} is not yet supported for this action".format(izone if izone > -1 else chr(-1 * izone)))

    def Configure(self, action="Up", zone="Active Zone"):
        """Config dialog: zone selector plus cursor-action selector."""
        panel = eg.ConfigPanel()
        # FIX: was `globals.All_ZONES_PLUS_ACTIVE` (typo) -- the sibling
        # actions (OperationAction, NumCharAction) use ALL_ZONES_PLUS_ACTIVE,
        # and the lowercase variant would raise AttributeError at runtime.
        zones = get_available_zones(self.plugin, True, globals.ALL_ZONES_PLUS_ACTIVE, limit=2)
        actions = globals.CURSOR_CODES[1].keys()
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Cursor Action: ", pos=(10, 60))
        choice_action = wx.Choice(panel, -1, (10, 80), choices=actions)
        if action in actions:
            choice_action.SetStringSelection(action)
        while panel.Affirmed():
            panel.SetResult(actions[choice_action.GetCurrentSelection()], zones[choice_zone.GetCurrentSelection()])
class OperationAction(eg.ActionBase):
    """Action: send a playback/operation remote code (Play, Pause, ...) to a zone."""

    def __call__(self, action, zone):
        code = None
        izone = convert_zone_to_int(self.plugin, zone, convert_active=True)
        if izone in [0, 1]:
            code = globals.OPERATION_CODES[1][action]
        if izone == 2:
            # FIX: use .get like CursorAction does -- not every operation is
            # defined for zone 2, and a plain [action] lookup raised KeyError
            # instead of reaching the friendly error message below.
            code = globals.OPERATION_CODES[2].get(action, None)
        if code is not None:
            send_code(self.plugin, code)
        else:
            # It is possible the user's active zone is not yet supported
            eg.PrintError("Zone {0} is not yet supported for this action".format(izone if izone > -1 else chr(-1 * izone)))

    def Configure(self, action="Play", zone="Active Zone"):
        """Config dialog: zone selector plus operation selector."""
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, globals.ALL_ZONES_PLUS_ACTIVE, limit=2)
        actions = globals.OPERATION_CODES[1].keys()
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Operation: ", pos=(10, 60))
        choice_action = wx.Choice(panel, -1, (10, 80), choices=actions)
        if action in actions:
            choice_action.SetStringSelection(action)
        while panel.Affirmed():
            panel.SetResult(actions[choice_action.GetCurrentSelection()], zones[choice_zone.GetCurrentSelection()])
class NumCharAction(eg.ActionBase):
    """Action: send a numeric/character remote code ("1", "2", ...) to a zone."""
    def __call__(self, action, zone):
        code = None
        izone = convert_zone_to_int(self.plugin, zone, convert_active=True)
        if izone in [0,1]:
            code = globals.NUMCHAR_CODES[1][action]
        if izone == 2:
            # NOTE(review): unlike CursorAction this uses a plain [action]
            # lookup, which raises KeyError when zone 2 lacks the code --
            # confirm whether every num/char code exists for zone 2.
            code = globals.NUMCHAR_CODES[2][action]
        if code is not None:
            send_code(self.plugin, code)
        else:
            # It is possible the user's active zone is not yet supported
            eg.PrintError("Zone {0} is not yet supported for this action".format(izone if izone > -1 else chr(-1 * izone)))
    def Configure(self, action="1", zone="Main Zone"):
        panel = eg.ConfigPanel()
        zones = get_available_zones(self.plugin, True, globals.ALL_ZONES_PLUS_ACTIVE, limit=2)
        # Sort single digits numerically first, then longer labels after them.
        actions = sorted(globals.NUMCHAR_CODES[1].keys(), key=lambda k: int(k) if len(k) == 1 else 10 + len(k))
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (10, 30), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        wx.StaticText(panel, label="Action: ", pos=(10, 60))
        choice_action = wx.Choice(panel, -1, (10, 80), choices=actions)
        if action in actions:
            choice_action.SetStringSelection(action)
        while panel.Affirmed():
            panel.SetResult(actions[choice_action.GetCurrentSelection()], zones[choice_zone.GetCurrentSelection()])
class GetInfo(eg.ActionBase):
    """Action: query a status value (*object*) from the receiver for a
    category (*cat*: "System", a zone name, or an input source) and return
    it as a string.  Levels are reported by the receiver in tenths of a dB.
    """

    def __call__(self, object, cat):
        # System-wide speaker patterns are handled by a dedicated helper.
        if object == "Active Speakers":
            return get_system_pattern_1(self.plugin, object)
        if object == "PreOut Levels":
            return get_system_pattern_1(self.plugin, object)
        zone = None
        # zone specific objects
        if object == "Input Selection":
            object = "Input_Sel"
        if object == "Scene":
            return "not complete"
        if object == "Sound Program":
            object = "Sound_Program"
        if cat == "Main Zone":
            zone = 1
        if cat.startswith("Zone"):
            zone = convert_zone_to_int(self.plugin, cat)
        if object == "Volume Level":
            val, unit = get_status_strings(self.plugin, ["Val", "Unit"], zone)
            return "{0} {1}".format(float(val) / 10.0, unit)
        if object == "Treble":
            val = get_sound_video_string(self.plugin, "Val", zone, "Treble")
            return "{0} ".format(float(val) / 10.0) + "dB"
        if object == "Bass":
            val = get_sound_video_string(self.plugin, "Val", zone, "Bass")
            return "{0} ".format(float(val) / 10.0) + "dB"
        if object == "Init Volume Mode":
            return get_volume_string(self.plugin, "Mode", zone, "Init_Lvl")
        if object == "Init Volume Level":
            val = get_volume_string(self.plugin, "Val", zone, "Init_Lvl")
            return "{0} ".format(float(val) / 10.0) + "dB"
        if object == "Max Volume Level":
            val = get_volume_string(self.plugin, "Val", zone, "Max_Lvl")
            return "{0} ".format(float(val) / 10.0) + "dB"
        if zone is not None:
            return get_status_string(self.plugin, object, zone)
        # all the rest are zone agnostic
        # object, input, location to get_device_string
        section = "List_Info"
        if object in ["Menu Layer", "Menu Name", "Current Line", "Max Line"] \
                or object in ['Line {0}'.format(i) for i in range(9)]:
            object = object.replace(' ', '_')
        else:
            section = "Play_Info"
            if object == "FM Mode":
                object = "FM_Mode"
            elif object == "Frequency":
                try:
                    val, unit, band, exp = get_device_strings(self.plugin, ["Val", "Unit", "Band", "Exp"], cat, section)
                    # Exp is a decimal-shift exponent for the raw value.
                    if int(exp) == 0:
                        real_val = int(val)
                    else:
                        real_val = float(val) / pow(10, int(exp))
                    return "{0} {1}".format(real_val, unit)
                except Exception:  # narrowed from bare except
                    eg.PrintError("Input not active or unavailable with your model.")
                    return None
            elif object == "Audio Mode":
                object = "Current"
            elif object == "Antenna Strength":
                object = "Antenna_Lvl"
            elif object == "Channel Number":
                object = "Ch_Number"
            elif object == "Channel Name":
                object = "Ch_Name"
            elif object == "Playback Info":
                object = "Playback_Info"
            elif object == "Repeat Mode":
                object = "Repeat"
            elif object == "Connect Information":
                object = "Connect_Info"
        try:
            return get_device_string(self.plugin, object, cat, section)
        except Exception:  # narrowed from bare except
            eg.PrintError("Input not active or unavailable with your model.")

    def Configure(self, object="Power", cat="Main Zone"):
        """Config dialog: category selector that re-populates the object list."""
        panel = eg.ConfigPanel()
        self.cats = ["System"] + self.plugin.AVAILABLE_ZONES + self.plugin.AVAILABLE_INFO_SOURCES
        wx.StaticText(panel, label="Category: ", pos=(10, 10))
        self.choice_cat = wx.Choice(panel, -1, (10, 30), choices=self.cats)
        if cat in self.cats:
            self.choice_cat.SetStringSelection(cat)
        self.choice_cat.Bind(wx.EVT_CHOICE, self.CategoryChanged)
        self.objects = [ 'Power', 'Sleep', 'Volume Level', 'Mute', 'Input Selection', 'Scene', 'Straight', 'Enhancer', 'Sound Program']
        wx.StaticText(panel, label="Object: ", pos=(10, 60))
        self.choice_object = wx.Choice(panel, -1, (10, 80), choices=self.objects)
        self.CategoryChanged()
        if object in self.objects:
            self.choice_object.SetStringSelection(object)
        while panel.Affirmed():
            panel.SetResult(self.objects[self.choice_object.GetCurrentSelection()], self.cats[self.choice_cat.GetCurrentSelection()])

    def CategoryChanged(self, event=None):
        """Refresh self.objects to match the selected category."""
        cat = self.cats[self.choice_cat.GetCurrentSelection()]
        if cat == "System":
            self.objects = globals.SYSTEM_OBJECTS
        elif cat == "Main Zone":
            self.objects = globals.MAIN_ZONE_OBJECTS
        elif cat.startswith("Zone"):
            self.objects = globals.ZONE_OBJECTS
        elif cat == "Tuner" or cat == "TUNER":
            self.objects = [ 'Band', 'Frequency', 'FM Mode']
        elif cat == "HD Radio":
            self.objects = [ 'Band', 'Frequency', 'Audio Mode']
        elif cat == "SIRIUS" or cat == "SiriusXM" or cat == "Spotify":
            self.objects = globals.SIRIUS_OBJECTS
        elif cat == "iPod":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        elif cat == "Bluetooth":
            self.objects = [ 'Connect Information']
        elif cat == "Rhapsody":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        elif cat == "SIRIUSInternetRadio":
            self.objects = globals.SIRIUS_IR_OBJECTS
        elif cat == "Pandora":
            self.objects = globals.PANDORA_OBJECTS
        # FIX: was `elif cat == "PC" or "SERVER":` -- the bare string "SERVER"
        # is always truthy, so this branch matched EVERY remaining category
        # and the NET RADIO / Napster / USB / iPod (USB) / Airplay branches
        # below were unreachable.
        elif cat == "PC" or cat == "SERVER":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        elif cat == "NET RADIO" or cat == "NET_RADIO":
            self.objects = globals.NET_RADIO_OBJECTS
        elif cat == "Napster":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        elif cat == "USB":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        elif cat == "iPod (USB)" or cat == "iPod_USB" or cat == "Airplay":
            self.objects = globals.GENERIC_PLAYBACK_OBJECTS
        else:
            eg.PrintError("Unknown Category!")
        self.choice_object.Clear()
        self.choice_object.AppendItems(self.objects)
        self.choice_object.SetSelection(0)
class GetAvailability(eg.ActionBase):
    """Action: re-detect receiver capabilities; returns the available sources."""
    def __call__(self, **kwargs):
        setup_availability(self.plugin, **kwargs)
        # Return a copy so callers cannot mutate the plugin's own list.
        return list(self.plugin.AVAILABLE_SOURCES)
class AutoDetectIP(eg.ActionBase):
    """Action: auto-detect the receiver's IP; returns it (or None on failure)."""
    def __call__(self):
        ip = auto_detect_ip_threaded(self.plugin)
        if ip is not None:
            # A receiver was found -- refresh its capability lists.
            setup_availability(self.plugin)
        return ip
class VerifyStaticIP(eg.ActionBase):
    """Action: verify the configured static IP answers; returns True/False."""

    def __call__(self):
        # Guard clause: only meaningful when a static IP is configured.
        if self.plugin.ip_auto_detect:
            eg.PrintError('Static IP is not enabled!')
            return False
        ip = setup_ip(self.plugin)
        if ip is None:
            return False
        setup_availability(self.plugin)
        return True
class NextInput(eg.ActionBase):
def __call__(self, zone, inputs):
print inputs
izone = convert_zone_to_int(self.plugin, zone, convert_active=True)
src = get_source_name(self.plugin, izone)
index = inputs.index(src) if src in inputs else -1
self._next_input(izone, index, inputs)
def _next_input(self, izone, cur_index, inputs):
next_index = 0
if cur_index != -1:
if cur_index < len(inputs) - 1:
next_index = cur_index + 1
else:
next_index = 0
else:
# Current source not in the user's list. Change to the first item?
next_index = 0
print "Warning: Current source was not in the list of sources. Changing to first source in list."
print "Switching input to", inputs[next_index]
change_source(self.plugin, inputs[next_index], izone)
t = Thread(target=self._verify_input, args=[izone, next_index, inputs])
t.start()
def _verify_input(self, izone, index, inputs, wait=0.3):
time.sleep(wait)
src = get_source_name(self.plugin, izone)
if src != inputs[index]:
eg.PrintError("Source input did not change! Your receiver may not have this input.")
print "Skipping to next input."
self._next_input(izone, index, inputs)
def Configure(self, zone='Active Zone', inputs=['HDMI1']):
panel = eg.ConfigPanel()
#reset "TUNER" to "Tuner"
newinputs = []
for source in inputs:
if source == "TUNER":
source = "Tuner"
newinputs.append(source)
inputs = newinputs
zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
wx.StaticText(panel, label="Zone: ", pos=(10, 10))
choice_zone = wx.Choice(panel, -1, (45, 7), choices=zones)
if zone in zones:
choice_zone.SetStringSelection(zone)
y = 45
x_start = 10
x = x_start
num_per_row = 5
x_padding = 80
y_padding = 20
sources = self.plugin.AVAILABLE_SOURCES
self.cbs = []
for i in range(len(sources)):
if i > 0 and i % num_per_row == 0:
x = x_start
y += y_padding
cb = wx.CheckBox(panel, -1, sources[i], (x, y))
cb.SetValue(sources[i] in inputs)
self.cbs.append(cb)
x += x_padding
# Futile attempt at setting a scrollbar, not working
# panel.SetScrollbar(wx.VERTICAL, 0, 95, 100)
while panel.Affirmed():
res = []
for i in range(len(self.cbs)):
if self.cbs[i].GetValue():
if sources[i] == "Tuner":
res.append("TUNER")
else:
res.append(sources[i])
panel.SetResult(zones[choice_zone.GetCurrentSelection()], res)
class PreviousInput(eg.ActionBase):
    """Action: switch *zone* to the previous input in the user's rotation list.

    Mirror image of NextInput: after switching, a background thread re-reads
    the source and skips further back if the receiver rejected the input.
    """
    def __call__(self, zone, inputs):
        izone = convert_zone_to_int(self.plugin, zone, convert_active=True)
        src = get_source_name(self.plugin, izone)
        index = inputs.index(src) if src in inputs else -1
        self._prev_input(izone, index, inputs)
    def _prev_input(self, izone, cur_index, inputs):
        # Step backwards with wrap-around; -1 (unknown source) restarts at 0.
        prev_index = 0
        if cur_index != -1:
            if cur_index > 0:
                prev_index = cur_index - 1
            else:
                prev_index = len(inputs) - 1
        else:
            # Current source not in the user's list. Change to the first item?
            prev_index = 0
            print "Warning: Current source was not in the list of sources. Changing to first source in list."
        print "Switching input to", inputs[prev_index]
        change_source(self.plugin, inputs[prev_index], izone)
        t = Thread(target=self._verify_input, args=[izone, prev_index, inputs])
        t.start()
    def _verify_input(self, izone, index, inputs, wait=0.3):
        # Give the receiver a moment, then confirm the switch took effect.
        time.sleep(wait)
        src = get_source_name(self.plugin, izone)
        if src != inputs[index]:
            eg.PrintError("Source input did not change! Your receiver may not have this input.")
            print "Skipping to previous input."
            self._prev_input(izone, index, inputs)
    def Configure(self, zone='Active Zone', inputs=['HDMI1']):
        panel = eg.ConfigPanel()
        #reset "TUNER" to "Tuner"
        newinputs = []
        for source in inputs:
            if source == "TUNER":
                source = "Tuner"
            newinputs.append(source)
        inputs = newinputs
        zones = get_available_zones(self.plugin, True, self.plugin.AVAILABLE_ZONES)
        wx.StaticText(panel, label="Zone: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (45, 7), choices=zones)
        if zone in zones:
            choice_zone.SetStringSelection(zone)
        # Checkbox grid of available sources, num_per_row per row.
        y = 45
        x_start = 10
        x = x_start
        num_per_row = 5
        x_padding = 80
        y_padding = 20
        sources = self.plugin.AVAILABLE_SOURCES
        self.cbs = []
        for i in range(len(sources)):
            if i > 0 and i % num_per_row == 0:
                x = x_start
                y += y_padding
            cb = wx.CheckBox(panel, -1, sources[i], (x, y))
            cb.SetValue(sources[i] in inputs)
            self.cbs.append(cb)
            x += x_padding
        # Futile attempt at setting a scrollbar, not working
        # panel.SetScrollbar(wx.VERTICAL, 0, 95, 100)
        while panel.Affirmed():
            res = []
            for i in range(len(self.cbs)):
                if self.cbs[i].GetValue():
                    if sources[i] == "Tuner":
                        res.append("TUNER")
                    else:
                        res.append(sources[i])
            panel.SetResult(zones[choice_zone.GetCurrentSelection()], res)
class InputVolumeTrim(eg.ActionBase):
    """Action: set the per-input volume trim.

    *sources* is a list of [input_name, trim] pairs where the trim is
    stored in tenths of a dB (receiver convention).
    """

    def __call__(self, sources):
        set_system_io_vol_trim(self.plugin, sources)

    def Configure(self, sources=None):
        panel = eg.ConfigPanel()
        if sources is None:  # FIX: identity comparison with None, was `== None`
            sources = get_system_io_vol_trim(self.plugin)
        # Lay one labelled spinner per input out in a grid.
        y = 10
        x_start = 10
        x = x_start
        num_per_row = 3
        x_padding = 110
        y_padding = 45
        self.fss = []
        for i in range(len(sources)):
            if i > 0 and i % num_per_row == 0:
                x = x_start
                y += y_padding
            wx.StaticText(panel, label=sources[i][0], pos=(x, y))
            # FIX: convert to float BEFORE dividing.  The stored trim may be a
            # string (TypeError with the old `sources[i][1]/10`), and with an
            # int value the old expression floor-divided on Python 2
            # (e.g. 5 -> 0.0 instead of 0.5).
            fs = FS.FloatSpin(panel, -1, min_val=-6.0, max_val=6.0, increment=0.5,
                              pos=(x, y + 15), value=float(sources[i][1]) / 10,
                              agwStyle=FS.FS_LEFT)
            fs.SetFormat("%f")
            fs.SetDigits(1)
            self.fss.append(fs)
            x += x_padding
        while panel.Affirmed():
            res = []
            for i in range(len(self.fss)):
                # Convert back to tenths of a dB for the receiver.
                res.append([sources[i][0], int(self.fss[i].GetValue() * 10)])
            panel.SetResult(res)
class ToggleMute(eg.ActionBase):
    """Toggle mute via the plugin's toggle_mute helper."""
    def __call__(self):
        toggle_mute(self.plugin)
class ToggleEnhancer(eg.ActionBase):
    """Toggle the enhancer setting via toggle_enhancer."""
    def __call__(self):
        toggle_enhancer(self.plugin)
class NextRadioPreset(eg.ActionBase):
    """Advance to the next stored radio preset."""
    def __call__(self):
        next_radio_preset(self.plugin)
class PreviousRadioPreset(eg.ActionBase):
    """Go back to the previous stored radio preset."""
    def __call__(self):
        prev_radio_preset(self.plugin)
class ToggleRadioAMFM(eg.ActionBase):
    """Switch the tuner between the AM and FM bands."""
    def __call__(self):
        toggle_radio_amfm(self.plugin)
class RadioAutoFreqUp(eg.ActionBase):
    """Auto-seek the tuner frequency upward."""
    def __call__(self):
        radio_freq(self.plugin, 'Auto Up')
class RadioAutoFreqDown(eg.ActionBase):
    """Auto-seek the tuner frequency downward."""
    def __call__(self):
        radio_freq(self.plugin, 'Auto Down')
class RadioFreqUp(eg.ActionBase):
    """Step the tuner frequency up one increment."""
    def __call__(self):
        radio_freq(self.plugin, 'Up')
class RadioFreqDown(eg.ActionBase):
    """Step the tuner frequency down one increment."""
    def __call__(self):
        radio_freq(self.plugin, 'Down')
class RadioSetExact(eg.ActionBase):
    """Tune the radio to an exact frequency on a chosen band (AM or FM)."""
    def __call__(self, freq, band):
        set_radio_freq(self.plugin, freq, band)
    def Configure(self, freq="87.5", band="FM"):
        """Config panel: band chooser plus a FloatSpin for the frequency."""
        panel = eg.ConfigPanel()
        self.freq = freq
        self.bands = ['AM', 'FM']
        wx.StaticText(panel, label="Band: ", pos=(10, 10))
        self.choice_band = wx.Choice(panel, -1, (10, 30), choices=self.bands)
        if band in self.bands:
            self.choice_band.SetStringSelection(band)
        # Re-range the spinner whenever the band changes.
        self.choice_band.Bind(wx.EVT_CHOICE, self.BandChanged)
        wx.StaticText(panel, label="Frequency: ", pos=(10, 60))
        self.floatspin = FS.FloatSpin(panel, -1, pos=(10, 80), min_val=87.5, max_val=107.9,
                                      increment=0.2, value=float(freq), agwStyle=FS.FS_LEFT)
        self.floatspin.SetFormat("%f")
        self.floatspin.SetDigits(1)
        #self.objects = [ 'Power', 'Sleep', 'Volume Level', 'Mute', 'Input Selection', 'Scene', 'Straight', 'Enhancer', 'Sound Program']
        #wx.StaticText(panel, label="Object: ", pos=(10, 60))
        #self.choice_object = wx.Choice(panel, -1, (10, 80), choices=self.objects)
        # Apply the initial band's range/increment to the spinner.
        self.BandChanged()
        #if object in self.objects:
        #    self.choice_object.SetStringSelection(object)
        while panel.Affirmed():
            panel.SetResult(self.floatspin.GetValue(), self.bands[self.choice_band.GetCurrentSelection()])
    def BandChanged(self, event=None):
        """Adjust spinner range/step for the selected band (FM: 87.5-107.9 MHz step 0.2; AM: 530-1710 step 10)."""
        band = self.bands[self.choice_band.GetCurrentSelection()]
        if band == "FM":
            self.floatspin.SetRange(min_val=87.5, max_val=107.9)
            # Keep the stored frequency if it fits the band, else clamp to the minimum.
            if 87.5 <= float(self.freq) and float(self.freq) <= 107.9:
                self.floatspin.SetValue(float(self.freq))
            else:
                self.floatspin.SetValue(float(87.5))
            self.floatspin.SetIncrement(0.2)
        else:
            self.floatspin.SetRange(min_val=530, max_val=1710)
            if 530 <= float(self.freq) and float(self.freq) <= 1710:
                self.floatspin.SetValue(float(self.freq))
            else:
                self.floatspin.SetValue(float(530))
            self.floatspin.SetIncrement(10)
class SetFeatureVideoOut(eg.ActionBase):
    """Assign a video output source to a feature input (or 'Off')."""
    def __call__(self, Feature, Source):
        feature_video_out(self.plugin, Feature, Source)
    def Configure(self, Feature="Tuner", Source="Off"):
        panel = eg.ConfigPanel()
        self.Source = Source
        # The UI label for the TUNER protocol name is "Tuner".
        if Feature == "TUNER":
            Feature = "Tuner"
        wx.StaticText(panel, label="Feature Input: ", pos=(10, 10))
        choice_zone = wx.Choice(panel, -1, (95, 7), choices=self.plugin.AVAILABLE_INFO_SOURCES)
        if Feature in self.plugin.AVAILABLE_INFO_SOURCES:
            choice_zone.SetStringSelection(Feature)
        # Grid layout parameters for the radio-button-like checkboxes.
        y = 45
        x_start = 10
        x = x_start
        num_per_row = 5
        x_padding = 80
        y_padding = 20
        sources = ['Off'] + self.plugin.AVAILABLE_INPUT_SOURCES
        self.cbs = []
        for i in range(len(sources)):
            if i > 0 and i % num_per_row == 0:
                x = x_start
                y += y_padding
            cb = wx.CheckBox(panel, -1, sources[i], (x, y))
            if Source == sources[i]:
                cb.SetValue(True)
            else:
                cb.SetValue(False)
            # Bind with a default-arg lambda so each handler captures its own index.
            cb.Bind(wx.EVT_CHECKBOX, lambda evt, temp=i: self.SourceChanged(evt, temp))
            self.cbs.append(cb)
            x += x_padding
        while panel.Affirmed():
            # NOTE(review): `res` stays unbound if no checkbox is checked,
            # which would raise NameError at SetResult -- confirm a box is
            # always checked by SourceChanged's radio-button behaviour.
            for i in range(len(self.cbs)):
                if self.cbs[i].GetValue():
                    res = sources[i]
            # Map the "Tuner" label back to the protocol name "TUNER".
            if self.plugin.AVAILABLE_INFO_SOURCES[choice_zone.GetCurrentSelection()] == "Tuner":
                xx = "TUNER"
            else:
                xx = self.plugin.AVAILABLE_INFO_SOURCES[choice_zone.GetCurrentSelection()]
            panel.SetResult(xx, res)
    def SourceChanged(self, event, item):
        """Make the checkboxes behave like radio buttons: only `item` stays checked."""
        sources = ['Off'] + self.plugin.AVAILABLE_INPUT_SOURCES
        self.Source = sources[item]
        for i in range(len(sources)):
            if i == item:
                self.cbs[i].SetValue(True)
            else:
                self.cbs[i].SetValue(False)
class SetDisplayDimmer(eg.ActionBase):
    """Set the front-panel display brightness from a fixed percentage list."""
    def __call__(self, level):
        levels = ['100%','75%','50%','25%','10%']
        # The device call takes the negated list index: '100%' -> 0, '10%' -> -4.
        lev = levels.index(level)
        DisplayDimmer(self.plugin, int(lev)*-1)
    def Configure(self, level='100%'):
        panel = eg.ConfigPanel()
        levels = ['100%','75%','50%','25%','10%']
        wx.StaticText(panel, label="Brightness: ", pos=(10, 10))
        choice_level = wx.Choice(panel, -1, (10, 50), choices=levels)
        if level in levels:
            choice_level.SetStringSelection(level)
        while panel.Affirmed():
            panel.SetResult(levels[choice_level.GetCurrentSelection()])
class SetAudioIn(eg.ActionBase):
    """Route an audio input to a video input (e.g. play AUDIO1 with HDMI_1 video)."""
    def __call__(self, Audio, Source):
        source_audio_in(self.plugin, Audio, Source)
    def Configure(self, Audio="HDMI1", Source="HDMI_1"):
        panel = eg.ConfigPanel()
        # Drop the second-to-last character: protocol name "HDMI_1" -> UI name "HDMI1".
        self.Source = Source[:-2] + Source[-1:]
        print self.Source
        # Only offer video choices the plugin actually reports as available.
        self.VidChoices = []
        PosChoices = ['HDMI1', 'HDMI2', 'HDMI3', 'HDMI4', 'HDMI5', 'HDMI6', 'HDMI7', 'HDMI8', 'HDMI9', 'AV1', 'AV2']
        for x in PosChoices:
            if x in self.plugin.AVAILABLE_SOURCES:
                self.VidChoices.append(x)
        wx.StaticText(panel, label="Video Input: ", pos=(10, 10))
        self.choice_video = wx.Choice(panel, -1, (95, 7), choices=self.VidChoices)
        if self.Source in self.VidChoices:
            self.choice_video.SetStringSelection(self.Source)
        self.choice_video.Bind(wx.EVT_CHOICE, self.VidChanged)
        # Audio choices: the selected video input itself plus available audio-only inputs.
        PosSources = ['AV1', 'AV2', 'AV3', 'AV4', 'AV5', 'AV6', 'AV7', 'AV8', 'AV9', 'AUDIO1', 'AUDIO2']
        self.AudChoices = [self.Source]
        for x in PosSources:
            if x in self.plugin.AVAILABLE_SOURCES:
                self.AudChoices.append(x)
        wx.StaticText(panel, label="Audio Input: ", pos=(10, 40))
        self.choice_audio = wx.Choice(panel, -1, (95, 37), choices=self.AudChoices)
        if Audio in self.AudChoices:
            self.choice_audio.SetStringSelection(Audio)
        while panel.Affirmed():
            vid = self.VidChoices[self.choice_video.GetCurrentSelection()]
            # Re-insert the underscore: UI name "HDMI1" -> protocol name "HDMI_1".
            vid = vid[:-1] + "_" + vid[-1:]
            panel.SetResult(self.AudChoices[self.choice_audio.GetCurrentSelection()], vid)
    def VidChanged(self, event):
        """Keep the first audio choice in sync with the selected video input."""
        self.AudChoices[0] = self.VidChoices[self.choice_video.GetCurrentSelection()]
        self.choice_audio.Clear()
        self.choice_audio.AppendItems(self.AudChoices)
        self.choice_audio.SetSelection(0)
class SetWallPaper(eg.ActionBase):
    """Choose the receiver's on-screen background image."""
    def __call__(self, Pic):
        wallpaper(self.plugin, Pic)
    def Configure(self, Pic="Picture 1"):
        panel = eg.ConfigPanel()
        PicChoices = ['Picture 1', 'Picture 2', 'Picture 3', 'Gray']
        wx.StaticText(panel, label="Background Image: ", pos=(10, 10))
        choice_pic = wx.Choice(panel, -1, (10, 30), choices=PicChoices)
        if Pic in PicChoices:
            choice_pic.SetStringSelection(Pic)
        while panel.Affirmed():
            panel.SetResult(PicChoices[choice_pic.GetCurrentSelection()])
class SendAnyCommand(eg.ActionBase):
    """Send a raw XML command to the receiver, either as Put (set) or Get (query)."""
    def __call__(self, value, action):
        if action == "Put":
            # Put: fire-and-forget set command.
            send_any(self.plugin, value, action)
        else:
            # Get: return (and print) the receiver's response.
            result = send_any(self.plugin, value, action)
            print result
            return result
    def Configure(self, value="", action="Put"):
        panel = eg.ConfigPanel()
        wx.StaticText(panel, label="Command: ", pos=(10, 10))
        CommandCtrl = wx.TextCtrl(panel, pos=(10, 30), size=(420,-1))
        wx.StaticText(panel, label="Ex: <Main_Zone><Basic_Status><Volume><Lvl><Val>***</Val></Lvl>", pos=(10, 70))
        wx.StaticText(panel, label="       <Exp>1</Exp><Unit>dB</Unit></Lvl></Volume></Basic_Status></Main_Zone>", pos=(10, 90))
        CommandCtrl.SetValue(value)
        Choices = ['Put', 'Get']
        ChoiceCtrl = wx.Choice(panel, -1, (10, 110), choices=Choices)
        ChoiceCtrl.SetStringSelection(action)
        wx.StaticText(panel, label="***If Get is selected make sure GetParam is in place of value", pos=(10, 150))
        while panel.Affirmed():
            panel.SetResult(CommandCtrl.GetValue(),Choices[ChoiceCtrl.GetCurrentSelection()])
# -*- coding: utf-8 -*-
# 卷积神经网络训练mnist
# 训练20000次后,再进行测试,测试精度可以达到99%。
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def read_data(file_queue):
    """Parse one CSV record from `file_queue` into (features, label).

    Returns an 8-element int feature tensor and the label cast to float32.
    Labels 1..5 are remapped to class indices 0..4 (any other value -> -1).
    """
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(file_queue)
    # defaults = [[0], [0.], [0.], [0.], [0.], ['']]
    # Nine int columns; the defaults also fix each column's dtype for decode_csv.
    defaults = [[0], [0], [0], [0], [0], [0], [0], [0], [0]]
    # Id,SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm,Species = tf.decode_csv(value, defaults)
    # NOTE(review): `min` shadows the builtin of the same name.
    line, count, isBusytime, isWorkday, isIn, isOK, hour, min, label = tf.decode_csv(value, defaults)
    # (Comment kept from an earlier iris-dataset version of this code.)
    # Remap labels 1..5 to 0..4; the default branch yields -1.
    label = tf.case({
        tf.equal(label, tf.constant(1)): lambda: tf.constant(0),
        tf.equal(label, tf.constant(2)): lambda: tf.constant(1),
        tf.equal(label, tf.constant(3)): lambda: tf.constant(2),
        tf.equal(label, tf.constant(4)): lambda: tf.constant(3),
        tf.equal(label, tf.constant(5)): lambda: tf.constant(4)
    }, lambda: tf.constant(-1), exclusive=True)
    # return tf.stack([SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm]), preprocess_op
    return tf.stack([line, count, isBusytime, isWorkday, isIn, isOK, hour, min]), tf.cast(label,tf.float32)
def create_pipeline(filename, batch_size, num_epochs=None):
    """Build a shuffled input pipeline over `filename`.

    Returns (example_batch, label_batch) tensors of size `batch_size`.
    NOTE(review): tf.train.shuffle_batch requires queue runners to be
    started before these tensors are evaluated.
    """
    file_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs)
    example, label = read_data(file_queue)
    # Buffer at least this many elements so shuffling is meaningful.
    min_after_dequeue = 1000
    capacity = min_after_dequeue + batch_size
    example_batch, label_batch = tf.train.shuffle_batch(
        [example, label], batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue
    )
    # print example_batch.shape
    return example_batch, label_batch
# 定义四个函数,分别用于初始化权值W,初始化偏置项b, 构建卷积层和构建池化层。
# 初始化权值W
def weight_variable(shape):
    """Create a weight Variable drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
# 初始化偏置项b
def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# 构建卷积层
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME padding."""
    stride_one = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=stride_one, padding='SAME')
# 构建池化层
def max_pool(x):
    """2x2 max pooling with stride 2 and SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Build input pipelines for the training and test CSVs.
x_train_batch, y_train_batch = create_pipeline('data/201504_data_train.csv', 1000, num_epochs=1000)
x_test, y_test = create_pipeline('data/201504_data_test.csv',100) # 10000 rows, 200
# x = tf.placeholder(tf.float32,[None,784])
# y_actual = tf.placeholder(tf.float32,shape=[None,10])
# 8 input features per example; 5 output classes (one-hot).
x = tf.placeholder(tf.float32,[None,8])
y_actual = tf.placeholder(tf.float32,shape=[None,5])
# Network: one conv layer (with ReLU and pooling), one fully-connected layer,
# a dropout layer and a softmax output layer.
# x_image = tf.reshape(x,[-1,28,28,1])
# W_conv1 = weight_variable([5,5,1,32])
# b_conv1 = bias_variable([32])
# Reshape the flat 8-feature input into a 2x4 single-channel "image".
x_image = tf.reshape(x,[-1,2,4,1])
W_conv1 = weight_variable([1,1,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)  # first conv layer
h_pool1 = max_pool(h_conv1)  # first pooling layer: 2x4 -> 1x2
# W_conv2 = weight_variable([5,5,32,64])
# W_conv2 = weight_variable([1,1,32,64])
# b_conv2 = bias_variable([64])
# h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
# h_pool2 = max_pool(h_conv2)
W_fc1 = weight_variable([1*2*32,1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool1,[-1,1*2*32])  # flatten to a vector
# h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)  # fully-connected layer
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)  # dropout layer
# W_fc2 = weight_variable([1024,10])
# b_fc2 = bias_variable([10])
W_fc2 = weight_variable([1024,5])
b_fc2 = bias_variable([5])
y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)  # softmax output
# Training ops.
cross_entropy = -tf.reduce_sum(y_actual * tf.log(y_predict))  # cross-entropy loss
# train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)  # gradient descent
correct_prediction = tf.equal(tf.argmax(y_predict,1),tf.argmax(y_actual,1))  # per-example hit/miss
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))  # mean accuracy over a batch
sess = tf.InteractiveSession()
# NOTE(review): initialize_all_variables is deprecated, and
# string_input_producer(num_epochs=...) also needs local variables
# initialised -- confirm against the TF version in use.
sess.run(tf.initialize_all_variables())
for i in range(20000): #训练阶段,迭代1000次
batch = []
batch[0], batch[1] = sess.run([x_train_batch, y_train_batch])
# batch = mnist.train.next_batch(50) #每批次50行数据
if (i%100==0): #每训练100次,执行一次
# train_acc = accuracy.eval(feed_dict={x:batch[0],y_actual:batch[1],keep_prob:1.0})
train_acc = accuracy.eval(feed_dict={x: batch[0], y_actual: batch[1], keep_prob: 1.0})
print 'step %d, training accuracy %g' %(i,train_acc)
# train_step.run(feed_dict = {x: batch[0], y_actual: batch[1], keep_prob: 0.5})
train_step.run(feed_dict={x: batch[0], y_actual: batch[1], keep_prob: 0.5})
# test_acc = accuracy.eval(feed_dict={x:mnist.test.images,y_actual:mnist.test.labels,keep_prob:1.0})
x_test, y_test = sess.run([x_test, y_test])
test_acc = accuracy.eval(feed_dict={x:x_test,y_actual:y_test,keep_prob:1.0})
print "test accuracy %g" %test_acc
|
from model_file import Model
from abc import ABC, abstractmethod
import datetime
class Login_Controller:
    """Entry point: shows the login menu and routes to the Admin or Cashier controller."""
    def __init__(self):
        pass
    def show_login_menu(self):
        # Loop until a login succeeds (x is set False on success).
        x = True
        while x == True:
            print("""SELAMAT DATANG DI APLIKASI GERAI
            1. Login (Admin)
            2. Login (Cashier)""")
            try:
                choose_get = int(input("Masukkan pilihan anda: "))
            except ValueError:
                # Non-numeric input falls through to the "not available" branch.
                choose_get = 0
            if choose_get == 1:
                username = input("Masukkan username: ")
                password = input("Masukkan password: ")
                # Role id 1 = admin login.
                if main_model.login(username, password, 1):
                    admin_controller = Admin()
                    admin_controller.username = username
                    admin_controller.show_main_menu()  # route to the admin menu
                    x = False
                else:
                    print("Maaf, username / password tidak sesuai dengan data kami.")
            elif choose_get == 2:
                username = input("Masukkan username: ")
                password = input("Masukkan password: ")
                # Role id 2 = cashier login.
                if main_model.login(username, password, 2):
                    cashier_controller = Cashier()
                    cashier_controller.username = username
                    cashier_controller.show_main_menu()  # route to the cashier menu
                    x = False
                else:
                    print("Maaf, username / password tidak sesuai dengan data kami.")
            else:
                print("Pilihan tidak tersedia")
class Controller:
    """Shared console-menu logic for Admin and Cashier.

    NOTE(review): the menus re-enter themselves recursively (each handler
    calls show_*_menu again) instead of looping, so a very long session
    could exhaust the recursion limit.
    """
    def __init__(self):
        # Prompts asked when adding a new item, in column order.
        self.list_add_item_questions = ['Masukkan nama barang : ', 'Masukkan kategori barang : ', ' Masukkan harga beli barang : ',
                            'Masukkan harga jual barang : ', "Masukkan Stok yang tersedia (dalam kg ATAU pcs) : ", "Masukkan ID gudang penyimpanan : "]
    def show_add_menu(self):
        """Menu for adding items/warehouses and restocking."""
        choose_add = 0
        while (choose_add < 1 or choose_add > 4):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-4)
            1. Tambah data barang
            2. Tambah data gudang
            3. Tambah stok barang (Restock)
            4. Kembali ke menu utama
            Masukkan pilihan anda : """ ,end='')
            try:
                choose_add = int(input())
            except ValueError:
                choose_add = 0
            if choose_add == 1:
                self.item_input_process()
                main_model.add_item(self.list_add_item_answers)
                self.show_add_menu()
            elif choose_add == 2:
                self.warehouse_input_process()
                main_model.add_warehouse(self.list_add_item_answers)
                self.show_add_menu()
            elif choose_add == 3:
                self.show_restock_item()
                self.show_add_menu()
            elif choose_add == 4:
                self.show_main_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
    def item_input_process(self):
        """Ask every add-item question, collecting answers into list_add_item_answers."""
        self.list_add_item_answers = []
        for i in range(0, len(self.list_add_item_questions)):
            print(self.list_add_item_questions[i], end='')
            if i == 5:
                # NOTE(review): index 5 appends a literal 0 *and* still
                # collects a numeric answer below -- confirm this extra
                # column is what add_item expects.
                self.list_add_item_answers.append(0)
            if i in [0,1]:
                # Name and category are free text.
                str(self.list_add_item_questions[i])
                self.try_str_input(i)
            else:
                # Prices, stock and warehouse id are numeric.
                str(self.list_add_item_questions[i])
                self.try_int_input(i)
    def warehouse_input_process(self):
        """Ask for a new warehouse address (free text)."""
        self.list_add_item_answers = []
        print("Masukkan alamat gudang baru : ")
        self.try_str_input()
    def try_str_input(self, i = 0):
        """Read a free-text answer, rejecting purely numeric input.

        float(val)/0 forces ZeroDivisionError when val parses as a number
        (rejected, ask again); a ValueError from float() means val is real
        text and is accepted.
        """
        try:
            val = str(input())
            float(val)/0
        except ZeroDivisionError:
            print("Barang tidak boleh berwujud angka saja. Masukkan lagi detail barang")
            self.try_str_input(i)
        except ValueError:
            self.list_add_item_answers.append(val)
    def try_int_input(self, i):
        """Read a numeric answer, re-asking until it parses as a float."""
        try:
            val = float(input())
            self.list_add_item_answers.append(val)
        except ValueError:
            print("Kolom ini tidak boleh berisi huruf. Masukkan lagi detail barang")
            self.try_int_input(i)
    def show_restock_item(self):
        """List items, then add stock to one of them by id."""
        main_model.fetch_item()
        print(main_model.print_item_values())
        # NOTE(review): these int()/float() calls are unguarded -- bad input
        # raises ValueError here, unlike the menus above.
        id_stock = int(input("Masukkan ID item yang akan direstock : "))
        amount_stock = float(input("Masukkan jumlah item yang akan direstock : "))
        main_model.update_stock(id_stock, amount_stock)
    def show_get_menu(self):
        """Read-only menu: list, search and sort item/warehouse data."""
        choose_get = 0
        while (choose_get < 1 or choose_get > 5):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-5)
            1. Tampilkan data barang
            2. Tampilkan data gudang
            3. Cari data barang berdasarkan nama / kategori barang
            4. Urutkan data barang berdasarkan harga terbesar
            5. Kembali ke menu utama
            Masukkan pilihan anda : """ ,end='')
            try:
                choose_get = int(input())
            except ValueError:
                choose_get = 0
            if choose_get == 1:
                main_model.fetch_item()
                print(main_model.print_item_values())
                self.show_get_menu()
            elif choose_get == 2:
                print(main_model.fetch_warehouse())
                self.show_get_menu()
            elif choose_get == 3:
                main_model.fetch_item()
                self.search_data()
                self.show_get_menu()
            elif choose_get == 4:
                main_model.fetch_item()
                print(main_model.print_sorted_item())
                self.show_get_menu()
            elif choose_get == 5:
                self.show_main_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
    def search_data(self):
        """Search items by name or category (same numeric-rejection trick as try_str_input)."""
        choose_get = 0
        while (choose_get < 1 or choose_get > 2):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-2)
            1. Masukkan nama barang ATAU kategori barang
            2. Kembali ke menu utama
            Masukkan pilihan anda : """ ,end='')
            try:
                choose_get = int(input())
            except ValueError:
                choose_get = 0
            if choose_get == 1:
                try:
                    val = str(input("Masukkan nama atau kategori barang : "))
                    float(val)/0
                except ZeroDivisionError:
                    print("Barang tidak boleh berwujud angka saja. Masukkan lagi detail barang")
                    self.search_data()
                except ValueError:
                    print(main_model.print_search_item(val))
                    self.search_data()
            elif choose_get == 2:
                self.show_get_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
    def show_transaction_menu(self):
        """Menu for creating transactions and browsing their history."""
        choose_transaction = 0
        while (choose_transaction < 1 or choose_transaction > 3):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-3)
            1. Buat transaksi baru
            2. Lihat riwayat transaksi
            3. Kembali ke menu utama
            Masukkan pilihan anda : """ ,end='')
            # NOTE(review): unlike the sibling menus, this int(input()) is not
            # wrapped in try/except, so non-numeric input raises ValueError.
            choose_transaction = int(input())
            if choose_transaction == 1:
                main_model.payment = 0
                self.add_transaction_item()
            elif choose_transaction == 2:
                self.show_transaction_history()
            elif choose_transaction == 3:
                self.show_main_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
    def add_transaction_item(self):
        """Add one line item to the current transaction, then offer to continue or print."""
        self.show_current_receipt()
        main_model.temp_storage = []
        print(main_model.fetch_receipt_body())
        print()
        item_id = int(input("Masukkan ID barang\t: "))
        stock_sold = float(input("Masukkan jumlah barang\t: "))
        print(main_model.check_availability(item_id, stock_sold))
        print("""Lanjutkan transaksi?
        1. Ya
        2. Tidak, print struk
        Masukkan pilihan anda : """ ,end='')
        try:
            print_now = int(input())
        except ValueError:
            print_now = 0
        if print_now == 1:
            self.add_transaction_item()
        elif print_now == 2:
            # Finalise: print the receipt, persist it, and reset the cart.
            print(main_model.fetch_add_next_transaction_info())
            print()
            self.show_receipt_body()
            print(main_model.record_transaction())
            print("Proses print struk...")
            print("Print sukses")
            main_model.payment = 0
            main_model.temp_storage = []
            self.show_transaction_menu()
        else:
            print("Pilihan tidak tersedia. Mohon input lagi")
            self.add_transaction_item()
    def show_current_receipt(self):
        """Show the item list and the header for the transaction in progress."""
        main_model.fetch_item()
        print(main_model.print_item_values())
        print(main_model.fetch_add_next_transaction_info())
        print()
    def show_receipt_body(self):
        """Print the line items of the current receipt."""
        print(main_model.fetch_receipt_body())
        print()
    def show_transaction_history(self):
        """Show the details of one past transaction, chosen by id."""
        main_model.fetch_transaction_ids()
        print()
        id_fetch = int(input("Masukkan ID yang akan dilihat : "))
        print(main_model.fetch_transaction_history(id_fetch))
        self.show_transaction_menu()
    @abstractmethod
    def show_main_menu(self):
        # Implemented by Admin and Cashier with their role-specific menus.
        pass
class Admin(Controller):
    """Admin role: full access (add data, view data, transactions)."""
    def __init__(self):
        super().__init__()
        self.login_as = "admin"
    def show_main_menu(self):
        """Top-level admin menu; loops until a valid choice is handled."""
        choose_read_write = 0
        while (choose_read_write < 1 or choose_read_write > 5):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-5)
            1. Tambah data
            2. Lihat data
            3. Tambah / Lihat riwayat transaksi
            4. Berhenti
            5. Logout
            Masukkan pilihan anda : """ ,end='')
            try:
                choose_read_write = int(input())
            except ValueError:
                choose_read_write = 0
            if choose_read_write == 1:
                self.show_add_menu()
            elif choose_read_write == 2:
                self.show_get_menu()
            elif choose_read_write == 3:
                # Reset any in-progress transaction state before entering.
                main_model.payment = 0
                main_model.temp_storage = []
                self.show_transaction_menu()
            elif choose_read_write == 4:
                exit()
            elif choose_read_write == 5:
                print("Anda telah sukses melakukan Logout.")
                print("\n"+"="*30+"\n")
                self.login_as = None
                login_controller.show_login_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
class Cashier(Controller):
    """Cashier role: view data and handle transactions only (no adding data)."""
    def __init__(self):
        super().__init__()
        self.login_as = "cashier"
    def show_main_menu(self):
        """Top-level cashier menu; loops until a valid choice is handled."""
        choose_read_write = 0
        while (choose_read_write < 1 or choose_read_write > 4):
            print("""
            Menu mana yang akan anda gunakan? (Pilih no 1-4)
            1. Lihat data
            2. Tambah / Lihat riwayat transaksi
            3. Berhenti
            4. Logout
            Masukkan pilihan anda : """ ,end='')
            try:
                choose_read_write = int(input())
            except ValueError:
                choose_read_write = 0
            if choose_read_write == 1:
                self.show_get_menu()
            elif choose_read_write == 2:
                # Reset any in-progress transaction state before entering.
                main_model.payment = 0
                main_model.temp_storage = []
                self.show_transaction_menu()
            elif choose_read_write == 3:
                exit()
            elif choose_read_write == 4:
                print("Anda telah sukses melakukan Logout.")
                print("\n"+"="*50+"\n")
                self.login_as = None
                login_controller.show_login_menu()
            else:
                print("Pilihan tidak tersedia. Mohon input lagi")
if __name__ == "__main__":
    # Wire up the shared model and start the app at the login menu.
    main_model = Model()
    login_controller = Login_Controller()
    login_controller.show_login_menu()
|
from dataclasses import dataclass, field
from datetime import datetime
from typing import List
from rubicon_ml.domain.mixin import TagMixin
from rubicon_ml.domain.utils import uuid
# Allowed values for Metric.directionality.
DIRECTIONALITY_VALUES = ["score", "loss"]
@dataclass
class Metric(TagMixin):
    """A single named metric value logged for an experiment.

    Raises ValueError from __post_init__ if `directionality` is not one
    of DIRECTIONALITY_VALUES.
    """
    name: str
    value: float
    # NOTE(review): assumes domain.utils.uuid.uuid4 returns a str -- confirm.
    id: str = field(default_factory=uuid.uuid4)
    # NOTE(review): defaults to None, so Optional[str] would be the accurate annotation.
    description: str = None
    directionality: str = "score"  # must be in DIRECTIONALITY_VALUES
    created_at: datetime = field(default_factory=datetime.utcnow)  # naive UTC timestamp
    tags: List[str] = field(default_factory=list)
    def __post_init__(self):
        # Validate eagerly so invalid metrics fail at construction time.
        if self.directionality not in DIRECTIONALITY_VALUES:
            raise ValueError(f"metric directionality must be one of {DIRECTIONALITY_VALUES}")
|
import Day11
#Test Common to Q1 & Q2
def test_find_number_occupied_seats_1():
    # 6 of the 9 cells are '#' (occupied).
    assert 6 == Day11.find_number_occupied_seats([['#', '#', '#'], ['#', '.', '.'], ['#', '.', '#']])
def test_find_number_occupied_seats_2():
    # Empty rows contain no occupied seats.
    assert 0 == Day11.find_number_occupied_seats([[], [], []])
#Test Specific to Q1
def test_next_state_1():
    # The centre seat (1,1) should become empty on the next step.
    current_layout = [['#', '#', '#'], ['#', '.', '.'], ['#', '.', '#']]
    max_row = int(len(current_layout))-1
    max_column = int(len(current_layout[0]))-1
    assert "." == Day11.next_state(current_layout,1,1,max_row,max_column)
def test_q1_1():
    """Q1 on the small example input should stabilise at 37 occupied seats."""
    # FIX: the original opened the file without ever closing it; `with`
    # guarantees the handle is released. list(str) yields the characters.
    with open("Day11_SeatingSystem/InputTest.txt", "r") as input_file:
        input_list = [list(line.strip()) for line in input_file]
    assert 37 == Day11.q1(input_list)
def test_q1_2():
    """Q1 on the full puzzle input should stabilise at 2418 occupied seats."""
    # FIX: the original opened the file without ever closing it; `with`
    # guarantees the handle is released. list(str) yields the characters.
    with open("Day11_SeatingSystem/Input.txt", "r") as input_file:
        input_list = [list(line.strip()) for line in input_file]
    assert 2418 == Day11.q1(input_list)
#Tests Specific to Q2
def test_next_state_Q2_1():
    # Same layout as the Q1 case; the centre seat should empty out.
    current_layout = [['#', '#', '#'],
                      ['#', '.', '.'],
                      ['#', '.', '#']]
    max_row = int(len(current_layout))-1
    max_column = int(len(current_layout[0]))-1
    assert "." == Day11.next_state(current_layout,1,1,max_row,max_column)
def test_next_state_Q2_2():
    # NOTE(review): the rows are ragged (row 0 has 4 cells, rows 1-2 have 3)
    # and max_column is derived from the widest row -- presumably an
    # intentional edge case; confirm next_state tolerates ragged input.
    current_layout = [['#', '#', '#','#'],
                      ['#', '.', '.',],
                      ['#', '.', '#']]
    max_row = int(len(current_layout))-1
    max_column = int(len(current_layout[0]))-1
    assert "." == Day11.next_state(current_layout,1,1,max_row,max_column)
|
#park sensor without buzzer
import RPi.GPIO as GPIO
import time
# BCM pin assignments for the HC-SR04 sensor and the three status LEDs.
trigger_pin = 23
echo_pin = 24
red_pin = 22
yellow_pin = 27
green_pin = 17
def setup(): #method to setup pins and make led's low initially
    """Configure GPIO directions and turn all LEDs off."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(echo_pin, GPIO.IN)
    GPIO.setup(trigger_pin, GPIO.OUT)
    GPIO.setup(red_pin, GPIO.OUT)
    GPIO.setup(yellow_pin, GPIO.OUT)
    GPIO.setup(green_pin, GPIO.OUT)
    GPIO.output(green_pin, GPIO.LOW)
    GPIO.output(yellow_pin, GPIO.LOW)
    GPIO.output(red_pin, GPIO.LOW)
def red():
    """Light only the red LED."""
    GPIO.output(red_pin, GPIO.HIGH)
    GPIO.output(green_pin, GPIO.LOW)
    GPIO.output(yellow_pin, GPIO.LOW)
def yellow():
    """Light only the yellow LED."""
    GPIO.output(yellow_pin, GPIO.HIGH)
    GPIO.output(red_pin, GPIO.LOW)
    GPIO.output(green_pin, GPIO.LOW)
def green():
    """Light only the green LED."""
    GPIO.output(green_pin, GPIO.HIGH)
    GPIO.output(yellow_pin, GPIO.LOW)
    GPIO.output(red_pin, GPIO.LOW)
def calculate_distance():
    """Continuously measure distance with the HC-SR04 and drive the LEDs."""
    while True:
        GPIO.output(trigger_pin, False)  # idle the trigger between cycles
        print ("Giving delay between distance measuring cycle")
        time.sleep(0.5)
        # A 10 microsecond HIGH pulse on the trigger starts a measurement.
        GPIO.output(trigger_pin, True)
        time.sleep(0.00001)
        GPIO.output(trigger_pin, False)
        # FIX: bind both timestamps up front so they exist even if a wait
        # loop body never runs (e.g. the echo pin is already HIGH/LOW).
        pulse_start = pulse_end = time.time()
        while GPIO.input(echo_pin)==0:  # wait for the echo pulse to start
            pulse_start = time.time()
        while GPIO.input(echo_pin)==1:  # wait for the echo pulse to end
            pulse_end = time.time()
        pulse_duration = pulse_end - pulse_start
        distance = pulse_duration * 17150  # sound speed 34300 cm/s, halved for the round trip
        distance = round(distance, 2)
        print ("Distance:",distance - 0.5,"cm")  # 0.5 cm calibration offset
        # FIX: the original ranges (6 <= d < 20, 21 <= d < 150) silently left
        # the gaps 5 < d < 6 and 20 <= d < 21 falling through to "Out of
        # Range"; the bands below are contiguous.
        if distance <= 5:
            red()
        elif distance < 20:
            yellow()
        elif distance < 150:
            green()
        else:
            print("Out of Range")
if __name__ == '__main__': # Program starts from here
    print('Press Ctrl+C to end the program...')
    setup()
    try:
        print('calling loop')
        calculate_distance()
    except KeyboardInterrupt:
        # Ctrl+C: release all GPIO pins before exiting.
        print('cleaningup gpio pins')
        GPIO.cleanup()
#=============================================================================================================================
#park sensor with buzzer
import RPi.GPIO as GPIO
import time
# BCM pin assignments: HC-SR04 sensor, three status LEDs and a buzzer.
trigger_pin = 23
echo_pin = 24
red_pin = 22
yellow_pin = 27
green_pin = 17
buzzer_pin = 25
def setup(): #method to setup pins and make led's low initially
    """Configure GPIO directions (including the buzzer) and turn all LEDs off."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(echo_pin, GPIO.IN)
    GPIO.setup(trigger_pin, GPIO.OUT)
    GPIO.setup(red_pin, GPIO.OUT)
    GPIO.setup(yellow_pin, GPIO.OUT)
    GPIO.setup(green_pin, GPIO.OUT)
    GPIO.setup(buzzer_pin, GPIO.OUT)
    GPIO.output(green_pin, GPIO.LOW)
    GPIO.output(yellow_pin, GPIO.LOW)
    GPIO.output(red_pin, GPIO.LOW)
def red():
    """Red LED on; fast buzzer beep (0.1 s low, 0.1 s high)."""
    GPIO.output(red_pin, GPIO.HIGH)
    GPIO.output(green_pin, GPIO.LOW)
    GPIO.output(yellow_pin, GPIO.LOW)
    GPIO.output(buzzer_pin, GPIO.LOW)
    time.sleep(0.1)
    GPIO.output(buzzer_pin, GPIO.HIGH)
    time.sleep(0.1)
def yellow():
    """Yellow LED on; slow buzzer beep (0.5 s low, 0.5 s high)."""
    GPIO.output(yellow_pin, GPIO.HIGH)
    GPIO.output(red_pin, GPIO.LOW)
    GPIO.output(green_pin, GPIO.LOW)
    GPIO.output(buzzer_pin, GPIO.LOW)
    time.sleep(0.5)
    GPIO.output(buzzer_pin, GPIO.HIGH)
    time.sleep(0.5)
def green():
    """Green LED on; buzzer silenced."""
    GPIO.output(green_pin, GPIO.HIGH)
    GPIO.output(yellow_pin, GPIO.LOW)
    GPIO.output(red_pin, GPIO.LOW)
    GPIO.output(buzzer_pin, GPIO.LOW)
def calculate_distance():
    """Continuously measure distance with the HC-SR04 and drive the LEDs/buzzer."""
    while True:
        GPIO.output(trigger_pin, False)  # idle the trigger between cycles
        print ("Giving delay between distance measuring cycle")
        time.sleep(0.5)
        # A 10 microsecond HIGH pulse on the trigger starts a measurement.
        GPIO.output(trigger_pin, True)
        time.sleep(0.00001)
        GPIO.output(trigger_pin, False)
        # FIX: bind both timestamps up front so they exist even if a wait
        # loop body never runs (e.g. the echo pin is already HIGH/LOW).
        pulse_start = pulse_end = time.time()
        while GPIO.input(echo_pin)==0:  # wait for the echo pulse to start
            pulse_start = time.time()
        while GPIO.input(echo_pin)==1:  # wait for the echo pulse to end
            pulse_end = time.time()
        pulse_duration = pulse_end - pulse_start
        distance = pulse_duration * 17150  # sound speed 34300 cm/s, halved for the round trip
        distance = round(distance, 2)
        print ("Distance:",distance - 0.5,"cm")  # 0.5 cm calibration offset
        GPIO.output(buzzer_pin, GPIO.LOW)
        # FIX: the original ranges (6 <= d < 20, 21 <= d < 150) silently left
        # the gaps 5 < d < 6 and 20 <= d < 21 falling through to "Out of
        # Range"; the bands below are contiguous.
        if distance <= 5:
            red()
        elif distance < 20:
            yellow()
        elif distance < 150:
            green()
        else:
            print("Out of Range")
if __name__ == '__main__': # Program starts from here
    print('Press Ctrl+C to end the program...')
    setup()
    try:
        print('calling loop')
        calculate_distance()
    except KeyboardInterrupt:
        # Ctrl+C: release all GPIO pins before exiting.
        print('cleaningup gpio pins')
        GPIO.cleanup()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
"""
import wikipedia
# Supported Wikipedia language codes.
FRENCH = "fr"
ENGLISH = "en"
def get_resum(word_to_search, language):
    """
    Return the Wikipedia summary of the searched word.

    :param word_to_search: Word to look up
    :param language: Search language code (e.g. FRENCH or ENGLISH)
    :return: The summary found, as plain text
    """
    wikipedia.set_lang(language)
    return wikipedia.summary(word_to_search)
|
r"""
Meshless Methods for Computational Mechanics (:mod:`meshless`)
==============================================================
.. currentmodule:: meshless
This repository has been created to organize the code developed during studies
with novel meshless methods.
Available modules
- ES-PIM: static and linear buckling analysis using the edge-based smoothed
point interpolation method
.. automodule:: meshless.espim
:members:
.. automodule:: meshless.dev
:members:
.. automodule:: meshless.interpolate
:members:
"""
from .version import __version__
|
#----------------URL and imports--------------------------
import requests #Should be install requests libary
URL = 'http://localhost:8088/services/users/'
#---------------REQUESTS-----------------------------------
def delete_user_by_id(id):
    """Send a DELETE request for the user with the given id; return the response."""
    target = URL + str(id)
    response = requests.delete(target)
    print('DELETE request----',response)
    return response
#--------------------------------------------------------------
def get_user_by_id(id):
    """GET the user with the given id and return the decoded JSON body."""
    response = requests.get(URL + str(id))
    # print(response.content)  # uncomment for a more detailed error description
    return response.json()
#--------------------------------------------------------------
|
from classifier_utils import *
def write_dmc_csv(folder):
    """Reduce `folder`/dmct.csv to rows with a nonzero "DMC" value.

    Reads dmct.csv (first column as the index), keeps only rows whose
    "DMC" column is nonzero, drops that column, and writes the result
    to dmct_small.csv in the same folder.
    """
    source_path = os.path.join(folder, "dmct.csv")
    table = pd.read_csv(source_path, index_col=0)
    reduced = table.loc[table["DMC"] != 0].drop(columns="DMC")
    reduced.to_csv(os.path.join(folder, "dmct_small.csv"))
if __name__ == "__main__":
    # Analysis folders whose dmct.csv should be reduced to dmct_small.csv.
    folders = ["../analysis/martino2015/Mvalues_nonallergic_vs_allergic_only_pbmc/",
               "../analysis/martino2015/Mvalues_nonallergic_vs_allergic_all/",
               "../analysis/martino2018/Mvalues_control_vs_allergic/",
               "../analysis/martino2018/Mvalues_control_vs_allergic_bulk/"]
    for f in folders:
        write_dmc_csv(f)
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
import time
# Launch the .jar that sits next to this script, preferring the bundled JDK,
# then JAVA_HOME, then `java` from PATH.
fullScript = os.path.abspath(sys.argv[0])
scriptName = os.path.basename(sys.argv[0])
scriptName = os.path.splitext(scriptName)[0]
jarName = scriptName + '.jar'
toolsFolder = os.path.dirname(fullScript)
fullJarPath = os.path.join(toolsFolder, jarName)
# Default: the bundled JDK that ships one level above the tools folder.
jdkDir = os.path.dirname(toolsFolder)
jdkDir = os.path.join(jdkDir, 'jdk', 'bin', 'java')
try:
    subProc = subprocess.Popen([jdkDir, '-jar', fullJarPath],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # If here, start succeeded
except OSError:
    # Bundled JDK missing or not executable: fall back to JAVA_HOME.
    # (Narrowed from a bare `except:` that also swallowed KeyboardInterrupt.)
    try:
        javaHome = os.environ['JAVA_HOME']
        jdkDir = os.path.join(javaHome, 'bin', 'java')
    except KeyError:
        # No JAVA_HOME, try just running java from PATH.
        jdkDir = 'java'
    try:
        subProc = subprocess.Popen([jdkDir, '-jar', fullJarPath],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        # Really an error: no usable java anywhere.
        print('Error Launching Tool: ')
        print(e)
        sys.exit(1)
# Give the tool 3 seconds to crash; if it is still running, assume success.
count = 0
while subProc.poll() is None:
    time.sleep(1)
    count = count + 1
    if count > 2:
        sys.exit(0)
# The process exited early: relay its stdout and stderr to the user.
outputStd = subProc.stdout.read()
outputErr = subProc.stderr.read()
print(outputStd.decode('utf-8'))
print(outputErr.decode('utf-8'), file=sys.stderr)
|
import configparser
import telegram
import sys
class TelegramWriter:
    """Posts text messages to a fixed Telegram chat through a bot token."""

    def __init__(self, token, chat_id):
        self.token = token
        self.chat_id = chat_id
        self.bot = telegram.Bot(token=self.token)

    def write(self, msg):
        """Deliver ``msg`` as a text message to the configured chat."""
        self.bot.sendMessage(chat_id=self.chat_id, text=msg)
if __name__ == '__main__':
    # Credentials come from the [telegram] section of TelegramBot.ini.
    config = configparser.ConfigParser()
    config.read('TelegramBot.ini')
    writer = TelegramWriter(config['telegram']['token'],
                            config['telegram']['chat_id'])
    # Send the first CLI argument if supplied, otherwise a default probe.
    message = sys.argv[1] if len(sys.argv) > 1 else 'Test Message'
    writer.write(message)
|
from ..util import file_handling as fh
import html
import output_labels
import output_label_index
import output_responses
import output_response_index
import output_words
import output_word_index
def make_masthead(active_index):
    """Build the navigation masthead linking the three index pages.

    :param active_index: index of the entry to highlight as active.
    """
    names = ['Responses', 'Labels', 'Words']
    targets = ['index_responses.html', 'index_labels.html', 'index_words.html']
    links = [html.make_link(target, name)
             for name, target in zip(names, targets)]
    return html.make_masthead(links, active_index)
def get_word_list(codes, model_dir):
    """Return the sorted union of unigram-model feature words over all codes."""
    words = set()
    for code in codes:
        # load coefficients from this code's unigram model
        model_filename = fh.make_filename(model_dir, html.replace_chars(code), 'json')
        model = fh.read_json(model_filename)
        if 'coefs' in model:
            coefs = dict(model['coefs'])
            # feature names carry a 4-character prefix; strip it off
            words.update(w[4:] for w in coefs.keys())
    return sorted(words)
def make_all_pages():
    """Regenerate the whole HTML site: label pages, response pages for all
    four party/valence datasets, word pages, and their index pages."""
    output_labels.output_label_pages()
    output_label_index.output_label_index()
    # One response page per dataset.
    output_responses.output_responses(dataset='Democrat-Likes')
    output_responses.output_responses(dataset='Democrat-Dislikes')
    output_responses.output_responses(dataset='Republican-Dislikes')
    output_responses.output_responses(dataset='Republican-Likes')
    output_response_index.output_response_index()
    output_words.output_words()
    output_word_index.output_word_index()
def main():
    """Entry point: rebuild every output page."""
    make_all_pages()


if __name__ == '__main__':
    main()
|
from espeak_bot.main import main

# Only launch the bot when executed as a script; the original called main()
# unconditionally, so merely importing this module started the bot.
if __name__ == '__main__':
    main()
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
import pkg_resources
from pants.base.build_environment import pants_version
from pants.base.exceptions import BuildConfigurationError
from pants.bin.extension_loader import load_plugins_and_backends
from pants.bin.plugin_resolver import PluginResolver
from pants.goal.goal import Goal
from pants.logging.setup import setup_logging
from pants.option.global_options import GlobalOptionsRegistrar
from pants.subsystem.subsystem import Subsystem
logger = logging.getLogger(__name__)
class OptionsInitializer(object):
    """Initializes global options and logging."""

    def __init__(self, options_bootstrapper, working_set=None, exiter=sys.exit, init_logging=True):
        """
        :param OptionsBootStrapper options_bootstrapper: An options bootstrapper instance.
        :param pkg_resources.WorkingSet working_set: The working set of the current run as returned by
                                                     PluginResolver.resolve().
        :param func exiter: A function that accepts an exit code value and exits (for tests).
        :param bool init_logging: Whether or not to initialize logging as part of options init.
        """
        self._options_bootstrapper = options_bootstrapper
        # Resolve plugins only when the caller did not supply a working set.
        self._working_set = working_set or PluginResolver(self._options_bootstrapper).resolve()
        self._exiter = exiter
        self._init_logging = init_logging

    def _setup_logging(self, global_options):
        """Sets global logging."""
        # N.B. quiet help says 'Squelches all console output apart from errors'.
        level = 'ERROR' if global_options.quiet else global_options.level.upper()
        setup_logging(level, console_stream=sys.stderr, log_dir=global_options.logdir)

    def _register_options(self, subsystems, options):
        """Registers global options."""
        # Standalone global options.
        GlobalOptionsRegistrar.register_options_on_scope(options)
        # Options for subsystems.
        for subsystem in subsystems:
            subsystem.register_options_on_scope(options)
        # TODO(benjy): Should Goals or the entire goal-running mechanism be a Subsystem?
        for goal in Goal.all():
            # Register task options.
            goal.register_options(options)

    def _setup_options(self, options_bootstrapper, working_set):
        """Bootstrap, validate, register and return the full option set.

        Returns an (options, build_configuration) tuple.
        """
        # TODO: This inline import is currently necessary to resolve a ~legitimate cycle between
        # `GoalRunner`->`EngineInitializer`->`OptionsInitializer`->`GoalRunner`.
        from pants.bin.goal_runner import GoalRunner

        bootstrap_options = options_bootstrapper.get_bootstrap_options()
        global_bootstrap_options = bootstrap_options.for_global_scope()
        # Fail fast if the running pants version differs from the requested one.
        if global_bootstrap_options.pants_version != pants_version():
            raise BuildConfigurationError(
                'Version mismatch: Requested version was {}, our version is {}.'.format(
                    global_bootstrap_options.pants_version, pants_version()
                )
            )
        # Get logging setup prior to loading backends so that they can log as needed.
        if self._init_logging:
            self._setup_logging(global_bootstrap_options)
        # Add any extra paths to python path (e.g., for loading extra source backends).
        for path in global_bootstrap_options.pythonpath:
            sys.path.append(path)
            pkg_resources.fixup_namespace_packages(path)
        # Load plugins and backends.
        plugins = global_bootstrap_options.plugins
        backend_packages = global_bootstrap_options.backend_packages
        build_configuration = load_plugins_and_backends(plugins, working_set, backend_packages)
        # Now that plugins and backends are loaded, we can gather the known scopes.
        known_scope_infos = [GlobalOptionsRegistrar.get_scope_info()]
        # Add scopes for all needed subsystems via a union of all known subsystem sets.
        subsystems = Subsystem.closure(
            GoalRunner.subsystems() | Goal.subsystems() | build_configuration.subsystems()
        )
        for subsystem in subsystems:
            known_scope_infos.append(subsystem.get_scope_info())
        # Add scopes for all tasks in all goals.
        for goal in Goal.all():
            known_scope_infos.extend(filter(None, goal.known_scope_infos()))
        # Now that we have the known scopes we can get the full options.
        options = options_bootstrapper.get_full_options(known_scope_infos)
        self._register_options(subsystems, options)
        # Make the options values available to all subsystems.
        Subsystem.set_options(options)
        return options, build_configuration

    def setup(self):
        """Run the full options setup; returns (options, build_configuration)."""
        return self._setup_options(self._options_bootstrapper, self._working_set)
|
import numpy as np
import math
import matplotlib.pyplot as plt
def load_pts_features(path):
    """ Load interest points and SIFT features.
    Args:
        path: path to the file pts_feats.npz
    Returns:
        pts: coordinate points for two images;
            an array (2,) of numpy arrays (N1, 2), (N2, 2)
        feats: SIFT descriptors for two images;
            an array (2,) of numpy arrays (N1, 128), (N2, 128)
    """
    # allow_pickle expects a bool; the original passed the string 'True',
    # which only worked because any non-empty string is truthy.
    data = np.load(path, allow_pickle=True)
    return data['pts'], data['feats']
def min_num_pairs():
    """Minimal number of point pairs to fit a homography: 4 pairs give the
    8 equations needed for the 8 degrees of freedom."""
    return 4
def pickup_samples(pts1, pts2):
    """ Randomly select k corresponding point pairs.
    Note that here we assume that pts1 and pts2 have
    been already aligned: pts1[k] corresponds to pts2[k].
    This function makes use of min_num_pairs()
    Args:
        pts1 and pts2: point coordinates from Image 1 and Image 2
    Returns:
        pts1_sub and pts2_sub: N_min randomly selected points
        from pts1 and pts2
    """
    N_min = min_num_pairs()
    n, _ = pts1.shape

    def draw():
        # Sample N_min *distinct* row indices in one call. The original used
        # a rejection loop that also hard-coded 4 instead of N_min.
        idx = np.random.choice(n, size=N_min, replace=False)
        return pts1[idx].astype(float), pts2[idx].astype(float)

    def is_degenerate(p1, p2):
        # The 2*N_min x 9 DLT matrix must have rank >= 8, otherwise the
        # homography would be under-determined (e.g. collinear points).
        A = np.empty((2 * N_min, 9))
        for i in range(N_min):
            x, y = p1[i, 0], p1[i, 1]
            x1, y1 = p2[i, 0], p2[i, 1]
            A[2 * i, :] = [0, 0, 0, x, y, 1, -x * y1, -y * y1, -y1]
            A[2 * i + 1, :] = [-x, -y, -1, 0, 0, 0, x * x1, y * x1, x1]
        return np.linalg.matrix_rank(A) < 8

    pts1_sub, pts2_sub = draw()
    # Resample until the sample is non-degenerate, as before.
    while is_degenerate(pts1_sub, pts2_sub):
        pts1_sub, pts2_sub = draw()
    return pts1_sub, pts2_sub
def compute_homography(pts1, pts2):
    """ Construct homography matrix and solve it by SVD
    Args:
        pts1: the coordinates of interest points in img1, array (N, 2)
        pts2: the coordinates of interest points in img2, array (M, 2)
    Returns:
        H: homography matrix as array (3, 3)
    """
    def preprocess(points):  # scale and shift points to be in [-1..1]
        # Condition the coordinates before the DLT solve: translate by the
        # centroid (tx, ty) and scale by half the largest point norm (xmax),
        # which keeps the SVD numerically well behaved.
        tempnorm = np.linalg.norm(points, axis=1)
        xmax = np.amax(tempnorm) / 2
        tx, ty = np.mean(points, axis=0)
        temph, tempw = points.shape
        newpoints = np.empty((temph, 2))
        for i in range(temph):
            newpoints[i, 0] = 1 / xmax * points[i, 0] - tx / xmax
            newpoints[i, 1] = 1 / xmax * points[i, 1] - ty / xmax
        return newpoints, xmax, tx, ty

    # NOTE(review): the sampling call was deliberately disabled here; the
    # function fits on all points it is given (RANSAC passes in the samples).
    pts1_sub, pts2_sub = pts1, pts2
    pts1_sub, s1, tx1, ty1 = preprocess(pts1_sub)
    pts2_sub, s2, tx2, ty2 = preprocess(pts2_sub)
    n, _ = pts1_sub.shape
    # Build the 2N x 9 DLT system A h = 0, two rows per correspondence.
    A = np.empty((2 * n, 9))
    j = 0
    for i in range(n):
        x = pts1_sub[i, 0]
        y = pts1_sub[i, 1]
        x1 = pts2_sub[i, 0]
        y1 = pts2_sub[i, 1]
        A[j, :] = [0, 0, 0, x, y, 1, -x * y1, -y * y1, -y1]
        A[j + 1, :] = [-x, -y, -1, 0, 0, 0, x * x1, y * x1, x1]
        j = j + 2
    # h is the right-singular vector of the smallest singular value (last row of v).
    u, s, v = np.linalg.svd(A)
    H = v[8, :].reshape((3, 3))
    # Undo the conditioning: T / T1 map raw pts1 / pts2 into the normalized
    # frames, so the de-normalized homography is inv(T1) @ H @ T.
    T1 = np.array([[1 / s2, 0, -tx2 / s2],
                   [0, 1 / s2, -ty2 / s2],
                   [0, 0, 1]])
    T = np.array([[1 / s1, 0, -tx1 / s1],
                  [0, 1 / s1, -ty1 / s1],
                  [0, 0, 1]])
    H = np.linalg.inv(T1) @ H @ T
    return H
def transform_pts(pts, H):
    """ Transform pst1 through the homography matrix to compare pts2 to find inliners
    Args:
        pts: interest points in img1, array (N, 2)
        H: homography matrix as array (3, 3)
    Returns:
        transformed points, array (N, 2)
    """
    num = pts.shape[0]
    # Lift to homogeneous coordinates: append a column of ones -> (N, 3).
    homogeneous = np.concatenate([pts, np.ones((num, 1))], axis=1)
    projected = H @ homogeneous.T  # (3, N)
    # Dehomogenize: divide every column by its w component.
    projected = projected / projected[2, :]
    return projected.T[:, 0:2]
def count_inliers(H, pts1, pts2, threshold=5):
    """ Count inliers
    Tips: We provide the default threshold value, but you’re free to test other values
    Args:
        H: homography matrix as array (3, 3)
        pts1: interest points in img1, array (N, 2)
        pts2: interest points in img2, array (N, 2)
        threshold: scale down threshold
    Returns:
        number of inliers
    """
    projected = transform_pts(pts1, H)
    # A pair is an inlier when its reprojection error is strictly below threshold.
    residuals = np.linalg.norm(projected - pts2, axis=1)
    return int(np.sum(residuals < threshold))
def ransac_iters(w=0.5, d=min_num_pairs(), z=0.99):
    """ Computes the required number of iterations for RANSAC.
    Args:
        w: probability that any given correspondence is valid
        d: minimum number of pairs
        z: total probability of success after all iterations
    Returns:
        minimum number of required iterations
    """
    # P(all k draws fail) = (1 - w**d)**k <= 1 - z
    #   =>  k >= log(1 - z) / log(1 - w**d)
    all_inlier_prob = math.pow(w, d)
    k = math.log2(1 - z) / math.log2(1 - all_inlier_prob)
    return int(k)
def ransac(pts1, pts2):
    """ RANSAC algorithm
    Args:
        pts1: matched points in img1, array (N, 2)
        pts2: matched points in img2, array (N, 2)
    Returns:
        best homography observed during RANSAC, array (3, 3)
    """
    best_H = None
    best_count = -1
    for _ in range(ransac_iters()):
        # Fit a candidate homography on a minimal random sample ...
        sample1, sample2 = pickup_samples(pts1, pts2)
        candidate = compute_homography(sample1, sample2)
        # ... and score it against the full correspondence set.
        score = count_inliers(candidate, pts1, pts2)
        if score > best_count:
            best_count = score
            best_H = candidate
    return best_H
def find_matches(feats1, feats2, rT=0.8):
    """ Find pairs of corresponding interest points with distance comparsion
    Tips: We provide the default ratio value, but you’re free to test other values
    Args:
        feats1: SIFT descriptors of interest points in img1, array (N, 128)
        feats2: SIFT descriptors of interest points in img1, array (M, 128)
        rT: Ratio of similar distances
    Returns:
        idx1: list of indices of matching points in img1
        idx2: list of indices of matching points in img2
    """
    idx1 = []
    idx2 = []
    n, _ = feats1.shape
    m, _ = feats2.shape
    for i in range(n):
        nearest = 100000000
        second_nearest = 100000000
        index = None
        for j in range(m):
            tempd = np.linalg.norm(feats1[i, :] - feats2[j, :])
            if tempd < nearest:
                # BUG FIX: the previous best must be demoted to second-best.
                # The original dropped it, so the ratio test compared against
                # a stale (often huge) denominator and accepted ambiguous matches.
                second_nearest = nearest
                nearest = tempd
                index = j
            elif tempd < second_nearest:
                second_nearest = tempd
        # Lowe's ratio test: keep only clearly-unambiguous matches.
        if nearest / second_nearest < rT:
            idx1.append(i)
            idx2.append(index)
    return idx1, idx2
def final_homography(pts1, pts2, feats1, feats2):
    """ re-estimate the homography based on all inliers
    Args:
        pts1: the coordinates of interest points in img1, array (N, 2)
        pts2: the coordinates of interest points in img2, array (M, 2)
        feats1: SIFT descriptors of interest points in img1, array (N, 128)
        feats2: SIFT descriptors of interest points in img1, array (M, 128)
    Returns:
        ransac_return: refitted homography matrix from ransac fucation, array (3, 3)
        idxs1: list of matched points in image 1
        idxs2: list of matched points in image 2
    """
    threshold = 5
    # Pair up descriptors, then gather the matched coordinates.
    idxs1, idxs2 = find_matches(feats1, feats2)
    matched1 = np.asarray(pts1)[idxs1].astype(float)
    matched2 = np.asarray(pts2)[idxs2].astype(float)
    # Robust initial fit, then identify its inlier set.
    best_H = ransac(matched1, matched2)
    projected = transform_pts(matched1, best_H)
    residuals = np.linalg.norm(projected - matched2, axis=1)
    keep = residuals < threshold
    # Refit the homography on all inliers for the final estimate.
    ransac_return = compute_homography(matched1[keep], matched2[keep])
    return ransac_return, idxs1, idxs2
|
class PropertyManager:
    """Collects the distinct property keywords found across a list of persons."""

    def __init__(self, person_list):
        self.person_list = person_list
        self.keyword_list = self.get_keyword_list()

    def get_keyword_list(self):
        """Return each keyword once, in first-seen order."""
        seen = []
        for person in self.person_list:
            for prop in person.property_list:
                if prop.keyword not in seen:
                    seen.append(prop.keyword)
        return seen

    def __str__(self):
        lines = ["\n\t{}".format(keyword) for keyword in self.keyword_list]
        return "PropertyManager:" + "".join(lines)
|
import unittest
from katas.kyu_6.wordify import wordify
class WordifyTestCase(unittest.TestCase):
    """Unit tests for wordify(): converts an integer to English words (kyu-6 kata).

    Covers units, teens, tens, and hundreds with/without trailing components.
    """

    def test_equals(self):
        self.assertEqual(wordify(1), 'one')

    def test_equals_2(self):
        self.assertEqual(wordify(10), 'ten')

    def test_equals_3(self):
        self.assertEqual(wordify(12), 'twelve')

    def test_equals_4(self):
        self.assertEqual(wordify(17), 'seventeen')

    def test_equals_5(self):
        self.assertEqual(wordify(56), 'fifty six')

    def test_equals_6(self):
        self.assertEqual(wordify(90), 'ninety')

    def test_equals_7(self):
        self.assertEqual(wordify(100), 'one hundred')

    def test_equals_8(self):
        self.assertEqual(wordify(120), 'one hundred twenty')

    def test_equals_9(self):
        self.assertEqual(wordify(326), 'three hundred twenty six')
|
# Concatenated (p1, p2) moves for which player 1 beats player 2.
P1_WINS = {'scissorspaper', 'paperrock', 'rockscissors'}


def rps(p1, p2):
    """Return the result string for one round of rock-paper-scissors."""
    if p1 == p2:
        return 'Draw!'
    winner = 1 if p1 + p2 in P1_WINS else 2
    return 'Player {} won!'.format(winner)
|
# Selection Sort
# Given an array of integers, sort the elements in the array in ascending order
def swap(nums, a, b):
    """Exchange nums[a] and nums[b] in place."""
    nums[a], nums[b] = nums[b], nums[a]
    return
def selectSort(nums):
    """Sort a list of integers in ascending order in place (selection sort).

    Args:
        nums: list of integers, or None.

    Returns:
        The same list object sorted ascending; None and lists of length <= 1
        are returned unchanged.
    """
    if nums is None or len(nums) <= 1:
        return nums
    for fill in range(len(nums) - 1):
        # Find the smallest remaining element ...
        smallest = fill
        for i in range(fill + 1, len(nums)):
            if nums[i] < nums[smallest]:
                smallest = i
        # ... and place it with a single exchange. The original swapped on
        # every smaller element found, doing O(n) swaps per pass instead of 1.
        if smallest != fill:
            nums[fill], nums[smallest] = nums[smallest], nums[fill]
    return nums
# Demo: print the sorted form of a few sample inputs.
for sample in ([5, 2, 3, 1], [5, 2, 3, 1, -2], [1], []):
    print(selectSort(sample))
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import chat_pb2 as chat__pb2
class ChatStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Bidirectional streaming RPC: both sides exchange UserMessage streams.
        self.realTimeChat = channel.stream_stream(
            '/chat.Chat/realTimeChat',
            request_serializer=chat__pb2.UserMessage.SerializeToString,
            response_deserializer=chat__pb2.UserMessage.FromString,
        )
        # Server-streaming RPC: one ListMessagesRequest in, a UserMessage stream out.
        self.listUserMessages = channel.unary_stream(
            '/chat.Chat/listUserMessages',
            request_serializer=chat__pb2.ListMessagesRequest.SerializeToString,
            response_deserializer=chat__pb2.UserMessage.FromString,
        )
class ChatServicer(object):
    # missing associated documentation comment in .proto file
    # Generated base class: subclass and override the methods below.
    pass

    def realTimeChat(self, request_iterator, context):
        """Bidrectional streaming rpc to allow chat between client and server
        """
        # Default stub behavior until a subclass implements the method.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def listUserMessages(self, request, context):
        """Client side streaming rpc to allow user to fetch messages from the server
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ChatServicer_to_server(servicer, server):
    """Register the servicer's RPC handlers on a grpc.Server (generated helper)."""
    rpc_method_handlers = {
        'realTimeChat': grpc.stream_stream_rpc_method_handler(
            servicer.realTimeChat,
            request_deserializer=chat__pb2.UserMessage.FromString,
            response_serializer=chat__pb2.UserMessage.SerializeToString,
        ),
        'listUserMessages': grpc.unary_stream_rpc_method_handler(
            servicer.listUserMessages,
            request_deserializer=chat__pb2.ListMessagesRequest.FromString,
            response_serializer=chat__pb2.UserMessage.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'chat.Chat', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 23:29:27 2018
@author: ck807
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.models import Model
from keras.layers.core import Dense, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D, UpSampling2D, SeparableConv2D
from keras.layers.pooling import GlobalAveragePooling2D, GlobalMaxPooling2D, MaxPooling2D
from keras.layers import Input, Add, Concatenate
from keras.layers.merge import concatenate, add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import keras.backend as K
from se import squeeze_excite_block
from layers import initial_conv_block, bottleneck_block, bottleneck_block_with_se
from layers import initial_SepConv_block, seperableConv_bottleneck_block_with_se
#-------------------------------------------------------------------------------------------------------------------------------------
print('Loading the data..')
trainData = np.load('trainData.npy')
trainMask = np.load('trainMask.npy')
valData = np.load('valData.npy')
valMask = np.load('valMask.npy')
print('PreProcessing the data..')
# Standardize images using the *training* mean/std; the same statistics are
# reused for the validation set below, as they should be.
trainData = trainData.astype('float32')
trainDataMean = np.mean(trainData)
trainDataStd = np.std(trainData)
trainData -= trainDataMean
trainData /= trainDataStd
# Masks are 8-bit images; scale them into [0, 1] for the sigmoid output.
trainMask = trainMask.astype('float32')
trainMask /= 255.
valData = valData.astype('float32')
valData -= trainDataMean
valData /= trainDataStd
valMask = valMask.astype('float32')
valMask /= 255.
#-------------------------------------------------------------------------------------------------------------------------------------
print('Building and compiling the model..')
smooth = 1.


def dice_coef(y_true, y_pred):
    """Soft Dice coefficient over the flattened masks (smoothed to avoid 0/0)."""
    truth = K.flatten(y_true)
    prediction = K.flatten(y_pred)
    overlap = K.sum(truth * prediction)
    return (2. * overlap + smooth) / (K.sum(truth) + K.sum(prediction) + smooth)


def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimizing the loss maximizes overlap."""
    return -dice_coef(y_true, y_pred)


def jaccard_coef(y_true, y_pred):
    """Smoothed Jaccard (IoU) coefficient, averaged over the reduced axes."""
    overlap = K.sum(y_true * y_pred, axis=[0, -1, -2])
    combined = K.sum(y_true + y_pred, axis=[0, -1, -2])
    jac = (overlap + smooth) / (combined - overlap + smooth)
    return K.mean(jac)
#-------------------------------------------------------------------------------------------------------------------------------------
image_dim = 192
#-------------------------------------------------------------------------------------------------------------------------------------
input_image = Input(shape=(image_dim, image_dim, 3))
#-------------------------------------------------------------------------------------------------------------------------------------
# Encoder: SE-ResNeXt-style separable-conv bottleneck blocks, split across GPUs.
with tf.device('/device:GPU:0'):
    init = initial_SepConv_block(input_image, weight_decay=5e-4) #192x192x16
    res1 = seperableConv_bottleneck_block_with_se(init, filters=32, cardinality=8, strides=1, weight_decay=5e-4)
    res1 = seperableConv_bottleneck_block_with_se(res1, filters=32, cardinality=32, strides=1, weight_decay=5e-4) #192x192x32
    res2 = seperableConv_bottleneck_block_with_se(res1, filters=64, cardinality=8, strides=1, weight_decay=5e-4)
    res2 = seperableConv_bottleneck_block_with_se(res2, filters=64, cardinality=32, strides=1, weight_decay=5e-4) #192x192x64
    res3 = seperableConv_bottleneck_block_with_se(res2, filters=96, cardinality=8, strides=1, weight_decay=5e-4)
    res3 = seperableConv_bottleneck_block_with_se(res3, filters=96, cardinality=32, strides=1, weight_decay=5e-4) #192x192x96
    pool1 = MaxPooling2D(pool_size=(2,2))(res3) #96x96x96
    res4 = seperableConv_bottleneck_block_with_se(pool1, filters=128, cardinality=8, strides=1, weight_decay=5e-4)
    res4 = seperableConv_bottleneck_block_with_se(res4, filters=128, cardinality=32, strides=1, weight_decay=5e-4) #96x96x128
    pool2 = MaxPooling2D(pool_size=(2,2))(res4) #48x48x128
    res5 = seperableConv_bottleneck_block_with_se(pool2, filters=160, cardinality=8, strides=1, weight_decay=5e-4)
    res5 = seperableConv_bottleneck_block_with_se(res5, filters=160, cardinality=32, strides=1, weight_decay=5e-4) #48x48x160
with tf.device('/device:GPU:1'):
    res6 = seperableConv_bottleneck_block_with_se(res5, filters=192, cardinality=8, strides=1, weight_decay=5e-4)
    res6 = seperableConv_bottleneck_block_with_se(res6, filters=192, cardinality=32, strides=1, weight_decay=5e-4) #48x48x192
    pool3 = MaxPooling2D(pool_size=(2,2))(res6) #24x24x192
    res7 = seperableConv_bottleneck_block_with_se(pool3, filters=256, cardinality=8, strides=1, weight_decay=5e-4)
    res7 = seperableConv_bottleneck_block_with_se(res7, filters=256, cardinality=32, strides=1, weight_decay=5e-4) #24x24x256
    pool4 = MaxPooling2D(pool_size=(2,2))(res7) #12x12x256
    res8 = seperableConv_bottleneck_block_with_se(pool4, filters=384, cardinality=8, strides=1, weight_decay=5e-4)
    res8 = seperableConv_bottleneck_block_with_se(res8, filters=384, cardinality=32, strides=1, weight_decay=5e-4) #12x12x384
# Decoder: upsample + conv, fused with the matching encoder stage via Add (U-Net-like skips).
# NOTE(review): both decoder halves are pinned to GPU:3 — confirm the first was not meant for GPU:2.
with tf.device('/device:GPU:3'):
    up1 = Conv2D(256, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size=(2,2))(res8)) #24x24x256
    merge1 = keras.layers.Add()([res7, up1])
    upres8 = seperableConv_bottleneck_block_with_se(merge1, filters=256, cardinality=32, strides=1, weight_decay=5e-4) #24x24x256
    up2 = Conv2D(192, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size=(2,2))(upres8)) #48x48x192
    merge2 = keras.layers.Add()([res6, up2])
    upres7 = seperableConv_bottleneck_block_with_se(merge2, filters=192, cardinality=32, strides=1, weight_decay=5e-4) #48x48x192
    up3 = Conv2D(160, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upres7) #48x48x160
    merge3 = keras.layers.Add()([res5, up3])
    upres6 = seperableConv_bottleneck_block_with_se(merge3, filters=160, cardinality=32, strides=1, weight_decay=5e-4) #48x48x160
with tf.device('/device:GPU:3'):
    up4 = Conv2D(128, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size=(2,2))(upres6)) # 96x96x128
    merge4 = keras.layers.Add()([res4, up4])
    upres5 = seperableConv_bottleneck_block_with_se(merge4, filters=128, cardinality=32, strides=1, weight_decay=5e-4) # 96x96x128
    up5 = Conv2D(96, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size=(2,2))(upres5)) #192x192x96
    merge5 = keras.layers.Add()([res3, up5])
    upres4 = seperableConv_bottleneck_block_with_se(merge5, filters=96, cardinality=32, strides=1, weight_decay=5e-4) #192x192x96
    up6 = Conv2D(64, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upres4) # 192x192x64
    merge6 = keras.layers.Add()([res2, up6])
    upres3 = seperableConv_bottleneck_block_with_se(merge6, filters=64, cardinality=32, strides=1, weight_decay=5e-4) #192x192x64
    up7 = Conv2D(32, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(upres3) #192x192x32
    merge7 = keras.layers.Add()([res1, up7])
    upres2 = seperableConv_bottleneck_block_with_se(merge7, filters=32, cardinality=32, strides=1, weight_decay=5e-4) #192x192x32
    upres1 = seperableConv_bottleneck_block_with_se(upres2, filters=32, cardinality=32, strides=1, weight_decay=5e-4) # 192x192x32
    # 1x1 conv + sigmoid -> per-pixel foreground probability.
    outputConvAutoEncoder = Conv2D(1, (1,1), activation='sigmoid')(upres1) #192x192x1
model = Model(input_image, outputConvAutoEncoder)
model.summary()
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.003, momentum=0.9, nesterov=True), metrics=[dice_coef,jaccard_coef,'accuracy'])
# Early stopping, best-checkpoint saving, and LR halving on val-loss plateaus.
callbacks = [
    EarlyStopping(monitor='val_loss', patience=10, verbose=1),
    ModelCheckpoint("-val_loss_checkpoint.h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1, mode='auto', epsilon=0.01, cooldown=0, min_lr=1e-6)
]
history = model.fit(trainData, trainMask, validation_data=(valData, valMask), batch_size=4, epochs=200, verbose=1, shuffle=True, callbacks=callbacks)
model.save('final_Attention.h5')
#plt.plot(history.history['lr'])
#plt.title('Learning Rate')
#plt.xlabel('epoch')
#plt.show()
#
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
#plt.title('accuracy')
#plt.xlabel('epoch')
#plt.legend(['acc', 'val_acc'], loc='upper left')
#plt.show()
#
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('loss')
#plt.xlabel('epoch')
#plt.legend(['loss', 'val_loss'], loc='upper right')
#plt.show()
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
def get_mae(X, y):
    """Cross-validated mean absolute error of a 50-tree random forest."""
    scores = cross_val_score(RandomForestRegressor(50), X, y,
                             scoring='neg_mean_absolute_error')
    # multiply by -1 to make positive MAE score instead of neg value returned as sklearn convention
    return -1 * scores.mean()
# Load both CSVs, drop incomplete rows, and keep each respondent's latest entry.
users = pd.io.parsers.read_csv('/Users/wpettengill/Desktop/willpettengill.github.io/ai_astrology/users.csv').dropna().drop_duplicates(subset=['emailaddress'], keep='last')
users['birthplacezipcode'] = users['birthplacezipcode'].astype(str).str.zfill(5)
survey = pd.read_csv('/Users/wpettengill/Desktop/willpettengill.github.io/ai_astrology/survey.csv').dropna().drop_duplicates(subset=['emailaddress'], keep='last')
df = pd.merge(users, survey, how='inner')
train_predictors = df
# Columns whose first value is a string are treated as categorical.
categoricals = [df[col].name for col in df.columns if type(df[col][0]) is str]
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
target = one_hot_encoded_training_predictors['ifeelpridefortheunitedstatesofamerica']
predictors_without_categoricals = train_predictors.drop(categoricals, axis=1)
mae_without_categoricals = get_mae(predictors_without_categoricals, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
print('Mean Absolute Error when Dropping Categoricals: ' + str(float(mae_without_categoricals)))
# (typo fixed in the output message: "Abslute" -> "Absolute")
print('Mean Absolute Error with One-Hot Encoding: ' + str(float(mae_one_hot_encoded)))
# make copy to avoid changing original data (when Imputing)
# (the original only aliased the frame despite the comment; .copy() makes it true)
new_data = one_hot_encoded_training_predictors.copy()
# make new columns indicating what will be imputed
cols_with_missing = [col for col in new_data.columns
                     if new_data[col].isnull().any()]
for col in cols_with_missing:
    new_data[col + '_was_missing'] = new_data[col].isnull()
# Imputation
my_imputer = Imputer()
new_data = my_imputer.fit_transform(new_data)
y = df.godisdeadandwehavekilledhim
# BUG FIX: the original referenced the undefined name `data` (NameError);
# `df` is the frame this selection was evidently meant for — confirm intent.
X = df.select_dtypes(exclude=['object'])
train_X, test_X, train_y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
|
# -*- coding: utf-8 -*-
from datetime import datetime
from lxml import etree
from optparse import OptionParser
from zeit.care import add_file_logging
from zeit.connector.resource import Resource
import StringIO
import httplib
import logging
import os
import zeit.connector.connector
logger = logging.getLogger(__name__)
class Ressortindexmanipulator(object):
    """Generates monthly ressort index centerpages from an XML template and
    uploads them into the CMS through a WebDAV connector.

    NOTE(review): Python 2 module (``StringIO``/``httplib`` imports,
    ``raw_input`` in ``main()``); run under a py2 interpreter.
    """

    def __init__(self):
        # German month names keyed by zero-padded month number; these feed
        # the generated user-facing (German) titles and teasers.
        self.months = {
            '01': 'Januar',
            '02': 'Februar',
            '03': u'März',
            '04': 'April',
            '05': 'Mai',
            '06': 'Juni',
            '07': 'Juli',
            '08': 'August',
            '09': 'September',
            '10': 'Oktober',
            '11': 'November',
            '12': 'Dezember'
        }
        # Base URI of generated content ids; main() overwrites this with the
        # --collection command-line option.
        self.start_id = 'http://xml.zeit.de'
        self.templatedir = os.path.dirname(__file__) + '/ressortindex_files/'
        # (filename, full path) pairs for every template file in templatedir.
        self.templatedocs = [
            (f, self.templatedir + f)
            for f in os.listdir(self.templatedir)
            if os.path.isfile(self.templatedir + f)]

    # Return a list with ressorts, months, years
    def get_ids(self):
        """Return ``[[index-uri, year, month, ressort], ...]`` for every
        ressort/month combination from 2009-09 up to the current month."""
        ressorts = self.convert_ressortxmlfile_to_list()
        months = [
            '01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
            '11', '12']
        thisyear = int(datetime.today().strftime("%Y"))
        thismonth = str(datetime.today().strftime("%m"))
        id_infos = []
        for ressort in ressorts:
            year = 2009
            for year in range(year, thisyear + 1):
                year_str = str(year)
                for month in months:
                    # Archive starts at 2009-09; skip earlier 2009 months.
                    if year == 2009 and month not in ["09", "10", "11", "12"]:
                        continue
                    infos = []
                    infos.append(
                        self.start_id + '/' + ressort + '/' + year_str +
                        '-' + month + '/index')
                    infos.append(year_str)
                    infos.append(month)
                    infos.append(ressort)
                    id_infos.append(infos)
                    # don't write index files for the future
                    if (year == thisyear) and (month == thismonth):
                        break
        return id_infos

    def convert_ressortxmlfile_to_list(self):
        """Read ``ressorts.xml`` and return the /ressortpfade/pfad texts."""
        output = []
        xml = etree.parse(self.templatedir + 'ressorts.xml')
        paths = xml.xpath('/ressortpfade/pfad')
        for path in paths:
            output.append(path.text)
        return output

    def read_index_template_file(self):
        """Return the raw XML of the ``index`` template as a string."""
        # NOTE(review): the file handle is never closed explicitly.
        resource = open(self.templatedir + 'index')
        xml = resource.read()
        return xml

    def ressort_divider(self, ressortstring):
        # NOTE(review): dead/broken helper — the split result is discarded
        # and ``xml`` is undefined here (NameError if ever called).
        ressortstring.split('/')
        return xml

    def write_new_xml_from_template(self, templatexml, id):
        """Fill the template with month/year/ressort data and return a
        ready-to-upload :class:`Resource`.

        ``id`` is one entry of :meth:`get_ids`:
        ``[index-uri, year-string, month-string, ressort-path]``.
        """
        tree = etree.parse(StringIO.StringIO(templatexml))
        centerpage = tree.xpath('//centerpage')[0]
        # Elements which should be replaced
        attr_date_first_released = tree.xpath(
            '//attribute[@name="date_first_released"]')[0]
        attr_ressort = tree.xpath('//attribute[@name="ressort"]')[0]
        attr_subressort = tree.xpath('//attribute[@name="sub_ressort"]')[0]
        attr_year = tree.xpath('//attribute[@name="year"]')[0]
        bodytitle = tree.xpath('//body/title')[0]
        teasertitle = tree.xpath('//teaser/title')[0]
        teasertext = tree.xpath('//teaser/text')[0]
        body = tree.xpath('//body')[0]
        teaser = tree.xpath('//teaser')[0]
        # Structure ids-variable: [
        #   [http://xml.zeit.de/<ressor>/<year>-<month>/index,
        #    <year>, <month>, <ressort>],...]
        year_string = id[1]
        # Prepare variables
        month_string = self.months[id[2]]  # '01' -> 'Januar'
        # do we have a path like zeit.de/ressort/subressort/...?
        ressort_strings = id[3].split('/')
        # 'ressort' -> 'Ressort'
        ressort_string = ressort_strings[0][0].upper() + ressort_strings[0][1:]
        subressort_string = ''
        if len(ressort_strings) == 2:
            subressort_string = (
                ressort_strings[1][0].upper() + ressort_strings[1][1:])
        # Replacing
        attr_date_first_released.text = datetime(
            int(id[1]), int(id[2]), 1, 0, 0, 0,
            microsecond=1).isoformat() + "+00:00"
        attr_ressort.text = ressort_string
        if len(ressort_strings) == 2:
            attr_subressort.text = subressort_string
            # Display texts below use the subressort when one exists.
            ressort_string = subressort_string
        else:
            attr_subressort.text = ''
            attr_subressort.getparent().remove(attr_subressort)
        attr_year.text = year_string
        bodytitle.text = (
            "Artikel und Nachrichten im " + month_string + " " + year_string +
            " aus dem Ressort " + ressort_string + " | ZEIT ONLINE")
        teasertitle.text = (
            "Artikel und Nachrichten im " + month_string + " " + year_string +
            " aus dem Ressort " + ressort_string + " | ZEIT ONLINE")
        teasertext.text = (
            "Lesen Sie alle Artikel und Nachrichten vom " +
            month_string + " " + year_string + " aus dem Ressort " +
            ressort_string + " auf ZEIT ONLINE")
        new_resource = Resource(
            id[0],
            'index',
            'centerpage',
            StringIO.StringIO(etree.tostring(
                centerpage, encoding="UTF-8", xml_declaration=True)),
            contentType='text/xml')
        # Mirror the document attributes into WebDAV properties.
        new_resource.properties[
            ('date_first_released', 'http://namespaces.zeit.de/CMS/document')]\
            = attr_date_first_released.text
        new_resource.properties[
            ('ressort', 'http://namespaces.zeit.de/CMS/document')] \
            = attr_ressort.text
        if len(ressort_strings) == 2:
            new_resource.properties[
                ('sub_ressort', 'http://namespaces.zeit.de/CMS/document')] \
                = attr_subressort.text
        new_resource.properties[
            ('year', 'http://namespaces.zeit.de/CMS/document')] \
            = attr_year.text
        return new_resource

    def put_xml_from_ids(self, connector):
        """Write one index resource per id into ``connector``; HTTP errors
        are logged and skipped so one failure does not abort the batch."""
        ids = self.get_ids()
        templatexml = self.read_index_template_file()
        for id in ids:
            logger.info("to be written " + id[0])
            connector_id = id[0]
            try:
                res = self.write_new_xml_from_template(templatexml, id)
                connector[connector_id] = res
                logger.info(id[0] + ' written')
            except httplib.HTTPException:
                logger.error("could not be written")
        return connector
def main():
    """Command-line entry point: parse options, ask for confirmation (unless
    --force), then write all ressort index files through a WebDAV connector."""
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option(
        "-c", "--collection", dest="collection",
        help="entry collection for starting the indexfilewriting")
    parser.add_option("-w", "--webdav", dest="webdav",
                      help="webdav server uri")
    parser.add_option("-l", "--log", dest="logfile",
                      help="logfile for errors")
    parser.add_option("-f", "--force", action="store_true", dest="force",
                      help="no reinsurance question, for batch mode e.g.")
    (options, args) = parser.parse_args()
    if not options.collection:
        parser.error("missing entry point for indexfilewriting")
    if not options.webdav:
        parser.error("missing webdav uri")
    if options.logfile:
        add_file_logging(logger, options.logfile)
    if not options.force:
        # Interactive safety prompt (Python 2 raw_input).
        user_ok = raw_input(
            '\nWriting ressortindexfiles in webdav uri will start at %s.\n'
            'Are you sure? [y|n]: ' % options.collection)
    else:
        user_ok = "y"
    if user_ok == "y":
        connector = zeit.connector.connector.Connector(roots=dict(
            default=options.webdav))
        indexwriter = Ressortindexmanipulator()
        # Generated ids are rooted at the user-supplied collection.
        indexwriter.start_id = options.collection
        indexwriter.put_xml_from_ids(connector)
|
# CLI script: run a trained UNet over a folder of images and export object
# measurements (optionally converting pixel sizes via a dpi/dpm scale).
import torch
from argparse import ArgumentParser
from unet.utils import *
from unet.unet import UNet

parser = ArgumentParser()
parser.add_argument("--images_path", type=str, required=True)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--result_folder", type=str, required=True)
parser.add_argument("--device", type=str, default='cpu')
parser.add_argument("--min_object_size", type=float, default=0)
# np presumably comes from the `unet.utils` star import — verify.
parser.add_argument("--max_object_size", type=float, default=np.inf)
# NOTE(review): default=False with type=float works only through falsiness
# checks below; a None default would be clearer.
parser.add_argument("--dpi", type=float, default=False)
parser.add_argument("--dpm", type=float, default=False)
# NOTE(review): argparse `type=bool` is a known pitfall — bool("False") is
# True, so ANY non-empty value on the CLI enables visualization; consider
# action="store_true" instead.
parser.add_argument("--visualize", type=bool, default=False)
args = parser.parse_args()

# determining dpm: explicit --dpm wins unless --dpi is given, in which case
# convert dots-per-inch to dots-per-meter.
dpm = args.dpm if not args.dpi else dpi_to_dpm(args.dpi)

print("Loading dataset...")
predict_dataset = ReadTestDataset(args.images_path)
device = torch.device(args.device)
print("Dataset loaded")
print("Loading model...")
# UNet with 3 input channels and 3 output channels.
unet = UNet(3, 3)
unet.load_state_dict(torch.load(args.model, map_location=device))
model = ModelWrapper(unet, args.result_folder, cuda_device=device)
print("Model loaded")
print("Measuring images...")
model.measure_large_images(predict_dataset, export_path=args.result_folder,
                           visualize_bboxes=args.visualize, filter=[args.min_object_size, args.max_object_size],
                           dpm=dpm, verbose=True, tile_res=(256, 256))
|
import unittest
from Pyskell.Language.Syntax import *
from Pyskell.Language.TypeClasses import *
from Pyskell.Language.EnumList import L, Enum
from Pyskell.Language.Syntax.QuickLambda import __
class ADTTest(unittest.TestCase):
    """Exercises Pyskell's Haskell-style algebraic data types: declaration
    via the `data`/`d` DSL, `deriving`, a guard-based Show instance, and
    Bounded/Enum/Ord behaviour."""

    def test_adt(self):
        # data Unit = V1 | V2 | V3 deriving (Eq, Ord, Enum, Bounded)
        Unit, V1, V2, V3 = data.Unit == d.V1 | d.V2 | d.V3 \
            & deriving(Eq, Ord, Enum, Bounded)
        # Derived Eq: each constructor equals itself, differs from the rest.
        self.assertEqual(V1, V1)
        self.assertEqual(V2, V2)
        self.assertEqual(V3, V3)
        self.assertNotEqual(V1, V2)
        self.assertNotEqual(V1, V3)
        self.assertNotEqual(V2, V3)
        # Manual Show instance mapping V1/V2/V3 -> "a"/"b"/"c" via guards.
        Instance(Show, Unit).where(
            show=lambda _x: ~(Guard(_x) | g(__ == V1) >> "a"
                              | g(__ == V2) >> "b"
                              | otherwise >> "c")
        )
        self.assertEqual("a", show % V1)
        self.assertEqual("b", show % V2)
        self.assertEqual("c", show % V3)
        # Derived Bounded: bounds() yields (minBound, maxBound).
        boundaries = bounds(V1)
        self.assertEqual(V1, boundaries[0])
        self.assertEqual(V3, boundaries[-1])
        # Derived Enum: enumFromTo over the full range has 3 elements.
        adt_list = L[boundaries[0], ..., boundaries[-1]]
        self.assertEqual(3, len(adt_list))
        # Derived Ord follows declaration order: V1 < V2 < V3.
        self.assertTrue(V1 < V3)
        self.assertFalse(V2 > V2)
        self.assertFalse(V2 > V3)
|
# -*- coding: utf-8 -*-
"""
sphinx.domains.swift
~~~~~~~~~~~~~~~~~~~
The Swift domain.
:copyright: Copyright 2016 by Johannes Schriewer
:license: BSD, see LICENSE for details.
"""
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, GroupedField, TypedField
from .std import SwiftStandardDomain
def _iteritems(d):
for k in d:
yield k, d[k]
class SwiftObjectDescription(ObjectDescription):
    """Base directive for all Swift objects: honours ``:noindex:`` and
    registers each described object in the domain's object inventory."""

    option_spec = {
        'noindex': directives.flag,
    }

    def add_target_and_index(self, name_cls_add, sig, signode):
        # `name_cls_add` is the 3-tuple returned by handle_signature():
        # (fully qualified name, anchor signature, add-to-index flag).
        fullname, signature, add_to_index = name_cls_add
        if 'noindex' in self.options or not add_to_index:
            return
        # for char in '<>()[]:, ?!':
        #     signature = signature.replace(char, "-")
        # note target
        if fullname not in self.state.document.ids:
            signode['ids'].append(signature)
            self.state.document.note_explicit_target(signode)
            # Inventory entry: fullname -> (document, object type, anchor).
            self.env.domaindata['swift']['objects'][fullname] = (self.env.docname, self.objtype, signature)
        else:
            objects = self.env.domaindata['swift']['objects']
            self.env.warn(
                self.env.docname,
                'duplicate object description of %s, ' % fullname +
                'other instance in ' +
                self.env.doc2path(objects[fullname][0]),
                self.lineno)
class SwiftClass(SwiftObjectDescription):
    """Directive for class-like Swift objects (class, struct, enum,
    protocol, extension, default_impl).

    ``handle_signature`` parses the signature into name, generic
    parameters, superclass/protocol list and ``where`` type constraints and
    renders them; ``before_content``/``after_content`` track the current
    class name in ``env.temp_data`` so nested members can qualify
    themselves.
    """

    def handle_signature(self, sig, signode):
        container_class_name = self.env.temp_data.get('swift:class')
        # split on : -> first part is class name, second part is superclass list
        parts = [x.strip() for x in sig.split(':', maxsplit=1)]
        # if the class name contains a < then there is a generic type attachment
        if '<' in parts[0]:
            class_name, generic_type = parts[0].split('<')
            generic_type = generic_type[:-1]
        else:
            class_name = parts[0]
            generic_type = None
        # did we catch a 'where' ?
        type_constraint = None
        class_parts = None
        if ' ' in class_name:
            class_parts = class_name.split(' ')
        elif '\t' in class_name:
            class_parts = class_name.split('\t')
        if class_parts:
            # if a part starts with `where` then we have a type constraint
            for index, p in enumerate(class_parts):
                if p == 'where':
                    # NOTE(review): parts.pop() consumes the superclass list
                    # here, so the len(parts) > 1 branch below is skipped —
                    # confirm this is the intended precedence.
                    type_constraint = " ".join(class_parts[index:]) + ": " + parts.pop()
                    class_name = " ".join(class_parts[:index])
                    break
        # Qualified names: keep only the last path component.
        if class_name.count('.'):
            class_name = class_name.split('.')[-1]
        # if we have more than one part this class has super classes / protocols
        super_classes = None
        if len(parts) > 1:
            super_classes = [x.strip() for x in parts[1].split(',')]
            # if a part starts with `where` then we have a type constraint
            for index, sup in enumerate(super_classes):
                if sup == 'where':
                    type_constraint = " ".join(super_classes[index:])
                    super_classes = super_classes[:index]
                    break
        # Add class name
        signode += addnodes.desc_addname(self.objtype, self.objtype + ' ')
        signode += addnodes.desc_name(class_name, class_name)
        # if we had super classes add annotation
        if super_classes:
            children = []
            for c in super_classes:
                prefix = ', ' if c != super_classes[0] else ''
                # ref = addnodes.pending_xref(c, reftype='protocol', refdomain='swift', reftarget=c, refwarn=True)
                ref = nodes.Text(prefix + c)
                children.append(ref)
            signode += addnodes.desc_type('', ' : ', *children)
        # add type constraint
        if type_constraint:
            signode += addnodes.desc_type(type_constraint, ' ' + type_constraint)
        # Plain extensions (no protocol conformance) are not indexed.
        add_to_index = True
        if self.objtype == 'extension' and not super_classes:
            add_to_index = False
        if container_class_name:
            class_name = container_class_name + '.' + class_name
        return self.objtype + ' ' + class_name, self.objtype + ' ' + class_name, add_to_index

    def before_content(self):
        """Remember the described class in ``env.temp_data`` so member
        directives nested in the body can qualify their names."""
        if self.names:
            parts = self.names[0][1].split(" ")
            if len(parts) > 1:
                self.env.temp_data['swift:class'] = " ".join(parts[1:])
            else:
                # Fix: was `env.temp_data[...]`, which raised NameError.
                self.env.temp_data['swift:class'] = self.names[0][1]
            self.env.temp_data['swift:class_type'] = self.objtype
            self.clsname_set = True

    def after_content(self):
        # Fix: guard with getattr — before_content only sets clsname_set
        # when self.names is non-empty.
        if getattr(self, 'clsname_set', False):
            self.env.temp_data['swift:class'] = None
            self.env.temp_data['swift:class_type'] = None
class SwiftClassmember(SwiftObjectDescription):
    """Directive for callable Swift members: functions, methods,
    class/static methods and initializers.

    Parses a Swift-like signature (generics, labelled parameter list with
    defaults, ``throws``, ``-> ReturnType``) and renders it into signature
    nodes.
    """

    doc_field_types = [
        TypedField('parameter', label=l_('Parameters'),
                   names=('param', 'parameter', 'arg', 'argument'),
                   typerolename='obj', typenames=('paramtype', 'type')),
        GroupedField('errors', label=l_('Throws'), rolename='obj',
                     names=('raises', 'raise', 'exception', 'except', 'throw', 'throws'),
                     can_collapse=True),
        Field('returnvalue', label=l_('Returns'), has_arg=False,
              names=('returns', 'return')),
    ]

    def _parse_parameter_list(self, parameter_list):
        """Split a raw parameter string into dicts with ``name``,
        ``variable_name``, ``type`` and ``default`` keys.

        Commas nested inside (), [] or <> do not split parameters.
        """
        parameters = []
        # Open-bracket depth per bracket kind; any non-zero depth means we
        # are inside a nested type and must not split on ','.
        parens = {'[]': 0, '()': 0, '<>': 0}
        last_split = 0
        for i, c in enumerate(parameter_list):
            for key, value in parens.items():
                if c == key[0]:
                    value += 1
                    parens[key] = value
                if c == key[1]:
                    value -= 1
                    parens[key] = value
            skip_comma = False
            for key, value in parens.items():
                if value != 0:
                    skip_comma = True
            if c == ',' and not skip_comma:
                parameters.append(parameter_list[last_split:i].strip())
                last_split = i + 1
        parameters.append(parameter_list[last_split:].strip())
        result = []
        for parameter in parameters:
            # "label name: Type = default" -> pieces.
            name, rest = [x.strip() for x in parameter.split(':', maxsplit=1)]
            name_parts = name.split(' ', maxsplit=1)
            if len(name_parts) > 1:
                # External label + internal variable name.
                name = name_parts[0]
                variable_name = name_parts[1]
            else:
                name = name_parts[0]
                variable_name = name_parts[0]
            # Default value is everything after the LAST '='.
            equals = rest.rfind('=')
            if equals >= 0:
                default_value = rest[equals + 1:].strip()
                param_type = rest[:equals].strip()
            else:
                default_value = None
                param_type = rest
            result.append({
                "name": name,
                "variable_name": variable_name,
                "type": param_type,
                "default": default_value
            })
        return result

    def handle_signature(self, sig, signode):
        container_class_name = self.env.temp_data.get('swift:class')
        # NOTE(review): container_class_type is currently unused (its uses
        # are commented out below).
        container_class_type = self.env.temp_data.get('swift:class_type')
        # split into method name and rest
        first_anglebracket = sig.find('<')
        first_paren = sig.find('(')
        if first_anglebracket >= 0 and first_paren > first_anglebracket:
            # Generic parameter list precedes the paren: name ends after '>'.
            split_point = sig.find('>')+1
        else:
            split_point = first_paren
        # calculate generics
        if first_anglebracket >= 0:
            sp = sig[first_anglebracket:]
            np = sp.find('>')
            generics = sp[:np+1]
        else:
            generics = None
        method_name = sig[0:split_point]
        # find method specialization
        angle_bracket = method_name.find('<')
        if angle_bracket >= 0:
            method_name = method_name[:angle_bracket]
        rest = sig[split_point:]
        # split parameter list
        parameter_list = None
        depth = 0
        for i, c in enumerate(rest):
            if c == '(':
                depth += 1
            elif c == ')':
                depth -= 1
            if depth == 0:
                parameter_list = rest[1:i]
                rest = rest[i + 1:]
                break
        # NOTE(review): if no ')' ever closes, parameter_list stays None and
        # len(parameter_list) raises TypeError — confirm inputs always have
        # a balanced parameter list.
        if len(parameter_list) > 0:
            parameters = self._parse_parameter_list(parameter_list)
        else:
            parameters = []
        # check if it throws
        throws = rest.find('throws') >= 0
        # check for return type
        return_type = None
        arrow = rest.find('->')
        if arrow >= 0:
            return_type = rest[arrow + 2:].strip()
        # build signature and add nodes
        signature = ''
        if self.objtype == 'static_method':
            signode += addnodes.desc_addname("static", "static func ")
        elif self.objtype == 'class_method':
            signode += addnodes.desc_addname("class", "class func ")
        elif self.objtype != 'init':
            signode += addnodes.desc_addname("func", "func ")
        if self.objtype == 'init':
            signode += addnodes.desc_name('init', 'init')
            # Anchor signature: init(label1:label2:...)
            signature += 'init('
            for p in parameters:
                signature += p['name'] + ':'
            signature += ')'
        else:
            signode += addnodes.desc_name(method_name, method_name)
            signature += method_name
            signature += '('
            for p in parameters:
                signature += p['name'] + ':'
            signature += ')'
            if generics:
                signode += addnodes.desc_addname(generics,generics)
        # Rendered parameter list with types and defaults.
        params = []
        sig = ''
        for p in parameters:
            param = p['name'] + ': ' + p['type']
            sig += p['name'] + ':'
            if p['default']:
                param += ' = ' + p['default']
            params.append(addnodes.desc_parameter(param, param))
        signode += addnodes.desc_parameterlist(sig, "", *params)
        title = signature
        if throws:
            signode += addnodes.desc_annotation("throws", "throws")
            # signature += "throws"
        if return_type:
            signode += addnodes.desc_returns(return_type, return_type)
            #signature += "-" + return_type
        #if container_class_type == 'protocol':
        #    signature += "-protocol"
        #if self.objtype == 'static_method':
        #    signature += '-static'
        #elif self.objtype == 'class_method':
        #    signature += '-class'
        if container_class_name:
            return (container_class_name + '.' + title), (container_class_name + '.' + signature), True
        return title, signature, True
class SwiftEnumCase(SwiftObjectDescription):
    """Directive for a single enum case: ``name``, ``name(Assoc)``,
    ``name = raw`` or ``name(Assoc) = raw``."""

    def handle_signature(self, sig, signode):
        container_class_name = self.env.temp_data.get('swift:class')
        enum_case = None
        assoc_value = None   # associated-value tuple, rendered with '('
        raw_value = None     # raw value after '='
        # split on ( -> first part is case name
        parts = [x.strip() for x in sig.split('(', maxsplit=1)]
        enum_case = parts[0].strip()
        if len(parts) > 1:
            # Case with associated value, possibly followed by '= raw'.
            parts = parts[1].rsplit('=', maxsplit=1)
            assoc_value = parts[0].strip()
            if len(parts) > 1:
                raw_value = parts[1].strip()
            if assoc_value == "":
                assoc_value = None
            else:
                # Re-attach the '(' consumed by the split above.
                assoc_value = "(" + assoc_value
        else:
            # No associated value: only look for '= raw'.
            parts = [x.strip() for x in sig.split('=', maxsplit=1)]
            enum_case = parts[0].strip()
            if len(parts) > 1:
                raw_value = parts[1].strip()
        # Add class name
        signode += addnodes.desc_name(enum_case, enum_case)
        if assoc_value:
            signode += addnodes.desc_type(assoc_value, assoc_value)
        if raw_value:
            signode += addnodes.desc_addname(raw_value, " = " + raw_value)
        if container_class_name:
            enum_case = container_class_name + '.' + enum_case
        return enum_case, enum_case, True
# Matches `name[: Type][= value]` variable/constant signatures; named groups:
# `name`, `type` (may contain generics/optionals/tuples), `value` (up to '{').
var_sig = re.compile(r'^\s*(?P<name>[a-zA-Z_][a-zA-Z0-9_]*\b)(\s*:\s*(?P<type>[a-zA-Z_[(][a-zA-Z0-9_<>[\]()?!:, \t-\.]*))?(\s*=\s*(?P<value>[^{]*))?')
class SwiftClassIvar(SwiftObjectDescription):
    """Directive for stored properties/constants: ``var``, ``let`` and
    their ``static`` variants, parsed via the ``var_sig`` regex."""

    doc_field_types = [
        Field('defaultvalue', label=l_('Default'), has_arg=False,
              names=('defaults', 'default')),
    ]

    def handle_signature(self, sig, signode):
        container_class_name = self.env.temp_data.get('swift:class')
        match = var_sig.match(sig)
        if not match:
            # Unparseable signature: warn and bail out (returns None).
            self.env.warn(
                self.env.docname,
                'invalid variable/constant documentation string "%s", ' % sig,
                self.lineno)
            return
        match = match.groupdict()
        # Keyword prefix depends on the directive used.
        if self.objtype == 'static_var':
            signode += addnodes.desc_addname("static var", "static var ")
        elif self.objtype == 'static_let':
            signode += addnodes.desc_addname("static let", "static let ")
        elif self.objtype == 'var':
            signode += addnodes.desc_addname("var", "var ")
        elif self.objtype == 'let':
            signode += addnodes.desc_addname("let", "let ")
        name = match['name'].strip()
        signature = name
        signode += addnodes.desc_name(name, name)
        if match['type']:
            typ = match['type'].strip()
            #signature += '-' + typ
            signode += addnodes.desc_type(typ, " : " + typ)
        if match['value'] and len(match['value']) > 0:
            value = match['value'].strip()
            signode += addnodes.desc_addname(value, " = " + value)
        elif match['value']:
            # Empty value group: a computed property body follows.
            signode += addnodes.desc_addname('{ ... }', ' = { ... }')
        #signature += "-" + self.objtype
        if container_class_name:
            name = container_class_name + '.' + name
            signature = container_class_name + '.' + signature
        return name, signature, True
class SwiftXRefRole(XRefRole):
    """Cross-reference role that namespaces bare targets by object type.

    A target that already contains a dot is treated as fully qualified and
    used verbatim; otherwise the role's type is prepended ("class Foo").
    """

    def __init__(self, tipe):
        super().__init__()
        self.tipe = tipe

    def process_link(self, env, refnode, has_explicit_title, title, target):
        if "." not in target:
            target = self.tipe + " " + target
        return title, target
# Sort priority of object kinds in the module index (classes first).
type_order = ['class', 'struct', 'enum', 'protocol', 'extension']
class SwiftModuleIndex(Index):
    """
    Index subclass to provide the Swift module index.
    """
    name = 'modindex'
    localname = l_('Swift Module Index')
    shortname = l_('Index')

    @staticmethod
    def indexsorter(a):
        """Sort key: group entries by their kind's position in type_order,
        then alphabetically; unknown kinds sort by plain name."""
        global type_order
        for i, t in enumerate(type_order):
            if a[0].startswith(t):
                return '{:04d}{}'.format(i, a[0])
        return a[0]

    @staticmethod
    def sigsorter(a):
        """Sort key: first character of the signature after stripping the
        leading kind keyword (e.g. 'class ')."""
        global type_order
        start = 0
        for t in type_order:
            if a[3].startswith(t):
                start = len(t) + 1
                break
        return a[3][start]

    def generate(self, docnames=None):
        """Build the index content: a list of (letter, entries) pairs,
        entries grouped by the first letter of the unprefixed signature."""
        global type_order
        content = []
        collapse = 0
        entries = []
        for refname, (docname, typ, signature) in _iteritems(self.domain.data['objects']):
            # Human-readable kind, e.g. 'static_method' -> 'static method'.
            info = typ.replace("_", " ")
            entries.append((
                refname,
                0,
                docname,
                signature,
                info,
                '',
                ''
            ))
        entries = sorted(entries, key=self.sigsorter)
        current_list = []
        current_key = None
        for entry in entries:
            start = 0
            for t in type_order:
                if entry[3].startswith(t):
                    start = len(t) + 1
                    break
            # New initial letter starts a new group.
            if entry[3][start].upper() != current_key:
                if len(current_list) > 0:
                    content.append((current_key, current_list))
                current_key = entry[3][start].upper()
                current_list = []
            current_list.append(entry)
        # NOTE(review): with zero entries this appends (None, []) — confirm
        # callers tolerate the empty group.
        content.append((current_key, current_list))
        result = []
        for key, entries in content:
            e = sorted(entries, key=self.indexsorter)
            result.append((key, e))
        return result, collapse
class SwiftDomain(Domain):
    """Swift language domain."""
    name = 'swift'
    label = 'Swift'
    # Object kinds this domain can describe and the roles that link to them.
    object_types = {
        'function': ObjType(l_('function'), 'function', 'obj'),
        'method': ObjType(l_('method'), 'method', 'obj'),
        'class_method': ObjType(l_('class method'), 'class_method', 'obj'),
        'static_method': ObjType(l_('static method'), 'static_method','obj'),
        'class': ObjType(l_('class'), 'class', 'obj'),
        'enum': ObjType(l_('enum'), 'enum', 'obj'),
        'enum_case': ObjType(l_('enum case'), 'enum_case', 'obj'),
        'struct': ObjType(l_('struct'), 'struct', 'obj'),
        'init': ObjType(l_('initializer'), 'init', 'obj'),
        'protocol': ObjType(l_('protocol'), 'protocol', 'obj'),
        'extension': ObjType(l_('extension'), 'extension', 'obj'),
        'default_impl': ObjType(l_('extension'), 'default_impl', 'obj'),
        'let': ObjType(l_('constant'), 'let', 'obj'),
        'var': ObjType(l_('variable'), 'var', 'obj'),
        'static_let': ObjType(l_('static/class constant'), 'static_let', 'obj'),
        'static_var': ObjType(l_('static/class variable'), 'static_var', 'obj'),
    }
    # Directive name -> implementing directive class.
    directives = {
        'function': SwiftClassmember,
        'method': SwiftClassmember,
        'class_method': SwiftClassmember,
        'static_method': SwiftClassmember,
        'class': SwiftClass,
        'enum': SwiftClass,
        'enum_case': SwiftEnumCase,
        'struct': SwiftClass,
        'init': SwiftClassmember,
        'protocol': SwiftClass,
        'extension': SwiftClass,
        'default_impl': SwiftClass,
        'let': SwiftClassIvar,
        'var': SwiftClassIvar,
        'static_let': SwiftClassIvar,
        'static_var': SwiftClassIvar,
    }
    # Cross-reference roles; each prefixes bare targets with its type name.
    roles = {
        'function': SwiftXRefRole("function"),
        'method': SwiftXRefRole("method"),
        'class': SwiftXRefRole("class"),
        'enum': SwiftXRefRole("enum"),
        'enum_case': SwiftXRefRole("enum_case"),
        'struct': SwiftXRefRole("struct"),
        'init': SwiftXRefRole("init"),
        'static_method': SwiftXRefRole("static_method"),
        'class_method': SwiftXRefRole("class_method"),
        'protocol': SwiftXRefRole("protocol"),
        'extension': SwiftXRefRole("extension"),
        'default_impl': SwiftXRefRole("default_impl"),
        'let': SwiftXRefRole("let"),
        'var': SwiftXRefRole("var"),
        'static_let': SwiftXRefRole("static_let"),
        'static_var': SwiftXRefRole("static_var")
    }
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
    }
    indices = [
        SwiftModuleIndex,
    ]

    def clear_doc(self, docname):
        """Drop all objects that were collected from `docname`."""
        for fullname, (fn, _, _) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][fullname]

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve a cross-reference by exact inventory-name match;
        returns None when the target is unknown."""
        for refname, (docname, type, signature) in _iteritems(self.data['objects']):
            if refname == target:
                node = make_refnode(builder, fromdocname, docname, signature, contnode, target)
                return node
        return None

    def get_objects(self):
        """Yield Sphinx object tuples for search/inventory generation."""
        for refname, (docname, type, signature) in _iteritems(self.data['objects']):
            yield (refname, refname, type, docname, refname, 1)
def make_index(app,*args):
    """'builder-inited' callback: (re)build the autodoc symbol index."""
    from .autodoc import build_index
    build_index(app)
def setup(app):
    """Sphinx extension entry point: register the autodocumenters, the
    Swift domain, the std-domain override and the config values."""
    from .autodoc import SwiftAutoDocumenter, ProtocolAutoDocumenter, ExtensionAutoDocumenter, EnumAutoDocumenter
    app.connect('builder-inited', make_index)
    # Replace the standard domain so Swift-aware fallback lookups work.
    app.override_domain(SwiftStandardDomain)
    app.add_autodocumenter(SwiftAutoDocumenter)
    app.add_autodocumenter(ProtocolAutoDocumenter)
    app.add_autodocumenter(ExtensionAutoDocumenter)
    app.add_autodocumenter(EnumAutoDocumenter)
    app.add_domain(SwiftDomain)
    # Where autodoc searches for Swift sources, relative to the docs dir.
    app.add_config_value('swift_search_path', ['../src'], 'env')
    app.add_config_value('autodoc_default_flags', [], True)
|
"""
Created by hzwangjian1
on 2017-08-04
"""
import hashlib
def test():
    """Print two sequences of multiples of ten (scratch experiment)."""
    limit = 33
    # One multiple of 10 per complete tens-chunk of `limit` (0, 10, 20).
    for value in range((limit // 10)):
        print(value * 10)
    # Every multiple of 10 below 368.
    for value in range(0, 368, 10):
        print(value)
def test_split():
    """Print the top-level category of a '/'-separated category string."""
    category = '游戏/直播'
    top_level = category.partition('/')[0]
    print(top_level)
def test_assign():
    """Demonstrate unpacking one value into several names."""
    org = 5
    a, b, c = (org, org, org)
    print(a)
    print(c)
def phog_dim():
    """Print the comma-separated field count of the first line of a local
    feature dump (hard-coded Windows path)."""
    with open('E://temp/docduplicate/recallOpt/temp.txt','r') as rhandler:
        line = rhandler.readline()
        sub_strs = line.split(',')
        print(len(sub_strs))
def test_md5_hexdigest():
    """Print the MD5 hex digest of a fixed key/timestamp concatenation."""
    payload = '0c8dd438-1e78-428f-b4c9-42cf41c13dcb' + "2017-09-01 12:00:00"
    digest = hashlib.md5(payload.encode()).hexdigest()
    print(digest)
def list_rerange(list_org):
    """Decide whether the numbers can be paired up by their mod-4 classes.

    Partitions ``list_org`` into multiples of four, even non-multiples of
    four ("bare evens") and odd numbers, prints the three buckets, then
    applies the pairing rules. Returns True when an arrangement is
    considered possible.
    """
    by_four = [x for x in list_org if x % 4 == 0]
    bare_even = [x for x in list_org if x % 2 == 0 and x % 4 != 0]
    odds = [x for x in list_org if x % 2 != 0]
    print((by_four))
    print((bare_even))
    print((odds))
    n4, n2, n1 = len(by_four), len(bare_even), len(odds)
    if n2 == 0 and n1 == 0:
        # Only multiples of four present: always fine.
        return True
    if n2 == 0:
        # Odds without bare evens: need twice as many fours as odds.
        return n4 * 2 >= n1
    if n1 == 0:
        # Bare evens without odds: an even count pairs with itself;
        # an odd count needs at least one multiple of four.
        if n2 % 2 == 0:
            return True
        return n4 > 0
    # Both odds and bare evens present (same rule for either parity of n2):
    # the fours must strictly outnumber half the odds.
    return n4 * 2 > n1
import scipy.misc as sic
import numpy as np
def test_png():
    """Load two local PNGs and print the second one's shape and a pixel row.

    NOTE(review): ``scipy.misc.imread`` was removed from modern SciPy; this
    needs an old SciPy (or a port to another image reader).
    """
    im = sic.imread("E://temp/videoquality/heibian_keyframe_small/VAPS2EQ4T_544_960_KF_012.png")
    # The first read is immediately overwritten; only the second image is used.
    im = sic.imread("E://temp/deblur/images/r5e7ce087t.png")
    print(im.shape)
    print(im[50][:][:])
from alexutil import httputil
import os
def load_imgs():
    """Download the images listed in a tab-separated ``docid<TAB>url`` file
    into a local folder as ``<docid>.jpg``, one per line."""
    output_dir = "E://temp/deblur/imags_dir"
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    with open('E://temp/deblur/video_article_pic', 'r') as fin:
        line = fin.readline()
        print(line)
        while line:
            substrs = line.split("\t")
            docid, url = substrs[0], substrs[1]
            save_path = os.path.join(output_dir, docid + '.jpg')
            httputil.image_download(url, save_path)
            line = fin.readline()
if __name__=="__main__":
    # test()
    # test_split()
    # test_assign()
    # phog_dim()
    # test_md5_hexdigest()
    print(list_rerange([8]))  # no odds, no bare evens
    print("===============================")
    print(list_rerange([2,3,5,7,8,6]))  # both odds and bare evens
    print("===============================")
    print(list_rerange([3,5,8]))  # no bare evens
    print("===============================")
    print(list_rerange([2]))  # no odds
    print("===============================")
    print(list_rerange([2,3,8,6]))  # both odds and bare evens
    print("===============================")
    print(list_rerange([1,4,1,4,1,4,1]))  # odds and fours only
    print("===============================")
    print(list_rerange([]))
    # Scratch demos: list deletion, slicing, extension, sum, dict lookup.
    digit_list = [1,2,7,4,5]
    del digit_list[3]
    print(digit_list)
    print(digit_list[1:3])
    a,b = [],[]
    a.append(1)
    print('a:')
    print(a)
    a.extend([0]*3)
    print(a)
    a.extend([0]*0)
    print(a)
    print('sum a:')
    a = [[1,2,3],[11,12,13]]
    b = sum(a[0])
    print(b)
    print('---------------test dict---------------------')
    dict_a = {'a':1, 'b':2, 'c':3}
    if 'a' in dict_a.keys():
        print('dict_a[a]:{}'.format(dict_a['a']))
# load_imgs() |
# Defination for a binary tree node
class TreeNode(object):
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
########################################################
def findSecondMinimumValue(root):
    # Level-order scan tracking the two smallest values seen so far.
    # NOTE(review): Python 2 (`print row`). The comprehension rebuilding
    # `row` iterates `for r in [root.left, root.right]` BEFORE binding
    # `root` from `row`, so it reuses the previous `root` — this looks
    # broken; confirm the intended traversal before reusing this function.
    # Also, minlist[1] starts at -1, so the `elif` comparison against it can
    # never hold until minlist[1] has been overwritten once.
    row = [root]
    minlist = [root.val,-1]
    while row:
        for r in row:
            if r.val < minlist[0]:
                minlist[1] = minlist[0]
                minlist[0] = r.val
            elif minlist[0] < r.val < minlist[1]:
                minlist[1] = r.val
        row = [r for r in [root.left,root.right] for root in row if r]
        print row
    return minlist[1]
def pathSum(root, sum):
    """Count downward paths whose node values add up to `sum`.

    :type root: TreeNode
    :type sum: int
    :rtype: int

    Paths may start at any node but must go strictly downward.
    """
    if root is None:
        return 0

    def _count_from(node, remaining):
        # Downward paths starting exactly at `node` that total `remaining`.
        if node is None:
            return 0
        hit = 1 if node.val == remaining else 0
        rest = remaining - node.val
        return hit + _count_from(node.left, rest) + _count_from(node.right, rest)

    # Paths rooted here, plus paths fully inside either subtree.
    return pathSum(root.left, sum) + pathSum(root.right, sum) + _count_from(root, sum)
#########################################################
# Build the example tree and count paths summing to 8 (Python 2 print).
# NOTE(review): the last two lines BOTH assign root.left.left.left, so the
# TreeNode(3) is immediately replaced by TreeNode(-2) — confirm intended.
root = TreeNode(10)
root.left = TreeNode(5)
root.right = TreeNode(-3)
root.right.right = TreeNode(11)
root.left.left = TreeNode(3)
root.left.right = TreeNode(2)
root.left.right.right = TreeNode(1)
root.left.left.left = TreeNode(3)
root.left.left.left = TreeNode(-2)
print pathSum(root,8)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from web_backend.nvlserver.module import nvl_meta
from sqlalchemy import BigInteger, String, Column, Boolean, Table, ForeignKey, DateTime, func
# SQLAlchemy Core table for persisted per-user console messages.
console = Table(
    'console',
    nvl_meta,
    Column('id', BigInteger, primary_key=True),
    # Event time (timezone-aware); distinct from the audit columns below.
    Column('timestamp', DateTime(timezone=True), nullable=False),
    Column('user_id', BigInteger, ForeignKey('user.id'), nullable=False),
    Column('message', String(4096), nullable=False),
    # Soft-state flags: rows are deactivated / soft-deleted, never removed.
    Column('active', Boolean, default=True),
    Column('deleted', Boolean, default=False),
    # Audit columns maintained by the database via now()/onupdate.
    Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
    Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-10 14:25
from __future__ import unicode_literals
from django.db import migrations, models
import user_input.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: relaxes several user-input fields
    to blank-able TextFields and converts `weight` to a validated
    CharField (custom min/max validators interpret the string value)."""

    dependencies = [
        ('user_input', '0019_auto_20171009_2052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dailyuserinputencouraged',
            name='pain_area',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='dailyuserinputoptional',
            name='food_ate_before_workout',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='dailyuserinputoptional',
            name='food_ate_during_workout',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='dailyuserinputoptional',
            name='weight',
            field=models.CharField(blank=True, max_length=20, null=True, validators=[user_input.models.CharMinValueValidator(30), user_input.models.CharMaxValueValidator(300)]),
        ),
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='medications_or_controlled_substances_taken',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='smoked_substance',
            field=models.TextField(blank=True),
        ),
    ]
|
from aiogram import types
from aiogram.dispatcher.filters import Command
from keyboards.default import locations_for_button
from loader import dp
from utils.misc.calc_for_distance import choose_nearest
@dp.message_handler(Command("show_attractions"))
async def show_on_map(message: types.Message):
    """Handle /show_attractions: greet the user (in Russian) and show the
    keyboard button that requests their location."""
    await message.answer(
        f"Здравствуйте, {message.from_user.full_name}.\n"
        f"Чтобы показать ближайшую достопримечательность,отправьте нам свое местоположение"
        "нажав на кнопку ниже",
        reply_markup=locations_for_button.keyboard,
    )
@dp.message_handler(content_types=types.ContentType.LOCATION)
async def go_to_local(message: types.Message):
    """Handle a shared location: echo the coordinates, list the nearest
    attractions (name, link, distance) and send one map pin each.

    Uses choose_nearest() to get (name, url, distance, coords) tuples.
    """
    location = message.location
    latitude = location.latitude
    longitude = location.longitude
    atract_nearest = choose_nearest(location)
    text_format = "Название: {attract_name}. <a href ='{url}'>Google</a>\n Расстояние равно: {distance:.1f} км"
    # Fix: the formatting loop was commented out, so `new_text` was always
    # empty and the reply never listed the attractions. Note the template's
    # placeholder is spelled `attract_name`.
    new_text = "\n\n".join(
        [
            text_format.format(attract_name=atract_name, url=url, distance=distance)
            for atract_name, url, distance, atract_local in atract_nearest
        ]
    )
    await message.answer(f"Cпасибо за ответ\n"
                         "Коордитаны:\n"
                         f"Широта: {latitude}\n"
                         f"Долгота:{longitude}\n\n"
                         "Ближайшее к вам будет:"
                         f"{new_text}",
                         disable_web_page_preview=True
                         )
    # One map pin per nearby attraction.
    for atract_name, url, distance, atract_local in atract_nearest:
        await message.answer_location(
            latitude=atract_local["lat"],
            longitude=atract_local["lon"]
        )
|
"""
Tests if the MPR121 touch sensor is set up correctly.
See readme for set up instructions.
"""
from piripherals import MPR121
# Interactive hardware check: print a line whenever any of the 12 MPR121
# electrodes is touched or released.
mpr = MPR121(bus=4, irq=0)


def _onTouch(isPressed, pinNumber):
    # Human-readable touch/release report for one electrode.
    pressedMessage = "pressed" if isPressed else "released"
    print('pin %d is %s' % (pinNumber, pressedMessage))


for i in range(12):
    # print status on touch and release
    mpr.on_touch(i, _onTouch)

# Block until the user ends the demo.
input("Touch a pin to generate output. Press Enter stop...")
|
# Define a SOLVABLE sudoku board: 9 rows of 9 columns, 0 marks a blank cell.
board = [
    [0,0,0,0,0,0,0,0,0],
    [0,1,5,3,0,9,7,8,0],
    [0,4,0,2,0,1,0,6,0],
    [0,6,4,7,0,8,1,2,0],
    [0,0,0,0,0,0,0,0,0],
    [0,3,7,5,0,6,8,4,0],
    [0,8,0,4,0,5,0,9,0],
    [0,7,9,8,0,2,4,3,0],
    [0,0,0,0,0,0,0,0,0]
]
#Sudoku solving function
def sudoku_solve(board):
#reached the end when you can no longer find blanks
if not find_blanks(board):
return True
else:
blank = find_blanks(board)
for i in range(1,10):
#insert potential answer into blank and check if answer is possible
if check_valid(board,i,blank) == True:
#if true insert answer into board
board[blank[0]][blank[1]] = i
#recursively call function to check answer in board
if sudoku_solve(board) == True:
return True
#if cannot solve: wrong answer, go back and reset last blank
else:
board[blank[0]][blank[1]] = 0 #reset the last blank ie backtrack
return False
#function to check validity of input, test is an insertable integer to test, blank is a tupple of pos
def check_valid(board,test,blank):
#check column
for i in range(len(board[0])):
if board[i][blank[1]] == test and blank[0] != i:
return False
#check column
for j in range(len(board)):
if board[blank[0]][j] == test and blank[1] != j:
return False
#check quardrant
quad_col = blank[1]//3
quad_row = blank[0]//3
for i in range(quad_row*3, quad_row*3+3):
for j in range(quad_col*3, quad_col*3+3):
if board[i][j] == test and (i,j) != blank:
return False
return True
#this function takes in a board and returns coordinates of first available blank
def find_blanks(board):
    """Return (row, col) of the first cell containing 0, or None if the board is full."""
    width = len(board[0])
    for r, row in enumerate(board):
        for c in range(width):
            if row[c] == 0:
                return (r, c)
    return None
def print_board(board):
    """Pretty-print the board with horizontal/vertical 3x3 box separators."""
    for i, row in enumerate(board):
        # Horizontal rule before rows 3 and 6.
        if i != 0 and i % 3 == 0:
            print("____________________")
        for j in range(len(board[0])):
            # Vertical bar before columns 3 and 6.
            if j != 0 and j % 3 == 0:
                print("|", end='')
            if j == 8:
                # Last column: end the line.
                print(row[j])
            else:
                print(str(row[j]) + ' ', end='')
# Show the unsolved puzzle, solve it in place, then show the result.
print_board(board)
print("__________________________")
sudoku_solve(board)
print_board(board)
import math
from unittest import TestCase
import simplejson as S
class TestFloat(TestCase):
    """Round-trip tests for simplejson float/int serialization."""

    def test_floats(self):
        """Floats must survive dumps() -> loads() exactly (repr round-trip)."""
        for num in [1617161771.7650001, math.pi, math.pi ** 100, math.pi ** -100, 3.1]:
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(float(S.dumps(num)), num)
            self.assertEqual(S.loads(S.dumps(num)), num)

    def test_ints(self):
        """Ints (including values > 2**32) serialize as their decimal string."""
        # The original used the Python 2 long literal ``1L``, which is a
        # syntax error on Python 3; plain ints are unbounded there.
        for num in [1, 1 << 32, 1 << 64]:
            self.assertEqual(S.dumps(num), str(num))
            self.assertEqual(int(S.dumps(num)), num)
|
"""
18. Plus One
Question:
Given a number represented as an array of digits, plus one to the number.
Example Questions Candidate Might Ask:
Q: Could the number be negative?
A: No. Assume it is a non-negative number.
Q: How are the digits ordered in the list? For example, is the number 12 represented by [1,2] or
[2,1]?
A: The digits are stored such that the most significant digit is at the head of the list.
Q: Could the number contain leading zeros, such as [0,0,1]?
A: No
"""
class Solution(object):
    def plusOne(self, digits):
        """
        Add one to the number whose decimal digits are `digits`
        (most-significant first); mutates and returns the list.
        :type digits: List[int]
        :rtype: List[int]
        """
        # Walk from the least-significant digit, propagating the carry.
        i = len(digits) - 1
        while i >= 0:
            if digits[i] == 9:
                # 9 + 1 rolls over to 0; the carry continues leftward.
                digits[i] = 0
                i -= 1
            else:
                # No further carry: bump this digit and stop.
                digits[i] += 1
                return digits
        # Carried past the most-significant digit (e.g. 999 -> 1000).
        digits.insert(0, 1)
        return digits
# Progress marker emitted when this module is executed.
print("LETTER U HAS BEEN SUCCESSFULLY EXECUTED")
from validators import domain
class Scrub(object):
    """
    Core data handler to clean, and post results in proper
    DataSerialization format for SimplyDomain.
    Attributes:
        subdomain: subdomain to parse
    """

    def __init__(self, subdomain=""):
        """
        Init class struc. Used as a object to parse and populate
        results.

        :param subdomain: domain-name string to be validated later.
        """
        self.subdomain = subdomain

    def validate_domain(self):
        """
        Use domain validator to confirm domain name.
        :return: BOOL -- True only when `self.subdomain` is a valid domain.
        """
        try:
            # validators.domain returns True on success and a falsy
            # ValidationFailure object on failure, so bool() covers both
            # (replaces the verbose if/else returning True/False).
            return bool(domain(str(self.subdomain)))
        except Exception:
            # TODO: add in logger class for errors
            # Any unexpected validator error is treated as "invalid".
            return False
|
from random import randint
from django.core.management.base import BaseCommand
from faker import Faker
from homepage.models import Student, Subject, Book
class Command(BaseCommand):
    books = ['Zach0tka', 'za4etko', 'zachechotko', 'zaCHETKO', 'zacheton']
    # NB: the string below is kept where the original author put it — since it
    # does not come first in the class body it is a no-op expression, not the
    # class docstring.
    """
    Help command that generate books for each student and them to DB
    You can set amount of student to generate using argument
    '-a' or '--am'
    """
    help = 'Generate books for all student(s) to the DB using Faker'

    def handle(self, *args, **options):
        # Attach a freshly generated Book to every Student in the DB.
        faker = Faker()
        for student in Student.objects.all():
            self.stdout.write('Start write book to students')
            # One-word fake "sentence" drawn from the candidate titles.
            title = faker.sentence(ext_word_list=self.books, nb_words=1)
            student.book = Book.objects.create(name=title)
            student.save()
            self.stdout.write('End write book to students')
|
import sys
import numpy as np
import lib1743734 as lib
def create_matrix(bags, N):
    """
    Method that creates the matrix of distances given the Jaccard similarity.
    :param bags: Array of bags.
    :param N: Size of the matrix.
    :return: The distance Jaccard similarity matrix J.
    """
    # Distance = 1 - similarity, so start from a matrix of ones and
    # subtract each pairwise similarity.
    D = np.ones([N, N])
    for i, first in enumerate(bags):
        for j, second in enumerate(bags):
            D[i, j] -= lib.jaccard(first, second)
    return D
def create_bag_output(bags):
    """
    Creates the sorted array of occurrences given the bags.
    :param bags: Array of bags.
    :return: The lengths of the bags, sorted in descending order.
    """
    sizes = [len(single_bag) for single_bag in bags]
    sizes.sort(reverse=True)
    return sizes
def create_clusters(clusters_list):
"""
Creates the clusters structure. Inside each cluster we have the indices of the elements of each cluster.
:param clusters_list: Output of method single_linkage.
:return: The array of indices on each cluster.
"""
clusters = {}
for i, elem in enumerate(clusters_list):
exist = clusters.get(elem, None)
if exist is None:
clusters[elem] = [i]
else:
clusters[elem].append(i)
to = []
for value in clusters.values():
to.append(value)
return to
def main():
    """CLI entry point: document file names are taken from sys.argv[1:]."""
    np.set_printoptions(precision=2)
    file_names = sys.argv[1:]
    '''
    1. Compute a bag of words for each document, ignoring words occurring less than 10 times.
    '''
    bags = [lib.bag(wc=lib.wordcount(filename=file), threshold=10) for file in file_names]
    print(create_bag_output(bags))
    '''
    2. Use libID.jaccard() to compute the nxn matrix J (Jaccard distance).
    '''
    J = create_matrix(bags=bags, N=len(file_names))
    print(J)
    # Mean over all entries of the distance matrix.
    print(J.mean(axis=None))
    '''
    3. Cluster the documents according to J, using libID.single_linkage() with k=3
    '''
    clusters = lib.single_linkage(D=J, k=3)
    print(create_clusters(clusters_list=clusters))
# Standard script guard: run only when executed directly.
if __name__ == '__main__':
    main()
from django.shortcuts import render
from django.views.generic.base import TemplateView
from sample1.models import Article
class HomePageView(TemplateView):
    """Render the home page, exposing the five most recent articles."""

    template_name = 'home.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # First five articles in the model's default ordering.
        latest = Article.objects.all()[:5]
        context['latest_articles'] = latest
        return context
|
# The Minion Game: enumerate every substring of the input and split them by
# whether they start with a vowel (Kevin's) or a consonant (Stuart's).
# NOTE: this brute-force version materializes every substring (O(n^3) space);
# it is only suitable for short inputs.
word = input()  # renamed from `str`, which shadowed the builtin
vowel = ['A','E','I','O','U']
substr_list1 = []  # substrings starting with a consonant (Stuart)
substr_list2 = []  # substrings starting with a vowel (Kevin)
for i in range(1, len(word) + 1):          # substring length
    for j in range(0, len(word) - i + 1):  # substring start index
        sub_str = ""
        ans = 0
        for k in range(j, j + i):
            sub_str += word[k]
        for q in vowel:
            if sub_str[0] == q:
                substr_list2.append(sub_str)
                ans = 1
                break
        if ans == 0:
            substr_list1.append(sub_str)
print(substr_list1)
print(substr_list2)
# The player with more substrings wins; equal counts are a draw.
if(len(substr_list1)>len(substr_list2)):
    print("Stuart {}".format(len(substr_list1)))
elif (len(substr_list1)<len(substr_list2)):
    print("Kevin {}".format(len(substr_list2)))
else:
    print("Draw")
|
# -*- coding: utf-8 -*-
#Reading Oauth file from the ../auth/in_auth file
# NOTE(review): Python 2 code (print statements, dict.has_key) — it will not
# run under Python 3 without porting.
from linkedin import linkedin
import json
# Credentials file holds one value per line:
# consumer key, consumer secret, user token, user secret.
credentials='../auth/In_auth'#add your authentication data
f=open(credentials,'r')
keys=f.readlines()
f.close()
#Read credentials keys
CONSUMER_KEY=keys[0].strip()
CONSUMER_SECRET=keys[1].strip()
USER_TOKEN=keys[2].strip()
USER_SECRET=keys[3].strip()
RETURN_URL=''
#authorizaton object
auth = linkedin.LinkedInDeveloperAuthentication(CONSUMER_KEY, CONSUMER_SECRET,USER_TOKEN, USER_SECRET, RETURN_URL,permissions=linkedin.PERMISSIONS.enums.values())
app=linkedin.LinkedInApplication(auth)
app.get_profile()
# for Oauth2 print auth.authorization_url
#store data in file
connections=app.get_connections()
connections_data='../Dev.0.0/data/In_raw'
f2=open(connections_data,'w')
f2.write(json.dumps(connections,indent=1))
f2.close()
#have the connections info in a JSON, use prettytable to show name,location
from prettytable import PrettyTable # pip install prettytable
pt = PrettyTable(field_names=['Name', 'Location'])
pt.align = 'l'
# Only connections that expose a 'location' field are listed.
[ pt.add_row((c['firstName'] + ' ' + c['lastName'], c['location']['name']))
for c in connections['values']
if c.has_key('location')]
print pt
print connections[0].keys
# Write a human-readable summary next to the raw dump.
pretty_data='../Dev.0.0/data/pretty_In'
f3=open(pretty_data,'w')
f3.write('Connections of:' + str(app.get_profile()['firstName'])+'\n\n')
f3.write(str(pt))
f3.close()
|
from tweets import *
import os
import unittest
class Tests(unittest.TestCase):
    """Argument-validation tests for tweets_analysis (from tweets.py).

    Each test asserts that invalid inputs raise AssertionError (or a more
    general Exception where the failure comes from deeper parsing).
    """

    def test_not_a_file(self):
        # Empty path, non-string, and nonexistent path must all be rejected.
        self.assertRaises(AssertionError, tweets_analysis, '')
        self.assertRaises(AssertionError, tweets_analysis, 999)
        self.assertRaises(AssertionError, tweets_analysis, 'aaa')

    def test_not_json_file(self):
        # A file that exists but is not JSON must fail to parse.
        filename = 'temp_test_not_json.csv'
        with open(filename, 'w') as f:
            f.write('bla bla bla')
        self.assertRaises(Exception, tweets_analysis, filename)

    def test_json_contains_created_at_and_tag(self):
        # Valid JSON missing the required 'created_at' key must be rejected.
        filename = 'temp_test_wrong_json_format.json'
        with open(filename, 'w') as f:
            f.write("""[{
            "text": "bla",
            "retweeted": false,
            "tag": "#ai",
            "retweet_count": 0,
            "id": 1
            }]""")
        self.assertRaises(AssertionError, tweets_analysis, filename)

    def test_tag_is_list_of_strings(self):
        # A bare string is not accepted; tags must be a list.
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', tags='#ai')

    def test_unknown_tag(self):
        self.assertRaises(Exception, tweets_analysis, 'tweets.json', tags=['#robot'])

    def test_bin_frequency_format(self):
        # Must look like '<int><unit>', e.g. '20s'.
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', bin_frequency='bla')
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', bin_frequency='60m')
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', bin_frequency='20.5s')
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', bin_frequency='t20s')

    def test_stat_sign_format(self):
        # Significance level must be a float in (0, 1).
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', stat_sign=-0.1)
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', stat_sign=1.1)
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', stat_sign='bla')

    def test_print_corr_format(self):
        # print_corr must be a bool.
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', print_corr='bla')
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', print_corr=1)
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', print_corr=0.1)

    def test_enough_data_correlation(self):
        # A single tag cannot produce a correlation.
        self.assertRaises(AssertionError, tweets_analysis, 'tweets.json', tags=['#ai'])

    def test_zzz_remove_temp_files(self):
        """
        Removes the temporary files that were created for testing.
        The name contains "zzz" because the function are executed in alphabetical order
        and this one should be the last.
        """
        for f in os.listdir():
            if 'temp_test' in f:
                os.remove(f)
# Run the suite when executed directly (e.g. `python test_tweets.py`).
if __name__ == '__main__':
    unittest.main()
# https://www.youtube.com/watch?v=uWvb3QzA48c&list=PLsk-HSGFjnaH5yghzu7PcOzm9NhsW0Urw&index=18
import pygame
import random
import os
#CONSTANTS - GAME
WIDTH = 600        # window width in pixels
HEIGHT = 300       # window height in pixels
FPS = 30           # target frame rate
GROUND = HEIGHT - 30   # y coordinate of the top of the ground strip
SLOW = 3           # minimum platform scroll speed
FAST = 8           # maximum platform scroll speed (exclusive)
#CONSTANTS - PHYSICS
PLAYER_ACC = 0.9       # horizontal acceleration per frame while a key is held
PLAYER_FRICTION = -0.12  # velocity-proportional drag (negative = opposes motion)
PLAYER_GRAV = 0.9      # constant downward acceleration
vec = pygame.math.Vector2  # shorthand for 2D vectors
#DEFINE COLORS
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0 , 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
ALEXYELLOW = (235, 194, 12)
#ASSET FOLDERS
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
#DRAW TEXT
# NOTE(review): match_font is called before pygame.init() below — confirm the
# font module tolerates this on the target pygame version.
font_name = pygame.font.match_font("arial")
def draw_text(screen, text, size, x, y):
    """Render `text` in black at (x, y), anchored at the top-left corner."""
    font = pygame.font.Font(font_name, size)
    surface = font.render(text, True, BLACK)
    rect = surface.get_rect()
    rect.topleft = (x, y)
    screen.blit(surface, rect)
#PLAYER CLASS
class Player(pygame.sprite.Sprite):
    """The controllable black square with acceleration/friction physics."""

    def __init__(self, x, y, platforms):
        # NOTE(review): the x/y parameters are ignored — the start position is
        # hard-coded below. The `platforms` parameter is also unused here;
        # update() reads the module-level `platforms` group instead. Confirm
        # whether they should be stored.
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((25, 25))
        self.image.fill(BLACK)
        self.rect = self.image.get_rect()
        self.pos = vec(10, GROUND - 60)   # position (midbottom of the sprite)
        self.vel = vec(0, 0)              # velocity
        self.acc = vec(0, 0)              # acceleration
        self.can_jump = True              # NOTE(review): set but never read

    def update(self):
        """Apply input, gravity, friction and collisions for one frame."""
        # Gravity is re-applied from scratch every frame.
        self.acc = vec(0, PLAYER_GRAV)
        keystate = pygame.key.get_pressed()
        #ARROW KEY CONTROLS
        # Up/Down move the rect directly; it is overwritten by midbottom below,
        # so their visible effect lasts only within this frame.
        if keystate[pygame.K_UP]:
            self.rect.y += -5
        if keystate[pygame.K_DOWN]:
            self.rect.y += 5
        if keystate[pygame.K_RIGHT]:
            self.acc.x = PLAYER_ACC
        if keystate[pygame.K_LEFT]:
            self.acc.x = -PLAYER_ACC
        # Jump only when vertically at rest (on ground or platform).
        if self.vel.y == 0 and keystate[pygame.K_SPACE]:
            self.vel.y = -20
        #APPLY FRICTION IN THE X DIRECTION
        self.acc.x += self.vel.x * PLAYER_FRICTION
        #EQUATIONS OF MOTION
        self.vel += self.acc
        self.pos += self.vel + 0.5 * self.acc
        #WRAP AROUND THE SIDES OF THE SCREEN
        if self.pos.x > WIDTH:
            self.pos.x = 0
        if self.pos.x < 0:
            self.pos.x = WIDTH
        #SET THE NEW PLAYER POSITION BASED ON ABOVE
        self.rect.midbottom = self.pos
        #SIMULATE THE GROUND
        if self.pos.y > GROUND:
            self.pos.y = GROUND + 1
            self.vel.y = 0
        #HITS PLATFORM
        # Uses the module-level `platforms` group defined later in this file.
        hits = pygame.sprite.spritecollide(self, platforms, False)
        if hits:
            if self.rect.top > hits[0].rect.top: #jumping from underneath
                self.pos.y = hits[0].rect.bottom + 25 + 1
                self.vel.y = 0
            else:
                # Landing on top of the platform.
                self.pos.y = hits[0].rect.top + 1
                self.vel.y = 0

    def getStats(self):
        """Return a debug string with position and acceleration.

        NOTE(review): contains '\\n', but pygame's font.render does not
        support newlines — confirm how draw_text displays this.
        """
        text = "Player\n" + "X: " + str(int(self.pos.x)) + ", Y: " + str(int(self.pos.y)) + "\n" + "ACC" + str(int(self.acc.x)) + " , " + str(int(self.acc.y))
        return text
#PLATFORM CLASS
class Platform(pygame.sprite.Sprite):
    """A green 100x25 platform that scrolls right-to-left at a fixed speed."""

    def __init__(self, x, y, s):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((100, 25))
        self.image.fill(GREEN)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.speed = s

    def update(self):
        # Scroll left; once fully off-screen, remove from all sprite groups.
        self.rect.x -= self.speed
        if self.rect.right < 0:
            self.kill()
#GROUND CLASS
class Ground(pygame.sprite.Sprite):
    """A static green strip along the bottom of the screen."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((600, 30))
        self.image.fill(GREEN)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def update(self):
        # The ground never moves.
        pass
#INITIALIZE VARIABLES
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Platformer")
clock = pygame.time.Clock()
#SPRITE GROUPS
#platform = Platform(10, GROUND - 60)
platforms = pygame.sprite.Group()
#platforms.add(platform)
ground = Ground(0, 270)
player = Player(10, 10, platforms)
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
#all_sprites.add(platform)
all_sprites.add(ground)
# GAME LOOP:
# Process Events
# Update
# Draw
running = True
while running:
    # Cap the frame rate.
    clock.tick(FPS)
    #PROCESS EVENTS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    #SPAWN PLATFORMS
    # Keep three platforms alive; new ones spawn just off the right edge
    # with a random height and scroll speed.
    while len(platforms) < 3:
        buffer = 20  # NOTE(review): unused — confirm it can be removed
        pX = random.randrange(WIDTH, WIDTH + 100)
        pY = random.randrange(30, GROUND - 60)
        pS = random.randrange(SLOW, FAST)
        print(pX, pY)
        p = Platform(pX, pY, pS)
        platforms.add(p)
        all_sprites.add(p)
    #UPDATE
    all_sprites.update()
    # DRAW
    screen.fill(ALEXYELLOW)
    draw_text(screen, "PLATFORMER", 24, 10, 10)
    text = player.getStats()
    draw_text(screen, text, 12, 10, 40)
    all_sprites.draw(screen)
    # FLIP AFTER DRAWING
    pygame.display.flip()
# Loop ended (window closed): shut pygame down.
pygame.quit()
|
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## @params: [JOB_NAME]
# NOTE(review): Python 2 code (print statements) running inside AWS Glue.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
print "*"*20
print args
print "*"*20
# Standard Glue job bootstrap: Spark context -> Glue context -> job.
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
def print_df(data_frame):
    # Debug helper: print the schema and one record of a dynamic frame.
    # NOTE(review): the bare except swallows *all* errors (including
    # KeyboardInterrupt); `except Exception` would be safer.
    try:
        print "*"*20
        data_frame.printSchema()
        print "*"*20
        print "Single record:"
        data_frame.show(1)
    except:
        print "Unexpected error:", sys.exc_info()[0]
        pass
## @type: DataSource
## @args: [database = "traffic", table_name = "backfill", push_down_predicate = my_partition_predicate, transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
# Load only the 2017-06-29 11:00 partition via predicate push-down.
my_partition_predicate = "(year=='2017' and month=='06' and day=='29' and hour == '11')"
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "traffic", table_name = "backfill", push_down_predicate = my_partition_predicate, transformation_ctx = "datasource0")
print_df(datasource0)
print "*"*20
# Repeat for the 2017-12-13 01:00 partition.
my_partition_predicate = "(year=='2017' and month=='12' and day=='13' and hour == '01')"
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "traffic", table_name = "backfill", push_down_predicate = my_partition_predicate, transformation_ctx = "datasource0")
print_df(datasource0)
print "*"*20
|
def find(a, b):
    """Return the index of the first occurrence of `b` in `a`, or -1 if absent.

    Uses enumerate instead of the original `a.index(x)`, which re-scanned
    the list from the start on every match.
    """
    for i, x in enumerate(a):
        if x == b:
            return i  # index of the first match
    return -1
# Read a Python literal (e.g. "[1, 100, 3]") from stdin.
i = input()
# SECURITY NOTE(review): eval() on raw user input executes arbitrary code;
# ast.literal_eval would be the safe alternative.
values = eval(i)
index = find(values, 100)
if index == -1:
    print("100は一つも含まれない")
else:
    print(f"100は{index}番目に含まれる")
#
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
from dataclasses import dataclass, field
from enum import Enum
from typing import Iterable, Mapping
from pants.engine.engine_aware import SideEffecting
from pants.engine.fs import EMPTY_DIGEST, Digest, FileDigest
from pants.engine.internals.native_engine import ( # noqa: F401
ProcessExecutionEnvironment as ProcessExecutionEnvironment,
)
from pants.engine.internals.session import RunId
from pants.engine.platform import Platform
from pants.engine.rules import collect_rules, rule
from pants.option.global_options import KeepSandboxes
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ProductDescription:
    """Human-readable description of the product a `Process` computes."""

    value: str
class ProcessCacheScope(Enum):
    """Controls where, and under what conditions, a `Process` result is cached."""

    # Cached in all locations, regardless of success or failure.
    ALWAYS = "always"
    # Cached in all locations, but only if the process exits successfully.
    SUCCESSFUL = "successful"
    # Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of
    # success vs. failure.
    PER_RESTART_ALWAYS = "per_restart_always"
    # Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if
    # successful.
    PER_RESTART_SUCCESSFUL = "per_restart_successful"
    # Will run once per Session, i.e. once per run of Pants. This happens because the engine
    # de-duplicates identical work; the process is neither memoized in memory nor cached to disk.
    PER_SESSION = "per_session"
@dataclass(frozen=True)
class Process:
    """A hermetic subprocess execution request; see `__init__` for the contract."""

    argv: tuple[str, ...]
    # Excluded from equality so identical commands with different descriptions
    # share cache entries.
    description: str = dataclasses.field(compare=False)
    level: LogLevel
    input_digest: Digest
    immutable_input_digests: FrozenDict[str, Digest]
    use_nailgun: tuple[str, ...]
    working_directory: str | None
    env: FrozenDict[str, str]
    append_only_caches: FrozenDict[str, str]
    output_files: tuple[str, ...]
    output_directories: tuple[str, ...]
    timeout_seconds: int | float
    jdk_home: str | None
    execution_slot_variable: str | None
    concurrency_available: int
    cache_scope: ProcessCacheScope
    remote_cache_speculation_delay_millis: int

    def __init__(
        self,
        argv: Iterable[str],
        *,
        description: str,
        level: LogLevel = LogLevel.INFO,
        input_digest: Digest = EMPTY_DIGEST,
        immutable_input_digests: Mapping[str, Digest] | None = None,
        use_nailgun: Iterable[str] = (),
        working_directory: str | None = None,
        env: Mapping[str, str] | None = None,
        append_only_caches: Mapping[str, str] | None = None,
        output_files: Iterable[str] | None = None,
        output_directories: Iterable[str] | None = None,
        timeout_seconds: int | float | None = None,
        jdk_home: str | None = None,
        execution_slot_variable: str | None = None,
        concurrency_available: int = 0,
        cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
        remote_cache_speculation_delay_millis: int = 0,
    ) -> None:
        """Request to run a subprocess, similar to subprocess.Popen.
        This process will be hermetic, meaning that it cannot access files and environment variables
        that are not explicitly populated. For example, $PATH will not be defined by default, unless
        populated through the `env` parameter.
        Usually, you will want to provide input files/directories via the parameter `input_digest`.
        The process will then be able to access these paths through relative paths. If you want to
        give multiple input digests, first merge them with `await Get(Digest, MergeDigests)`.
        Often, you will want to capture the files/directories created in the process. To do this,
        you can either set `output_files` or `output_directories`. The specified paths should be
        specified relative to the `working_directory`, if any, and will then be used to populate
        `output_digest` on the `ProcessResult`. If you want to split up this output digest into
        multiple digests, use `await Get(Digest, DigestSubset)` on the `output_digest`.
        To actually run the process, use `await Get(ProcessResult, Process)` or
        `await Get(FallibleProcessResult, Process)`.
        Example:
            result = await Get(
                ProcessResult, Process(["/bin/echo", "hello world"], description="demo")
            )
            assert result.stdout == b"hello world"
        """
        # A bare string would be iterated character-by-character; reject early.
        if isinstance(argv, str):
            raise ValueError("argv must be a sequence of strings, but was a single string.")
        # The dataclass is frozen, so attributes are set via object.__setattr__.
        # Mutable inputs are normalized to immutable types (tuple/FrozenDict).
        object.__setattr__(self, "argv", tuple(argv))
        object.__setattr__(self, "description", description)
        object.__setattr__(self, "level", level)
        object.__setattr__(self, "input_digest", input_digest)
        object.__setattr__(
            self, "immutable_input_digests", FrozenDict(immutable_input_digests or {})
        )
        object.__setattr__(self, "use_nailgun", tuple(use_nailgun))
        object.__setattr__(self, "working_directory", working_directory)
        object.__setattr__(self, "env", FrozenDict(env or {}))
        object.__setattr__(self, "append_only_caches", FrozenDict(append_only_caches or {}))
        object.__setattr__(self, "output_files", tuple(output_files or ()))
        object.__setattr__(self, "output_directories", tuple(output_directories or ()))
        # NB: A negative or None time value is normalized to -1 to ease the transfer to Rust.
        object.__setattr__(
            self,
            "timeout_seconds",
            timeout_seconds if timeout_seconds and timeout_seconds > 0 else -1,
        )
        object.__setattr__(self, "jdk_home", jdk_home)
        object.__setattr__(self, "execution_slot_variable", execution_slot_variable)
        object.__setattr__(self, "concurrency_available", concurrency_available)
        object.__setattr__(self, "cache_scope", cache_scope)
        object.__setattr__(
            self, "remote_cache_speculation_delay_millis", remote_cache_speculation_delay_millis
        )
@dataclass(frozen=True)
class ProcessResult:
    """Result of executing a process which should not fail.
    If the process has a non-zero exit code, this will raise an exception, unlike
    FallibleProcessResult.
    """

    stdout: bytes
    stdout_digest: FileDigest
    stderr: bytes
    stderr_digest: FileDigest
    output_digest: Digest
    # Excluded from equality/hash: where/when the process ran does not change
    # what it produced.
    metadata: ProcessResultMetadata = field(compare=False, hash=False)

    @property
    def platform(self) -> Platform:
        # Convenience accessor for the platform recorded in the metadata.
        return self.metadata.platform
@dataclass(frozen=True)
class FallibleProcessResult:
    """Result of executing a process which might fail.
    If the process has a non-zero exit code, this will not raise an exception, unlike ProcessResult.
    """

    stdout: bytes
    stdout_digest: FileDigest
    stderr: bytes
    stderr_digest: FileDigest
    exit_code: int
    output_digest: Digest
    # Excluded from equality/hash, as on ProcessResult.
    metadata: ProcessResultMetadata = field(compare=False, hash=False)

    @property
    def platform(self) -> Platform:
        # Convenience accessor for the platform recorded in the metadata.
        return self.metadata.platform
@dataclass(frozen=True)
class ProcessResultMetadata:
    """Metadata for a ProcessResult, which is not included in its definition of equality."""

    class Source(Enum):
        # How the result was obtained in the run it was created in.
        RAN = "ran"
        HIT_LOCALLY = "hit_locally"
        HIT_REMOTELY = "hit_remotely"
        MEMOIZED = "memoized"

    # The execution time of the process, in milliseconds, or None if it could not be captured
    # (since remote execution does not guarantee its availability).
    total_elapsed_ms: int | None
    # The environment that the process ran in (or would have run in, if it was not a cache hit).
    execution_environment: ProcessExecutionEnvironment
    # Whether the ProcessResult (when it was created in the attached run_id) came from the local
    # or remote cache, or ran locally or remotely. See the `self.source` method.
    _source: str
    # The run_id in which a ProcessResult was created. See the `self.source` method.
    source_run_id: int

    @property
    def platform(self) -> Platform:
        # Maps the engine's platform name string onto the Platform enum.
        return Platform[self.execution_environment.platform]

    def source(self, current_run_id: RunId) -> Source:
        """Given the current run_id, return the calculated "source" of the ProcessResult.
        If a ProcessResult is consumed in any run_id other than the one it was created in, its
        source implicitly becomes memoization, since the result was re-used in a new run without
        being recreated.
        """
        return (
            self.Source(self._source)
            if self.source_run_id == current_run_id
            else self.Source.MEMOIZED
        )
class ProcessExecutionFailure(Exception):
    """Used to denote that a process exited, but was unsuccessful in some way.
    For example, exiting with a non-zero code.
    """

    def __init__(
        self,
        exit_code: int,
        stdout: bytes,
        stderr: bytes,
        process_description: str,
        *,
        keep_sandboxes: KeepSandboxes,
    ) -> None:
        # These are intentionally "public" members.
        self.exit_code = exit_code
        self.stdout = stdout
        self.stderr = stderr

        def try_decode(content: bytes) -> str:
            """Decode `content` as UTF-8, falling back to a truncated repr."""
            try:
                return content.decode()
            except ValueError:
                # Bug fix: this previously repr'd `stdout` regardless of which
                # stream was being decoded, so an undecodable stderr showed
                # stdout's bytes in the error message.
                content_repr = repr(content)
                return f"{content_repr[:256]}..." if len(content_repr) > 256 else content_repr

        # NB: We don't use dedent on a single format string here because it would attempt to
        # interpret the stdio content.
        err_strings = [
            f"Process '{process_description}' failed with exit code {exit_code}.",
            "stdout:",
            try_decode(stdout),
            "stderr:",
            try_decode(stderr),
        ]
        if keep_sandboxes == KeepSandboxes.never:
            err_strings.append(
                "\n\nUse `--keep-sandboxes=on_failure` to preserve the process chroot for inspection."
            )
        super().__init__("\n".join(err_strings))
@rule
def get_multi_platform_request_description(req: Process) -> ProductDescription:
    """Expose a `Process`'s description as a `ProductDescription` product."""
    return ProductDescription(req.description)
@rule
def fallible_to_exec_result_or_raise(
    fallible_result: FallibleProcessResult,
    description: ProductDescription,
    keep_sandboxes: KeepSandboxes,
) -> ProcessResult:
    """Converts a FallibleProcessResult to a ProcessResult or raises an error.

    Raises:
        ProcessExecutionFailure: if the process exited with a non-zero code.
    """
    if fallible_result.exit_code == 0:
        return ProcessResult(
            stdout=fallible_result.stdout,
            stdout_digest=fallible_result.stdout_digest,
            stderr=fallible_result.stderr,
            stderr_digest=fallible_result.stderr_digest,
            output_digest=fallible_result.output_digest,
            metadata=fallible_result.metadata,
        )
    raise ProcessExecutionFailure(
        fallible_result.exit_code,
        fallible_result.stdout,
        fallible_result.stderr,
        description.value,
        keep_sandboxes=keep_sandboxes,
    )
@dataclass(frozen=True)
class InteractiveProcessResult:
    """Exit status of a foreground (`InteractiveProcess`) execution."""

    exit_code: int
@dataclass(frozen=True)
class InteractiveProcess(SideEffecting):
    # NB: Although InteractiveProcess supports only some of the features of Process, we construct an
    # underlying Process instance to improve code reuse.
    process: Process
    run_in_workspace: bool
    forward_signals_to_process: bool
    restartable: bool
    keep_sandboxes: KeepSandboxes

    def __init__(
        self,
        argv: Iterable[str],
        *,
        env: Mapping[str, str] | None = None,
        input_digest: Digest = EMPTY_DIGEST,
        run_in_workspace: bool = False,
        forward_signals_to_process: bool = True,
        restartable: bool = False,
        append_only_caches: Mapping[str, str] | None = None,
        immutable_input_digests: Mapping[str, Digest] | None = None,
        keep_sandboxes: KeepSandboxes = KeepSandboxes.never,
    ) -> None:
        """Request to run a subprocess in the foreground, similar to subprocess.run().
        Unlike `Process`, the result will not be cached.
        To run the process, use `await Effect(InteractiveProcessResult, InteractiveProcess(..))`
        in a `@goal_rule`.
        `forward_signals_to_process` controls whether pants will allow a SIGINT signal
        sent to a process by hitting Ctrl-C in the terminal to actually reach the process,
        or capture that signal itself, blocking it from the process.
        """
        # Frozen dataclass: attributes must be assigned via object.__setattr__.
        object.__setattr__(
            self,
            "process",
            Process(
                argv,
                description="Interactive process",
                env=env,
                input_digest=input_digest,
                append_only_caches=append_only_caches,
                immutable_input_digests=immutable_input_digests,
            ),
        )
        object.__setattr__(self, "run_in_workspace", run_in_workspace)
        object.__setattr__(self, "forward_signals_to_process", forward_signals_to_process)
        object.__setattr__(self, "restartable", restartable)
        object.__setattr__(self, "keep_sandboxes", keep_sandboxes)

    @classmethod
    def from_process(
        cls,
        process: Process,
        *,
        forward_signals_to_process: bool = True,
        restartable: bool = False,
    ) -> InteractiveProcess:
        """Build an InteractiveProcess from an existing Process.

        Only argv/env/input_digest/append_only_caches/immutable_input_digests
        are carried over; fields such as working_directory, level and timeout
        are not forwarded — NOTE(review): confirm this is intended.
        """
        return InteractiveProcess(
            argv=process.argv,
            env=process.env,
            input_digest=process.input_digest,
            forward_signals_to_process=forward_signals_to_process,
            restartable=restartable,
            append_only_caches=process.append_only_caches,
            immutable_input_digests=process.immutable_input_digests,
        )
def rules():
    """Return this module's @rule-decorated functions for engine registration."""
    return collect_rules()
|
# # 1. 数据类型 (类型转换 .每个类型常用方法, )
# #python的数据类型有1、字符串 2、布尔类型 3、整数 4、浮点数
# # 5、 数字 6、列表 7、元组 8、字典 9、日期
#
# #控制字符串循环(* 在数字中表示乘号,在字符串中表示重复多少次)
# e = '我爱中国 ,'*8
# print(e)
#
# #拆分字符串
# for g in 'I love china':
# print(g)
#
# #字符串---用单引号或双引号括起来
# str = 'who are you ?'
# str2 = 'i am String'
# print(str+' '+str2)
#
# #还有一种三引号,在三引号中可以自由使用单引号或双引号
# str3 = '''我说:“很高兴认识你”'''
# print(str3)
# #除了使用 ''' 来区分单引号和双引号外,还可以用转移字符 \ 来做区分
# print('我说:\"很高兴认识你\"')
# #转义字符\可以转义很多字符,比如\n表示换行,\t表示制表符,字符\本身也要转义,所以\\表示的字符就是\
# #使用r表示不让 \ 发生转义
# print('转义前:'+'i say \"my \name is jhon.\n\t what\'s your name? Milke \\ Allen\"')
# print('转义后:'+r'i say \"my \name is jhon.\t what\'s your name? Milke \\ Allen\"')
#
# #ord()函数获取字符的整数表示,chr()函数把编码转换为对应的字符
# #'%d'% X 表示将整型转换为字符串
# print('A的ASCII编码是: '+'%d' %ord('A'))
# print('中的ASCII编码是: '+ '%d' %ord('中'))
# print('66对应的字符是: '+ chr(66))
# print('25991对应的字符是: '+chr(25991))
# #r如果知道中文的ASCII编码,可以将其转化为16进制然后在打印出来
# print('该16进制的对应的字符是: '+'\u4e2d\u6587')
#
# #十进制转各种进制 : bin()二进制 oct()八进制 hex()十六进制
# #将输入的中文转化为各种进制字符
# #获取用户输入的字符
# useWrord = str(input("请输入需要转化的文字: "))
# #循环字符串进行转码
# list2 = []
# list8 = []
# list16 = []
# for i in useWrord:
# strCode = ord(i)
# list2.append(bin(strCode))
# list8.append(oct(strCode))
# list16.append(hex(strCode))
# print('输入内容的转换为二进制是: ')
# print(list2)
# print('输入内容的转换为八进制是: ')
# print(list8)
# print('输入内容的转换为十六进制是: ')
# print(list16)
#
# #将字符串转换为一个整型
# x = '1'
# y = int(x)
# print(y)
#
# #整型转换为字符串
# a = 1
# b = '%d' %a
# print(a)
#
# # #将字符串转换为一个浮点型
# str4 = '4.1'
# c = float(str4)
# print(c)
#
# # #将浮点型转换为字符串
# d = 4.1021
# str5 = '%f'%d
# print((str5))
#
# #数字的基本应用
# print(2+1)
# print(2*2)
# print(16/2)
# print(15%6)
# print(2**5)
#
# # #获取长度
# print(len(str(2**1000000)))
#
# # 2 字符操作(截取 拼接,替换 等)
# testSente1 = '我 是中国 人,我爱中国!'
# testSente2 = '''i'm Chinese,i love China!'''
# testSente3 = 'HelloWord'
# #首字母大写
# print(testSente2.capitalize())
# #首字母小写
# print(testSente3.casefold())
# #字符串宽度填充,使用原有字符串+填充字符构成指定长度的新的字符串,数字为新生字符串的长度,注意python对大小写敏感
# #默认以空格返回
# print(testSente3.center(15))
# #以定义的填充物返回
# print(testSente3.center(16,'*'))
# #统计某个字符在字符串中出现的次数,或在字符串指定区间内完成上述操作
# print(testSente2.count('i'))
# print(testSente2.count('i',0,8))
# #对字符串进行编码操作
# print(testSente1.encode('gbk'))
# #判断字符串是不是以某个字符结束的,可以加个范围对字符串进行截取后判断
# print(testSente3.endswith('d'))
# print(testSente3.endswith('d',2,5))
# #判断字符串是不是以某个字符开始的
# print(testSente3.startswith('H'))
# #将制表符'\t'转换成指定宽度的tab键分割,默认tabsize=8
# li = 'sw\tht'
# print(li.expandtabs(16))
# #在字符串中查找指定字符串,找不到时返回-1,同样可以加上范围
# print(testSente2.find('C',0,3))
# #格式化输出的字符串
# li = 'I\'m {},{}' #两个'{}'是占位符
# print(li.format('swht','欢迎来中国'))
# #判断字符串中是否包含所找的字符串
# print(testSente2.__contains__('Chi'))
# # 在字符串中查找指定的字符串,找不到时直接报错,找到时返回位置
# print(testSente3.index('W'))
# #字符串连接
# #把join里面的字符串拆分后和原来的字符串组合成新的
# print(testSente3.join('*********'))
# print(''.join(testSente2))
# str_list = ['love', 'Python']
# print('通过join形式连接 :' + ''.join(str_list) + '\n')
# #通过 + 来连接
# print(testSente1+testSente3)
# #通过 , 来连接
# print(testSente3,testSente2)
# #直接连接
# print('直接连接这种方式' '好像只能用引号')
# #格式化连接
# print('通过格式化形式连接 :' + '%s %s' % ('love', 'Python') + '\n')
# #检查判断字符串是否包含字母数字字符
# print(testSente1.isalnum())
# #检测字符串是否只由字母组成
# print(testSente3.isalpha())
# #检测字符串是否只由数字组成
# print(testSente3.isdecimal())
# #检测字符串是否是字母开头
# #为什么这里打印false
# print(testSente2)
# print(testSente2.isidentifier())
# #检测字符串是否由数字组成
# print(testSente3.isnumeric())
# #判断字符串中所有字符是否都属于可见字符,及时判断字符串中是不是又转义符
# testSente4 = '\t这里是有转义符的'
# print(testSente4.isprintable())
# #判断字符串是不是为空字符串
# print(testSente3.isspace())
# #判断字符串是否适合当作标题(其实就是每个单词首字母大写),不允许首字母外其他地方有大写
# testSente5 = 'Aaa'
# print(testSente5.istitle())
# #判断字符串中所有字母字符是否都是大写字母
# print(testSente3.isupper())
# #根据指定的分隔符将字符串进行分割。
# print(testSente2.partition(','))
# #把字符串中的 old(旧字符串) 替换成 new(新字符串),如果指定第三个参数max,则替换不超过 max 次。
# str = "this is string example....wow!!! this is really string"
# str1= str.replace("is", "was")
# print('原来的句子: '+str)
# print('转换后的句子: '+str1)
# str1= str.replace("is", "was",3)
# print('加了最大次数转换后的句子: '+str1) #注意单替换时遇到字符串中有替换字符则被旧的字符替换
# #字符串分割,默认是空格
# print(testSente2.split())
# #在字符串后面增加指定的字符或字符串
# print(testSente2.__add__(' How are you'))
# #判断字符串是不是相等
# print(testSente2.__eq__(testSente1))
# #对字符串进行分割后返回一个列表
# testSente6 = '''I have a pen \r\n I have a apple
# apple pen'''
# testlist = testSente6.splitlines()
# print(testlist)
|
"""
hardwire.server
Copyright (C) 2014 Joseph P. Crabtree
Hardwire._site_init based on wsgi_echo example
https://github.com/tavendo/AutobahnPython/blob/master/examples/twisted/websocket/echo_wsgi/server.py
Copyright (C) 2012-2013 Tavendo GmbH
This module implements the Hardwire Web Interface Server.
"""
import sys, os, uuid, json
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from twisted.web.static import File
try:
from autobahn.twisted.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
except ImportError:
# Version < 0.7.0 of Autobahn did not have the 'twisted' object
from autobahn.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
try:
from autobahn.wamp1.protocol import exportRpc, \
exportPub, \
exportSub, \
WampServerFactory, \
WampServerProtocol
except ImportError:
# Version < 0.8.0 of Autobahn did not support multiple WAMP versions
from autobahn.wamp import exportRpc, \
exportPub, \
exportSub, \
WampServerFactory, \
WampServerProtocol
import settings
from hardwire.signal import Signal
from threading import Thread
class SignalServerProtocol(WampServerProtocol):
    """
    The SignalServerProtocol is an extension of the Autobahn WAMPServerProtocol
    that handles the PubSub for signal distribution as well as RPC for managing
    the signal server and calling methods to store and load data on the server.
    """
    @exportRpc
    def register(self, name):
        """Registers a new pubsub signal with the server.

        Returns the new signal's topic URI; the connecting peer (peerstr)
        becomes the signal's owner.
        """
        signal = Signal(name, self.peerstr)
        print "Adding signal named " + signal.name + " owned by " + signal.owner
        return signal.uri
    def onSessionOpen(self):
        # Expose this protocol's RPC methods under the /signalManager/ prefix.
        self.registerForRpc(self, "/signalManager/")
        # Register Custom Pub Handler for Signals and signal list.
        # The handler closes over the factory's shared signal table so all
        # connections see the same signals.
        self.signal_handler = SignalHandler(self.factory.signals,\
                                            self.factory.publish_signal,\
                                            self.factory.broadcast_signals)
        # "/signals/<name>" carries individual signal values;
        # "/signals" (no suffix) carries the signal directory.
        self.registerHandlerForPubSub(self.signal_handler, "/signals/")
        self.registerHandlerForPubSub(self.signal_handler, "/signals")
class SignalHandler(object):
    """
    Custom publish/subscribe callbacks for the signals managed by the server.

    Holds a reference to the shared signal table plus the factory's publish
    and broadcast callables so it can push state to new subscribers.
    """
    def __init__(self, signals, publish, broadcast):
        self.signals = signals
        self.publish = publish
        self.broadcast = broadcast
    @exportSub("", True)
    def subscribe(self, topicUriPrefix, topicUriSuffix):
        """Accept the subscription and push current state to the subscriber.

        A non-empty suffix names a single signal; an empty suffix means the
        subscriber wants the signal directory.
        """
        if topicUriSuffix:
            deferred_call = (self.publish, topicUriSuffix)
        else:
            deferred_call = (self.broadcast,)
        # Defer the push so it happens after the subscription completes.
        reactor.callLater(0, *deferred_call)
        return True
    @exportPub("", True)
    def signal_pub_handler(self, topicUriPrefix, topicUriSuffix, event):
        """
        Updates the signal object's value when a new value is published to its
        uri. Returns False so the raw event is not re-dispatched.
        """
        name = event[u'name'].encode("ascii", "ignore")
        self.signals[name].value = event[u'value']
        return False
class SignalServerFactory(WampServerFactory):
    """
    Extension of the Autobahn WampServerFactory that creates
    SignalServerProtocol instances and keeps the persistent signal table,
    providing register/unregister/publish operations shared by all clients.
    """
    protocol = SignalServerProtocol
    def __init__(self, url, debug):
        WampServerFactory.__init__(self, url, debugWamp = debug)
        # Hook the factory's operations onto the Signal class so Signal
        # objects can (un)register and publish themselves.
        Signal.register = self.register
        Signal.unregister = self.unregister
        Signal.publish = self.publish_signal_public
        self.signals = {}
    def broadcast_signals(self):
        """Brodcasts the {uri: name} signal directory to all subscribers."""
        directory = dict((str(sig.uri), name) for name, sig in self.signals.items())
        self.dispatch("/signals", json.dumps(directory))
    def register(self, new_signal):
        """Add a signal to the table and announce the updated directory."""
        self.signals[new_signal.name] = new_signal
        self.broadcast_signals()
    def unregister(self, deleted_signal):
        """Remove a signal (if present) and announce the updated directory."""
        self.signals.pop(deleted_signal.name, None)
        self.broadcast_signals()
    def publish_signal(self, name):
        """Dispatch one signal's current value to its own topic URI."""
        signal = self.signals[name]
        payload = {"name": name, "value": signal.value}
        self.dispatch(signal.uri, json.dumps(payload))
    def publish_signal_public(self, name):
        """Thread-safe publish: marshal the call onto the reactor thread."""
        reactor.callFromThread(self.publish_signal, name)
class Hardwire(object):
_instance = None
_reactor_thread = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Hardwire, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self, app=None, port=None, debug=False):
if port is None:
self._port = settings.PORT
else:
self._port = port
import __main__
self._user_dir = os.path.dirname(os.path.abspath(__main__.__file__))
self._hw_dir = os.path.dirname(os.path.abspath(__file__))
self._app = app
self._factory_init(debug)
self._site_init(debug)
def _factory_init(self, debug):
url = "ws://%s:%d" % (settings.INTERFACE, self._port)
self.factory = SignalServerFactory(url, debug)
self.factory.protocol = SignalServerProtocol
self.factory.setProtocolOptions(allowHixie76 = True)
self.factory.startFactory()
def _site_init(self, debug):
# Twisted Web resource for our WAMP factory
ws_resource = WebSocketResource(self.factory)
# Write hardwire settings to JS file
with open(os.path.join(self._hw_dir, 'static', 'js', 'hw-settings.js'), 'w+') as f:
f.write('var hw_settings = {port: %d}' % self._port)
# Twisted Web resource for static assets
hw_static_resource = File(os.path.join(self._hw_dir, 'static'))
# Create root resource from either the user's WSGI app, the user's
# index.html, or the Hardwire default index.html
if self._app:
print "Using user-supplied WSGI app..."
wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), self._app)
child_resources = {'hw_static': hw_static_resource, \
'static': static_resource, \
settings.WSURI_SUFFIX: ws_resource}
root_resource = WSGIRootResource(wsgi_resource, child_resources)
else:
user_index_path = os.path.join(self._user_dir, 'index.html')
if os.path.isfile(user_index_path):
print "Using user-supplied index.html..."
index_path = self._user_dir
else:
print "Using Hardwire default index.html..."
index_path = os.path.join(self._hw_dir, 'templates')
root_resource = File(index_path)
root_resource.putChild("hw_static", hw_static_resource)
root_resource.putChild(settings.WSURI_SUFFIX, ws_resource)
if debug:
log.startLogging(sys.stdout)
site = Site(root_resource)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is supported
reactor.listenTCP(self._port, site)
def run(self):
# Run the Twisted reactor in a thread. This is not the "right" way to
# use Twisted, but we're trading server efficiency for server-side code
# simplicity.
self._reactor_thread = Thread(target=reactor.run, args=(False,))
try:
self._reactor_thread.start()
except (KeyboardInterrupt, SystemExit):
self.stop()
def stop(self):
reactor.callFromThread(reactor.stop)
self._reactor_thread.join()
|
import discord
from discord.ext import commands
# Inicializaciones
bot=commands.Bot(command_prefix="-")  # every command is invoked with a leading '-'
bot.remove_command("help")  # drop the built-in help so the custom -help embed can register
@bot.event
async def on_ready():
    """Set the bot's playing status once the gateway connection is up."""
    estado = discord.Game(name="¡Computación! | -help")
    await bot.change_presence(activity=estado)
@bot.event
async def on_command_error(ctx, error):
    """Reply with a friendly notice when an unknown command is invoked."""
    if not isinstance(error, commands.CommandNotFound):
        return
    await ctx.send(f"```diff\n- No conozco ese comando :(\n```")
# Comandos Generales
@bot.command()
async def help(ctx):
    """Send an embed listing every available command with a short description."""
    embed = discord.Embed(colour=discord.Colour.green())
    thumbnail = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/02/Circle-icons-computer.svg/768px-Circle-icons-computer.svg.png"
    embed.set_thumbnail(url=thumbnail)
    embed.set_author(name="Esta es una Lista de todos los Comandos Disponibles :")
    # (command, description) pairs rendered as embed fields, in display order.
    comandos = (
        ("_-ping_", "Muestra la Latencia del Bot al Servidor!"),
        ("_-mips_", "Decodifica una Instrucción de MIPS!"),
        ("_-bi_", "Convierte un número decimal a binario!"),
        ("_-hexa_", "Convierte un número decimal a hexa!"),
        ("_-prom_", "Calculo del Promedio de Tres Parciales!"),
        ("_-clear_", "Limpia los mensajes de un canal de texto!"),
    )
    for nombre, descripcion in comandos:
        embed.add_field(name=nombre, value=descripcion, inline=False)
    embed.set_image(url="https://i.gyazo.com/9feb38bdc5d70bdb93005b5d95c7345f.png")
    await ctx.send(embed=embed)
@bot.command()
async def clear(ctx, amount : int):
    """Delete `amount` messages plus the invoking command message itself."""
    await ctx.channel.purge(limit=amount + 1)
    if amount == 1:
        await ctx.send(f"```diff\n- ¡Se ha eliminado {amount} mensaje! :)\n```")
    else:
        await ctx.send(f"```diff\n- ¡Se han eliminado {amount} mensajes! :)\n```")
@clear.error
async def clear_error(ctx, error):
    """Explain the expected syntax when -clear is called without a count."""
    if not isinstance(error, commands.MissingRequiredArgument):
        return
    await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
    await ctx.send(f"```\nEjemplo :\n -clear 10 \n Este comando eliminaria 10 mensajes!\n```")
@bot.command()
async def ping(ctx):
    """Report the bot's gateway latency in milliseconds."""
    await ctx.send(f"```\n¡Tengo una latencia de {round(bot.latency * 1000)}ms! :)\n```")
# Calificaciones
@bot.command()
async def prom(ctx,x:float,y:float,z:float):
    """Average three exam grades after validating each is within [0, 100]."""
    parciales = [x, y, z]
    promediofinal = str(round(sum(parciales) / len(parciales), 2))
    if x > 100 or y > 100 or z > 100:
        await ctx.send(f"```diff\n- ¡Las calificaciones de los parciales no pueden ser mayores a cien!\n```")
    elif x < 0 or y < 0 or z < 0:
        await ctx.send(f"```diff\n- ¡Las calificaciones de los parciales no pueden ser menores a cero!\n```")
    else:
        await ctx.send(f"```\nConversión de Promedio de Tres Parciales :\n\n -Alumno : {ctx.author.name}\n\n -Parcial #1 = {x}\n -Parcial #2 = {y}\n -Parcial #3 = {z}\n\n -Promedio Final = {promediofinal}\n\n```")
        await ctx.send(f"```\n ¡Conversión Exitosa! :)\n```")
@prom.error
async def prom_error(ctx, error):
    """Explain the expected syntax when -prom is called with missing grades.

    CONSISTENCY FIX: renamed from `prom` — the handler was rebinding the
    module-level name of the command callback. Now matches `clear_error`.
    """
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```\nEjemplo :\n -prom 100 90 85\n Este comando calcularía un promedio de 91.67!\n```")
# Arquitectura de Computadoras
@bot.command()
async def bi(ctx,zeros:int,decimal:int):
    """Convert a decimal number to binary, zero-padded to `zeros` bits."""
    binario = bin(decimal)[2:].zfill(zeros)
    await ctx.send(f"```\nConversión de Decimal a Binario :\n\n -Número Original : {decimal}\n -Número en Binario : {binario}\n -Representado en {zeros} bits\n\n```")
    await ctx.send(f"```\n ¡Conversión Exitosa! :)\n```")
@bi.error
async def bi_error(ctx, error):
    """Explain the expected syntax when -bi is called with missing arguments.

    CONSISTENCY FIX: renamed from `bi` — the handler was rebinding the
    module-level name of the command callback. Now matches `clear_error`.
    """
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```\nEjemplo :\n -bi 8 10\n Este comando convertiria el 10 a binario en formato de 8 bits!\n```")
@bot.command()
async def hexa(ctx,num:int):
    """Convert a decimal number to its hexadecimal representation."""
    hexa = hex(num)  # local shadows the command name on purpose (used in the message)
    await ctx.send(f"```\nConversión de Decimal a Hexadecimal :\n\n -Número Original : {num}\n -Número en Hexadecimal : {hexa}\n\n```")
    await ctx.send(f"```\n ¡Conversión Exitosa! :)\n```")
@hexa.error
async def hexa_error(ctx, error):
    """Explain the expected syntax when -hexa is called without a number.

    CONSISTENCY FIX: renamed from `hexa` — the handler was rebinding the
    module-level name of the command callback. Now matches `clear_error`.
    """
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```\nEjemplo :\n -hexa 10\n Este comando convertiria el 10 a Hexadecimal!\n```")
@bot.command()
async def mips(ctx,instruction:str):
    """Decode a 32-character binary MIPS instruction string and report its fields.

    Standard MIPS32 layout (string slice indices):
    opcode [0:6], rs [6:11], rt [11:16], rd [16:21], shamt [21:26], funct [26:32].
    """
    # funct code -> mnemonic for R-type instructions
    r_functs = {
        "100000": "ADD",
        "100010": "SUB",
        "100100": "AND",
        "100101": "OR",
        "101010": "SLT",
        "100110": "XOR",
        "011010": "DIV",
        "011000": "MULT",
        "010000": "SHIFTL",
    }
    # opcode -> mnemonic for the supported I-type instructions.
    # BUG FIX: lw is opcode 100011 (35); the original listed 110001.
    i_opcodes = {
        "001000": "ADDI",
        "001100": "ANDI",
        "001101": "ORI",
        "001110": "XORI",
        "001010": "SLTI",
        "000100": "BEQ",
        "100011": "LW",
        "101011": "SW",
    }
    if len(instruction)==32:
        opcode = instruction[0:6]
        if opcode == "000000":
            Tipo="R"
            # BUG FIX: funct occupies bits [26:32] and shamt bits [21:26];
            # the original sliced [25:31] and [20:25] (off by one).
            funct_bits = instruction[26:32]
            funct = r_functs.get(funct_bits, f"Error - No existe función para : {funct_bits}")
            if instruction[21:26]!="00000":
                shamt=f"Error - El Shamt es diferente de 00000"
            else:
                shamt="00000"
            await ctx.send(f"```\nDetector de Instrucciones MIPS :\n\n -Instrucción : {instruction}\n -Tipo : {Tipo}\n -OpCode = {instruction[0:6]}\n -RS = {instruction[6:11]}\n -RT = {instruction[11:16]}\n -RD = {instruction[16:21]}\n -Shamt = {shamt}\n -Funct = {funct}\n```")
            await ctx.send(f"```\n ¡Detección Exitosa!\n```")
        elif opcode in i_opcodes:
            Tipo="I"
            funct = i_opcodes[opcode]
            await ctx.send(f"```\nDetector de Instrucciones MIPS :\n\n -Instrucción : {instruction}\n -Tipo : {Tipo}\n -OpCode = {instruction[0:6]}\n -RS = {instruction[6:11]}\n -RT = {instruction[11:16]}\n -Offset = {instruction[16:32]}\n -Funct = {funct}\n```")
        else:
            await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
            await ctx.send(f"```diff\n- No existe un OpCode registrado para {instruction[0:6]}\n\n```")
    elif len(instruction)<32:
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```diff\n- La Instrucción contiene menos de 32 bits!\n\n```")
    else:
        # length > 32 (the original's final else branch was unreachable)
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```diff\n- La Instrucción contiene más de 32 bits!\n\n```")
@mips.error
async def mips_error(ctx, error):
    """Explain the expected syntax when -mips is called without an instruction.

    CONSISTENCY FIX: renamed from `mips` — the handler was rebinding the
    module-level name of the command callback. Now matches `clear_error`.
    """
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(f"```diff\n- Error de Sintaxis!\n\n```")
        await ctx.send(f"```\nEjemplo :\n -mips 00101000000010010000000000001010\n Este comando daría como output una Instrucción de Tipo R!\n```")
# NOTE(review): the bot token is blank — discord.py will fail to log in.
# Load the real token from an environment variable before deploying.
bot.run("")
# Read one chain string for each of the labels a, b, c (in that order), then
# repeatedly hop: consume the first character of the current label's chain and
# make it the new current label, until the current chain is exhausted.
cadenas = {letra: input() for letra in ('a', 'b', 'c')}
actual = 'a'
while cadenas[actual]:
    siguiente = cadenas[actual][0]
    cadenas[actual] = cadenas[actual][1:]
    actual = siguiente
print(actual.upper())
def pattern(n):
    """Return the even-number triangle up to n rows.

    Each even number e in 2..n contributes one line of the digit e repeated
    e times; n < 2 yields the empty string.
    """
    rows = [str(even) * even for even in range(2, n + 1, 2)]
    return '\n'.join(rows)
'''
##Task:
You have to write a function pattern which creates the following pattern
up to n rows.
If the Argument is 0 or a Negative Integer then it should
return "" i.e. empty string.
If an odd number is passed as the argument, the pattern should extend
up to the largest even number smaller than the passed odd number.
If the argument is 1 then also it should return "".
##Examples:
pattern(8):
22
4444
666666
88888888
pattern(5):
22
4444
Note: There are no spaces in the pattern
Hint: Use \n in string to jump to next line
'''
|
"""
The implementation of hidden markov model using tensorflow for a sequence of discrete observations.
This implementation is simpler than the original version.
The tutorial of the original version can be found here: https://web.stanford.edu/~jurafsky/slp3/A.pdf
"""
import numpy as np
import tensorflow as tf
from graphviz import Digraph
class HMMD_TF:
    """
    Hidden Markov model with discrete observations, trained by directly
    minimising the negative log-likelihood of the sequence with Adam
    (TensorFlow 1.x graph mode via tf.compat.v1).
    """
    def __init__(self, num_of_hidden_states):
        self.num_of_hidden_states = num_of_hidden_states
    def declare_variables(self):
        """Create the observation placeholder and the pi/A/B variables.

        Rows of pi, A and B are softmax-normalised so each is a valid
        probability distribution.
        """
        # BUG FIX: previously read the module-level globals `vocabulary` and
        # `sequence`; use the data stored on the instance by fit() instead.
        num_of_vocabularies = len(self.vocabulary)
        num_of_observations = len(self.observations)
        self.tf_observations = tf.compat.v1.placeholder(shape=(num_of_observations,), dtype=tf.int32,
                                                        name='observation')
        self.tf_pi = tf.Variable(initial_value=tf.random.normal(shape=(1, self.num_of_hidden_states)), dtype=tf.float32,
                                 name='pi')
        self.tf_pi = tf.nn.softmax(self.tf_pi)  # this ensures that sum of pi elements is equal to 1
        self.tf_A = tf.Variable(
            initial_value=tf.random.normal(shape=(self.num_of_hidden_states, self.num_of_hidden_states)),
            dtype=tf.float32, name='A')
        self.tf_A = tf.nn.softmax(self.tf_A, axis=1)
        self.tf_B = tf.Variable(initial_value=tf.random.normal(shape=(self.num_of_hidden_states, num_of_vocabularies)),
                                dtype=tf.float32, name='B')
        self.tf_B = tf.nn.softmax(self.tf_B, axis=1)
    def fit(self, observations, vocabulary, iterations=10000):
        """Learn A and B from an integer-encoded observation sequence.

        observations -- 1-D int array of vocabulary ids (from read_data)
        vocabulary   -- dict mapping raw symbol -> integer id
        iterations   -- number of Adam steps
        """
        self.vocabulary = vocabulary
        self.observations = observations
        self.declare_variables()
        # Cost = -log P(observations); minimising it maximises the likelihood.
        last_anpha = self.compute_anpha(self.tf_A, self.tf_B, self.tf_pi, self.tf_observations, len(self.observations))
        tf_cost = - tf.math.log(tf.reduce_sum(last_anpha))  # use log to training better
        train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(tf_cost)
        # CONSISTENCY FIX: use tf.compat.v1 session APIs, matching the
        # tf.compat.v1 calls used elsewhere in this class.
        with tf.compat.v1.Session() as session:
            session.run(tf.compat.v1.global_variables_initializer())
            feed = {self.tf_observations: observations}
            for i in range(iterations):
                session.run(train_op, feed_dict=feed)
                cost = session.run(tf_cost, feed_dict=feed)
                print('iteration ' + str(i) + " : cost = " + str(cost))
            self.A = session.run(self.tf_A, feed_dict=feed)
            self.B = session.run(self.tf_B, feed_dict=feed)
    def compute_anpha(self, tf_A, tf_B, tf_pi, tf_observations, T):
        """Forward-algorithm alpha recursion; returns the final 1xN alpha row.

        (Name kept as 'anpha' — a typo for 'alpha' — for API compatibility.)
        """
        last_anpha = tf.math.multiply(tf_pi, tf_B[:, tf_observations[0]])  # 1xN
        for t in range(1, T):
            last_anpha = tf.multiply(tf.matmul(last_anpha, tf_A), tf_B[:, tf_observations[t]])  # 1xN
        return last_anpha
    def draw(self):
        """
        Draw HMM after finishing the training process
        :return:
        """
        dot = Digraph(comment='hmm')
        # create hidden state nodes
        for i in range(self.num_of_hidden_states):
            dot.node('s' + str(i), 'State ' + str(i))
        # create nodes in vocabulary
        for i in range(self.B.shape[1]):
            dot.node('o' + str(i), 'Observation ' + str(i))
        # add weights
        for i in range(self.A.shape[0]):
            for j in range(self.A.shape[1]):
                dot.edge('s' + str(i), 's' + str(j), label=str('%.2f' % self.A[i, j]))
        for i in range(self.B.shape[0]):
            for j in range(self.B.shape[1]):
                dot.edge('s' + str(i), 'o' + str(j), label=str('%.2f' % self.B[i, j]))
        # BUG FIX: label used the module-level global `sequence`; use the
        # sequence stored on the instance by fit().
        dot.attr(label='Observations = ' + str(self.observations) + '\nvocabulary = ' + str(self.vocabulary))
        dot.render('../../graph-output/hmm_graph.gv', view=True)
def read_data(experiment):
    """Encode a sequence of trial symbols as integer ids.

    experiment -- iterable of hashable symbols (e.g. the string "THTH...")
    Returns (sequence, vocabulary): a 1-D int ndarray of ids in trial order,
    and the dict mapping each symbol to its id (in order of first appearance).
    """
    vocabulary = dict()
    codes = []
    for trial in experiment:
        if trial not in vocabulary:
            vocabulary[trial] = len(vocabulary)
        codes.append(vocabulary[trial])
    return np.array(codes, dtype='int'), vocabulary
if __name__ == '__main__':
    np.set_printoptions(suppress=True)
    # the size of the vocabulary is 2 (i.e., T means tail, H means head)
    sequence, vocabulary = read_data("THTHTHTHTHTHTHTHTHTHTHTHTHTHTHTHTHTHTHTH")
    # train HMM and render the learned transition/emission graph
    hmm = HMMD_TF(num_of_hidden_states=2)  # we can choose a different number of hidden states
    hmm.fit(sequence, vocabulary)
    hmm.draw()
# -*- coding: utf-8 -*-
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_library.settings')
import django
django.setup()
from django.utils import timezone
from books.models import Category, Book, Tag
def populate():
    """Seed the database with three categories and their sample books.

    Each category is created/updated via add_cat(), every book via add_book(),
    and every book is attached to the generic "book" tag.
    """
    # Sample data: one list of book dicts per category.
    programming_books = [
        {"title": "Official Python Tutorial",
         "author": "Python official",
         "publisher": "Python.org",
         "published_date": timezone.now().date(),
         "short_description": "The official tutorial published online.",
         "related_url": "www.python.com",
         "views": 123,
         "likes": 12,
         "downloads": 45},
        {"title": "Tango with Django",
         "author": "Leif Azzopardi",
         "publisher": "Python.org",
         "published_date": "2017-01-01",
         "short_description": "xxx",
         "related_url": "www.tangowithdjango.com",
         "views": 341,
         "likes": 1234,
         "downloads": 1},
        {"title": "Java",
         "author": "Xin Han",
         "publisher": "Python.org",
         "published_date": "2017-01-01",
         "short_description": "",
         "related_url": "www.java.com",
         "views": 1233,
         "likes": 1040,
         "downloads": 1214},]
    physics_books = [
        {"title": "微分几何与广义相对论",
         "author": "梁灿彬",
         "publisher": "科学出版社出版",
         "published_date": "2017-01-01",
         "short_description": "广义相对论",
         "related_url": "http://www.sinap.ac.cn",
         "views": 223,
         "likes": 32,
         "downloads": 45},
        {"title": "量子力学",
         "author": "曾谨言",
         "publisher": "科学出版社出版",
         "published_date": "2011-01-01",
         "short_description": "",
         "related_url": "",
         "views": 331,
         "likes": 144,
         "downloads": 1234},
        {"title": "电动力学",
         "author": "郭硕鸿",
         "publisher": "科学出版社出版",
         "published_date": "2017-01-01",
         "short_description": "",
         "related_url": "",
         "views": 3313,
         "likes": 104,
         "downloads": 124}, ]
    other_books = [
        {"title": "美国种族简史",
         "author": "托马斯·索威尔",
         "publisher": "科学出版社出版",
         "published_date": "2017-01-01",
         "short_description": "",
         "related_url": "",
         "views": 43,
         "likes": 24,
         "downloads": 43},
        {"title": "世界为何存在",
         "author": "吉姆·霍尔特",
         "publisher": "科学出版社出版",
         "published_date": "2017-01-01",
         "short_description": "",
         "related_url": "",
         "views": 341,
         "likes": 1234,
         "downloads": 1},
        {"title": "和韩欣一起飞",
         "author": "Xin Han",
         "publisher": "科学出版社出版",
         "published_date": "2017-01-01",
         "short_description": "",
         "related_url": "",
         "views": 1233,
         "likes": 1040,
         "downloads": 1214}, ]
    # Category name -> category counters plus its list of books.
    cats = {"Programming": {"views": 22, "likes": 12, "books": programming_books},
            "Physics": {"views": 32, "likes": 16, "books": physics_books},
            "Others": {"views": 16, "likes": 8, "books": other_books},}
    # Create each category, then its books, tagging every book with "book".
    for cat, cat_data in cats.items():
        c = add_cat(cat, cat_data["views"], cat_data["likes"])
        if cat_data["books"]:
            for b in cat_data["books"]:
                book = add_book(c, b["title"], b["author"],
                                b['publisher'], b['published_date'],
                                b['short_description'], b['related_url'],
                                b['views'], b['likes'], b['downloads'],)
                add_book_to_tag(book, add_tag("book"))
def add_book(cat, title, author, publisher,
             published_date, short_description,
             related_url, views=0, likes=0, downloads=0, upload=None):
    """Create or update the Book identified by (category, title).

    All remaining arguments overwrite the corresponding model fields.
    Returns the saved Book instance.
    """
    b = Book.objects.get_or_create(category=cat, title=title)[0]
    b.author = author
    b.publisher = publisher
    b.published_date = published_date
    b.short_description = short_description
    b.upload = upload
    b.related_url = related_url
    # BUG FIX: these counters were hard-coded to 0, silently discarding the
    # views/likes/downloads values every caller passes in.
    b.views = views
    b.likes = likes
    b.downloads = downloads
    b.save()
    return b
def add_book_to_tag(book, tag):
    """Attach `tag` to the book's many-to-many tag set and persist the book."""
    book.tag.add(tag)
    book.save()
def add_cat(name, views=0, likes=0):
    """Create or update the Category named `name`, setting its counters.

    Returns the saved Category instance.
    """
    category, _created = Category.objects.get_or_create(name=name)
    category.views = views
    category.likes = likes
    category.save()
    return category
def add_tag(tag):
    """Fetch or create the Tag with the given label and return it."""
    tag_obj, _created = Tag.objects.get_or_create(tag=tag)
    tag_obj.save()
    return tag_obj
if __name__ == "__main__":
    print("Starting books population script...")
    # populate()  # NOTE(review): seeding is disabled; only the demo query below runs
    # Demo query: list every book whose title contains the letter 'o'.
    books = Book.objects.filter(title__icontains='o')
    for book in books:
        print(book.title)
|
from control_panel.util import coin_data_formatter, create_timestamp
from control_panel.cryptography_func import coin_data_hasher, validate_coin_data_hash, generate_random_id, generate_block_hash
class BlockWeb:
    """Placeholder for the block-web structure (no behaviour yet)."""
    def __init__(self):
        """No state to initialise yet."""
class BlockChain:
    """Skeleton of the chain container; no behaviour implemented yet."""
    def __init__(self, ):
        pass
    # NOTE(review): stray empty string literal below is a no-op left in place.
    """
    """
    def add_block(self):
        """
        This is the main function that happens during mining
        (not yet implemented).
        """
class Block:
    """A single block: identity, timestamp, owner and hash link to its parent.

    NOTE(review): the parameter name 'block_chian_id' is a typo for
    'block_chain_id' but is kept so keyword callers keep working.
    """
    def __init__(self, index, wallet_id, block_chian_id, previous_hash="000"):
        self.index = index
        self.timestamp = create_timestamp()
        self.block_id = generate_random_id()
        self.previous_hash = previous_hash
        self.wallet_id = wallet_id
        self.block_chain_id = block_chian_id
        # BUG FIX: compute the hash only after every field it covers is set;
        # previously cal_block_hash() ran first and raised AttributeError on
        # previous_hash/wallet_id/block_chain_id.
        self.hash = self.cal_block_hash()
    def cal_block_hash(self):
        """Hash the block's identifying fields, formatted in a fixed order."""
        coin_data = [
            str(self.index),
            str(self.timestamp),
            str(self.block_id),
            str(self.previous_hash),
            str(self.wallet_id),
            str(self.block_chain_id),
        ]
        data_to_be_hashed = coin_data_formatter(coin_data)
        return generate_block_hash(data_to_be_hashed)
    def create_block(self):
        """
        Block would be created and saved here (not yet implemented).
        """
    def change_wallet_id(self):
        """
        This would be used by the blockchain class to change the owner of
        the coin (not yet implemented).
        """
class Wallet:
    """A wallet: random id, a transaction history and a cached balance."""
    def __init__(self):
        self.wallet_id = generate_random_id()
        self.transaction_chain = []
        self.wallet_balance = self.calculate_balance()
    def calculate_balance(self):
        """Total the wallet's funds from its transaction history.

        BUG FIX: the original method had no body at all, which made the whole
        module a syntax error. Placeholder returns 0 until transaction records
        carry amounts — TODO implement real summation.
        """
        return 0
class CoinUser:
    """Personal and banking details of a coin user."""
    def __init__(self, first_name, last_name, email, phone_no, banker_name, account_no):
        self.first_name = first_name
        self.last_name = last_name
        self.user_email = email
        # BUG FIX: phone_no was assigned from banker_name, losing the phone
        # number entirely and never storing the banker's name.
        self.phone_no = phone_no
        self.banker_name = banker_name
        self.account_no = account_no
# NOTE(review): CoinData is not defined in this module nor imported, so these
# two lines raise NameError at import time — confirm the intended class
# (possibly Block?) or remove this demo snippet.
dc = CoinData(123, 12233, 122, 1122)
print(dc.hash)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-07 01:37
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4: switches FeedItem.publish_date's
    # default to timezone.now (a callable evaluated per row at save time).
    dependencies = [
        ('yyfeed', '0002_auto_20170106_1714'),
    ]
    operations = [
        migrations.AlterField(
            model_name='feeditem',
            name='publish_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # Executable built from program.c plus two generated sources: one
      # emitted into INTERMEDIATE_DIR, one into a relative actions-out/ dir.
      'target_name': 'program',
      'type': 'executable',
      'msvs_cygwin_shell': 0,
      'sources': [
        'program.c',
      ],
      'actions': [
        {
          # Generate prog1.c into the build's intermediate directory.
          'action_name': 'make-prog1',
          'inputs': [
            'make-prog1.py',
          ],
          'outputs': [
            '<(INTERMEDIATE_DIR)/prog1.c',
          ],
          'action': [
            'python', '<(_inputs)', '<@(_outputs)',
          ],
          # Feed the generated file back into this target's source list.
          'process_outputs_as_sources': 1,
        },
        {
          # Generate prog2.c into a source-relative output directory.
          'action_name': 'make-prog2',
          'inputs': [
            'make-prog2.py',
          ],
          'outputs': [
            'actions-out/prog2.c',
          ],
          'action': [
            'python', '<(_inputs)', '<@(_outputs)',
          ],
          'process_outputs_as_sources': 1,
        },
      ],
    },
  ],
}
|
from reference.models import Province, District
from .resources import ProvinceResource, DistrictResource
from import_export.formats import base_formats
from django.urls import reverse_lazy
from giz.import_export_views import ImportView
class ProvinceImportView(ImportView):
    """Excel-only import screen for Province records."""
    model = Province
    template_name = 'dashboard/import/reference_import_province.html'
    formats = (base_formats.XLSX,)
    resource_class = ProvinceResource
    success_url = reverse_lazy('importdataprovince')
    def create_dataset(self, *args, **kwargs):
        """Insert an extra 'source_user' column carrying the uploader's id."""
        dataset = super().create_dataset(*args, **kwargs)
        # NOTE: row count read from tablib's private _data attribute.
        row_count = len(dataset._data)
        user_column = [self.request.user.id] * row_count
        dataset.append_col(user_column, header="source_user")
        return dataset
class DistrictImportView(ImportView):
    """Excel-only import screen for District records."""
    model = District
    template_name = 'dashboard/import/reference_import_district.html'
    formats = (base_formats.XLSX,)
    resource_class = DistrictResource
    success_url = reverse_lazy('importdatadistrict')
    def create_dataset(self, *args, **kwargs):
        """Insert an extra 'source_user' column carrying the uploader's id."""
        dataset = super().create_dataset(*args, **kwargs)
        # NOTE: row count read from tablib's private _data attribute.
        row_count = len(dataset._data)
        user_column = [self.request.user.id] * row_count
        dataset.append_col(user_column, header="source_user")
        return dataset
|
# This script finds the optimal classifier (PCA+KNN vs. Linear SVM) and
# justifies the use of the threshold for calling an unknown cell S or R.
%reset
import numpy as np
import pandas as pd
import os
import scanpy as sc
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import plot, show, draw, figure, cm
import matplotlib as plt
import random
from collections import OrderedDict
import copy
import math
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
import plotnine as gg
import scipy as sp
import scipy.stats as stats
import sklearn as sk
import sklearn.model_selection as model_selection
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
import sklearn.feature_selection as feature_selection
import sklearn.linear_model as linear_model
import sklearn.pipeline as pipeline
import csv
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
os.chdir('/Users/kj22643/Documents/Documents/231_Classifier_Project/code')
from func_file import find_mean_AUC
from func_file import find_mean_AUC_SVM
# Set path to the folder containing the scRNAseq data
path = '/Users/kj22643/Documents/Documents/231_Classifier_Project/data'
#path = '/stor/scratch/Brock/231_10X_data/'
os.chdir(path)
sc.settings.figdir = 'KJ_plots'
sc.set_figure_params(dpi_save=300)
sc.settings.verbosity = 3
#%% Load in normalized scRNAseq data from all time points
adata = sc.read('post-cell-cycle-regress.h5ad')
adata.obs.head()
# Note: adata object contains additional column of lineage abundance info.
# this will be used to identify sensitive and resistant cells in the pre-
# treatment time point. Also contains column for the sample number, which
# corresponds to the time point
uniquelins = adata.obs['lineage'].unique()
nunique = len(uniquelins)
# Name samples by their time point (0, 7, 10 weeks and pre/post)
samps= adata.obs['sample'].unique()
timepoint = np.array(['t=0 wks', 't=7 wks', 't=10 wks'])
adata.obs.loc[adata.obs['sample']==samps[0], 'timepoint']='t=0 wks'
adata.obs.loc[adata.obs['sample']==samps[1], 'timepoint']='t=7 wks'
adata.obs.loc[adata.obs['sample']==samps[2], 'timepoint']='t=10 wks'
TP = np.array(['pre', 'post'])
adata.obs.loc[adata.obs['sample']==samps[0], 'TP']='pre'
adata.obs.loc[adata.obs['sample']==samps[1], 'TP']='post'
adata.obs.loc[adata.obs['sample']==samps[2], 'TP']='post'
# Plot all data in a umap colored by timepoint
sc.pl.umap(adata,color='timepoint',palette=['#f79d02', '#d604f7', '#00c6c6'])
#%% Calculate the pre and post treatment lineage abundances
# lineage counts = the number of cells in a lineage
# lineage abundance = the proportion of a population the cells in that lineage
# take up at a specific time point.
# Calculate the magnitude of change in lineage abundance
# change_i = abund_post_i - abund_pre_i
lincountspre= adata.obs.loc[adata.obs.TP=='pre','lineage'].value_counts()
lincountspost= adata.obs.loc[adata.obs.TP=='post','lineage'].value_counts()
npre = sum(lincountspre)
npost=sum (lincountspost)
linabundpre = lincountspre/npre
linabundpost = lincountspost/npost
# Put into a dictionary to compare the lineage abundances at each time point
dpost = linabundpost.to_dict()
dpre = linabundpre.to_dict()
# dchange = the change in lineage abundance from pre to post of an individual lineage
# key = lineage, val = change in lineage abundance
dchange = {key: dpost[key] - dpre.get(key, 0) for key in dpost.keys()}
# save this dictionary into the anndata object
adata.uns['linabundchange'] = dchange
linabundchange = adata.uns['linabundchange']
# output these lists of lineage abundances to csv files
a_file = open("linabundchange.csv", "w")
writer = csv.writer(a_file)
for key, value in linabundchange.items():
writer.writerow([key, value])
a_file.close()
linabundpre.to_csv("linabundpre.csv")
filename = "linabundpre.csv"
linabundpost.to_csv("linabundpost.csv")
filename = "linabundpost.csv"
#%%Map the lineages in dchange to the cells in the anndata object
# each cell now is labeled by the amount its lineage changes post treatment
adata.obs['linabundchange']= adata.obs['lineage'].map(dchange)
# Set the threshold for calling cells sensitive or resistant, pulling from only
# the lineages with greatest increase (R) or decrease (S) in abundance
S_threshold = 0.05
R_threshold = 0
# Plot the change in lineage abundance distribution
plt.figure()
plt.hist(adata.obs.loc[adata.obs.timepoint == 't=0 wks','linabundchange'], bins = 100)
# Vertical guides at the resistant (red, > R_threshold) and sensitive
# (green, < -S_threshold) calling thresholds.
plt.plot([R_threshold,R_threshold], [0,900], c='r', alpha = 1)
plt.plot([-S_threshold, -S_threshold],[0,900], c='g', alpha = 1)
plt.xlabel(' Change in lineage abundance')
plt.ylabel('Number of cells in the lineage')
plt.title('Distribution of lineage abundance change')
#plt.legend(loc=1, prop={'size': 15})
plt.ylim(0, 900)
plt.xlim(-0.5, 0.5)
plt.grid(b=None)
#%% Make the sensitive and resistant labels within your anndata object
# Possible labels; 'res'/'sens' are assigned from lineage dynamics below,
# the '_est' variants are reserved for classifier-estimated labels.
classLabel = np.array(['res', 'sens', 'unknown', 'res_est', 'sens_est'])
# Default every timepoint to 'unknown' before assigning ground-truth labels.
adata.obs.loc[adata.obs.timepoint=='t=0 wks','classLabel'] = 'unknown'
adata.obs.loc[adata.obs.timepoint=='t=7 wks','classLabel'] = 'unknown'
adata.obs.loc[adata.obs.timepoint=='t=10 wks','classLabel'] = 'unknown'
# Only pre-treatment (t=0 wks) cells are labeled, based on whether their
# lineage expanded (res) or shrank (sens) after treatment.
adata.obs.loc[(adata.obs.linabundchange>R_threshold)&(adata.obs.timepoint=='t=0 wks'), 'classLabel'] = 'res'
adata.obs.loc[(adata.obs.linabundchange<-S_threshold)&(adata.obs.timepoint=='t=0 wks'), 'classLabel'] = 'sens'
# Cells without a lineage barcode cannot be labeled from lineage dynamics.
adata.obs.loc[adata.obs.lineage=='nan', 'classLabel'] = 'unknown'
#adata.obs.loc[(adata.obs.linpropchange<-0.5)&(adata.obs.timepoint=='t=0hr'), 'classLabel'] = 'sens'
# plot the labeled cells alongside unknown cells and alone on same axes.
sc.pl.umap(adata,color='classLabel', palette = ['red', 'green', 'gray'])
sc.pl.umap(adata,color='classLabel', palette = ['red', 'green', 'white'])
#%% Now we want to take out only the data that is labeled as res and sens
# and test the classifier on the labeled dataset
adata_sr= adata[(adata.obs['classLabel']=='res')|(adata.obs['classLabel']=='sens'), :]
# One data frame: lineage + label + the raw expression matrix, indexed by cell.
dfsr= pd.concat([adata_sr.obs['lineage'],adata_sr.obs['classLabel'],
                 pd.DataFrame(adata_sr.raw.X,index=adata_sr.obs.index,
                              columns=adata_sr.var_names),], axis=1)
# plot only the labeled cells
sc.pl.umap(adata_sr,color='classLabel',palette=['red', 'green'])
nsr= len(dfsr)
# Integer-encode the labels. NOTE(review): pd.factorize assigns 0 to the first
# label encountered, so `sum(y)` counting resistant cells assumes 'res' maps
# to 1 — confirm the first labeled cell is 'sens'.
y= pd.factorize(dfsr['classLabel'])[0]
nres = sum(y)
mu_sr = sum(y)/len(y) # shows how we have unbalanced class sizes
print(mu_sr)
# Xsr is your cell gene matrix, y is your class labels
Xsr = dfsr.drop(columns= [ 'classLabel', 'lineage'])
# %%Assign the parameters for PCA+KNN and Linear SVM and fit the models
# PCA hyperparameters optimized in KJ_find_hyperparameters.py
# SET UP PCA
n_neighbors = 73
n_components = 500
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
pca=PCA(copy=True, iterated_power='auto', n_components=n_components, random_state=0,
    svd_solver='auto', tol=0.0, whiten=False)
# FIT PCA
# (PCA is unsupervised; the y argument is accepted for pipeline compatibility
# and ignored.)
pca.fit(Xsr, y)
# the components is a n_components x n-total genes matrix that gives the weight of
# each gene that goes into making the principal component.
components = pca.components_
compdf = pd.DataFrame(components, columns = adata_sr.var_names)
adata.uns['components'] = compdf
# We can use the compdf to make the arrow plot of the gene weights in PC space
V = pca.fit_transform(Xsr)
PCsr = pca.transform(Xsr)
var_in_PCs= pca.explained_variance_ratio_
cdf_varPCs = var_in_PCs.cumsum()
# SET UP SVM
Copt = 1
clf = svm.LinearSVC( C=Copt)
linear_svc = LinearSVC()
# FIT SVM
# NOTE(review): clf (C=Copt) is fitted and then immediately rebound to the
# default-parameter linear_svc fit on the next line, so the C=Copt model is
# effectively unused — confirm which C was intended.
clf.fit(Xsr, y)
clf = linear_svc.fit(Xsr, y)
# Calibrate the already-fitted SVM so it can emit class probabilities.
# NOTE(review): cv="prefit" calibrates on the same data used for fitting, so
# these probabilities may be over-confident.
calibrated_svc = CalibratedClassifierCV(base_estimator = linear_svc, cv= "prefit")
calibrated_svc.fit(Xsr, y)
predicted = calibrated_svc.predict(Xsr)
prob = calibrated_svc.predict_proba(Xsr)
# Per-gene SVM weights; stored on adata for the gene-weight plots below.
weights = clf.coef_
weightsdf = pd.DataFrame(weights, columns = adata_sr.var_names)
adata.uns['weights'] = weightsdf
#%% Split into train/test, fit each classifier to the training data, and test
# accuracy on testing data
kCV = 5
skf = StratifiedKFold(n_splits=kCV, shuffle= True)
Atrain = {}
Atest = {}
ytest = {}
ytrain = {}
proprestest = {}
proprestrain = {}
X = Xsr
folds_dict = {'trainmat':{}, 'trainlabel':{}, 'eigenvectors':{}, 'eigvals':{}, 'meanvec':{}}
# BUG FIX: the original wrapped skf.split() in an extra `for i in range(kCV)`
# loop and assigned every split of the inner loop to the same key i, so each
# Atrain[i]/Atest[i] ended up holding only the LAST fold of an independent
# reshuffle. Enumerate the kCV folds of a single stratified split instead.
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
    Atrain[i] = X.iloc[train_index, :]
    Atest[i] = X.iloc[test_index, :]
    ytest[i] = y[test_index]
    ytrain[i] = y[train_index]
    # Class prevalence (fraction of resistant cells) in each fold.
    proprestest[i] = sum(ytest[i])/len(ytest[i])
    proprestrain[i] = sum(ytrain[i])/len(ytrain[i])
# Save all of your stratified folds into a single dictionary.
folds_dict['trainmat'] = Atrain
folds_dict['trainlabel']= ytrain
folds_dict['testmat'] = Atest
folds_dict['testlabel'] = ytest
folds_dict['prevtest'] = proprestest
folds_dict['prevtrain']= proprestrain
#%% #Compute the ROC curve for each fold
# Per-fold AUC and accuracy for both candidate classifiers (PCA+KNN vs SVM).
AUC_PCA=np.zeros((kCV,1))
AUC_SVM = np.zeros((kCV,1))
acc_PCA = np.zeros((kCV,1))
acc_SVM = np.zeros((kCV,1))
for i in range(kCV):
    X_train = folds_dict['trainmat'][i]
    y_train = folds_dict['trainlabel'][i]
    X_test = folds_dict['testmat'][i]
    y_test = folds_dict['testlabel'][i]
    # Build new model based on training data set
    pca.fit(X_train, y_train)
    # Fit a nearest neighbor classifier on the model built on the training data set
    knn.fit(pca.transform(X_train), y_train)
    prob_scores = knn.predict_proba(pca.transform(X_test))
    # Compute the nearest neighbor accuracy on the embedded test set
    acc_PCAi = knn.score(pca.transform(X_test), y_test)
    acc_PCA[i]=acc_PCAi
    fpr, tpr, thresholds = metrics.roc_curve(y_test, prob_scores[:,1], pos_label=1)
    roc_auc = auc(fpr, tpr)
    AUC_PCA[i]= roc_auc
    # Same fold, linear SVM with probability calibration on the training data.
    clf = linear_svc.fit(X_train, y_train)
    calibrated_svc = CalibratedClassifierCV(base_estimator = linear_svc, cv= "prefit")
    calibrated_svc.fit(X_train, y_train)
    predicted = calibrated_svc.predict(X_test)
    prob = calibrated_svc.predict_proba(X_test)
    acc_SVMi = clf.score(X_test, y_test)
    acc_SVM[i] = acc_SVMi
    fpr, tpr, thresholds = metrics.roc_curve(y_test, prob[:,1], pos_label=1)
    roc_auc = auc(fpr, tpr)
    AUC_SVM[i] = roc_auc
#%% plot AUC for each classifier
meanAUC_PCA = AUC_PCA.mean()
meanAUC_SVM = AUC_SVM.mean()
meanacc_PCA = acc_PCA.mean()
meanacc_SVM = acc_SVM.mean()
# AUC per fold, one line per classifier (legend shows the mean).
plt.figure()
plt.plot(np.linspace(1,5, num=kCV), AUC_PCA, color='darkorange',
         label='PCA+KNN = %0.2f' % meanAUC_PCA)
plt.plot(np.linspace(1,5,num=kCV), AUC_SVM,color='navy',
         label = 'Linear SVM = %0.2f' % meanAUC_SVM, linestyle='--')
plt.ylim([0.5, 1.05])
plt.xlabel('fold')
plt.ylabel('AUC')
plt.title('AUC for each fold')
plt.legend(loc="lower right")
plt.grid(b=None)
plt.show()
# Accuracy per fold, same layout.
plt.figure()
plt.plot(np.linspace(1,5, num=kCV), acc_PCA, color='darkorange',
         label='PCA+KNN = %0.2f' % meanacc_PCA)
plt.plot(np.linspace(1,5,num=kCV), acc_SVM,color='navy',
         label = 'Linear SVM = %0.2f' % meanacc_SVM, linestyle='--')
plt.ylim([0.5, 1.05])
plt.xlabel('fold')
plt.ylabel('Accuracy')
plt.title('Accuracy in prediction for each fold')
plt.legend(loc="lower right")
plt.grid(b=None)
plt.show()
# Results indicate that the better classifier is linear SVM. Now see what
# probability thresholds enable accuracy in labeled data set
#%% Generate ROC curve to determine the best threshold for deciding on class
# FOR PCA
n_classes = len(np.unique(y))
# Fit a nearest neighbor classifier on the model built on the training data set
# (refit on ALL labeled cells, so this curve reflects training-set performance).
knn.fit(pca.transform(Xsr), y)
prob_scores = knn.predict_proba(pca.transform(Xsr))
# Compute the nearest neighbor accuracy on the embedded test set
acc_knn = knn.score(pca.transform(Xsr), y)
fpr, tpr, thresholds = metrics.roc_curve(y, prob_scores[:,1], pos_label=1)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
# Chance diagonal for reference.
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve labeled data PCA + KNN')
plt.legend(loc="lower right")
plt.grid(b=None)
plt.show()
#%% FOR SVM
clf = linear_svc.fit(Xsr, y)
calibrated_svc = CalibratedClassifierCV(base_estimator = linear_svc, cv= "prefit")
calibrated_svc.fit(Xsr, y)
# X is the full labeled matrix (aliased to Xsr before the CV loop above).
predicted = calibrated_svc.predict(X)
prob = calibrated_svc.predict_proba(X)
fpr, tpr, thresholds = metrics.roc_curve(y, prob[:,1], pos_label=1)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='navy',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve labeled data Linear SVM')
plt.legend(loc="lower right")
plt.grid(b=None)
plt.show()
# This tells us that the classes are very separable, and so really any reasonable
# threshold should be able to separate the classes.
# This completes the goal of this script- to determine the classifier and
# justify the use of any threshold.
# %% Apply SVM to data,
# Classify the cells that could not be labeled from lineage dynamics.
adata_unk= adata[(adata.obs['classLabel']=='unknown'), :]
# Make a data frame with just the raw gene-cell matrix but keep the cell identity
Xunk = pd.DataFrame(adata_unk.raw.X,index=adata_unk.obs.index,
                    columns=adata_unk.var_names)
# Refit SVM + calibration on all labeled cells, then predict the unknowns.
clf = linear_svc.fit(Xsr, y)
calibrated_svc = CalibratedClassifierCV(base_estimator = linear_svc, cv= "prefit")
calibrated_svc.fit(Xsr, y)
predicted = calibrated_svc.predict(Xunk)
prob = calibrated_svc.predict_proba(Xunk) # output a probability of being in each class
#%% Vary the threshold probability
# Strict cutoff: call a cell resistant only when P(res) > 0.9.
thres_prob = 0.9
B = prob[:,1]>thres_prob
y_est=B*1
#%%
# Try mapping outcomes
# first make an indexed data frame
class_est= pd.DataFrame(y_est, index = adata_unk.obs.index)
class_est.columns=['est']
# Map the estimates back onto every cell in adata (already-labeled cells get NaN).
adata.obs['class_est'] = adata.obs.index.map(class_est.est)
sc.pl.umap(adata,color=['class_est'])
# Promote the hard-thresholded estimates to class labels.
adata.obs.loc[adata.obs.class_est==1, 'classLabel'] = 'res'
adata.obs.loc[adata.obs.class_est==0, 'classLabel'] = 'sens'
#
sc.pl.umap(adata,color=['classLabel'])
# Resistant fraction (phir) and sensitive fraction (phis) at each timepoint.
adata_pre = adata[adata.obs['timepoint']=='t=0 wks', :]
dfpre = pd.concat([adata_pre.obs['classLabel'],
                   pd.DataFrame(adata_pre.raw.X,index=adata_pre.obs.index,
                                columns=adata_pre.var_names),], axis=1)
lin_list_pre = adata_pre.obs.lineage
#sc.pl.umap(adata_pre,color='classLabel', palette = ['red', 'green'])
npre = len(dfpre)
print(npre)
ypreb = dfpre.classLabel=='res'
ypre= ypreb*1
phirpre = (sum(ypre))/npre
phispre = 1-phirpre
print(phispre)
adata_post1 = adata[adata.obs['timepoint']=='t=7 wks', :]
# BUG FIX: columns were taken from adata_pre; use adata_post1's own var_names
# for consistency with the other two frames.
dfpost1= pd.concat([adata_post1.obs['classLabel'],
                    pd.DataFrame(adata_post1.raw.X,index=adata_post1.obs.index,
                                 columns=adata_post1.var_names),], axis=1)
npost1 = len(dfpost1)
print(npost1)
ypost1b = dfpost1.classLabel=='res'
ypost1= ypost1b*1
phirpost1 = (sum(ypost1))/npost1
phispost1 = 1-phirpost1
print(phispost1)
adata_post2 = adata[adata.obs['timepoint']=='t=10 wks', :]
dfpost2= pd.concat([adata_post2.obs['classLabel'],
                    pd.DataFrame(adata_post2.raw.X,index=adata_post2.obs.index,
                                 columns=adata_post2.var_names),], axis=1)
npost2 = len(dfpost2)
print(npost2)
# BUG FIX: the resistant mask was computed from dfpost1 (copy-paste); use
# dfpost2, otherwise phirpost2 divides t=7 wk labels by the t=10 wk cell count.
ypost2b = dfpost2.classLabel=='res'
ypost2= ypost2b*1
phirpost2 = (sum(ypost2))/npost2
phispost2 = 1-phirpost2
print(phispost2)
#%% Look at the weights of each gene! And compare the values of each
# Sort genes by |SVM weight| (row 0) so the most discriminative genes come first.
weightsdf = weightsdf.abs().sort_values(by= [0], axis =1,ascending = False)
weights1 = weightsdf.iloc[0,:]
indtop = weights1.index[0:50]
# Expression heatmap of the 50 top-weighted genes, grouped by class label.
sc.pl.matrixplot(adata, indtop, groupby = 'classLabel', swap_axes = True, figsize = [5, 10])
#standard_scale = 'var',
#%%
x = -np.abs(weights[0,:])
ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
weights1 = weightsdf.iloc[0,:]
# NOTE(review): weights comes from a binary LinearSVC coef_, which normally has
# a single row — weights[1,:] here looks like it would raise IndexError; confirm
# which row was intended.
ordered_weights = weights1.iloc[(-np.abs(weights[1,:]).argsort())]
indtop = ordered_weights.index[0:50]
sc.pl.matrixplot(adata, indtop, groupby = 'classLabel', swap_axes = True)
#%%
ordered_weights=weightsdf.iloc[(-np.abs(weights[0,:])).argsort()]
x = -np.abs(weights[0,:]);
ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
# NOTE(review): `ordered_comp1` and `PC2` are never defined above; these lines
# look like leftover exploratory code and will raise NameError as written.
indtop = ordered_comp1.index[0:50]
ordered_comp2=PC2.iloc[(-np.abs(components[1,:])).argsort()]
x2 = -np.abs(components[1,:]);
ind2 = np.unravel_index(np.argsort(x2, axis=None), x2.shape)
indtop = ordered_comp1.index[0:50]
indtop2 = ordered_comp2.index[0:50]
# Boolean masks selecting the first and second principal-component rows.
compdf1 = compdf.index==0
compdf2 = compdf.index==1
comps1 = compdf[compdf1]
comps2 = compdf[compdf2]
#%% Fit a nearest neighbor classifier on the model built on the training data set
# Make a nearest neighbor classifier from the down sampled sensitive cells
#knn.fit(pca.transform(Xsmall), ysmall)
knn.fit(pca.transform(Xsr), y)
# Probability cutoff above which a cell is called resistant.
thres_prob = 0.15
# Apply the pca and knn classifier to the pre, int, and post1&2 treatment samples.
# NOTE(review): Xpremin, Xint, Xpost1, Xpost2 and nint are defined earlier in
# the full script, outside this excerpt.
# --- pre-treatment sample ---
y_premin = knn.predict(pca.transform(Xpremin))
pre_prob= knn.predict_proba(pca.transform(Xpremin))
B = pre_prob[:,1]>thres_prob
# Soft estimate: expected resistant fraction from the probabilities, plus the
# already-labeled resistant cells in the numerator.
mu_pre = (sum(pre_prob[:,1]) + sum(y))/npre
y_pre = B*1
# Hard-thresholded estimate and its binomial variance.
mu_pre_PCA = (sum(y_pre)+sum(y))/(len(Xpremin) + len(Xsr))
sigmasq_pre_PCA = mu_pre_PCA*(1-mu_pre_PCA)/(len(Xsr)+len(Xpremin))
sigmasq_pre = (mu_pre*(1-mu_pre))/(npre)
mu_pre_k = (sum(y_premin)+sum(y))/npre
#print(mu_pre)
#print(mu_pre_k)
#print(mu_pre_PCA)
#print(sigmasq_pre)
# --- t=30 hr (intermediate) sample ---
y_int = knn.predict(pca.transform(Xint))
int_prob= knn.predict_proba(pca.transform(Xint))
mu_int = (sum(int_prob[:,1])/nint)
C = int_prob[:,1]>thres_prob
y_intpr = C*1
mu_int_PCA = sum(y_intpr)/(len(y_intpr))
sigmasq_int_PCA = mu_int_PCA*(1-mu_int_PCA)/(len(Xint))
mu_int_k = (sum(y_int))/nint
#print(mu_int)
#rint(mu_int_k)
#print(mu_int_PCA)
#print(sigmasq_int_PCA)
# --- t=1176 hr (post-treatment 1) sample ---
y_post1= knn.predict(pca.transform(Xpost1))
post_prob1= knn.predict_proba(pca.transform(Xpost1))
mu_post1 = sum(post_prob1[:,1])/npost1
D= post_prob1[:,1]>thres_prob
y_postpr1 = D*1
mu_post1_PCA = sum(y_postpr1)/(len(y_postpr1))
sigmasq_post1_PCA = mu_post1_PCA*(1-mu_post1_PCA)/len(Xpost1)
mu_post1_k = sum(y_post1)/npost1
#print(mu_post1)
#print(mu_post1_k)
#print(mu_post1_PCA)
#print(sigmasq_post1_PCA)
# --- t=1656 hr (post-treatment 2) sample ---
y_post2 = knn.predict(pca.transform(Xpost2))
post_prob2= knn.predict_proba(pca.transform(Xpost2))
mu_post2 = sum(post_prob2[:,1])/npost2
E= post_prob2[:,1]>thres_prob
y_postpr2 = E*1
mu_post2_PCA = sum(y_postpr2)/(len(y_postpr2))
sigmasq_post2_PCA = mu_post2_PCA*(1-mu_post2_PCA)/len(Xpost2)
mu_post2_k = sum(y_post2)/npost2
#print(mu_post2)
#print(mu_post2_k)
#print(mu_post2_PCA)
#print(sigmasq_post2_PCA)
#%%
# Re-apply the probability threshold to the cached class probabilities and
# report the estimated resistant fraction (phir) at each timepoint.
thres_prob = 0.15
B = pre_prob[:,1]>thres_prob
y_pre = B*1
mu_pre_PCA = (sum(y_pre)+sum(y))/(len(Xpremin) + len(Xsr))
D= post_prob1[:,1]>thres_prob
y_postpr1 = D*1
mu_post1_PCA = sum(y_postpr1)/(len(y_postpr1))
E= post_prob2[:,1]>thres_prob
y_postpr2 = E*1
mu_post2_PCA = sum(y_postpr2)/(len(y_postpr2))
print('phir0=',mu_pre_PCA)
print('phir7wks=', mu_post1_PCA)
print('phi10wks=', mu_post2_PCA)
#%%
# Sensitive fraction phi(t) at t = 0, 1176, 1656 hours with sample sizes.
phi_est= {'phi_t': [1-mu_pre_PCA, 1-mu_post1_PCA, 1-mu_post2_PCA],
          't': [0, 1176, 1656],
          'ncells': [3157,5262,4900]
          }
dfphi= DataFrame(phi_est, columns= ['phi_t', 't', 'ncells'])
print(dfphi)
dfphi.to_csv("phi_t_est_pyth.csv")
#%%
import os
filename = "phi_t_est_pyth.csv"
path = "/Users/kj22643/Documents/Documents/Grant_dose_optimization/data"
fullpath = os.path.join(path, filename)
# NOTE(review): fullpath is computed but never used — this writes to the
# current working directory again, not to the data folder. Confirm intent.
dfphi.to_csv("phi_t_est_pyth.csv")
#%% Make data frames for the pre, int, and post-treatment cells in PC space
# Each frame carries: PC coordinates + class label + time (hours) + recalled lineage.
# Pre-treatment (cells used to build the classifier)
PCsrdf = pd.DataFrame(PCsr)
PCsrdf['classlabel'] = y
PCsrdf['time'] = 0
PCsrdf.reset_index(drop=True, inplace=True)
# NOTE(review): dfsr is built from lineage/classLabel/raw X above; confirm it
# actually carries a 'recalled_lin' column at this point.
PCsrdf['recalled_lin'] = dfsr['recalled_lin']
# Pre-treatment
PCpmdf = pd.DataFrame(PCspremin)
PCpmdf['classlabel'] = y_pre
PCpmdf.reset_index(drop=True, inplace=True)
PCpmdf['time'] = 0
PCpmdf['recalled_lin'] = dfpremin['recalled_lin']
# t= 30 hr
PCintdf = pd.DataFrame(PCsint)
PCintdf['classlabel'] = y_intpr
PCintdf.reset_index(drop=True, inplace=True)
#PCintdf['kclust'] = kclustint
PCintdf['time'] = 30
PCintdf['recalled_lin'] = 'nan'
# t = 1176 hr
PCpost1df = pd.DataFrame(PCspost1)
PCpost1df['classlabel'] = y_postpr1
PCpost1df.reset_index(drop=True, inplace=True)
#PCpostdf['kclust'] = kclustpost
PCpost1df['time'] = 1176
PCpost1df['recalled_lin'] = 'nan'
# t= 1656hr
PCpost2df = pd.DataFrame(PCspost2)
PCpost2df['classlabel'] = y_postpr2
PCpost2df.reset_index(drop=True, inplace=True)
#PCpostdf['kclust'] = kclustpost
# BUG FIX: the t=10 wk frame was tagged time=1176 (copy-paste from the t=7 wk
# frame); this sample is t=1656 hr, matching the comment above and the
# t values used in phi_est.
PCpost2df['time'] = 1656
PCpost2df['recalled_lin'] = 'nan'
#%% PC1 PC2 and PC3
# Subset each PC frame by class label (0 = sensitive, 1 = resistant) and by
# recalled lineage so the populations can be plotted separately below.
# First just look at the cells used to build the classifier
PCsrdfs = PCsrdf[PCsrdf['classlabel'] == 0]
PCsrdfr = PCsrdf[PCsrdf['classlabel'] == 1]
# Then other breakdowns
PCsrdfls = PCsrdf[PCsrdf['recalled_lin'] == 'sens']
PCsrdflr = PCsrdf[PCsrdf['recalled_lin'] == 'res']
PCpmdfs = PCpmdf[PCpmdf['classlabel'] == 0]
PCpmdfr = PCpmdf[PCpmdf['classlabel'] == 1]
PCintdfs = PCintdf[PCintdf['classlabel'] == 0]
PCintdfr = PCintdf[PCintdf['classlabel'] == 1]
PCpost1dfs = PCpost1df[PCpost1df['classlabel'] == 0]
PCpost1dfr = PCpost1df[PCpost1df['classlabel'] == 1]
PCpost2dfs = PCpost2df[PCpost2df['classlabel'] == 0]
PCpost2dfr = PCpost2df[PCpost2df['classlabel'] == 1]
# NOTE(review): original comment asked "WHY DOES RUNNING THIS CHANGE MY PC
# DATAFRAMES????" — the extraction below only reads the frames.

def _pc_xyz(frame):
    # Return PC1, PC2, PC3 (columns 0-2) of *frame* as three numpy arrays.
    return tuple(np.asarray(frame[c]) for c in (0, 1, 2))

# Cells used for classifying
xsrs, ysrs, zsrs = _pc_xyz(PCsrdfs)
xsrr, ysrr, zsrr = _pc_xyz(PCsrdfr)
# Lineages that we isolated
xsrls, ysrls, zsrls = _pc_xyz(PCsrdfls)
xsrlr, ysrlr, zsrlr = _pc_xyz(PCsrdflr)
# Pre-treat samples (all, then sensitive, then resistant)
xp, yp, zp = _pc_xyz(PCpmdf)
xps, yps, zps = _pc_xyz(PCpmdfs)
xpr, ypr, zpr = _pc_xyz(PCpmdfr)
# t=30 hr cells (all, sensitive, resistant)
xi, yi, zi = _pc_xyz(PCintdf)
xis, yis, zis = _pc_xyz(PCintdfs)
xir, yir, zir = _pc_xyz(PCintdfr)
# t=1176 hr cells (all, sensitive, resistant)
xpo1, ypo1, zpo1 = _pc_xyz(PCpost1df)
xpos1, ypos1, zpos1 = _pc_xyz(PCpost1dfs)
xpor1, ypor1, zpor1 = _pc_xyz(PCpost1dfr)
# t=1656 hr cells (all, sensitive, resistant)
xpo2, ypo2, zpo2 = _pc_xyz(PCpost2df)
xpos2, ypos2, zpos2 = _pc_xyz(PCpost2dfs)
xpor2, ypor2, zpor2 = _pc_xyz(PCpost2dfr)
#%%
# 3D scatter of the first three PCs; most layers are commented out so that
# individual populations can be toggled on per figure.
fig = plt.figure(figsize=(15,15))
ax=fig.add_subplot(111,projection='3d')
# pre-treat cells for classifying
srs = ax.scatter(xsrs, ysrs, zsrs, c='g', marker='^', alpha = 1, label = 't=0 hr labeled sensitive')
srr = ax.scatter(xsrr, ysrr, zsrr, c='r', marker='^', alpha = 1, label = 't=0 hr labeled resistant')
#pre_cells = ax.scatter(xp, yp, zp, c='b', marker='o', alpha = 0.2, label = 't=0 hr remaining')
# classified pre-treatment cells
#ps = ax.scatter(xps, yps, zps, c='olivedrab', marker='+', alpha = 0.5, label = 't=0 hr est sensitive')
#ps = ax.scatter(xpr, ypr, zpr, c='pink', marker='+', alpha = 0.5, label = 't=0 hr est resistant')
#isolated lineages
#ls_pre = ax.scatter(xsrls, ysrls, zsrls, c='lime', marker='o', alpha = 1, label = 'sensitive lineage AA170')
#lr_pre = ax.scatter(xsrlr, ysrlr, zsrlr, c='fuchsia', marker='o', alpha = 1, label = 'resistant lineage AA161')
#int_cells = ax.scatter(xi, yi, zi, c='grey', marker = 'o', alpha = 0.2, label = 't=30 hr unclassified')
#ints = ax.scatter(xis, yis, zis, c='olivedrab', marker = '+', alpha = 0.5, label = 't=30 hr est sensitive')
#intr = ax.scatter(xir, yir, zir, c='pink', marker = '+', alpha = 0.5, label = 't=30 hr est resistant')
#post_cells = ax.scatter(xpo, ypo, zpo, c='c', marker = 'o', alpha = 0.1, label = 't=1344 hr')
#os = ax.scatter(xpos1, ypos1, zpos1, c='olivedrab', marker = '+', alpha = 0.5, label = 't=1176 hr est sensitive')
por = ax.scatter(xpor1, ypor1, zpor1, c='pink', marker = '+', alpha = 0.5, label = 't=1176 hr est resistant')
#pos2 = ax.scatter(xpos2, ypos2, zpos2, c='olivedrab', marker = '+', alpha = 0.5, label = 't=1656 hr est sensitive')
#por2 = ax.scatter(xpor2, ypor2, zpor2, c='pink', marker = '+', alpha = 0.5, label = 't=1656 hr est resistant')
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
plt.legend(loc=2, prop={'size': 25})
#plt.title('Pre and post-treatment cells in PC space',fontsize= 20)
#ax.legend([sens_cells, res_cells], ['t=0 hr sens', 't=0 hr res'])
# Camera angle for the 3D view.
ax.azim = 100
ax.elev = -50
#%% PC1 vs PC2
# 2D projection onto the first two PCs (same toggling convention as above).
fig = plt.figure(figsize=(10,10))
srs = plt.scatter(xsrs, ysrs, c='g', marker='^', alpha = 1, label = 't=0 hr labeled sensitive')
srr = plt.scatter(xsrr, ysrr, c='r', marker='^', alpha = 1, label = 't=0 hr labeled resistant')
#pre_cells = plt.scatter(xp, yp, c='b', marker='o', alpha = 0.2, label = 't=0 hr remaining')
#pres = plt.scatter(xps, yps, c='olivedrab', marker='+', alpha = 0.5, label = 't=0 hr est sensitive')
#prer = plt.scatter(xpr, ypr, c='pink', marker='+', alpha = 0.5, label = 't=0 hr est resistant')
#int_cells = plt.scatter(xi, yi, c='grey', marker = 'o', alpha = 0.2, label = 't=30 hr unclassified')
#ints = plt.scatter(xis, yis, c='olivedrab', marker = '+', alpha = 0.5, label = 't=30 hr est sensitive')
#intr = plt.scatter(xir, yir, c='pink', marker = '+', alpha = 0.5, label = 't=30 hr est resistant')
#post_cells = plt.scatter(xpo, ypo, c='c', marker = 'o', alpha = 0.1, label = 't=1344 hr')
#ls_pre = plt.scatter(xsrls, ysrls, c='lime', marker='o', alpha = 1, label = 'sensitive lineage AA170')
#lr_pre = plt.scatter(xsrlr, ysrlr, c='fuchsia', marker='o', alpha = 1, label = 'resistant lineage AA161')
#pos1 = plt.scatter(xpos1, ypos1, c='olivedrab', marker = '+', alpha = 0.5, label = 't=1176 hr est sensitive')
#por1 = plt.scatter(xpor1, ypor1, c='pink', marker = '+', alpha = 0.5, label = 't=1176 hr est resistant')
pos2 = plt.scatter(xpos2, ypos2, c='olivedrab', marker = '+', alpha = 0.5, label = 't=1656 hr est sensitive')
por2 = plt.scatter(xpor2, ypor2, c='pink', marker = '+', alpha = 0.5, label = 't=1656 hr est resistant')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(loc=1, prop={'size': 15})
#%% PC1 vs PC3
fig = plt.figure(figsize=(10,10))
srs = plt.scatter(xsrs, zsrs, c='g', marker='^', alpha = 1, label = 't=0 hr labeled sensitive')
srr = plt.scatter(xsrr, zsrr, c='r', marker='^', alpha = 1, label = 't=0 hr labeled resistant')
pre_cells = plt.scatter(xp, zp, c='b', marker='o', alpha = 0.1, label = 't=0 hr remaining')
plt.xlabel('PC1')
plt.ylabel('PC3')
plt.legend(loc=1, prop={'size': 15})
#%% PC2 vs PC3
fig = plt.figure(figsize=(10,10))
srs = plt.scatter(ysrs, zsrs, c='g', marker='^', alpha = 1, label = 't=0 hr labeled sensitive')
srr = plt.scatter(ysrr, zsrr, c='r', marker='^', alpha = 1, label = 't=0 hr labeled resistant')
# BUG FIX: this panel is PC2 vs PC3, so the x coordinate for the remaining
# pre-treatment cells must be PC2 (yp), not PC1 (xp) as in the original.
pre_cells = plt.scatter(yp, zp, c='b', marker='o', alpha = 0.2, label = 't=0 hr remaining')
plt.xlabel('PC2')
plt.ylabel('PC3')
plt.legend(loc=1, prop={'size': 15})
#%% Make a consensus dataframe
# BUG FIX: the original concatenated an undefined name `PCpostdf`; include the
# two post-treatment frames that actually exist (PCpost1df, PCpost2df).
PCalldf = pd.concat([PCsrdf, PCpmdf, PCintdf, PCpost1df, PCpost2df], axis=0)
# Use consensus data frame to look at the separation between the classlabels
ydfs = PCalldf[PCalldf['classlabel']==0]
yplots= ydfs[0]
# Random horizontal "jitter" so overlapping points stay visible on the boxplot.
xs = np.random.normal(0, 0.04, size=len(yplots))
plt.figure()
bp = PCalldf.boxplot(column=0, by='classlabel', grid=False)
# BUG FIX: `plot` was called unqualified (NameError); qualify as plt.plot.
qu = plt.plot(xs, yplots, 'r.', alpha=0.2)
plt.ylabel('PC1')
plt.figure()
bp = PCalldf.boxplot(column=1, by='classlabel', grid=False)
plt.ylabel('PC2')
import Game
from Menu.HeadMenu import HeadMenu
from Menu.PlayerMenu.PlayerSelectionMenuItems import *
from Vector2 import Vector2
class PlayerSelection(HeadMenu):
    """Menu screen for choosing the number of players (2, 3 or 4)."""

    def __init__(self, resolution, background=None, logo=None, playerMenuItems=None):
        super().__init__(resolution, background, logo)
        # Default layout: one item per supported player count, stacked 70 px apart.
        self.PlayerMenuItems = playerMenuItems if playerMenuItems is not None \
            else [TwoPlayers(Vector2(0, 0)), ThreePlayers(Vector2(0, 70)), FourPlayers(Vector2(0, 140))]

    def Update(self, game: Game):
        # Advance each item, then return the clicked item's state (if any);
        # otherwise return a fresh PlayerSelection carrying the updated items.
        newPlayerMenuItems = [pmi.Update(game) for pmi in self.PlayerMenuItems]
        newstate = next((smi.GetNewState() for smi in newPlayerMenuItems if smi.IsClickedByMouse(game)), None)
        # NOTE(review): `game` is passed where __init__ expects `resolution`;
        # presumably HeadMenu tolerates this, but it looks inconsistent — confirm.
        return newstate if newstate is not None \
            else PlayerSelection(game, self.Background, self.Logo, newPlayerMenuItems)

    def Draw(self, game):
        # Draw the shared menu chrome, then each player-count item on top.
        super().Draw(game)
        for menuItem in self.PlayerMenuItems:
            menuItem.Draw(game)
|
import shutil
import tempfile
import unittest
from persistqueue import pdict
class PDictTest(unittest.TestCase):
    """Behavioural tests for persistqueue.pdict.PDict."""

    def setUp(self):
        # Fresh throwaway directory for each test's backing store.
        self.path = tempfile.mkdtemp(suffix='pdict')

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def _make(self, name='pd'):
        # Convenience constructor bound to the per-test directory.
        return pdict.PDict(self.path, name)

    def test_unsupported(self):
        store = self._make()
        store['key_a'] = 'value_a'
        # Enumeration-style APIs are deliberately unsupported.
        for unsupported in (store.keys, store.iterkeys, store.values,
                            store.itervalues, store.items, store.iteritems):
            self.assertRaises(NotImplementedError, unsupported)

        def iterate():
            for _ in store:
                pass
        self.assertRaises(NotImplementedError, iterate)

    def test_add(self):
        store = self._make()
        store['key_a'] = 'value_a'
        self.assertEqual(store['key_a'], 'value_a')
        self.assertTrue('key_a' in store)
        self.assertFalse('key_b' in store)
        self.assertEqual(store.get('key_a'), 'value_a')
        self.assertEqual(store.get('key_b'), None)
        self.assertEqual(store.get('key_b', 'absent'), 'absent')
        self.assertRaises(KeyError, lambda: store['key_b'])
        # Adding a second key must not disturb the first.
        store['key_b'] = 'value_b'
        self.assertEqual(store['key_a'], 'value_a')
        self.assertEqual(store['key_b'], 'value_b')

    def test_set(self):
        store = self._make()
        store['key_a'] = 'value_a'
        store['key_b'] = 'value_b'
        self.assertEqual(store['key_a'], 'value_a')
        self.assertEqual(store['key_b'], 'value_b')
        self.assertEqual(store.get('key_a'), 'value_a')
        self.assertEqual(store.get('key_b', 'absent'), 'value_b')
        # Overwriting a key updates it in place and leaves neighbours intact.
        store['key_a'] = 'value_aaaaaa'
        self.assertEqual(store['key_a'], 'value_aaaaaa')
        self.assertEqual(store['key_b'], 'value_b')

    def test_delete(self):
        store = self._make()
        store['key_a'] = 'value_a'
        store['key_b'] = 'value_b'
        self.assertEqual(store['key_a'], 'value_a')
        self.assertEqual(store['key_b'], 'value_b')
        del store['key_a']
        self.assertFalse('key_a' in store)
        self.assertRaises(KeyError, lambda: store['key_a'])
        self.assertEqual(store['key_b'], 'value_b')

    def test_two_dicts(self):
        # Two named dicts sharing one directory are fully independent.
        first = self._make('1')
        second = self._make('2')
        first['key_a'] = 'value_a'
        second['key_b'] = 'value_b'
        self.assertEqual(first['key_a'], 'value_a')
        self.assertEqual(second['key_b'], 'value_b')
        self.assertRaises(KeyError, lambda: first['key_b'])
        self.assertRaises(KeyError, lambda: second['key_a'])
|
import pandas as pd
import statistics as st
import plotly.figure_factory as ff
import plotly.graph_objects as go
# Load the reading scores and summarise their distribution.
df = pd.read_csv("StudentsPerformance.csv")
data = df["reading score"].tolist()

# Central tendency and spread.
mean = st.mean(data)
median = st.median(data)
mode = st.mode(data)
std_deviation = st.stdev(data)

# Interval bounds at 1, 2 and 3 standard deviations around the mean.
first_std_deviation_start, first_std_deviation_end = mean - std_deviation, mean + std_deviation
second_std_deviation_start, second_std_deviation_end = mean - 2 * std_deviation, mean + 2 * std_deviation
third_std_deviation_start, third_std_deviation_end = mean - 3 * std_deviation, mean + 3 * std_deviation

fig = ff.create_distplot([data], ["reading score"], show_hist=False)
# Vertical reference lines: the mean, then the 1- and 2-sigma bounds
# (same trace order as drawing them one by one).
reference_lines = [
    (mean, "MEAN"),
    (first_std_deviation_start, "STANDARD DEVIATION 1"),
    (first_std_deviation_end, "STANDARD DEVIATION 1"),
    (second_std_deviation_start, "STANDARD DEVIATION 2"),
    (second_std_deviation_end, "STANDARD DEVIATION 2"),
]
for position, label in reference_lines:
    fig.add_trace(go.Scatter(x=[position, position], y=[0, 0.17], mode="lines", name=label))

# Scores falling strictly inside each sigma interval.
thin_1_std_deviation = [result for result in data if first_std_deviation_start < result < first_std_deviation_end]
thin_2_std_deviation = [result for result in data if second_std_deviation_start < result < second_std_deviation_end]
thin_3_std_deviation = [result for result in data if third_std_deviation_start < result < third_std_deviation_end]

print("mean of this data is {}".format(mean))
print("median of this data is {}".format(median))
print("mode of this data is {}".format(mode))
print("standard deviation of this data is {}".format(std_deviation))
print("{}% of data lies within 1 standard deviation ".format(len(thin_1_std_deviation)*100.0/len(data)))
print("{}% of data lies within 2 standard deviation ".format(len(thin_2_std_deviation)*100.0/len(data)))
print("{}% of data lies within 3 standard deviation ".format(len(thin_3_std_deviation)*100.0/len(data)))
fig.show()
# If this code does not run locally, try running it in a Colab notebook.
import dash_bootstrap_components as dbc
from dash import html
# A self-contained Bootstrap card: image on top, then a body with title,
# example text, and a call-to-action button.
card = dbc.Card(
    [
        dbc.CardImg(src="/static/images/placeholder286x180.png", top=True),
        dbc.CardBody(
            [
                html.H4("Card title", className="card-title"),
                html.P(
                    "Some quick example text to build on the card title and "
                    "make up the bulk of the card's content.",
                    className="card-text",
                ),
                dbc.Button("Go somewhere", color="primary"),
            ]
        ),
    ],
    # Fixed card width; the image and body scale to fit it.
    style={"width": "18rem"},
)
|
#!bin/python3
def findInstanceCount(arr, element):
    """Return how many times *element* occurs in the list *arr*.

    Delegates to list.count, which performs the same linear scan as the
    original hand-rolled loop (and avoids shadowing the built-in `sum`).
    Returns 0 for an empty list or an absent element.
    """
    return arr.count(element)
def main():
    """Read N items then Q queries from stdin; print each query's count.

    Input format, one token per line: N, then N items, then Q, then Q queries.
    """
    n = int(input())
    arr = [input() for _ in range(n)]
    q = int(input())
    search = [input() for _ in range(q)]
    for term in search:
        print(findInstanceCount(arr, term))
    return None


# Guard the entry point so importing this module does not block on stdin.
if __name__ == "__main__":
    main()
|
# Unit price and units in stock for each product.
prices = {
    'banana': 4,
    'apple': 2,
    'orange': 1.5,
    'pear': 3
}
stock = {
    'banana': 4,
    'apple': 5,
    'orange': 6,
    'pear': 7
}
# Print each product with its price and its own stock level.
for price in prices:
    print(f'{price}')
    print(f'Price: {prices[price]}')
    # BUG FIX: the original printed the entire stock dict for every product;
    # show only this product's stock count.
    print(f'Stock: {stock[price]}')
# Total inventory value: sum over products of price * stock.
total = 0
for price in prices:
    inventory = prices[price] * stock[price]
    total += inventory
print(f'Total: {total}')
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 16:50:35 2017
@author: zx621293
"""
#Cumulative Variance explained
var= pca.explained_variance_ratio_
# Cumulative percentage of variance captured by the first k components.
var1=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)
fig = plt.figure(figsize=(20, 10))
plt.suptitle('Cumulative Variance contribution for linear PCA on mice data.',fontsize=24)
# Left panel: per-component variance ratio.
fig.add_subplot(1,2,1)
plt.scatter(np.linspace(1,len(var),len(var)),var,color='red')
plt.xlabel('#th principal components',fontsize=14)
plt.title('Proportion of covariance explained',fontsize=14)
# Right panel: cumulative variance curve.
fig.add_subplot(1,2,2)
plt.plot(var1)
plt.title('Accumulative proportion of covariance explained',fontsize=14)
plt.xlabel('first # of principal components',fontsize=14)
plt.ylabel('percentage (%)',fontsize=14)
plt.axis()
# BUG FIX: the original line read `plt.show()}` — the stray closing brace is a
# Python syntax error and prevented the whole script from running.
plt.show()
|
import types
import time
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.db.models import Count
from django.contrib.flatpages.models import FlatPage
from django.contrib.auth.decorators import login_required
from models import CustomUser, Constituency, RegistrationProfile
from forms import UserForm
from utils import addToQueryString
import settings
import geo
def render_with_context(request, template, context, **kw):
    """Render *template* with *context*, injecting a RequestContext.

    Extra keyword arguments are forwarded unchanged to render_to_response.
    """
    kw['context_instance'] = RequestContext(request)
    return render_to_response(template, context, **kw)
def home(request):
    """Landing page: signup form plus overall volunteer/constituency progress."""
    context = {}
    year = settings.CONSTITUENCY_YEAR
    constituencies = Constituency.objects.filter(year=year)
    # Number of distinct constituencies that have at least one volunteer.
    count = CustomUser.objects\
        .aggregate(Count('constituencies',
                         distinct=True)).values()[0]
    total = constituencies.count()
    context['volunteers'] = CustomUser.objects.count()
    context['total'] = total
    context['count'] = count
    # Guard against division by zero when no constituencies exist for the year.
    if total:
        context['percent_complete'] = int(float(count)/total*100)
    else:
        context['percent_complete'] = 0
    if request.method == "POST":
        form = UserForm(request.POST, request.FILES)
        if form.is_valid():
            profile = form.save()
            # NOTE(review): authenticate() is called with only an email and no
            # password — presumably a custom auth backend keyed on email; confirm.
            user = authenticate(username=profile.user.email)
            login(request, user)
            return HttpResponseRedirect("/")
        else:
            # Re-render with the bound form so validation errors are shown.
            context['form'] = form
    else:
        context['form'] = UserForm()
    return render_with_context(request,
                               'home.html',
                               context)
@login_required
def delete_constituency(request, slug):
    """Detach the constituency identified by *slug* from the current user."""
    constituency = Constituency.objects.get(slug=slug)
    request.user.constituencies.remove(constituency)
    request.user.save()
    return HttpResponseRedirect(reverse('add_constituency'))
@login_required
def add_constituency(request):
    """Search for constituencies and attach selected ones to the current user.

    GET with ?q= searches by placename/postcode; POST with add/add_c attaches
    the selected constituencies. Otherwise suggests neighbours of the user's
    first constituency.
    """
    my_constituencies = request.user.current_constituencies.all()
    if len(my_constituencies) > 0:
        # Suggest neighbouring constituencies the user doesn't already have.
        neighbours = Constituency.neighbours(my_constituencies[0])
        neighbours = neighbours.exclude(pk__in=my_constituencies)
    else:
        neighbours = []
    context = {'my_constituencies': my_constituencies,
               'constituencies': list(neighbours)}
    # searching for a constituency by postcode or placename
    # (has_key / types.ListType are legacy idioms kept for this codebase's
    # Python/Django version.)
    if request.method == "GET":
        if request.GET.has_key("q"):
            place = request.GET["q"]
            context['constituencies'] = Constituency.objects.filter(name__icontains=place)
            # Also try resolving the query as a location (e.g. postcode).
            const = geo.constituency(place)
            if const != None:
                context['constituencies'] = Constituency.objects.filter(name=const) | context['constituencies']
            if context['constituencies'].count() == 0:
                context['search_fail'] = "Alas, we can't find '%s'" % place
    # adding another constituency
    if request.method == "POST":
        if request.POST.has_key('add') and request.POST.has_key('add_c'):
            add_c = request.POST.getlist('add_c')
            # Normalise a single value to a list before filtering.
            if type(add_c) != types.ListType:
                add_c = [add_c]
            constituencies = Constituency.objects.all().filter(slug__in=add_c)
            # Skip ones the user already has.
            constituencies = constituencies.exclude(pk__in=my_constituencies)
            request.user.constituencies.add(*constituencies.all())
            request.user.save()
            return HttpResponseRedirect("/add_constituency/")
    return render_with_context(request,
                               'add_constituency.html',
                               context)
def do_login(request, key):
    """Log a user in from an emailed login key, then redirect home.

    BUG FIX: previously this view returned None when the key did not match
    a profile; a Django view must always return an HttpResponse, so an
    invalid key caused a server error instead of a plain redirect.
    """
    profile = RegistrationProfile.objects.get_user(key)
    if profile:
        user = authenticate(username=profile.user.email)
        login(request, user)
    # Redirect home whether or not the key was valid.
    return HttpResponseRedirect("/")
def activate_user(request, key):
    """Confirm an email address via its activation key and log the user in.

    The outcome message is passed to the front page as query-string params.
    """
    profile = RegistrationProfile.objects.activate_user(key)
    error = notice = ""
    if not profile:
        error = "Sorry, that key was invalid"
    else:
        notice = "Thanks, you've successfully confirmed your email"
        # NOTE(review): authenticates on email alone — custom backend assumed.
        user = authenticate(username=profile.user.email)
        login(request, user)
    context = {'error': error,
               'notice': notice}
    return HttpResponseRedirect(addToQueryString("/", context))
def user(request, id):
    """User profile page; details are only exposed to the owner.

    For any other viewer the template renders without a 'user' entry.
    """
    context = {}
    user = get_object_or_404(CustomUser, pk=id)
    if user == request.user:
        context['user'] = user
    return render_with_context(request,
                               'user.html',
                               context)
def constituency(request, slug, year=None):
    """Constituency detail page, optionally pinned to a boundary year."""
    if year:
        # URL supplies a bare year; the DB stores a full date.
        year = "%s-01-01" % year
    else:
        year = settings.CONSTITUENCY_YEAR
    try:
        constituency = Constituency.objects.all()\
            .filter(slug=slug, year=year).get()
    except Constituency.DoesNotExist:
        raise Http404
    context = {'constituency':constituency}
    return render_with_context(request,
                               'constituency.html',
                               context)
def logout_view(request):
    """Log the current user out and send them to the front page."""
    logout(request)
    return HttpResponseRedirect("/")
|
# this is a psuedo code for the coloring project
import tensorflow as tf
import numpy as np
from glob import glob
import math
import sys
import random
#import the necessary packages above I think should be useful
'''
To present the nueral network, I want to create an object with methods and attributes.
The attributes I think could record the values for the filter and similar weights,
and the method will be used to update the weights.
'''
#define a batch normalization object
class batch_norm(object):
    """Callable batch-normalization layer (TF1 style).

    Each instance owns a named variable scope; calling the instance applies
    tf.contrib.layers.batch_norm under that scope, so variables are created
    on the first call and shared afterwards.
    """
    def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
        # The scope here only namespaces attribute setup; the actual
        # variables are created when __call__ runs.
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name
    def __call__(self, x, train=True):
        # NOTE(review): `train` is accepted but not forwarded to batch_norm
        # (is_training stays at its default) — confirm that is intended.
        return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, scope=self.name)
#set a global variable to name each batch normalization object at each convolutional layer
batchnorm_count = 0
def bnreset():
global batchnorm_count
batchnorm_count = 0
# I'm learning from others' codes; I don't quite get why people individually divide each batch normalization and define them as objects with different names; Is this the only way to make them trainable?
def bn(x):
    """Apply a uniquely-named batch_norm layer to `x`.

    Every call constructs a fresh batch_norm instance named bn0, bn1, ...
    so successive layers get distinct (and therefore trainable) variable
    scopes.
    """
    global batchnorm_count
    layer = batch_norm(name="bn%d" % batchnorm_count)
    batchnorm_count += 1
    return layer(x)
#a modified linear relu function
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU: elementwise max(x, leak * x).

    `name` is accepted for API symmetry with other layers but is unused.
    """
    return tf.maximum(x, x * leak)
'''
function for (de)convolution layers; note that I may as well just import conv2d from keras which is already defined. But I think the default relu activation is max(0, x) there; I checked
other coloring nueral network project and they apply relu function as max(x, 0.2x) as above. I don't quite understand but just follow what they did. Will learn about it more later
'''
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """Strided 2-D convolution with bias.

    Args:
      input_: 4-D tensor [batch, height, width, channels].
      output_dim: number of output channels.
      k_h, k_w: kernel height / width.
      d_h, d_w: strides; the default of 2 halves the spatial size.
      stddev: stddev for the truncated-normal weight initializer.
      name: variable scope holding the layer's weights.
    """
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # reshape is value-preserving; it re-asserts the static shape after bias_add.
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    """Transposed (fractionally-strided) 2-D convolution with bias.

    Args:
      input_: 4-D tensor [batch, height, width, channels].
      output_shape: full output shape [batch, height, width, out_channels].
      k_h, k_w, d_h, d_w, stddev: kernel size, strides, init stddev.
      with_w: if True, also return the weight and bias variables.
    """
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        # reshape re-asserts the static shape after bias_add.
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if with_w:
            return deconv, w, biases
        else:
            return deconv
class coloring_machine():
    """pix2pix-style conditional GAN that colors line art given color hints.

    Fixes over the previous revision (all were NameError/AttributeError or
    TF API bugs that would crash at graph-construction time):
      * `self.generated_images` -> `self.generated_image` (typo).
      * discriminator keyword was `realness=` (nonexistent); it is `reuse=`,
        and the second call must reuse the first call's variables.
      * the discriminator returns (prob, logits); the logits are now unpacked.
      * `sigmoid_cross_entropy_with_logits` is called with its required
        `labels=`/`logits=` keyword arguments.
      * optimizers referenced undefined `self.d_loss`/`self.g_loss`; the
        attributes are `disc_loss`/`gen_loss`.
      * generator referenced undefined `self.output_size`/`self.output_colors`.
      * `tf.concat(axis, values)` (removed argument order) -> `tf.concat(values, axis)`,
        consistent with the calls in __init__.
    """
    def __init__(self, input_image_size=256, batchsize=5):
        self.batch_size = batchsize
        self.image_size = input_image_size
        self.output_image_size = input_image_size
        self.line_colordim = 1   # greyscale line art
        self.color_colordim = 3  # RGB hints / targets
        # Base filter counts for generator and discriminator conv layers.
        self.gf_dim = 64
        self.df_dim = 64
        # Batch-norm layers for the discriminator.
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        self.d_bn3 = batch_norm(name='d_bn3')
        # Placeholders for one batch of line art, color hints and ground truth.
        self.line_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.line_colordim])
        self.color_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.color_colordim])
        self.real_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.color_colordim])
        # Conditioning input: line art + hints concatenated on the channel axis.
        combined_preimage = tf.concat([self.line_images, self.color_images], 3)
        self.generated_image = self.generator(combined_preimage)
        # Pair the conditioning input with real / generated output for the critic.
        self.real_coloredimg = tf.concat([combined_preimage, self.real_images], 3)
        self.fake_coloredimg = tf.concat([combined_preimage, self.generated_image], 3)
        # First call creates the discriminator variables; second call reuses them.
        self.disc_true_prob, self.disc_true_logits = self.discriminator(self.real_coloredimg, reuse=False)
        self.disc_fake_prob, self.disc_fake_logits = self.discriminator(self.fake_coloredimg, reuse=True)
        # Standard GAN losses: real -> label 1, fake -> label 0.
        self.disc_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.disc_true_logits,
                labels=tf.ones_like(self.disc_true_logits)))
        self.disc_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.disc_fake_logits,
                labels=tf.zeros_like(self.disc_fake_logits)))
        self.disc_loss = self.disc_loss_real + self.disc_loss_fake
        # Generator wants the fake batch classified as real (label 1).
        self.gen_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.disc_fake_logits,
                labels=tf.ones_like(self.disc_fake_logits)))
        # Split trainables by the d_/g_ naming convention used in the scopes.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.d_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(self.disc_loss, var_list=self.d_vars)
        self.g_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(self.gen_loss, var_list=self.g_vars)

    def generator(self, img_in):
        """U-net encoder/decoder with skip connections; returns tanh RGB."""
        s = self.output_image_size
        # Spatial sizes at each encoder/decoder level.
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # image is (256 x 256 x input_c_dim)
        e1 = conv2d(img_in, self.gf_dim, name='g_e1_conv')                  # (128 x 128 x gf_dim)
        e2 = bn(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv'))        # (64 x 64 x gf_dim*2)
        e3 = bn(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv'))        # (32 x 32 x gf_dim*4)
        e4 = bn(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv'))        # (16 x 16 x gf_dim*8)
        e5 = bn(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv'))        # (8 x 8 x gf_dim*8)
        self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(e5), [self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True)
        d4 = bn(self.d4)
        d4 = tf.concat([d4, e4], 3)
        # d4 is (16 x 16 x self.gf_dim*8*2)
        self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4), [self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True)
        d5 = bn(self.d5)
        d5 = tf.concat([d5, e3], 3)
        # d5 is (32 x 32 x self.gf_dim*4*2)
        self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5), [self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True)
        d6 = bn(self.d6)
        d6 = tf.concat([d6, e2], 3)
        # d6 is (64 x 64 x self.gf_dim*2*2)
        self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6), [self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True)
        d7 = bn(self.d7)
        d7 = tf.concat([d7, e1], 3)
        # d7 is (128 x 128 x self.gf_dim*1*2)
        self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7), [self.batch_size, s, s, self.color_colordim], name='g_d8', with_w=True)
        # d8 is (256 x 256 x 3)
        return tf.nn.tanh(self.d8)

    def discriminator(self, image, y=None, reuse=False):
        """Convolutional critic.

        Args:
          image: batch of conditioning input concatenated with a colored image.
          y: unused, kept for interface compatibility.
          reuse: share variables with a previous call when True.
        Returns:
          (sigmoid probabilities, raw logits), one value per example.
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse == False
        h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))                           # (128 x 128 x df_dim)
        h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))                # (64 x 64 x df_dim*2)
        h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))                # (32 x 32 x df_dim*4)
        h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, d_h=1, d_w=1, name='d_h3_conv')))  # (16 x 16 x df_dim*8)
        # NOTE(review): `linear` (a fully-connected layer helper, as in the
        # usual DCGAN `ops` module) is not defined in this file — confirm it
        # is imported wherever this class is used.
        h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
        return tf.nn.sigmoid(h4), h4
|
def fib(n):
    """Return the Fibonacci numbers strictly less than `n`, starting at 1.

    >>> fib(5)
    [1, 1, 2, 3]
    """
    series = []
    prev, curr = 0, 1
    while curr < n:
        series.append(curr)
        prev, curr = curr, prev + curr
    return series
print(fib(5)) |
import connexion
from connexion.resolver import RestyResolver
from flask_sqlalchemy import SQLAlchemy
# Uncomment to see import-time debug logging:
#import logging
#logging.basicConfig(level=logging.DEBUG)
# Connexion app whose routes come from the OpenAPI spec in specs/;
# handler functions are resolved by name from betterapis.controllers.
api = connexion.FlaskApp(__name__, specification_dir='specs/')
# SQLite database file in /tmp; event-tracking disabled to avoid overhead.
api.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/betterapis.db'
api.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(api.app)
api.add_api('betterapis.yaml', resolver=RestyResolver('betterapis.controllers'))
|
from time import sleep
from serial import Serial
import serial.tools.list_ports
import helpers
import dds_data
# Serial port where the AD9910 DDS controller board is expected.
ad9910_address = 'COM10'
# addresses = [cp[0] for cp in serial.tools.list_ports.comports()]
# for port in addresses:
# print port
# if 'COM4' in addresses:
# ser = Serial('COM4', 4800, timeout=2)
# ser.flush()
# ser.write('cxn?\n')
# ser.flush()
# reply1 = ser.readline()
class AD9910_Server(object):
    """Discovers an AD9910 DDS box on a serial port and compiles/writes
    its frequency program and profile registers.  (Python 2 code.)
    """
    # Map of COM-port address -> pyserial Serial object.
    # NOTE(review): class attribute, shared by all instances — confirm
    # only one server instance is ever created.
    interfaces = {}
    def refresh_available_interfaces(self):
        """Re-scan COM ports: drop dead entries, register the DDS port."""
        addresses = [cp[0] for cp in serial.tools.list_ports.comports()]
        for address in addresses:
            if address in self.interfaces.keys():
                # Probe known ports; drop them if the handle has gone bad.
                try:
                    self.interfaces[address].isOpen()
                except:
                    print '{} unavailable'.format(address)
                    del self.interfaces[address]
            else:
                try:
                    if address == ad9910_address:
                        # Open briefly to prove the port works, then close;
                        # verify_interface() reopens it later.
                        ser = Serial(address, 4800, timeout=2)
                        ser.close()
                        self.interfaces[address] = ser
                        print '{} available'.format(address)
                except:
                    pass
    def verify_interface(self):
        """Ask each candidate port to identify itself; keep only the AD9910.

        In Python 2 .items() returns a list, so deleting entries while
        looping is safe here.
        """
        for (address, ser) in self.interfaces.items():
            ser.open()
            ser.write('cxn?\n')
            ser.flush()
            response = ser.readline()
            if response == "ad9910\n":
                print "{} verified as ad9910".format(address)
            else:
                ser.close()
                del self.interfaces[address]
    def createProgramString(self, line, data_type, byte_string):
        """Format one program line: "0xLL,0xTT,<bytes>\\n"."""
        addr = "0x{0:02X},".format(line)
        # data_type may arrive as an int or as a symbolic name to look up.
        try:
            data_type = int(data_type)
        except ValueError:
            data_type = int(helpers.tx_types_dictionary[data_type])
        data_type = "0x{0:02X},".format(data_type)
        return addr + data_type + byte_string + "\n"
    def createProfileString(self, profile, byte_string):
        """Format one profile line; profile registers live at address 12+."""
        data_type = "0x00,"
        addr = "0x{0:02X},".format(profile + 12)
        return addr + data_type + byte_string + "\n"
    def compileProgramStrings(self, program_array):
        """Turn the program dicts into the serial command string."""
        length = len(program_array)
        if length > 12: # truncate program to 12 if longer
            length = 12
        program = ""
        for i in range(0, length):
            line = program_array[i]
            line_str = ""
            if line['mode'] == 'single':
                # Fixed tone: amplitude + phase + frequency tuning words.
                ampl_str = helpers.calcAMPL(line['ampl'])
                pow_str = helpers.calcPOW(line['phase'])
                ftw_str = helpers.calcFTW(line['freq'])
                line_str = self.createProgramString(i, 'single', ampl_str + pow_str + ftw_str) + "\n"
            elif line['mode'] == 'sweep':
                if 'nsteps' in line:
                    sweep_dict = helpers.calcRampParameters(line['start'], line['stop'], line['dt'], line['nsteps'])
                else:
                    sweep_dict = helpers.calcRampParameters(line['start'], line['stop'], line['dt'])
                # Create ramp limits string (DDS reg 0x0B)
                limits_string = helpers.calcFTW(sweep_dict['upper']) + helpers.calcFTW(sweep_dict['lower'])
                line_str = self.createProgramString(i, 'drLimits', limits_string) + "\n"
                # Create frequency step size string (DDS reg 0x0C)
                steps_string = helpers.calcFTW(sweep_dict['n_step']) + helpers.calcFTW(sweep_dict['p_step'])
                line_str += self.createProgramString(i, 'drStepSize', steps_string) + "\n"
                # Create ramp rate string (DDS reg 0x0D)
                ramp_rate_str = helpers.calcStepInterval(sweep_dict['n_interval']) + helpers.calcStepInterval(sweep_dict['p_interval'])
                line_str += self.createProgramString(i, 'drRate', ramp_rate_str) + "\n"
                # Create instruction string to tell the DDS whether the ramp should have a positive or negative slope
                if sweep_dict['slope'] == 1:
                    line_str += self.createProgramString(i, 'sweepInvert', "0x00,") + "\n"
                else:
                    line_str += self.createProgramString(i, 'sweepInvert', "0x01,") + "\n"
            program += line_str
        return program
    def compileProfileStrings(self, profiles_array):
        """Turn up to 8 profile dicts into the serial command string."""
        length = len(profiles_array)
        # If too many profiles, just keep first 8
        if length > 8:
            length = 8
        profile_string = ""
        for i in range(0, length):
            line = profiles_array[i]
            ftw_str = helpers.calcFTW(line['freq'])
            ampl_str = helpers.calcAMPL(line['ampl'])
            pow_str = helpers.calcPOW(line['phase'])
            line_str = self.createProfileString(line['profile'], ampl_str + pow_str + ftw_str) + "\n"
            profile_string += line_str
        return profile_string
    def writeProgramAndProfiles(self, ser_interface, program_string, profile_string):
        """Push the compiled strings to the device; "Done" ends the upload."""
        ser_interface.write(program_string)
        ser_interface.flush()
        ser_interface.write(profile_string)
        ser_interface.flush()
        ser_interface.write("Done\n")
    def getEcho(self, ser_interface, program_array):
        """Read back the device's echo of every uploaded line.

        Expected line count: 2 per profile (8 profiles) plus 2 per 'single'
        program line and 4 per 'sweep' line.
        """
        num_lines = 2 * 8
        length = len(program_array)
        if length > 12:
            length = 12
        for i in range(0, length):
            line = program_array[i]
            if line['mode'] == 'single':
                num_lines += 2
            elif line['mode'] == 'sweep':
                num_lines += 4
        echo = []
        for i in range(0, num_lines):
            echo.append(ser_interface.readline())
        echo_str = ""
        for s in echo:
            echo_str += s
        return echo_str
# Script entry: find and verify the DDS port, compile the program/profile
# strings from dds_data, upload them, and print the device's echo.
server = AD9910_Server()
server.refresh_available_interfaces()
server.verify_interface()
prog = server.compileProgramStrings(dds_data.program)
# print prog
prof = server.compileProfileStrings(dds_data.profiles)
# print prof
server.writeProgramAndProfiles(server.interfaces[ad9910_address], prog, prof)
print server.getEcho(server.interfaces[ad9910_address], dds_data.program)
|
from django.db import models
from django.conf import settings
from mainapp.models import Product
from datetime import timedelta
#for sitemap
from django.urls import reverse
from django.contrib.sitemaps import ping_google
class Review(models.Model):
    """A product review; replies point at their parent via previous_review."""
    review_id = models.AutoField(primary_key=True)
    # Review author; row survives (user set NULL) if the account disappears.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.DO_NOTHING,
        null=True, blank=True
    )
    review_content = models.CharField(max_length=3000, verbose_name="Review")
    # Parent review for replies; deleting the parent cascades to replies.
    previous_review = models.ForeignKey('self', verbose_name="Previous Review",
        on_delete=models.CASCADE,
        null=True, blank=True
    )
    # NULL product means this row is a reply rather than a top-level review.
    product = models.ForeignKey(Product, on_delete=models.DO_NOTHING,
        null=True, blank=True
    )
    update_date = models.DateTimeField(verbose_name='Update Date', auto_now=True)
    def __str__(self):
        return str(self.review_content)
    @property
    def content(self):
        # Truncated preview for list displays.
        return self.review_content[:50]
    @property
    def review_by(self):
        # NOTE(review): raises AttributeError if user is NULL — confirm
        # callers only use this on reviews with an author.
        return self.user.username
    @property
    def updated_on(self):
        # Shifts the stored time by 331 minutes — presumably a fixed
        # UTC->local display offset; confirm against the site's timezone.
        return str('')+str((self.update_date + timedelta(minutes=331)).strftime("%Y-%m-%d %H:%M:%S"))
    @property
    def edit(self):
        return str('edit')
    #for sitemap
    def get_absolute_url(self):
        # Top-level reviews link to the product page; replies to the thread.
        return reverse('review', kwargs={'product_id':self.product.product_id}) if self.product else reverse('reply', kwargs={'review_id':self.review_id})
    def save(self, force_insert=False, force_update=False):
        """Save, then best-effort ping Google's sitemap endpoint."""
        super().save(force_insert, force_update)
        try:
            ping_google()
        except Exception:
            # Catch broadly: ping_google can raise a variety of
            # HTTP-related exceptions, and failing to ping is harmless.
            pass
|
"""
author songjie
"""
from flask_login import current_user
from tool.lib.function import get_date_time
class TradeInfo(object):
    """Summarises a collection of goods records as displayable trade rows.

    Attributes:
        total:  number of goods records supplied.
        trades: one display dict per record (nickname, date, id).
    """
    def __init__(self, goods):
        self.total = 0
        self.trades = []
        self.__parse(goods)

    def __parse(self, goods):
        # Record the count and convert every item to its display form.
        self.total = len(goods)
        self.trades = [self.__map_to_trade(item) for item in goods]

    def __map_to_trade(self, single):
        row = {
            'nickname': single.user.nickname,
            'time': get_date_time(single.create_time, '%Y-%m-%d'),
            'id': single.id,
        }
        return row

    @staticmethod
    def map_to_trade(single):
        """Richer mapping (includes the user object, isbn and viewer)."""
        row = {
            'user': single.user,
            'time': get_date_time(single.create_time, '%Y-%m-%d'),
            'id': single.id,
            'isbn': single.isbn,
            'current_user': current_user,
        }
        return row
|
import csv
import os
import re
from datetime import date
import plotly.graph_objects as go
import questionary
from pytrends.request import TrendReq
from tabulate import tabulate
from setup import setup
class Student:
    """
    Class for Student Mode.
    Used as base class for `Teacher` Class
    """
    def __init__(self) -> None:
        # CSV of the catalogue: rows are [isbn, name, pages, author, genre].
        self.bookPath = "data/MOCK_DATA.csv"
        # CSV of current loans: rows are [book, admission_no, name, date].
        self.borrowPath = "data/borrowed.csv"
        # Menu entries; run() dispatches on their list positions.
        self.baseActions = [
            "Borrow A Book",
            "Return A Book",
            "See All Books",
            "Search By Author",
            "Search By Genre",
            "See Trends for a Particular Book",
            "EXIT",
        ]
    def run(self) -> None:
        """Run method for Student Class"""
        TERM = os.get_terminal_size()
        print("-" * TERM.columns)
        print("STUDENT Mode".center(TERM.columns))
        print("-" * TERM.columns)
        # Interactive menu loop; "EXIT" (and anything unmatched) terminates.
        while True:
            action = questionary.select(
                "Choose an action:",
                choices=self.baseActions,
                default=self.baseActions[6],
            ).ask()
            if action == self.baseActions[0]:
                self.borrowBook()
            elif action == self.baseActions[1]:
                self.returnBook()
            elif action == self.baseActions[2]:
                self.seeAllBooks()
            elif action == self.baseActions[3]:
                self.searchByAuthor()
            elif action == self.baseActions[4]:
                self.searchByGenre()
            elif action == self.baseActions[5]:
                self.plotting()
            else:
                exit()
    def borrowBook(self):
        """Borrow Method.
        Provides borrowing functionality for both Student & Teacher Class."""
        # Collect admission numbers that already have an open loan.
        with open(self.borrowPath, "r", newline="") as file_borrow_01:
            reader = csv.reader(file_borrow_01)
            data_borrow = list(reader)
            cur_ids = []
            for row in data_borrow:
                if len(row) == 4:
                    cur_ids.append(row[1])
            file_borrow_01.close()
        rollNo = questionary.text(
            "Enter your Admission No:",
            validate=lambda x: len(x) == 4 and str(x).isdigit() == True,
        ).ask()
        # One open loan per admission number.
        if rollNo in cur_ids:
            print("You have already borrowed a book.")
            print("First return that book!")
            return
        name = questionary.text(
            "Enter your name: ",
        ).ask()
        print("Name & Admin No. accepted!")
        with open(self.bookPath, "r", newline="") as fileBooks:
            data_books = []
            for row in csv.reader(fileBooks):
                if len(row) != 0:
                    data_books.append(row)
            cur_books = [row[1] for row in data_books]
            today = str(date.today())
            fileBooks.close()
        book = questionary.autocomplete(
            "Choose a book",
            choices=cur_books,
            validate=lambda b: b in cur_books,
        ).ask()
        # New loan record: [book, admission_no, name, borrow_date].
        toBeIns = [book, rollNo, name, today]
        data_borrow = [
            row
            for row in csv.reader(open(self.borrowPath, "r", newline=""))
            if len(row) != 0
        ]
        data_borrow.append(toBeIns)
        with open(self.borrowPath, "w", newline="") as file:
            writer = csv.writer(file)
            writer.writerows(data_borrow)
            print("Book borrowed successfully!")
            file.close()
    def returnBook(self):
        """Return Method.
        Provides return functionality for both Student & Teacher Class."""
        with open(self.borrowPath, "r") as fileObject:
            reader = csv.reader(fileObject)
            data = [row for row in reader if len(row) == 4]
            fileObject.close()
        admNos = [row[1] for row in data]
        admNo = questionary.text(
            "Enter your adm No:",
            validate=lambda x: x in admNos,
        ).ask()
        dateBorrowed = date(2020, 1, 1)
        # NOTE(review): removing from `data` while iterating it is fragile;
        # it works here only because one matching row is expected.
        for row in data:
            if row[1] == admNo:
                dateBorrowed = row[3]
                comps = dateBorrowed.split("-")
                dateBorrowed = date(int(comps[0]), int(comps[1]), int(comps[2]))
                data.remove(row)
        today = date.today()
        daysBorrowed: int = (today - dateBorrowed).days
        # Fine schedule: flat Rs.100 within a week, Rs.35/day up to 50 days,
        # flat Rs.500 beyond that.
        if daysBorrowed <= 7:
            print("Amount to be Paid is Rs. 100")
        elif 7 < daysBorrowed < 50:
            print("-" * os.get_terminal_size().columns)
            print(
                f"Amount to Be Paid: {daysBorrowed*35}".center(
                    os.get_terminal_size().columns
                )
            )
            print("-" * os.get_terminal_size().columns)
        else:
            print("You have exceeded the number of days to return the book!")
            print("You must pay a fine of Rs. 500")
        # Rewrite the loan file without the returned entry.
        with open(self.borrowPath, "w", newline="") as fileObject:
            writer = csv.writer(fileObject)
            writer.writerows(data)
            fileObject.close()
    def seeAllBooks(self):
        """See All Method.
        Allows either students or teachers to see All books present."""
        with open(self.bookPath, "r") as fileObject:
            reader = csv.reader(fileObject)
            books = list()
            for row in reader:
                if len(row) == 5:
                    books.append(list(row))
        print(
            tabulate(
                books,
                headers=[
                    "ISBN",
                    "Book Name",
                    "No. of Pages",
                    "Author",
                    "Category",
                ],
                tablefmt="fancy_grid",
            )
        )
    def plotting(self):
        """
        Method for plotting a graph based on
        interest in book over a period of time.
        """
        with open(self.bookPath, "r") as plottingFile:
            data = [row for row in csv.reader(plottingFile) if len(row) != 0]
            currentBooks = [row[1] for row in data if len(row) != 0]
            plottingFile.close()
        book = questionary.autocomplete(
            "Enter the name of the book:",
            choices=currentBooks,
            validate=lambda x: x in currentBooks,
        )
        bookVal = book.ask()
        # Google-Trends timeframe codes keyed by the menu label.
        timeFrames = {
            "1 Month": "today 1-m",
            "3 Months": "today 3-m",
            "12 Months": "today 12-m",
        }
        tmf = questionary.select(
            "Choose the timeframe:", choices=list(timeFrames.keys())
        ).ask()
        tmfValue = timeFrames[tmf]
        pytrend = TrendReq()
        try:
            # Network call to Google Trends.
            pytrend.build_payload(kw_list=[bookVal], timeframe=tmfValue)
        except Exception as e:
            print("Error in building data for {}".format(bookVal))
            return
        else:
            print("Data successfully built!")
        df = pytrend.interest_over_time()
        # Drop the metadata column; only interest values are plotted.
        df.drop(labels="isPartial", axis=1, inplace=True)
        try:
            data = go.Scatter(
                x=df.index, y=df[bookVal], name=bookVal, mode="lines+markers"
            )
        except Exception as e:
            print("Error in building figure!")
            print(e)
            return
        else:
            print("Figure built successfully!!")
        fig = go.Figure(data=data)
        fig.show()
    def searchByAuthor(self):
        """List all catalogue rows whose author matches the chosen name."""
        data = None
        with open(self.bookPath, "r") as fileObject:
            data = list(csv.reader(fileObject))
        # NOTE(review): unlike other methods, rows are not length-filtered
        # here — an empty row would raise IndexError on _row[3].
        authors = []
        for _row in data:
            if _row[3] not in authors:
                authors.append(_row[3])
        authorChoose = questionary.autocomplete(
            "Enter the author's name:",
            choices=authors,
            validate=lambda x: x in authors,
        ).ask()
        authorData = []
        for _row in data:
            if _row[3] == authorChoose:
                authorData.append(_row)
        print(
            tabulate(
                authorData,
                headers=[
                    "ISBN",
                    "BOOK NAME",
                    "PAGES",
                    "AUTHOR NAME",
                    "GENRE",
                ],
                tablefmt="fancy_grid",
            )
        )
    def searchByGenre(self):
        """List all catalogue rows in the chosen genre."""
        data = None
        with open(self.bookPath, "r") as fileObject:
            data = list(csv.reader(fileObject))
        genres = []
        for row in data:
            if row[4] not in genres:
                genres.append(row[4])
        genreChoose = questionary.select(
            "Choose a genre:", choices=genres, default=genres[0]
        ).ask()
        genreData = []
        for row in data:
            if row[4] == genreChoose:
                genreData.append(row)
        print("All Books with Genre {} are:".format(genreChoose))
        print(
            tabulate(
                genreData,
                headers=["ISBN", "BOOK NAME", "PAGES", "AUTHOR", "GENRE"],
                tablefmt="fancy_grid",
            )
        )
    # TODO: Search : Author/ Books Name
# TODO: Search : Author/ Books Name
class Teacher(Student):
    """Class for Teacher Method.
    Parameters
    ----------
    Student : Base Class
        Teacher inherits from Student
    """
    def __init__(self) -> None:
        super().__init__()
    def verify(self):
        """Prompt for credentials (hard-coded root/password) before entry.

        NOTE(review): the counter is decremented before the prompt, so
        despite the message only 2 real attempts are offered.
        """
        attempts = 3
        while True:
            attempts -= 1
            if attempts == 0:
                print("3 attemps used up!")
                exit()
            username = questionary.text(
                "Enter your username:",
                default="root",
                validate=lambda x: len(x) > 0,
            ).ask()
            password = questionary.password(
                "Enter the password:",
                validate=lambda x: len(x) > 0,
            ).ask()
            if username == "root" and password == "password":
                print("Username and password validated successfully!")
                print("-" * os.get_terminal_size().columns)
                print("Teacher Mode".center(os.get_terminal_size().columns))
                print("-" * os.get_terminal_size().columns)
                break
            else:
                print(
                    "Wrong passsword!".center(
                        os.get_terminal_size().columns
                    ).capitalize()
                )
    def run(self) -> None:
        """Run method for Teacher Class"""
        self.verify()
        # Teacher gains two extra menu entries at indices 7 and 8;
        # index 6 is still "EXIT" and falls through to the final else.
        self.baseActions.extend(
            [
                "Add Book",
                "Remove Book",
            ]
        )
        actions = self.baseActions
        while True:
            action = questionary.select(
                "Choose an action:", choices=actions, default=actions[0]
            ).ask()
            if action == actions[0]:
                self.borrowBook()
            elif action == actions[1]:
                self.returnBook()
            elif action == actions[2]:
                self.seeAllBooks()
            elif action == actions[3]:
                self.searchByAuthor()
            elif action == actions[4]:
                self.searchByGenre()
            elif action == actions[5]:
                self.plotting()
            elif action == actions[7]:
                self.addBook()
            elif action == actions[8]:
                self.removeBook()
            else:
                exit(0)
    def addBook(self):
        """Add Book Method. Only for Teacher Class"""
        with open(self.bookPath, "r") as fileObject:
            data = list(csv.reader(fileObject))
            cur_isbn = []
            cur_categ = []
            for row in data:
                if len(row) == 5:
                    cur_isbn.append(row[0])
                    if row[4] not in cur_categ:
                        cur_categ.append(row[4])
            fileObject.close()
        # ISBNs must look like ###-####-###.
        pattern_isbn = "^[0-9]{3}-[0-9]{4}-[0-9]{3}$"
        checker_isbn = re.compile(pattern=pattern_isbn)
        while True:
            isbn_new = questionary.text(
                "Enter the ISBN of your book\
(It should be of the format ###-####-###): "
            ).ask()
            if isbn_new in cur_isbn:
                print("ISBN is already in use!")
                print("Try Again!")
            elif not checker_isbn.match(isbn_new):
                print("Incorrect Format!\n\tTry Again!")
            else:
                break
        bookNew = questionary.text("Enter the Book Name:").ask()
        while True:
            # NOTE(review): non-numeric input raises ValueError here
            # (int(input(...)) is unguarded) — confirm acceptable.
            pagesNew = int(input("Enter the No. of Pages: "))
            if pagesNew <= 1:
                print("Incorrect Value!\n")
            else:
                break
        authorNew = questionary.text(
            "Enter the Author's name:",
            validate=lambda x: len(x.split(" ")) >= 2,
        ).ask()
        categoryNew = questionary.autocomplete(
            "Enter the Category: ",
            choices=cur_categ,
            validate=lambda cat: cat in cur_categ,
        ).ask()
        toBeIns = [isbn_new, bookNew, pagesNew, authorNew, categoryNew]
        data.append(toBeIns)
        try:
            with open(self.bookPath, "w", newline="") as fileObject:
                writer = csv.writer(fileObject)
                writer.writerows(data)
                fileObject.close()
        except Exception as e:
            print(e)
            print("Unable to add book")
            return
        else:
            print("Book added successfully!")
            return
    def removeBook(self):
        """Remove Book Method. Only for Teacher Class"""
        pattern_isbn = "^[0-9]{3}-[0-9]{4}-[0-9]{3}$"
        checker_isbn = re.compile(pattern=pattern_isbn)
        with open(self.bookPath, "r", newline="") as fileObject:
            data = [row for row in csv.reader(fileObject) if len(row) != 0]
            curISBNs = [row[0] for row in data]
            fileObject.close()
        while True:
            isbnToRemove = questionary.text(
                "Enter the ISBN of the Book that you want to remove"
            ).ask()
            if not checker_isbn.match(isbnToRemove):
                print("Wrong format.\n Try Again!")
                print("Correct format is : ###-####-###")
            elif isbnToRemove not in curISBNs:
                print("Invalid ISBN!\nTry again!")
            else:
                break
        with open(self.bookPath, "r", newline="") as file_book_02:
            data = [row for row in csv.reader(file_book_02) if len(row) == 5]
            # NOTE(review): removing from `data` while iterating — works
            # because ISBNs are unique, but fragile.
            for row in data:
                if str(row[0]).rstrip(" ").lstrip(" ") == isbnToRemove:
                    data.remove(row)
                    print(row)
            file_book_02.close()
        with open(self.bookPath, "w", newline="") as fileObject:
            writer = csv.writer(fileObject)
            writer.writerows(data)
            print("Book successfully deleted!")
class MainApp:
    """Entry point: runs setup(), then launches Student or Teacher mode."""
    def __init__(self) -> None:
        try:
            # Installs/checks required modules (see setup.py).
            setup()
        except Exception:
            print("An error occurred!")
            exit()
        else:
            print("All necessary modules installed successfully!")
        options = ["TEACHER MODE", "STUDENT MODE"]
        action = questionary.select(
            "Choose a mode:",
            choices=options,
            default=options[1],
        ).ask()
        main = None
        if action == options[1]:
            main = Student()
            main.run()
        elif action == options[0]:
            # Teacher.run() performs its own credential check first.
            main = Teacher()
            main.run()
# Constructing MainApp runs the entire interactive application.
if __name__ == "__main__":
    app = MainApp()
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Mathy utility functions."""
import jax
import jax.numpy as jnp
import jax.scipy as jsp
def matmul(a, b):
    """Matrix-multiply `a @ b` at full precision.

    jnp.matmul defaults to bfloat16 on TPU; this forces HIGHEST precision.
    """
    product = jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
    return product
def safe_trig_helper(x, fn, t=100 * jnp.pi):
    """Evaluate `fn` on x reduced modulo t whenever |x| >= t."""
    bounded = jnp.where(jnp.abs(x) < t, x, x % t)
    return fn(bounded)


def safe_cos(x):
    """jnp.cos() on a TPU may NaN out for large values."""
    return safe_trig_helper(x, jnp.cos)


def safe_sin(x):
    """jnp.sin() on a TPU may NaN out for large values."""
    return safe_trig_helper(x, jnp.sin)
def mse_to_psnr(mse):
    """Compute PSNR given an MSE (we assume the maximum pixel value is 1)."""
    scale = -10. / jnp.log(10.)
    return scale * jnp.log(mse)
def psnr_to_mse(psnr):
    """Compute MSE given a PSNR (we assume the maximum pixel value is 1)."""
    ln10 = jnp.log(10.)
    return jnp.exp(psnr * (-0.1 * ln10))
def compute_avg_error(psnr, ssim, lpips):
    """The 'average' error used in the paper.

    Geometric mean of MSE (derived from PSNR), sqrt(1 - SSIM), and LPIPS.
    """
    mse = jnp.exp(-0.1 * jnp.log(10.) * psnr)  # psnr_to_mse, inlined
    dssim = jnp.sqrt(1 - ssim)
    metrics = jnp.array([mse, dssim, lpips])
    return jnp.exp(jnp.mean(jnp.log(metrics)))
def compute_ssim(img0,
                 img1,
                 max_val,
                 filter_size=11,
                 filter_sigma=1.5,
                 k1=0.01,
                 k2=0.03,
                 return_map=False):
    """Computes SSIM from two images.

    This function was modeled after tf.image.ssim, and should produce
    comparable output.

    Args:
      img0: array. An image of size [..., width, height, num_channels].
      img1: array. An image of size [..., width, height, num_channels].
      max_val: float > 0. The maximum magnitude that `img0` or `img1` can have.
      filter_size: int >= 1. Window size.
      filter_sigma: float > 0. The bandwidth of the Gaussian used for filtering.
      k1: float > 0. One of the SSIM dampening parameters.
      k2: float > 0. One of the SSIM dampening parameters.
      return_map: Bool. If True, the per-pixel SSIM "map" is returned instead.

    Returns:
      Each image's mean SSIM, or a tensor of individual values if `return_map`.
    """
    # Construct a 1D Gaussian blur filter.
    hw = filter_size // 2
    # Center the sample points for even filter sizes.
    shift = (2 * hw - filter_size + 1) / 2
    f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
    filt = jnp.exp(-0.5 * f_i)
    filt /= jnp.sum(filt)
    # Blur in x and y (faster than the 2D convolution).
    def convolve2d(z, f):
        return jsp.signal.convolve2d(
            z, f, mode='valid', precision=jax.lax.Precision.HIGHEST)
    filt_fn1 = lambda z: convolve2d(z, filt[:, None])
    filt_fn2 = lambda z: convolve2d(z, filt[None, :])
    # Vmap the blurs to the tensor size, and then compose them.
    # map_axes = all leading batch axes plus the trailing channel axis.
    num_dims = len(img0.shape)
    map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
    for d in map_axes:
        filt_fn1 = jax.vmap(filt_fn1, in_axes=d, out_axes=d)
        filt_fn2 = jax.vmap(filt_fn2, in_axes=d, out_axes=d)
    filt_fn = lambda z: filt_fn1(filt_fn2(z))
    # Local means, second moments and cross moment.
    mu0 = filt_fn(img0)
    mu1 = filt_fn(img1)
    mu00 = mu0 * mu0
    mu11 = mu1 * mu1
    mu01 = mu0 * mu1
    sigma00 = filt_fn(img0**2) - mu00
    sigma11 = filt_fn(img1**2) - mu11
    sigma01 = filt_fn(img0 * img1) - mu01
    # Clip the variances and covariances to valid values.
    # Variance must be non-negative:
    sigma00 = jnp.maximum(0., sigma00)
    sigma11 = jnp.maximum(0., sigma11)
    # |covariance| is bounded by the geometric mean of the variances.
    sigma01 = jnp.sign(sigma01) * jnp.minimum(
        jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
    c1 = (k1 * max_val)**2
    c2 = (k2 * max_val)**2
    numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
    denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
    ssim_map = numer / denom
    # Average over the spatial and channel axes.
    ssim = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
    return ssim_map if return_map else ssim
def linear_to_srgb(linear):
  """Map linear RGB in [0, 1] to sRGB. https://en.wikipedia.org/wiki/SRGB"""
  eps = jnp.finfo(jnp.float32).eps
  low = 323 / 25 * linear
  high = (211 * jnp.maximum(eps, linear)**(5 / 12) - 11) / 200
  return jnp.where(linear <= 0.0031308, low, high)
def srgb_to_linear(srgb):
  """Map sRGB in [0, 1] to linear RGB. https://en.wikipedia.org/wiki/SRGB"""
  eps = jnp.finfo(jnp.float32).eps
  low = 25 / 323 * srgb
  high = jnp.maximum(eps, ((200 * srgb + 11) / (211)))**(12 / 5)
  return jnp.where(srgb <= 0.04045, low, high)
def learning_rate_decay(step,
                        lr_init,
                        lr_final,
                        max_steps,
                        lr_delay_steps=0,
                        lr_delay_mult=1):
  """Continuous learning rate decay function.

  The returned rate is lr_init when step=0 and lr_final when step=max_steps,
  log-linearly interpolated in between (i.e. exponential decay). When
  lr_delay_steps > 0, the rate is additionally scaled by a smooth ramp that
  starts at lr_init*lr_delay_mult and eases back to the nominal schedule once
  step exceeds lr_delay_steps.

  Args:
    step: int, the current optimization step.
    lr_init: float, the initial learning rate.
    lr_final: float, the final learning rate.
    max_steps: int, the number of steps during optimization.
    lr_delay_steps: int, the number of steps to delay the full learning rate.
    lr_delay_mult: float, the multiplier on the rate when delaying it.

  Returns:
    lr: the learning rate for the current step.
  """
  # Log-linear interpolation between lr_init and lr_final.
  t = jnp.clip(step / max_steps, 0, 1)
  log_lerp = jnp.exp(jnp.log(lr_init) * (1 - t) + jnp.log(lr_final) * t)
  if lr_delay_steps <= 0:
    return log_lerp
  # A kind of reverse cosine decay for the warmup ramp.
  progress = jnp.clip(step / lr_delay_steps, 0, 1)
  delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
      0.5 * jnp.pi * progress)
  return delay_rate * log_lerp
def sorted_piecewise_constant_pdf(key, bins, weights, num_samples, randomized):
  """Piecewise-Constant PDF sampling from sorted bins.

  Args:
    key: jnp.ndarray(float32), [2,], random number generator.
    bins: jnp.ndarray(float32), [batch_size, num_bins + 1].
    weights: jnp.ndarray(float32), [batch_size, num_bins].
    num_samples: int, the number of samples.
    randomized: bool, use randomized samples.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  # Pad each weight vector (only if necessary) to bring its sum to `eps`. This
  # avoids NaNs when the input is zeros or small, but has no effect otherwise.
  eps = 1e-5
  weight_sum = jnp.sum(weights, axis=-1, keepdims=True)
  padding = jnp.maximum(0, eps - weight_sum)
  weights += padding / weights.shape[-1]
  weight_sum += padding
  # Compute the PDF and CDF for each weight vector, while ensuring that the CDF
  # starts with exactly 0 and ends with exactly 1.
  pdf = weights / weight_sum
  cdf = jnp.minimum(1, jnp.cumsum(pdf[..., :-1], axis=-1))
  cdf = jnp.concatenate([
      jnp.zeros(list(cdf.shape[:-1]) + [1]), cdf,
      jnp.ones(list(cdf.shape[:-1]) + [1])
  ],
                        axis=-1)
  # Draw uniform samples.
  if randomized:
    # Stratified sampling: one jittered sample per stratum of width 1/n.
    s = 1 / num_samples
    u = jnp.arange(num_samples) * s
    u += jax.random.uniform(
        key,
        list(cdf.shape[:-1]) + [num_samples],
        maxval=s - jnp.finfo('float32').eps)
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u = jnp.minimum(u, 1. - jnp.finfo('float32').eps)
  else:
    # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
    u = jnp.linspace(0., 1. - jnp.finfo('float32').eps, num_samples)
    u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])
  # Identify the location in `cdf` that corresponds to a random sample.
  # The final `True` index in `mask` will be the start of the sampled interval.
  mask = u[..., None, :] >= cdf[..., :, None]
  def find_interval(x):
    # Grab the value where `mask` switches from True to False, and vice versa.
    # This approach takes advantage of the fact that `x` is sorted.
    x0 = jnp.max(jnp.where(mask, x[..., None], x[..., :1, None]), -2)
    x1 = jnp.min(jnp.where(~mask, x[..., None], x[..., -1:, None]), -2)
    return x0, x1
  bins_g0, bins_g1 = find_interval(bins)
  cdf_g0, cdf_g1 = find_interval(cdf)
  # Invert the CDF within the located interval; nan_to_num guards the
  # zero-width-interval case before the linear interpolation into bins.
  t = jnp.clip(jnp.nan_to_num((u - cdf_g0) / (cdf_g1 - cdf_g0), 0), 0, 1)
  samples = bins_g0 + t * (bins_g1 - bins_g0)
  return samples
|
import pandas as pd
import gc
from joblib import dump, load
import numpy as np
import shap
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn import preprocessing
from torch.nn.utils.weight_norm import weight_norm
from train_config import *
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import shap
import matplotlib.pyplot as plt
from itertools import repeat, chain
def revert_dict(d):
    """Invert a {key: iterable_of_values} mapping into {value: key}."""
    inverted = {}
    for key, values in d.items():
        for value in values:
            inverted[value] = key
    return inverted
DATA_DIM = 168  # Number of input features the model expects (see model loading in main()).
def gradboost_features():
    """Plot the most important features of a saved GradBoost regressor.

    Loads a pickled sklearn pipeline from a hard-coded path, takes the final
    estimator's feature importances (first 168 features), and draws a
    horizontal bar plot of those with importance > 0.005.

    NOTE(review): `columns` is not defined in this file -- presumably it comes
    from `from train_config import *`; confirm. The plot is neither shown nor
    saved here; the caller is expected to handle the active matplotlib figure.
    """
    from joblib import Parallel, delayed, dump, load
    alg = load('/N/project/phyloML/deep_ils/results/train_data/nonrec/allLengths_dropna/models/results_regress_GradBoost.pkl.gz')
    # The pipeline's last step is the fitted GradBoost estimator.
    gb = alg[-1]
    ftrs = pd.Series(gb.feature_importances_[
        :168], index=columns).sort_values()
    ftrs[ftrs > .005].plot.barh()
def grouped_shap(shap_values, features, groups):
    """Aggregate per-feature SHAP values into per-group sums.

    Args:
        shap_values: 2-D array of SHAP values, shape (n_samples, n_features).
        features: feature labels, one per column of `shap_values`.
        groups: mapping {group_name: [feature, ...]}.

    Returns:
        DataFrame of shape (n_samples, n_groups) with SHAP values summed
        within each group.
    """
    # Invert {group: [feature, ...]} into {feature: group}.
    feature_to_group = {}
    for group_name, members in groups.items():
        for member in members:
            feature_to_group[member] = group_name
    per_feature = pd.DataFrame(
        shap_values,
        columns=pd.Index(features, name='features')).T
    per_feature['group'] = per_feature.reset_index().features.map(feature_to_group).values
    return per_feature.groupby('group').sum().T
def main(args):
    """Compute, cache, and plot SHAP attributions for a trained model.

    Loads the model and train/test data, builds (or reloads) a DeepExplainer,
    then for a series of test-set query conditions computes SHAP values,
    saves them, and writes summary/quartile plots into ``args.outdir``.

    NOTE(review): ``os``, ``models``, ``u``, and ``Path`` are not imported in
    this file's visible import block -- presumably supplied by
    ``from train_config import *``; confirm.
    """
    print("cpus:", os.sched_getaffinity(0))
    # NOTE(review): `preprocessor` is loaded but never used below -- the path
    # itself is forwarded to u.load_data instead; verify this is intended.
    preprocessor = load(args.preprocessor)
    if args.config is None:
        args.config = args.outdir/'model.config'
    device = models.get_device()
    config = load(args.config)
    model = models.load_model(
        config,
        DATA_DIM,
        checkpoint_dir=args.outdir)
    trainset, _ = u.load_data(
        data_dir=args.data_dir,
        data_files=args.data_files[1:],
        train_test_file=args.outdir / "train_test_split.npz",
        from_scratch=args.overwrite,
        test_size=0,
        topology=args.topology,
        dropna=True,
        conditions='ebl<=120',
        preprocessor=args.preprocessor,
        log_proba='train')
    # First data file is the held-out test set; the rest were used above.
    x_test = u.load_and_filter(args.data_files[0], topologies=False).dropna()
    y_test = x_test.y_prob
    x_test.drop(columns='y_prob', inplace=True)
    features = x_test.columns.drop('y_prob', 0, errors='ignore')
    x_train = trainset.tensors[0].to(device)
    # Reuse a cached explainer when present; otherwise fit one on a random
    # 100k-row background sample and cache it.
    # NOTE(review): bare except also hides real errors (corrupt cache, OOM).
    try:
        e = torch.load(args.outdir/'deep_explainer.torch')
    except:
        e = shap.DeepExplainer(
            model,
            x_train[np.random.choice(
                len(x_train), size=(100000,), replace=False)]
        )
        torch.save(e, args.outdir/'deep_explainer.torch')
    # Group the MultiIndex columns by their first level ('feature').
    feature_levels = dict(zip(features.names, features.levels))
    groups_by_feature = features.groupby(feature_levels['feature'])
    # Human-readable plot labels per feature family.
    labels = {'1_2': r'$d(1,2)$', '1_3': r'$d(1,3)$', '1_4': r'$d(1,4)$',
              '2_3': r'$d(2,3)$', '2_4': r'$d(2,4)$', '3_4': r'$d(3,4)$',
              'counts': 'Topology Counts',
              'nsites': 'Number of Informative Sites',
              'seq_length': 'Sequence Length',
              'top_1': 'SCF', 'top_2': 'SCF',
              'top_3': 'SCF'}
    from collections import defaultdict
    groups = defaultdict(list)
    for f in features:
        groups[labels[f[0]]].append(f)
    # Hierarchical clustering of features, cached between runs.
    try:
        clust = load(args.outdir / 'clustering.joblib')
    except:
        clust = shap.utils.hclust(x_test, y_test, linkage="complete")
        dump(clust, args.outdir / 'clustering.joblib')
    for test_condition in ('ebl<120', 'ebl<50 and ibl<.5',
                           'ebl<120 and 20>ibl>3', 'ebl<120 and ibl<.5',
                           'ebl<50 and 20>ibl>3', 'ebl<50',):
        # Reload cached SHAP values + sample for this condition, or compute
        # them on a fresh 500-row sample and cache.
        try:
            shap_values = np.load(
                args.outdir/f'{test_condition}_shapley.npy')
            samp = pd.read_pickle(
                args.outdir / f'{test_condition}_sample.pd.gz')
        except:
            samp = x_test.query(test_condition).sample(500)
            samp.to_pickle(args.outdir / f'{test_condition}_sample.pd.gz')
            shap_values = e.shap_values(
                (torch
                 .from_numpy(samp.values)
                 .to(model.device))
            )
            np.save(
                args.outdir/f'{test_condition}_shapley.npy',
                shap_values,
            )
        # Per-feature summary statistics of the absolute attributions.
        df = pd.DataFrame(
            {
                "mean_abs_shap": np.mean(np.abs(shap_values), axis=0),
                "stdev_abs_shap": np.std(np.abs(shap_values), axis=0),
            },
            index=features)
        df.to_pickle(args.outdir / f'{test_condition}_shapley.pd.gz')
        shap_features = grouped_shap(shap_values, features, groups)
        print(df.sort_values("mean_abs_shap", ascending=False)[:10])
        plt.clf()
        shap.summary_plot(
            shap_features.values,
            features=shap_features.columns,
        )
        plt.tight_layout()
        plt.savefig(args.outdir/f'{test_condition}_shapley.png')
        plt.close()
        # Per-quartile summary plots of predicted probabilities.
        # NOTE(review): qcut with 5 edges yields 4 bins, but 5 axes are
        # created and the title says "Quintile" -- confirm intent.
        try:
            quartiles = pd.qcut(
                u.sigmoid(y_test).loc[samp.index],
                np.linspace(0, 1, 5),
                labels=np.arange(4)
            )
            fig, ax = plt.subplots(1, 5, figsize=(36, 6))
            for q in range(4):
                plt.sca(ax[q])
                quartile_mask = quartiles == q
                quartile_values = x_test.loc[quartiles.index[quartile_mask]]
                quartile_values.columns = (quartile_values
                                           .columns
                                           .map(lambda s: '-'.join(map(str, s)))
                                           .str.replace('nan', '')
                                           .str.replace('.0', ''))
                shap.summary_plot(shap_values[quartile_mask],
                                  quartile_values,
                                  show=False,
                                  plot_size=None,
                                  color_bar=False,
                                  max_display=6)
                plt.title(f"Quintile {q} of predictions")
            plt.tight_layout()
            plt.savefig(args.outdir/f'{test_condition}_quartiles.png')
        except Exception as err:
            print(err)
        finally:
            plt.close()
if __name__ == "__main__":
    # Command-line entry point for the SHAP analysis.
    # NOTE(review): `argparse` and `Path` are not imported in this file's
    # visible imports -- presumably provided via `from train_config import *`;
    # confirm.
    parser = argparse.ArgumentParser(description="Train and test.")
    parser.add_argument(
        "--config",
        type=Path,
        help="path to config dict (joblib pickle)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        help="number of training epochs",
        default=500
    )
    parser.add_argument(
        "--patience",
        type=int,
        help="number of epochs with no DECREASE in val loss",
        default=None
    )
    parser.add_argument(
        "--topology",
        action="store_true",
        help="predict topology",
        # help="binarize y (p>.999 -> 1)",
    )
    parser.add_argument(
        "--outdir",
        help="directory to store results files",
        type=Path,
        default="/N/project/phyloML/deep_ils/results",
    )
    parser.add_argument(
        "--preprocessor",
        type=Path,
        default=None,
        help="path to sklearn preprocessor"
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="regenerate train/test splits from db AND train the model from scratch",
    )
    parser.add_argument(
        "--data_files",
        "-i",
        type=Path,
        nargs="+",
        default=None,
        help="input hdf5 file(s) "
    )
    parser.add_argument(
        "--data_dir",
        type=Path,
        default=None,
        help="dir containing input files"
    )
    args = parser.parse_args()
    main(args)
|
'''
Topic signature data:
Each file is named according to the following pattern:
measure.BNCfilt.target_word.PoS.sense.txt.bz2
for instance: tf_idf.BNCfilt.church.n.1.txt.bz2, which corresponds to the
topic signature of the first sense of church built using the tf.idf
measure.
- Each file is compressed with bzip2 (http://sources.redhat.com/bzip2/)
1) read df_wn21wn16 and copy the corresponding files into another folder
2) unzip these files
3) calculate the cosine similarity of each pair
'''
import os
import os.path
import shutil

# Map between WordNet 2.1 and WordNet 1.6 sense identifiers.
df_wn21wn16 = pd.read_csv('WN21mapWn16.csv')
directory = '/Users/gary/Documents/2020Fall/IntroNLP/project/FeatureSpace/TopicSignatures/signatures_lem/'
des_dir = '/Users/gary/Documents/2020Fall/IntroNLP/project/FeatureSpace/TopicSingatures_SnowProject/'
total_pair = len(df_wn21wn16)
# Collect both senses of every mapped pair.
senses = []
for i in range(total_pair):
    sense1 = df_wn21wn16.loc[df_wn21wn16.index[i], 'sense1_wn16']
    sense2 = df_wn21wn16.loc[df_wn21wn16.index[i], 'sense2_wn16']
    senses.append(sense1)
    senses.append(sense2)
# Drop NaN entries (NaN != NaN). The original removed items from the list
# while iterating over it, which silently skips elements; a filtering
# comprehension removes every NaN.
senses = [s for s in senses if s == s]
# Copy each sense's topic-signature files into des_dir.
unique_senses = set(senses)
j = 0
for s in unique_senses:
    if j % 500 == 0:
        print('processing {}/{}'.format(j, len(unique_senses)))
    for dirpath, dirnames, filenames in os.walk(directory):
        for filename in [f for f in filenames if "BNC_filt." + s in f]:
            try:
                # shutil.copy handles spaces/quoting safely, unlike
                # building an `os.system('cp ...')` shell string.
                shutil.copy(os.path.join(dirpath, filename),
                            os.path.join(des_dir, filename))
            except OSError as err:
                # Best-effort copy, but report failures instead of
                # swallowing every exception silently.
                print('failed to copy {}: {}'.format(filename, err))
    j += 1
|
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os.path
import shutil
from unittest.mock import patch, MagicMock
import tempfile
import unittest
from test import _common
from test.helper import TestHelper
from test.test_art_resize import DummyIMBackend
from mediafile import MediaFile
from beets import config, logging, ui
from beets.util import bytestring_path, displayable_path, syspath
from beets.util.artresizer import ArtResizer
from beets import art
from test.test_art import FetchImageHelper
def require_artresizer_compare(test):
    """Decorator: skip `test` when ArtResizer lacks image-comparison support.

    Uses functools.wraps so the wrapper preserves the wrapped test's full
    metadata (__name__, __doc__, __module__, ...), not just the name as the
    previous manual ``wrapper.__name__ = test.__name__`` did.
    """
    from functools import wraps

    @wraps(test)
    def wrapper(*args, **kwargs):
        if not ArtResizer.shared.can_compare:
            raise unittest.SkipTest("compare not available")
        else:
            return test(*args, **kwargs)
    return wrapper
class EmbedartCliTest(TestHelper, FetchImageHelper):
    """Functional tests for the embedart plugin CLI commands
    (``embedart``, ``extractart``, ``clearart``), exercised via
    ``run_command`` on fixture albums.

    NOTE(review): unittest.TestCase behavior (assert*, setUp/tearDown) is
    presumably mixed in through TestHelper -- confirm.
    """
    # Fixture artwork shipped in the test resource dir (bytes paths, per
    # beets' path convention).
    small_artpath = os.path.join(_common.RSRC, b'image-2x3.jpg')
    abbey_artpath = os.path.join(_common.RSRC, b'abbey.jpg')
    abbey_similarpath = os.path.join(_common.RSRC, b'abbey-similar.jpg')
    abbey_differentpath = os.path.join(_common.RSRC, b'abbey-different.jpg')
    def setUp(self):
        super().setUp()
        self.io.install()
        self.setup_beets()  # Converter is threaded
        self.load_plugins('embedart')
    def _setup_data(self, artpath=None):
        # Cache the raw bytes of the art file so tests can compare against
        # what actually got embedded in the media file.
        if not artpath:
            artpath = self.small_artpath
        with open(syspath(artpath), 'rb') as f:
            self.image_data = f.read()
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    # --- embedart from an explicit file ---
    def test_embed_art_from_file_with_yes_input(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.io.addinput('y')
        self.run_command('embedart', '-f', self.small_artpath)
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data)
    def test_embed_art_from_file_with_no_input(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.io.addinput('n')
        self.run_command('embedart', '-f', self.small_artpath)
        mediafile = MediaFile(syspath(item.path))
        # make sure that images array is empty (nothing embedded)
        self.assertFalse(mediafile.images)
    def test_embed_art_from_file(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.run_command('embedart', '-y', '-f', self.small_artpath)
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data)
    def test_embed_art_from_album(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        album.artpath = self.small_artpath
        album.store()
        self.run_command('embedart', '-y')
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data)
    def test_embed_art_remove_art_file(self):
        self._setup_data()
        album = self.add_album_fixture()
        logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
        # Write the art to a temp file so deleting it is safe to verify.
        handle, tmp_path = tempfile.mkstemp()
        tmp_path = bytestring_path(tmp_path)
        os.write(handle, self.image_data)
        os.close(handle)
        album.artpath = tmp_path
        album.store()
        config['embedart']['remove_art_file'] = True
        self.run_command('embedart', '-y')
        if os.path.isfile(syspath(tmp_path)):
            os.remove(syspath(tmp_path))
            self.fail('Artwork file {} was not deleted'.format(
                displayable_path(tmp_path)
            ))
    def test_art_file_missing(self):
        self.add_album_fixture()
        logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
        with self.assertRaises(ui.UserError):
            self.run_command('embedart', '-y', '-f', '/doesnotexist')
    def test_embed_non_image_file(self):
        album = self.add_album_fixture()
        logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
        handle, tmp_path = tempfile.mkstemp()
        tmp_path = bytestring_path(tmp_path)
        os.write(handle, b'I am not an image.')
        os.close(handle)
        try:
            self.run_command('embedart', '-y', '-f', tmp_path)
        finally:
            os.remove(syspath(tmp_path))
        mediafile = MediaFile(syspath(album.items()[0].path))
        self.assertFalse(mediafile.images)  # No image added.
    # --- compare_threshold behavior (needs ImageMagick compare) ---
    @require_artresizer_compare
    def test_reject_different_art(self):
        self._setup_data(self.abbey_artpath)
        album = self.add_album_fixture()
        item = album.items()[0]
        self.run_command('embedart', '-y', '-f', self.abbey_artpath)
        config['embedart']['compare_threshold'] = 20
        self.run_command('embedart', '-y', '-f', self.abbey_differentpath)
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data,
                         'Image written is not {}'.format(
                             displayable_path(self.abbey_artpath)))
    @require_artresizer_compare
    def test_accept_similar_art(self):
        self._setup_data(self.abbey_similarpath)
        album = self.add_album_fixture()
        item = album.items()[0]
        self.run_command('embedart', '-y', '-f', self.abbey_artpath)
        config['embedart']['compare_threshold'] = 20
        self.run_command('embedart', '-y', '-f', self.abbey_similarpath)
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data,
                         'Image written is not {}'.format(
                             displayable_path(self.abbey_similarpath)))
    # --- extractart ---
    def test_non_ascii_album_path(self):
        resource_path = os.path.join(_common.RSRC, b'image.mp3')
        album = self.add_album_fixture()
        trackpath = album.items()[0].path
        albumpath = album.path
        shutil.copy(syspath(resource_path), syspath(trackpath))
        self.run_command('extractart', '-n', 'extracted')
        self.assertExists(os.path.join(albumpath, b'extracted.png'))
    def test_extracted_extension(self):
        resource_path = os.path.join(_common.RSRC, b'image-jpeg.mp3')
        album = self.add_album_fixture()
        trackpath = album.items()[0].path
        albumpath = album.path
        shutil.copy(syspath(resource_path), syspath(trackpath))
        self.run_command('extractart', '-n', 'extracted')
        self.assertExists(os.path.join(albumpath, b'extracted.jpg'))
    # --- clearart ---
    def test_clear_art_with_yes_input(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.io.addinput('y')
        self.run_command('embedart', '-f', self.small_artpath)
        self.io.addinput('y')
        self.run_command('clearart')
        mediafile = MediaFile(syspath(item.path))
        self.assertFalse(mediafile.images)
    def test_clear_art_with_no_input(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.io.addinput('y')
        self.run_command('embedart', '-f', self.small_artpath)
        self.io.addinput('n')
        self.run_command('clearart')
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(mediafile.images[0].data, self.image_data)
    # --- embedart from a URL (HTTP responses mocked by FetchImageHelper) ---
    def test_embed_art_from_url_with_yes_input(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.mock_response('http://example.com/test.jpg', 'image/jpeg')
        self.io.addinput('y')
        self.run_command('embedart', '-u', 'http://example.com/test.jpg')
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(
            mediafile.images[0].data,
            self.IMAGEHEADER.get('image/jpeg').ljust(32, b'\x00')
        )
    def test_embed_art_from_url_png(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.mock_response('http://example.com/test.png', 'image/png')
        self.run_command('embedart', '-y', '-u', 'http://example.com/test.png')
        mediafile = MediaFile(syspath(item.path))
        self.assertEqual(
            mediafile.images[0].data,
            self.IMAGEHEADER.get('image/png').ljust(32, b'\x00')
        )
    def test_embed_art_from_url_not_image(self):
        self._setup_data()
        album = self.add_album_fixture()
        item = album.items()[0]
        self.mock_response('http://example.com/test.html', 'text/html')
        self.run_command('embedart', '-y', '-u',
                         'http://example.com/test.html')
        mediafile = MediaFile(syspath(item.path))
        self.assertFalse(mediafile.images)
class DummyArtResizer(ArtResizer):
    """An `ArtResizer` which pretends that ImageMagick is available, and has
    a sufficiently recent version to support image comparison.
    """
    def __init__(self):
        # Deliberately does not call ArtResizer.__init__ (which would probe
        # for real backends); only the comparison-capable dummy is installed.
        self.local_method = DummyIMBackend()
@patch('beets.util.artresizer.subprocess')
@patch('beets.art.extract')
class ArtSimilarityTest(unittest.TestCase):
    """Unit tests for ``art.check_art_similarity``'s handling of the external
    ImageMagick ``convert``/``compare`` processes.

    The class-level patches inject ``mock_extract`` and ``mock_subprocess``
    into every test method (decorators apply bottom-up).
    """
    def setUp(self):
        self.item = _common.item()
        self.log = logging.getLogger('beets.embedart')
        self.artresizer = DummyArtResizer()
    def _similarity(self, threshold):
        # Run the similarity check against a fake art path; subprocess calls
        # are mocked, so no real ImageMagick is invoked.
        return art.check_art_similarity(
            self.log,
            self.item,
            b'path',
            threshold,
            artresizer=self.artresizer,
        )
    def _popen(self, status=0, stdout="", stderr=""):
        """Create a mock `Popen` object."""
        popen = MagicMock(returncode=status)
        popen.communicate.return_value = stdout, stderr
        return popen
    def _mock_popens(self, mock_extract, mock_subprocess, compare_status=0,
                     compare_stdout="", compare_stderr="", convert_status=0):
        # First Popen is `convert`, second is `compare` (side_effect order).
        mock_extract.return_value = b'extracted_path'
        mock_subprocess.Popen.side_effect = [
            # The `convert` call.
            self._popen(convert_status),
            # The `compare` call.
            self._popen(compare_status, compare_stdout, compare_stderr),
        ]
    # Status 0: distance presumably parsed from stdout -- see art module.
    def test_compare_success_similar(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 0, "10", "err")
        self.assertTrue(self._similarity(20))
    def test_compare_success_different(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 0, "10", "err")
        self.assertFalse(self._similarity(5))
    # Status 1: distance presumably parsed from stderr -- see art module.
    def test_compare_status1_similar(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 1, "out", "10")
        self.assertTrue(self._similarity(20))
    def test_compare_status1_different(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 1, "out", "10")
        self.assertFalse(self._similarity(5))
    # Failures and unparseable output yield None (comparison unavailable).
    def test_compare_failed(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 2, "out", "10")
        self.assertIsNone(self._similarity(20))
    def test_compare_parsing_error(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 0, "foo", "bar")
        self.assertIsNone(self._similarity(20))
    def test_compare_parsing_error_and_failure(self, mock_extract,
                                               mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, 1, "foo", "bar")
        self.assertIsNone(self._similarity(20))
    def test_convert_failure(self, mock_extract, mock_subprocess):
        self._mock_popens(mock_extract, mock_subprocess, convert_status=1)
        self.assertIsNone(self._similarity(20))
def suite():
    """Build the unittest suite containing this module's tests."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Run the module's tests when invoked directly.
    unittest.main(defaultTest='suite')
|
"""
File to put some utils functions
"""
import random
import conf
def to_bin(number):
    """Return `number`'s binary representation as a list of characters,
    with the '0b' prefix stripped."""
    return list(bin(number)[2:])
def to_int(binary):
    """Decode a sign-magnitude bit list: binary[0] is the sign bit
    ('1' means negative), the rest is the magnitude in base 2."""
    magnitude = int(''.join(binary[1:]), 2)
    return -magnitude if binary[0] == '1' else magnitude
def similarity_of_individuals(ind1, ind2):
    """Return the fraction of positions at which ind1 and ind2 agree.

    Args:
        ind1, ind2: equal-length sequences of genes (e.g. bit characters).

    Raises:
        Exception: if the chromosomes differ in length.
    """
    if len(ind1) != len(ind2):
        raise Exception('The chromossomes have different sizes!')
    # The original called this `hamming_distance`, but it counts MATCHING
    # positions (the complement of the Hamming distance), which is exactly
    # what a similarity measure needs -- renamed to say what it is.
    matches = sum(c1 == c2 for c1, c2 in zip(ind1, ind2))
    return float(matches) / len(ind1)
def create_model(individual):
    """Build an initial probability model from one individual's genes:
    each entry is conf.ALPHA plus (0.5 - conf.ALPHA) weighted by the bit."""
    return [(0.5 - conf.ALPHA) * int(gene) + conf.ALPHA * 1
            for gene in individual]
def update_model(model, individual):
    """Blend each model probability toward the individual's bit, in place,
    using conf.ALPHA as the learning rate.

    Uses range() instead of xrange() so the module also runs on Python 3
    (xrange was removed there); on Python 2 the behavior is identical.
    """
    for i in range(len(model)):
        model[i] = model[i] * (1.0 - conf.ALPHA) + int(individual[i]) * conf.ALPHA
def mutate_model(model):
    """Mutate the model in place: with probability conf.MUT_PROB, shift each
    entry toward a random bit by the mutation shift conf.MUT_SH."""
    for index, probability in enumerate(model):
        if random.random() < conf.MUT_PROB:
            model[index] = probability * (1.0 - conf.MUT_SH) + random.randint(0, 1) * conf.MUT_SH
def learning(previous, new):
    """Adapt the global learning rate: grow conf.ALPHA by factor (1 + conf.Q)
    when the new score improved (decreased), otherwise reset it."""
    improved = new < previous
    if improved:
        conf.ALPHA *= (1 + conf.Q)
    else:
        conf.ALPHA = conf.ALPHA_INIT
def new_population(problem):
    """Create a fresh random population of problem.num_bits()**2 individuals.

    Args:
        problem: object exposing num_bits() and new_individual().

    Returns:
        list of newly created individuals.

    Uses range() instead of xrange() so the module also runs on Python 3;
    the append loop is replaced by an equivalent list comprehension.
    """
    population_size = problem.num_bits() ** 2
    return [problem.new_individual() for _ in range(population_size)]
|
"""
Given an array A of integers and integer K, return the maximum S such that there exists i < j with A[i] + A[j] = S
and S < K. If no i, j exist satisfying this equation, return -1.
Input: A = [34,23,1,24,75,33,54,8], K = 60
Output: 58
Explanation:
We can use 34 and 24 to sum 58 which is less than 60.
"""
class Solution:
    def twoSumLessThanK(self, A: List[int], K: int) -> int:
        """Return the largest A[i] + A[j] (i != j) strictly less than K,
        or -1 if no such pair exists.

        Sort a copy and sweep with two pointers: when the pair sum is below
        K it is a candidate (advance the low pointer to look for a larger
        sum), otherwise shrink from the high end. O(n log n) time instead of
        the brute-force O(n^2) double loop, O(n) extra space for the sorted
        copy; the input list is not modified.
        """
        nums = sorted(A)
        lo, hi = 0, len(nums) - 1
        best = -1
        while lo < hi:
            pair_sum = nums[lo] + nums[hi]
            if pair_sum < K:
                best = max(best, pair_sum)
                lo += 1
            else:
                hi -= 1
        return best
""" *** Ideal Solution **
a = sorted(A)
i,j = 0,len(a)-1
ans = -1
while i<j:
if a[i]+a[j]<K:
ans = max(ans,a[i]+a[j])
i += 1
else:
j -= 1
return ans
""" |
# -*- coding: utf-8 -*-
# import ngram
# Print the word bigrams of the sentence, one pair per line; the final word
# is echoed with a trailing space and no newline, matching the original
# output byte-for-byte.
sentence = "I am an NLPer"
words = sentence.split(' ')
last_index = len(words) - 1
for index, word in enumerate(words):
    print(word + ' ', end="")
    if index == last_index:
        break
    print(words[index + 1])
|
from enum import Enum
from itertools import permutations
from collections import defaultdict
class Opcodes(Enum):
    """Intcode machine instruction opcodes (Advent of Code day 7)."""
    ADD = 1
    MULTIPLY = 2
    INPUT = 3
    OUTPUT = 4
    JTRUE = 5
    JFALSE = 6
    LESSTHAN = 7
    EQUALS = 8
    HALT = 99
# Read the comma-separated intcode program from input1.txt into a defaultdict
# keyed by address; missing addresses read as 0.
with open("input1.txt","r") as f:
    data = f.readlines()[0].replace("\n","").split(',')
dataDict = defaultdict(int)
for i in range(len(data)):
    dataDict.update({i: int(data[i])})
def getOperation(value):
    """Decode an intcode instruction into (m3, m2, m1, opcode).

    The value is zero-padded to five digits: the last two digits are the
    opcode and the leading three are the parameter modes, e.g. 01104 ->
    m1=1, m2=1, m3=0, opcode=04.
    """
    digits = str(value).zfill(5)
    opcode = int(digits[3:])
    m3, m2, m1 = (int(d) for d in digits[:3])
    return m3, m2, m1, opcode
def getValue(dataDict, pc, mode):
    """Fetch an operand: mode 0 = position (dereference the value at pc),
    mode 1 = immediate (the value at pc itself). Other modes yield None."""
    if mode == 0:
        return dataDict.get(dataDict.get(pc))
    if mode == 1:
        return dataDict.get(pc)
def add(dataDict, pc, modes):
    """Opcode 1: store param1 + param2 at the address held in pc+3; pc += 4."""
    total = getValue(dataDict, pc + 1, modes[0]) + getValue(dataDict, pc + 2, modes[1])
    dataDict.update({dataDict[pc + 3]: total})
    return dataDict, pc + 4
def multiply(dataDict, pc, modes):
    """Opcode 2: store param1 * param2 at the address held in pc+3; pc += 4."""
    product = getValue(dataDict, pc + 1, modes[0]) * getValue(dataDict, pc + 2, modes[1])
    dataDict.update({dataDict[pc + 3]: product})
    return dataDict, pc + 4
def jtrue(dataDict, pc, modes):
    """Opcode 5: jump to param2 when param1 is non-zero, else step to pc+3."""
    if getValue(dataDict, pc + 1, modes[0]) != 0:
        return getValue(dataDict, pc + 2, modes[1])
    return pc + 3
def jfalse(dataDict, pc, modes):
    """Opcode 6: jump to param2 when param1 is zero, else step to pc+3."""
    if getValue(dataDict, pc + 1, modes[0]) == 0:
        return getValue(dataDict, pc + 2, modes[1])
    return pc + 3
def lessthan(dataDict, pc, modes):
    """Opcode 7: write 1 to the address in pc+3 if param1 < param2, else 0."""
    first = getValue(dataDict, pc + 1, modes[0])
    second = getValue(dataDict, pc + 2, modes[1])
    flag = 1 if first < second else 0
    dataDict.update({dataDict[pc + 3]: flag})
    return dataDict, pc + 4
def equals(dataDict, pc, modes):
    """Opcode 8: write 1 to the address in pc+3 if param1 == param2, else 0."""
    first = getValue(dataDict, pc + 1, modes[0])
    second = getValue(dataDict, pc + 2, modes[1])
    flag = 1 if first == second else 0
    dataDict.update({dataDict[pc + 3]: flag})
    return dataDict, pc + 4
# Output defined as datDict, pc, value
def compute(dataDict, pc, inputs):
    """Run the intcode machine until it emits an output or halts.

    The machine is resumable: it returns (dataDict, pc, value) where value is
    the emitted output, or None on HALT; feeding the returned dataDict/pc
    back in continues execution after the OUTPUT instruction. `inputs` is a
    queue of pending input values, consumed from the front.
    """
    while(True):
        m3, m2, m1, opcode = getOperation(dataDict[pc])
        modes = [m1, m2, m3]
        # `converted` is only used by the commented-out debug prints below.
        converted = [m1, m2, m3, opcode]
        #print("Got {}, converted: {}".format(dataDict[pc], converted))
        #print("Opcode: {}, PC: {}, MODES: {}".format(Opcodes(opcode), pc, modes))
        if Opcodes(opcode) == Opcodes.HALT:
            return dataDict, pc, None
        elif Opcodes(opcode) == Opcodes.INPUT:
            # Consume the next queued input and store it at the pc+1 address.
            value = {dataDict[pc+1]: inputs[0]}
            inputs.pop(0)
            dataDict.update(value)
            pc+=2
        elif Opcodes(opcode) == Opcodes.OUTPUT:
            # Suspend, handing the output value back to the caller.
            return dataDict, pc+2, dataDict[dataDict[pc+1]]
        elif Opcodes(opcode) == Opcodes.ADD:
            dataDict, pc = add(dataDict, pc, modes)
        elif Opcodes(opcode) == Opcodes.MULTIPLY:
            dataDict, pc = multiply(dataDict, pc, modes)
        elif Opcodes(opcode) == Opcodes.JTRUE:
            pc = jtrue(dataDict, pc, modes)
        elif Opcodes(opcode) == Opcodes.JFALSE:
            pc = jfalse(dataDict, pc, modes)
        elif Opcodes(opcode) == Opcodes.LESSTHAN:
            dataDict, pc = lessthan(dataDict, pc, modes)
        elif Opcodes(opcode) == Opcodes.EQUALS:
            dataDict, pc = equals(dataDict, pc, modes)
        else:
            print("INVALID OPCODE!")
# Part 2: run five amplifiers in a feedback loop for every permutation of
# phase settings 5-9. Each amplifier keeps its own memory copy and program
# counter so it resumes where it left off; amp E's output feeds amp A until
# amp E halts. Track the best final thruster signal.
phaseSettings = permutations([5,6,7,8,9])
maxThrusterSignal = 0
for phaseConfig in phaseSettings:
    amplifierData = [dataDict.copy() for x in range(5)]
    amplifierPcs = [0 for x in range(5)]
    # Each amp's input queue starts with its phase; amp A also gets the seed 0.
    amplifierInputs = [[phaseConfig[x]] for x in range(5)]
    amplifierInputs[0].append(0)
    halted = False
    finalVal = 0
    while not halted:
        # Amps A-D: forward each output to the next amp's input queue.
        for x in range(4):
            amplifierData[x], amplifierPcs[x], value = compute(amplifierData[x], amplifierPcs[x], amplifierInputs[x])
            amplifierInputs[x+1].append(value)
        # Amp E: its output loops back to amp A (or signals halt with None).
        amplifierData[4], amplifierPcs[4], outputVal = compute(amplifierData[4], amplifierPcs[4], amplifierInputs[4])
        if outputVal is None:
            halted = True
        else:
            finalVal = outputVal
            amplifierInputs[0].append(outputVal)
    maxThrusterSignal = max(maxThrusterSignal, finalVal)
print(maxThrusterSignal)
|
#!usr/bin/python
# NOTE(review): Python 2 code (raw_input, print statement); the shebang is
# also missing the leading '/' in '/usr'.
def penis():
    # Prompt for a file, then for every line echo the line once per word with
    # that word replaced -- note each whole line is printed len(line.split())
    # times, once per contained word.
    filename = raw_input("file name?: ")
    with open(filename) as f:
        for line in f:
            for word in line.split():
                print line.replace(str(word), "penis")
penis()
|
import heapq as h
import math
import random
import time
from collections import defaultdict, deque
import numpy as np
from matplotlib import pyplot as plt
from occupancyGrid import OccupancyGrid
from utils import raytrace
class CCRA:
    def __init__(self, occ_grid, block_size_x, block_size_y):
        """Set up the planner grids from an occupancy grid.

        Args:
            occ_grid: OccupancyGrid whose .grid marks occupied cells non-zero.
            block_size_x, block_size_y: robot footprint dimensions in cells.
        """
        # Change obstacles to be -1
        # NOTE(review): dist_grid aliases occ_grid (no copy) and `grid *= -1`
        # mutates the caller's grid in place; -1 marks obstacles only if the
        # grid was 0/1-valued -- confirm both assumptions.
        self.dist_grid = occ_grid
        self.dist_grid.grid *= -1
        # oriented_occ_grid is an independent uint8 copy where every occupied
        # cell is normalized to 255.
        self.oriented_occ_grid = occ_grid.clone()
        self.oriented_occ_grid.grid = self.oriented_occ_grid.grid.astype(np.uint8)
        self.oriented_occ_grid.grid[np.nonzero(self.oriented_occ_grid.grid)] = 255
        self.block_size_x = block_size_x
        self.block_size_y = block_size_y
    def generateRandomStart(self):
        """Rejection-sample a uniformly random free (non-obstacle) cell.

        Loops until the sampled (x, y) is not marked 255 (occupied) in the
        oriented occupancy grid; never terminates on a fully occupied grid.
        """
        while True:
            result = (random.randint(0, self.dist_grid.size_x - 1),random.randint(0, self.dist_grid.size_y - 1))
            if self.oriented_occ_grid[result] != 255:
                return result
def nhood4(self, x, y):
result = []
if x >= 1 and self.dist_grid[x-1, y] != -1:
result.append((x - 1, y))
if x < self.dist_grid.size_x - 1 and self.dist_grid[x+1, y] != -1:
result.append((x + 1, y))
if y >= 1 and self.dist_grid[x, y-1] != -1:
result.append((x, y - 1))
if y < self.dist_grid.size_y - 1 and self.dist_grid[x, y+1] != -1:
result.append((x, y + 1))
return result
def nhood8(self, x, y):
result = []
if x >= 1 and self.dist_grid[x-1, y] != -1:
result.append((x - 1, y))
if x < self.dist_grid.size_x - 1 and self.dist_grid[x+1, y] != -1:
result.append((x + 1, y))
if y >= 1 and self.dist_grid[x, y-1] != -1:
result.append((x, y - 1))
if y < self.dist_grid.size_y - 1 and self.dist_grid[x, y+1] != -1:
result.append((x, y + 1))
if x >= 1 and y >= 1 and self.dist_grid[x-1, y-1] != -1:
result.append((x - 1, y - 1))
if x >= 1 and y < self.dist_grid.size_y - 1 and self.dist_grid[x-1, y+1] != -1:
result.append((x - 1, y + 1))
if x < self.dist_grid.size_x - 1 and y >= 1 and self.dist_grid[x+1, y-1] != -1:
result.append((x + 1, y - 1))
if x < self.dist_grid.size_x - 1 and y < self.dist_grid.size_y - 1 and self.dist_grid[x+1, y+1] != -1:
result.append((x + 1, y + 1))
return result
def augmentedNhood4(self, current_pos, current_layer):
x = self.block_size_x
location_offsets_unrotated = [(0, self.block_size_y),
(0, -self.block_size_y),
(self.block_size_x,0),
(-self.block_size_x, 0)]
angle = current_layer * math.pi/8
locations = []
for i, j in location_offsets_unrotated:
ox, oy = current_pos
x = current_pos[0] + i
y = current_pos[1] + j
rotated_x = int(round((x - ox) * math.cos(angle) - (y - oy) * math.sin(angle) + ox,0))
rotated_y = int(round((x - ox) * math.sin(angle) + (y - oy) * math.cos(angle) + oy,0))
# Skip if out of x-bounds
if rotated_x < 0 or rotated_x >= self.dist_grid.size_x:
continue
# Skip if out of y-bounds
if rotated_y < 0 or rotated_y >= self.dist_grid.size_x:
continue
# Only add if it is not located in an object and can be reached from current position
if self.dist_grid[rotated_x, rotated_y] != -1:
end_layer = self.endLayer(current_pos, current_layer, (rotated_x, rotated_y))
if end_layer == None:
continue
else:
locations.append(((rotated_x, rotated_y), end_layer))
return locations
def endLayer(self, current_pos, current_layer, goal_pos):
# Need to check if we can move from current pos to goal pos
# In order for two adjacent cells to be pathable
# the cells must have an unoccupied layer 1 shift apart
# Thus for a path to be valid all adjacent cells must satisfy the above
# No possible orientations can be reached at goal
if self.oriented_occ_grid[goal_pos] == 255:
return None
# Get the direct path from current to goal
traversed_cells = raytrace(*current_pos, *goal_pos)
# Create this structure so we can efficiently get the next cell
next_cell = dict()
for num, cell in enumerate(traversed_cells[:-1]):
next_cell[cell] = traversed_cells[num + 1]
# depth first search through the path to find a suitable path
dfs = []
dfs.append((current_pos, current_layer))
visited = set()
while len(dfs) > 0:
pos, layer = dfs.pop()
if (pos, layer) in visited:
continue
else:
visited.add((pos, layer))
# Return True if we have reached the goal
if pos == goal_pos:
return layer
# Skip if layer is completely blocked
if self.oriented_occ_grid[pos] == 255:
continue
next_pos = next_cell[pos]
# Check adjacent and current layers of next position to see if it can be reached from here
for i in [-1, 1, 0]:
next_layer = (layer + i) % 8
if self.oriented_occ_grid[next_pos] & 2**next_layer == 0:
# print(pos, layer, next_pos, next_layer)
dfs.append((next_pos, next_layer))
# dfs didnt find a suitable path so return false
return None
def computeDistances(self, start):
# Use dijkstra
visited = set()
prev = dict()
dist = defaultdict(lambda : float('inf'))
pq = []
prev[start] = None
dist[start] = 0
# Need to put (cost, pos) into priority queue so that pq is ordered by cost
h.heappush(pq, (0, start))
while len(pq) != 0:
current_cost, current_pos = h.heappop(pq)
# Check we havent visited this point before
if current_pos not in visited:
visited.add(current_pos)
else:
continue
for nbr in self.nhood8(*current_pos):
new_cost = current_cost + 1
if new_cost < dist[nbr]:
dist[nbr] = new_cost
prev[nbr] = current_pos
self.dist_grid[nbr] = new_cost
h.heappush(pq, (dist[nbr], nbr))
def rotatedFilledCells(self, angle, size_x=None, size_y=None):
if size_x is None and size_y is None:
half_size_x = self.block_size_x//2
half_size_y = self.block_size_y//2
else:
half_size_x = size_x//2
half_size_y = size_y//2
corners = [(0,0)] * 4
# Top left, top right, bottom right, bottom left (clockwise ordering of points)
# Corners of the block centered at 0,0
corners[0] = (-half_size_x, -half_size_y)
corners[1] = (half_size_x, -half_size_y)
corners[2] = (half_size_x, half_size_y)
corners[3] = (-half_size_x, half_size_y)
for i in range(4):
x, y = corners[i]
rotated_x = x * math.cos(angle) - y * math.sin(angle)
rotated_y = x * math.sin(angle) + y * math.cos(angle)
# print(rotated_x, rotated_y)
corners[i] = (int(round(rotated_x, 0)), int(round(rotated_y, 0)))
cells = set()
cells.update(raytrace(*corners[0], *corners[1]))
cells.update(raytrace(*corners[3], *corners[2]))
cells.update(raytrace(*corners[0], *corners[3]))
cells.update(raytrace(*corners[1], *corners[2]))
# breadth first search from 0,0 the remaining cells
bfs = deque([(0,0)])
while len(bfs) > 0:
current_cell = bfs.popleft()
# If previously visited then skip
if current_cell in cells:
continue
else:
cells.add(current_cell)
# Search the neighbours
for nbr in [(current_cell[0] + 1, current_cell[1]), (current_cell[0] - 1, current_cell[1]), (current_cell[0], current_cell[1] + 1), (current_cell[0], current_cell[1] - 1)]:
if nbr not in cells:
bfs.append(nbr)
return list(cells)
def isObstacleFrontier(self, x, y):
# Obstacle frontier occurs on occupied spaces
if self.oriented_occ_grid[x, y] == 0:
return False
if x >= 1 and self.oriented_occ_grid[x - 1, y] == 0:
return True
if x < self.oriented_occ_grid.size_x - 1 and self.oriented_occ_grid[x + 1, y] == 0:
return True
if y >= 1 and self.oriented_occ_grid[x, y - 1] == 0:
return True
if y < self.oriented_occ_grid.size_y - 1 and self.oriented_occ_grid[x, y + 1] == 0:
return True
return False
def computeCSpace(self):
# Get obstacle frontiers (+ edge frontiers)
frontier_cells = []
# Add obstacle frontier cells
for i in range(self.oriented_occ_grid.size_x):
for j in range(self.oriented_occ_grid.size_y):
if self.isObstacleFrontier(i,j):
frontier_cells.append((i, j))
# Add 1 outside edges as frontier cells for x
for i in range(self.oriented_occ_grid.size_x):
frontier_cells.append((i, -1))
frontier_cells.append((i, 100))
# Add 1 outside edges as frontier cells for y
for j in range(self.oriented_occ_grid.size_y):
frontier_cells.append((-1, j))
frontier_cells.append((100, j))
combined_offsets = dict()
# The most expensive part of inflation is iterating the frontier cells, thus
# to save time we precompute all the rotation offsets and their cumulative values
# so that we only need to iterate the frontier cells once
for layer in range(8):
single_layer_offsets = self.rotatedFilledCells(layer * math.pi/8)
for i in single_layer_offsets:
if i not in combined_offsets:
combined_offsets[i] = 2**layer
else:
combined_offsets[i] |= 2**layer
# Perform inflation on all frontier cells for all rotation angles
for x, y in frontier_cells:
for (inflate_x, inflate_y), value in combined_offsets.items():
if not self.oriented_occ_grid.inBounds(x + inflate_x, y + inflate_y):
continue
# NOTE: To save space and time we use 1 grid for all layers,
# each cell contains an 8 bit binary, where the 1 is set if
# that layer is occupied, e.g. if layers 1, 2 and 5 are occupied
# then the cell will have 00010011 = 19 in it
self.oriented_occ_grid[x + inflate_x, y + inflate_y] |= value
def getNextLayer(self, current_pos, current_layer, next_pos):
# Returns none if cell not reachable otherwise returns layer in next cell
# Rules for a cell being reachable
# 1. 1 away from current cell
# 2. 1 rotation away from any possible configuration in current
occupied_next_layers = self.oriented_occ_grid[next_pos]
# If everything in the next position is occupied then not reachable
if occupied_next_layers == (2**8 - 1):
return None
next_layer = (current_layer + 1) % 8
temp_layer = current_layer
# Check that we can rotate to specific layer and then either rotate into next cell or translate into next cell
while self.oriented_occ_grid[current_pos] & 2**temp_layer == 0:
if self.oriented_occ_grid[next_pos] & 2**temp_layer == 0:
return temp_layer
if self.oriented_occ_grid[next_pos] & 2**next_layer == 0:
return next_layer
next_layer = (next_layer + 1) % 8
temp_layer = (temp_layer + 1) % 8
# Stop once we have reached the original starting layer
if temp_layer == 2**current_layer:
break
next_layer = (current_layer - 1) % 8
temp_layer = current_layer
# Checks same as above but in opposite direction
while self.oriented_occ_grid[current_pos] & 2**temp_layer == 0:
if self.oriented_occ_grid[next_pos] & 2**temp_layer == 0:
return temp_layer
if self.oriented_occ_grid[next_pos] & 2**next_layer == 0:
return next_layer
next_layer = (next_layer - 1) % 8
temp_layer = (temp_layer - 1) % 8
if temp_layer == current_layer:
break
return None
def moveToUnvisited(self, current_pos, current_layer):
bfs = deque()
bfs.append((current_pos, current_layer))
visited = set()
prev = dict()
dist = defaultdict(lambda : float('inf'))
dist[current_pos] = 0
prev[current_pos] = None
while len(bfs) > 0:
current_pos, current_layer = bfs.popleft()
if current_pos in visited:
continue
else:
visited.add(current_pos)
for nbr in self.nhood4(*current_pos):
next_layer = self.getNextLayer(current_pos, current_layer, nbr)
# If no movement to nbr exists skip
if next_layer == None:
continue
# If we found a nonzero space then return the path to here
if self.dist_grid[nbr] != 0:
path = [(nbr, next_layer), (current_pos, current_layer)]
pos = current_pos
layer = current_layer
while prev[pos] != None:
next_pos, layer = prev[pos]
path.append((next_pos, layer))
pos = next_pos
return path[::-1]
new_dist = 1 + dist[current_pos]
if new_dist < dist[nbr]:
dist[nbr] = new_dist
prev[nbr] = (current_pos, current_layer)
bfs.append((nbr,next_layer))
return []
def getVisitedCells(self, current_pos, current_layer):
angle = current_layer * math.pi/8
offset_cells = self.rotatedFilledCells(angle)
visited = []
for i, j in offset_cells:
visited.append((current_pos[0] + i, current_pos[1] + j))
return visited
def getOccupiedCells(self, current_pos, current_layer):
occupied_size_x = self.block_size_x + 2 * (self.block_size_x//2)
occupied_size_y = self.block_size_y + 2 * (self.block_size_y//2)
angle = current_layer * math.pi/8
offset_cells = self.rotatedFilledCells(angle, occupied_size_x, occupied_size_y)
occupied = []
for i, j in offset_cells:
occupied.append((current_pos[0] + i, current_pos[1] + j))
return occupied
def getPath(self, start = None):
# Compute configuration space
self.computeCSpace()
# Generate a random start point within the cspace if no start point given
if start is None:
start = self.generateRandomStart()
# Compute distance grid
self.computeDistances(start)
# traversable_mask = self.oriented_occ_grid.grid == 255
# masked_dist_grid = np.ma.MaskedArray(self.dist_grid.grid, mask=traversable_mask)
current_pos = start
# Find the first free layer
current_layer = 0
occupied_layers = self.oriented_occ_grid[current_pos]
while occupied_layers & 1 == 1:
current_layer += 1
occupied_layers = occupied_layers >> 1
path = []
# occupied = set()
# c = 0
# Follow the path of min cost
while True:
min_cost = float('inf')
best_next_pos = None
best_next_layer = None
visited = self.getVisitedCells(current_pos, current_layer)
# occupied.update(self.getOccupiedCells(current_pos, current_layer))
# Mark visited cells as visited
for i, j in visited:
self.dist_grid[i,j] = 0
for nbr, layer in self.augmentedNhood4(current_pos, current_layer):
if self.dist_grid[nbr] > 0 and self.dist_grid[nbr] < min_cost:
min_cost = self.dist_grid[nbr]
best_next_pos = nbr
best_next_layer = layer
path.append((current_pos, current_layer))
self.dist_grid[current_pos] = 0
# c += 1
# if c:
# plt.imshow(masked_dist_grid)
# plt.pause(0.1)
# plt.clf()
# c = 0
# If the surrounding cells are all 0 (i.e. visited) then look to move to next best area
if best_next_pos == None:
movement_to_unvisited = self.moveToUnvisited(current_pos, current_layer)
# If we cant reach, return path
if len(movement_to_unvisited) == 0:
return self.process_path(path)
else:
path.extend(movement_to_unvisited)
current_pos, current_layer = path[-1]
else:
current_pos = best_next_pos
current_layer = best_next_layer
def process_path(self, path):
final_path = []
prev_pos, prev_layer = path[0]
for pos, layer in path:
# Check if we need to interpolate between two points
if abs(pos[0] - prev_pos[0]) > 1 or abs(pos[1] - prev_pos[1]) > 1:
layer_at_cell = self.interpolatePath(prev_pos, prev_layer, pos, layer)
assert layer_at_cell != None, "Could not find an intermediate path"
cells = raytrace(*prev_pos, *pos)
for c in cells:
final_path.append((c, layer_at_cell[c]))
else:
final_path.append((pos, layer))
prev_pos = pos
prev_layer = layer
return final_path
def interpolatePath(self, current_pos, current_layer, goal_pos, goal_layer):
# Get the direct path from current to goal
traversed_cells = raytrace(*current_pos, *goal_pos)
# Create this structure so we can efficiently get the next cell
next_cell = dict()
for num, cell in enumerate(traversed_cells[:-1]):
next_cell[cell] = traversed_cells[num + 1]
# depth first search through the path to find a suitable path
dfs = []
dfs.append((current_pos, current_layer))
visited = set()
layer_at_cell = dict()
while len(dfs) > 0:
pos, layer = dfs.pop()
if (pos, layer) in visited:
continue
else:
visited.add((pos, layer))
layer_at_cell[pos] = layer
# Return True if we have reached the goal
if pos == goal_pos and layer == goal_layer:
return layer_at_cell
next_pos = next_cell[pos]
# Check adjacent and current layers of next position to see if it can be reached from here
for i in [-1, 1, 0]:
next_layer = (layer + i) % 8
if self.oriented_occ_grid[next_pos] & 2**next_layer == 0:
# print(pos, layer, next_pos, next_layer)
dfs.append((next_pos, next_layer))
# dfs didnt find a suitable path so return false
return None
|
from PIL import Image, ImageChops
import numpy as np
import os
from scipy.spatial import ConvexHull
from math import sqrt
from skimage.feature import blob_log, blob_dog, blob_doh
import rawpy
# Top-level folder to scan for box images (fill in before running).
path_to_images = ''
# Folder of images whose laser points are faint green (fill in before running).
faint_green_image_path = ''
# Real-world area enclosed by the four laser points.
laser_area = 0.17 * 0.17  # m^2
def iter_through_folder_for_images(folder_path):
    """
    Iterates through a folder tree and returns any .jpg or .CR2 images found.
    :param folder_path: Top level folder path
    :return: List of paths (str) to images
    """
    images = []
    # Walk with str paths directly.  The previous implementation round-tripped
    # through bytes (os.fsencode) and decoded with str(root, 'UTF-8'), which
    # breaks on filesystems whose encoding is not UTF-8; os.walk on a str
    # path yields str components with no decode step needed.
    for root, _dirs, files in os.walk(folder_path):
        for filename in files:
            # endswith accepts a tuple of suffixes: one check for both types.
            if filename.endswith((".jpg", ".CR2")):
                images.append(os.path.join(root, filename))
    return images
def only_red(im):
    """
    Strips out all channels apart from red.
    """
    red, _green, _blue = np.array(im).T
    return Image.fromarray(red.T)
def difference_between_red_and_green(im):
    """
    Returns an image of the difference between the red and green channels.
    :param im: Original image
    :return: Difference between channels
    """
    red, green, _blue = np.array(im).T
    red_image = Image.fromarray(red.T)
    green_image = Image.fromarray(green.T)
    return ImageChops.difference(red_image, green_image)
@np.vectorize
def emphasise_difference(a, b):
    """
    Element-wise positive difference: a - b where a exceeds b, otherwise 0.
    Vectorised so it can be applied directly to numpy arrays (e.g. image data).
    """
    if a > b:
        return a - b
    return 0
@np.vectorize
def abs_difference(a, b):
    """Element-wise absolute difference between *a* and *b*."""
    difference = a - b
    return -difference if difference < 0 else difference
def emphasise_red_without_green(im):
    """
    Emphasises areas with high levels of red and less green
    """
    red, green, _blue = np.array(im).T
    return Image.fromarray(emphasise_difference(red.T, green.T))
def emphasise_green_without_red_or_blue(im):
    """
    Emphasises areas with high levels of green relative to the mean of red and blue.
    """
    red, green, blue = np.array(im).T
    emphasised = emphasise_difference(green.T, (red.T + blue.T) / 2)
    return Image.fromarray(emphasised)
def emphasise_green_without_blue(im):
    """
    Emphasises areas with high levels of green and less blue.
    """
    _red, green, blue = np.array(im).T
    return Image.fromarray(emphasise_difference(green.T, blue.T))
def emphasise_red_without_others(im):
    """
    Emphasises areas with high levels of red relative to the mean of green and blue.
    """
    red, green, blue = np.array(im).T
    emphasised = emphasise_difference(red.T, (green.T + blue.T) / 2)
    return Image.fromarray(emphasised)
def threshold_image(im, target, maximum=255, minimum=0):
    """
    Set every pixel outside [minimum, maximum] to *target*.

    :param im: Source image (PIL Image or array-like)
    :param target: Value assigned to out-of-range pixels
    :param maximum: Upper inclusive bound
    :param minimum: Lower inclusive bound
    :return: New PIL Image with out-of-range pixels replaced
    """
    data = np.array(im)
    # Vectorised boolean mask replaces the previous per-pixel Python double
    # loop, which was interpreter-level O(rows*cols) work on large photos.
    # This also generalises the function: the old loop only worked on 2-D
    # (single-channel) data, while the mask handles any array shape.
    data[(data > maximum) | (data < minimum)] = target
    return Image.fromarray(data)
# See 'http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_blob.html' for an explanation of the
# blob detection algorithms below and their various merits and demerits.
# On images from [FILE_PATH_REDACTED]
# the Laplacian of Gaussian detection algorithm identified 4 points for 97% of the images, taking almost a day for the
# whole directory
def get_convex_hull_of_blobs_log(im):
    """
    Uses the Laplacian of Gaussian blob detection algorithm from scikit-image to identify the laser points and returns
    a ConvexHull of the points found.

    When a pass does not find exactly four blobs, the detection threshold is
    nudged (down when too few, up when too many) for up to five retries.
    :param im: Image with laser points in
    :return: ConvexHull of points
    """
    threshold = .2
    remaining_attempts = 5
    # Get blobs using Laplacian of Gaussian.
    blobs_log = blob_log(im, max_sigma=10, threshold=threshold)
    # Convert sigma (3rd column) to an approximate blob radius.
    blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)  # ToDo: Discount blobs with large radii
    while len(blobs_log) != 4 and remaining_attempts > 0:
        if len(blobs_log) < 4:
            threshold -= 0.05
        else:
            threshold += 0.05
        blobs_log = blob_log(im, max_sigma=10, threshold=threshold)
        remaining_attempts -= 1
    # Exactly four laser points are required downstream.
    assert len(blobs_log) == 4, len(blobs_log)
    return ConvexHull([(b[0], b[1]) for b in blobs_log])
def get_convex_hull_of_blobs_dog(im):
    """
    Uses the Difference of Gaussian blob detection algorithm from scikit-image to identify the laser points and returns
    a ConvexHull of the points found.
    :param im: Image with laser points in
    :return: ConvexHull of points
    """
    threshold = .15
    tries = 5
    # Get blobs using Difference of Gaussian
    blobs_dog = blob_dog(im, max_sigma=30, threshold=threshold)
    # Compute radii in the 3rd column.
    blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) # ToDo: Discount blobs with large radii
    # Retry until exactly four blobs are found or the attempts run out.
    # NOTE(review): the threshold is only adjusted for counts in [2, 4) or
    # (4, 6]; for counts outside those ranges the loop re-runs blob_dog with
    # the unchanged threshold and simply burns a try — confirm this is the
    # intended "give up on wildly wrong counts" behaviour.
    while (len(blobs_dog) > 4 or 4 > len(blobs_dog) >= 0) and tries > 0:
        if 2 <= len(blobs_dog) < 4:
            threshold -= 0.05
        elif 6 >= len(blobs_dog) > 4:
            threshold += 0.05
        blobs_dog = blob_dog(im, max_sigma=30, threshold=threshold)
        tries -= 1
    # Exactly four laser points are required downstream.
    assert len(blobs_dog) == 4, len(blobs_dog)
    return ConvexHull([(blob[0], blob[1]) for blob in blobs_dog])
def get_convex_hull_of_blobs_doh(im):
    """
    Uses the Difference of Hessian blob detection algorithm from scikit-image to identify the laser points and returns
    a ConvexHull of the points found.

    A single retry with a looser (or stricter) threshold is attempted when
    the first pass finds fewer (or more) than four blobs.
    :param im: Image with laser points in
    :return: ConvexHull of points
    """
    # Get blobs using Difference of Hessian.
    blobs_doh = blob_doh(im, max_sigma=10, num_sigma=1, threshold=.005)
    # Convert sigma (3rd column) to an approximate blob radius.
    blobs_doh[:, 2] = blobs_doh[:, 2] * sqrt(2)  # ToDo: Discount blobs with large radii
    if len(blobs_doh) < 4:
        blobs_doh = blob_doh(im, max_sigma=10, num_sigma=1, threshold=.0025)
    elif len(blobs_doh) > 4:
        blobs_doh = blob_doh(im, max_sigma=10, num_sigma=1, threshold=.0075)
    # Exactly four laser points are required downstream.
    assert len(blobs_doh) == 4, len(blobs_doh)
    return ConvexHull([(b[0], b[1]) for b in blobs_doh])
if __name__ == '__main__':
    try:
        dodgy_images_paths = []
        success = 0
        # The [1:] additionally skips the very first image returned, on top
        # of the per-box '01' test-card images filtered below.
        for image_path in iter_through_folder_for_images(faint_green_image_path)[1:]:
            # Discount the first image of each box so as to discount the test cards
            if image_path.endswith('.jpg') and not image_path.endswith('01.jpg'):
                current_image = Image.open(image_path, 'r')
            # PIL cannot open .CR2 images so use 'rawpy' instead
            elif image_path.endswith('.CR2') and not image_path.endswith('01.CR2'):
                # NOTE(review): raw.postprocess() returns a numpy array, so the
                # current_image.close() call further down presumably fails on
                # this branch — verify with a .CR2 input.
                with rawpy.imread(image_path) as raw:
                    current_image = raw.postprocess()
            else:
                continue
            # Emphasise the laser points, zero out weak pixels, then work on
            # the raw RGB array.
            green_without_blue = emphasise_green_without_blue(current_image)
            green_without_blue = threshold_image(green_without_blue, 0, minimum=100)
            green_without_blue = np.array(green_without_blue.convert('RGB'))
            try:
                convex = get_convex_hull_of_blobs_log(green_without_blue)
            except AssertionError as e:
                # If exactly four laser points have not been found, spit out the path and how many were found and save
                # the emphasised image in this folder for manual inspection.
                # (The assertion message carries the blob count, hence {} with e.)
                print('Could not find exactly four laser points in image with path {}\n Found {} points'.format(image_path, e))
                out_name = image_path.split('\\')[-1]
                if out_name.endswith('.CR2'):
                    Image.fromarray(green_without_blue).save('EMPHASISED-{}.png'.format(out_name[:-4]))
                else:
                    Image.fromarray(green_without_blue).save('EMPHASISED-{}'.format(out_name[:-4]))
                dodgy_images_paths.append(image_path)
                continue
            else:
                success += 1
            # Calculate the size of the image in pixels
            size = green_without_blue.shape
            area_of_image = size[0] * size[1]
            # Calculate the area of the image overall using the area covered by the lasers in pixels and knowing that
            # in real life. Assumes the lasers are perpendicular to the seabed which is assumed to be flat.
            # Note: convex.volume results in the area of the shape, convex.area results in the perimeter (a.k.a. surface
            # area in 3D)
            area_covered_in_image = laser_area * area_of_image / convex.volume
            print(area_covered_in_image)
            current_image.close()
            # Every ten images print the accuracy
            if (success + len(dodgy_images_paths)) % 10 == 0:
                print('Percentage accuracy {}%'.format(success / (success + len(dodgy_images_paths)) * 100))
                print(dodgy_images_paths)
    # NOTE(review): if SystemExit fires before success/dodgy_images_paths are
    # bound this handler raises NameError — confirm the intended exit flow.
    except SystemExit:
        print('Percentage accuracy {}%'.format(success / (success + len(dodgy_images_paths)) * 100))
        print(dodgy_images_paths)
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
import tensorflow.keras as keras
def custom_activation(x):
    """Custom activation mapping x to 2 divided by sigmoid(x)."""
    sigmoid_value = keras.activations.sigmoid(x)
    return 2 / sigmoid_value
#keras.utils.generic_utils.get_custom_objects().update({'custom_activation': Activation(custom_activation)})
def baseline_model():
    """Build and compile the feed-forward classifier: 9 inputs -> 2 sigmoid outputs."""
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(40, activation='relu', name="layer1", input_dim=9))
    model.add(keras.layers.Dense(30, activation='relu', name="layer2"))
    model.add(keras.layers.Dense(10, activation='relu', name="layer3"))
    model.add(keras.layers.Dense(2, activation='sigmoid', name="layer5"))
    # Loss options considered: binary_crossentropy, categorical_crossentropy,
    # sparse_categorical_crossentropy — labels here are one-hot pairs.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# ---- Data loading -------------------------------------------------------
# Structured COVID admissions data; na_values=0 also treats zeros as missing.
Attributes = pd.read_csv("hm_hospitales_covid_structured_30d_train.csv", na_values=0, na_filter=True)
Outcomes = pd.read_csv("split_train_export_30d.csv")
# Template for an all-zero outcome export.
# NOTE(review): the hard-coded 1834 presumably matches the training CSV's
# row count — confirm before reusing on other data.
Output_format = {'PATIENT ID': Attributes['PATIENT ID'], 'hospital_outcome': np.zeros(1834, dtype=int)}
Output = pd.DataFrame(Output_format)
# ---- Feature preparation ------------------------------------------------
X = Attributes.drop(labels=['PATIENT ID', 'admission_datetime'], axis='columns')
# Encode categorical columns numerically, in place.
X.loc[X['sex'] == 'FEMALE', 'sex'] = 1
X.loc[X['sex'] == 'MALE', 'sex'] = 2
X.loc[X['ed_diagnosis'] == 'sx_breathing_difficulty', 'ed_diagnosis'] = 1
X.loc[X['ed_diagnosis'] == 'sx_others', 'ed_diagnosis'] = 2
# NOTE(review): flu, fever and cough all map to the same code 3 — confirm
# this grouping is intentional.
X.loc[X['ed_diagnosis'] == 'sx_flu', 'ed_diagnosis'] = 3
X.loc[X['ed_diagnosis'] == 'sx_fever', 'ed_diagnosis'] = 3
X.loc[X['ed_diagnosis'] == 'sx_cough', 'ed_diagnosis'] = 3
# Impute remaining missing values with each column's median.
X = X.fillna(X.median())
# outliers = (X - X.median()).abs() > X.std()*3
# X[outliers] = np.nan
# X.fillna(X.median(), inplace=True)
# normalization = X.values #returns a numpy array
# min_max_scaler = preprocessing.MinMaxScaler()
# normalization_scaled = min_max_scaler.fit_transform(normalization)
# X = pd.DataFrame(normalization_scaled, columns=X.columns)
# Keep the nine features the network's input_dim=9 expects.
X = X[['age', 'sex', 'ed_diagnosis', 'lab_ddimer'
, 'lab_crp', 'lab_lymphocyte_percentage', 'lab_urea', 'lab_lymphocyte', 'lab_neutrophil_percentage']]
X = X.to_numpy()
Y = Outcomes.drop(labels='PATIENT ID', axis='columns')
Y = Y.to_numpy()
Y = Y.ravel()
# ---- Train/test split and one-hot encoding ------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
# Keep the raw (non-one-hot) test labels for the confusion counts below.
origin_Ytest = Y_test
Y_train = keras.utils.to_categorical(Y_train)
Y_test = keras.utils.to_categorical(Y_test)
# model = keras.models.Sequential(
#     [
#         keras.layers.Dense(100, activation="sigmoid", name="layer1", input_dim=X_train.shape[1]),
#         keras.layers.Dense(60, activation="sigmoid", name="layer2"),
#         keras.layers.Dense(2, activation="sigmoid", name="layer")
#     ]
# )
# model = keras.models.Sequential()
# model.add(keras.layers.Dense(500, activation='relu', input_dim=X_train.shape[1]))
# model.add(keras.layers.Dense(100, activation='relu'))
# model.add(keras.layers.Dense(50, activation='relu'))
# model.add(keras.layers.Dense(1, activation='sigmoid'))
# ---- Model training ------------------------------------------------------
model = keras.wrappers.scikit_learn.KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=1)
#model.compile(optimizer='adam', loss=keras.losses.BinaryCrossentropy(), metrics=['accuracy'])
# NOTE(review): fit() passes batch_size=50 (overriding the wrapper's 5) and
# validates on the TRAINING set, not a held-out split — confirm both.
model.fit(X_train, Y_train, epochs=200, batch_size=50, validation_data=(X_train, Y_train))
Y_pred_prob = model.predict_proba(X_test)
print(Y_pred_prob)
Y_pred = model.predict(X_test)
# count = 0
# pred = np.ndarray(len(Y_pred), dtype=int)
# count = 0
# for i in range(len(Y_pred)):
#     if Y_pred[i][0] > Y_pred[i][1]:
#         pred[i] = 0
#     else:
#         count += 1
#         pred[i] = 1
# Y_pred = pred
# ---- Evaluation: confusion counts, precision/recall/F1 -------------------
Y_test = origin_Ytest
TN, FN, TP, FP = 0, 0, 0, 0
for i in range(len(Y_test)):
    if Y_test[i] == 0 and Y_pred[i] == 0:
        TN += 1
    if Y_test[i] == 1 and Y_pred[i] == 0:
        FN += 1
    if Y_test[i] == 0 and Y_pred[i] == 1:
        FP += 1
    if Y_test[i] == 1 and Y_pred[i] == 1:
        TP += 1
print("TN:", TN, ", FN:", FN, ", TP:", TP, ", FP:", FP)
precision, recall = (TP/(FP+TP)), (TP/(FN+TP))
print("precision:", precision, ", recall:", recall)
print('F1:', 2 * ((precision*recall)/(precision+recall)))
from django.contrib import admin
from .models import *
# Register every timetable model with the Django admin site.
for timetable_model in (AcademicYear, Department, Course, Semester, Teacher):
    admin.site.register(timetable_model)
#!/usr/bin/python3.6
from __future__ import division
import numpy as np
import pandas as pd
import sys
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import csv
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import classification_report
#train_file = sys.argv[1]
#train_frame = pd.read_csv(sys.argv[1])
#names = ['train_data_cleaned_new.csv']
# Per-dataset class-weight configurations, mapping class label -> weight
# ({1: positive, -1: negative}); three consecutive entries per dataset.
# NOTE(review): `weights` is not referenced by the active code below (only
# by the commented-out SVM experiment) — confirm before removing.
weights = [{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .57, -1: .43 },
{1: .57, -1: .43 },
{1: .57, -1: .43 },
{1: .4, -1: .6 },
{1: .4, -1: .6 },
{1: .4, -1: .6 },
{1: .6, -1: .4 },
{1: .6, -1: .4 },
{1: .6, -1: .4 },
{1: .43, -1: .57 },
{1: .43, -1: .57 },
{1: .43, -1: .57 },
{1: .57, -1: .43 },
{1: .57, -1: .43 },
{1: .57, -1: .43 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .45, -1: .55 },
{1: .45, -1: .55 },
{1: .45, -1: .55 },
{1: .55, -1: .45 },
{1: .55, -1: .45 },
{1: .55, -1: .45 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
{1: .5, -1: .5 },
]
# Path to the held-out test CSV (fill in before running); the training CSV
# path comes from the first command-line argument.
test_file = ''
test_frame = pd.read_csv(test_file)
# Candidate feature subsets tried in experiments; cm1 is the active one below.
cols = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','asa','sizechange']
cols1 = ['correlation','conservation','polaritychange','hydroindexchange','secondarystruc','asa','sizechange']
cols2 = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','sizechange']
cols3 = ['correlation','conservation','polaritychange','chargechange','secondarystruc','sizechange']
cm1 = ['correlation','conservation','polaritychange','chargechange']
cm2 = ['correlation','conservation','polaritychange','hydroindexchange']
cm3 = ['correlation','conservation','polaritychange','secondarystruc']
cm4 = ['correlation','conservation','polaritychange','asa']
cm5 = ['correlation','conservation','polaritychange','sizechange']
colsRes = ['class']
train_frame = pd.read_csv(sys.argv[1])
# 150/150 dataset, all parameters.
# Bug fix: DataFrame.as_matrix() was deprecated and removed from pandas
# (>=1.0); `.values` returns the same ndarray on every pandas version.
trainArr = train_frame[cm1].values
trainRes = train_frame[colsRes].values
trainRes = trainRes.ravel()
testArr = test_frame[cm1].values
testRes = test_frame[colsRes].values
testRes = testRes.ravel()
test_class = test_frame[['class']]
# Balanced random forest over the cm1 feature subset.
rf = RandomForestClassifier(max_features='auto',n_estimators=1000,n_jobs=1,min_samples_leaf=50,class_weight="balanced")
rf.fit(trainArr,trainRes)
result = rf.predict(testArr)
predicted_class = result
# Matthews correlation between the held-out labels and the predictions.
mcc = matthews_corrcoef(test_class, predicted_class)
# Bug fix: `name` was never defined (a remnant of an earlier loop over a
# `names` list); label the score with the training-file argument instead.
print(sys.argv[1] + ": " + str(mcc))
|
from flask import json
from datamanager import *
class User():
    """Lightweight holder for a WeChat contact: id, display name and avatar URL."""
    #__slots__ = ('wxid', 'name')

    def __init__(self, wxid='', name='', head_url=''):
        """Store the contact attributes; each defaults to an empty string."""
        self.wxid, self.name, self.head_url = wxid, name, head_url
|
# imports
import os
import csv
import locale
# Set locale to US so locale.currency() renders values as $x,xxx.
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')

# Path to the budget CSV: date in column 0, revenue in column 1.
path = os.path.join('Financial_Records', 'budget_data_1.csv')

with open(path, newline='') as file:
    reader = csv.reader(file, delimiter=',')
    # Skip the header row.
    next(reader)
    dates = []
    revenues = []
    for row in reader:
        dates.append(row[0])
        revenues.append(int(row[1]))

# Build the report once so the console and file outputs cannot drift apart
# (previously every line was formatted twice).
# NOTE(review): "Average Revenue Change" actually reports the average
# revenue (sum/len), not the average month-to-month change — the label is
# kept for output compatibility; confirm intent before renaming.
report_lines = [
    "Financial Analysis",
    "========================================================",
    "Total Months: " + str(len(revenues)),
    "Total Revenue: " + str(locale.currency(sum(revenues), grouping=True)),
    "Average Revenue Change: " + str(locale.currency(round(sum(revenues) / len(revenues)), grouping=True)),
    "Greatest Increase in Revenue: " + dates[revenues.index(max(revenues))] + ", " + str(locale.currency(max(revenues), grouping=True)),
    "Greatest Decrease in Revenue: " + dates[revenues.index(min(revenues))] + ", " + str(locale.currency(min(revenues), grouping=True)),
    "========================================================",
]

# Print the report to the console.
for line in report_lines:
    print(line)

# Write the same report to file.  The context manager guarantees the handle
# is closed even on a write error (the old open()/close() pair leaked it).
with open("Financial_Analysis.txt", "w+") as out:
    out.write("\n".join(report_lines))
# Read the planned gift list, then process commands until "No Money":
#   "OutOfStock <gift>"       -> blank out every entry containing <gift>
#   "Required <gift> <index>" -> place <gift> at <index> if it is in range
#   "JustInCase <gift>"       -> replace the last entry with <gift>
gifts = input().split()
command = input()
while command != "No Money":
    parts = command.split(" ")
    gift = parts[1]
    if "OutOfStock" in command:
        for position, planned in enumerate(gifts):
            if gift in planned:
                gifts[position] = "None"
    elif "Required" in command:
        target_index = int(parts[2])
        if 0 <= target_index <= len(gifts) - 1:
            gifts[target_index] = gift
    elif "JustInCase" in command:
        gifts[-1] = gift
    command = input()
# Drop the blanked-out entries and report what is left.
gifts = [g for g in gifts if g != 'None']
print(" ".join(gifts))
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/tests/dispatching.py
# <pep8 compliant>
import sys
from os.path import abspath, dirname
# Make osc4py3 available.
PACKAGE_PATH = dirname(dirname(dirname(abspath(__file__))))
if PACKAGE_PATH not in sys.path:
sys.path.insert(0, PACKAGE_PATH)
import time
import pprint
from osc4py3.oscdispatching import Dispatcher
from osc4py3.oscmethod import *
from osc4py3.oscpacketoptions import PacketOptions
from osc4py3 import oscbuildparse as obp
from testslogger import logger
print("=" * 80)
print("\nTEST DISPATCHING TO PATTERN FILTER'S FUNCTIONS\n")


# The callback.  With the OSCARG_METHODFILTER argscheme the dispatcher
# appends the matched MethodFilter as the last positional argument.
def matchfct(*args):
    """Print which filter matched and the unpacked message arguments."""
    try:
        mf = args[-1]
        args = args[:-1]
        print("===== Called matchfct() on Methodfilter({!r}…) with:".format(mf.addrpattern))
    except IndexError:
        # Only the empty-args case is expected here; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        print(r" /!\ Bug: no arg")
    for a in args:
        print("    ",str(a))
# The dispatcher.
disp = Dispatcher("global",{'logger': logger})
# Message filters creation and registration.  Five overlapping address
# patterns all route to the same callback; each filter passes itself as the
# last argument (OSCARG_METHODFILTER) so the output shows which one matched.
filter0 = MethodFilter("/*", matchfct, argscheme=OSCARG_ADDRESS + OSCARG_DATAUNPACK + OSCARG_METHODFILTER)
disp.add_method(filter0)
filter1 = MethodFilter("/just", matchfct, argscheme=OSCARG_DATAUNPACK + OSCARG_METHODFILTER)
disp.add_method(filter1)
# "//" - match at any depth.
filter2 = MethodFilter("//test", matchfct, argscheme=OSCARG_DATAUNPACK + OSCARG_METHODFILTER)
disp.add_method(filter2)
# Character-range pattern: only /just/a/test ... /just/c/test.
filter3 = MethodFilter("/just/[a-c]/test", matchfct, argscheme=OSCARG_DATAUNPACK + OSCARG_METHODFILTER)
disp.add_method(filter3)
filter4 = MethodFilter("/*/c/", matchfct, argscheme=OSCARG_DATAUNPACK + OSCARG_METHODFILTER)
disp.add_method(filter4)
# Test messages dispatching.
# A single message first, then a bundle with an immediate time tag.
msg = obp.OSCMessage("/just/a/test", None, [1, 2, 3])
print("Dispatching a message:", msg)
disp.dispatch_packet(msg, PacketOptions())
bun = obp.OSCBundle(obp.OSC_IMMEDIATELY, [
        obp.OSCMessage("/just/c/test", None, ["Hello"]),
        obp.OSCMessage("/just/d/test/here", None, [ 3.13, "is", "pi"]),
    ])
print("Dispatching a bundle:", bun)
disp.dispatch_packet(bun, PacketOptions())
|
import sys
# Card-draw probability parameter from the command line.  In the transition
# functions below, a draw that raises the total by exactly 10 has
# probability p - presumably the probability of a 10-valued card; confirm.
# (The original `global p` was a no-op at module level and is removed.)
p = float(sys.argv[1])
class State:
    """One blackjack state in the MDP.

    Attributes:
        Psum: player's hand total.
        Dsum: dealer's hand total.
        Naces: number of aces counted as 11 in the player's hand.
        isTwoCards: 1 while the player still holds the initial two cards.
        isBlackJack: 1 for a natural 21 on the first two cards.
        pair: card value of a splittable pair, 0 otherwise.
        turn: 1 once play has passed to the dealer.
    """

    def __init__(self, Psum, Dsum, Naces, isTwoCards, isBlackJack, pair, turn):
        self.Psum = Psum
        self.Dsum = Dsum
        self.Naces = Naces
        self.isTwoCards = isTwoCards
        self.isBlackJack = isBlackJack
        self.pair = pair
        self.turn = turn

    def _draw_prob(self, diff):
        """Probability that one drawn card raises a total by exactly diff.

        A gap of exactly 10 has probability p; gaps of 1-9 and 11 each have
        probability (1 - p) / 9 (11 presumably being an ace counted high -
        confirm).  Non-positive or larger gaps are impossible.
        """
        if diff <= 0:
            return 0
        if diff == 10:
            return p
        if diff < 10 or diff == 11:
            return (1 - p) / 9
        return 0

    def transition_player(self, s2, action):
        """Transition probability from this state to s2 under the player's action.

        Actions: 1 = hit, 2 = double (both draw one card), 3 = split (the
        hand restarts from half of the paired total and draws one card).
        The original duplicated identical logic for isTwoCards 0 and 1;
        drawing a card can never yield a fresh two-card state either way.
        """
        if action == 1 or action == 2:
            if s2.isTwoCards == 1:
                return 0
            return self._draw_prob(s2.Psum - self.Psum)
        if action == 3:
            return self._draw_prob(s2.Psum - (self.Psum / 2))
        # Unrecognized action (e.g. 4 = stand, resolved elsewhere): the
        # original fell off the end returning None; 0 is the safe value.
        return 0

    def transition_dealer(self, s2):
        """Probability the dealer moves from this state to s2 with one card.

        The dealer stands at 17 or more, so such states are absorbing.
        """
        if self.Dsum >= 17:
            return 0
        diff = s2.Dsum - self.Dsum
        if diff <= 0:
            return 0
        if diff == 10:
            return p
        # NOTE(review): unlike the player's transitions, a gap of exactly 1
        # is excluded here (original condition `1 < diff < 10`) - presumably
        # the dealer's ace always counts as 11; confirm.
        if 1 < diff < 10 or diff == 11:
            return (1 - p) / 9
        return 0

    def reward(self):
        """Terminal reward once the dealer has finished drawing.

        0 while the dealer is below 17 (non-terminal), +1 if the dealer
        busts or is outdrawn, -1 if the dealer wins, 0 on a push.  Assumes
        the player's total is already <= 21 - TODO confirm in the callers.
        """
        if self.Dsum < 17:
            return 0
        elif self.Dsum > 21:
            return 1
        else:
            if self.Psum > self.Dsum:
                return 1
            elif self.Psum < self.Dsum:
                return -1
            else:
                return 0
# Enumerate every reachable game state.  The original contained an
# unresolved git merge conflict here (<<<<<<< / ======= / >>>>>>>), a
# syntax error; resolved to the PEP 8-formatted side (both sides were
# functionally identical).
all_states = []
# Hard two-card totals 5-19, dealer card j + 2 (2-11).
for i in range(5, 20):
    for j in range(10):
        all_states += [State(i, j + 2, 0, 1, 0, 0, 0)]
# Two-card hands with one ace counted as 11 (totals 13-20).
for i in range(2, 10):
    for j in range(10):
        all_states += [State(i + 11, j + 2, 1, 1, 0, 0, 0)]
# Paired two-card hands 2-2 through 10-10 (pair field records the rank).
for i in range(2, 11):
    for j in range(10):
        all_states += [State(2 * i, j + 2, 0, 1, 0, i, 0)]
# Pair of aces: total 12 with two aces counted as 11.
for j in range(10):
    all_states += [State(12, j + 2, 2, 1, 0, 1, 0)]
# Post-hit (multi-card) totals 3-21.
for i in range(3, 22):
    for j in range(10):
        all_states += [State(i, j + 2, 0, 0, 0, 0, 0)]
# Natural blackjack.
for j in range(10):
    all_states += [State(21, j + 2, 1, 1, 1, 0, 0)]
# Busted totals 22-30.
for i in range(22, 31):
    for j in range(10):
        all_states += [State(i, j + 2, 0, 0, 0, 0, 0)]

# Regroup by dealer card: all_states_Dval[j] holds the 62 states whose
# dealer card is j + 2 (states were appended in dealer-card-major order).
all_states_Dval = [[] for i in range(10)]
for i in range(62):
    for j in range(10):
        all_states_Dval[j] += [all_states[i * 10 + j]]
# Dealer value iteration: for each fixed player total a (4-21), compute the
# expected reward of standing against every dealer total 4-26.
# (A second unresolved merge conflict lived in the doubling loop below; it
# is resolved to the side whose debug prints stay commented out, matching
# the commented copy that followed the conflict.)
values = []
values2 = []
for a in range(4, 22):
    dealer_states = []
    for i in range(4, 27):
        dealer_states += [State(a, i, 0, 0, 0, 0, 1)]
    dealer_values0 = [0] * (len(dealer_states))
    dealer_values1 = [0] * (len(dealer_states))
    # 100 sweeps of value iteration over the dealer's one-card-draw chain.
    for i in range(100):
        for j in range(len(dealer_states)):
            s = 0
            for k in range(len(dealer_states)):
                s += dealer_states[j].transition_dealer(dealer_states[k]) * (dealer_states[k].reward() + dealer_values0[k])
            dealer_values1[j] = s
        for j in range(len(dealer_states)):
            dealer_values0[j] = dealer_values1[j]
    values += [dealer_values1]

# values2 doubles every standing value (payoff table for the Double action).
for i in range(len(values)):
    v = []
    for j in values[i]:
        v += [2 * j]
    values2 += [v]
# for i in values:
#     print(i)
# Player value iteration: 10 sweeps over all states, choosing between
# Hit (s1) and Stand (s4).  Double and Split were computed by code that is
# disabled, so only actions 1 and 4 can be selected.
player_values0 = [0] * (len(all_states))
player_values1 = [0] * (len(all_states))
player_actions = [1] * (len(all_states))  # 1 Hit, 2 Double, 3 Split, 4 Stand
for i in range(10):
    print(i)  # progress indicator, one line per sweep
    for a in range(10):
        for j in range(len(all_states_Dval[a])):
            if all_states_Dval[a][j].Psum <= 21:
                s1 = 0
                s4 = 0
                # Expected value of hitting; busted successors contribute -1.
                # (The original duplicated this identical loop for the
                # two-card and multi-card cases; merged here.)
                for k in range(len(all_states_Dval[a])):
                    if all_states_Dval[a][k].Psum <= 21:
                        s1 += all_states_Dval[a][j].transition_player(all_states_Dval[a][k], 1) * (player_values0[62 * a + k])
                    else:
                        s1 -= all_states_Dval[a][j].transition_player(all_states_Dval[a][k], 1)
                # Expected value of standing, from the dealer iteration.
                s4 = values[all_states_Dval[a][j].Psum - 4][all_states_Dval[a][j].Dsum - 2]
                candidates = [s1, s4]
                player_values1[62 * a + j] = max(candidates)
                player_actions[62 * a + j] = candidates.index(max(candidates)) + 1
                # Debug trace for the hard-6 two-card hands.
                if all_states_Dval[a][j].Psum == 6 and all_states_Dval[a][j].isTwoCards == 1:
                    print("s4 is ", s4, " s1 is ", s1, " and ", a, "-", j)
                    print(candidates.index(max(candidates)) + 1)
    for j in range(len(all_states)):
        player_values0[j] = player_values1[j]

# Collect the two-card, non-blackjack states and their optimal actions.
two_cards_states = []
two_cards_values = []
two_cards_actions = []
for i in range(len(all_states)):
    if all_states[i].isTwoCards == 1 and all_states[i].isBlackJack == 0:
        two_cards_states += [all_states[i]]
        two_cards_values += [player_values1[i]]
        two_cards_actions += [player_actions[i]]

actions = []
for i in range(len(two_cards_actions)):
    if two_cards_actions[i] == 1:
        actions += ["H" + str(two_cards_states[i].Psum)]
    else:
        actions += ["S" + str(two_cards_states[i].Psum)]
# Print the strategy table, ten columns (dealer cards 2-11) per row.
# BUG FIX: the original printed actions[i + 1] twice and never actions[i + 2].
for i in range(0, len(actions), 10):
    print(actions[i], " ", actions[i + 1], " ", actions[i + 2], " ", actions[i + 3], " ", actions[i + 4], " ", actions[i + 5], " ", actions[i + 6], " ", actions[i + 7], " ", actions[i + 8], " ", actions[i + 9])
from flask import Flask, request, jsonify
# custom modules
from helpers import get_stock_all, get_stock_one, is_valid
app = Flask(__name__)


@app.route("/")
def index():
    """Root endpoint; returns a simple JSON greeting (health-check style)."""
    greeting = "hello"
    return jsonify(greeting)
@app.route("/stocks")
def stocks():
    """Return every stock as JSON; 500 with a message if retrieval fails."""
    try:
        return jsonify(get_stock_all())
    except Exception as ex:
        print(ex)
        return jsonify("Stocks could not be retrieved."), 500
@app.route("/stock/<stock_code>")
def stock(stock_code):
    """Return price data for one stock.

    Optional query args (checked by is_valid): a 'from'/'to' date range.
    Responds 400 on an invalid range, 500 on any other failure.
    """
    try:
        args = request.args
        if args:
            if not is_valid(args):
                return jsonify({"error": "Please provide correct range."}), 400
        data = get_stock_one(stock_code=stock_code.upper(), args=args)
        # Use .get() so a missing 'from'/'to' key yields None instead of a
        # KeyError (the bare args['from'] turned that case into a 500).
        return jsonify({
            "stock_code": stock_code.upper(),
            "prices": data,
            "from": args.get('from') if args else None,
            "to": args.get('to') if args else None})
    except Exception as ex:
        print(ex)
        return jsonify("Stock information could not be retrieved."), 500
# Run the development server when executed directly (not under a WSGI host).
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=True)
#!/usr/bin/python
import time
import sys
import select
print "Start loop, type 'exit' to exit"
while True:
print "Reading..."
i,o,e = select.select([sys.stdin],[],[],0.0001)
for s in i:
if s == sys.stdin:
input = sys.stdin.readline().strip()
if (input == "exit"):
sys.exit()
else:
print "Wrote '"+input+"'"
time.sleep(1) |
# -*- coding: utf-8 -*-
"""Tests for Time zone information files (TZif)."""
import unittest
from dtformats import tzif
from tests import test_lib
class TimeZoneInformationFileTest(test_lib.BaseTestCase):
  """Time zone information file (TZif) tests."""

  # pylint: disable=protected-access

  def testDebugPrintFileHeader(self):
    """Tests the _DebugPrintFileHeader function."""
    writer = test_lib.TestOutputWriter()
    tz_file = tzif.TimeZoneInformationFile(output_writer=writer)

    header_map = tz_file._GetDataTypeMap('tzif_file_header')

    header = header_map.CreateStructureValues(
        format_version=0x32,
        number_of_leap_seconds=1,
        number_of_local_time_types=2,
        number_of_standard_time_indicators=3,
        number_of_transition_times=4,
        number_of_utc_time_indicators=5,
        signature=b'TZif',
        time_zone_abbreviation_strings_size=6,
        unknown1=(
            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e'))

    tz_file._DebugPrintFileHeader(header)

  # TODO: add tests for _DebugPrintTransitionTimeIndex
  # TODO: add tests for _DebugPrintTransitionTimes

  def testReadFileHeader(self):
    """Tests the _ReadFileHeader function."""
    writer = test_lib.TestOutputWriter()
    tz_file = tzif.TimeZoneInformationFile(output_writer=writer)

    path = self._GetTestFilePath(['localtime.tzif'])
    self._SkipIfPathNotExists(path)

    with open(path, 'rb') as file_object:
      tz_file._ReadFileHeader(file_object)

  # TODO: add tests for _ReadLeapSecondRecords
  # TODO: add tests for _ReadLocalTimeTypesTable
  # TODO: add tests for _ReadStandardTimeIndicators
  # TODO: add tests for _ReadTransitionTimeIndex
  # TODO: add tests for _ReadTimeZoneAbbreviationStrings
  # TODO: add tests for _ReadTimeZoneInformation32bit
  # TODO: add tests for _ReadTimeZoneInformation64bit
  # TODO: add tests for _ReadTransitionTimes32bit
  # TODO: add tests for _ReadTransitionTimes64bit
  # TODO: add tests for _ReadUTCTimeIndicators

  def testReadFileObject(self):
    """Tests the ReadFileObject function."""
    writer = test_lib.TestOutputWriter()
    tz_file = tzif.TimeZoneInformationFile(
        debug=True, output_writer=writer)

    path = self._GetTestFilePath(['localtime.tzif'])
    self._SkipIfPathNotExists(path)

    tz_file.Open(path)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
  unittest.main()
|
__author__ = 'bhathiyap'
class Foo(tuple):
    # Tuple subclass carrying a label: Foo("add", 2, "+", 3) stores
    # (2, "+", 3) as the tuple contents and "add" as the .label attribute.
    def __new__(cls, _, *args):
        # tuple is immutable, so its contents must be chosen in __new__;
        # the first positional argument (the label) is excluded here.
        return super(Foo, cls).__new__(cls, tuple(args))
    def __init__(self, label_string, *_):
        # __init__ receives the same arguments as __new__; only the label
        # is consumed - the rest were already stored by __new__.
        self.label = label_string
if __name__ == '__main__':
    # Demo: tuple contents come from the trailing args, label from the first.
    foo = Foo("add", 2, "+", 3)
    print foo        # -> (2, '+', 3)
    print foo.label  # -> add
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.