text stringlengths 8 6.05M |
|---|
import tensorflow as tf
import numpy as np

# Logistic-regression experiment on the XOR dataset (TensorFlow 1.x graph API).
# NOTE(review): a single-layer sigmoid model cannot actually learn XOR; this
# preserves the original experiment rather than changing the model.
xy = np.loadtxt('xor.txt', unpack=True)
x_data = xy[0:-1]  # every row but the last: input features
y_data = xy[-1]    # last row: binary labels
print(x_data)
print(y_data)

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# One weight per feature row, initialised uniformly in [-1, 1).
W = tf.Variable(tf.random_uniform([1, len(x_data)], -1., 1.))

h = tf.matmul(W, X)
hypothesis = tf.div(1., 1. + tf.exp(-h))  # sigmoid
# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
# Fix: the learning rate was wrapped in tf.Variable for no reason; a plain
# Python float feeds GradientDescentOptimizer identically and avoids an
# extra (trainable) variable in the graph.
learning_rate = 0.01
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(1000):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

    # Round predictions to {0, 1} and compare against the labels.
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correct_prediction, accuracy],
                   feed_dict={X: x_data, Y: y_data}))
    print("Accuracy : ", accuracy.eval({X: x_data, Y: y_data}))
|
# Grid geometry constants (units: blocks). Exact game semantics are not
# visible in this chunk.
SAFE_ZONE = 7           # NOTE(review): presumably a protected margin in blocks — confirm against game code
WIDTH_IN_BLOCKS = 31    # playfield width, in blocks
HEIGHT_IN_BLOCKS = 12   # playfield height, in blocks
|
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-03 21:44:14
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-03 21:53:36
#2.键盘输入多个人名保存到一个列表中,如果里面有重复的则提示此姓名已经存在
def func():
    """Read names from stdin until 'q'/'Q' is entered; reject duplicates.

    Returns:
        list[str]: the unique names entered, in input order.
    """
    name = []
    while True:
        temp = input("输入名字(输入'q'退出):")
        if temp.lower() == "q":
            # Bug fix: the original had an unreachable `break` after this
            # `return`; the dead statement is removed. The redundant
            # trailing `continue`s in the other branches are dropped too.
            return name
        elif temp in name:
            print("重复输入!")
        else:
            name.append(temp)


def main():
    """Collect names interactively, then print the list."""
    name = func()
    print(name)
    print("记录完毕!")


if __name__ == "__main__":
    main()
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import math
# Build fixed-size (4 channels x 100 timesteps) price/arrival tensors from a
# directory of per-series CSV files and save a 70/30 train/test split as .npy.
directory = os.fsencode('../data/csv/')
list_df=[]
# Load every non-empty CSV in the directory.
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    df=pd.read_csv(os.path.join('../data/csv/', filename))
    if(df.shape[0]!=0):
        list_df.append(df)
#df=pd.read_csv("../data/csv/Commodity1_price_updated_('place1', 'Place0', 'type0','item14').csv")
print("Done Reading Dataframes")
# Split in directory order (no shuffling).
total_size=len(list_df)
train_size=int(total_size*0.7)
test_size=total_size-train_size
X_train = np.zeros((train_size, 4, 100))
Y_train = np.zeros((train_size,1))
print('Total Size',total_size)
print('Train Size',train_size)
print('Test Size',test_size)
print('Constructing Training Set')
for i in tqdm(range(0,train_size)):
    # Use at most the last 100 rows of each series.
    step_count=list_df[i].shape[0]
    if(step_count>100):
        step_count=100
    #print(i, list_df[i].head(2))
    #print(step_count)
    # Redundant guard: empty frames were already filtered out above.
    if(step_count==0):
        continue
    # Left-align the (4, step_count) slice; remaining columns stay zero.
    X_train[i, :, :step_count] = list_df[i].tail(step_count)[['ModalPrice','MinimumPrice','MaximumPrice','Arrival']].transpose()
    # NOTE(review): the label is taken with iloc[[step_count-1]] — counted
    # from the *start* of the frame — while the features come from
    # tail(step_count) (the end). Looks inconsistent; confirm whether
    # iloc[[-1]] was intended.
    Y_train[i,0]=list_df[i].iloc[[step_count-1]]['Label']
print('done TrainingSet')
X_test = np.zeros((test_size, 4, 100))
Y_test = np.zeros((test_size,1))
print('Constructing Test set')
for i in tqdm(range(0,test_size)):
    step_count=list_df[i+train_size].shape[0]
    if(step_count>100):
        step_count=100
    X_test[i, :, :step_count] = list_df[i+train_size].tail(step_count)[['ModalPrice','MinimumPrice','MaximumPrice','Arrival']].transpose()
    # NOTE(review): same iloc-vs-tail question as the training loop.
    Y_test[i,0]=list_df[i+train_size].iloc[[step_count-1]]['Label']
np.save('../data/TrainingSet/' + 'X_train.npy', X_train)
np.save('../data/TrainingSet/' + 'y_train.npy', Y_train)
np.save('../data/TestSet/'+ 'X_test.npy', X_test)
np.save('../data/TestSet/' + 'y_test.npy', Y_test)
|
from functools import lru_cache
from typing import Dict
def fib1(n: int) -> int:
    """Naive recursive Fibonacci.

    Bug fix: the original had no base case at all, so any call recursed
    until RecursionError. The standard ``n < 2`` base case is added,
    matching fib4/fib5 in this file.
    """
    if n < 2:
        return n
    return fib1(n - 1) + fib1(n - 2)
def fib2(n: int) -> int:
    """Plain recursive Fibonacci.

    Bug fix: the original base case was ``if n <= 2: return n``, which
    yields fib2(2) == 2 and fib2(3) == 3 instead of the Fibonacci values
    1 and 2. The base case now matches fib4/fib5 (``n < 2``).
    """
    if n < 2:
        return n
    return fib2(n - 1) + fib2(n - 2)
# Memoisation cache, pre-seeded with the two Fibonacci base cases.
memo: Dict[int, int] = {0: 0, 1: 1}


def fib3(n: int) -> int:
    """Return the n-th Fibonacci number, caching results in `memo`."""
    try:
        return memo[n]
    except KeyError:
        result = fib3(n - 1) + fib3(n - 2)
        memo[n] = result
        return result
@lru_cache(maxsize=None)
def fib4(n: int) -> int:
    """Return the n-th Fibonacci number (recursion memoised by lru_cache)."""
    return n if n < 2 else fib4(n - 1) + fib4(n - 2)
def fib5(n: int) -> int:
    """Iterative Fibonacci: O(n) time, O(1) space.

    Improvement: the local previously named ``next`` shadowed the builtin;
    it is renamed to ``current``. The Portuguese comment is translated.
    """
    if n == 0:
        return n
    last = 0
    current = 1
    for _ in range(1, n):
        # Slide the window one step: current absorbs last + current.
        last, current = current, last + current
    return current
def fib6(n):
    """Yield the Fibonacci sequence from index 0 up to index n (n + 1 values).

    Improvement: the local previously named ``next`` shadowed the builtin;
    it is renamed to ``current``.
    """
    yield 0
    if n > 0:
        yield 1
        last = 0
        current = 1
        for _ in range(1, n):
            last, current = current, last + current
            yield current
if __name__ == '__main__':
    # Demo: print the first 51 Fibonacci numbers via the generator version.
    for i in fib6(50):
        print(i)
from __future__ import division
import sys
import csv
import pandas as pd
import numpy as np
import re
import math
"""
fill missing age data
"""
if __name__ == '__main__':
    # Average age per honorific ("Mr.", "Mrs.", ...); rows whose honorific
    # is not in `known` fall back to a per-sex average.
    known = ['Miss.', 'Mrs.', 'Mr.', 'Master.']
    age_grp = {'Miss.': [], 'Mrs.': [], 'Mr.': [], 'Master.': [], 'female': [], 'male': []}
    from_csv = pd.read_csv('data/all_data.csv')
    for index, row in from_csv.iterrows():
        dumb = row['Name'].split(" ")
        for prefix in dumb:
            # Only rows with a known age contribute to the averages.
            if "." in prefix and not math.isnan(float(row['Age'])):
                if prefix in known:
                    age_grp[prefix].append(float(row['Age']))
                else:
                    age_grp[row['Sex']].append(float(row['Age']))
                break
    # Calculate the average of each group.
    # Bug fix: the original called `reduce` without importing it (NameError
    # on Python 3); sum()/len() is equivalent and needs no import.
    age_grp_avg = {}
    for key in age_grp:
        age_grp_avg[key] = sum(age_grp[key]) / len(age_grp[key])
    # Fill the missing ages.
    for i in range(0, len(from_csv)):
        dumb = from_csv['Name'][i].split(" ")
        for prefix in dumb:
            if "." in prefix and math.isnan(float(from_csv['Age'][i])):
                if prefix in known:
                    # Bug fix: write with .loc instead of chained indexing,
                    # which pandas does not guarantee to write through.
                    from_csv.loc[i, 'Age'] = str(age_grp_avg[prefix])
                else:
                    # Bug fix: the original looked up `row['Sex']` — a stale
                    # variable left over from the first loop — instead of the
                    # current row's sex.
                    from_csv.loc[i, 'Age'] = str(age_grp_avg[from_csv['Sex'][i]])
                break
    # Store output in all_data_1.csv.
    from_csv.to_csv('data/all_data_1.csv', mode='w', index=False)
from django.urls import path
from .views import home,product_single,category_product,about,contact,SearchView
# URL routing table for this app; the route names are referenced elsewhere
# (templates/reverse()) so they must not change.
urlpatterns = [
    path('',home,name='home'),
    path('about/',about, name='about'),
    # NOTE(review): the route name 'contact_dat' looks like a typo for
    # 'contact_data' — confirm what templates reverse before renaming.
    path('contact/',contact, name='contact_dat'),
    path('product/<int:id>/',product_single,name='product_single'),
    path('category/<int:id>/<slug:slug>',category_product,name='category_product'),
    path('search/',SearchView,name='SearchView')
]
import torch
from fairseq.data import data_utils
import numpy as np
from fairseq.data.language_pair_dataset import FairseqDataset
from tqdm import tqdm
def collate(
    samples, pad_idx, eos_idx, word_max_length, left_pad_source=True, left_pad_target=False,
    input_feeding=True,
):
    """Collate a list of sample dicts into one padded mini-batch.

    Besides the usual fairseq word-level tensors, this also pads each
    sample's per-word character indices into a
    (batch, sentence_len, word_max_length) tensor under 'src_char_tokens'.
    """
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Standard fairseq word-level padding/merging.
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx, eos_idx, left_pad, move_eos_to_beginning,
        )

    def collate_char_tokens(values, pad_idx, sentence_length,
                            word_max_length, eos_idx=None, left_pad=False,
                            move_eos_to_beginning=False):
        """Convert a list of 1d tensors into a padded 2d tensor."""
        size = word_max_length
        res = values[0][0].new(len(values), sentence_length, size).fill_(pad_idx)

        def copy_tensor(src, dst):
            assert dst.numel() == src.numel(), "{} != {}".format(dst.numel(), src.numel())
            if move_eos_to_beginning:
                assert src[-1] == eos_idx
                dst[0] = eos_idx
                dst[1:] = src[:-1]
            else:
                dst.copy_(src)

        for i, line in enumerate(values):
            for j, v in enumerate(line):
                # Over-long words are truncated keeping their *last* chars.
                if len(v) > word_max_length:
                    v = v[-word_max_length:]
                # Words are right-aligned in the sentence dimension and,
                # when left_pad is set, right-aligned within each word slot.
                copy_tensor(v, res[i][sentence_length - len(line) + j][size - len(v):] if left_pad else
                            res[i][sentence_length - len(line) + j][:len(v)])
        return res

    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    src_char_tokens = collate_char_tokens(values=[s['source_char'] for s in samples],
                                          pad_idx=pad_idx,
                                          word_max_length=word_max_length,
                                          sentence_length=src_tokens.size(-1),
                                          eos_idx=eos_idx,
                                          left_pad=left_pad_source,
                                          move_eos_to_beginning=False)
    # sort by descending source length
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)
    src_char_tokens = src_char_tokens.index_select(0, sort_order)

    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None:
        target = merge('target', left_pad=left_pad_target)
        target = target.index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                'target',
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
            )
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s['source']) for s in samples)

    batch = {
        'id': id,
        'nsentences': len(samples),
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_char_tokens': src_char_tokens,
            'src_lengths': src_lengths,
        },
        'target': target,
    }
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
class SpellCorrectRawDataset(FairseqDataset):
    """Spell-correction dataset built from two parallel raw text files.

    Each source line is encoded both as word indices (via ``dict``) and as
    per-word character indices (via ``char_dict``); targets are word
    indices only.
    """

    def __init__(self, src_file_path, tgt_file_path, dict, char_dict, word_max_length, left_pad_source=True,
                 left_pad_target=False,
                 max_source_positions=10240, max_target_positions=1024, input_feeding=True,
                 remove_eos_from_source=False, append_eos_to_target=False, shuffle=True):
        # Read file raw
        with open(src_file_path, 'r', encoding='utf-8') as src_file:
            with open(tgt_file_path, 'r', encoding='utf-8') as tgt_file:
                src_lines = src_file.read().split('\n')
                tgt_lines = tgt_file.read().split('\n')
        # check number sample input == number sample output
        assert len(src_lines) == len(tgt_lines)
        print('Done read raw file. Start convert to indices\n')
        src_indices = []
        tgt_indices = []
        src_char_indices = []
        for line in tqdm(src_lines, desc='Convert source'):
            src_indices.append(dict.encode_line(line, add_if_not_exist=False).long())
            # The trailing '' adds one extra empty "word" — NOTE(review):
            # presumably so the char sequence lines up with the EOS token
            # appended by encode_line; confirm against the model.
            words = line.split() + ['']
            src_char_indices.append([char_dict.encode_line(' '.join(list(word)),
                                                           add_if_not_exist=False).long() for word in words])
        for line in tqdm(tgt_lines, desc='Convert target'):
            tgt_indices.append(dict.encode_line(line, add_if_not_exist=False).long())
        self.src = src_indices
        self.tgt = tgt_indices
        self.src_char = src_char_indices
        self.word_max_length = word_max_length
        self.src_sizes = np.array([line.size(0) for line in src_indices])
        self.tgt_sizes = np.array([line.size(0) for line in tgt_indices])
        # check input size == target size
        # NOTE(review): this asserts every pair has *equal* token counts
        # (consistent with 1:1 spell correction) — confirm for your data.
        assert ((self.src_sizes == self.tgt_sizes).sum()) == len(src_lines)
        self.src_dict = dict
        self.tgt_dict = dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.shuffle = shuffle
        self.input_feeding = input_feeding
        self.remove_eos_from_source = remove_eos_from_source
        self.append_eos_to_target = append_eos_to_target

    def __getitem__(self, index):
        """Return one sample dict: id, source, source_char, target."""
        tgt_item = self.tgt[index] if self.tgt is not None else None
        src_item = self.src[index]
        src_char_item = self.src_char[index]
        # Append EOS to end of tgt sentence if it does not have an EOS and remove
        # EOS from end of src sentence if it exists. This is useful when we use
        # use existing datasets for opposite directions i.e., when we want to
        # use tgt_dataset as src_dataset and vice versa
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]
                # Drop the matching extra char-level entry as well.
                src_char_item = self.src_char[index][:-1]
        return {
            'id': index,
            'source': src_item,
            'source_char': src_char_item,
            'target': tgt_item,
        }

    def __len__(self):
        return len(self.src)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.
        Args:
            samples (List[dict]): samples to collate
        Returns:
            dict: a mini-batch with the following keys:
                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:
                    - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                      the source sentence of shape `(bsz, src_len)`. Padding will
                      appear on the left if *left_pad_source* is ``True``.
                    - `src_lengths` (LongTensor): 1D Tensor of the unpadded
                      lengths of each source sentence of shape `(bsz)`
                    - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
                      tokens in the target sentence, shifted right by one position
                      for input feeding/teacher forcing, of shape `(bsz,
                      tgt_len)`. This key will not be present if *input_feeding*
                      is ``False``. Padding will appear on the left if
                      *left_pad_target* is ``True``.
                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the left if *left_pad_target* is ``True``.
        """
        return collate(
            samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
            left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
            input_feeding=self.input_feeding, word_max_length=self.word_max_length
        )

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        if self.tgt_sizes is not None:
            # Sort by target length first, then (stable) by source length,
            # so similar lengths end up batched together.
            indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]
        return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]

    @property
    def supports_prefetch(self):
        # Plain Python lists have no 'supports_prefetch' attribute, so this
        # evaluates to False for this dataset as constructed.
        return (
            getattr(self.src, 'supports_prefetch', False)
            and (getattr(self.tgt, 'supports_prefetch', False) or self.tgt is None)
        )

    def prefetch(self, indices):
        self.src.prefetch(indices)
        if self.tgt is not None:
            self.tgt.prefetch(indices)
|
def is_partition(G, communities): ...
|
# Lookup tables mapping suit/value indices to their display names.
suits = ["heart", "diamond", "spade", "club"]
cards = ["two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "jack", "queen", "king", "ace"]


class Card:
    """A playing card identified by indices into `suits` and `cards`."""

    def __init__(self, _suit, _value):
        self.suit = _suit    # index into `suits`
        self.value = _value  # index into `cards`

    def get_suit(self):
        """Return the suit name for this card."""
        return suits[self.suit]

    def get_value(self):
        """Return the value name for this card."""
        return cards[self.value]


def generate_deck():
    """Build a full 52-card deck.

    Bug fix: the original iterated ``range(0, 3)``, producing only three
    suits (39 cards) and omitting "club" entirely; both ranges now derive
    from the lookup tables.
    """
    deck = []
    for s in range(len(suits)):
        for c in range(len(cards)):
            deck.append(Card(s, c))
    return deck
import os
import numpy as np
import yaml
import math
from PIL import Image
import pandas as pd
import skimage
from skimage.morphology import remove_small_objects, remove_small_holes, disk
from skimage.filters import rank, threshold_otsu
from skimage.transform import resize
import scipy.ndimage as ndimage
from scipy.ndimage.morphology import (binary_dilation, binary_erosion,
binary_fill_holes, binary_closing)
if os.name == 'nt':
os.environ['PATH'] = "C:\\tools\\openslide\\openslide-win64-20171122\\bin" + ";" + os.environ['PATH']
import openslide as openslide
class TileGenerator:
    """Cut a whole-slide image (WSI) into JPEG tiles using OpenSlide.

    When ``use_tiss_mask`` is True, tiling is restricted to tissue regions
    using a low-resolution mask. NOTE(review): ``generate_tiles`` reads
    ``self.mask``, which is only created by ``morphology()`` — the expected
    call order appears to be load_ds_wsi -> stain_entropy_otsu ->
    morphology -> generate_tiles; confirm with callers.
    """

    def __init__(self,
                 input_dir=os.getcwd(),
                 file_name='Test_file.svs',
                 output_dir=os.path.join(os.getcwd(), 'tiles'),
                 tile_objective_value=20,
                 tile_read_size_w=3000,
                 tile_read_size_h=3000,
                 nr_tiles=None,
                 tiss_level=4,
                 use_tiss_mask=True,
                 tiss_cutoff=0.1):
        # NOTE(review): np.int is removed in NumPy >= 1.24; int() would be
        # the safe replacement (left unchanged here). Also note the
        # os.getcwd() defaults are evaluated once at import time.
        self.input_dir = input_dir
        self.file_name = os.path.basename(file_name)
        if output_dir is not None:
            # One output sub-directory per slide, named after the file.
            self.output_dir = os.path.join(output_dir, self.file_name)
            if not os.path.isdir(self.output_dir):
                os.makedirs(self.output_dir, exist_ok=True)
        self.openslide_obj = openslide.OpenSlide(filename=os.path.join(self.input_dir, self.file_name))
        self.tile_objective_value = np.int(tile_objective_value)
        self.tile_read_size = np.array([tile_read_size_w, tile_read_size_h])
        self.objective_power = np.int(self.openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
        self.level_count = self.openslide_obj.level_count
        self.nr_tiles = nr_tiles            # None => tile exhaustively; int => sample randomly
        self.use_tiss_mask = use_tiss_mask
        self.tiss_level = tiss_level        # pyramid level used for the tissue mask
        self.tiss_cutoff = tiss_cutoff      # minimum tissue fraction for a tile to be kept
        self.level_dimensions = self.openslide_obj.level_dimensions
        self.level_downsamples = self.openslide_obj.level_downsamples

    def read_region(self, start_w, start_h, end_w, end_h, level=0):
        """Read a rectangular region from the slide at the given level."""
        openslide_obj = self.openslide_obj
        im_region = openslide_obj.read_region([start_w, start_h], level, [end_w - start_w, end_h - start_h])
        return im_region

    def generate_tiles(self):
        """Write JPEG tiles plus an Output.csv manifest to self.output_dir.

        Exhaustive grid scan when self.nr_tiles is None; otherwise
        nr_tiles randomly-positioned tiles (re-drawing positions until one
        passes the tissue cutoff).
        """
        openslide_obj = self.openslide_obj
        tile_objective_value = self.tile_objective_value
        tile_read_size = self.tile_read_size
        if self.use_tiss_mask:
            # Downsample factor of the pyramid level the mask was built at.
            ds_factor = self.level_downsamples[self.tiss_level]
        if self.objective_power == 0:
            self.objective_power = np.int(openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
        # How much larger a level-0 read must be to land at the tile objective.
        rescale = np.int(self.objective_power / tile_objective_value)
        openslide_read_size = np.multiply(tile_read_size, rescale)
        slide_dimension = openslide_obj.level_dimensions[0]
        slide_h = slide_dimension[1]
        slide_w = slide_dimension[0]
        # NOTE(review): tile_read_size is [w, h] but index 0 is used as the
        # height here — harmless for square tiles, suspicious otherwise.
        tile_h = openslide_read_size[0]
        tile_w = openslide_read_size[1]
        iter_tot = 0
        output_dir = self.output_dir
        data = []
        if self.nr_tiles == None:
            # Exhaustive scan over a regular, non-overlapping grid.
            for h in range(int(math.ceil((slide_h - tile_h) / tile_h + 1))):
                for w in range(int(math.ceil((slide_w - tile_w) / tile_w + 1))):
                    start_h = h * tile_h
                    end_h = (h * tile_h) + tile_h
                    start_w = w * tile_w
                    end_w = (w * tile_w) + tile_w
                    # Clamp the last row/column to the slide border.
                    if end_h > slide_h:
                        end_h = slide_h
                    if end_w > slide_w:
                        end_w = slide_w
                    #
                    if self.use_tiss_mask:
                        # Tissue fraction of this tile's footprint on the
                        # low-resolution mask.
                        tiss = self.mask[int(start_h/ds_factor):int(start_h/ds_factor)+int(openslide_read_size[1]/ds_factor), int(start_w/ds_factor):int(start_w/ds_factor)+int(openslide_read_size[0]/ds_factor)]
                        tiss_frac = np.sum(tiss)/np.size(tiss)
                    else:
                        tiss_frac = 1
                    if tiss_frac > self.tiss_cutoff:
                        im = self.read_region(start_w, start_h, end_w, end_h)
                        format_str = 'Tile%d: start_w:%d, end_w:%d, start_h:%d, end_h:%d, width:%d, height:%d'
                        print(format_str % (
                            iter_tot, start_w, end_w, start_h, end_h, end_w - start_w, end_h - start_h), flush=True)
                        # Drop the alpha channel returned by read_region.
                        temp = np.array(im)
                        temp = temp[:, :, 0:3]
                        im = Image.fromarray(temp)
                        if rescale != 1:
                            im = im.resize(size=[np.int((end_w - start_w) / rescale), np.int((end_h - start_h) / rescale)],
                                           resample=Image.BICUBIC)
                        img_save_name = 'Tile' + '_' \
                                        + str(tile_objective_value) + '_' \
                                        + str(int(start_w/rescale)) + '_' \
                                        + str(int(start_h/rescale))\
                                        + '.jpg'
                        im.save(os.path.join(output_dir, img_save_name), format='JPEG')
                        data.append([iter_tot, img_save_name, start_w, end_w, start_h, end_h, im.size[0], im.size[1]])
                        iter_tot += 1
        else:
            # Random sampling: keep drawing grid positions until one passes
            # the tissue cutoff, nr_tiles times.
            for i in range(self.nr_tiles):
                condition = 0
                while condition == 0:
                    h = np.random.randint(0,int(math.ceil((slide_h - tile_h) / tile_h + 1)))
                    w = np.random.randint(0,int(math.ceil((slide_w - tile_w) / tile_w + 1)))
                    start_h = h * tile_h
                    end_h = (h * tile_h) + tile_h
                    start_w = w * tile_w
                    end_w = (w * tile_w) + tile_w
                    if end_h > slide_h:
                        end_h = slide_h
                    if end_w > slide_w:
                        end_w = slide_w
                    #
                    if self.use_tiss_mask:
                        tiss = self.mask[int(start_h/ds_factor):int(start_h/ds_factor)+int(openslide_read_size[1]/ds_factor), int(start_w/ds_factor):int(start_w/ds_factor)+int(openslide_read_size[0]/ds_factor)]
                        tiss_frac = np.sum(tiss)/np.size(tiss)
                    else:
                        tiss_frac = 1
                    if tiss_frac > self.tiss_cutoff:
                        im = self.read_region(start_w, start_h, end_w, end_h)
                        format_str = 'Tile%d: start_w:%d, end_w:%d, start_h:%d, end_h:%d, width:%d, height:%d'
                        print(format_str % (
                            iter_tot, start_w, end_w, start_h, end_h, end_w - start_w, end_h - start_h), flush=True)
                        temp = np.array(im)
                        temp = temp[:, :, 0:3]
                        im = Image.fromarray(temp)
                        if rescale != 1:
                            im = im.resize(size=[np.int((end_w - start_w) / rescale), np.int((end_h - start_h) / rescale)],
                                           resample=Image.BICUBIC)
                        img_save_name = 'Tile' + '_' \
                                        + str(tile_objective_value) + '_' \
                                        + str(int(start_w/rescale)) + '_' \
                                        + str(int(start_h/rescale))\
                                        + '.jpg'
                        im.save(os.path.join(output_dir, img_save_name), format='JPEG')
                        data.append([iter_tot, img_save_name, start_w, end_w, start_h, end_h, im.size[0], im.size[1]])
                        iter_tot += 1
                        condition = 1
        # Manifest of every tile written.
        df = pd.DataFrame(data,
                          columns=['iter', 'Tile_Name', 'start_w', 'end_w', 'start_h', 'end_h', 'size_w', 'size_h'])
        df.to_csv(os.path.join(output_dir, 'Output.csv'), index=False)

    def slide_thumbnail(self):
        """Save a small thumbnail of the slide as SlideThumb.jpg."""
        openslide_obj = self.openslide_obj
        tile_objective_value = self.tile_objective_value
        output_dir = self.output_dir
        file_name = self.file_name
        if self.objective_power == 0:
            self.objective_power = np.int(openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
        _, file_type = os.path.splitext(file_name)
        rescale = np.int(self.objective_power / tile_objective_value)
        slide_dimension = openslide_obj.level_dimensions[0]
        # Dimensions at the tile objective, then a further 1/16 reduction.
        slide_dimension_20x = np.array(slide_dimension) / rescale
        thumb = openslide_obj.get_thumbnail((int(slide_dimension_20x[0] / 16), int(slide_dimension_20x[1]/16)))
        thumb.save(os.path.join(output_dir, 'SlideThumb.jpg'), format='JPEG')

    def param(self, save_mode=True, output_dir=None, output_name=None):
        """Collect slide/tiling parameters; save as YAML or return a dict.

        When save_mode is True the dict is written to
        <output_dir>/<output_name> (defaults: self.output_dir/param.yaml)
        and nothing is returned; otherwise the dict is returned.
        """
        input_dir = self.input_dir
        if output_dir is None:
            output_dir = self.output_dir
        if output_name is None:
            output_name = 'param.yaml'
        if self.objective_power == 0:
            self.objective_power = np.int(self.openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
        objective_power = self.objective_power
        slide_dimension = self.openslide_obj.level_dimensions[0]
        tile_objective_value = self.tile_objective_value
        rescale = np.int(objective_power / tile_objective_value)
        filename = self.file_name
        tile_read_size = self.tile_read_size
        level_count = self.level_count
        level_dimensions = self.level_dimensions
        level_downsamples = self.level_downsamples
        param = {
            'input_dir':input_dir,
            'output_dir': output_dir,
            'objective_power': objective_power,
            'slide_dimension': slide_dimension,
            'rescale': rescale,
            'tile_objective_value': tile_objective_value,
            'filename': filename,
            'tile_read_size': tile_read_size.tolist(),
            'level_count': level_count,
            'level_dimensions': level_dimensions,
            'level_downsamples': level_downsamples
        }
        if save_mode:
            with open(os.path.join(output_dir, output_name), 'w') as yaml_file:
                yaml.dump(param, yaml_file)
        else:
            return param

    def load_ds_wsi(self):
        """Load a downsampled RGB image of the slide into self.ds_im."""
        openslide_obj = self.openslide_obj
        # Clamp the requested level to the levels actually present.
        if self.level_count-1 < self.tiss_level:
            self.tiss_level = self.level_count-1
        slide_dimension = openslide_obj.level_dimensions[self.tiss_level]
        slide_h = slide_dimension[1]
        slide_w = slide_dimension[0]
        im = self.read_region(0, 0, slide_w, slide_h, level=self.tiss_level)
        # Drop the alpha channel.
        temp = np.array(im)
        temp = temp[:, :, 0:3]
        im = Image.fromarray(temp)
        self.ds_im = im

    def stain_entropy_otsu(self):
        """Build a raw tissue mask (self.otsu) via stain entropy + Otsu."""
        im_copy = self.ds_im.copy()
        hed = skimage.color.rgb2hed(im_copy)  # convert colour space
        hed = (hed * 255).astype(np.uint8)
        h = hed[:, :, 0]
        e = hed[:, :, 1]
        d = hed[:, :, 2]
        selem = disk(4)  # structuring element
        # calculate entropy for each colour channel
        h_entropy = rank.entropy(h, selem)
        e_entropy = rank.entropy(e, selem)
        d_entropy = rank.entropy(d, selem)
        # Combine: haematoxylin + eosin entropy, minus DAB entropy.
        entropy = np.sum([h_entropy, e_entropy], axis=0) - d_entropy
        # otsu threshold
        threshold_global_otsu = threshold_otsu(entropy)
        self.otsu = entropy > threshold_global_otsu

    def morphology(self):
        """Clean self.otsu into the final binary tissue mask (self.mask)."""
        # Join together large groups of small components ('salt')
        radius = int(8)
        selem = disk(radius)
        dilate = binary_dilation(self.otsu, selem)
        radius = int(16)
        selem = disk(radius)
        erode = binary_erosion(dilate, selem)
        rm_holes = remove_small_holes(
            erode,
            area_threshold=int(40)**2,
            connectivity=1,
        )
        closing = binary_closing(rm_holes, selem)
        rm_objs = remove_small_objects(
            closing,
            min_size=int(120)**2,
            connectivity=1,
        )
        dilate = binary_dilation(rm_objs, selem)
        rm_holes = remove_small_holes(
            dilate,
            area_threshold=int(40)**2,
            connectivity=1,
        )
        self.mask = ndimage.binary_fill_holes(rm_holes)
        self.mask = self.mask.astype('uint8')
|
from gevent import * |
#-*- coding:utf-8 -*-
import httplib,urllib
from html import search_result
class ishare_client():
    """Python 2 HTTP client for ishare.iask.sina.com.cn search/download."""

    def __init__(self):
        self.cookie = ""                                        # session cookie captured by reset_cookie()
        self.base_url = "ishare.iask.sina.com.cn"
        self.search_url = "http://ishare.iask.sina.com.cn/search.php?key=%s&format=%s"
        self.down_page_url = "/download.php?fileid="
        self.referer_base = "http://ishare.iask.sina.com.cn/"
        self.connecter = httplib.HTTPConnection(self.base_url)  # reused for all requests

    def __get_header_with_cookie(self):
        # NOTE(review): self.referer is never assigned anywhere in this
        # class (only self.referer_base is), so calling this would raise
        # AttributeError — confirm whether referer_base was intended.
        return {"Referer":self.referer,"Cookie":self.cookie}

    def __get_header_no_cookie(self):
        # NOTE(review): same undefined self.referer as above.
        return {"Referer":self.referer}

    def reset_cookie(self):
        """Fetch the home page and capture the session cookie from it."""
        self.connecter.request("GET",self.referer_base)
        res = self.connecter.getresponse()
        header_list = res.getheaders()
        for header in header_list:
            if header[0] == "set-cookie":
                self.cookie = header[1]
        print res.getheaders()
        print self.cookie

    def search(self, filter, type=""):
        """Run a keyword search; return the raw HTML response body."""
        self.reset_cookie()
        if filter == None or filter == "":
            return
        header = {"Referer":self.referer_base,"Cookie":self.cookie}
        self.connecter.request("GET", self.search_url %(filter, type), headers=header)
        print self.search_url %(filter, type)
        res = self.connecter.getresponse()
        r = res.read()
        #print r
        return r

    def get_download_url(self,file_id):
        """Resolve the redirect target for a file id and download it."""
        url = self.down_page_url + str(file_id)
        header = {"Referer":self.referer_base}
        self.connecter.request("POST", url, headers=header)
        res = self.connecter.getresponse()
        # The real file URL comes back in the Location header.
        locat = res.getheader("location")
        print res.read()
        print locat
        self.download_file(locat)

    def download_file(self, url):
        # Delegates the actual fetch-to-disk to the helper module.
        import ishare_download
        ishare_download.save2disk(url)

    def __get_ishare_download_url(self):
        pass
if __name__ == "__main__":
    # Manual smoke test: search for "test" PDFs and print the parsed results.
    ishare = ishare_client()
    #ishare.reset_cookie()
    result = ishare.search("test","pdf")
    r = search_result()
    r.feed(result)
    r.show_result()
    #ishare.get_download_url(35630056)
|
#!/usr/bin/python3
# Demo: print each city together with its position in the list.
cities = ["san fran", "new york", "chicago", "dallas"]
for position, town in enumerate(cities):
    print(position, town)
|
#!/usr/bin/python
import sys
import os
import urllib2
import config
def get_url(dist, section, arch):
    """Build the Packages.gz URL for a (dist, section, arch) triple."""
    base = config.BASE_URLS[config.REPOS[dist]]
    return '%s/dists/%s/%s/binary-%s/Packages.gz' % (base, dist, section, arch)
def get_target(data_dir, dist, section, arch):
    """Return the local directory mirroring <dist>/<section>/binary-<arch>."""
    arch_dir = 'binary-%s' % arch
    return os.path.join(data_dir, dist, section, arch_dir)
def download(dist, section, arch, data_dir):
    """Fetch one Packages.gz into the local mirror tree.

    Returns True on success and False when the server answers with an
    HTTP error status; other errors propagate.
    """
    url = get_url(dist, section, arch)
    target = get_target(data_dir, dist, section, arch)
    path = os.path.join(target, 'Packages.gz')
    # Create the target directory lazily.
    if not os.path.exists(target):
        os.makedirs(target)
    try:
        fin = urllib2.urlopen(url)
        fout = open(path, 'wb')
        fout.write(fin.read())
        fout.close()
        fin.close()
        return True
    except urllib2.HTTPError:
        return False
def main():
    """Download Packages.gz for every (dist, section, arch) combination.

    Usage: script <data_dir>; progress is printed per download.
    """
    data_dir = sys.argv[1]
    total = len(config.DISTS) * len(config.SECTIONS) * len(config.ARCHS)
    count = 0
    for dist in config.DISTS:
        for section in config.SECTIONS:
            for arch in config.ARCHS:
                status = download(dist, section, arch, data_dir)
                # Python 2 and-or idiom for a conditional expression.
                status = status and 'OK' or 'FAIL'
                count += 1
                print '%s of %s: %s %s %s -> %s' % \
                    (count, total, dist, section, arch, status)


if __name__ == '__main__':
    main()
|
# Generated by Django 2.2.2 on 2019-06-18 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the `language` CharField to Book."""

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='language',
            field=models.CharField(default='English', help_text='Enter the language the book is written in.', max_length=100),
        ),
    ]
|
class LinkedList:
    """Singly linked list over externally-created node objects.

    Nodes are any objects exposing ``value`` and ``next`` attributes; the
    list itself only stores the head node in ``self.node``.
    """

    def __init__(self, node):
        self.node = node  # head of the list

    def add(self, next_node):
        """Append `next_node` at the tail (O(n) walk from the head)."""
        node = self.node
        while node.next:
            node = node.next
        node.next = next_node

    def remove(self, value):
        """Unlink the first node whose ``value`` equals `value`.

        Bug fix: the original always rewired ``self.node.next`` (the
        head's successor) instead of the predecessor's ``next``, so
        removing any node past the second corrupted the list; it also
        kept scanning after replacing the head. Both are fixed, and only
        the first match is removed.
        """
        node = self.node
        if node.value == value:
            self.node = node.next
            return
        while node.next:
            if node.next.value == value:
                node.next = node.next.next
                return
            node = node.next
|
from hage.Multiply import Main4
from hage.Minus import Main2
from hage.Plus import Main1
from hage.Division import Main3
class Main(Main1,Main2,Main3,Main4):
    """Calculator facade combining the Plus/Minus/Division/Multiply mixins."""

    def __init__(self, q1, q2, v):
        self.q1 = q1  # first operand
        self.q2 = q2  # second operand
        self.v = v    # selected operation code (1..4)

    def show(self):
        # Prints operand sum plus the operation code.
        # NOTE(review): including `v` in the total looks suspicious — confirm
        # against the mixin implementations.
        vvod = (self.q1 + self.q2 + self.v)
        print(vvod)
# Implement input/output, wire everything together, and run each mixin's
# condition handler. (Original comments were in Russian; translated.)
myclass = Main(int (input('Введите число 1: ')),
               int (input('Введите число 2: ')),
               int(input('Какую операцию вы хотите выполнить? \n 1 Сложение \n 2 Вычитание \n 3 Деление \n 4 Умножение \n')))
myclass.condition()
myclass.condition2()
myclass.condition3()
myclass.condition4()
# The mixin classes provide the actual operation implementations.
#!/usr/bin/env python2.6
#
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
from optparse import OptionParser
from sys import stderr,exit
import subprocess,re, string, socket
# optparse help text; %prog is replaced by the script name at runtime.
usage = """%prog [options] command
Commands:
create_db: creates a MySQL database and read/write grants for the VOMS service
based on the given options
drop_db: drops a MySQL database
grant_rw_access: Creates a read/write grant on an existing VOMS database for the user
specified in the options
grant_ro_access: Creates a read-only grant on an existing VOMS database for the user
specified in the options
"""
# Module-level parser; options are registered by setup_cl_options().
parser = OptionParser(usage=usage)
def setup_cl_options():
    """Register all command-line options on the module-level parser."""
    parser.add_option("--dbauser", dest="dbauser", help="Sets MySQL administrator user to USER", metavar="USER", default="root")
    parser.add_option("--dbapwd", dest="dbapwd", help="Sets MySQL administrator password to PWD", metavar="PWD")
    parser.add_option("--dbapwdfile", dest="dbapwdfile", help="Reads MySQL administrator password from FILE", metavar="FILE")
    parser.add_option("--dbusername", dest="username", help="Sets the VOMS MySQL username to be created as USER", metavar="USER")
    parser.add_option("--vomshost", dest="voms_host", help="Sets the HOST where VOMS is running", metavar="HOST")
    parser.add_option("--dbpassword", dest="password", help="Sets the VOMS MySQL password for the user to be created as PWD", metavar="PWD")
    parser.add_option("--dbname", dest="dbname", help="Sets the VOMS database name to DBNAME", metavar="DBNAME")
    parser.add_option("--dbhost",dest="host", help="Sets the HOST where MySQL is running", metavar="HOST", default="localhost")
    parser.add_option("--dbport",dest="port", help="Sets the PORT where MySQL is listening", metavar="PORT", default="3306")
    parser.add_option("--mysql-command", dest="command", help="Sets the MySQL command to CMD", metavar="CMD", default="mysql")
def error_and_exit(msg):
    """Print msg to stderr and terminate with exit status 1 (Python 2)."""
    print >>stderr, msg
    exit(1)
def build_mysql_command_preamble(options):
    """Build the `mysql` command-line string (command, admin user, host, port).

    The admin password comes from --dbapwdfile (read verbatim) or --dbapwd.
    NOTE(review): the password file is read with .read(), so a trailing
    newline would end up embedded in the command line — confirm the file
    is written without one.
    """
    if options.dbapwdfile:
        try:
            dbapwd = open(options.dbapwdfile).read()
        except IOError as e:
            error_and_exit(e.strerror)
    else:
        dbapwd = options.dbapwd
    if not dbapwd:
        # No password: omit -p entirely.
        mysql_cmd = "%s -u%s --host %s --port %s" % (options.command,
                                                     options.dbauser,
                                                     options.host,
                                                     options.port)
    else:
        mysql_cmd = "%s -u%s -p%s --host %s --port %s" % (options.command,
                                                          options.dbauser,
                                                          dbapwd,
                                                          options.host,
                                                          options.port)
    return mysql_cmd
def db_exists(options):
    """Return whether the database named options.dbname exists.

    Feeds "use <dbname>;" to a mysql subprocess: exit status 0 means it
    exists; MySQL error 1049 (unknown database) means it does not; any
    other failure aborts the program via error_and_exit.
    """
    mysql_cmd = build_mysql_command_preamble(options)
    mysql_proc = subprocess.Popen(mysql_cmd, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        print >>mysql_proc.stdin, "use %s;" % options.dbname
        mysql_proc.stdin.close()
    except IOError as e:
        err_msg = mysql_proc.stderr.read()
        error_and_exit("Error checking database existence: %s. %s" % (e, err_msg))
    status = mysql_proc.wait()
    if status == 0:
        return True
    else:
        err_msg = mysql_proc.stderr.read()
        # ERROR 1049 = "unknown database" => it simply does not exist.
        match = re.match("ERROR 1049", string.strip(err_msg))
        if match:
            return False
        else:
            error_and_exit("Error checking schema existence: %s" % err_msg)
def create_db(options):
print "Creating database %s" % options.dbname
if db_exists(options):
print "Schema for database %s already exists, will not create it..." % options.dbname
else:
mysql_cmd = build_mysql_command_preamble(options)
## The database is not there, let's create it
mysql_proc = subprocess.Popen(mysql_cmd, shell=True, stdin=subprocess.PIPE)
print >>mysql_proc.stdin, "create database %s;" % options.dbname
mysql_proc.stdin.close()
status = mysql_proc.wait()
if status != 0:
error_and_exit("Error creating MySQL database %s: %s" % (options.dbname, mysql_proc.stdout.read()))
grant_rw_access(options)
print "Done."
def drop_db(options):
print "Dropping database %s" % options.dbname
if not db_exists(options):
print "Schema for database %s does not exist, exiting..." % options.dbname
exit(1)
else:
mysql_cmd = build_mysql_command_preamble(options)
mysql_proc = subprocess.Popen(mysql_cmd, shell=True, stdin=subprocess.PIPE)
print >>mysql_proc.stdin, "drop database %s;" % options.dbname
mysql_proc.stdin.close()
status = mysql_proc.wait()
if status != 0:
error_and_exit("Error dropping MySQL database %s: %s" % (options.dbname, mysql_proc.stdout.read()))
print "Done."
def grant_rw_access(options):
print "Granting user %s read/write access on database %s" % (options.username, options.dbname)
mysql_cmd = build_mysql_command_preamble(options)
if len(options.username) > 16:
error_and_exit("MySQL database accont names cannot be longer than 16 characters.")
if db_exists(options):
mysql_proc = subprocess.Popen(mysql_cmd, shell=True, stdin=subprocess.PIPE)
hosts = ['localhost','localhost.%',socket.gethostname(),socket.getfqdn()]
if options.voms_host:
hosts = [options.voms_host,options.voms_host + '.%']
for host in hosts:
print >>mysql_proc.stdin, "grant all privileges on %s.* to '%s'@'%s' identified by '%s' with grant option;" % (options.dbname,
options.username,
host,
options.password)
print >>mysql_proc.stdin, "flush privileges;"
mysql_proc.stdin.close()
status = mysql_proc.wait()
if status != 0:
error_and_exit("Error granting read/write access to user %s on database %s: %s" % (options.username,
options.dbname,
mysql_proc.stdout.read()))
def grant_ro_access():
print "Granting user %s read-only access on database %s" % (options.username, options.dbname)
mysql_cmd = build_mysql_command_preamble(options)
if len(options.username) > 16:
error_and_exit("MySQL database accont names cannot be longer than 16 characters.")
if db_exists(options):
mysql_proc = subprocess.Popen(mysql_cmd, shell=True, stdin=subprocess.PIPE)
hosts = ['localhost','localhost.%',socket.gethostname(),socket.getfqdn()]
if options.voms_host:
hosts = [options.voms_host,options.voms_host + '.%']
for host in hosts:
print >>mysql_proc.stdin, "grant select on %s.* to '%s'@'%s' identified by '%s';" % (options.dbname,
options.username,
host,
options.password)
print >>mysql_proc.stdin, "flush privileges;"
mysql_proc.stdin.close()
status = mysql_proc.wait()
if status != 0:
error_and_exit("Error granting read-only access to user %s on database %s: %s" % (options.username,
options.dbname,
mysql_proc.stdout.read()))
# Dispatch table: command-line verb -> handler. Every handler is invoked as
# handler(options) by main().
supported_commands = {'create_db': create_db,
                      'drop_db': drop_db,
                      'grant_rw_access': grant_rw_access,
                      'grant_ro_access': grant_ro_access}
# Options that every command requires (enforced in check_args_and_options).
required_options = [ "username", "password", "dbname"]
def check_mysql_command(options):
    """Verify that the configured mysql client binary can be executed,
    aborting with a helpful message when it cannot."""
    probe_cmd = "%s --version" % options.command
    probe = subprocess.Popen(probe_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = probe.communicate()
    combined_output = "%s %s" % (out, err)
    status = probe.wait()
    if status != 0:
        error_and_exit("Error executing %s: %s. Check your MySQL client installation." % (options.command, combined_output.strip()))
def check_args_and_options(options, args):
    """Validate the single command verb and the presence of the mandatory
    --dbusername/--dbpassword/--dbname options; abort on any problem."""
    if len(args) != 1 or args[0] not in supported_commands.keys():
        error_and_exit("Please specify a single command among the following:\n\t%s" % "\n\t".join(supported_commands.keys()))
    missing_options = []
    # Pair each required value with the flag name reported to the user.
    for value, flag in ((options.username, "--dbusername"),
                        (options.password, "--dbpassword"),
                        (options.dbname, "--dbname")):
        if not value:
            missing_options.append(flag)
    if len(missing_options) != 0:
        error_and_exit("Please specify the following missing options:\n\t%s" % "\n\t".join(missing_options))
def main():
    # Parse the command line, validate it, verify the mysql client works,
    # then dispatch to the requested command handler.
    setup_cl_options()
    (options, args) = parser.parse_args()
    check_args_and_options(options,args)
    check_mysql_command(options)
    supported_commands[args[0]](options)

if __name__ == '__main__':
    main()
from __future__ import division
import numpy as np
import numpy.random as npr
from scipy.stats import multivariate_normal as mvn
from svae.lds.synthetic_data import generate_data, rand_lds
from lds_inference_alt import filter_forward
from test_util import bmat
# Fix the RNG so the randomized test cases below are reproducible.
npr.seed(0)

### util
def get_n(lds):
    """Latent-state dimension of an LDS parameter tuple (rows of mu_init)."""
    first_param = lds[0]
    return first_param.shape[0]
def lds_to_big_Jh(data, lds):
    """Build the dense natural parameters (J, h) of the Gaussian posterior
    over the whole latent trajectory given the LDS and the observations.

    Returns J of shape (T*n, T*n) and h of shape (T*n,).
    """
    mu_init, sigma_init, A, sigma_states, C, sigma_obs = lds
    p, n = C.shape
    T = data.shape[0]
    # Linear term: per-step observation evidence, plus the prior mean on x_0.
    h = C.T.dot(np.linalg.solve(sigma_obs, data.T)).T
    h[0] += np.linalg.solve(sigma_init, mu_init)
    # Quadratic term: block-diagonal observation precision, plus the x_0 prior.
    J = np.kron(np.eye(T), C.T.dot(np.linalg.solve(sigma_obs, C)))
    J[:n, :n] += np.linalg.inv(sigma_init)
    ss_inv = np.linalg.inv(sigma_states)
    # Each consecutive pair (x_t, x_{t+1}) contributes this 2n x 2n block
    # coming from the dynamics x_{t+1} = A x_t + noise.
    pairblock = bmat([[A.T.dot(ss_inv).dot(A), -A.T.dot(ss_inv)],
                      [-ss_inv.dot(A), ss_inv]])
    for t in range(0, n*(T-1), n):
        J[t:t+2*n, t:t+2*n] += pairblock
    return J.reshape(T*n, T*n), h.reshape(T*n)
def dense_filter(data, lds):
    """Brute-force filtering reference: for each prefix length t, solve the
    dense information form over x_{1:t} and keep the marginal of the final
    state. Returns (filtered_mus, filtered_sigmas)."""
    n = get_n(lds)
    T = data.shape[0]

    def last_state_marginal(t):
        # Posterior over the whole prefix, then read off the last n block.
        J, h = lds_to_big_Jh(data[:t], lds)
        mean = np.linalg.solve(J, h)
        cov = np.linalg.inv(J)
        return mean[-n:], cov[-n:, -n:]

    marginals = [last_state_marginal(t) for t in range(1, T+1)]
    return zip(*marginals)
def dense_loglike(data, lds):
    """Marginal log-likelihood of the data computed densely: form the latent
    prior over the whole trajectory, push it through the stacked observation
    matrix, and evaluate one big multivariate normal."""
    T = data.shape[0]
    mu_init, sigma_init, A, sigma_states, C, sigma_obs = lds
    # Zero out C so lds_to_big_Jh yields the latent prior (no data evidence).
    prior_lds = mu_init, sigma_init, A, sigma_states, np.zeros_like(C), sigma_obs
    J, h = lds_to_big_Jh(data, prior_lds)
    mu_x = np.linalg.solve(J, h)
    sigma_x = np.linalg.inv(J)
    bigC = np.kron(np.eye(T), C)
    mu_y = np.dot(bigC, mu_x)
    sigma_y = np.dot(np.dot(bigC, sigma_x), bigC.T) + np.kron(np.eye(T), sigma_obs)
    return mvn.logpdf(data.ravel(), mu_y, sigma_y)
### tests
def test_filter():
    # Nose-style generator test (Python 2: uses xrange): compare the
    # message-passing filter against the dense brute-force reference on
    # randomly generated LDS instances.
    def check_filter(data, lds):
        (filtered_mus, filtered_sigmas), loglike = filter_forward(data, *lds)
        filtered_mus2, filtered_sigmas2 = dense_filter(data, lds)
        loglike2 = dense_loglike(data, lds)
        assert all(map(np.allclose, filtered_mus, filtered_mus2))
        assert all(map(np.allclose, filtered_sigmas, filtered_sigmas2))
        assert np.isclose(loglike, loglike2)
    for _ in xrange(10):
        # Random latent dim n, observation dim p, and sequence length T.
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10,20)
        lds = rand_lds(n, p)
        _, data = generate_data(T, *lds)
        yield check_filter, data, lds
|
# -*- coding: utf-8 -*-
# Алиса владеет интересной информацией, которую хочет заполучить Боб.
# Алиса умна, поэтому она хранит свою информацию в зашифрованном файле.
# У Алисы плохая память, поэтому она хранит все свои пароли в открытом виде в текстовом файле.
# Бобу удалось завладеть зашифрованным файлом с интересной информацией и файлом с паролями, но не смог понять какой из паролей ему нужен. Помогите ему решить эту проблему.
# Алиса зашифровала свою информацию с помощью библиотеки simple-crypt.
# Она представила информацию в виде строки, и затем записала в бинарный файл результат работы метода simplecrypt.encrypt.
# Вам необходимо установить библиотеку simple-crypt, и с помощью метода simplecrypt.decrypt узнать, какой из паролей служит ключом для расшифровки файла с интересной информацией.
# Ответом для данной задачи служит расшифрованная интересная информация Алисы.
# Файл с информацией https://stepic.org/media/attachments/lesson/24466/encrypted.bin
# Файл с паролями https://stepic.org/media/attachments/lesson/24466/passwords.txt
# Примечание:
# Для того, чтобы считать все данные из бинарного файла, можно использовать, например, следующий код:
# with open("encrypted.bin", "rb") as inp:
# encrypted = inp.read()
# Примечание:
# Работа с файлами рассмотрена в следующем уроке, поэтому вы можете вернуться к этой задаче после просмотра следующего урока.
from simplecrypt import encrypt, decrypt
def decrypt_file(file_name, key):
    """Decrypt *file_name* with *key* via simple-crypt and print the result."""
    with open(file_name, 'rb') as inp:
        ciphertext = inp.read()
    plaintext = decrypt(key, ciphertext)
    print("decrypted text: %s" % plaintext)
# Try every candidate password: simple-crypt raises on a wrong key, so the
# first password that decrypts cleanly reveals the plaintext.
# Use a context manager so the password file is closed deterministically.
with open('passwords.txt') as pw_file:
    passwords = [line.rstrip('\n') for line in pw_file]
for candidate in passwords:
    try:
        print(candidate)
        decrypt_file("encrypted.bin", candidate)
    except Exception:
        # Narrowed from a bare except so Ctrl-C still interrupts the scan;
        # a wrong password raises DecryptionException -- just keep trying.
        pass
# // RVrF2qdMpoq6Lib
# // Alice loves Bob |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The above encoding declaration is required and the file must be saved as UTF-8
################################################################################
# Практичне завдання № 3.2
# Програма має розраховувати числа послідовності Фібоначчі.
# Послідовність Фібоначчі - це послідовність чисел, в якій кожний елемент
# дорівнює сумі двох попередніх. При цьому нульовий елемент вважається
# за 0, а перший 1.
# Отже, сама послідовність виглядає наступним чином:
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, …
# Вхідні дані: ціле невід'ємне число n. (Передається в програму як аргумент командного рядка.)
# Результат роботи: значення n-го числа послідовності Фібоначчі.
# Будь ласка, не використовуйте рекурсію.
# Наприклад
# Вхідні дані: 0
# Приклад виклику: python lab3_2.py 0
# Результат: 0
# Вхідні дані: 10
# Приклад виклику: python lab3_2.py 10
# Результат: 55
import sys
a = 0
b = 1
n = int(sys.argv[1])
i = 0
for i in range(n):
a, b = b, a + b
print a
# Another example
"""
import sys
x = int(sys.argv[1])
fib_prev = 0
fib_curr = 1
if x == 0:
print fib_prev
else:
if x == 1:
print fib_curr
else:
for i in range(x-1):
fib_new = fib_prev + fib_curr
fib_prev = fib_curr
fib_curr = fib_new
print fib_curr
"""
|
import numpy as np
from keras.callbacks import Callback
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers import Dense
from scipy.stats import logistic
from copy import deepcopy, copy
from sklearn.metrics import r2_score, explained_variance_score
from keras import backend as K
from bokeh.plotting import output_notebook, show
from keras.constraints import max_norm, non_neg, min_max_norm, unit_norm
from ..plot import permutation_test
from .BaseModel import BaseModel
from ..utils import YpredCallback, binary_metrics, binary_evaluation
class NN_LinearSigmoid(BaseModel):
    """2 Layer linear-logistic neural network using Keras.

    A linear hidden layer feeding a single sigmoid output unit, trained with
    SGD on a binary classification loss.
    """

    parametric = True
    # Attributes bootstrapped by the evaluation framework.
    bootlist = ["model.vip_", "model.coef_", "model.x_loadings_", "model.x_scores_", "Y_pred", "model.pctvar_", "model.y_loadings_", "model.pfi_acc_", "model.pfi_r2q2_", "model.pfi_auc_", "model.eval_metrics_"]  # list of metrics to bootstrap

    def __init__(self, n_neurons=2, epochs=200, learning_rate=0.01, momentum=0.0, decay=0.0, nesterov=False, loss="binary_crossentropy", batch_size=None, verbose=0, pfi_metric="r2q2", pfi_nperm=0, pfi_mean=True, seed=None):
        self.n_neurons = n_neurons
        self.verbose = verbose
        self.n_epochs = epochs
        self.k = n_neurons
        self.batch_size = batch_size
        self.loss = loss
        self.decay = decay
        self.nesterov = nesterov
        self.momentum = momentum
        self.learning_rate = learning_rate
        self.pfi_metric = pfi_metric
        self.pfi_nperm = pfi_nperm
        self.pfi_mean = pfi_mean
        self.optimizer = SGD(lr=learning_rate, momentum=momentum, decay=decay, nesterov=nesterov)
        self.compiled = False  # the Keras model is built lazily on the first train()
        self.seed = seed
        self.__name__ = 'cimcb.model.NN_LinearSigmoid'
        self.__params__ = {'n_neurons': n_neurons, 'epochs': epochs, 'learning_rate': learning_rate, 'momentum': momentum, 'decay': decay, 'nesterov': nesterov, 'loss': loss, 'batch_size': batch_size, 'verbose': verbose, 'seed': seed}

    def set_params(self, params):
        """Re-initialise the estimator from a parameter dictionary."""
        self.__init__(**params)

    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None, w1=False, w2=False):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        # If batch-size is None, fall back to full-batch gradient descent.
        if self.batch_size is None:
            self.batch_size = len(X)
        self.X = X
        self.Y = Y

        # If epoch_ypred is True, calculate ypred for each epoch.
        # NOTE(review): on the very first train() call self.model does not
        # exist yet, so epoch_ypred=True would fail here -- confirm usage.
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        if self.compiled == False:
            np.random.seed(self.seed)
            self.model = Sequential()
            self.model.add(Dense(self.n_neurons, activation="linear", input_dim=len(X.T)))
            self.model.add(Dense(1, activation="sigmoid"))
            self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])
            self.model.w1 = self.model.layers[0].get_weights()
            self.model.w2 = self.model.layers[1].get_weights()
            # BUGFIX: was ``self.compiled == True`` -- a no-op comparison, so
            # the flag never flipped and the network was rebuilt from scratch
            # on every call to train() instead of reusing its initial weights.
            self.compiled = True
        else:
            # Reset to the stored initial weights for a fresh fit.
            self.model.layers[0].set_weights(self.model.w1)
            self.model.layers[1].set_weights(self.model.w2)

        # Explicitly supplied starting weights override the stored ones.
        if w1 != False:
            self.model.layers[0].set_weights(w1)
            self.model.w1 = w1
        if w2 != False:
            self.model.layers[1].set_weights(w2)
            self.model.w2 = w2

        # Fit
        self.model.fit(X, Y, epochs=self.n_epochs, batch_size=self.batch_size, verbose=self.verbose)
        self.model.pctvar_ = pctvar_calc(self.model, X, Y)

        layer1_weight = self.model.layers[0].get_weights()[0]
        layer1_bias = self.model.layers[0].get_weights()[1]
        layer2_weight = self.model.layers[1].get_weights()[0]
        layer2_bias = self.model.layers[1].get_weights()[1]

        # Variable importance (Garson) and connection-weight coefficients.
        self.model.vip_ = garson(layer1_weight, layer2_weight.flatten())
        self.model.coef_ = connectionweight(layer1_weight, layer2_weight.flatten())

        # Loadings/scores named to mirror the PLS attribute convention.
        self.model.x_loadings_ = layer1_weight
        self.model.x_scores_ = np.matmul(X, self.model.x_loadings_) + layer1_bias
        self.model.x_scores_alt = self.model.x_scores_
        self.model.y_loadings_ = layer2_weight
        self.model.y_scores = np.matmul(self.model.x_scores_alt, self.model.y_loadings_) + layer2_bias
        y_pred_train = self.model.predict(X).flatten()
        self.model.y_loadings_ = layer2_weight.T

        # Permutation feature importance (skipped when pfi_nperm == 0).
        if self.pfi_nperm == 0:
            self.model.pfi_acc_ = np.zeros((1, len(Y)))
            self.model.pfi_r2q2_ = np.zeros((1, len(Y)))
            self.model.pfi_auc_ = np.zeros((1, len(Y)))
        else:
            pfi_acc, pfi_r2q2, pfi_auc = self.pfi(nperm=self.pfi_nperm, metric=self.pfi_metric, mean=self.pfi_mean)
            self.model.pfi_acc_ = pfi_acc
            self.model.pfi_r2q2_ = pfi_r2q2
            self.model.pfi_auc_ = pfi_auc

        self.Y_train = Y
        self.Y_pred_train = y_pred_train
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y

        # Binary evaluation metrics on the training data.
        self.metrics_key = []
        self.model.eval_metrics_ = []
        bm = binary_evaluation(Y, y_pred_train)
        for key, value in bm.items():
            self.model.eval_metrics_.append(value)
            self.metrics_key.append(key)
        self.model.eval_metrics_ = np.array(self.model.eval_metrics_)
        return y_pred_train

    def test(self, X, Y=None):
        """Calculate and return Y predicted value.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Test variables, where n_samples is the number of samples and n_features is the number of predictors.

        Returns
        -------
        y_pred_test : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        layer1_weight = self.model.layers[0].get_weights()[0]
        layer1_bias = self.model.layers[0].get_weights()[1]

        self.model.x_scores_ = np.matmul(X, layer1_weight) + layer1_bias
        self.model.x_scores_alt = self.model.x_scores_

        y_pred_test = self.model.predict(X).flatten()
        self.Y_pred = y_pred_test

        # When true labels are supplied, also refresh the evaluation metrics.
        if Y is not None:
            self.metrics_key = []
            self.model.eval_metrics_ = []
            bm = binary_evaluation(Y, y_pred_test)
            for key, value in bm.items():
                self.model.eval_metrics_.append(value)
                self.metrics_key.append(key)
            self.model.eval_metrics_ = np.array(self.model.eval_metrics_)
        return y_pred_test
def pctvar_calc(model, X, Y):
    """Relative percent variance of Y explained per hidden neuron
    (normalised so the values sum to 100).

    With a single hidden unit the full network output is scored; otherwise
    each neuron's path through the output layer is scored separately.
    """
    x1 = X
    w1 = model.layers[0].get_weights()[0]
    b1 = model.layers[0].get_weights()[1]
    w2 = model.layers[1].get_weights()[0]
    b2 = model.layers[1].get_weights()[1]
    x2 = np.matmul(x1, w1) + b1
    pctvar = []
    if len(w2) == 1:
        y = logistic.cdf(np.matmul(x2, w2) + b2)
        r2_i = explained_variance_score(Y, y) * 100
        pctvar.append(r2_i)
    else:
        for i in range(len(w2)):
            # NOTE(review): the per-neuron score applies a sigmoid to the
            # hidden activation here, although the model's hidden layer is
            # linear -- confirm this is intentional.
            x2 = logistic.cdf(np.matmul(x1, w1[:, i]) + b1[i])
            x2 = np.reshape(x2, (-1, 1))
            y = logistic.cdf(np.matmul(x2, w2[i]) + b2)
            r2_i = explained_variance_score(Y, y) * 100
            pctvar.append(r2_i)
    pct = np.array(pctvar)
    # convert to relative explained variance
    pct = pct / np.sum(pct) * 100
    return pct
def garson(A, B):
    """Garson variable importance from the input->hidden weights *A* and the
    flattened hidden->output weights *B*."""
    contrib = np.dot(A, np.diag(B))
    column_totals = abs(contrib).sum(axis=0)
    relative = np.divide(abs(contrib), abs(column_totals))
    return relative.sum(axis=1)
def connectionweight(A, B):
    """Connection-weight importance: product of the two weight layers."""
    return A @ B
|
#!/usr/bin/python2.7
#coding=utf8
r'''
Fuction: wrapper of multitask.py with threading
Created: Tuyj
Created date:2015/02/07
'''
if __name__ == '__main__': import _env
import t_com as t_com
import multitask
from multitask import Queue,recvfrom,Timeout,sleep,recv,read,send,sendto
import types
import threading,signal
import time
from pyLibs.log.t_log import *
logger = LOG_MODULE_DEFINE('tMultitask')
SET_LOG_LEVEL(logger, 'debug')
class tMultitaskMgr(multitask.TaskManager):
    """TaskManager that runs the multitask scheduler on a dedicated worker
    thread, woken through a condition variable whenever a task is added."""

    default_stop_timeout = 5.0

    def __init__(self, name):
        multitask.TaskManager.__init__(self)
        self.obj_name = name
        self.running = False   # scheduler loop active flag
        self.hastask = False   # set when work is queued for the loop
        self.isEnd = True      # True once the scheduler loop has exited
        self.working = None    # the worker thread (None until run())
        self.cond = threading.Condition()

    def stop(self, timeout = int(default_stop_timeout)):
        # Signal the scheduler loop to exit and wait (bounded) for it to end.
        self.cond.acquire()
        self.running = False
        self.cond.notify()
        self.cond.release()
        if self.working is not None:
            if self.isWorkingthread():
                # stop() called from inside the worker itself: cannot join.
                self.working = None
                OBJ_INFO(self, logger, 'stop successfully(return stop)')
                return
            else:
                try:
                    self.working.join(timeout)
                    self.working = None
                except RuntimeError as ex:
                    OBJ_ERROR(self, logger, "join failure:%s",ex.message)
        else:
            # run_forever() was invoked without run(): poll for loop exit.
            cnt = timeout/0.1
            while cnt > 0 and not self.isEnd:
                time.sleep(0.1)
                cnt -= 1
        if not self.isEnd:
            OBJ_ERROR(self, logger, 'stop timeout')
        else:
            OBJ_DEBUG(self, logger, 'stop successfully')

    def run(self, thread_stack_size=None, ext_func=None, ext_args=(), ext_kwargs={}):
        # Start the worker thread (idempotent); optionally set its stack size
        # and pass a callable executed inside the thread before the loop.
        if (not self.running) and self.working is None:
            if thread_stack_size is not None and thread_stack_size != 0:
                threading.stack_size(thread_stack_size)
            self.working = threading.Thread(target=self.run_forever, args=(ext_func,ext_args,ext_kwargs), name=self.obj_name)
            self.working.start()
            if thread_stack_size is not None and thread_stack_size != 0:
                # Restore the default stack size for later-created threads.
                threading.stack_size(t_com.default_large_thread_stack_size)
            OBJ_DEBUG(self, logger, "multitask working-thread started")

    def run_forever(self, ext_func=None, ext_args=(), ext_kwargs={}):
        # Scheduler loop: sleep on the condition until a task arrives, drain
        # all runnable/io/timeout work, and repeat until stop() is called.
        if self.running:
            return
        if ext_func is not None:
            ext_func(*ext_args, **ext_kwargs)
        self.isEnd = False
        self.running = True
        self.add(self._taskCheck())
        while self.running:
            self.cond.acquire()
            while self.running and not self.hastask:
                self.cond.wait()
            self.cond.release()
            if not self.running:
                # NOTE(review): this early return skips the isEnd=True below,
                # so stop() may report a timeout in this path -- confirm.
                return
            while self.running and (self.has_runnable() or self.has_io_waits() or self.has_timeouts()):
                self.run_next()
            else:
                # Inner while never breaks, so this always runs when it ends:
                # no pending work left, wait for the next add().
                self.hastask = False
        self.isEnd = True
        OBJ_DEBUG(self, logger, 'multitask working-thread end')

    def add(self, task):
        # Queue a task and wake the scheduler loop.
        multitask.TaskManager.add(self, task)
        self.cond.acquire()
        if not self.hastask:
            self.hastask = True
            self.cond.notify()
        self.cond.release()

    def isWorkingthread(self):
        # True when called from the scheduler's own worker thread.
        return self.working is threading.current_thread()

    def _taskCheck(self):
        # Idle heartbeat so the scheduler always has a timeout to wake on.
        inv = t_com.max_multitask_inv
        while True:
            yield multitask.sleep(inv)
class TEST_STOPER:
    """Installs SIGINT/SIGTERM handlers that stop the given task manager."""

    def __init__(self, tasker):
        self.tasker = tasker
        signal.signal(signal.SIGINT, self.stop_thread)
        signal.signal(signal.SIGTERM, self.stop_thread)

    def __del__(self):
        self.stop_thread()

    def stop_thread(self, *args, **kwargs):
        # Idempotent: the tasker reference is cleared after the first stop.
        if self.tasker is not None:
            self.tasker.stop()
            self.tasker = None
def testQueue():
    # Manual test: two periodic printer tasks plus a producer/consumer pair
    # sharing a multitask Queue, run for 10 seconds then stopped.
    def printT():
        while True:
            print '+++'
            yield multitask.sleep(3)
    def printT2():
        while True:
            print '---'
            yield multitask.sleep(0.2)
    def get1(que):
        while True:
            print 'xxxx'
            print (yield que.get())
    def put1(que):
        for x in xrange(0,10):
            yield que.put(x)
            time.sleep(0.5)
    t = tMultitaskMgr('testQueue')
    TEST_STOPER(t)
    t.run()
    t.add(printT())
    t.add(printT2())
    que = Queue()
    t.add(get1(que))
    t.add(put1(que))
    time.sleep(10)
    t.stop()
    print 'over'
def testM():
    # Manual test: an idle UDP-receive task (exercising the I/O wait path)
    # plus two periodic printers, run for 5 seconds then stopped.
    def waitExit():
        import socket
        sock = socket.socket(type=socket.SOCK_DGRAM)
        while True:
            try:
                yield multitask.recv(sock, 2)
            except multitask.Timeout: pass
    def printT():
        while True:
            print '+++'
            yield multitask.sleep(3)
    def printT2():
        while True:
            print '---'
            yield multitask.sleep(0.2)
    t = tMultitaskMgr('testT')
    TEST_STOPER(t)
    t.run()
    print 'add'
    t.add(waitExit())
    t.add(printT())
    t.add(printT2())
    print 'sleep'
    time.sleep(5)
    print 'sleeped'
    t.stop()
    print 'over'
if __name__ == '__main__':
    if not LOG_INIT():
        # BUGFIX: ``raise -1`` is invalid (only exception instances/classes
        # can be raised and would itself produce a TypeError); raise a real
        # exception when logging cannot be initialised.
        raise RuntimeError('log initialisation failed')
    # testM()
    testQueue()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# validate_op_limits.py: compare the current op_limits.db to the standard #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 10, 2021 #
# #
#################################################################################
import os
import sys
import re
import string
import time
import random
import math
import numpy
#
#--- reading directory list
#
path = '/data/mta/Script/MSID_limit/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]

for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec() defines variables such as main_dir/mta_dir at
    # module scope from the dir_list file; safe only because that file is a
    # trusted, locally controlled configuration file.
    exec("%s = %s" %(var, line))

sys.path.append(mta_dir)
import mta_common_functions as mcf

# Unique scratch-file name for this run.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-------------------------------------------------------------------------------
#-- validate_op_limits: compare the current op_limits.db to the standard and report problems
#-------------------------------------------------------------------------------
def validate_op_limits():
    """
    compare the current op_limits.db to the standard and report problems
    input:  none, but read from <past_dir>/op_limits.db_080119 and ./op_limits.db
    output: op_limit_problems if there are any potential problems
    """
    #
    #--- remove the past checking result
    #
    mcf.rm_files('op_limit_problems')
    #
    #--- this is the most recent clean data set; use as a standard
    #
    ifile = main_dir + 'Past_data/op_limits.db_080119'
    p_dict = read_op_limit(ifile)
    #
    #--- the current op_limits.db file
    #
    ifile = main_dir + 'op_limits.db'
    c_dict = read_op_limit(ifile)
    #
    #--- start checking
    #
    wline = ''
    for msid in p_dict.keys():
        #
        #--- check whether msid is missing in the new op_limits.db
        #
        pdata = p_dict[msid]
        try:
            cdata = c_dict[msid]
        except KeyError:
            #
            #--- narrowed from a bare except: only a missing key means the
            #--- entry is absent; any other error should not be masked
            #
            wline = wline + msid + ': Entry is missing in the new op_limit.db\n'
            continue
        #
        #--- check all past time entries are still in the current op_limits.db
        #
        ptime = pdata[0]
        ctime = cdata[0]
        dtime = numpy.setdiff1d(ptime, ctime)
        if len(dtime) > 0:
            tline = ''
            for ent in dtime:
                tline = tline + '\t' + str(ent)
            wline = wline + msid + ': Missing entry at time ' + tline + '\n'
            continue
        #
        #--- check whether the current op_limits.db entry is in time order
        #
        chk = 0
        for k in range(1, len(ctime)):
            if ctime[k] > ctime[k-1]:
                continue
            else:
                wline = wline + msid + ': Time is out of order at ' + str(ctime[k]) + '\n'
                chk = 1
                break
        if chk > 0:
            continue
        #
        #--- check whether the values are same between the standard and the
        #--- new op_limits.db (in the standard portion)
        #
        chk = 0
        for k in range(0, len(ptime)):
            if (ptime[k] != ctime[k]) \
               or (p_dict[msid][1][k] != c_dict[msid][1][k]) \
               or (p_dict[msid][2][k] != c_dict[msid][2][k]) \
               or (p_dict[msid][3][k] != c_dict[msid][3][k]) \
               or (p_dict[msid][4][k] != c_dict[msid][4][k]):
                wline = wline + msid + ': Time and/or Entry values are different at time ' + str(ctime[k]) + '\n'
                chk = 1
                break
        if chk > 0:
            continue
    #
    #--- write the report only when problems were found
    #
    if wline != '':
        with open('op_limit_problems', 'w') as fo:
            fo.write(wline)
#-------------------------------------------------------------------------------
#-- read_op_limit: create a data dictionary for msid <--> data --
#-------------------------------------------------------------------------------
def read_op_limit(ifile):
    """
    create a data dictionary for msid <--> data
    input:  ifile   --- input file name
    output: m_dict  --- dictionary:
                        msid <--> [time_list, y_low_list, y_upper_list, r_low_list, r_upper_list]
    """
    data = mcf.read_data_file(ifile)
    m_dict = {}
    prev = ''
    for ent in data:
        #
        #--- skip non-data lines (blank or comment)
        #
        if ent == '':
            continue
        if ent[0] == '#':
            continue
        #
        #--- only the first 6 entries are used:
        #--- msid, yellow low, yellow high, red low, red high, time
        #
        # raw string for the regex: '\s' in a plain string is an invalid
        # escape sequence (SyntaxWarning on recent Python 3)
        atemp = re.split(r'\s+', ent)
        msid = atemp[0].strip()
        a1 = float(atemp[1])
        a2 = float(atemp[2])
        a3 = float(atemp[3])
        a4 = float(atemp[4])
        t = float(atemp[5])
        if prev == '':
            #
            #--- very first msid encountered
            #
            alist = [[t], [a1], [a2], [a3], [a4]]
            m_dict[msid] = alist
            prev = msid
        else:
            if msid == prev:
                #
                #--- continuation of the same msid: append to the existing lists
                #
                alist = m_dict[msid]
                alist[0].append(t)
                alist[1].append(a1)
                alist[2].append(a2)
                alist[3].append(a3)
                alist[4].append(a4)
                m_dict[msid] = alist
            else:
                #
                #--- first time entry for this msid
                #
                alist = [[t], [a1], [a2], [a3], [a4]]
                m_dict[msid] = alist
                prev = msid
    return m_dict
#-------------------------------------------------------------------------------
if __name__ == '__main__':
validate_op_limits()
|
#!/usr/bin/env python3
"""Executes a SSM Document which captures in S3 a list of all installed packages for selected hosts."""
import time
import logging
import os
import csv
import datetime
import json
import boto3
from botocore.exceptions import ClientError
from tabulate import tabulate
from utils import (
configure_logging, SSMParamStoreKey, SlackMessage, json_dumps,
push_to_queue, chunk_list, put_file_to_s3, make_s3_url,
retrieve_json_from_s3)
import rpm_vercmp
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
# SQS queue URL for the inventory pipeline (configured via the environment).
QUEUE_URL = os.environ.get('QUEUE_INV_URL')
# i-0781ab28c094092a9 - out of disk
# BLACKLISTED_INSTANCES = ('i-0781ab28c094092a9',)
# Instances excluded from inventory collection (currently none).
BLACKLISTED_INSTANCES = ('',)
# Template for an HTML link rendered in report output: url, then label.
AREF = '<a href="{}">{}</a>'
def _bool_check_command_status(client, command_info, instance_id):
    """Return True while the SSM RunCommand is still queued or running."""
    details = _check_command_status(client, command_info['CommandId'],
                                    instance_id)
    return details['Status'] in ('Pending', 'InProgress', 'Delayed')
def _check_command_status(client, command_id, instance_id):
"""Retrieve details of execute SSM RunCommand."""
logging.debug('Command Id: %s; InstanceId: %s', command_id, instance_id)
invocations = client.list_command_invocations(
CommandId=command_id,
InstanceId=instance_id)['CommandInvocations']
if invocations:
return invocations[0]
return {'Status': ['Pending'], 'InstanceId': instance_id,
'CommandId': command_id,
'Error': 'Unable to list command invocation.'}
def send_runcommand(ssm_client, **kwargs):  # noqa
    """Takes in a boto3 session and some kwargs, splits list of instances
    into groups for 50, sends RunCommand, and returns a list of the
    responses.

    Required kwargs: instances, bucket, delta_date, timeout. The shell
    script exports each host's rpm package list as JSON to
    s3://<bucket>/patching-state/<delta_date>/<instance_id>.json.
    """
    doc = 'AWS-RunShellScript'
    response = []
    chunks = chunk_list(kwargs['instances'], 50)  # max 50 instances
    for chunk in chunks:  # iterate over chunks of 50 instances
        response.append(ssm_client.send_command(
            DocumentName=doc,
            InstanceIds=chunk,
            Parameters={  # value must be a list
                'commands': [
                    "#!/bin/bash",
                    'bucket={bucket}'.format(**kwargs),
                    'instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)',
                    'echo $instance_id $bucket',
                    'if type -t rpm >/dev/null 2>&1;then',
                    (''' pkg_list=$(rpm -qa --queryformat '"%-30{NAME}": '''
                     '''"%10{VERSION}-%20{RELEASE}",' | sed -e 's~,$~~' | tr -d ' ')'''),
                    ' echo "{${pkg_list}}" | \\',
                    (''' python -c 'import json, sys; print(json.dumps('''
                     '''json.loads(sys.stdin.read()), indent=4))' > pkg_list.json'''),
                    ' echo Retrieved package list from rpm',
                    'fi',
                    'if type -t dpkg >/dev/null 2>&1;then',
                    ' echo "Found debian"',
                    'fi',
                    'test -e pkg_list.json || echo unable to find pkg_list.json',
                    'aws s3 cp pkg_list.json s3://$bucket/patching-state/%s/${instance_id}.json' % (
                        kwargs['delta_date']),
                    'echo Completed Export',
                ],
            },
            OutputS3BucketName=kwargs['bucket'],
            OutputS3KeyPrefix='command-output',
            TimeoutSeconds=kwargs['timeout'],
            MaxErrors='10',
        )['Command'])  # appends command return to a list for return
    return response
def _describe_instance(client, instance):
try:
client.describe_instances(InstanceIds=[instance])
except ClientError as error:
logging.error(error)
if error.response['Code']['Message'] in ('InvalidInstanceID.NotFound', 'InvalidInstanceID.Malformed'):
logging.error('%s instance is not valid.', instance)
else:
raise
else:
return True
return False
def _is_ssm_managed(client, instance):
try:
r = client.describe_instance_information(Filters=[{'Key': 'InstanceIds', 'Values': [instance]}])
logging.debug(r.get('InstanceInformationList', []))
except Exception as error:
logging.error(error)
raise
else:
if r.get('InstanceInformationList'):
return True
return False
def _get_instance_pairs(s3_client, ec2_client, ssm_client, bucket, key):
    """Read the prod/non-prod instance CSV from S3 and return a dict mapping
    InstanceId -> SourceInstanceId."""
    logging.info('Retrieving instance pairs from s3://%s/%s', bucket, key)
    resp = s3_client.get_object(Bucket=bucket, Key=key)
    csv_body = StringIO(resp['Body'].read().decode('utf-8'))
    reader = csv.DictReader(csv_body)
    instances = {}
    for row in reader:
        if row.get('SourceInstanceId'):
            for instance in (row.get('SourceInstanceId'), row.get('InstanceId')):
                if not (_describe_instance(ec2_client, instance) and
                        _is_ssm_managed(ssm_client, instance)):
                    # NOTE(review): this `continue` only skips to the next
                    # instance of the pair; the pair is still added below even
                    # when validation fails. Looks like the intent was to skip
                    # the whole row -- compare with _get_instances(); confirm.
                    continue
            instances[row.get('InstanceId')] = row.get('SourceInstanceId')
    return instances
def _get_instances(s3_client, ec2_client, ssm_client, bucket, key):
    """Read the instance CSV from S3 and return the set of valid,
    SSM-managed, non-blacklisted instance ids (source and target columns)."""
    logging.info('Retrieving instance list from s3://%s/%s', bucket, key)
    obj = s3_client.get_object(Bucket=bucket, Key=key)
    rows = csv.DictReader(StringIO(obj['Body'].read().decode('utf-8')))
    collected = set()
    for row in rows:
        if not row.get('SourceInstanceId'):
            continue
        for candidate in (row.get('SourceInstanceId'), row.get('InstanceId')):
            # Keep the original evaluation order: describe first, then SSM
            # registration, then the blacklist (short-circuiting AWS calls).
            if (_describe_instance(ec2_client, candidate) and
                    _is_ssm_managed(ssm_client, candidate) and
                    candidate not in BLACKLISTED_INSTANCES):
                collected.add(candidate)
    return collected
def _process_delta_pair(prod_instance_id, source_instance_id, bucket, delta_date, s3_client):
    """Diff the package inventories of one prod/source instance pair.

    Reads both instances' package lists (JSON written by the inventory run
    command) from S3, computes packages missing on prod and version
    mismatches, uploads those delta files back to S3 and returns one HTML
    report row linking to everything.

    Args:
        prod_instance_id (str): prod EC2 instance ID.
        source_instance_id (str): non-prod source EC2 instance ID.
        bucket (str): S3 bucket holding the inventory state.
        delta_date (str): ISO date used in the S3 key layout.
        s3_client: boto3 S3 client.

    Returns:
        dict: one row for the HTML delta report (values are <a href> links).
    """
    def key_paths(source_instance_id, prod_instance_id):
        """Form S3 keys based on variables."""
        non_prod_pkg_list_key = 'patching-state/{}/{}.json'.format(delta_date, source_instance_id)
        prod_pkg_list_key = 'patching-state/{}/{}.json'.format(delta_date, prod_instance_id)
        mismatch_versions_key = 'patching-state/{}/{}/version-mismatch.json'.format(delta_date, prod_instance_id)
        not_on_prod_key = 'patching-state/{}/{}/not-on-prod.csv'.format(delta_date, prod_instance_id)
        out_of_date_key = 'patching-state/{}/{}/out-of-date.csv'.format(delta_date, prod_instance_id)
        return non_prod_pkg_list_key, prod_pkg_list_key, mismatch_versions_key, not_on_prod_key, out_of_date_key
    def format_str(instance_id, pkg_list):
        """Label the instance, flagging a missing package list."""
        return instance_id if pkg_list else instance_id + ' (Missing pkg list)'
    logging.info('Processing pair %s and %s', prod_instance_id, source_instance_id)
    (non_prod_pkg_list_key, prod_pkg_list_key,
     mismatch_versions_key, not_on_prod_key, out_of_date_key) = key_paths(
         source_instance_id, prod_instance_id)
    prod_s3_url = make_s3_url(bucket, prod_pkg_list_key)
    non_prod_s3_url = make_s3_url(bucket, non_prod_pkg_list_key)
    prod_pkg_list = retrieve_json_from_s3(s3_client, bucket, prod_pkg_list_key)
    nonprod_pkg_list = retrieve_json_from_s3(s3_client, bucket, non_prod_pkg_list_key)
    if not prod_pkg_list or not nonprod_pkg_list:
        # Either inventory is missing: emit a row flagging it and skip the diff.
        logging.error('Missing pkg list')
        return {
            'Prod Instance': AREF.format(
                prod_s3_url, format_str(prod_instance_id, prod_pkg_list)),
            'Non-Prod Instance': AREF.format(
                non_prod_s3_url, format_str(
                    source_instance_id, nonprod_pkg_list)),
            'Updates Needed': '',
            'Not installed on Prod': '',
            'Version Mismatch': '',
        }
    # not on prod but installed on nonprod
    not_on_prod = set(nonprod_pkg_list).difference(set(prod_pkg_list))
    # version mismatch
    mismatch_versions = {}
    old_versions = []
    for pkg in nonprod_pkg_list:
        # vercmp(a, b) == 1 means a sorts newer -- i.e. the non-prod box is
        # ahead of prod for this package, so prod is out of date.
        if pkg in prod_pkg_list and rpm_vercmp.vercmp(nonprod_pkg_list[pkg], prod_pkg_list[pkg]) == 1:
            mismatch_versions[pkg] = {'prod': prod_pkg_list[pkg], 'nonprod': nonprod_pkg_list[pkg]}
            old_versions.append(pkg + "-" + nonprod_pkg_list[pkg])
    # Uploading files to s3
    put_file_to_s3(s3_client, '\n'.join(list(not_on_prod)), bucket,
                   not_on_prod_key)
    put_file_to_s3(s3_client, '\n'.join(list(old_versions)), bucket,
                   out_of_date_key)
    put_file_to_s3(s3_client, json.dumps(mismatch_versions, indent=4), bucket,
                   mismatch_versions_key)
    return {
        'Prod Instance': AREF.format(prod_s3_url, prod_instance_id),
        'Non-Prod Instance': AREF.format(non_prod_s3_url, source_instance_id),
        'Updates Needed': AREF.format(
            make_s3_url(bucket, out_of_date_key), 'Out of date packages'),
        'Not installed on Prod': AREF.format(
            make_s3_url(bucket, not_on_prod_key), 'Not installed on Prod'),
        'Version Mismatch': AREF.format(
            make_s3_url(bucket, mismatch_versions_key), 'Version Mismatch'),
    }
def _make_html_report(table_report):
    """Render the delta report rows into a self-contained HTML page.

    Args:
        table_report (list): row dicts produced by _process_delta_pair.

    Returns:
        str: complete HTML document.
    """
    prologue = '''<html>
<head>
<title>Delta Inventory Report</title>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
</style>
</head>
<body>
<h1>Delta Inventory Report</h1>
<p>Inventory report on instances in the prod patch list which do have a
source instance and compare the list of installed packages on both
hosts for differences.
</p>'''
    epilogue = '''
<p>Note: Reasons for missing change list are the command failed to run
or the instance isn't managed by SSM.
</p>
</body>
</html>'''
    return prologue + tabulate(table_report, tablefmt='html') + epilogue
def inventory_hosts(event, context):
    """Inventory Hosts.

    Kicks off the SSM run-command that captures the installed-package list
    on every valid instance from the patching CSV, then queues an
    'inventory-wait' message so a later invocation can poll for completion.

    Args:
        event (dict): Lambda event; may carry 's3_bucket', 's3_key' and
            'delta_date' overrides (env vars / today's date are defaults).
        context: Lambda context (unused here).

    Returns:
        dict: the message pushed onto the work queue.
    """
    bucket = event.get('s3_bucket', os.getenv('BUCKET'))
    s3_key = event.get('s3_key', os.getenv('CSV_KEY'))
    # Default the inventory snapshot date to today (ISO format).
    delta_date = event.get('delta_date', datetime.date.today().isoformat())
    session = boto3.session.Session()
    ec2_client = session.client('ec2')
    s3_client = session.client('s3')
    ssm_client = session.client('ssm')
    instances = list(_get_instances(s3_client, ec2_client, ssm_client, bucket, s3_key))
    logging.info(instances)
    commands = send_runcommand(ssm_client, instances=instances, timeout=3600, bucket=bucket, delta_date=delta_date)
    commandids = [c['CommandId'] for c in commands]
    logging.info('Commands: %s', ', '.join(commandids))
    # Hand the issued command IDs to the wait step via the queue.
    queue_data = event.copy()
    queue_data['action'] = 'inventory-wait'
    queue_data['commands'] = commands
    queue_data['wait-count'] = 0
    push_to_queue(QUEUE_URL, queue_data)
    return queue_data
def command_complete(event, context):
    """Check status of in progress patching.

    Polls every (command, instance) pair; once no command is still pending,
    the queued message switches to the 'report-delta' action; otherwise the
    message is re-queued with a 60 second delay and a bumped wait counter.
    """
    queue_data = event.copy()
    ssm = boto3.client('ssm')
    pending = []
    for command in event['commands']:
        for instance_id in command['InstanceIds']:
            pending.append(_bool_check_command_status(ssm, command, instance_id))
    logging.info('Command Statuses(all false will start reporting): %s',
                 pending)
    send_kwargs = {}
    if any(pending):
        # Still waiting on at least one command -- poll again in a minute.
        queue_data['wait-count'] += 1
        send_kwargs['DelaySeconds'] = 60
    else:
        queue_data['action'] = 'report-delta'
    push_to_queue(QUEUE_URL, queue_data, **send_kwargs)
    return queue_data
def make_delta(event, context):
    """Calculate delta from inventory lists.

    Compares the package inventories captured for each prod/source pair,
    writes the per-pair delta files plus an HTML summary to S3, and posts a
    link to the report on Slack.

    Args:
        event (dict): must carry 'delta_date'; may override 's3_bucket'
            and 's3_key'.
        context: Lambda context (unused).

    Returns:
        bool: True on completion.
    """
    bucket = event.get('s3_bucket', os.getenv('BUCKET'))
    s3_key = event.get('s3_key', os.getenv('CSV_KEY'))
    logging.info('Retrieving patching csv from bucket %s', bucket)
    session = boto3.session.Session()
    s3_client = session.client('s3')
    instances = _get_instance_pairs(s3_client, session.client('ec2'), session.client('ssm'), bucket, s3_key)
    logging.info(instances)
    table_report = [
        _process_delta_pair(prod_instance_id, source_instance_id, bucket,
                            event['delta_date'], s3_client)
        for prod_instance_id, source_instance_id in instances.items()]
    html_report = _make_html_report(table_report)
    # Timestamp to the second so successive runs don't clobber each other.
    report_key = 'patch-delta/delta-report-{}.html'.format(datetime.datetime.now().isoformat(timespec='seconds'))
    put_file_to_s3(s3_client, html_report, bucket, report_key)
    report_url = make_s3_url(bucket, report_key)
    logging.info('Report URL: %s', report_url)
    # Slack webhook config lives encrypted in SSM parameter store.
    slack_config = SSMParamStoreKey(
        os.environ.get('DELTA_SLACK_CONFIG')
    ).get(decrypt=True)['Parameter']['Value']
    msg = {'text': 'Delta Inventory Report', 'color': '#439FE0', 'actions': [{
        'type': 'button',
        'text': 'Report',
        'style': 'primary',
        'url': report_url
    }]}
    SlackMessage(**msg).send(slack_config)
    return True
def init_inventory_hosts(event, context):
    """Check for schedules which we need to run inventory for.

    Pull schedules, check if any schedules are happening within a week that
    we've not done inventory for.

    Args:
        event (dict): Lambda event (only used as a trigger).
        context: Lambda context (unused).

    Returns:
        dict: the queued 'inventory-hosts' message for the first schedule
        needing inventory, or True when nothing needs to run.
    """
    def check_s3_prefix(bucket, prefix):
        # True when at least one object already exists under the prefix,
        # i.e. inventory for that date was already captured.
        client = boto3.client('s3')
        logging.info('Checking for objects at s3://%s/%s', bucket, prefix)
        response = client.list_objects_v2(Bucket=bucket, Prefix=prefix)
        logging.info(response)
        return bool(response.get('Contents'))
    bucket = os.getenv('BUCKET')
    ssm_key = os.environ.get('SCHEDULE_SSM_KEY')
    ssm_schedule = SSMParamStoreKey(ssm_key)
    ssm_schedules = ssm_schedule.get(decrypt=True)
    logging.debug(ssm_schedules)
    logging.info(ssm_schedules['Parameter']['Value'])
    schedules = json.loads(ssm_schedules['Parameter']['Value'])
    for schedule in schedules:
        run_inventory = False
        run_time = datetime.datetime.utcfromtimestamp(schedule['run_time'])
        # Inventory is captured two days before the patch run unless the
        # schedule pins an explicit 'delta-date'.
        delta_date = run_time - datetime.timedelta(days=2)
        logging.info(delta_date)
        delta_date = delta_date.date()
        logging.info(delta_date)
        if 'delta-date' in schedule:
            delta_date = datetime.date.fromisoformat(schedule['delta-date'])
        logging.info('Found schedule, %s to be executed at %s',
                     schedule['patch_name'],
                     run_time.isoformat())
        prefix_exists = check_s3_prefix(
            bucket,
            'patching-state/{}'.format(delta_date.isoformat()))
        logging.debug('Today: %s; Week Before schedule: %s', datetime.datetime.utcnow(), delta_date)
        logging.info(datetime.date.today())
        logging.info(delta_date)
        # Only prod schedules whose inventory window has opened and whose
        # inventory hasn't been captured yet qualify.
        if (datetime.date.today() >= delta_date and
                schedule['mode'] in ('prod', ) and
                not prefix_exists):
            run_inventory = True
        if run_inventory:
            # 'patch_list' is stored as "<bucket>:<key>".
            s3_bucket, s3_key = schedule['patch_list'].split(':', 1)
            queue_data = {
                'action': 'inventory-hosts',
                'delta_date': delta_date.isoformat(),
                's3_key': s3_key,
                's3_bucket': s3_bucket,
            }
            push_to_queue(QUEUE_URL, queue_data)
            # NOTE(review): returns after the FIRST matching schedule; later
            # schedules wait for a subsequent invocation -- confirm intended.
            return queue_data
    return True
def lambda_handler(event, context):
    """Lambda function entry point.

    Tags all log records with the AWS request id before delegating to main().
    """
    request_scope = {'aws_request_id': context.aws_request_id}
    configure_logging(request_scope)
    main(event, context)
def main(event, context):
    """Dispatch each queued record to its action handler.

    Returns:
        The JSON-serialized handler result for the first record with a
        known action, or None when the event carries no records.

    Raises:
        Exception: when a record carries an unknown action.
    """
    logging.info('Using queue %s', QUEUE_URL)
    logging.debug(event)
    actions = {
        'init-inventory-hosts': init_inventory_hosts,
        'inventory-hosts': inventory_hosts,
        'inventory-wait': command_complete,
        'report-delta': make_delta,
    }
    if not event.get('Records'):
        logging.error('No Records key.')
        logging.error(event)
    # Normally a single record per event, but iterate defensively.
    for record in event.get('Records', []):
        logging.debug(json_dumps(record))
        data = json.loads(record['body'])
        logging.info(data)
        handler = actions.get(data['action'])
        if handler is None:
            raise Exception('Unknown action {}.'.format(data['action']))
        return json_dumps(handler(data, context))
if __name__ == '__main__':
    # Local smoke-test harness: emulate the SQS-driven loop end to end.
    configure_logging({'aws_request_id': "local"})
    os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
    os.environ['BUCKET'] = 'edwards-asadmin-patching-bucket-us-west-2'
    os.environ['SCHEDULE_SSM_KEY'] = '/config/patching/schedule'
    os.environ['SLACK_CONFIG'] = '/onica/slack/webhook'
    resp = {'action': 'init-inventory-hosts'}
    # FIX: compare against the True singleton by identity (PEP 8 E712);
    # `!= True` invites surprises from objects overriding __eq__.
    while resp is not True:
        resp = main({'Records': [{'body': json_dumps(resp)}]}, None)
        time.sleep(10)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner for the CloudSQL acls rules engine."""
import itertools
from google.cloud.security.common.util import log_util
from google.cloud.security.common.data_access import cloudsql_dao
from google.cloud.security.common.gcp_type.resource import ResourceType
from google.cloud.security.scanner.audit import cloudsql_rules_engine
from google.cloud.security.scanner.scanners import base_scanner
LOGGER = log_util.get_logger(__name__)
class CloudSqlAclScanner(base_scanner.BaseScanner):
    """Scanner for CloudSQL acls."""

    def __init__(self, global_configs, scanner_configs, snapshot_timestamp,
                 rules):
        """Initialization.

        Args:
            global_configs (dict): Global configurations.
            scanner_configs (dict): Scanner configurations.
            snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
            rules (str): Fully-qualified path and filename of the rules file.
        """
        super(CloudSqlAclScanner, self).__init__(
            global_configs,
            scanner_configs,
            snapshot_timestamp,
            rules)
        # Parse the rules file once; find_policy_violations() reuses the
        # resulting rule book for every ACL.
        self.rules_engine = cloudsql_rules_engine.CloudSqlRulesEngine(
            rules_file_path=self.rules,
            snapshot_timestamp=self.snapshot_timestamp)
        self.rules_engine.build_rule_book(self.global_configs)

    @staticmethod
    def _flatten_violations(violations):
        """Flatten RuleViolations into a dict for each RuleViolation member.

        Args:
            violations (list): The RuleViolations to flatten.

        Yields:
            dict: Iterator of RuleViolations as a dict per member.
        """
        for violation in violations:
            violation_data = {}
            violation_data['instance_name'] = violation.instance_name
            violation_data['authorized_networks'] =\
                violation.authorized_networks
            violation_data['ssl_enabled'] = violation.ssl_enabled
            yield {
                'resource_id': violation.resource_id,
                'resource_type': violation.resource_type,
                'rule_index': violation.rule_index,
                'rule_name': violation.rule_name,
                'violation_type': violation.violation_type,
                'violation_data': violation_data
            }

    def _output_results(self, all_violations):
        """Output results.

        Args:
            all_violations (list): A list of violations.
        """
        all_violations = self._flatten_violations(all_violations)
        self._output_results_to_db(all_violations)

    def _find_violations(self, cloudsql_data):
        """Find violations in the policies.

        Args:
            cloudsql_data (list): CloudSQL data to find violations in

        Returns:
            list: A list of CloudSQL violations
        """
        # Each element of cloudsql_data is itself an iterable of
        # (resource, acl) pairs; chain them into a single stream.
        cloudsql_data = itertools.chain(*cloudsql_data)
        all_violations = []
        LOGGER.info('Finding CloudSQL acl violations...')
        for (cloudsql, cloudsql_acl) in cloudsql_data:
            LOGGER.debug('%s => %s', cloudsql, cloudsql_acl)
            violations = self.rules_engine.find_policy_violations(
                cloudsql_acl)
            LOGGER.debug(violations)
            all_violations.extend(violations)
        return all_violations

    @staticmethod
    def _get_resource_count(project_policies, cloudsql_acls):
        """Get resource count for org and project policies.

        Args:
            project_policies (list): project_policies policies from inventory.
            cloudsql_acls (list): CloudSql ACLs from inventory.

        Returns:
            dict: Resource count map
        """
        resource_counts = {
            ResourceType.PROJECT: len(project_policies),
            ResourceType.CLOUDSQL_ACL: len(cloudsql_acls),
        }
        return resource_counts

    def _get_cloudsql_acls(self):
        """Get CloudSQL acls from data source.

        Returns:
            list: List of CloudSql acls.
        """
        # NOTE(review): this initial empty dict is immediately overwritten
        # by the DAO call below; it has no effect.
        cloudsql_acls = {}
        cloudsql_acls = (cloudsql_dao
                         .CloudsqlDao(self.global_configs)
                         .get_cloudsql_acls('cloudsql_instances',
                                            self.snapshot_timestamp))
        return cloudsql_acls

    def _retrieve(self):
        """Retrieves the data for scanner.

        Returns:
            list: CloudSQL ACL data.
        """
        cloudsql_acls_data = []
        project_policies = {}
        cloudsql_acls = self._get_cloudsql_acls()
        # NOTE: dict.iteritems() is Python 2 only -- this module predates a
        # Python 3 port and will not run under Python 3 as written.
        cloudsql_acls_data.append(cloudsql_acls.iteritems())
        cloudsql_acls_data.append(project_policies.iteritems())
        return cloudsql_acls_data

    def run(self):
        """Runs the data collection."""
        cloudsql_acls_data = self._retrieve()
        all_violations = self._find_violations(cloudsql_acls_data)
        self._output_results(all_violations)
|
# Given a m * n matrix mat of ones (representing soldiers) and zeros
# (representing civilians), return the indexes of the k weakest rows
# in the matrix ordered from the weakest to the strongest.
#
# A row i is weaker than row j, if the number of soldiers in row i is
# less than the number of soldiers in row j, or they have the same number
# of soldiers but i is less than j. Soldiers are always stand in the
# frontier of a row, that is, always ones may appear first and then zeros.
class Solution:
    def kWeakestRows(self, mat, k):
        """Return indices of the k weakest rows, weakest first.

        Row strength is the pair (soldier count, row index); Python's tuple
        ordering yields exactly the required lower-index tie-break.
        """
        strengths = sorted((sum(row), idx) for idx, row in enumerate(mat))
        return [idx for _, idx in strengths[:k]]
if __name__ == "__main__":
    test_matrix = [[1, 1, 0, 0, 0],
                   [1, 1, 1, 1, 0],
                   [1, 0, 0, 0, 0],
                   [1, 1, 0, 0, 0],
                   [1, 1, 1, 1, 1]]
    k = 3
    # FIX: instantiate the class instead of passing the class object itself
    # as `self` (Solution.kWeakestRows(Solution, ...) only worked by accident).
    print(Solution().kWeakestRows(test_matrix, k))
|
#!/usr/bin/env python
import pyspark
import sys
from operator import add
from operator import itemgetter
import random
zone = ["TKO", "SKM", "QRC", "Tai Ko", "North Point", "Fanling", "Lam Tin", "HBT", "Tai Po"]

def get_district(zone):
    """Map a branch zone name to its district (NT / Island / Kowloon)."""
    if zone in ("TKO", "SKM", "Fanling", "Tai Po"):
        return "NT"
    if zone in ("QRC", "Tai Ko", "North Point"):
        return "Island"
    return "Kowloon"
def gen_balance():
    """Random account balance: a multiple of 100 in [100, 1199900]."""
    return 100 * random.randrange(1, 12000)
def gen_address():
    """Pick a uniformly random entry from the module-level `zone` list."""
    index = random.randrange(len(zone))
    return zone[index]
def balance_range(x):
    """Bucket a balance relative to the 1,000,000 threshold."""
    return "more than 1 million" if x >= 1000000 else "less than 1 million"
#outputUri=sys.argv[1] + "textout.txt"
# Number of synthetic customers; the CLI argument overrides the default.
numcust = 10000
numcust = int(sys.argv[1])
sc = pyspark.SparkContext()
#sc.parallelize(xrange(0, 1000)).map(lambda x: (x,x+1) ).map(lambda (a,b): ( (a%3,b%4),1) ).reduceByKey(add).saveAsTextFile(outputUri)
# Generate (balance, zone) pairs, bucket them by (balance range, district),
# and count each bucket on the cluster.  NOTE(review): xrange and the
# tuple-unpacking lambdas are Python 2 only -- this will not run on Python 3.
result = sc.parallelize(xrange(0, numcust)).map(lambda x: (gen_balance() , gen_address() ) ).map(lambda (a,b): ( ( balance_range(a) , get_district(b) ),1) ).reduceByKey(add).collect()
print(sorted(result, key=itemgetter(1) ) )
|
#!/usr/bin/env python3
import os
from tempfile import gettempdir
from torchvision import datasets
from torchvision import transforms
from tqdm import tqdm
import numpy as np
import torch.nn as nn
import torch
from eval_history import EvalHistoryFile
from utils import freeze, new_classifier, new_processor, apply_transformations
print("this script does not take any command-line argument")

# Problem geometry: 28x28 grayscale inputs, a tiny shared latent space and
# one class per geometric transformation.
dim = 28 ** 2
latent_dim = 3
n_classes = 7
tr = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.15], std=[0.3]),
])
prefix = os.path.join(gettempdir(), "emnist_")
train_set1 = datasets.EMNIST(prefix+"training1", download=True, train=True, transform=tr, split="digits")
train_set2 = datasets.EMNIST(prefix+"training2", download=True, train=True, transform=tr, split="letters")
test_set = datasets.EMNIST(prefix+"testing", download=True, train=False, transform=tr, split="letters")
# train the classifier with a disposable processor
train_on_digits = set([2, 3, 4, 5])  # not symmetric
classifier = new_classifier(latent_dim, n_classes)
digit_processors = [new_processor(dim, latent_dim) for _ in range(10)]
parameters = list(classifier.parameters())
for digit, digit_processor in enumerate(digit_processors):
    if digit in train_on_digits:
        parameters += list(digit_processor.parameters())
optimizer = torch.optim.Adam(parameters)
loss_func = nn.CrossEntropyLoss()
transformations = list(range(n_classes))
progress_bar = tqdm(range(1), total=1, desc="step 1/3")
for epoch in progress_bar:
    loader = torch.utils.data.DataLoader(train_set1, batch_size=1, shuffle=True)
    for images, labels in loader:
        digit = labels[0].item()
        if digit not in train_on_digits:
            continue
        # One batch = the same image under every transformation; the
        # target is the transformation index itself.
        batch = images.expand(n_classes, -1, -1, -1)
        transformed = apply_transformations(batch, transformations)
        latent = digit_processors[digit](transformed.view(n_classes, dim))
        y_pred = classifier(latent)
        loss = loss_func(y_pred, torch.LongTensor(transformations))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        progress_bar.set_description("step 1/3 loss=%.3f" % loss.item())
freeze(classifier)
# prepare the testing data: one tensor of test images per letter
n_letters = 26
test_images = []
for letter in tqdm(range(n_letters), total=n_letters, desc="step 2/3"):
    loader = torch.utils.data.DataLoader(test_set, batch_size=4096, shuffle=True)
    for images, labels in loader:
        # EMNIST letter labels are 1-based, hence the +1.
        filtering = [i for i, l in enumerate(labels) if l.item() == letter+1]
        images = images[filtering].clone()
        test_images.append(images)
        break
# continual learning loop
history = EvalHistoryFile("hist_emnist_class_il")
processors = []
progress_bar = tqdm(range(n_letters), total=n_letters, desc="step 3/3")
for letter in progress_bar:
    # train a new processor for the current letter
    processor = new_processor(dim, latent_dim)
    processors.append(processor)
    optimizer = torch.optim.Adam(processor.parameters())
    loader = torch.utils.data.DataLoader(train_set2, batch_size=1, shuffle=True)
    for images, labels in loader:
        if labels[0].item() != letter+1:
            continue
        batch = images.expand(n_classes, -1, -1, -1)
        transformed = apply_transformations(batch, transformations)
        latent = processor(transformed.view(n_classes, dim))
        y_pred = classifier(latent)
        loss = loss_func(y_pred, torch.LongTensor(transformations))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        progress_bar.set_description("step 3/3 loss=%.3f" % loss.item())
    # evaluation on all the letters seen so far
    with torch.no_grad():
        hits = 0
        total_images = 0
        # FIX: use distinct loop variables (`seen_letter`, `cand_processor`)
        # instead of clobbering the outer `letter` and `processor` names.
        for seen_letter in range(len(processors)):
            images = test_images[seen_letter]
            n_images = images.size(0)
            votes = np.zeros((n_images, len(processors)))
            for y_true in range(n_classes):
                transformed = apply_transformations(images, [y_true] * n_images)
                features = transformed.view(n_images, dim)
                # make predictions with each processor
                for candidate, cand_processor in enumerate(processors):
                    pred = torch.log_softmax(classifier(cand_processor(features)), dim=1)[:, y_true]
                    votes[:, candidate] += pred.numpy()
            # choose processor with highest vote
            best = votes.argmax(axis=1)
            hits += (best == seen_letter).sum()
            total_images += n_images
        acc = hits / total_images
        history.log(value=acc, label="task %i" % len(processors))
|
# Subject -> score for one student.
Marks = {
    "math" : 90,
    "chemistry" : 100,
    "physics" : 100,
    "hindi" : 95,
    "english" : 89
}

scores = Marks.values()
# minimum marks
print(min(scores))
# maximum Marks
print(max(scores))
# average marks
print(sum(scores) / len(scores))
# Read five numeric values from stdin and count how many are even.
even_count = 0
for _ in range(5):
    value = float(input())
    if value % 2 == 0:
        even_count += 1
print('{} valores pares'.format(even_count))
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import cv2
from PIL import Image
class ElasticTransform(object):
    """Elastic (random smooth warp) image augmentation."""

    @classmethod
    def generate(cls, image, alpha_factor, sigma_factor, random_state=None):
        """Return *image* distorted by a random smooth displacement field.

        Args:
            image: input PIL image.
            alpha_factor: displacement magnitude, relative to image width.
            sigma_factor: smoothing sigma, relative to image width.
            random_state: optional np.random.RandomState for reproducibility.

        Returns:
            A new PIL image with the elastic distortion applied.
        """
        pixels = np.array(image)
        alpha = pixels.shape[1] * alpha_factor
        sigma = pixels.shape[1] * sigma_factor
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = pixels.shape
        # Two independently smoothed noise fields give per-pixel x/y offsets
        # (dx drawn first, then dy -- same RNG consumption order as before).
        dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
        dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        sample_at = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
        warped = map_coordinates(pixels, sample_at, order=1, mode='reflect')
        warped = warped.reshape(shape)
        return Image.fromarray(warped.astype('uint8'))
|
from types import SimpleNamespace
import boto3
from treadmill.infra import constants
class Singleton(type):
    """Metaclass caching one instance per boto3 service resource.

    NOTE(review): instances are keyed by class but uniqueness is checked by
    service name, so the cache holds at most one entry per class -- a later
    request for a DIFFERENT resource still returns the previously cached
    client.  Confirm this is the intended behavior.
    """
    # Class -> cached instance (the boto3 client returned by __new__).
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # The resource may arrive positionally or as a keyword; EC2 is the
        # default when neither is supplied.
        resource = kwargs.get(
            'resource',
            (constants.EC2 if (len(args) == 0) else args[0])
        )
        # Service names of all clients created so far.
        instance_resources = [klass._service_model.service_name.lower()
                              for klass in list(cls._instances.values())]
        if (resource.lower() not in instance_resources):
            cls._instances[cls] = super(
                Singleton, cls
            ).__call__(*args, **kwargs)
        return cls._instances[cls]
class Connection(metaclass=Singleton):
    """Singleton factory handing out boto3 clients in the session's region.

    NOTE(review): __new__ returns a raw boto3 client rather than a
    Connection instance, so __init__ below never runs on the returned
    object (type.__call__ skips __init__ for foreign instances).
    """
    # Session and region resolved once, at import time.
    session = boto3.session.Session()
    context = SimpleNamespace(
        region_name=session.region_name,
        domain=None
    )

    def __init__(self, resource=constants.EC2):
        pass

    def __new__(cls, resource=constants.EC2):
        # Hand back a plain boto3 client; the Singleton metaclass caches it.
        return boto3.client(
            resource, region_name=cls.context.region_name
        )
|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import selectivesearch
import cv2
def AlpacaDB(img):
    """
    Generate bounding box and show images with them
    :param img: image to process
    :return: nothing
    """
    _, regions = selectivesearch.selective_search(
        img, scale=100, sigma=0.8, min_size=10)
    candidates = set()
    for region in regions:
        rect = region['rect']
        # skip duplicate proposals and degenerate (zero width/height) boxes
        if rect in candidates or rect[2] == 0 or rect[3] == 0:
            continue
        candidates.add(rect)
    # draw rectangles on the original image
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(img)
    for x, y, w, h in candidates:
        ax.add_patch(mpatches.Rectangle(
            (x, y), w, h, fill=False, edgecolor='red', linewidth=1))
    plt.show()
def OpenCV(im):
    """
    Generate bounding box and show images with them
    :param im: image to process
    :return: nothing
    """
    # create Selective Search Segmentation Object using default parameters
    segmenter = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    segmenter.setBaseImage(im)
    segmenter.switchToSelectiveSearchQuality()
    # run selective search segmentation on input image
    rects = segmenter.process()
    print('Total Number of Region Proposals: {}'.format(len(rects)))
    # draw only the first few proposals onto a copy of the input
    numShowRects = 100
    imOut = im.copy()
    for x, y, w, h in rects[:numShowRects]:
        cv2.rectangle(imOut, (x, y), (x + w, y + h), (0, 255, 0), 1, cv2.LINE_AA)
    # show output
    cv2.imshow("Output", imOut)
    cv2.waitKey()
def main():
    # AlpacaDB(skimage.data.load("C:/Dev/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/banner/other/stockLogoBannerDesignServices.jpg"))
    image_path = "C:/Dev/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/banner/logos/pic_013.jpg"
    OpenCV(cv2.imread(image_path))


if __name__ == "__main__":
    main()
#!/usr/bin/python3
"""
Copyright (c) 2015, Joshua Saxe
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name 'Joshua Saxe' nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JOSHUA SAXE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
def extractstrings(fullpath):
    """
    Execute linux `strings` on `fullpath` sample

    Args:
        fullpath: absolute path of malware sample
    Raises:
        None
    Returns:
        strings: unique `strings` output for sample
    """
    import subprocess

    # SECURITY FIX: os.popen() interpolated the path into a shell command
    # line, so a sample name containing a quote (common in malware corpora)
    # broke the command or injected shell syntax.  An argument list with
    # shell=False removes the shell entirely.
    result = subprocess.run(["strings", fullpath],
                            capture_output=True, text=True)
    # Extract unique strings
    return set(result.stdout.split("\n"))
#def getprocessinfo(taskreportjson, processid):
# """
# """
#
# for processinfo in taskreportjson['behavior']['generic']:
# if int(processinfo['pid']) == int(processid):
# return processinfo['process_name'], processinfo['process_path'], \
# processinfo['first_seen']
#def getprocessapicalls(taskreportjson, processid):
# """
# """
#
# for pid in taskreportjson['behavior']['apistats']:
# if pid == processid:
# return list(taskreportjson['behavior']['apistats'][pid].keys())
#def extractapis(taskidslist, apicallsdict, taskreportjson, feature):
# """
# """
#
# attributes = {'apis': {}}
#
# for processid in apicallsdict:
# if processid not in attributes['apis']:
# attributes['apis'][processid] = {}
#
# # Get process name, path and first_seen time for `processid`
# attributes['apis'][processid]['name'], attributes['apis'][processid]['path'], \
# attributes['apis'][processid]['creation'] = getprocessinfo(taskreportjson, processid)
#
# # Get dynamic API calls for processid
# attributes['apis'][processid]['apicalls'] = getprocessapicalls(taskreportjson, processid)
#
# return attributes
#def getattributes(args, feature='strings'):
# """
# Returns the specified attribute of the malware sample
#
# Args:
# args: relevant parameters for feature extraction in a list
# feature: the attribute to extract
# Raises:
# Exception if unhandleable feature is requested
# Returns:
# dictionary containing keys as attribute name and
# values as attribute values
# """
#
# # Handler for extracting strings attribute
# if feature == 'strings':
# return extractstrings(args[0], 'strings')
# # Handler for extracting API information from cuckoo reports
# elif feature == 'apis':
# return extractapis(args[0], args[1], args[2], 'apis')
# # Attribute handler not available. Raise exception
# else:
# raise Exception("Attribute extraction currently not handleable...")
|
#All information
'''import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
print(Myec2)
#Instances information
import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
for pythonins in Myec2['Reservations']:
print(pythonins)
#Instance ID
import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
for pythonins in Myec2['Reservations']:
for printout in pythonins['Instances']:
print(printout['InstanceId'])
#Instance ID,Instance Type
import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
for pythonins in Myec2['Reservations']:
for printout in pythonins['Instances']:
print(printout['InstanceId'])
print(printout['InstanceType'])
#Instance ID,Instance Type,Instance State
import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
for pythonins in Myec2['Reservations']:
for printout in pythonins['Instances']:
print(printout['InstanceId'])
print(printout['InstanceType'])
print(printout['State']['Name'])'''
#Instance ID,Instance Type,Instance State,Instance Name
import boto3
client = boto3.client('ec2')
Myec2=client.describe_instances()
# Walk every reservation/instance.  NOTE(review): the instance details are
# printed once PER TAG, so instances with several tags are repeated --
# presumably only the Name tag's value was intended; verify.
for pythonins in Myec2['Reservations']:
    for printout in pythonins['Instances']:
        for printname in printout['Tags']:
            print(printout['InstanceId'])
            print(printout['InstanceType'])
            print(printout['State']['Name'])
            print(printname['Value'])
from .BaseBrush import BaseBrush
from .controls.BooleanControl import BooleanControl
from bsp.leveleditor import LEUtils
from panda3d.core import Point3
class TetrahedronBrush(BaseBrush):
    """Brush that generates a four-faced tetrahedron solid."""
    Name = "Tetrahedron"

    def __init__(self):
        BaseBrush.__init__(self)
        # Editable option: place the apex above the base triangle's centroid
        # instead of above the bounding-box center.
        self.useCentroid = self.addControl(BooleanControl(self, "Top vertex at centroid"))

    def create(self, generator, mins, maxs, material, roundDecimals, temp = False):
        """Build the tetrahedron solid filling the box (mins, maxs).

        Args:
            generator: id generator passed through to makeSolid().
            mins, maxs (Point3): bounding box corners.
            material: material applied to every face.
            roundDecimals (int): decimal places for vertex snapping.
            temp (bool): whether the solid is a temporary preview.

        Returns:
            list: a single solid produced by makeSolid().
        """
        useCentroid = self.useCentroid.getValue()
        center = (mins + maxs) / 2
        # The lower Z plane will be the triangle, with the lower Y value getting the two corners
        c1 = LEUtils.roundVector(Point3(mins.x, mins.y, mins.z), roundDecimals)
        c2 = LEUtils.roundVector(Point3(maxs.x, mins.y, mins.z), roundDecimals)
        c3 = LEUtils.roundVector(Point3(center.x, maxs.y, mins.z), roundDecimals)
        if useCentroid:
            # Apex directly above the centroid of the base triangle
            # (note: this branch skips the roundVector snapping).
            c4 = Point3((c1.x + c2.x + c3.x) / 3,
                        (c1.y + c2.y + c3.y) / 3,
                        maxs.z)
        else:
            c4 = LEUtils.roundVector(Point3(center.x, center.y, maxs.z), roundDecimals)
        # Base triangle first, then the three side faces sharing apex c4.
        # NOTE(review): winding order assumed consistent with makeSolid's
        # face-normal convention -- confirm against BaseBrush.
        faces = [
            [ c1, c2, c3 ],
            [ c4, c1, c3 ],
            [ c4, c3, c2 ],
            [ c4, c2, c1 ]
        ]
        return [self.makeSolid(generator, faces, material, temp)]
|
import json,os,time,pickle,time
import numpy as np
import scipy.io.wavfile as wavfile
import librosa
from PIL import Image
MEL_N = 40
WAV_T = (32*2) #2s
def compute_log_mel_fbank_fromsig(signal, sample_rate,n=80):
    """Compute a quantized log-mel filterbank "image" from a raw signal.

    Args:
        signal: 1-D array of PCM samples.
        sample_rate: sampling rate in Hz.
        n: number of mel filterbank channels.

    Returns:
        np.uint8 array of shape (num_frames, n): log-mel energies scaled
        into the 0..255 range.
    """
    # NOTE(review): this is a LOCAL binding only -- it does not update the
    # module-level MEL_N constant.
    MEL_N = n
    # 3. Framing: split the signal into fixed-length, non-overlapping
    # 32 ms frames (stride == size, so no overlap).
    frame_size, frame_stride = 0.032, 0.032
    frame_length, frame_step = int(round(frame_size * sample_rate)), int(round(frame_stride * sample_rate))
    signal_length = len(signal)
    num_frames = int(np.ceil(np.abs(signal_length - frame_length) / frame_step)) + 1
    pad_signal_length = (num_frames - 1) * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    pad_signal = np.append(signal, z)
    indices = np.arange(0, frame_length).reshape(1, -1) + np.arange(0, num_frames * frame_step, frame_step).reshape(-1,1)
    frames = pad_signal[indices]
    # 2. Pre-emphasis (applied per frame): boost high frequencies.
    pre_emphasis = 0.97
    for i in range(frames.shape[0]):
        frames[i] = np.append(frames[i][0], frames[i][1:] - pre_emphasis * frames[i][:-1])
    # 4. Windowing -- disabled: a rectangular window is used instead of Hamming.
    # hamming = np.hamming(frame_length)
    # frames *= hamming
    # 5. N-point fast Fourier transform (N-FFT) -> power spectrum.
    NFFT = 512
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))
    pow_frames = ((mag_frames ** 2))
    # 6. Build the triangular mel filterbank.
    low_freq_mel = 0
    high_freq_mel = 2595 * np.log10(1 + (sample_rate / 2) / 700)
    n_filter = MEL_N
    mel_points = np.linspace(low_freq_mel, high_freq_mel, n_filter + 2)
    hz_points = 700 * (10 ** (mel_points / 2595) - 1)
    fbank = np.zeros((n_filter, int(NFFT / 2 + 1)))
    bin = (hz_points / (sample_rate / 2)) * (NFFT / 2)
    for i in range(1, n_filter + 1):
        left = int(bin[i - 1])
        center = int(bin[i])
        right = int(bin[i + 1])
        # rising edge of the i-th triangle
        for j in range(left, center):
            fbank[i - 1, j + 1] = (j + 1 - bin[i - 1]) / (bin[i] - bin[i - 1])
        # falling edge of the i-th triangle
        for j in range(center, right):
            fbank[i - 1, j + 1] = (bin[i + 1] - (j + 1)) / (bin[i + 1] - bin[i])
    # 7. Log-mel energies, offset/clipped and scaled into the uint8 range.
    filter_banks = np.dot(pow_frames, fbank.T)
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
    filter_banks = 1 * np.log(filter_banks) -3 # dB
    filter_banks[np.where(filter_banks<0)]=0
    filter_banks=filter_banks*10.0
    filter_banks = filter_banks.astype(np.uint8)
    return filter_banks
def compute_log_mel_fbank(wav_file, n=80):
    """Compute log-mel filterbank features for an audio file.

    Supports WAV (scipy.io.wavfile) and FLAC (librosa, rescaled to int16
    range to match the WAV path).

    :param wav_file: path of the audio file ('wav'/'flac' substring match,
        preserved from the original)
    :param n: number of mel filters
    :return: uint8 feature matrix from compute_log_mel_fbank_fromsig
    :raises ValueError: for unsupported paths (the original fell through with
        `signal`/`sample_rate` unbound, raising UnboundLocalError instead)
    """
    if 'wav' in wav_file:
        sample_rate, signal = wavfile.read(wav_file)
        # print(signal)
    elif 'flac' in wav_file:
        signal, sample_rate = librosa.load(wav_file, sr=None)
        # librosa returns floats in [-1, 1]; rescale to int16 like the WAV path.
        signal = signal * 32768
        signal = signal.astype(np.int16)
    else:
        raise ValueError("unsupported audio file: %s" % wav_file)
    return compute_log_mel_fbank_fromsig(signal, sample_rate, n=n)
def wav2pic(wav_file):
    """Convert a WAV file into a fixed-size feature image saved next to it.

    The log-mel feature matrix is zero-padded (or truncated) to WAV_T frames
    of MEL_N filters and written as `<path without extension>.jpg`.
    """
    sample_rate, signal = wavfile.read(wav_file)
    features = compute_log_mel_fbank_fromsig(signal, sample_rate, n=MEL_N)
    if features.shape[0] < WAV_T:
        # Too short: pad the tail with zero rows.
        padded = np.zeros((WAV_T, MEL_N)).astype(np.uint8)
        padded[:features.shape[0]] = features
    else:
        # Too long: keep only the first WAV_T frames.
        padded = features[:WAV_T]
    Image.fromarray(padded).save("%s.jpg" % (wav_file[:-4]))
def pic2array(pic_file):
    """Dump an image's pixels as a C `unsigned char` array initializer.

    Writes `<path without extension>.c` containing a `mel_buf` constant with
    one `%3d,`-formatted byte (plus a space) per pixel and one image row per
    output line.

    :param pic_file: path of a single-channel image readable by PIL
    """
    img = np.array(Image.open(pic_file))
    # FIX: use a context manager so the file is closed on error, and write()
    # instead of writelines() — writelines iterates a str char-by-char, which
    # only worked by accident.
    with open("%s.c" % pic_file[:-4], "w") as fw:
        fw.write("const unsigned char mel_buf[%d*%d*1]={\\\n" % (img.shape[0], img.shape[1]))
        for y in range(img.shape[0]):
            for x in range(img.shape[1]):
                # Single channel: the original inner `for c in range(1)` loop
                # executed exactly once per pixel.
                fw.write("%3d," % (img[y, x]))
                fw.write(" ")
            fw.write("\n")
        fw.write("};\n")
def print_usage():
    """Print the command-line usage string for this tool."""
    usage = "Usage: python3 wav2array.py xxx.wav"
    print(usage)
#python wav2array.py xxx.wav
import sys
if __name__ == "__main__":
    # Expect exactly one argument: the input WAV path.
    if len(sys.argv) != 2:
        print_usage()
        exit()
    wav_file = sys.argv[1]
    # WAV -> feature JPEG, then JPEG -> C source array.
    wav2pic(wav_file)
    pic2array(wav_file[:-4]+'.jpg')
|
# Generated by Django 3.0.3 on 2020-07-29 05:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the LiveData model (one-to-one with Device)."""

    initial = True

    dependencies = [
        ('Device', '0003_auto_20200727_1225'),
    ]

    operations = [
        migrations.CreateModel(
            name='LiveData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('speed', models.CharField(default='Speed', max_length=200)),
                # NOTE(review): 'logitude' is misspelled ('longitude'); since
                # this migration may already be applied, rename it in a
                # follow-up migration rather than editing this one.
                ('logitude', models.CharField(default='Longitude', max_length=200)),
                ('latitude', models.CharField(default='Latitude', max_length=200)),
                ('live_image', models.ImageField(blank=True, upload_to='live_image')),
                ('device', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='device', to='Device.Device')),
            ],
        ),
    ]
|
from styx_msgs.msg import TrafficLight
from sim_model import SimModel
from real_model import RealModel
import os
import cv2
import time
class TLClassifier(object):
    """Traffic-light color classifier backed by a trained model."""

    def __init__(self, scenario):
        # The simulator-specific model is currently disabled; the "real"
        # model is used regardless of the scenario.
        #if scenario == "sim":
        #self.model = SimModel()
        #else:
        self.model = RealModel("light_classification/models/tl_model")

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light, BGR channel

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Debug capture of incoming frames; assumes /home/student/sim_images
        # exists — TODO remove before production.
        cv2.imwrite('/home/student/sim_images/'+str(time.time()) + '_' + '.png',image)
        # BUG FIX: the original called rospy.loginfo() here, but `rospy` is
        # never imported in this module, so every call raised NameError.
        return self.model.predict(image)
|
import os
import config
import zipfile
from kaggle.api.kaggle_api_extended import KaggleApi
# Authenticate with the Kaggle API (credentials come from ~/.kaggle).
api = KaggleApi()
api.authenticate()

print('Downloading data ...')
api.competition_download_files('lish-moa', path=os.path.join(config.ROOT_DIR, 'input'))

# Extract the downloaded archive into input/raw.
zip_path = os.path.join(config.ROOT_DIR, 'input', 'lish-moa.zip')
raw_dir = os.path.join(config.ROOT_DIR, 'input', 'raw')
with zipfile.ZipFile(zip_path, 'r') as archive:
    archive.extractall(raw_dir)
print('Finished data downloading')

# # remove the zip after unziping
# os.remove(os.path.join(config.ROOT_DIR,'input','lish-moa.zip'))

# # Single file download
# api.competition_download_file(
#     'lish-moa',
#     'train_targets_scored.csv',
#     path=os.path.join(config.ROOT_DIR,'input'))

# # console line to download data
# kaggle competitions download -c lish-moa
# Read an integer from the user and report its parity.
number = int(input('Please type a number:'))
template = '{} is even number' if number % 2 == 0 else '{} is a odd number'
print(template.format(number))
|
# gone/typesys.py
'''
Gone Type System
================
This file implements basic features of the Gone type system. There is
a lot of flexibility possible here, but the best strategy might be to
not overthink the problem. At least not at first. Here are the
minimal basic requirements:
1. Types have names (e.g., 'int', 'float', 'string')
2. Types have to be comparable. (e.g., int != float).
3. Types support different operators (e.g., +, -, *, /, etc.)
To deal with all this initially, I'd recommend representing types
as simple strings. Make tables that represent the capabilities
of different types. Make some utility functions that check operators.
KEEP IT SIMPLE. REPEAT. SIMPLE.
'''
# Operator capability tables, keyed by the token as it appears in the grammar.
ARITHM_BIN_OPS = ["+", "-", "*", "/"]    # binary arithmetic
ARITHM_UNARY_OPS = ["+", "-"]            # unary sign operators
REL_BIN_OPS = ["<", "<=", ">", ">=", "==", "!="]  # comparisons (yield bool)
BOOL_BIN_OPS = ["&&", "||", "==", "!="]  # boolean connectives and equality
BOOL_UNARY_OPS = ["!"]                   # logical negation
class Type():
    """Base class for our type system"""

    @classmethod
    def binop_type(cls, op, right_type):
        """Result type of `<cls> op <right_type>`, or None when the
        combination is not a valid operation."""
        return None

    @classmethod
    def unaryop_type(cls, op):
        """Result type of applying unary `op` to this type, or None."""
        return None

    @classmethod
    def get_by_name(cls, type_name):
        """Look up a registered type (a direct subclass) by its `name`."""
        return next((sub for sub in cls.__subclasses__()
                     if sub.name == type_name), None)
class FloatType(Type):
    name = "float"

    @classmethod
    def binop_type(cls, op, right_type):
        """float op float: arithmetic yields float, comparison yields bool."""
        if not issubclass(right_type, FloatType):
            return None
        if op in ARITHM_BIN_OPS:
            return FloatType
        if op in REL_BIN_OPS:
            return BoolType
        return None

    @classmethod
    def unaryop_type(cls, op):
        """Unary +/- keep the float type."""
        return FloatType if op in ARITHM_UNARY_OPS else None
class IntType(Type):
    name = "int"

    @classmethod
    def binop_type(cls, op, right_type):
        """int op int: arithmetic yields int, comparison yields bool."""
        if not issubclass(right_type, IntType):
            return None
        if op in ARITHM_BIN_OPS:
            return IntType
        if op in REL_BIN_OPS:
            return BoolType
        return None

    @classmethod
    def unaryop_type(cls, op):
        """Unary +/- keep the int type."""
        return IntType if op in ARITHM_UNARY_OPS else None
class CharType(Type):
    name = "char"

    @classmethod
    def binop_type(cls, op, right_type):
        """Chars only support the comparison operators, yielding bool."""
        if issubclass(right_type, CharType) and op in REL_BIN_OPS:
            return BoolType
        return None
class BoolType(Type):
    name = "bool"

    @classmethod
    def binop_type(cls, op, right_type):
        """bool op bool yields bool for the boolean/equality operators."""
        if issubclass(right_type, BoolType) and op in BOOL_BIN_OPS:
            return BoolType
        return None

    @classmethod
    def unaryop_type(cls, op):
        """Logical negation keeps the bool type."""
        return BoolType if op in BOOL_UNARY_OPS else None
|
#!/usr/bin/env python3
"""Show the current job queue.
"""
import qmk_redis
# Report the queue length, then one indented entry per pending job.
print('*** There are %s jobs on the queue.' % (len(qmk_redis.rq.jobs)))

for i, job in enumerate(qmk_redis.rq.jobs):
    print()
    # compile_firmware jobs carry huge keymap payloads; show only 3 args.
    if job.func_name == 'qmk_compiler.compile_firmware':
        shown = job.args[:3]
    else:
        shown = job.args
    args = ', '.join(str(arg) for arg in shown)
    kwargs = ', '.join('%s=%s' % (key, value) for key, value in job.kwargs.items())
    if args and not kwargs:
        full_args = args
    elif kwargs and not args:
        full_args = kwargs
    else:
        full_args = ', '.join([args, kwargs])
    print('\t%s: %s' % (i, job.id))
    print('\t %s(%s)' % (job.func_name, full_args))
|
import math
import gym
from gym import spaces, logger
from random import seed
from random import randint
from PIL import Image
from gym.utils import seeding
import numpy as np
from numpy import asarray
import cv2
def init_map(size, num_obs, border_size):
    """Build a square occupancy map: a white (255) traversable interior
    framed by a black (0) border, with random black obstacles drawn in."""
    world = np.ones((size, size, 1), np.uint8)  # allocate the canvas
    world.fill(0)  # start fully occupied (black)
    # Carve out the free interior inside the border.
    cv2.rectangle(world, (border_size, border_size), (size - border_size, size - border_size), 255,
                  -1)  # Setting world boundary
    # Scatter a random number of obstacles, each randomly square or circular.
    for _ in range(randint(0, num_obs) + 1):
        shape_kind = randint(0, 1)
        obstacle = generate_obs(shape_kind)
        if shape_kind == 0:
            cv2.rectangle(world, obstacle[0], obstacle[1], 0, -1)
        else:
            cv2.circle(world, (obstacle[0], obstacle[1]), obstacle[2], 0, -1)
    return world
def generate_obs(selection):
    """Draw a random obstacle descriptor.

    selection 0 -> ((x-30, y-30), (x+30, y+30)): corners of a 60x60 square;
    selection 1 -> (x, y, 20): center and radius of a circle.
    """
    x = randint(0, 600)
    y = randint(0, 600)
    if selection == 0:
        return ((x - 30, y - 30), (x + 30, y + 30))
    return (x, y, 20)
def select_agent_pos(env, border_size):
    """Pick one random free spawn position inside the map border.

    Samples random interior coordinates until the 7x7 patch around a
    candidate is entirely free space (49 pixels * 255 = 12495), then
    returns [[x, y]] with that single candidate.
    """
    rows, cols, _ = env.shape
    candidates = []
    while not candidates:
        x = randint(border_size, cols - border_size)
        y = randint(border_size, rows - border_size)
        patch = asarray(env[y - 3:y + 4, x - 3:x + 4])
        if patch.sum() == 12495:
            candidates.append([x, y])
    return candidates
class WorldEnv(gym.Env):
    """Gym environment: an agent explores a 2-D occupancy map.

    The agent moves in 8 directions over a randomly generated global map and
    incrementally reveals a "SLAM map"; reward is proportional to the newly
    revealed area, with a -100 penalty for collisions.
    """

    def __init__(self):
        # RESET PARAMETERS
        self.agent_step = 0   # steps taken in the current episode
        self.maxstep = 1000   # episode length limit
        # MAP PARAMETERS
        self.GLOBAL_MAP_SIZE = 750
        self.NUM_OBS = 15     # upper bound for the random obstacle count
        self.BORDER_SIZE = 50
        self.global_map = np.ones((self.GLOBAL_MAP_SIZE, self.GLOBAL_MAP_SIZE, 1), np.uint8)
        self.slam_map = self.global_map.copy()
        # AGENT PARAMETERS
        self.agent_pos_x = 0
        self.agent_pos_y = 0
        self.agent_size = 5        # drawn radius in pixels
        self.agent_color = 100
        self.agent_range = 25      # half-size of the revealed square patch
        self.agent_step_size = 5   # pixels moved per action
        # --- OBSERVATION AND ACTION SPACE ---
        # Definition of observation space. We input pixel values between 0 - 255
        self.observation_space = np.array(self.slam_map)
        # Definition of action space: 8 compass directions.
        self.action_space = spaces.Discrete(8)
        # action id -> (dx, dy) displacement in pixels
        self.action_dic = {0: (-self.agent_step_size, 0),
                           1: (self.agent_step_size, 0),
                           2: (0, -self.agent_step_size),
                           3: (0, self.agent_step_size),
                           4: (self.agent_step_size, self.agent_step_size),
                           5: (-self.agent_step_size, -self.agent_step_size),
                           6: (self.agent_step_size, -self.agent_step_size),
                           7: (-self.agent_step_size, self.agent_step_size)}

    def reset(self):
        """Start a new episode: fresh random map and spawn; returns the SLAM map."""
        # We reset the step
        self.agent_step = 0
        # We collect the generated map
        self.global_map = init_map(self.GLOBAL_MAP_SIZE, self.NUM_OBS, self.BORDER_SIZE)
        # SLAM MAP creation: value 150 marks unexplored area.
        self.slam_map = self.global_map.copy()
        self.slam_map.fill(150)
        # We get a collection of possible spawn-points
        possible_spawn_points = select_agent_pos(self.global_map, self.BORDER_SIZE)
        # We draw a random spawn-point and draw it on the map
        self.agent_pos_x, self.agent_pos_y = possible_spawn_points[0]
        pos_x = self.agent_pos_x
        pos_y = self.agent_pos_y
        # StartY:EndY, StartX:EndX. Initial visible area for the SLAM map
        crop_img = self.global_map[pos_y - self.agent_range:pos_y + self.agent_range,
                                   pos_x - self.agent_range:pos_x + self.agent_range]
        # We add the initial visible area to the slam map
        self.slam_map[pos_y - self.agent_range:pos_y + self.agent_range,
                      pos_x - self.agent_range:pos_x + self.agent_range] = crop_img
        # We add the agent to the global map
        cv2.circle(self.global_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        cv2.circle(self.slam_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        return self.slam_map

    def step(self, action):
        """Apply one action; returns (slam_map, done, reward).

        NOTE(review): the return order differs from the usual gym contract
        (obs, reward, done, info) — confirm callers expect (obs, done, reward).
        """
        # --- Step related variables ---
        collision = False
        done = False
        reward = 0
        self.agent_step += 1
        if self.agent_step == self.maxstep:  # If the agent has taken a certain number of steps we reset
            done = True
        # For removal of the previous position on the global map
        pre_agent_x = self.agent_pos_x
        pre_agent_y = self.agent_pos_y
        cv2.circle(self.global_map, (pre_agent_x, pre_agent_y), self.agent_size, 255, -1)  # Remove previous global position
        cv2.circle(self.slam_map, (pre_agent_x, pre_agent_y), self.agent_size, 255, -1)  # Remove previous slam position
        old_slam_map = self.slam_map.copy()
        # --- Defining movement ---
        move_x, move_y = self.action_dic[action]
        self.agent_pos_x += move_x
        self.agent_pos_y += move_y
        # --- Updating position ---
        # Adding new area to SLAM map
        pos_x = self.agent_pos_x
        pos_y = self.agent_pos_y
        # Checking collision: the 7x7 patch must be all free space (49 * 255 = 12495).
        test_spot = self.global_map[pos_y - 3:pos_y + 4,
                                    pos_x - 3:pos_x + 4]  # We check a 7x7 pixel patch around the agent. MAYBE CORRECT??
        test_spot_array = asarray(test_spot)  # We convert the patch to a array
        if test_spot_array.sum() != 12495:
            collision = True
            done = True
        # New visible area for the SLAM map
        crop_img = self.global_map[pos_y - self.agent_range:pos_y + self.agent_range,
                                   pos_x - self.agent_range:pos_x + self.agent_range]
        # We add the new visible area to the slam map
        self.slam_map[pos_y - self.agent_range:pos_y + self.agent_range,
                      pos_x - self.agent_range:pos_x + self.agent_range] = crop_img
        # Checking difference: pixels newly revealed by this move.
        diff = cv2.absdiff(old_slam_map, self.slam_map)
        _, thresh = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
        crop_img = thresh[pos_y - self.agent_range:pos_y + self.agent_range,
                          pos_x - self.agent_range:pos_x + self.agent_range]
        diff = asarray(crop_img)
        cv2.imshow("diff", crop_img)  # NOTE(review): debug display inside step()
        # We add the new position of the agent to the global and slam map
        cv2.circle(self.global_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        cv2.circle(self.slam_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        # Defining reward
        if collision:
            reward -= 100
        else:
            # 63750 presumably normalizes the revealed-pixel sum against the
            # 50x50 view patch at value 255 — TODO confirm the intended scale.
            num = diff.sum()/63750
            if num <= 0:
                reward -= 1
            else:
                reward += round(num, 2)
        return self.slam_map, done, reward

    def render(self):
        """Display both maps in OpenCV windows."""
        cv2.imshow("Global Map", self.global_map)
        cv2.imshow("SLAM Map", self.slam_map)

    def close(self):
        """Destroy the display windows and terminate the interpreter."""
        cv2.destroyAllWindows()
        quit()  # NOTE(review): quit() kills the whole process, not just the env
"""
world = WorldEnv()
done = False
for i in range(100):
done = False
state = world.reset()
world.render()
while not done:
num = randint(0, 7)
_, done, reward = world.step(num)
print(reward)
world.render()
cv2.waitKey(500)
"""
|
import tensorflow as tf
class PearsonCorrelationCoefficient(object):
    # NOTE(review): TF1-style API (placeholders/Session); this will not run
    # under TensorFlow 2.x without tf.compat.v1 and disabled eager execution.
    def __init__(self, sess=tf.Session()):
        """
        Compute the pairwise Pearson Correlation Coefficient (https://bit.ly/2ipHb9y)
        using TensorFlow (http://www.tensorflow.org) framework.
        :param sess a Tensorflow session
        :usage
        >>> import numpy as np
        >>> x = np.array([[1,2,3,4,5,6], [5,6,7,8,9,9]]).T
        >>> pcc = PearsonCorrelationCoefficient()
        >>> pcc.compute_score(x)
        """
        # NOTE(review): a Session as a default argument is created once at
        # class-definition time and shared by all instances — confirm intended.
        # Input matrix placeholder: one variable per column.
        self.x_ph = tf.placeholder(tf.float32, shape=(None, None))
        x_mean, x_var = tf.nn.moments(self.x_ph, axes=0)
        x_op = self.x_ph - x_mean  # center each column
        # Shape placeholders: w_a/w_b = column counts, h_b = row count.
        self.w_a = tf.placeholder(tf.int32)
        self.h_b = tf.placeholder(tf.int32)
        self.w_b = tf.placeholder(tf.int32)
        x_sd = tf.sqrt(x_var)
        # Outer product of per-column std devs, flattened for the division below.
        self.x_sds = tf.reshape(tf.einsum('i,k->ik', x_sd, x_sd), shape=(-1,))
        # Pairwise products of centered columns, per row.
        c = tf.einsum('ij,ik->ikj', x_op, x_op)
        c = tf.reshape(c, shape=tf.stack([self.h_b, self.w_a * self.w_b]))
        # Mean over rows divided by the sd outer product = correlation matrix.
        self.op = tf.reshape(tf.reduce_mean(c, axis=0) / self.x_sds, shape=tf.stack([self.w_a, self.w_b]))
        self.sess = sess

    def compute_score(self, x):
        """
        Compute the Pearson Correlation Coefficient of the x matrix. It is equivalent to `numpy.corrcoef(x.T)`
        :param x: a numpy matrix containing a variable per column
        :return: Pairwise Pearson Correlation Coefficient of the x matrix.
        """
        # A 1-D input is treated as a single column variable.
        if len(x.shape) == 1:
            x = x.reshape((-1, 1))
        assert len(x.shape) == 2 and x.shape[1] > 0
        self.sess.run(tf.global_variables_initializer())
        return self.sess.run(self.op, feed_dict={self.x_ph: x, self.h_b: x.shape[0],
                                                 self.w_a: x.shape[1], self.w_b: x.shape[1]})
# Demo: result should match numpy.corrcoef(x.T); x has one variable per column.
import numpy as np
x = np.array([[1, 2, 3, 4, 5, 6], [5, 6, 7, 8, 9, 9],[5, 6, 7, 8, 9, 9]]).T
print(x)
pcc = PearsonCorrelationCoefficient()
print(pcc.compute_score(x))
import pyautogui
import cv2
import pytesseract
import os
import time
import numpy as np
from utils.image import image_resize
import PIL.ImageGrab
# Color-range bounds for masking (3 channels per bound). The original comment
# claimed the channel order is blue, green, red, but these constants are not
# referenced anywhere in the visible code — NOTE(review): confirm the channel
# order (BGR vs HSV) against the cv2.inRange caller before relying on them.
lower_color = np.array([124, 132, 80])
upper_color = np.array([182, 255, 120])
# take a screenshot of the screen and store it in memory, then
# convert the PIL/Pillow image to an OpenCV compatible NumPy array
# and finally write the image to disk
# OCR confusion table: single characters tesseract commonly misreads, mapped
# to the intended digit in one pass. (The original also had a no-op
# '0' -> '0' replace, dropped here.)
_OCR_CHAR_FIXES = str.maketrans({
    "S": "8", "s": "6",
    "!": "1", "I": "1", "i": "1", "t": "1", "(": "1", "{": "1",
    "f": "1", "|": "1", "[": "1", "l": "1",
    "Q": "2", "B": "8", "O": "0",
})


def fit_text(text):
    """Normalize an OCR'd arithmetic expression into an eval-able string.

    Fixes common tesseract letter/digit confusions, collapses misread
    comparison operators to '=', replaces 'x' with '*', and strips
    whitespace and noise characters. The substitution order of the original
    26 chained str.replace calls is preserved exactly.

    :param text: raw string returned by the OCR engine
    :return: cleaned expression string
    """
    text = text.translate(_OCR_CHAR_FIXES)
    # Operator repairs must run before whitespace removal ('+ =' has a space).
    text = text.replace("+ =", "=")
    text = text.replace("<=", "=")
    text = text.replace("~=", "=")
    text = text.replace("<", "=")
    text = text.replace("x", "*")
    # Drop noise characters entirely.
    text = text.replace(".", "")
    text = text.replace(" ", "")
    text = text.replace("\r", "")
    text = text.replace("\n", "")
    return text.strip()
counter = 0
last_line = ""
# BUG FIX: the except-branch below prints line_ocr and line; if a failure
# happened before the OCR step (e.g. while taking the screenshot) those names
# were unbound and the handler itself raised NameError. Pre-initialize them.
line_ocr = ""
line = ""
# Main loop: screenshot -> threshold -> crop question/answer -> OCR -> eval
# the equation -> click the true/false button at fixed screen coordinates.
while True:
    try:
        start = time.time()
        image = pyautogui.screenshot()
        #image = PIL.ImageGrab.grab()
        print("Screenshot", time.time() - start)

        start = time.time()
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Keep only near-white pixels, then invert: dark text on light ground.
        (thresh, blackAndWhiteImage) = cv2.threshold(image, 240, 255, cv2.THRESH_BINARY)
        image = 255 - blackAndWhiteImage
        # Hard-coded screen regions of the question and the proposed answer —
        # TODO confirm these coordinates match the target display resolution.
        y = 755
        x = 2750
        h = 130
        w = 500
        question_img = image[y:y + h, x:x + w]
        y = 903
        x = 2850
        answer_img = image[y:y + h, x:x + w]
        image = image_resize(cv2.hconcat([question_img, answer_img]), height=25)
        print("Convert", time.time() - start)

        #cv2.imwrite(str(counter)+"_screen.png", image)
        counter = counter + 1

        start = time.time()
        line_ocr = pytesseract.image_to_string(image, config='--psm 10 --oem 3')
        print("OCR", time.time() - start)

        start = time.time()
        line = fit_text(line_ocr)
        # Turn the OCR'd '=' into a Python equality test before evaluating.
        line = line.replace("=", "==")
        # NOTE(review): eval of OCR output — acceptable only because the input
        # is a trusted on-screen arithmetic quiz.
        result = str(eval(line))
        print("EVAL", time.time() - start)
        print(line + " : " + result)

        # Only click when the question changed since the last iteration.
        if not line == last_line:
            last_line = line
            print("click")
            if result == "True":
                pyautogui.click(x=1422, y=832)
            else:
                pyautogui.click(x=1581, y=832)
            time.sleep(0.05)
    except Exception as exc:
        # On any failure, guess "false" and report the offending OCR text.
        pyautogui.click(x=1581, y=832)
        time.sleep(0.7)
        print(exc)
        print("------------------")
        print(line_ocr)
        print(line)
        print("------------------")
|
import tkinter as tk
from tkinter.messagebox import showinfo
import tkinter.filedialog as filedialog
import os
fileName = ''
def author():
    # "About author" dialog (Chinese UI strings kept verbatim).
    showinfo('作者信息', '该Note是由Yanbin完成')

def copyRight():
    # Copyright dialog including the author's blog address.
    showinfo('版权信息', '该Note归属于Yanbin\n个人博客:blog.luoyanbin.cn')
def open_file():
    """Prompt for a file and load its contents into the text pad.

    Stores the chosen path in the module-global `fileName` (None when the
    dialog is cancelled) and shows the base name in the window title.
    """
    global fileName
    fileName = filedialog.askopenfilename(defaultextension='.txt')
    if fileName == '':
        fileName = None
    else:
        root.title('File Name: ' + os.path.basename(fileName))
        textPad.delete(1.0, 'end')
        # FIX: `with` guarantees the handle is closed even if insert fails.
        with open(fileName, 'r') as f:
            textPad.insert(1.0, f.read())
def new_file():
    """Reset the editor to an unnamed, empty document."""
    global fileName
    fileName = None
    root.title('未命名文件')
    textPad.delete(1.0, 'end')
def save():
    """Write the text pad to the current file, falling back to save-as.

    The fallback triggers when no file is associated yet (fileName is None,
    so open() raises TypeError) or the path cannot be written (OSError).
    FIX: the original used a bare `except:` which also swallowed unrelated
    errors, and never closed the file handle on failure.
    """
    global fileName
    try:
        with open(fileName, 'w') as f:
            f.write(textPad.get(1.0, 'end'))
    except (TypeError, OSError):
        save_as()
def save_as():
    """Ask for a target path and write the text pad there.

    BUG FIX: cancelling the dialog returns '' and the original then crashed
    inside open(''); a cancel now simply returns without saving. The file is
    also managed with a context manager.
    """
    global fileName
    target = filedialog.asksaveasfilename(initialfile='未命名.txt', defaultextension='.txt')
    if not target:
        return
    fileName = target
    with open(target, 'w') as fw:
        fw.write(textPad.get(1.0, 'end'))
    root.title('File Name: ' + os.path.basename(fileName))
def cut():
    # Delegate to the Text widget's built-in clipboard cut.
    textPad.event_generate('<<Cut>>')

def copy():
    # Built-in clipboard copy.
    textPad.event_generate('<<Copy>>')

def paste():
    # Built-in clipboard paste.
    textPad.event_generate('<<Paste>>')

def redo():
    # Redo the last undone edit (Text widget has undo=True).
    textPad.event_generate('<<Redo>>')

def undo():
    # Undo the last edit.
    textPad.event_generate('<<Undo>>')

def select_all():
    # Tag the entire buffer as the current selection.
    textPad.tag_add('sel', '1.0', 'end')
def search():
    """Open a small find dialog and highlight every match in the text pad."""

    def execute_search():
        # Collect all matches of the entered pattern and tag them.
        target = entry1.get()
        # BUG FIX: with an empty pattern Text.search() matches without
        # advancing, so the loop never terminated; bail out instead.
        if not target:
            topSearch.destroy()
            return
        start = '1.0'
        while True:
            pos = textPad.search(target, start, 'end')
            if not pos:
                break
            # BUG FIX: tag the whole match (pos .. pos+len chars) — the
            # original passed only `pos`, highlighting a single character.
            textPad.tag_add('selection', pos, '%s+%dc' % (pos, len(target)))
            # Advance one char so overlapping matches are still found.
            start = pos + '+1c'
        textPad.tag_config('selection', background='yellow', foreground='green')
        topSearch.destroy()

    topSearch = tk.Toplevel(root)
    topSearch.geometry('300x30+200+250')
    label1 = tk.Label(topSearch, text='Find')
    label1.grid(row=0, column=0, padx=5)
    entry1 = tk.Entry(topSearch, width=24)
    entry1.grid(row=0, column=1, padx=5)
    button1 = tk.Button(topSearch, text='查找', command=execute_search)
    button1.grid(row=0, column=2)
root = tk.Tk()
root.title('Little Note')
root.geometry("500x500+100+100")

# Create Menu
menuBar = tk.Menu(root)
root.config(menu=menuBar)

# File menu: new / open / save / save-as.
fileMenu = tk.Menu(menuBar)
fileMenu.add_command(label='新建', accelerator='Ctrl + N', command=new_file)
fileMenu.add_command(label='打开', accelerator='Ctrl + O', command=open_file)
fileMenu.add_command(label='保存', accelerator='Ctrl + S', command=save)
fileMenu.add_command(label='另存为', accelerator='Ctrl + Shift + N', command=save_as)
menuBar.add_cascade(label='文件', menu=fileMenu)

# Edit menu: undo/redo, clipboard actions, find, select-all.
# NOTE(review): the accelerator texts are display-only — no key bindings are
# registered anywhere visible; confirm whether the shortcuts should work.
editMenu = tk.Menu(menuBar)
editMenu.add_command(label='撤销', accelerator='Ctrl + Z', command=undo)
editMenu.add_command(label='重做', accelerator='Ctrl + y', command=redo)
editMenu.add_separator()
editMenu.add_command(label='剪切', accelerator='Ctrl + X', command=cut)
editMenu.add_command(label='复制', accelerator='Ctrl + C', command=copy)
editMenu.add_command(label='粘贴', accelerator='Ctrl + V', command=paste)
editMenu.add_separator()
editMenu.add_command(label='查找', accelerator='Ctrl + F', command=search)
editMenu.add_command(label='全选', accelerator='Ctrl + A', command=select_all)
menuBar.add_cascade(label='编辑', menu=editMenu)

# About menu.
aboutMenu = tk.Menu(menuBar)
aboutMenu.add_command(label='作者', command=author)
aboutMenu.add_command(label='版权', command=copyRight)
menuBar.add_cascade(label='关于', menu=aboutMenu)

# Create toolBar with open/save shortcut buttons.
toolBar = tk.Frame(root, height=25, bg='light sea green')
shortButton = tk.Button(toolBar, text='打开', command=open_file)
shortButton.pack(side='left', padx=5, pady=5)
shortButton = tk.Button(toolBar, text='保存', command=save)
shortButton.pack(side='left')
toolBar.pack(side='top', fill='x', expand='no')

# Create Status Bar (static placeholder text; never updated).
status = tk.Label(root, text='Ln20', bd=1, relief='sunken', anchor='w')
status.pack(side='bottom', fill='x')

# Create Line Number & Text (line-number gutter is a static placeholder).
lineLable = tk.Label(root, width=2, bg='antique white')
lineLable.pack(side='left', fill='y')
textPad = tk.Text(root, undo=True)
textPad.pack(expand='yes', fill='both')
scroll = tk.Scrollbar(textPad)
textPad.config(yscrollcommand=scroll.set)
scroll.config(command=textPad.yview)
scroll.pack(side='right', fill='y')

root.mainloop()
|
import os
import sys

# Make the project directory and its parent importable before Django loads.
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, PROJECT_ROOT)
sys.path.insert(0, os.path.join(PROJECT_ROOT, '..'))

from django.core.handlers.wsgi import WSGIHandler

# Settings module must be configured before the handler is instantiated.
os.environ['DJANGO_SETTINGS_MODULE'] = 'mosbius.settings'
# WSGI callable used by the application server.
# NOTE(review): django.core.wsgi.get_wsgi_application() is the modern
# replacement for instantiating WSGIHandler directly.
application = WSGIHandler()
|
# Generated by Django 2.0.5 on 2018-06-19 08:23
from django.db import migrations
class Migration(migrations.Migration):
    """Adds natural-key uniqueness constraints to the calculation models."""

    dependencies = [
        ('calculation', '0026_auto_20180618_1214'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='contractor',
            unique_together={('name',)},
        ),
        migrations.AlterUniqueTogether(
            name='dish',
            unique_together={('name',)},
        ),
        migrations.AlterUniqueTogether(
            name='invoice',
            unique_together={('number', 'created_at')},
        ),
        migrations.AlterUniqueTogether(
            name='map',
            unique_together={('name',)},
        ),
        migrations.AlterUniqueTogether(
            name='people',
            # NOTE(review): 'last_name' appears twice here — almost certainly
            # meant ('last_name', 'first_name'). Since this migration may
            # already be applied, correct it in a follow-up migration rather
            # than editing this one.
            unique_together={('last_name', 'first_name', 'last_name')},
        ),
        migrations.AlterUniqueTogether(
            name='product',
            unique_together={('name',)},
        ),
    ]
|
from sklearn.datasets import *
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def average(x):
    """Arithmetic mean of a non-empty collection of numbers."""
    total = sum(x)
    return total / len(x)
def standardize(x):
    """Center a collection of numbers to mean 0 and population std dev 1.

    BUG FIX: the original body referenced undefined names `b` and `c` and
    raised NameError on every call. This implements the documented intent,
    matching sklearn's StandardScaler (population standard deviation, ddof=0).

    :param x: non-empty collection of numbers
    :return: list of standardized values; if the std dev is 0 (constant
        input) all zeros are returned instead of dividing by zero
    """
    mean = sum(x) / len(x)
    variance = sum((v - mean) ** 2 for v in x) / len(x)
    std = math.sqrt(variance)
    if std == 0:
        return [0.0 for _ in x]
    return [(v - mean) / std for v in x]
def svd(x):
    """Placeholder for a hand-rolled singular value decomposition.

    Not implemented; returns None. The script below relies on sklearn's PCA
    instead.
    """
    return
def eigenvalues(distances):
    """Eigenvalue for one principal component: the sum of squared distances."""
    return sum(d ** 2 for d in distances)
def pc_variance(eigenvalues, n_samples):
    """Variance of a principal component: eigenvalue / (n_samples - 1)."""
    return eigenvalues / (n_samples - 1)
def total_variance(pc_variances):
    """Total variance: the sum of the per-component variances."""
    return sum(pc_variances)
def scree_plot(data, distances):
    """Plot each principal component's share of the total variance.

    BUG FIX: the original called pc_variance(ev) without its required
    n_samples argument and divided by an undefined name `pc_variances`, so it
    raised on the first iteration. Variances are now computed for every
    component first, then plotted as fractions of their total.

    :param data: the sample matrix (len(data) = number of samples)
    :param distances: one collection of projection distances per component
    """
    n_samples = len(data)
    variances = [pc_variance(eigenvalues(d), n_samples) for d in distances]
    total = total_variance(variances)
    contributions = [v / total for v in variances]
    plt.plot(contributions)
    plt.show()
# --- Load data: iris features only (target column dropped) ---
data = load_iris()
df = pd.DataFrame(
    np.c_[data["data"], data["target"]], columns=data["feature_names"] + ["target"]
)
feature_columns = df.columns[df.columns != "target"]
df = df[feature_columns]

# --- Prep PCA: PCA expects zero-mean, unit-variance features ---
ss = StandardScaler()
scaled_data = ss.fit_transform(df)
print(type(scaled_data))
print(scaled_data)

# max allowed PCs is the minimum between num_features and num_obs
pca = PCA(n_components=min(df.shape[0], df.shape[1]))
# calculate loading scores and the variation each PC accounts for
pca.fit(scaled_data)
# generate coordinates for a PCA graph based on the loading scores and the
# scaled data (loading scores * scaled data)
pca_data = pca.transform(scaled_data)

# percentage of variation that each principal component accounts for
pct_variation = np.round(pca.explained_variance_ratio_ * 100, decimals=1)
# prepare labels for the PCA graph
labels = ["PC{}".format(x) for x in range(1, len(pct_variation) + 1)]

# scree plot: how many principal components should go into the final plot —
# select the PCs that describe the most amount of variation in the data
fig, ax = plt.subplots()
ax.bar(x=range(1, len(pct_variation) + 1), height=pct_variation, tick_label=labels)
ax.set(xlabel="Principal Components", ylabel="% Variation", title="Scree Plot of PCA")
plt.show()

# put the new coordinates from pca.transform(scaled_data) into a matrix whose
# rows are samples and whose columns carry the PCA labels
pca_df = pd.DataFrame(pca_data, columns=labels)
print(pca_df.head())

# plot using the PCA dataframe
# BUG FIX: the original scattered PC1 against itself (always a diagonal
# line); the y-axis label and the annotation loop below show PC1 vs PC2 was
# intended.
plt.scatter(pca_df.PC1, pca_df.PC2)
plt.title("My PCA Graph")
plt.xlabel("PC1 - {}%".format(pct_variation[0]))
# BUG FIX: the label said "PC1" while already using PC2's variation share.
plt.ylabel("PC2 - {}%".format(pct_variation[1]))
# annotate each point with its sample index
for sample in pca_df.index:
    plt.annotate(sample, (pca_df["PC1"].loc[sample], pca_df["PC2"].loc[sample]))
plt.show()

# loading scores for PC1: which features most influence the X-axis spread
# (principal components are 0-indexed, so PC1 is at index 0)
loading_scores = pd.Series(pca.components_[0], index=feature_columns)
# sort by magnitude of influence (absolute value — scores can be negative)
sorted_loading_scores = loading_scores.abs().sort_values(ascending=False)
# get top features as a mask criteria for our dataframe
top_features = sorted_loading_scores[:4].index.values
# print the loading scores
print(loading_scores[top_features])
print("************")
print(sorted_loading_scores)
print(type(feature_columns))
|
from sentence_splitter import SentenceSplitter, split_text_into_sentences
import argparse
import nltk
def post_proc(lines):
    """Re-split each line into sentences with NLTK, dropping empty results.

    FIX: removed an unused function-local `import re`, and stopped shadowing
    the loop variable `l` inside the filtering comprehension.

    :param lines: iterable of text lines
    :return: flat list of non-empty sentences
    """
    sentences = []
    for line in lines:
        tokenized = nltk.sent_tokenize(line)
        sentences += [s for s in tokenized if len(s) > 0]
    return sentences
def main():
    """Split every line of the input file into sentences and write them out.

    Reads `--file`, splits each stripped line with SentenceSplitter followed
    by NLTK post-processing, and writes one sentence per line to `--target`.
    BUG FIX: the original never closed the output file handle (it relied on
    garbage collection); both files now use context managers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', "--file", required=True)
    parser.add_argument("-t", '--target', default='./total.splited.latin.tok')
    args = parser.parse_args()

    with open(args.file, 'r') as f:
        lines = [l.strip() for l in f.readlines()]

    splitter = SentenceSplitter(language='en')
    splited_lines = []
    for l in lines:
        splited_lines += post_proc(splitter.split(text=l))

    with open(args.target, 'w') as f:
        f.write("\n".join(splited_lines) + "\n")


if __name__ == "__main__":
    main()
|
#MatthewMascolo.py
#I pledge my honor that I have abided
#by the Stevens Honor System. Matthew Mascolo
#
#This program takes a list of New York Knicks players
#and staff in Before.txt, then it capitalizes all
#players' first and last names and prints them out in After.txt
def main(inFile='Before.txt', outFile='After.txt'):
    """Upper-case every name in the input file, one name per output line.

    Generalized: the paths are now parameters defaulting to the original
    hard-coded 'Before.txt'/'After.txt', so existing `main()` calls behave
    identically.
    BUG FIX: the original `first, last = i.split()` crashed on blank lines
    and on names with more or fewer than two words; now every whitespace-
    separated part is upper-cased and blank lines are skipped. Files are
    managed with `with` so handles close even on error.
    """
    with open(inFile, "r") as src, open(outFile, "w") as dst:
        for line in src:
            parts = line.split()
            if not parts:
                continue  # skip blank lines instead of raising ValueError
            print(" ".join(part.upper() for part in parts), file=dst)
# Script entry point: runs the conversion immediately when the file executes.
main()
|
from django.shortcuts import render
from django.forms.models import model_to_dict
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework import generics
from nba_news.models import NbaNews
from nba_news.serializers import NbaNewsSerializer
from datetime import datetime
import subprocess
from django.http import HttpResponse
class NbaNewsList(mixins.ListModelMixin,
                  generics.GenericAPIView):
    """List endpoint for NBA news, newest first."""
    queryset = NbaNews.objects.all().order_by("-created")
    serializer_class = NbaNewsSerializer

    def get(self, request, *args, **kwargs):
        # NOTE(review): runs the crawler synchronously on every list request,
        # blocking the response — confirm this shouldn't be a background task.
        subprocess.run('python manage.py crawl_nba'.split())
        return self.list(request, *args, **kwargs)

class NbaNewsDetail(mixins.RetrieveModelMixin,
                    generics.GenericAPIView):
    """Retrieve endpoint for a single NBA news item."""
    queryset = NbaNews.objects.all().order_by("-created")
    serializer_class = NbaNewsSerializer

    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)

class NbaNewsViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve viewset over the same ordered queryset."""
    queryset = NbaNews.objects.all().order_by("-created")
    serializer_class = NbaNewsSerializer
def homepage(request):
    """Render the site index page."""
    return render(request, 'index.html')

def storypage(request, news_id):
    """Render a single story page.

    NOTE(review): raises NbaNews.DoesNotExist (HTTP 500) for unknown ids —
    get_object_or_404 would be the conventional choice.
    """
    news = NbaNews.objects.get(id = news_id)
    dict_render = model_to_dict(news)
    # The template's timestamp is overridden with a hard-coded label.
    dict_render['created'] = 'Today'
    return render(request, 'story.html', dict_render)

def crawl(request):
    """Trigger the news crawler synchronously and acknowledge completion."""
    subprocess.run('python manage.py crawl_nba'.split())
    # NOTE(review): "Finsh" is a typo, but it is a runtime string; left as-is.
    response = HttpResponse("Finsh crawling")
    return response
|
# Static pool of insult strings (picked from elsewhere, presumably at random —
# no caller is visible in this file). Content is intentionally crude.
list_of_insults = [
    "fart smeller"
    , "I can't tell if I'm talking to your face or your asshole"
    , "dickweed"
    , "buttpirate"
    , "douchenozzle"
    , "bitch"
    , "dick"
    , "jackass"
    , "turtledick"
    , "shut your cockholster"
    , "fudgepacker"
    , "I think you'd be in hufflepuff"
    , "wanker"
    , "dumbshit"
    , "suck a bag of dicks"
    , "before I met you I was pro-life"
    , "shitbag"
    , "mouth breather"
    , "I hope something you love catches on fire"
    , "pissant"
    , "if you die, I'm not going to your funeral"
    , "if you were any less intelligent I would have to water you twice a week"
    , "you look like a before picture"
    , "your mother has two cunts and you're one of them"
    , "dumbass"
    , "you're not very nice and I don't like you"
    , "son of a bitch"
    , "shithead"
    , "how appropriate, you fight like a cow"
    , "calling you stupid is an insult to stupid people"
    , "twat"
    , "your mother fucks for bricks so she can build your sister a whore house"
    , "fuckface"
    , "horsefucker"
    , "everyone who ever loved you was wrong"
    , "ho bag"
    , "you are a sad, lonely little man, and you have my pity"
    , "why don't you go outside and play hide-and-go-fuck-yourself?"
    , "clitsquiggle"
    , "dickhead"
    , "douche canoe"
    , "asshat"
    , "you're about as useful as Anne Frank's drum kit"
    , "you have the most depressing smile"
    , "dipshit"
    , "pussy"
    , "your muff is cabbage"
    , "you sir, are unremarkable and unmemorable in every respect"
    , "cum dumpster"
    , "ball licker"
    , "thundercunt"
    , "you're not pretty enough to be this much of a bitch"
    , "I refuse to have a battle of wits with an unarmed man"
    , "skank"
    , "bastard"
    , "with a face like yours, I'd be very careful who and what I make fun of"
    , "I hope you step on a Lego"
    , "I would slap you, but I don't want to get slut on my hand"
    , "prick"
    , "the sound of your piss hitting the urinal is feminine"
    , "I had sex with your wife"
    , "turd sandwich"
    , "jive turkey"
    , "your mother was a hamster and your father smelt of elderberries"
    , "I hope your asshole grows taste buds"
    , "bitch, you dumb"
    , "you are the opposite of batman"
    , "if your vagina had a password, it would be \"password\""
    , "I'd call you a cunt, but you lack the warmth and the depth"
    , "fucktard"
    , "you're the Belgacom of people"
    , "you're the AT&T of people"
    , "I worship the ground that awaits your grave"
    , "history will judge me harshly for not having killed you"
    , "go climb a wall of dicks"
    , "whoever's willing to fuck you is just too lazy to jerk off"
    , "you look fat when you cry"
    , "anyone who has ever loved you was wrong"
    , "wash your fuckin' mouth, you've got seven kinds of cock breath"
    , "I hope you fall in a hole so deep that you die of thirst before you hit the bottom"
    , "does your ass ever get jealous from all the shit that comes out of your mouth?"
    , "I hope you out-live your children"
    , "I'd insult you but nature did a better job"
    , "look, I don't have the time or the crayons to explain this to you"
    , "dude if I wanted a comeback I'd scrape it off your moms face"
    , "you should have been a blow job!"
    , "Scruffy-looking nerf-herder"
    , "somewhere there is a tree, tirelessly producing oxygen so that you can stay alive. Find it and apologize"
    , "if you had another brain it'd die of loneliness"
    , "I hope your day will be as pleasent as you are"
    , "I've been hated by better people than you"
    , "I've been called worse by better people"
    , "YOU'RE AN INANIMATE FUCKING OBJECT."
    , "you must've been born on a highway cause thats where all the accidents happen"
    , "I will plant a tree in your mother's ass and fuck your sister in its shade"
    , "go write some php"
    , "you have the mental agility of a bagel"
    , "you are the human equivalent of a snap-back fedora"
    , "too bad your mother didn't believe in abortion"
    , "you have the personality of an unflushed toilet"
    , "mouth-breather"
    , "you're useless as the g in lasagna"
    , "If I had a gun with two bullets, and I was in a room with you, Hitler, and Bin Laden, I would shoot you twice"
]
|
def lowercase_count(strng):
    """Return the number of lowercase characters in *strng*."""
    total = 0
    for ch in strng:
        if ch.islower():
            total += 1
    return total
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 17:22:55 2020
@author: thomas
"""
#Import modules
import numpy as np
import pandas as pd
import os, sys
import time as t
import matplotlib as mpl
import pathlib
import copy
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'  # directory this script runs from, with trailing slash
#config = sys.argv[1]
Re = sys.argv[1]  # Reynolds-number label (required CLI argument) used to locate the simulation directory
### VTK Functions
##### We need to extract a few different things from the VTK file
##### Lines 0-10: Junk
##### Line 11: time value
##### Line 12: Gives the dimensions for coordinates Nx, Ny, Ndim
##### Line 13: Name of variable, #of points, data type
##### Line 14 -> trunc(Nx/9)+1+14: X_coord values
##### For this demonstration, we know Nx = 1024
##### Line 128: var name, #of pts, data type
##### Lines 129 -> 129+trunc(Ny/9)+1: y_coord values
##### For this demonstration, we know Ny = 1024
##### Lines 244-245: Z_coord info = 0
##### Line 246: POINT_DATA, #of pts = Npts
##### Line 247: SCALARS, var Name, data type
##### Line 248: Junk
##### Lines 249 -> 249 + trunc(Npts/9)+1: var values
##### For this demonstration, Npts = 1048576
##### Line 116757: FIELD, FieldData, 2
##### Line 116758: var Name, Ndim, Npts, data type
##### Lines 116759 -> trunc(Npts*Ndim/9)+1+116759: field values
##### For this demonstration: Npts = 1048576
##### Line 233268: var Name, Ndim, data type
##### Line 233269 -> 233269 + trunc(Ndim*Npts/9)+1: field values
#VTK Functions
def ReadVTK(data):
    """Parse a pre-split legacy-VTK dump into time, mesh and field arrays.

    Parameters
    ----------
    data : list[list[str]]
        One entry per file line, each already split on whitespace
        (as produced by ReadAllVTK).

    Returns
    -------
    tuple
        (time, mx, my, w, p, ux, uy): scalar time, meshgrid coordinates,
        and field arrays transposed to (y, x) ordering.

    NOTE(review): all line offsets below assume the exact file layout
    described in the header comment of this file (9 values per data line,
    fixed section positions) -- confirm against the VTK writer.
    NOTE(review): the comments "Obtain Omega"/"Obtain Pressure" appear
    swapped relative to the variable names pArr/wArr -- verify which field
    is which before relying on the names.
    """
    #Obtain specific values and arrays from read file
    #Time
    timeValue = float(data[11][0])
    print('time = ',timeValue)
    #Obtain Refinement sizes
    Nx = int(data[12][1])
    Ny = int(data[12][2])
    #print('Nx = ',Nx)
    #print('Ny = ',Ny)
    #Obtain xcoord
    # Values are written 9 per line, hence trunc(N/9)+1 lines per section.
    Nxline = int(np.trunc(Nx/9.0)+1.0)
    startIdx = 14
    endIdx = startIdx+Nxline
    xList = data[startIdx:endIdx]
    #print(xList[0])
    xFlat = [item for sublist in xList for item in sublist]
    xVals = [float(i) for i in xFlat]
    xArr = np.array(xVals)
    #Obtain ycoord
    Nyline = int(np.trunc(Ny/9.0)+1.0)
    startIdx = endIdx+1
    endIdx = startIdx+Nyline
    yList = data[startIdx:endIdx]
    #print(yList[0])
    yFlat = [item for sublist in yList for item in sublist]
    yVals = [float(i) for i in yFlat]
    yArr = np.array(yVals)
    #Scalar Data
    Npts = int(data[endIdx+2][1])
    Nlines = int(np.trunc(Npts/9.0)+1.0)
    #Obtain Omega
    startIdx = endIdx+5
    endIdx = startIdx+Nlines
    pList = data[startIdx:endIdx]
    pFlat = [item for sublist in pList for item in sublist]
    pVals = [float(i) for i in pFlat]
    pArr = np.array(pVals)
    pArr = pArr.reshape((Nx,Ny))
    #Field Data
    Ndim = int(data[endIdx+1][1])
    Nlines = int(np.trunc(Npts*Ndim/9.0)+1.0)
    #Obtain Pressure
    startIdx = endIdx + 2
    endIdx = startIdx + Nlines
    wList = data[startIdx:endIdx]
    wFlat = [item for sublist in wList for item in sublist]
    wVals = [float(i) for i in wFlat]
    wArr = np.array(wVals)
    wArr = wArr.reshape((Nx,Ny))
    #Obtain Velocity
    Ndim = int(data[endIdx][1])
    Nlines = int(np.trunc(Npts*Ndim/9.0)+1.0)
    startIdx = endIdx + 1
    endIdx = startIdx + Nlines
    uList = data[startIdx:endIdx]
    uFlat = [item for sublist in uList for item in sublist]
    uVals = [float(i) for i in uFlat]
    uArr = np.array(uVals)
    # Velocity is vector-valued: reshape to (Nx, Ny, Ndim) and split components.
    uArr = uArr.reshape((Nx,Ny,Ndim))
    uxArr = uArr[:,:,0]
    uyArr = uArr[:,:,1]
    #print(uArr[0])
    #Make mesh out of xArr and yArr
    mx,my = np.meshgrid(xArr,yArr,indexing='ij')
    #print(mx[0])
    return (timeValue,mx,my,wArr.T,pArr.T,uxArr.T,uyArr.T)
def ReadAllVTK(cwd, idx):
    """Read DATA%05d.vtk and return its lines, each split on whitespace.

    The previous implementation read the file twice (one pass to count
    lines, a second to fill a pre-allocated list); a single pass builds
    the same list directly.

    Parameters
    ----------
    cwd : str
        Directory containing the dump, including the trailing slash.
    idx : int
        Dump index used to build the DATA%05d.vtk filename.

    Returns
    -------
    list[list[str]]
        Whitespace-split tokens of every line in the file.
    """
    with open(cwd + 'DATA%05d.vtk' % idx) as fp:
        allData = [line.split() for line in fp]
    # Preserve the original progress output (the line count, printed twice).
    print(len(allData))
    print(len(allData))
    return allData
### MAIN SCRIPT
#### READ ALL VTK FILES IN A SIMULATION DIRECTORY
#### CALCULATE AVERAGE FIELD DATA
#### EXPORT AVERAGED FIELD DATA TO CSV
if __name__ == '__main__':
    PERIOD = 0.1
    # Simulation parameters: directory for this Reynolds number
    cwd_Re = cwd_PYTHON + '../Fig4_VisitFiles_Single/Re' + Re + '/'
    # Number of dumps per period, and number of periods to average over
    DUMP_INT = 20.0
    nPer = 1
    # Paths to data and plots
    cwd_DATA = cwd_Re + '/VTK/'
    countPer = 0
    countPlot = 0
    for countPer in range(nPer):
        AVGfile = pathlib.Path(cwd_DATA + 'AVG/AVG_%04d.csv' % countPer)
        if not AVGfile.exists():
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # recommended replacement for interval timing.
            start = t.perf_counter()
            # Create running-sum arrays (grid size fixed for this dataset)
            Nx, Ny = 1024, 1024
            mxsum = np.zeros((Nx, Ny))
            mysum = np.zeros((Nx, Ny))
            wsum = np.zeros((Nx, Ny))
            psum = np.zeros((Nx, Ny))
            uxsum = np.zeros((Nx, Ny))
            uysum = np.zeros((Nx, Ny))
            # Accumulate fields over one period of dumps
            for idx in range(0, int(DUMP_INT)):
                dumpIdx = int(DUMP_INT) * countPer + idx
                # Read all VTK data into a list of token lists
                allData = ReadAllVTK(cwd_DATA, dumpIdx)
                # Extract time, mesh and field arrays from allData
                time, mx, my, wArr, pArr, uxArr, uyArr = ReadVTK(allData)
                # Add fields to the running sums
                mxsum += mx
                mysum += my
                wsum += wArr
                psum += pArr
                uxsum += uxArr
                uysum += uyArr
                countPlot += 1
            # Calculate period-averaged fields
            mxavg = mxsum / DUMP_INT
            myavg = mysum / DUMP_INT
            wavg = wsum / DUMP_INT
            pavg = psum / DUMP_INT
            uxavg = uxsum / DUMP_INT
            uyavg = uysum / DUMP_INT
            # Export averaged field data as a flat table
            avgDict = {'mx': mxavg.flatten(), 'my': myavg.flatten(),
                       'avgW': wavg.flatten(), 'avgP': pavg.flatten(),
                       'avgUx': uxavg.flatten(), 'avgUy': uyavg.flatten()}
            avgFieldData = pd.DataFrame(data=avgDict)
            pathlib.Path(cwd_DATA + '/AVG/').mkdir(parents=True, exist_ok=True)
            # BUG FIX: the filename was hard-coded to AVG_0100.csv, so the
            # existence check above (AVG_%04d.csv % countPer) never matched
            # the written file and every run recomputed the average; write
            # the per-period file instead.
            avgFieldData.to_csv(cwd_DATA + '/AVG/AVG_%04d.csv' % countPer,
                                index=False, sep=' ', float_format='%.5e')
            stend = t.perf_counter()
            diff = stend - start
            print('Time to run for 1 period = %.5fs' % diff)
            sys.stdout.flush()
        else:
            print('Re = %s: Per = %i: AVG file exists already' % (Re, countPer))
|
# try to ingest some candidates
import astropy.units as u
from astropy.time import Time
import marshaltools
# Candidate avro packet id to ingest; overwritten a few lines below --
# presumably a scratch/test script. TODO confirm which id should be active.
avro_id = '634445152015015010'
# use wisely
#avro_ids = [
#	634209464915015007, 634313330115015002, 634242335115015022, 628140190315015023,
#	634209464415015033, 634258942115015000, 627195522015015005, 634182000215015004,
#	634306734915015002, 634147162715015055
#	]
#avro_id = '634209464915015007'
avro_id = '634313330115015002'
# Load the marshal program without pre-fetching sources/candidates (faster startup).
prog = marshaltools.ProgramList("AMPEL Test", load_sources=False, load_candidates=False)
#prog = marshaltools.ProgramList("AMPEL Test", load_candidates=True)
# NOTE(review): be_anal=True presumably asks ingest_avro to verify the
# ingestion -- confirm against marshaltools documentation.
prog.ingest_avro(['634242335115015022', '628140190315015023'], be_anal=True)
# ----------------------------------------------------------------------------- #
# manually ingest the package (this workds)
##status = marshaltools.gci_utils.growthcgi(
## 'ingest_avro_id.cgi',
## auth=(prog.user, prog.passwd),
## to_json=False,
## data={'avroid': str(avro_id), 'programidx': 3}
## )
##print (status)
### see if the source you have is in the candidate page
##start = Time.now()-5*u.min
##end = Time.now()+5*u.min
##cand = prog.query_candidate_page('selected', start, end)
##ingested_ids = [cc['candid'] for cc in cand]
##for c in cand:
## print (c['candid'])
##print (len(cand))
##print (avro_id in ingested_ids)
##print (type(ingested_ids[0]))
##print (str(ingested_ids[0]) == avro_id)
# ----------------------------------------------------------------------------- #
#res = prog.ingest_avro('627195524515015019', be_anal=True)
#print ("ingestion result:", res)
#res = prog.ingest_avro('634445152015015010', be_anal=True)
#print ("ingestion result:", res)
#res = prog.ingest_avro(['623497692415015010', '627195524515015019'], be_anal=True)
#print ("ingestion result:", res)
|
from state import myenv
from ops import mine, ProjTask
import schemas
class rollback(ProjTask):
    """Project task that reverts the deployment to the previous release.

    Switches the schema's "current" pointer back to the previous release,
    then deletes the release directory that was current before the switch.
    """
    def work(self, *args, **kw):
        # Release layout rooted at the project home.
        schema = schemas.Cap(myenv.home)
        curr = schema.current_release()
        prev = schema.get_previous()
        # Repoint "current" before removing anything, so a failure mid-way
        # still leaves a usable deployment.
        schema.switch_current_to(prev)
        # NOTE(review): curr is interpolated into a shell command with only
        # single quotes; if release paths can ever contain quotes this needs
        # proper escaping (e.g. shlex.quote) -- confirm how `mine` executes
        # its argument.
        mine("rm -rf '%s'" % curr)
|
from common.run_method import RunMethod
import allure
@allure.step("极题库")
def questionMaterial_uploadImages_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库"
url = f"/service-question/questionMaterial/uploadImages"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库")
def questionMaterial_updateLibraryStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库"
url = f"/service-question/questionMaterial/updateLibraryStatus"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库")
def questionMaterial_putToLibrary_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库"
url = f"/service-question/questionMaterial/putToLibrary"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通")
def questionMaterial_uploadMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通"
url = f"/service-question/questionMaterial/uploadMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通")
def questionMaterial_deleteMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通"
url = f"/service-question/questionMaterial/deleteMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通")
def questionMaterial_getMaterialsByGstPage_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通"
url = f"/service-question/questionMaterial/getMaterialsByGstPage"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通")
def questionMaterial_updateMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通"
url = f"/service-question/questionMaterial/updateMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库")
def questionMaterial_getMaterialsByGtkPage_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库"
url = f"/service-question/questionMaterial/getMaterialsByGtkPage"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库/添加老师区域归属")
def questionMaterial_addOrModTeacherMaterialDepartment_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库/添加老师区域归属"
url = f"/service-question/questionMaterial/addOrModTeacherMaterialDepartment"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库/查询老师区域归属")
def questionMaterial_getTeacherMaterialDepartment_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库/查询老师区域归属"
url = f"/service-question/questionMaterial/getTeacherMaterialDepartment"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极题库/上传人历史地址")
def questionMaterial_getHistoryAddress_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极题库/上传人历史地址"
url = f"/service-question/questionMaterial/getHistoryAddress"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
|
import os
# BUG FIX: `oauth2` was imported twice on this line; the duplicate is dropped.
from authomatic.providers import oauth2, openid

basedir = os.path.abspath(os.path.dirname(__file__))

SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')

CSRF_ENABLED = True
# SECURITY: the secret key and the OAuth consumer secrets below are
# hard-coded in source control; move them to environment variables or an
# untracked instance config before deploying.
SECRET_KEY = 'you-will-never-guess'

#authentication + API config
CONFIG = {
    'fb': {
        'class_': oauth2.Facebook,
        #Facebook is an authorizationProvider too.
        'consumer_key': '1413571332252515',
        'consumer_secret': '7b930cd8a0515af23fd4e5ef82f0b72c',
        #But it is also an OAuth 2.0 provider and it needs scope.
        'scope': ['user_about_me', 'email', 'publish_stream']
    },
    'gg': {
        'class_': oauth2.Google,
        'consumer_key': '481726523803.apps.googleusercontent.com',
        'consumer_secret': 'kQpNVsNObiRbzzIbf63nunK_',
        'scope': ['user_about_me', 'email', 'publish_stream']
    }
}

#authentication providers
PROVIDERS = [
    {'name': 'facebook', 'url': 'login/fb'},
    {'name': 'twitter', 'url': 'login/tw'}]
#	{ 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
#	{ 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
#	{ 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]

#mail server settings
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USERNAME = None
MAIL_PASSWORD = None

#admin list
ADMINS = ['kenjin.p@gmail.com']

#pagination
POSTS_PER_PAGE = 3
|
import click
import screed
import tempfile
import sys
import os
from Bio import SeqIO
def estimate_num_reads(input_file, num_reads, lines_per_read):
    """ Return int estimate of reads"""
    # Strategy: copy the first ~num_reads records to a temp file, then scale
    # the full file's byte size by the sample's byte size to estimate the
    # total number of reads without reading the whole file.
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as tmp:
            with open(input_file) as fasta:
                for i, line in enumerate(fasta):
                    tmp.write(line)
                    # Stop once num_reads records have been sampled.
                    # NOTE(review): one extra line is written on the break
                    # iteration, so the estimate is slightly biased -- likely
                    # negligible for num_reads of ~1e6, but confirm.
                    if i >= num_reads*lines_per_read:
                        break
        # If the loop ran to completion the file was too small to sample.
        if i < num_reads*lines_per_read:
            sys.exit('Not enough reads to meet num_reads requirement')
        tmp_file_size = os.path.getsize(path)
    finally:
        # Always remove the temp file, even on sys.exit.
        os.remove(path)
    # Scale the sampled read count by the ratio of file sizes.
    return int((float(os.path.getsize(input_file)) / float(tmp_file_size)) * float(num_reads))
def query_num_reads(input_file, lines_per_read):
    """Return the exact number of reads in input_file by counting lines.

    BUG FIX: the original used Python 3 true division, returning a float
    (e.g. 2.0) for what is inherently an integer count, and raised
    NameError on an empty file (loop variable never bound). Floor division
    and a zero initialiser fix both.

    :param input_file: path to the FASTA/FASTQ file
    :param lines_per_read: lines per record (2 for FASTA, 4 for FASTQ)
    :return: int number of complete records
    """
    line_count = 0
    with open(input_file) as fasta:
        for line_count, _ in enumerate(fasta, start=1):
            pass
    return line_count // lines_per_read
def get_lines_per_read(input_file):
    """Return the number of lines per record: 2 for FASTA ('>'), 4 for FASTQ ('@').

    BUG FIX: the original opened the file without ever closing it (handle
    leak) and raised IndexError on an empty file; a `with` block and a
    `[:1]` slice fix both. An unrecognised first character still exits via
    sys.exit with the original message.

    :param input_file: path to the sequence file
    :return: 2 or 4
    """
    with open(input_file) as handle:
        first_char = handle.readline()[:1]
    if first_char == '>':
        return 2
    if first_char == '@':
        return 4
    sys.exit('File does not appear to be fasa or fastq')
def estimate_reads(input_file, num_reads=1000000):
    """Convenience wrapper: detect the record size, then estimate the read count."""
    record_lines = get_lines_per_read(input_file)
    return estimate_num_reads(input_file, num_reads, record_lines)
@click.command()
@click.argument('input_file')
@click.option('-n', '--num_reads', default=1000000)
@click.option('--test/--no_test', default=False)
def run_experiment(input_file, num_reads, test):
    """Estimate the number of reads in input_file; optionally verify it.

    BUG FIX: the original printed `read_truth` unconditionally and *before*
    it was assigned (assignment happened later, inside `if test:`), so every
    run raised NameError. The estimate is now printed unconditionally and
    the true count is computed and printed only when --test is given.
    """
    lines_per_read = get_lines_per_read(input_file)
    read_estimate = estimate_num_reads(input_file, num_reads, lines_per_read)
    print('estimated number of reads: {}'.format(read_estimate))
    if test:
        read_truth = query_num_reads(input_file, lines_per_read)
        print('true number of reads: {}'.format(read_truth))
    return None


if __name__ == "__main__":
    run_experiment()
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.backend.experimental.helm.register import rules as helm_rules
from pants.backend.experimental.helm.register import target_types as helm_target_types
from pants.backend.helm.check.kubeconform.chart import rules as chart_rules
from pants.backend.helm.check.kubeconform.deployment import rules as deployment_rules
def target_types():
    """Expose the Helm backend's target types for this experimental plugin."""
    helm_types = helm_target_types()
    return helm_types
def rules():
    """Combine the Helm backend rules with the kubeconform chart/deployment rules."""
    combined = list(helm_rules())
    combined.extend(chart_rules())
    combined.extend(deployment_rules())
    return combined
|
#! /usr/bin/env python
from numpy import ma
from numpy.lib.function_base import append
import open3d
import numpy as np
from ctypes import * # convert float to uint32
import tf
import rospy
from std_msgs.msg import Header, String, Float64MultiArray
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import PointCloud2, PointField
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from visualization_msgs.msg import Marker
import sensor_msgs.point_cloud2 as pc2
import matplotlib.pyplot as plt
import math
from sklearn.cluster import MeanShift, estimate_bandwidth
from itertools import cycle
# NOTE(review): `global` at module level is a no-op; these two statements
# have no effect and only document intent.
global o3dpc
global header
# PointCloud2 field layout for an XYZ-only cloud (3 packed float32s).
FIELDS_XYZ = [
    PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
    PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
    PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
]
# XYZ fields plus a packed 32-bit RGB value at byte offset 12.
FIELDS_XYZRGB = FIELDS_XYZ + \
    [PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1)]
# Bit operations
BIT_MOVE_16 = 2**16
BIT_MOVE_8 = 2**8
# PEP 8 (E731): named functions instead of lambda assignments -- easier to
# document and they show a proper name in tracebacks. Call signatures and
# return values are unchanged.
def convert_rgbUint32_to_tuple(rgb_uint32):
    """Unpack a 0x00RRGGBB-packed uint32 into an (r, g, b) tuple of ints."""
    return (
        (rgb_uint32 & 0x00ff0000) >> 16,
        (rgb_uint32 & 0x0000ff00) >> 8,
        (rgb_uint32 & 0x000000ff),
    )


def convert_rgbFloat_to_tuple(rgb_float):
    """Reinterpret a float's raw bits as a packed RGB uint32 and unpack it.

    PCL packs RGB into a float by type-punning; ctypes recovers the
    underlying uint32 without changing the bit pattern.
    """
    packed = int(cast(pointer(c_float(rgb_float)), POINTER(c_uint32)).contents.value)
    return convert_rgbUint32_to_tuple(packed)
class TableTop:
def __init__(self):
    """Set up topic names, the table-height acceptance band and debug switches."""
    # Topics this node publishes on
    self.topic_debug = "apbot/points_debug"
    self.topic_table_top = "apbot/table_top"
    self.topic_sink_center = "apbot/sink_center"
    self.topic_table_top_center = "apbot/table_top_center"
    self.topic_table_dimensions = "apbot/table_dimensions"
    # Height band within which a horizontal plane is accepted as the table
    # top (presumably metres in the base_footprint frame -- TODO confirm).
    self.z_min = 0.5
    self.z_max = 2.0
    # Becomes True once the table-top plane has been found
    self.check = False
    # Frames used for publishing and for processing the cloud
    self.publishing_frame = "base_footprint"
    self.global_frame = "base_footprint"
    # Result poses (note: "table_dimesions" keeps the original attribute
    # spelling; renaming it would break external readers of this field)
    self.table_top_center = PoseStamped()
    self.sink_center = PoseStamped()
    self.table_dimesions = PoseStamped()
    # The 2 variables that can be used to see the point clouds being processed
    # Ros vizualization
    self.debug = 0
    # open3d vizualization
    self.open3d_debug = 0
def convertImage(self,msg):
# global received_ros_cloud
self.received_ros_cloud = msg
# rospy.loginfo("-- Received ROS PointCloud2 message.")
def convertCloudFromRosToOpen3d(self,ros_cloud):
    """Convert a ROS PointCloud2 message into an open3d PointCloud.

    Handles both XYZ-only and XYZRGB clouds; the RGB channel may arrive
    either as a type-punned float (pcl::toROSMsg) or as a plain uint32.
    Returns None for an empty cloud. Side effect: stores the message's
    frame id in the module-level `header` global.
    """
    global header
    # Get cloud data from ros_cloud
    field_names=[field.name for field in ros_cloud.fields]
    cloud_data = list(pc2.read_points(ros_cloud, skip_nans=True, field_names = field_names))
    header = ros_cloud.header.frame_id
    # Check empty
    open3d_cloud = open3d.geometry.PointCloud()
    if len(cloud_data)==0:
        print("Converting an empty cloud")
        return None
    # Set open3d_cloud
    if "rgb" in field_names:
        IDX_RGB_IN_FIELD=3 # x, y, z, rgb
        # Get xyz
        xyz = [(x,y,z) for x,y,z,rgb in cloud_data ] # (why cannot put this line below rgb?)
        # Get rgb
        # Check whether int or float
        if type(cloud_data[0][IDX_RGB_IN_FIELD])==float: # if float (from pcl::toROSMsg)
            rgb = [convert_rgbFloat_to_tuple(rgb) for x,y,z,rgb in cloud_data ]
        else:
            rgb = [convert_rgbUint32_to_tuple(rgb) for x,y,z,rgb in cloud_data ]
        # combine; open3d expects colours normalised to [0, 1]
        open3d_cloud.points = open3d.utility.Vector3dVector(np.array(xyz))
        open3d_cloud.colors = open3d.utility.Vector3dVector(np.array(rgb)/255.0)
    else:
        xyz = [(x,y,z) for x,y,z in cloud_data ] # get xyz
        open3d_cloud.points = open3d.utility.Vector3dVector(np.array(xyz))
    # return
    return open3d_cloud
def convertCloudFromOpen3dToRos(self, open3d_cloud, frame_id="camera_depth_frame"):
    """Convert an open3d PointCloud into a ROS PointCloud2 (XYZ only).

    Colours are not transferred: the output carries only x/y/z fields,
    stamped with the current ROS time in the given frame.
    """
    msg_header = Header()
    msg_header.stamp = rospy.Time.now()
    msg_header.frame_id = frame_id
    xyz = np.asarray(open3d_cloud.points)
    return pc2.create_cloud(msg_header, FIELDS_XYZ, xyz)
def table_top_func(self, point_cloud, z_min, z_max, debug, open3d_debug):
    """RANSAC-segment planes until a horizontal one in [z_min, z_max] is found.

    Repeatedly extracts the dominant plane from `point_cloud`. A plane whose
    normal is dominated by +z and whose centre height lies inside the band
    is accepted as the table top and returned immediately.

    Returns (planes, found): when found is True the last entry of `planes`
    is the table-top cloud; otherwise `planes` holds the coloured rejected
    planes plus the leftover cloud.
    """
    # Per-iteration RGB colours for up to 7 segmented planes (debug viewing).
    palette = [
        (0, 0, 1.0),
        (0, 1.0, 1.0),
        (0, 1.0, 0),
        (1.0, 1.0, 1.0),
        (1.0, 1.0, 0),
        (1.0, 0, 0),
        (1.0, 1.0, 0),
    ]
    planes = []
    count = 0
    found = False
    # Stop once too few points remain to segment reliably.
    while len(np.asarray(point_cloud.points)) > 1000:
        plane_model, inliers = point_cloud.segment_plane(distance_threshold=0.0005, ransac_n=3, num_iterations=1000)
        inlier_cloud = point_cloud.select_by_index(inliers)
        if debug or open3d_debug:
            print("cloud: ", count, " Normals")
            [a, b, c, d] = plane_model
            print("a:", a)
            print("b:", b)
            print("c:", c)
            print("d:", d)
            print(plane_model)
        # Colour this plane (planes beyond the palette keep the last colour,
        # matching the original behaviour of the if/elif chain).
        if count < len(palette):
            colour = palette[count]
        inlier_cloud.paint_uniform_color(list(colour))
        # A table top is (near-)horizontal: normal dominated by +z.
        if (plane_model[0] < 0.05) and (plane_model[1] < 0.05) and (plane_model[2] > 0.95):
            center = inlier_cloud.get_center()
            if (center[2] > z_min) and (center[2] < z_max):
                # Found the table top -- return immediately.
                planes.append(inlier_cloud)
                found = True
                return planes, found
        # Remove this plane's points and keep segmenting the remainder.
        point_cloud = point_cloud.select_by_index(inliers, invert=True)
        count += 1
    planes.append(point_cloud)
    return planes, found
def transform_frames(self,target_frame,source_frame, debug, open3d_debug):
    """Look up the TF transform from source_frame to target_frame.

    Blocks (busy-retrying) until the transform is available or rospy
    shuts down.

    Returns
    -------
    (homogenous, trans1, quat1)
        4x4 homogeneous transform matrix, translation [x, y, z], and
        quaternion [x, y, z, w].
    """
    #Finding the transform
    listner = tf.TransformListener()
    while not rospy.is_shutdown():
        try:
            trans1, quat1 = listner.lookupTransform(target_frame, source_frame, rospy.Time(0))
            break
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # Transform not available yet -- keep polling.
            continue
    # NOTE(review): this empty-list assignment is dead code, immediately
    # overwritten by the quaternion matrix below.
    homogenous = []
    # Build the 4x4 rotation matrix from the quaternion, then insert the
    # translation into the last column.
    homogenous = tf.transformations.quaternion_matrix(quat1)
    for i in range(0,3):
        homogenous[i][3] = trans1[i]
    if debug or open3d_debug:
        print("matrix")
        print(homogenous)
    return homogenous,trans1,quat1
def shortest_distance(self, x1, y1, a, b, c):
    """Perpendicular distance from point (x1, y1) to the line a*x + b*y + c = 0."""
    numerator = abs(a * x1 + b * y1 + c)
    denominator = math.sqrt(a * a + b * b)
    return numerator / denominator
def point_distance(self, point1, point2):
    """Euclidean distance between two points, using x/y components only."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return (dx * dx + dy * dy) ** 0.5
def find_equation(self, point1, point2):
    """Coefficients (a, b, c) of the line a*x + b*y + c = 0 through two points."""
    x1, y1 = point1[0], point1[1]
    x2, y2 = point2[0], point2[1]
    a = y2 - y1
    b = x1 - x2
    c = y1 * (x2 - x1) - x1 * (y2 - y1)
    return a, b, c
def ros_publishers(self):
    """Create all publishers: debug cloud, table cloud, sink/table poses, dimensions."""
    self.pub_debug = rospy.Publisher(self.topic_debug, PointCloud2, queue_size=1)
    self.pub_table_top = rospy.Publisher(self.topic_table_top, PointCloud2, queue_size=1)
    self.pub_sink_center = rospy.Publisher(self.topic_sink_center, PoseStamped, queue_size=1)
    self.pub_table_top_center = rospy.Publisher(self.topic_table_top_center, PoseStamped, queue_size=1)
    self.pub_table_dimensions = rospy.Publisher(self.topic_table_dimensions , PoseStamped, queue_size=1)
def ros_subscriber(self):
    """Subscribe to the depth camera's cloud; convertImage stores each message."""
    # Subscribe to point cloud
    sub2 = rospy.Subscriber('/camera/depth/points', PointCloud2, callback=self.convertImage, queue_size=10)
def sink_clustering(self,table_points,debug):
    """Estimate table dimensions and locate a sink opening in the table top.

    Steps:
      1. Find the extreme points of the table-top cloud and derive the
         table's width/length (falling back to the bounding box when the
         two edge estimates disagree).
      2. Collect "boundary" points where consecutive points jump in y but
         are not near the outer table edges -- candidate sink-rim points.
      3. Keep only points far from all four table edges, then MeanShift-
         cluster them to find sink candidate centres.

    Side effects: sets self.table_dimesions and self.sink_check, and (when
    debug) saves/shows a scatter plot of all point groups.

    Returns
    -------
    (cluster_centers, table_center)
        cluster_centers is [-1, -1, -1] when no sink was detected;
        table_center is [x, y] of the table's bounding-box centre.
    """
    boundry_points = []
    x_t = [x[0] for x in table_points]
    y_t = [y[1] for y in table_points]
    # Extreme points (index, then the full point) along each axis
    x_min_i = np.argmin(x_t)
    x_min_p = table_points[x_min_i]
    y_min_i = np.argmin(y_t)
    y_min_p = table_points[y_min_i]
    x_max_i = np.argmax(x_t)
    x_max_p = table_points[x_max_i]
    y_max_i = np.argmax(y_t)
    y_max_p = table_points[y_max_i]
    x_min = min(x_t)
    x_max = max(x_t)
    y_min = min(y_t)
    y_max = max(y_t)
    corner_points= [x_min_p , x_max_p , y_min_p , y_max_p]
    # Decide which pair of corner-to-corner edges is "width" vs "length"
    # based on the table's orientation; if the two independent estimates
    # disagree by more than 5 cm fall back to the axis-aligned bounding box.
    if abs(x_min_p[1] - y_max_p[1]) < 0.1:
        width1 = self.point_distance(x_min_p,y_max_p)
        length1 = self.point_distance(y_max_p,x_max_p)
        width2 = self.point_distance(x_max_p,y_min_p)
        length2 = self.point_distance(y_min_p,x_min_p)
        if abs(width2 - width1) > 0.05:
            width = x_max - x_min
            length = y_max - y_min
        else:
            width = (width1 + width2)/2
            length = (length2 + length1)/2
    else:
        length1 = self.point_distance(x_min_p,y_max_p)
        width1 = self.point_distance(y_max_p,x_max_p)
        length2 = self.point_distance(x_max_p,y_min_p)
        width2 = self.point_distance(y_min_p,x_min_p)
        if abs(width2 - width1) > 0.05:
            width = x_max - x_min
            length = y_max - y_min
        else:
            width = (width1 + width2)/2
            length = (length2 + length1)/2
    print("width", "length", width, length)
    # Store dimensions in the pose message (x=width, y=length by convention)
    self.table_dimesions.pose.position.x = width
    self.table_dimesions.pose.position.y = length
    table_center = [(x_min+x_max)/2,(y_min+y_max)/2]
    print(x_min)
    print(x_max)
    print(y_min)
    print(y_max)
    # A large jump in y between consecutive points away from the table's
    # outer edges suggests an interior hole (the sink rim).
    for i in range(0,len(table_points)):
        if abs(table_points[i][1] - table_points[i-1][1]) > 0.2:
            # if (abs(table_points[i-1][0] - x_min) > 0.2) and (abs(table_points[i-1][0] - x_max) > 0.2) and (abs(table_points[i-1][1] - y_max) > 0.003) and (abs(table_points[i-1][1] - y_min) > 0.003):
            # boundry_points.append(table_points[i-1])
            # print("bounding points")
            if (abs(table_points[i][0] - x_min) > 0.003) and (abs(table_points[i][0] - x_max) > 0.003) and (abs(table_points[i][1] - y_max) > 0.05) and (abs(table_points[i][1] - y_min) > 0.05):
                boundry_points.append(table_points[i-1])
                boundry_points.append(table_points[i])
                print("bounding points")
    # Lines through the four table edges, used to reject rim candidates
    # that are actually on the table's perimeter.
    equation_1 = self.find_equation(x_min_p,y_max_p)
    equation_2 = self.find_equation(y_max_p,x_max_p)
    equation_3 = self.find_equation(x_max_p,y_min_p)
    equation_4 = self.find_equation(y_min_p,x_min_p)
    sink_points=[]
    for i in boundry_points:
        d1 = self.shortest_distance(i[0],i[1],equation_1[0],equation_1[1],equation_1[2])
        d2 = self.shortest_distance(i[0],i[1],equation_2[0],equation_2[1],equation_2[2])
        d3 = self.shortest_distance(i[0],i[1],equation_3[0],equation_3[1],equation_3[2])
        d4 = self.shortest_distance(i[0],i[1],equation_4[0],equation_4[1],equation_4[2])
        if not (d1<0.05 or d2<0.05 or d3<0.05 or d4<0.05 ):
            sink_points.append(i)
            print("Sink Point")
    # Require a minimum number of interior points before declaring a sink.
    self.sink_check = False
    if len(sink_points) > 4 :
        self.sink_check = True
    if(self.sink_check):
        # The following bandwidth can be automatically detected using
        bandwidth = estimate_bandwidth(sink_points, quantile=0.8, n_samples=70)
        ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
        ms.fit(sink_points)
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_
        labels_unique = np.unique(labels)
        n_clusters_ = len(labels_unique)
        # NOTE(review): cluster_2/cluster_3 are never populated and every
        # point lands in cluster_1 regardless of label -- confirm intent.
        cluster_1 = []
        cluster_2 = []
        cluster_3 = []
        for i in range(len(sink_points)):
            for j in range(n_clusters_):
                if labels[i] == labels_unique[j]:
                    cluster_1.append(sink_points[i])
        # Extreme points of the (first) sink cluster
        x_cl = [x[0] for x in cluster_1]
        y_cl = [y[1] for y in cluster_1]
        sink_x_min_i = np.argmin(x_cl)
        sink_y_min_i = np.argmin(y_cl)
        sink_x_max_i = np.argmax(x_cl)
        sink_y_max_i = np.argmax(y_cl)
        sink_x_min_p = cluster_1[sink_x_min_i]
        sink_y_min_p = cluster_1[sink_y_min_i]
        sink_x_max_p = cluster_1[sink_x_max_i]
        sink_y_max_p = cluster_1[sink_y_max_i]
        sink_corner_points= [sink_x_min_p , sink_x_max_p , sink_y_min_p , sink_y_max_p]
    # Ploting the data
    x_b = [x[0] for x in boundry_points]
    y_b = [y[1] for y in boundry_points]
    x_c = [x[0] for x in corner_points]
    y_c = [y[1] for y in corner_points]
    if(self.sink_check):
        x_s = [x[0] for x in sink_points]
        y_s = [y[1] for y in sink_points]
        x_cs = [x[0] for x in sink_corner_points]
        y_cs = [y[1] for y in sink_corner_points]
    if debug:
        plt.scatter(x_t, y_t,s=32)
        plt.scatter(x_b,y_b,color='red', s=32)
        plt.scatter(x_c,y_c,color='green', s=32)
        if(self.sink_check):
            plt.scatter(x_s,y_s,color='k', s=32)
            plt.scatter(x_cs,y_cs,color='c', s=32)
            colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
            for k, col in zip(range(n_clusters_), colors):
                my_members = labels == k
                cluster_center = cluster_centers[k]
                plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,markeredgecolor='k', markersize=14)
        plt.savefig('plot.png', dpi=300, bbox_inches='tight')
        plt.show()
    # Sentinel value when no sink was found.
    if not self.sink_check:
        cluster_centers = [-1,-1,-1]
    return cluster_centers,table_center
def publishing_topics(self):
    """Publish the table-top cloud, TF frames, and pose/dimension topics.

    Requires start_detection to have populated self.ros_plane_table_top,
    self.table_top_center, self.sink_center and self.sink_check first.
    Returns (table-top cloud, table-top pose, sink pose).
    """
    # # Publishing the table top point cloud
    self.pub_table_top.publish(self.ros_plane_table_top)
    # Publishing the tf's
    br = tf.TransformBroadcaster()
    br.sendTransform((self.table_top_center.pose.position.x, self.table_top_center.pose.position.y, self.table_top_center.pose.position.z), (0,0,0,1), rospy.Time.now(), "table_top", self.global_frame)
    # Only broadcast the sink frame when a sink was actually detected.
    if(self.sink_check):
        br = tf.TransformBroadcaster()
        br.sendTransform((self.sink_center.pose.position.x, self.sink_center.pose.position.y, self.sink_center.pose.position.z), (self.sink_center.pose.orientation.x, self.sink_center.pose.orientation.y, self.sink_center.pose.orientation.z, self.sink_center.pose.orientation.w), rospy.Time.now(), "sink", self.global_frame)
    # Publishing the sink and the table poses
    self.sink_center.header.stamp = rospy.Time.now()
    self.pub_sink_center.publish(self.sink_center)
    self.table_top_center.header.stamp = rospy.Time.now()
    self.pub_table_top_center.publish(self.table_top_center)
    # Public table dimensions
    self.pub_table_dimensions.publish(self.table_dimesions)
    print("Published all Topics...")
    return self.ros_plane_table_top, self.table_top_center, self.sink_center
def rotate_orientation(self, ori, q):
    """Rotate `ori` (an object with x/y/z/w fields) in place.

    Applies the 4x4 rotation matrix of quaternion q to ori's components
    and writes the result back onto ori.
    """
    rotation = tf.transformations.quaternion_matrix(q)
    components = [ori.x, ori.y, ori.z, ori.w]
    ori.x, ori.y, ori.z, ori.w = rotation.dot(components)
def translate_position(ori, pos, t):
    """Translate `pos` (an object with x/y/z fields) in place by vector t.

    NOTE(review): defined inside the class without an explicit `self`
    parameter -- when invoked as self.translate_position(pos, t) the bound
    instance lands in `ori`, which is otherwise unused. Confirm the intent
    (most likely `ori` stands in for `self`) before changing the signature.
    """
    pos.x += t[0]
    pos.y += t[1]
    pos.z += t[2]
def start_detection(self):
    """Run the table-top / sink detection pipeline until a plane is found.

    Subscribes to the camera cloud, transforms it into base_footprint,
    segments the table-top plane, clusters to locate the sink, and
    publishes poses/tf frames for the results.

    Returns:
        dict with keys "Table Center", "Table Dimension", "Sink Center",
        "Open 3D Table Top".
    """
    self.downsampled_cloud =[]
    # rospy.init_node('plane_detection')
    self.ros_subscriber()
    self.ros_publishers()
    # Transformation matix from camera frame to base_footprint to have a common frame to process the point cloud
    transform,_,_ = self.transform_frames('/base_footprint', '/camera_depth_frame', self.debug, self.open3d_debug)
    # wait until we recive the point cloud, required for slow processors
    # (self.received_ros_cloud is set by the subscriber callback; touching
    # it raises AttributeError until the first message arrives)
    while True:
        try:
            self.received_ros_cloud
            break
        except:
            continue
    # Run the code untill stopped, it can be made into a service as well
    while not self.check:
        # Convert to open3d data type
        received_open3d_cloud = self.convertCloudFromRosToOpen3d(self.received_ros_cloud)
        # Set the origin of the point cloud to be vizualized
        FOR = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
        # After transformation
        received_open3d_cloud.transform(transform)
        if(self.debug):
            ros_plane = self.convertCloudFromOpen3dToRos(received_open3d_cloud, "base_footprint")
            self.pub_debug.publish(ros_plane)
        if(self.open3d_debug):
            open3d.visualization.draw_geometries([received_open3d_cloud, FOR])
        # The function to find the different planes
        print("Started Segmentation")
        found_plane, self.check = self.table_top_func(received_open3d_cloud, self.z_min, self.z_max, self.debug, self.open3d_debug)
        if(self.check):
            copy_of_plane=[found_plane[0]]
            center = found_plane[0].get_center()
            print("Found plane at height: ",center[2])
            table_points = np.asarray(found_plane[0].points)
            # Cluster the plane points to locate the sink cutout and the
            # table center (cluster_centers is [-1,-1,-1] when no sink).
            cluster_centers,table_center = self.sink_clustering(table_points,self.debug)
            if(self.sink_check):
                # Publishing the sink center
                # (assumes cluster_centers[0] is the sink cluster -- TODO confirm)
                self.sink_center.header.stamp = rospy.Time.now()
                self.sink_center.header.frame_id = self.publishing_frame
                self.sink_center.pose.position.x = cluster_centers[0][0]
                self.sink_center.pose.position.y = cluster_centers[0][1]
                self.sink_center.pose.position.z = center[2]
                self.sink_center.pose.orientation.x = 0
                self.sink_center.pose.orientation.y = 0
                self.sink_center.pose.orientation.z = 0
                self.sink_center.pose.orientation.w = 1.0
                # Re-express the pose in the global frame before publishing.
                homogeous,translation,rotation = self.transform_frames(self.global_frame, self.publishing_frame, self.debug, self.open3d_debug)
                self.rotate_orientation(self.sink_center.pose.orientation,rotation)
                self.translate_position(self.sink_center.pose.position,translation)
                self.sink_center.header.frame_id = self.global_frame
                self.pub_sink_center.publish(self.sink_center)
                # Sink tf
                br = tf.TransformBroadcaster()
                br.sendTransform((self.sink_center.pose.position.x, self.sink_center.pose.position.y, self.sink_center.pose.position.z), (self.sink_center.pose.orientation.x, self.sink_center.pose.orientation.y, self.sink_center.pose.orientation.z, self.sink_center.pose.orientation.w), rospy.Time.now(), "sink", self.global_frame)
            # Publishing the Table Top
            self.ros_plane_table_top = self.convertCloudFromOpen3dToRos(found_plane[0], "base_footprint")
            self.pub_table_top.publish(self.ros_plane_table_top)
            # Publishing the table tf
            center_table_top = copy_of_plane[0].get_center()
            # publishing the table top center
            self.table_top_center.header.stamp = rospy.Time.now()
            self.table_top_center.header.frame_id = self.publishing_frame
            self.table_top_center.pose.position.x = table_center[0]
            self.table_top_center.pose.position.y = table_center[1]
            self.table_top_center.pose.position.z = center_table_top[2]
            self.table_top_center.pose.orientation.x = 0
            self.table_top_center.pose.orientation.y = 0
            self.table_top_center.pose.orientation.z = 0
            self.table_top_center.pose.orientation.w = 1.0
            homogeous,translation,rotation = self.transform_frames(self.global_frame, self.publishing_frame, self.debug, self.open3d_debug)
            self.rotate_orientation(self.table_top_center.pose.orientation,rotation)
            self.translate_position(self.table_top_center.pose.position,translation)
            br = tf.TransformBroadcaster()
            br.sendTransform((self.table_top_center.pose.position.x, self.table_top_center.pose.position.y, self.table_top_center.pose.position.z), (0,0,0,1), rospy.Time.now(), "table_top", self.global_frame)
            self.table_dimesions.pose.position.z = self.table_top_center.pose.position.z
            self.pub_table_dimensions.publish(self.table_dimesions)
            self.table_top_center.header.frame_id = self.global_frame
            # NOTE(review): z is hard-coded to 1 and yaw to pi/2 for the
            # final published center -- confirm this downstream convention.
            self.table_top_center.pose.position.z = 1
            quat_table = quaternion_from_euler(0,0,math.pi/2)
            self.table_top_center.pose.orientation.x = quat_table[0]
            self.table_top_center.pose.orientation.y = quat_table[1]
            self.table_top_center.pose.orientation.z = quat_table[2]
            self.table_top_center.pose.orientation.w = quat_table[3]
            self.pub_table_top_center.publish(self.table_top_center)
        else:
            print("Could not Find Table Top")
    # Loop exits once a plane was found; package everything for the caller.
    table_dimensions = [self.table_dimesions.pose.position.x, self.table_dimesions.pose.position.y, self.table_dimesions.pose.position.z]
    self.table_top_node_data = {"Table Center" : self.table_top_center, "Table Dimension" : table_dimensions, "Sink Center": self.sink_center, "Open 3D Table Top": found_plane[0]}
    return self.table_top_node_data
# rate = rospy.Rate(10)
# while not rospy.is_shutdown():
# # Publishing the table top point cloud
# # self.pub_table_top.publish(self.ros_plane_table_top)
# self.publishing_topics()
# rate.sleep()
if __name__ == '__main__':
    # Run a single detection pass when executed as a script.
    table_top = TableTop()
    table_top.start_detection()
def getPrime(n):
    """Return the largest prime that is <= n, or None if n < 2.

    Bug fix: the original inner loop tested divisors up to ``n`` instead of
    up to the candidate, so every candidate eventually hit ``i % i == 0``
    and the function always returned ``n`` unchanged (prime or not).
    """
    if n < 2:
        return None
    # Scan downwards so the first prime found is the largest one <= n.
    for candidate in range(n, 1, -1):
        if all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            return candidate
    return None
# Prompt for an integer (prompt text is Korean: "Enter an integer")
# and report the prime computed from it.
ret = getPrime(int(input('정수를 입력하세요: ')))
print(ret)
|
# Print a centered star pyramid of total width N, shrinking by 2 per row.
# SECURITY NOTE: eval() on raw input executes arbitrary expressions;
# int(input()) would be safer -- left as-is for review.
N=eval(input())
M=N
i=1
while N>0:
    a='*'*i
    print('{0:^{1}}'.format(a,M))
    # A slot {} nested inside another slot {} must reference its format()
    # argument by explicit index (here: 0 for the text, 1 for the width).
    # print(a.center((M+1)//2)) -- author's open question: why does
    # str.center not work here?
    N-=2
    i+=2
|
"""
Output related helper functions
"""
import re
from colorama import Fore, Back, Style
def style_reset():
    """Reset all colorama fore/back/brightness styling to defaults."""
    print(Style.RESET_ALL)
def write_header(filename, match_count):
    """Print *filename* and its match count, black-on-white, count in cyan."""
    print(Back.WHITE)
    print(Fore.BLACK)
    print("{}{} [{}]".format(filename, Fore.CYAN, match_count))
    style_reset()
def write_match(match_lines, keyword):
    """Print a matched definition on a black background.

    Comment lines are green, code lines blue, and every case-insensitive
    occurrence of *keyword* is highlighted in red.
    """
    print(Back.BLACK)
    pattern = re.compile(re.escape(keyword), flags=re.I)
    for line in match_lines:
        # Comment lines get a distinct color from code lines.
        color = Fore.GREEN if line.startswith('#') else Fore.BLUE
        highlighted = pattern.sub(
            lambda match: Fore.RED + match.group(0) + color,
            line,
        )
        print(color + highlighted)
    style_reset()
|
import argparse
from functions.rename import main
# CLI wrapper: forward the target directory to functions.rename.main.
parser = argparse.ArgumentParser()
parser.add_argument('a', type=str, help='rename directory')
args = parser.parse_args()
main(args.a)
|
import copy

# Advent of Code 2020 day 9 part 2: find a contiguous run of numbers that
# sums to the part-1 target, then report smallest + largest of the run.
with open("input.txt") as f:
    data = f.readlines()
data = [int(n.strip()) for n in data]
# Define the search area
start = 0
end = 25
target = 104054607 # The number from part 1
# First get rid of any numbers that are bigger than the target
smaller_data = [n for n in data if n < target]
# Reverse the list to look at the bigger numbers first
# NOTE(review): part 2 requires a run that is contiguous in the ORIGINAL
# input order; filtering and reversing changes adjacency, so this search
# can consider non-contiguous ranges.  The trailing "too low" note below
# suggests the result was wrong at least once -- confirm the algorithm.
smaller_data.reverse()
for i in range(len(smaller_data)):
    data_for_loop = copy.deepcopy(smaller_data)
    # Look at each number and add it together with the remaining list
    current = data_for_loop[i]
    end = len(data_for_loop)
    rest = data_for_loop[i+1:end]
    found_it = False
    # Shrink the candidate window from the right until it sums to target.
    while end > i+1:
        #print("Trying ", str(rest))
        if sum(rest) == target:
            found_it = True
            rest.sort()
            print("I made ", target, " using ", str(rest))
            print("The sum of the start and end was " + str(rest[0] + rest[len(rest)-1]))
            break
        else:
            end = end - 1
            rest = data_for_loop[i+1:end]
    if not found_it:
        print("Didn't find it for i ", i)
    else:
        break
# 11828200 too low
class Solution:
    def diffWaysToCompute(self, input: str) -> List[int]:
        """Return every value obtainable by fully parenthesizing *input*.

        Divide and conquer: split at each operator, solve both sides
        recursively, and combine every left/right pair.
        (NOTE: the parameter name shadows the builtin ``input``; kept for
        interface compatibility.)
        """
        results = []
        for idx, ch in enumerate(input):
            if ch in ('*', '+', '-'):
                left_values = self.diffWaysToCompute(input[:idx])
                right_values = self.diffWaysToCompute(input[idx + 1:])
                for lv in left_values:
                    for rv in right_values:
                        if ch == '*':
                            results.append(lv * rv)
                        elif ch == '+':
                            results.append(lv + rv)
                        else:
                            results.append(lv - rv)
        # No operator at all: the expression is a bare integer.
        if not results:
            results.append(int(input))
        return results
|
from flask_restful import Resource
from flask import request
from auth.mail_manager import generate_email_token, email_verify
from exception import MyException
class EmailToken(Resource):
    """REST resource that issues an e-mail verification token."""
    @classmethod
    def post(cls):
        payload = request.get_json()
        if not payload:
            raise MyException('field cannot be empty', status_code=400)
        return generate_email_token(payload['email'])
class EmailVerify(Resource):
    """REST resource that confirms an e-mail verification token."""
    @classmethod
    def patch(cls):
        payload = request.get_json()
        if not payload:
            raise MyException('field cannot be empty', status_code=400)
        return email_verify(payload['token'])
|
import operations.negative as negative
from color.grayscale import GrayscaleMatrix
from operations.convolution import ConvolutionMask
from operations.convolution import convolve
def normalize(value, lowerBound, upperBound):
    """Map *value* linearly from [lowerBound, upperBound] onto [0, 1]."""
    span = upperBound - lowerBound
    return (value - lowerBound) / span
def apply(matrix, mask):
    """Convolve *matrix* with one *mask* and normalize the result to [0, 1]."""
    out = GrayscaleMatrix(matrix.width, matrix.height, value_type=float)
    convolved = convolve(matrix, mask)
    for x, y in out:
        out[x][y] = convolved[x][y]
    low, high = out.edge_values()
    for x, y in out:
        out[x][y] = normalize(out[x][y], low, high)
    return out
def apply_all(matrix, *masks):
    """Sum the absolute responses of several masks, normalized to [0, 1]."""
    out = GrayscaleMatrix(matrix.width, matrix.height, value_type=float)
    for mask in masks:
        convolved = convolve(matrix, mask)
        # Accumulate magnitude responses (gradient magnitude approximation).
        for x, y in out:
            out[x][y] = abs(out[x][y]) + abs(convolved[x][y])
    low, high = out.edge_values()
    for x, y in out:
        out[x][y] = normalize(out[x][y], low, high)
    return out
def apply_border_detection(image, method='prewitt'):
    """Run the chosen edge-detection operator over *image*, in place.

    Supported methods: 'prewitt', 'sobel', 'laplace', 'laplace2'.
    Raises ValueError for anything else.
    """
    matrix = GrayscaleMatrix.from_image(image, value_type=float)
    if method == 'prewitt':
        result = apply_all(
            matrix,
            ConvolutionMask((1, 1, 1), (0, 0, 0), (-1, -1, -1)),
            ConvolutionMask((-1, 0, 1), (-1, 0, 1), (-1, 0, 1)),
        )
    elif method == 'sobel':
        result = apply_all(
            matrix,
            ConvolutionMask((1, 2, 1), (0, 0, 0), (-1, -2, -1)),
            ConvolutionMask((-1, 0, 1), (-2, 0, 2), (-1, 0, 1)),
        )
    elif method == 'laplace':
        result = apply(matrix, ConvolutionMask((0, -1, 0), (-1, 4, -1), (0, -1, 0)))
    elif method == 'laplace2':
        result = apply(matrix, ConvolutionMask((0, 1, 0), (1, -4, 1), (0, 1, 0)))
    else:
        raise ValueError()
    result.apply_to(image)
if __name__ == '__main__':
    import imageio
    from argparse import ArgumentParser

    # CLI entry point: apply edge detection (and optionally a negative)
    # to an image file.
    # NOTE(review): choices omit 'laplace2' although apply_border_detection
    # supports it -- confirm whether it should be exposed.
    parser = ArgumentParser(description='Scale image')
    parser.add_argument('input_file', metavar='image', help='')
    parser.add_argument('-o', '--out', dest='output_file', nargs='?', help='')
    parser.add_argument('-m', '--method', dest='method', nargs='?', choices=['prewitt', 'sobel', 'laplace'], default='prewitt', help='')
    parser.add_argument('-n', '--negative', dest='negative', action='store_true')
    args = parser.parse_args()
    input_file = args.input_file
    # Overwrite the input when no output path is given.
    output_file = args.output_file or input_file
    # NOTE(review): assumes a project-local imageio wrapper exposing
    # read()/write() (the PyPI imageio package uses imread/imwrite) -- confirm.
    image = imageio.read(input_file)
    apply_border_detection(image, args.method)
    if args.negative:
        negative.apply_negative(image)
    imageio.write(image, output_file)
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the shop app.  Each route maps to a view in views.py;
# <slug> captures the product identifier from the path.
urlpatterns = [
    path('',views.home ,name='home'),
    # Cart browsing and quantity adjustment
    path('cart/',views.cart ,name='cart'),
    path('update_add/<slug>',views.update_add ,name='update_add'),
    path('update_remove/<slug>',views.update_remove ,name='update_remove'),
    path('product/<slug>',views.product ,name='product'),
    path('add_to_cart/<slug>',views.add_to_cart ,name='add'),
    path('remove_from_cart/<slug>',views.remove_from_cart ,name='remove'),
    path('checkout/',views.checkout ,name='checkout'),
    # Category listing pages
    path('shirt/',views.shirt ,name='shirt'),
    path('sportswear/',views.sportswear ,name='sportswear'),
    path('outwear/',views.outwear ,name='outwear'),
    # Authentication (signup is custom; login/logout use Django's views)
    path('signup',views.signup ,name='signup'),
    path('logout/',auth_views.LogoutView.as_view(),name='logout'),
    path('login/',auth_views.LoginView.as_view(),name='login')
]
import time
from djikstrasto import *
def askdata():
    """Prompt for the name of the data file (without the .txt suffix).

    Opens the file and runs the solver on it; re-prompts on a missing file.
    Bug fix: the file handle was opened with ``open()`` and never closed
    (leaked); it is now managed with a ``with`` block.
    (Prompts and messages are intentionally in Finnish.)
    """
    print("Anna datan sisältävän .txt tiedoston nimi (ilman päätettä!):")
    file = input("> ")
    filename = file + ".txt"
    try:
        # FileNotFoundError from open() falls through to the re-prompt below.
        with open(filename, "r") as data:
            try:
                main(data)
            except ValueError:
                # The dijkstra algorithm raises ValueError when no route
                # exists, so just report that.
                noroute()
    except FileNotFoundError:
        print("")
        print("Ei löydy tuollaista tiedostoa! Varmista, että se on samassa kansiossa .py tiedostojen kanssa")
        print("")
        askdata()
def main(data):
    """Time one dijkstra run over *data*, report it, then re-prompt."""
    started = time.time()
    dijkstra(data)
    elapsed = time.time() - started
    print('')
    print("Algoritmin suoritus kesti: {} sekuntia".format(elapsed))
    askdata()
|
from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/', methods=['GET'])
def index():
    """Serve the upload form page."""
    return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload():
    """Save a single uploaded file to test.txt."""
    request.files['file'].save('test.txt')
    return 'アップロードされました'
@app.route('/upload-files', methods=['POST'])
def upload_files():
    """Save each uploaded file as test<index>.txt."""
    uploaded = request.files.getlist('files')
    for index, item in enumerate(uploaded):
        item.save('test{}.txt'.format(index))
    return 'アップロードされました'
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production.
    app.run()
|
import string
import random
import requests
import os
import numpy as np
import re
def get_mapping():
    """Build a random substitution cipher over the lowercase alphabet.

    Returns:
        dict mapping each letter a-z to a distinct shuffled letter.

    Bug fix: the mapping was constructed but never returned, so callers
    always received None.
    """
    char_set = list(string.ascii_lowercase)
    shuffled_char_set = char_set.copy()
    random.shuffle(shuffled_char_set)
    return dict(zip(char_set, shuffled_char_set))
# initialize Markov matrix
# (Laplace smoothing: every bigram count starts at 1 so no transition
# ends up with zero probability)
M = np.ones((26, 26))
# initial state distribution: counts of word-initial letters a-z
pi = np.zeros(26)
def get_ch_indx(ch):
    """Return the 0-based alphabet index of lowercase *ch* ('a' -> 0)."""
    return ord(ch) - ord('a')
def update_pi(ch):
    """Count *ch* as a word-initial character in the pi distribution."""
    pi[get_ch_indx(ch)] += 1
def update_transisition(ch0, ch1):
    """Count the bigram ch0 -> ch1 in the Markov matrix M."""
    M[get_ch_indx(ch0), get_ch_indx(ch1)] += 1
def get_word_probability(word):
    """Return the log-probability of *word* under the Markov model.

    Uses the module-level initial distribution ``pi`` and transition
    matrix ``M``.

    Bug fixes: ``pi`` and ``M`` were being *called* (``pi(i)``, ``M(i, j)``)
    instead of indexed, and the next-character index ``j`` was never
    assigned, so the loop raised NameError on any word longer than one
    character.
    """
    i = get_ch_indx(word[0])
    logp = np.log(pi[i])
    for ch in word[1:]:
        j = get_ch_indx(ch)
        logp += np.log(M[i, j])
        i = j
    return logp
def get_sequence_probability(words):
    """Return the total log-probability of a sequence of words.

    Bug fix: the original called ``.sum()`` on a Python list
    (``list(map(...)).sum()``), which raises AttributeError; use the
    builtin ``sum`` instead.
    """
    return sum(get_word_probability(word) for word in words)
def download_file():
    """Download the Moby Dick corpus to moby_dick.txt if not already present."""
    # create a markov model based on an English dataset
    # is an edit of https://www.gutenberg.org/ebooks/2701
    # (I removed the front and back matter)
    # download the file
    if not os.path.exists('moby_dick.txt'):
        print("Downloading moby dick...")
        r = requests.get(
            'https://lazyprogrammer.me/course_files/moby_dick.txt')
        with open('moby_dick.txt', 'w', encoding="utf-8") as f:
            f.write(r.content.decode())
def main():
    """Accumulate pi and M counts from the Moby Dick corpus.

    Fixes: the corpus file handle was never closed (now a ``with`` block),
    and the enumerate index was unused (removed).
    """
    download_file()
    # for replacing non-alpha characters
    regex = re.compile('[^a-zA-Z]')
    # load in words
    with open('moby_dick.txt', encoding="utf-8") as corpus:
        for line in corpus:
            line = line.strip()  # Strip spaces around
            if line:
                # replace all non-alpha characters with space
                line = regex.sub(' ', line)
                for token in line.lower().split():
                    # First letter, add to pi
                    ch0 = token[0]
                    update_pi(ch0)
                    # Update transition probabilities for each bigram
                    for ch1 in token[1:]:
                        update_transisition(ch0, ch1)
                        ch0 = ch1
if __name__ == "__main__":
    main()
    # normalize the probabilities: turn the raw counts into a valid
    # initial distribution (pi) and row-stochastic transition matrix (M)
    pi /= pi.sum()
    M /= M.sum(axis=1, keepdims=True)
    print(pi)
    print(M)
|
#!/proj/sot/ska3/flight/bin/python
#############################################################################
# #
# update_limit_table.py: update html limit table for display #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 01, 2021 #
# #
#############################################################################
import sys
import os
import string
import re
import time
import getpass
# Read the house-keeping directory list and bind each "<value> : <name>"
# line as a module-level variable (e.g. limit_dir, web_dir, bin_dir, ...).
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE: exec() on config-file contents executes arbitrary code; the
    # dir_list file must be trusted.
    exec("%s = %s" %(var, line))
sys.path.append("/data/mta4/Script/Python3.10/MTA")
sys.path.append(bin_dir)
import mta_common_functions as mcf #---- mta common functions
import envelope_common_function as ecf #---- envelope common functions
# Mission start epoch used to back-fill open-ended limit ranges.
obegin = ecf.stime_to_frac_year(48815999) #--- 1999:201:00:00:00
#---------------------------------------------------------------------------------
#-- update_limit_table: update html limit table for display --
#---------------------------------------------------------------------------------
def update_limit_table():
    """
    update html limit table for display
    input: none, but read from <limit_dir>/Limit_data/op_limits_new.db
    output: <html_dir>/<Group>/Limit_table/<msid>_limit_table.html
    """
    #
    #--- create msid <---> group name dictionary
    #
    g_dict = create_group_dict()
    u_dict = {}
    #
    #--- read limit database
    #
    ifile = limit_dir + 'Limit_data/op_limits_new.db'
    data = mcf.read_data_file(ifile)
    #
    #--- separate the data into each msid
    #--- (the database is assumed to list all rows of one msid
    #--- consecutively; a change of msid flushes the collected rows)
    #
    prev = ''
    save = []
    for ent in data:
        # skip full-line comments in the database
        if ent[0] == '#':
            continue
        # fields before the first '#' are data; the third '#'-section is the unit
        atemp = re.split('#', ent)
        btemp = re.split('\s+', atemp[0])
        msid = btemp[0].strip()
        unit = atemp[2].strip()
        u_dict[msid] = unit
        if msid == prev:
            save.append(btemp)
        else:
            if prev == '':
                # very first data row: just start collecting
                prev = msid
                save.append(btemp)
                continue
            else:
                #
                #--- collected all limits for the msid; create a limit table html page
                #
                try:
                    group = g_dict[prev]
                except:
                    # msid not in the group list: drop it and restart
                    #print("MSID MISSED: " + str(prev))
                    prev = msid
                    save = [btemp]
                    continue
                unit = u_dict[prev]
                create_limit_table(save, prev, group, unit)
                prev = msid
                save = [btemp]
    #
    #--- create a html page for the last entry
    #
    if len(save) > 0:
        try:
            group = g_dict[msid]
            unit = u_dict[msid]
            create_limit_table(save, msid, group, unit)
        except:
            pass
#---------------------------------------------------------------------------------
#-- create_limit_table: update html limit table for display --
#---------------------------------------------------------------------------------
def create_limit_table(dlist, msid, group, unit):
    """
    update html limit table for display
    input: dlist --- a list of lists of: [<msid>, <yl>, <yu>, <rl>, <ru>, <cmsid>, <state>, <time>]
           msid  --- msid
           group --- groupd name
           unit  --- unit
    output: <html_dir>/<Group>/Limit_table/<msid>_limit_table.html
    """
    # split the row columns into parallel per-field lists
    yl_list = []
    yu_list = []
    rl_list = []
    ru_list = []
    st_list = []
    tm_list = []
    for ent in dlist:
        msid = ent[0]
        yl = ent[1]
        yu = ent[2]
        rl = ent[3]
        ru = ent[4]
        state = ent[6]
        stime = ent[7]
        yl_list.append(yl)
        yu_list.append(yu)
        rl_list.append(rl)
        ru_list.append(ru)
        st_list.append(state)
        tm_list.append(stime)
    #
    #--- check which states are in the list
    #
    states = list(set(st_list))
    slen = len(states)
    nchk = 0
    if 'none' in states:
        nchk = 1
    #
    #--- check 'none' case first
    #
    n_list = []
    if nchk == 1:
        for k in range(0, len(st_list)):
            if st_list[k] == 'none':
                #
                #--- convert time from seconds from 1998.1.1 to fractional year
                #--- NOTE: local name 'time' shadows the imported time module
                #--- inside this function
                #
                time = ecf.stime_to_frac_year(tm_list[k])
                n_list.append([time, yl_list[k], yu_list[k], rl_list[k], ru_list[k]])
        #
        #--- if none limit start later than 1999.201, extend the beginning to 1999.201
        #--- (each clean-up step is best-effort: failures leave n_list as-is)
        #
        try:
            n_list = compress_time_periods(n_list)
        except:
            pass
        try:
            n_list = combine_same_start(n_list)
        except:
            pass
        try:
            [n_list, ochk] = extend_start_range(n_list)
        except:
            pass
    #
    #--- check other cases (state-specific limits)
    #
    if nchk == 0 or slen > 1:
        all_list = []
        for state in states:
            if state == 'none':
                continue
            t_list = []
            for k in range(0, len(st_list)):
                if state == st_list[k]:
                    time = ecf.stime_to_frac_year(tm_list[k])
                    t_list.append([time, yl_list[k], yu_list[k], rl_list[k], ru_list[k]])
            # back-fill the period before this state's first entry, either
            # with the 'none' limits or by extending the start date
            if t_list[0][0] > obegin:
                try:
                    non_first = n_list[0]
                    t_list = [non_first] + t_list
                except:
                    [t_list, ochk] = extend_start_range(t_list)
            t_list = compress_time_periods(t_list)
            all_list.append(t_list)
    #
    #--- found only none case
    #
    if nchk == 1 and slen == 1:
        #
        #--- create html limit table
        #
        aline = create_html_table(n_list, states)
    #
    #--- found only other cases
    #
    elif nchk == 0:
        aline = create_html_table(all_list, states)
    #
    #--- found both none and other cases
    #
    else:
        all_list = [n_list] + all_list
        tstates = ['none']
        for state in states:
            if state != 'none':
                tstates.append(state)
        aline = create_html_table(all_list, tstates)
    #
    #--- print out html page
    #
    line = '<!DOCTYPE html>\n'
    line = line + '<html>\n'
    line = line + '<head>\n'
    line = line +' <title>MTA Trending: " ' + msid + ' limit table "</title>\n'
    line = line +' <style>\n'
    line = line +' </style>\n'
    line = line +'</head>\n'
    line = line +'<body style="width:95%;margin-left:10px; margin-right;10px;background-color:#FAEBD7;'
    line = line +'font-family:Georgia, "Times New Roman", Times, serif">\n'
    line = line +'<h2>' + msid.upper() + ' (' + unit + ')</h2>\n'
    line = line + aline
    line = line + '</body>\n</html>\n'
    outdir = web_dir + group.capitalize() + '/Limit_table/'
    cmd = 'mkdir -p ' + outdir
    os.system(cmd)
    outname = outdir + msid + '_limit_table.html'
    with open(outname, 'w') as fo:
        fo.write(line)
#---------------------------------------------------------------------------------
#-- combine_same_start: combine limit lists of the same start time --
#---------------------------------------------------------------------------------
def combine_same_start(n_list):
    """
    combine limit lists of the same start time
    input:  n_list --- a list of lists [<time>, <yl>, <yu>, <rl>, <ru>]
    output: s_list --- a list of lists with combined list

    Bug fix: the original always appended the last element after the loop,
    so when the final two entries shared a start time the last entry was
    emitted twice (once merged, once raw).  The rewrite walks the list
    with an explicit index and consumes merged pairs exactly once.
    """
    nlen = len(n_list)
    if nlen == 1:
        return n_list
    s_list = []
    k = 0
    while k < nlen:
        #
        #--- same-start entries are assumed to be adjacent; merge the pair
        #--- into the wider range and skip both
        #
        if k + 1 < nlen and n_list[k][0] == n_list[k + 1][0]:
            s_list.append(combine_two_limits(n_list[k], n_list[k + 1]))
            k += 2
        else:
            s_list.append(n_list[k])
            k += 1
    return s_list
#---------------------------------------------------------------------------------
#-- combine_two_limits: combine two limit table; take wider range ---
#---------------------------------------------------------------------------------
def combine_two_limits(a_list, b_list):
    """Merge two limit rows into the wider envelope.

    Each row is [<time>, <yl>, <yu>, <rl>, <ru>]; the result keeps the
    first row's time, the lower of the two lower limits and the higher of
    the two upper limits.
    """
    return [
        a_list[0],
        min(a_list[1], b_list[1]),
        max(a_list[2], b_list[2]),
        min(a_list[3], b_list[3]),
        max(a_list[4], b_list[4]),
    ]
#---------------------------------------------------------------------------------
#-- compress_time_periods: check whether the limits are same as one before --
#---------------------------------------------------------------------------------
def compress_time_periods(a_list):
    """Collapse consecutive entries whose four limit values repeat.

    input:  a_list --- list of [<time>, <y_low>, <y_up>, <r_low>, <r_up>, ...]
    output: list keeping only the first entry of each run of identical limits
    """
    kept = [a_list[0]]
    for prev, cur in zip(a_list, a_list[1:]):
        # Compare only the four limit fields (indices 1-4); the timestamp
        # at index 0 may differ.
        if prev[1:5] != cur[1:5]:
            kept.append(cur)
    return kept
#---------------------------------------------------------------------------------
#-- extend_start_range: if the begining is not 1999:201 or before, set date to 1999:201
#---------------------------------------------------------------------------------
def extend_start_range(t_list):
    """Pin the first entry's start date back to 1999:201 when it begins later.

    input:  t_list --- a list of [[<time>, ...], [...], ...]
    output: [t_list, tchk] where tchk is 1 if the date was rewritten,
            otherwise 0
    """
    if t_list[0][0] <= obegin:
        return [t_list, 0]
    # 1999.55068493 is the fractional-year form of 1999:201:00:00:00
    t_list[0][0] = 1999.55068493
    return [t_list, 1]
#---------------------------------------------------------------------------------
#-- create_html_table: create html data table --
#---------------------------------------------------------------------------------
def create_html_table(all_list, states):
    """
    create html data table
    input: all_list --- a list of lists of data: [<time>, <y_low>, <y_top>, <r_low>, <r_top>]
           states   --- a list of states
    output: aline   --- html string of table part
    """
    tlen = len(states)
    # for the pure-'none' case the caller passes a flat list; wrap it so
    # the per-state loop below works uniformly
    if tlen == 1 and states[0] == 'none':
        all_list = [all_list]
    aline = '<table border=1 cellspan=2>\n'
    for m in range(0, tlen):
        a_list = all_list[m]
        #
        #--- unless the state is 'none', put the header to show which state these limits show
        #
        if len(states) > 1 or states[m] != 'none':
            aline = aline + '<tr><td colspan=6 style="text-align:left;">State: ' + states[m] + '</td></tr>\n'
        aline = aline + '<tr><th>Start Time</th><th>Stop Time</th>\n'
        aline = aline + '<th>Yellow Lower</th><th>Yellow Upper</th>\n'
        aline = aline + '<th>Red Lower</th><th>Red Upper</th></tr>\n'
        alen = len(a_list)
        for k in range(0, alen):
            #
            #--- setting start and stop time. if the ending is open, use '---'
            #--- (each row's stop time is the next row's start time)
            #
            aline = aline + '<tr><td>' + format_data(a_list[k][0]) + '</td>\n'
            if k < alen-1:
                aline = aline + '<td>' + format_data(a_list[k+1][0]) + '</td>\n'
            else:
                aline = aline + '<td> --- </td>\n'
            #
            #--- yellow lower, yellow upper, red lower, red upper
            #
            aline = aline + '<td>' + format_data(a_list[k][1]) + '</td>\n'
            aline = aline + '<td>' + format_data(a_list[k][2]) + '</td>\n'
            aline = aline + '<td>' + format_data(a_list[k][3]) + '</td>\n'
            aline = aline + '<td>' + format_data(a_list[k][4]) + '</td>\n'
            aline = aline + '</tr>\n'
    # no states at all: emit a single open-ended placeholder row
    if tlen == 0:
        aline = aline + '<tr><td>1999.0</td><td> --- <td>\n'
        aline = aline + '<td>-998</td><td>998</td><td>-999</td><td>999</td>\n'
        aline = aline + '</tr>\n'
    aline = aline + '</table><br />\n'
    return aline
#---------------------------------------------------------------------------------
#-- format_data: format digit to clean form ---
#---------------------------------------------------------------------------------
def format_data(val):
    """Format a value for table display; non-numeric input passes through.

    Very small magnitudes and large positive values use scientific
    notation; everything else is shown with two decimal places.
    """
    try:
        number = float(val)
    except (TypeError, ValueError, OverflowError):
        return val
    if abs(number) < 0.01:
        return '%3.3e' % number
    if number > 10000:
        return '%3.2e' % number
    return '%3.2f' % round(number, 2)
#---------------------------------------------------------------------------------
#-- create_group_dict: create msid <---> group dictionary ---
#---------------------------------------------------------------------------------
def create_group_dict():
    """Build an msid -> group dictionary.

    input:  none, but reads <house_keeping>/msid_list_all
    output: dict mapping each msid to its group name
    """
    rows = mcf.read_data_file(house_keeping + 'msid_list_all')
    g_dict = {}
    for row in rows:
        fields = re.split(r'\s+', row)
        g_dict[fields[0].strip()] = fields[1].strip()
    return g_dict
#---------------------------------------------------------------------------------
if __name__ == '__main__':
    #
    #--- Create a lock file and exit strategy in case of race conditions
    #
    name = os.path.basename(__file__).split(".")[0]
    user = getpass.getuser()
    lock_file = f"/tmp/{user}/{name}.lock"
    if os.path.isfile(lock_file):
        sys.exit(f"Lock file exists as {lock_file}. Process already running/errored out. Check calling scripts/cronjob/cronlog.")
    else:
        #
        #--- Bug fix: the original ran "mkdir -p /tmp/mta" but touched the
        #--- lock under /tmp/<user>/, which fails for any user other than
        #--- 'mta'.  Create the user's own directory, and use os/open
        #--- calls instead of shelling out.
        #
        os.makedirs(os.path.dirname(lock_file), exist_ok=True)
        with open(lock_file, 'w'):
            pass
    update_limit_table()
    #
    #--- Remove lock file once process is completed
    #
    os.remove(lock_file)
|
# Read groups of "<person> <food>..." lines and, per food (sorted),
# print the sorted list of people who eat it.  A count of 0 terminates.
N = int(input())
while N != 0:
    eaters = {}
    for _ in range(N):
        fields = input().split()
        person = fields[0]
        for dish in fields[1:]:
            eaters.setdefault(dish, []).append(person)
    for dish in sorted(eaters):
        print("{} {}".format(dish, " ".join(sorted(eaters[dish]))))
    print()
    N = int(input())
|
# SPDX-License-Identifier: 0BSD
# Copyright 2018 Alexander Kozhevnikov <mentalisttraceur@gmail.com>
"""Raise exceptions with a function instead of a statement.
Provides a minimal, clean and portable interface for raising exceptions
with all the advantages of functions over syntax.
Note:
This is the "no traceback" variant, for Python implementations that
do not support using a custom traceback when raising. It exists to
allow code using the ``raise_`` interface to gracefully degrade in
the absence of full traceback support.
"""
__all__ = ('raise_',)
__version__ = '1.1.9'
def raise_(exception, traceback=None):
    """Raise an exception, optionally with a custom traceback.

    Arguments:
        exception: The exception instance or type to raise.
        traceback (optional): Traceback to raise the exception with.

    Note:
        This "no traceback" variant silently ignores the ``traceback``
        argument, because it is meant for Python implementations
        that do not support using a custom traceback when raising.
    """
    # Deliberately unused -- kept only for interface compatibility with
    # the full-featured variant.
    del traceback
    raise exception
|
from itertools import groupby
def repeating_fractions(numerator, denominator):
    """Render numerator/denominator, wrapping repeated decimal digits.

    Any run of two or more identical digits in the fractional part is
    collapsed into a single parenthesized digit, e.g. 1/3 -> '0.(3)'.
    """
    whole, frac = str(numerator / float(denominator)).split('.')
    pieces = []
    for digit, run in groupby(frac):
        run_length = sum(1 for _ in run)
        pieces.append('({})'.format(digit) if run_length > 1 else digit)
    return '{}.{}'.format(whole, ''.join(pieces))
|
from loss import *
from optimizer import *
import numpy as np
# Registry of supported loss functions, keyed by the name given to compile().
losses = {'mse': mse, 'ce': ce}
# Shared default optimizer instance passed to each layer's init_params().
sgd = SGD()
class FNN:
    ''' Full-connection Neural Network
    It is a simple network with multiple layers
    NOTE: written for Python 2 (print statements, integer division).
    '''
    def __init__(self, input_dim=1):
        # Ordered list of layers; populated via add().
        self.layers = []
        # Width of the input feature vector; consumed by compile().
        self.input_dim = input_dim
    ''' Forward-progation
    batchX is a batch_size * M matrix
    self.batchA a batch_size * N matrix
    '''
    def forward(self, batchX):
        # Pipe the batch through each layer in order and cache the final
        # activations in self.batchA for the loss computation in fit().
        batchA = batchX
        for layer in self.layers:
            batchA = layer.forward(batchA)
        self.batchA = batchA
        return self.batchA
    # Backward-progation
    def backward(self, delta_loss):
        # The output layer consumes the loss gradient directly; each
        # hidden layer also needs the FOLLOWING layer's weights to
        # propagate the gradient backwards.
        layer_size = len(self.layers)
        batchDz = self.layers[layer_size - 1].backward(delta_loss)
        last_layer = self.layers[layer_size - 1]
        for k in range(1, layer_size):
            i = layer_size - 1 - k
            layer = self.layers[i]
            batchDz = layer.backward(batchDz, last_layer.w)
            # print "batchDz:", batchDz
            last_layer = layer
    def update(self, lr=0.005):
        # Apply accumulated gradients, iterating from the last layer back.
        layer_size = len(self.layers)
        for k in range(layer_size):
            i = layer_size - 1 - k
            layer = self.layers[i]
            layer.update(lr)
    # Add a hidden layer or output layer
    def add(self, layer):
        self.layers.append(layer)
    def compile(self, loss='mse', optimizer='sgd'):
        # Resolve the loss by name; let each layer size its parameters
        # from the previous layer's output width.
        self.loss = losses[loss]
        self.optimizer = optimizer
        # Init layers' params
        input_dim = self.input_dim
        for layer in self.layers:
            layer.init_params(optimizer=sgd, input_dim=input_dim)
            input_dim = layer.dim
    def fit(self, trainX, trainY, \
        lr=0.005, batch_size=1, epochs=30, shuffle=False, verbose=2):
        assert(len(trainX) == len(trainY))
        # Start train
        size = len(trainX)
        # NOTE: relies on Python 2 integer division; the remainder check
        # below rounds the batch count up for a partial final batch.
        batchs = size / batch_size
        if size % batch_size != 0:
            batchs += 1
        # Batches are materialized on the first epoch and replayed on later
        # epochs (the 'shuffle' flag is accepted but not implemented).
        batch_pool = None
        if epochs > 1:
            batch_pool = []
        for i in range(epochs):
            for j in range(batchs):
                # Select a batch of samples
                if batch_pool is None or i == 0:
                    start = j * batch_size
                    batchX = np.array(trainX[start:start + batch_size])
                    batchY = np.array(trainY[start:start + batch_size])
                    if batch_pool != None:
                        batch_pool.append((batchX, batchY))
                else:
                    (batchX, batchY) = batch_pool[j]
                # Forward progation
                self.forward(batchX)
                # Backward progation
                loss = self.loss.loss(self.batchA, batchY)
                delta_loss = self.loss.delta(self.batchA, batchY)
                if verbose < 3:
                    # print "batchX:", batchX
                    # print "predY:", self.batchA
                    # print "batchY:", batchY
                    print "[loss: ", loss, "]"
                self.backward(delta_loss)
                # Update
                self.update(lr)
    def predict(self, X):
        # Inference is just a forward pass.
        Y = self.forward(X)
        return Y
from office365.runtime.client_value import ClientValue
class GroupProfile(ClientValue):
    """Payload describing a group to be created (security-enabled by default)."""
    def __init__(self, name):
        """
        :param str name: Group name (used for both alias and display name)
        """
        super(GroupProfile, self).__init__()
        self.mailNickname = name       # mail alias for the group
        self.displayName = name
        self.description = None
        self.mailEnabled = False       # not mail-enabled by default
        self.securityEnabled = True    # created as a security group
        self.owners = []
        self.members = []
        self.groupTypes = []           # e.g. ["Unified"] for Microsoft 365 groups
|
#!/usr/bin/python
'''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
from __future__ import print_function
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import logging
import json
import time
# Shadow JSON schema:
#
# Name: Bot
# {
# "state": {
# "desired":{
# "property":<INT VALUE>
# }
# }
# }
def myShadowUpdateCallback(payload, responseStatus, token):
    """Callback invoked after a shadow update completes; echoes the payload."""
    message = "shadow updated " + payload
    print(message)
def iot_shadow():
    """Connect to AWS IoT and report the door status to the device shadow.

    Reads connection settings from ./config.json, then loops forever,
    re-publishing the reported status whenever it changes (and once at
    startup). Never returns.
    """
    # FIX: use a context manager so the config file handle is closed
    # (the original opened it and never closed it).
    with open("config.json", "r") as f:
        config_data = json.load(f)
    endpoint = config_data['endpoint']
    root_ca = config_data['rootCA']
    private_key = config_data['certificateKey']
    private_cert = config_data['privateCert']
    clientId = thing_name = config_data['thingName']
    port = config_data['port']
    # Configure logging
    logger = logging.getLogger("AWSIoTPythonSDK.core")
    logger.setLevel(logging.WARNING)
    streamHandler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    streamHandler.setFormatter(formatter)
    logger.addHandler(streamHandler)
    # Init AWSIoTMQTTShadowClient
    myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId)
    myAWSIoTMQTTShadowClient.configureEndpoint(endpoint, port)
    # NOTE(review): configureCredentials takes (CAFilePath, KeyPath, CertPath);
    # the config keys 'certificateKey'/'privateCert' read as if they might be
    # swapped relative to that order -- confirm against the actual config file.
    myAWSIoTMQTTShadowClient.configureCredentials(root_ca, private_key, private_cert)
    # AWSIoTMQTTShadowClient configuration
    myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
    myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
    myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5)  # 5 sec
    # Connect to AWS IoT
    myAWSIoTMQTTShadowClient.connect()
    # Create a deviceShadow with persistent subscription
    deviceShadowHandler = myAWSIoTMQTTShadowClient.createShadowHandlerWithName(thing_name, True)
    i = 0
    status = None
    while True:
        newstatus = get_door_status()
        # Publish on the first pass and on every state change; skip otherwise
        # to avoid flooding the shadow with identical updates.
        if i == 0 or newstatus != status:
            shadowMessage = '{"state":{"reported":{"status":"' + newstatus + '"}}}'
            deviceShadowHandler.shadowUpdate(shadowMessage, myShadowUpdateCallback, 5)
            status = newstatus
        i += 1
        time.sleep(5)
def get_door_status(filename="./status.txt"):
    """Return the first character of *filename* (the current door state)."""
    with open(filename, "r") as status_file:
        return status_file.read(1)
# Run the shadow-reporting loop when executed as a script (blocks forever).
if __name__ == "__main__":
    iot_shadow()
import asyncio
from unittest.mock import AsyncMock, Mock

import pytest

from .context import initial_bot_state
@pytest.mark.asyncio
async def test_expand():
    """expand() succeeds and awaits expand_now when a building can be built."""
    bot = initial_bot_state([])
    bot.can_build_building = Mock(return_value=True)
    # FIX: asyncio.coroutine was deprecated in 3.8 and removed in Python 3.11;
    # AsyncMock is the supported way to stub an awaitable.
    expand_stub = AsyncMock(return_value=None)
    bot.expand_now = expand_stub
    success = await bot.expand()
    assert success
    expand_stub.assert_called()
@pytest.mark.asyncio
async def test_cannot_expand_if_cant_build():
    """expand() fails and never awaits expand_now when building is impossible."""
    bot = initial_bot_state([])
    bot.can_build_building = Mock(return_value=False)
    # FIX: asyncio.coroutine was deprecated in 3.8 and removed in Python 3.11;
    # AsyncMock is the supported way to stub an awaitable.
    expand_stub = AsyncMock(return_value=None)
    bot.expand_now = expand_stub
    success = await bot.expand()
    assert not success
    expand_stub.assert_not_called()
|
# coding: utf-8
# In[41]:
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
# In[42]:
def sigmoid_activation(x):
    """Numerically stable logistic sigmoid, 1 / (1 + e^-x).

    FIX: the naive form np.exp(-x) overflows (RuntimeWarning, inf) for
    large-magnitude negative inputs. Using z = exp(-|x|), which is always
    in (0, 1], gives the mathematically identical result on both branches.

    :param x: scalar or array-like
    :return: element-wise sigmoid of x as a float64 array
    """
    x = np.asarray(x, dtype=float)
    z = np.exp(-np.abs(x))  # never overflows
    # sigmoid(x) = 1/(1+z) for x >= 0, and z/(1+z) for x < 0
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
# In[43]:
def predict(X, W):
    # Hard 0/1 classification from the sigmoid scores.
    preds = sigmoid_activation(X.dot(W))
    # NOTE(review): the second mask reads `> 0` rather than `> 0.5`; it works
    # only because the first assignment has already zeroed everything <= 0.5,
    # leaving only values in (0.5, 1) positive. Fragile but correct as-is.
    preds[preds <=0.5] = 0
    preds[preds >0] =1
    return preds
# In[44]:
# Two Gaussian blobs -> a linearly separable binary classification toy set.
(X,y) = make_blobs(n_samples=1000,n_features=2, cluster_std=1.5, random_state=1, centers=2)
# In[45]:
# Reshape labels into a column vector so they align with (n, 1) predictions.
y = y.reshape(y.shape[0],1)
# In[46]:
# Append a bias column of ones so the bias term folds into the weight vector.
X = np.c_[X, np.ones(X.shape[0])]
# In[47]:
(trainX, testX, trainY, testY) = train_test_split(X, y, test_size = 0.5, random_state = 42)
# In[55]:
print("[INFO] Training...")
# In[49]:
# Random uniform-[0,1) initial weights: one per feature plus one for the bias.
W = np.random.rand(X.shape[1], 1)
losses = []
# In[50]:
epochs = 100
alpha = 0.01  # learning rate
# In[51]:
# Full-batch gradient descent on the squared error of the sigmoid output.
for epoch in np.arange(0, epochs):
    preds = sigmoid_activation(trainX.dot(W))
    error = preds - trainY
    loss = np.sum(error**2)
    losses.append(loss)
    gradient = trainX.T.dot(error)  # gradient is the dot product of the inputs and the error
    W += -alpha * gradient  # step in the negative gradient direction
    if epoch ==0 or (epoch+1)%5 ==0:
        print("[INFO] epoch = {}, loss={:.7f}".format(int(epoch+1), loss))
# In[52]:
# Held-out evaluation with hard 0/1 predictions.
print("[INFO] Evaluating...")
preds = predict(testX, W)
print(classification_report(testY, preds))
# In[54]:
# Left: test points colored by true label. Right: training-loss curve.
plt.style.use("ggplot")
plt.figure()
plt.title("Data")
plt.scatter(testX[:,0], testX[:, 1], marker='o', c=testY, s=30)
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), losses)
plt.title("Training Loss")
plt.xlabel("Epoch#")
plt.ylabel("Loss")
plt.show()
|
from PySide2.QtCore import QObject, Signal
from time import sleep, time
from random import randint
from freebitcoin.API import API
from helpers.RucaptchaAPI import RucaptchaAPI
class User(QObject):
    """Worker for one freebitcoin account: logs in, collects coins in an
    endless loop, and reports progress to the GUI via Qt signals."""
    # (row index, [r, g, b]) -> recolor this user's row in the table
    signal_update_column_color = Signal(int, list)
    # (row index, text) -> update this user's balance cell
    signal_update_column_text = Signal(int, str)
    # [timestamp, coins] -> append one point to the chart
    signal_update_chart = Signal(list)
    def __init__(self, index, login, password, proxy, key):
        """
        :param index: row index of this user in the GUI table
        :param login: account login
        :param password: account password
        :param proxy: proxy spec forwarded to the API client
        :param key: API key forwarded to the API client
                    (presumably the rucaptcha key -- confirm against API())
        """
        QObject.__init__(self)
        self.key = key
        self.index = index
        self.login = login
        self.password = password
        self.proxy = proxy
    def alert_user(self, success):
        # Green row on success, red on failure.
        self.signal_update_column_color.emit(self.index, [20, 200, 20] if success else [200, 20, 20])
    def update_chart(self, coins):
        # Chart point is (current unix time, collected coins).
        self.signal_update_chart.emit([time(), float(coins)])
    def update_balance(self, balance):
        self.signal_update_column_text.emit(self.index, balance)
    def run(self):
        """Login, then collect roughly once an hour forever; on any error,
        mark the row red and stop."""
        try:
            self.api = API(self.login, self.password, self.proxy, self.key)
            self.update_balance( self.api.parse_coins() )
            self.alert_user(True)
            while True:
                current_balance, coins = self.api.collect_coins( self.api.create_data() )
                self.update_chart(coins)
                self.update_balance(current_balance)
                # One hour plus a small random jitter between collections.
                sleep(3600 + randint(23, 78))
        except Exception as msg:
            # Any failure (login, network, parsing) flags the row and exits.
            self.alert_user(False)
            print(msg)
# https://wikidocs.net/28
# Minimal empty class: instances carry no data or behavior of their own.
class Cookie:
    pass
# Two separate instantiations produce two distinct objects, so the two
# prints show different default reprs (object identities).
a = Cookie()
b = Cookie()
print(a)
print(b)
class FourCal:
    """Four-function calculator over an ordered pair of operands."""

    def __init__(self, first, second):
        """Store the initial operands."""
        self.first = first
        self.second = second

    def setdata(self, first, second):
        """Replace both operands in place."""
        self.first = first
        self.second = second

    def add(self):
        """Return the sum of the operands."""
        total = self.first + self.second
        return total

    def mul(self):
        """Return the product of the operands."""
        product = self.first * self.second
        return product

    def sub(self):
        """Return first minus second."""
        difference = self.first - self.second
        return difference

    def div(self):
        """Return first divided by second (true division)."""
        quotient = self.first / self.second
        return quotient
# Exercise all four operations on the pair (4, 3): prints 7, 12, 1, 1.333...
a = FourCal(4, 3)
print(a.add())
print(a.mul())
print(a.sub())
print(a.div())
class MoreFourCal(FourCal):
    """FourCal extended with exponentiation."""

    def pow(self):
        """Return first raised to the power of second."""
        base, exponent = self.first, self.second
        return base ** exponent
# The subclass keeps the four inherited operations and adds pow(): 6**3 = 216.
b = MoreFourCal(6, 3)
print(b.add())
print(b.mul())
print(b.sub())
print(b.div())
print(b.pow())
# overriding
class SafeFourCal(FourCal):
    """FourCal whose div() yields 0 on a zero divisor instead of raising."""

    def div(self):
        """Return first/second, or 0 when second is 0."""
        if self.second == 0:
            return 0
        return self.first / self.second
# Dividing by zero now prints 0 instead of raising ZeroDivisionError.
c = SafeFourCal(4, 0)
print(c.div())
class Family:
    # Class attribute shared by every instance until an instance shadows it.
    lastname = 'Kim'
fam1 = Family()
fam2 = Family()
# Both reads resolve to the single class attribute, so the ids are equal.
print(1, fam1.lastname, id(fam1.lastname))
print(2, fam2.lastname, id(fam2.lastname))
# Assignment on the instance creates an instance attribute that shadows the
# class attribute for fam1 only; fam2 still sees 'Kim'.
fam1.lastname = 'Park'
print(1, fam1.lastname, id(fam1.lastname))
print(2, fam2.lastname, id(fam2.lastname))
import cv2
import sys
import os
import logging as log
import datetime as dt
from time import sleep
import time
import click
import platform
def get_lock_screen_cmd():
    """Return the shell command that locks the screen on this OS.

    Supports Linux (gnome-screensaver) and macOS (Darwin); raises for
    any other platform.
    """
    os_type = platform.system()
    if os_type == 'Linux':
        return 'gnome-screensaver-command --lock &'
    if os_type == 'Darwin':
        return '/System/Library/CoreServices/Menu\ Extras/user.menu/Contents/Resources/CGSession -suspend'
    raise Exception('Unsupported OS platform, Linux and MacOS only.')
@click.command()
@click.option('--delay-seconds', help='activate command after this many seconds without detecting a face', default=5)
@click.option('--sleep-seconds', help='sleep every this many seconds', default=0.5)
def run(delay_seconds, sleep_seconds):
    """Watch the webcam and lock the screen after a face-free interval."""
    # cascPath = "/home/wei/dev/facelock/haarcascade_frontalface_default.xml"
    # Haar cascade for frontal faces, bundled with the OpenCV install.
    faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    log.basicConfig(filename='webcam.log',level=log.INFO)
    video_capture = cv2.VideoCapture(0)  # default webcam
    anterior = 0  # number of faces seen on the previous frame
    counter = 0   # consecutive iterations without any face
    # NOTE(review): with the defaults this is int(0.5 * 5) = 2 iterations
    # (~1s wall time), not 5 seconds -- delay_seconds / sleep_seconds looks
    # like the intended formula. Confirm before changing.
    TRIGGER = int(sleep_seconds * delay_seconds)
    # fps = video_capture.get(cv2.CAP_PROP_FPS) # Gets the frames per second
    # multiplier = fps * seconds
    while True:
        if not video_capture.isOpened():
            # NOTE(review): `pass` does not retry or skip -- after the 3s sleep
            # execution falls through to read() on the unopened capture.
            print('Unable to load camera.')
            sleep(3)
            pass
        ret, frame = video_capture.read()
        # Haar detection works on grayscale frames.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )
        if len (faces) < 1:
            counter += 1
            log.info("no face at "+str(dt.datetime.now()) + ' counter='+ str(counter))
            if counter > TRIGGER:
                # No face long enough: fire the lock command (non-blocking).
                os.popen(get_lock_screen_cmd())
                # video_capture.release()
                # cv2.destroyAllWindows()
                # break
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        if anterior != len(faces):
            anterior = len(faces)
            # log.info("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))
            # Face count changed (e.g. a face reappeared): reset the timer.
            counter = 0
        # Display the resulting frame
        cv2.imshow('Face Detection', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Display the resulting frame
        #cv2.imshow('Video', frame)
        time.sleep(sleep_seconds)  # Sleep for x second
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
# Click parses the CLI options and invokes run().
if __name__ == '__main__':
    run()
#cash register
#Ayo Akinrinade 08.12.18
# The order is entered as one string of menu digits (e.g. "1135" means two
# chicken strips, one hamburger, one small drink); each item's quantity is
# the number of times its digit appears in the string.
cs = 0  # chicken strips
ff = 0  # french fries
h = 0   # hamburgers
s = 0   # salads
sd = 0  # small drinks
md = 0  # medium drinks
ld = 0  # large drinks
ss = 0  # small milk shakes
ms = 0  # medium milk shakes
cashier_name = input("Cashier Name: ")
# Print the menu.
print("------Cash Register------")
print("=========================")
print("Cashier: %s" % cashier_name)
print("1. Chicken Strips - $3.50")
print("2. French Fries - $2.50")
print("3. Hamburger - $4.00")
print("4. Salad - $3.75")
print("5. Sm Drink - $1.25")
print("6. Md Drink - $1.50")
print("7. Lg Drink - $1.75")
print("8. Sm Milk Shake - $2.25")
print("9. Md Milk Shake - $2.75")
print("=========================")
order = input(": ")
# Split the order string into individual characters and count each digit.
L = list(order)
cs = L.count("1")
ff = L.count("2")
h = L.count("3")
s = L.count("4")
sd = L.count("5")
md = L.count("6")
ld = L.count("7")
ss = L.count("8")
ms = L.count("9")
# Quantity times unit price, summed over the whole menu.
total_cost = (3.5*cs)+(2.5*ff)+(4*h)+(3.75*s)+(1.25*sd)+(1.5*md)+(1.75*ld)+(2.25*ss)+(2.75*ms)
# Print the itemized receipt.
print("\nOrder----------------")
print("%d Chicken Strips" % cs)
print("%d French Fries" % ff)
print("%d Hamburgers" % h)
print("%d Salads" % s)
print("%d Small Drinks" % sd)
print("%d Medium Drinks" % md)
print("%d Large Drinks" % ld)
print("%d Small Milk Shakes" % ss)
print("%d Medium Milk Shakes" % ms)
print("Total Cost-----------")
print("$ %.2f" % total_cost)
print("Cashier: %s" % cashier_name)
print("---------------------")
print(" Have a Good Day")
import pandas as pd
import numpy as np
from prepare import *
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import KFold, cross_val_score, cross_validate
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import warnings
# Silence all warnings globally (also hides sklearn deprecation notices).
warnings.simplefilter('ignore')
class NLP_model():
    ''' Creates classification models using a variety of Sklearn models.
    Methods:
    ----------------------------------------------------------------
    > split: performs train/test split. Can also perform X/y split if given a target array.
    > tf: gets the term frequency of the lemmatized column of the dataframe.
    > tf_idf: gets the term frequency-inverse document frequency
    ----------------------------------------------------------------
    Arguments:
    - data: Pandas DataFrame
    - classifiers: List of classification models
    - names: Names of classification models
    - lang: Specifies a language to create a lang/not_lang label from
    - top_langs: Specifies the top n langs to create labels for, non-top_langs will be labeled 'other'
    '''
    def __init__(self, data:pd.DataFrame, classifiers: list, names: list, lang = None, top_langs = None):
        ''' Stores the dataframe and the classifiers with their names, and
        builds the 'label' column from either `lang` or `top_langs`
        (exactly one of the two may be given).
        '''
        # Deep copy so label creation and cleaning never mutate the caller's frame.
        self.df = data.copy(deep = True)
        # For individual lang specification: binary lang / not_lang label.
        if lang is not None and top_langs is None:
            self.lang = lang
            self.df['label'] = self.df.prog_lang.apply(lambda x: x.lower() if x == self.lang else f'not_{self.lang.lower()}')
        # For top_langs: one label per top language, everything else 'other'.
        if top_langs is not None and lang is None:
            self.top_langs = self.df.prog_lang.value_counts()[:top_langs]
            self.df['label'] = self.df.prog_lang.apply(lambda x: x.lower() if x in self.top_langs else 'other')
        if lang is not None and top_langs is not None:
            raise AttributeError('Must specify either lang or top_langs, cant create labels for both.')
        if top_langs is not None and top_langs < 2:
            raise AttributeError("Must specify more than one lang, if you want to check for a single language, use lang argument instead.")
        # Clean dataframe
        self.df.lemmatized = self.df.lemmatized.apply(basic_clean)
        # Creating class attributes
        self.classifiers = classifiers
        self.names = names
        # Pair each classifier with its display name.
        models = [(classifiers[n], names[n]) for n in range(len(names))]
        self.models = models

    def split(self, df, target = None):
        '''
        Performs an 80/20 train/test split with no stratification.

        :param df: dataframe (or feature matrix) to split
        :param target: optional target array; when given, an X/y split is
                       performed and (X_train, X_test, y_train, y_test) returned
        '''
        if target is None:
            train, test = train_test_split(df, test_size=.2,
                                           random_state=1312)
            self.train, self.test = train, test
            return train, test
        else:
            X_train, X_test, y_train, y_test = train_test_split(df, target, test_size=.2, random_state=1312)
            self.X_train, self.X_test,\
            self.y_train, self.y_test = X_train, X_test, y_train, y_test
            return X_train, X_test, y_train, y_test

    def tf(self):
        ''' Gets the term frequency of the lemmatized column of the df and
        returns a dataframe with raw counts, frequency, and augmented frequency.
        '''
        # Collect every word of every lemmatized document.
        # (FIX: removed an unused `docs` accumulator from the original.)
        words = []
        for doc in self.df['lemmatized'].values:
            for word in doc.split():
                words.append(word)
        word_ser = pd.Series(words)  # wrap the words in a Series for counting
        # Raw term counts, plain frequency, and max-normalized (augmented) frequency.
        tf_df = (pd.DataFrame({'raw_count': word_ser.value_counts()}))
        tf_df['frequency'] = tf_df.raw_count / tf_df.raw_count.sum()
        tf_df['augmented_frequency'] = tf_df.frequency / tf_df.frequency.max()
        return tf_df

    def tf_idf(self):
        ''' Gets tf-idf and returns the TfidfVectorizer output as a dataframe.
        '''
        tfidf = TfidfVectorizer()  # make the object
        bag_of_words = tfidf.fit_transform(self.df['lemmatized'].values)  # fit_transform on lemmatized
        tfidf_df = pd.DataFrame(bag_of_words.todense(), columns=tfidf.get_feature_names())  # wrap in a dataframe
        return tfidf_df

    def count_vectorize(self, ngram_range = (1,1)):
        ''' Performs a bag-of-words count vectorization over the lemmatized
        column with ngrams in `ngram_range`.

        WARNING: can take a long time on large corpora.
        NOTE(review): the docstring of the original mentioned a cached csv,
        but no caching is implemented here.
        '''
        print('''Creating vectorized dataframe now. Vectorization may take a while, please wait...''')
        # BUGFIX: honor the ngram_range argument; the original hard-coded (1, 1),
        # silently ignoring the caller's choice.
        cv = CountVectorizer(ngram_range=ngram_range)
        vectors = cv.fit_transform(self.df.lemmatized.values)  # fit_transform on lemmatized col
        self.vocab_count = cv.vocabulary_
        # Wrap the sparse matrix in a dataframe with feature names as columns.
        vector_df = pd.DataFrame(vectors.todense(), columns = cv.get_feature_names())
        # Cache the vectorized dataframe on the instance for metrics().
        self.vectorized = vector_df.copy()
        return vector_df

    def metrics(self, metric_type = 'accuracy', splits = 3):
        ''' Encodes the label column, cross-validates every model with a
        `splits`-fold KFold, and returns a dataframe of mean `metric_type`
        scores sorted best-first.
        '''
        try:
            self.df['label']
        except KeyError:
            # BUGFIX: raise (the original *returned* the exception object),
            # so misconfiguration fails loudly instead of leaking an Exception value.
            raise KeyError('Must specify language target in class to create models')
        try:
            # If count_vectorize has already run, this attribute exists.
            self.vectorized
        except AttributeError:
            print('Have not run count_vectorize method yet, running now...')
            self.vectorized = self.count_vectorize()
            print('All done! Moving on to modeling, this may take a while...')
        target = 'label'
        # Encode labels as integers: binary (lang=1 / not_lang=0) or
        # multi-class (top langs = 1..n, 'other' = 0).
        if self.df[target].nunique() == 2:
            s = self.df[target].replace([f'{self.lang.lower()}', f'not_{self.lang.lower()}'], [1,0])
        else:
            lang_list = [l.lower() for l in list(self.top_langs.index)]
            lang_list.append('other')
            lang_encode = list(range(1, len(self.top_langs)+1))
            lang_encode.append(0)
            s = self.df[target].replace(lang_list, lang_encode)
        X_train, X_test, y_train, y_test = self.split(self.vectorized, s)
        result = []
        for (classifier, name) in self.models:
            kfold = KFold(n_splits = splits)
            scores = cross_validate(classifier, X_train, y_train, cv = kfold, scoring = metric_type, return_estimator=True)
            result.append(scores)
            msg = "{0}: Validate accuracy: {1}".format(name, scores['test_score'].mean())
            print(msg)
        estimators = [res['estimator'] for res in result]  # fitted estimators per fold
        results = [res['test_score'] for res in result]    # per-fold validation scores
        avg_res = [round(res['test_score'].mean(), 4) * 100 for res in result]  # mean score per model, as a percentage
        metrics_df = pd.DataFrame(data = zip(self.names, avg_res), columns = ['model', f'average_{metric_type}%'])
        # Keep (estimators, scores) pairs for test_on_best().
        model_scores = [(estimators[n], results[n]) for n in range(len(estimators))]
        self.model_scores = model_scores
        return metrics_df.sort_values(by = [f'average_{metric_type}%'], ascending = False)

    def test_on_best(self):
        ''' Picks the single best fold-estimator from cross-validation and
        scores it on the held-out test split. Returns that model.
        '''
        # Flatten the per-model lists of fold estimators...
        models = []
        for m in self.model_scores:
            for mdl in m[0]:
                models.append(mdl)
        # ...and the matching per-fold validation scores.
        scores = []
        for m in self.model_scores:
            for score in m[1]:
                scores.append(score)
        estimator_scores = [(models[n], scores[n]) for n in range(len(scores))]
        # Max validation score decides the winner.
        maxs = [tup[1] for tup in estimator_scores]
        for tup in estimator_scores:
            if tup[1] == max(maxs):
                mdl = (tup[0])
        print(f'Best model: {tup[0]}\nValidate score: {round(tup[1], 4) *100}%\nTest Score: {round(mdl.score(self.X_test, self.y_test), 3) *100}%')
        return mdl
from networkx import read_graphml
from rate_card import generate_rate_card
import argparse
def main(rate_card_path, graph_path) -> float:
    """
    Calculates total cost of graph based on a graphml file and a rate card csv
    :param rate_card_path: path to rate card csv
    :type rate_card_path: str, Path
    :param graph_path: path to graphml file
    :type graph_path: str, Path
    :return: total cost of all infrastructure in graph
    :rtype: float
    """
    # Initialise total cost
    total_cost = 0
    # read graphml file into networkx graph
    with open(graph_path) as file:
        graph = read_graphml(file)
    # generate rate_card and minimum distances between nodes
    rate_card, min_weights = generate_rate_card(rate_card_path, graph)
    # iterate through all nodes
    for node_name, node_type in graph.nodes(data="type"):
        # find cost structure from rate card (presumably a string from the
        # csv, e.g. "100" or "10xCabinet" -- confirm in generate_rate_card)
        cost = rate_card[node_type.lower()]
        # if cost is a flat rate, add it to total cost
        # NOTE(review): str.isnumeric() is False for values with a decimal
        # point or sign (e.g. "1.5"), which would fall through to the
        # NotImplementedError below -- confirm rate cards only use integers.
        if cost.isnumeric():
            total_cost += float(cost)
        # if cost is based on minimum distance between node and another node of certain type (e.g. cabinet)
        # lookup minimum distance from min_weights and calculate cost
        elif 'x' in cost:
            multiplier, source = cost.split('x')
            total_cost += float(multiplier) * min_weights[source.lower()][node_name]
        else:
            raise NotImplementedError("")
    # iterate through edges
    for edge in graph.edges(data=True):
        # get material and length from graph data
        material = edge[2]['material']
        length = float(edge[2]['length'])
        # add cost to material based on multiplier * length
        total_cost += float(rate_card[material]) * length
    return total_cost
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="This program takes a given graph and rate card and calculates the "
                                                 "total cost of the network")
    parser.add_argument("-r", "--rate_card", help="path to rate card csv", required=True)
    parser.add_argument("-g", "--graph", help="path to graph .graphml file", required=True)
    args = parser.parse_args()
    rate_card_path_arg = args.rate_card
    graph_path_arg = args.graph
    # NOTE(review): both options are required=True, so argparse exits before
    # either of these two emptiness checks can ever trigger (dead code).
    if not graph_path_arg:
        raise ValueError("Must provide path to graph .graphml file")
    if not rate_card_path_arg:
        raise ValueError("Must provide path to rate card .csv file")
    # check that rate_card is a csv
    if not rate_card_path_arg.endswith('.csv'):
        raise TypeError("Rate Card must be a csv file")
    # check that graph is a .graphml file
    if not graph_path_arg.endswith('.graphml'):
        raise TypeError("Graph must be a .graphml file")
    print(main(rate_card_path_arg, graph_path_arg))
|
#!/usr/bin/env python
# coding: utf-8
import tornado.ioloop
import tornado.web
import requests
import json
__author__ = 'linyang95#aol.com'
__version__ = '0.1.0'
def net_post(uri, payload):
    """POST *payload* as form data to *uri* and return the requests Response."""
    return requests.post(uri, data=payload)
class MainHandler(tornado.web.RequestHandler):
    """Forwards any GET/POST to the URL given in the 'src' argument and
    relays the upstream response body (a CORS-friendly forwarding proxy)."""
    def prepare(self):
        # Permissive CORS headers on every response.
        self.add_header('Access-Control-Allow-Origin', '*')
        self.add_header('Access-Control-Allow-Methods', 'OPTIONS,GET,POST')
        self.add_header('Access-Control-Allow-Headers', 'Content-Type')
    def post(self):
        # POST is handled identically to GET.
        self.get()
    def get(self):
        # Re-pack every request argument (first value only) as the upstream payload.
        payload = dict((k, self.get_argument(k, ""))
                       for k in self.request.arguments)
        # SECURITY(review): 'src' is taken verbatim from the client, so this is
        # an open proxy / SSRF vector -- the server will POST to any URL a
        # caller names, including internal addresses.
        r = net_post(self.get_argument("src", ""), payload)
        self.write(r.text)
# Route everything at / to the proxy handler.
application = tornado.web.Application([
    (r"/", MainHandler),
])
if __name__ == "__main__":
    # Serve on port 8200 until the IOLoop is stopped.
    application.listen(8200)
    tornado.ioloop.IOLoop.current().start()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tud
from torch.nn.parameter import Parameter
from collections import Counter
import numpy as np
import random
import math
import pandas as pd
import scipy
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
USE_CUDA = torch.cuda.is_available()
# Fix every random seed so experiment results are reproducible.
random.seed(53113)
np.random.seed(53113)
torch.manual_seed(53113)
if USE_CUDA:
    torch.cuda.manual_seed(53113)
# Hyperparameters
K = 100  # number of negative samples
C = 3  # nearby words threshold
NUM_EPOCHS = 2  # The number of epochs of training
MAX_VOCAB_SIZE = 30000  # the vocabulary size
BATCH_SIZE = 128  # the batch size
LEARNING_RATE = 0.2  # the initial learning rate
EMBEDDING_SIZE = 100  # word-vector dimensionality
LOG_FILE = "word-embedding.log"
# Tokenizer: turn one text into its individual words.
def word_tokenize(text):
    """Split *text* on whitespace and return the list of tokens."""
    tokens = text.split()
    return tokens
# coding: utf-8
import urllib.parse
import functools
import lglass.rpsl
class Database(object):
    """Database is an abstract class which defines some constants and setter/getter
    for subscripts. You have to extend from it by creating a new class and
    overriding __init__, get, list, find, save and delete to conform to the
    database protocol."""
    def __init__(self):
        raise NotImplementedError("Instances of Database are not permitted")
    def get(self, type, primary_key):
        """Get specific object addressed by type and primary_key from database. Returns
        lglass.rpsl.Object. This method shall raise a KeyError if the object was not
        found."""
        raise NotImplementedError("get() is not implemented")
    def list(self, filter=None, limit=None):
        """Return list of matching RPSL object specs, filter can be a callable taking
        two arguments (``type``, ``primary_key``), and limit can be a int. RPSL object specs
        are tuples consisting of two str instances."""
        raise NotImplementedError("list() is not implemented")
    def find(self, key, types=None, limit=None):
        """Finds an object by searching the whole database for key. It's possible
        to supply a list of acceptable object types and to provide a limit of objects.
        This method returns a list of :py:class:`lglass.rpsl.Object`."""
        raise NotImplementedError("find() is not implemented")
    def save(self, obj):
        """Save object in database."""
        raise NotImplementedError("save() is not implemented")
    def delete(self, type, primary_key):
        """Delete object in database."""
        raise NotImplementedError("delete() is not implemented")
    def flush(self):
        """Flush database cache."""
        raise NotImplementedError("flush() is not implemented")
    # Known RPSL object classes, shared by all backends.
    object_types = {
        "as-block",
        "as-set",
        "aut-num",
        "dns",
        "filter-set",
        "inet6num",
        "inetnum",
        "inet-rtr",
        "irt",
        "key-cert",
        "mntner",
        "organisation",
        "peering-set",
        "person",
        "poem",
        "poetic-form",
        "role",
        "route",
        "route6",
        "route-set",
        "rtr-set",
        "schema"
    }
    def save_all(self, objs):
        """Save every object in *objs* (no atomicity guarantees)."""
        for obj in objs:
            self.save(obj)
    def get_all(self):
        """Return a generator yielding every object, in list() order."""
        return (self.get(*spec) for spec in self.list())
    def schema(self, type):
        """ Return schema for type. Raises a KeyError if schema was not found. """
        if type == "schema":
            return lglass.rpsl.SchemaObject.SCHEMA_SCHEMA
        name = type.upper() + "-SCHEMA"
        # Try several spec spellings before giving up.
        specs = [("schema", name), ("schemas", name), ("schema", type),
                 ("schemas", type)]
        obj = None
        for spec in specs:
            try:
                obj = self.get(*spec)
            except KeyError:
                continue
            break
        if obj is None:
            raise KeyError("schema({})".format(type))
        return lglass.rpsl.SchemaObject(obj)
    def update(self, other):
        """Copy objects from *other* that are missing here or differ."""
        for obj in other:
            if obj.spec not in self:
                self.save(obj)
            else:
                old_obj = self.get(*obj.spec)
                if obj != old_obj:
                    self.save(obj)
    def __len__(self):
        # Number of stored objects.
        return len(self.list())
    def __iter__(self):
        # Iterate over full objects, not just (type, primary_key) specs.
        for type, primary_key in self.list():
            yield self.get(type, primary_key)
    def __contains__(self, key):
        # Membership by (type, primary_key) spec; delegates to get().
        if not isinstance(key, tuple):
            raise TypeError("Expected key to be tuple of length 2, got {}".format(key))
        try:
            self.get(*key)
        except KeyError:
            return False
        return True
    def __getitem__(self, key):
        # db[type, primary_key] -> object (KeyError if absent).
        if not isinstance(key, tuple):
            raise TypeError("Expected key to be tuple of length 2, got {}".format(key))
        return self.get(*key)
    def __delitem__(self, key):
        # del db[type, primary_key]
        if not isinstance(key, tuple):
            raise TypeError("Expected key to be tuple of length 2, got {}".format(key))
        self.delete(*key)
# Registry mapping URL scheme names to database classes.
url_schemes = {}
def register(scheme_or_cls):
    """This decorator adds the supplied class to the url_schemes dict with
    the schema specified in ``name``.

    Works both bare (``@register``) and with an explicit scheme name
    (``@register("dict")``).
    """
    def decorator(cls):
        # Only classes implementing the from_url constructor are registerable.
        if hasattr(cls, "from_url") and callable(cls.from_url):
            # Register under the fully-qualified and bare class names...
            url_schemes[(cls.__module__ + "." + cls.__name__).lower()] = cls
            url_schemes[(cls.__name__).lower()] = cls
            # ...and under the explicit scheme name, if one was given
            # (``name`` is a late-bound closure variable set below).
            if name is not None:
                url_schemes[name.lower()] = cls
        return cls
    name = None
    if isinstance(scheme_or_cls, type):
        # Used as a bare decorator: @register applied directly to a class.
        return decorator(scheme_or_cls)
    else:
        # Used with a scheme name string: @register("dict").
        # NOTE(review): functools.wraps on a str copies no attributes
        # (missing ones are skipped since Python 3.2), so it is effectively
        # a no-op here -- confirm it can simply return decorator.
        name = scheme_or_cls
        return functools.wraps(scheme_or_cls)(decorator)
def from_url(url):
    """Create database from URL. This function accepts a str or an URL tuple.
    The URL schema may have multiple formats:
    1. ``{db_name}``
    2. ``whois+{db_name}``
    3. ``whois+{module}+{db_name}``

    Raises KeyError if the resolved scheme was never registered.
    """
    import importlib
    if isinstance(url, str):
        url = urllib.parse.urlparse(url)
    # The scheme may carry up to three '+'-separated components.
    scheme = url.scheme.split("+", 2)
    if len(scheme) == 1:
        # Plain scheme; a dotted name implies a module to import first.
        scheme = scheme[0]
        if "." in scheme:
            importlib.import_module(scheme.rsplit(".", 1)[0])
    elif len(scheme) == 2:
        # "whois+{db_name}"
        assert scheme[0] == "whois"
        scheme = scheme[1]
    elif len(scheme) == 3:
        # "whois+{module}+{db_name}": import the module so it can register itself.
        assert scheme[0] == "whois"
        importlib.import_module(scheme[1])
        scheme = scheme[2]
    # Delegate construction to the registered class.
    return url_schemes[scheme].from_url(url)
def build_chain(urls):
    """Construct a database per URL and chain them together.

    Each newly built database's ``database`` attribute is pointed at the
    previously built one; the last URL's database is returned (or None
    for an empty iterable).
    """
    chained = None
    for url in urls:
        database = from_url(url)
        if chained is not None:
            database.database = chained
        chained = database
    return chained
@register("dict")
class DictDatabase(Database):
    """In-memory backend: objects live in a plain dict keyed by their
    (type, primary_key) spec. Uses only builtin Python data types."""

    def __init__(self):
        self.backend = dict()

    def save(self, object):
        """Store the object under its real_spec key."""
        self.backend[object.real_spec] = object

    def delete(self, type, primary_key):
        """Remove the object with the given spec (KeyError if absent)."""
        del self.backend[type, primary_key]

    def get(self, type, primary_key):
        """Return the object with the given spec (KeyError if absent)."""
        return self.backend[type, primary_key]

    def list(self):
        """Return all stored (type, primary_key) specs."""
        return list(self.backend.keys())

    def find(self, primary_key, types=None):
        """Return every object whose primary key matches, optionally
        restricted to the given types."""
        matches = [
            (t, pk)
            for t, pk in self.backend
            if pk == primary_key and (types is None or t in types)
        ]
        return [self.get(*spec) for spec in matches]

    @classmethod
    def from_url(cls, url):
        """URL carries no configuration for this backend; build fresh."""
        return cls()
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
def f(x):
    """The target function f(x) = cos(x) - x whose root is sought."""
    value = np.cos(x) - x
    return value
def f_(x):
    """First derivative of f: f'(x) = -sin(x) - 1."""
    slope = -np.sin(x) - 1
    return slope
def get_next_x(x):
    # Fixed-point update: x <- x - f_(x), i.e. it drives f_ (the derivative)
    # toward zero, which is consistent with the caller's stopping test
    # |f_(x)| <= 0.01.
    # NOTE(review): the original comment claimed Newton's Method, but true
    # Newton-Raphson on f would be x - f(x)/f_(x); changing the update here
    # without also changing the loop's stopping condition would make the
    # surrounding while loop never terminate, so the code is left as-is.
    return x - f_(x)
if __name__ == '__main__':
    # Sample the function over [-10, 10] for the background curve.
    X = np.linspace(-10, 10, 500, endpoint=True)
    F = f(X)
    # x0: initial guess
    x = -6.
    # x values of iterations
    xs = []
    # Iteration count is specified
    # for _ in xrange(1,10):
    #     xs.append(x)
    #     x = get_next_x(x)
    # Threshold is specified: iterate until the derivative magnitude is small
    # (see the NOTE on get_next_x about this stopping criterion).
    while np.abs(f_(x)) > 0.01:
        xs.append(x)
        x = get_next_x(x)
    xs.append(x)
    # y values of iterations (np.cos accepts the Python list directly)
    ys = f(xs)
    print(xs, ys)
    # Iteration path in red, over the function curve in black.
    plt.plot(xs, ys, 'ro-')
    # First point represented in green color
    plt.plot(xs[0], ys[0], 'go')
    # Last point represented in blue color
    plt.plot(xs[-1], ys[-1], 'bo')
    plt.plot(X, F, 'k-')
    plt.grid(True)
    plt.show()
|
import requests
import json
import time
import argparse
import datetime
import numpy as np
import pandas as pd
import os
from bs4 import BeautifulSoup
def setup():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cookies", help="add cookies", required=True)
args = parser.parse_args()
return args
def get_stories(cookies):
    """Scrape the Medium stats page and return (story ids, story titles).

    :param cookies: cookies for the authenticated session, passed straight
                    to requests (NOTE(review): export_medium wraps a raw
                    cookie string into a dict itself -- confirm the type
                    expected here matches the caller).
    """
    # Publication stats page is hard-coded to sourcedtech.
    r = requests.get("https://medium.com/sourcedtech/stats/stories", cookies=cookies)
    soup = BeautifulSoup(r.text, "html.parser")
    idlist = []
    titlelist = []
    # One table row per story: the title sits in an <h2>, the story id in
    # the row's data-action-value attribute.
    myclass = soup.findAll("tr", {"class": "sortableTable-row js-statsTableRow"})
    for a in myclass:
        children = a.find("h2", {"class":"sortableTable-title"})
        titlelist.append(children.text)
        idlist.append(a.attrs["data-action-value"])
    return idlist, titlelist
def export_medium(directory, story, end_date, cookies):
    """Download daily stats for one story, write them to CSV, return the DataFrame.

    One row per day from 2018-01-01 up to (but not including) today, with
    views/reads/upvotes/claps summed per day and a derived reading rate.
    ``end_date`` is a millisecond epoch timestamp string; ``cookies`` is the
    raw cookie string for the Medium session.
    """
    start_year = datetime.date(2018, 1, 1)
    today = datetime.date.today()
    delta = (today - start_year).days

    # Row 0 holds the column headers; rows 1..delta hold one day each.
    figures_array = np.zeros((1 + delta, 7), dtype=np.object_)
    headers = ["Story id", "Date", "Views", "Reads", "Reading rate", "Upvotes", "Claps"]
    for col, header in enumerate(headers):
        figures_array[0, col] = header

    # 1514764800 is the epoch timestamp of 2018-01-01, the series start.
    url = "https://medium.com/stats/" + story + "/1514764800/" + end_date
    r = requests.get(url, cookies=dict(cookies_are=cookies))
    # Medium prefixes its JSON with an anti-hijacking guard; strip it first.
    response_json = json.loads(str(r.text).replace("])}while(1);</x>", ""))

    datecolumn = start_year
    for n in range(0, delta):
        row = n + 1
        figures_array[row, 0] = story
        figures_array[row, 1] = str(datecolumn)
        # Sum every sample collected on this calendar day.
        for entry in response_json['payload']['value']:
            collected = time.strftime('%Y-%m-%d', time.localtime(entry['collectedAt'] / 1000))
            if collected == str(datecolumn):
                figures_array[row, 2] += entry['views']
                figures_array[row, 3] += entry['reads']
                figures_array[row, 5] += entry['upvotes']
                figures_array[row, 6] += entry['claps']
                # Reading rate = reads / views, guarded against zero views.
                if figures_array[row, 2] != 0:
                    figures_array[row, 4] = round(figures_array[row, 3] / figures_array[row, 2], 2)
        datecolumn += datetime.timedelta(days=1)

    # File name carries today's date as YYYYMMDD.
    stamp = str(today).replace("-", "")
    df = pd.DataFrame(figures_array[1:], columns=figures_array[0])
    df.to_csv(directory + "medium_" + story + "_" + stamp + ".csv",
              sep=',', encoding='utf-8', index=False)
    return df
def main():
    """Interactive entry point: list stories, ask for one, export its stats."""
    args = setup()

    # Build the range-end timestamp: now (UTC) in milliseconds since the epoch.
    utc_datetime = datetime.datetime.utcnow()
    rightnow = utc_datetime.strftime("%Y-%m-%d %H:%M:%S.%f")
    end_date = str(int(time.mktime(time.strptime(rightnow, '%Y-%m-%d %H:%M:%S.%f'))) * 1000)

    directory = "output-medium/"
    if not os.path.exists(directory):
        os.makedirs(directory)

    # NOTE(review): get_stories receives {"cookies": <value>} while
    # export_medium wraps the raw string as {"cookies_are": <value>} —
    # confirm which cookie name the Medium endpoints actually expect.
    idlist, titlelist = get_stories({"cookies": args.cookies})

    print("\nChoose to which story you want the metrics:")
    for i, title in enumerate(titlelist):
        print("[", i, "] ", title, " (id", idlist[i], ")")
    option = input("Your option: ")
    story = idlist[int(option)]

    print(export_medium(directory, story, end_date, args.cookies))

if __name__ == '__main__':
    main()
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
from pandas import DataFrame
from bitstring import BitArray
import os
import re
# In[4]:
# Load the sample data from a local, hard-coded path.
# NOTE(review): assumes FakeData.csv has a "Value" column of Verilog-style
# literals (e.g. "4'hF") — confirm against the file.
Data=pd.read_csv('C:\\Users\\sarah\\Desktop\\Udemy\\Pythoncourse\\FakeData.csv')
# In[5]:
# Display the DataFrame (notebook cell output).
Data
# In[6]:
# The goal is to build the values into a word.  The LSB (least significant
# bit) is to the right; in this case it would be 0b0111110 (hex F = binary
# 1111).  E is really 16, not 15; also wanted to lead with zero to make sure
# leading zeros aren't dropped.
# In[7]:
# Scratch experiment: rewrite "4hF" -> "0xF" (this regex has no apostrophe).
x="4hF"
regex=r"(\d+)h"
re.sub(regex,'0x',x)
# In[8]:
# Same idea against a real cell: turn "<n>'h..." into "0x..." then parse it
# with BitArray.
x=Data.Value[1]
regex=r"(\d+)'h"
BitArray(re.sub(regex,'0x',x))
# In[9]:
def Bitnum(x):
    """Rewrite a Verilog-style literal prefix into one BitArray understands.

    "<n>'b..." becomes "0b..." (binary) and "<n>'h..." becomes "0x..." (hex);
    any other string is returned unchanged.
    """
    # BitArray recognizes 0b as binary and 0x as hex.
    x = re.sub(r"(\d+)'b", '0b', x)
    x = re.sub(r"(\d+)'h", '0x', x)
    return x
# In[29]:
# In[ ]:
# In[11]:
# Normalize every Value into a BitArray-parsable literal (0b/0x prefixed).
Data['BitVals']=Data.Value.apply(Bitnum)
# In[12]:
Data
# In[13]:
# Join two rows' converted literals and parse the result as one BitArray.
# NOTE(review): "+" concatenates the literal *strings* before parsing —
# confirm BitArray accepts the combined form for these rows.
BitWord=BitArray(Data.BitVals[3]+Data.BitVals[2])
# In[14]:
BitWord
# In[20]:
def BitBin(x):
    """Return the binary-string rendering of a BitArray-parsable value.

    Hex inputs ("0x...") come back as their expanded bit string.
    """
    return BitArray(x).bin
# In[22]:
Data['Bits']=Data.BitVals.apply(BitBin)
# In[23]:
Data
# In[28]:
Bitword=Data.Bits[3]+Data.Bits[2]+Data.Bits[1]+Data.Bits[0]
Bitword #This is the answer
# In[43]:
#-----------------Now for checks
#number of dogs 10binary = 2decimal
Bitword[6:8]
# In[45]:
len(Bitword) #checking to make sure leading zeros weren't dropped
# In[46]:
Data.Bits
# In[ ]:
|
# Enable the hardware interrupt lines this system services:
#   IRQ 0 - clock tick, IRQ 1 - keyboard.
for irq in (0, 1):
    enable_interrupt(irq)
|
# Hand-collected transcript-page URLs from tim.blog (The Tim Ferriss Show).
# NOTE(review): the list is not deduplicated or sorted; entries span several
# URL schemes used by the blog over time.
aHTMLLinks = ["https://tim.blog/derek-sivers-reloaded-on-the-tim-ferriss-show-transcript/",
"https://tim.blog/daymond-john-on-the-tim-ferriss-show-transcript/",
"https://tim.blog/edward-norton-on-the-tim-ferriss-show-transcript/",
"https://tim.blog/naval-ravikant-on-the-tim-ferriss-show-transcript/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-caroline-paul/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-on-philosophy-and-riches/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-bj-miller/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-paulo-coelho/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-on-zero-to-hero-transformations/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-joshua-skenes/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-mike-rowe/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-the-secrets-of-gymnastic-strength-training/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-how-to-optimize-creative-output-jarvis-vs-ferriss/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-assessing-risk-and-living-without-a-rope-lessons-from-alex-honnold/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-lessons-from-war-tribal-societies-and-a-non-fiction-life-sebastian-junger/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-how-to-be-tim-ferriss-freakonomics-stephen-j-dubner/",
"https://tim.blog/2018/01/01/the-tim-ferriss-show-transcripts-marc-andreessen/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-kevin-kelly-ai-virtual-reality-and-the-inevitable/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-ramit-sethi-how-creatives-should-negotiate/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-jamie-foxx-part-2/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-malcolm-gladwell/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-ryan-holiday-169-useful-lessons-from-workaholics-anonymous-corporate-implosions-and-more/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-shay-carl-from-manual-laborer-to-2-3-billion-youtube-views/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-the-random-show-new-favorite-books-memory-training-and-bets-on-vr/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-dom-dagostino-the-power-of-the-ketogenic-diet/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-chris-young/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-nicholas-mccarthy/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-how-to-cage-the-monkey-mind/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-mike-birbiglia/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-seth-godin-on-how-to-think-small-to-go-big/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-tony-robbins-on-achievement-versus-fulfillment/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-the-secrets-of-gymnastic-strength-training-part-two/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-how-to-waste-money-to-improve-the-quality-of-your-life/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-jason-nemer/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-cal-fussman-the-master-storyteller-returns/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-shep-gordon/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-practicing-what-you-preach/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-tony-robbins-on-how-to-resolve-internal-conflict/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-jocko-willink-on-discipline-leadership-and-overcoming-doubt/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-dom-dagostino-on-disease-prevention-cancer-and-living-longer/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-shay-carl-on-wealth-parenting-and-the-future-of-video/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-matt-mullenweg-on-characteristics-and-practices-of-successful-entrepreneurs/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-the-art-and-science-of-learning-anything-faster/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-the-return-of-drunk-dialing/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-peter-attia-david-sabatini-and-navdeep-chandel/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-erik-vance/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-david-dhh-heinemeier-hansson/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-dave-camarillo/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-drunk-dialing-ladies-night-edition/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-charles-poliquin-his-favorite-mass-building-program-his-nighttime-routine-for-better-sleep-and-much-more/",
"https://tim.blog/2018/06/06/the-tim-ferriss-show-transcripts-stephen-dubner-the-art-of-storytelling-and-facing-malcolm-gladwell-in-a-fist-fight/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-susan-garrett/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-meditation-mindset-and-mastery/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-derek-sivers-distilled/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-david-heinemeier-hansson-on-digital-security-company-culture-and-the-value-of-schooling/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-josh-waitzkin-distilled/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-mark-bittman/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-brene-brown-distilled/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-ezra-klein/",
"https://tim.blog/2018/06/19/the-tim-ferriss-show-transcripts-the-random-show-threesome-tim-ferriss-kevin-rose-and-matt-mullenweg/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-becoming-the-best-version-of-you/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-a-j-jacobs/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-what-i-learned-in-2016/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-fasting-vs-slow-carb-diet-top-150-purchases-balancing-productivity-and-relaxation-and-more/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-debbie-millman/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-whitney-cummings/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-arnold-schwarzenegger-part-2/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-dr-martin-gibala/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-kara-swisher/",
"https://tim.blog/2018/06/22/the-tim-ferriss-show-transcripts-adam-robinson-interview/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-soman-chainani/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-mr-money-mustache/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-jerrod-carmichael/",
"https://tim.blog/2018/06/21/the-tim-ferriss-show-transcripts-krista-tippett/",
"https://tim.blog/2018/06/22/the-tim-ferriss-show-transcripts-the-random-show-with-kevin-rose-224-the-random-show-drinking-urine-exploring-japan-and-figuring-out-life/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-john-crowley/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-dr-phil-zimbardo/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-caroline-paul-on-conquering-fear-and-reducing-anxiety/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-jerzy-gregorek-and-naval-ravikant/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-ricardo-semler/",
"https://tim.blog/2018/06/22/the-tim-ferriss-show-transcripts-debbie-millman-on-the-secrets-tactics-and-creative-processes-of-high-performers-and-achievers/",
"https://tim.blog/2018/06/22/the-tim-ferriss-show-transcripts-cheryl-strayed/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-how-to-control-stress-upgrade-your-nutrition-and-build-the-mindset-of-a-gladiator/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-cory-booker/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-marie-kondo/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-dorian-yates/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-murray-carter/",
"https://tim.blog/2017/05/25/transcript-dr-rhonda-patrick-on-exploring-smart-drugs-fasting-and-fat-loss/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-ryan-flaherty/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-art-de-vany/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-accelerated-learning-my-story/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-esther-perel/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-phil-keoghan/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-vince-vaughn/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-nick-szabo/",
"https://tim.blog/2018/06/05/the-tim-ferriss-show-transcripts-david-blaine/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-jason-khalipa/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-cool-tools-for-travel-with-kevin-kelly/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-reid-hoffman-ten-commandments/",
"https://tim.blog/2018/06/01/the-tim-ferriss-show-transcripts-blake-mycoskie/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-myers-briggs-diet-mistakes-and-immortality/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-kyle-maynard/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-mark-bell/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-morning-routines-and-strategies/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-when-to-quit-lessons-from-world-class-entrepreneurs-investors-authors-and-more/",
"https://tim.blog/2018/06/20/the-tim-ferriss-show-transcripts-how-to-turn-failure-into-success/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-michael-gervais/",
"https://tim.blog/2018/06/22/the-tim-ferriss-show-transcripts-physical-training-and-dating-strategies/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-bill-rasmussen/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-cal-fussman-and-larry-king/",
"https://tim.blog/2018/05/30/the-tim-ferriss-show-transcripts-phil-hellmuth/",
"https://tim.blog/2018/05/30/tim-ferriss-show-transcript-maria-sharapova/",
"https://tim.blog/2018/02/01/the-tim-ferriss-show-transcripts-stewart-copeland/",
"https://tim.blog/2018/02/01/the-tim-ferriss-show-transcripts-darren-aronofsky/",
"https://tim.blog/2018/02/01/the-tim-ferriss-show-transcripts-ray-dalio/",
"https://tim.blog/2018/02/01/the-tim-ferriss-show-transcripts-bill-burr/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-favorite-books-supplements-simple-technologies-and-more/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-tools-and-tips-for-better-sleep/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-eric-ripert/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-the-3-critical-rules-of-branding/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-investing-wisdom-from-marc-andreessen-peter-thiel-reid-hoffman-chris-sacca-and-others/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-intimacy-emotional-baggage-relationship-longevity-and-more-esther-perel/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-sir-richard-branson/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-walter-isaacson/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-arianna-huffington/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-discipline-equals-freedom-jocko-willink/",
"https://tim.blog/2018/02/02/the-tim-ferriss-show-transcripts-terry-laughlin/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-podcasts-sharon-salzberg/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-tim-oreilly/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-brian-grazer/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-alice-little/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-stewart-brand/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-how-to-say-no/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-tim-urban/",
"https://tim.blog/2018/02/03/the-tim-ferriss-show-transcripts-the-answers-to-my-favorite-questions/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-m-sanjayan/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-mike-maples/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-terry-crews/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-bozoma-saint-john/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-how-to-handle-information-overwhelm-and-social-media/",
"https://tim.blog/2018/02/04/the-tim-ferriss-show-transcripts-gretchen-rubin/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-how-the-best-overcome-fear/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-lessons-and-warnings-from-successful-risk-takers/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-catherine-hoke/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-best-investments-bad-advice-to-avoid-and-other-life-lessons/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-the-4-hour-workweek-revisited/",
"https://tim.blog/2018/02/06/the-tim-ferriss-show-transcripts-how-to-build-popular-podcasts-and-blogs/",
"https://tim.blog/2018/02/07/the-tim-ferriss-show-transcripts-bob-metcalfe/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-dr-gabor-mate/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-how-to-secure-financial-freedom-maximize-productivity-and-protect-your-health/",
"https://tim.blog/2018/06/04/the-tim-ferriss-show-transcripts-jack-kornfield/",
"https://tim.blog/2018/03/10/tim-ferriss-show-transcript-joe-gebbia/",
"https://tim.blog/2018/03/22/tim-ferriss-show-transcript-aubrey-marcus/",
"https://tim.blog/2018/03/22/the-tim-ferriss-show-transcripts-frank-blake/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-debbie-millman-how-to-prioritize-your-life-and-make-time-for-what-matters/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-daniel-pink/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-discipline-sex-and-psychedelics-the-return-of-drunk-dialing/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-karlie-kloss/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-katie-couric/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-astro-teller-how-to-think-10x-bigger/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-tim-kennedy/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-podcasts-nick-thompson/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-joseph-gordon-levitt/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-michael-pollan/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-cindy-eckert/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-lessons-learned-traveling-the-world/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-whitney-wolfe-herd/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-steve-jurvetson/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-one-person-businesses-that-make-1m-per-year/",
"https://tim.blog/2018/06/26/the-tim-ferriss-show-transcripts-liz-lambert/",
"https://tim.blog/2018/06/27/the-tim-ferriss-show-transcripts-brandon-stanton/",
"https://tim.blog/2018/06/30/the-tim-ferriss-show-transcripts-adam-robinson-outflanking-and-outsmarting-the-competition/",
"https://tim.blog/2018/06/30/the-tim-ferriss-show-transcripts-tim-ferriss-goes-to-maximum-security-prison/",
"https://tim.blog/2018/07/05/the-tim-ferriss-show-transcripts-cal-fussman-corners-tim-ferriss/",
"https://tim.blog/2018/07/07/the-tim-ferriss-show-transcripts-lessons-from-richard-branson-tony-robbins-ray-dalio-and-other-icons-325/",
"https://tim.blog/2018/07/13/the-tim-ferriss-show-transcripts-reid-hoffman-brian-chesky-how-to-scale-to-100m-users/",
"https://tim.blog/2018/07/18/the-tim-ferriss-show-transcripts-aisha-tyler/",
"https://tim.blog/2018/07/25/the-tim-ferriss-show-transcripts-jason-fried/",
"https://tim.blog/2018/08/01/the-tim-ferriss-show-transcripts-the-return-of-drunk-dialing-how-to-ask-better-questions-take-better-risks-and-more/",
"https://tim.blog/2018/08/07/the-tim-ferriss-show-transcripts-ann-miura-ko/",
"https://tim.blog/2018/08/11/the-tim-ferriss-show-transcripts-coach-george-raveling/",
"https://tim.blog/2018/08/20/the-tim-ferriss-show-transcripts-random-show-fasting-biohacking-and-tony-robbins/",
"https://tim.blog/2018/08/29/the-tim-ferriss-show-transcripts-drew-houston/",
"https://tim.blog/2018/09/11/the-tim-ferriss-show-transcripts-doris-kearns-goodwin/",
"https://tim.blog/2018/09/17/the-tim-ferriss-show-transcripts-scott-belsky/"] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.