content stringlengths 5 1.05M |
|---|
import unittest
import os
from modules.DatabaseModule.DBManager import DBManager
from opentera.db.models.TeraAsset import TeraAsset
from opentera.config.ConfigManager import ConfigManager
from tests.opentera.db.models.BaseModelsTest import BaseModelsTest
class TeraAssetTest(BaseModelsTest):
    """Model-level tests for TeraAsset defaults and JSON round-tripping."""

    filename = os.path.join(os.path.dirname(__file__), 'TeraAssetTest.db')

    SQLITE = {
        'filename': filename
    }

    def test_defaults(self):
        # Every stored asset must carry its mandatory identifying fields.
        for current in TeraAsset.query.all():
            self.assertGreater(len(current.asset_name), 0)
            self.assertIsNotNone(current.asset_session)
            self.assertIsNotNone(current.asset_service_uuid)
            self.assertIsNotNone(current.asset_uuid)

    def test_to_json(self):
        # Serialization should always produce a non-empty mapping.
        for current in TeraAsset.query.all():
            serialized = current.to_json()
            self.assertGreater(len(serialized), 0)

    def test_from_json(self):
        # Round-trip: every serialized field survives to_json -> from_json.
        for current in TeraAsset.query.all():
            serialized = current.to_json()
            rebuilt = TeraAsset()
            rebuilt.from_json(serialized)
            self.assertEqual(rebuilt.asset_name, current.asset_name)
            self.assertEqual(rebuilt.asset_service_uuid, current.asset_service_uuid)
            self.assertEqual(rebuilt.asset_type, current.asset_type)
            self.assertEqual(rebuilt.asset_uuid, current.asset_uuid)
            self.assertEqual(rebuilt.id_asset, current.id_asset)
            self.assertEqual(rebuilt.id_device, current.id_device)
            self.assertEqual(rebuilt.id_session, current.id_session)
|
import sys
import os
from PIL import Image

# Usage: python convert.py <source_folder> <destination_folder>
# Converts every image in the source folder to PNG in the destination folder.
actual_folder = sys.argv[1]
new_folder = sys.argv[2]

# Create the destination folder if it does not exist yet.
if not os.path.exists(new_folder):
    os.makedirs(new_folder)

# Loop through the source folder, open each image, convert and save as PNG.
for filename in os.listdir(actual_folder):
    # BUG FIX: the source path never included the current file name
    # (it was the literal string '(unknown)'), so Image.open always failed.
    img = Image.open(os.path.join(actual_folder, filename))
    clean_name = os.path.splitext(filename)[0]
    img.save(os.path.join(new_folder, clean_name + '.png'), 'png')

print('all done!')
|
#Import all the Misc classes
from .Misc import *
#Import all the sprite classes
from .Sprites import *
#Import Screens
from .Screens import *
#Import popups
from .Popups import *
#Import Achievements
from .Achievements import *
#Import Stages
from .Stages import *
#Import instruction screens
from .Instructions import *
#Import AI modes
from .AI_modes import *
#Import the game.py
from .game import *
|
import serial
import struct
from math import sqrt, pow
def chunks(l, n):
    """ Yield successive n-sized chunks from l.

    Args:
        l: a sliceable sequence (list, tuple, str, ...)
        n: chunk length; the final chunk may be shorter
    """
    # BUG FIX: xrange() only exists in Python 2; range() is already lazy in
    # Python 3, so this is the drop-in replacement.
    for i in range(0, len(l), n):
        yield l[i:i + n]
def reverse_bit_order(buf, width):
    """ Permute buf so each element moves to the bit-reversed index.

    Classic FFT output reordering: the element at index i lands at the
    index obtained by reversing i's binary representation over `width` bits.

    Args:
        buf: sequence of length 2**width
        width: number of bits in an index

    Returns: a new list with the elements permuted
    """
    # BUG FIX: the original preallocated with `newbuf = range(len(buf))`,
    # which is immutable in Python 3 and cannot be item-assigned.
    newbuf = [None] * len(buf)
    for i in range(len(buf)):
        origbin = bin(i)[2:].zfill(width)
        newbin = origbin[::-1]
        newi = int(newbin, 2)
        newbuf[newi] = buf[i]
    return newbuf
class VHDLFFT(object):
    """Reads FFT frames from a VHDL FFT core over a serial port."""

    def __init__(self, port, thesize, thelogsize):
        """
        Args:
            port: serial device name (e.g. '/dev/ttyUSB0')
            thesize: number of complex bins per frame
            thelogsize: log2 of thesize (bit width of a bin index)
        """
        self.SIZE = thesize
        self.LOGSIZE = thelogsize
        # Short timeout so an incomplete frame does not block the caller.
        self.ser = serial.Serial(port, 500000, timeout=0.05)

    def read_raw(self):
        """Request one frame; return a list of (re, im) byte pairs, or [] on a short read."""
        # BUG FIX: pyserial's write() expects bytes, not str, on Python 3.
        self.ser.write(b"S")
        data = self.ser.read(self.SIZE * 2)
        if len(data) == self.SIZE * 2:
            outbuf = struct.unpack('BB' * self.SIZE, data)
            outbuf = list(chunks(outbuf, 2))
            #outbuf = reverse_bit_order(outbuf, self.LOGSIZE)
            return outbuf
        return []

    def read(self):
        """Return the magnitude of each complex bin from one frame."""
        newoutbuf = []
        for re, im in self.read_raw():
            newoutbuf.append(sqrt(pow(float(re), 2) + pow(float(im), 2)))
        return newoutbuf
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Greg Hazel
# based on code by Bram Cohen
from __future__ import division
import math
import random
from BTL.obsoletepythonsupport import set
class Choker(object):
    """Periodically decides which peer connections to unchoke (upload to).

    Every `interval` seconds it rotates the connection list round-robin and
    then splits the available upload slots between peers we are downloading
    from and peers we are seeding to, plus optimistic unchokes.
    """

    def __init__(self, config, schedule):
        # config: mapping providing at least 'min_uploads', 'max_uploads'
        # and 'max_upload_rate'.
        # schedule: callable(delay_seconds, fn) used to re-arm the timer.
        self.config = config
        self.schedule = schedule
        self.connections = []
        self.count = 0
        self.unchokes_since_last = 0
        self.interval = 5
        self.shutting_down = False
        #self.magic_number = 6 # magic 6 : (30 / self.interval)
        # Rotation period in ticks (true division is active, so this is 6.0).
        self.magic_number = (30 / self.interval)
        schedule(self.interval, self._round_robin)

    def _round_robin(self):
        """Timer callback: periodically rotate the peer list and rechoke."""
        self.schedule(self.interval, self._round_robin)
        self.count += 1
        # don't do more work than you have to
        if not self.connections:
            return
        # rotation for round-robin
        if self.count % self.magic_number == 0:
            # Rotate so the first choked-but-interested peer comes to the
            # front, giving every interested peer a turn over time.
            for i, c in enumerate(self.connections):
                u = c.upload
                if u.choked and u.interested:
                    self.connections = self.connections[i:] + self.connections[:i]
                    break
        self._rechoke()

    ## new
    ############################################################################
    def _rechoke(self):
        """Recompute the choke/unchoke state of every connection."""
        # step 1:
        # get sorted in order of preference lists of peers
        # one for downloading torrents, and one for seeding torrents
        down_pref = []
        seed_pref = []
        for i, c in enumerate(self.connections):
            u = c.upload
            # Skip peers that are done (nothing missing) or uninterested.
            if c.download.have.numfalse == 0 or not u.interested:
                continue
            # I cry.
            if c.download.multidownload.storage.have.numfalse != 0:
                ## heuristic for downloading torrents
                if not c.download.is_snubbed():
                    ## simple download rate based
                    down_pref.append((-c.download.get_rate(), i))
                    ## ratio based
                    #dr = c.download.get_rate()
                    #ur = max(1, u.get_rate())
                    #ratio = dr / ur
                    #down_pref.append((-ratio, i))
            else:
                ## heuristic for seeding torrents
                ## Uoti special
##                if c._decrypt is not None:
##                    seed_pref.append((self.count, u.get_rate(), i))
##                elif (u.unchoke_time > self.count - self.magic_number or
##                      u.buffer and c.connection.is_flushed()):
##                    seed_pref.append((u.unchoke_time, u.get_rate(), i))
##                else:
##                    seed_pref.append((1, u.get_rate(), i))
                ## sliding, first pass (see below)
                r = u.get_rate()
                # Encrypted connections get priority class 2 over class 1.
                if c._decrypt is not None:
                    seed_pref.append((2, r, i))
                else:
                    seed_pref.append((1, r, i))
        down_pref.sort()
        seed_pref.sort()
        #pprint(down_pref)
        #pprint(seed_pref)
        # Resolve the sorted index tuples back into connection objects.
        down_pref = [ self.connections[i] for junk, i in down_pref ]
        seed_pref = [ self.connections[i] for junk, junk, i in seed_pref ]
        max_uploads = self._max_uploads()
        ## sliding, second pass
        ## # up-side-down sum for an idea of capacity
        ## uprate_sum = sum(rates[-max_uploads:])
        ## if max_uploads == 0:
        ##     avg_uprate = 0
        ## else:
        ##     avg_uprate = uprate_sum / max_uploads
        ## #print 'avg_uprate', avg_uprate, 'of', max_uploads
        ## self.extra_slots = max(self.extra_slots - 1, 0)
        ## if avg_uprate > self.arbitrary_min:
        ##     for r in rates:
        ##         if r < (avg_uprate * 0.80): # magic 80%
        ##             self.extra_slots += 2
        ##             break
        ## self.extra_slots = min(len(seed_pref), self.extra_slots)
        ## max_uploads += self.extra_slots
        ## #print 'plus', self.extra_slots
        # step 2:
        # split the peer lists by a ratio to fill the available upload slots
        d_uploads = max(1, int(round(max_uploads * 0.70)))
        s_uploads = max(1, int(round(max_uploads * 0.30)))
        #print 'original', 'ds', d_uploads, 'us', s_uploads
        # Spill unused download slots over to seeding, and vice versa.
        extra = max(0, d_uploads - len(down_pref))
        if extra > 0:
            s_uploads += extra
            d_uploads -= extra
        extra = max(0, s_uploads - len(seed_pref))
        if extra > 0:
            s_uploads -= extra
            d_uploads = min(d_uploads + extra, len(down_pref))
        #print 'ds', d_uploads, 'us', s_uploads
        down_pref = down_pref[:d_uploads]
        seed_pref = seed_pref[:s_uploads]
        preferred = set(down_pref)
        preferred.update(seed_pref)
        # step 3:
        # enforce unchoke states
        count = 0
        to_choke = []
        for i, c in enumerate(self.connections):
            u = c.upload
            if c in preferred:
                u.unchoke(self.count)
                count += 1
            else:
                to_choke.append(c)
        # step 4:
        # enforce choke states and handle optimistics
        count = 0
        optimistics = max(self.config['min_uploads'],
                          max_uploads - len(preferred))
        #print 'optimistics', optimistics
        for c in to_choke:
            u = c.upload
            if c.download.have.numfalse == 0:
                # Peer already has everything: no point uploading to it.
                u.choke()
            elif count >= optimistics:
                u.choke()
            else:
                # this one's optimistic
                u.unchoke(self.count)
                if u.interested:
                    count += 1
    ############################################################################

    def shutdown(self):
        """Stop rechoking on connection loss (see connection_lost)."""
        self.shutting_down = True

    def connection_made(self, connection):
        """Insert a new connection at a random rotation position."""
        p = random.randrange(len(self.connections) + 1)
        self.connections.insert(p, connection)

    def connection_lost(self, connection):
        """Drop a connection; free its slot immediately if it was unchoked."""
        self.connections.remove(connection)
        if (not self.shutting_down and
            connection.upload.interested and not connection.upload.choked):
            self._rechoke()

    def interested(self, connection):
        """Peer became interested; rechoke if it already holds a slot."""
        if not connection.upload.choked:
            self._rechoke()

    def not_interested(self, connection):
        """Peer lost interest; rechoke if it was occupying a slot."""
        if not connection.upload.choked:
            self._rechoke()

    def _max_uploads(self):
        """Return the upload-slot budget, derived from the configured rate
        when no explicit 'max_uploads' is set."""
        uploads = self.config['max_uploads']
        rate = self.config['max_upload_rate'] / 1024
        if uploads > 0:
            pass
        elif rate <= 0:
            uploads = 7 # unlimited, just guess something here...
        elif rate < 9:
            uploads = 2
        elif rate < 15:
            uploads = 3
        elif rate < 42:
            uploads = 4
        else:
            uploads = int(math.sqrt(rate * .6))
        return uploads
|
"""
This script was created by Nick on 19/07/20.
It implements the data pipeline (e.g. a custom class subclassing torch.utils.data.Dataset).
"""
|
from copy import copy, deepcopy
import pytest
from zquantum.core.history.example_functions import (
Function2,
Function5,
function_1,
function_3,
function_4,
function_6,
)
from zquantum.core.history.recorder import recorder
@pytest.mark.parametrize("func", [Function2(5), function_3, function_4, Function5(0.5)])
def test_recorder_can_be_copied_shallowly(func):
    """A shallow copy of a recorder shares its target and predicate objects."""
    wrapped = recorder(func)
    shallow = copy(wrapped)
    assert shallow.target is wrapped.target
    assert shallow.predicate is wrapped.predicate
@pytest.mark.parametrize("func", [Function2(5), function_3, function_4, Function5(0.5)])
def test_recorder_can_be_copied_deeply(func):
    """A deep copy of a recorder compares equal on target and predicate."""
    wrapped = recorder(func)
    deep = deepcopy(wrapped)
    assert deep.target == wrapped.target
    assert deep.predicate == wrapped.predicate
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ManagedInstanceGroup: describes a managed instance group
"""
from googleapiclient.http import HttpError
from vm_network_migration.modules.instance_group_modules.instance_group import InstanceGroup
class ManagedInstanceGroup(InstanceGroup):
    """ A managed instance group and the operations used to migrate it. """

    def __init__(self, compute, project, instance_group_name, network_name,
                 subnetwork_name, preserve_instance_ip):
        """ Initialization

        Args:
            compute: google compute engine
            project: project ID
            instance_group_name: name of the instance group
            network_name: target network
            subnetwork_name: target subnet
            preserve_instance_ip: (only valid for unmanaged instance group) whether
                to preserve instances external IPs
        """
        super(ManagedInstanceGroup, self).__init__(compute, project,
                                                   instance_group_name,
                                                   network_name,
                                                   subnetwork_name,
                                                   preserve_instance_ip)
        self.instance_group_manager_api = None
        self.autoscaler_api = None
        self.operation = None
        # self.zone_or_region is the region name for a RegionManagedInstanceGroup,
        # and is the zone name for a SingleZoneManagedInstanceGroup
        self.zone_or_region = None
        self.original_instance_group_configs = None
        self.new_instance_group_configs = None
        self.is_multi_zone = False
        self.autoscaler = None
        self.autoscaler_configs = None
        self.selfLink = None

    def _wait_for_operation(self, operation_name):
        """ Block until the given compute operation finishes.

        Dispatches to the region- or zone-scoped waiter depending on whether
        this group spans multiple zones. Extracted because this if/else was
        duplicated after every insert/delete/setTargetPools call.

        Args:
            operation_name: name of the operation to wait for
        """
        if self.is_multi_zone:
            self.operation.wait_for_region_operation(operation_name)
        else:
            self.operation.wait_for_zone_operation(operation_name)

    def get_instance_group_configs(self) -> dict:
        """ Get the configs of the instance group

        Returns: configs
        """
        args = {
            'project': self.project,
            'instanceGroupManager': self.instance_group_name
        }
        self.add_zone_or_region_into_args(args)
        return self.instance_group_manager_api.get(**args).execute()

    def create_instance_group(self, configs) -> dict:
        """ Create an instance group

        Args:
            configs: instance group's configs

        Returns: a deserialized object of the response
        """
        args = {
            'project': self.project,
            'body': configs
        }
        self.add_zone_or_region_into_args(args)
        create_instance_group_operation = self.instance_group_manager_api.insert(
            **args).execute()
        self._wait_for_operation(create_instance_group_operation['name'])
        # If an autoscaler serves the original instance group,
        # it should be recreated
        if self.autoscaler is not None and not self.autoscaler_exists():
            self.insert_autoscaler()
        return create_instance_group_operation

    def delete_instance_group(self) -> dict:
        """ Delete an instance group

        Returns: a deserialized object of the response
        """
        # The autoscaler must go first; it cannot outlive its target group.
        if self.autoscaler is not None and self.autoscaler_exists():
            self.delete_autoscaler()
        args = {
            'project': self.project,
            'instanceGroupManager': self.instance_group_name
        }
        self.add_zone_or_region_into_args(args)
        delete_instance_group_operation = self.instance_group_manager_api.delete(
            **args).execute()
        self._wait_for_operation(delete_instance_group_operation['name'])
        return delete_instance_group_operation

    def retrieve_instance_template_name(self, instance_group_configs) -> str:
        """ Get the name of the instance template which is used by
        the instance group

        Args:
            instance_group_configs: configs of the instance group

        Returns: name of the instance template
        """
        instance_template_link = instance_group_configs['instanceTemplate']
        # The API returns a full selfLink URL; the name is its last segment.
        return instance_template_link.split('/')[-1]

    def modify_instance_group_configs_with_instance_template(self,
                                                             instance_group_configs,
                                                             instance_template_link) -> dict:
        """ Modify the instance group with the new instance template link

        Args:
            instance_group_configs: configs of the instance group
            instance_template_link: instance template link

        Returns: modified configs of the instance group
        """
        instance_group_configs['instanceTemplate'] = instance_template_link
        instance_group_configs['versions'][0][
            'instanceTemplate'] = instance_template_link
        return instance_group_configs

    def add_zone_or_region_into_args(self, args):
        """ Add the zone/region key into args.

        Args:
            args: a dictionary object
        """
        if self.is_multi_zone:
            args['region'] = self.zone_or_region
        else:
            args['zone'] = self.zone_or_region

    def get_autoscaler(self):
        """ Get the autoscaler's name which is serving the instance group

        Returns: autoscaler's name if there is an autoscaler
        """
        if self.original_instance_group_configs is None:
            self.original_instance_group_configs = self.get_instance_group_configs()
        if 'autoscaler' not in self.original_instance_group_configs['status']:
            return None
        else:
            return \
                self.original_instance_group_configs['status'][
                    'autoscaler'].split(
                    '/')[-1]

    def get_autoscaler_configs(self):
        """ Get the configs of the instance group's autoscaler

        Returns: configs, or None if the group has no autoscaler
        """
        if self.autoscaler is not None:
            args = {
                'project': self.project,
                'autoscaler': self.autoscaler
            }
            self.add_zone_or_region_into_args(args)
            autoscaler_configs = self.autoscaler_api.get(**args).execute()
            return autoscaler_configs
        return None

    def autoscaler_exists(self) -> bool:
        """ Check if the autoscaler exists

        Returns: boolean
        """
        try:
            autoscaler_configs = self.get_autoscaler_configs()
        except HttpError:
            # A 404 (or any API error) means we cannot see the autoscaler.
            return False
        else:
            return autoscaler_configs is not None

    def delete_autoscaler(self) -> dict:
        """ Delete the autoscaler

        Returns: a deserialized object of the response
        """
        args = {
            'project': self.project,
            'autoscaler': self.autoscaler
        }
        self.add_zone_or_region_into_args(args)
        delete_autoscaler_operation = self.autoscaler_api.delete(
            **args).execute()
        self._wait_for_operation(delete_autoscaler_operation['name'])
        return delete_autoscaler_operation

    def insert_autoscaler(self) -> dict:
        """ Create an autoscaler from the saved autoscaler_configs

        Returns: a deserialized object of the response
        """
        args = {
            'project': self.project,
            'body': self.autoscaler_configs
        }
        self.add_zone_or_region_into_args(args)
        insert_autoscaler_operation = self.autoscaler_api.insert(
            **args).execute()
        self._wait_for_operation(insert_autoscaler_operation['name'])
        return insert_autoscaler_operation

    def set_target_pool(self, target_pool_selfLink):
        """ Add a target pool to the managed instance group

        Args:
            target_pool_selfLink: selfLink of the target pool

        Returns: a deserialized Python object of the response
        """
        current_target_pools = self.get_target_pools()
        current_target_pools.append(target_pool_selfLink)
        args = {
            'project': self.project,
            'instanceGroupManager': self.instance_group_name,
            'body': {
                'targetPools': current_target_pools
            }
        }
        self.add_zone_or_region_into_args(args)
        set_target_pool_operation = self.instance_group_manager_api.setTargetPools(
            **args).execute()
        self._wait_for_operation(set_target_pool_operation['name'])
        return set_target_pool_operation

    def remove_target_pool(self, target_pool_selfLink):
        """ Remove the target pool of the managed instance group

        Args:
            target_pool_selfLink: selfLink of the target pool

        Returns: a deserialized Python object of the response
        """
        current_target_pools = self.get_target_pools()
        current_target_pools.remove(target_pool_selfLink)
        args = {
            'project': self.project,
            'instanceGroupManager': self.instance_group_name,
            'body': {
                'targetPools': current_target_pools
            }
        }
        self.add_zone_or_region_into_args(args)
        remove_target_pool_operation = self.instance_group_manager_api.setTargetPools(
            **args).execute()
        self._wait_for_operation(remove_target_pool_operation['name'])
        return remove_target_pool_operation

    def get_target_pools(self) -> list:
        """ Get a list of target pools served by the instance group

        Returns: a list of target pools' selfLink
        """
        configs = self.get_instance_group_configs()
        if 'targetPools' not in configs:
            return []
        return configs['targetPools']

    def list_instances(self) -> list:
        """ List managed instances' selfLinks

        Returns: a list of instances' selfLinks
        """
        instance_selfLinks = []
        args = {
            'project': self.project,
            'instanceGroupManager': self.instance_group_name,
        }
        self.add_zone_or_region_into_args(args)
        list_instances_operation = self.instance_group_manager_api.listManagedInstances(
            **args).execute()
        for item in list_instances_operation['managedInstances']:
            instance_selfLinks.append(item['instance'])
        return instance_selfLinks
|
import requests

# Request types: one helper per HTTP verb.
r = requests.get('http://api.github.com/events')
r = requests.post('http://api.github.com/events',
                  data={'key': 'value'})
r = requests.put('http://httpbin.org/put', data={'key': 'value'})
r = requests.delete('http://httpbin.org/delete')
r = requests.head('http://httpbin.org/get')
r = requests.options('http://httpbin.org/get')

# Check response status.
r.status_code == requests.codes.ok
# OR
r.raise_for_status()

# Embed in a try-except statement.
try:
    r.raise_for_status()
except Exception as exc:
    # BUG FIX: this was a Python 2 print statement (SyntaxError on Python 3).
    print('There was a problem : {}'.format(exc))

# Passing parameters in URLs.
payload = {'key1': 'value1', 'key2': ['value2', 'value3']}
# BUG FIX: the variable was misspelled 'paylaod', raising NameError.
r = requests.get('http://api.github.com/events', params=payload)
#http://httpbin.org/get?key1=value1&key2=value2&key2=value3

## Saving Downloaded Files to the Hard Drive ##
# Pattern to use for saving a DLed stream to a file.
# BUG FIX: filename and byte_size were referenced but never defined.
filename = 'download.bin'
byte_size = 10000
with open(filename, 'wb') as fb:
    for piece in r.iter_content(byte_size):
        fb.write(piece)
|
from tensorflow.python.lib.io import file_io
import os
base_path = 'gs://sacred-reality-201417-mlengine/models'
def save_model(file_name):
    """Copy a local model file into the GCS models folder.

    Reads `file_name` from the local filesystem and writes its contents
    to `{base_path}/{file_name}` via TensorFlow's file_io.
    """
    with file_io.FileIO(file_name, mode='rb') as f:
        # NOTE(review): the source is opened in binary mode ('rb') but the
        # destination in text mode ('w+') -- presumably file_io passes bytes
        # through unchanged on GCS; confirm, otherwise binary model files
        # could be corrupted.
        with file_io.FileIO(os.path.join(base_path, file_name), mode='w+') as output_f:
            output_f.write(f.read())
def get_filename(base_name):
    """Return the first '<base_name>-<n>' name that does not exist yet."""
    counter = 1
    candidate = generate_name(base_name, counter)
    while file_io.file_exists(candidate):
        counter += 1
        candidate = generate_name(base_name, counter)
    return candidate
def generate_name(name, counter):
    """Build the versioned file name '<name>-<counter>'."""
    return '%s-%s' % (name, counter)
|
class BaseAWSLogsException(Exception):
    """Root of the awslogs exception hierarchy; `code` is the exit status."""

    code = 1

    def hint(self):
        """Return a human-readable explanation of this error."""
        return "Unknown Error."
class UnknownDateError(BaseAWSLogsException):
    """Raised when a user-supplied date string cannot be parsed."""

    code = 3

    def hint(self):
        """Explain which argument failed to parse as a date."""
        return "awslogs doesn't understand '{0}' as a date.".format(self.args[0])
class TooManyStreamsFilteredError(BaseAWSLogsException):
    """Raised when a stream pattern matches more streams than AWS allows.

    args: (pattern, matched_count, aws_limit)
    """
    code = 6

    def hint(self):
        # BUG FIX: a space was missing between "...to {2}." and "It might...",
        # producing "to 100.It might" in the user-facing message.
        return ("The number of streams that match your pattern '{0}' is '{1}'. "
                "AWS API limits the number of streams you can filter by to {2}. "
                "It might be helpful to you to not filter streams by any "
                "pattern and filter the output of awslogs.").format(*self.args)
class NoStreamsFilteredError(BaseAWSLogsException):
    """Raised when a stream filter pattern matches no streams at all."""

    code = 7

    def hint(self):
        """Explain that the given pattern matched nothing."""
        return "No streams match your pattern '{0}'.".format(*self.args)
|
# coding: utf-8
'''
3. Finding Experts on the Java Programming Language on StackOverflow [4 points]
Download the StackOverflow network stackoverflow-Java.txt.gz:
http://snap.stanford.edu/class/cs224w-data/hw0/stackoverflow-Java.txt.gz.
An edge (a, b) in the network means that person a endorsed an answer from
person b on a Java-related question.
'''
import snap
# Load the network
SOURCE_FILE = './data/stackoverflow-Java.txt'
# Directed graph: an edge (a, b) means person a endorsed an answer by person b.
SOGraph = snap.LoadEdgeList(snap.PNGraph, SOURCE_FILE, 0, 1)
# Sanity-check the load against the dataset's published node/edge counts.
assert 146874 == SOGraph.GetNodes()
assert 333606 == SOGraph.GetEdges()
def sortTIntFltH(mapping, desc=True):
    """Return (nodeId, score) pairs from *mapping*, sorted by score.

    Descending by default; set desc=False for ascending order.
    """
    pairs = [(node_id, mapping[node_id]) for node_id in mapping]
    pairs.sort(key=lambda pair: pair[1], reverse=desc)
    return pairs
# 3.1: count weakly connected components.
components = snap.TCnComV()
snap.GetWccs(SOGraph, components)
# BUG FIX: a space was missing between "network" and "is" in the message.
print("The number of weakly connected components in the SO network "
      "is %s." % (len(components)))
# 3.2: size of the largest weakly connected component.
maxWeaklyConnectedComponent = snap.GetMxWcc(SOGraph)
# BUG FIX: same missing-space problem between "network" and "has".
print("The largest weakly connected component in the SO network "
      "has %s nodes and %s edges." % (
          maxWeaklyConnectedComponent.GetNodes(),
          maxWeaklyConnectedComponent.GetEdges()))
# 3.3: top nodes by PageRank.
TOPN = 3
SOPageRanks = snap.TIntFltH()
snap.GetPageRank(SOGraph, SOPageRanks, 0.85, 1e-4, 1000)
sortedSOPageRanks = sortTIntFltH(SOPageRanks)
print("The node IDs of the top %s most central nodes in the network "
      "by PageRank scores are %s with scores %s respectively." % (
          TOPN,
          tuple(t[0] for t in sortedSOPageRanks[:TOPN]),
          tuple(t[1] for t in sortedSOPageRanks[:TOPN])))
# 3.4: top hubs and authorities by HITS.
TOPN = 3
hubsScores = snap.TIntFltH()
authScores = snap.TIntFltH()
snap.GetHits(SOGraph, hubsScores, authScores, 100)
sortedHubScores = sortTIntFltH(hubsScores)
sortedAuthScores = sortTIntFltH(authScores)
print("The node IDs of the top %s hubs in the network by HITS scores "
      "are %s with scores %s respectively." % (
          TOPN,
          tuple(t[0] for t in sortedHubScores[:TOPN]),
          tuple(t[1] for t in sortedHubScores[:TOPN])))
# BUG FIX: a bare `print` is a no-op expression in Python 3 (the rest of the
# script uses print()); call it to emit the intended blank line.
print()
print("The node IDs of the top %s authorities in the network by HITS "
      "scores are %s with score %s respectively." % (
          TOPN,
          tuple(t[0] for t in sortedAuthScores[:TOPN]),
          tuple(t[1] for t in sortedAuthScores[:TOPN])))
|
from ..src.methods import Newton
from ..src.function_examples import f_root
def test_accuracy():
    """Newton's method after 3 iterations matches the hand-computed iterate."""
    f_root['max_it'] = 3
    expected = 2.238095238095238095238  # iterate value at it=3
    actual = Newton(**f_root).solve(**f_root)
    print(expected, " vs ", actual)
    assert round(actual, 6) == round(expected, 6)
|
from __future__ import print_function
import time
import itertools
import json
import traceback
from metis.Sample import DBSSample
from metis.CMSSWTask import CMSSWTask
from metis.StatsParser import StatsParser
from metis.Utils import send_email
if __name__ == "__main__":

    # Primary datasets to ntuplize.
    pds = ["MuonEG","SingleElectron","MET","SinglePhoton","SingleMuon","DoubleMuon","JetHT","DoubleEG","HTMHT"]
    # (era, reco version) pairs; finished eras are kept commented for reference.
    proc_vers = [
            # ("Run2017B","v1"),
            # ("Run2017B","v2"),
            # ("Run2017C","v1"),
            # ("Run2017C","v2"),
            # ("Run2017C","v3"),
            # ("Run2017D","v1"),
            # ("Run2017E","v1"),
            ("Run2017F","v1"),
            ]
    dataset_names = ["/{0}/{1}-PromptReco-{2}/MINIAOD".format(x[0],x[1][0],x[1][1]) for x in itertools.product(pds,proc_vers)]

    # Long-running monitor loop: submit/advance every task roughly once an hour.
    for i in range(10000):
        total_summary = {}
        for dsname in dataset_names:
            # Per-era CMSSW release and library tarball overrides.
            open_dataset = False
            cmsswver = "CMSSW_9_2_7_patch1"
            tarfile = "/nfs-7/userdata/libCMS3/lib_CMS4_V00-00-06.tar.gz"
            if "2017C-PromptReco-v2" in dsname:
                open_dataset = False
            if "2017C-PromptReco-v3" in dsname:
                cmsswver = "CMSSW_9_2_8"
                tarfile = "/nfs-7/userdata/libCMS3/lib_CMS4_V00-00-06_928.tar.gz"
                open_dataset = False
            if "2017D-PromptReco-v1" in dsname:
                cmsswver = "CMSSW_9_2_10"
                tarfile = "/nfs-7/userdata/libCMS3/lib_CMS4_V00-00-06_9210.tar.gz"
                open_dataset = False
            if "2017E-PromptReco-v1" in dsname:
                cmsswver = "CMSSW_9_2_12"
                tarfile = "/nfs-7/userdata/libCMS3/lib_CMS4_V00-00-06_9212.tar.gz"
                open_dataset = False
            if "2017F-PromptReco-v1" in dsname:
                cmsswver = "CMSSW_9_2_13"
                tarfile = "/nfs-7/userdata/libCMS3/lib_CMS4_V00-00-06_9213.tar.gz"
                open_dataset = False
            try:
                task = CMSSWTask(
                        sample = DBSSample(dataset=dsname),
                        open_dataset = open_dataset,
                        flush = ((i+1)%48==0),
                        # flush = ((i)%48==0),
                        events_per_output = 450e3,
                        output_name = "merged_ntuple.root",
                        tag = "CMS4_V00-00-06",
                        global_tag = "", # if global tag blank, one from DBS is used
                        pset = "main_pset.py",
                        pset_args = "data=True prompt=True",
                        cmssw_version = cmsswver,
                        condor_submit_params = {"use_xrootd":True},
                        tarfile = tarfile,
                        is_data = True,
                        publish_to_dis = True,
                )
                task.process()
            except Exception:
                # BUG FIX: was a bare `except:`, and if CMSSWTask(...) itself
                # raised, `task` was unbound so the summary line below crashed
                # with a NameError. Report the failure and skip this dataset.
                traceback_string = traceback.format_exc()
                print("Runtime error:\n{0}".format(traceback_string))
                send_email(subject="metis error", body=traceback_string)
                continue
            total_summary[dsname] = task.get_task_summary()
        # Removed unused `total_counts` accumulator.
        StatsParser(data=total_summary, webdir="~/public_html/dump/metis/", make_plots=False).do()
        # time.sleep(1.*3600)
        time.sleep(60.*60)
|
class NodeCircle:
    """Singly-linked list node with a Floyd cycle-detection helper."""

    def __init__(self, val):
        self.val = val      # payload
        self.next = None    # next node, or None at the tail

    def has_circle(self, head):
        """Return True if the list starting at *head* contains a cycle.

        Floyd's tortoise-and-hare: the fast pointer advances two steps per
        iteration, the slow pointer one; they can only meet if the list
        loops back on itself.

        BUG FIX: the parameter was misspelled 'haed', so the body's
        'fast = head' raised NameError on every call.
        """
        slow = head
        fast = head
        while slow and fast:
            fast = fast.next
            slow = slow.next
            if fast:
                fast = fast.next
            if fast == slow:
                break
        if fast and slow and (fast == slow):
            return True
        else:
            return False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SenderInfoVO(object):
    """Value object describing a sender's contact and address details."""

    # Field names shared by the dict-conversion helpers; the order matches
    # the serialization order of the generated API model.
    _FIELDS = ('area', 'city', 'detail_address', 'mobile', 'name', 'province')

    def __init__(self):
        self._area = None
        self._city = None
        self._detail_address = None
        self._mobile = None
        self._name = None
        self._province = None

    @property
    def area(self):
        return self._area

    @area.setter
    def area(self, value):
        self._area = value

    @property
    def city(self):
        return self._city

    @city.setter
    def city(self, value):
        self._city = value

    @property
    def detail_address(self):
        return self._detail_address

    @detail_address.setter
    def detail_address(self, value):
        self._detail_address = value

    @property
    def mobile(self):
        return self._mobile

    @mobile.setter
    def mobile(self, value):
        self._mobile = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def province(self):
        return self._province

    @province.setter
    def province(self, value):
        self._province = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict for the Alipay API."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                # Nested API model objects serialize themselves.
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a SenderInfoVO from an Alipay response dict; None if empty."""
        if not d:
            return None
        o = SenderInfoVO()
        for field in SenderInfoVO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
import re
import os
import sys
import json
import pickle
import humanize
import datetime
import inspect
import logging
from time import time
from functools import wraps
from datetime import datetime
from typing import Tuple, Callable, Union, Dict, List
from .utils import get_params, parse_time, random_string
from .backends import Backend
# Dictionary are not hashable and the python hash is not consistent
# between runs so we have to use an external dictionary hashing package
# else we will not be able to load the saved caches.
from dict_hash import sha256
# Maps the human-readable level names accepted by `Cache(log_level=...)` to
# the corresponding stdlib `logging` constants. Both "warn"/"warning" and
# "crit"/"critical" aliases are accepted.
log_levels = {
    "debug":logging.DEBUG,
    "info":logging.INFO,
    "warn":logging.WARN,
    "warning":logging.WARNING,
    "error":logging.ERROR,
    "critical":logging.CRITICAL,
    "crit":logging.CRITICAL,
}
def cache(function):
    """Cache with default parameters"""
    # Equivalent to applying @Cache() with every option at its default;
    # lets callers use the bare decorator form `@cache`.
    return Cache()(function)
class Cache:
    """Decorator that caches the results of a function (or method) on disk."""

    def __init__(
        self,
        cache_path: Union[str, Tuple[str], List[str], Dict[str, str]] = "{cache_dir}/{function_name}/{_hash}.pkl",
        args_to_ignore: Tuple[str] = (),
        cache_dir: str = None,
        validity_duration: Union[int, str] = -1,
        use_source_code: bool = True,
        log_level: str = "critical",
        log_format: str = '%(asctime)-15s [%(levelname)s]: %(message)s',
        backup_path: str = None,
        backup: bool = True,
        dump_kwargs: dict = None,
        load_kwargs: dict = None,
    ):
        """
        Cache the results of a function (or method).

        Example:
        ```
        from cache_decorator import Cache
        @Cache()
        def test(x):
            return 2 * x
        ```

        Arguments
        ---------
        cache_path: Union[str, Tuple[str], List[str], Dict[str, str]] = "{cache_dir}/{function_name}/{_hash}.pkl",
            Where to save the caches.
            It's a string format and the available variables are
            `cache_dir` the directory specified in the other argument.
            `function_name` the name of the cached function.
            `args_to_ignore` which arguments can be ignored from the input.
            `args` the name of the arguments (both positional and keyword).
            `defaults` the default values of the positional arguments.
            `kwonlydefaults` the default values of the kwarguments.
            `source` if `use_source_code` is set to true, it's the string
                with the source code of the function to cache.
            `_hash` it's the hash of the parameters (excluding the ignored ones),
                this is computed only if it's present in `cache_path` so
                it's possible to cache functions which take non-hashable arguments.
            Moreover, you can use any argument passed to the function.
            Example:
            ```
            from cache_decorator import Cache
            @Cache("{cache_dir}/{x}/{y}.pkl")
            def test(x, y):
                return x * y
            ```
            The extension used in the format string determines the serialization method.
            The available ones are `.json .json.gz .json.bz .json.lzma .pkl .pkl.gz .pkl.bz
            .pkl.lzma .pkl.zip .npy .npz .csv .csv.gz .csv.bz2 .csv.zip .csv.xz .xlsx`
            This can also be used to make multiple arguments use the same cache:
            Example:
            ```
            from cache_decorator import Cache
            @Cache("{cache_dir}/{x}.pkl")
            def test(x, y):
                return x * y
            ```
            In this case the cache will be used watching only the `x` variable and
            the `y` is ignored. So `test(1, 2)` and `test(1, 10000)` will use the same
            cache (even if that's not right!). This can be used to save human readable
            partial results, in any other cases you should use the `_hash`.
        args_to_ignore: Tuple[str] = (),
            Which arguments to ignore when computing the hash.
        cache_dir: str = None,
            The folder where to save the caches. If not specified it reads the value of
            the environment variable `CACHE_DIR`. If even this is empty it defaults to
            "./cache". This value is substituted in the `cache_path` argument if present.
        validity_duration: Union[int, str] = -1,
            If set, the cache will be recomputed after the specified amount of time.
            This is tracked through the `creation_time` field of the metadata file
            saved next to the cache (same name plus `.metadata` — see
            `_get_metadata_path`). If `validity_duration` is specified and a cache
            does not have its metadata file, it's considered invalid.
            The given time must be an integer in seconds or a string in the format
            (\d+[smhdw]) to specify a given amount of s(econds), m(inutes), h(ours),
            d(ays), w(eeks). The default `-1` is handled by `parse_time`
            (presumably meaning "never expires" — confirm against `parse_time`).
        use_source_code: bool = True,
            If the computing of the hash must also use the source code of the
            cached function.
        log_level: str = "critical",
            Set the logger level to the wanted level. The usable levels are:
            ["debug", "info", "warning", "error", "critical"]
            Alternatively a reference to the logger can be obtained with
            `logging.getLogger("cache." + function.__name__)`
            so it's possible to fully customize it, like setting the level and
            adding filehandlers.
            Example:
            ```
            import logging
            from cache_decorator import Cache
            @Cache()
            def test(x):
                return 2 * x
            logger = logging.getLogger("cache.test")
            logger.setLevel(logging.DEBUG)
            ```
        log_format: str = '%(asctime)-15s [%(levelname)s]: %(message)s',
            Formatting of the default logger on stderr. Information on how the
            formatting works can be found at
            https://docs.python.org/3/library/logging.html . Moreover, as explained
            in `log_level`, you can get a reference to the logger and fully
            customize it.
        backup_path: str = None,
            If the serialization fails, the decorator will try to save the computed
            result as a pickle. This parameter is the formatter for the path where
            to save the backup result. If it's None, the `cache_path` formatter is
            reused as-is (NOTE(review): previous docs claimed `_backup.pkl` was
            appended, but `_get_formatted_path` just falls back to `cache_path` —
            confirm the intended behavior). This will never overwrite any file, so
            if a file at the computed path is present, a random path will be
            generated. In the formatter you can use any variable such as
            {cache_dir}, {cache_path}, or the arguments of the function. Moreover,
            there are also two additional parameters: {_date}, the date of the
            backup, and {_rnd}, a random string that guarantees that no file has
            the same name.
        backup: bool = True,
            If the cache should backup the result to a .pkl in case of exception
            during the serialization. This flag is mainly for debug purposes.
        dump_kwargs: dict = None,
            Extra keyword arguments forwarded to the backend when dumping a cache.
        load_kwargs: dict = None,
            Extra keyword arguments forwarded to the backend when loading a cache.
        """
        self.log_level = log_level
        self.log_format = log_format
        self.args_to_ignore = args_to_ignore
        self.use_source_code = use_source_code
        self.validity_duration = parse_time(validity_duration)
        self.cache_path = cache_path
        self.is_backup_enabled = backup
        self.backup_path = backup_path
        self.cache_dir = cache_dir or os.environ.get("CACHE_DIR", "./cache")
        # Fixed: `load_kwargs`/`dump_kwargs` previously used mutable `{}`
        # defaults, which would be shared across every decorator instance.
        self.load_kwargs = load_kwargs if load_kwargs is not None else {}
        self.dump_kwargs = dump_kwargs if dump_kwargs is not None else {}
        self._check_path_sanity(cache_path)

    def _check_path_sanity(self, path: Union[str, Tuple[str], List[str], Dict[str, str]]):
        """Check that at least one backend exists that can handle the given path.

        This is just a quality of life check to raise an exception early and not
        after the computation is done.
        """
        test_bk = Backend({}, {})
        if isinstance(path, str):
            if not test_bk.support_path(path):
                # Fixed grammar of the user-facing message ("is not backend").
                raise ValueError((
                    "There is no backend that can support the path '{}'. "
                    "The available extensions are '{}'."
                ).format(path, test_bk.get_supported_extensions()))
        elif isinstance(path, (list, tuple)):
            for sub_path in path:
                self._check_path_sanity(sub_path)
        elif isinstance(path, dict):
            # Only the paths matter here; the keys are ignored.
            for sub_path in path.values():
                self._check_path_sanity(sub_path)
        else:
            # Fixed: the two sentences were previously concatenated without a space.
            raise ValueError((
                "Sorry, the path '{}' is not in one of the supported formats. "
                "We support a string, a list, tuple, or dict of paths."
            ).format(path)
            )

    @staticmethod
    def store(obj, path: str) -> None:
        """Store an object at a path, automatically choosing the correct backend.

        Arguments
        ---------
        obj: Object,
            The object to store.
        path: str,
            Where to store the file; based on its extension the correct
            backend is chosen.
        """
        dirname = os.path.dirname(os.path.abspath(path))
        if dirname != "":
            os.makedirs(dirname, exist_ok=True)
        Backend({}, {}).dump(obj, path)

    @staticmethod
    def load(path: str):
        """
        Load an object from a file, automatically choosing the correct backend.

        Arguments
        ---------
        path: str,
            The path to the file to load; based on its extension the correct
            backend is chosen.

        Returns
        -------
        The loaded object.
        """
        return Backend({}, {}).load({}, path)

    @staticmethod
    def compute_path(function: Callable, *args, **kwargs) -> str:
        """Return the path that a cache file would have if the given function
        would be called with the given arguments.
        """
        # If we are dealing with a cached function then unpack it:
        if "__cached_function" not in dir(function):
            raise ValueError("You cannot compute the path of a function which is not decorated with the Cache decorator.")
        instance = getattr(function, "__cacher_instance")
        function = getattr(function, "__cached_function")
        return instance._get_formatted_path(args, kwargs, function_info=instance._compute_function_info(function))

    def _compute_function_info(self, function: Callable):
        """Collect signature (and optionally source) info used for hashing and path formatting."""
        function_args_specs = inspect.getfullargspec(function)
        function_info = {
            # Name of the function
            "function_name": function.__name__,
            # Arguments names
            "args": function_args_specs.args or [],
            "defaults": function_args_specs.defaults or [],
            "kwonlydefaults": function_args_specs.kwonlydefaults or {},
            "args_to_ignore": self.args_to_ignore,
        }
        if self.use_source_code:
            # Get the source code of the function.
            # This is included in the hash so that stale
            # caches are not loaded when the code changes.
            function_info["source"] = "".join(
                inspect.getsourcelines(function)[0]
            )
        return function_info

    def _backup(self, result, path, exception, args, kwargs):
        """Handle the backing-up of the data when the serialization fails.

        Returns the (annotated) exception so the caller can re-raise it.
        """
        # Structured paths: back up against the first sub-path.
        if isinstance(path, (list, tuple)):
            return self._backup(result, path[0], exception, args, kwargs)
        elif isinstance(path, dict):
            # Fixed: `next(path.values())` raised TypeError because a
            # dict_values view is not an iterator.
            return self._backup(result, next(iter(path.values())), exception, args, kwargs)
        date = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        # Ensure that we won't overwrite anything
        while True:
            # Generate a new random value
            rnd = random_string(40)  # hardcoded length but if they are enough for git it's enough for us.
            backup_path = self._get_formatted_path(
                args, kwargs,
                formatter=self.backup_path,
                extra_kwargs={
                    "_date": date,
                    "_rnd": rnd,
                    "cache_path": self._get_formatted_path(args, kwargs),
                }
            )
            # Check for existence
            if os.path.exists(backup_path):
                # If the file exists and the rnd var is not used
                # we force it so we don't get stuck in the loop.
                if "{_rnd}" not in self.backup_path:
                    self.backup_path += "{_rnd}"
                continue
            break
        # Inform the user about the problem
        self.logger.critical(
            "Couldn't save the result of the function '%s'. "
            "Saving the result as a pickle at:\n%s"
            "\nThe file was gonna be written at:\n%s\n",
            self.function_info["function_name"], backup_path, path
        )
        # Backup the result
        dirname = os.path.dirname(backup_path)
        if dirname != "":
            os.makedirs(dirname, exist_ok=True)
        with open(backup_path, "wb") as f:
            pickle.dump(result, f)
        # Attach context to the exception; the caller re-raises it.
        exception.backup_path = backup_path
        exception.path = path
        exception.result = result
        return exception

    def _get_metadata_path(self, path):
        """Return the path of the metadata file that sits next to a cache file."""
        return path + ".metadata"

    def _load(self, path):
        """Load a cache (or a structured group of caches); return None on a miss."""
        # Structured paths: every sub-cache must be present, otherwise it's a miss.
        if isinstance(path, (list, tuple)):
            result = []
            for p in path:
                cache = self._load(p)
                if cache is None:
                    return None
                result.append(cache)
            if isinstance(path, tuple):
                result = tuple(result)
            return result
        elif isinstance(path, dict):
            result = {}
            for key, p in path.items():
                cache = self._load(p)
                if cache is None:
                    return None
                result[key] = cache
            return result
        # Check if the cache exists and is readable
        if not os.path.isfile(path):
            self.logger.info("The cache at path '%s' does not exists.", path)
            return None
        self.logger.info("Loading cache from %s", path)
        metadata_path = self._get_metadata_path(path)
        # Load the metadata if present
        if os.path.isfile(metadata_path):
            self.logger.info("Loading the metadata file at '%s'", metadata_path)
            with open(metadata_path, "r") as f:
                metadata = json.load(f)
        else:
            self.logger.info("The metadata file at '%s' do not exists.", metadata_path)
            # TODO: do we need to to more stuff?
            metadata = {}
        # Check if the cache is still valid
        if self.validity_duration is not None:
            # A cache without a recorded creation_time is treated as
            # infinitely old and therefore invalid.
            elapsed_time = time() - metadata.get("creation_time", float("-inf"))
            if elapsed_time > self.validity_duration:
                os.remove(path)
                return None
        # Actually load the values
        return Backend(self.load_kwargs, self.dump_kwargs).load(metadata.get("backend_metadata", {}), path)

    def _dump(self, args, kwargs, result, path, start_time, end_time):
        """Serialize `result` at `path` and write the sidecar metadata file."""
        # Structured paths: the result must mirror the path structure.
        if isinstance(path, (list, tuple)):
            assert isinstance(result, (list, tuple))
            assert len(result) == len(path)
            for r, p in zip(result, path):
                self._dump(args, kwargs, r, p, start_time, end_time)
            return
        elif isinstance(path, dict):
            assert isinstance(result, dict)
            assert set(result.keys()) == set(path.keys())
            for key in result.keys():
                self._dump(args, kwargs, result[key], path[key], start_time, end_time)
            return
        # Dump the file
        self.logger.info("Saving the cache at %s", path)
        dirname = os.path.dirname(path)
        if dirname != "":
            os.makedirs(dirname, exist_ok=True)
        dump_start_time = time()
        backend_metadata = Backend(self.load_kwargs, self.dump_kwargs).dump(result, path) or {}
        dump_end_time = time()
        # Compute the metadata
        metadata = {
            # When the cache was created
            "creation_time": start_time,
            "creation_time_human": datetime.fromtimestamp(
                start_time
            ).strftime("%Y-%m-%d %H:%M:%S"),
            # How much the function took to compute the result
            "time_delta": end_time - start_time,
            "time_delta_human": humanize.precisedelta(end_time - start_time),
            # How much time it took to serialize the result and save it to a file
            "file_dump_time": dump_end_time - dump_start_time,
            "file_dump_time_human": humanize.precisedelta(
                dump_end_time - dump_start_time
            ),
            # How big is the serialized result
            "file_dump_size": os.path.getsize(path),
            "file_dump_size_human": humanize.naturalsize(os.path.getsize(path)),
            # The arguments used to load and dump the file
            "load_kwargs": self.load_kwargs,
            "dump_kwargs": self.dump_kwargs,
            # Informations about the function
            "function_name": self.function_info["function_name"],
            "function_file": "%s:%s" % (
                self.decorated_function.__code__.co_filename,
                self.decorated_function.__code__.co_firstlineno
            ),
            "args_to_ignore": self.function_info["args_to_ignore"],
            "source": self.function_info.get("source", None),
            # The data reserved for the backend to correctly serialize and
            # de-serialize the values
            "backend_metadata": backend_metadata,
        }
        # Record only the JSON-serializable, non-ignored parameters.
        params = {}
        for key, val in get_params(self.function_info, args, kwargs).items():
            if key in self.args_to_ignore:
                continue
            try:
                # Check if it's json serializable
                json.dumps(val)
                params[key] = val
            except (TypeError, ValueError):
                # Fixed: the previous bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit; json.dumps raises
                # TypeError (unserializable) or ValueError (circular).
                pass
        metadata["parameters"] = params
        metadata_path = self._get_metadata_path(path)
        self.logger.info("Saving the cache meta-data at %s", metadata_path)
        with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=4)

    def _decorate_callable(self, function: Callable) -> Callable:
        """Wrap `function` so that its results are transparently cached on disk."""
        # wraps to support pickling
        @wraps(function)
        def wrapped(*args, **kwargs):
            # Get the path
            path = self._get_formatted_path(args, kwargs)
            # Try to load the cache
            result = self._load(path)
            # If we got a result, return it.
            # NOTE(review): a function that legitimately returns None is never
            # considered cached and will be recomputed on every call.
            if result is not None:
                return result
            self.logger.info("Computing the result for %s %s", args, kwargs)
            # otherwise compute the result
            start_time = time()
            result = function(*args, **kwargs)
            end_time = time()
            # Save the result
            try:
                self._dump(args, kwargs, result, path, start_time, end_time)
            except Exception as e:
                if self.is_backup_enabled:
                    raise self._backup(result, path, e, args, kwargs)
                raise e
            return result
        # add a reference to the cached function so we can unpack
        # the caching if needed
        setattr(wrapped, "__cached_function", function)
        setattr(wrapped, "__cacher_instance", self)
        return wrapped

    def _get_formatted_path(self, args, kwargs, formatter=None, function_info=None, extra_kwargs=None) -> str:
        """Compute the path adding and computing the needed arguments."""
        formatter = formatter or self.cache_path
        if isinstance(formatter, list):
            # Fixed: the recursion now propagates function_info and
            # extra_kwargs, which were previously dropped for structured paths.
            return [
                self._get_formatted_path(args, kwargs, f, function_info, extra_kwargs)
                for f in formatter
            ]
        if isinstance(formatter, tuple):
            return tuple(
                self._get_formatted_path(args, kwargs, f, function_info, extra_kwargs)
                for f in formatter
            )
        elif isinstance(formatter, dict):
            return {
                key: self._get_formatted_path(args, kwargs, v, function_info, extra_kwargs)
                for key, v in formatter.items()
            }
        extra_kwargs = extra_kwargs or {}
        function_info = function_info or self.function_info
        params = get_params(function_info, args, kwargs)
        if "_hash" in formatter:
            # Computed lazily: functions with non-hashable arguments work
            # as long as `_hash` is not used in the formatter.
            params["_hash"] = sha256({"params": params, "function_info": function_info})
        self.logger.debug("Got parameters %s", params)
        # Handle the composite paths, e.g. "{x.shape}" follows the
        # attribute chain starting from the argument `x`.
        for match in re.finditer(r"\{([^\{]+)(:?\..+?)+\}", formatter):
            # Extract the matching string
            match = match.group(0)
            # Get the name of the base element and the attributes chain
            root, *attrs = match[1:-1].split(".")
            # Get the params to use for the attributes chain
            root = params[root]
            # Follow the attributes chain
            for attr in attrs:
                root = getattr(root, attr)
            # Replace the result in the formatter
            formatter = formatter.replace(match, str(root))
        # Compute the path of the cache for these parameters
        path = formatter.format(
            cache_dir=self.cache_dir,
            **params,
            **function_info,
            **extra_kwargs,
        )
        self.logger.debug("Calculated path %s", path)
        return path

    def _fix_docs(self, function: Callable, wrapped: Callable) -> Callable:
        """Propagate the decorated function's docs onto the wrapper."""
        # Copy the doc of the decorated function
        wrapped.__doc__ = function.__doc__
        # Copy the name of the function and add the suffix _cached
        wrapped.__name__ = function.__name__ + "_cached"
        return wrapped

    def decorate(self, function: Callable) -> Callable:
        """Build and return the caching wrapper around `function`."""
        self.function_info = self._compute_function_info(function)
        self.decorated_function = function
        wrapped = self._decorate_callable(function)
        wrapped = self._fix_docs(function, wrapped)
        return wrapped

    def __call__(self, function):
        """Apply the decorator: configure the logger and wrap `function`."""
        self.logger = logging.getLogger(__name__ + "." + function.__name__)
        # Do not re-initialize loggers if we have to cache multiple functions
        # with the same name
        if not self.logger.hasHandlers():
            handler = logging.StreamHandler(sys.stderr)
            handler.setFormatter(logging.Formatter(self.log_format))
            self.logger.addHandler(handler)
        if self.log_level.lower() not in log_levels:
            raise ValueError("The logger level {} is not a supported one. The available ones are {}".format(
                self.log_level.lower(),
                list(log_levels.keys())
            ))
        self.logger.setLevel(log_levels[self.log_level.lower()])
        return self.decorate(function)
|
import os
import urwid
from .model import Model
import shutil

# Terminal geometry, resolved once at import time.
# Fixed: the previous `os.popen('stty size')` shell-out breaks when the
# process has no controlling TTY (pipes, CI, IDE runners).
# shutil.get_terminal_size() handles that case and falls back to the
# COLUMNS/LINES environment variables (default 80x24).
_terminal_size = shutil.get_terminal_size()
MAX_ROWS = _terminal_size.lines - 2  # leave room for the window chrome
MAX_COLUMNS = _terminal_size.columns
class ButtonLabel(urwid.SelectableIcon):
    """Selectable label whose cursor is parked past the text so it stays hidden."""

    def __init__(self, text):
        # Placing the cursor one column beyond the label keeps urwid's
        # cursor artifact out of the visible text.
        hidden_cursor_pos = len(text) + 1
        super().__init__(text, cursor_position=hidden_cursor_pos)
class Button(urwid.WidgetWrap):
    """Clickable single-line button wired to a controller callback."""

    _selectable = True
    signals = ['click']

    def __init__(self, label, callback, callback_argument):
        self.label = ButtonLabel(label)
        wrapped = urwid.AttrMap(self.label, None, focus_map='reverted')
        super().__init__(wrapped)
        urwid.connect_signal(
            self, 'click', callback, user_args=[callback_argument])

    def keypress(self, size, key):
        # Only the ACTIVATE command (enter/space) triggers a click;
        # every other key is handed back to urwid for default handling.
        if self._command_map[key] == urwid.ACTIVATE:
            self._emit('click')
            return None
        return key
class SearchInput(urwid.LineBox):
    """LineBox-wrapped edit field that emits 'enter' when Return is pressed."""

    signals = ['enter']

    def keypress(self, size, key):
        if key == 'enter':
            self._emit('enter')
            return None
        return super().keypress(size, key)
class View(urwid.WidgetWrap):
    """
    Top-level widget tree of the player UI: the playback queue on the left
    and a (search input / search results / player bar) stack on the right.
    """

    # Display attributes used throughout the UI.
    palette = [
        ('reverted', 'black', 'white'),
        ('pg normal', 'black', 'black'),
        ('pg complete', 'white', 'white')
    ]

    def __init__(self, controller, model):
        self.controller = controller
        self.model = model
        urwid.WidgetWrap.__init__(self, self.main_window())

    def on_search_input_keypress(self, search_input):
        """Forward the typed query to the controller when Enter is pressed."""
        self.controller.search(search_input.original_widget.edit_text)

    def search_input(self):
        """Build the search box widget."""
        w = SearchInput(urwid.Edit(''), title='Search', title_align='left')
        urwid.connect_signal(w, 'enter', self.on_search_input_keypress)
        return w

    def search_results(self):
        """Build the (initially empty) search results list."""
        self.search_results_walker = urwid.SimpleListWalker([])
        search_results_list = urwid.ListBox(self.search_results_walker)
        search_results_wrapper = urwid.LineBox(urwid.BoxAdapter(
            search_results_list, MAX_ROWS - 7), title='Search results', title_align='left')
        return search_results_wrapper

    def queue(self):
        """Build the (initially empty) playback queue list."""
        self.queue_list_walker = urwid.SimpleFocusListWalker([])
        queue_list = urwid.ListBox(self.queue_list_walker)
        queue = urwid.LineBox(urwid.BoxAdapter(
            queue_list, MAX_ROWS), title='Queue', title_align='left')
        return queue

    def player(self):
        """Build the player bar: track label, elapsed/total time and progress."""
        # self.player_state = urwid.Text((''))
        self.currently_playing = urwid.Text('No track playing')
        self.current_track_progress = urwid.Text('00:00')
        self.current_track_duration = urwid.Text('00:00')
        # Progress is expressed in 1/10000ths of the track duration.
        self.progress = urwid.ProgressBar('pg normal', 'pg complete', 0, 10000)
        player = urwid.Columns([
            # ('fixed', 10, self.player_state),
            urwid.Pile([
                urwid.Columns([
                    ('fixed', 5, self.current_track_progress),
                    ('fixed', 1, urwid.Text('/')),
                    ('fixed', 5, self.current_track_duration),
                    ('fixed', 3, urwid.Text(' | ')),
                    self.currently_playing
                ]),
                self.progress
            ])
        ])
        player = urwid.LineBox(player, title='Player', title_align='left')
        return player

    def update_progress(self, value):
        """Move the progress bar to `value` (same unit as the track duration)."""
        duration = self.model.current_track.duration
        # Fixed: guard against ZeroDivisionError when the duration is
        # zero/unknown (e.g. no track metadata yet).
        if duration:
            self.progress.set_completion(value * 10000 / duration)
        # self.progress.render((1, ))

    def update_search_results(self, results):
        """Replace the search-results list with one button per track."""
        self.search_results_walker.clear()
        for track in results:
            track_entry = Button(
                track.label, self.controller.on_track_results_click, track)
            self.search_results_walker.append(track_entry)

    def update_queue(self):
        """Rebuild the queue list from the model's current queue."""
        self.queue_list_walker.clear()
        for track in self.model.queue:
            track_entry = Button(
                track.label, self.controller.on_track_queue_click, track)
            self.queue_list_walker.append(track_entry)

    def track_options_overlay(self, track):
        """Build the modal overlay with per-track actions."""
        # play_button = Button('Play', self.controller.play, track)
        add_to_queue_button = Button(
            'Add to queue', self.controller.add_to_queue, track)
        cancel_button = Button(
            'Cancel', self.controller.cancel, None)
        overlay = urwid.Pile([
            add_to_queue_button,
            # play_button,
            cancel_button
        ])
        overlay = urwid.LineBox(
            overlay, title='Track options', title_align='center')
        overlay = urwid.Overlay(
            urwid.Filler(overlay), self, 'center', 20, 'middle', 4)
        return overlay

    def main_window(self):
        """Assemble the root widget: queue column plus the right-hand stack."""
        # Right
        right = urwid.Pile([
            self.search_input(),
            self.search_results(),
            self.player()
        ])
        # Columns
        columns = urwid.Columns([
            ('weight', 1, self.queue()),
            ('weight', 3, right)
        ])
        # Main wrapper
        w = urwid.Filler(columns, valign='top')
        return w
|
"""
TXA - Transfer Register X to Accumulator.
A = X
Copies the current contents of the X register into the accumulator and sets
the zero and negative flags as appropriate.
Processor Status after use:
+------+-------------------+--------------------------+
| Flag | Description | State |
+======+===================+==========================+
| C | Carry Flag | Not affected |
+------+-------------------+--------------------------+
| Z    | Zero Flag         | Set if A = 0             |
+------+-------------------+--------------------------+
| I | Interrupt Disable | Not affected |
+------+-------------------+--------------------------+
| D | Decimal Mode Flag | Not affected |
+------+-------------------+--------------------------+
| B | Break Command | Not affected |
+------+-------------------+--------------------------+
| V | Overflow Flag | Not affected |
+------+-------------------+--------------------------+
| N | Negative Flag | Set if bit 7 of A is set |
+------+-------------------+--------------------------+
+-----------------+--------+-------+--------+
| Addressing Mode | Opcode | Bytes | Cycles |
+=================+========+=======+========+
| Implied | 0x8A | 1 | 2 |
+-----------------+--------+-------+--------+
See also: TAX
"""
import pytest
import m6502
@pytest.mark.parametrize(
    "value, flag_n, flag_z", [
        (0x0F, False, False),
        (0x00, False, True),
        (0xF0, True, False),
    ])
def test_cpu_ins_txa_imm(value: int, flag_n: bool, flag_z: bool) -> None:
    """
    TXA - Transfer X Register to Accumulator, Implied.

    Loads `value` into X, executes opcode 0x8A and verifies that A holds
    the value and that the N/Z flags match the expectations.

    :param value: value preloaded into the X register
    :param flag_n: expected Negative flag after execution
    :param flag_z: expected Zero flag after execution
    :return: None
    """
    memory = m6502.Memory()
    cpu = m6502.Processor(memory)
    cpu.reset()
    cpu.reg_a = 0x00
    cpu.reg_x = value
    # TXA opcode placed where execution starts (the assertion below implies
    # reset() leaves the program counter at 0xFCE2).
    memory[0xFCE2] = 0x8A
    cpu.execute(2)  # TXA takes 2 cycles (implied addressing)
    assert (
        cpu.program_counter,
        cpu.stack_pointer,
        cpu.cycles,
        cpu.flag_n,
        cpu.flag_z,
        cpu.reg_a,
    ) == (0xFCE3, 0x01FD, 2, flag_n, flag_z, value)
|
import uuid as uuid_lib
from django.contrib.auth import get_user_model
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class VideoRaw(models.Model):
    """
    This model represents the structure for the video the user uploads for conversion
    """

    # possible formats that we can convert, with extra information
    REQUESTED_FORMAT_CHOICES = [
        ("mp4", "mp4, using libx264 codec"),
        ("avi", "avi, using mpeg4 codec"),
        ("mkv", "mkv, using libvpx codec"),
        ("3gp", "3gp, using h263 codec"),
    ]

    # Owner of the upload; deleting the user cascades to their raw videos.
    user = models.ForeignKey(User, related_name="raw_videos", on_delete=models.CASCADE)
    # The uploaded file; only the four convertible containers are accepted.
    file = models.FileField(
        upload_to="raw_videos/",
        validators=[
            FileExtensionValidator(allowed_extensions=["mp4", "avi", "mkv", "3gp"])
        ],
    )
    # Public identifier, indexed for lookups and never editable via forms.
    uuid = models.UUIDField(db_index=True, default=uuid_lib.uuid4, editable=False)
    # Target container for the conversion.
    # NOTE(review): the first positional argument of CharField is verbose_name,
    # but this sentence reads like help_text — confirm the intent.
    req_format = models.CharField(
        _("The format this video should be converted to."),
        max_length=3,
        choices=REQUESTED_FORMAT_CHOICES,
        default="mp4",
    )

    def __str__(self):
        # Represent the video by its public UUID.
        return "%s" % self.uuid
class VideoConverted(models.Model):
    """Result of a conversion job: the converted file plus its lifecycle timestamps."""

    # Owner of the converted video; deleting the user cascades here too.
    user = models.ForeignKey(
        User, related_name="converted_videos", on_delete=models.CASCADE
    )
    # The converted file; blank until the conversion job has produced output.
    file = models.FileField(
        upload_to="converted_videos/",
        validators=[
            FileExtensionValidator(allowed_extensions=["mp4", "avi", "mkv", "3gp"])
        ],
        blank=True,
    )
    # Public identifier, indexed for lookups and never editable via forms.
    uuid = models.UUIDField(
        default=uuid_lib.uuid4,
        editable=False,
        db_index=True,
    )
    # Source upload this conversion was produced from.
    # NOTE(review): related_name="raw" makes the *reverse* accessor on
    # VideoRaw be `raw`, which is confusing — confirm before relying on it.
    raw = models.ForeignKey(
        VideoRaw, related_name="raw", on_delete=models.CASCADE, null=True
    )
    created_at = models.DateTimeField(auto_now_add=True)
    # When the converted file should expire; None means no expiration set.
    expiration_time = models.DateTimeField(default=None, blank=True, null=True)
    remaining_expiration_time = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        # Represent the video by its public UUID.
        return "%s" % self.uuid
|
from datetime import timedelta
from fastapi import APIRouter
from typing import Any
from fastapi.routing import APIRoute
# from fastapi import APIRouter, Body, Depends, HTTPException
# from fastapi.security import OAuth2PasswordRequestForm
# from sqlalchemy.orm import Session
# from app import crud, models, schemas
#TODO:: keep adding from here
class LoginRoute(APIRoute):
    """
    Route class for the login endpoints.

    Currently a plain passthrough of `fastapi.routing.APIRoute`; it exists
    as an extension point (custom request handling, logging, auditing).
    """
    pass
# Every route registered on this router is built with LoginRoute.
router = APIRouter(route_class=LoginRoute)
|
from zone_director import ZoneDirectorBase
class SituationZoneDirectorMixin:
    """Mixin for situations that force a specific zone director at zone spin up."""

    # Designer-facing tuning entry: the referenced zone director is requested
    # automatically when the situation's zone spins up.
    INSTANCE_TUNABLES = {'_zone_director': ZoneDirectorBase.TunableReference(description='\n    This zone director will automatically be requested by the situation\n    during zone spin up.\n    ')}

    @classmethod
    def get_zone_director_request(cls):
        # Returns a (zone director instance, request type) pair; the request
        # type comes from the subclass hook below.
        return (cls._zone_director(), cls._get_zone_director_request_type())

    @classmethod
    def _get_zone_director_request_type(cls):
        # Subclasses must define which kind of request to issue.
        raise NotImplementedError
|
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
import datetime
# Create your tests here.
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date = time)
        self.assertIs(future_question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is older than 1 day.
        """
        # Exactly one day plus one second ago: just over the boundary.
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        old_question = Question(pub_date=time)
        self.assertIs(old_question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() returns True for questions whose pub_date
        is within the last day.
        """
        # One second short of a full day: just inside the boundary.
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_question = Question(pub_date=time)
        self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Create (and persist) a Question whose pub_date is `days` days away from
    now — negative for the past, positive for the future.
    """
    publication_time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(
        question_text=question_text,
        pub_date=publication_time,
    )
class QuestionIndexViewTests(TestCase):
    """Tests for the filtering of latest_question_list on the index view."""

    def test_no_questions(self):
        """
        If no questions, appropriate message is displayed
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_past_question(self):
        """A question with a pub_date in the past is listed on the index."""
        question = create_question("test past question", -30)
        # NOTE(review): create_question() uses objects.create(), which already
        # persists — the save() calls in these tests are redundant but harmless.
        question.save()
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], [question])

    def test_future_question(self):
        """A question with a pub_date in the future is not listed."""
        question = create_question("test future question", 30)
        question.save()
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_future_and_past_question(self):
        """Only the past question is listed when both exist."""
        question_future = create_question("test future question", 30)
        question_past = create_question("test past question", -30)
        question_future.save()
        question_past.save()
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], [question_past])

    def test_two_past_questions(self):
        """Both past questions are listed."""
        qone = create_question("past question one", -30)
        qtwo = create_question("past question two", -30)
        qone.save()
        qtwo.save()
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], [qone, qtwo])

    def test_two_future_questions(self):
        """Neither future question is listed."""
        qone = create_question("future question one", 30)
        qtwo = create_question("future question two", 30)
        qone.save()
        qtwo.save()
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], [])
class DetailViewTest(TestCase):
    """Tests for the question detail view."""

    def test_view_returns_404_if_future_question(self):
        """
        The detail view of a question with a pub_date in the future
        returns a 404 not found.
        """
        # create_question() persists via objects.create(); no extra save() needed.
        question = create_question("test future question", 30)
        response = self.client.get(reverse('polls:detail', args=(question.id,)))
        self.assertEqual(response.status_code, 404)

    def test_view_returns_200_if_past_question(self):
        """
        The detail view of a question with a pub_date in the past
        displays the question's text.
        """
        # Fixed: the fixture was labelled "test future quesiton" (typo and
        # mislabelled) even though days=-30 makes it a *past* question.
        question = create_question("test past question", -30)
        response = self.client.get(reverse('polls:detail', args=(question.id,)))
        self.assertContains(response, question.question_text)
class Solution:
    def isBoomerang(self, points: list[list[int]]) -> bool:
        """Return True if the three points are distinct and not collinear.

        The cross product of AB x AC is zero exactly when the three points
        are collinear — which also covers the case where any two points
        coincide, so no separate duplicate check is needed.
        (Also fixed: the annotation used `typing.List` without importing it.)
        """
        (ax, ay), (bx, by), (cx, cy) = points
        return (bx - ax) * (cy - ay) - (by - ay) * (cx - ax) != 0
from collections import OrderedDict
from utils.env_utils import get_dim
from gym.spaces import Discrete
import numpy as np
class ReplayBuffer(object):
    """
    Fixed-capacity ring buffer of environment transitions backed by
    pre-allocated numpy arrays. Once full, the oldest entries are overwritten.
    """

    def __init__(
        self,
        max_replay_buffer_size,
        ob_space,
        action_space,
    ):
        """
        The class state which should not mutate

        :param max_replay_buffer_size: maximum number of transitions stored
        :param ob_space: observation space (flattened through get_dim)
        :param action_space: action space (flattened through get_dim)
        """
        self._ob_space = ob_space
        self._action_space = action_space
        # NOTE(review): get_dim presumably returns the flat dimensionality of
        # a gym space — confirm against utils.env_utils.get_dim.
        ob_dim = get_dim(self._ob_space)
        ac_dim = get_dim(self._action_space)
        self._max_replay_buffer_size = max_replay_buffer_size
        """
        The class mutable state
        """
        self._observations = np.zeros((max_replay_buffer_size, ob_dim))
        # It's a bit memory inefficient to save the observations twice,
        # but it makes the code *much* easier since you no longer have to
        # worry about termination conditions.
        self._next_obs = np.zeros((max_replay_buffer_size, ob_dim))
        self._actions = np.zeros((max_replay_buffer_size, ac_dim))
        # Make everything a 2D np array to make it easier for other code to
        # reason about the shape of the data
        self._rewards = np.zeros((max_replay_buffer_size, 1))
        # self._terminals[i] = a terminal was received at time i
        self._terminals = np.zeros((max_replay_buffer_size, 1), dtype='uint8')
        # _top is the next slot to write; _size is the number of valid entries.
        self._top = 0
        self._size = 0

    def add_path(self, path):
        """
        Add a path to the replay buffer.

        This default implementation naively goes through every step, but you
        may want to optimize this.
        """
        for i, (
            obs,
            action,
            reward,
            next_obs,
            terminal,
            agent_info,
            env_info
        ) in enumerate(zip(
            path["observations"],
            path["actions"],
            path["rewards"],
            path["next_observations"],
            path["terminals"],
            path["agent_infos"],
            path["env_infos"],
        )):
            self.add_sample(
                observation=obs,
                action=action,
                reward=reward,
                next_observation=next_obs,
                terminal=terminal,
                agent_info=agent_info,
                env_info=env_info,
            )

    def add_paths(self, paths):
        """Add every path in `paths` to the buffer."""
        for path in paths:
            self.add_path(path)

    def add_sample(self, observation, action, reward, next_observation,
                   terminal, env_info, **kwargs):
        """Store a single transition at the current write position."""
        # Discrete action spaces are not supported by the flat action array.
        assert not isinstance(self._action_space, Discrete)
        self._observations[self._top] = observation
        self._actions[self._top] = action
        self._rewards[self._top] = reward
        self._terminals[self._top] = terminal
        self._next_obs[self._top] = next_observation
        self._advance()

    def _advance(self):
        # Advance the write pointer, wrapping around at capacity.
        self._top = (self._top + 1) % self._max_replay_buffer_size
        if self._size < self._max_replay_buffer_size:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample `batch_size` transitions uniformly at random (with replacement)."""
        indices = np.random.randint(0, self._size, batch_size)
        batch = dict(
            observations=self._observations[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            next_observations=self._next_obs[indices],
        )
        return batch

    def get_dataset(self):
        """Return the valid portion of the stored observations."""
        return self._observations[:self._size]

    def num_steps_can_sample(self):
        """Return how many transitions are currently stored."""
        return self._size

    def get_diagnostics(self):
        """Return loggable statistics about the buffer."""
        return OrderedDict([
            ('size', self._size)
        ])

    def end_epoch(self, epoch):
        # Hook called at the end of a training epoch; nothing to do by default.
        return

    def get_snapshot(self):
        """Return the full mutable state for checkpointing."""
        return dict(
            _observations=self._observations,
            _next_obs=self._next_obs,
            _actions=self._actions,
            _rewards=self._rewards,
            _terminals=self._terminals,
            _top=self._top,
            _size=self._size,
        )

    def restore_from_snapshot(self, ss):
        """Restore the attributes captured by get_snapshot()."""
        for key in ss.keys():
            assert hasattr(self, key)
            setattr(self, key, ss[key])
class ReplayBufferCount(ReplayBuffer):
    """Replay buffer that also tracks how often each stored transition was sampled."""

    def __init__(
        self,
        max_replay_buffer_size,
        ob_space,
        action_space,
        priority_sample=False
    ):
        """
        The class state which should not mutate

        :param priority_sample: when True, rarely-sampled transitions are
            drawn with higher probability in random_batch().
        """
        super().__init__(max_replay_buffer_size, ob_space, action_space,)
        # _counts[i] = number of times slot i has been returned by random_batch()
        self._counts = np.zeros((max_replay_buffer_size, 1))
        self.priority_sample = priority_sample

    def add_sample(self, observation, action, reward, next_observation,
                   terminal, env_info, **kwargs):
        """Store a transition and reset the sample count of its slot."""
        # Reset the count of the slot about to be (re)written, then delegate
        # storage and pointer advancement to the parent class.
        # (Fixed: the previous version copy-pasted the whole parent body.)
        self._counts[self._top] = 0
        super().add_sample(observation, action, reward, next_observation,
                           terminal, env_info, **kwargs)

    def random_batch(self, batch_size):
        """Sample a batch; with priority_sample, weight inversely by visit count."""
        if self.priority_sample:
            # Probability proportional to 1 / (count + 1): less-seen
            # transitions are preferred.
            probs = 1 / (self._counts[:self._size] + 1)
            probs /= probs.sum()
            indices = np.random.choice(np.arange(self._size), size=batch_size, p=probs[:, 0])
        else:
            indices = np.random.randint(0, self._size, batch_size)
        batch = dict(
            observations=self._observations[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            next_observations=self._next_obs[indices],
            # Counts are copied *before* the increment below.
            counts=np.copy(self._counts[indices]),
        )
        self._counts[indices] += 1
        return batch

    def get_snapshot(self):
        """Extend the parent snapshot with the per-slot sample counts."""
        ss = super().get_snapshot()
        ss['_counts'] = self._counts
        return ss
|
from fastapi import FastAPI
from .routers import post, user, auth, vote
from fastapi.middleware.cors import CORSMiddleware
# Application instance; the routers below attach all endpoint groups to it.
app = FastAPI()
# Allow requests from any origin. Tighten this list to specific domains if
# credentialed browser clients ever need to be restricted.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)
# Register endpoint groups: posts, users, authentication, and voting.
app.include_router(post.router)
app.include_router(user.router)
app.include_router(auth.router)
app.include_router(vote.router)
@app.get("/")
def root():
    """Landing endpoint for the API root (doubles as a liveness check)."""
    payload = {"details": "This root"}
    return payload
|
from PyQt5 import QtCore
class TableModel(QtCore.QAbstractTableModel):
    """Editable table model backed by a list of row lists (``self._data``).

    The column count is tracked separately in ``self.data_column`` so columns
    can be inserted independently of rows. Fixes over the previous revision:
    the Qt class objects (``QModelIndex``/``QVariant``) were used as default
    argument values instead of instances, ``setData`` never emitted
    ``dataChanged`` (so views did not repaint edits), and the ``insertRow`` /
    ``insertColumn`` wrappers discarded the success flag.
    """

    def __init__(self, head_columns_names, head_rows_names):
        super(TableModel, self).__init__()
        self.headColStringsList = head_columns_names
        self.headRowStringsList = head_rows_names
        self._data = []  # row-major cell storage; one list per row
        self.data_column = []  # one placeholder per column; only its length is used

    def rowCount(self, index=QtCore.QModelIndex()):
        # As per the Qt docs this should return 0 for a valid parent of a table.
        return len(self._data)

    def columnCount(self, index=QtCore.QModelIndex()):
        return len(self.data_column)

    def data(self, index=QtCore.QModelIndex(), role=QtCore.Qt.DisplayRole):
        """Return the cell's text for the display and edit roles."""
        if index.isValid():
            if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
                value = self._data[index.row()][index.column()]
                return str(value)

    def setData(self, index=QtCore.QModelIndex(), value=None, role=QtCore.Qt.EditRole):
        """Store *value* in the cell and notify views, per the Qt model contract."""
        if role == QtCore.Qt.EditRole:
            self._data[index.row()][index.column()] = value
            # Views only repaint edited cells when dataChanged is emitted.
            self.dataChanged.emit(index, index, [role])
            return True
        return False

    def headerData(self, section, orientation=QtCore.Qt.Orientation, role=QtCore.Qt.DisplayRole):
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                return self.headColStringsList[section]
            if orientation == QtCore.Qt.Vertical:
                return self.headRowStringsList[section]

    def flags(self, index):
        return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable

    def insertRow(self, position, index=QtCore.QModelIndex()):
        # Convenience wrapper; propagate the success flag like the base class.
        return self.insertRows(position, 3, index)

    def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
        self.beginInsertRows(parent, position, position + rows - 1)
        for _ in range(rows):
            # New rows must match the column layout: three text cells, one number.
            self._data.insert(position, ["", "", "", 0])
        self.endInsertRows()
        return True

    def insertColumn(self, position, index=QtCore.QModelIndex()):
        return self.insertColumns(position, 4, index)

    def insertColumns(self, position, columns, parent=QtCore.QModelIndex()):
        self.beginInsertColumns(parent, position, position + columns - 1)
        for _ in range(columns):
            # One placeholder per column; the cell values live in self._data.
            self.data_column.insert(position, [])
        self.endInsertColumns()
        return True
# https://forum.qt.io/topic/76087/get-content-of-cell-from-qtableview/4
# https://doc.qt.io/qtforpython/overviews/model-view-programming.html
|
# AOC 2021 - day 11 #
import math
import sys
from .common_11 import OctoGrid, parse_input
INF = math.inf
def main(data):
    """Return the first step at which every octopus flashes simultaneously."""
    grid = OctoGrid(data)
    step = 0
    while True:
        step += 1
        grid.do_step()
        if grid.is_all_flash():
            return step
if __name__ == "__main__":
    # CLI usage: pass the puzzle-input file path as the first argument;
    # prints the first step on which all octopuses flash.
    data_gen = parse_input(sys.argv[1])
    print(main(data_gen))
|
import os
import pandas
def read_csv_files_from_folder_to_dataframe_dictionary(
        folder: str) -> dict:
    """Load every ``*.csv`` file in *folder* into a dict of DataFrames.

    Keys are the file names without the ``.csv`` extension.
    """
    dataframe_dictionary = dict()
    for csv_file in __get_all_csv_files_from_folder(folder):
        dataframe_dictionary = __add_dataframe(
            csv_file, folder, dataframe_dictionary)
    return dataframe_dictionary


def __get_all_csv_files_from_folder(
        folder: str) -> list:
    """Return the names of all ``.csv`` files directly inside *folder*."""
    return [file for file in os.listdir(folder) if file.endswith(".csv")]


def __add_dataframe(
        csv_file: str,
        folder_name: str,
        dataframe_dictionary: dict) -> dict:
    """Read one CSV and register it in the dictionary under its stem name."""
    dataframe_name = csv_file.replace('.csv', '')
    csv_path = os.path.join(folder_name, csv_file)
    dataframe = pandas.read_csv(filepath_or_buffer=csv_path)
    dataframe_dictionary.update({dataframe_name: dataframe})
    return dataframe_dictionary
|
from __future__ import print_function
from casolver.core.events.Event import Event
from lsh import cache, minhash # https://github.com/mattilyra/lsh
import argparse
import errno
import json
import logging
import logging.config
import os
import sys
import pkg_resources
import numpy as np
# Configure logger
logging.config.fileConfig(pkg_resources.resource_filename('casolver', '../conf/logging.ini'))
def atoi(text):
    """Convert *text* to int when it is all digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def parse_events_file(file):
    """Load a falcon-solver JSON trace into the module-level event tables.

    Populates two globals:
      * ``allEvents`` -- maps event id -> Event object
      * ``dataList``  -- Events that carry payload data (SND/RCV/WR/RD with a
        positive return value and either a signature or a non-empty message)
    """
    logging.info("Loading events from: %s " %(str(file)))
    global allEvents
    global dataList
    with open(file, 'r') as f:
        data=f.read()
    json_file_data = json.loads(data)
    allEvents = {}
    dataList = []
    for e in json_file_data:
        # Mandatory fields of every trace record.
        e_timestamp = e['timestamp']
        e_type = e['type']
        e_thread = e['thread']
        e_eventID = e['id']
        e_loc = e['loc']
        e_order = e['order']
        e_dependency = e['dependency']
        if 'dependencies' in e: e_dependencies = e['dependencies']
        else: e_dependencies = None
        if 'data' in e: e_data = e['data']
        else: e_data = None
        event = Event(e_timestamp, e_type, e_thread, e_eventID, e_loc, e_order, e_dependency, e_dependencies, e_data)
        # Optional fields are copied onto the Event only when present.
        if 'pid' in e: event.setPid(e['pid'])
        if 'socket' in e: event.setSocket(e['socket'])
        if 'socket_type' in e: event.setSocketType(e['socket_type'])
        if 'src' in e: event.setSrc(e['src'])
        if 'src_port' in e: event.setSrcPort(e['src_port'])
        if 'dst' in e: event.setDst(e['dst'])
        if 'dst_port' in e: event.setDstPort(e['dst_port'])
        if 'size' in e: event.setSize(e['size'])
        if 'returned_value' in e: event.setReturnedValue(e['returned_value'])
        if 'message' in e: event.setMessage(e['message'])
        if 'filename' in e: event.setFilename(e['filename'])
        if 'fd' in e: event.setFileDescriptor(e['fd'])
        if 'offset' in e: event.setOffset(e['offset'])
        allEvents[e_eventID] = event
        # Only successful, data-carrying I/O events participate in the
        # payload-similarity analysis later on.
        if (e_type == 'SND' or e_type == 'RCV' or e_type == 'WR' or e_type == 'RD'):
            if event._returned_value > 0:
                if hasattr(event, "_signature") or event._msg_len > 0:
                    dataList.append(event)
    logging.info("Trace successfully loaded!")
def outputResult(output_filename):
    """Serialize every parsed event to *output_filename* as a JSON list."""
    output = []
    for e in allEvents.values():
        output.append(e.to_string())
    # Use a context manager so the handle is flushed and closed even on
    # error; the previous version left the file open, which can lose
    # buffered output at interpreter exit.
    with open(output_filename, "w") as output_file:
        output_file.write(json.dumps(output))
    logging.info("Output saved to: %s" % (output_filename))
def get_jaccard_distance(lshcache, docid_a, docid_b):
    """Jaccard similarity between the stored fingerprints of two documents."""
    fp_a = set(lshcache.fingerprints[docid_a])
    fp_b = set(lshcache.fingerprints[docid_b])
    return lshcache.hasher.jaccard(fp_a, fp_b)
def get_containment(lshcache, docid_a, docid_b):
    """Fraction of document A's fingerprints that also occur in document B."""
    fp_a = set(lshcache.fingerprints[docid_a])
    fp_b = set(lshcache.fingerprints[docid_b])
    return len(fp_a & fp_b) / len(fp_a)
def get_duplicates_of(lshcache, doc_id, min_jaccard=None):
    """Return LSH-bucket neighbours of *doc_id*, optionally Jaccard-filtered.

    Raises ValueError when *doc_id* has no stored fingerprint.
    """
    if doc_id in lshcache.fingerprints:
        fingerprint = lshcache.fingerprints[doc_id]
    else:
        raise ValueError("Must provide a document or a known document id")
    candidates = set()
    # Every document that shares at least one LSH bucket is a candidate.
    for bin_i, bucket in lshcache.bins_(fingerprint):
        bucket_id = hash(tuple(bucket))
        candidates.update(lshcache.bins[bin_i][bucket_id])
    if min_jaccard is None:
        return candidates
    else:
        # Keep only candidates whose MinHash-Jaccard meets the threshold.
        return {
            x
            for x in candidates
            if lshcache.hasher.jaccard(set(fingerprint), set(lshcache.fingerprints[x])) >= min_jaccard
        }
def get_similarities_of(lshcache, doc_id, min_jaccard=None):
    """Find candidate near-duplicates of *doc_id* via its LSH buckets.

    Returns the raw candidate set when *min_jaccard* is None; otherwise a
    list of ``(doc_id, dice)`` pairs whose Sorensen-Dice coefficient meets
    the threshold (the document itself is excluded, dice rounded to 2 dp).

    Raises ValueError when *doc_id* has no stored fingerprint.
    """
    if doc_id in lshcache.fingerprints:
        fingerprint = lshcache.fingerprints[doc_id]
    else:
        raise ValueError("Must provide a document or a known document id:", doc_id)
    # Every document that shares at least one LSH bucket is a candidate.
    candidates = set()
    for bin_i, bucket in lshcache.bins_(fingerprint):
        bucket_id = hash(tuple(bucket))
        candidates.update(lshcache.bins[bin_i][bucket_id])
    if min_jaccard is None:
        return candidates
    # Compute each Dice coefficient once; the previous version evaluated the
    # full set-intersection expression twice per candidate (filter + tuple).
    own = set(fingerprint)
    results = []
    for x in candidates:
        if x == doc_id:
            continue
        other = set(lshcache.fingerprints[x])
        dice = (2 * len(own & other)) / (len(own) + len(other))
        if dice >= min_jaccard:
            results.append((x, round(dice, 2)))
    return results
def findDataDependencies_All_Events():
    """Annotate each data event with ids of events carrying similar payloads.

    Builds MinHash fingerprints for all events in the module-level
    ``dataList``, indexes them in an LSH cache, and stores in
    ``event._data_similarities`` the (id, dice) pairs whose similarity
    is at least 0.6.
    """
    # Fixed seeds keep the MinHash fingerprints deterministic across runs.
    # seeds = np.array([ 72352, 784338, 366972, 630676, 794876, 677132, 843637, 208600, 200328, 987482])
    seeds = np.array([ 82241, 37327, 892129, 314275, 984838, 268169, 654205, 386536, 43381, 745416])
    hasher = minhash.MinHasher(seeds=seeds, char_ngram=5, hashbytes=4)
    lshcache = cache.Cache(bands=100, hasher=hasher)
    for event in dataList:
        # Events without a precomputed signature get one from their message.
        if not hasattr(event, '_signature'):
            event._signature = hasher.fingerprint(event._msg.encode('utf8')).tolist()
        lshcache.add_fingerprint(event._signature, event._eventId)
    logging.info("Signatures added to lshcache")
    for event in dataList:
        event_dup = get_similarities_of(lshcache, doc_id=event._eventId, min_jaccard=0.6)
        event._data_similarities = list(event_dup)
        logging.info("event_dup(%s) similarities found %d" %(event._type, len(event._data_similarities)))
    logging.info("finish...")
def main():
    """Main entry point for the script."""
    parser = argparse.ArgumentParser(prog='casolver-py', description='')
    parser.add_argument('--event_file', default=None, help="input file (falcon-solver output)")
    parser.add_argument('--output_file', default=None, help="output file")
    args, _ = parser.parse_known_args()
    # An input trace is mandatory; bail out with the standard usage code.
    if args.event_file is None:
        logging.info("No input file")
        sys.exit(os.EX_USAGE)
    parse_events_file(args.event_file)
    findDataDependencies_All_Events()
    output_file = args.output_file if args.output_file is not None else "casolver_trace.json"
    outputResult(output_file)
# Run the CLI entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from habitat.core.logging import logger
from env.soundspaces.benchmark import Benchmark
class Challenge(Benchmark):
    """Benchmark variant configured from the CHALLENGE_CONFIG_FILE env var."""

    def __init__(self, eval_remote=False):
        # The challenge harness supplies the config path via the environment.
        config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
        super().__init__(config_paths, eval_remote=eval_remote)

    def submit(self, agent):
        """Evaluate *agent* and log every resulting metric."""
        metrics = super().evaluate(agent)
        for name, value in metrics.items():
            logger.info("{}: {}".format(name, value))
|
# Read the athlete's age from stdin (prompt is Portuguese for "Enter your age").
idade = int(input("Digite sua idade "))
def classificacao(idade):
    """Return the competition-category sentence for an athlete aged *idade*."""
    if idade <= 9:
        categoria = "MIRIM"
    elif idade <= 14:
        categoria = "Infantil"
    elif idade <= 19:
        categoria = "Junior"
    elif idade <= 25:
        categoria = "Senior"
    else:
        categoria = "MASTER"
    return f"Um atleta de {idade} anos e Atleta {categoria}"
# Print the category sentence for the age entered above.
print(classificacao(idade))
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
# Define the LSTM decoder for the CNN+LSTM autoencoder architecture
# Adapted from https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/model.py
class DecoderRNN(nn.Module):
    """LSTM decoder for the CNN+LSTM autoencoder architecture.

    Adapted from
    https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/model.py
    """

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        """Set the hyper-parameters and build the layers."""
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

    def init_weights(self):
        """Initialize weights with small uniform values; zero the output bias."""
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)

    def forward(self, features, captions, lengths):
        """Auto-encode RRM sequence vectors.

        ``features`` (batch, embed_size) is prepended as the first time step
        before the embedded ``captions`` (batch, T). ``lengths`` are the valid
        sequence lengths (including the feature step, sorted descending as
        pack_padded_sequence requires). Returns logits of shape
        (sum(lengths), vocab_size) in packed order.
        """
        embeddings = self.embed(captions)
        embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
        packed = pack_padded_sequence(embeddings, lengths, batch_first=True)
        hiddens, _ = self.lstm(packed)
        outputs = self.linear(hiddens[0])
        return outputs

    def sample(self, features, states=None):
        """Samples reconstructed RRM sequences for given features (greedy search).

        Returns token ids of shape (batch_size, 20) (squeezed).
        """
        sampled_ids = []
        inputs = features.unsqueeze(1)
        for i in range(20):  # maximum sampling length
            hiddens, states = self.lstm(inputs, states)  # (batch_size, 1, hidden_size)
            outputs = self.linear(hiddens.squeeze(1))    # (batch_size, vocab_size)
            predicted = outputs.max(1)[1]                # (batch_size,) greedy token ids
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)
            inputs = inputs.unsqueeze(1)                 # (batch_size, 1, embed_size)
        # Fix: torch.max now returns 1-D (batch,) id tensors, so torch.cat
        # along dim 1 raised a dimension error; stack builds (batch, 20).
        sampled_ids = torch.stack(sampled_ids, 1)
        return sampled_ids.squeeze()
|
"""
Restricted Boltzman's Machine [Energy based model]
"""
from sklearn.datasets import fetch_mldata
import numpy as np
import matplotlib.pyplot as plt
from ..unsupervised.restricted_boltzmann_machine import RBM
def start_restricted_bolz_machine():
    """
    Drives the restricted boltzmann machine network: trains an RBM on 500
    MNIST images of the digit 2, plots the training error, and saves
    reconstruction grids from the first and last training iterations.
    """
    # fetch_mldata was removed from scikit-learn (mldata.org shut down);
    # fetch_openml serves the same MNIST data. NOTE(review): the stale
    # `fetch_mldata` import at the top of this module should be dropped too.
    from sklearn.datasets import fetch_openml
    mnist = fetch_openml('mnist_784', version=1, as_frame=False)
    X = mnist.data / 255
    y = mnist.target
    # OpenML returns string labels, so compare against '2' to select digit 2.
    X = X[y == '2']
    # Limit dataset to 500 samples
    idx = np.random.choice(range(X.shape[0]), size=500, replace=False)
    X = X[idx]
    rbm = RBM(hidden=50, iters=200, batch_size=25, l_rate=0.001)
    rbm.fit(X)
    training, = plt.plot(range(len(rbm.training_errs)),
                         rbm.training_errs, label='Training Error')
    plt.legend(handles=[training])
    plt.title('Error Plot')
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()
    save_images(rbm, iter_='First')
    save_images(rbm, iter_='Last')
def save_images(rbm, iter_=''):
    """
    Saves a 5x5 grid of the RBM's reconstructed digits to '<iter_>_iter.png'.
    """
    fig, axes = plt.subplots(5, 5)
    plt.suptitle(f'Restricted Boltzmann Machine - {iter_} Iteration')
    # 'First' shows the earliest stored reconstructions, anything else the latest.
    snapshot = 0 if iter_ == 'First' else -1
    images = rbm.training_reconstructions[snapshot]
    for cnt, cell in enumerate(axes.flat):
        cell.imshow(images[cnt].reshape((28, 28)), cmap='gray')
        cell.axis('off')
    fig.savefig(f'{iter_.lower()}_iter.png')
    plt.close()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 17:11:20 2018
@author: rstreet
"""
import rubin_sim.maf.db as db
import rubin_sim.maf.metrics as metrics
import rubin_sim.maf.slicers as slicers
import rubin_sim.maf.metricBundles as metricBundles
from rubin_sim.maf.metrics import BaseMetric
import numpy as np
from .calculate_lsst_field_visibility_astropy import calculate_lsst_field_visibility
__all__ = ['CalcExpectedVisitsMetric']
class CalcExpectedVisitsMetric(BaseMetric):
    """Metric to calculate the maximum possible number of visits to a
    given pointing, given the expected cadence of observation and within
    the date ranges given, taking target visibility into account.

    Input:
    :param array pointings: (RA, Dec) pointings, J2000.0
    :param float cadence: Interval between successive visits in the
        same single filter in hours
    :param string start_date: Start of observing window YYYY-MM-DD
    :param string end_date: End of observation window YYYY-MM-DD

    Output:
    :param list of arrays n_visits: Number of visits possible per night
        for each pointing
    :param list of arrays hrs_visibility: Hours of visibility per night
        for each pointing
    """
    def __init__(self, pointings,cadence,start_date,end_date,filter_id,
                 ra_col='fieldRA', dec_col='fieldDec',
                 metricName='CalcExpectedVisitsMetric',verbose=False):
        """Store the configuration; *ra_col*/*dec_col* name the opsim
        database columns this metric requires (passed to the base class).

        :param pointings: sequence of (RA, Dec) pointings
        :param cadence: hours between successive same-filter visits
        :param start_date: observing-window start, YYYY-MM-DD
        :param end_date: observing-window end, YYYY-MM-DD
        :param filter_id: filter whose cadence is assumed
        """
        self.pointings = pointings
        self.cadence = cadence
        self.start_date = start_date
        self.end_date = end_date
        self.filter_id = filter_id
        self.ra_col = ra_col
        self.dec_col = dec_col
        self.verbose = verbose
        columns = [ self.ra_col, self.dec_col ]
        super(CalcExpectedVisitsMetric,self).__init__(col=columns, metricName=metricName)
    def run(self, dataSlice, slicePoint=None):
        """Return (n_visits, hrs_visibility): one array per pointing, each
        holding the per-night visit count / visible hours."""
        n_visits = []
        hrs_visibility = []
        if self.verbose:
            print('Calculating visbility for '+str(len(self.pointings))+' fields')
        for i in range(0,len(self.pointings),1):
            #(ra, dec) = pointings[i]
            # NOTE(review): ra/dec are read from the dataSlice rather than
            # from pointings[i], so every iteration computes the same field
            # -- confirm whether per-pointing coordinates were intended.
            ra = dataSlice[self.ra_col][0]
            dec = dataSlice[self.dec_col][0]
            if self.verbose:
                print(' -> RA '+str(ra)+', Dec '+str(dec))
            (total_time_visible, hrs_visible_per_night) = calculate_lsst_field_visibility(ra,dec,self.start_date,self.end_date,verbose=False)
            # Possible visits per night = visible hours / per-filter cadence.
            n_visits.append( (np.array(hrs_visible_per_night) / self.cadence).astype(int) )
            hrs_visibility.append( np.array(hrs_visible_per_night) )
        return n_visits,hrs_visibility
|
from django.db import models
from django.conf import settings
from django.utils.html import mark_safe
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from os import path
DEFAULT_COVER = "cover/ntnui.png"
DEFAULT_LOGO = "logo/ntnui.svg"
def get_upload_location(type, instance, filename):
    ''' Build the media upload path "<type>/<group slug>/<filename>".

    Returns None when any component is missing.
    (todo: find a way to serialize...)
    '''
    if not (type and instance and filename):
        return None
    return path.join(type, instance.group.slug, filename)


def get_upload_cover(instance, filename):
    ''' Upload-path callback for group cover photos. '''
    return get_upload_location("cover_photo", instance, filename)


def get_upload_logo(instance, filename):
    ''' Upload-path callback for group logos. '''
    return get_upload_location("logo", instance, filename)
class GroupMediaModel(models.Model):
    ''' Cover photo and logo for a group, plus auto-generated resized variants. '''
    media_id = models.AutoField(primary_key=True)
    # Original cover upload; falls back to the default NTNUI artwork.
    cover = models.ImageField(
        upload_to=get_upload_cover, default=DEFAULT_COVER)
    # Derived 790x260 JPEG rendition of the cover (generated by imagekit).
    cover_image = ImageSpecField(source='cover', processors=[
        ResizeToFill(790, 260)],
        format='JPEG',
        options={'quality': 100})
    logo = models.ImageField(upload_to=get_upload_logo, default=DEFAULT_LOGO)
    # Derived 100x100 JPEG thumbnail of the logo.
    logo_image = ImageSpecField(source='logo', processors=[
        ResizeToFill(100, 100)],
        format='JPEG',
        options={'quality': 60})
    # One media record per group; deleted together with its group.
    group = models.OneToOneField(
        'GroupModel', on_delete=models.CASCADE, related_name='media')
    class Meta:
        ''' Configure the name displayed in the admin panel '''
        verbose_name = "Group Media"
        verbose_name_plural = "Group Media"
    def __str__(self):
        return "Media object for {}".format(str(self.group))
    def logo_tag(self):
        ''' HTML <img> snippet (admin display); defaults when not yet saved. '''
        return mark_safe('<img src={} style="height: 4rem;"/>'.format(self.logo.url if self.media_id else settings.MEDIA_URL + DEFAULT_LOGO))
    def cover_tag(self):
        ''' HTML <img> snippet (admin display); defaults when not yet saved. '''
        return mark_safe('<img src={} />'.format(self.cover.url if self.media_id else settings.MEDIA_URL + DEFAULT_COVER))
|
"""
Creando un VPC con python
"""
import boto3
cliente = boto3.resource('ec2')
vpc = cliente.create_vpc(CidrBlock='172.18.0.0/16')
vpc.create_tags(Tags=[{"Key":"VPC","Value":"En Python Curso AWS"}])
vpc.wait_until_available() |
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.core.containers.generic import GenericData
import unittest
import os
from icolos.core.containers.gmx_state import GromacsState
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, export_unit_test_env_vars
from icolos.utils.general.files_paths import attach_root_path
from icolos.core.workflow_steps.gromacs.trjconv import StepGMXTrjconv
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum
class Test_Trjconv(unittest.TestCase):
    """Exercises the GROMACS trjconv step against the bundled 1BVG fixtures."""
    @classmethod
    def setUpClass(cls):
        # Shared scratch directory for files emitted by the step.
        cls._test_dir = attach_root_path("tests/junk/gromacs")
        if not os.path.isdir(cls._test_dir):
            os.makedirs(cls._test_dir)
        export_unit_test_env_vars()
    def setUp(self):
        """Build an in-memory GromacsState from the example .xtc/.tpr/.gro files."""
        with open(PATHS_EXAMPLEDATA.GROMACS_1BVG_XTC, "rb") as f:
            xtc = f.read()
        with open(PATHS_EXAMPLEDATA.GROMACS_1BVG_TPR, "rb") as f:
            tpr = f.read()
        with open(
            attach_root_path(PATHS_EXAMPLEDATA.GROMACS_HOLO_STRUCTURE_GRO), "r"
        ) as f:
            struct = f.readlines()
        self.topol = GromacsState()
        self.topol.tprs = {0: GenericData(_SGE.STD_TPR, file_data=tpr)}
        self.topol.trajectories = {0: GenericData(_SGE.STD_XTC, file_data=xtc)}
        self.topol.structures = {0: GenericData(_SGE.STD_STRUCTURE, struct)}
    def test_trjconv(self):
        """Run trjconv with centering; check a non-trivial trajectory is written."""
        step_conf = {
            _SBE.STEPID: "test_trjconv",
            _SBE.STEP_TYPE: "trjconv",
            _SBE.EXEC: {
                _SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2021-fosscuda-2019a-PLUMED-2.7.1-Python-3.7.2"
            },
            _SBE.SETTINGS: {
                _SBE.SETTINGS_ARGUMENTS_FLAGS: ["-center"],
                # Interactive group selections piped to gmx: center on group 1,
                # output group 0 (the whole system).
                _SBE.SETTINGS_ADDITIONAL: {_SBE.PIPE_INPUT: "echo -ne 1 0"},
            },
        }
        step_trjconv = StepGMXTrjconv(**step_conf)
        step_trjconv.data.gmx_state = self.topol
        step_trjconv.execute()
        out_path = os.path.join(self._test_dir, "traj.xtc")
        step_trjconv.get_topol().write_trajectory(self._test_dir)
        # File size is a cheap proxy for "conversion produced real frames".
        stat_inf = os.stat(out_path)
        self.assertGreater(stat_inf.st_size, 607000)
|
"""A sample app that operates on GCS files with blobstore API's BlobReader."""
import cloudstorage
from google.appengine.api import app_identity
from google.appengine.ext import blobstore
import webapp2
class BlobreaderHandler(webapp2.RequestHandler):
    """Writes a demo object to the app's default GCS bucket, then reads it
    back through the Blobstore BlobReader API in three ways and streams the
    results to the HTTP response."""
    def get(self):
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()
        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobreader_demo'.format(bucket)
        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')
        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)
        # [START blob_reader]
        # Instantiate a BlobReader for a given Blobstore blob_key.
        blob_reader = blobstore.BlobReader(blob_key)
        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # buffer size to 1 MB.
        blob_reader = blobstore.BlobReader(blob_key, buffer_size=1048576)
        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # initial read position.
        blob_reader = blobstore.BlobReader(blob_key, position=0)
        # Read the entire value into memory. This may take a while depending
        # on the size of the value and the size of the read buffer, and is not
        # recommended for large values.
        blob_reader_data = blob_reader.read()
        # Write the contents to the response.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(blob_reader_data)
        # Set the read position back to 0, then read and write 3 bytes.
        blob_reader.seek(0)
        blob_reader_data = blob_reader.read(3)
        self.response.write(blob_reader_data)
        self.response.write('\n')
        # Set the read position back to 0, then read and write one line (up to
        # and including a '\n' character) at a time.
        blob_reader.seek(0)
        for line in blob_reader:
            self.response.write(line)
        # [END blob_reader]
        # Delete the file from Google Cloud Storage using the blob_key.
        blobstore.delete(blob_key)
# Route both the root URL and /blobreader to the same demo handler.
app = webapp2.WSGIApplication([
    ('/', BlobreaderHandler),
    ('/blobreader', BlobreaderHandler)], debug=True)
|
from __future__ import print_function
from datetime import datetime
from HTMLParser import HTMLParser
import re
class FbHTMLParserFindMeta(HTMLParser):
    """Extracts a group's name and numeric Facebook id from a group page.

    The id comes from a <meta content="fb://group/<id>"> tag; the name from
    the text of an <a class="_5r2h"> anchor. Results are delivered through
    the two callbacks given to the constructor.
    """
    pttrn_fb_id = re.compile('^fb://group/(\d+)$')

    def __init__(self, callback_found_name, callback_found_fb_id):
        HTMLParser.__init__(self)
        self.callback_found_name = callback_found_name
        self.callback_found_fb_id = callback_found_fb_id
        # Current nesting depth of <a> tags, and the depth of the anchor
        # whose text is the group name (None while outside it).
        self.tag_level_a = 0
        self.tag_level_a_target = None

    def handle_starttag(self, tag, attrs):
        attributes = dict(attrs)
        if tag == 'a':
            self.tag_level_a += 1
            if attributes.get('class') == '_5r2h':
                self.tag_level_a_target = self.tag_level_a
        elif tag == 'meta':
            content = attributes.get('content')
            if not content:
                return
            match = self.pttrn_fb_id.search(content)
            if match:
                try:
                    self.callback_found_fb_id(match.group(1))
                except IndexError:
                    pass

    def handle_data(self, data):
        # Any text encountered inside the targeted anchor is the group name.
        if self.tag_level_a_target is not None and data:
            self.callback_found_name(data)

    def handle_endtag(self, tag):
        if tag == 'a':
            if self.tag_level_a_target == self.tag_level_a:
                self.tag_level_a_target = None
            self.tag_level_a -= 1
class FbHTMLParserFindPost(HTMLParser):
    """Stream-parses a Facebook group page and emits one dict per post.

    Strategy: for each interesting tag name the parser tracks the current
    nesting depth; when a marker class/id is seen it records the depth at
    which that region opened, and the region is considered closed when the
    same tag closes back at the recorded depth. Completed posts (author,
    date, message text, and a nested comment/reply tree) are handed to
    ``callback_found_post`` as dicts with 'date', 'from', 'message' and
    'comments' keys.
    """
    def __init__(self, callback_found_post):
        HTMLParser.__init__(self)
        # Post containers carry DOM ids like 'mall_post_<number>'.
        self._post_outer_id_startswith = 'mall_post_'
        self._observed_tags = ('div', 'abbr', 'h5', 'a', 'span')
        self.callback_found_post = callback_found_post
        # Current nesting depth per observed tag name.
        self.cur_tag_level = dict((tag, 0) for tag in self._observed_tags)
        # Depths at which each region of interest opened (None = not inside).
        self.post_outer_tag_level = None
        self.post_author_tag_level = None
        self.post_inner_tag_level = None
        # Accumulators for the post currently being parsed.
        self.cur_post_date = None
        self.cur_post_author = None
        self.cur_post_text = []
        self.cur_post_comments = []
        self.orig_post_comments = None
        self.comment_list_tag_level = None
        self.comment_list_content_tag_level = None
        self.comment_list_content_author_tag_level = None
        self.comment_list_content_message_tag_level = None
        self.comment_replies_tag_level = None
        self.cur_comment = None
        self.prev_comment = None
    def handle_starttag(self, tag, attrs):
        """Open regions whose marker classes/ids appear on this tag."""
        if tag not in self._observed_tags:
            return
        attrib_dict = dict(attrs)
        self.cur_tag_level[tag] += 1
        if 'class' in attrib_dict:
            tag_classes = attrib_dict['class'].split()
        else:
            tag_classes = []
        if tag == 'div':
            # check if we have a outer post div
            if self.post_outer_tag_level is None\
                    and 'id' in attrib_dict and attrib_dict['id'].startswith(self._post_outer_id_startswith):
                self.post_outer_tag_level = self.cur_tag_level[tag]
            # check if we have an inner post div (contains post message text)
            if self.post_outer_tag_level is not None and self.post_inner_tag_level is None\
                    and '_5pbx' in tag_classes and 'userContent' in tag_classes:
                self.post_inner_tag_level = self.cur_tag_level[tag]
            # check if we have a comment list
            if self.post_outer_tag_level is not None and 'UFIList' in tag_classes:
                self.comment_list_tag_level = self.cur_tag_level[tag]
                self.cur_post_comments = []
            # check if we have a reply list on a comment
            if self.comment_list_tag_level is not None and 'UFIReplyList' in tag_classes:
                self.comment_replies_tag_level = self.cur_tag_level[tag]
                assert self.prev_comment and type(self.cur_post_comments) == list \
                    and type(self.prev_comment.get('comments')) == list
                # Replies are collected into the preceding comment's own
                # 'comments' list; keep the post-level list to restore later.
                self.orig_post_comments = self.cur_post_comments
                self.cur_post_comments = self.prev_comment.get('comments')
            # check if we have a comment content in a comment list:
            if self.comment_list_tag_level is not None and 'UFICommentContentBlock' in tag_classes:
                self.comment_list_content_tag_level = self.cur_tag_level[tag]
        elif tag == 'a':
            # check if we have an author in a comment content:
            if self.comment_list_content_tag_level is not None and 'UFICommentActorName' in tag_classes:
                self.comment_list_content_author_tag_level = self.cur_tag_level[tag]
        elif tag == 'span':
            # check if we have a comment message:
            if self.comment_list_content_tag_level is not None and 'UFICommentBody' in tag_classes:
                self.comment_list_content_message_tag_level = self.cur_tag_level[tag]
        elif tag == 'abbr':
            # check if we have a post date field
            if self.post_outer_tag_level is not None and self.comment_list_tag_level is None\
                    and '_5ptz' in tag_classes and 'data-utime' in attrib_dict:
                dt_obj = datetime.fromtimestamp(int(attrib_dict['data-utime']))
                self.cur_post_date = dt_obj.strftime('%Y-%m-%d %H:%M:%S')
            # check if we have a comment date field
            if self.comment_list_tag_level is not None\
                    and 'livetimestamp' in tag_classes and 'data-utime' in attrib_dict:
                dt_obj = datetime.fromtimestamp(int(attrib_dict['data-utime']))
                assert self.cur_comment
                self.cur_comment['date'] = dt_obj.strftime('%Y-%m-%d %H:%M:%S')
        elif tag == 'h5':
            # check if we have an author field
            if self.post_outer_tag_level is not None and self.post_author_tag_level is None\
                    and '_5pbw' in tag_classes:
                self.post_author_tag_level = self.cur_tag_level[tag]
    def handle_endtag(self, tag):
        """Close any region whose tag closes back at its recorded depth."""
        if tag not in self._observed_tags:
            return
        if tag == 'h5':
            if self.post_author_tag_level == self.cur_tag_level[tag]:
                self.post_author_tag_level = None
        elif tag == 'a':
            # check comment author:
            if self.comment_list_content_author_tag_level == self.cur_tag_level[tag]:
                self.comment_list_content_author_tag_level = None
        elif tag == 'span':
            # check comment message:
            if self.comment_list_content_message_tag_level == self.cur_tag_level[tag]:
                self.comment_list_content_message_tag_level = None
        elif tag == 'div':
            # check inner post tag
            if self.post_inner_tag_level == self.cur_tag_level[tag]:
                self.post_inner_tag_level = None
            # check comment list
            if self.comment_list_tag_level == self.cur_tag_level[tag]:
                self.comment_list_tag_level = None
            # check comment reply list
            if self.comment_replies_tag_level == self.cur_tag_level[tag]:
                # Restore the post-level comment list saved when replies began.
                self.cur_post_comments = self.orig_post_comments
                self.comment_replies_tag_level = None
            # check comment content
            if self.comment_list_content_tag_level == self.cur_tag_level[tag]:
                assert self.cur_comment
                self.comment_list_content_tag_level = None
                # Flatten collected text fragments into one message string.
                self.cur_comment['message'] = ' '.join(self.cur_comment['message'])
                self.cur_post_comments.append(self.cur_comment)
                self.prev_comment = self.cur_comment
                self.cur_comment = None # reset
            # check outer post tag
            if self.post_outer_tag_level == self.cur_tag_level[tag]:
                self.post_outer_tag_level = None
                post_data = {
                    'date': self.cur_post_date,
                    'from': self.cur_post_author,
                    'message': u'\n'.join(self.cur_post_text),
                    'comments': self.cur_post_comments
                }
                self.callback_found_post(post_data)
                self.cur_post_text = [] # reset
                self.cur_post_comments = []
                self.prev_comment = None
        self.cur_tag_level[tag] -= 1
    def handle_data(self, data):
        """Route text nodes to whichever open region is collecting text."""
        if self.post_author_tag_level is not None:
            self.cur_post_author = data
        if self.post_inner_tag_level is not None:
            self.cur_post_text.append(data)
        if self.comment_list_content_author_tag_level is not None:
            # A new comment starts when its author anchor text is seen.
            self.cur_comment = {
                'from': data,
                'date': None,
                'message': [],
                'comments': []
            }
        if self.comment_list_content_message_tag_level is not None:
            assert self.cur_comment
            self.cur_comment['message'].append(data)
class FbParser(object):
    """Facade that runs both page parsers and collects their results.

    After ``parse``, ``self.output`` holds a dict with group metadata under
    'meta' and the list of extracted posts under 'data'.
    """

    def __init__(self):
        self.find_post_parser = FbHTMLParserFindPost(self.found_post_callback)
        self.find_meta_parser = FbHTMLParserFindMeta(self.found_meta_name_callback, self.found_meta_fb_id_callback)
        self.output = None

    def parse(self, html):
        """Parse a full group page; results accumulate in ``self.output``."""
        scraped_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.output = {
            'meta': {
                'name': None,
                'type': 'group',
                'fb_id': None,
                'date': scraped_at
            },
            'data': []
        }
        self.find_meta_parser.feed(html)
        self.find_post_parser.feed(html)

    def found_meta_name_callback(self, name):
        assert self.output
        self.output['meta']['name'] = name

    def found_meta_fb_id_callback(self, fb_id):
        assert self.output
        self.output['meta']['fb_id'] = fb_id

    def found_post_callback(self, post):
        assert self.output
        self.output['data'].append(post)
|
from django.db.models.sql.query import * # NOQA
from django.db.models.sql.query import Query
from django.db.models.sql.subqueries import * # NOQA
from django.db.models.sql.where import AND, OR
# Re-export shim: expose Django's SQL internals under this package's namespace.
__all__ = ["Query", "AND", "OR"]
|
#!/usr/bin/env python
# encoding: utf-8
"""
LatestEntries.py
Created by Rui Carmo on 2011-09-02.
Published under the MIT license.
"""
import re, md5, urlparse, time, cgi, traceback
import yaki.Engine, yaki.Store, yaki.Locale
from yaki.Utils import *
from yaki.Layout import *
from BeautifulSoup import *
class LatestBlogEntriesWikiPlugin(yaki.Plugins.WikiPlugin):
    """Wiki plugin (Python 2) that renders excerpts of the latest blog entries.

    Registered as the 'LatestEntries' markup plugin; replaces its tag in
    the page soup with rendered excerpts of the most recent blog posts.
    """

    def __init__(self, registry, webapp):
        # register under the 'LatestEntries' plugin name
        registry.register('markup',self, 'plugin','LatestEntries')
        self.webapp = webapp
        self.ac = webapp.getContext()
        self.i18n = yaki.Locale.i18n[self.ac.locale]

    def run(self, serial, tag, tagname, pagename, soup, request, response):
        """Replace the plugin tag with rendered excerpts of recent posts."""
        ac = self.ac
        c = request.getContext()
        # define how many blog entries to show
        try:
            bound = int(tag['size'])
        except:
            bound = 3
        # filter for the namespace we want
        # TODO: this should be extensible to tags sometime in the future
        try:
            mask = re.compile(tag['src'])
        except:
            mask = re.compile('^(blog)\/(\d+){4}\/(\d+){2}\/(\d+){2}.*')
        # this is what entries ought to look like, ideally
        canon = "0000/00/00/0000"
        # find entries.
        # We use the indexer's allpages here because that's updated upon server start
        # ...and because we want to do our own sorting anyway.
        paths = [path for path in self.ac.indexer.allpages if mask.match(path)]
        # canonize paths
        entries = {}
        for i in paths:
            (prefix, path) = i.split("/",1)
            l = len(path)
            p = len(prefix)+1
            k = len(canon)
            # add an hex digest in case there are multiple entries at the same time
            if l < k:
                entries[i[p:l+p] + canon[-(k-l):] + md5.new(i).hexdigest()] = i
            else:
                entries[i[p:] + md5.new(i).hexdigest()] = i
        latest = entries.keys()
        latest.sort()
        latest.reverse()
        # skip over the latest entry
        latest = latest[1:bound+1]
        posts = []
        for i in latest:
            name = entries[i]
            try:
                page = ac.store.getRevision(name)
            except IOError:
                print "LatestBlogEntries: could not retrieve %s" % name
                continue
            headers = page.headers
            path = ac.base + name
            linkclass = "wikilink"
            posttitle = headers['title']
            rellink = path
            permalink = headers['bookmark'] = request.getBaseURL() + rellink
            if SANITIZE_TITLE_REGEX.match(name):
                permalink = permalink + "#%s" % sanitizeTitle(posttitle)
            description = "permanent link to this entry"
            if 'x-link' in headers.keys():
                link = uri = headers['x-link']
                (schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(uri)
                if schema in self.i18n['uri_schemas'].keys():
                    linkclass = self.i18n['uri_schemas'][schema]['class']
                    description = "external link to %s" % cgi.escape(uri)
            content = yaki.Engine.renderPage(self.ac,page)
            try:
                soup = BeautifulSoup(content)
                # remove all funky markup
                for unwanted in ['img','plugin','div','pre']:
                    [i.extract() for i in soup.findAll(unwanted)]
                paragraphs = filter(lambda p: p.contents, soup.findAll('p'))
                soup = paragraphs[0]
                content = soup.renderContents().decode('utf-8')
                # TODO: impose bound checks here and insert ellipsis if appropriate.
                # the "Read More" links are added in the template below.
            except Exception, e:
                print "DEBUG: failed to trim content to first paragraph for entry %s, %s" % (name, e)
                continue
            postinfo = renderInfo(self.i18n,headers)
            metadata = renderEntryMetaData(self.i18n,headers)
            # Generate c.comments
            formatComments(ac,request,name, True)
            comments = c.comments
            try:
                tags = headers['tags']
            except:
                tags = ""
            references = ''
            # NOTE: the template is interpolated with locals(), so the local
            # variable names above (posttitle, permalink, content, comments,
            # postinfo, metadata, tags, ...) are part of the template
            # contract — do not rename them.
            posts.append(ac.templates['latest-entries'] % locals())
        tag.replaceWith(''.join(posts))
|
import datetime
import pytest
from ksc.collector import base
from ksc.collector.model import collector
class MockedCollector(base.Collector):
    """Concrete stand-in for the abstract base collector.

    Only the base-class constructor logic is under test in this module,
    so ``collect`` is intentionally left unimplemented.
    """

    def collect(self) -> collector.Result:
        # Never called by these tests.
        ...
def test_no_arg_constructor() -> None:
    """Default construction: until is unset, since defaults to today (UTC)."""
    instance = MockedCollector()
    now = datetime.datetime.utcnow()
    assert instance.until is None
    assert instance.since is not None
    assert (instance.since.year, instance.since.month, instance.since.day) == \
        (now.year, now.month, now.day)
def test_since_only_constructor() -> None:
    """Passing only *since* still yields a populated *until*."""
    start = datetime.datetime.utcnow().replace(month=1)
    instance = MockedCollector(since=start, until=None)
    assert instance.since is not None
    assert instance.until is not None
def test_until_only_constructor_fails() -> None:
    """*until* without *since* must be rejected."""
    end = datetime.datetime.utcnow()
    with pytest.raises(ValueError):
        MockedCollector(since=None, until=end)
def test_if_since_larger_then_until() -> None:
    """A *since* that falls after *until* must be rejected."""
    start = datetime.datetime.utcnow().replace(day=4, month=1)
    end = datetime.datetime.utcnow().replace(day=3, month=1)
    with pytest.raises(ValueError):
        MockedCollector(since=start, until=end)
def test_dates_are_in_utc() -> None:
    """Both range boundaries come back normalized to UTC."""
    start = datetime.datetime.utcnow().replace(day=2, month=1)
    end = datetime.datetime.utcnow().replace(day=3, month=1)
    instance = MockedCollector(since=start, until=end)
    assert instance.since.tzname() == 'UTC'
    if instance.until:
        assert instance.until.tzname() == 'UTC'
|
#!/usr/bin/env python
import functools
class Solution:
    def largestNumber(self, nums):
        """Arrange the numbers so their concatenation is maximal.

        :type nums: List[int]
        :rtype: str
        """
        digits = [str(value) for value in nums]

        def order(left, right):
            # sort `left` after `right` whenever right+left concatenates larger
            return 1 if right + left > left + right else -1

        digits.sort(key=functools.cmp_to_key(order))
        # strip leading zeros; an all-zero input collapses to a single '0'
        joined = ''.join(digits).lstrip('0')
        return joined or '0'
# Ad-hoc manual check of Solution.largestNumber.
sol = Solution()
# NOTE: each assignment below overwrites the previous one; only the LAST
# test case is actually exercised by the print statement.
nums = [10,2]
nums = [10, 20]
nums = [0, 0]
nums = [30,3,34,5,9]
print(sol.largestNumber(nums))
|
"""Helper scintific module
Module serves for custom methods to support Customer Journey Analytics Project
"""
# IMPORTS
# -------
# Standard libraries
import re
import ipdb
import string
import math
# 3rd party libraries
from google.cloud import bigquery
import numpy as np
import pandas as pd
import nltk
nltk.download(['wordnet', 'stopwords'])
STOPWORDS = nltk.corpus.stopwords.words('english')
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scikit_posthocs as sp
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
import rpy2
import rpy2.rlike.container as rlc
from rpy2 import robjects
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.vectors import ListVector
from rpy2.robjects.vectors import StrVector
from rpy2.robjects import pandas2ri
from matplotlib import pyplot as plt
import seaborn as sns
# MODULE FUNCTIONS
# ----------------
def get_dissimilarity(df, normalize=True):
    '''Compute each observation's dissimilarity from the average observation.

    Args:
        df: Data as dataframe of shape (# observations, # variables)
        normalize: Standardize variables before measuring distance.
            Only True is currently supported.

    Returns:
        Series of Euclidean distances from the mean observation,
        indexed like df.
    '''
    if not normalize:
        raise Exception('Not implemented')
    # standardize so every variable contributes on a comparable scale
    scaled = StandardScaler().fit_transform(df)
    df = pd.DataFrame(scaled, columns=df.columns, index=df.index)
    # Euclidean distance of each row from the column means
    return ((df - df.mean()) ** 2).sum(axis=1) ** 0.5
def split_data(df, diss_var, dataset_names, threshold, dis_kws=None, **split_kws):
    '''Randomly split data into two sets while keeping every outlier
    (as determined by a multivariate dissimilarity threshold) in BOTH sets.

    Args:
        df: Data as dataframe of shape (# samples, # features)
        diss_var: Names of variables used for the dissimilarity measure,
            list of strings
        dataset_names: Names of the resulting datasets, list of strings
        threshold: Dissimilarity threshold above which a sample is an outlier
        dis_kws: Keyword arguments of the dissimilarity function (optional)
        split_kws: Keyword arguments of sklearn's train_test_split

    Returns:
        datasets: Dict mapping each dataset name to its dataframe
    '''
    # BUG FIX: dis_kws was a mutable default argument ({}) that the
    # function mutated in place, leaking state across calls; use the
    # None sentinel and copy to avoid mutating the caller's dict.
    dis_kws = {} if dis_kws is None else dict(dis_kws)
    dis_kws.setdefault('normalize', True)
    dissimilarity = get_dissimilarity(df[diss_var], dis_kws['normalize'])

    # separate outliers from the bulk of the data
    ext_mask = dissimilarity > threshold
    X_ext = df.loc[ext_mask]
    X = df.loc[~ext_mask]

    # drop one random sample so the non-outlier part splits evenly,
    # keeping the same number of samples in both halves
    if X.shape[0] % 2 != 0:
        split_kws.setdefault('random_state', 1)
        drop_indices = (X.sample(1, random_state=split_kws['random_state'])
                        .index)
        X = X.drop(drop_indices)

    # random split into two groups
    Xa, Xb = train_test_split(X, **split_kws)

    # attach the outliers to every dataset
    # (DataFrame.append was removed in pandas 2.0; use concat instead)
    datasets = {dataset_name: pd.concat([dataset, X_ext])
                for dataset_name, dataset in zip(dataset_names, [Xa, Xb])}
    return datasets
def analyze_cluster_solution(df, vars_, labels, **kws):
    '''Analyzes cluster solution. Following analyses are done:
    1) Hypothesis testing of clusters averages difference
        a) One way ANOVA
        b) ANOVA assumptions
            - residuals normality test: Shapiro-Wilk test
            - equal variances test: Leven's test
        c) Kruskal-Wallis non parametric test
        d) All-Pair non parametric test, Conover test by default
    2) Cluster profile vizualization
    3) Cluster scatterplot vizualization
    Args:
        df: Dataset as pandas dataframe
            of shape(# observations, # variables)
        vars_: Clustering variables as list of strings
        labels: Variable holding cluster labels as string
        kws: Key words arguments of post-hoc test
    Returns:
        summary: Dataframe of hypothesis tests
        post_hoc: List of post_hoc test for each clustering variable
        prof_ax: Axes of profile vizualization
        clst_pg: PairGrid of cluster vizulization
    Note:
        Uses the global `display` function — presumably the IPython
        notebook builtin; confirm before running outside a notebook.
        The pairplot hard-codes recency/frequency/monetary columns.
    '''
    def color_not_significant_red(val, signf=0.05):
        '''Takes a scalar and returns a string with the css property
        `'color: red'` for non significant p_value
        '''
        color = 'red' if val > signf else 'black'
        return 'color: %s' % color
    # get number of seeds
    num_seeds = len(df.groupby(labels).groups)
    # run tests (Conover post-hoc by default)
    kws['post_hoc_fnc'] = (sp.posthoc_conover if 'post_hoc_fnc' not in kws
                           else kws['post_hoc_fnc'])
    summary, post_hoc = profile_cluster_labels(
        df, labels, vars_, **kws)
    # print hypothesis tests
    str_ = 'PROFILE SUMMARY FOR {}'.format(labels.upper())
    print(str_ + '\n' + '-' * len(str_) + '\n')
    str_ = 'Hypothesis testing of clusters averages difference'
    print(str_ + '\n' + '-' * len(str_))
    display(summary.round(2))
    # print post-hoc tests
    str_ = '\nPost-hoc test: {}'.format(kws['post_hoc_fnc'].__name__)
    print(str_ + '\n' + '-' * len(str_) + '\n')
    for var in post_hoc:
        print('\nclustering variable:', var)
        display(post_hoc[var].round(2)
                .style.applymap(color_not_significant_red))
    # print profiles
    str_ = '\nProfile vizualization'
    print(str_ + '\n' + '-' * len(str_))
    prof_ax = (df
               .groupby(labels)
               [vars_]
               .mean()
               .transpose()
               .plot(title='Cluster Profile')
               )
    plt.ylabel('Standardized scale')
    plt.xlabel('Clustering variables')
    plt.show()
    # print scatterplots
    str_ = '\nClusters vizualization'
    print(str_ + '\n' + '-' * len(str_))
    clst_pg = sns.pairplot(x_vars=['recency', 'monetary'],
                           y_vars=['frequency', 'monetary'],
                           hue=labels, data=df, height=3.5)
    clst_pg.set(yscale='log')
    clst_pg.axes[0, 1].set_xscale('log')
    clst_pg.fig.suptitle('Candidate Solution: {} seeds'
                         .format(num_seeds), y=1.01)
    plt.show()
    return summary, post_hoc, prof_ax, clst_pg
def profile_cluster_labels(df, group, outputs, post_hoc_fnc=sp.posthoc_conover):
    '''Test how distinctly the cluster labels separate the output variables.

    For every output variable runs one-way ANOVA, Levene's equal-variance
    test, Shapiro-Wilk normality of OLS residuals, Kruskal-Wallis, and an
    all-pairs post-hoc test (Conover by default).

    Args:
        df: Data with clustering variables and candidate solutions
            as dataframe of shape (# samples, # variables +
            candidate solutions)
        group: Grouping variable (cluster labels) as string
        outputs: Output variables for hypothesis testing as list of strings
        post_hoc_fnc: All-pairs post-hoc test function

    Returns:
        summary: Dataframe of per-output means/medians and test p-values
        post_hoc: Dict of post-hoc test results keyed by output variable
    '''
    # per-cluster means/medians form the backbone of the summary table
    summary = (df.groupby(group)[outputs]
               .agg(['mean', 'median'])
               .T.unstack(level=-1)
               .swaplevel(axis=1)
               .sort_index(level=0, axis=1))
    post_hoc = {}
    for output in outputs:
        # observations of this output, split by cluster label
        samples = [df[output][df[group] == label]
                   for label in df[group].unique()]
        # one-way ANOVA F-test p-value
        _, summary.loc[output, 'anova_p'] = stats.f_oneway(*samples)
        # Levene's test for equal variances
        _, summary.loc[output, 'levene_p'] = stats.levene(*samples)
        # Shapiro-Wilk normality of residuals from a one-way OLS fit
        fit = ols('{} ~ C({})'.format(output, group), data=df).fit()
        _, summary.loc[output, 'shapiro_wilk_p'] = stats.shapiro(fit.resid)
        # Kruskal-Wallis H-test p-value
        _, summary.loc[output, 'kruskal_wallis_p'] = stats.kruskal(*samples)
        # all-pairs multiple comparison (Conover by default)
        post_hoc[output] = post_hoc_fnc(
            df, val_col=output, group_col=group)  # , p_adjust ='holm')
    return summary, post_hoc
def get_missmatch(**kws):
    '''Cross-tabulate on two columns and measure the missmatch — the share
    of observations falling outside each row's dominant cell.

    Args:
        kws: Keyword arguments forwarded to pd.crosstab

    Returns:
        cross_tab: Result of the cross tabulation as dataframe
        missmatch_rows: Missmatch proportion per row as series
        total_missmatch: Overall missmatch proportion as float
    '''
    cross_tab = pd.crosstab(**kws)
    row_totals = cross_tab.sum(axis=1)
    # observations per row outside the row's modal (largest) cell
    off_modal = row_totals - cross_tab.max(axis=1)
    total_missmatch = off_modal.sum() / cross_tab.sum().sum()
    missmatch_rows = off_modal / row_totals
    missmatch_rows.name = 'missmatch_proportion'
    return cross_tab, missmatch_rows, total_missmatch
def query_product_info(client, query_params):
    '''Query product information from bigquery database.
    Distinct records of product_sku, product_name,
    product_brand, product_brand_grp,
    product_category, product_category_grp.
    Args:
        client: Instatiated bigquery.Client to query distinct product
                description (product_sku, product_name, product_category,
                product_category_grp)
        query_params: Query parameters for client (list of bigquery
                      query parameters supplying @start_date/@end_date)
    Returns:
        df: product information as distinct records
            as pandas dataframe (# records, # variables)
    '''
    # Check arguments
    # ----------------
    assert isinstance(client, bigquery.Client)
    assert isinstance(query_params, list)
    # Query distinct products descriptions
    # ------------------------------------
    # Parameterized query (@start_date, @end_date bound via job_config)
    # over the public Google Analytics sample dataset.
    query='''
    SELECT DISTINCT
        hits_product.productSku AS product_sku,
        hits_product.v2productName AS product_name,
        hits_product.productBrand AS product_brand,
        hits.contentGroup.contentGroup1 AS product_brand_grp,
        hits_product.v2productCategory AS product_category,
        hits.contentGroup.contentGroup2 AS product_category_grp
    FROM
        `bigquery-public-data.google_analytics_sample.ga_sessions_*`
        LEFT JOIN UNNEST(hits) AS hits
        LEFT JOIN UNNEST(hits.product) AS hits_product
    WHERE
        _TABLE_SUFFIX BETWEEN @start_date AND @end_date
        AND hits_product.productSku IS NOT NULL
    ORDER BY
        product_sku
    '''
    job_config = bigquery.QueryJobConfig()
    job_config.query_parameters = query_params
    df = client.query(query, job_config=job_config).to_dataframe()
    return df
def reconstruct_brand(product_sku, df):
    '''Reconstructs brand from product name and brand variables.

    Args:
        product_sku: product_sku as of transaction records on product level
            of size # transactions on product level
        df: Product information as output of helper.query_product_info
            in form of dataframe of shape (# of distinct records,
            # of variables)

    Returns:
        recon_brand: reconstructed brand column as pandas series
            of size # of transactions
    '''
    # predict brand name from product name for each sku
    # -------------------------------------------------
    # valid brands
    brands = ['Android',
              'Chrome',
              r'\bGo\b',
              'Google',
              'Google Now',
              'YouTube',
              'Waze']

    # concatenate different product names for each sku
    brand_df = (df[['product_sku', 'product_name']]
                .drop_duplicates()
                .groupby('product_sku')
                ['product_name']
                .apply(lambda product_name: ' '.join(product_name))
                .reset_index()
                )

    # drop (not set) sku's
    brand_df = brand_df.drop(
        index=brand_df.index[brand_df['product_sku'] == '(not set)'])

    # predict brand name from product name for each sku.
    # BUG FIX: flags=re.IGNORECASE was previously passed to str.format()
    # (where it was silently discarded) instead of str.extract(), so
    # matching was accidentally case-sensitive.
    brand_df['recon_brand'] = (
        brand_df['product_name']
        .str.extract(r'({})'.format('|'.join(set(brands))),
                     flags=re.IGNORECASE)
    )

    # adjust brand taking account spelling errors in product names
    brand_df.loc[
        brand_df['product_name'].str.contains('You Tube', case=False),
        'recon_brand'
    ] = 'YouTube'

    # predict brand name from brand variables for sku's where
    # brand couldn't be predicted from product name
    # --------------------------------------------------------
    # get distinct product_sku / brand variable associations, dropping
    # missing, '(not set)' and ambiguous (multi-brand) sku's
    brand_vars = ['product_brand', 'product_brand_grp']
    brand_var = dict()
    for brand in brand_vars:
        brand_var[brand] = (df[['product_sku', brand]]
                            .drop(index=df.index[(df['product_sku'] == '(not set)')
                                                 | df['product_sku'].isna()
                                                 | (df[brand] == '(not set)')
                                                 | df[brand].isna()])
                            .drop_duplicates()
                            .drop_duplicates(subset='product_sku', keep=False))

    # check for brand ambiguity at sku level
    old_brand = brand_var['product_brand'].set_index('product_sku')
    new_brand = brand_var['product_brand_grp'].set_index('product_sku')
    shared_sku = old_brand.index.intersection(new_brand.index)
    if not shared_sku.empty:
        # BUG FIX: row selection previously used old_brand[shared_sku],
        # which selects COLUMNS on a dataframe and raised KeyError whenever
        # skus were shared; use .loc for label-based row selection.
        # squeeze(axis=1) keeps a Series even for a single shared sku.
        ambigious_sku = shared_sku[
            old_brand.loc[shared_sku].squeeze(axis=1).values
            != new_brand.loc[shared_sku].squeeze(axis=1).values
        ]
        # delete sku's whose two brand variables disagree
        old_brand = old_brand.drop(index=ambigious_sku, errors='ignore')
        # drop every shared sku from new_brand: disagreeing ones are
        # invalid, agreeing ones are already represented in old_brand
        new_brand = new_brand.drop(index=shared_sku, errors='ignore')

    # concatenate all associations of brand variables and product sku's
    brand_var = pd.concat([old_brand.rename(columns={'product_brand':
                                                     'recon_brand_var'}),
                           new_brand.rename(columns={'product_brand_grp':
                                                     'recon_brand_var'})])

    # predict brand name from brand variables
    brand_df.loc[brand_df['recon_brand'].isna(), 'recon_brand'] = (
        pd.merge(brand_df['product_sku'], brand_var,
                 on='product_sku', how='left')
        ['recon_brand_var']
    )

    # recode remaining missing (not set) brands by Google brand
    # ---------------------------------------------------------
    brand_df['recon_brand'] = brand_df['recon_brand'].fillna('Google')

    # predict brand from brand names and variables on transaction data
    # ----------------------------------------------------------------
    recon_brand = (pd.merge(product_sku.to_frame(),
                            brand_df[['product_sku', 'recon_brand']],
                            on='product_sku',
                            how='left')
                   .reindex(product_sku.index)
                   ['recon_brand'])
    return recon_brand
def reconstruct_category(product_sku, df, category_spec):
    '''Reconstructs category from category variables and product names.
    Args:
        product_sku: product_sku from transaction records on product level
                     of size # transactions on product level
        df: Product information as output of
            helper.query_product_info in form of dataframe
            of shape (# of distinct records, # of variables)
        category_spec: Dictionary with keys as category variable names
                       and values as mappings between category variable
                       levels and category labels in form of dataframe
    Returns:
        recon_category: reconstructed category labels as pandas series
                        of size # of transactions on product level.
                        Missing labels are predicted from product names by
                        a screened Naive Bayes text classifier; raises if
                        its test f1 weighted score is below 0.8.
    '''
    # Check arguments
    # ----------------
    assert isinstance(product_sku, pd.Series)
    assert isinstance(df, pd.DataFrame)
    assert isinstance(category_spec, dict)
    # reconstruct category name from product name for each sku
    # --------------------------------------------------------
    def get_category_representation(category_label, valid_categories):
        '''Handle multiple categories assigned to one sku.
        For ambigious categories returns missing value.
        Args:
            category_label: Series of category labels for
                            particular sku
            valid_categories: Index of valid unique categories
        Returns:
            label: valid category label or missing value
        '''
        label = valid_categories[valid_categories.isin(category_label)]
        if label.empty or label.size > 1:
            return np.nan
        else:
            return label[0]
    def label_category_variable(df, category_var, label_spec):
        '''reconstruct category labels from category variable.
        Args:
            df: Product information dataframe.
            category_var: Name of category variable to reconstruct labels
            label_spec: Label mapping between category variable levels
                        and labels.
        Returns:
            var_label: Label mapping to product name as dataframe
        '''
        valid_categories = pd.Index(label_spec
                                    .groupby(['category_label'])
                                    .groups
                                    .keys())
        var_label = (pd.merge(df[['product_name', category_var]]
                              .drop_duplicates(),
                              label_spec,
                              how='left',
                              on=category_var)
                     [['product_name', 'category_label']]
                     .groupby('product_name')
                     ['category_label']
                     .apply(get_category_representation,
                            valid_categories=valid_categories)
                     .reset_index())
        return var_label
    def screen_fit_model(data):
        '''Screens Naive Bayes Classifiers and selects best model
        based on f1 weigted score. Returns fitted model and score.
        Args:
            data: Text and respective class labels as dataframe
                  of shape (# samples, [text, labels])
        Returns:
            model: Best fitted sklearn model
            f1_weighted_score: Test f1 weighted score
        Note: Following hyperparameters are tested
            Algorithm: MultinomialNB, ComplementNB
            ngrams range: (1, 1), (1, 2), (1, 3)
            binarization: False, True
        '''
        # vectorize text inforomation in product_name
        # NOTE(review): this preprocessor is defined but never passed to
        # CountVectorizer below — confirm whether it should be wired in.
        def preprocessor(text):
            # not relevant words
            not_relevant_words = ['google',
                                  'youtube',
                                  'waze',
                                  'android']
            # transform text to lower case and remove punctuation
            text = ''.join([word.lower() for word in text
                            if word not in string.punctuation])
            # tokenize words
            tokens = re.split('\W+', text)
            # Drop not relevant words and lemmatize words
            wn = nltk.WordNetLemmatizer()
            text = ' '.join([wn.lemmatize(word) for word in tokens
                             if word not in not_relevant_words + STOPWORDS])
            return text
        # define pipeline
        pipe = Pipeline([('vectorizer', CountVectorizer()),
                         ('classifier', None)])
        # define hyperparameters
        param_grid = dict(vectorizer__ngram_range=[(1, 1), (1, 2), (1, 3)],
                          vectorizer__binary=[False, True],
                          classifier=[MultinomialNB(),
                                      ComplementNB()])
        # screen naive buyes models
        grid_search = GridSearchCV(pipe, param_grid=param_grid, cv=5,
                                   scoring='f1_weighted', n_jobs=-1)
        # devide dataset to train and test set using stratification
        # due to high imbalance of lables frequencies
        x_train, x_test, y_train, y_test = train_test_split(
            data['product_name'],
            data['recon_category'],
            test_size=0.25,
            stratify=data['recon_category'],
            random_state=1)
        # execute screening and select best model
        grid_search.fit(x_train, y_train)
        # calculate f1 weighted test score
        y_pred = grid_search.predict(x_test)
        f1_weigted_score = f1_score(y_test, y_pred, average='weighted')
        return grid_search.best_estimator_, f1_weigted_score
    # reconstruct category label from cateogry variables
    recon_labels = dict()
    for var, label_spec in category_spec.items():
        recon_labels[var] = (label_category_variable(df, var, label_spec)
                             .set_index('product_name'))
    # fall back to the grouped category variable where the primary
    # category variable produced no label
    recon_labels['product_category'][
        recon_labels['product_category'].isna()
    ] = recon_labels['product_category_grp'][
        recon_labels['product_category'].isna()
    ]
    # reconstruct category label from produc names
    valid_categories = pd.Index(category_spec['product_category_grp']
                                .groupby(['category_label'])
                                .groups
                                .keys())
    category_df = (pd.merge(df[['product_sku', 'product_name']]
                            .drop_duplicates(),
                            recon_labels['product_category'],
                            how='left',
                            on = 'product_name')
                   [['product_sku', 'product_name', 'category_label']]
                   .groupby('product_sku')
                   .agg({'product_name': lambda name: name.str.cat(sep=' '),
                         'category_label': lambda label:
                             get_category_representation(label, valid_categories)})
                   .reset_index())
    category_df.rename(columns={'category_label': 'recon_category'},
                       inplace=True)
    # associate category from category names and variables on transaction data
    # ------------------------------------------------------------------------
    recon_category = (pd.merge(product_sku.to_frame(),
                               category_df[['product_sku', 'recon_category']],
                               on='product_sku',
                               how='left')
                      )
    # predict category of transactions where category is unknown
    # Multinomial and Complement Naive Bayes model is screened
    # and finetuned using 1-grams, 2-grams and 3-grams
    # as well as binarization (Tru or False)
    # best model is selected based on maximizing test f1 weigted score
    # ----------------------------------------------------------------
    # screen best model and fit it on training data
    model, f1_weighted_score = screen_fit_model(
        category_df[['product_name', 'recon_category']]
        .dropna()
    )
    # predict category labels if model has f1_weighted_score > threshold
    f1_weighted_score_threshold = 0.8
    if f1_weighted_score < f1_weighted_score_threshold:
        raise Exception(
            'Accuracy of category prediction below threshold {:.2f}'
            .format(f1_weighted_score_threshold))
    else:
        product_name = (pd.merge(recon_category
                                 .loc[recon_category['recon_category'].isna(),
                                      ['product_sku']],
                                 category_df[['product_sku', 'product_name']],
                                 how='left',
                                 on='product_sku')
                        ['product_name'])
        category_label = model.predict(product_name)
        recon_category.loc[recon_category['recon_category'].isna(),
                           'recon_category'] = category_label
    return recon_category['recon_category']
def reconstruct_sales_region(subcontinent):
    '''Map a subcontinent name onto a sales region.

    Returns np.nan for missing or '(not set)' input and raises for any
    subcontinent that matches no known region.
    '''
    if pd.isna(subcontinent) or subcontinent.lower() == '(not set)':
        return np.nan
    name = subcontinent.lower()
    if 'africa' in name or 'europe' in name:
        return 'EMEA'
    if 'caribbean' in name or name == 'central america':
        return 'Central America'
    if name == 'northern america':
        return 'North America'
    if name == 'south america':
        return 'South America'
    if 'asia' in name or name == 'australasia':
        return 'APAC'
    raise Exception(
        'Can not assign sales region to {} subcontinent'
        .format(subcontinent))
def reconstruct_traffic_keyword(text):
    '''Simplify a raw traffic keyword to a cleaner representation.

    Missing values become '(not applicable)', tracking-style tokens mixing
    digits with upper/lower case become '(not relevant)', and multi-word
    keywords are lower-cased, de-punctuated and lemmatized.
    '''
    # missing keyword
    if pd.isna(text):
        return '(not applicable)'
    # single token mixing digits, upper-case/=/- and lower-case characters
    # (looks like a tracking id rather than a search phrase)
    if re.search(r'(?=.*\d)(?=.*[A-Z=\-])(?=.*[a-z])([\w=-]+)', text):
        return '(not relevant)'
    # keep '(not provided)' and single-word keywords untouched
    if text == '(not provided)' or re.search('(\s+)', text) is None:
        return text
    # lower-case and strip punctuation (keeping '/' as a separator)
    cleaned = ''.join(ch.lower() for ch in text
                      if ch not in string.punctuation.replace('/', ''))
    # tokenize on non-word characters and '/'
    tokens = re.split('\W+|/', cleaned)
    # drop stopwords and lemmatize the rest
    lemmatizer = nltk.WordNetLemmatizer()
    return ' '.join(lemmatizer.lemmatize(word) for word in tokens
                    if word not in STOPWORDS)
def aggregate_data(df):
    '''Encode and aggregate engineered and missing value free data
    on client level
    Args:
        df: engineered and missing value free data as
            pandas dataframe of shape (# transaction items, # variables)
    Returns:
        agg_df: encoded and aggregated dataframe
            of shape(# clients, # encoded & engineered variables)
            with client_id index
    '''
    # identifiers
    id_vars = pd.Index(
        ['client_id',
         'session_id',
         'transaction_id',
         'product_sku']
    )
    # session variables (trailing comment notes the aggregation applied)
    session_vars = pd.Index(
        ['visit_number', # avg_visits
         'date', # month, week, week_day + one hot encode + sum
         'pageviews', # avg_pageviews
         'time_on_site', # avg_time_on_site
         'ad_campaign', # sum
         'source', # one hot encode + sum
         'browser', # one hot encode + sum
         'operating_system', # one hot encode + sum
         'device_category', # one hot encode + sum
         'continent', # one hot encode + sum
         'subcontinent', # one hot encode + sum
         'country', # one hot encode + sum
         'sales_region', # one hot encode + sum
         'social_referral', # sum
         'social_network', # one hot encode + sum
         'channel_group'] # one hot encode + sum
    )
    # group session variables from item to session level
    session_df = (df[['client_id',
                      'session_id',
                      *session_vars.to_list()]]
                  .drop_duplicates()
                  # drop ambigious region 1 case
                  .drop_duplicates(subset='session_id'))
    # reconstruct month, weeek and week day variables
    # session_df['month'] = session_df['date'].dt.month
    # session_df['week'] = session_df['date'].dt.week
    session_df['week_day'] = session_df['date'].dt.weekday + 1
    session_df = session_df.drop(columns='date')
    # encode variables on session level; these stay numeric/binary as-is
    keep_vars = [
        'client_id',
        'session_id',
        'visit_number',
        'pageviews',
        'time_on_site',
        'social_referral',
        'ad_campaign'
    ]
    encode_vars = session_df.columns.drop(keep_vars)
    enc_session_df = pd.get_dummies(session_df,
                                    columns=encode_vars.to_list(),
                                    prefix_sep='*')
    # remove not relevant encoded variables ('not set'/'other' levels)
    enc_session_df = enc_session_df.drop(
        columns=enc_session_df.columns[
            enc_session_df.columns.str.contains('not set|other')
        ]
    )
    # summarize session level variables on customer level
    sum_vars = (pd.Index(['social_referral', 'ad_campaign'])
                .append(enc_session_df
                        .columns
                        .drop(keep_vars)))
    client_session_sum_df = (enc_session_df
                             .groupby('client_id')
                             [sum_vars]
                             .sum())
    client_session_avg_df = (
        enc_session_df
        .groupby('client_id')
        .agg(avg_visits=('visit_number', 'mean'),
             avg_pageviews=('pageviews', 'mean'),
             avg_time_on_site=('time_on_site', 'mean'))
    )
    client_session_df = pd.concat([client_session_avg_df,
                                   client_session_sum_df],
                                  axis=1)
    # product level variables
    product_vars = pd.Index([
        # 'product_name', # one hot encode + sum
        'product_category', # one hot encode + sum
        'product_price', # avg_product_revenue
        'product_quantity', # avg_product_revenue
        'hour'] # one hot encoded + sum
    )
    avg_vars = pd.Index([
        'product_price',
        'product_quantity'
    ])
    sum_vars = pd.Index([
        # 'product_name',
        'product_category',
        'hour'
    ])
    enc_product_df = pd.get_dummies(df[id_vars.union(product_vars)],
                                    columns=sum_vars,
                                    prefix_sep='*')
    # summarize product level variables on customer level
    client_product_sum_df = (enc_product_df
                             .groupby('client_id')
                             [enc_product_df.columns.drop(avg_vars)]
                             .sum())
    def average_product_vars(client):
        # per-client averages over the client's transaction items
        d = {}
        # quantity-weighted average item revenue
        d['avg_product_revenue'] = ((client['product_price']
                                     * client['product_quantity'])
                                    .sum()
                                    / client['product_quantity'].sum())
        # ipdb.set_trace(context=15)
        # average number of distinct sku's per transaction
        d['avg_unique_products'] = (client
                                    .groupby('transaction_id')
                                    ['product_sku']
                                    .apply(lambda sku: len(sku.unique()))
                                    .mean())
        return pd.Series(d, index=['avg_product_revenue',
                                   'avg_unique_products'])
    client_product_avg_df = (enc_product_df
                             .groupby('client_id')
                             .apply(average_product_vars))
    client_product_df = pd.concat([client_product_avg_df,
                                   client_product_sum_df]
                                  , axis=1)
    # combine session-level and product-level client summaries
    agg_df = pd.concat([client_session_df,
                        client_product_df],
                       axis=1)
    return agg_df
def do_pca(X_std, **kwargs):
    '''Fit PCA on standardized data and project that same data.

    Args:
        X_std: Standardized data of shape (# samples, # features)
        kwargs: Forwarded to the sklearn PCA constructor

    Returns:
        Tuple of (fitted PCA object, projected data).
    '''
    model = PCA(**kwargs).fit(X_std)
    return model, model.transform(X_std)
def scree_pca(pca, plot=False, **kwargs):
    '''Summarize the variance captured by each principal component.

    Args:
        pca: Fitted PCA object exposing components_, explained_variance_
            and explained_variance_ratio_
        plot: When True, draw a scree/variance plot
        kwargs: Extra keyword arguments forwarded to DataFrame.plot

    Returns:
        Dataframe indexed by component number with eigen values, their
        running total, explained variance and its cumulative share.
    '''
    count = len(pca.components_)
    pc_index = pd.Index(range(1, count + 1), name='principal component')
    # eigen values and their running total (rounded for readability)
    table = pd.DataFrame(
        {'eigen_value': np.round(pca.explained_variance_.reshape(count), 2)},
        index=pc_index)
    table['cum_eigen_value'] = np.round(table['eigen_value'].cumsum(), 2)
    # explained variance ratio and its normalized cumulative share
    table['explained_var'] = np.round(
        pca.explained_variance_ratio_.reshape(count), 4)
    table['cum_explained_var'] = np.round(
        table['explained_var'].cumsum() / table['explained_var'].sum(), 4)
    if plot:
        # horizontal reference at eigen value == 1 (scree criterion)
        limit = pd.DataFrame(np.ones((count, 1)),
                             columns=['scree_plot_limit'], index=pc_index)
        ax = (pd.concat([table, limit], axis=1)
              .plot(y=['eigen_value', 'explained_var', 'scree_plot_limit'],
                    title='PCA: Scree test & Variance Analysis', **kwargs)
              )
        table.plot(y=['cum_explained_var'], secondary_y=True, ax=ax)
    return table
def get_pc_num(scree_df, pc_num = None, exp_var_threshold=None,
               eig_val_threshold=1):
    '''
    Selects the optimum number of principal components according to the
    specified objective: an explicit count, a cumulative % of explained
    variance, or (by default) the eigen-value criterion.

    Args:
        scree_df: DataFrame as output by the scree_pca function
        pc_num: explicit number of components to keep, by default None
        exp_var_threshold: threshold for cumulative % of explained variance
        eig_val_threshold: min eigen value, 1 by default; used only when
            neither pc_num nor exp_var_threshold is given
    Returns:
        pc_num: number of selected principal components
        exp_var: explained variance by selected components
        sum_eig: sum of eigen values of selected components
    '''
    # check arguments
    assert pc_num is None or pc_num <= scree_df.index.size
    assert exp_var_threshold is None or (0 < exp_var_threshold <= 1)
    assert 0 < eig_val_threshold < scree_df.index.size
    # BUGFIX: the original condition `(P or not E) or (not P or E)` was a
    # tautology and could never fire; the two selectors are mutually exclusive.
    assert pc_num is None or exp_var_threshold is None, \
        ('''Either number of principal components or minimum variance
            explained should be selected''')

    if exp_var_threshold:
        pcs = scree_df.index[scree_df['cum_explained_var'] <= exp_var_threshold]
    elif pc_num:
        # BUGFIX: the original used positional indexing range(1, pc_num+1),
        # which skipped the first component (off-by-one); take the first
        # pc_num index entries instead.
        pcs = scree_df.index[:pc_num]
    elif exp_var_threshold is None:
        # eigen-value (Kaiser) criterion fallback
        pcs = scree_df.index[scree_df['eigen_value'] > eig_val_threshold]

    pc_num = len(pcs)
    # NOTE(review): assumes the index labels run 1..n as produced by
    # scree_pca; an empty selection would raise KeyError here.
    exp_var = scree_df.loc[pc_num, 'cum_explained_var']
    sum_eig = scree_df.loc[[*pcs], 'eigen_value'].sum()
    return pc_num, exp_var, sum_eig
def varimax(factor_df, **kwargs):
    '''
    Varimax rotation of a factor matrix, delegated to R's `varimax`.

    Args:
        factor_df: factor matrix as pd.DataFrame with shape
            (# features, # principal components)
    Return:
        rotated factor loadings converted back to a Python object
    '''
    r_matrix = df2mtr(factor_df)
    # avoid shadowing this function's own name with the R callable
    rotate = robjects.r['varimax']
    rotated = rotate(r_matrix)
    return pandas2ri.ri2py(rotated.rx2('loadings'))
def get_components(df, pca, rotation=None, sort_by='sig_ld',
                   feat_details=None, plot='None', **kwargs):
    '''
    Show significant factor loadings depending on sample size.

    Args:
        df: data used for pca as pd.DataFrame
        pca: fitted pca object
        rotation: whether to apply factor matrix rotation ('varimax'),
            by default None
        sort_by: sort sequence of components, by default according to the
            number of significant loadings 'sig_ld'
        feat_details: dictionary of mapped feature details, by default None
        plot: 'discrete' plots a heatmap highlighting significant loadings,
            'continuous' plots a continuous heatmap,
            by default the string 'None' (no plot)
    Returns:
        factor_df: factor matrix as pd.DataFrame
            of shape (# features, # components)
        sig_ld: number of significant loadings across components as
            pd.Series of size # components
        cross_ld: number of significant loadings across features
            (cross loadings) as pd.Series of size # features
    '''
    # constants
    # ---------
    maxstr = 100 # amount of the characters to print
    # guidelines for identifying significant factor loadings
    # based on sample size. Source: Multivariate Data Analysis. 7th Edition.
    factor_ld = np.linspace(0.3, 0.75, 10)
    signif_sz = np.array([350, 250, 200, 150, 120, 100, 85, 70, 60, 50])
    # loadings significance threshold: the smaller the sample, the larger a
    # loading must be to count as significant
    ld_sig = factor_ld[len(factor_ld) - (signif_sz <= df.index.size).sum()]
    if rotation == 'varimax':
        components = varimax(pd.DataFrame(pca.components_.T))
    else:
        components = pca.components_.T
    # annotate factor matrix: use the long feature name when details are
    # provided, falling back to the raw column name
    index = pd.Index([])
    for feat in df.columns:
        try:
            index = index.append(
                pd.Index([feat]) if feat_details is None else \
                pd.Index([feat_details[feat]['long_name'][:maxstr]]))
        except KeyError:
            index = index.append(pd.Index([feat]))
    factor_df = pd.DataFrame(
        np.round(components, 2),
        columns = pd.Index(range(1, components.shape[1]+1),
                           name='principal_components'),
        index = index.rename('features')
    )
    # select significant loadings
    sig_mask = (factor_df.abs() >= ld_sig)
    # calculate cross loadings (significant loadings per feature)
    cross_ld = (sig_mask.sum(axis=1)
                .sort_values(ascending=False)
                .rename('cross_loadings'))
    # calculate number of significant loadings per component
    sig_ld = (sig_mask.sum()
              .sort_values(ascending=False)
              .rename('significant_loadings'))
    # sort factor matrix by loadings in components
    sort_by = [*sig_ld.index] if sort_by == 'sig_ld' else sort_by
    factor_df.sort_values(sort_by, ascending=False, inplace=True)
    if plot == 'continuous':
        plt.figure(**kwargs)
        sns.heatmap(
            factor_df.sort_values(sort_by, ascending=False).T,
            cmap='RdYlBu', vmin=-1, vmax=1, square=True
        )
        plt.title('Factor matrix')
    elif plot == 'discrete':
        # loadings limits
        ld_min, ld_sig_low, ld_sig_high, ld_max = -1, -ld_sig, ld_sig, 1
        vmin, vmax = ld_min, ld_max
        # create a discrete scale to distinguish significance categories:
        # negative-significant, insignificant, positive-significant
        data = factor_df.apply(
            lambda col: pd.to_numeric(pd.cut(col,
                                             [ld_min, -ld_sig, ld_sig, ld_max],
                                             labels=[-ld_sig, 0, ld_sig])))
        # plot heat map
        fig = plt.figure(**kwargs)
        sns.heatmap(data.T, cmap='viridis', vmin=vmin, vmax=vmax, square=True)
        plt.title('Factor matrix with significant laodings: {} > loading > {}'
                  .format(-ld_sig, ld_sig));
    return factor_df, sig_ld, cross_ld
def df2mtr(df):
    '''
    Convert pandas dataframe to r matrix. Category dtype is casted as
    factorVector considering missing values
    (original py2ri function of rpy2 can't handle this properly so far)

    Args:
        data: pandas dataframe of shape (# samples, # features)
            with numeric dtype
    Returns:
        mtr: r matrix of shape (# samples, # features)
    '''
    # check arguments
    assert isinstance(df, pd.DataFrame), 'Argument df need to be a pd.Dataframe.'
    # keep numeric columns only; anything else cannot go into FloatVector
    numeric_df = df.select_dtypes('number')
    # build the r matrix row-major from the flattened values, carrying the
    # index/column labels over as dimnames
    flat_values = FloatVector(numeric_df.values.flatten())
    dimnames = ListVector(
        rlc.OrdDict([('index', StrVector(tuple(numeric_df.index))),
                     ('columns', StrVector(tuple(numeric_df.columns)))])
    )
    return robjects.r.matrix(flat_values,
                             nrow=len(numeric_df.index),
                             ncol=len(numeric_df.columns),
                             dimnames=dimnames,
                             byrow=True)
def screen_model(X_train, X_test, y_train, y_test, grid_search, fine_param=None,
                 title='MODEL SCREENING EVALUATION', verbose='text'):
    '''Screen pipeline with different hyperparameters.

    Args:
        X_train, X_test: Pandas DataFrame of shape (# samples, # features)
            _train - training set, _test - test set
        y_train, y_test: Pandas Series of size (# of samples, label)
        grid_search: GridSearchCV object
        verbose: 'text' shows grid_search results DataFrame
            'plot' shows scores run chart with fine_param
        fine_param: name of the parameter to fine tune model. Used only with
            'plot' option
    Returns:
        grid_search: fitted the grid_search object
    '''
    # screen models
    grid_search.fit(X_train, y_train)
    # print output
    if verbose == 'text':
        # screen results, best-ranked configuration first
        screen_results = (pd.DataFrame(grid_search.cv_results_)
                          .sort_values('rank_test_score'))
        # columns holding the swept hyperparameter values
        hyper_params = screen_results.columns[
            screen_results.columns.str.contains('param_')
        ]
        # replace classifier objects by their class name for readable output
        if 'param_classifier' in screen_results:
            screen_results['param_classifier'] = (
                screen_results['param_classifier']
                .apply(lambda cls_: type(cls_).__name__)
            )
        # train-test score gap as a simple overfitting indicator
        screen_results['overfitting'] = (
            screen_results['mean_train_score']
            - screen_results['mean_test_score']
        )
        # calculate f1 weighted test score of the refit best estimator
        y_pred = grid_search.predict(X_test)
        f1_weighted_score = f1_score(y_test, y_pred, average='weighted')
        # print results
        print(title + '\n' + '-' * len(title))
        display(screen_results
                [hyper_params.union(pd.Index(['mean_train_score',
                                              'std_train_score',
                                              'mean_test_score',
                                              'std_test_score',
                                              'mean_fit_time',
                                              'mean_score_time',
                                              'overfitting']))])
        print('Best model is {} with F1 test weighted score {:.3f}\n'
              .format(type(grid_search
                           .best_estimator_
                           .named_steps
                           .classifier)
                      .__name__,
                      f1_weighted_score))
    elif verbose == 'plot':
        if fine_param in grid_search.cv_results_:
            # screen results
            screen_results = pd.DataFrame(grid_search.cv_results_)
            # reshape to long form: one row per (fine_param, split score)
            screen_results = pd.melt(
                screen_results,
                id_vars=fine_param,
                value_vars=(screen_results.columns[
                    screen_results.columns
                    .str.contains(r'split\d_\w{4,5}_score', regex=True)
                ]),
                var_name='score_type',
                value_name=grid_search.scoring
            )
            # collapse split0_..., split1_... into train_score/test_score
            screen_results['score_type'] = screen_results['score_type'].replace(
                regex=r'split\d_(\w{4,5}_score)', value=r'\1'
            )
            sns.lineplot(x=fine_param,
                         y=grid_search.scoring,
                         hue='score_type',
                         data=screen_results,
                         err_style='bars',
                         ax=plt.gca(),
                         marker='o',
                         linestyle='dashed')
            plt.gca().set_title(title);
    return grid_search
def plot_features_significance(estimator, X_std, y, feature_names, class_names,
                               threshold = -np.inf, title=''):
    '''Analyzes features significance of the estimator.

    Args:
        estimator: Sklearn estimator with coef_ or feature_importances_
            attribute
        X_std: Standardized inputs as dataframe
            of shape (# of samples, # of features)
        y: Class labels as Series of size # of samples
        feature_names: list/index of features names
        class_names: list of class names
        threshold: Filters only significant coefficients following
            |coefficient| >= threshold
        title: title to put on each plot + class name will be added.
    '''
    # BUGFIX: sklearn exposes `feature_importances_` (trailing underscore);
    # the original checked 'feature_importances' so the assert could pass
    # only for coef_-based estimators.
    assert ('coef_' in dir(estimator)
            or 'feature_importances_' in dir(estimator))
    # get factor matrix: one row of feature weights per class
    # NOTE(review): feature_importances_ is 1-D in sklearn (no per-class
    # rows) — confirm tree-based estimators are used with a matching shape.
    factor_matrix = pd.DataFrame(estimator.coef_ if 'coef_' in dir(estimator)
                                 else estimator.feature_importances_,
                                 index=estimator.classes_,
                                 columns=feature_names)
    # one subplot per class, two columns wide
    cols = 2
    rows = math.ceil(len(estimator.classes_) / cols)
    fig, axes = plt.subplots(rows, cols,
                             figsize=(10*cols, 10*rows),
                             sharex=True);
    plt.subplots_adjust(hspace=0.07, wspace=0.4)
    for i, (ax, class_idx, class_name) in enumerate(
            zip(axes.flatten(), estimator.classes_, class_names)):
        # sort feature weights and keep those above the threshold
        sorted_coef = (factor_matrix
                       .loc[class_idx]
                       .abs()
                       .sort_values(ascending=False))
        selected_feats = sorted_coef[sorted_coef >= threshold].index
        selected_coef = (factor_matrix
                         .loc[class_idx, selected_feats]
                         .rename('feature weights'))
        # calculate one-to-rest standardized average differences
        # (fixed label typo: 'everages' -> 'averages')
        selected_diff = (
            (X_std.loc[y == class_idx, selected_feats].mean()
             - X_std.loc[y != class_idx, selected_feats].mean())
            .rename('standardized difference of one-to-rest averages')
        )
        # print bar charts; legend only on the first subplot
        selected_df = (pd.concat([selected_coef, selected_diff], axis=1)
                       .sort_values('feature weights'))
        selected_df.plot.barh(ax=ax, legend=True if i==0 else False)
        ax.set_title(title + ' ' + class_name)
|
import floto
from floto.specs.retry_strategy import InstantRetry
from floto.specs.task import ActivityTask, ChildWorkflow
import datetime as dt

# ---------------------------------- #
# Start the workflow execution
# ---------------------------------- #

# Retry failed activities immediately, up to 3 times.
rs = InstantRetry(retries=3)
domain = 'floto_test'

# Date window handed to the copy activity as ISO strings.
input_copy_files = {'from_date':dt.date(2016,3,6).isoformat(),
                    'to_date':dt.date(2016,3,11).isoformat()}

# Activity that copies the files for the given date range.
copy_files = ActivityTask(domain=domain,
                          name='copyFiles',
                          version='1',
                          retry_strategy=rs,
                          input=input_copy_files)

# Activity that measures file lengths; scheduled inside the child workflow.
file_length = ActivityTask(domain=domain, name='fileLength', version='1', retry_strategy=rs)

# Child workflow that runs file_length; it requires copy_files to finish
# first (dependency expressed via the task id).
child_workflow = ChildWorkflow(workflow_type_name='read_file_lengths',
                               domain=domain,
                               workflow_type_version='1',
                               requires=[copy_files.id_],
                               retry_strategy=rs,
                               task_list='s3_files',
                               input={'activity_tasks':[file_length.serializable()]})

# Top-level task graph: copy first, then the child workflow.
activity_tasks = [copy_files.serializable(), child_workflow.serializable()]
workflow_args = {'domain': 'floto_test',
                 'workflow_type_name': 's3_files_example',
                 'workflow_type_version': '1',
                 'task_list': 's3_files',
                 'workflow_id': 's3_files',
                 'input': {'activity_tasks':activity_tasks}}

# Kick off the execution through the SWF API client.
response = floto.api.Swf().start_workflow_execution(**workflow_args)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Creates a GIN index on msgs_messageaction."messages" via raw SQL
    # (presumably to speed membership lookups on that column; GIN indexes
    # cannot be declared through the ORM in this Django version).

    dependencies = [("msgs", "0015_message_is_active")]

    operations = [
        migrations.RunSQL('CREATE INDEX msgs_messageaction_messages_idx ON msgs_messageaction USING GIN ("messages");')
    ]
|
def merge(ar, lo, mid, hi):
    """Merge the two sorted runs ar[lo..mid] and ar[mid+1..hi] in place.

    Works against a snapshot copy of the list; returns ar for convenience.
    """
    snapshot = ar[:]
    left, right = lo, mid + 1
    for out in range(lo, hi + 1):
        if left > mid:
            # left run exhausted: drain the right run
            ar[out] = snapshot[right]
            right += 1
        elif right > hi:
            # right run exhausted: drain the left run
            ar[out] = snapshot[left]
            left += 1
        elif snapshot[left] < snapshot[right]:
            ar[out] = snapshot[left]
            left += 1
        else:
            ar[out] = snapshot[right]
            right += 1
    return ar
def sort(ar, lo, hi):
    """Recursively merge-sort ar[lo..hi] in place (top-down)."""
    if hi <= lo: return
    # BUGFIX: use floor division — `/` yields a float in Python 3 and the
    # result is then used as a list index, raising TypeError.
    mid = lo + (hi - lo) // 2
    sort(ar, lo, mid)
    sort(ar, mid+1, hi)
    merge(ar, lo, mid, hi)
def top_down_merge_sort(ar):
    # Entry point for the recursive (top-down) merge sort; sorts ar in place.
    sort(ar, 0, len(ar)-1)
def bottom_up_merge_sort(ar):
    """Iterative (bottom-up) merge sort; sorts ar in place.

    Merges runs of width 1, 2, 4, ... until a single sorted run remains.
    """
    n = len(ar)
    width = 1
    while width < n:
        # merge adjacent run pairs [start, start+width-1] and
        # [start+width, min(start+2*width-1, n-1)]
        for start in range(0, n - width, 2 * width):
            merge(ar, start, start + width - 1,
                  min(start + 2 * width - 1, n - 1))
        width *= 2
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Automated maintenance tool to run a script on bots.
To use this script, write a self-contained python script (use a .zip if
necessary), specify it on the command line and it will be packaged and triggered
on all the swarming bots corresponding to the --dimension filters specified, or
all the bots if no filter is specified.
"""
__version__ = '0.1'
import os
import tempfile
import shutil
import subprocess
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Must be first import.
import parallel_execution
from third_party import colorama
from third_party.depot_tools import fix_encoding
from utils import file_path
from utils import tools
def get_bot_list(swarming_server, dimensions, dead_only):
  """Returns a list of swarming bots.

  Shells out to swarming.py so the query logic stays in one place; each
  dimension becomes a --dimension key value filter pair.
  """
  cmd = [
    sys.executable, 'swarming.py', 'bots',
    '--swarming', swarming_server,
    '--bare',
  ]
  for key, value in sorted(dimensions.iteritems()):
    cmd += ['--dimension', key, value]
  if dead_only:
    cmd.append('--dead-only')
  return subprocess.check_output(cmd, cwd=ROOT_DIR).splitlines()
def archive(isolate_server, script):
  """Archives the tool and return the sha-1."""
  script_name = os.path.basename(script)
  # .isolate files use python-literal syntax, so str() of the dict is valid.
  isolate_def = {
    'variables': {
      'command': ['python', script_name],
      'files': [script_name],
    },
  }
  staging = tempfile.mkdtemp(prefix=u'run_on_bots')
  try:
    isolate_path = os.path.join(staging, 'tool.isolate')
    isolated_path = os.path.join(staging, 'tool.isolated')
    with open(isolate_path, 'wb') as f:
      f.write(str(isolate_def))
    shutil.copyfile(script, os.path.join(staging, script_name))
    out = subprocess.check_output(
        [
          sys.executable, 'isolate.py', 'archive',
          '--isolate-server', isolate_server,
          '-i', isolate_path,
          '-s', isolated_path,
        ],
        cwd=ROOT_DIR)
    # First token of the archive output is the isolated hash.
    return out.split()[0]
  finally:
    file_path.rmtree(staging)
def run_serial(
    swarming_server, isolate_server, priority, deadline, repeat, isolated_hash,
    name, bots):
  """Runs the task one at a time.

  This will be mainly bound by task scheduling latency, especially if the bots
  are busy and the priority is low.
  """
  result = 0
  for iteration in xrange(repeat):
    # Disambiguate task names when the same task runs multiple times.
    suffix = '/%d' % iteration if repeat > 1 else ''
    for bot in bots:
      task_name = parallel_execution.task_to_name(
          name, {'id': bot}, isolated_hash) + suffix
      returncode = subprocess.call(
          [
            sys.executable, 'swarming.py', 'run',
            '--swarming', swarming_server,
            '--isolate-server', isolate_server,
            '--priority', priority,
            '--deadline', deadline,
            '--dimension', 'id', bot,
            '--task-name', task_name,
            isolated_hash,
          ],
          cwd=ROOT_DIR)
      # Keep the worst (highest) exit code seen across all runs.
      result = max(returncode, result)
  return result
def run_parallel(
    swarming_server, isolate_server, priority, deadline, repeat, isolated_hash,
    name, bots):
  """Triggers one task per bot (times `repeat`) and reports failures."""
  tasks = []
  for iteration in xrange(repeat):
    # Disambiguate task names when the same task runs multiple times.
    suffix = '/%d' % iteration if repeat > 1 else ''
    for bot in bots:
      dimensions = {'id': bot}
      tasks.append(
          (parallel_execution.task_to_name(name, dimensions, isolated_hash)
              + suffix,
           isolated_hash,
           dimensions))
  extra_args = ['--priority', priority, '--deadline', deadline]
  print('Using priority %s' % priority)
  # Failures are yielded as they complete; print them in red.
  for failed_task in parallel_execution.run_swarming_tasks_parallel(
      swarming_server, isolate_server, extra_args, tasks):
    _name, dimensions, stdout = failed_task
    print('%sFailure: %s%s\n%s' % (
        colorama.Fore.RED, dimensions, colorama.Fore.RESET, stdout))
def main():
  """Parses options, packages the script and triggers it on every bot.

  Returns a process exit code: non-zero when no bots matched or any serial
  run failed.
  """
  parser = parallel_execution.OptionParser(
      usage='%prog [options] script.py', version=__version__)
  parser.add_option(
      '--serial', action='store_true',
      help='Runs the task serially, to be used when debugging problems since '
      'it\'s slow')
  parser.add_option(
      '--repeat', type='int', default=1,
      help='Runs the task multiple time on each bot, meant to be used as a '
      'load test')
  options, args = parser.parse_args()
  if len(args) != 1:
    parser.error(
        'Must pass one python script to run. Use --help for more details')
  if not options.priority:
    parser.error(
        'Please provide the --priority option. Either use a very low number\n'
        'so the task completes as fast as possible, or an high number so the\n'
        'task only runs when the bot is idle.')

  # 1. Query the bots list.
  bots = get_bot_list(options.swarming, options.dimensions, False)
  print('Found %d bots to process' % len(bots))
  if not bots:
    return 1
  # Dead bots won't pick up the task; surface them as a warning only.
  dead_bots = get_bot_list(options.swarming, options.dimensions, True)
  if dead_bots:
    print('Warning: found %d dead bots' % len(dead_bots))

  # 2. Archive the script to run.
  isolated_hash = archive(options.isolate_server, args[0])
  print('Running %s' % isolated_hash)

  # 3. Trigger the tasks, serially for debugging or in parallel by default.
  name = os.path.basename(args[0])
  if options.serial:
    return run_serial(
        options.swarming,
        options.isolate_server,
        str(options.priority),
        str(options.deadline),
        options.repeat,
        isolated_hash,
        name,
        bots)

  return run_parallel(
      options.swarming,
      options.isolate_server,
      str(options.priority),
      str(options.deadline),
      options.repeat,
      isolated_hash,
      name,
      bots)
if __name__ == '__main__':
  # Normalize stdout/stderr encoding and buffering and enable ANSI colors
  # on Windows before running the tool.
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  sys.exit(main())
|
import pdb
import numpy as np
import scipy.io
import scipy.ndimage
import sklearn.preprocessing
import pandas as pd
class Dataset():
    """
    Dataset manager class: loads a dataset, optionally preprocesses it and
    serves feature/target arrays together with per-feature costs.
    """
    def __init__(self, data_path=None):
        """
        Class initializer.

        Args:
            data_path: directory holding run data; './run_data/' by default.
        """
        # set database path
        # BUGFIX: the original only assigned the default and silently ignored
        # a user-supplied data_path, leaving the attribute unset.
        if data_path is None:
            self.data_path = './run_data/'
        else:
            self.data_path = data_path
        # feature and target vectors plus bookkeeping arrays
        self.features = None   # (n_samples, n_features)
        self.targets = None    # (n_samples, 1)
        self.order = None      # sample ordering applied by get()
        self.mask = None       # feature mask applied by preprocess()
        self.costs = None      # per-feature acquisition costs

    def load(self, dataset_name, options=None):
        """
        Load dataset by name ('mnist' or 'synthesized').
        """
        if dataset_name == 'mnist':
            self.load_mnist(options)
        elif dataset_name == 'synthesized':
            self.load_synthesized(options)
        else:
            raise NotImplementedError('dataset loader not found.')

    def get(self, order='none', onehot=True):
        """
        Get dataset fields as a dict of features/targets/order/mask/costs.

        Args:
            order: 'rand' shuffles samples (and records the permutation).
            onehot: when True, one-hot encode the targets.
        """
        # create the requested ordering
        if order == 'rand':
            self.order = np.random.permutation(self.features.shape[0])
            self.features = self.features[self.order]
            self.targets = self.targets[self.order]
        # create the onehot representation
        if onehot:
            # NOTE(review): `sparse` was renamed `sparse_output` in
            # sklearn >= 1.2 — confirm the pinned sklearn version.
            enc = sklearn.preprocessing.OneHotEncoder(sparse=False)
            new_targets = enc.fit_transform(self.targets)
        else:
            new_targets = self.targets
        return {'features': self.features,
                'targets': new_targets,
                'order': self.order,
                'mask': self.mask,
                'costs': self.costs}

    def preprocess(self, normalization='none', fe_std_threshold=0.0):
        """
        Drop near-constant features and normalize the remaining ones.

        Args:
            normalization: 'none', 'center', 'unity' (min-max) or 'stat'
                (z-score).
            fe_std_threshold: keep only features whose std >= this value.
        """
        # apply feature std threshold
        fe_mask = self.features.std(axis=0) >= fe_std_threshold
        self.features = self.features[:, fe_mask]
        self.mask = fe_mask
        self.costs = self.costs[fe_mask]
        # apply different normalizations
        if normalization == 'none':
            pass
        elif normalization == 'center':
            self.features = self.features - self.features.mean(axis=0)
        elif normalization == 'unity':
            self.features = (self.features - self.features.min(axis=0))\
                / (self.features.max(axis=0) - self.features.min(axis=0))
        elif normalization == 'stat':
            self.features = (self.features - self.features.mean(axis=0))\
                / self.features.std(axis=0)

    def load_synthesized(self, opts):
        """
        Generate a two-class Gaussian-cluster dataset (seeded, deterministic).

        Expected opts keys: n_features, n_clusters, n_clusterpoints,
        std_clusters and optionally 'cost-aware'.
        """
        N_FEATURES = opts['n_features']
        N_CLUSTERS = opts['n_clusters']
        N_CLUSTERPOINTS = opts['n_clusterpoints']
        STD_CLUSTERS = opts['std_clusters']
        np.random.seed(1)
        dataset_features = []
        dataset_targets = []
        # half the clusters get label 1, half label 2, in random order
        cluster_labels = np.random.permutation([1]*(N_CLUSTERS//2) + [2]*(N_CLUSTERS//2))
        for clus in range(N_CLUSTERS):
            pos_center = np.random.rand(N_FEATURES)
            label_cluser = cluster_labels[clus]
            dataset_features.append(pos_center + STD_CLUSTERS*np.random.randn(N_CLUSTERPOINTS, N_FEATURES))
            # BUGFIX: np.float alias was removed in NumPy >= 1.24; use float.
            dataset_targets.append(np.ones((N_CLUSTERPOINTS, 1), dtype=float) * label_cluser)
        dataset_features = np.vstack(dataset_features)
        dataset_targets = np.vstack(dataset_targets)
        # random permutation of the samples
        inds_sel = np.random.permutation(dataset_features.shape[0])
        dataset_features = dataset_features[inds_sel, :]
        dataset_targets = dataset_targets[inds_sel, :]
        # set attributes; cost-aware mode appends pure-noise features with
        # their own increasing costs
        if opts.get('cost-aware', False):
            redundant_features = np.random.randn(dataset_features.shape[0], dataset_features.shape[1])
            self.features = np.hstack([dataset_features, redundant_features])
            self.targets = dataset_targets.reshape(-1, 1)
            self.order = np.arange(self.features.shape[0])
            self.costs = np.hstack([np.arange(1, dataset_features.shape[1]+1, dtype=float),
                                    np.arange(1, redundant_features.shape[1]+1, dtype=float)])
        else:
            self.features = dataset_features
            self.targets = dataset_targets.reshape(-1, 1)
            self.order = np.arange(self.features.shape[0])
            self.costs = np.ones((self.features.shape[1],), dtype=float)

    def load_mnist(self, opts):
        """
        Load MNIST; 'multires' mode appends down-scaled copies of each image
        with resolution-dependent costs.
        """
        # load and read data
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("./run_data/MNIST_data/", one_hot=False)
        if opts is None or opts['task'] == 'singleres':
            self.features = mnist.train.images
            self.targets = mnist.train.labels.reshape(-1, 1)
            self.order = np.arange(self.features.shape[0])
            self.costs = np.ones((self.features.shape[1],), dtype=float)
            return
        # check if it is a multires cost-sensitive case
        elif opts['task'] == 'multires':
            images = mnist.train.images
            self.targets = mnist.train.labels.reshape(-1, 1)
            self.features = np.hstack([images,
                                       resize_images(images, 0.5),
                                       resize_images(images, 0.25),
                                       resize_images(images, 0.125)])
            self.targets = mnist.train.labels.reshape(-1, 1)
            self.order = np.arange(self.features.shape[0])
            # higher resolution -> higher acquisition cost
            self.costs = np.hstack([np.ones((images.shape[1],), dtype=float) * 4.0,
                                    np.ones((resize_images(images[:2, :], 0.500).shape[1],), dtype=float) * 3.0,
                                    np.ones((resize_images(images[:2, :], 0.250).shape[1],), dtype=float) * 2.0,
                                    np.ones((resize_images(images[:2, :], 0.125).shape[1],), dtype=float) * 1.0,])
            return
        else:
            raise NotImplementedError('task not found!')
def resize_images(images, scale):
    """Resize a batch of flattened square images by `scale`.

    Args:
        images: array of shape (n_images, side*side), each row a flattened
            square image.
        scale: zoom factor forwarded to scipy.ndimage.zoom.

    Returns:
        Array of shape (n_images, new_side*new_side) with resized images.
    """
    dimxy = int(np.sqrt(images.shape[1]))
    resized_images = []
    for img in images:
        # BUGFIX: scipy.ndimage.interpolation was deprecated and later
        # removed; the public function is scipy.ndimage.zoom.
        img_new = scipy.ndimage.zoom(img.reshape(dimxy, dimxy), scale)
        resized_images.append(img_new.reshape(1, -1))
    resized_images = np.vstack(resized_images)
    return resized_images
|
import numpy as np
def tilt(theta, coordinates):
    """Rotate leg-tip coordinates by `theta` radians about the body pivot.

    Args:
        theta: tilt angle in radians.
        coordinates: iterable of (x, y) pairs; y is measured downward from
            a 320-unit reference height (NOTE(review): 320 appears to be
            the leg pivot height — confirm against the hardware).

    Returns:
        np.ndarray of shape (len(coordinates), 2) with the tilted points.
    """
    # GENERALIZED: sized from the input instead of the hard-coded 4 legs.
    new_coordinates = np.zeros((len(coordinates), 2))
    for i in range(len(coordinates)):
        coordinate = coordinates[i]
        x = coordinate[0]
        y = 320 - coordinate[1]          # height above the pivot
        h = np.sqrt(x**2 + y**2)         # distance from the pivot
        sigma = np.arctan(x/y)           # current angle from vertical
        new_coordinates[i][0] = h * np.sin(theta + sigma)
        new_coordinates[i][1] = 320 - h * np.cos(theta + sigma)
    return new_coordinates
# Defining tilt amount: raise/lower the body by delta_y on a 390-unit lever.
l = 390
theta = np.radians(13)
delta_y = l * np.sin(theta)

#### Defining gait parameters
stride_length_front = 75
stride_length_hind = 100
kick_amount = 20
back_leg_center = -60
lift_amount = 80

# Neutral stance: front legs raised by half delta_y, hind legs lowered,
# rows are [front-left, front-right, hind-left, hind-right] as (x, y).
home = np.array([
    [0, 100 + delta_y / 2],
    [0, 100 + delta_y / 2],
    [back_leg_center, 100 - delta_y / 2],
    [back_leg_center, 100 - delta_y / 2]
])

# Four gait keyframes, each an offset from the home stance.
left_leg_back = home + np.array([
    [-stride_length_front/2, 0],
    [stride_length_front/2, lift_amount/4], # + less_up
    [stride_length_hind/2, lift_amount/4], # + 20 , +20
    [-stride_length_hind/2, -kick_amount]
])

left_leg_up = home + np.array([
    [-stride_length_front/2, lift_amount],
    [0, 0],
    [0, 0],
    [-stride_length_hind/2, lift_amount]
])

left_leg_forward = home + np.array([
    [stride_length_front/2, lift_amount/4],
    [-stride_length_front/2 , 0],
    [-stride_length_hind/2 , -kick_amount],
    [stride_length_hind/2, lift_amount/4]
])

left_leg_down = home + np.array([
    [0, 0],
    [-stride_length_front/2, lift_amount],
    [-stride_length_hind/2, lift_amount],
    [0, 0]
])

# Per-joint velocity profiles (8 actuators, two per leg).
back_to_forward_velocity = np.array([
    15.0, 15.0,
    1, 1,
    1, 1,
    15.0, 15.0
])
back_to_forward_velocity = back_to_forward_velocity * 1

forward_to_back_velocity = np.array([
    1, 1,
    15.0, 15.0,
    15.0, 15.0,
    1, 1
])
# BUGFIX: the original bare expression `forward_to_back_velocity * 1`
# discarded its result; assign it like the back_to_forward case above
# (the factor is a speed tuning knob, currently 1).
forward_to_back_velocity = forward_to_back_velocity * 1

acceleration = np.full(8, 100.0)

# Apply the body tilt to every keyframe.
left_leg_back = tilt(theta, left_leg_back)
left_leg_up = tilt(theta, left_leg_up)
left_leg_forward = tilt(theta, left_leg_forward)
left_leg_down = tilt(theta, left_leg_down)
'''
#Right leg forward
left_leg_back = np.array([
#Front
[-forward_distance * 2, height + leaning_offset], #Left
[forward_distance, height + leaning_offset + less_up], #Right
#Hind
[forward_distance + 20, height + 20],
[-forward_distance * 2, height]
])
#Right leg down
left_leg_up = np.array([
#Front
[-forward_distance, height * 2 + leaning_offset], #Left
[mid_point, height - down_distance - 10 + leaning_offset], #Right
#Hind
[mid_point, height - down_distance], #Left
[-forward_distance, height * 2] #Right
])
#Right leg back
left_leg_forward = np.array([
#Front
[forward_distance, height + leaning_offset + less_up], #Left
[-forward_distance * 2, height + leaning_offset], #Right
#Hind
[-forward_distance * 2, height], #Left
[forward_distance + 20, height + 20] #Right
])
#Right leg up
left_leg_down = np.array([
#Front
[mid_point, height - down_distance - 10 + leaning_offset], #Left
[-forward_distance, height * 2 + leaning_offset], #Right
#Hind
[-forward_distance, height * 2], #Left
[mid_point, height - down_distance] #Right
])
'''
# latest changes:
# Speed 0.5 -> 1
# [forward_distance + 20, height + 40] -> [forward_distance + 20, height + 20]
|
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Nader Lahouti, Cisco Systems, Inc.
from keystoneclient.v3 import client
from neutronclient.v2_0 import client as nc
from dfa.common import config
from dfa.common import dfa_logger as logging
from dfa.common import rpc
from dfa.common import constants
LOG = logging.getLogger(__name__)
class EventsHandler(object):
    """This class defines methods to listen and process events."""

    def __init__(self, ser_name, pqueue, c_pri, d_pri):
        """Initialize the handler for one OpenStack service.

        :param ser_name: service the events come from (e.g. 'keystone')
        :param pqueue: shared priority queue events are pushed onto
        :param c_pri: priority for create events
        :param d_pri: priority for delete events
        """
        self._service = None
        self._service_name = ser_name
        # Cache of per-host RPC clients, keyed by hostname.
        self._clients = {}
        self._pq = pqueue
        self._create_pri = c_pri
        # NOTE(review): update priority reuses the *create* priority (c_pri);
        # d_pri is only used for deletes — confirm this is intentional.
        self._update_pri = c_pri
        self._delete_pri = d_pri
        self._cfg = config.CiscoDFAConfig(ser_name).cfg
        self._q_agent = constants.DFA_AGENT_QUEUE
        self._url = self._cfg.dfa_rpc.transport_url
        self._events_to_ignore = constants.EVENTS_FILTER_LIST
        dfaq = self._cfg.dfa_notify.cisco_dfa_notify_queue % (
            {'service_name': ser_name})
        notify_queue = self._cfg.DEFAULT.notification_topics.split(',')
        # Only listen when our queue is among the configured topics.
        self._notify_queue = dfaq if dfaq in notify_queue else None
        if self._service_name == 'keystone':
            # Build the keystone admin endpoint URL from config, falling
            # back to host/protocol pieces (default port 35357).
            endpoint = self._cfg.DEFAULT.admin_endpoint
            admin_port = self._cfg.DEFAULT.admin_port
            if endpoint:
                self._endpoint_url = (endpoint + 'v3/') % (
                    {'admin_port': admin_port if admin_port else '35357'})
            else:
                host = self._cfg.DEFAULT.admin_bind_host
                proto = self._cfg.DEFAULT.auth_protocol
                self._endpoint_url = '%(proto)s://%(host)s:%(port)s/v3/' % (
                    {'proto': proto if proto else 'http',
                     'host': host if host else 'localhost',
                     'port': admin_port if admin_port else '35357'})
            self._service = client.Client(token=self._cfg.DEFAULT.admin_token,
                                          endpoint=self._endpoint_url)

        # Setup notification listener for the events.
        self._setup_notification_listener(self._notify_queue, self._url)

    @property
    def nclient(self):
        # Lazily build a neutron client from keystone_authtoken settings;
        # returns None when any required credential is missing.
        user = self._cfg.keystone_authtoken.admin_user
        tenant = self._cfg.keystone_authtoken.admin_tenant_name
        passw = self._cfg.keystone_authtoken.admin_password
        if self._cfg.keystone_authtoken.auth_url:
            uri = self._cfg.keystone_authtoken.auth_url + '/v2.0'
        else:
            proto = self._cfg.keystone_authtoken.auth_protocol
            auth_host = self._cfg.keystone_authtoken.auth_host
            auth_port = self._cfg.keystone_authtoken.auth_port
            uri = '%(proto)s://%(host)s:%(port)s/v2.0' % (
                {'proto': proto if proto else 'http',
                 'host': auth_host if auth_host else 'localhost',
                 'port': auth_port if auth_port else '35357'})
        if user and tenant and passw and uri:
            return nc.Client(username=user, tenant_name=tenant, password=passw,
                             auth_url=uri)

    def _setup_notification_listener(self, topic_name, url):
        """Setup notification listener for a service."""
        self.notify_listener = rpc.DfaNotifcationListener(
            topic_name, url, rpc.DfaNotificationEndpoints(self))

    def start(self):
        # Start consuming notifications (no-op when no listener configured).
        if self.notify_listener:
            self.notify_listener.start()

    def wait(self):
        # Block until the listener terminates.
        if self.notify_listener:
            self.notify_listener.wait()

    def create_rpc_client(self, thishost):
        # Create and cache an RPC client for the agent on `thishost`.
        clnt = self._clients.get(thishost)
        if clnt is None:
            clnt = rpc.DfaRpcClient(self._url,
                                    '_'.join((self._q_agent, thishost)),
                                    exchange=constants.DFA_EXCHANGE)
            self._clients[thishost] = clnt
            LOG.debug('Created client for agent: %(host)s',
                      {'host': thishost})

    def callback(self, timestamp, event_type, payload):
        """Callback method for processing events in notification queue.

        :param timestamp: time the message is received.
        :param event_type: event type in the notification queue such as
                           identity.project.created, identity.project.deleted.
        :param payload: Contains information of an event
        """
        try:
            data = (event_type, payload)
            if event_type in self._events_to_ignore:
                return
            LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
                      'payload: %(payload)s\n', (
                          {'event': event_type, 'payload': payload}))
            # Map the event type to a queue priority; unknown event kinds
            # default to the delete priority.
            if 'create' in event_type:
                pri = self._create_pri
            elif 'delete' in event_type:
                pri = self._delete_pri
            elif 'update' in event_type:
                pri = self._update_pri
            else:
                pri = self._delete_pri
            self._pq.put((pri, timestamp, data))
        except Exception as exc:
            LOG.exception(('Error: %(err)s for event %(event)s'),
                          {'err': str(exc), 'event': event_type})

    def event_handler(self):
        """Wait on queue for listening to the events."""
        if not self._notify_queue:
            LOG.error('event_handler: no notification queue for %s' % (
                self._service_name))
            return

        LOG.debug('calling event handler for %s' % self)
        self.start()
        self.wait()

    def send_vm_info(self, thishost, msg):
        # Forward VM info to the agent on `thishost` via a synchronous call;
        # silently returns when no client exists for that host.
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("send_vm_info: Agent on %s is not active." % thishost)
            return
        context = {}
        LOG.debug("send_vm_info: host: %(host)s, msg: %(msg)s",
                  {'host': thishost, 'msg': msg})
        thismsg = clnt.make_msg('send_vm_info', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("send_vm_info: resp = %s" % resp)

    def update_ip_rule(self, thishost, msg):
        # Ask the agent on `thishost` to update an IP rule (synchronous).
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("update_ip_rule: Agent on %s is not active." % thishost)
            return
        context = {}
        thismsg = clnt.make_msg('update_ip_rule', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("update_ip_rule: resp = %s" % resp)

    def send_msg_to_agent(self, thishost, msg_type, msg):
        # Generic message dispatch to the agent; msg_type travels in the
        # RPC context.
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("send_msg_to_agent: Agent on %s is not active." % (
                thishost))
            return
        context = {'type': msg_type}
        thismsg = clnt.make_msg('send_msg_to_agent', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("send_msg_to_agent: resp = %s" % resp)
|
# Copyright 2016 EasyStack.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib import decorators
class MetadataSchemaTest(base.BaseV2ImageTest):
    """Test to get image metadata schema"""

    def _assert_schema_name(self, resource, expected):
        # Fetch the schema document for `resource` and verify the name
        # it advertises. Shared by every test below.
        body = self.schemas_client.show_schema(resource)
        self.assertEqual(expected, body['name'])

    @decorators.idempotent_id('e9e44891-3cb8-3b40-a532-e0a39fea3dab')
    def test_get_metadata_namespace_schema(self):
        """Test to get image namespace schema"""
        self._assert_schema_name("metadefs/namespace", "namespace")

    @decorators.idempotent_id('ffe44891-678b-3ba0-a3e2-e0a3967b3aeb')
    def test_get_metadata_namespaces_schema(self):
        """Test to get image namespaces schema"""
        self._assert_schema_name("metadefs/namespaces", "namespaces")

    @decorators.idempotent_id('fde34891-678b-3b40-ae32-e0a3e67b6beb')
    def test_get_metadata_resource_type_schema(self):
        """Test to get image resource_type schema"""
        self._assert_schema_name("metadefs/resource_type",
                                 "resource_type_association")

    @decorators.idempotent_id('dfe4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
    def test_get_metadata_resources_types_schema(self):
        """Test to get image resource_types schema"""
        self._assert_schema_name("metadefs/resource_types",
                                 "resource_type_associations")

    @decorators.idempotent_id('dff4a891-b38b-3bf0-a3b2-e03ee67b3a3b')
    def test_get_metadata_object_schema(self):
        """Test to get image object schema"""
        self._assert_schema_name("metadefs/object", "object")

    @decorators.idempotent_id('dee4a891-b38b-3bf0-a3b2-e03ee67b3a3c')
    def test_get_metadata_objects_schema(self):
        """Test to get image objects schema"""
        self._assert_schema_name("metadefs/objects", "objects")

    @decorators.idempotent_id('dae4a891-b38b-3bf0-a3b2-e03ee67b3a3d')
    def test_get_metadata_property_schema(self):
        """Test to get image property schema"""
        self._assert_schema_name("metadefs/property", "property")

    @decorators.idempotent_id('dce4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
    def test_get_metadata_properties_schema(self):
        """Test to get image properties schema"""
        self._assert_schema_name("metadefs/properties", "properties")

    @decorators.idempotent_id('dde4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
    def test_get_metadata_tag_schema(self):
        """Test to get image tag schema"""
        self._assert_schema_name("metadefs/tag", "tag")

    @decorators.idempotent_id('cde4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
    def test_get_metadata_tags_schema(self):
        """Test to get image tags schema"""
        self._assert_schema_name("metadefs/tags", "tags")
|
import argparse
import os
import tensorflow as tf
from network import FACE_COMPLETION
# Command-line arguments for the face completion script.
parser = argparse.ArgumentParser()
# Number of optimization iterations to run per image.
parser.add_argument('--num_iter', type=int, default=500)
# Directory holding the pretrained model checkpoint.
parser.add_argument('--checkpointDir', type=str, default='checkpoint')
# Directory where completed images are written.
parser.add_argument('--out_dir', type=str, default='completed')
# How often (in iterations) to report progress on the console.
parser.add_argument('--out_interval', type=int, default=50)
# Mask type used to cover the image; only 'center' masking is implemented.
parser.add_argument('--mask_type', type=str, default='center')
# One or more image paths to complete.
parser.add_argument('imgs', type=str, nargs='+')
args = parser.parse_args()
# The checkpoint directory must already exist.
assert(os.path.exists(args.checkpointDir))
# Create the output directory if it doesn't exist.
if not os.path.exists(args.out_dir):
    os.makedirs(args.out_dir)
# Run face completion inside a TensorFlow (1.x) session.
with tf.Session() as sess:
    fc = FACE_COMPLETION(sess, checkpoint_dir=args.checkpointDir)
    fc.complete(args)
|
from .api import CellarTrackerAPI
from .enum import CellarTrackerFormat, CellarTrackerTable
class CellarTrackerClient(object):
    """Thin client around CellarTrackerAPI that injects the credentials
    into every request."""

    def __init__(self, username: str, password: str):
        """Initialize the client object.

        :param username: CellarTracker account user name.
        :param password: CellarTracker account password.
        """
        # The original annotated these parameters as `None`, which is not
        # a type; they are credential strings.
        self._api = CellarTrackerAPI()
        self._username = username
        self._password = password

    def get(self, table: CellarTrackerTable, format: CellarTrackerFormat):
        """Get data from *table* in the requested *format*.

        NOTE: `format` shadows the builtin, but renaming it would break
        callers passing it as a keyword argument.
        """
        return self._api.execute(params={
            "User": self._username,
            "Password": self._password,
            "Table": table.value,
            "Format": format.value,
            "Location": 1
        })
|
"""Similarity Down-selection (SDS) heuristically finds the most dissimilar set of size `n`
out of a population represented as an NxN matrix where the ith row (as well as the
ith column) belong to item i, and the element (i,j) is some pairwise relation between
items i and j.
The pairwise relation is a floating point value where larger values indicate
greater dissimilarity, and where the pairwise relation between item i and itself is represented
as np.nan (this allows the program to work with numpy log conversion and sums).
SDS was originally written to find the subset of n most dissimliar conformers
(similarity being determined by average pairwise RMSD between atoms), and relevant
functions for that particular application can also be found in this script.
In the original implemenation finding the n most dissimlar conformers
for 50000x50000 matricies, SDS substationally outperformed a benchmark random sampling method in both
time and accuracy.
The pairwise RMSD matricies used for the original implemenation can be created using pwrmsd_writer.py
Note that because SDS finds the set of size `n` by building off of set `n-1`, finding set `n` also finds
also previous set sizes from 2-n.
Author: Felicity Nielson
2019-2020
"""
from time import time
import numpy as np
import pandas as pd
from os.path import *
import argparse
import os
from statsmodels.stats.weightstats import DescrStatsW
from math import log
def SDS(df, n=3):
    """Heuristically select the `n` mutually most-dissimilar items.

    The input is a square matrix in which element (i, j) holds the
    pairwise dissimilarity between items i and j, with np.nan on the
    diagonal (and for any missing pairs/items) so log-summation skips
    those entries and nan-poisoned candidates are never chosen.

    Args:
        df (pandas.DataFrame): NxN dissimilarity matrix with np.nan at
            (i, i); entirely missing items may be whole rows of np.nan.
        n (int): Size of the dissimilar set to build (1 < n < N). It is
            capped at the number of non-empty rows.

    Returns:
        pandas.DataFrame with a single 'matrix index' column listing the
        matrix indices of the selected items, in selection order.
    """
    # The matrix must be square.
    size = len(df.index)
    assert size == len(df.columns)

    # Cap n at the number of usable (non-all-nan) rows; a larger n would
    # make the search fail with an error.
    usable = len(df.dropna(how='all'))
    n = min(n, usable)

    print(f'Starting SDS search for most dissimilar set of size n = {n}...')

    # Seed with the two items joined by the single largest pairwise value.
    row_peaks = [np.nanmax(df.loc[i]) for i in range(size)]
    first = np.nanargmax(row_peaks)
    second = first + 1 + np.nanargmax(row_peaks[first + 1:])

    chosen_rows = [np.array(df.loc[first]), np.array(df.loc[second])]
    indices = [first, second]

    # Greedily grow the set: each pick maximizes the product of its
    # dissimilarities to everything already chosen. Products are kept as
    # log sums so large N (e.g. 50000) stays within floating-point range.
    logsum = np.zeros(size) + np.log(chosen_rows[0])
    for _ in range(n - 2):
        logsum += np.log(chosen_rows[-1])
        pick = np.nanargmax(logsum)
        indices.append(pick)
        chosen_rows.append(np.array(df.loc[pick]))

    result = pd.DataFrame([indices], index=['matrix index']).T
    print('Finished')
    return result
def conf_to_ccs(conformers, infodf):
    """Convert selected conformer indices into Boltzmann-weighted and
    lowest-energy CCS values for every prefix of the selection.

    Args:
        conformers (np.array): Conformer indices selected from the RMSD
            matrix, as returned in the SDS() dataframe.
        infodf (pandas.DataFrame): Per-conformer info with at least
            'ccs' and 'dft_energy' columns, indexed like the matrix.

    Returns:
        pandas.DataFrame with columns: n Dissimilar, Conformer, CCS,
        DFT Energy, BW CCS, and Lowest Energy CCS.
    """
    print('Calculating CCSs...')
    csvdf = infodf.loc[conformers].copy()
    columns = ['n Dissimilar', 'Conformer', 'CCS', 'DFT Energy', 'BW CCS', 'Lowest Energy CCS']
    rows = []
    for n in range(1, len(conformers) + 1):
        # Boltzmann-weighted CCS over the first n selected conformers.
        bw = bw_ccs(csvdf.iloc[0:n])
        # CCS of the lowest-energy conformer among the first n.
        idx = csvdf.iloc[0:n]['dft_energy'].idxmin()
        lec = csvdf['ccs'][idx]
        rows.append([n, conformers[n - 1], csvdf['ccs'].iloc[n - 1],
                     csvdf['dft_energy'].iloc[n - 1], bw, lec])
    # DataFrame.append was deprecated and removed in pandas 2.0; build
    # the rows once and construct a single DataFrame instead.
    return pd.DataFrame(rows, columns=columns)
def bw_ccs(df):
    """Return the Boltzmann-weighted mean CCS.

    DFT energies are expected in Hartree/mol and are converted to
    kcal/mol before weighting. Modified from ISiCLE.

    Args:
        df (pandas.DataFrame): contains 'dft_energy' and 'ccs' columns.

    Returns:
        Boltzmann weighted mean.
    """
    # A single conformer is its own average.
    if len(df['dft_energy']) == 1:
        return df['ccs'].values[0]
    # Hartree/mol -> kcal/mol, shifted relative to the minimum energy.
    kcal = df['dft_energy'].values * 627.503
    rel = kcal - kcal.min()
    # Boltzmann factors (kT ~ 0.5925 kcal/mol), rescaled so the weights
    # sum to the number of conformers.
    boltz = np.exp(-rel / 0.5924847535)
    weights = (boltz / boltz.sum()) * len(boltz)
    # Weighted mean; equivalent to DescrStatsW(df['ccs'], weights).mean.
    return np.average(df['ccs'], weights=weights)
if __name__ == '__main__':
    start = time()
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mtrx', type=str,
                        help='Path to .pkl file containing an NxN matrix')
    parser.add_argument('-n', '--ndis', type=int, default=3,
                        help='Find the n most dissimilar items')
    parser.add_argument('-c', '--conf_info', type=str, default=None,
                        help='tsv file containing ccs and energy info for conformers. \
                        Note that the index of the .tsv file must exactly match the index of the matrix')
    args = parser.parse_args()
    n = args.ndis
    mtrx = args.mtrx
    # If SDS is not already a directory, make it.
    directory = 'SDS'
    if not exists(directory):
        os.makedirs(directory)
    df = pd.read_pickle(mtrx)
    SDSdf = SDS(df, n=n)
    narray = np.array([x for x in range(1, n + 1)])
    # If comparing conformers, calculate Boltzmann weighted CCS.
    # (identity comparison with None via `is not None`, not `!= None`)
    if args.conf_info is not None:
        csvdf = pd.read_csv(args.conf_info)
        writedf = conf_to_ccs(SDSdf['matrix index'].values, csvdf)
    else:
        writedf = SDSdf
        writedf['n Dissimilar'] = narray
    writedf.to_csv(f'SDS/SDS_{n}_dissimilar.csv', index=False)
    print((time() - start) / 60, 'min')
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.http import Http404
from django.db.models import Q
from rest_framework import viewsets, mixins
from rest_framework.permissions import AllowAny, IsAdminUser, SAFE_METHODS
from rest_framework.exceptions import PermissionDenied, NotAuthenticated
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from organisation.models import Company, Address, CompanyRating, Picture
from organisation.serializers import CompanySerializer, CompanyRatingSerializer, PictureSerializer, PictureUploadSerializer
from api.user_serializers import PublicUserSerializer
from palvelutori.models import User
class CompanyViewSet(mixins.RetrieveModelMixin,
                     mixins.ListModelMixin,
                     mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    """
    Company listings.
    """
    serializer_class = CompanySerializer

    def get_queryset(self):
        """Active companies, optionally narrowed by a free-text search."""
        queryset = Company.objects.filter(active=True)
        term = self.request.query_params.get('search', '')
        if not term:
            return queryset
        # TODO use the new fulltext search functionality in
        # Django 1.10
        criteria = (Q(email=term) |
                    Q(name__icontains=term) |
                    Q(companydescription__text__icontains=term) |
                    Q(addresses__streetAddress__icontains=term) |
                    Q(addresses__postalcode=term))
        return queryset.filter(criteria).distinct()

    def get_object(self):
        """Fetch the company; writes require membership or superuser."""
        company = super(CompanyViewSet, self).get_object()
        if self.request.method not in SAFE_METHODS:
            is_member = self.request.user.company_id == company.id
            if not (is_member or self.request.user.is_superuser):
                self.permission_denied(
                    self.request,
                    message='Not a member of this company'
                )
        return company
class CompanyRatingViewSet(viewsets.ModelViewSet):
    """
    Company rating views.
    Everyone can create ratings.
    Anonymous users can not list, view, update or delete.
    Logged in users can list, view, update and delete only their own ratings.
    Staff users have no special privileges.
    """
    queryset = CompanyRating.objects.filter(company__active=True)
    serializer_class = CompanyRatingSerializer
    permission_classes = [AllowAny]

    def get_queryset(self):
        """Authenticated users see only their own ratings; others see none."""
        base = super(CompanyRatingViewSet, self).get_queryset()
        if not self.request.user.is_authenticated():
            return base.none()
        return base.filter(user=self.request.user.id)

    def perform_create(self, serializer):
        """Stamp the creating user onto the rating when logged in."""
        if self.request.user.is_authenticated():
            return serializer.save(user=self.request.user)
        return super(CompanyRatingViewSet, self).perform_create(serializer)
class CompanyPictureViewSet(viewsets.ModelViewSet):
    """Pictures of a company; writes require company membership."""
    serializer_class = PictureSerializer
    create_serializer_class = PictureUploadSerializer

    def get_serializer_class(self):
        # Uploads use a dedicated serializer.
        return (self.create_serializer_class if self.action == 'create'
                else self.serializer_class)

    def get_queryset(self):
        return Picture.objects.filter(
            company_id=self.kwargs['company_pk'], company__active=True)

    def get_object(self):
        picture = super(CompanyPictureViewSet, self).get_object()
        is_write = self.request.method not in SAFE_METHODS
        if is_write and self.request.user.company_id != picture.company_id:
            self.permission_denied(
                self.request,
                message='Not a member of this company'
            )
        return picture

    def create(self, request, company_pk=None):
        # Only members of the target company may upload pictures to it.
        if self.request.user.company_id != int(company_pk):
            self.permission_denied(
                self.request,
                message='Not a member of this company'
            )
        return super(CompanyPictureViewSet, self).create(request, company_pk=company_pk)
class CompanyUserViewSet(mixins.RetrieveModelMixin,
                         mixins.ListModelMixin,
                         viewsets.GenericViewSet):
    """Users associated with a company.
    This is a public read-only list of users associated with a specific company."""
    serializer_class = PublicUserSerializer

    def get_queryset(self):
        # Scoped by the nested route's company_pk kwarg; only users of an
        # active company are exposed.
        return User.objects.filter(company_id=self.kwargs['company_pk'], company__active=True)
|
import json
from rest_framework import exceptions as rest_exceptions, serializers as rest_serializers
from jobs import models
class LocationsField(rest_serializers.Field):
    """Serializer field holding a list of two-number coordinate pairs.

    The internal value is a plain Python list; the stored representation
    is a JSON string that gets decoded on output (see JobSerializer).
    """

    def to_internal_value(self, data):
        """Validate the incoming payload into a list of coordinate pairs.

        Raises:
            rest_exceptions.ValidationError: if the payload is not a
                list, an element is not a two-item sequence, or a
                coordinate is not numeric.
        """
        if data is None:
            return []
        if not isinstance(data, list):
            raise rest_exceptions.ValidationError("Locations must be a list")
        for elem in data:
            # Guard against non-sequence elements: calling len() on e.g.
            # an int raised an uncaught TypeError (HTTP 500) instead of a
            # clean validation error (HTTP 400).
            if not isinstance(elem, (list, tuple)) or len(elem) != 2:
                raise rest_exceptions.ValidationError("Location tuples must be consisting of exactly two coordinates")
            # type() (not isinstance) deliberately rejects bools, which
            # are a subclass of int.
            if type(elem[0]) not in (int, float,) or type(elem[1]) not in (int, float,):
                raise rest_exceptions.ValidationError("Locations coordinates must be numbers")
        return data

    def to_representation(self, value):
        # Stored as a JSON string on the model; decode for output.
        return json.loads(value)
class JobSerializer(rest_serializers.ModelSerializer):
    """Serializer for shortest-path jobs."""
    # Read-only computed field, sourced from get_result().
    result = rest_serializers.SerializerMethodField()
    locations = LocationsField()
    num_vehicles = rest_serializers.IntegerField(default=1, required=False)
    starting_location = rest_serializers.IntegerField(default=0, required=False)
    href = rest_serializers.HyperlinkedIdentityField(view_name='jobs:shortest-path-jobs-detail', lookup_field='uuid')

    class Meta:
        model = models.Job
        fields = ('uuid', 'status', 'result', 'locations', 'num_vehicles', 'starting_location', 'href',)
        read_only_fields = ('uuid', 'status', 'result', 'href',)

    def create(self, validated_data):
        """Persist the job, JSON-encoding locations and stamping the creator."""
        # Locations are stored on the model as a JSON string (decoded
        # again by LocationsField.to_representation).
        validated_data['locations'] = json.dumps(validated_data['locations'])
        validated_data['created_by'] = self.context['request'].user
        return super().create(validated_data)

    def get_result(self, obj):
        # Source for the read-only `result` field.
        return obj.full_result
|
#!/usr/bin/env python
import os
import subprocess
def _write_properties():
    """Translate GH__* environment variables into graphouse.properties.

    GH__FOO__BAR_BAZ becomes graphouse.foo.bar-baz ("__" separates
    property segments, "_" inside a segment becomes "-"). Values whose
    key contains 'password' are written to the file but never echoed.
    """
    print("Generating graphouse properties")
    with open('/etc/graphouse/graphouse.properties', 'w') as graphouse_config:
        for env_key, value in os.environ.items():
            if not env_key.startswith("GH__"):
                continue
            java_key = env_key.replace("GH", "graphouse", 1).lower().replace("__", ".").replace("_", "-")
            if 'password' not in java_key:
                print("Property '%s' value: %s" % (java_key, value))
            graphouse_config.write("%s=%s\n" % (java_key, value))


def _write_vm_options():
    """Write graphouse.vmoptions.

    Uses GH_CUSTOM_JVM_OPTIONS verbatim (with literal backslash-n turned
    into newlines) when present, otherwise assembles options from the
    GH_XMX/GH_XMS/GH_XSS sizing variables plus fixed GC settings.
    """
    print("Generating graphouse vm options")
    with open('/etc/graphouse/graphouse.vmoptions', 'w') as graphouse_vm_config:
        # e.g. "GH_CUSTOM_JVM_OPTIONS=-XX:MaxRAM=4g\\n-XX:MaxRAMPercentage=80.0\\n-Xss2m\\n-XX:StringTableSize=10000000\\n-XX:+UseG1GC\\n-XX:MaxGCPauseMillis=1000\\n"
        vm_options = os.environ.get("GH_CUSTOM_JVM_OPTIONS", None)
        if vm_options is not None:
            vm_options = vm_options.replace('\\n', '\n')
            graphouse_vm_config.write(vm_options)
            print("Custom options:\n%s" % (vm_options))
        else:
            vm_xmx = os.environ.get("GH_XMX", "4g")
            vm_xms = os.environ.get("GH_XMS", "256m")
            vm_xss = os.environ.get("GH_XSS", "2m")
            print("Xmx %s, xms %s, xss %s" % (vm_xmx, vm_xms, vm_xss))
            graphouse_vm_config.write('-Xmx%s\n' % vm_xmx)
            graphouse_vm_config.write('-Xms%s\n' % vm_xms)
            graphouse_vm_config.write('-Xss%s\n' % vm_xss)
            graphouse_vm_config.write('-XX:StringTableSize=10000000\n')
            graphouse_vm_config.write('-XX:+UseG1GC\n')
            graphouse_vm_config.write('-XX:MaxGCPauseMillis=1000\n')


def main():
    """Generate graphouse config files from the environment, then launch it."""
    _write_properties()
    _write_vm_options()
    print("Starting graphouse")
    subprocess.call("/opt/graphouse/bin/graphouse")


if __name__ == "__main__":
    main()
|
from easydict import EasyDict
# PPG (Phasic Policy Gradient) configuration for the procgen 'coinrun'
# environment, in DI-engine's main_config/create_config convention.
coinrun_ppg_default_config = dict(
    exp_name='coinrun_ppg_seed0',
    env=dict(
        is_train=True,
        env_id='coinrun',
        collector_env_num=64,
        evaluator_env_num=10,
        n_evaluator_episode=50,
        stop_value=10,
        manager=dict(
            shared_memory=True,
        ),
    ),
    policy=dict(
        cuda=True,
        model=dict(
            # 64x64 RGB observations, 15 discrete actions.
            obs_shape=[3, 64, 64],
            action_shape=15,
            encoder_hidden_size_list=[16,32,32],
            actor_head_hidden_size=256,
            critic_head_hidden_size=256,
            impala_cnn_encoder=True,
        ),
        learn=dict(
            learning_rate=0.0005,
            actor_epoch_per_collect=1,
            critic_epoch_per_collect=1,
            value_norm=False,
            batch_size=2048,
            value_weight=0.5,
            entropy_weight=0.00,
            clip_ratio=0.2,
            # Frequency of PPG's auxiliary (value distillation) phase.
            aux_freq=1,
        ),
        collect=dict(n_sample=16384, ),
        eval=dict(evaluator=dict(eval_freq=96, )),
        other=dict(
        ),
    ),
)
coinrun_ppg_default_config = EasyDict(coinrun_ppg_default_config)
main_config = coinrun_ppg_default_config
coinrun_ppg_create_config = dict(
    env=dict(
        type='procgen',
        import_names=['dizoo.procgen.envs.procgen_env'],
    ),
    env_manager=dict(type='subprocess', ),
    policy=dict(type='ppg'),
)
coinrun_ppg_create_config = EasyDict(coinrun_ppg_create_config)
create_config = coinrun_ppg_create_config

# Usage: python this_file.py  (runs the on-policy PPG serial pipeline)
if __name__ == "__main__":
    from ding.entry import serial_pipeline_onpolicy_ppg
    serial_pipeline_onpolicy_ppg([main_config, create_config], seed=0)
|
import os
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
def load_image(filename):
    """Open `filename` with PIL and force the pixel data to load."""
    image = Image.open(filename)
    image.load()
    return image
def normalize_image(image):
    '''
    Sample-wise z-score normalization: subtract the image's own mean and
    divide by its own standard deviation.
    '''
    return (image - image.mean()) / image.std()
def compute_convolution(I, T, stride=None):
    '''
    Slide template <T> over image <I> (both numpy arrays) and return a
    heatmap whose (i, j) entry is the response of the template anchored
    at that location. Optional parameters (e.g. stride, window_size,
    padding) could be added for extra functionality.
    NOTE(review): only the first channel of <I> is sliced (`:1`) while
    <T> keeps all its channels, so the first (red) channel is broadcast
    against the whole template -- confirm this is intentional.
    '''
    t_rows, t_cols, _ = T.shape
    i_rows, i_cols, _ = I.shape
    out_rows = i_rows - t_rows + 1
    out_cols = i_cols - t_cols + 1
    heatmap = np.zeros((out_rows, out_cols))
    for r in range(out_rows):
        for c in range(out_cols):
            window = I[r:r + t_rows, c:c + t_cols, :1]
            heatmap[r, c] = np.sum(window * T)
    return heatmap
def predict_boxes(heatmap, kernel, quantile):
    '''
    Threshold the heatmap at the given quantile, cluster the surviving
    pixels, and return bounding boxes with confidence scores.
    '''
    max_value = np.sum(kernel * kernel)
    cutoff = np.quantile(heatmap, quantile)

    # Binarize: pixels at or above the cutoff become 255, the rest 0.
    clustered = np.copy(heatmap)
    clustered[heatmap < cutoff] = 0
    clustered[heatmap >= cutoff] = 255

    n_clusters = flood_fill(clustered, limit=kernel.shape[0] * kernel.shape[1])
    centers = get_centers(clustered, n_clusters)
    confidences = get_confidence(heatmap, clustered, max_value, n_clusters)

    boxes = []
    for ind, (i, j) in enumerate(centers):
        boxes.append([i, j, i + kernel.shape[0], j + kernel.shape[1],
                      confidences[ind]])
    return boxes
def flood_fill(arr, limit=20, ecc_thresh=1.5):
    '''
    Cluster the 255-valued pixels of `arr` in place, relabeling each
    4-connected group with a distinct integer starting at 1.
    limit - maximum number of pixels allowed in a cluster. Larger
    clusters are zeroed out (discarded), since traffic lights should not
    exceed the size of the filter.
    Returns the number of labels assigned (discarded ones included).
    '''
    label = 1
    while 255 in arr:
        seed = np.argwhere(arr == 255)[0]
        flood_fill_helper(arr, seed, label, set())
        # Discard oversized clusters.
        if len(np.argwhere(arr == label)) > limit:
            arr[arr == label] = 0
        # if eccentricity(arr, label) > ecc_thresh:
        #     arr[arr == label] = 0
        label += 1
    return label - 1
def flood_fill_helper(arr, loc, cluster_num, visited):
    """Recursively relabel the 4-connected 255-region containing `loc`
    with `cluster_num`, tracking seen cells in `visited`."""
    i, j = loc[0], loc[1]
    visited.add((i, j))
    if arr[i, j] != 255:
        return
    arr[i, j] = cluster_num
    # Visit the four neighbors (down, right, up, left), skipping cells
    # already seen and cells beyond the array border.
    candidates = (
        ((i + 1, j), i < arr.shape[0] - 1),
        ((i, j + 1), j < arr.shape[1] - 1),
        ((i - 1, j), i > 0),
        ((i, j - 1), j > 0),
    )
    for nxt, in_bounds in candidates:
        if nxt not in visited and in_bounds:
            flood_fill_helper(arr, nxt, cluster_num, visited)
def get_centers(arr, n_clusters):
    '''
    Return the centroid coordinates of each of the n_clusters clusters
    labeled in arr; empty (discarded) clusters are skipped.
    '''
    centers = []
    for label in range(1, n_clusters + 1):
        coords = np.transpose((arr == label).nonzero())
        if coords.size:
            centers.append(np.mean(coords, axis=0))
    return centers
def get_confidence(heatmap, arr, max_value, n_clusters):
    '''
    Return a confidence in [0, 1] for each labeled cluster, based on how
    close the cluster's peak heatmap response comes to max_value (the
    response corresponding to 100% confidence). arr is the clustered
    version of heatmap (after flood filling).
    '''
    confidences = []
    sigma = 0.5 * max_value
    for label in range(1, n_clusters + 1):
        matches = (arr == label).nonzero()
        if np.transpose(matches).size == 0:
            continue
        peak = np.max(heatmap[matches])
        confidences.append(np.exp(-((peak - max_value) / sigma) ** 2))
    return confidences
def eccentricity(arr, n):
    '''
    Return the "eccentricity" of cluster n in arr: the ratio of the
    cluster's extent along its longer axis to its extent along the
    shorter one. Traffic light clusters are roughly circular, so highly
    eccentric clusters should be discarded.
    '''
    coords = np.transpose((arr == n).nonzero())
    if coords.size == 0:
        return 1
    lo = np.min(coords, axis=0)
    hi = np.max(coords, axis=0)
    width = np.abs(lo[1] - hi[1]) + 1
    height = np.abs(lo[0] - hi[0]) + 1
    return max(width, height) / min(width, height)
def stop_overlap(bounding_boxes):
    '''
    Collapse overlapping bounding boxes: for each group of mutually
    overlapping boxes, keep only the one with the highest confidence.
    '''
    processed = set()
    keep = set()
    for i, (xmin1, ymin1, xmax1, ymax1, c1) in enumerate(bounding_boxes):
        if i in processed:
            continue
        processed.add(i)
        # Gather every not-yet-claimed later box that overlaps box i.
        group = [i]
        for j, (xmin2, ymin2, xmax2, ymax2, c2) in enumerate(bounding_boxes):
            if i >= j or j in processed:
                continue
            if overlap_1D((xmin1, xmax1), (xmin2, xmax2)) and \
               overlap_1D((ymin1, ymax1), (ymin2, ymax2)):
                group.append(j)
                processed.add(j)
        # Keep the best-scoring member of the group (first on ties,
        # matching the original strict-greater scan).
        best = max(group, key=lambda k: bounding_boxes[k][4])
        keep.add(best)
    return [box for i, box in enumerate(bounding_boxes) if i in keep]
def overlap_1D(interval1, interval2):
    """Return True when two closed 1-D intervals (lo, hi) overlap.

    The original expression OR-ed together two identical conditions
    (the second clause was the first with its comparisons swapped); a
    single pair of comparisons is equivalent.
    """
    return interval1[1] >= interval2[0] and interval2[1] >= interval1[0]
def display_results(image, bounding_boxes):
    '''
    Draw the given bounding boxes (with confidence labels) on image and
    show the result in an external viewer.
    '''
    canvas = Image.fromarray(image.astype('uint8'), 'RGB')
    pen = ImageDraw.Draw(canvas)
    for i0, j0, i1, j1, c in bounding_boxes:
        pen.rectangle((j0, i0, j1, i1), outline='red')
        pen.text((j0, i0 - 10), f'{c: .4f}', fill=(255, 255, 255, 255))
    canvas.show()
def display_results2(image, bounding_boxes):
    '''
    Draw the given score-less bounding boxes on image and show the
    result in an external viewer.
    '''
    canvas = Image.fromarray(image.astype('uint8'), 'RGB')
    pen = ImageDraw.Draw(canvas)
    for i0, j0, i1, j1 in bounding_boxes:
        pen.rectangle((j0, i0, j1, i1), outline='red')
    canvas.show()
def _detect_red_light(image, kernel, quantile, display=False):
    '''
    Helper function: normalize the image, correlate it with `kernel`,
    and return the predicted bounding boxes.
    NOTE(review): the `display` parameter is currently unused here.
    '''
    image_norm = normalize_image(image)
    heatmap = compute_convolution(image_norm, kernel)
    return predict_boxes(heatmap, kernel, quantile)
def detect_red_light_mf(I, display=False):
    '''
    This function takes a numpy array <I> and returns a list <output>.
    The length of <output> is the number of bounding boxes predicted for <I>.
    Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>.
    The first four entries are four integers specifying a bounding box
    (the row and column index of the top left corner and the row and column
    index of the bottom right corner).
    <score> is a confidence score ranging from 0 to 1.
    Note that PIL loads images in RGB order, so:
    I[:,:,0] is the red channel
    I[:,:,1] is the green channel
    I[:,:,2] is the blue channel
    '''
    # Parameters for the algorithm
    scaling_factors = [1, 1/2, 1/3]
    quantile = 0.9995
    kernel_name = 'filter2.png'
    kernel = load_image(kernel_name)
    output = []
    # Drop any alpha channel, keep RGB only.
    kernel = np.asarray(kernel, dtype='int32')[:,:,:3]
    kernel_shape = kernel.shape
    # Try different scale kernels, stopping if enough traffic lights
    # have been found
    for s in scaling_factors:
        # Reload so each scale starts from the unscaled template.
        kernel = load_image(kernel_name)
        if s != 1:
            kernel = kernel.resize((int(kernel_shape[1]*s), int(kernel_shape[0]*s)))
        kernel = np.asarray(kernel, dtype='int32')[:,:,:3]
        kernel = normalize_image(kernel)
        image_norm = normalize_image(I)
        heatmap = compute_convolution(image_norm, kernel)
        if display:
            plt.imshow(heatmap)
            plt.figure()
        bb = predict_boxes(heatmap, kernel, quantile)
        # if len(bb) >= attempts_per_scale:
        #     output.extend(bb)
        #     break
        output.extend(bb)
    # Merge overlapping detections across scales, keeping the most
    # confident box from each overlapping group.
    output = stop_overlap(output)
    if display:
        plt.show()
        display_results(I, output)
    # Sanity-check the output format: [row_TL, col_TL, row_BR, col_BR, score].
    for i in range(len(output)):
        assert len(output[i]) == 5
        assert (output[i][4] >= 0.0) and (output[i][4] <= 1.0)
    return output
# Note that you are not allowed to use test data for training.
# set the path to the downloaded data:
data_path = '../data/RedLights2011_Medium'

# load splits:
split_path = '../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))

# set a path for saving predictions:
preds_path = '../data/hw02_preds'
os.makedirs(preds_path, exist_ok=True) # create directory if needed

# Set this parameter to True when you're done with algorithm development:
done_tweaking = True

'''
Make predictions on the training set.
'''
preds_train = {}
for i in range(len(file_names_train)):
    print('TRAIN', i)
    # read image using PIL:
    I = Image.open(os.path.join(data_path,file_names_train[i]))
    # convert to numpy array:
    I = np.asarray(I)
    preds_train[file_names_train[i]] = detect_red_light_mf(I, display=False)

# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds_train.json'),'w') as f:
    json.dump(preds_train,f)

if done_tweaking:
    '''
    Make predictions on the test set.
    '''
    preds_test = {}
    for i in range(len(file_names_test)):
        print('TEST', i)
        # read image using PIL:
        I = Image.open(os.path.join(data_path,file_names_test[i]))
        # convert to numpy array:
        I = np.asarray(I)
        preds_test[file_names_test[i]] = detect_red_light_mf(I)
    # save preds (overwrites any previous predictions!)
    with open(os.path.join(preds_path,'preds_test.json'),'w') as f:
        json.dump(preds_test,f)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pprint
from collections import Counter
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from datadict import FindDimensions
class AcademicDimensionCalculator(BaseObject):
""" Compute the Academic Dimension """
__d_score_mapping = {
"Associates": 1.0,
"Bachelors": 2.0,
"Masters": 4.0,
"PhD": 8.0}
def __init__(self,
source_record: dict,
is_debug: bool = False):
"""
Created:
1-Nov-2019
craig.trim@ibm.com
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1151#issuecomment-15692427
Updated:
15-Nov-2019
craig.trim@ibm.com
* add 'transform-field'
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1373
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._source_record = source_record
self._dim_finder = FindDimensions(schema="degrees")
# state
self._df_result = None
self._d_degrees = None
def result(self):
class Facade(object):
@staticmethod
def degrees() -> dict:
return self._d_degrees
@staticmethod
def final() -> DataFrame:
return self._df_result
return Facade()
@staticmethod
def _transform_field(a_field: dict): # GIT-1373
"""
Purpose:
Transform Common Abbreviations to Degree Names
Notes:
- this is handled in GIT-1373-16032810
but will not be present in any tag collection <= supply_tag_20191114
- please retain this function for backward compatibility
:param a_field:
any degree-name field
:return:
a (potentially modified) degree name field
"""
if a_field['value'] == ['BA']:
a_field['tags']['supervised'].append(('Bachelor of Arts', 95))
if a_field['value'] == ['BS']:
a_field['tags']['supervised'].append(('Bachelor of Science', 95))
elif a_field['value'] == ['MA']:
a_field['tags']['supervised'].append(('Master of Arts', 95))
elif a_field['value'] == ['MS']:
a_field['tags']['supervised'].append(('Master of Science', 95))
return a_field
def _degree_fields(self) -> list:
matching_fields = []
for field in self._source_record["fields"]:
if field["name"] == "degree_name":
field = self._transform_field(field)
matching_fields.append(field)
return matching_fields
@staticmethod
def _tags(matching_fields: list) -> list:
s = set()
for field in matching_fields:
if "tags" in field:
[s.add(x[0]) for x in field["tags"]["supervised"]]
return sorted(s)
def _apply_schema(self,
tags: list) -> dict:
c = Counter()
for tag in tags:
for result in self._dim_finder.find(tag):
c.update({result: 1})
return dict(c)
def _score_mapping(self,
schema_element: str) -> float:
for k in self.__d_score_mapping:
if k == schema_element:
return self.__d_score_mapping[k]
return 0.0
def _compute_dataframe(self,
score: float,
weight_multiplier: float = 15.0):
"""
Purpose:
Transform output into an xdm-compatible DataFrame
Sample Output:
+----+----------+----------+----------+--------------+
| | Schema | Weight | zScore | zScoreNorm |
|----+----------+----------+----------+--------------|
| 0 | academic | 60 | 4 | 4 |
+----+----------+----------+----------+--------------+
:param score:
a computed score
:param weight_multiplier:
arbitrary value to keep the weight inflated above the z-score
e.g., a value with a z-score of 4 often has
an underlying weight of ~60
maintaining an approxiate ratio between z-score and weight
is useful in polar projections and other visualizations
"""
self._df_result = pd.DataFrame([{"Schema": "academic",
"Weight": score * weight_multiplier,
"zScore": score,
"zScoreNorm": score}])
if self._is_debug:
self.logger.debug('\n'.join([
f"Compute Academic Dimension",
f"\tScore: {score}",
f"\tKey Field: {self._source_record['key_field']}",
tabulate(self._df_result,
tablefmt='psql',
headers='keys')]))
def _score(self) -> float:
scores = []
for k in self._d_degrees:
scores.append(self._score_mapping(k))
if len(scores):
return max(scores)
return 0.0
def process(self):
    """Run the full academic-dimension computation and return the result.

    Steps: locate degree fields, map their tags onto the schema, score the
    mapping, and build the result DataFrame.
    """
    degree_fields = self._degree_fields()
    if self._is_debug:
        debug_lines = [
            "Located Degrees",
            f"\tKey Field: {self._source_record['key_field']}",
            pprint.pformat(degree_fields)]
        self.logger.debug('\n'.join(debug_lines))

    self._d_degrees = self._apply_schema(self._tags(degree_fields))
    self._compute_dataframe(self._score())
    return self.result()
|
# Cumulative sums over run-lengths.
# Appears to be the standard contest problem: given a 0/1 string S of length
# N and at most K operations that each turn one contiguous block of 0s into
# 1s, maximize the longest run of consecutive 1s. TODO confirm against the
# original problem statement.
N, K = map(int, input().split())
S = input()
# Run-length encode S into cs, strictly alternating (ones-run, zeros-run, ...).
# Seeding p with '1' guarantees cs[0] is a (possibly zero-length) run of 1s.
p = '1'
count = 0
cs = []
for c in S:
    if c != p:
        cs.append(count)
        p = c
        count = 1
    else:
        count += 1
cs.append(count)
# Pad with an empty ones-run so the list also *ends* on a ones-run.
if p != '1':
    cs.append(0)
# Fewer than 2K+1 runs means K flips cover every zero-run: the whole string
# becomes 1s, so the answer is N.
if len(cs) < 2 * K:
    print(N)
    exit()
# Turn cs into prefix sums of the run lengths (in place).
for i in range(1, len(cs)):
    cs[i] += cs[i - 1]
# A window of 2K+1 runs starting and ending on a ones-run contains exactly K
# zero-runs; slide the window two runs at a time and keep the best total.
result = cs[2 * K]
for i in range(2, len(cs) - 2 * K, 2):
    if cs[2 * K + i] - cs[i - 1] > result:
        result = cs[2 * K + i] - cs[i - 1]
print(result)
|
#!/usr/bin/env python
import os
import gc
import json
import pickle
import sqlite3
import argparse
import pandas as pd
from code_parser.codeparser_stdin import CodeParserStdin
from post_classifier.classifier import PostClassifier
from post_classifier.utils import (list_to_disk, load_number_list,
load_text_list, remove_rows)
from post_classifier.vectorizer import Vectorizer
from text_processing.text_eval import eval_text
from text_processing.utils import process_corpus
# Question query default settings
score_threshold = -3
ans_count_threshold = 1

# Database Queries
# NOTE: the {ans_count}/{score}/{id_list} placeholders are filled via
# str.format by CorpusBuilder; {id_list} receives str(tuple(ids)), i.e. the
# values are interpolated into the SQL text, not bound as parameters.
INIT_QUESTION_QUERY = '''SELECT Body, Id FROM questions
WHERE AnswerCount>={ans_count} AND Score>={score} ORDER BY Id ASC'''
INIT_ANSWER_QUERY = '''SELECT Body, Id FROM answers
WHERE ParentId IN {id_list} ORDER BY ParentId ASC'''
INIT_COMMENT_QUERY = '''SELECT Text AS Body, Id FROM comments
WHERE PostId IN {id_list} ORDER BY PostId ASC'''
FINAL_QUESTION_QUERY = '''SELECT Id, Title, Tags, Entities, SnippetCount, Score FROM questions
WHERE Id IN {id_list} ORDER BY Id ASC'''
FINAL_ANSWER_QUERY = '''SELECT Id, ParentId, Score FROM answers
WHERE Id IN {id_list} ORDER BY ParentId ASC'''
FINAL_COMMENT_QUERY = '''SELECT Id, PostId FROM comments
WHERE Id IN {id_list} ORDER BY PostId ASC'''
class CorpusBuilder:
    """Build a filtered text corpus from a StackExchange-style SQLite dump.

    Pipeline (see ``main``):
      1. ``build_initial_dataframes`` -- query question/answer/comment bodies,
         screen them with the post classifier, pickle per-type DataFrames.
      2. ``build_final_dataframes`` -- re-query metadata for the surviving
         ids and join it onto the initial DataFrames.
      3. ``build_corpus`` -- flatten titles/questions/answers into one text
         file and run token processing over it.
    """

    def __init__(self,
                 classifier_path,
                 vectorizer_dict_path,
                 database_path,
                 export_dir,
                 text_eval_fn,
                 qparams=None):
        """Wire up classifier, vectorizer and DB connection; create dirs.

        :param text_eval_fn: callable(body, post_id, codeparser) returning
            cleaned text, or -1 to signal the post should be dropped.
        :param qparams: optional dict of str.format args overriding the
            default question-query thresholds (keys: ans_count, score).
        """
        self.classifier = PostClassifier(classifier_path)
        self.vectorizer = Vectorizer(dictionary_path=vectorizer_dict_path)
        self.db_conn = sqlite3.connect(database_path)
        self.text_eval_fn = text_eval_fn
        self.qparams = qparams

        # Create paths
        self.temp_dir = 'temp_files'
        self.export_dir = export_dir
        if not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        # Pickled DataFrame locations keyed by post type:
        # 'q' = questions, 'a' = answers, 'c' = comments.
        self.init_dfs = {
            'q': os.path.join(export_dir, 'init_q_posts'),
            'a': os.path.join(export_dir, 'init_a_posts'),
            'c': os.path.join(export_dir, 'init_c_posts')
        }
        self.final_dfs = {
            'q': os.path.join(export_dir, 'final_q_posts'),
            'a': os.path.join(export_dir, 'final_a_posts'),
            'c': os.path.join(export_dir, 'final_c_posts')
        }

    def _retrieve_db_data(self, query, post_type, eval_posts=True):
        """Execute *query* and return its rows as {column: list-of-values}.

        When *eval_posts* is true, the first column of each row (the post
        body) is passed through ``self.text_eval_fn``; rows scoring -1 are
        discarded entirely.
        """
        c = self.db_conn.cursor()
        c.execute(query)
        cols = [d[0] for d in c.description]
        output_dict = {key: [] for key in cols}
        if eval_posts:
            codeparser = CodeParserStdin(index_path=os.path.join(
                self.export_dir, 'api_index'),
                                         extract_sequence=True,
                                         keep_imports=False,
                                         keep_comments=True,
                                         keep_literals=False,
                                         keep_method_calls=True,
                                         keep_unsolved_method_calls=False)
            # Format posts and discard low quality posts (excess punctuation)
            for idx, row in enumerate(c):
                print('\rpost:', idx, end='')
                body = row[0]
                # NOTE(review): callers pass post_type 'q'/'a'/'c', never
                # 'com', so this branch looks unreachable -- confirm intent.
                if post_type == 'com':  # replace quote char from comments
                    body = body.replace('`', ' ')
                eval_res = self.text_eval_fn(body, row[1], codeparser)
                if eval_res != -1:
                    # Keep the evaluated body plus the remaining columns.
                    output_dict[cols[0]].append(eval_res)
                    for ii, val in enumerate(row[1:], start=1):
                        output_dict[cols[ii]].append(val)
            print()
            codeparser.close()
        else:
            for idx, row in enumerate(c):
                print('\rpost:', idx, end='')
                for ii, val in enumerate(row):
                    output_dict[cols[ii]].append(val)
            print()
        return output_dict

    def _filter_posts(self, posts, post_ids, post_type):
        """Classify *posts* and keep only those predicted clean.

        Pickles the filtered DataFrame (indexed by post id) to the init
        path for *post_type* and records the kept ids on ``self.qid_list``
        ('q') or ``self.ansid_list`` (anything else).
        """
        # Load lists from disk if given a path
        if isinstance(posts, str):
            posts = load_text_list(posts)
        if isinstance(post_ids, str):
            post_ids = load_text_list(post_ids)
        # Create dump paths
        predictions_path = self.init_dfs[post_type] + '_predictions'
        # Vectorize posts and get classifier predictions
        vectorized_doc = self.vectorizer.vectorize_list(posts)
        with open(predictions_path, 'a') as pred_out:
            for batch in self.classifier.feed_data(vectorized_doc, verbose=1):
                self.classifier.save_predictions(
                    pred_out, self.classifier.make_prediction(batch, 0))
        # Filter out 'unclean' posts using the predictions
        labels = load_number_list(predictions_path, mode='bool')
        df = pd.DataFrame(data={'Body': remove_rows(posts, labels)},
                          index=remove_rows(post_ids, labels))
        if post_type == 'q':
            self.qid_list = list(df.index)
        else:
            self.ansid_list = list(df.index)
        # Save dataframe to disk
        df.to_pickle(self.init_dfs[post_type])

    def _build_initial_dataframe(self, query, post_type, keep_raw_data=False):
        """Query bodies for *post_type* and pickle the (filtered) DataFrame.

        Comments ('c') skip the classifier and are stored as-is; optionally
        dumps the raw query output to a temp pickle for debugging.
        """
        db_data = self._retrieve_db_data(query, post_type)
        if keep_raw_data:
            export_path = os.path.join(self.temp_dir, 'raw_data_' + post_type)
            with open(export_path, 'wb') as out:
                pickle.dump(db_data, out)
            print('Raw intermediate file saved at {}.'.format(export_path))
        if post_type != 'c':  # skip classifier stage for comments
            self._filter_posts(db_data['Body'], db_data['Id'], post_type)
        else:
            pd.DataFrame(data={
                'Body': db_data['Body']
            }, index=db_data['Id']).to_pickle(self.init_dfs['c'])

    def _build_final_dataframe(self, query, post_type):
        """Join extra DB metadata onto the initial DataFrame and pickle it."""
        def validate_data(original_ids, db_data):
            # The metadata query must return exactly the ids we kept, in the
            # same order, or the column-wise join below would be misaligned.
            if original_ids != db_data['Id']:
                raise ValueError('Validation failed. Id mismatch.')
            del db_data['Id']  # discard ids from dict

        # Load initial dataframe (df_index: Ids)
        init_df = pd.read_pickle(self.init_dfs[post_type])
        df_index = list(init_df.index)
        df_dict = {'Body': list(init_df['Body'])}
        # Retrieve extra database info
        db_data = self._retrieve_db_data(
            query.format(id_list=str(tuple(df_index))), post_type, False)
        # Ensure Id matching
        validate_data(df_index, db_data)
        # Update dataframe dict and save final dataframe to disk
        df_dict.update(db_data)
        final_df = pd.DataFrame(data=df_dict, index=df_index)
        final_df.to_pickle(self.final_dfs[post_type])

    def build_initial_dataframes(self, qid_list=None, ansid_list=None):
        """Query, classify and pickle the initial q/a/c DataFrames.

        :param qid_list: / :param ansid_list: optional precomputed id lists
            used for the comment query instead of the ids gathered this run.
        """
        print('Building initial dataframes.')
        query = INIT_QUESTION_QUERY.format(ans_count=ans_count_threshold,
                                           score=score_threshold)
        if self.qparams:
            query = INIT_QUESTION_QUERY.format(**self.qparams)
        self._build_initial_dataframe(query, 'q')
        # NOTE(review): str(tuple(ids)) yields '(5,)' for a single id, which
        # is not valid SQL in an IN (...) clause -- confirm the id lists are
        # always longer than one.
        query = INIT_ANSWER_QUERY.format(id_list=str(tuple(self.qid_list)))
        self._build_initial_dataframe(query, 'a')
        com_postids = []
        if qid_list and ansid_list:
            com_postids = qid_list + ansid_list
        else:
            com_postids = self.qid_list + self.ansid_list
        query = INIT_COMMENT_QUERY.format(id_list=str(tuple(com_postids)))
        self._build_initial_dataframe(query, 'c')

    def build_final_dataframes(self):
        """Join metadata onto each initial DataFrame (q, a, c)."""
        print('Building final dataframes.')
        self._build_final_dataframe(FINAL_QUESTION_QUERY, 'q')
        self._build_final_dataframe(FINAL_ANSWER_QUERY, 'a')
        self._build_final_dataframe(FINAL_COMMENT_QUERY, 'c')

    def _build_init_corpus(self):
        """Flatten titles, question bodies and answers into one text file.

        :return: path of the initial corpus written under the export dir.
        """
        def progress(iterable, max_n=30):
            # Simple in-place progress bar over *iterable*.
            n = len(iterable)
            for index, element in enumerate(iterable):
                j = (index + 1) / n
                print('\r[{:{}s}] {}%'.format('=' * int(max_n * j), max_n,
                                              int(100 * j)),
                      end='')
                yield index, element
            print()

        text_list = []
        qdf = pd.read_pickle(self.final_dfs['q'])
        qids = list(qdf.index)
        qposts = list(qdf['Body'])
        qtitles = list(qdf['Title'])
        print('Building initial text corpus...')
        ansdf = pd.read_pickle(self.final_dfs['a'])
        # For every question: its title, body, then all of its answers.
        for idx, qid in progress(qids):
            text_list.append(qtitles[idx])
            text_list.append(qposts[idx])
            text_list.extend(list(ansdf.loc[ansdf['ParentId'] == qid, 'Body']))
        print('Saving initial text corpus to disk...')
        init_corpus = os.path.join(self.export_dir, 'init_corpus')
        list_to_disk(init_corpus, text_list)
        return init_corpus

    ## TODO: include_comments=False
    def build_corpus(self,
                     init_corpus=None,
                     filter_corpus=True,
                     token_fn='norm'):
        """Token-process the initial corpus into the final corpus file.

        :param init_corpus: path to an existing initial corpus; built from
            the final DataFrames when omitted.
        :param token_fn: tokenization strategy, 'norm' or 'lemma'; any other
            value makes this method a silent no-op.
        """
        if not init_corpus:
            init_corpus = self._build_init_corpus()
        final_corpus = os.path.join(self.export_dir, 'final_corpus_')
        final_corpus = final_corpus + token_fn
        # NOTE(review): both branches make the identical call (token_fn is
        # forwarded either way) -- possibly intended to diverge later.
        if token_fn == 'norm':
            process_corpus(init_corpus, final_corpus, filter_corpus, token_fn)
        elif token_fn == 'lemma':
            process_corpus(init_corpus, final_corpus, filter_corpus, token_fn)
def main(classifier_path,
         vectorizer_dict_path,
         database_path,
         export_dir,
         text_eval_fn,
         qparams=None):
    """Run the full corpus-building pipeline end to end."""
    builder = CorpusBuilder(classifier_path, vectorizer_dict_path,
                            database_path, export_dir, text_eval_fn,
                            qparams)
    builder.build_initial_dataframes()
    builder.build_final_dataframes()
    builder.build_corpus()
def param_parser(params_filepath, text_eval_fn):
    """Load CorpusBuilder keyword arguments from a JSON params file.

    :param params_filepath: path to a JSON file with a top-level
        'database_path' and a 'corpus' section.
    :param text_eval_fn: evaluation callable passed straight through.
    :return: dict suitable for ``main(**params)``.
    """
    with open(params_filepath, 'r') as _in:
        params_dict = json.load(_in)
    corpus_section = params_dict['corpus']
    return {
        'classifier_path': corpus_section['classifier_path'],
        'vectorizer_dict_path': corpus_section['vectorizer_dict_path'],
        'database_path': params_dict['database_path'],
        'export_dir': corpus_section['export_dir'],
        'text_eval_fn': text_eval_fn,
        'qparams': corpus_section['qparams'],
    }
def validate_file(filepath):
    """Terminate the program with a message unless *filepath* exists."""
    if os.path.exists(filepath):
        return
    print('File "{}" does not exist.'.format(filepath))
    exit()
if __name__ == '__main__':
    # Parse the params-file location, check it exists, then run the pipeline.
    arg_parser = argparse.ArgumentParser(description='Corpus builder.')
    arg_parser.add_argument(
        '-p',
        '--params',
        default='params.json',
        help='Path to a valid params file. (default: params.json)')
    cli_args = arg_parser.parse_args()
    validate_file(cli_args.params)
    main(**param_parser(cli_args.params, eval_text))
|
import subprocess
class OnboardingAppChecker:
    """Query installed command-line tools for their version strings."""

    @classmethod
    def get_app_version(cls, app):
        """Return the text following 'version' in ``app --version`` output.

        Falls back to the whole output when 'version' does not occur in it.
        """
        output = cls.get_app_output(app)
        try:
            return output.split('version')[1]
        except IndexError:
            return output

    @classmethod
    def get_app_output(cls, app):
        """Run ``app --version`` through the shell and return stripped stdout.

        NOTE: *app* is interpolated into a shell command; only call this
        with trusted tool names.
        """
        raw = subprocess.check_output(
            '{} --version'.format(app), shell=True)
        return raw.decode('utf-8').strip()
|
from django.contrib.auth import get_user_model
from django.db import connection
from django_tenants.utils import (
get_public_schema_name,
get_tenant_domain_model,
get_tenant_model,
)
from tenant_users.tenants.models import ExistsError
def get_current_tenant():
    """Return the tenant whose schema is active on the current connection."""
    TenantModel = get_tenant_model()
    return TenantModel.objects.get(schema_name=connection.schema_name)
def create_public_tenant(domain_url, owner_email, **owner_extra):
    """Create the public tenant, its primary domain and an owner profile.

    :param domain_url: domain to register as the tenant's primary domain
    :param owner_email: email for the owner user created alongside
    :param owner_extra: extra fields forwarded to the user model's create()
    :raises ExistsError: if a public tenant already exists
    """
    UserModel = get_user_model()
    TenantModel = get_tenant_model()
    public_schema_name = get_public_schema_name()
    if TenantModel.objects.filter(schema_name=public_schema_name).first():
        raise ExistsError('Public tenant already exists')
    # Create public tenant user. This user doesn't go through object manager
    # create_user function because public tenant does not exist yet
    profile = UserModel.objects.create(
        email=owner_email,
        is_active=True,
        **owner_extra,
    )
    # Owner cannot log in with a password until one is explicitly set.
    profile.set_unusable_password()
    profile.save()
    # Create public tenant
    public_tenant = TenantModel.objects.create(
        schema_name=public_schema_name,
        name='Public Tenant',
        owner=profile,
    )
    # Add one or more domains for the tenant
    get_tenant_domain_model().objects.create(
        domain=domain_url,
        tenant=public_tenant,
        is_primary=True,
    )
    # Add system user to public tenant (no permissions)
    public_tenant.add_user(profile)
|
from datetime import datetime
from unittest import TestCase
from decimal import Decimal
from expressly.models import Order
class OrderFullTest(TestCase):
    """Exercise a mock Order: required fields, validation, totals, JSON."""

    def setUp(self):
        # Build a mock order with an ISO timestamp and money fields
        # quantized to cents.
        self.m = Order.get_mock_object()
        self.m.date = datetime.utcnow().isoformat('T')
        cents = Decimal('.01')
        self.m.tax = Decimal(self.m.tax).quantize(cents)
        self.m.total = Decimal(self.m.total).quantize(cents)

    def test_required(self):
        for attr in ('date', 'item_count', 'total', 'tax', 'post_tax_total'):
            self.assertIsNotNone(getattr(self.m, attr))

    def test_validate(self):
        self.assertIsNone(self.m.validate())

    def test_total_addition(self):
        self.assertEqual(self.m.total + self.m.tax, self.m.post_tax_total)

    def test_serialization(self):
        serialized = str(self.m)
        self.assertIn('"date": "%s"' % self.m.date, serialized)
        self.assertIn('"itemCount": %i' % self.m.item_count, serialized)
        for fragment in ('"preTaxTotal": ', '"tax": ', '"postTaxTotal": '):
            self.assertIn(fragment, serialized)
|
#!/usr/bin/env python
from datajob.datajob_stack import DataJobStack
from datajob.glue.glue_job import GlueJob
from datajob.stepfunctions.stepfunctions_workflow import StepfunctionsWorkflow
import pathlib
current_dir = pathlib.Path(__file__).parent.absolute()

# Declare the data pipeline stack. NOTE(review): DataJobStack presumably
# synthesizes/deploys the CDK stack when the context manager exits --
# confirm against the datajob documentation.
with DataJobStack(
    stack_name="data-pipeline-pkg", project_root=current_dir
) as datajob_stack:
    # Two Glue jobs whose entry points live in the packaged project.
    task1 = GlueJob(
        datajob_stack=datajob_stack,
        name="task1",
        job_path="data_pipeline_with_packaged_project/task1.py",
    )
    task2 = GlueJob(
        datajob_stack=datajob_stack,
        name="task2",
        job_path="data_pipeline_with_packaged_project/task2.py",
    )
    # Orchestrate the jobs: task1 runs before task2 in the workflow.
    with StepfunctionsWorkflow(datajob_stack=datajob_stack) as sfn:
        task1 >> task2
|
from django.db import models
class ProductManager(models.Manager):
    """Manager helpers for soft-deleted products."""

    def not_deleted(self):
        # Soft-delete convention: a row is "deleted" once deleted_at is set.
        return self.filter(deleted_at__isnull=True)
|
from django.db import models
# Create your models here.
class paywaylog(models.Model):
    """Payment-gateway log row keyed by a card identifier."""

    # Explicit auto-increment primary key (matches Django's implicit default).
    id = models.AutoField(primary_key=True)
    # Card identifier; required at the database level.
    cardid = models.CharField(null=False, max_length=50)
|
# -- coding: utf-8 --
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, dirname, abspath
from shutil import rmtree, copytree
from re import sub
import random
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
import logging
import tempfile
# Silence all library logging while the tests run.
logging.disable(logging.CRITICAL)

# Make the repository root importable so the project packages below resolve
# when the tests are run from inside the test directory.
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)

from box.util.rotunicode import RotUnicode
from httmock import response, HTTMock
from mock import MagicMock
from bs4 import Comment
from chime import create_app, repo_functions, google_api_functions, view_functions, constants
from unit.chime_test_client import ChimeTestClient
import codecs
# Register the RotUnicode codec used by tests exercising non-ASCII content.
codecs.register(RotUnicode.search_function)
# these patterns help us search the HTML of a response to determine if the
# expected page loaded; the {placeholders} are filled with str.format at
# assertion time before searching the page text.
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_SAVED_CATEGORY = u'<li class="flash flash--notice">Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.</li>'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# review stuff
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Unreviewed Edits</a>'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
class TestProcess (TestCase):
    def setUp(self):
        """Clone the test git repo, configure and build the Flask app.

        Creates an upstream repo copied from test-app.git, a bare origin
        clone of it, a scratch work path and a GA config file -- all under
        a private temp dir removed wholesale in tearDown.
        """
        # Point the process-wide tempdir at our own scratch dir so stray
        # temp files are cleaned up along with it.
        self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestProcess-')
        self.work_path = mkdtemp(prefix='chime-repo-clones-')

        repo_path = dirname(abspath(__file__)) + '/../test-app.git'
        upstream_repo_dir = mkdtemp(prefix='repo-upstream-', dir=self.work_path)
        upstream_repo_path = join(upstream_repo_dir, 'test-app.git')
        copytree(repo_path, upstream_repo_path)
        self.upstream = ChimeRepo(upstream_repo_path)
        repo_functions.ignore_task_metadata_on_merge(self.upstream)
        self.origin = self.upstream.clone(mkdtemp(prefix='repo-origin-', dir=self.work_path), bare=True)
        repo_functions.ignore_task_metadata_on_merge(self.origin)

        # environ['GIT_AUTHOR_NAME'] = ' '
        # environ['GIT_COMMITTER_NAME'] = ' '
        # environ['GIT_AUTHOR_EMAIL'] = u'erica@example.com'
        # environ['GIT_COMMITTER_EMAIL'] = u'erica@example.com'

        create_app_environ = {}
        create_app_environ['GA_CLIENT_ID'] = 'client_id'
        create_app_environ['GA_CLIENT_SECRET'] = 'meow_secret'
        self.ga_config_dir = mkdtemp(prefix='chime-config-', dir=self.work_path)
        create_app_environ['RUNNING_STATE_DIR'] = self.ga_config_dir
        create_app_environ['WORK_PATH'] = self.work_path
        create_app_environ['REPO_PATH'] = self.origin.working_dir
        create_app_environ['AUTH_DATA_HREF'] = 'http://example.com/auth.csv'
        create_app_environ['BROWSERID_URL'] = 'http://localhost'
        create_app_environ['LIVE_SITE_URL'] = 'http://example.org/'
        create_app_environ['SUPPORT_EMAIL_ADDRESS'] = u'support@example.com'
        create_app_environ['SUPPORT_PHONE_NUMBER'] = u'(123) 456-7890'
        self.app = create_app(create_app_environ)

        # write a tmp config file
        config_values = {
            "access_token": "meowser_token",
            "refresh_token": "refresh_meows",
            "profile_id": "12345678",
            "project_domain": ""
        }
        with self.app.app_context():
            google_api_functions.write_ga_config(config_values, self.app.config['RUNNING_STATE_DIR'])

        # Pin random.choice so randomly generated names are deterministic --
        # presumably used for branch-name suffixes; confirm in repo_functions.
        random.choice = MagicMock(return_value="P")
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def auth_csv_example_allowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\nexample.com,Example Org''')
raise Exception('Asked for unknown URL ' + url.geturl())
def mock_persona_verify_erica(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "erica@example.com"}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_frances(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "frances@example.com"}''')
else:
return self.auth_csv_example_allowed(url, request)
    # in TestProcess
    def test_editing_process_with_two_users(self):
        ''' Check edit process with a user looking at feedback from another user.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')

            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
            branch_name = erica.quick_activity_setup(*args)

            # Edit the new article.
            erica.edit_article('So, So Awesome', 'It was the best of times.')

            # Ask for feedback
            erica.follow_link('/tree/{}/'.format(branch_name))
            erica.request_feedback('Is this okay?')

            #
            # Switch users and comment on the activity.
            #
            frances.open_link(erica.path)
            frances.leave_feedback('It is super-great.')

            #
            # Switch back and look for that bit of feedback.
            #
            erica.reload()
            words = erica.soup.find(text='It is super-great.')
            # Walk up from the text node to the comment container, then check
            # the commenting author's email is rendered inside it.
            comment = words.find_parent('div').find_parent('div')
            author = comment.find(text='frances@example.com')
            self.assertTrue(author is not None)
    # in TestProcess
    def test_editing_process_with_two_categories(self):
        ''' Check edit process with a user looking at activity from another user.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')

            # Erica starts a new task, "Diving for Dollars".
            erica.open_link(constants.ROUTE_ACTIVITY)
            erica.start_task('Diving for Dollars')
            erica_branchname = erica.get_branch_name()

            # Erica creates a new category and asks for feedback.
            erica.follow_link('/tree/{}/edit/other/'.format(erica_branchname))
            erica.add_category('Dollars')
            erica.follow_link('/tree/{}/'.format(erica_branchname))
            erica.request_feedback('Is this okay?')

            # Frances starts a new task, "Bobbing for Apples".
            frances.open_link(constants.ROUTE_ACTIVITY)
            frances.start_task('Bobbing for Apples')
            frances_branchname = frances.get_branch_name()

            # Frances creates a new category.
            frances.follow_link('/tree/{}/edit/other/'.format(frances_branchname))
            frances.add_category('Apples')

            # Frances approves Erica's new work and publishes it.
            frances.open_link(erica.path)
            frances.leave_feedback('It is super-great.')
            frances.approve_activity()
            frances.publish_activity()

            # Erica should now expect to see her own new category.
            # (presumably a fresh activity branches from the published state,
            # which now includes 'Dollars' -- confirm against repo_functions)
            erica.open_link(constants.ROUTE_ACTIVITY)
            erica.start_task('Canticle for Leibowitz')
            erica_branchname2 = erica.get_branch_name()
            erica.follow_link('/tree/{}/edit/other/'.format(erica_branchname2))
            self.assertIsNotNone(erica.soup.find(text='Dollars'), 'Should see first published category')

            # Frances should still not expect to see Erica's published category.
            frances.open_link('/tree/{}/edit/'.format(frances_branchname))
            frances.follow_link('/tree/{}/edit/other/'.format(frances_branchname))
            self.assertIsNone(frances.soup.find(text='Dollars'), 'Should not see first published category')
    # in TestProcess
    def test_notified_when_saving_article_in_published_activity(self):
        ''' You're notified and redirected when trying to save an article in a published activity.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(erica_email)

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(frances_email)

            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
            branch_name = erica.quick_activity_setup(*args)

            # Edit the new article.
            erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
            article_path = erica.path

            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(branch_name))
            erica.request_feedback(comment_text='Is this okay?')

            # Re-load the article page
            erica.open_link(article_path)

            #
            # Switch users and publish the activity.
            #
            frances.open_link(url='/tree/{}/'.format(branch_name))
            frances.leave_feedback(comment_text='It is super-great.')
            frances.approve_activity()
            frances.publish_activity()

            #
            # Switch back and try to make another edit.
            #
            erica.edit_article(title_str='Just Awful', body_str='It was the worst of times.')

            # we should've been redirected to the activity overview page
            self.assertEqual(erica.path, '/tree/{}/'.format(branch_name))

            # a warning is flashed about working in a published branch
            # we can't get the date exactly right, so test for every other part of the message:
            # split the template on the 'xxx' date placeholder and require each
            # remaining fragment to appear in some flash <li>.
            message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
            message_published_split = message_published.split(u'xxx')
            for part in message_published_split:
                self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
    # in TestProcess
    def test_published_branch_not_resurrected_on_save(self):
        ''' Saving a change on a branch that exists locally but isn't at origin because it was published doesn't re-create the branch.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(erica_email)

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(frances_email)

            # Start a new task, topic, subtopic, article
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Squeeze A School Of Fish Into A Bait Ball for Dolphins'
            article_name = u'Stunned Fish'
            args = task_description, u'Plowing Through', u'Feeding On', article_name
            branch_name = erica.quick_activity_setup(*args)
            article_path = erica.path

            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(branch_name))
            erica.request_feedback(comment_text='Is this okay?')

            # Re-load the article page
            erica.open_link(url=article_path)

            # verify that the branch exists locally and remotely
            repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
            self.assertTrue(branch_name in repo.branches)
            # there's a remote branch with the branch name, but no tag
            self.assertFalse('refs/tags/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
            self.assertTrue('refs/heads/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())

            #
            # Switch to frances, approve and publish erica's changes
            #
            frances.open_link(url='/tree/{}/'.format(branch_name))
            frances.leave_feedback(comment_text='It is perfect.')
            frances.approve_activity()
            frances.publish_activity()

            #
            # Switch to erica, try to submit an edit to the article
            #
            erica.edit_article(title_str=article_name, body_str=u'Chase fish into shallow water to catch them.')

            # we should've been redirected to the activity overview page
            self.assertEqual(erica.path, '/tree/{}/'.format(branch_name))

            # a warning is flashed about working in a published branch
            # we can't get the date exactly right, so test for every other part of the message
            message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
            message_published_split = message_published.split(u'xxx')
            for part in message_published_split:
                self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))

            # verify that the branch exists locally and not remotely
            self.assertTrue(branch_name in repo.branches)
            # there's a remote tag with the branch name, but no branch
            # (per the asserts above, publishing replaced the remote branch
            # with a tag of the same name)
            self.assertTrue('refs/tags/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
            self.assertFalse('refs/heads/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
    # in TestProcess
    def test_notified_when_browsing_in_published_activity(self):
        ''' You're notified and redirected when trying to browse a published activity.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(erica_email)

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(frances_email)

            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            erica.start_task(description='Eating Carrion for Vultures')
            erica_branch_name = erica.get_branch_name()

            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))

            # Create a new category
            category_name = u'Forage'
            category_slug = slugify(category_name)
            erica.add_category(category_name=category_name)

            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
            erica.request_feedback(comment_text='Is this okay?')

            #
            # Switch users
            #
            # approve and publish erica's changes
            frances.open_link(url=erica.path)
            frances.leave_feedback(comment_text='It is perfect.')
            frances.approve_activity()
            frances.publish_activity()

            #
            # Switch users
            #
            # try to open an edit page (but anticipate a redirect)
            erica.open_link(url='/tree/{}/edit/other/{}/'.format(erica_branch_name, category_slug), expected_status_code=303)

            # we should've been redirected to the activity overview page
            self.assertEqual(erica.path, '/tree/{}/'.format(erica_branch_name))

            # a warning is flashed about working in a published branch
            # we can't get the date exactly right, so test for every other part of the message:
            # split the template on the 'xxx' date placeholder and check each fragment.
            message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
            message_published_split = message_published.split(u'xxx')
            for part in message_published_split:
                self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
    # in TestProcess
    def test_editing_process_with_conflicting_edit(self):
        ''' Check edit process with a user attempting to change an activity with a conflict.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')

            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')

            # Start a new task
            frances.open_link(constants.ROUTE_ACTIVITY)
            args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
            f_branch_name = frances.quick_activity_setup(*args)
            f_article_path = frances.path

            # Start a new task
            # (same topic/subtopic/article names as frances' activity, so both
            # branches presumably touch the same file -- confirm)
            erica.open_link(constants.ROUTE_ACTIVITY)
            args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
            e_branch_name = erica.quick_activity_setup(*args)

            # Edit the new article.
            erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')

            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(e_branch_name))
            erica.request_feedback(comment_text='Is this okay?')

            #
            # Switch users and publish the activity.
            #
            frances.open_link(url=erica.path)
            frances.leave_feedback(comment_text='It is super-great.')
            frances.approve_activity()
            frances.publish_activity()

            #
            # Now introduce a conflicting change on the original activity,
            # and verify that the expected flash warning is displayed.
            #
            frances.open_link(f_article_path)
            frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
            self.assertIsNotNone(frances.soup.find(text=repo_functions.MERGE_CONFLICT_WARNING_FLASH_MESSAGE),
                                 'Should see a warning about the conflict above the article.')
            frances.follow_link(href='/tree/{}/'.format(f_branch_name))
            self.assertIsNotNone(frances.soup.find(text=repo_functions.MERGE_CONFLICT_WARNING_FLASH_MESSAGE),
                                 'Should see a warning about the conflict in the activity history.')
# in TestProcess
    def test_editing_process_with_conflicting_edit_but_no_publish(self):
        ''' Check edit process with a user attempting to change an activity with a conflict.

            Both users edit parallel activities that touch the same
            topic/subtopic/article path, but neither activity is published,
            so no merge is attempted.

            NOTE(review): this test makes no explicit assertions -- it appears
            to rely on ChimeTestClient's internal response checks inside
            edit_article. Consider asserting that no conflict warning is
            flashed; confirm intended coverage.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Frances: Start a new task
            frances.open_link(constants.ROUTE_ACTIVITY)
            args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
            frances.quick_activity_setup(*args)
            # Erica: Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
            erica.quick_activity_setup(*args)
            # Erica edits the new article.
            erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
            # Frances edits the new article.
            frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
def test_editing_process_with_nonconflicting_edit(self):
''' Check edit process with a user attempting to change an activity with no conflict.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
f_branch_name = frances.quick_activity_setup(*args)
f_article_path = frances.path
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Samurai', 'Flipping Out', 'So Awesome'
e_branch_name = erica.quick_activity_setup(*args)
# Edit the new article.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(e_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users and publish the activity.
#
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is super-great.')
frances.approve_activity()
frances.publish_activity()
#
# Now introduce a conflicting change on the original activity,
# and verify that the expected flash warning is displayed.
#
frances.open_link(f_article_path)
frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
self.assertIsNone(frances.soup.find(text=repo_functions.UPSTREAM_EDIT_INFO_FLASH_MESSAGE),
'Should not see a warning about the conflict in the activity history.')
frances.follow_link(href='/tree/{}/'.format(f_branch_name))
self.assertIsNone(frances.soup.find(text=repo_functions.UPSTREAM_EDIT_INFO_FLASH_MESSAGE),
'Should not see a warning about the conflict in the activity history.')
# in TestProcess
    def test_editing_process_with_conflicting_edit_on_same_article(self):
        ''' Two people editing the same article in the same branch get a useful error.

            Erica saves first; Frances' later save of the same article should
            flash a "page was edited by ..." message, and Frances should still
            be able to navigate, delete the task, and start a new one.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(erica_email)
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(frances_email)
            # Frances: Start a new task, topic, subtopic, article
            frances.open_link(constants.ROUTE_ACTIVITY)
            args = 'Triassic for Artemia', 'Biological', 'Toxicity', 'Assays'
            frances.quick_activity_setup(*args)
            branch_name = frances.get_branch_name()
            # Frances and Erica load the same article
            erica.open_link(frances.path)
            # Erica edits the new article.
            erica.edit_article(title_str='Assays', body_str='Broad leaf-like appendages')
            # Frances edits the same article and gets an error
            frances.edit_article(title_str='Assays', body_str='Typical primitive arthropod')
            # we can't get the date exactly right, so test for every other part of the message
            # (split the expected message on the placeholder and check each fragment)
            message_edited = view_functions.MESSAGE_PAGE_EDITED.format(published_date=u'xxx', published_by=erica_email)
            message_edited_split = message_edited.split(u'xxx')
            for part in message_edited_split:
                self.assertIsNotNone(frances.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
            # Frances successfully browses elsewhere in the activity
            frances.open_link(url='/tree/{}/'.format(branch_name))
            # Frances successfully deletes the task
            frances.open_link(url=constants.ROUTE_ACTIVITY)
            frances.delete_task(branch_name=branch_name)
            # Frances successfully creates a new task
            frances.start_task(description='Narrow Braincase for Larger Carnassials')
# in TestProcess
    def test_task_not_marked_published_after_merge_conflict(self):
        ''' When publishing an activity results in a merge conflict, it shouldn't be marked published.

            Both users create the same category/subcategory/article path with
            different article bodies in separate branches. The first publish
            succeeds; the second returns a 500 merge-conflict error page, and
            the activity must still offer its Publish button afterwards.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            erica.start_task(description='Eating Carrion for Vultures')
            erica_branch_name = erica.get_branch_name()
            # Look for an "other" link that we know about - is it a category?
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            # Create a new category, subcategory, and article
            erica.add_category(category_name=u'Forage')
            erica.add_subcategory(subcategory_name='Dead Animals')
            erica.add_article(article_name='Dingos')
            # Edit the new article.
            erica.edit_article(title_str='Dingos', body_str='Canis Lupus Dingo')
            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
            erica.request_feedback(comment_text='Is this okay?')
            #
            # Switch users
            #
            # Start a new task
            frances.open_link(constants.ROUTE_ACTIVITY)
            frances.start_task(description='Flying in Circles for Vultures')
            frances_branch_name = frances.get_branch_name()
            # Look for an "other" link that we know about - is it a category?
            frances.follow_link(href='/tree/{}/edit/other/'.format(frances_branch_name))
            # Create a duplicate new category, subcategory, and article
            frances.add_category(category_name=u'Forage')
            frances.add_subcategory(subcategory_name='Dead Animals')
            frances.add_article(article_name='Dingos')
            # Edit the new article with a different body so the duplicate
            # path is guaranteed to conflict when both branches publish.
            frances.edit_article(title_str='Dingos', body_str='Apex Predator')
            # Ask for feedback
            frances.follow_link(href='/tree/{}/'.format(frances_branch_name))
            frances.request_feedback(comment_text='Is this okay?')
            frances_overview_path = frances.path
            # frances approves and publishes erica's changes
            frances.open_link(url=erica.path)
            frances.leave_feedback(comment_text='It is perfect.')
            frances.approve_activity()
            frances.publish_activity()
            # erica approves and tries to publish frances's changes; the
            # publish should fail with a merge conflict
            erica.open_link(url=frances_overview_path)
            erica.leave_feedback(comment_text='It is not bad.')
            erica.approve_activity()
            erica.publish_activity(expected_status_code=500)
            # we got a 500 error page about a merge conflict
            # (Comment nodes carry the text between <!-- and -->, so strip
            # the delimiters from the expected pattern before comparing)
            pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
            comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
            self.assertTrue(pattern_template_comment_stripped.format(u'error-500') in comments)
            # NOTE(review): tag['href'] / tag['value'] below raise KeyError for
            # an <a> or <button> lacking that attribute; tag.get() would be safer.
            self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'a' and u'MergeConflict' in tag['href']))
            # re-load the overview page
            erica.open_link(url=frances_overview_path)
            # verify that the publish button is still available
            self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'button' and tag['value'] == u'Publish'))
# in TestProcess
    def test_redirect_to_overview_when_branch_published(self):
        ''' When you're working in a published branch and don't have a local copy, you're redirected to
            that activity's overview page.

            After publishing, the local branch is force-deleted to simulate a
            machine that never had it; loading an edit URL should 303 to the
            overview and flash an "activity was published" message.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(erica_email)
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(frances_email)
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            erica.start_task(description='Eating Carrion for Vultures')
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            # Create a new category
            category_name = u'Forage'
            category_slug = slugify(category_name)
            erica.add_category(category_name=category_name)
            # Ask for feedback
            erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
            erica.request_feedback(comment_text='Is this okay?')
            #
            # Switch users
            #
            # approve and publish erica's changes
            frances.open_link(url=erica.path)
            frances.leave_feedback(comment_text='It is perfect.')
            frances.approve_activity()
            frances.publish_activity()
            # delete all trace of the branch locally (checkout master first so
            # the branch isn't current, then prune the stale remote ref)
            repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
            repo.git.checkout('master')
            repo.git.branch('-D', erica_branch_name)
            repo.git.remote('prune', 'origin')
            #
            # Switch users
            #
            # load an edit page; expect a redirect to the overview
            erica.open_link(url='/tree/{}/edit/other/{}/'.format(erica_branch_name, category_slug), expected_status_code=303)
            # a warning is flashed about working in a published branch
            # we can't get the date exactly right, so test for every other part of the message
            message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
            message_published_split = message_published.split(u'xxx')
            for part in message_published_split:
                self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
            # the overview page was loaded (identified by its HTML template comment)
            pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
            comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
            self.assertTrue(pattern_template_comment_stripped.format(u'activity-overview') in comments)
# in TestProcess
    def test_notified_when_working_in_deleted_task(self):
        ''' When someone else deletes a task you're working in, you're notified.

            Erica still has a local copy of the branch, so the edit page loads,
            but a "activity was deleted" message is flashed.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Eating Carrion for Vultures'
            erica.start_task(description=task_description)
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            #
            # Switch users
            #
            # delete erica's task
            frances.open_link(url=constants.ROUTE_ACTIVITY)
            frances.delete_task(branch_name=erica_branch_name)
            self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
            #
            # Switch users
            #
            # load an edit page
            erica.open_link(url='/tree/{}/edit/other/'.format(erica_branch_name))
            # a warning is flashed about working in a deleted branch
            self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
# in TestProcess
    def test_page_not_found_when_branch_deleted(self):
        ''' When you're working in a deleted branch and don't have a local copy, you get a 404 error

            Unlike test_notified_when_working_in_deleted_task, the local branch
            is also force-deleted here, so the edit URL should 404 while still
            flashing the "activity was deleted" message.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Eating Carrion for Vultures'
            erica.start_task(description=task_description)
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            #
            # Switch users
            #
            # delete erica's task
            frances.open_link(url=constants.ROUTE_ACTIVITY)
            frances.delete_task(branch_name=erica_branch_name)
            self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
            # delete all trace of the branch locally (checkout master first so
            # the branch isn't current, then prune the stale remote ref)
            repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
            repo.git.checkout('master')
            repo.git.branch('-D', erica_branch_name)
            repo.git.remote('prune', 'origin')
            #
            # Switch users
            #
            # load an edit page
            erica.open_link(url='/tree/{}/edit/other/'.format(erica_branch_name), expected_status_code=404)
            # a warning is flashed about working in a deleted branch
            self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
            # the 404 page was loaded (identified by its HTML template comment)
            pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
            comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
            self.assertTrue(pattern_template_comment_stripped.format(u'error-404') in comments)
# in TestProcess
    def test_deleted_branch_not_resurrected_on_save(self):
        ''' Saving a change on a branch that exists locally but isn't at origin because it was deleted doesn't re-create the branch.

            Erica keeps her local clone of the branch after Frances deletes the
            task; saving an edit must not push the branch back to origin.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Squeeze A School Of Fish Into A Bait Ball for Dolphins'
            erica.start_task(description=task_description)
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            # Create a category, subcategory, and article
            article_name = u'Stunned Fish'
            erica.add_category(category_name=u'Plowing Through')
            erica.add_subcategory(subcategory_name=u'Feeding On')
            erica.add_article(article_name=article_name)
            erica_article_path = erica.path
            # verify that the branch exists locally and remotely
            repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
            self.assertTrue(erica_branch_name in repo.branches)
            self.assertIsNotNone(repo_functions.get_branch_if_exists_at_origin(clone=repo, default_branch_name='master', new_branch_name=erica_branch_name))
            #
            # Switch users
            #
            # delete erica's task
            frances.open_link(url=constants.ROUTE_ACTIVITY)
            frances.delete_task(branch_name=erica_branch_name)
            self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
            #
            # Switch users
            #
            # load the article edit page
            erica.open_link(url=erica_article_path)
            # a warning is flashed about working in a deleted branch
            self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
            # try to save an edit to the article
            erica.edit_article(title_str=article_name, body_str=u'Chase fish into shallow water to catch them.')
            # we're in the article-edit template (identified by its HTML template comment)
            pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
            comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
            self.assertTrue(pattern_template_comment_stripped.format(u'article-edit') in comments)
            # a warning is flashed about working in a deleted branch
            self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
            # verify that the branch exists locally and not remotely
            self.assertTrue(erica_branch_name in repo.branches)
            self.assertIsNone(repo_functions.get_branch_if_exists_at_origin(clone=repo, default_branch_name='master', new_branch_name=erica_branch_name))
# in TestProcess
    def test_forms_for_changes_in_active_task(self):
        ''' When working in an active (not published or deleted) task, forms or form buttons that allow
            you to make changes are visible.

            Checks the edit listing page (comment form, add-topic/subtopic/article
            inputs, pencil/trashcan buttons), the category modify page
            (save/delete buttons) and the article edit page (save button).
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Eating Carrion for Vultures'
            erica.start_task(description=task_description)
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            # Create a category, sub-category, article
            category_name = u'Antennae Segments'
            category_slug = slugify(category_name)
            subcategory_name = u'Short Ovipositors'
            article_name = u'Inject Eggs Directly Into a Host Body'
            erica.add_category(category_name=category_name)
            erica.add_subcategory(subcategory_name=subcategory_name)
            subcategory_path = erica.path
            erica.add_article(article_name=article_name)
            article_path = erica.path
            #
            # All the edit forms and buttons are there as expected
            #
            # load an edit page
            erica.open_link(url=subcategory_path)
            # the drop-down comment form is there
            # NOTE(review): tag.get('class') / tag.get('action') return None for
            # tags without the attribute, making `in` raise TypeError; the
            # short-circuit on tag.name narrows which tags are checked.
            review_modal = erica.soup.find(lambda tag: bool(tag.name == 'form' and 'review-modal' in tag.get('class')))
            self.assertIsNotNone(review_modal)
            # the add new topic, subtopic, and article fields is there
            self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic')))
            self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic')))
            self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article')))
            # there's an edit (pencil) button on the category or subcategory, and a delete (trashcan) button on the article
            topic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == category_name)).find_parent('li')
            self.assertIsNotNone(topic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
            subtopic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == subcategory_name)).find_parent('li')
            self.assertIsNotNone(subtopic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
            article_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == article_name)).find_parent('li')
            self.assertIsNotNone(article_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class'))))
            # load a modify page
            index_filename = u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)
            erica.open_link(url='/tree/{}/edit/other/{}'.format(erica_branch_name, join(category_slug, index_filename)))
            # there's a save and delete button on the modify category form
            modify_form = erica.soup.find('textarea', attrs={'name': 'en-description'}).find_parent('form')
            delete_button = modify_form.find('button', attrs={'value': 'delete_category'})
            save_button = modify_form.find('button', attrs={'value': 'save_category'})
            self.assertIsNotNone(delete_button)
            self.assertIsNotNone(save_button)
            # load an article edit page
            erica.open_link(url=article_path)
            # there's a save button on the edit form
            edit_form = erica.soup.find(lambda tag: bool(tag.name == 'form' and u'/tree/{}/save/'.format(erica_branch_name) in tag.get('action')))
            save_button = edit_form.find('button', value='Save')
            self.assertIsNotNone(save_button)
# in TestProcess
    def test_no_forms_for_changes_in_inactive_task(self):
        ''' When working in an inactive (published or deleted) task, forms or form buttons that would
            allow you to make changes are hidden.

            Mirror of test_forms_for_changes_in_active_task: after Frances
            deletes the task, the same pages must NOT show the comment form,
            add-inputs, pencil/trashcan buttons, or save/delete buttons.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            erica.open_link(constants.ROUTE_ACTIVITY)
            task_description = u'Eating Carrion for Vultures'
            erica.start_task(description=task_description)
            erica_branch_name = erica.get_branch_name()
            # Enter the "other" folder
            erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
            # Create a category, sub-category, article
            category_name = u'Antennae Segments'
            category_slug = slugify(category_name)
            subcategory_name = u'Short Ovipositors'
            article_name = u'Inject Eggs Directly Into a Host Body'
            erica.add_category(category_name=category_name)
            erica.add_subcategory(subcategory_name=subcategory_name)
            subcategory_path = erica.path
            erica.add_article(article_name=article_name)
            article_path = erica.path
            #
            # Switch users
            #
            # delete erica's task
            frances.open_link(url=constants.ROUTE_ACTIVITY)
            frances.delete_task(branch_name=erica_branch_name)
            self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
            #
            # Switch users
            #
            # load an edit page
            erica.open_link(url=subcategory_path)
            # the drop-down comment form isn't there
            review_modal = erica.soup.find(lambda tag: bool(tag.name == 'form' and 'review-modal' in tag.get('class')))
            self.assertIsNone(review_modal)
            # the add new topic, subtopic, and article fields aren't there
            self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic')))
            self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic')))
            self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article')))
            # there's no edit (pencil) button on the category or subcategory, and no delete (trashcan) button on the article
            topic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == category_name)).find_parent('li')
            self.assertIsNone(topic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
            subtopic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == subcategory_name)).find_parent('li')
            self.assertIsNone(subtopic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
            article_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == article_name)).find_parent('li')
            self.assertIsNone(article_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class'))))
            # load a modify page
            index_filename = u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)
            erica.open_link(url='/tree/{}/edit/other/{}'.format(erica_branch_name, join(category_slug, index_filename)))
            # there's no save or delete button on the modify category form
            modify_form = erica.soup.find('textarea', attrs={'name': 'en-description'}).find_parent('form')
            delete_button = modify_form.find('button', attrs={'value': 'delete_category'})
            save_button = modify_form.find('button', attrs={'value': 'save_category'})
            self.assertIsNone(delete_button)
            self.assertIsNone(save_button)
            # load an article edit page
            erica.open_link(url=article_path)
            # there's no save button on the edit form
            edit_form = erica.soup.find(lambda tag: bool(tag.name == 'form' and u'/tree/{}/save/'.format(erica_branch_name) in tag.get('action')))
            save_button = edit_form.find('button', value='Save')
            self.assertIsNone(save_button)
# in TestProcess
    def test_editing_out_of_date_article(self):
        ''' Check edit process with a user attempting to edit an out-of-date article.

            Erica loads the article, Frances changes the branch underneath her,
            then Erica saves.

            NOTE(review): this test makes no explicit assertions -- it appears
            to rely on ChimeTestClient's internal response checks inside
            edit_article; confirm intended coverage.
        '''
        with HTTMock(self.auth_csv_example_allowed):
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in('erica@example.com')
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in('frances@example.com')
            # Start a new task
            frances.open_link(constants.ROUTE_ACTIVITY)
            args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
            frances.quick_activity_setup(*args)
            frances.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
            # Erica now opens the article that Frances started.
            erica.open_link(frances.path)
            # Frances starts a different article.
            frances.open_link(dirname(dirname(frances.path)) + '/')
            frances.add_article('So Terrible')
            # Meanwhile, Erica completes her edits.
            erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.\n\nBut also the worst of times.')
# in TestProcess
    def test_published_activity_history_accuracy(self):
        ''' A published activity's history is constructed as expected.

            Walks one activity through create topic/subtopic/article, edit,
            comment, request-feedback, approve, publish; then checks the
            recently-published listing, the overview summary rows, and the
            full activity log (9 entries, newest last -- each popped from the
            end of the list below, so assertions run oldest-first).
        '''
        with HTTMock(self.auth_csv_example_allowed):
            erica_email = u'erica@example.com'
            frances_email = u'frances@example.com'
            with HTTMock(self.mock_persona_verify_erica):
                erica = ChimeTestClient(self.app.test_client(), self)
                erica.sign_in(email=erica_email)
            with HTTMock(self.mock_persona_verify_frances):
                frances = ChimeTestClient(self.app.test_client(), self)
                frances.sign_in(email=frances_email)
            # Erica starts a new task, topic, sub-topic, article
            erica.open_link(constants.ROUTE_ACTIVITY)
            activity_description = u'Reef-Associated Roving Coralgroupers'
            topic_name = u'Plectropomus Pessuliferus'
            subtopic_name = u'Recruit Giant Morays'
            article_name = u'In Hunting For Food'
            args = activity_description, topic_name, subtopic_name, article_name
            branch_name = erica.quick_activity_setup(*args)
            # edit the article
            erica.edit_article(title_str=article_name, body_str=u'This is the only known instance of interspecies cooperative hunting among fish.')
            # Load the activity overview page
            erica.open_link(url='/tree/{}/'.format(branch_name))
            # Leave a comment
            comment_body = u'The invitation to hunt is initiated by head-shaking.'
            erica.leave_feedback(comment_text=comment_body)
            # Request feedback
            erica.request_feedback()
            #
            # Switch users and publish the activity.
            #
            frances.open_link(url=erica.path)
            frances.approve_activity()
            frances.publish_activity()
            #
            # Switch users and load the activity page.
            #
            erica.open_link(url=constants.ROUTE_ACTIVITY)
            # verify that the project is listed in the recently published column
            pub_ul = erica.soup.select("#activity-list-published")[0]
            # there should be an HTML comment with the branch name
            comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
            self.assertTrue(branch_name in comment)
            pub_li = comment.find_parent('li')
            # and the activity title wrapped in an a tag
            self.assertIsNotNone(pub_li.find('a', text=activity_description))
            # load the published activitiy's overview page
            erica.open_link(url='/tree/{}/'.format(branch_name))
            # a warning is flashed about working in a published branch
            # we can't get the date exactly right, so test for every other part of the message
            message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
            message_published_split = message_published.split(u'xxx')
            for part in message_published_split:
                self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
            # there is a summary
            summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
            self.assertIsNotNone(summary_div)
            # it's right about what's changed
            self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 article and 2 topics have been changed' in tag.text)))
            # grab all the list items and make sure they match what we did above
            # (pop() takes rows from the end, so checks run bottom-to-top)
            check_rows = summary_div.find_all('li')
            # the link to create a new change
            change_row = check_rows.pop()
            self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
            self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
            # the topic creation
            category_row = check_rows.pop()
            self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
            self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, topic_name)
            self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
            self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
            # the subtopic creation
            subcategory_row = check_rows.pop()
            self.assertIsNotNone(subcategory_row.find("a", {"data-test-id": "change-link"}))
            self.assertEqual(subcategory_row.find('h3', {"data-test-id": "change-title"}).text, subtopic_name)
            self.assertEqual(subcategory_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
            self.assertEqual(subcategory_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
            # the article creation & edit
            article_1_row = check_rows.pop()
            self.assertIsNotNone(article_1_row.find("a", {"data-test-id": "change-link"}))
            self.assertEqual(article_1_row.find('h3', {"data-test-id": "change-title"}).text, article_name)
            self.assertEqual(article_1_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
            self.assertEqual(article_1_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Edited')
            # no rows left
            self.assertEqual(len(check_rows), 0)
            # also check the full history
            history_div = erica.soup.find("div", class_="activity-log")
            check_rows = history_div.find_all('div', class_='activity-log-item')
            self.assertEqual(len(check_rows), 9)
            # activity started
            started_row = check_rows.pop()
            # The "Reef-Associated Roving Coralgroupers" activity was started by erica@example.com.
            self.assertEqual(started_row.find('p').text.strip(), u'The "{}" {} by {}.'.format(activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
            topic_row = check_rows.pop()
            # The "Plectropomus Pessuliferus" topic was created by erica@example.com.
            self.assertEqual(topic_row.find('p').text.strip(), u'The "{}" topic was created by {}.'.format(topic_name, erica_email))
            subtopic_row = check_rows.pop()
            # The "Recruit Giant Morays" topic was created by erica@example.com.
            self.assertEqual(subtopic_row.find('p').text.strip(), u'The "{}" topic was created by {}.'.format(subtopic_name, erica_email))
            article_created_row = check_rows.pop()
            # The "In Hunting For Food" article was created by erica@example.com.
            self.assertEqual(article_created_row.find('p').text.strip(), u'The "{}" article was created by {}.'.format(article_name, erica_email))
            article_edited_row = check_rows.pop()
            # The "In Hunting For Food" article was edited by erica@example.com.
            self.assertEqual(article_edited_row.find('p').text.strip(), u'The "{}" article was edited by {}.'.format(article_name, erica_email))
            comment_row = check_rows.pop()
            self.assertEqual(comment_row.find('div', class_='comment__author').text, erica_email)
            self.assertEqual(comment_row.find('div', class_='comment__body').text, comment_body)
            feedback_requested_row = check_rows.pop()
            # erica@example.com requested feedback on this activity.
            self.assertEqual(feedback_requested_row.find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
            endorsed_row = check_rows.pop()
            # frances@example.com endorsed this activity.
            self.assertEqual(endorsed_row.find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
            published_row = check_rows.pop()
            # frances@example.com published this activity.
            self.assertEqual(published_row.find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
# in TestProcess
def test_published_activities_dont_mix_histories(self):
    ''' The histories of two published activities that were worked on simultaneously don't leak into each other.

        Erica creates two activities, adds one topic and one comment to each,
        and requests feedback; Frances approves and publishes both.  Each
        activity's overview and history must then show only its own events.
    '''
    with HTTMock(self.auth_csv_example_allowed):
        erica_email = u'erica@example.com'
        frances_email = u'frances@example.com'
        with HTTMock(self.mock_persona_verify_erica):
            erica = ChimeTestClient(self.app.test_client(), self)
            erica.sign_in(email=erica_email)
        with HTTMock(self.mock_persona_verify_frances):
            frances = ChimeTestClient(self.app.test_client(), self)
            frances.sign_in(email=frances_email)
        # Erica starts two new tasks
        erica.open_link(constants.ROUTE_ACTIVITY)
        first_activity_description = u'Use Gestures To Coordinate Hunts'
        first_branch_name = erica.quick_activity_setup(first_activity_description)
        first_edit_path = erica.path
        erica.open_link(constants.ROUTE_ACTIVITY)
        second_activity_description = u'Come To The Coral Trout\'s Aid When Signalled'
        second_branch_name = erica.quick_activity_setup(second_activity_description)
        second_edit_path = erica.path
        # Erica creates a new topic in the two tasks
        erica.open_link(first_edit_path)
        first_topic_name = u'Plectropomus Leopardus'
        erica.add_category(category_name=first_topic_name)
        erica.open_link(second_edit_path)
        second_topic_name = u'Cheilinus Undulatus'
        erica.add_category(category_name=second_topic_name)
        # Erica leaves comments on the two tasks and requests feedback
        erica.open_link(url='/tree/{}/'.format(first_branch_name))
        first_comment_body = u'Testing their interactions with Napolean wrasse decoys.'
        erica.leave_feedback(comment_text=first_comment_body)
        # Request feedback
        erica.request_feedback()
        erica.open_link(url='/tree/{}/'.format(second_branch_name))
        second_comment_body = u'The "good" wrasse would come to the trout\'s aid when signalled, whereas the "bad" one would swim in the opposite direction.'
        erica.leave_feedback(comment_text=second_comment_body)
        # Request feedback
        erica.request_feedback()
        #
        # Switch users and publish the activities.
        #
        frances.open_link(url='/tree/{}/'.format(first_branch_name))
        frances.approve_activity()
        frances.publish_activity()
        frances.open_link(url='/tree/{}/'.format(second_branch_name))
        frances.approve_activity()
        frances.publish_activity()
        #
        # Switch users and check the first overview page.
        #
        erica.open_link(url='/tree/{}/'.format(first_branch_name))
        # there is a summary
        summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
        self.assertIsNotNone(summary_div)
        # it's right about what's changed
        self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 topic has been changed' in tag.text)))
        # grab all the list items and make sure they match what we did above
        check_rows = summary_div.find_all('li')
        # the link to create a new change
        change_row = check_rows.pop()
        self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
        self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
        # the topic creation
        category_row = check_rows.pop()
        self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
        self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, first_topic_name)
        self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
        self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
        # no rows left
        self.assertEqual(len(check_rows), 0)
        # also check the full history: only the first activity's six events
        history_div = erica.soup.find("div", class_="activity-log")
        check_rows = history_div.find_all('div', class_='activity-log-item')
        self.assertEqual(len(check_rows), 6)
        # The "Use Gestures To Coordinate Hunts" activity was started by erica@example.com.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" {} by {}.'.format(first_activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
        # The "Plectropomus Leopardus" topic was created by erica@example.com.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" topic was created by {}.'.format(first_topic_name, erica_email))
        # Testing their interactions with Napolean wrasse decoys.
        self.assertEqual(check_rows.pop().find('div', class_='comment__body').text, first_comment_body)
        # erica@example.com requested feedback on this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
        # frances@example.com endorsed this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
        # frances@example.com published this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
        #
        # Check the second overview page.
        #
        erica.open_link(url='/tree/{}/'.format(second_branch_name))
        # there is a summary
        summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
        self.assertIsNotNone(summary_div)
        # it's right about what's changed
        self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 topic has been changed' in tag.text)))
        # grab all the list items and make sure they match what we did above
        check_rows = summary_div.find_all('li')
        # the link to create a new change
        change_row = check_rows.pop()
        self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
        self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
        # the topic creation
        category_row = check_rows.pop()
        self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
        self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, second_topic_name)
        self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
        self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
        # no rows left
        self.assertEqual(len(check_rows), 0)
        # also check the full history: only the second activity's six events
        history_div = erica.soup.find("div", class_="activity-log")
        check_rows = history_div.find_all('div', class_='activity-log-item')
        self.assertEqual(len(check_rows), 6)
        # The "Come To The Coral Trout's Aid When Signalled" activity was started by erica@example.com.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" {} by {}.'.format(second_activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
        # The "Cheilinus Undulatus" topic was created by erica@example.com.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" topic was created by {}.'.format(second_topic_name, erica_email))
        # The second activity's comment body.
        self.assertEqual(check_rows.pop().find('div', class_='comment__body').text, second_comment_body)
        # erica@example.com requested feedback on this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
        # frances@example.com endorsed this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
        # frances@example.com published this activity.
        self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
if __name__ == '__main__':
    main()  # `main` is expected to be defined/imported earlier in this module
|
#!/usr/bin/env python
"""
The following is a summary of the licenses involved in this project.
Please also refer to the LICENSE folder in this github repository
for full licensing information.
LICENSE SUMMARY:
------------------------------------------
The MIT License (MIT)
applies to:
- qtSubAndPlot class, Copyright (c) 2016 Scott W Harden
- ui_main, Copyright (c) 2016 Scott W Harden
- PyQtGraph, Copyright (c) 2012 University of North Carolina at Chapel Hill
------------------------------------------
BSD License
applies to:
- NumPy, Copyright (c) 2005-2018, NumPy Developers.
- rospy, Copyright (c) 2008, Willow Garage, Inc.
------------------------------------------
GNU GPL License
applies to:
- PyQt4, Copyright (C) 2011 Riverbank Computing Limited
Note: Redistribution possible under compatible licenses
(see https://www.gnu.org/licenses/license-list.en.html)
------------------------------------------
"""
import rospy
from PyQt4 import QtGui,QtCore
import sys
import ui_main
import numpy as np
import pyqtgraph
from audio_proc.msg import FFTData
class qtSubAndPlot(QtGui.QMainWindow, ui_main.Ui_MainWindow):
    """Qt main window that subscribes to ROS FFT data and live-plots PCM and FFT."""

    def __init__(self, parent=None):
        rospy.init_node('plotter')
        pyqtgraph.setConfigOption('background', 'w') #before loading widget
        super(qtSubAndPlot,self).__init__(parent)
        self.setupUi(self)
        self.grFFT.plotItem.showGrid(True, True, 0.5)
        self.grPCM.plotItem.showGrid(True, True, 0.5)
        # Running maxima, used to scale the level bar and normalize the FFT plot.
        self.maxFFT = 0
        self.maxPCM = 0
        # Latest message payload (filled in by updatePlot).
        self.chunk = 0
        self.fft = None
        self.audiowave = None
        self.freqs = None
        self.sample_rate=rospy.get_param("~sample_rate", 48000)
        rospy.Subscriber("fftData_throttle", FFTData, self.monitoringCallback)
        # Old-style PyQt4 signal: monitoringCallback re-emits incoming messages
        # as "changeUI", which is connected to updatePlot.
        self.connect(self, QtCore.SIGNAL("changeUI(PyQt_PyObject)"),
                     self.updatePlot)

    def updatePlot(self, msg):
        """
        updates the plot everytime it receives a signal from the subscriber
        """
        self.fft = np.asarray(msg.fft,dtype=np.float32)
        self.freqs = np.asarray(msg.freqs,dtype=np.float32)
        self.audiowave = np.asarray(msg.wavedata,dtype=np.int32)
        self.chunk = len(self.audiowave)
        if not self.audiowave is None and not self.fft is None:
            pcmMax=np.max(np.abs(self.audiowave))
            if pcmMax>self.maxPCM:
                self.maxPCM=pcmMax
                self.grPCM.plotItem.setRange(yRange=[-pcmMax,pcmMax])
            if np.max(self.fft)>self.maxFFT:
                self.maxFFT=np.max(np.abs(self.fft))
                self.grFFT.plotItem.setRange(yRange=[0,1])
            # NOTE(review): maxPCM/maxFFT start at 0 and only grow on positive
            # input, so an all-zero first frame would divide by zero below --
            # confirm the publisher never sends silent frames.
            self.pbLevel.setValue(1000*pcmMax/self.maxPCM)
            pen=pyqtgraph.mkPen(color='b')
            # X axis in seconds: sample index / sample rate.
            self.grPCM.plot(np.arange(self.chunk)/float(self.sample_rate), self.audiowave,pen=pen,clear=True)
            pen=pyqtgraph.mkPen(color='r')
            self.grFFT.plot(self.freqs,self.fft/self.maxFFT,pen=pen,clear=True)

    def monitoringCallback(self,msg):
        # Runs on the ROS subscriber thread; hand the message to the GUI via
        # the changeUI signal instead of touching widgets directly.
        self.emit(QtCore.SIGNAL("changeUI(PyQt_PyObject)"),msg)
if __name__=="__main__":
    # Build the Qt application and hand control to its event loop.
    app = QtGui.QApplication(sys.argv)
    form = qtSubAndPlot()
    form.show()
    app.exec_()
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Auto-generated action metadata (see "GENERATED BY KOMAND SDK" header):
    # string constants mirroring the JSON schemas below.
    DESCRIPTION = "Create a new query in Kolide"

    class Input:
        # Property names of the action's input schema.
        DESCRIPTION = "description"
        NAME = "name"
        QUERY = "query"

    class Output:
        # Property name of the action's output schema.
        RESULTS = "results"
class CreateQueryInput(komand.Input):
    """Input schema wrapper for the Create Query action (generated code)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "description": {
      "type": "string",
      "title": "Description",
      "description": "Description of query",
      "order": 2
    },
    "name": {
      "type": "string",
      "title": "Name",
      "description": "Name of query",
      "order": 1
    },
    "query": {
      "type": "string",
      "title": "Query",
      "description": "Query to run on fleet",
      "order": 3
    }
  },
  "required": [
    "description",
    "name",
    "query"
  ]
}
    """)

    def __init__(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(CreateQueryInput, self).__init__(self.schema)
class CreateQueryOutput(komand.Output):
    """Output schema wrapper for the Create Query action (generated code)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "results": {
      "type": "object",
      "title": "Results",
      "description": "Results from creating a new query",
      "order": 1
    }
  }
}
    """)

    def __init__(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(CreateQueryOutput, self).__init__(self.schema)
|
# put your python code here
# Read a string from stdin, drop all spaces, and print the fraction of "A"
# characters rounded to two decimal places.
# (Renamed the variable: `string` shadows the stdlib module of the same name.)
letters = input().replace(" ", '')
print(round(letters.count("A") / len(letters), 2))
"""
_ _
| (_)
___ ___ __| |_ __ _ _ __ __ _ ___
/ __/ __| / _` | |/ _` | '_ \ / _` |/ _ \\
| (_| (__ | (_| | | (_| | | | | (_| | (_) |
\___\___| \__,_| |\__,_|_| |_|\__, |\___/
______ _/ | __/ |
|______||__/ |___/
"""
import re
import subprocess
class ColorSchema(object):
    """Shell (ANSI escape) based color scheme for console output."""

    TERMINATOR = "\033[0m"  # reset sequence appended after every message
    FONT_RED = "\033[31m"
    FONT_GREEN = "\033[32m"
    FONT_YELLOW = "\033[33m"
    FONT_BLUE = "\033[34m"
    FONT_VIOLET = "\033[35m"
    FONT_SKY_BLUE = "\033[36m"
    BG_RED = "\033[41m"
    BG_GREEN = "\033[42m"
    BG_YELLOW = "\033[43m"
    BG_BLUE = "\033[44m"
    BG_VIOLET = "\033[45m"
    BG_SKY_BLUE = "\033[46m"

    def _emit(self, color, text):
        # Wrap *text* in the given ANSI code and reset the terminal afterwards.
        print(color + text + self.TERMINATOR)

    def end(self, message):
        self._emit(self.BG_VIOLET, message)

    def info(self, message):
        self._emit(self.FONT_SKY_BLUE, "[Info] " + message)

    def warning(self, message):
        self._emit(self.FONT_YELLOW, "[🔔️] " + message)

    def error(self, message):
        self._emit(self.FONT_RED, "[🆘] " + message)

    def success(self, message):
        self._emit(self.FONT_GREEN, "[✅] " + message)

    def title(self, message):
        self._emit(self.BG_GREEN, "[🚀] " + message)
class MessageBlock(ColorSchema):
    """Console message block with a uniform layout.

    Subclasses set TITLE/START/END class attributes and override action();
    instantiating the subclass renders the whole block immediately.
    """

    TITLE = None   # headline printed first (optional)
    START = None   # info line printed before action() (optional)
    END = None     # closing line printed after action() (optional)

    def __init__(self):
        # Render header lines, run the block's work, then the footer.
        if self.TITLE:
            self.title(message=self.TITLE)
        if self.START:
            self.info(message=self.START)
        self.action()
        if self.END:
            self.end(message=self.END)

    def action(self):
        # Hook for subclasses; the base block does nothing between the lines.
        pass

    @staticmethod
    def decode_output(output):
        """
        decode check_output from byte to utf-8
        @param output: subprocess check_output result
        """
        text = output.decode('utf-8')
        return text.strip().strip('\n')
class Welcome(MessageBlock):
    # Opening banner: title, repo link, separator, then the ASCII-art logo.
    TITLE = "Hi Man, Glad to see you here, Welcome to Star!"
    START = ">>> https://github.com/pyfs/cc_django.git <<<"
    END = "----------------------------------------------------------"

    def action(self):
        # Print this module's docstring (the ASCII-art logo at the top).
        print(__doc__)
class WellDone(MessageBlock):
    # Closing banner shown once every pipeline step has succeeded.
    END = "Congratulations, Well Done Once Again ⛽️⛽️⛽️ \nlast operation execute: make install."
class PreGenError(Exception):
    """Raised by a pipeline step to abort project pre-generation."""
    pass
class CheckProjectName(MessageBlock):
    """Pipeline step: validate the cookiecutter project name."""

    TITLE = "检测项目名称是否合规"
    # Python-identifier-like pattern.  NOTE(review): the trailing `+` requires
    # at least two characters, so a single-letter name is rejected -- confirm
    # that is intended.
    reg = r'^[_a-zA-Z][_a-zA-Z0-9]+$'
    # Rendered by cookiecutter before this hook runs.
    project_name = '{{ cookiecutter.PROJECT_NAME }}'

    def action(self):
        result = re.match(self.reg, self.project_name)
        if not result:
            raise PreGenError(f'check {self.project_name} error')
class PipInstallRequirements(MessageBlock):
    """Pipeline step: pip-install the dependency groups the user opted into."""

    TITLE = "安装项目 python 依赖包"
    # Each group is installed when its 'input' answer is y/Y; DEFAULT is
    # always 'Y' and therefore always installed.
    REQUIREMENTS = {
        'DEFAULT': {
            'input': 'Y',
            'pkg': [
                'django>3,<4',
                'wrapt',
                'Pillow',
                'django-model-utils',
                'psycopg2-binary',
                'uWSGI',
            ],
        },
        'DRF': {
            'input': '{{cookiecutter.DRF}}',
            'pkg': [
                'django-filter',
                'django-extensions',
                'drf-extensions',
                'djangorestframework',
                'djangorestframework-jwt'
            ]
        },
        'GRAPPELLI': {
            # Bug fix: this group was keyed to the CELERY answer (copy-paste),
            # so answering CELERY=y also pulled in grappelli.  It must follow
            # the GRAPPELLI option (assumed to exist in cookiecutter.json --
            # confirm).
            'input': '{{cookiecutter.GRAPPELLI}}',
            'pkg': [
                'django-grappelli',
                'django-filebrowser',
                'feedparser'
            ]
        },
        'CELERY': {
            'input': '{{cookiecutter.CELERY}}',
            'pkg': [
                'django-celery-beat',
                'celery',
                'amqp',
            ],
        }
    }

    def action(self):
        # Collect every package whose group was answered "y".
        requirements = []
        for key, item in self.REQUIREMENTS.items():
            if item['input'].strip().lower() == 'y':
                requirements += item['pkg']
        self.warning('[!] installing ...')
        # subprocess.call returns a non-zero exit code on failure.
        error = subprocess.call(['pip', 'install'] + requirements)
        if error:
            raise PreGenError('install pkg error')
class PipFreezeRequirements(MessageBlock):
    """Pipeline step: snapshot installed packages into requirements.txt."""

    TITLE = "更新项目依赖到 requirements.txt 文件"

    def action(self):
        file_name = 'requirements.txt'
        with open(file_name, "w") as f:
            # `pip freeze` writes the pinned package list straight to the file.
            error = subprocess.call(["pip", "freeze"], stdout=f)
        if error:
            raise PreGenError(f'create {file_name} error')
class PreGenProjectHooks(object):
    """Run the pre-generation pipeline steps in order.

    Each entry in PIPELINE is the name of a MessageBlock subclass defined in
    this module; instantiating it executes the step.  A PreGenError aborts
    the remaining steps.
    """

    PIPELINE = [
        'Welcome',
        'CheckProjectName',
        'PipInstallRequirements',
        'PipFreezeRequirements',
        'WellDone'
    ]

    def __call__(self):
        for cls_name in self.PIPELINE:
            try:
                # Resolve the step class in the module namespace instead of
                # eval(): same behaviour for these fixed names, without
                # arbitrary code execution.
                globals()[cls_name]()
            except PreGenError as e:
                print(e)
                break
if __name__ == '__main__':
    # Instantiate the hook runner and execute the pipeline immediately.
    PreGenProjectHooks()()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.core.worker.consumer`."""
import datetime
import time
import pytest
import orion.core.io.experiment_builder as experiment_builder
from orion.core.utils.format_trials import tuple_to_trial
from orion.core.worker.trial_pacemaker import TrialPacemaker
from orion.storage.base import get_storage
@pytest.fixture
def config(exp_config):
    """Return a configuration."""
    cfg = exp_config[0][0]
    # Narrow the search space to one variable and pin the experiment name.
    cfg.update(space={"x": "uniform(-50, 50)"}, name="exp")
    return cfg
@pytest.fixture
def exp(config):
    """Return an Experiment built from the `config` fixture."""
    return experiment_builder.build(**config)
@pytest.fixture
def trial(exp):
    """Return a Trial which is registered in DB."""
    trial = tuple_to_trial((1.0,), exp.space)
    heartbeat = datetime.datetime.utcnow()
    trial.experiment = exp.id
    # The pacemaker only updates trials in the "reserved" state.
    trial.status = "reserved"
    trial.heartbeat = heartbeat
    get_storage().register_trial(trial)
    return trial
@pytest.mark.usefixtures("storage")
def test_trial_update_heartbeat(exp, trial):
    """Test that the heartbeat of a trial has been updated."""
    # wait_time=1s, then sleep 2s so at least one beat has fired.
    trial_monitor = TrialPacemaker(trial, wait_time=1)
    trial_monitor.start()
    time.sleep(2)
    trials = exp.fetch_trials_by_status("reserved")
    assert trial.heartbeat != trials[0].heartbeat
    # A second beat should move the heartbeat again.
    heartbeat = trials[0].heartbeat
    time.sleep(2)
    trials = exp.fetch_trials_by_status(status="reserved")
    assert heartbeat != trials[0].heartbeat
    trial_monitor.stop()
@pytest.mark.usefixtures("storage")
def test_trial_heartbeat_not_updated(exp, trial):
    """Test that the heartbeat of a trial is not updated when trial is not longer reserved."""
    trial_monitor = TrialPacemaker(trial, wait_time=1)
    trial_monitor.start()
    time.sleep(2)
    trials = exp.fetch_trials_by_status("reserved")
    assert trial.heartbeat != trials[0].heartbeat
    # Leaving the "reserved" state should make the pacemaker thread exit.
    get_storage().set_trial_status(trial, status="interrupted")
    time.sleep(2)
    # `join` blocks until all thread have finish executing. So, the test will hang if it fails.
    trial_monitor.join()
    assert 1
@pytest.mark.usefixtures("storage")
def test_trial_heartbeat_not_updated_inbetween(exp, trial):
    """Test that the heartbeat of a trial is not updated before wait time."""
    # wait_time=5s: after only 1s the heartbeat must be unchanged
    # (compared at second precision to ignore registration jitter).
    trial_monitor = TrialPacemaker(trial, wait_time=5)
    trial_monitor.start()
    time.sleep(1)
    trials = exp.fetch_trials_by_status("reserved")
    assert trial.heartbeat.replace(microsecond=0) == trials[0].heartbeat.replace(
        microsecond=0
    )
    heartbeat = trials[0].heartbeat
    # After the 5s wait has elapsed the heartbeat must have moved.
    time.sleep(6)
    trials = exp.fetch_trials_by_status(status="reserved")
    assert heartbeat != trials[0].heartbeat
    trial_monitor.stop()
|
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from collections import namedtuple
# One satellite range observation: satellite position, measured distance to
# the receiver, and the weight of the measurement in the least-squares cost.
SatelliteMeasurement = namedtuple('SatelliteMeasurement',
                                  ['position', 'distant', 'weight'])


# Change satellite position here!
def generate_data():
    """
    Generate simulation data.

    The receiver sits at a fixed (hidden) position; each satellite reports its
    own position and a noisy scalar range to the receiver.

    Returns:
        list[SatelliteMeasurement]: one measurement per satellite.
    """
    receiver_position = np.array([-10, 0.])
    satelite_positions = [
        np.array([20, 20]),
        np.array([30, -30]),
        np.array([-40, 0]),
    ]
    # satelite_positions = [
    #     np.array([-2, 30]),
    #     np.array([2, 30]),
    # ]
    distance_noisy_std = 0.1
    measurements = [
        SatelliteMeasurement(
            position=pos,
            # Bug fix: the noise models a noisy *range* reading, so it is a
            # scalar added to the true distance.  Previously it was added to
            # the 2-D position difference before taking the norm, which warps
            # the geometry instead of perturbing the measurement.
            distant=(np.linalg.norm(receiver_position - pos) +
                     np.random.normal(0, distance_noisy_std)),
            weight=1) for pos in satelite_positions
    ]
    return measurements
def plot_hessian_as_covarinace_2d(ax, xy, hessian, satelite_measurements):
    """
    Plot the inverse Hessian as 1x/2x/3x confidence ellipses around `xy`,
    together with the satellite positions.

    Based on:
    https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib
    Note the ellipse angle must come from the eigenvector via
    np.rad2deg(np.arctan2(v[0, 1], v[1, 0])), not np.rad2deg(np.arccos(v[0, 0])).
    """
    x, y = xy
    # Covariance approximated as the inverse of the Gauss-Newton Hessian.
    cov = np.linalg.inv(hessian)
    value, v = np.linalg.eig(cov)
    value = np.sqrt(value)  # standard deviations along the principal axes
    for j in range(1, 4):
        SCALE = 3
        ell = Ellipse(xy=(np.mean(x), np.mean(y)),
                      width=value[0] * j * SCALE,
                      height=value[1] * j * SCALE,
                      angle=np.rad2deg(np.arctan2(v[0, 1], v[1, 0])),
                      facecolor='none',
                      edgecolor='red')
        ax.add_artist(ell)
    info_string = 'satellite position: '
    for s in satelite_measurements:
        plt.scatter(*s.position)
        info_string += str(s.position) + ', '
    ax.set_xlim(-50, 50)
    ax.set_ylim(-50, 50)
    plt.title(info_string)
    plt.scatter(x, y)
class GPS:
    """
    Nonlinear least-squares GPS receiver position solver.

    want:
        minimize_xy = sum_i w_i * ||h_i(xy) - dist_i||^2
    where h_i(xy) = dist(xy, satelite_i)
    """

    def __init__(self, satelite_measurements):
        # states: current estimate of the receiver position.
        self.variables_xy = np.array([0., 0.])
        # data: list of SatelliteMeasurement(position, distant, weight).
        self.satelite_measurements = satelite_measurements
        # config
        self.max_iteration = 5

    def least_squares(self):
        """
        The nonlinear least squares (Gauss-Newton) iteration.
        Updates self.variables_xy in place and sets self.hessian.
        """
        for iteration in range(self.max_iteration):
            cost = self.compute_cost(self.satelite_measurements)
            jacobian = self.compute_jacobian(self.satelite_measurements)
            b = self.compute_b(self.satelite_measurements)
            W = self.compute_weights(self.satelite_measurements)
            delta = self.solve_normal_equation(jacobian, b, W)
            self.variables_xy += delta
            print('cost:', cost, ' position xy:', self.variables_xy)

    def h_function(self, satelite_measurement):
        """
        The distance observation function: ||xy - satellite_position||.
        """
        diff = self.variables_xy - satelite_measurement.position
        dist = np.linalg.norm(diff)
        return dist

    def residual(self, satelite_measurement):
        """
        The residual: predicted distance minus measured distance.
        """
        return self.h_function(
            satelite_measurement) - satelite_measurement.distant

    def compute_cost(self, satelite_measurements):
        """
        Weighted mean squared residual over all measurements.
        """
        cost = 0.
        for s in satelite_measurements:
            r = self.residual(s)
            cost += r * s.weight * r
        cost /= len(satelite_measurements)
        return cost

    def compute_jacobian(self, satelite_measurements):
        """
        Analytic Jacobian of the residuals w.r.t. xy (one row per satellite:
        the unit vector from satellite to xy), verified against a numeric
        gradient each call.
        """
        num_residuals = len(satelite_measurements)
        jacobian = np.zeros([num_residuals, 2])
        for i, s in enumerate(satelite_measurements):
            f = self.variables_xy - s.position
            jacobian[i, :] = f / np.linalg.norm(f)
            np.testing.assert_allclose(
                jacobian[i, :],
                self.gradient_checking_simple(satelite_measurements[i]), 1e-4)
        return jacobian

    def compute_weights(self, satelite_measurements):
        """
        Format the per-measurement weights into a diagonal matrix.
        """
        W_diag = [s.weight for s in satelite_measurements]
        W = np.diag(W_diag)
        return W

    def gradient_checking_simple(self, satelite_measurement):
        """
        Central-difference numeric gradient of the residual; restores
        self.variables_xy before returning.
        """
        x_orig = np.copy(self.variables_xy)
        delta = 1e-6
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([delta, 0])
        r_plus = self.residual(satelite_measurement)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([-delta, 0])
        r_minus = self.residual(satelite_measurement)
        grad_x = (r_plus - r_minus) / (2 * delta)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([0, delta])
        r_plus = self.residual(satelite_measurement)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([0, -delta])
        r_minus = self.residual(satelite_measurement)
        grad_y = (r_plus - r_minus) / (2 * delta)
        self.variables_xy = np.copy(x_orig)
        return [grad_x, grad_y]

    def compute_b(self, satelite_measurements):
        """
        Residual vector evaluated at the current variables.
        """
        num_residuals = len(satelite_measurements)
        b = np.zeros([num_residuals, 1])
        for i, s in enumerate(satelite_measurements):
            b[i, :] = self.residual(s)
        return b

    def solve_normal_equation(self, jacobian, b, W):
        """
        Solve the weighted normal equations  J^T W J delta = -J^T W b.
        Also stores the Gauss-Newton Hessian approximation in self.hessian.
        """
        lhs = jacobian.T @ W @ jacobian
        rhs = -jacobian.T @ W @ b
        delta = np.linalg.solve(lhs, rhs)
        delta = delta.flatten()
        self.hessian = lhs
        return delta

    def plot_cost(self):
        """
        Plot the cost field over a [-50, 50]^2 grid.
        """
        dx = dy = 0.5
        Y, X = np.mgrid[slice(-50, 50 + dy, dy), slice(-50, 50 + dx, dx)]
        costs = np.zeros_like(X)
        cols, rows = X.shape
        # Bug fix: the grid sweep evaluates compute_cost by overwriting
        # self.variables_xy; save the solved estimate and restore it so this
        # plotting helper no longer corrupts the solver state.
        saved_xy = np.copy(self.variables_xy)
        for col in range(cols):
            for row in range(rows):
                x = X[col, row]
                y = Y[col, row]
                self.variables_xy = np.array([x, y])
                costs[col, row] = self.compute_cost(self.satelite_measurements)
        self.variables_xy = saved_xy
        im = plt.pcolormesh(X, Y, costs)
        plt.colorbar(im)
        plt.title('cost field')
        plt.show()
plt.show()
def main():
    """Run the GPS demo: solve, then plot uncertainty ellipses and cost field."""
    satelite_measurements = generate_data()
    gps = GPS(satelite_measurements)
    gps.least_squares()
    # Left panel: solution with covariance ellipses from the Hessian.
    ax = plt.subplot(1, 2, 1, aspect='equal')
    plot_hessian_as_covarinace_2d(ax, gps.variables_xy, gps.hessian,
                                  gps.satelite_measurements)
    # Right panel: brute-force cost field over the search area.
    plt.subplot(1, 2, 2, aspect='equal')
    gps.plot_cost()
    plt.show()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from django.http import JsonResponse
from datetime import datetime as dt
import numpy as np
from summarizer import Article, ParagraphSet, Sentence, summary_text
from summarizer.lib_detectlang import detect_lang
def parse_api(request):
    """Summarize POSTed `text`/`title` and return summary + keywords as JSON.

    Expects `text` and `title` in request.POST; `summary_text` maps a
    two-letter language code to a summarizer callable.
    """
    lang = detect_lang(request.POST['text'])
    sections = summary_text[lang[:2]](request.POST['text'], request.POST['title'])
    ret = {
        # NOTE(review): `unicode` is a Python 2 builtin -- this module appears
        # to target Python 2; under Python 3 this line raises NameError.
        'summary': [unicode(sec).replace('\n', '<br>') for sec in sections],
        # Top three keywords per section, joined for HTML display.
        'keyword': ['<br>'.join(sec.keywords[:3]) for sec in sections],
    }
    return JsonResponse(ret)
|
from typing import List
from .records import AccountOperationRecord, ACCOUNT_OPERATION_TYPE
from ._html_email_scrapers import banorte_email_scraper
EMAIL_TYPE = 'BANCO_EN_LINEA_BLOCKED_USER_EMAIL'
def is_matching(html: str) -> bool:
    """Return True when the HTML looks like a Banorte blocked-user email."""
    marker = 'Bloqueo de usuario'
    return html.find(marker) != -1
@banorte_email_scraper
def scrape(fields: List[str]) -> AccountOperationRecord:
    """Build an AccountOperationRecord from the pre-split email fields.

    Field indices (14: note text, 10: token, 6 and 8: date and time) are
    assumed to follow the layout produced by `banorte_email_scraper` --
    confirm against the decorator's splitting logic.
    """
    return AccountOperationRecord(
        source=EMAIL_TYPE,
        type=ACCOUNT_OPERATION_TYPE,
        note=f'{fields[14]} | Token: {fields[10]}',
        operation_date=f'{fields[6]} {fields[8].strip()}',
    )
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
import tempfile
import unittest
import os
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry.testing import fakes
from telemetry.internal.util import file_handle
from telemetry.util import image_util
from telemetry.util import screenshot
class ScreenshotUtilTests(unittest.TestCase):
    """Tests for the telemetry.util.screenshot helpers."""

    def setUp(self):
        self.options = fakes.CreateBrowserFinderOptions()

    def testScreenShotTakenSupportedPlatform(self):
        fake_platform = self.options.fake_possible_browser.returned_browser.platform
        # Tiny 2x2 PNG fixture, base64-encoded.
        expected_png_base64 = """
        iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
        JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A
        T8tgwbJAAAAABJRU5ErkJggg==
        """
        fake_platform.screenshot_png_data = expected_png_base64
        fh = screenshot.TryCaptureScreenShot(fake_platform, None)
        screenshot_file_path = fh.GetAbsPath()
        try:
            # The captured file must decode to the same image as the fixture.
            actual_screenshot_img = image_util.FromPngFile(screenshot_file_path)
            self.assertTrue(
                image_util.AreEqual(
                    image_util.FromBase64Png(expected_png_base64),
                    actual_screenshot_img))
        finally: # Must clean up screenshot file if exists.
            os.remove(screenshot_file_path)

    def testUploadScreenshotToCloudStorage(self):
        tf = tempfile.NamedTemporaryFile(
            suffix='.png', delete=False)
        fh1 = file_handle.FromTempFile(tf)
        local_path = '123456abcdefg.png'
        # Stub out the actual upload and the remote-name generation.
        with mock.patch('py_utils.cloud_storage.Insert') as mock_insert:
            with mock.patch(
                'telemetry.util.screenshot._GenerateRemotePath',
                return_value=local_path):
                url = screenshot._UploadScreenShotToCloudStorage(fh1)
        mock_insert.assert_called_with(
            cloud_storage.TELEMETRY_OUTPUT,
            local_path,
            fh1.GetAbsPath())
        self.assertTrue(url is not None)
|
# Earlier attempt, kept for reference:
#print('Veja quantos dolares você pode comprar.')
#var1 = int(input('Informe o valor: '))
#var2 = var1+5.23
#print('Com {} você poderá comprar {} dólares.'.format(var1, var2))
# Challenge from lesson 07 of the Python course: read how much money a person
# has in their wallet and show how many dollars they can buy.
# SOLUTION TO CHALLENGE 6
real = float(input('Quanto dinheiro você tem na carteira? R$: '))
# Exchange rates hard-coded at the time of writing.
dolar = real / 5.25
euro = real / 5.94
print('Dolar hoje: US$ 5,25')
print('Euro hoje: € 5.94')
print('Com R${:.2f} você pode comprar US${:.2f}'.format(real, dolar))
print('Com R${:.2f} você pode comprar €{:.2f}'.format(real, euro))
from .artdeco import artdeco
from .autumn import autumn
from .glass import glass
from .metal import metal
from .neon import neon
from .rococo import rococo
from .santafe import santafe
from .sheen import sheen
from .silky import silky
from .spring import spring
from .summer import summer
from .tropical import tropical
from .winter import winter
|
import rosbag
import rospy
from arni_msgs.msg import RatedStatistics, RatedStatisticsEntity
from arni_core.helper import SEUID_DELIMITER
def create_simple_bag():
    """Write one RatedStatistics message to bags/john.bag on /statistics_rated."""
    bag = rosbag.Bag('bags/john.bag', 'w')
    msg = RatedStatistics()
    # seuid format: "n<delimiter>node1" -- identifies the rated node.
    msg.seuid = "n" + SEUID_DELIMITER + "node1"
    msg.host = "host1"
    now = rospy.Time.now()
    # Five-second measurement window ending now.
    msg.window_start = now - rospy.Duration(5)
    msg.window_stop = now
    msg.rated_statistics_entity = [create_statistic_entity()]
    try:
        bag.write('/statistics_rated', msg)
    finally:
        bag.close()
def create_statistic_entity():
    """Return a RatedStatisticsEntity for a cpu_usage_mean outside its range."""
    msg = RatedStatisticsEntity()
    msg.statistic_type = "cpu_usage_mean"
    # Actual value 56.6 vs expected range "0 - 50" -- an out-of-bounds sample.
    msg.actual_value = ["56.6"]
    msg.expected_value = ["0 - 50"]
    msg.state = [0]
    return msg
# rospy.Time.now() requires an initialized node before the bag is written.
rospy.init_node("temp_node")
create_simple_bag()
|
# Generated by Django 3.1 on 2022-01-18 20:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a many-to-many `gardens` field to the Crop model."""

    dependencies = [
        ('gardens', '0007_auto_20211231_2258'),
        ('crops', '0003_auto_20211205_1004'),
    ]

    operations = [
        migrations.AddField(
            model_name='crop',
            name='gardens',
            field=models.ManyToManyField(to='gardens.Garden'),
        ),
    ]
|
import mongoengine as me
from flask_wtf import FlaskForm
import wtforms as wf
from wtforms.validators import DataRequired
from flask_login import UserMixin
class LoginForm(FlaskForm):
    """Sign-in form: username and password, both required."""
    username = wf.StringField(
        label="name", validators=[DataRequired("Username is required")]
    )
    # NOTE(review): declared as StringField, not PasswordField, so the input
    # is rendered in clear text -- confirm that is intended.
    password = wf.StringField(
        label="password", validators=[DataRequired("Password is required")]
    )
class RegisterForm(FlaskForm):
    """Registration form: username, password and the user's public key."""
    username = wf.StringField(
        label="name", validators=[DataRequired("Username is required")]
    )
    # NOTE(review): declared as StringField, not PasswordField, so the input
    # is rendered in clear text -- confirm that is intended.
    password = wf.StringField(
        label="password", validators=[DataRequired("Password is required")]
    )
    publicKey = wf.StringField(
        label="public_key", validators=[DataRequired("Public key is required")]
    )
class TokenBlocklist(me.Document):
    """Revoked JWT, identified by its `jti` claim."""
    jti = me.StringField()
    created_at = me.DateTimeField()
    # Bug fix: mongoengine's meta key is "collection" (singular); the previous
    # "collections" key was silently ignored, so documents landed in the
    # default collection name instead of "tokens".
    meta = {"collection": "tokens"}
import os
import json
import logging
from birdy.twitter import UserClient
# Log everything (DEBUG and up) with timestamps to tweetme.log.
logging.basicConfig(filename='tweetme.log', format='%(asctime)s %(message)s', level=logging.DEBUG)
def get_config_from_file(filename="config.json"):
    """
    Load the Twitter API tokens and keys from a JSON config file.

    Creates a placeholder config (all values 0) when the file is missing so
    the user can fill it in.

    Returns False: if config.json is missing or has invalid configuration
    Returns tuple (containing configurations): if config.json is present with
    valid configuration
    """
    # Bug fix: use os.path.exists instead of scanning os.listdir() -- the old
    # check only worked for bare filenames in the current directory and broke
    # for any path-qualified filename.
    if not os.path.exists(filename):
        with open(filename, mode='w') as f:
            json.dump({
                'consumer_key': 0,
                'consumer_secret': 0,
                'access_token': 0,
                'access_token_secret': 0,
            }, f)
        return False
    with open(filename, mode='r') as f:
        config = json.load(f)
    # Any remaining 0 means a placeholder the user has not replaced yet.
    if 0 in config.values():
        return False
    return (
        config["consumer_key"],
        config["consumer_secret"],
        config["access_token"],
        config["access_token_secret"],
    )
def get_next_tweet_from_file(tweets_file='tweets.txt', turn_file='next_tweet_index.txt'):
    """
    Return the next tweet from the tweets file, split on "::".

    Seeds missing files on first use, advances the index file on every call,
    and wraps back to the first tweet after the last one.

    Returns False: if tweets.txt had to be created (nothing to tweet yet).
    Returns list: the next tweet's [text, url] parts otherwise.
    """
    if tweets_file not in os.listdir():
        # First run: seed an example tweets file and a zeroed index.
        with open(tweets_file, mode='w') as f:
            f.write('Tweet :: URL\n')
        with open(turn_file, mode='w') as f:
            f.write('0')
        return False
    if turn_file not in os.listdir():
        # Tweets exist but the index is missing: record that the next tweet is
        # at index 1 and hand back the first line.
        with open(turn_file, mode='w') as f:
            f.write('1')
        with open(tweets_file, mode='r') as f:
            return f.readline().split("::")
    # Normal case: use the stored index, wrapping around at the end.
    with open(turn_file, mode='r') as f:
        turn = int(f.readline())
    with open(tweets_file, mode='r') as f:
        all_tweets = f.readlines()
    if turn >= len(all_tweets):
        turn = 0
    with open(turn_file, mode='w') as f:
        f.write(str(turn + 1))
    return all_tweets[turn].split("::")
def manage_twitter_client():
    """
    This function will create twitter client using configurations and send tweet.
    Logs an explanatory error when the config still holds placeholders; does
    nothing when there is no tweet to send.
    """
    configError = (
        "Please open config.json file located in the project directory and"
        "replace the value '0' of all the tokens and keys in order to make "
        "this bot work. Visit https://apps.twitter.com/ in order to get your "
        "tokens and keys."
    )
    keys = get_config_from_file()
    if not keys:
        logging.error(configError)
    else:
        tweet = get_next_tweet_from_file()
        if tweet:
            # tweet is a [text, url] pair produced by splitting on "::".
            client = UserClient(*keys)
            response = client.api.statuses.update.post(status='{} {}'.format(tweet[0], tweet[1]))
            logging.info(
                'You tweet is out in the world.'
                'Check it out https://twitter.com/{}/status/{}'.format(
                    response.data["user"]["screen_name"],
                    response.data["id_str"]
                )
            )


if __name__ == '__main__':
    manage_twitter_client()
|
from .base_model import QueryableBaseModel
from ...unchained import unchained, injectable
class AntiPatternBaseModel(QueryableBaseModel):
    """Base model that keeps a reference to the injected ``db`` extension on
    each instance and offers ``save``/``update`` convenience methods.

    The class name reflects that holding ``db`` per instance is considered
    an anti-pattern by the surrounding framework.
    """
    @unchained.inject('db')
    def __init__(self, db=injectable, **kwargs):
        # `db` is supplied by the Flask-Unchained dependency injector.
        self.db = db
        super().__init__(**kwargs)

    def save(self, commit=False):
        """Add this instance to the session; commit only when *commit* is True."""
        self.db.session.add(self)
        if commit:
            self.db.session.commit()

    def update(self, commit=False, partial_validation=True, **kwargs):
        """Apply field updates via the parent class, then save (optionally committing)."""
        super().update(partial_validation=partial_validation, **kwargs)
        self.save(commit=commit)
|
from django.test import TestCase

class AnalyzerTestCase(TestCase):
    # Placeholder test case -- no analyzer tests implemented yet.
    pass
|
"""
Layer Utils for Sauvola Document Binarization
"""
import os
from tensorflow.keras import backend as K
from tensorflow.keras.constraints import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.losses import *
from tensorflow.keras.initializers import *
from tensorflow.keras.metrics import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.layers import *
import numpy as np
import tensorflow as tf
from absl import logging
################################################################################
# Keras Layers
################################################################################
class SauvolaMultiWindow(Layer):
    """
    MultiWindow Sauvola Keras Layer
    1. Instead of doing Sauvola threshold computation for one window size,
    we do this computation for a list of window sizes.
    2. To speed up the computation over large window sizes,
    we implement the integral feature to compute at O(1).
    3. Sauvola parameters, namely, k and R, can be selected to be
    trainable or not. Detailed meaning of k and R, please refer
    https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_sauvola
    4. Default R value is made w.r.t. normalized image of range (0, 1)
    """
    def __init__(self,
                 window_size_list=[7,15,31,63,127],
                 init_k=0.2,
                 init_R=0.5,
                 train_k=False,
                 train_R=False,
                 SMWConf=None,
                 **kwargs):
        # NOTE(review): the mutable list default is safe here only because it
        # is never mutated; a tuple would be more defensive.
        if SMWConf is None:
            # Plain keyword arguments configure the layer
            self.window_size_list = window_size_list
            self.init_k = init_k
            self.init_R = init_R
            self.train_k = train_k
            self.train_R = train_R
        else:
            # A config dict (e.g. from a saved model) overrides the arguments
            self.window_size_list = SMWConf['window_size_list']
            self.init_k = SMWConf['init_k']
            self.init_R = SMWConf['init_R']
            self.train_k = SMWConf['train_k']
            self.train_R = SMWConf['train_R']
        self.n_wins = len(self.window_size_list)
        super(SauvolaMultiWindow, self).__init__(**kwargs)

    def get_config(self) :
        """Serialize the constructor arguments so the layer can be re-created."""
        base_config = super().get_config()
        config = {"window_size_list": self.window_size_list,
                  'init_k': self.init_k,
                  'init_R': self.init_R,
                  'train_k': self.train_k,
                  'train_R': self.train_R,
                  }
        return dict(list(base_config.items()) + list(config.items()))

    def _initialize_ii_buffer( self, x ) :
        """Compute integral image (2D cumulative sum of `x`, padded so any
        window sum can later be read with four corner lookups).
        """
        x_pad = K.spatial_2d_padding( x, ((self.max_wh//2+1,self.max_wh//2+1), (self.max_ww//2+1,self.max_ww//2+1)) )
        ii_x = K.cumsum( x_pad, axis=1 )
        ii_x2 = K.cumsum( ii_x, axis=2 )
        return ii_x2

    def _get_max_size( self ) :
        """Compute the max size of all windows
        """
        mh, mw = 0, 0
        for hw in self.window_size_list :
            if ( isinstance( hw, int ) ) :
                h = w = hw  # an int means a square window
            else :
                h, w = hw[:2]
            mh = max( h, mh )
            mw = max( w, mw )
        return mh, mw

    def build(self, input_shape):
        """Create the (optionally trainable) per-window k and R weights."""
        self.num_woi = len( self.window_size_list )
        self.count_ii = None
        self.lut = dict()
        self.built = True
        self.max_wh, self.max_ww = self._get_max_size()
        # Non-negativity is enforced on both Sauvola parameters.
        self.k = self.add_weight(name='Sauvola_k',
                                 shape=(1,self.num_woi,1,1,1),
                                 dtype='float32',
                                 initializer='ones',
                                 trainable=self.train_k,
                                 constraint=NonNeg(),
                                 )
        self.R = self.add_weight(name='Sauvola_R',
                                 shape=(1,self.num_woi,1,1,1),
                                 dtype='float32',
                                 initializer='ones',
                                 trainable=self.train_R,
                                 constraint=NonNeg(),
                                 )
        w_list = [np.ones([1,self.num_woi,1,1,1], dtype='float32') * self.init_k,
                  np.ones([1,self.num_woi,1,1,1], dtype='float32') * self.init_R]
        self.set_weights(w_list) # <- important to set initial values
        return

    def _compute_for_one_size( self, x, x_ii, height, width ) :
        """Mean of `x` over a (height x width) window at every pixel, using
        the precomputed integral image `x_ii`."""
        # 1. compute valid counts for this key
        top = self.max_wh//2 - height//2
        bot = top + height
        left = self.max_ww//2 - width //2
        right = left + width
        Ay, Ax = (top, left) #self.max_wh, self.max_ww
        By, Bx = (top, right) # Ay, Ax + width
        Cy, Cx = (bot, right) #By + height, Bx
        Dy, Dx = (bot, left) #Cy, Ax
        ii_key = (height,width)
        # Negative values below are used as slice *end* indices into the
        # padded integral image, selecting the matching region.
        top_0 = -self.max_wh//2 - height//2 - 1
        bot_0 = top_0 + height
        left_0 = -self.max_ww//2 - width//2 - 1
        right_0 = left_0 + width
        Ay0, Ax0 = (top_0, left_0) #self.max_wh, self.max_ww
        By0, Bx0 = (top_0, right_0) # Ay, Ax + width
        Cy0, Cx0 = (bot_0, right_0) #By + height, Bx
        Dy0, Dx0 = (bot_0, left_0) #Cy, Ax
        # used in testing, where each batch is a sample of different shapes
        counts = K.ones_like( x[:1,...,:1] )
        count_ii = self._initialize_ii_buffer( counts )
        # compute winsize if necessary
        counts_2d = count_ii[:,Ay:Ay0, Ax:Ax0] \
                  + count_ii[:,Cy:Cy0, Cx:Cx0] \
                  - count_ii[:,By:By0, Bx:Bx0] \
                  - count_ii[:,Dy:Dy0, Dx:Dx0]
        # 2. compute summed feature
        sum_x_2d = x_ii[:,Ay:Ay0, Ax:Ax0] \
                 + x_ii[:,Cy:Cy0, Cx:Cx0] \
                 - x_ii[:,By:By0, Bx:Bx0] \
                 - x_ii[:,Dy:Dy0, Dx:Dx0]
        # 3. compute average feature
        avg_x_2d = sum_x_2d / counts_2d
        return avg_x_2d

    def _compute_for_all_sizes(self, x) :
        """Stack the per-window means of `x` along a new axis 1."""
        x_win_avgs = []
        # 1. compute corr(x, window_mean) for different sizes
        # 1.1 compute integral image buffer
        x_ii = self._initialize_ii_buffer( x )
        for hw in self.window_size_list :
            if isinstance( hw, int ) :
                height = width = hw
            else :
                height, width = hw[:2]
            this_avg = self._compute_for_one_size( x, x_ii, height, width )
            x_win_avgs.append( this_avg )
        return K.stack(x_win_avgs, axis=1)

    def call(self, x):
        """Sauvola thresholds T = E(x) * (1 + k * (std(x)/R - 1)) per window."""
        # float64 keeps the integral-image cumulative sums accurate
        x = K.cast(x, tf.float64)
        x_2 = x**2
        E_x = self._compute_for_all_sizes(x)
        E_x2 = self._compute_for_all_sizes(x_2)
        # Var(x) = E(x^2) - E(x)^2, clipped before sqrt to absorb round-off
        dev_x = K.sqrt(K.maximum(E_x2 - E_x**2, 1e-6))
        T = E_x *(1. + K.cast(self.k, 'float64') * (dev_x/K.cast(self.R, 'float64')-1.))
        T = K.cast(T, 'float32')
        return T

    def compute_output_shape(self, input_shape):
        # One threshold map per window size, stacked on axis 1.
        batch_size, n_rows, n_cols, n_chs = input_shape
        return (batch_size, self.num_woi, n_rows, n_cols, n_chs)
class DifferenceThresh(Layer):
    """Scaled difference between an image and a per-pixel threshold map:
    ``alpha * (img - th) / (img_max - img_min)``, where ``alpha`` is a
    scalar weight that may optionally be trained."""
    def __init__(self,
                 img_min=0.,
                 img_max=1.,
                 init_alpha=16.,
                 train_alpha=False,
                 DTConf=None,
                 **kwargs):
        self.img_min = img_min
        self.img_max = img_max
        # A config dict (e.g. from a saved model) overrides the scalar kwargs.
        source = DTConf if DTConf is not None else {'init_alpha': init_alpha,
                                                    'train_alpha': train_alpha}
        self.init_alpha = source['init_alpha']
        self.train_alpha = source['train_alpha']
        super().__init__(**kwargs)

    def build(self, input_shapes):
        img_shape, th_shape = input_shapes
        # Single scalar weight, broadcastable over (batch, H, W, C).
        self.alpha = self.add_weight(name='alpha',
                                     shape=(1, 1, 1, 1),
                                     dtype='float32',
                                     initializer=constant(self.init_alpha),
                                     trainable=self.train_alpha,
                                     constraint=NonNeg(),
                                     )

    def call(self, inputs):
        img, th = inputs
        return (img - th) * self.alpha / (self.img_max - self.img_min)

    def get_config(self):
        """Serialize constructor arguments for model reloading."""
        base_config = super().get_config()
        own_config = {"img_min": self.img_min,
                      "img_max": self.img_max,
                      "init_alpha": self.init_alpha,
                      "train_alpha": self.train_alpha}
        return {**base_config, **own_config}
class InstanceNormalization(Layer):
    """Normalize each sample to zero mean / unit variance over the spatial
    axes (1, 2), with the std clamped away from zero for stability."""
    def call(self, t):
        mu = K.mean(t, axis=(1, 2), keepdims=True)
        sigma = K.maximum(K.std(t, axis=(1, 2), keepdims=True), 1e-5)
        return (t - mu) / sigma

    def compute_output_shape(self, input_shape):
        # Normalization is elementwise: shape unchanged.
        return input_shape
################################################################################
# Metrics
################################################################################
def TextAcc(y_true, y_pred):
    """Text class accuracy: fraction of true text pixels (value < 0) that
    were also predicted as text."""
    true_text = K.cast(y_true < 0, 'float32')
    pred_text = K.cast(y_pred < 0, 'float32')
    hits = true_text * pred_text
    return K.sum(hits, axis=(1,2,3)) / (K.sum(true_text, axis=(1,2,3)) + 1e-5)
def Acc(y_true, y_pred):
    """Overall per-pixel accuracy of the binarized (value < 0 = text) maps."""
    true_text = K.cast(y_true < 0, 'float32')
    pred_text = K.cast(y_pred < 0, 'float32')
    return K.mean(binary_accuracy(true_text, pred_text), axis=(1,2))
def F1(y_true, y_pred):
    """F-measure for the text class (pixels with value < 0).

    Precision/recall use +1 smoothing in the denominators; the harmonic
    mean adds 1e-5 to avoid division by zero.
    """
    y_true_text = K.cast(y_true < 0, 'float32')
    y_pred_text = K.cast(y_pred < 0, 'float32')
    tp = K.sum(y_true_text * y_pred_text, axis=(1,2,3))
    # (removed unused true-negative computation that was never read)
    fp = K.sum((1-y_true_text) * y_pred_text, axis=(1,2,3))
    fn = K.sum(y_true_text * (1-y_pred_text), axis=(1,2,3))
    precision = tp / (tp + fp + 1.)
    recall = tp / (tp + fn + 1.)
    Fscore = 2/(1./(precision + 1e-5) + 1./(recall + 1e-5))
    return Fscore
def PSNR(y_true, y_pred):
    """Overall PSNR between the binarized (value < 0 = text) maps."""
    true_text = K.cast(y_true < 0, 'float32')
    pred_text = K.cast(y_pred < 0, 'float32')
    mse = K.mean(MSE(true_text, pred_text), axis=(1,2))
    return -10. * K.log(mse) / K.log(10.)
################################################################################
# Others
################################################################################
def prepare_training(model_name, model_root='expt', patience=15):
    """Create the experiment directory and the standard Keras callbacks.

    Returns:
        ([checkpoint, tensorboard, early_stopping], model_dir)
    """
    model_dir = os.path.join(model_root, model_name)
    # Portable replacement for os.system('mkdir -p ...'); no shell involved.
    os.makedirs(model_dir, exist_ok=True)
    logging.info(f"use expt_dir={model_dir}")
    ckpt = ModelCheckpoint(filepath='{}/{}'.format(model_dir, model_name) + '_E{epoch:02d}-Acc{val_Acc:.4f}-Tacc{val_TextAcc:.4f}-F{val_F1:.4f}-PSNR{val_PSNR:.2f}.h5',
                           verbose=1, save_best_only=True, save_weights_only=False,)
    tb = TensorBoard(log_dir=model_dir)
    es = EarlyStopping(patience=patience)
    # NOTE(review): `lr` is created but not included in the returned callback
    # list, so the LR schedule never runs -- confirm whether it should be
    # appended to [ckpt, tb, es].
    lr = ReduceLROnPlateau(factor=.5, min_lr=1e-7, patience=patience//2)
    return [ckpt, tb, es], model_dir
# Custom objects required to deserialize saved models that use these layers
# and metrics (pass as `custom_objects=` when loading).
SauvolaLayerObjects = {
    'TextAcc': TextAcc,
    'Acc': Acc,
    'F1': F1,
    'PSNR': PSNR,
    'InstanceNormalization': InstanceNormalization,
    'DifferenceThresh': DifferenceThresh,
    'SauvolaMultiWindow': SauvolaMultiWindow,
}
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sys
import jinja2
import parawrap
def expand_template(contents, params):
    """Render *contents* as a Jinja2 template using *params*.

    StrictUndefined makes any unresolved placeholder raise instead of
    silently rendering empty.
    """
    template = jinja2.Template(source=contents, undefined=jinja2.StrictUndefined)
    return template.render(**(params or {}))
# Verbs used to randomize the invitation wording.
chosen_how = [
    'selected',
    'picked',
    'targeted',
]
# Invitation e-mail template; {{placeholders}} are filled by expand_template().
new_oslo_core_tpl = """
Hi {{firstname}} {{lastname}},
You have been {{chosen_how}} to be a new {{project}} core (if you are
willing to accept this mission). We have been watching your commits and
reviews and have noticed that you may be interested in a core position
that would be granted to you (if you are willing to accept the
responsibility of being a new core member[1] in project {{project}}).
What do you think, are you able (and willing) to accept?
If you have any questions, please feel free to respond or jump on
freenode and chat with the team on channel #openstack-oslo (one of the
other cores in oslo usually around).
This message will self-destruct in 5 seconds.
Sincerely,
The Oslo Team
[1] http://docs.openstack.org/infra/manual/core.html
"""
# Command line: <firstname> <lastname> <project>
# NOTE(review): sys.argv is indexed without a length check; consider
# printing usage when fewer than three arguments are given.
firstname = sys.argv[1]
lastname = sys.argv[2]
tpl_args = {
    'firstname': firstname,
    'project': sys.argv[3],
    'lastname': lastname,
    'firstname_title': firstname.title(),
    'lastname_title': lastname.title(),
    'chosen_how': random.choice(chosen_how),
}
# Render the template, then re-wrap paragraphs to a readable width.
tpl_value = expand_template(new_oslo_core_tpl.lstrip(), tpl_args)
tpl_value = parawrap.fill(tpl_value)
print(tpl_value)
|
import os
import copy
import numpy as np
import torch
from imageio import imread
from .models import pix2pix
from .detector import Detector
class Reconstructor:
    """Single-image face reconstruction: crops the face with a `Detector`,
    runs a pix2pix UNet that outputs PNCC + depth maps, and converts the
    depth channels into an (X, Y, Z) grid plus a Z surface."""

    def __init__(self, weights_path=None, detector=None):
        if detector is None:
            detector = Detector()
        self.detector = detector
        self.unet = pix2pix.UNet()
        self.set_initial_weights(weights_path)
        self.unet.train()  # As in the original pix2pix, works as InstanceNormalization

    def set_initial_weights(self, weights_path):
        """Load the network weights, downloading the defaults if missing."""
        if weights_path is None:
            weights_path = os.path.join(os.path.dirname(__file__),
                                        '../weights/faces_hybrid_and_rotated_2.pth')
            print('loading default reconstructor weights from {}'.format(weights_path))
        if not os.path.exists(weights_path):
            from .utils import download_from_gdrive
            from .constants import p2v_model_gdrive_id
            os.makedirs(os.path.dirname(weights_path), exist_ok=True)
            print('\tDownloading weights...')
            download_from_gdrive(p2v_model_gdrive_id, weights_path)
            print('\tDone!')
        self.initial_weights = torch.load(weights_path)

    def run(self, image, verbose=False):
        """Run the full pipeline on an image (array or file path).

        Returns:
            (result dict from post_process, cropped face image)
        """
        # isinstance is the idiomatic type check (was `type(image) is str`)
        if isinstance(image, str):
            image = imread(image)
        image_cropped = self.detector.detect_and_crop(image)
        net_res = self.run_net(image_cropped)
        final_res = self.post_process(net_res)
        if verbose:
            from . import vis_depth_interactive
            vis_depth_interactive(final_res['Z_surface'])
        return final_res, image_cropped

    def run_net(self, img):
        """Forward the cropped image through the UNet; returns PNCC/depth maps."""
        # Because is actually instance normalization need to copy weights each time
        self.unet.load_state_dict(copy.deepcopy(self.initial_weights), strict=True)
        # Forward: HWC image -> NCHW float scaled to [-1, 1]
        input = torch.from_numpy(img.transpose()).float()
        input = input.unsqueeze(0)
        input = input.transpose(2, 3)
        input = input.div(255.0).mul(2).add(-1)
        output = self.unet(input)
        output = output.add(1).div(2).mul(255)
        # Back to HWC: first 3 channels are PNCC, last 3 are depth
        im_both = output.squeeze(0).detach().numpy().transpose().swapaxes(0, 1).copy()
        im_pncc = im_both[:, :, 0:3]
        im_depth = im_both[:, :, 3:6]
        # Squash near-zero responses to exactly zero (treated as background)
        im_depth[np.logical_and(im_depth < 10, im_depth > -10)] = 0
        im_pncc[np.logical_and(im_pncc < 10, im_pncc > -10)] = 0
        return {'pnnc': im_pncc, 'depth': im_depth}

    def post_process(self, net_res):
        """Convert the network depth channels to X/Y/Z grids and a Z surface
        with NaN outside the face mask."""
        im_pncc = net_res['pnnc'].astype(np.float64)
        im_depth = net_res['depth'].astype(np.float64)
        # Affine constants map the 0..255 channels back to the training range
        net_X = im_depth[:, :, 0] * (1.3674) / 255 - 0.6852
        net_Y = im_depth[:, :, 1] * (1.8401) / 255 - 0.9035
        net_Z = im_depth[:, :, 2] * (0.7542) / 255 - 0.2997
        mask = np.any(im_depth, axis=2) * np.all(im_pncc, axis=2)
        X = np.tile(np.linspace(-1, 1, im_depth.shape[1]), (im_depth.shape[0], 1))
        Y = np.tile(np.linspace(1, -1, im_depth.shape[0]).reshape(-1, 1), (1, im_depth.shape[1]))
        # Normalize fixed grid according to the network result, as X,Y are actually redundant
        X = (X - np.mean(X[mask])) / np.std(X[mask]) * np.std(net_X[mask]) + np.mean(net_X[mask])
        Y = (Y - np.mean(Y[mask])) / np.std(Y[mask]) * np.std(net_Y[mask]) + np.mean(net_Y[mask])
        Z = net_Z * 2  # Due to image resizing
        f = 1 / (X[0, 1] - X[0, 0])
        Z_surface = Z * f
        # `~mask` replaces the non-idiomatic `mask == False` (same selection)
        Z_surface[~mask] = np.nan
        Z[~mask] = np.nan
        return {'Z': Z, 'X': X, 'Y': Y, 'Z_surface': Z_surface}
|
sp98_dept_filled = (  # adding brackets is nice to chain methods and pretty-print
    sp98_dept
    .groupby(['departement', 'week'])
    ['prix']
    .mean()
    .unstack('departement')
    # fillna(method='ffill'/'bfill') is deprecated (removed in pandas >= 2.x);
    # .ffill()/.bfill() are the supported equivalents with identical behavior.
    .ffill()
    .bfill()
)
# Project each departement's weekly price series onto 2 principal components.
Xpca = PCA(n_components=2).fit_transform(sp98_dept_filled.T)
plt.figure(figsize=(7, 5))
points = plt.scatter(Xpca[:, 0], Xpca[:, 1], s=10)
for (x, y), name in zip(Xpca, sp98_dept_filled.columns):
    plt.annotate(" {}".format(name), (x, y))
|
from qtstrap import *
from codex import SerialDevice
from .bundles import SigBundle, SlotBundle
class SubscriptionManager(QObject):
    """Wires device added/removed notifications between a parent hub and
    subscriber objects.

    Classes opt in via the ``@SubscriptionManager.subscribe`` (track all
    devices) or ``@SubscriptionManager.subscribe_to(name)`` (track one named
    device) decorators; instances created afterwards are queued in
    ``new_subscribers`` until a manager connects them to its parent.
    """
    signals = {
        'add_device': [SerialDevice],
        'remove_device': [str]
    }
    slots = {
        'device_added': [SerialDevice],
        'device_removed': [str],
        'subscribed': [None],
    }
    # Class-level registry shared by all managers: instances awaiting wiring.
    new_subscribers = []

    @classmethod
    def subscribe(cls, target):
        """Class decorator: the subscriber tracks every device in `self.devices`."""
        old_init = target.__init__

        def get_added():
            def on_device_added(self, device):
                self.devices[device.guid] = device
                if hasattr(self, 'device_added'):
                    self.device_added(device)
            return on_device_added

        def get_removed():
            def on_device_removed(self, guid):
                if hasattr(self, 'device_removed'):
                    self.device_removed(guid)
                self.devices.pop(guid)
            return on_device_removed

        def new_init(obj, *args, **kwargs):
            old_init(obj, *args, **kwargs)
            obj.signals = SigBundle(cls.signals)
            obj.slots = SlotBundle(cls.slots)
            obj.slots.link_to(obj)
            obj.devices = {}
            cls.new_subscribers.append(obj)

        target.on_device_added = get_added()
        target.on_device_removed = get_removed()
        target.__init__ = new_init
        return target

    @classmethod
    def subscribe_to(cls, device_name):
        """Decorator factory: the subscriber tracks the single device whose
        profile_name matches *device_name*, enabling/disabling itself."""
        def get_added():
            def on_device_added(self, device):
                if device.profile_name == device_name:
                    if self.device:
                        return  # already bound to a matching device
                    self.device = device
                    self.setEnabled(True)
                    if hasattr(self, 'connected'):
                        self.connected(device)
            return on_device_added

        def get_removed():
            def on_device_removed(self, guid):
                if self.device is None or self.device.guid != guid:
                    return
                self.device = None
                self.setEnabled(False)
                if hasattr(self, 'disconnected'):
                    self.disconnected(guid)
            return on_device_removed

        def decorator(target):
            target.on_device_added = get_added()
            target.on_device_removed = get_removed()
            old_init = target.__init__

            def new_init(obj, *args, **kwargs):
                old_init(obj, *args, **kwargs)
                obj.slots = SlotBundle(cls.slots)
                obj.slots.link_to(obj)
                obj.device = None
                obj.setEnabled(False)
                cls.new_subscribers.append(obj)

            target.__init__ = new_init
            return target
        return decorator

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.subscribers = []
        self.check_for_new_subscribers()

    def check_for_new_subscribers(self):
        """Connect all pending subscribers.

        Fix: iterate over a snapshot of the list -- the previous version
        removed items from ``new_subscribers`` while iterating it, which
        skipped every other pending subscriber.
        NOTE(review): nothing ever appends to ``self.subscribers``, so the
        membership test below is currently always true -- confirm intent.
        """
        for new_sub in list(self.new_subscribers):
            if new_sub not in self.subscribers:
                self.connect_subscriber(new_sub)
                self.new_subscribers.remove(new_sub)

    def connect_subscriber(self, subscriber):
        """Cross-connect a subscriber's slots/signals with the parent hub's."""
        if hasattr(subscriber, 'slots'):
            if hasattr(subscriber.slots, 'device_added') and hasattr(subscriber.slots, 'device_removed'):
                self.parent().signals.device_added.connect(subscriber.slots.device_added)
                self.parent().signals.device_removed.connect(subscriber.slots.device_removed)
                # Replay devices that were connected before this subscriber existed
                for device in self.parent().devices:
                    subscriber.slots.device_added(self.parent().devices[device])
            if hasattr(subscriber.slots, 'subscribed'):
                subscriber.slots.subscribed()
        if hasattr(subscriber, 'signals'):
            if hasattr(subscriber.signals, 'add_device') and hasattr(subscriber.signals, 'remove_device'):
                subscriber.signals.add_device.connect(self.parent().slots.add_device)
                subscriber.signals.remove_device.connect(self.parent().slots.remove_device)
import setuptools
# Read the README so PyPI can display it as the long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    # NOTE(review): "Mutli" looks like a typo for "Multi" -- the URL below uses
    # "MultiProcess...". Confirm before fixing: renaming changes the published
    # package name on PyPI.
    name="MutliProcessMStepRegression",
    version="0.1.0",
    author="王文皓(wangwenhao)",
    author_email="DATA-OG@139.com",
    description="python多进程逐步回归。python step-wise regression with multi-processing.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/wangwenhao-DATA-OG/MultiProcessMStepRegression",
    packages=setuptools.find_packages(),
    install_requires = ['scikit-learn>=0.20.4','statsmodels>=0.10.0'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.4',
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 1 2 3 4 5 6 7 8 9 9
# 3456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
"""
ReguLog - Unpacks and analyzes log archives with regular expressions
ReguLog can be used via a GUI (start script with no argument) or on the command line to:
- Search for log files according to regexp patterns into directories and archive files. The zip,
tar and gzipped-tar archive formats are supported. Data is searched recursively into directories
and archive files down to the deepest level. At the end of the scan process, an overview of the
identified log files is shown with their earliest and latest modification times.
- Extract the identified log files into a destination directory, and apply several schemes for
file concatenation (log4j style) and directory re-ordering.
- Search for regexp patterns in log files, in archive or directories, and extract fields (using the
Python syntax e.g. to extract a 2-digit day field "(?P<day>[0-3][0-9])"). The found occurrences
can be displayed on match or on change, and may include references to fields (e.g. "{day}").
Additional pre-defined fields are available such as the timestamp, extracted with a regexp too.
Python code can as well be entered and executed on search start, event match and search end.
- Export the data of the found events in XML/CSV files for post-processing.
- Store a working pattern into a new or existing patterns XML file, then re-use patterns for
other searches.
Known issues:
- A tar file packed in a zip file cannot be read ("seek" error)
- QT: during search, too many displayed events can lead to HMI freeze or crash. As a
workaround set the verbosity to "Quiet" or run the tool in command-line mode.
- QT/bfScriptInterface: "Kill" button does not work
- QT/bfScriptInterface: console view is scrolled to bottom when new text is generated, so
it is not possible to keep the view during command execution
- QT: it is not possible to select multiple directories
"""
#Revisions:
# 0.1 : First version with tar file support
# 0.2 : Use of ScriptInterface for GUI
# 0.3-d1 : Changed name to regulog
# 0.3-d2 : Associated options to commands
# 0.3-d3 : Implement unpack, make dist with new ScriptInterface
# 0.3-d4 : Added support for MIDF for input files and dirs, renaming of options and commands
# 0.3-d5 : Re-factored patterns, add support for input XML file with log event definition
# 0.3-d6 : Re-factored log archive opening
# 0.3-d7 : Added LogSource class, implemented simple version of unpack
# 0.3-d8 : Consolidated unpack for all kinds of sources and HMI re-ordering
# 0.3-d9 : Renamed "unpack" to "extract", completed "keep source dirs", changed default values
# 0.3-d10 : Update with new bfScriptInterface supporting multiple files/dirs selection
# 0.3-d11 : Re-factored extract procedure to support file re-ordering and concatenation
# 0.3-d12 : Completed extract procedure with options - Issue PR1: very long extraction time
# 0.3-d13 : Split extract procedure, rewrote procedures to process files in archive order
# 0.3 : Removed default test values for release, added list of known issues
# 0.4.1 : Set last modification time to value found on original file
# 0.4.2 : Corrected hanging with reducedirs if all files in source have different names
# 0.4.3 : Some re-factoring for pathfilter, change path regex to filename regex in event
# 0.4.4 : Fixed today's timestamp on joined files
# 0.5.1 : First version of log analysis with patterns
# 0.5.2 : Implementation of event save, display on match, display on change, timestamp
# 0.5.3 : Consolidation of ElementTree patch, fix of CDATA removal while saving, re-factoring
# 0.5.4 : Re-factoring of many elements, chronological order, newline in HMI
# 0.5.5 : Support of multiline patterns, improvement of timestamp pattern and logic
# 0.5.6 : Case sensitive option, hide timestamp option
# 0.5.7 : Fixed timestamp in ipsec.log, implemented global source
# 0.5.8 : Lookup functions, re-factoring search function, change of syntax to {field}-style
# 0.5.9 : XML/CSV export
# 0.6.0 : Python execution
# 0.6.1 : HMI changes following new ScriptInterface, widgets location, re-wording
# 0.6.2 : Python execution consolidation, displayIfChanged introduced
# 0.6.3 : Introduced checkPathFilter, named groups in path filter now create dirs
# 0.6.4 : Reducedir separately in each output dir, avoid duplicate files in tar
# 0.6.5 : Moved code to new EventSet class with lookup, corrections in scanPath
# 0.6.6 : Added Python functions, fully pseudo-path in _source_path, fixed multiline issue
# 0.6.7 : Further Python functions (e.g. get_fields), __str__ for Event, _user_fields
# 0.6.8 : Kill button grayed, user fields from timestamp rex, _core, _flat_core, fixed Event str
# 0.6.9 : Changed finalization sequence (all Python, then display strings), added Event.execute()
# 0.6.10 : Path filter aib with custoconf, save XML/CSV even if empty, _flat not stored
# 0.6.11 : Fixed CSV export if event list empty, bfElemTree import, Linux compatibility
# 0.6.12 : Added delete_event, global variables/functions in execute, .tar for IMO in pathfilter
# 0.7.0 : Added immediate, execonfile, code compile, adapted XML/XSD, order kept EventType save
# 0.7.1 : TODO New options for time window selection and timstamp fix, Include/Parent/Tag tags
# TODO support cascaded event types (Parent, Include), with includes of patterns in other files
# TODO keep comments in saved file, improve formatting - see http://effbot.org/zone/element-pi.htm
# TODO add option to fix timestamp inconsistencies
# TODO add options to set min max timestamps to search
# TODO join syslog-style log rotation
# TODO limit size of joined log files
# TODO support GUI selection of event types sorted through pre-defined tags
# TODO improve logs overview with real timestamps in files and nice directories walking
# TODO CSV export to check all possible fields in all events, not only first event
# TODO check name of fields given in python in set_field and add_field
# TODO improve events search performance
# TODO add option remove duplicated events
# TODO improve error message after execution error (now execution stack displayed)
# TODO fix kill button
# TODO multithreaded search
# TODO Re-edit event type parameters
# TODO improve globalsource management for extract (single dest dir), LOG dirs reduction
# TODO Improve global source, i.e. each found archive in dir treated as soon as found
# TODO Display previous lines when an event is displayed
# TODO Add option ignore Python errors
# Imports
import os, sys, traceback, tarfile, zipfile, re, datetime, time, shutil, collections
import psutil
import bfcommons, bfcommons.bfElemTree as ET
__version__ = "0.7.0"
# aib specific settings
if 'aib' in __version__:
defaultPathFilter = r"(.*_Logs\.\d{14}\.(?P<arn>[^.]{,6})\.tar.*|.*)" +\
r"(/inbox/(?P<lsap>LSAP)/(?P<pn>[^/]+)/.*|" +\
r"(ics|bite|messaging|export|control|WLM|TLM|Diameter|Satcom|" +\
r"IMACS|PKI|abdc|GCM|ground|ipsec|agsm)" +\
r"[^/]*\.log[^/]*|messages[\d\-/]*|custoconf/(config|custo)/)"
defaultRexTimestamp = r"^#\d\d#(?P<_Y>\d{4})(?P<_M>\d\d)(?P<_D>\d\d)-" +\
r"(?P<_h>\d\d)(?P<_m>\d\d)(?P<_s>\d\d);" +\
r"([\d\-;]+#){3}(?P<FPFWS>\d\d)#([\-\w]+#){2}(?P<FLT>[^# ]+) *#" +\
r"\.*(?P<ACID>[^#\.]+)#([^#]+##?){9}|" +\
r"^\[(?P<_D1>\d\d)/(?P<_M1>\d\d)/(?P<_Y1>\d?\d?\d\d) (?P<_h1>\d\d):(?P<_m1>\d\d):" +\
r"(?P<_s1>\d\d)\] \w+ *- |" +\
r"^(?P<_Y2>\d{4})-(?P<_D2>\d\d)-(?P<_M2>\d\d) (?P<_h2>\d\d):(?P<_m2>\d\d):" +\
r"(?P<_s2>\d\d)([^\-]+- ){2}|" +\
r"^(?P<_M3>[JFMASOND][a-z]{2}) (?P<_D3>[0123 ]\d) (?P<_h3>\d\d):(?P<_m3>\d\d):" +\
r"(?P<_s3>\d\d) (?P<HOST>[^ ]+) |" +\
r"^#(?P<_Y4>\d{4}) (?P<_M4>\d\d) (?P<_D4>\d\d) (?P<_h4>\d\d):(?P<_m4>\d\d):(?P<_s4>\d\d)#|" +\
r"^(?P<_Y5>\d{4})-(?P<_M5>\d\d)-(?P<_D5>\d\d) (?P<_h5>\d\d):(?P<_m5>\d\d):(?P<_s5>\d\d),"
defaultRexFilename = "(\.log|messages)[.\d\-]*"
defaultRexText = ".*"
else:
defaultPathFilter = ".*\\.log.*"
defaultRexTimestamp = r"^(?P<_Y>\d{4})-(?P<_D>\d\d)-(?P<_M>\d\d) (?P<_h>\d\d):" +\
r"(?P<_m>\d\d):(?P<_s>\d\d)"
defaultRexFilename = ".*"
defaultRexText = ".*"
class Event():
"""Data of found occurrences in logs. To be completely defined, the object methods need to be
called in the following order:
- __init__ to initialize standard fields
- setRaw, setLinenum to set the internal fields once consolidated
- parseText to extract fields from the text match
- parseTimestamp to extract time/date fields from text (calls setTimestamp)
- execute to run execOnMatch code
- parseDisplay to generate the display_on_match field as defined in event type"""
def __init__(self, eventType, path):
"""Initializes an event with the standard fields"""
self.eventType = eventType
# Defines user and system fields dictionaries
self.sfields = dict()
self.ufields = dict()
# Stores standard values (check as well get_field for additional items)
self.sfields['_name'] = eventType.name
self.sfields['_description'] = eventType.description if eventType.description else ""
self.sfields['_source_path'] = path
self.sfields['_source_filename'] = os.path.basename(path)
# Default values if left undefined or failure
self.sfields['_display_on_match'] = None
self.ufields['_changed_fields'] = None
self.setSeqnum(-1)
self.setTimestamp()
self.timestampSpan = (0,0) # Default text span if no timestamp has been found
def __str__(self):
return "Event: id:" + str(id(self)) + " ts:" + str(self.timestamp) +\
" seqnum:" + str(self.seqnum) +\
" ufields:" + str(self.ufields) + " sfields:" + str(self.sfields)
# Function advertised for Python code
def set_field(self, name, value):
if name in self.sfields:
raise RuntimeError("Overwriting " + name + " system field not allowed")
else:
self.ufields[name] = value
# Function advertised for Python code
def set_fields(self, dictionary):
for (name, value) in dictionary.items():
try:
self.set_field(name, value)
except:
pass
# Function advertised for Python code
def add_field(self, name, value):
if name in self.sfields or name in self.ufields:
raise RuntimeError("Field " + name + " already exists")
else:
self.ufields[name] = value
# Function advertised for Python code
def add_fields(self, dictionary):
for (name, value) in dictionary.items():
try:
self.add_field(name, value)
except:
pass
# Function advertised for Python code
def has_field(self, name):
"""Returns true if the given field name or virtual field name is part of the event"""
return (name in ['_user_fields', '_system_fields', '_flat', '_core', '_flat_core']) or \
(name in self.ufields) or (name in self.sfields)
# Function advertised for Python code
def get_field(self, name):
if name in self.ufields: return self.ufields[name]
elif name in self.sfields: return self.sfields[name]
elif name == "_user_fields": return str(self.ufields)
elif name == "_system_fields": return str(self.sfields)
elif name == "_flat":
return self.sfields['_raw'].replace('\n', '')
elif name == "_core":
raw = self.sfields['_raw']
return raw[0:self.timestampSpan[0]] + raw[self.timestampSpan[1]:]
elif name == "_flat_core":
raw = self.sfields['_raw']
core = raw[0:self.timestampSpan[0]] + raw[self.timestampSpan[1]:]
return core.replace('\n', '')
raise RuntimeError("Field " + name + " not found")
# Function advertised for Python code
    def get_user_fields(self):
        """Return the dictionary of user fields (regexp captures / script-set)."""
        return self.ufields
# Function advertised for Python code
    def get_system_fields(self):
        """Return the dictionary of engine-managed system fields."""
        return self.sfields
# Function advertised for Python code
    def seconds_since(self, ev):
        """Return the signed number of seconds elapsed between event *ev* and this one."""
        return (self.timestamp-ev.timestamp).total_seconds()
    def setRaw(self, raw):
        # Full matched text of the event, kept as the '_raw' system field.
        self.sfields['_raw'] = raw
    def setLinenum(self, linenum):
        # Source line number, stored as a string like all displayable fields.
        self.sfields['_line_number'] = str(linenum)
    def setSeqnum(self, num):
        """Set the sequence number, kept both as int and as a string field."""
        self.seqnum = num
        self.sfields['_sequence_number'] = str(num)
def setTimestamp(self, timestamp=None):
"""Sets the timesamp of this event and related fields, minimum time if timestamp not given"""
self.timestamp = timestamp if timestamp is not None else datetime.datetime.min
self.sfields['_timestamp'] = self.timestamp.isoformat()
self.sfields['_date'] = str(self.timestamp.date())
self.sfields['_time'] = str(self.timestamp.time())
    def replaceFields(self, text, events):
        """Replaces fields given as "{field_name}" in a string by their values
        from the object user and system dictionaries"""
        # Replaces special chars
        res = text.replace(r"\t", "\t")
        res = res.replace(r"\n", "\n")
        # Special functions:
        # {fieldname} : field value of current event
        # {rfieldname@evname:} : last field value of other event
        # {rfieldname@evname:rcfieldname=fieldname} : lookup of value of rfield in other event
        # The capturing group makes re.split keep the {...} delimiters at the
        # odd indices of `parts`; even indices hold plain text.
        parts = re.split("({[^{}]+})", res)
        res = parts[0]
        #fields = dict(self.sfields.items() + self.ufields.items())
        for i in range(1, len(parts), 2):
            trans = "N/A" # default value if transformation not successful
            # Extracts text between curly brackets
            src = parts[i][1:-1].strip()
            # Case of simple ref to local field (no "@")
            (fieldname, sep, src) = src.partition("@")
            if len(sep) == 0:
                if self.has_field(fieldname):
                    trans = self.get_field(fieldname)
                else:
                    trans = "FIELD '" + fieldname + "' NOT FOUND"
            # Case of ref to another event ("@" present), checks event type name
            else:
                (evname, sep, src) = src.partition(":")
                if evname not in events:
                    trans = "EVENT TYPE '" + evname + "' NOT FOUND"
                else:
                    ev = None
                    trans = "NO MATCHING EVENT"
                    # Latest value of this event (no ":"), determines nearest event in the past
                    if len(sep) == 0:
                        ev = events.get_event(name=evname, before=self)
                    # Lookup (":" present), need to extract fields around "="
                    else:
                        (rfieldname, sep, cfieldname) = src.partition("=")
                        if len(sep) == 0:
                            trans = "LOOKUP CONDITION '" + src + "' NOT VALID"
                        elif not self.has_field(cfieldname):
                            # NOTE(review): this message interpolates `fieldname`
                            # but the missing field is `cfieldname` -- likely a bug
                            # in the error text; confirm before changing.
                            trans = "COMPARISON FIELD '" + fieldname + "' NOT FOUND"
                        else:
                            ev = events.get_event(name=evname, before=self,
                                                  fields={rfieldname:self.get_field(cfieldname)})
                    # Extraction of field value from found ev
                    if ev:
                        if ev.has_field(fieldname):
                            trans = ev.get_field(fieldname)
                        else:
                            trans = "FIELD '" + fieldname + "' NOT IN FOUND EVENT"
            # Concatenates result with rest of string
            if trans is None: trans = "N/A"
            res += trans + parts[i+1]
        return res
def parseText(self, textRexResult=None):
    """Fill the user fields from the named groups of the text pattern match.

    If no match result is given, the event type's text regexp is searched in
    the current _raw system field; at this point the text is assumed to match.
    """
    match = textRexResult
    if match is None:
        match = self.eventType.searchText(self.sfields['_raw'])
    # At this point, it must be assumed that string matched
    assert match is not None
    # Named groups of the text regexp become the user fields
    self.ufields = match.groupdict()
def parseTimestamp(self, alternativeText=None, sourceTime=None):
    """Matches compiled regexp, if yes updates system fields, may raise exceptions. Searches in
    alternativeText if given, otherwise in current _raw system field.

    The timestamp regexp is expected to define named groups _Y/_M/_D/_h/_m/_s
    (each optionally suffixed by one digit, e.g. "_h2"); _Y and _s may be
    omitted. sourceTime supplies the year when the format has none.
    Raises AssertionError if the regexp does not match or fewer than 4 time
    fields are found, ValueError/KeyError for invalid date components.
    """
    ts = self.eventType.searchTimestamp(alternativeText if alternativeText else self.sfields['_raw'])
    assert ts is not None, "Timestamp regex does not match in" + str(self.sfields['_raw'])
    # Gets named groups
    tsfields = ts.groupdict()
    # Searches for keys in tsfields with names compatible with timestamp fields
    names = dict()
    for k in tsfields.keys():
        # Select a field name if name syntax is as _<letter><empty_or_digit>
        if tsfields[k] is not None and len(tsfields[k])>0 and k[0] == '_' and len(k) in [2,3] and \
           k[1] in ['Y', 'M', 'D', 'h', 'm', 's'] and \
           (len(k) == 2 or (len(k)==3 and k[2] >= "0" and k[2] <= "9")):
            names[k[1]] = k
    # D, h, m plus at least one of Y/M/s must be present (M is required below)
    assert len(names) >= 4, "Not enough timestamp fields"
    # Year field may not be present in timestamp fields
    if 'Y' in names:
        year = int(tsfields[names['Y']])
        if year < 100: year += 2000 # If the year is acquired as 2-digits (e.g. "31/12/16")
    else:
        # Fall back on the source file's modification year, then the current year
        year = sourceTime.year if sourceTime is not None else datetime.datetime.now().year
    # Month field may be given as text
    m = tsfields[names['M']]
    if len(m) <= 2:
        month = int(m)
    else:
        # Only the first three letters are significant ("December" -> "DEC")
        m = m[0:3].upper()
        months = {"JAN":1,"FEB":2,"MAR":3,"APR":4,"MAY":5,"JUN":6,
                  "JUL":7,"AUG":8,"SEP":9,"OCT":10,"NOV":11,"DEC":12}
        month = int(months[m])
    # Second field may not be present
    if 's' in names:
        second = int(tsfields[names['s']])
    else:
        second = 0
    # Convert common time values
    day = int(tsfields[names['D']])
    hour = int(tsfields[names['h']])
    minute = int(tsfields[names['m']])
    # Sets the timestamp fields and retrieves additional fields if everything went well
    self.setTimestamp(datetime.datetime(year, month, day, hour, minute, second))
    # Remember which span of the raw text was the timestamp (used by _core/_flat_core)
    self.timestampSpan = ts.span()
    # Non-underscore named groups of the timestamp regexp become user fields
    for k in tsfields.keys():
        if tsfields[k] is not None and k[0] != "_":
            self.add_field(k, tsfields[k])
def parseDisplay(self, previousEvent=None, events=None):
    """Generates _changed_fields and _display_on_match according to EventType"""
    # Collect the user fields whose value differs from the previous event
    # (every field counts as changed when there is no previous event... except
    # that with no previous event the result is left empty, i.e. None)
    changed = []
    if previousEvent is not None:
        missing = object() # sentinel, never equal to a real field value
        for key in self.ufields:
            if previousEvent.ufields.get(key, missing) != self.ufields[key]:
                changed.append(key)
    self.sfields['_changed_fields'] = ",".join(changed) if changed else None
    # Computes display string on match
    if self.eventType.displayOnMatch:
        self.sfields['_display_on_match'] = self.replaceFields(self.eventType.displayOnMatch, events)
def display(self, hideTimestamp):
    """Prints the event as string if display on match is defined otherwise nothing displayed"""
    if self.sfields['_display_on_match']:
        # Honors DisplayIfChanged: when set, print only if at least one user field changed
        if (self.eventType.displayIfChanged and self.sfields['_changed_fields'] is not None) or \
           (not self.eventType.displayIfChanged):
            # hideTimestamp suppresses the leading timestamp (used for chronological output)
            t = self.sfields['_timestamp'] if not hideTimestamp else ""
            print t + " " + self.sfields['_display_on_match']
def toXML(self, full=True):
    """Returns an XML element, all elements as CDATA if full, otherwise a meaningful subset"""
    # Creates basic element object
    template = "<Event></Event>"
    elem = ET.fromstring(template)
    # Selection of fields if not full: timestamp first, then user fields, then sel2
    sel1 = ["_timestamp"]
    sel2 = ["_flat", "_source_path", "_line_number"]
    # Export system fields in alphabetical order or first set of selected fields
    # NOTE(review): dict.keys() + list works in Python 2 only (keys() returns a list there)
    for k in sorted(self.sfields.keys() + ["_flat", "_flat_core", "_core"]) if full else sel1:
        e = ET.Element(k)
        if full:
            # appendCDATA is presumably provided by a project-patched ElementTree -- confirm
            e.appendCDATA(self.get_field(k))
        else:
            e.text = self.get_field(k) if self.get_field(k) is not None else ""
        elem.append(e)
    # Export user fields in alphabetical order
    for k in sorted(self.ufields):
        e = ET.Element(k)
        if full:
            e.appendCDATA(self.get_field(k))
        else:
            e.text = self.get_field(k) if self.get_field(k) is not None else ""
        elem.append(e)
    # Export second set of selected fields (simplified export only)
    if not full:
        for k in sel2:
            e = ET.Element(k)
            e.text = self.get_field(k) if self.get_field(k) is not None else ""
            elem.append(e)
    return elem
def execute(self, executionContext):
    """Executes the python code ExecOnMatch"""
    # Make the current event visible as 'event' in the user code, then run
    # the compiled ExecOnMatch code of this event's type (if any)
    variables = dict(event=self)
    executionContext.setLocalVariables(variables)
    executionContext.execute('Match', self.eventType.name)
def __cmp__(self, other):
    """Python 2 ordering: by timestamp first, then by sequence number.

    Returns -1/0/1 as required by the __cmp__ protocol. The original
    implementation fell through and returned None when both the timestamp
    and the sequence number were equal, which is not a valid cmp result;
    now explicitly returns 0 for equal events.
    """
    if self.timestamp != other.timestamp:
        return -1 if self.timestamp < other.timestamp else 1
    elif self.seqnum != other.seqnum:
        return -1 if self.seqnum < other.seqnum else 1
    return 0
class ExecutionContext:
    """Special object encapsulating the execution of Python code"""
    def __init__(self, events, eventTypes):
        """Inits the object with the shared EventSet and the event type dictionary"""
        # Stores main objects
        self.events = events
        self.eventTypes = eventTypes
        # Defines additional functions to be visible as local/global functions in execution context
        def get_event(name=None, fields=None, before=None):
            return self.events.get_event(name, fields, before)
        def get_events(name=None, fields=None, before=None, limit=0):
            return self.events.get_events(name, fields, before, limit)
        def delete_event(event):
            return self.events.delete_event(event)
        # Copy of local variables to be extended at each run (including local functions above);
        # this snapshot also captures events/eventTypes/self for the executed user code
        self.locals = locals().copy()
        # chronological, outputdir
    def setLocalVariables(self, vars):
        """Set local variables given as a dict"""
        self.locals.update(vars)
    def execute(self, phase, name):
        """Executes the given code, where phase gives the ExecOn<phase> code to call ('Init', 'File',
        'Match' or 'Wrapup'), name the name of the event type and localVariables a dict of
        local variables available in current and later executions"""
        # Determines the related compiled code; the uncompiled source is used as a
        # presence flag, the compiled object is what actually runs
        evt = self.eventTypes[name]
        code = None
        if phase == 'Init' and evt.execOnInit is not None:
            code = evt.compiledExecOnInit
        elif phase == 'File' and evt.execOnFile is not None:
            code = evt.compiledExecOnFile
        elif phase == 'Match' and evt.execOnMatch is not None:
            code = evt.compiledExecOnMatch
        elif phase == 'Wrapup' and evt.execOnWrapup is not None:
            code = evt.compiledExecOnWrapup
        # Stops if no code needs to be executed
        if code is None:
            return
        # Stores given name in local variables
        self.locals['name'] = name
        # Executes the compiled code in own set of local and global variables
        # (Python 2 'exec ... in ...' statement; user-supplied code is trusted by design)
        exec code in self.locals, globals()
class EventSet(dict):
    """Structure holding events found during search. Events are arranged in lists per eventType,
    where each list is referenced by the related event type name in a dictionary. A flat list
    of all events (self.sequence) is kept in parallel for cross-type searches."""
    def __init__(self, eventTypes):
        """Inits the object using the list of event types, i.e. creates empty lists in dict"""
        # Sequence of all events used for event searches
        self.sequence = list()
        # Main structure holding events: one list per known event type name
        for k in eventTypes.keys():
            self[k] = list()
        # Sequence number to be increased after each addition of event
        self.curSeqnum = 0
    def add_event(self, event):
        """Adds event after setting the sequence number in event"""
        # Sets the sequence number to globally managed value, then increments it
        event.setSeqnum(self.curSeqnum)
        self.curSeqnum += 1
        # Adds event to both the per-type list and the global sequence
        self[event.eventType.name].append(event)
        self.sequence.append(event)
    def delete_event(self, event):
        """Removes given event from lists"""
        # Removes event from both lists if found (user code may delete twice)
        if event in self.sequence:
            del self.sequence[self.sequence.index(event)]
        if event in self[event.eventType.name]:
            del self[event.eventType.name][self[event.eventType.name].index(event)]
    def get_events(self, name=None, fields=None, before=None, limit=0):
        """Returns an iterator on the latest events in multi-criterion search. The
        function may raise exceptions if the parameters are invalid, or may return None if no
        event was found. Events are searched in the full list (self.sequence) starting from the end.
        Parameters:
        - name: name of the event, or search all events if no name given
        - before: given as a timestamp or event
        - fields: dictionary of field names/values, all need to match
        - limit: max number of events to return (default 0, i.e. no limit)"""
        # Validates inputs
        if name is not None:
            assert name in self, "Given event name " + str(name) + " is not known in event set"
        # Main loop into full list of events, or dedicated list if name is given,
        # scanned backwards so the most recent events come first
        num = 0
        for ev in reversed(self.sequence if name is None else self[name]):
            isMatching = True
            # Checks name
            if name is not None and ev.eventType.name != name: isMatching = False
            # Checks fields: every requested field must exist and carry the requested value
            if isMatching and fields is not None:
                for kf in fields.keys():
                    if (kf not in ev.sfields and kf not in ev.ufields) or \
                       (kf in ev.ufields and ev.ufields[kf] != fields[kf]) or \
                       (kf in ev.sfields and ev.sfields[kf] != fields[kf]):
                        isMatching = False
            # Checks before, duck-typed: an event exposes timestamp/seqnum,
            # a datetime exposes utcfromtimestamp
            if isMatching and before is not None:
                if (hasattr(before, "timestamp") and before.timestamp < ev.timestamp) or \
                   (hasattr(before, "seqnum") and before.seqnum <= ev.seqnum) or \
                   (hasattr(before, "utcfromtimestamp") and before < ev.timestamp):
                    isMatching = False
            # Final test
            if isMatching:
                yield ev
                num += 1
                if num == limit: break
    def get_event(self, name=None, fields=None, before=None):
        """Returns a single event or None, same search criteria as get_events, except limit parameter"""
        for e in self.get_events(name, fields, before, limit=1):
            return e
        return None
    def sortEvents(self):
        """Sorts all events according to their timestamps and sequence number, then sets sequence
        numbers according to new ordering"""
        # Reset list for all events
        self.sequence = list()
        # Sorts each list of stored events, and adds it to the main list
        for l in self.values():
            l.sort()
            self.sequence.extend(l)
        # Finally sorts all events in main list
        self.sequence.sort()
        # Re-compute sequence numbers to reflect the new chronological order
        for i, ev in enumerate(self.sequence):
            ev.setSeqnum(i)
        return self.sequence
    def finalizeEvents(self, executionContext):
        """Deferred execution of Python code and parsing of display strings for chronological search"""
        # Executes the python code of all the events in the sequence.
        # Iterates over a copy because events can be deleted during execution
        fullseq = list(self.sequence)
        for e in fullseq:
            if e in self.sequence and not e.eventType.immediate:
                e.execute(executionContext)
        # Creates display strings of each event in each list
        for l in self.values():
            prev = None
            for ev in l:
                ev.parseDisplay(prev, self)
                prev = ev
    def save(self, outputdir):
        """Saves the content of the events into XML/CSV files in outputdir"""
        # Creates 1 CSV and 2 XML files per event name, one simplified and one full
        for k in self.keys():
            for ext in [".xml", ".full.xml", ".csv"]:
                # Creates XML file (own file creation to avoid keeping whole file in memory)
                filename = os.path.join(outputdir, k + ext)
                with open(filename, "w") as f:
                    # CSV export. Bug fix: compare strings with '==' instead of 'is'
                    # (identity of equal string literals is a CPython implementation detail)
                    if ext == ".csv":
                        sfsel = ["_timestamp", "_name", "_display_on_match", "_changed_fields", "_flat"]
                        ufsel = sorted(self[k][0].ufields.keys()) if len(self[k])>0 else []
                        # CSV Header
                        for s in sfsel + ufsel: f.write(s + ";")
                        f.write("\n")
                        # Export events; newlines and separators are made CSV-safe
                        def trans(s): return "" if s is None else s.replace("\n", " ").replace(";", " ")
                        for ev in self[k]:
                            for kf in sfsel + ufsel:
                                f.write(trans(ev.get_field(kf) if ev.has_field(kf) else None) + ";")
                            f.write("\n")
                    # XML export
                    else:
                        # XML Header
                        f.write("<?xml version='1.0' encoding='utf-8'?>\n<RegulogEvents>\n")
                        # Adds events to XML data (full export uses CDATA, see Event.toXML)
                        for ev in self[k]:
                            xev = ev.toXML("full" in ext)
                            f.write(" " + ET.tostring(xev) + "\n")
                        # XML End of file
                        f.write("</RegulogEvents>\n")
class EventSearchContext(dict):
    """Holds the state of one whole search run: the events found so far, the execution
    context for user Python code, per-file search state and progress counters."""
    def __init__(self, verbosity, eventTypes, chronological, outputdir):
        """Creates the EventSet and ExecutionContext, then runs each type's Init code"""
        # Internal variables
        self.verbosity = verbosity
        self.eventTypes = eventTypes
        self.chronological = chronological
        # Used to display advancement
        self.numProcessedLines = 0
        self.numFoundEvents = 0
        self.lastPrintedAdvancement = datetime.datetime.now()
        self.lastNumProcessedLines = 0
        # Creates main structure holding events, i.e. dict of lists of events, key is event name
        self.events = EventSet(self.eventTypes)
        # Creates execution context
        self.executionContext = ExecutionContext(self.events, self.eventTypes)
        # Execute start Python code of events
        d = dict(verbosity=verbosity, output_directory=outputdir, chronological=chronological)
        self.executionContext.setLocalVariables(d)
        for evt in self.eventTypes.values():
            self.executionContext.execute('Init', evt.name)
    def printAdvancement(self, currentLogPath):
        """Prints statistics information every 30 seconds"""
        # Checks only every 10000 lines to keep the overhead negligible
        if self.numProcessedLines % 10000 == 0:
            # Prints if the time delta exceeds 30 seconds
            dt = (datetime.datetime.now() - self.lastPrintedAdvancement).seconds
            if dt > 30 :
                self.lastPrintedAdvancement = datetime.datetime.now()
                dl = self.numProcessedLines - self.lastNumProcessedLines
                self.lastNumProcessedLines = self.numProcessedLines
                # Inconsistencies exist in psutil (different on Linux): probe which
                # memory accessor this psutil version provides
                proc = psutil.Process(os.getpid())
                if "get_memory_info" in psutil.Process.__dict__:
                    mem = proc.get_memory_info()[0]
                elif "memory_info" in psutil.Process.__dict__:
                    mem = proc.memory_info()[0]
                else:
                    mem = 0
                print "\n", self.numProcessedLines, "lines -", int(dl/dt), "lines/sec -", \
                      self.numFoundEvents, "events -", int(mem / (1024*1024)), "MBytes -", \
                      "Now at", currentLogPath
    def checkSource(self, filePath, fileTime):
        """Checks if file path is matching at least one event type, then prepares internal structures.
        Timestamp on file is given in order to get Year value if missing in the timestamp
        definition."""
        self.searchFilePath = filePath
        self.searchFileTime = fileTime
        # Gets events matching filename into new list
        self.searchEventTypes = list()
        for evt in self.eventTypes.values():
            if evt.searchFilename(filePath):
                # Stores event type into list
                self.searchEventTypes.append(evt)
                # Executes the related execOnFile code for this event type
                d = dict(source_filename=os.path.basename(filePath), source_path=filePath)
                self.executionContext.setLocalVariables(d)
                self.executionContext.execute('File', evt.name)
        # Prepares structures and returns true if at least one event matched
        if len(self.searchEventTypes) > 0:
            # Prepares buffer of log text strings for multiline log entries support
            self.lines = collections.deque(maxlen=100) # Previous lines to scan for timestamp
            self.unfinishedEvents = dict() # Events while looking for following timestamp
            self.linenum = 0 # Current line number in source file
            return True
        return False
    def getMultiline(self, num):
        """Returns a string built of the most recent num lines starting backwards"""
        # Packs all lines from event into one; self.lines[0] is the most recent line,
        # so the result is in original (chronological) order
        res = ''
        for i in range(min(num, len(self.lines))):
            res = self.lines[i] + ('\n' if i > 0 else '') + res
        return res
    def storeNewEvent(self, ev, eventLinesCount):
        """Completes event definition if not chronological and stores it into list of event"""
        # Packs all lines related to this event into one (at this stage the current line is not
        # in the previous lines, eventLinesCount was updated previously).
        # Then updates event content, no new parseText as the user fields were already extracted.
        ev.setRaw(self.getMultiline(eventLinesCount))
        # Updates linenum using the event lines count, taking num of lines into account
        # NOTE(review): the -(eventLinesCount+1) offset looks off by one against the
        # pre-incremented self.linenum -- confirm before changing
        ev.setLinenum(self.linenum - (eventLinesCount+1))
        # Adds created event to current lists
        self.events.add_event(ev)
        self.numFoundEvents += 1
        # Exec Python and creates display strings immediately using previous event if not chronological
        if ev.eventType.immediate or not self.chronological:
            # Executes execOnMatch code
            ev.execute(self.executionContext)
            # Events can be deleted during execution (including the current one)
            if ev in self.events[ev.eventType.name]:
                # Determines previous event if any and computes display string
                pev = self.events[ev.eventType.name][-2] if len(self.events[ev.eventType.name])>1 else None
                ev.parseDisplay(pev, self.events)
    def checkLine(self, line, finishEvents=True):
        """Detects and stores events found in the given line of text (without CR). Function must be
        called with 'line' set to None to finish current multiline treatment. If finishEvents is
        false, then acquires events without waiting for next line with timestamp.

        Generator: yields each completed Event object.
        """
        # Handles unfinished events that were created during previous calls, i.e. check if the
        # current line contains a timestamp applicable for this event type found in previous lines
        if len(self.unfinishedEvents) > 0:
            # NOTE(review): deleting from the dict while iterating .values() is only safe
            # under Python 2 (where values() returns a list copy)
            for ev in self.unfinishedEvents.values():
                # Checks if the current line contains a timestamp or it is the last line (line=None),
                # i.e. completes the fields and stores the event
                if line is None or ev.eventType.searchTimestamp(line):
                    # Completes fields and stores new event
                    self.storeNewEvent(ev, self.eventLinesCount)
                    # Removes this event from the list and returns it
                    del self.unfinishedEvents[ev.eventType.name]
                    yield ev
            # In any case increases the number of lines belonging to these events, created before
            self.eventLinesCount += 1
        # Handles new events starting at the current line
        if line is not None:
            # Stores line in multiline buffer
            self.lines.appendleft(line)
            # Updates current line number in source (pre-incrementation)
            self.linenum += 1
            # Search for known event types in given line, only if there is no unfinished event on-going
            for evt in self.searchEventTypes:
                if evt.name not in self.unfinishedEvents:
                    # Creates string with the current and previous lines for multiline patterns
                    multiline = line if evt.multilineCount == 1 else self.getMultiline(evt.multilineCount)
                    # Checks if text on current multiline matches the text pattern
                    rexResult = evt.searchText(multiline)
                    # If one event type matched, and match is on the last line of the multiline string
                    if rexResult and (len(multiline)-rexResult.span()[1]) < len(line):
                        if self.verbosity >= 2: print "---", multiline.replace("\n", " ")
                        # Creates Event object
                        ev = Event(evt, self.searchFilePath)
                        ev.parseText(rexResult)
                        # Looks in the current and previous lines to find a matching timestamp, and sets the
                        # eventLinesCount accordingly (reset to 1 if not found)
                        timestampFound = False
                        self.eventLinesCount = 1
                        for l in self.lines:
                            try:
                                ev.parseTimestamp(l, sourceTime=self.searchFileTime)
                                timestampFound = True
                                break
                            except:
                                self.eventLinesCount += 1
                        else:
                            self.eventLinesCount = 1
                        # If no timestamp could be found, at least prints a detailed description of the issue
                        # during the timestamp parsing process
                        if not timestampFound and self.verbosity >= 2:
                            print "WARNING: no timestamp found for this event\n" + multiline
                            try:
                                ev.parseTimestamp(multiline, sourceTime=self.searchFileTime)
                            except Exception as e:
                                print e
                                stack = traceback.format_exc().splitlines()
                                print stack[-3] + "\n" + stack[-2] + "\n" + stack[-1]
                        # Force output of new unfinished event if set, or no timestamp was found (little
                        # chance that a timestamp will be found in the next lines if no one was found in the
                        # previous lines)
                        if not finishEvents or not timestampFound:
                            self.storeNewEvent(ev, self.eventLinesCount)
                            yield ev
                        # Inserts event into list of unfinished events for later processing
                        else:
                            self.unfinishedEvents[evt.name] = ev
        # Increments counter for statistics
        self.numProcessedLines += 1
    def wrapup(self, outputdir):
        """Sorts the events to display in chronological order, and save events in files if
        the given outputdir is not None. Returns the full list of events if chronological
        otherwise an empty list."""
        sl = list()
        # Sorts and finalizes events if necessary
        if self.chronological:
            sl = self.events.sortEvents()
            self.events.finalizeEvents(self.executionContext)
        # Executes wrapup Python code of event types
        for evt in self.eventTypes.values():
            self.executionContext.execute('Wrapup', evt.name)
        # Export sorted events
        if outputdir:
            print "\nSaving events as XML/CSV"
            self.events.save(outputdir)
        return sl
class EventType:
    """Data to search text in logs for a particular set of files"""
    def __init__(self):
        """Pre-initialization with given mandatory values set to None"""
        self.name = None
    def init(self, rexFilename=None, rexText=None, rexTimestamp=None, multilineCount=1,
             caseSensitive=False, name=None, description=None, displayOnMatch=None,
             displayIfChanged=False,
             execOnInit=None, execOnMatch=None, execOnWrapup=None,
             execOnFile=None, immediate=False):
        """Initializes an event definition completely from the given parameters.

        Empty strings are treated like missing values and replaced by defaults.
        Raises RuntimeError when a regexp or a Python code snippet cannot be compiled.
        """
        # Local helper function to treat all inputs the same way (empty == absent)
        def getValid(value, alternative):
            if value is not None and len(value) > 0: return value
            else: return alternative
        # Import simple values
        self.name = getValid(name, "DEFAULT_EVENT_TYPE")
        self.description = getValid(description, "N/A")
        self.multilineCount = int(multilineCount)
        self.caseSensitive = caseSensitive
        self.immediate = immediate
        self.displayOnMatch = displayOnMatch
        self.displayIfChanged = displayIfChanged
        # Helper function to compile and raise error if regexp cannot be compiled
        def getCompiledRegexp(name, regexp, flags=0):
            if regexp is None or len(regexp) == 0: return None
            try:
                return re.compile(regexp, flags)
            except Exception as e:
                raise RuntimeError("Regexp compile error for '" + name + "': " + str(e) + " in\n" + regexp)
        # Import regexps
        self.rexFilename = getValid(rexFilename, defaultRexFilename)
        self.compiledRexFilename = getCompiledRegexp("RexFilename", self.rexFilename)
        self.rexText = getValid(rexText, defaultRexText)
        # Multiline patterns need MULTILINE/DOTALL so '.' and anchors span lines
        rexTextFlags = (re.IGNORECASE if not self.caseSensitive else 0)
        rexTextFlags |= (re.MULTILINE | re.DOTALL) if self.multilineCount > 1 else 0
        self.compiledRexText = getCompiledRegexp("RexText", self.rexText, rexTextFlags)
        self.rexTimestamp = getValid(rexTimestamp, defaultRexTimestamp)
        self.compiledRexTimestamp = getCompiledRegexp("RexTimestamp", self.rexTimestamp)
        # Helper function to compile Python code and raise error
        def getCompiledCode(name, code):
            if code is None or len(code) == 0: return None
            try:
                return compile(code, '<string>', 'exec')
            except Exception as e:
                raise RuntimeError("Python code compile error for '" + name + "': " + str(e) +\
                                   " in\n" + code)
        # Import Python code
        self.execOnInit = getValid(execOnInit, None)
        self.compiledExecOnInit = getCompiledCode("ExecOnInit", self.execOnInit)
        self.execOnFile = getValid(execOnFile, None)
        self.compiledExecOnFile = getCompiledCode("ExecOnFile", self.execOnFile)
        self.execOnMatch = getValid(execOnMatch, None)
        self.compiledExecOnMatch = getCompiledCode("ExecOnMatch", self.execOnMatch)
        self.execOnWrapup = getValid(execOnWrapup, None)
        self.compiledExecOnWrapup = getCompiledCode("ExecOnWrapup", self.execOnWrapup)
    def __str__(self):
        """Multi-line human-readable dump of all properties"""
        res = "EventType '" + str(self.name) + "'\n"
        for s,v in [["Description", self.description], ["Filename regexp", self.rexFilename],
                    ["Text regexp", self.rexText], ["Timestamp regexp", self.rexTimestamp],
                    ["Multiline pattern count", self.multilineCount],
                    ["Case sensitive pattern search", self.caseSensitive],
                    ["Immediate processing", self.immediate],
                    ["Displayed on match", self.displayOnMatch],
                    ["Display if changed", self.displayIfChanged],
                    ["Python code on init", "\n" + str(self.execOnInit)],
                    ["Python code on file", "\n" + str(self.execOnFile)],
                    ["Python code on match", "\n" + str(self.execOnMatch)],
                    ["Python code on wrap-up", "\n" + str(self.execOnWrapup)]]:
            res += " " + s + ": " + str(v) + "\n"
        return res
    def searchFilename(self, path):
        """Returns result of regexp search on filename"""
        return self.compiledRexFilename.search(os.path.basename(path))
    def searchText(self, text):
        """Returns result of regexp search on text"""
        return self.compiledRexText.search(text)
    def searchTimestamp(self, text):
        """Returns result of regexp search on timestamp"""
        return self.compiledRexTimestamp.search(text)
    def initXML(self, xev):
        """Initializes event type from an XML element, see regulog.xsd"""
        # Mandatory fields for an event (exception if not found in XML file)
        assert xev.tag == "EventType", "Attempt to parse tag " + xev.tag + " as log event definition"
        name = xev.find("Name").text
        # Helper Functions
        def getStringTag(elem, tagname):
            e = elem.find(tagname)
            # Bug fix: an empty element (<Tag/> or <Tag></Tag>) has e.text set to None,
            # which previously crashed on len(None); treat it like a missing tag
            if e is None or e.text is None or len(e.text) == 0: return None
            return e.text
        def getBoolTag(elem, tagname):
            e = elem.find(tagname)
            return e is not None and e.text == "true"
        # Optional string fields (replaced by default values if not present via EventType.init)
        rexFilename = getStringTag(xev, "RexFilename")
        rexText = getStringTag(xev, "RexText")
        rexTimestamp = getStringTag(xev, "RexTimestamp")
        description = getStringTag(xev, "Description")
        displayOnMatch = getStringTag(xev, "DisplayOnMatch")
        execOnInit = getStringTag(xev, "ExecOnInit")
        execOnFile = getStringTag(xev, "ExecOnFile")
        execOnMatch = getStringTag(xev, "ExecOnMatch")
        execOnWrapup = getStringTag(xev, "ExecOnWrapup")
        # Optional boolean fields
        displayIfChanged = getBoolTag(xev, "DisplayIfChanged")
        immediate = getBoolTag(xev, "Immediate")
        caseSensitive = getBoolTag(xev, "CaseSensitive")
        # Other fields
        e = xev.find("MultilineCount")
        multilineCount = 1 if e is None else int(e.text)
        self.init(rexFilename, rexText, rexTimestamp, multilineCount, caseSensitive, name, description,
                  displayOnMatch, displayIfChanged, execOnInit, execOnMatch, execOnWrapup, execOnFile,
                  immediate)
    def toXML(self):
        """Returns an XML element with properties of this event type"""
        # Helper functions
        def setStringTag(elem, tagname, text, cdata=False):
            if text is not None and len(text)>0:
                e = ET.Element(tagname)
                if not cdata:
                    e.text = text
                else:
                    # appendCDATA/indent are presumably provided by a project-patched
                    # ElementTree -- confirm
                    e.appendCDATA(text)
                elem.append(e)
        def setBoolTag(elem, tagname, val):
            setStringTag(elem, tagname, None if not val else 'true')
        # Creates element object and fields
        elem = ET.Element('EventType')
        setStringTag(elem, 'Name', self.name)
        setStringTag(elem, 'Description', self.description)
        setStringTag(elem, 'RexFilename', self.rexFilename, True)
        setStringTag(elem, 'RexText', self.rexText, True)
        setStringTag(elem, 'MultilineCount', str(self.multilineCount) if self.multilineCount > 1 \
                     else None)
        setBoolTag(elem, 'CaseSensitive', self.caseSensitive)
        setStringTag(elem, 'RexTimestamp', self.rexTimestamp, True)
        setStringTag(elem, 'DisplayOnMatch', self.displayOnMatch)
        setBoolTag(elem, 'DisplayIfChanged', self.displayIfChanged)
        setBoolTag(elem, 'Immediate', self.immediate)
        setStringTag(elem, 'ExecOnInit', self.execOnInit, True)
        setStringTag(elem, 'ExecOnFile', self.execOnFile, True)
        setStringTag(elem, 'ExecOnMatch', self.execOnMatch, True)
        setStringTag(elem, 'ExecOnWrapup', self.execOnWrapup, True)
        elem.indent()
        elem.tail = "\n\n"
        return elem
    def write(self, filename):
        """Write event type to file, modifies XML block in place if file already exists.
        Returns a string with the content of the written file."""
        # Reads existing file or creates an XML Element from template
        if os.path.isfile(filename):
            elemtree = ET.parse(filename)
        else:
            # Bug fix: the version comment was left unterminated ('<!--' with no '-->'),
            # which made the fresh-file template unparseable
            template = """<Regulog xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
                 xsi:noNamespaceSchemaLocation='regulog.xsd'>
                 <!-- Created with Regulog version """ + __version__ + """ -->
                 </Regulog>"""
            elemtree = ET.fromstring(template)
        # Creates an XML block from this event type
        xev = self.toXML()
        # Checks if this event type already exists in file
        for e in elemtree.findall('EventType'):
            n = e.find('Name')
            if n is not None and n.text == self.name:
                # Replaces content in place so the block keeps its position in the file
                e.clear()
                e.extend(list(xev))
                break
        else:
            # Otherwise appends at end of file
            # NOTE(review): in the fresh-template case elemtree is an Element, which has no
            # getroot(); this presumably relies on a project-patched ElementTree -- confirm
            elemtree.getroot().append(xev)
        # Re-checks elements that need to be exported as CDATA
        for e in elemtree.findall('EventType'):
            for se in e.findall('./*'):
                if (se.tag[:3] == "Rex" or se.tag[:6] == "ExecOn") and len(se.findall('./*')) == 0:
                    text = se.text
                    se.text = None
                    se.appendCDATA(text)
        # Pretty prints result
        elemtree.getroot().indent()
        # Inserts newlines to improve XML visually
        elemtree.getroot().text = "\n" + elemtree.getroot().text
        for e in elemtree.findall('EventType') + elemtree.findall('Include') + \
                 elemtree.findall('Description'):
            e.tail = "\n" + e.tail
        # Writes results
        elemtree.write(filename, 'utf-8', True)
        # Returns resulting XML code for debug
        return ET.tostring(elemtree.getroot())
class EventTypeList(dict):
    """List of Event Types with import/export XML file"""
    def __init__(self, verbosity):
        # Verbosity level, kept for symmetry with the other context classes
        self.verbosity = verbosity
    def addEventType(self, eventType):
        """Registers an event type under its name (replaces an existing one silently)"""
        self[eventType.name] = eventType
    def readEventTypes(self, filename):
        """Reads event types file (XML, one EventType element per definition)"""
        xml = ET.parse(filename)
        for xev in xml.findall("EventType"):
            le = EventType()
            le.initXML(xev)
            self.addEventType(le)
    def printEventTypes(self):
        """Dumps all registered event types to stdout"""
        for el in self.values():
            print str(el)
class LogSource:
"""Source of log files from a directory (DIR), a tar archive (TAR), as zip archive (ZIP) or
files directly given (LOG). An open tarfile/zipfile object is kept for archives."""
class LogSourceFile:
    """Helper class to store variables of source log file"""
    def __init__(self, path, pseudoPath, time, size, info=None, fields=None):
        """Constructor with path (absolute path for LOG, relative for DIR/ZIP/TAR), time (timestamp
        of last modification), info (info object for ZIP/TAR), groupdict fields.
        fields defaults to a fresh empty dict per instance."""
        self.path = path # full or relative path
        self.pseudoPath = pseudoPath # Full path normalized, including archive names
        self.time = time
        self.size = size
        self.info = info
        # Bug fix: the previous 'fields=dict()' default was a single dict shared by
        # every instance constructed without explicit fields (mutable default argument)
        self.fields = fields if fields is not None else dict()
        self.offset = 0 # Modified during file re-ordering (log4j)
        self.destinationBasePath = None # Used for path reduction (non-modifiable part)
        self.destinationRelativePath = None # Used for path reduction (modifiable part)
def __init__(self, verbosity, type, path=None, archive=None):
    """Inits the internal variables with the type of the source (DIR/TAR/ZIP/LOG), the base path
    equal to the archive file path (TAR/ZIP) or the directory of log files searched
    by directory (DIR). A TarFile/ZipFile object must be provided for archives (TAR/ZIP)."""
    self.verbosity = verbosity
    self.type = type # 'DIR', 'TAR', 'ZIP', 'LOG'
    self.path = path # Pseudo path of archive/dir (empty for LOG)
    self.archive = archive # Archive object TarFile/ZipFile
    self.logs = list() # List of LogSourceFile objects, in original order
    # Time range covered by the source, refined in add() (max/min == empty range)
    self.earliest = datetime.datetime.max
    self.latest = datetime.datetime.min
def getPseudoPath(self, logpath):
    """Return the normalized pseudo-path (forward slashes only) of the given
    logpath below this source's base path."""
    joined = os.path.join(self.path, logpath)
    return joined.replace("\\", "/")
def add(self, logpath, fields):
"""Adds a logpath to the list, given as a relative (TAR/ZIP) or absolute (LOG/DIR) path
to a log file, and a list of fields extracted from the path as a re.groupdict dictionary."""
# Retrieves last modification time from file or archive member
if self.type in ['LOG', 'DIR']:
info = None
tm = datetime.datetime.fromtimestamp(os.stat(logpath).st_mtime)
elif self.type is 'TAR':
info = self.archive.getmember(logpath)
tm = datetime.datetime.fromtimestamp(info.mtime)
elif self.type is 'ZIP':
info = self.archive.getinfo(logpath)
i = info.date_time
tm = datetime.datetime(i[0], i[1], i[2], i[3], i[4], i[5])
# Updates time
tm = tm.replace(microsecond=0)
if tm < self.earliest : self.earliest = tm
if tm > self.latest : self.latest = tm
# Gets size
if self.type is 'ZIP': size = info.file_size
elif self.type is 'TAR': size = info.size
else: size = os.path.getsize(logpath)
# Relative path to base path needs to be stored for DIR
spath = os.path.relpath(logpath, self.path) if self.type is 'DIR' else logpath
pseudoPath = self.getPseudoPath(spath)
# Stores path
self.logs.append(self.LogSourceFile(spath, pseudoPath, tm, size, info, fields))
if self.verbosity >= 2:
print "--", self.type, str(tm), pseudoPath, fields
def count(self):
return len(self.logs)
def __str__(self):
return self.type + " -- " + str(self.earliest) + " to " + str(self.latest) + " -- " + \
str(self.count()) + " file(s) -- " + self.path
def setDestinationPaths(self, outputdir, keepsourcedirs, globalsource):
"""Sets the destinationBasePath and destinationRelativePath members in log source objects"""
# Option keepsourcedirs: appends extra directory to outputdir
# If destination path already exists, then increases suffix "-000", "-001", etc
if keepsourcedirs:
if self.type is 'LOG': # or globalsource:
mdir = ""
else:
(dir, mdir) = os.path.split(self.path)
mdir += "-"
i = 0
done = False
while not done:
dir = os.path.join(outputdir, mdir + "%03d" % i)
if not os.path.lexists(dir): done = True
else: i += 1
outputdir = dir
# Set destination paths, keeps original order
for l in self.logs:
# Computes the prefix dir based on fields acquired from path filter, in alphabetical order
pref = ""
for kf in sorted(l.fields.keys()):
p = l.fields[kf]
if p is not None and len(p) > 0:
for c in "\"\\/:*?<>|": p = p.replace(c, "_")
pref = os.path.join(pref, p)
# Sets path parts by default
l.destinationBasePath = os.path.normpath(os.path.join(outputdir, pref))
if self.type is 'LOG':
# For type LOG: start relative path is only the filename
l.destinationRelativePath = os.path.basename(l.path)
else:
# For other types: start relative path is the relative path inside archive or directory
l.destinationRelativePath = os.path.normpath(l.path)
def reduceDestinationPaths(self, joinlog4j, reducedirs):
if joinlog4j or reducedirs:
# Creates dictionary of dictionaries, with destination dirs as keys, and then paths as keys
# and list of LogSourceFile objects
# At the beginning each list contains only one element
destinationPaths = dict()
for l in self.logs:
# Sets new entry in first dictionary if not already present, then stores log file object
if l.destinationBasePath not in destinationPaths:
destinationPaths[l.destinationBasePath] = dict()
destinationPaths[l.destinationBasePath][l.destinationRelativePath] = [l]
# Option joinlog4j: re-order and gather related log files
if joinlog4j:
for kbase in destinationPaths.keys():
for dest in destinationPaths[kbase].keys():
# Check if key still in dict as some keys will be removed
if dest in destinationPaths[kbase]:
# Put the related log files under the same destination file, removes found entries
i = 1
while (dest + "." + "%d"%i) in destinationPaths[kbase]:
odest = dest + "." + "%d"%i
destinationPaths[kbase][dest].extend(destinationPaths[kbase][odest])
del destinationPaths[kbase][odest]
i += 1
destinationPaths[kbase][dest].reverse()
# Updates offsets
offset = 0
for l in destinationPaths[kbase][dest]:
l.offset = offset
offset += l.size
# Option reducedirs: find common path and removes it
if reducedirs:
for kbase in destinationPaths.keys():
# Prepares new dictionary with paths for processing (keys same as content at beginning)
newdests = dict()
for dest in destinationPaths[kbase].keys():
newdests[dest] = dest
# Tries to remove top level directories until destinations overlap for at least one file
done = False
while not done:
# For each dest path, removes top dir in path (if any), then puts it in a Python set
s = set()
for ndest in newdests.values():
cropped = ndest.split(os.sep, 1)[-1]
s.add(cropped)
# If number of elements in set is the same as the original dict, then no path overlap
if len(s) == len(newdests):
# Updates newdests with cropped paths for next iterration
noupdate = True # Will be set to False if one value is different than from last iter
for ndest in newdests.keys():
cropped = newdests[ndest].split(os.sep, 1)[-1]
#print ">>cropped", cropped
#print ">>newdests[ndest]", newdests[ndest]
if cropped != newdests[ndest]: noupdate = False
newdests[ndest] = cropped
if noupdate: done = True # Stops if no dir remains in dest paths
else:
done = True # Stops if next removed dir level would make path overlap, i.e. reduction OK
# Updates main dictionary back with new destination paths
for (old, new) in newdests.items():
if new not in destinationPaths[kbase]:
destinationPaths[kbase][new] = destinationPaths[kbase][old]
del destinationPaths[kbase][old]
# Finally updates self.logs list with destination path
for kbase in destinationPaths.keys():
for dest in destinationPaths[kbase].keys():
for source in destinationPaths[kbase][dest]:
source.destinationRelativePath = dest
# Prints results of re-ordering using internal list
if self.verbosity >= 2:
print "\nNew destination paths and offsets in source log files:"
for l in self.logs:
print "--", l.size, l.offset, l.destinationBasePath, l.destinationRelativePath, l.path
def extract(self, outputdir, keepsourcedirs=False, joinlog4j=False, reducedirs=False,
globalsource=False):
"""Extract log files from this source to the outputdir"""
print "\nStarting extraction of", self.type, self.path
filenames = None # Only for debug info
prevdest = None # Only for debug info
# Adapts destination paths in self.logs list of LogSourceFile objects
self.setDestinationPaths(outputdir, keepsourcedirs, globalsource)
self.reduceDestinationPaths(joinlog4j, reducedirs)
# Extract source files to destination files
for l in self.logs:
destFullPath = os.path.normpath(os.path.join(l.destinationBasePath, l.destinationRelativePath))
# Checks if the destination directory exists, if not creates it
dirpath = os.path.dirname(destFullPath)
parts = list()
while not (os.path.exists(dirpath) and os.path.isdir(dirpath)):
(dirpath, part) = os.path.split(dirpath)
parts.append(part)
parts.reverse()
for part in parts:
dirpath = os.path.join(dirpath, part)
if self.verbosity >= 2: print "-- Make directory", dirpath
os.mkdir(dirpath)
# Sets time values for re-setting once file has been closed
destexists = os.path.exists(destFullPath)
if destexists:
curtime = datetime.datetime.fromtimestamp(os.stat(destFullPath).st_mtime)
else:
curtime = datetime.datetime.min
# Opens source and destination files
destfile = open(destFullPath, 'r+b' if destexists else 'wb')
if self.type is 'LOG': sourcefile = open(l.path, 'rb')
elif self.type is 'DIR': sourcefile = open(os.path.join(self.path, l.path), 'rb')
elif self.type is 'TAR': sourcefile = self.archive.extractfile(l.info)
else: sourcefile = self.archive.open(l.info)
# Copy/extract
if self.verbosity >= 2:
if not prevdest or prevdest != destFullPath:
if filenames: print "---", filenames
filenames = os.path.basename(l.path)
prevdest = destFullPath
print "--", "Depack" if self.type in ['TAR','ZIP'] else "Copy", "to", destFullPath
else:
filenames += " " + os.path.basename(l.path)
destfile.seek(l.offset)
shutil.copyfileobj(sourcefile, destfile, 1024*1024*10)
# Closes source and destination files
sourcefile.close()
destfile.close()
# Sets time on destination file to value in source log retrieved during scan,
# sets it only if later than set at creation (log4j join if files are in reverse order)
timestamp = time.mktime((l.time if curtime < l.time else curtime).timetuple())
os.utime(destFullPath, (timestamp, timestamp))
# Last line not taken into account in loop
if self.verbosity >= 2:
print "---", filenames
def search(self, searchContext, hideTimestamp):
"""Goes through all log files of the source and searches events if filename matches"""
for logfile in self.logs:
# Gets events matching filename
if self.verbosity >= 2: print "\nSearching events in", logfile.path
# Checks if path matches
if searchContext.checkSource(logfile.pseudoPath, logfile.time):
# Open file
if self.type is 'ZIP': sourcefile = self.archive.open(logfile.info)
elif self.type is 'TAR': sourcefile = self.archive.extractfile(logfile.info)
elif self.type is 'DIR': sourcefile = open(os.path.join(self.path, logfile.path), 'rb')
else: sourcefile = open(logfile.path, 'rb')
# Reads text lines from log file and searches for events
done = False
while not done:
line = sourcefile.readline()
if line == '':
done = True
line = None
else:
line = line.rstrip("\n\r")
# Prints events if any found
for ev in searchContext.checkLine(line):
if self.verbosity >= 1 and not searchContext.chronological:
ev.display(hideTimestamp)
if self.verbosity >= 2 or searchContext.chronological:
searchContext.printAdvancement(logfile.pseudoPath)
# Closes file
sourcefile.close()
class LogSet:
def __init__(self, verbosity, eventTypes, pathFilter = ".*\\.log*"):
"""Inits object with verbosity (value 0 to 2), a LogEventList object, a pathfilter given
as a regexp to search"""
# Sets common variables
self.verbosity = verbosity
self.eventTypes = eventTypes
self.rexPathFilter = re.compile(pathFilter, re.IGNORECASE)
# List of log sources found during scan
self.sources = list()
def checkPathFilter(self, path):
"""Checks if the path matches the path filter after normalizing it to "/" separator,
returns None if not otherwise returns the acquired named groups"""
res = self.rexPathFilter.match(path.replace("\\", "/"))
return res.groupdict() if bool(res) else None
def scanPath(self, path, archivePathRex, file=None):
"""Opens recursively a file or directory for processing, using given path or file handle.
During the scan, first each file name will be matched to the path filter,
then otherwise matched to the archive path regexp"""
# Number of items found in this run
res = 0
# Get fields for path given as argument
fields = self.checkPathFilter(path)
# Case 1 DIR: directory given as path, walk into sub-directories to find files or archives
if not file and os.path.isdir(path):
# Creates new LogSource to be filled, then iterates into directories
source = LogSource(self.verbosity, 'DIR', path)
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
# Prepares path and fields
fullpath = os.path.join(dirpath, filename)
fields = self.checkPathFilter(fullpath)
# Stores path to log file if path filter matches
if fields is not None: # fields can be empty while not None
source.add(fullpath, fields)
res += 1
# Recurses in archive if archive found in directory
elif archivePathRex.search(filename):
res += self.scanPath(fullpath, archivePathRex)
# Keeps LogSource only if logs were found
if source.count() > 0:
self.sources.append(source)
return res
# Case 2 LOG: single log file directly given as path (no check if it is an archive)
# Stores as log only if path filter matches
elif not file and os.path.isfile(path) and fields is not None:
self.singleLogFiles.add(path, fields)
return 1
# Case 3 TAR/ZIP: given path points on archive file, or given handler on archive file in archive
elif file or (not file and os.path.isfile(path) and archivePathRex.search(path)):
tar = None
zip = None
# First tries to open the given file as tar
if self.verbosity>=2:
print "\nTrying to open as archive", path if not file else file.name
try:
if file:
tar = tarfile.open(fileobj=file, mode='r:*')
else:
tar = tarfile.open(path, mode='r:*')
if self.verbosity>=2: print "TAR: successfully open"
except Exception as et:
if self.verbosity>=2: print "TAR: tarfile.open error:", et
# Then tries as zip
try:
zip = zipfile.ZipFile(file if file else path)
if self.verbosity>=2: print "ZIP: successfully open"
except Exception as ez:
# Could not open file at all as archive
if self.verbosity>=2: print "ZIP: zipfile.ZipFile.open error:", ez
return 0
# Either tar or zip was open, get archive file names to check them
source = LogSource(self.verbosity, "TAR" if tar else "ZIP", path, tar if tar else zip)
addedNames = set()
for f in tar if tar else zip.namelist():
name = f if zip else f.name
fullpath = os.path.join(path, name)
fields = self.checkPathFilter(fullpath)
# Stores path if matching path filter
if fields is not None and name not in addedNames:
addedNames.add(name)
source.add(name, fields)
res += 1
# Recurses into archive if extension matches
elif fields is None and archivePathRex.search(fullpath):
newFile = tar.extractfile(f) if tar else zip.open(f)
res += self.scanPath(os.path.join(path, name), archivePathRex, newFile)
if source.count() > 0:
self.sources.append(source)
return res
def scanPaths(self, paths, extarchive):
"""Calls scanPath for a semi-colon separated list of filenames/dirnames, with semi-colon
separated list of archive extensions"""
# List of log files given directly to scan, to be filled by scanPath
self.singleLogFiles = LogSource(self.verbosity, 'LOG', "")
# Calls scanPath for all items in list
print "\n--------------- BEGIN PATH SCAN -", time.strftime("%H:%M:%S"), "---------------"
for p in paths.split(";"):
print "\nScanning", p
# Builds regexp for archive extensions and calls sub-function
self.scanPath(p, re.compile("(?i)(" + extarchive.replace(";", "|") + ")$"))
print "\n---------------- END PATH SCAN -", time.strftime("%H:%M:%S"), "----------------"
# Adds log files given directly to list of sources
if self.singleLogFiles.count() > 0:
self.sources.append(self.singleLogFiles)
# Displays found files
print "\nRecognized sources:"
for s in self.sources:
print s
def extract(self, outputdir, keepsourcedirs=False, joinlog4j=False, reducedirs=False,
globalsource=False):
"""Extract log files from archives to the outputdir"""
print "\n--------------- BEGIN EXTRACTION -", time.strftime("%H:%M:%S"), "---------------"
for s in self.sources:
s.extract(outputdir, keepsourcedirs, joinlog4j, reducedirs, globalsource)
print "\n---------------- END EXTRACTION -", time.strftime("%H:%M:%S"), "----------------"
def search(self, chronological, hideTimestamp, globalsource, outputdir):
"""Search events in log files"""
print "\n--------------- BEGIN SEARCH -", time.strftime("%H:%M:%S"), "---------------"
context = EventSearchContext(self.verbosity, self.eventTypes, chronological, outputdir)
for s in self.sources:
s.search(context, hideTimestamp)
for ev in context.wrapup(outputdir):
if chronological and self.verbosity >= 1:
ev.display(hideTimestamp)
print "\n---------------- END SEARCH -", time.strftime("%H:%M:%S"), "----------------"
def getDefaultEventType(params):
    """Builds an EventType from the GUI/command-line values, or returns None when no
    text pattern has been entered (the text pattern is the minimum required input)."""
    if not params["rextext"]:
        return None
    eventType = EventType()
    eventType.init(params["rexfilename"], params["rextext"], params["rextimestamp"],
                   int(params["multilinecount"]), params["casesensitive"], params["name"],
                   params["description"], params["displayonmatch"], params["displayifchanged"],
                   params["execoninit"], params["execonmatch"], params["execonwrapup"],
                   params["execonfile"], params["immediate"])
    return eventType
def readEventsDefinition(params):
    """Common function to read XML events file and define default event from GUI/command-line"""
    eventTypes = EventTypeList(int(params["verbosity"]))
    # Reads the XML definition file(s) if given (semi-colon separated list)
    pathlist = params["ineventtypes"]
    if pathlist:
        for p in pathlist.split(';'):
            eventTypes.readEventTypes(p)
    # Adds the default event type if one was defined through GUI/command-line
    defaultType = getDefaultEventType(params)
    if defaultType:
        eventTypes[defaultType.name] = defaultType
    return eventTypes
def splitLogPaths(params):
    """Returns the input log paths as a list of groups to process: a single group holding
    the whole semi-colon separated string when 'globalsource' is set, otherwise one
    group per semi-colon separated part."""
    inpaths = params["inlogpaths"]
    return [inpaths] if params['globalsource'] else inpaths.split(';')
def overview(si):
    """Command handler: scans the input paths and prints the recognized log sources."""
    params = si.getValues()
    # One LogSet per path group (single group when globalsource is set)
    for paths in splitLogPaths(params):
        logset = LogSet(int(params["verbosity"]), readEventsDefinition(params), params["pathfilter"])
        logset.scanPaths(paths, params["extarchive"])
def extract(si):
    """Command handler: scans the input paths then extracts/copies the recognized
    log files into the output directory."""
    params = si.getValues()
    verbosity = int(params["verbosity"])
    for paths in splitLogPaths(params):
        logset = LogSet(verbosity, readEventsDefinition(params), params["pathfilter"])
        logset.scanPaths(paths, params["extarchive"])
        logset.extract(params["outputdir"], params["keepsourcedirs"], params["joinlog4j"],
                       params["reducedirs"], params["globalsource"])
def search(si):
params = si.getValues()
# Gets event types including possibly the default event
eventTypes = readEventsDefinition(params)
# Opens logs
if len(eventTypes) > 0:
for paths in splitLogPaths(params):
logs = LogSet(int(params["verbosity"]), eventTypes, params["pathfilter"])
logs.scanPaths(paths, params["extarchive"])
logs.search(params["chronological"], params["hidetimestamp"], params["globalsource"],
params["outputdir"])
else:
print "ERROR: no event type definition"
def saveDefaultEventType(si):
params = si.getValues()
# Parses destination file
filename = params["outeventtypes"]
# Creates single default event from the values entered manually
le = getDefaultEventType(params)
# Saves if event type could be parsed
if le:
if int(params["verbosity"]) >= 1: print "\nEvent to save:\n", le
s = le.write(filename)
if int(params["verbosity"]) >= 1: print "\nWrote file:\n", s
print "\nEvent", le.name, "saved successfully in", filename
else:
print "ERROR: No event type definition"
def showEventTypes(si):
eventTypes = readEventsDefinition(si.getValues())
if len(eventTypes) > 0:
eventTypes.printEventTypes()
else:
print "ERROR: no event type definition"
def main(argv):
    """Main procedure for command line processing and/or HMI.

    Declares all options and commands on a bfcommons.ScriptInterface then runs it.
    Fixes applied to user-facing help strings: "filenamess" -> "filenames",
    "patters" -> "patterns", "achive" -> "archive", "casesensitie" -> "casesensitive".
    """
    si = bfcommons.ScriptInterface("ReguLog", __doc__, __version__, sys.argv[0])
    si.addCommonOptions()
    # Input file options
    desc = "Files/directories to be analyzed (semi-colon separated list)"
    si.addOption("Input paths", desc, 'MIDF', "i", "inlogpaths")
    desc = "If set for any command, log sources are treated together like one source during search"
    si.addOption("Global source", desc, 'B', "g", "globalsource", format='')
    desc = "In scan/extract/search operations, main path filter defined as a regex on pseudo-path "+\
           "of log files, used to select files recursively in directories and archives\n" +\
           "The path matching is case-insensitive and needs to match the full pseudo-path (not " +\
           "only a subpart of it, hence the regexp will likely start with '.*').\n" +\
           "In the pseudo-path to be matched, the path separator is always set to '/' (instead of "+\
           "'\\' under Windows), in order to keep a common way to write patterns whatever the OS. "+\
           "For files in archive, the pseudo-path has the form <archive-file-path>/" +\
           "<log-file-path-in-archive>, where the archive-file-path part can, recursively, " +\
           "contain archive file names in the same form.\n" +\
           "If 'named groups' are acquired for a matching log file (Python syntax like " +\
           "'(?P<name>pattern)'), the values are used to create additional levels of " +\
           "directories during extract, appended to the 'outputdir' parameter in the alphabetical "+\
           "order of the group names.\n" +\
           "WARNING: because the search is performed on the full pseudo-path, the expression " +\
           "may match non-wanted files due to text at the beginning of the source path. For " +\
           "example '.*ipsec.*\.log.*' can match any file ending with '.log' in an archive called "+\
           "'ipsec-18122016.zip'. You can use '[^/]*' for parts where directories must be excluded."
    si.addOption("Path Filter Regex", desc, "R", "f", "pathfilter", defaultPathFilter)
    desc = "Semicolon-separated list of valid archive extensions"
    val = ".zip;.tar;.tar.gz;.tgz"
    if 'aib' in __version__: val += ";.pmf"
    si.addOption("Archive extensions", desc, "S", "e", "extarchive", val, format='W160')
    desc = "Displays overview of input log files, based on filenames/dirs structure (not content)"
    si.addCommand("Logs overview", desc, "overview", lambda: overview(si), ["inlogpaths"],
                  ["pathfilter"])
    # Output directory
    desc = "Directory to store extracted files or files generated from search"
    si.addOption("Output directory", desc, 'OD', "o", "outputdir", format='L')
    desc = "If set for the extract command, an extra directory is created to store the extracted " +\
           "or copied files from each source of logs"
    si.addOption("Keep source directories", desc, 'B', "k", "keepsourcedirs", format='')
    # Extract options
    desc = "If set for the extract command, recognizes logs stored in log4j style (.log, .log.1, " +\
           "etc in same directory) and concatenates the files into one single .log file in the " +\
           "directory"
    si.addOption("Join log4j", desc, 'B', "j", "joinlog4j", format='')
    desc = "If set for the extract command, the number of levels of directories is reduced to the "+\
           "common tree of the extracted/copied files"
    si.addOption("Reduce directories", desc, 'B', "r", "reducedirs", format='')
    si.addCommand("Extract", "Extract/copy all files from given archives/dirs into output directory",
                  "extract", lambda: extract(si), ["inlogpaths", "outputdir"], ["pathfilter"])
    # Events input file
    desc = "Input XML files with event types definition to be searched (semi-colon separated " +\
           "list)\nThis list of event types (i.e. log patterns) can be combined with the 'default "+\
           "event type' defined below through the GUI or using the command-line."
    si.addOption("Patterns XML files", desc, 'MIF', "x", "ineventtypes", format='L')
    si.addCommand("Show Event Types", "Prints definition of the applicable event types",
                  "show-event-types", lambda: showEventTypes(si), format='')
    # Search events
    desc = "If set for search, displayed text output is not prefixed with a timestamp"
    si.addOption("Hide timestamps", desc, 'B', "t", "hidetimestamp", format='')
    desc = "If set for the search command, found events are stored and then displayed time-" +\
           "ordered at the end of the search"
    si.addOption("Chronological", desc, 'B', "c", "chronological", format='')
    si.addCommand("Search Events", "Search for events in the input files",
                  "search", lambda: search(si), ["inlogpaths"],
                  ["pathfilter", "outputdir", "ineventtypes"])
    # FIXME: modify bfScriptInterface to take all parameters into account whatever the position of
    # the command on the HMI
    #    "name", "description", "rexfilename",
    #    "rextext", "rextimestamp", "displayonmatch",
    #    "execoninit", "execonmatch", "execonwrapup"])
    # Default event type definition
    # NOTE(review): the short option letters "M", "S" and "F" are reused by several options
    # below — assumed bfcommons tolerates duplicates; verify in bfScriptInterface
    desc = "For the Default Event Type, regular expression on file name of log files, used for " +\
           "search operations\nUse '.*' or '.*\\.log.*' to match all log files ('.*' means any " +\
           "number of any character), or use a part of the name of a file to target specific log " +\
           "files (e.g. 'messaging' to match 'bsmessaging' and 'messagingservice'). The search is "+\
           "case-sensitive."
    si.addOption("Filename Regex", desc, "R", "F", "rexfilename", defaultRexFilename,
                 format="N;W130;GDefault Event Type")
    desc = "For the Default Event Type, regular expression to match the timestamp part, on the " +\
           "first line of event once matched\nThe default value should match most kinds of " +\
           "timestamps, such that the value can be used in most cases. The timestamp regular " +\
           "expression needs to contain the following 'named groups', caught using the regular " +\
           "expression special syntax '(?P<fieldname>pattern)':\n" +\
           " _Y: year (4 or 2 digits supported, default to file timestamp if not present)\n" +\
           " _M: month (2 digits or at least 3 first letters of the month name in English)\n" +\
           " _D: day, _h: hour, _m: minute, _s: second (default to 00 if not present)"
    si.addOption("Timestamp Regex", desc, 'R', "S", "rextimestamp", defaultRexTimestamp, format='')
    desc = "For the Default Event Type, main regular expression text pattern (regexp) used for " +\
           "matching events in log files\n" +\
           "By default, the search is performed case-insensitively (can be reverted by using the " +\
           "casesensitive option). It supports multiline search if multilinecount is greater " +\
           "than 1. " +\
           "This regexp is used as well for user fields extraction, with Python-style 'named " +\
           "groups' using the specific syntax '(?P<fieldname>pattern)'.\n" +\
           "WARNING: as it is a regular expression, it may be necessary to escape some characters "+\
           "if text is searched directly, i.e. the characters '()[]\\.^$' need to be " +\
           "prefixed with a backslash e.g. '\\['."
    si.addOption("Text Regex", desc, 'R', "T", "rextext")
    desc = "For the Default Event Type, number of log lines the rextext is matched with\n" +\
           "In case of events composed of several lines (i.e. containing lines without timestamp) "+\
           "and the rextext needs to match several lines, a multiline block of consecutive log " +\
           "lines is built and matched with the rextext. This parameter defines how many lines " +\
           "are gathered in the multiline block for matching (default to 1). When a multiline " +\
           "search is performed, the dot '.' matches any character including a newline (options " +\
           "DOTALL and MULTILINE activated)."
    si.addOption("Multiline", desc, 'S', "M", "multilinecount", "1", format='W30')
    desc = "If set for search with default event type, the text regexp is searched in " +\
           "case-sensitive mode"
    si.addOption("Case", desc, 'B', "A", "casesensitive", format='')
    desc = "For the Default Event Type, string displayed if the text regex matches the log text\n" +\
           "Extracted and pre-defined fields can be displayed within the text message as such:\n" +\
           " - {fieldname} for the value of the field in the current event\n" +\
           " - {fieldname@evname} for the latest value of a field in another event\n" +\
           " - {fieldname@evname:rfieldname=cfieldname} for lookup of the value of a field in " +\
           "another event such that the value of the field rfieldname equals the value of the " +\
           "field cfieldname in this event.\n" +\
           "The following pre-defined fields are available:\n" +\
           " _raw: line of text for the event\n" +\
           " _core: same as _raw without the part matching the timestamp if recognized\n" +\
           " _flat: same as _raw without end of lines\n" +\
           " _flat_core: same as _core without end of lines\n" +\
           " _name: name of event type\n" +\
           " _description: description of event type\n" +\
           " _timestamp: timestamp string (date/time ISO format), _time: time, _date: date\n" +\
           " _line_number: number of the first line in log file of the event text\n" +\
           " _source_path: source pseudopath of the log file\n" +\
           " _source_filename: basename of the source path"
    val = "{_raw} at {_source_path}:{_line_number}"
    si.addOption("Display on Match", desc, 'S', "M", "displayonmatch", val)
    desc = "For the Default Event Type, if set to true, the displayonmatch string is displayed if "+\
           "text regex matches and extracted fields are different than the previous match (except "+\
           "system fields like '_timestamp')"
    si.addOption("Display if changed", desc, 'B', "C", "displayifchanged", format='')
    desc = "If set to true for the Default Event Type and 'chronological' is selected, the " +\
           "treatment of execonmatch and displayonmatch is not deferred after global events sort\n"+\
           "This can be used for events that appear very frequently and do not need to be stored " +\
           "in the events list (discarded by using 'delete_event(event)' in execonmatch). " +\
           "If 'chronological' is not selected, this option has no effect."
    si.addOption("Immediate", desc, 'B', "D", "immediate", format='')
    # Python Execution code
    desc = "For the Default Event Type, Python code executed on search start\nThe code is " +\
           "executed from an encapsulating object common for all event types. It is possible to " +\
           "define variables for use in other Python code parts by using the prefix 'self.', " +\
           "e.g. 'self.my_counter'. The following pre-defined variables are available:\n" +\
           " - name: the name of the event type\n" +\
           " - chronological: Set to True if the search is sorted chronologically\n" +\
           " - output_directory: Contains the path given as outputdir otherwise None"
    si.addOption("Exec On Init", desc, 'T', "I", "execoninit")
    desc = "For the Default Event Type, Python code executed when search starts on a new file\n" +\
           "The following pre-defined variables are available:\n" +\
           " - source_filename: name of the file (no directory part)\n" +\
           " - source_path: pseudo-path of the file"
    si.addOption("Exec On File", desc, 'T', "F", "execonfile", format='')
    desc = "For the Default Event Type, Python code executed when the text regexp matches\nIn " +\
           "addition to the execoninit option, the following variables " +\
           "and functions are available:\n" +\
           " - event: the current event with fields set from the text regexp and system fields\n" +\
           " - event.timestamp: the timestamp of the event as a datetime object\n" +\
           " - event.set_field(name, value): sets an existing or new field (exception raised if " +\
           "trying to set an existing system field)\n" +\
           " - event.set_fields(dict): sets existing or new fields from a dictionary (no " +\
           "exception raised)\n" +\
           " - event.add_field(name, value): adds a new field (exception raised if the field " +\
           "already exists)\n" +\
           " - event.add_fields(dict): adds new fields from a dictionary (no exception raised)\n" +\
           " - event.has_field(name): returns true if the field already exists in event\n" +\
           " - event.get_field(name): returns the value of the given field\n" +\
           " - event.get_user_fields(): returns the user fields as a dictionary\n" +\
           " - event.get_system_fields(): returns the system fields as a dictionary\n" +\
           " - event.seconds_since(event): returns the number of seconds since the given event\n" +\
           " - get_events(name, fields, before, limit): returns an iterator on " +\
           "events in the list according to several optional criteria, e.g. " +\
           "get_event(fields = {'_name':'val'}, before = event). The available parameters are: \n" +\
           " -- name: matches events with the given name (i.e. name of the related event type " +\
           "or '_name' field), provided as a character string\n" +\
           " -- fields: matches events with the given fields set to the given values (all " +\
           "fields need to match), given as a dictionary of name/value pairs\n" +\
           " -- before: matches events with a timestamp earlier than or equal to the given " +\
           "time reference, provided as a timestamp (datetime.datetime object) or as an event " +\
           "(in this case it is ensured that the sequence number of the matched event is lower " +\
           "than the sequence number of the event given as reference, in order to ensure " +\
           "fine-grained event ordering in case timestamp values are equal)\n" +\
           " -- limit: maximum number of events to be returned (default 0 for no limit)\n" +\
           " - get_event(name, fields, before): returns a single event with the same " +\
           "parameters as get_events (except the limit parameter)"
    si.addOption("Exec On Match", desc, 'T', "X", "execonmatch", format='')
    desc = "Python code executed on wrapup (end of the search), same pre-defined variables and " +\
           "functions can be used as in execonstart and execonmatch (except current 'event' local "+\
           "variable)"
    si.addOption("Exec On Wrapup", desc, 'T', "W", "execonwrapup", format='')
    desc = "Name of the Default Event Type given directly through the GUI or the command-line\n"+\
           "The name is used to store the event once fully defined and for data exports."
    si.addOption("Name", desc, "S", "N", "name", format='N;W180')
    # desc = "Name of an event type that defines default properties for the default event type\n"+\
    #        "This can only be defined here if a pattern file is given"
    # si.addOption("Parent", desc, "S", "P", "parent", format='W180')
    #
    # desc = "Semi-colon separated list of tags"
    # si.addOption("Tags", desc, "S", "G", "tags", format='N')
    desc = "Description of the default event type, later available as '_description' field"
    si.addOption("Description", desc, "S", "D", "description", format='')
    # Events output fields
    desc = "XML file to store Default Event Type definition"
    si.addOption("Output patterns file", desc, 'OF', "p", "outeventtypes", format='')
    desc = "Saves the given default event type into new or existing XML file\nThe output " +\
           "file is re-parsed and re-written completely using the last available definition."
    si.addCommand("Save Default Event Type", desc, "save-event-type", lambda: saveDefaultEventType(si),
                  ["outeventtypes", "name", "description"],
                  ["rexfilename", "rextext", "rextimestamp", "displayonmatch",
                   "execoninit", "execonfile", "execonmatch", "execonwrapup"])
    si.run()
# Real start of the script
if __name__ == "__main__":
    main(sys.argv[1:])
    # Explicit success code once main returns
    sys.exit(0)
|
from django import forms
from django.core.validators import MaxValueValidator, MinValueValidator
class GwModelForm(forms.Form):
    """Input form for a simple groundwater model run.

    Field declaration order is significant for Django form rendering and is
    kept identical to the original.
    """

    # Number of timesteps to simulate; at least one step.
    n = forms.IntegerField(
        label="Length of timeseries (n)",
        validators=[MinValueValidator(1)],
    )

    # Starting groundwater level.
    hini = forms.FloatField(
        label="Initial groundwater level (hini)",
        validators=[MinValueValidator(1)],
    )

    # Fraction of rainfall that becomes recharge; constrained to [0, 1].
    rf = forms.FloatField(
        label="Recharge factor (recharge/rainfall)",
        validators=[MaxValueValidator(1), MinValueValidator(0)],
    )

    # Aquifer specific yield; constrained to [0, 1].
    sy = forms.FloatField(
        label="Specific yield",
        validators=[MaxValueValidator(1), MinValueValidator(0)],
    )

    # Level below which baseflow stops; must be non-negative.
    hmin = forms.FloatField(
        label="Groundwater level at which baseflow ceases",
        validators=[MinValueValidator(0)],
    )

    # Baseflow control parameter; constrained to [0, 1].
    pd = forms.FloatField(
        label="Parameter for controlling baseflow",
        validators=[MaxValueValidator(1), MinValueValidator(0)],
    )
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
class Randomizations(object):
    """Mixin producing randomized cell-category label assignments for
    permutation testing.

    Expects the host class to provide (NOTE(review): inferred from usage
    below -- confirm against the host class):
      - ``self.feature_table``: pandas DataFrame with a cell-category column
      - ``self.NN_distances``: per-row nearest-neighbour distance array,
        indexable as ``[cond, 1:k+1]``
    """
    def _compute_randomizations(self,
                                cond,
                                column_cell_categories,
                                permutations,
                                n_controlled_distances=0,
                                **kwargs):
        # Returns `permutations` randomized label vectors for the rows
        # selected by boolean mask `cond`.
        if n_controlled_distances == 0:
            # Unconstrained null model: shuffle the observed category labels.
            return [np.asarray(np.random.permutation(self.feature_table.loc[cond,column_cell_categories]), dtype=int) for _ in range(permutations)]
        else:
            # Distance-controlled null model: sample each position's label
            # with probability proportional to a per-category KDE of
            # nearest-neighbour distance profiles, weighted by category count.
            # Column 0 of NN_distances is skipped (presumably self-distance --
            # TODO confirm).
            nearest_distances = self.NN_distances[cond,1:n_controlled_distances+1]
            types = np.unique(self.feature_table.loc[cond, column_cell_categories])
            # Per-category row counts, taken from the first aggregated column.
            n_types = self.feature_table.loc[cond,:].groupby(column_cell_categories).agg('count').values[:,0]
            # One KDE per category, fit on that category's distance profiles.
            gkde_types = [
                gaussian_kde((nearest_distances[self.feature_table.loc[cond,column_cell_categories].values==i]).T)
                for i in types
            ]
            randomizations = np.zeros((permutations, np.sum(cond)), dtype=int)
            for index_n, n in enumerate(nearest_distances):
                # Cumulative (unnormalized) category likelihoods for this
                # distance profile, then normalized to a CDF.
                p_vect = np.cumsum([gkde_i.evaluate(n)*n_type_i for gkde_i, n_type_i in zip(gkde_types, n_types)])
                p_vect /= p_vect[-1]
                # Inverse-CDF sampling: one draw per permutation.
                r = np.random.rand(permutations)
                # argmax returns the first index where ri <= p_vect.
                randomizations[:, index_n] = [types[np.argmax(ri<=p_vect)] for ri in r]
            # print(np.sum(randomizations == 1, axis=1), np.sum(randomizations == 2, axis=1))
            return list(randomizations)
import csv
import torch
import dgl
from dgl.data import dgl_dataset
from torch._C import device
# Load the interaction records from the training CSV.
# NOTE(review): the path mixes single and double backslashes; the single ones
# (\s, \g, \d, \o) are not recognized escapes so they survive literally, but
# this is fragile -- a raw string r'...' would be safer. The file handle is
# also never closed; consider a `with` block.
f = open('D:\subject\graduate\graph computation\data\offline\\train0.csv', 'r')
reader = csv.reader(f)
# M: number of item-feature columns; N: total feature width (see row parsing
# below where features[:M] are item features and features[M:] user features).
M = 79
N = 152
record = []
node_num = 0
# user = []
# user/item: map raw user_id/item_id -> sequential node index.
user = {}
item = {}
features = []
user_features = []
item_features = []
labels = []
labeled = []
# _edge: running edge index; rows with a known label (!= -1) are collected
# in `labeled` for supervised training later.
_edge = 0
for i in reader:
    #record.append(i)
    # Each CSV row: uuid, visit_time, user_id, item_id, space-separated
    # feature vector, label.
    temp = {}
    temp['uuid'] = int(i[0])
    temp['visit_time'] = int(i[1])
    temp['user_id'] = int(i[2])
    temp['item_id'] = int(i[3])
    features = i[4].split()
    features = [float(_) for _ in features]
    temp['features'] = features
    temp['label'] = int(i[5])
    if (temp['label'] != -1):
        labeled.append(_edge)
    _edge = _edge + 1
    record.append(temp)
    # First occurrence of a user: assign the next node index and keep the
    # user part of the feature vector (columns M and onward).
    if not(int(i[2]) in user):
        user[int(i[2])] = node_num
        node_num = node_num + 1
        user_features.append(features[M:])
    else:
        # detect whether the feature of user is same or not
        pass
# print(record[0])
# User node ids start at 0; item node ids start after the last user id.
user_idx_begin = 0
item_idx_begin = node_num
# link[0] is user, link[1] is item
link = [[], []]
for i in record:
    # print(i['item_id'])
    # First occurrence of an item: assign the next node index and keep the
    # item part of the feature vector (first M columns).
    if not(i['item_id'] in item):
        item[i['item_id']] = node_num
        node_num = node_num + 1
        item_features.append(i['features'][:M])
    else:
        # detect whether the featuser of item is same or not
        pass
    # print(user[i['user_id']])
    # print(item[i['item_id']])
    # break
    # link.append([user[i['user_id']], item[i['item_id']]])
    # Item indices are shifted back to start at 0 within their own node type,
    # as DGL heterographs index each node type independently.
    link[0].append(user[i['user_id']])
    link[1].append(item[i['item_id']] - item_idx_begin)
link_tensor = torch.tensor(link)
# Bidirectional bipartite graph: 'evaluate' (user->item) plus its reverse
# relation 'evaluated' (item->user) with the same edge list.
graph_data = {('user', 'evaluate', 'item'): (link_tensor[0], link_tensor[1]),
              ('item', 'evaluated', 'user'): (link_tensor[1], link_tensor[0])
             }
g = dgl.heterograph(graph_data)
print(g)
g = g.to('cuda:0')
# user_features = user_features.to('cuda:0')
# item_features = item_features.to('cuda:0')
number_user_features = 73
number_item_features = 152 - number_user_features


def _minmax_normalize(features):
    """Column-wise min-max normalization of a 2-D list of floats.

    Each column is mapped to (x - min) / (max - min). Columns that are
    constant (max == min) would divide by zero, so their span is replaced
    by 1, which maps every entry of such a column to 0.

    Args:
        features: list of equal-length lists of floats (rows x columns).

    Returns:
        A list of lists of floats with the same shape, normalized per column.
    """
    t = torch.tensor(features)
    # torch.min/max with dim= return (values, indices); we need the values.
    col_min = torch.min(t, dim=0)[0]
    col_max = torch.max(t, dim=0)[0]
    span = col_max - col_min
    # Avoid division by zero for constant columns.
    span[torch.nonzero(span == 0)] = 1
    return ((t - col_min) / span).numpy().tolist()


# Normalize user and item feature matrices with the same routine
# (previously the identical code was duplicated inline for both).
user_features = _minmax_normalize(user_features)
item_features = _minmax_normalize(item_features)
# NOTE(review): this loop is dead code (body is `pass`) -- left in place,
# presumably a stub for planned per-feature processing.
for i in range(number_user_features):
    pass
# Collect the per-edge feature vectors and labels in record order.
# NOTE(review): `features` is not used again in this script after this point.
for i in record:
    features.append(i['features'])
    # item_features.append(i['features'][:M])
    # user_features.append(i['features'][M:])
    labels.append(i['label'])
#print(g)
print(torch.tensor(user_features).size())
print(torch.tensor(item_features).size())
# Attach normalized node features to the graph, moved to the GPU.
g.nodes['user'].data['features'] = torch.tensor(user_features).to('cuda:0')
g.nodes['item'].data['features'] = torch.tensor(item_features).to('cuda:0')
#g.ndata['features'] = torch.tensor(item_features)
#g.edata['features'] = torch.tensor(labels)
# NOTE(review): redundant -- g was already moved to cuda:0 above.
g = g.to('cuda:0')
# user_features = user_features.to(device = torch.device('cuda'))
# item_features = item_features.to(device = torch.device('cuda'))
print(g)
import torch.nn as nn
import dgl.function as fn
'''
class HeteroDotProductPredictor(nn.Module):
def forward(self, graph, h, etype):
# h是从5.1节中对每种类型的边所计算的节点表示
with graph.local_scope():
graph.ndata['h'] = h #一次性为所有节点类型的 'h'赋值
graph.apply_edges(fn.u_dot_v('features', 'features', 'score'), etype=etype)
return graph.edges[etype].data['score']
'''
class MLPPredictor(nn.Module):
    """Edge scorer: concatenates the user and item endpoint features of each
    edge and maps them through a single linear layer to class scores.
    """

    def __init__(self, in_features_0, in_features_1, out_classes):
        super().__init__()
        # One linear layer over the concatenated (user, item) feature pair.
        self.W = nn.Linear(in_features_0 + in_features_1, out_classes)

    def apply_edges(self, edges):
        """Per-edge UDF: score an edge from its source (user) and destination
        (item) node features.

        Returns a dict with key 'score' holding the linear-layer output.
        """
        joined = torch.cat(
            [edges.src['user_features'], edges.dst['item_features']], dim=1
        )
        return {'score': self.W(joined)}

    def forward(self, graph, h):
        """Score every edge of `graph`.

        Args:
            graph: a (user -> item) graph.
            h: dict {'user': tensor, 'item': tensor} of node representations.

        Returns:
            The per-edge score tensor.
        """
        # local_scope keeps the temporary node data off the caller's graph.
        with graph.local_scope():
            graph.nodes['user'].data['user_features'] = h['user']
            graph.nodes['item'].data['item_features'] = h['item']
            graph.apply_edges(self.apply_edges)
            return graph.edata['score']
'''
class HeteroGraphSAGE(nn.Module):
def __init__(self, mods, aggregate = 'sum'):
super(HeteroGraphSAGE, self).__init__()
self.mods = nn.ModuleDict(mods)
if (isinstance(aggregate, str)):
self.agg_fn = get_aggregate_fn(aggregate)
else:
self.agg_fn = aggregate
def apply_edges(self, edges):
pass
def forward(self, graph, h, etpye):
pass
'''
# Debug: list the graph's edge types ('evaluate' and 'evaluated', as built above).
print(g.etypes)
import torch.nn.functional as F
import dgl.nn as dglnn
class GraphSAGE(nn.Module):
    """Heterogeneous user/item model: one round of cross-type graph
    convolution, a per-type projection of [input || conv-output], and an
    MLP edge scorer producing per-edge class probabilities.

    The attribute creation order in __init__ is kept identical to the
    original so seeded parameter initialization stays reproducible.
    """
    # here h_feats_0 is number_user_features, h_feats_1 is number_item_features
    def __init__(self, in_feats, h_feats_0, h_feats_1, rel_names):
        """
        Args:
            in_feats: input width of the projection layers; callers pass
                h_feats_0 + h_feats_1 (the concatenated feature width).
            h_feats_0: user-side feature dimension.
            h_feats_1: item-side feature dimension.
            rel_names: relation names of the graph; len(rel_names) is used
                as the number of output classes of the edge predictor.
        """
        super(GraphSAGE, self).__init__()
        # Edge scorer over (user, item) feature pairs.
        self.pred = MLPPredictor(h_feats_0, h_feats_1, len(rel_names))
        # Per-type convolutions: conv0 keeps user-sized features,
        # conv1 keeps item-sized features.
        self.conv0 = dglnn.GraphConv(h_feats_0, h_feats_0)
        self.conv1 = dglnn.GraphConv(h_feats_1, h_feats_1)
        # 'evaluate' edges run user->item and 'evaluated' item->user.
        self.CONV0 = dglnn.HeteroGraphConv({'evaluate': self.conv0, 'evaluated': self.conv1}, aggregate = 'sum')
        # NOTE: CONV1 is constructed but not used in forward(); it is kept so
        # the module's registered submodules (and state_dict layout) match
        # the original implementation.
        self.CONV1 = dglnn.HeteroGraphConv({'evaluate': self.conv1, 'evaluated': self.conv0}, aggregate = 'sum')
        # Projections applied to the concatenation [input || conv output].
        self.conv2 = torch.nn.Linear(in_feats, h_feats_0)
        self.conv3 = torch.nn.Linear(in_feats, h_feats_1)

    def forward(self, g, inputs):
        """Score every user->item edge of `g`.

        Args:
            g: DGL heterograph with 'user' and 'item' node types.
            inputs: dict {'user': tensor, 'item': tensor} of node features.

        Returns:
            Tensor of per-edge probabilities (softmax over the class axis).
        """
        with g.local_scope():
            # One message-passing round across both edge directions.
            h = self.CONV0(g, inputs)
            h = {k: F.relu(v) for k, v in h.items()}
            # Concatenate raw inputs with the convolved representations,
            # then project back to the per-type feature sizes.
            cat = {
                'user': torch.cat([inputs['user'], h['user']], dim = 1),
                'item': torch.cat([inputs['item'], h['item']], dim = 1),
            }
            projected = {
                'user': self.conv2(cat['user']),
                'item': self.conv3(cat['item']),
            }
            # Restrict to user->item edges for scoring.
            dec_graph = g['user', :, 'item']
            res = self.pred(dec_graph, projected)
            # Fix: removed the unreachable `return _temp` that followed this
            # return in the original, plus a stray debug print.
            return torch.softmax(res, dim = 1)
# Build the model; in_feats is the concatenated user+item feature width.
model = GraphSAGE(number_user_features + number_item_features, number_user_features, number_item_features, g.etypes)
print(next(model.parameters()).device)
model = model.cuda()
print(next(model.parameters()).device)
user_features = g.nodes['user'].data['features'].to('cuda:0')
item_features = g.nodes['item'].data['features'].to('cuda:0')
# print('----')
# print(user_features.size())
node_features = {'user': user_features, 'item': item_features}
# NOTE(review): this rebuild is a no-op copy of the dict above.
node_features = {key:node_features[key] for key in node_features}
opt = torch.optim.Adam(model.parameters())
epoch = 1000
# One-hot targets: label 0 -> [1, 0], label 1 -> [0, 1].
_labels = [[1 - _, _] for _ in labels]
# print(_labels)
# NOTE(review): labels_tensor stays on CPU while `res` below comes from a
# CUDA model/graph -- the subtraction likely needs labels_tensor on
# 'cuda:0' as well; confirm this script actually runs as written.
labels_tensor = torch.tensor(_labels)
res = []
# res.to(device = torch.device('cuda:0'))
for i in range(1, epoch + 1):
    # model(g, {'user': user_features, 'item': item_features})
    res = model(g, node_features)
    # MSE loss on the labeled edges only.
    dif = (res[labeled] - labels_tensor[labeled])
    loss = (dif.mul(dif)).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    if (i % 5 == 0):
        print("epoch " + str(i) + "/" + str(epoch) + " : loss = ", str(loss))
        # Predicted class = argmax over the two softmax outputs.
        _res = res[labeled]
        _res = torch.max(_res, dim = 1)
        _res = _res[1]
        _label = torch.tensor(labels)
        _label = _label[labeled]
        print(_res)
        print(_label)
        # _label = labels[labeled]
        # NOTE(review): despite the name, this is a correct-prediction COUNT
        # (accuracy numerator), not an AUC.
        auc = (_res == _label).sum()
        print("auc : " + str(auc.item()) + "/" + str(len(labeled)))
        # exit()
# print(res)
# print(labels[0])
# print(labels[1])
# print(labels[2])
# break
print(labels_tensor[labeled])
print(res[labeled])
# h_dict is res
'''
h_user = h_dict['user']
h_item = h_dict['item']
print(h_user)
print()
print(h_item)
''' |
from ursina import *; ssao_shader = Shader(language=Shader.GLSL, fragment='''
#version 140
vec3 sphere[16] = vec3[](
vec3( 0.5381, 0.1856,-0.4319), vec3( 0.1379, 0.2486, 0.4430),
vec3( 0.3371, 0.5679,-0.0057), vec3(-0.6999,-0.0451,-0.0019),
vec3( 0.0689,-0.1598,-0.8547), vec3( 0.0560, 0.0069,-0.1843),
vec3(-0.0146, 0.1402, 0.0762), vec3( 0.0100,-0.1924,-0.0344),
vec3(-0.3577,-0.5301,-0.4358), vec3(-0.3169, 0.1063, 0.0158),
vec3( 0.0103,-0.5869, 0.0046), vec3(-0.0897,-0.4940, 0.3287),
vec3( 0.7119,-0.0154,-0.0918), vec3(-0.0533, 0.0596,-0.5411),
vec3( 0.0352,-0.0631, 0.5460), vec3(-0.4776, 0.2847,-0.0271)
);
uniform sampler2D tex;
uniform sampler2D dtex;
uniform sampler2D random_texture;
uniform mat4 p3d_ViewProjectionMatrix;
in vec2 uv;
out vec4 o_color;
uniform float numsamples;
uniform float radius;
uniform float amount;
uniform float strength;
uniform float falloff;
vec3 get_normal(vec2 texcoords) {
const vec2 offset1 = vec2(0.0, 0.001);
const vec2 offset2 = vec2(0.001, 0.0);
float depth = texture(dtex, texcoords).r;
float depth1 = texture(dtex, texcoords + offset1).r;
float depth2 = texture(dtex, texcoords + offset2).r;
vec3 p1 = vec3(offset1, depth1 - depth);
vec3 p2 = vec3(offset2, depth2 - depth);
vec3 normal = cross(p1, p2);
normal.z = -normal.z;
return normalize(normal);
}
vec3 reconstructPosition(in vec2 uv, in float z)
{
float x = uv.x * 2.0f - 1.0f;
float y = (1.0 - uv.y) * 2.0f - 1.0f;
vec4 position_s = vec4(x, y, z, 1.0f);
mat4x4 view_projection_matrix_inverse = inverse(p3d_ViewProjectionMatrix);
vec4 position_v = view_projection_matrix_inverse * position_s;
return position_v.xyz / position_v.w;
}
void main() {
float depth = texture(dtex, uv).r;
vec3 position = reconstructPosition(uv, depth);
vec3 normal = get_normal(uv);
vec2 noiseScale = vec2(800.0/4.0, 600.0/4.0); // screen = 800x600
vec3 randomVec = texture(random_texture, uv * noiseScale).xyz;
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
vec3 random_vector = normalize((texture(random_texture, uv * 18.0 + depth + normal.xy).xyz * 2.0) - vec3(1.0)).xyz;
float occlusion = 0.0;
float radius = radius / depth;
float depth_difference;
vec3 sample_normal;
vec3 ray;
for(int i = 0; i < numsamples; ++i) {
vec3 random_vector = texture(random_texture, uv * i * 100).xyz;
ray = radius * reflect(sphere[i], random_vector);
sample_normal = get_normal(uv + ray.xy).xzy;
depth_difference = (depth - texture(dtex, uv + ray.xy).r);
occlusion += step(falloff, depth_difference) * (.1 - dot(sample_normal.xyz, normal)) * (1.0 - smoothstep(falloff, strength, depth_difference));
}
o_color.rgb = normal;
o_color.rgb = texture(tex, uv).rgb + (occlusion * -(amount/numsamples));
o_color.a = 1.0;
}
''',
default_input = {
'numsamples' : 16,
'radius' : 0.01, # 0.05 is broken and cool
'amount' : 3.0,
'strength' : 0.001,
'falloff' : 0.000002,
'random_texture' : Func(load_texture, 'noise'),
}
)
# Demo scene: a few primitives with the SSAO shader applied to the camera.
if __name__ == '__main__':
    from ursina import *
    app = Ursina()
    e = Entity(model='sphere', color=color.orange)
    e = Entity(model='cube', y=-1)
    e = Entity(model='plane', scale=100, y=-1)
    camera.shader = ssao_shader
    #camera.clip_plane_far = 500
    camera.clip_plane_near = 1
    EditorCamera()
    # Space toggles the SSAO shader on/off for comparison.
    def input(key):
        if key == 'space':
            if camera.shader:
                camera.shader = None
            else:
                camera.shader = ssao_shader
    # Fixed seed so the scattered cubes land in the same spots every run.
    random.seed(2)
    for i in range(20):
        e = Entity(model='cube', position=Vec3(random.random(),random.random(),random.random())*3, rotation=Vec3(random.random(),random.random(),random.random())*360)
        # e.shader = matcap_shader
    app.run()
|
from typing import Any, Dict, Iterable, List, Mapping, Tuple, Union
from collections import OrderedDict
import warnings
from torch import nn
from torch.utils.data import DataLoader, Dataset
from catalyst.core import IExperiment
from catalyst.dl import (
BatchOverfitCallback,
Callback,
CheckpointCallback,
CheckRunCallback,
ConsoleLogger,
ExceptionCallback,
MetricManagerCallback,
TensorboardLogger,
TimerCallback,
utils,
ValidationManagerCallback,
VerboseLogger,
)
from catalyst.dl.utils import check_callback_isinstance
from catalyst.tools import settings
from catalyst.tools.typing import Criterion, Model, Optimizer, Scheduler
class Experiment(IExperiment):
    """
    Super-simple one-staged experiment,
    you can use to declare experiment in code.
    """
    def __init__(
        self,
        model: Model,
        datasets: "OrderedDict[str, Union[Dataset, Dict, Any]]" = None,
        loaders: "OrderedDict[str, DataLoader]" = None,
        callbacks: "Union[OrderedDict[str, Callback], List[Callback]]" = None,
        logdir: str = None,
        stage: str = "train",
        criterion: Criterion = None,
        optimizer: Optimizer = None,
        scheduler: Scheduler = None,
        trial: Any = None,
        num_epochs: int = 1,
        valid_loader: str = "valid",
        main_metric: str = "loss",
        minimize_metric: bool = True,
        verbose: bool = False,
        check_time: bool = False,
        check_run: bool = False,
        overfit: bool = False,
        stage_kwargs: Dict = None,
        checkpoint_data: Dict = None,
        distributed_params: Dict = None,
        initial_seed: int = 42,
    ):
        """
        Args:
            model: model
            datasets (OrderedDict[str, Union[Dataset, Dict, Any]]): dictionary
                with one or several  ``torch.utils.data.Dataset``
                for training, validation or inference
                used for Loaders automatic creation
                preferred way for distributed training setup
            loaders (OrderedDict[str, DataLoader]): dictionary
                with one or several ``torch.utils.data.DataLoader``
                for training, validation or inference
            callbacks (Union[List[Callback], OrderedDict[str, Callback]]):
                list or dictionary with Catalyst callbacks
            logdir: path to output directory
            stage: current stage
            criterion: criterion function
            optimizer: optimizer
            scheduler: scheduler
            trial : hyperparameters optimization trial.
                Used for integrations with Optuna/HyperOpt/Ray.tune.
            num_epochs: number of experiment's epochs
            valid_loader: loader name used to calculate
                the metrics and save the checkpoints. For example,
                you can pass `train` and then
                the metrics will be taken from `train` loader.
            main_metric: the key to the name of the metric
                by which the checkpoints will be selected.
            minimize_metric: flag to indicate whether
                the ``main_metric`` should be minimized.
            verbose: if True, it displays the status of the training
                to the console.
            check_time: if True, computes the execution time
                of training process and displays it to the console.
            check_run: if True, we run only 3 batches per loader
                and 3 epochs per stage to check pipeline correctness
            overfit: if True, then takes only one batch per loader
                for model overfitting, for advance usage please check
                ``BatchOverfitCallback``
            stage_kwargs: additional stage params
            checkpoint_data: additional data to save in checkpoint,
                for example: ``class_names``, ``date_of_training``, etc
            distributed_params: dictionary with the parameters
                for distributed and FP16 method
            initial_seed: experiment's initial seed value
        """
        # Either raw datasets or ready-made loaders must be supplied.
        assert (
            datasets is not None or loaders is not None
        ), "Please specify the data sources"
        self._model = model
        # Builds loaders from datasets when needed and resolves which loader
        # is used for validation on train stages (see _get_loaders).
        self._loaders, self._valid_loader = self._get_loaders(
            loaders=loaders,
            datasets=datasets,
            stage=stage,
            valid_loader=valid_loader,
            initial_seed=initial_seed,
        )
        # Callbacks are sorted by their declared order before storing.
        self._callbacks = utils.sort_callbacks_by_order(callbacks)
        self._criterion = criterion
        self._optimizer = optimizer
        self._scheduler = scheduler
        self._trial = trial
        self._initial_seed = initial_seed
        self._logdir = logdir
        self._stage = stage
        self._num_epochs = num_epochs
        self._main_metric = main_metric
        self._minimize_metric = minimize_metric
        self._verbose = verbose
        self._check_time = check_time
        self._check_run = check_run
        self._overfit = overfit
        # Fall back to empty dicts so downstream merging/iteration is safe.
        self._stage_kwargs = stage_kwargs or {}
        self._checkpoint_data = checkpoint_data or {}
        self._distributed_params = distributed_params or {}
    @property
    def initial_seed(self) -> int:
        """Experiment's initial seed value."""
        return self._initial_seed
    @property
    def logdir(self):
        """Path to the directory where the experiment logs."""
        return self._logdir
    @property
    def stages(self) -> Iterable[str]:
        """Experiment's stage names (array with one value)."""
        return [self._stage]
    @property
    def hparams(self) -> OrderedDict:
        """Returns hyper parameters"""
        hparams = OrderedDict()
        if self._optimizer is not None:
            optimizer = self._optimizer
            # First whitespace-separated token of repr(optimizer) is the
            # optimizer class name (e.g. "Adam").
            hparams["optimizer"] = optimizer.__repr__().split()[0]
            # Record the first param group's settings (lr, betas, ...),
            # skipping the parameter tensors themselves.
            params_dict = optimizer.state_dict()["param_groups"][0]
            for k, v in params_dict.items():
                if k != "params":
                    hparams[k] = v
        # Also record the batch size of every train* loader.
        loaders = self.get_loaders(self._stage)
        for k, v in loaders.items():
            if k.startswith("train"):
                hparams[f"{k}_batch_size"] = v.batch_size
        return hparams
    @property
    def trial(self) -> Any:
        """
        Returns hyperparameter trial for current experiment.
        Could be usefull for Optuna/HyperOpt/Ray.tune
        hyperparameters optimizers.
        Returns:
            trial
        Example::
            >>> experiment.trial
            optuna.trial._trial.Trial # Optuna variant
        """
        return self._trial
    @property
    def distributed_params(self) -> Dict:
        """Dict with the parameters for distributed and FP16 method."""
        return self._distributed_params
    @staticmethod
    def _get_loaders(
        loaders: "OrderedDict[str, DataLoader]",
        datasets: Dict,
        stage: str,
        valid_loader: str,
        initial_seed: int,
    ) -> "Tuple[OrderedDict[str, DataLoader], str]":
        """Prepares loaders for a given stage."""
        # Datasets take precedence: loaders are built from them when given.
        if datasets is not None:
            loaders = utils.get_loaders_from_params(
                initial_seed=initial_seed, **datasets,
            )
        if not stage.startswith(settings.stage_infer_prefix):  # train stage
            # With a single loader, it doubles as the validation loader.
            if len(loaders) == 1:
                valid_loader = list(loaders.keys())[0]
                warnings.warn(
                    "Attention, there is only one dataloader - "
                    + str(valid_loader)
                )
            assert valid_loader in loaders, (
                "The validation loader must be present "
                "in the loaders used during experiment."
            )
        return loaders, valid_loader
    def get_stage_params(self, stage: str) -> Mapping[str, Any]:
        """Returns the state parameters for a given stage."""
        default_params = {
            "logdir": self.logdir,
            "num_epochs": self._num_epochs,
            "valid_loader": self._valid_loader,
            "main_metric": self._main_metric,
            "verbose": self._verbose,
            "minimize_metric": self._minimize_metric,
            "checkpoint_data": self._checkpoint_data,
        }
        # User-supplied stage_kwargs override the defaults.
        stage_params = {**default_params, **self._stage_kwargs}
        return stage_params
    def get_model(self, stage: str) -> Model:
        """Returns the model for a given stage."""
        return self._model
    def get_criterion(self, stage: str) -> Criterion:
        """Returns the criterion for a given stage."""
        return self._criterion
    def get_optimizer(self, stage: str, model: nn.Module) -> Optimizer:
        """Returns the optimizer for a given stage."""
        return self._optimizer
    def get_scheduler(self, stage: str, optimizer=None) -> Scheduler:
        """Returns the scheduler for a given stage."""
        return self._scheduler
    def get_loaders(
        self, stage: str, epoch: int = None,
    ) -> "OrderedDict[str, DataLoader]":
        """Returns the loaders for a given stage."""
        return self._loaders
    def get_callbacks(self, stage: str) -> "OrderedDict[str, Callback]":
        """
        Returns the callbacks for a given stage.

        User-provided callbacks are kept; standard callbacks (logging,
        metrics, checkpointing, ...) are appended only when no callback of
        the same class is already present.
        """
        callbacks = self._callbacks or OrderedDict()
        default_callbacks = []
        if self._verbose:
            default_callbacks.append(("_verbose", VerboseLogger))
        if self._check_time:
            default_callbacks.append(("_timer", TimerCallback))
        if self._check_run:
            default_callbacks.append(("_check", CheckRunCallback))
        if self._overfit:
            default_callbacks.append(("_overfit", BatchOverfitCallback))
        if not stage.startswith("infer"):
            # Train-only callbacks: metric tracking, validation management,
            # console logging, and (with a logdir) checkpointing/tensorboard.
            default_callbacks.append(("_metrics", MetricManagerCallback))
            default_callbacks.append(
                ("_validation", ValidationManagerCallback)
            )
            default_callbacks.append(("_console", ConsoleLogger))
            if self.logdir is not None:
                default_callbacks.append(("_saver", CheckpointCallback))
                default_callbacks.append(("_tensorboard", TensorboardLogger))
        default_callbacks.append(("_exception", ExceptionCallback))
        for callback_name, callback_fn in default_callbacks:
            # Skip a default callback if the user already registered an
            # instance of the same class.
            is_already_present = any(
                check_callback_isinstance(x, callback_fn)
                for x in callbacks.values()
            )
            if not is_already_present:
                callbacks[callback_name] = callback_fn()
        return callbacks
# Public API of this module.
__all__ = ["Experiment"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.