import FWCore.ParameterSet.Config as cms
process = cms.Process("Rec")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20) )
#can be 300 in that file
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#MC relval SINGLE MUON
###'/store/relval/CMSSW_2_1_9/RelValSingleMuPt10/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V9_v2/0000/EA123BEB-9F85-DD11-AB1D-000423D9870C.root'
#MC relval singlemu
#'/store/relval/CMSSW_3_1_0_pre3/RelValSingleMuPt10/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_30X_v1/0001/581655A2-7D0A-DE11-B548-0019DB2F3F9A.root'
#MC relval ttbar
'/store/relval/CMSSW_3_1_0_pre3/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_30X_v1/0001/1EE2ECB6-170A-DE11-BE8C-0016177CA7A0.root',
#'/store/relval/CMSSW_3_1_0_pre3/RelValTTbar/GEN-SIM-RECO/IDEAL_30X_v1/0001/3C8AABDF-FA0A-DE11-80A5-001D09F290BF.root'
)
)
# output module
#
process.load("Configuration.EventContent.EventContentCosmics_cff")
process.FEVT = cms.OutputModule("PoolOutputModule",
process.FEVTEventContent,
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RECO')),
fileName = cms.untracked.string('promptrecoCosmics.root')
)
process.FEVT.outputCommands.append('keep CSCDetIdCSCALCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCALCTDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCCLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCLCTDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCComparatorDigiMuonDigiCollection_muonCSCDigis_MuonCSCComparatorDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_csctfDigis_*_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCorrelatedLCTDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCRPCDigiMuonDigiCollection_muonCSCDigis_MuonCSCRPCDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCStripDigiMuonDigiCollection_muonCSCDigis_MuonCSCStripDigi_*')
process.FEVT.outputCommands.append('keep CSCDetIdCSCWireDigiMuonDigiCollection_muonCSCDigis_MuonCSCWireDigi_*')
process.FEVT.outputCommands.append('keep cscL1TrackCSCDetIdCSCCorrelatedLCTDigiMuonDigiCollectionstdpairs_csctfDigis_*_*')
process.FEVT.outputCommands.append('keep DTChamberIdDTLocalTriggerMuonDigiCollection_muonDTDigis_*_*')
process.FEVT.outputCommands.append('keep DTLayerIdDTDigiMuonDigiCollection_muonDTDigis_*_*')
process.FEVT.outputCommands.append('keep intL1CSCSPStatusDigisstdpair_csctfDigis_*_*')
process.FEVT.outputCommands.append('keep L1MuDTChambPhContainer_dttfDigis_*_*')
process.FEVT.outputCommands.append('keep L1MuDTChambThContainer_dttfDigis_*_*')
process.FEVT.outputCommands.append('keep L1MuDTTrackContainer_dttfDigis_DATA_*')
process.FEVT.outputCommands.append('keep PixelDigiedmDetSetVector_siPixelDigis_*_*')
process.FEVT.outputCommands.append('keep RPCDetIdRPCDigiMuonDigiCollection_muonRPCDigis_*_*')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string(''),
name = cms.untracked.string(''),
annotation = cms.untracked.string('CRUZET Prompt Reco with DQM with Mag field at 0T')
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) ## default is false
# Conditions (Global Tag is used here):
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
####process.GlobalTag.connect = "frontier://PromptProd/CMS_COND_21X_GLOBALTAG"
process.GlobalTag.globaltag = "IDEAL_V9::All"
process.GlobalTag.globaltag = "IDEAL_30X::All"
process.es_prefer_GlobalTag = cms.ESPrefer('PoolDBESSource','GlobalTag')
###process.prefer("GlobalTag")
# Magnetic field: force mag field to be 0 tesla
process.load("Configuration.StandardSequences.MagneticField_0T_cff")
#Geometry
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
# Real data raw to digi
##process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
process.load("Configuration.StandardSequences.RawToDigi_cff")
# reconstruction sequence for Cosmics
#process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
# offline DQM
##process.load("DQMOffline.Configuration.DQMOfflineCosmics_cff")
process.load("DQMOffline.Configuration.DQMOffline_cff")
process.load("DQMServices.Components.MEtoEDMConverter_cff")
#L1 trigger validation
#process.load("L1Trigger.HardwareValidation.L1HardwareValidation_cff")
process.load("L1Trigger.Configuration.L1Config_cff")
process.load("L1TriggerConfig.CSCTFConfigProducers.CSCTFConfigProducer_cfi")
process.load("L1TriggerConfig.CSCTFConfigProducers.L1MuCSCTFConfigurationRcdSrc_cfi")
##new##
process.load("Configuration.StandardSequences.Simulation_cff")
#MC
process.SiPixelTrackResidualSource.TrackCandidateProducer = cms.string('newTrackCandidateMaker')
process.SiPixelTrackResidualSource.trajectoryInput = cms.InputTag('generalTracks')
process.SiPixelTrackResidualSource.saveFile = cms.untracked.bool(True)
process.SiPixelTrackResidualSource.modOn = cms.untracked.bool(True)
process.SiPixelClusterSource.modOn = cms.untracked.bool(True)
#event content analyzer
process.dump = cms.EDAnalyzer('EventContentAnalyzer')
#Paths
##process.allPath = cms.Path( process.RawToDigi_woGCT * process.reconstructionCosmics * process.DQMOfflineCosmics * process.MEtoEDMConverter)
##process.allPath = cms.Path( process.RawToDigi * process.reconstruction * process.DQMOffline * process.MEtoEDMConverter)
#######process.allPath = cms.Path( process.RawToDigi * process.reconstruction * process.DQMOffline)
process.allPath = cms.Path( process.RawToDigi * process.reconstruction * process.SiPixelClusterSource * process.SiPixelTrackResidualSource)
process.outpath = cms.EndPath(process.FEVT)
|
import os
import yaml
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--archs', type = str, choices=['TSA'], help = 'our approach')
parser.add_argument('--benchmark', type = str, choices=['FineDiving'], help = 'dataset')
parser.add_argument('--prefix', type = str, default='default', help = 'experiment name')
parser.add_argument('--resume', action='store_true', default=False ,help = 'resume training (interrupted by accident)')
parser.add_argument('--sync_bn', type=bool, default=False)
parser.add_argument('--fix_bn', type=bool, default=True)
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--ckpts', type=str, default=None, help='test used ckpt path')
args = parser.parse_args()
if args.test:
if args.ckpts is None:
            raise RuntimeError('--ckpts should not be None when --test is activated')
return args
def setup(args):
args.config = '{}_TSA.yaml'.format(args.benchmark)
args.experiment_path = os.path.join('./experiments',args.archs, args.benchmark, args.prefix)
if args.resume:
cfg_path = os.path.join(args.experiment_path,'config.yaml')
if not os.path.exists(cfg_path):
print("Failed to resume")
args.resume = False
setup(args)
return
print('Resume yaml from %s' % cfg_path)
with open(cfg_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
merge_config(config, args)
args.resume = True
else:
config = get_config(args)
merge_config(config, args)
create_experiment_dir(args)
save_experiment_config(args)
def get_config(args):
try:
print('Load config yaml from %s' % args.config)
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
except:
raise NotImplementedError('%s arch is not supported'% args.archs)
return config
def merge_config(config, args):
for k, v in config.items():
setattr(args, k, v)
def create_experiment_dir(args):
try:
os.makedirs(args.experiment_path)
print('Create experiment path successfully at %s' % args.experiment_path)
except:
pass
def save_experiment_config(args):
config_path = os.path.join(args.experiment_path,'config.yaml')
with open(config_path, 'w') as f:
yaml.dump(args.__dict__, f)
print('Save the Config file at %s' % config_path)
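# Hedged usage sketch (script name and flags below are hypothetical): running
#   python main.py --archs TSA --benchmark FineDiving --prefix demo
# makes get_args() return args, setup() sets args.config = 'FineDiving_TSA.yaml',
# merges that yaml's keys onto args, and creates ./experiments/TSA/FineDiving/demo
# with a saved config.yaml inside it.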
|
import pydig
class PhishingTrackerDig:
@staticmethod
def analyzer(record, type='ANY'):
if record is None or len(record) == 0:
return None
type = type.upper()
if type == 'ANY':
types = [ 'A', 'AAAA', 'CNAME', 'MX', 'NS', 'TXT' ]
data = {}
for t in types:
data[t] = PhishingTrackerDig.analyzer(record, t)
return data
try:
response = pydig.query(record, type)
except Exception as e:
response = 'Exception: {}'.format(str(e))
        if type == 'TXT' and isinstance(response, list):
            # dig wraps TXT values in quotes; strip them from each answer string
            response = [s.strip('"') for s in response]
return response
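# Hedged usage sketch (domain is hypothetical): a concrete record type returns
# pydig's answer list, while the default 'ANY' fans out over A/AAAA/CNAME/MX/NS/TXT
# and returns a dict keyed by record type.
if __name__ == '__main__':
    print(PhishingTrackerDig.analyzer('example.com', 'a'))       # single type
    print(PhishingTrackerDig.analyzer('example.com').keys())     # all handled types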
|
from typing import Tuple
from pydantic import BaseSettings
class Settings(BaseSettings):
telegram_api_key: str
admin_ids: Tuple[int, ...]
sheet_title: str = 'Курсы по Питону'
class Config:
case_sensitive = False
env_file = 'creds/.env'
env_file_encoding = 'utf-8'
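# Hedged usage sketch (values are hypothetical): BaseSettings pulls the fields from
# the environment and from creds/.env, e.g. a file containing
#   TELEGRAM_API_KEY=123456:ABC-DEF
#   ADMIN_IDS=[111111111, 222222222]
# lets callers simply instantiate `Settings()` and read settings.telegram_api_key,
# settings.admin_ids and the default sheet_title.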
|
# Generated by Django 2.2.12 on 2020-06-04 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('EvalData', '0039_auto_20200601_1353'),
]
operations = [
migrations.AddField(
model_name='textsegmentwithtwotargets',
name='contextLeft',
field=models.TextField(blank=True, null=True, verbose_name='Context (left)'),
),
migrations.AddField(
model_name='textsegmentwithtwotargets',
name='contextRight',
field=models.TextField(blank=True, null=True, verbose_name='Context (right)'),
),
]
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.index, name='index'),
path('cat/<str:category>/', views.index, name='index-specific-cat'),
path('author/<str:author_first_name>-<str:author_last_name>/', views.author_index, name='specific-author'),
path('post/<slug:slug>/', views.post_detail, name='post-detail'),
path('edit/', views.new_post, name='new-post'),
path('edit/<str:Id>/', views.post_editor, name='post-edit'),
path('delete/<str:Id>/', views.delete_post, name='delete-post'),
path('image/upload', views.post_image_upload, name='add-image'),
path('image/delete', views.delete_image, name='delete-image'),
]
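# Hedged usage sketch (slug and mount point are hypothetical): with the 'blog' app
# namespace, templates and views can reverse these routes, e.g.
#   reverse('blog:post-detail', kwargs={'slug': 'my-first-post'})
# resolves to '<prefix>/post/my-first-post/', where <prefix> is wherever the
# project urls.py includes this urlconf.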
|
import torch
from .action_head.action_head import build_roi_action_head
class Combined3dROIHeads(torch.nn.ModuleDict):
def __init__(self, cfg, heads):
super(Combined3dROIHeads, self).__init__(heads)
self.cfg = cfg.clone()
def forward(self, slow_features, fast_features, boxes, objects=None, extras={}, part_forward=-1):
result, loss_action, loss_weight, accuracy_action = self.action(slow_features, fast_features, boxes, objects, extras, part_forward)
return result, loss_action, loss_weight, accuracy_action
def c2_weight_mapping(self):
weight_map = {}
for name, m_child in self.named_children():
if m_child.state_dict() and hasattr(m_child,"c2_weight_mapping"):
child_map = m_child.c2_weight_mapping()
for key, val in child_map.items():
new_key = name + '.' + key
weight_map[new_key] = val
return weight_map
def build_3d_roi_heads(cfg, dim_in):
roi_heads = []
roi_heads.append(("action", build_roi_action_head(cfg, dim_in)))
if roi_heads:
roi_heads = Combined3dROIHeads(cfg, roi_heads)
return roi_heads
|
from . import Subplotter
import numpy as np
def masked(to_mask, mask):
return [item for item, keep in zip(to_mask, mask) if keep]
class SampleSubplotter(Subplotter):
def __init__(self, colors):
super(SampleSubplotter, self).__init__(colors)
self.failed_samples = []
self.unfailed_samples = []
self.failed_colors = []
self.unfailed_colors = []
self.failed_markers = []
self.unfailed_markers = []
def incur_sample(self, state, action, failed, color=None, marker=None):
if color is None:
color = [0.9, 0.3, 0.3]
# States and actions are stored in np arrays of shape (1,) (since we
# are plotting them)
if failed:
marker = marker if marker is not None else 'x'
self.failed_samples.append((state[0], action[0]))
self.failed_colors.append(color)
self.failed_markers.append(marker)
else:
marker = marker if marker is not None else '.'
self.unfailed_samples.append((state[0], action[0]))
self.unfailed_colors.append(color)
self.unfailed_markers.append(marker)
def flush_samples(self):
self.failed_samples = []
self.unfailed_samples = []
self.failed_colors = []
self.unfailed_colors = []
self.failed_markers = []
self.unfailed_markers = []
def ensure_samples_in_at_least_one(self, *datasets):
dataset = np.unique(
np.vstack(datasets),
axis=0
)
def is_in_dataset(to_check):
return [np.isclose(x, dataset).all(axis=1).any() for x in to_check]
failed_in = is_in_dataset(self.failed_samples)
unfailed_in = is_in_dataset(self.unfailed_samples)
def filter_list(to_filter, keep_bools):
return [x for x, keep in zip(to_filter, keep_bools) if keep]
self.failed_samples = filter_list(self.failed_samples, failed_in)
self.unfailed_samples = filter_list(self.unfailed_samples, unfailed_in)
self.failed_colors = filter_list(self.failed_colors, failed_in)
self.unfailed_colors = filter_list(self.unfailed_colors, unfailed_in)
self.failed_markers = filter_list(self.failed_markers, failed_in)
        self.unfailed_markers = filter_list(self.unfailed_markers, unfailed_in)
def draw_on_axs(self, ax_Q):
def scatter_stateactions(stateactions, colors, markers):
markers_set = set(markers)
for marker in markers_set:
fltr = [m == marker for m in markers]
if any(fltr):
states, actions = zip(*masked(stateactions, fltr))
ax_Q.scatter(
actions,
states,
color=masked(colors, fltr),
s=60,
marker=marker,
edgecolors='none'
)
if len(self.failed_samples) > 0:
scatter_stateactions(self.failed_samples, self.failed_colors,
self.failed_markers)
if len(self.unfailed_samples) > 0:
scatter_stateactions(self.unfailed_samples, self.unfailed_colors,
self.unfailed_markers)
|
a = 21.0
b = 10.5
if ( a == b ):
    print("Line 1 - a is equal to b")
else:
    print("Line 1 - a is not equal to b")
if ( a != b ):
    print("Line 2 - a is not equal to b")
else:
    print("Line 2 - a is equal to b")
# '<>' is the legacy Python 2 spelling of '!=' and was removed in Python 3
if ( a != b ):
    print("Line 3 - a is not equal to b")
else:
    print("Line 3 - a is equal to b")
if ( a < b ):
    print("Line 4 - a is less than b")
else:
    print("Line 4 - a is not less than b")
if ( a > b ):
    print("Line 5 - a is greater than b")
else:
    print("Line 5 - a is not greater than b")
a = 5.0
b = 20.0
if ( a <= b ):
    print("Line 6 - a is either less than or equal to b")
else:
    print("Line 6 - a is neither less than nor equal to b")
if ( b >= a ):
    print("Line 7 - b is either greater than or equal to a")
else:
    print("Line 7 - b is neither greater than nor equal to a")
|
"""
First text analytics script using nltk.
Will load the tokens and then create a dictionary of words with the number of
occurrences in the text.
"""
from TokenizeLemmatize import unpickle_tokens
from collections import Counter
def count_names(tokens):
"""Return a counter of the continuous chains of NNPs."""
names = []
name = None
for word, tag in tokens:
if word == '’':
tag = '’'
if name is None and tag.startswith('NNP'):
name = word
elif name is not None and tag.startswith('NNP'):
name = name + ' ' + word
elif name is not None and not tag.startswith('NNP'):
names.append(name)
name = None
return Counter(names)
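# Worked example (tokens are hypothetical): for the tagged tokens
#   [('David', 'NNP'), ('Foster', 'NNP'), ('Wallace', 'NNP'), ('wrote', 'VBD')]
# the consecutive NNP run is joined into one name, so the function returns
#   Counter({'David Foster Wallace': 1}).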
def filter_names(names, target):
"""Filter out the names which do not contain the target name."""
hits = []
for name, count in names.most_common():
if target in name.split(' '):
hits.append((name, count))
return hits
# main loop
if __name__ == "__main__":
try:
tokens = unpickle_tokens('Texts/NonFreeTexts/InfiniteJest')
except FileNotFoundError:
tokens = unpickle_tokens('Texts/FreeTexts/Hamlet')
# print(count_names(tokens))
print(filter_names(count_names(tokens), 'Hal'))
|
# -*- coding: utf-8 -*-
# @Author: David Hanson
# @Date: 2021-01-31 13:45:32
# @Last Modified by: David Hanson
# @Last Modified time: 2021-01-31 14:09:40
# recursive method:
def find_factorial_recursive(number):
if number <= 1:
return 1
else:
return number * find_factorial_recursive(number-1)
# iterative method
def find_factorial_iterative(number):
    # 0! and 1! are both 1; without this guard an input of 0 would return 0
    if number <= 1:
        return 1
    num_step = number - 1
    while num_step > 0:
        number = number * num_step
        num_step -= 1
    return number
print(find_factorial_recursive(10))
print(find_factorial_iterative(10))
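# Worked check: 10! = 10 * 9 * 8 * ... * 1 = 3628800, so both calls above print 3628800.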
|
from . import parser
from . import watch
def gen_leases(path):
"""
Keep track of currently valid leases for ISC dhcpd.
Yields dictionaries that map ``ip`` to information about the
lease. Will block until new information is available.
"""
g = watch.watch_dhcp_leases(path)
for _ in g:
        with open(path) as f:
s = f.read()
leases = {}
for l in parser.parse(s):
assert 'ip' in l
leases[l['ip']] = l
yield leases
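# Hedged usage sketch (lease file path is hypothetical): each time the watcher
# reports a change, the generator yields the freshly parsed lease table.
#
# for leases in gen_leases('/var/lib/dhcp/dhcpd.leases'):
#     print(sorted(leases))          # currently leased IP addresses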
|
import http
import io
import argparse
import json
import os
import pathlib
from collections import defaultdict
from typing import Optional, Tuple
import librosa
import rnd_utilities
import aiohttp
from pydub import AudioSegment
from aiogram import Bot, Dispatcher, types, executor
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from tacotron2.app.syntesis import defaults
parser = argparse.ArgumentParser()
parser.add_argument(
'--config', type=str, required=True, help='Telegram bot configuration'
)
args = parser.parse_args()
config = rnd_utilities.load_json(pathlib.Path(args.config))
API_TOKEN = config['api_token']
AUTH = aiohttp.BasicAuth(
login=config['user'],
password=config['password']
)
HEADERS = {
"accept": "application/json",
"Content-Type": "application/json"
}
BOT = Bot(token=API_TOKEN)
BOT_FOLDER = config['bot_folder']
DP = Dispatcher(BOT)
VOICES = config['voices']
DEFAULT_VOICE = '__default__'
START_VOICE = config['default_voice']
USER_VOICES = defaultdict(lambda: START_VOICE)
def get_mp3_path(wav_basestring, text, sampling_rate):
hashname = str(hash(text))
io_stream = io.BytesIO(wav_basestring)
audio = librosa.load(io_stream)
path_to_save = os.path.join(BOT_FOLDER, hashname)
librosa.output.write_wav(
path=path_to_save + '.wav',
y=audio[0],
sr=sampling_rate)
sound = AudioSegment.from_wav(path_to_save + '.wav')
sound.export(path_to_save + '.mp3', format='mp3')
return path_to_save + '.mp3'
def _get_voices_keyboard(selected: Optional[str] = None):
keyboard = []
for voice, _ in VOICES.items():
if voice == DEFAULT_VOICE:
continue
text = voice
if voice == selected:
text = f'[ {voice} ]'
btn = InlineKeyboardButton(text=text, callback_data=voice)
keyboard.append([btn])
keyboard = InlineKeyboardMarkup(inline_keyboard=keyboard)
return keyboard
def _get_server_payload_for_user(message):
user_id = str(message.from_user.id)
if user_id in USER_VOICES:
user_voice = USER_VOICES[user_id]
else:
user_voice = START_VOICE
voice_url = VOICES[user_voice]['url']
voice_denoiser_strength = VOICES[user_voice]['denoiser_strength']
    inp_dict = {
        # send the raw text of the user's message, not the Message object itself
        "utterance": message.text,
        'denoiser_strength': voice_denoiser_strength
    }
payload = json.dumps(inp_dict)
return payload, voice_url
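# Hedged example of the request this helper builds (values are hypothetical): for a
# user whose selected voice has denoiser_strength 0.05, the POSTed JSON payload is
#   {"utterance": "<message text>", "denoiser_strength": 0.05}
# and voice_url is that voice's 'url' entry from the config.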
async def get_error_response_text(response):
if response.status == http.HTTPStatus.BAD_REQUEST:
response = await response.text()
else:
response = "Undefined error."
return response
async def reply_using_server_response(response, message: types.Message):
status = response.status
text = message.text
user_id = str(message.from_user.id)
if status == http.HTTPStatus.OK:
response = await response.read()
path_to_mp3 = get_mp3_path(
wav_basestring=response,
text=text,
sampling_rate=VOICES[USER_VOICES[user_id]]['sampling_rate']
)
with open(path_to_mp3, 'rb') as audio_file:
await message.answer_voice(audio_file)
else:
        error_text = await get_error_response_text(response)
await message.answer(error_text)
@DP.message_handler(commands=['start'])
async def send_kb(message: types.Message):
"""This handler will be called when user sends `/start`"""
await message.reply(
f"""Привет. Я озвучу любую отправленную мне фразу на русском языке длиной до {defaults.MAX_UTTERANCE_LENGTH} символов.
\n Чтобы выбрать голос отправь /voices \n Также, если хочешь сам выставить ударения - добавь знак `+` после требуемой гласной буквы."""
)
@DP.message_handler(commands=['voices'])
async def send_kb(message: types.Message):
"""This handler will be called if user sends `/voices`"""
keyboard = _get_voices_keyboard()
await message.reply(
"Выбери голос",
reply_markup=keyboard,
reply=False
)
@DP.callback_query_handler(lambda q: q.data in VOICES.keys())
async def send_kb(callback_query: types.CallbackQuery):
user_id = str(callback_query.from_user.id)
voice = callback_query.data
keyboard = _get_voices_keyboard(selected=voice)
USER_VOICES[user_id] = voice
await callback_query.message.edit_reply_markup(keyboard)
await callback_query.message.reply(
f'<Голос изменен на "{voice}">',
reply=False
)
@DP.message_handler()
async def reply_on_message(message: types.Message):
"""Replies on user message."""
payload, url = _get_server_payload_for_user(message)
async with aiohttp.ClientSession(auth=AUTH) as session:
async with session.post(url, data=payload, headers=HEADERS) as server_response:
await reply_using_server_response(server_response, message)
if __name__ == '__main__':
executor.start_polling(DP, skip_updates=True)
|
import requests
import numpy as np
import cv2
import os
from hashlib import sha1
images_dir = "animals"
name_prefix = "tree"
os.makedirs(images_dir, exist_ok=True)
with open("trees.txt", "rt") as file:
links = file.read().split("\n")
for link in links:
hex = sha1(bytes(link, encoding='utf-8')).hexdigest()
img_name = f"{name_prefix}-{hex}.png"
img_path = os.path.join(images_dir, img_name)
if os.path.isfile(img_path):
print(f"This file exists already: {img_path}")
continue
try:
re = requests.get(link, stream=True)
if re.status_code != 200:
print(f"Invalid, {re.status_code}, {link}")
continue
except Exception as err:
print(f"Error: {err}")
continue
img_raw = re.content
image = np.frombuffer(img_raw, dtype=np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
try:
cv2.imwrite(img_path, image)
print(f"Saved image: {img_path}")
except Exception as err:
print(f"Error, {err}")
print(f"Finished")
|
from django.apps import AppConfig
from django.conf import settings
from django.utils.translation import gettext_lazy as _
class UrlManagerConfig(AppConfig):
name = "djangocms_url_manager"
verbose_name = _("django CMS URL Manager")
url_manager_supported_models = {}
def ready(self):
from .compat import CMS_36
if CMS_36:
from .utils import parse_settings
self.url_manager_supported_models = parse_settings(
settings, "URL_MANAGER_SUPPORTED_MODELS"
)
|
from __future__ import print_function
from builtins import chr
import base64
import pickle
from Crypto import Random
from Crypto.Cipher import AES
from cumulusci.core.config import ConnectedAppOAuthConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import ScratchOrgConfig
from cumulusci.core.config import ServiceConfig
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.keychain import BaseProjectKeychain
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS).encode('ascii')
unpad = lambda s: s[0:-ord(s[-1])]
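# Worked example of this PKCS#7-style padding with BS = 16: a 13-byte plaintext
# gets 3 bytes of chr(3) appended so its length becomes a multiple of the AES
# block size; unpad reads the last byte (3) and strips that many bytes again.
# A plaintext whose length is already a multiple of 16 gets a full block of
# chr(16) appended, so unpadding stays unambiguous.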
class BaseEncryptedProjectKeychain(BaseProjectKeychain):
""" Base class for building project keychains that use AES encryption for securing stored org credentials """
encrypted = True
def _get_connected_app(self):
if self.app:
return self._decrypt_config(ConnectedAppOAuthConfig, self.app)
def _get_service(self, name):
return self._decrypt_config(ServiceConfig, self.services[name])
def _set_service(self, service, service_config, project):
encrypted = self._encrypt_config(service_config)
self._set_encrypted_service(service, encrypted, project)
def _set_encrypted_service(self, service, encrypted, project):
self.services[service] = encrypted
def _set_org(self, org_config, global_org):
encrypted = self._encrypt_config(org_config)
self._set_encrypted_org(org_config.name, encrypted, global_org)
def _set_encrypted_org(self, name, encrypted, global_org):
self.orgs[name] = encrypted
def _get_org(self, name):
return self._decrypt_config(OrgConfig, self.orgs[name], extra=[name])
def _get_cipher(self, iv=None):
if iv is None:
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return cipher, iv
def _encrypt_config(self, config):
pickled = pickle.dumps(config.config)
pickled = pad(pickled)
#pickled = base64.b64encode(pickled)
cipher, iv = self._get_cipher()
encrypted = base64.b64encode(iv + cipher.encrypt(pickled))
return encrypted
def _decrypt_config(self, config_class, encrypted_config, extra=None):
if not encrypted_config:
if extra:
return config_class(None, *extra)
else:
return config_class()
encrypted_config = base64.b64decode(encrypted_config)
iv = encrypted_config[:16]
cipher, iv = self._get_cipher(iv)
pickled = cipher.decrypt(encrypted_config[16:])
config_dict = pickle.loads(pickled)
args = [config_dict]
if extra:
args += extra
return self._construct_config(config_class, args)
def _construct_config(self, config_class, args):
if args[0].get('scratch'):
config_class = ScratchOrgConfig
return config_class(*args)
def _validate_key(self):
if not self.key:
raise ConfigError('CUMULUSCI_KEY not set')
if len(self.key) != 16:
raise ConfigError('CUMULUSCI_KEY must be 16 characters long')
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
from PyQt5.QtGui import QGuiApplication, QCursor
from PyQt5.QtCore import Qt
import numpy as np
import matplotlib.path as mpath
# local import
class HemisphereWindowExt:
"""
    the HemisphereWindowExt window class handles the extended interactions of the
    hemisphere window: custom markers, operation modes, mouse events and slewing.
"""
__all__ = ['HemisphereWindowExt',
]
@staticmethod
def markerPoint():
"""
:return: marker
"""
circleB = mpath.Path.unit_circle()
circleS = mpath.Path.unit_circle()
verts = np.concatenate([circleB.vertices, 0.5 * circleS.vertices])
codes = np.concatenate([circleB.codes, circleS.codes])
marker = mpath.Path(verts, codes)
return marker
@staticmethod
def markerAltAz():
"""
:return: marker
"""
circleB = mpath.Path.unit_circle()
circleM = mpath.Path.unit_circle()
circleS = mpath.Path.unit_circle()
circleC = mpath.Path.unit_circle()
verts = np.concatenate([circleB.vertices,
0.8 * circleM.vertices,
0.15 * circleS.vertices,
0.1 * circleC.vertices])
codes = np.concatenate([circleB.codes,
circleM.codes,
circleS.codes,
circleC.codes])
marker = mpath.Path(verts, codes)
return marker
@staticmethod
def markerStar():
"""
:return: marker
"""
star = mpath.Path.unit_regular_star(8)
verts = np.concatenate([star.vertices])
codes = np.concatenate([star.codes])
marker = mpath.Path(verts, codes)
return marker
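    # Hedged usage sketch: these composite Path objects plug straight into
    # matplotlib as custom markers, e.g.
    #   ax.plot(az, alt, marker=HemisphereWindowExt.markerAltAz(), linestyle='none')
    # draws the layered alt/az symbol at each coordinate pair.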
def setOperationMode(self):
"""
setOperationMode changes the operation mode of the hemisphere window(s)
depending on the choice, colors and styles will be changed.
:return: success
"""
if self.ui.checkEditNone.isChecked():
self.operationMode = 'normal'
self.ui.addPositionToHorizon.setEnabled(False)
elif self.ui.checkEditBuildPoints.isChecked():
self.operationMode = 'build'
self.ui.addPositionToHorizon.setEnabled(False)
elif self.ui.checkEditHorizonMask.isChecked():
self.operationMode = 'horizon'
self.ui.addPositionToHorizon.setEnabled(True)
elif self.ui.checkPolarAlignment.isChecked():
self.ui.checkShowAlignStar.setChecked(True)
self.operationMode = 'star'
self.ui.addPositionToHorizon.setEnabled(False)
self.drawHemisphere()
return True
def enableEditPoints(self, status):
"""
:param status:
:return:
"""
self.ui.operationMode.setEnabled(status)
hem = self.hemisphereMat.figure.canvas
if status:
self.hemMouse = hem.mpl_connect('button_press_event',
self.onMouseDispatcher)
else:
hem.mpl_disconnect(self.hemMouse)
return True
def showMouseCoordinates(self, event):
"""
:param event:
:return: success
"""
if not event.inaxes:
xText = '-'
yText = '-'
QGuiApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
else:
xText = f'{event.xdata:3.1f}'
yText = f'{event.ydata:3.1f}'
QGuiApplication.setOverrideCursor(QCursor(Qt.CrossCursor))
self.ui.altitude.setText(yText)
self.ui.azimuth.setText(xText)
return True
def slewSelectedTarget(self, slewType='normal'):
"""
:param slewType:
:return: success
"""
azimuthT = self.app.mount.obsSite.AzTarget.degrees
altitudeT = self.app.mount.obsSite.AltTarget.degrees
if self.app.deviceStat['dome']:
self.app.dome.avoidFirstOvershoot()
delta = self.app.dome.slewDome(altitude=altitudeT,
azimuth=azimuthT)
geoStat = 'Geometry corrected' if delta else 'Equal mount'
t = f'Slewing dome: [{geoStat}],'
t += f' AZ:[{azimuthT:3.1f}] delta: [{delta:3.1f}]'
self.app.message.emit(t, 0)
suc = self.app.mount.obsSite.startSlewing(slewType=slewType)
if suc:
t = f'Slewing mount to AZ:[{azimuthT:3.1f}], ALT:[{altitudeT:3.1f}]'
self.app.message.emit(t, 0)
else:
t = f'Cannot slew to AZ:[{azimuthT:3.1f}], ALT:[{altitudeT:3.1f}]'
self.app.message.emit(t, 2)
return suc
def onMouseNormal(self, event):
"""
onMouseNormal handles the mouse event in normal mode. this means only
a double click is possible and offers the opportunity to slew the
telescope to a certain position in sky selected by the mouse.
:param event: mouse events
:return: success
"""
if not event.inaxes:
return False
if event.button != 1 or not event.dblclick:
return False
azimuth = int(event.xdata + 0.5)
altitude = int(event.ydata + 0.5)
question = '<b>Manual slewing to coordinate</b>'
question += '<br><br>Selected coordinates are:<br>'
question += f'<font color={self.M_BLUE}> Altitude: {altitude:3.1f}°'
question += f' Azimuth: {azimuth:3.1f}°</font>'
question += '<br><br>Would you like to start slewing?<br>'
suc = self.messageDialog(self, 'Slewing mount', question)
if not suc:
return False
suc = self.app.mount.obsSite.setTargetAltAz(alt_degrees=altitude,
az_degrees=azimuth)
if not suc:
t = f'Cannot slew to AZ:[{azimuth:3.1f}], ALT:[{altitude:3.1f}]'
self.app.message.emit(t, 2)
return False
t = f'Slewing mount to AZ:[{azimuth:3.1f}], ALT:[{altitude:3.1f}]'
self.app.message.emit(t, 0)
suc = self.slewSelectedTarget(slewType='keep')
return suc
def addHorizonPointManual(self):
"""
:return:
"""
data = self.app.data
alt = self.app.mount.obsSite.Alt
az = self.app.mount.obsSite.Az
if alt is None or az is None:
return False
index = self.getIndexPointX(x=az.degrees, plane=data.horizonP)
if index is None and data.horizonP:
return False
suc = data.addHorizonP(value=(alt.degrees, az.degrees), position=index)
if suc:
self.drawHemisphere()
return suc
def addHorizonPoint(self, data=None, event=None):
"""
addHorizonPoint calculates from the position of the left mouse click the
position where the next horizon point should be added. the coordinates
are given from mouse click itself.
:param data: point in tuples (alt, az)
:param event: mouse event
:return:
"""
index = self.getIndexPointX(x=event.xdata, plane=data.horizonP)
if index is None and data.horizonP:
return False
suc = data.addHorizonP(value=(event.ydata, event.xdata),
position=index)
return suc
def deleteHorizonPoint(self, data=None, event=None):
"""
deleteHorizonPoint selects the next horizon point in distance max and
        tries to delete it. there have to be at least 2 horizon points left.
:param data: point in tuples (alt, az)
:param event: mouse event
:return: success
"""
index = self.getIndexPoint(event=event, plane=data.horizonP)
if index is None:
return False
suc = False
if len(data.horizonP) > 0:
suc = data.delHorizonP(position=index)
return suc
def editHorizonMask(self, data=None, event=None):
"""
editHorizonMask does dispatching the different mouse clicks for adding
or deleting horizon mask points and call the function accordingly.
:param data: point in tuples (alt, az)
:param event: mouse event
:return: success
"""
if event.button == 1:
suc = self.addHorizonPoint(data=data, event=event)
elif event.button == 3:
suc = self.deleteHorizonPoint(data=data, event=event)
else:
return False
if data.horizonP is None:
return False
self.drawHemisphere()
return suc
def addBuildPoint(self, data=None, event=None):
"""
addBuildPoint calculates from the position of the left mouse click the
position where the next modeldata point should be added. the coordinates
are given from mouse click itself.
:param data: point in tuples (alt, az)
:param event: mouse event
:return:
"""
self.app.buildPointsChanged.emit()
index = self.getIndexPoint(event=event, plane=data.buildP, epsilon=360)
if index is None:
return False
index += 1
suc = data.addBuildP(value=(event.ydata, event.xdata, True),
position=index)
if not suc:
return False
return True
def deleteBuildPoint(self, data=None, event=None):
"""
deleteBuildPoint selects the next modeldata point in distance max and
        tries to delete it. there have to be at least 2 build points left.
:param data: point in tuples (alt, az)
:param event: mouse event
:return: success
"""
self.app.buildPointsChanged.emit()
index = self.getIndexPoint(event=event, plane=data.buildP)
if index is None:
return False
suc = data.delBuildP(position=index)
return suc
def editBuildPoints(self, data=None, event=None):
"""
editBuildPoints does dispatching the different mouse clicks for adding
or deleting model data points and call the function accordingly.
:param data: points in tuples (alt, az)
:param event: mouse event
:return: success
"""
if event.button == 1:
suc = self.addBuildPoint(data=data, event=event)
elif event.button == 3:
suc = self.deleteBuildPoint(data=data, event=event)
else:
return False
self.drawHemisphere()
return suc
def onMouseEdit(self, event):
"""
onMouseEdit handles the mouse event in normal mode. this means depending
on the edit mode (horizon or model points) a left click adds a new point
and right click deletes the selected point.
:param event: mouse events
:return: success
"""
data = self.app.data
if not event.inaxes:
return False
if event.dblclick:
return False
if self.ui.checkEditHorizonMask.isChecked():
suc = self.editHorizonMask(event=event, data=data)
elif self.ui.checkEditBuildPoints.isChecked():
suc = self.editBuildPoints(event=event, data=data)
else:
return False
return suc
def onMouseStar(self, event):
"""
onMouseStar handles the mouse event in polar align mode. this means
only a right click is possible and offers the opportunity to slew the
telescope to the selected star and start manual polar alignment.
:param event: mouse events
:return: success
"""
if not event.inaxes:
return False
if not self.app.mount.model.numberStars:
self.app.message.emit('No model for alignment present!', 2)
return False
if event.button == 1 and not event.dblclick:
alignType = 'polar'
elif event.button == 3 and not event.dblclick:
alignType = 'ortho'
else:
return False
hip = self.app.hipparcos
plane = list(zip(hip.alt, hip.az))
index = self.getIndexPoint(event=event, plane=plane, epsilon=2)
if index is None:
return False
name = hip.name[index]
ra, dec = hip.getAlignStarRaDecFromName(hip.name[index])
question = '<b>Polar / Ortho Alignment procedure</b>'
question += '<br><br>Selected alignment type: '
question += f'<font color={self.M_BLUE}>{alignType}.</font>'
question += '<br>Selected alignment star: '
question += f'<font color={self.M_BLUE}>{name}.</font>'
question += '<br>Would you like to start alignment?<br>'
isDAT = self.app.mount.setting.statusDualAxisTracking
warning = f'<br><i><font color={self.M_YELLOW}>'
warning += 'Dual Axis Tracking is actually enabled!<br>'
warning += 'It should be off during alignment process.</font></i>'
question = question + warning if isDAT else question
suc = self.messageDialog(self, 'Slewing mount', question)
if not suc:
return False
suc = self.app.mount.obsSite.setTargetRaDec(ra_hours=ra,
dec_degrees=dec)
if not suc:
self.app.message.emit(f'Cannot slew to: [{name}]', 2)
return False
self.app.message.emit(f'Align [{alignType}] to: [{name}]', 1)
suc = self.slewSelectedTarget(slewType=alignType)
return suc
def onMouseDispatcher(self, event):
"""
onMouseDispatcher dispatches the button events depending on the actual
operation mode.
:param event: button event for parsing
:return: True for test purpose
"""
if self.ui.checkEditNone.isChecked():
self.onMouseNormal(event)
elif self.ui.checkEditBuildPoints.isChecked():
self.onMouseEdit(event)
elif self.ui.checkEditHorizonMask.isChecked():
self.onMouseEdit(event)
elif self.ui.checkPolarAlignment.isChecked():
self.onMouseStar(event)
return True
|
from .controller_util import get_triggering_dags
|
# Copyright 2021 Rafał Safin (rafsaf). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
from base import forms, models
from base.models.profile import Profile
@login_required
def new_outline_create(request: HttpRequest) -> HttpResponse:
"""creates new user's outline login required"""
profile: Profile = models.Profile.objects.select_related().get(user=request.user)
form1 = forms.OutlineForm(None)
form2 = forms.ChangeServerForm(None)
form1.fields["world"].choices = [
(f"{world.pk}", f"{world.human()}")
for world in models.World.objects.filter(server=profile.server).order_by(
"postfix"
)
]
if request.method == "POST":
if "form1" in request.POST:
form1 = forms.OutlineForm(request.POST)
form1.fields["world"].choices = [
(f"{world.pk}", world.human())
for world in models.World.objects.filter(
server=profile.server
).order_by("postfix")
]
if form1.is_valid():
world = request.POST["world"]
world_instance = get_object_or_404(models.World, pk=int(world))
new_instance = models.Outline(
owner=request.user,
date=request.POST["date"],
name=request.POST["name"],
world=world_instance,
morale_on=profile.default_morale_on,
)
new_instance.save()
new_instance.refresh_from_db()
result = models.Result(outline=new_instance)
result.save()
new_instance.create_stats()
return redirect("base:planer_create_select", new_instance.pk)
if "form2" in request.POST:
form2 = forms.ChangeServerForm(request.POST)
if form2.is_valid():
new_server = request.POST.get("server")
new_server = get_object_or_404(models.Server, dns=new_server)
profile: models.Profile = models.Profile.objects.get(user=request.user)
profile.server = new_server
profile.server_bind = True
profile.save()
return redirect("base:planer_create")
context = {"form1": form1, "profile": profile, "form2": form2}
return render(request, "base/new_outline/new_outline_create.html", context)
@login_required
def new_outline_create_select(request: HttpRequest, _id: int) -> HttpResponse:
"""select user's ally and enemy tribe after creating outline, login required"""
instance = get_object_or_404(
models.Outline, pk=_id, owner=request.user, editable="active"
)
ally_tribe: list[models.Tribe] = [
tribe
for tribe in models.Tribe.objects.filter(
world=instance.world, tag__in=instance.ally_tribe_tag
)
]
enemy_tribe: list[models.Tribe] = [
tribe
for tribe in models.Tribe.objects.filter(
world=instance.world, tag__in=instance.enemy_tribe_tag
)
]
banned_tribe_id = [tribe.pk for tribe in ally_tribe + enemy_tribe]
choices = [("banned", "--------")] + [ # type: ignore
(f"{tribe.tag}", f"{tribe.tag}")
for tribe in models.Tribe.objects.filter(world=instance.world).exclude(
pk__in=banned_tribe_id
)
]
if request.method == "POST":
if "tribe1" in request.POST:
form1 = forms.MyTribeTagForm(request.POST)
form1.fields["tribe1"].choices = choices
form2 = forms.EnemyTribeTagForm()
form2.fields["tribe2"].choices = choices
if form1.is_valid():
tribe = request.POST["tribe1"]
instance.ally_tribe_tag.append(tribe)
instance.save()
return redirect("base:planer_create_select", _id)
elif "tribe2" in request.POST:
form1 = forms.MyTribeTagForm()
form1.fields["tribe1"].choices = choices
form2 = forms.EnemyTribeTagForm(request.POST)
form2.fields["tribe2"].choices = choices
if form2.is_valid():
tribe = request.POST["tribe2"]
instance.enemy_tribe_tag.append(tribe)
instance.save()
return redirect("base:planer_create_select", _id)
else:
form1 = forms.MyTribeTagForm()
form1.fields["tribe1"].choices = choices
form2 = forms.EnemyTribeTagForm()
form2.fields["tribe2"].choices = choices
else:
form1 = forms.MyTribeTagForm()
form1.fields["tribe1"].choices = choices
form2 = forms.EnemyTribeTagForm()
form2.fields["tribe2"].choices = choices
context = {
"instance": instance,
"form1": form1,
"form2": form2,
"ally": ally_tribe,
"enemy": enemy_tribe,
}
return render(request, "base/new_outline/new_outline_create_select.html", context)
@require_POST
@login_required
def outline_delete_ally_tags(request: HttpRequest, _id: int) -> HttpResponse:
"""Delete ally tribe tags from outline"""
instance = get_object_or_404(models.Outline, pk=_id, owner=request.user)
instance.ally_tribe_tag = list()
instance.save()
return redirect("base:planer_create_select", _id)
@require_POST
@login_required
def outline_delete_enemy_tags(request: HttpRequest, _id: int) -> HttpResponse:
"""Delete enemy tribe tags from outline"""
instance = get_object_or_404(models.Outline, pk=_id, owner=request.user)
instance.enemy_tribe_tag = list()
instance.save()
return redirect("base:planer_create_select", _id)
@require_POST
@login_required
def outline_disable_editable(request: HttpRequest, _id: int) -> HttpResponse:
"""Outline not editable after chosing tags"""
instance = get_object_or_404(models.Outline, pk=_id, owner=request.user)
instance.editable = "inactive"
instance.save()
return redirect("base:planer")
|
import random
import evaluation
from evaluation import PTBTokenizer, Cider
from data.tokenizers import Tokenizer
from data.medicalDataloaders import R2DataLoader
from models.visual_extractor import VisualExtractor
from models.rstnet import Transformer, TransformerEncoder, TransformerDecoderLayer, ScaledDotProductAttention
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from torch.nn import NLLLoss
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import pickle
import numpy as np
import itertools
import multiprocessing
from shutil import copyfile
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
def evaluate_loss(model, visual_extractor, dataloader, loss_fn, tokenizer, device):
# Validation loss
model.eval()
running_loss = .0
with tqdm(desc='Epoch %d - validation' % e, unit='it', total=len(dataloader)) as pbar:
with torch.no_grad():
for it, (images_id, images, captions, reports_masks) in enumerate(dataloader):
images, captions, reports_masks = images.to(device), captions.to(device), reports_masks.to(device)
features = visual_extractor(images)
out = model(features, captions)
captions = captions[:, 1:].contiguous()
out = out[:, :-1].contiguous()
loss = loss_fn(out.view(-1, tokenizer.get_vocab_size()), captions.view(-1))
this_loss = loss.item()
running_loss += this_loss
pbar.set_postfix(loss=running_loss / (it + 1))
pbar.update()
val_loss = running_loss / len(dataloader)
return val_loss
def evaluate_metrics(model, visual_extractor, dataloader, tokenizer, args, device):
import itertools
model.eval()
gen = {}
gts = {}
with tqdm(desc='Epoch %d - evaluation' % e, unit='it', total=len(dataloader)) as pbar:
for it, (images_id, images, captions, reports_masks) in enumerate(dataloader):
images, captions, reports_masks = images.to(device), captions.to(device), reports_masks.to(device)
with torch.no_grad():
features = visual_extractor(images)
out, _ = model.beam_search(features, args.max_seq_length, tokenizer.token2idx['<eos>'], args.beam_size, out_size=1)
caps_gen = tokenizer.decode_batch(out[..., :-1])
caps_gt = tokenizer.decode_batch(captions[..., 1:])
for i, (gts_i, gen_i) in enumerate(zip(caps_gt, caps_gen)):
# gen_i = ' '.join([k for k, g in itertools.groupby(gen_i)])
gen['%d_%d' % (it, i)] = [gen_i, ]
# gen['%d_%d' % (it, i)] = [gen_i]
# gts['%d_%d' % (it, i)] = gts_i
gts['%d_%d' % (it, i)] = [gts_i, ]
pbar.update()
print(gen)
gts = evaluation.PTBTokenizer.tokenize(gts)
gen = evaluation.PTBTokenizer.tokenize(gen)
scores, _ = evaluation.compute_scores(gts, gen)
return scores
def train_xe(model, visual_extractor, dataloader, tokenizer, optim, device):
# Training with cross-entropy
model.train()
scheduler.step()
print('lr = ', optim.state_dict()['param_groups'][0]['lr'])
running_loss = .0
sum_eos = 0
with tqdm(desc='Epoch %d - train' % e, unit='it', total=len(dataloader)) as pbar:
        for it, (images_id, images, captions, reports_masks) in enumerate(dataloader):
images, captions, reports_masks = images.to(device), captions.to(device), reports_masks.to(device)
features = visual_extractor(images)
out = model(features, captions)
optim.zero_grad()
# print('captions', (captions == tokenizer.token2idx['<eos>']).sum())
captions_gt = captions[:, 1:].contiguous()
out = out[:, :-1].contiguous()
sum_eos += int((captions == tokenizer.token2idx['<eos>']).sum())
loss = loss_fn(out.view(-1, tokenizer.get_vocab_size()), captions_gt.view(-1))
loss.backward()
optim.step()
this_loss = loss.item()
running_loss += this_loss
pbar.set_postfix(loss=running_loss / (it + 1))
pbar.update()
# scheduler.step()
loss = running_loss / len(dataloader)
# scheduler.step()
return loss, sum_eos
if __name__ == '__main__':
device = torch.device('cuda')
parser = argparse.ArgumentParser(description='Transformer')
parser.add_argument('--exp_name', type=str, default='rstnet')
parser.add_argument('--batch_size', type=int, default=12)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--m', type=int, default=40)
parser.add_argument('--head', type=int, default=8)
parser.add_argument('--resume_last', action='store_true')
parser.add_argument('--resume_best', action='store_true')
parser.add_argument('--threshold', type=int, default=3, help='the cut off frequency for the words.')
parser.add_argument('--dataset_name', type=str, default='iu_xray', choices=['iu_xray', 'mimic_cxr'],
help='the dataset to be used.')
parser.add_argument('--logs_folder', type=str, default='language_tensorboard_logs')
parser.add_argument('--ann_path', type=str, default='../ReportGen/data/iu_xray/annotation.json',
help='the path to the directory containing the data.')
parser.add_argument('--image_dir', type=str, default='../ReportGen/data/iu_xray/images/',
help='the path to the directory containing the data.')
# parser.add_argument('--ann_path', type=str, default='../datasets/mimic_cxr/annotation.json',
# help='the path to the directory containing the data.')
# parser.add_argument('--image_dir', type=str, default='../datasets/mimic_cxr/images/',
# help='the path to the directory containing the data.')
parser.add_argument('--max_seq_length', type=int, default=60, help='the maximum sequence length of the reports.')
parser.add_argument('--seed', type=int, default=9233, help='.')
parser.add_argument('--num_workers', type=int, default=2, help='the number of workers for dataloader.')
parser.add_argument('--visual_extractor', type=str, default='resnet101', help='the visual extractor to be used.')
parser.add_argument('--visual_extractor_pretrained', type=bool, default=True, help='whether to load the pretrained visual extractor')
parser.add_argument('--lr_model', type=float, default=0.5, help='the learning rate for the transformer.')
parser.add_argument('--lr_ve', type=float, default=0.25, help='the learning rate for the visual extractor.')
parser.add_argument('--lr_bert', type=float, default=1, help='the learning rate for BERT.')
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--beam_size', type=int, default=3, help='the beam size when beam searching.')
parser.add_argument('--patience', type=int, default=100)
parser.add_argument('--xe_base_lr', type=float, default=0.0001)
args = parser.parse_args()
print(args)
print('Transformer Training')
writer = SummaryWriter(log_dir=os.path.join(args.logs_folder, args.exp_name))
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
# Create the dataset
tokenizer = Tokenizer(args)
dataloader_train = R2DataLoader(args, tokenizer, split='train', shuffle=True)
dataloader_val = R2DataLoader(args, tokenizer, split='val', shuffle=False)
dataloader_test = R2DataLoader(args, tokenizer, split='test', shuffle=False)
# Model and dataloaders
encoder = TransformerEncoder(3, tokenizer.token2idx['<pad>'], attention_module=ScaledDotProductAttention, attention_module_kwargs={'m': args.m})
decoder = TransformerDecoderLayer(tokenizer.get_vocab_size(), args.max_seq_length - 1, 3, tokenizer.token2idx['<pad>'])
ve = VisualExtractor(args).to(device)
model = Transformer(tokenizer.token2idx['<bos>'], encoder, decoder).to(device)
# ref_caps_train = list(train_dataset.text)
# cider_train = Cider(PTBTokenizer.tokenize(ref_caps_train))
def lambda_lr(s):
print("s:", s)
if s <= 3:
lr = args.xe_base_lr * s / 4
elif s <= 10:
lr = args.xe_base_lr
elif s <= 12:
lr = args.xe_base_lr * 0.2
else:
lr = args.xe_base_lr * 0.2 * 0.2
return lr
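    # Worked values of this schedule with the default xe_base_lr = 1e-4: epochs
    # 1-3 warm up at 2.5e-5, 5e-5, 7.5e-5; epochs 4-10 run at 1e-4; epochs 11-12
    # drop to 2e-5; later epochs use 4e-6. LambdaLR multiplies each parameter
    # group's base lr (lr_bert, lr_model, lr_ve) by this returned value.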
# Initial conditions
bert_params = list(map(id, model.decoder.language_model.parameters()))
ed_params = filter(lambda x: id(x) not in bert_params, model.parameters())
optim = Adam([
{'params': model.decoder.language_model.parameters(), 'lr': args.lr_bert},
{'params': ed_params, 'lr': args.lr_model, 'betas': (0.9, 0.98)},
{'params': ve.parameters(), 'lr': args.lr_ve}
])
scheduler = LambdaLR(optim, lambda_lr)
loss_fn = NLLLoss(ignore_index=tokenizer.token2idx['<pad>'])
# best_cider = .0
# best_test_cider = 0.
use_rl = False
best_bleu4 = .0
best_test_bleu4 = 0.
patience = 0
start_epoch = 0
if args.resume_last or args.resume_best:
if args.resume_last:
fname = 'saved_transformer_models/%s_last.pth' % args.exp_name
else:
fname = 'saved_transformer_models/%s_best.pth' % args.exp_name
if os.path.exists(fname):
data = torch.load(fname)
torch.set_rng_state(data['torch_rng_state'])
torch.cuda.set_rng_state(data['cuda_rng_state'])
np.random.set_state(data['numpy_rng_state'])
random.setstate(data['random_rng_state'])
model.load_state_dict(data['tr_state_dict'])
ve.load_state_dict(data['ve_state_dict'])
"""
optim.load_state_dict(data['optimizer'])
scheduler.load_state_dict(data['scheduler'])
"""
start_epoch = data['epoch'] + 1
best_bleu4 = data['best_bleu4']
best_test_bleu4 = data['best_test_bleu4']
patience = data['patience']
optim.load_state_dict(data['optimizer'])
scheduler.load_state_dict(data['scheduler'])
print('Resuming from epoch %d, validation loss %f, best bleu4 %f, and best_test_bleu4 %f' % (
data['epoch'], data['val_loss'], data['best_bleu4'], data['best_test_bleu4']))
print('patience:', data['patience'])
print("Training starts")
for e in range(start_epoch, start_epoch + args.epochs):
train_loss, sum_eos = train_xe(model, ve, dataloader_train, tokenizer, optim, device)
writer.add_scalar('data/train_loss', train_loss, e)
# Validation loss
val_loss = evaluate_loss(model, ve, dataloader_val, loss_fn, tokenizer, device)
writer.add_scalar('data/val_loss', val_loss, e)
# Validation scores
scores = evaluate_metrics(model, ve, dataloader_val, tokenizer, args, device)
print("Validation scores", scores)
val_bleu4 = scores['BLEU'][3]
writer.add_scalar('data/val_cider', scores['CIDEr'], e)
writer.add_scalar('data/val_bleu1', scores['BLEU'][0], e)
writer.add_scalar('data/val_bleu2', scores['BLEU'][1], e)
writer.add_scalar('data/val_bleu3', scores['BLEU'][2], e)
writer.add_scalar('data/val_bleu4', scores['BLEU'][3], e)
writer.add_scalar('data/val_meteor', scores['METEOR'], e)
writer.add_scalar('data/val_rouge', scores['ROUGE'], e)
# Test scores
scores = evaluate_metrics(model, ve, dataloader_test, tokenizer, args, device)
print("Test scores", scores)
test_bleu4 = scores['BLEU'][3]
writer.add_scalar('data/test_cider', scores['CIDEr'], e)
writer.add_scalar('data/test_bleu1', scores['BLEU'][0], e)
writer.add_scalar('data/test_bleu2', scores['BLEU'][1], e)
writer.add_scalar('data/test_bleu3', scores['BLEU'][2], e)
writer.add_scalar('data/test_bleu4', scores['BLEU'][3], e)
writer.add_scalar('data/test_meteor', scores['METEOR'], e)
writer.add_scalar('data/test_rouge', scores['ROUGE'], e)
# Prepare for next epoch
best = False
if val_bleu4 >= best_bleu4:
best_bleu4 = val_bleu4
patience = 0
best = True
else:
patience += 1
best_test = False
if test_bleu4 >= best_test_bleu4:
best_test_bleu4 = test_bleu4
best_test = True
exit_train = False
if patience == args.patience:
print('patience reached.')
exit_train = True
torch.save({
'torch_rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state(),
'numpy_rng_state': np.random.get_state(),
'random_rng_state': random.getstate(),
'epoch': e,
'val_loss': val_loss,
'val_bleu4': val_bleu4,
'tr_state_dict': model.state_dict(),
've_state_dict': ve.state_dict(),
'optimizer': optim.state_dict() if not use_rl else optim_rl.state_dict(),
'scheduler': scheduler.state_dict() if not use_rl else scheduler_rl.state_dict(),
'patience': patience,
'best_bleu4': best_bleu4,
'best_test_bleu4': best_test_bleu4,
'use_rl': use_rl,
}, 'saved_transformer_models/%s_last.pth' % args.exp_name)
if best:
copyfile('saved_transformer_models/%s_last.pth' % args.exp_name, 'saved_transformer_models/%s_best.pth' % args.exp_name)
if best_test:
copyfile('saved_transformer_models/%s_last.pth' % args.exp_name, 'saved_transformer_models/%s_best_test.pth' % args.exp_name)
if exit_train:
writer.close()
break
|
import pickle
import nltk
import spacy
import benepar
import torch
import json
import os
import subprocess
import numpy as np
from tempfile import TemporaryDirectory
from fairseq.models.bart import BARTModel
from nltk import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from nltk.tree import Tree
from nltk.tree import ParentedTree
from benepar.spacy_plugin import BeneparComponent
from collections import defaultdict, Counter
import time
from transformers.pipelines import pipeline
import warnings
warnings.filterwarnings("ignore")
#from bert_score import BERTScorer
class FEQA(object):
def __init__(self, device='cpu', qa_model_name = "deepset/minilm-uncased-squad2", qg_model_dir='../feqa/bart_qg/checkpoints/'):
self.qg_model = BARTModel.from_pretrained(
qg_model_dir,
checkpoint_file = 'checkpoint_best.pt'
)
if device=='cuda':
self.qg_model.to(device) #.cuda()
self.qg_model.half()
self.qg_model.eval()
self.batch_size = 1#64
self.beam_size = 10
self.max_length = 100
self.nlp = spacy.load('en_core_web_sm')
#self.parser = benepar.Parser("benepar_en2")
self.stop_words = set(stopwords.words('english'))
self.qa_threshold = 0.1 # below threshold, the question quality is too vague
self.qa_pipeline = pipeline('question-answering', model=qa_model_name, tokenizer=qa_model_name)
# self.bertscorer = BERTScorer(lang="en") #, rescale_with_baseline=True)
def _get_entities(self, output_summary):
entities = [X.text for X in self.nlp(output_summary).ents]
return entities
# def _get_masked_phrases(self, output_summary, phrase_types=["NP"]):
# masked_phrases = []
# parse_tree = self.parser.parse(output_summary)
# for subtree in parse_tree.subtrees():
# phrases_list = [(subtree_.leaves(), subtree_.label()) for subtree_ in subtree if type(subtree_) == Tree and subtree_.label() in phrase_types]
# for phrase_tuple in phrases_list:
# phrase = phrase_tuple[0]
# phrase_type = phrase_tuple[1]
# phrase_text = " ".join(phrase)
# if len(phrase) > 0 and phrase_text not in self.stop_words:
# masked_phrases.append(phrase_text)
# return masked_phrases
def _generate_questions(self, summaries, entities=True, phrase_types=["NP"]):
doc_ids = []
qa_masks = []
tokenized_phrases = []
for id_, summary in enumerate(summaries):
summary = summary.strip()
all_masked_phrases = []
if entities:
all_masked_phrases.extend(self._get_entities(summary))
# all_masked_phrases.extend(self._get_masked_phrases(summary,phrase_types))
all_masked_phrases = list(set(all_masked_phrases))
for i, masked_phrase in enumerate(all_masked_phrases):
tokenized_summary = " ".join(nltk.word_tokenize(summary.lower()))
tokenized_phrase = " ".join(nltk.word_tokenize(masked_phrase.lower()))
qa_masks.append(tokenized_summary + " [SEP] " + tokenized_phrase)
doc_ids.append(str(id_))
tokenized_phrases.append(tokenized_phrase)
questions = []
for i in range(0, len(qa_masks), self.batch_size):
batch = qa_masks[i:i + self.batch_size]
hypotheses = self.qg_model.sample(batch, beam=self.beam_size, lenpen=1.0, max_len_b=self.max_length, min_len=1, no_repeat_ngram_size=3)
questions.extend(hypotheses)
return doc_ids, questions, tokenized_phrases
def _convert_to_squad_format(self, gold_answers, questions, doc_ids, bodies):
squad_format = {"data":[]}
id_questions=defaultdict(list)
id_gold_answers=defaultdict(str)
for idx in range(0,len(doc_ids)):
id_questions[doc_ids[idx].strip()].append((questions[idx], gold_answers[idx]))
for idx in id_questions:
paragraphs = []
context = bodies[int(idx)].strip()
title = "doc_" + str(idx)
questions_list_input=[]
for q_id, question in enumerate(id_questions[idx]):
gold_answer = question[1]
question_text = question[0]
answers_input = [{"text": gold_answer, "answer_start": 0}]
questions_input = {
"question": question_text,
"answers": answers_input,
"id": str(idx).strip() + "-" + str(q_id)
}
questions_list_input.append(questions_input)
id_gold_answers[questions_input["id"]] = gold_answer
paragraphs.append({"context":" ".join(nltk.word_tokenize(context)).lower(),"qas":questions_list_input})
squad_format["data"].append({"title":title,"paragraphs":paragraphs})
squad_format["version"] = "1.1"
return id_gold_answers, squad_format
def _answer_questions_by_context(self, squad_format):
id_answers=defaultdict(str)
for doc in squad_format['data']:
for para in doc['paragraphs']:
context = para['context']
for q in para['qas']:
inputs = {
'question': q['question'],
'context': context
}
ret = self.qa_pipeline(inputs)
id_answers[q["id"]] = ret
# print(q['question'])
# print(ret)
# print()
return id_answers
def _readable_qas_dict(self, doc_ids, questions, gold_answers, pred_dict, bodies):
qas_dict = defaultdict()
previous_doc_id = None
for idx, qa_id in enumerate(pred_dict):
qa_info = {"question": questions[idx],
"gold_ans": gold_answers[idx],
"reply_ans": pred_dict[qa_id]["answer"],
"reply_scr": pred_dict[qa_id]["score"]}
# if qa_info["reply_scr"] > self.qa_threshold:
# cand = [qa_info["reply_ans"]]
# ref = [qa_info['gold_ans']]
# _, _, bert_f1 = self.bertscorer.score( cand, ref )
# qa_info["bert_f1"] = bert_f1.item()
# doc_f1_list.append(qa_info["bert_f1"])
# else:
# qa_info["bert_f1"] = None
doc_id = doc_ids[idx].strip()
if doc_id != previous_doc_id:
# if previous_doc_id is not None:
# if len(doc_f1_list) == 0:
# qas_dict[previous_doc_id]["doc_f1"] = 0
# else:
# qas_dict[previous_doc_id]["doc_f1"] = np.mean(doc_f1_list)
# doc_f1_list = []
qas_dict[doc_id] = dict()
qas_dict[doc_id]['context'] = bodies[int(doc_id)]
qas_dict[doc_id]['qas'] = dict()
qas_dict[doc_id]['qas'][qa_id] = qa_info
else:
qas_dict[doc_id]['qas'][qa_id] = qa_info
previous_doc_id = doc_ids[idx]
return qas_dict
def _compute_f1(self, a_gold, a_pred): # with word-overlap
gold_toks = nltk.word_tokenize(a_gold)
pred_toks = nltk.word_tokenize(a_pred)
common = Counter(gold_toks) & Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
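    # Worked example for _compute_f1 (illustrative): a_gold = "the cat", a_pred = "a cat".
    # The only shared token is "cat", so precision = recall = 1/2 and F1 = 0.5.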
def _compare_pred_gold(self, qas_dict, use_bertscr=True):
doc_f1_list = []
for doc_id in qas_dict:
for qa_id in qas_dict[doc_id]['qas']:
qa_info = qas_dict[doc_id]['qas'][qa_id]
if qa_info["reply_scr"] > self.qa_threshold:
cand = qa_info["reply_ans"]
ref = qa_info['gold_ans']
                    if use_bertscr:
                        # Note: requires the BERTScorer initialisation that is currently commented out in __init__.
                        _, _, bert_f1 = self.bertscorer.score([cand], [ref])
bert_f1 = bert_f1.item()
else:
bert_f1 = self._compute_f1(cand, ref)
qas_dict[doc_id]['qas'][qa_id]["bert_f1"] = bert_f1
doc_f1_list.append(qa_info["bert_f1"])
else:
qa_info["bert_f1"] = None
if len(doc_f1_list) == 0:
qas_dict[doc_id]["doc_f1"] = 0
else:
qas_dict[doc_id]["doc_f1"] = np.mean(doc_f1_list)
doc_f1_list = []
f1_list = []
for doc_id in qas_dict:
f1_list.append(qas_dict[doc_id]["doc_f1"] )
return qas_dict, f1_list
def compute_score(self, bodies, summaries, aggregate=False, show_qas_dict=False, use_bertscr=False):
#generate questions from summaries
#print("Generating questions...")
ts = time.time()
doc_ids, questions, gold_answers = self._generate_questions(summaries)
te = time.time()
#print("time spent generate questions:", te-ts)
#print("Getting answers...")
#run qa system
ts = time.time()
gold_answers_dict, squad_format = self._convert_to_squad_format(gold_answers, questions, doc_ids, bodies)
pred_dict = self._answer_questions_by_context(squad_format)
te = time.time()
#print("time spent answering questions:", te-ts)
qas_dict = self._readable_qas_dict(doc_ids, questions, gold_answers, pred_dict, bodies)
qas_dict, f1_list = self._compare_pred_gold(qas_dict, use_bertscr=use_bertscr)
if show_qas_dict:
for doc_id in qas_dict:
#print("context:", qas_dict[doc_id]['context'])
print("doc_f1:", qas_dict[doc_id]['doc_f1'])
qas = qas_dict[doc_id]['qas']
for q_id in qas:
print(q_id)
print("qst:", qas[q_id]["question"])
print("g_a:", qas[q_id]["gold_ans"])
print("r_a:", qas[q_id]["reply_ans"])
print("scr:", qas[q_id]["reply_scr"])
print("bert_f1:", qas[q_id]["bert_f1"] )
if aggregate:
return np.mean(f1_list)
return f1_list
def score(self, summaries, bodies, bodies_tokenized=None, lengths=None, extra=None):
if extra is None:
scores = self.compute_score( bodies, summaries, aggregate=False, show_qas_dict=False, use_bertscr=False)
return scores, "no need to cal faithfulness for argmax"
else:
scores = [0] * len(summaries)
return scores, None
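# Minimal usage sketch (illustrative; the document text, summary and device are made-up examples,
# not part of this class):
# scorer = FEQA(device='cpu')
# scores = scorer.compute_score(
#     bodies=["the cat sat on the mat in the kitchen ."],
#     summaries=["The cat sat on the mat ."],
#     aggregate=False,
# )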
|
"""
CPMpy interfaces to (the Python API interface of) solvers
Solvers typically use some of the generic transformations in
`transformations` as well as specific reformulations to map the
CPMpy expression to the solver's Python API
==================
List of submodules
==================
.. autosummary::
:nosignatures:
ortools
pysat
utils
===============
List of classes
===============
.. autosummary::
:nosignatures:
CPM_ortools
CPM_pysat
=================
List of functions
=================
.. autosummary::
:nosignatures:
param_combinations
"""
from .utils import builtin_solvers, get_supported_solvers, param_combinations
from .ortools import CPM_ortools
from .pysat import CPM_pysat
from .minizinc import CPM_minizinc
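# Minimal usage sketch (illustrative, not part of this module's public documentation):
# from cpmpy import intvar, Model
# x = intvar(0, 10, shape=3)
# model = Model(x[0] < x[1], x[1] < x[2])
# CPM_ortools(model).solve()  # solve the model directly through the or-tools interface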
|
import copy
import pytest
import numpy as np
import pandas as pd
from nomenclature.core import process
from nomenclature.definition import DataStructureDefinition
from nomenclature.processor.region import RegionProcessor
from pyam import IAMC_IDX, IamDataFrame, assert_iamframe_equal
from conftest import TEST_DATA_DIR
def test_region_processing_rename():
# Test **only** the renaming aspect, i.e. 3 things:
# 1. All native regions **with** a renaming property should be renamed correctly
# 2. All native regions **without** a renaming property should be passed through
# 3. All regions which are explicitly named should be dropped
# Testing strategy:
# 1. Rename region_a -> region_A
# 2. Leave region_B untouched
# 3. Drop region_C
test_df = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "region_a", "Primary Energy", "EJ/yr", 1, 2],
["model_a", "scen_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["model_a", "scen_a", "region_C", "Primary Energy", "EJ/yr", 5, 6],
],
columns=IAMC_IDX + [2005, 2010],
)
)
exp = copy.deepcopy(test_df)
exp.filter(region=["region_a", "region_B"], inplace=True)
exp.rename(region={"region_a": "region_A"}, inplace=True)
obs = process(
test_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing/rename_only"
),
)
assert_iamframe_equal(obs, exp)
@pytest.mark.parametrize(
"rp_dir", ["region_processing/rename_only", "region_processing/empty_aggregation"]
)
def test_region_processing_empty_raises(rp_dir):
# Test that an empty result of the region-processing raises
# see also https://github.com/IAMconsortium/pyam/issues/631
test_df = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "region_foo", "Primary Energy", "EJ/yr", 1, 2],
["model_b", "scen_a", "region_foo", "Primary Energy", "EJ/yr", 1, 2],
],
columns=IAMC_IDX + [2005, 2010],
)
)
with pytest.raises(ValueError, match=("'model_a', 'model_b'.*empty dataset")):
process(
test_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(TEST_DATA_DIR / rp_dir),
)
def test_region_processing_no_mapping(simple_df):
# Test that a model without a mapping is passed untouched
exp = copy.deepcopy(simple_df)
obs = process(
simple_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing/no_mapping"
),
)
assert_iamframe_equal(obs, exp)
def test_region_processing_aggregate():
# Test only the aggregation feature
test_df = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["model_a", "scen_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["model_a", "scen_a", "region_C", "Primary Energy", "EJ/yr", 5, 6],
["model_a", "scen_b", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["model_a", "scen_b", "region_B", "Primary Energy", "EJ/yr", 3, 4],
],
columns=IAMC_IDX + [2005, 2010],
)
)
exp = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["model_a", "scen_b", "World", "Primary Energy", "EJ/yr", 4, 6],
],
columns=IAMC_IDX + [2005, 2010],
)
)
obs = process(
test_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing/aggregate_only"
),
)
assert_iamframe_equal(obs, exp)
@pytest.mark.parametrize(
"directory", ("complete_processing", "complete_processing_list")
)
def test_region_processing_complete(directory):
# Test all three aspects of region processing together:
# 1. Renaming
# 2. Passing models without a mapping
# 3. Aggregating correctly
test_df = IamDataFrame(
pd.DataFrame(
[
["m_a", "s_a", "region_a", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["m_a", "s_a", "region_C", "Primary Energy", "EJ/yr", 5, 6],
["m_a", "s_a", "region_a", "Primary Energy|Coal", "EJ/yr", 0.5, 1],
["m_a", "s_a", "region_B", "Primary Energy|Coal", "EJ/yr", 1.5, 2],
["m_b", "s_b", "region_A", "Primary Energy", "EJ/yr", 1, 2],
],
columns=IAMC_IDX + [2005, 2010],
)
)
exp = IamDataFrame(
pd.DataFrame(
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["m_a", "s_a", "region_A", "Primary Energy|Coal", "EJ/yr", 0.5, 1],
["m_a", "s_a", "region_B", "Primary Energy|Coal", "EJ/yr", 1.5, 2],
["m_a", "s_a", "World", "Primary Energy|Coal", "EJ/yr", 2, 3],
["m_b", "s_b", "region_A", "Primary Energy", "EJ/yr", 1, 2],
],
columns=IAMC_IDX + [2005, 2010],
)
)
obs = process(
test_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing" / directory
),
)
assert_iamframe_equal(obs, exp)
@pytest.mark.parametrize(
"folder, exp_df, args",
[
(
"weighted_aggregation",
[
["model_a", "scen_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["model_a", "scen_a", "World", "Emissions|CO2", "Mt CO2", 5, 8],
["model_a", "scen_a", "World", "Price|Carbon", "USD/t CO2", 2.8, 7.0],
],
None,
),
(
"weighted_aggregation_rename",
[
["model_a", "scen_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["model_a", "scen_a", "World", "Emissions|CO2", "Mt CO2", 5, 8],
["model_a", "scen_a", "World", "Price|Carbon", "USD/t CO2", 2.8, 7.0],
["model_a", "scen_a", "World", "Price|Carbon (Max)", "USD/t CO2", 3, 8],
],
None,
),
# check that region-aggregation with missing weights passes (inconsistent index)
# TODO check the log output
(
"weighted_aggregation",
[
["model_a", "scen_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["model_a", "scen_a", "World", "Emissions|CO2", "Mt CO2", 5, np.nan],
],
dict(variable="Emissions|CO2", year=2010, keep=False),
),
],
)
def test_region_processing_weighted_aggregation(folder, exp_df, args, caplog):
# test a weighed sum
test_df = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["model_a", "scen_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["model_a", "scen_a", "region_A", "Emissions|CO2", "Mt CO2", 4, 6],
["model_a", "scen_a", "region_B", "Emissions|CO2", "Mt CO2", 1, 2],
["model_a", "scen_a", "region_A", "Price|Carbon", "USD/t CO2", 3, 8],
["model_a", "scen_a", "region_B", "Price|Carbon", "USD/t CO2", 2, 4],
],
columns=IAMC_IDX + [2005, 2010],
)
)
if args is not None:
test_df = test_df.filter(**args)
exp = IamDataFrame(pd.DataFrame(exp_df, columns=IAMC_IDX + [2005, 2010]))
obs = process(
test_df,
DataStructureDefinition(TEST_DATA_DIR / "region_processing" / folder / "dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing" / folder / "aggregate"
),
)
assert_iamframe_equal(obs, exp)
# check the logs since the presence of args should cause a warning in the logs
if args:
logmsg = (
"Could not aggregate 'Price|Carbon' for region 'World' "
"({'weight': 'Emissions|CO2'})"
)
assert logmsg in caplog.text
def test_region_processing_skip_aggregation():
test_df = IamDataFrame(
pd.DataFrame(
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
],
columns=IAMC_IDX + [2005, 2010],
)
)
exp = test_df
obs = process(
test_df,
DataStructureDefinition(
TEST_DATA_DIR / "region_processing/skip_aggregation/dsd"
),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing/skip_aggregation/mappings"
),
)
assert_iamframe_equal(obs, exp)
@pytest.mark.parametrize(
"input_data, exp_data, warning",
[
( # Variable is available in provided and aggregated data and the same
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6],
],
[["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6]],
None,
),
( # Variable is only available in the provided data
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
],
[["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6]],
None,
),
( # Variable is only available in the aggregated data
[["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6]],
[["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6]],
None,
),
( # Variable is not available in all scenarios in the provided data
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["m_a", "s_b", "region_A", "Primary Energy", "EJ/yr", 5, 6],
["m_a", "s_b", "region_B", "Primary Energy", "EJ/yr", 7, 8],
["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6],
],
[
["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 4, 6],
["m_a", "s_b", "World", "Primary Energy", "EJ/yr", 12, 14],
],
None,
),
( # Using skip-aggregation: true should only take provided results
[
["m_a", "s_a", "region_A", "Skip-Aggregation", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Skip-Aggregation", "EJ/yr", 3, 4],
["m_a", "s_a", "World", "Skip-Aggregation", "EJ/yr", 10, 11],
],
[["m_a", "s_a", "World", "Skip-Aggregation", "EJ/yr", 10, 11]],
None,
),
( # Using the region-aggregation attribute to create an additional variable
[
["m_a", "s_a", "region_A", "Variable A", "EJ/yr", 1, 10],
["m_a", "s_a", "region_B", "Variable A", "EJ/yr", 10, 1],
["m_a", "s_a", "World", "Variable A", "EJ/yr", 11, 11],
],
[
["m_a", "s_a", "World", "Variable A", "EJ/yr", 11, 11],
["m_a", "s_a", "World", "Variable A (max)", "EJ/yr", 10, 10],
],
None,
),
( # Variable is available in provided and aggregated data but different
[
["m_a", "s_a", "region_A", "Primary Energy", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Primary Energy", "EJ/yr", 3, 4],
["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 5, 6],
],
[["m_a", "s_a", "World", "Primary Energy", "EJ/yr", 5, 6]],
[
"Difference between original and aggregated data:",
"m_a s_a World Primary Energy",
"2005 5 4",
],
),
( # Conflict between overlapping renamed variable and provided data
[
["m_a", "s_a", "region_A", "Variable B", "EJ/yr", 1, 2],
["m_a", "s_a", "region_B", "Variable B", "EJ/yr", 3, 4],
["m_a", "s_a", "World", "Variable B", "EJ/yr", 4, 6],
],
[["m_a", "s_a", "World", "Variable B", "EJ/yr", 4, 6]],
[
"Difference between original and aggregated data:",
"m_a s_a World Variable B EJ/yr",
"2005 4 3",
],
),
],
)
def test_partial_aggregation(input_data, exp_data, warning, caplog):
# Dedicated test for partial aggregation
# Test cases are:
# * Variable is available in provided and aggregated data and the same
# * Variable is only available in the provided data
# * Variable is only available in the aggregated data
# * Variable is not available in all scenarios in the provided data
# * Using skip-aggregation: true should only take provided results
# * Using the region-aggregation attribute to create an additional variable
# * Variable is available in provided and aggregated data but different
obs = process(
IamDataFrame(pd.DataFrame(input_data, columns=IAMC_IDX + [2005, 2010])),
DataStructureDefinition(TEST_DATA_DIR / "region_processing/dsd"),
processor=RegionProcessor.from_directory(
TEST_DATA_DIR / "region_processing/partial_aggregation"
),
)
exp = IamDataFrame(pd.DataFrame(exp_data, columns=IAMC_IDX + [2005, 2010]))
# Assert that we get the expected values
assert_iamframe_equal(obs, exp)
# Assert that we get the correct warnings
if warning is None:
assert "WARNING" not in caplog.text
else:
assert all(c in caplog.text for c in warning)
|
import numpy as np
import re
import xml.etree.cElementTree as ET
input = None
with open("svm.model") as f:
input = f.readlines()
svm_type = re.match(r"svm_type ([\w_]+)", input[0]).groups()[0]
kernel_type = re.match(r"kernel_type ([\w]+)", input[1]).groups()[0]
gamma = float(re.match(r"gamma ([-\d.]+)", input[2]).groups()[0])
nr_class = int(re.match(r"nr_class ([\d]+)", input[3]).groups()[0])
total_sv = int(re.match(r"total_sv ([\d]+)", input[4]).groups()[0])
rho = list(map(float, re.findall(r"([-\d.]+)", input[5])))
label = list(map(int, re.findall(r"([\d]+)", input[6])))
nr_sv = list(map(int, re.findall(r"([\d]+)", input[7])))
sv_coef = np.zeros((total_sv, nr_class-1), dtype=float)
total_sv_val = max([int(idx) for idx, val in re.findall(r"([\d]+):([-\d.]+)", input[9])])
SVs = np.zeros((total_sv, total_sv_val), dtype=float)
for i in range(9, total_sv+9):
coef = re.findall(r"([-\d.]+)", input[i])
for j in range(nr_class-1):
sv_coef[i-9,j] = float(coef[j])
sv = {int(idx): float(val) for idx, val in re.findall(r"([\d]+):([-\d.]+)", input[i])}
for j in range(total_sv_val):
if j+1 in sv:
SVs[i-9,j] = sv[j+1]
xml_os = ET.Element("opencv_storage")
xml_ms = ET.SubElement(xml_os, "ish_svm", type_id="opencv-ml-svm")
ET.SubElement(xml_ms, "svm_type").text = svm_type.upper()
xml_k = ET.SubElement(xml_ms, "kernel")
ET.SubElement(xml_k, "type").text = kernel_type.upper()
ET.SubElement(xml_k, "gamma").text = str(gamma)
ET.SubElement(xml_ms, "C").text = str(1) # default: C=1, only used during training phase
ET.SubElement(xml_ms, "var_all").text = str(total_sv_val)
ET.SubElement(xml_ms, "var_count").text = str(total_sv_val)
ET.SubElement(xml_ms, "class_count").text = str(nr_class)
xml_cl = ET.SubElement(xml_ms, "class_labels", type_id="opencv-matrix")
ET.SubElement(xml_cl, "rows").text = str(1)
ET.SubElement(xml_cl, "cols").text = str(nr_class)
ET.SubElement(xml_cl, "dt").text = "i"
ET.SubElement(xml_cl, "data").text = " ".join(map(str, label))
ET.SubElement(xml_ms, "sv_total").text = str(total_sv)
xml_sv = ET.SubElement(xml_ms, "support_vectors")
for i in range(0, total_sv):
ET.SubElement(xml_sv, "_").text = " ".join(map(str, SVs[i,:]))
xml_df = ET.SubElement(xml_ms, "decision_functions")
# results in a total of nr_class*(nr_class-1)/2 decision functions
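# e.g. nr_class = 3 yields 3*(3-1)/2 = 3 one-vs-one decision functions (illustrative).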
rho_idx = 0
for i in range(0, nr_class):
for j in range(i+1, nr_class):
xml_dfe = ET.SubElement(xml_df, "_")
ET.SubElement(xml_dfe, "sv_count").text = str(nr_sv[i]+nr_sv[j])
ET.SubElement(xml_dfe, "rho").text = str(rho[rho_idx])
rho_idx += 1
alphas_c1 = sv_coef[sum(nr_sv[:i]):sum(nr_sv[:i])+nr_sv[i], j-1]
alphas_c2 = sv_coef[sum(nr_sv[:j]):sum(nr_sv[:j])+nr_sv[j], i]
alphas = np.concatenate((alphas_c1, alphas_c2), axis=0)
ET.SubElement(xml_dfe, "alpha").text = " ".join(map(str, alphas))
idxs_c1 = [idx for idx in range(sum(nr_sv[:i]),sum(nr_sv[:i])+nr_sv[i])]
idxs_c2 = [idx for idx in range(sum(nr_sv[:j]),sum(nr_sv[:j])+nr_sv[j])]
idxs = np.concatenate((idxs_c1, idxs_c2), axis=0)
ET.SubElement(xml_dfe, "index").text = " ".join(map(str, idxs))
tree = ET.ElementTree(xml_os)
tree.write("svm.xml", xml_declaration=True)
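# The XML mirrors the legacy OpenCV ML storage layout; depending on the OpenCV release the file
# may be loadable via the ML module, e.g. cv2.ml.SVM_load("svm.xml") in OpenCV 3+ (illustrative,
# not verified here).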
|
#!/usr/bin/env python
#
import collections
import datetime
import logging
import yfinance
import syncfin.db.model as mydb
import syncfin.utils.parallel as parallel
log = logging.getLogger(__name__)
class TickerPull(object):
def _update_db(self, db, index, row):
values = {
'date': ('%s' % index).split()[0],
'close': round(row['Close'], 2),
'open': round(row['Open'], 2),
'high': round(row['High'], 2),
'low': round(row['Low'], 2),
'split': row.get('Stock Splits', 0),
'volume': int(row['Volume'])
}
db.write(**values)
def update(self, tckr):
"""
Updates 'tckr' history in local database.
"""
with mydb.Ticker() as db:
if db.table_exists(db.table_name(tckr)):
db.set_table(tckr)
start_date = db.max('date')
end_date = '%s' % datetime.date.today()
if start_date == end_date:
return # Nothing to update.
data = yfinance.download(tckr,
start=start_date, end=end_date)
else:
db.add_new_table(tckr)
db.set_table(tckr)
_tckr = yfinance.Ticker(tckr)
# get historical market data
data = _tckr.history(period="max")
for index, row in data.iterrows():
try:
self._update_db(db, index, row)
except Exception as err:
if 'UNIQUE constraint failed' in err.args[0]:
# Trying to update for same date again.
continue
elif 'cannot convert float NaN to integer' in err.args[0]:
continue
log.error("Cannot process %s : %s. \nError - %r ", index, row, err)
def update_till_today(self, tckrs):
print ("Updating Historical data for : %s" % ' '.join(tckrs))
for tckr in tckrs:
try:
self.update(tckr)
print("%s ... Done" % tckr)
except Exception as _:
print("%s ... Skipped due to error" % tckr)
class ProfileUpdate(object):
def update(self, tckr):
with mydb.CompanyProfile() as db:
db.table = db.TABLE
info = yfinance.Ticker(tckr).info
values = {}
for field in db._get_fields(db.TABLE):
if field == 'date':
continue
try:
val = info.get(field, '')
if val:
values[field] = val
except Exception:
pass # ignore fields which are not available.
values['date'] = datetime.datetime.today().strftime('%Y-%m-%d')
db.write(**values)
def _update(self, tckr):
try:
self.update(tckr)
log.info("%s (Summary) ... Done" % tckr)
except Exception as _:
log.info("%s ... Skipped due to error" % tckr)
def update_all(self, tckrs):
print ("Updating Profile data for : %s" % ' '.join(tckrs))
params = [(tckr, (tckr,) , {}) for tckr in tckrs]
parallel.ThreadPool(self._update, params)
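# Minimal usage sketch (illustrative; the ticker symbols are placeholders):
# if __name__ == '__main__':
#     TickerPull().update_till_today(['AAPL', 'MSFT'])
#     ProfileUpdate().update_all(['AAPL', 'MSFT'])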
|
"""Nice function to make figures for slides (not used in the framework)."""
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def plot_decomposition_graph(
graph,
nodes_color="#6886b7",
weight="weight",
colormap="jet",
ax=None,
edge_width=1,
edges_step=1,
edge_alpha=0.2,
figsize=(8, 8),
):
xx = np.array(sorted(graph.nodes))
L = xx.max()
edges = [
(start, end, data[weight] / (end - start))
for start, end, data in graph.edges(data=True)
]
edges = edges[::edges_step]
edges = sorted(edges, key=lambda e: e[2])
max_segment_length = max([end - start for (start, end, _) in edges])
weights = np.array([w for (_, _, w) in edges])
normalized_weights = (255 * weights / weights.max()).astype("uint8")
colormap = cm.__dict__[colormap]
colors = colormap(normalized_weights, alpha=edge_alpha)
if ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis("off")
# print (list(colors))
for (start, end, w), color in zip(edges, colors):
xc = 0.5 * (start + end)
half = 0.5 * abs(end - start)
ax.add_patch(
mpatches.Arc(
(xc, 0),
2 * half,
2 * half,
theta1=0,
theta2=180,
facecolor="none",
ls="-",
edgecolor=color,
linewidth=1,
)
)
ax.plot(xx, [0 for x in xx], marker="o", c=nodes_color)
ax.set_aspect("equal")
ax.set_xlim(-1, L + 1)
ax.set_ylim(-1, max_segment_length / 2 + 1)
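# Minimal usage sketch (illustrative; assumes a networkx-style graph with numeric nodes and a
# 'weight' attribute on every edge, which is what graph.nodes / graph.edges(data=True) imply):
# import networkx as nx
# g = nx.Graph()
# g.add_nodes_from(range(10))
# g.add_edge(0, 5, weight=2.0)
# g.add_edge(2, 9, weight=1.0)
# plot_decomposition_graph(g)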
|
import csv
import gzip
import logging
import os.path
from collections import namedtuple
from itertools import chain, takewhile, groupby, product
from os import getcwd
import pandas as pd
from more_itertools import peekable
from py.path import local
from nltk.corpus import brown, CategorizedTaggedCorpusReader
from nltk.corpus.reader.bnc import BNCCorpusReader
from nltk.parse.dependencygraph import DependencyGraph, DependencyGraphError
from nltk.parse.stanford import StanfordDependencyParser
from nltk.stem.snowball import SnowballStemmer
logger = logging.getLogger(__name__)
Token = namedtuple('Token', 'word, stem, tag')
Dependency = namedtuple('Dependency', 'head, relation, dependant')
class BNC:
def __init__(self, root, paths):
self.root = root
self.paths = paths
@classmethod
def init_kwargs(cls, root=None, fileids=r'[A-K]/\w*/\w*\.xml'):
if root is None:
root = os.path.join(getcwd(), 'BNC', 'Texts')
return dict(
root=root,
paths=BNCCorpusReader(root=root, fileids=fileids).fileids(),
)
def words_by_document(self, path):
def it():
reader = BNCCorpusReader(fileids=path, root=self.root)
words_tags = reader.tagged_words(stem=False)
stems = (s for s, _ in reader.tagged_words(stem=True))
for (word, tag), stem in zip(words_tags, stems):
yield Token(word, stem, tag)
# Consider the whole file as one document!
yield it()
class Brown:
def __init__(self, root, paths):
self.root = root
self.paths = paths
@classmethod
def init_kwargs(cls, root=None, fileids=None):
return dict(
root=brown.root if root is None else root,
paths=brown.fileids() if fileids is None else fileids,
)
def words_by_document(self, path):
stemmer = SnowballStemmer('english')
def it():
reader = CategorizedTaggedCorpusReader(
fileids=[path],
root=self.root,
cat_file='cats.txt',
tagset='brown',
encoding='ascii',
)
for word, tag in reader.tagged_words():
stem = stemmer.stem(word)
yield Token(word, stem, tag)
# Consider the whole file as one document!
yield it()
class BNC_CCG:
def __init__(self, paths):
self.paths = paths
@classmethod
def init_kwargs(cls, root=None):
if root is None:
root = os.path.join(getcwd(), 'corpora', 'CCG_BNC_v1')
return dict(
paths=[str(n) for n in local(root).visit() if n.check(file=True, exists=True)],
)
def words_by_document(self, path):
def word_tags(dependencies, tokens):
for token in tokens.values():
# TODO: return a namedtuple?
yield token.word, token.stem, token.tag
# Consider the whole file as one document!
for dependencies, tokens in self.ccg_bnc_iter(path):
yield word_tags(dependencies, tokens)
def ccg_bnc_iter(self, f_name):
with open(f_name, 'rt', encoding='utf8') as f:
# Get rid of trailing whitespace.
lines = (l.strip() for l in f)
while True:
# Sentences are split by an empty line.
sentence = list(takewhile(bool, lines))
if not sentence:
                    # No line was taken, which means the whole file has been read.
break
# Take extra care of comments.
sentence = [l for l in sentence if not l.startswith('#')]
if not sentence:
# If we got nothing, but comments: skip.
continue
*dependencies, c = sentence
tokens = dict(self.parse_tokens(c))
dependencies = self.parse_dependencies(dependencies)
yield dependencies, tokens
def verb_subject_object_iter(self, path):
for dependencies, tokens in self.ccg_bnc_iter(path):
yield from self._collect_verb_subject_object(dependencies, tokens)
def _collect_verb_subject_object(self, dependencies, tokens):
"""Retrieve verb together with it's subject and object from a C&C parsed file.
File format description [1] or Table 13 in [2].
[1] http://svn.ask.it.usyd.edu.au/trac/candc/wiki/MarkedUp
[2] http://anthology.aclweb.org/J/J07/J07-4004.pdf
"""
dependencies = sorted(
d for d in dependencies if d.relation in ('dobj', 'ncsubj')
)
for head_id, group in groupby(dependencies, lambda d: d.head):
group = list(group)
try:
(_, obj, obj_id), (_, subj, subj_id) = sorted(g for g in group if g.relation in ('dobj', 'ncsubj'))
except ValueError:
pass
else:
                if obj == 'dobj' and subj == 'ncsubj':
try:
yield tuple(chain(tokens[head_id], tokens[subj_id], tokens[obj_id]))
except KeyError:
logger.debug('Invalid group %s', group)
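    # Illustrative sketch of the expected C&C input (token indices are 0-based positions):
    #   (ncsubj wrote_1 John_0 _)
    #   (dobj wrote_1 book_3)
    #   <c> John|John|NNP wrote|write|VBD a|a|DT book|book|NN
    # For this sentence, _collect_verb_subject_object would yield
    #   ('wrote', 'write', 'VBD', 'John', 'John', 'NNP', 'book', 'book', 'NN').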
def dependencies_iter(self, path):
def collect_dependencies(dependencies, tokens):
for d in dependencies:
yield Dependency(tokens[d.head], d.relation, tokens[d.dependant])
# Consider the whole file as one document!
for dependencies, tokens in self.ccg_bnc_iter(path):
yield from collect_dependencies(dependencies, tokens)
def parse_dependencies(self, dependencies):
"""Parse and filter out verb subject/object dependencies from a C&C parse."""
for dependency in dependencies:
assert dependency[0] == '('
assert dependency[-1] == ')'
dependency = dependency[1:-1]
split_dependency = dependency.split()
split_dependency_len = len(split_dependency)
if split_dependency_len == 3:
# (dobj in_15 judgement_17)
relation, head, dependant = split_dependency
elif split_dependency_len == 4:
empty = lambda r: r == '_' or '_' not in r
if empty(split_dependency[-1]):
# (ncsubj being_19 judgement_17 _)
# (ncsubj laid_13 rule_12 obj)
relation, head, dependant = split_dependency[:-1]
elif empty(split_dependency[1]):
# (xmod _ judgement_17 as_18)
# (ncmod poss CHOICE_4 IT_1)
relation, _, head, dependant = split_dependency
else:
# (cmod who_11 people_3 share_12)
logger.debug('Ignoring dependency: %s', dependency)
continue
else:
logger.debug('Invalid dependency: %s', dependency)
continue
parse_argument = lambda a: int(a.split('_')[1])
try:
head_id = parse_argument(head)
dependant_id = parse_argument(dependant)
except (ValueError, IndexError):
logger.debug('Could not extract dependency argument: %s', dependency)
continue
yield Dependency(head_id, relation, dependant_id)
def parse_tokens(self, c):
"""Parse and retrieve token position, word, stem and tag from a C&C parse."""
assert c[:4] == '<c> '
c = c[4:]
for position, token in enumerate(c.split()):
word, stem, tag, *_ = token.split('|')
yield position, Token(word, stem, tag)
def ukwac_cell_extractor(cells):
word, lemma, tag, feats, head, rel = cells
return word, lemma, tag, tag, feats, head, rel
class UKWAC:
def __init__(self, paths, file_passes, lowercase_stem, limit):
self.paths = paths
self.file_passes = int(file_passes)
self.lowercase_stem = lowercase_stem
self.limit = int(limit) if limit is not None else None
@classmethod
def init_kwargs(
cls,
root=None,
workers_count=16,
lowercase_stem=False,
limit=None,
):
if root is None:
root = os.path.join(getcwd(), 'dep_parsed_ukwac')
paths = [
str(n) for n in local(root).visit()
if n.check(file=True, exists=True)
]
file_passes = max(1, workers_count // len(paths))
paths = list(
chain.from_iterable(
((i, p) for p in paths)
for i in range(file_passes)
)
)
assert lowercase_stem in ('', 'y', False, True)
lowercase_stem = bool(lowercase_stem)
return dict(
paths=paths,
file_passes=file_passes,
lowercase_stem=lowercase_stem,
limit=limit,
)
def words_by_document(self, path):
for document in self.documents(path):
yield self.document_words(document)
def documents(self, path):
file_pass, path = path
with gzip.open(path, 'rt', encoding='ISO-8859-1') as f:
lines = (l.rstrip() for l in f)
lines = peekable(
l for l in lines
if not l.startswith('<text') and l != '<s>'
)
c = 0
while lines:
if (c % (10 ** 4)) == 0:
logger.debug(
'%s text elements are read, every %s is processed. '
'It\'s about %.2f of the file.',
c,
self.file_passes,
c / 550000, # An approximate number of texts in a file.
)
if (self.limit is not None) and (c > self.limit):
logger.info('Limit of sentences is reached.')
break
document = list(takewhile(lambda l: l != '</text>', lines))
if (c % self.file_passes) == file_pass:
yield document
c += 1
def document_words(self, document):
for dg in self.document_dependency_graphs(document):
# Make sure that nodes are sorted by the position in the sentence.
for _, node in sorted(dg.nodes.items()):
if node['word'] is not None:
yield self.node_to_token(node)
def verb_subject_object_iter(self, path):
for document in self.documents(path):
for dg in self.document_dependency_graphs(document):
for node in dg.nodes.values():
if node['tag'][0] == 'V':
if 'SBJ' in node['deps'] and 'OBJ' in node['deps']:
for sbj_address, obj_address in product(
node['deps']['SBJ'],
node['deps']['OBJ'],
):
sbj = dg.nodes[sbj_address]
obj = dg.nodes[obj_address]
yield (
node['word'],
node['lemma'],
node['tag'],
sbj['word'],
sbj['lemma'],
sbj['tag'],
obj['word'],
obj['lemma'],
obj['tag'],
)
def document_dependency_graphs(self, document):
document = peekable(iter(document))
while document:
sentence = list(takewhile(lambda l: l != '</s>', document))
if not sentence:
# It might happen because of the snippets like this:
#
# plates plate NNS 119 116 PMOD
# </text>
# </s>
# <text id="ukwac:http://www.learning-connections.co.uk/curric/cur_pri/artists/links.html">
# <s>
# Ideas Ideas NP 1 14 DEP
#
# where </text> is before </s>.
continue
try:
dg = DependencyGraph(
sentence,
cell_extractor=ukwac_cell_extractor,
cell_separator='\t',
)
except DependencyGraphError:
logger.exception("Couldn't instantiate a dependency graph.")
else:
for node in dg.nodes.values():
if self.lowercase_stem and node['lemma']:
node['lemma'] = node['lemma'].lower()
yield dg
def node_to_token(self, node):
return Token(node['word'], node['lemma'], node['tag'])
def dependencies_iter(self, path):
for document in self.documents(path):
for dg in self.document_dependency_graphs(document):
for node in dg.nodes.values():
if node['head'] is not None:
yield Dependency(
self.node_to_token(dg.nodes[node['head']]),
node['rel'],
self.node_to_token(node)
)
class SingleFileDatasetMixIn:
def __init__(self, paths, tagset):
self.paths = paths
self.tagset = tagset
@classmethod
def init_kwargs(cls, root=None, tagset='ukwac'):
if root is None:
root = os.path.join(getcwd(), cls.default_file_name)
return {
'paths': [root],
'tagset': tagset,
}
class KS13(SingleFileDatasetMixIn):
# TODO: Corpus readers should define tag mapping!
vectorizer = 'compositional'
default_file_name = 'emnlp2013_turk.txt'
def read_file(self, group=False):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep=' ',
usecols=(
'subject1', 'verb1', 'object1',
'subject2', 'verb2', 'object2',
'score',
),
)
for item, tag in (
('subject1', 'N'),
('verb1', 'V'),
('object1', 'N'),
('subject2', 'N'),
('verb2', 'V'),
('object2', 'N'),
):
df['{}_tag'.format(item)] = tag_mappings[self.tagset][tag]
if group:
df = df.groupby(
[
'subject1', 'subject1_tag', 'verb1', 'verb1_tag', 'object1', 'object1_tag',
'subject2', 'subject2_tag', 'verb2', 'verb2_tag', 'object2', 'object2_tag',
],
as_index=False,
).mean()
return df
def words_by_document(self, path):
# Part of CorpusReader
df = self.read_file()
def words_iter(rows):
for _, row in rows:
for item in (
'subject1', 'verb1', 'object1',
'subject2', 'verb2', 'object2',
):
word = stem = row[item]
t = row['{}_tag'.format(item)]
yield word, stem, t
yield words_iter(df.iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file(group=True)
for _, row in df.iterrows():
yield (
transitive_sentence_to_graph(
row['subject1'], row['subject1_tag'],
row['verb1'], row['verb1_tag'],
row['object1'], row['object1_tag'],
),
transitive_sentence_to_graph(
row['subject2'], row['subject2_tag'],
row['verb2'], row['verb2_tag'],
row['object2'], row['object2_tag'],
),
row['score']
)
class PhraseRel(SingleFileDatasetMixIn):
# TODO: Corpus readers should define tag mapping!
vectorizer = 'compositional'
extra_fields = 'relevance_type',
default_file_name = 'phraserel.csv'
def read_file(self):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep=',',
usecols=(
'query_subject', 'query_verb', 'query_object',
'document_subject', 'document_verb', 'document_object',
'relevance_type', 'relevance_mean',
),
)
for item, tag in (
('query_subject', 'N'),
('query_verb', 'V'),
('query_object', 'N'),
('document_subject', 'N'),
('document_verb', 'V'),
('document_object', 'N'),
):
df['{}_tag'.format(item)] = tag_mappings[self.tagset][tag]
return df
def words_by_document(self, path):
# Part of CorpusReader
df = self.read_file()
def words_iter(rows):
for _, row in rows:
for item in (
'query_subject', 'query_verb', 'query_object',
'document_subject', 'document_verb', 'document_object',
):
word = stem = row[item]
t = row['{}_tag'.format(item)]
yield word, stem, t
yield words_iter(df.iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file()
for _, row in df.iterrows():
yield (
transitive_sentence_to_graph(
row['query_subject'], row['query_subject_tag'],
row['query_verb'], row['query_verb_tag'],
row['query_object'], row['query_object_tag'],
),
transitive_sentence_to_graph(
row['document_subject'], row['document_subject_tag'],
row['document_verb'], row['document_verb_tag'],
row['document_object'], row['document_object_tag'],
),
row['relevance_mean'],
row['relevance_type'],
)
def transitive_sentence_to_graph(s, s_t, v, v_t, o, o_t):
template = (
'{s}\t{s_t}\t2\tSBJ\n'
'{v}\t{v_t}\t0\tROOT\n'
'{o}\t{o_t}\t2\tOBJ\n'
)
return DependencyGraph(
template.format(
s=s, s_t=s_t,
v=v, v_t=v_t,
o=o, o_t=o_t,
)
)
class GS11(SingleFileDatasetMixIn):
"""Transitive sentence disambiguation dataset described in [1].
The data is available at [2].
[1] Grefenstette, Edward, and Mehrnoosh Sadrzadeh. "Experimental support
for a categorical compositional distributional model of meaning."
Proceedings of the Conference on Empirical Methods in Natural Language
Processing. Association for Computational Linguistics, 2011.
[2] http://www.cs.ox.ac.uk/activities/compdistmeaning/GS2011data.txt
"""
# TODO: Corpus readers should define tag mapping!
vectorizer = 'compositional'
default_file_name = 'GS2011data.txt'
def read_file(self, group=False):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep=' ',
usecols=(
'verb', 'subject', 'object', 'landmark', 'input',
),
)
for item, tag in (
('subject', 'N'),
('verb', 'V'),
('object', 'N'),
('landmark', 'V'),
):
df['{}_tag'.format(item)] = tag_mappings[self.tagset][tag]
if group:
df = df.groupby(
[
'subject', 'subject_tag', 'verb', 'verb_tag', 'object', 'object_tag',
'landmark', 'landmark_tag'
],
as_index=False,
).mean()
return df
def words_by_document(self, path):
# Part of CorpusReader
df = self.read_file()
def words_iter(rows):
for _, row in rows:
for item in (
'subject', 'verb', 'object', 'landmark'
):
word = stem = row[item]
t = row['{}_tag'.format(item)]
yield word, stem, t
yield words_iter(df.iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file(group=True)
for _, row in df.iterrows():
yield (
transitive_sentence_to_graph(
row['subject'], row['subject_tag'],
row['verb'], row['verb_tag'],
row['object'], row['object_tag'],
),
transitive_sentence_to_graph(
row['subject'], row['subject_tag'],
row['landmark'], row['landmark_tag'],
row['object'], row['object_tag'],
),
row['input']
)
class GS12(SingleFileDatasetMixIn):
# TODO: Corpus readers should define tag mapping!
vectorizer = 'compositional'
default_file_name = 'GS2012data.txt'
def read_file(self, group=False):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep=' ',
usecols=(
'adj_subj', 'subj', 'verb', 'landmark', 'adj_obj', 'obj', 'annotator_score'
),
)
for item, tag in (
('adj_subj', 'J'),
('subj', 'N'),
('verb', 'V'),
('adj_obj', 'J'),
('obj', 'N'),
('landmark', 'V'),
):
df['{}_tag'.format(item)] = tag_mappings[self.tagset][tag]
if group:
df = df.groupby(
[
'adj_subj', 'adj_subj_tag',
'subj', 'subj_tag',
'verb', 'verb_tag',
'adj_obj', 'adj_obj_tag',
'obj', 'obj_tag',
'landmark', 'landmark_tag'
],
as_index=False,
).mean()
return df
def words_by_document(self, path):
# Part of CorpusReader
df = self.read_file()
def words_iter(rows):
for _, row in rows:
for item in (
'adj_subj', 'subj', 'verb', 'adj_obj', 'obj', 'landmark'
):
word = stem = row[item]
t = row['{}_tag'.format(item)]
yield word, stem, t
yield words_iter(df.iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file(group=True)
for _, row in df.iterrows():
yield (
self.sentence_to_graph(
row['adj_subj'], row['adj_subj_tag'],
row['subj'], row['subj_tag'],
row['verb'], row['verb_tag'],
row['adj_obj'], row['adj_obj_tag'],
row['obj'], row['obj_tag'],
),
self.sentence_to_graph(
row['adj_subj'], row['adj_subj_tag'],
row['subj'], row['subj_tag'],
row['landmark'], row['landmark_tag'],
row['adj_obj'], row['adj_obj_tag'],
row['obj'], row['obj_tag'],
),
row['annotator_score']
)
def sentence_to_graph(self, sa, sa_t, s, s_t, v, v_t, oa, oa_t, o, o_t):
template = (
'{sa}\t{sa_t}\t2\tamod\n'
'{s}\t{s_t}\t3\tSBJ\n'
'{v}\t{v_t}\t0\tROOT\n'
'{oa}\t{oa_t}\t2\tamod\n'
'{o}\t{o_t}\t3\tOBJ\n'
)
return DependencyGraph(
template.format(
sa=sa, sa_t=sa_t,
s=s, s_t=s_t,
v=v, v_t=v_t,
oa=oa, oa_t=oa_t,
o=o, o_t=o_t,
)
)
class SimLex999(SingleFileDatasetMixIn):
# TODO: Corpus readers should define tag mapping!
vectorizer = 'lexical'
default_file_name = 'SimLex-999.txt'
def read_file(self):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep='\t',
usecols=('word1', 'word2', 'POS', 'SimLex999'),
)
df.loc[df['POS'] == 'N', 'POS'] = tag_mappings[self.tagset]['N']
df.loc[df['POS'] == 'V', 'POS'] = tag_mappings[self.tagset]['V']
df.loc[df['POS'] == 'A', 'POS'] = tag_mappings[self.tagset]['J']
return df
def words_by_document(self, path):
# Part of CorpusReader
def words_iter(rows):
for _, row in rows:
for item in ('word1', 'word2'):
word = stem = row[item]
t = row['POS']
yield word, stem, t
yield words_iter(self.read_file().iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file()
for _, row in df.iterrows():
yield (
self.sentence_to_graph(
row['word1'], row['POS'],
),
self.sentence_to_graph(
row['word2'], row['POS'],
),
row['SimLex999']
)
def sentence_to_graph(self, w, t):
template = (
'{w}\t{t}\t0\tROOT\n'
)
return DependencyGraph(template.format(w=w, t=t))
class MEN(SingleFileDatasetMixIn):
# TODO: Corpus readers should define tag mapping!
vectorizer = 'lexical'
default_file_name = 'MEN_dataset_lemma_form_full'
def read_file(self):
# TODO: should be moved away from here.
from fowler.corpora.wsd.datasets import tag_mappings
df = pd.read_csv(
self.paths[0],
sep=' ',
names=('token1', 'token2', 'score'),
)
def split(item):
tag = 'tag{}'.format(item)
word_token = df['token{}'.format(item)].str.split('-', expand=True)
df['word{}'.format(item)] = word_token[0]
df[tag] = word_token[1]
df.loc[df[tag] == 'n', tag] = tag_mappings[self.tagset]['N']
df.loc[df[tag] == 'v', tag] = tag_mappings[self.tagset]['V']
df.loc[df[tag] == 'j', tag] = tag_mappings[self.tagset]['J']
split('1')
split('2')
return df
def words_by_document(self, path):
# Part of CorpusReader
def words_iter(rows):
for _, row in rows:
for item in ('1', '2'):
word = stem = row['word{}'.format(item)]
t = row['tag{}'.format(item)]
yield word, stem, t
yield words_iter(self.read_file().iterrows())
def dependency_graphs_pairs(self):
# Part of Dataset
df = self.read_file()
for _, row in df.iterrows():
yield (
self.sentence_to_graph(
row['word1'], row['tag1'],
),
self.sentence_to_graph(
row['word2'], row['tag2'],
),
row['score']
)
def sentence_to_graph(self, w, t):
template = (
'{w}\t{t}\t0\tROOT\n'
)
return DependencyGraph(template.format(w=w, t=t))
class MSRParaphraseCorpus():
# TODO: Corpus readers should define tag mapping!
vectorizer = 'compositional'
extra_fields = 'split',
def __init__(self, paths, tagset):
self.paths = paths
self.tagset = tagset
@classmethod
def init_kwargs(cls, root=None, tagset='ukwac', split=None):
if root is None:
root = os.path.join(getcwd(), 'MSRParaphraseCorpus')
if split is None:
paths = [
(os.path.join(root, 'msr_paraphrase_train.txt'), 'train'),
(os.path.join(root, 'msr_paraphrase_test.txt'), 'test'),
]
elif split == 'train':
paths = [
(os.path.join(root, 'msr_paraphrase_train.txt'), 'train'),
]
elif split == 'test':
paths = [
(os.path.join(root, 'msr_paraphrase_test.txt'), 'test'),
]
return {
'paths': paths,
'tagset': tagset,
}
def read_file(self):
dfs = []
for path, split in self.paths:
df = pd.read_csv(
path,
sep='\t',
quoting=csv.QUOTE_NONE,
encoding='utf-8-sig',
)
df['split'] = split
dfs.append(df)
df = pd.concat(dfs)
        logger.warning('Replacing `Treasury\x12s` with `Treasurys`.')
df['#1 String'] = df['#1 String'].str.replace('Treasury\x12s', 'Treasurys')
return df
def words_by_document(self, path):
# Part of CorpusReader
def words_iter():
for g1, g2, _ in self.dependency_graphs_pairs():
for g in g1, g2:
for node in g.nodes.values():
if not node['address']:
continue
yield (
node['word'],
node['lemma'],
node['tag'],
)
yield words_iter()
def dependency_graphs_pairs(self):
# Part of Dataset
from fowler.corpora.wsd.datasets import tag_mappings
df = self.read_file()
parser = StanfordDependencyParser()
def parse(string):
dg = next(parser.raw_parse(string))
for node in dg.nodes.values():
if not node['address']:
continue
node['original_tag'] = node['tag']
node['tag'] = tag_mappings[self.tagset][node['tag'][0]]
return dg
for _, row in df.iterrows():
yield (
parse(row['#1 String']),
parse(row['#2 String']),
row['Quality'],
row['split'],
)
|
# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import importlib.util
import os
import sys
from importlib.abc import Loader
from typing import Any
def _load_source(name: str, path: str) -> Any:
spec = importlib.util.spec_from_file_location(name, path)
if not spec:
return None
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
assert isinstance(spec.loader, Loader)
spec.loader.exec_module(module)
return module
idf_path = os.environ['IDF_PATH']
# protocomm component related python files generated from .proto files
constants_pb2 = _load_source('constants_pb2', idf_path + '/components/protocomm/python/constants_pb2.py')
sec0_pb2 = _load_source('sec0_pb2', idf_path + '/components/protocomm/python/sec0_pb2.py')
sec1_pb2 = _load_source('sec1_pb2', idf_path + '/components/protocomm/python/sec1_pb2.py')
sec2_pb2 = _load_source('sec2_pb2', idf_path + '/components/protocomm/python/sec2_pb2.py')
session_pb2 = _load_source('session_pb2', idf_path + '/components/protocomm/python/session_pb2.py')
# wifi_provisioning component related python files generated from .proto files
wifi_constants_pb2 = _load_source('wifi_constants_pb2', idf_path + '/components/wifi_provisioning/python/wifi_constants_pb2.py')
wifi_config_pb2 = _load_source('wifi_config_pb2', idf_path + '/components/wifi_provisioning/python/wifi_config_pb2.py')
wifi_scan_pb2 = _load_source('wifi_scan_pb2', idf_path + '/components/wifi_provisioning/python/wifi_scan_pb2.py')
|
""" Testing the benefits of using pone """
import pyspiel
import time
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from darkhex.algorithms.gel_all_information_states import get_all_information_states
def num_states_encountered():
""" Returns the number of possible states to encounter in a game. """
num_rows = 4
num_cols = 3
# No pone
start = time.time()
game = pyspiel.load_game(
f"dark_hex_ir(num_rows={num_rows},num_cols={num_cols})")
info_states = get_all_information_states(game,
num_rows,
num_cols,
include_terminal_states=True,
get_data=False)
game1_len = len(info_states)
time1 = time.time() - start
print(f'Number of states (No pone): {game1_len}')
print(f'Time: {time1}')
# Pone
start = time.time()
game = pyspiel.load_game(
f"dark_hex_ir(num_rows={num_rows},num_cols={num_cols},use_early_terminal=True)"
)
info_states = get_all_information_states(game,
num_rows,
num_cols,
include_terminal_states=True,
get_data=False)
game2_len = len(info_states)
time2 = time.time() - start
print(f'Number of states (Pone): {game2_len}')
print(f'Time: {time2}')
# plot number of states encountered
plt.figure(figsize=(3, 5))
sns.color_palette("tab10")
sns.set_style("darkgrid")
x = ['p-one', 'no p-one']
y = [game2_len / 100000, game1_len / 100000]
plt.bar(x,
y,
width=.9,
color=sns.color_palette("tab10")[:2],
align='center')
plt.ylabel('Number of states encountered (x100000)')
plt.title(f'Number of states\nfor p-one vs no p-one')
plt.tight_layout()
plt.savefig('darkhex/experiments/results/pone_vs_npone_num_states.pdf')
# plot time
plt.figure(figsize=(3, 5))
x = ['p-one', 'no p-one']
y = [time2, time1]
plt.bar(x,
y,
width=.9,
color=sns.color_palette("tab10")[:2],
align='center')
plt.ylabel('Time (s)')
plt.title(f'Time\nfor p-one vs no p-one')
plt.tight_layout()
plt.savefig('darkhex/experiments/results/pone_vs_npone_time.pdf')
if __name__ == "__main__":
num_states_encountered()
|
"""
Resource and titles
"""
import os
import re
VPC_GATEWAY_ATTACHMENT = "VPCGatewayAttachment"
def vpc_gateway_title(prefix):
"""
The VPC gateway title.
"""
return "%s%s" % (prefix, VPC_GATEWAY_ATTACHMENT)
def vpc_title(prefix):
"""
VPC title
"""
return "%sVPC" % prefix
def bucket_name(prefix, suffix=None):
"""
DNS compliant bucket name.
"""
if suffix:
fmt_str, fmt_args = ("%sApp.%s", (prefix, suffix))
else:
fmt_str, fmt_args = ("%sApp", prefix)
return "-".join([cp.lower() for cp in re.sub("([A-Z])", " \\1", fmt_str % fmt_args).split()])
def packaged_path(template_path, token='packaged'):
"""
Converts a path like ../foo/bar/template.yml to ../foo/bar/template-packaged.yml
"""
dn = os.path.dirname(template_path)
bn = os.path.basename(template_path)
    fn, ext = re.split(r'\.', bn)
fp = os.path.join(dn, fn)
return "{0}-{1}.{2}".format(fp, token, ext)
def subnet_title(prefix, index=1):
"""Subnet title"""
return "%sPublicSubnet%d" % (prefix, index)
|
"""Tests for the helper functions."""
from pyglet.window.key import MOD_SHIFT, MOD_CTRL, MOD_ALT, MOD_NUMLOCK
from shimmer.helpers import bitwise_contains, bitwise_add, bitwise_remove
def test_bitwise_add(subtests):
"""Test that the bitwise_add function works as intended."""
assert bitwise_add(MOD_SHIFT, MOD_CTRL) == 3
assert bitwise_add(MOD_CTRL, MOD_SHIFT) == 3
assert bitwise_add(MOD_SHIFT, MOD_ALT) == 5
assert bitwise_add(MOD_SHIFT, bitwise_add(MOD_CTRL, MOD_ALT)) == 7
with subtests.test("Adding to itself results in itself"):
assert bitwise_add(MOD_SHIFT, MOD_SHIFT) == MOD_SHIFT
def test_bitwise_remove(subtests):
"""Test that the bitwise_remove function works as intended."""
shift_ctrl = bitwise_add(MOD_SHIFT, MOD_CTRL)
with subtests.test("Removing one mask from another works."):
assert bitwise_remove(shift_ctrl, MOD_SHIFT) == MOD_CTRL
assert bitwise_remove(shift_ctrl, MOD_CTRL) == MOD_SHIFT
with subtests.test("Removing a bit that isn't contained results in no change."):
assert bitwise_remove(shift_ctrl, MOD_ALT) == shift_ctrl
with subtests.test("Can remove a single mask to get 0."):
assert bitwise_remove(MOD_SHIFT, MOD_SHIFT) == 0
def test_bitwise_contains(subtests):
"""Test that the bitwise_contains function works as intended."""
shift_alt = bitwise_add(MOD_SHIFT, MOD_ALT)
with subtests.test("Comparing single bits works."):
assert bitwise_contains(MOD_SHIFT, MOD_SHIFT) is True
assert bitwise_contains(MOD_SHIFT, MOD_CTRL) is False
with subtests.test("A single bit matches a multi-bit mask."):
assert bitwise_contains(shift_alt, MOD_SHIFT) is True
assert bitwise_contains(shift_alt, MOD_CTRL) is False
assert bitwise_contains(shift_alt, MOD_ALT) is True
with subtests.test("A multi-bit mask matches a single bit"):
assert bitwise_contains(MOD_SHIFT, shift_alt) is True
with subtests.test("A multi-bit mask matches a multi-bit mask."):
assert bitwise_contains(shift_alt, shift_alt) is True
shift_ctrl = bitwise_add(MOD_SHIFT, MOD_CTRL)
assert bitwise_contains(shift_alt, shift_ctrl) is True
with subtests.test(
"A multi-bit mask with no overlap does not match another multi-bit mask"
):
alt_numlock = bitwise_add(MOD_ALT, MOD_NUMLOCK)
assert bitwise_contains(shift_ctrl, alt_numlock) is False
|
import numpy as np
import decimal
import spectra
class Smear(object):
""" This class smears the energy and radius of a spectra.
    The class can receive energy and radius as individual data points or a
    1 dimensional numpy array to smear, which is then returned. 2d and 3d
    arrays with linked energy, radius and time information are yet to be
    implemented.
Attributes:
_light_yield (float): Number of PMT hits expected for a
MeV energy deposit in NHit/MeV
_position_resolution (float): Sigma in mm
"""
_light_yield = 200. # NHit per MeV
_position_resolution = 100. # mm
def __init__(self):
""" Initialise the Smear class by seeding the random number generator
"""
np.random.seed()
def bin_1d_array(self, array, bins):
""" Sorts a 1 dimensional array and bins it
Args:
array (:class:`numpy.array`): To sort and bin
bins (list): Upper limit of bins
Returns:
A 1 dimensional numpy array, sorted and binned.
"""
array = np.sort(array)
split_at = array.searchsorted(bins)
return np.split(array, split_at)
def calc_gaussian(self, x, mean, sigma):
""" Calculates the value of a gaussian whose integral is equal to
one at position x with a given mean and sigma.
Args:
x : Position to calculate the gaussian
mean : Mean of the gaussian
sigma : Sigma of the gaussian
Returns:
Value of the gaussian at the given position
"""
return np.exp(-(x-mean)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
def floor_to_bin(self, x, bin_size):
""" Rounds down value bin content to lower edge of nearest bin.
Args:
x (float): Value to round down
bin_size (float): Width of a bin
Returns:
Value of nearest lower bin edge
"""
dp = abs(decimal.Decimal(str(bin_size)).as_tuple().exponent)
coef = np.power(10, dp)
return np.floor(coef*(x//bin_size)*bin_size)/coef
def ceil_to_bin(self, x, bin_size):
""" Rounds up value bin content to upper edge of nearest bin.
Args:
          x (float): Value to round up
bin_size (float): Width of a bin
Returns:
          Value of nearest upper bin edge
"""
dp = abs(decimal.Decimal(str(bin_size)).as_tuple().exponent)
coef = np.power(10, dp)
return np.ceil(coef*(bin_size+(x//bin_size)*bin_size))/coef
def get_energy_sigma(self, energy):
""" Calculates sigma at a given energy.
Args:
energy (float): Energy value of data point(s)
Returns:
Sigma equivalent to sqrt(energy/_light_yield)
"""
return np.sqrt(energy/self._light_yield)
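    # Worked example (illustrative): with the default _light_yield of 200 NHit/MeV,
    # a 1 MeV deposit gives sigma = sqrt(1 / 200) ~= 0.07 MeV.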
def smear_energy_0d(self, energy):
""" Smears a single energy value
Args:
energy (float): Value to smear
Returns:
Smeared energy value
"""
sigma = self.get_energy_sigma(energy)
return np.fabs(np.random.normal(energy, sigma))
def smear_energy_1d(self, energies, bins, binned=False):
""" Smears a 1 dimensional array of energy values
Args:
energies (:class:`numpy.array`): Values to smear
bins (list): Upper edge of bins for array
binned (bool): Is the array already binned? (True or False)
Returns:
Smeared and sorted 1 dimensional numpy array of energy values
"""
if binned is False:
energies = self.bin_1d_array(energies, bins)
bin_size = bins[1]-bins[0]
smeared_energies = []
for energy in energies:
if energy.any():
energy_bin = self.floor_to_bin(energy[0], bin_size)+0.5*bin_size
num_entries = len(energy)
smeared_energies += self.smear_energy_bin(energy_bin,
num_entries)
return np.array(smeared_energies)
def smear_energy_bin(self, energy, entries):
""" Smears one energy bin.
Args:
energy (float): Central value of energy of bin
entries (int): Number of entries in the bin
Returns:
A list of smeared energies corresponding to the input bin.
"""
sigma = self.get_energy_sigma(energy)
smeared_energies = []
for i in range(entries):
smeared_energies.append(np.fabs(np.random.normal(energy, sigma)))
return smeared_energies
def smear_radius_0d(self, radius):
""" Smears a single radius value
Args:
radius (float): Value to smear
Returns:
Smeared radius value
"""
return np.fabs(np.random.normal(radius, self._position_resolution))
def smear_radii_1d(self, radii, bins, binned=False):
""" Smears a 1 dimensional array of radius values
Args:
radii (:class:`numpy.array`): Values to smear
bins (list): Upper edge of bins for array
binned (bool): Is the array already binned? (True or False)
Returns:
Smeared and sorted 1 dimensional numpy array of radius values
"""
if binned is False:
radii = self.bin_1d_array(radii, bins)
bin_size = bins[1]-bins[0]
smeared_radii = []
for radius in radii:
if radius.any():
radius_bin = self.floor_to_bin(radius[0], bin_size)+0.5*bin_size
num_entries = len(radius)
smeared_radii += self.smear_radius_bin(radius_bin, num_entries)
return np.array(smeared_radii)
def smear_radius_bin(self, radius, entries):
""" Smears one energy bin.
Args:
radius (float): Central value of radius of bin
entries (int): Number of entries in the bin
Returns:
A list of smeared radii corresponding to the input bin.
"""
smeared_radii = []
for i in range(entries):
smeared_radii.append(np.fabs(np.random.normal(radius,
self._position_resolution)))
return smeared_radii
def random_gaussian_energy_spectra(self, true_spectrum):
""" Smears the energy of a spectra object by generating
a number of random points from a Gaussian pdf generated
for that bin. The number of points generated is equivalent
to the number of entries in that bin.
Args:
true_spectrum (spectra): spectrum to be smeared
Returns:
A smeared spectra object.
"""
energy_step = (true_spectrum._energy_high-true_spectrum._energy_low)/true_spectrum._energy_bins
time_step = (true_spectrum._time_high-true_spectrum._time_low)/true_spectrum._time_bins
radial_step = (true_spectrum._radial_high-true_spectrum._radial_low)/true_spectrum._radial_bins
smeared_spectrum = spectra.Spectra(true_spectrum._name+str(self._light_yield)+"_light_yield",
true_spectrum._num_decays)
for time_bin in range(true_spectrum._time_bins):
mean_time = time_bin*time_step+0.5*time_step
for radial_bin in range(true_spectrum._radial_bins):
mean_radius = radial_bin*radial_step+0.5*radial_step
for energy_bin in range(true_spectrum._energy_bins):
mean_energy = energy_bin*energy_step+0.5*energy_step
sigma = self.get_energy_sigma(mean_energy)
entries = true_spectrum._data[energy_bin,
radial_bin,
time_bin]
for i in range(int(entries)):
try:
smeared_spectrum.fill(np.fabs(np.random.normal(mean_energy,
sigma)),
mean_radius,
mean_time)
except:
# Occurs when smeared energy is > max bin
print "Warning: Smeared energy out of bounds. Skipping."
continue
return smeared_spectrum
def weight_gaussian_energy_spectra(self, true_spectrum, num_sigma=5.):
""" Smears the energy of a spectra object by calculating a Gaussian pdf
for each bin and applying a weight to the bin and corresponding bins
a default 5 sigma apart.
Args:
true_spectrum (spectra): spectrum to be smeared
num_sigma (float): Width of window to apply the weight method.
Default is 5.
Returns:
A smeared spectra object.
"""
energy_step = (true_spectrum._energy_high-true_spectrum._energy_low)/true_spectrum._energy_bins
time_step = (true_spectrum._time_high-true_spectrum._time_low)/true_spectrum._time_bins
radial_step = (true_spectrum._radial_high-true_spectrum._radial_low)/true_spectrum._radial_bins
smeared_spectrum = spectra.Spectra(true_spectrum._name+str(self._light_yield)+"_light_yield",
true_spectrum._num_decays)
for time_bin in range(true_spectrum._time_bins):
mean_time = time_bin*time_step+0.5*time_step
for radial_bin in range(true_spectrum._radial_bins):
mean_radius = radial_bin*radial_step+0.5*radial_step
for energy_bin in range(true_spectrum._energy_bins):
mean_energy = energy_bin*energy_step+0.5*energy_step
sigma = self.get_energy_sigma(mean_energy)
entries = float(true_spectrum._data[energy_bin,
radial_bin,
time_bin])
if entries == 0:
continue # Bin Empty
lower_bin = self.floor_to_bin(mean_energy-num_sigma*sigma,
energy_step)+0.5*energy_step
upper_bin = self.ceil_to_bin(mean_energy+num_sigma*sigma,
energy_step)-0.5*energy_step
if upper_bin > true_spectrum._energy_high:
upper_bin = true_spectrum._energy_high-0.5*energy_step
if lower_bin < true_spectrum._energy_low:
lower_bin = true_spectrum._energy_low+0.5*energy_step
weights = []
for energy in np.arange(lower_bin, upper_bin, energy_step):
weights.append(self.calc_gaussian(energy,
mean_energy,
sigma))
i = 0
tot_weight = np.array(weights).sum()
for energy in np.arange(lower_bin, upper_bin, energy_step):
smeared_spectrum.fill(energy,
mean_radius,
mean_time,
entries*weights[i]/tot_weight)
i += 1
return smeared_spectrum
def random_gaussian_radius_spectra(self, true_spectrum):
""" Smears the radius of a spectra object by generating a
number of random points from a Gaussian pdf generated for
that bin. The number of points generated is equivalent
to the number of entries in that bin.
Args:
true_spectrum (spectra): spectrum to be smeared
Returns:
A smeared spectra object.
"""
energy_step = (true_spectrum._energy_high-true_spectrum._energy_low)/true_spectrum._energy_bins
time_step = (true_spectrum._time_high-true_spectrum._time_low)/true_spectrum._time_bins
radial_step = (true_spectrum._radial_high-true_spectrum._radial_low)/true_spectrum._radial_bins
smeared_spectrum = spectra.Spectra(true_spectrum._name+str(self._position_resolution)+"_position_resolution",
true_spectrum._num_decays)
for time_bin in range(true_spectrum._time_bins):
mean_time = time_bin*time_step+0.5*time_step
for energy_bin in range(true_spectrum._energy_bins):
mean_energy = energy_bin*energy_step+0.5*energy_step
for radial_bin in range(true_spectrum._radial_bins):
mean_radius = radial_bin*radial_step+0.5*radial_step
entries = true_spectrum._data[energy_bin,
radial_bin,
time_bin]
for i in range(int(entries)):
try:
smeared_spectrum.fill(mean_energy,
np.fabs(np.random.normal(mean_radius,
self._position_resolution)),
mean_time)
except:
# Occurs when smeared radius is > max bin
print "Warning: Smeared radius out of bounds. Skipping."
continue
return smeared_spectrum
def weight_gaussian_radius_spectra(self, true_spectrum, num_sigma=5.):
""" Smears the radius of a spectra object by calculating a Gaussian pdf
for each bin and applies a weight to the bin and corresponding bins a
default 5 sigma apart.
Args:
true_spectrum (spectra): spectrum to be smeared
num_sigma (float): Width of window to apply the weight method.
Default is 5.
Returns:
A smeared spectra object.
"""
energy_step = (true_spectrum._energy_high-true_spectrum._energy_low)/true_spectrum._energy_bins
time_step = (true_spectrum._time_high-true_spectrum._time_low)/true_spectrum._time_bins
radial_step = (true_spectrum._radial_high-true_spectrum._radial_low)/true_spectrum._radial_bins
smeared_spectrum = spectra.Spectra(true_spectrum._name+str(self._position_resolution)+"_position_resolution",
true_spectrum._num_decays)
for time_bin in range(true_spectrum._time_bins):
mean_time = time_bin*time_step+0.5*time_step
for energy_bin in range(true_spectrum._energy_bins):
mean_energy = energy_bin*energy_step+0.5*energy_step
for radial_bin in range(true_spectrum._radial_bins):
mean_radius = radial_bin*radial_step+0.5*radial_step
entries = float(true_spectrum._data[energy_bin,
radial_bin,
time_bin])
if entries == 0:
continue # Bin Empty
lower_bin = self.floor_to_bin(mean_radius-num_sigma*self._position_resolution,
radial_step)+0.5*radial_step
upper_bin = self.ceil_to_bin(mean_radius+num_sigma*self._position_resolution,
radial_step)-0.5*radial_step
if upper_bin > true_spectrum._radial_high:
upper_bin = true_spectrum._radial_high-0.5*radial_step
if lower_bin < true_spectrum._radial_low:
lower_bin = true_spectrum._radial_low+0.5*radial_step
weights = []
for radius in np.arange(lower_bin, upper_bin, radial_step):
weights.append(self.calc_gaussian(radius,
mean_radius,
self._position_resolution))
weight_tot = np.array(weights).sum()
i = 0
for radius in np.arange(lower_bin, upper_bin, radial_step):
smeared_spectrum.fill(mean_energy,
radius,
mean_time,
entries*weights[i]/weight_tot)
i += 1
return smeared_spectrum
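# ---------------------------------------------------------------------------
# Minimal self-contained sketch of the bin-weighting idea used by the
# weight_gaussian_*_spectra methods above, applied to a plain 1D histogram.
# Illustrative only: it does not use the Spectra class, and the helper name,
# the sigma_of callable and the example values below are assumptions made for
# this sketch, not part of the original module.
# ---------------------------------------------------------------------------
def _example_weight_smear_1d(counts, bin_centres, sigma_of, num_sigma=5.0):
    """ Redistribute each bin's entries over neighbouring bins using Gaussian
    weights truncated at num_sigma and return the smeared histogram.
    counts and bin_centres must be numpy arrays of equal length.
    """
    import numpy as np
    smeared = np.zeros(len(counts), dtype=float)
    for centre, entries in zip(bin_centres, counts):
        if entries == 0:
            continue
        sigma = sigma_of(centre)
        # Only bins within num_sigma of the current bin centre receive weight
        mask = np.fabs(bin_centres - centre) <= num_sigma*sigma
        weights = np.exp(-(bin_centres[mask] - centre)**2/(2.0*sigma**2))
        smeared[mask] += entries*weights/weights.sum()
    return smeared
# Example call (values are arbitrary):
# _example_weight_smear_1d(np.array([0., 100., 0.]),
#                          np.array([0.5, 1.5, 2.5]),
#                          lambda e: np.sqrt(e/200.0))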
|
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
#SSL Certification Error Handle
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
#Data Collection
link = input('Enter URL: ')
cont = int(input('Enter count: '))
line = int(input('Enter position: '))
print('Retrieving: %s' % link)
for i in range(0, cont):
html = urllib.request.urlopen(link, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
tags = soup('a')
cn = 0
ps = 0
for tag in tags:
ps += 1
if ps == line:
print('Retrieving: %s' % str(tag.get('href', None)))
link = str(tag.get('href', None))
ps = 0
break
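# ---------------------------------------------------------------------------
# Optional reusable form of the loop above (a sketch, not part of the original
# script): follow the link found at the given 1-based position `count` times
# and return the final URL. Defined only; nothing below is executed.
# ---------------------------------------------------------------------------
def follow_links(url, count, position, context=ctx):
    for _ in range(count):
        page = urllib.request.urlopen(url, context=context).read()
        anchors = BeautifulSoup(page, 'html.parser')('a')
        # Same 1-based position convention as the loop above
        url = str(anchors[position - 1].get('href', None))
        print('Retrieving: %s' % url)
    return url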
|
import os
from typing import List
from fvttmv.path_tools import PathTools
from fvttmv.wolds_finder import WorldsFinder
dirs_in_worlds_to_look_for_db_file = ["data", "packs"]
class DbFilesIterator:
"""
Tools for iterating over db files.
"""
def iterate_through_all(self,
abs_path_to_foundry_data: str,
abs_paths_to_additional_targets: List[str]):
# Check before iterating -> Don't fail in the middle
for abs_path in abs_paths_to_additional_targets + [abs_path_to_foundry_data]:
PathTools.assert_path_format_is_ok(abs_path)
for abs_path_to_target in abs_paths_to_additional_targets:
PathTools.assert_path_is_file_or_dir(abs_path_to_target)
PathTools.assert_path_is_dir(abs_path_to_foundry_data)
for abs_path_to_db in self.__iterate_through_all_worlds(abs_path_to_foundry_data):
yield abs_path_to_db
for abs_path_to_db in self.__iterate_through_additional_targets(abs_paths_to_additional_targets):
yield abs_path_to_db
def __iterate_through_all_worlds(self,
abs_path_to_foundry_data: str):
PathTools.assert_path_format_is_ok(abs_path_to_foundry_data)
PathTools.assert_path_is_dir(abs_path_to_foundry_data)
worlds_finder = WorldsFinder(abs_path_to_foundry_data)
world_dirs = worlds_finder.get_paths_to_worlds()
for path_to_world_dir in world_dirs:
for db_file in self.__iterate_through_world_dir(path_to_world_dir):
yield db_file
def __iterate_through_additional_targets(self,
abs_paths_to_additional_targets: List[str]):
for abs_path_to_additional_target in abs_paths_to_additional_targets:
if os.path.isfile(abs_path_to_additional_target):
yield abs_path_to_additional_target
continue
for abs_path_to_db in self.__iterate_through_dir(abs_path_to_additional_target):
yield abs_path_to_db
def __iterate_through_dir(self,
abs_path_to_dir: str):
PathTools.assert_path_format_is_ok(abs_path_to_dir)
PathTools.assert_path_is_dir(abs_path_to_dir)
# os.listdir does not guarantee a consistent order; sort it so results are reproducible (e.g. in tests)
for element in sorted(os.listdir(abs_path_to_dir)):
abs_path_to_element = os.path.join(abs_path_to_dir, element)
if not os.path.isfile(abs_path_to_element):
continue
if element.endswith(".db"):
yield abs_path_to_element
def __iterate_through_world_dir(self,
abs_path_to_world_dir: str):
for directory in dirs_in_worlds_to_look_for_db_file:
abs_path_to_dir = os.path.join(abs_path_to_world_dir, directory)
if not os.path.exists(abs_path_to_dir):
continue
for db_file in self.__iterate_through_dir(abs_path_to_dir):
yield db_file
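# Minimal usage sketch (assumption: the absolute path to the Foundry VTT data
# directory is passed as the first command line argument; this guard is
# illustrative and not part of the original module):
if __name__ == "__main__":
    import sys
    for db_file_path in DbFilesIterator().iterate_through_all(sys.argv[1], []):
        print(db_file_path)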
|
'''Module with functions to work with artists data.
While selecting a suitable dataset we ran into a problem: the larger dataset
had no information about the popularity of individual tracks. Because of this
we decided to build our own popularity criterion, based on the popularity of
the artists who worked on a given track.
Using these functions we collected artist popularity information from the
smaller dataset - about 32 thousand artists in total. We then dropped every
song from the larger dataset whose artist list did not contain at least one
artist from the small dataset. As a result we have a dataset of more than 600
thousand songs instead of the originally planned 160 thousand.
'''
from typing import List, Set
import pandas as pd
def get_artists(data: pd.DataFrame) -> set:
'''
Return a set of artists from the dataset.
'''
artists_set = set()
for artist_data in data['artists']:
# Add a single artist
if len(artist_data) == 1:
artists_set.add(artist_data[0])
# Add a group of artists
else:
for artist in artist_data:
artists_set.add(artist)
return artists_set
def has_known_artist(artist_data: List[str], artists: Set[str]):
'''
Return True if at least one artist is
present in the given set, False otherwise.
'''
# Check if at least one artist from the group is known
for artist in artist_data:
if artist in artists:
return True
return False
def get_mean_popularity(artists, popularity):
'''
Return the mean popularity value for the given artists.
Return None if there is missing data for at least one artist.
'''
try:
return sum(popularity[artist] for artist in artists) / len(artists)
except (KeyError, ZeroDivisionError):
return None
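# Illustrative usage (the tiny DataFrame and popularity values below are
# assumptions made for the example, not data from the project datasets):
if __name__ == '__main__':
    songs = pd.DataFrame({'artists': [['Artist A'], ['Artist B', 'Artist C']]})
    known_artists = get_artists(songs)  # {'Artist A', 'Artist B', 'Artist C'}
    print(has_known_artist(['Artist C', 'Unknown'], known_artists))  # True
    print(get_mean_popularity(['Artist A', 'Artist B'],
                              {'Artist A': 60, 'Artist B': 40}))  # 50.0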
|
#
# @lc app=leetcode id=243 lang=python3
#
# [243] Shortest Word Distance
#
# @lc code=start
class Solution:
def shortestDistance(self, words, word1, word2):
d = {word1: -1, word2: -1}
mi = float('inf')
if word1 == word2:
return
for i in range(len(words)):
if words[i] == word1:
d[word1] = i
if d[word2] != -1:
mi = min(abs(d[word1] - d[word2]), mi)
elif words[i] == word2:
d[word2] = i
if d[word1] != -1:
mi = min(abs(d[word1] - d[word2]), mi)
return mi
# @lc code=end
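# Quick local check (illustrative only, not part of the LeetCode submission):
if __name__ == '__main__':
    words = ["practice", "makes", "perfect", "coding", "makes"]
    # Expected output: 3 (distance between "coding" and "practice")
    print(Solution().shortestDistance(words, "coding", "practice"))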
|
#!/usr/bin/python
"""
pi-timolo - Raspberry Pi Long Duration Timelapse, Motion Tracking,
with Low Light Capability
written by Claude Pageau Jul-2017 (release 7.x)
This release uses OpenCV to do Motion Tracking.
It requires updated config.py
"""
from __future__ import print_function
progVer = "ver 11.52" # Requires Latest 11.2 release of config.py
__version__ = progVer # May test for version number at a future time
import os
import subprocess
warn_on = False # Add short delay to review warning messages
mypath = os.path.abspath(__file__) # Find the full path of this python script
# get the path location only (excluding script name)
baseDir = os.path.dirname(mypath)
baseFileName = os.path.splitext(os.path.basename(mypath))[0]
progName = os.path.basename(__file__)
logFilePath = os.path.join(baseDir, baseFileName + ".log")
horz_line = '-------------------------------------------------------'
print(horz_line)
print('%s %s written by Claude Pageau' % (progName, progVer))
print(horz_line)
print('Loading Wait ....')
# import python library modules
import datetime
import logging
import sys
import subprocess
import shutil
import glob
import time
import math
from threading import Thread
from fractions import Fraction
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# Attempt to import dateutil
try:
from dateutil.parser import parse
except ImportError:
print("WARN : Could Not Import dateutil.parser")
print(" Disabling timelapseStartAt, motionStartAt and VideoStartAt")
print(" See https://github.com/pageauc/pi-timolo/wiki/Basic-Trouble-Shooting#problems-with-python-pip-install-on-wheezy")
warn_on = True
# Disable get_sched_start if import fails for Raspbian wheezy or Jessie
timelapseStartAt = ""
motionStartAt = ""
videoStartAt = ""
# Attempt to import pyexiv2. Note python3 can be a problem
try:
# pyexiv2 Transfers image exif data to writeTextToImage
# For python3 install of pyexiv2 lib
# See https://github.com/pageauc/pi-timolo/issues/79
# Bypass pyexiv2 if library Not Found
import pyexiv2
except ImportError:
print("WARN : Could Not Import pyexiv2. Required for Saving Image EXIF meta data")
print(" If Running under python3 then Install pyexiv2 library for python3 per")
print(" cd ~/pi-timolo")
print(" ./install-py3exiv2.sh")
warn_on = True
except OSError as err:
print("WARN : Could Not import python3 pyexiv2 due to an Operating System Error")
print(" %s" % err)
print(" Camera images will be missing exif meta data")
warn_on = True
"""
This is a dictionary of the default settings for pi-timolo.py
If you don't want to use a config.py file these will create the required
variables with default values. Change dictionary values if you want different
variable default values.
A message will be displayed if a variable is Not imported from config.py.
Note: plugins can override default and config.py values if plugins are
enabled. This happens after config.py variables are initialized
"""
default_settings = {
'configName':'default_settings',
'configTitle':'No config.py so using internal dictionary settings',
'pluginEnable':False,
'pluginName':"shopcam",
'verbose':True,
'logDataToFile':False,
'debug':False,
'imageNamePrefix':'cam1-',
'imageWidth':1920,
'imageHeight':1080,
'imageFormat':".jpg",
'imageJpegQuality':95,
'imageRotation':0,
'imageVFlip':True,
'imageHFlip':True,
'imageGrayscale':False,
'imagePreview':False,
'runScriptAfterCapture':False,
'noNightShots':False,
'noDayShots':False,
'useVideoPort':False,
'imageShowStream':False,
'streamWidth':320,
'streamHeight':240,
'showDateOnImage':True,
'showTextFontSize':18,
'showTextBottom':True,
'showTextWhite':True,
'showTextWhiteNight':True,
'nightTwilightThreshold':90,
'nightDarkThreshold':50,
'nightBlackThreshold':4,
'nightSleepSec':30,
'nightMaxShutSec':5.9,
'nightMaxISO':800,
'nightDarkAdjust':4.7,
'motionTrackOn':True,
'motionTrackQuickPic':False,
'motionTrackInfo':True,
'motionTrackTimeOut':0.3,
'motionTrackTrigLen':75,
'motionTrackMinArea':100,
'motionTrackFrameRate':20,
'motionTrackQPBigger':3.0,
'motionDir':"media/motion",
'motionPrefix':"mo-",
'motionStartAt':"",
'motionVideoOn':False,
'motionVideoFPS':15,
'motionVideoTimer':10,
'motionQuickTLOn':False,
'motionQuickTLTimer':20,
'motionQuickTLInterval':4,
'motionForce':3600,
'motionNumOn':True,
'motionNumRecycle':True,
'motionNumStart':1000,
'motionNumMax':500,
'motionSubDirMaxFiles':0,
'motionSubDirMaxHours':0,
'motionRecentMax':40,
'motionRecentDir':"media/recent/motion",
'motionDotsOn':False,
'motionDotsMax':100,
'motionCamSleep':0.7,
'createLockFile':False,
'timelapseOn':True,
'timelapseDir':"media/timelapse",
'timelapsePrefix':"tl-",
'timelapseStartAt':"",
'timelapseTimer':300,
'timelapseCamSleep':4.0,
'timelapseNumOn':True,
'timelapseNumRecycle':True,
'timelapseNumStart':1000,
'timelapseNumMax':2000,
'timelapseExitSec':0,
'timelapseMaxFiles':0,
'timelapseSubDirMaxFiles':0,
'timelapseSubDirMaxHours':0,
'timelapseRecentMax':40,
'timelapseRecentDir':"media/recent/timelapse",
'videoRepeatOn':False,
'videoPath':"media/videos",
'videoPrefix':"vid-",
'videoStartAt':"",
'videoDuration':120,
'videoTimer':60,
'videoFPS':30,
'videoNumOn':False,
'videoNumRecycle':False,
'videoNumStart':100,
'videoNumMax':20,
'spaceTimerHrs':0,
'spaceFreeMB':500,
'spaceMediaDir':'/home/pi/pi-timolo/media',
'spaceFileExt':'jpg',
'web_server_port':8080,
'web_server_root':"media",
'web_page_title':"PI-TIMOLO Media",
'web_page_refresh_on':True,
'web_page_refresh_sec':"900",
'web_page_blank':False,
'web_image_height':"768",
'web_iframe_width_usage':"70%",
'web_iframe_width':"100%",
'web_iframe_height':"100%",
'web_max_list_entries':0,
'web_list_height':"768",
'web_list_by_datetime':True,
'web_list_sort_descending':True
}
# Check for config.py variable file to import and error out if not found.
configFilePath = os.path.join(baseDir, "config.py")
if os.path.isfile(configFilePath):
try:
# Read Configuration variables from config.py file
from config import *
except ImportError:
print('WARN : Problem Importing Variables from %s' % configFilePath)
warn_on = True
else:
print('WARN : %s File Not Found. Cannot Import Configuration Variables.'
% configFilePath)
print(' Run Console Command Below to Download File from GitHub Repo')
print(' wget -O config.py https://raw.github.com/pageauc/pi-timolo/master/source/config.py')
print(' or cp config.py.new config.py')
print(' Will now use default_settings dictionary variable values.')
warn_on = True
"""
Check if variables were imported from config.py. If not create variable using
the values in the default_settings dictionary above.
"""
for key, val in default_settings.items():
try:
exec(key)
except NameError:
print('WARN : config.py Variable Not Found. Setting ' + key + ' = ' + str(val))
exec(key + '=val')
warn_on = True
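# Example of the fallback above: if config.py does not define imageWidth, the
# loop creates imageWidth = 1920 from the default_settings dictionary entry.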
# Setup Logging now that variables are imported from config.py/plugin
if logDataToFile:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=logFilePath,
filemode='w')
elif verbose:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
else:
logging.basicConfig(level=logging.CRITICAL,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# Check for user_motion_code.py file to import and error out if not found.
userMotionFilePath = os.path.join(baseDir, "user_motion_code.py")
if not os.path.isfile(userMotionFilePath):
print('WARN : %s File Not Found. Cannot Import user_motion_code functions.'
% userMotionFilePath)
warn_on = True
else:
# Read Configuration variables from config.py file
try:
motionCode = True
import user_motion_code
except ImportError:
print('WARN : Failed Import of File user_motion_code.py Investigate Problem')
motionCode = False
warn_on = True
# Give some time to read any warnings
if warn_on and verbose:
print('')
print('Please Review Warnings Wait 10 sec ...')
time.sleep(10)
try:
import cv2
except ImportError:
if sys.version_info > (2, 9):
logging.error("Failed to import cv2 opencv for python3")
logging.error("Try installing opencv for python3")
logging.error("See https://github.com/pageauc/opencv3-setup")
else:
logging.error("Failed to import cv2 for python2")
logging.error("Try reinstalling per command")
logging.error("sudo apt-get install python-opencv")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
try:
from picamera import PiCamera
except ImportError:
logging.error("Problem importing picamera module")
logging.error("Try command below to import module")
if sys.version_info > (2, 9):
logging.error("sudo apt-get install python3-picamera")
else:
logging.error("sudo apt-get install python-picamera")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
from picamera.array import PiRGBArray
import picamera.array
# Check that pi camera module is installed and enabled
camResult = subprocess.check_output("vcgencmd get_camera", shell=True)
camResult = camResult.decode("utf-8")
camResult = camResult.replace("\n", "")
if (camResult.find("0")) >= 0: # Was a 0 found in vcgencmd output
logging.error("Pi Camera Module Not Found %s", camResult)
logging.error("if supported=0 Enable Camera using command sudo raspi-config")
logging.error("if detected=0 Check Pi Camera Module is Installed Correctly")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
else:
logging.info("Pi Camera Module is Enabled and Connected %s", camResult)
if pluginEnable: # Check and verify plugin and load variable overlay
pluginDir = os.path.join(baseDir, "plugins")
# Check if there is a .py at the end of pluginName variable
if pluginName.endswith('.py'):
pluginName = pluginName[:-3] # Remove .py extension
pluginPath = os.path.join(pluginDir, pluginName + '.py')
logging.info("pluginEnabled - loading pluginName %s", pluginPath)
if not os.path.isdir(pluginDir):
logging.error("plugin Directory Not Found at %s", pluginDir)
logging.error("Rerun github curl install script to install plugins")
logging.error("https://github.com/pageauc/pi-timolo/wiki/"
"How-to-Install-or-Upgrade#quick-install")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
elif not os.path.isfile(pluginPath):
logging.error("File Not Found pluginName %s", pluginPath)
logging.error("Check Spelling of pluginName Value in %s",
configFilePath)
logging.error("------- Valid Names -------")
validPlugin = glob.glob(pluginDir + "/*py")
validPlugin.sort()
for entry in validPlugin:
pluginFile = os.path.basename(entry)
plugin = pluginFile.rsplit('.', 1)[0]
if not ((plugin == "__init__") or (plugin == "current")):
logging.error(" %s", plugin)
logging.error("------- End of List -------")
logging.error("Note: pluginName Should Not have .py Ending.")
logging.error("or Rerun github curl install command. See github wiki")
logging.error("https://github.com/pageauc/pi-timolo/wiki/"
"How-to-Install-or-Upgrade#quick-install")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
else:
pluginCurrent = os.path.join(pluginDir, "current.py")
try: # Copy image file to recent folder
logging.info("Copy %s to %s", pluginPath, pluginCurrent)
shutil.copy(pluginPath, pluginCurrent)
except OSError as err:
logging.error('Copy Failed from %s to %s - %s',
pluginPath, pluginCurrent, err)
logging.error("Check permissions, disk space, Etc.")
logging.error("Exiting %s Due to Error", progName)
sys.exit(1)
logging.info("Import Plugin %s", pluginPath)
sys.path.insert(0, pluginDir) # add plugin directory to program PATH
from plugins.current import *
try:
if os.path.isfile(pluginCurrent):
os.remove(pluginCurrent)
pluginCurrentpyc = os.path.join(pluginDir, "current.pyc")
if os.path.isfile(pluginCurrentpyc):
os.remove(pluginCurrentpyc)
except OSError as err:
logging.warn("Failed Removal of %s - %s", pluginCurrentpyc, err)
time.sleep(5)
else:
logging.info("No Plugin Enabled per pluginEnable=%s", pluginEnable)
# Turn on verbose when debug mode is enabled
if debug:
verbose = True
# Make sure image format extention starts with a dot
if not imageFormat.startswith('.',0,1):
imageFormat = '.' + imageFormat
#==================================
# System Variables
# Should Not need to be customized
#==================================
SECONDS2MICRO = 1000000 # Used to convert from seconds to microseconds
nightMaxShut = int(nightMaxShutSec * SECONDS2MICRO)
# default=5 seconds IMPORTANT- 6 seconds works sometimes
# but occasionally locks RPI and HARD reboot required to clear
darkAdjust = int((SECONDS2MICRO/5.0) * nightDarkAdjust)
daymode = False # default should always be False.
motionPath = os.path.join(baseDir, motionDir) # Store Motion images
# motion dat file to save currentCount
motionNumPath = os.path.join(baseDir, motionPrefix + baseFileName + ".dat")
motionStreamStopSec = .5 # default= 0.5 seconds Time to close stream thread
timelapsePath = os.path.join(baseDir, timelapseDir) # Store Time Lapse images
# timelapse dat file to save currentCount
timelapseNumPath = os.path.join(baseDir, timelapsePrefix + baseFileName + ".dat")
lockFilePath = os.path.join(baseDir, baseFileName + ".sync")
# Colors for drawing lines
cvWhite = (255, 255, 255)
cvBlack = (0, 0, 0)
cvBlue = (255, 0, 0)
cvGreen = (0, 255, 0)
cvRed = (0, 0, 255)
LINE_THICKNESS = 1 # Thickness of opencv drawing lines
LINE_COLOR = cvWhite # color of lines to highlight motion stream area
CAMERA_WIDTH = streamWidth
CAMERA_HEIGHT = streamHeight
CAMERA_FRAMERATE = motionTrackFrameRate # camera framerate
bigImage = motionTrackQPBigger # increase size of motionTrackQuickPic image
bigImageWidth = int(CAMERA_WIDTH * bigImage)
bigImageHeight = int(CAMERA_HEIGHT * bigImage)
TRACK_TRIG_LEN = motionTrackTrigLen # Length of track to trigger speed photo
# Don't track progress until this Len reached.
TRACK_TRIG_LEN_MIN = int(motionTrackTrigLen / 6)
# Set max overshoot triglen allowed half cam height
TRACK_TRIG_LEN_MAX = int(CAMERA_HEIGHT / 2)
# Timeout seconds Stops motion tracking when no activity
TRACK_TIMEOUT = motionTrackTimeOut
# OpenCV Contour sq px area must be greater than this.
MIN_AREA = motionTrackMinArea
BLUR_SIZE = 10 # OpenCV setting for Gaussian difference image blur
THRESHOLD_SENSITIVITY = 20 # OpenCV setting for difference image threshold
# Fix range Errors Use zero to set default quality to 85
if imageJpegQuality < 1:
imageJpegQuality = 85
elif imageJpegQuality > 100:
imageJpegQuality = 100
#------------------------------------------------------------------------------
class PiVideoStream:
"""
Create a picamera in memory video stream and
return a frame when update called
"""
def __init__(self, resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
framerate=CAMERA_FRAMERATE,
rotation=0,
hflip=False, vflip=False):
# initialize the camera and stream
try:
self.camera = PiCamera()
except:
logging.error("PiCamera Already in Use by Another Process")
logging.error("Exiting %s Due to Error", progName)
exit(1)
self.camera.resolution = resolution
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.camera.rotation = rotation
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr",
use_video_port=True)
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.thread = None # Initialize thread
self.frame = None
self.stopped = False
def start(self):
""" start the thread to read frames from the video stream"""
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
return self
def update(self):
""" keep looping infinitely until the thread is stopped"""
for f in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
# if the thread indicator variable is set, stop the thread
# and release camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
""" return the frame most recently read """
return self.frame
def stop(self):
""" indicate that the thread should be stopped """
self.stopped = True
if self.thread is not None:
self.thread.join()
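# Illustrative use of PiVideoStream (a sketch of the intended start/read/stop
# pattern only; it is not executed here):
#     vs = PiVideoStream(rotation=imageRotation,
#                        hflip=imageHFlip, vflip=imageVFlip).start()
#     time.sleep(2)         # allow the camera and capture thread to warm up
#     frame = vs.read()     # most recent BGR frame as a numpy array
#     vs.stop()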
#------------------------------------------------------------------------------
def shut2Sec(shutspeed):
""" Convert camera shutter speed setting to string """
shutspeedSec = shutspeed/float(SECONDS2MICRO)
shutstring = str("%.4f") % (shutspeedSec)
return shutstring
#------------------------------------------------------------------------------
def showTime():
""" Show current date time in text format """
rightNow = datetime.datetime.now()
currentTime = ("%04d-%02d-%02d %02d:%02d:%02d" % (rightNow.year,
rightNow.month,
rightNow.day,
rightNow.hour,
rightNow.minute,
rightNow.second))
return currentTime
#------------------------------------------------------------------------------
def showDots(dotcnt):
"""
If motionShowDots=True then display a progress
dot for each cycle. If motionTrackOn then this would
normally be too fast and should be turned off
"""
if motionDotsOn:
if motionTrackOn and verbose:
dotcnt += 1
if dotcnt > motionDotsMax + 2:
print("")
dotcnt = 0
elif dotcnt > motionDotsMax:
print("")
stime = showTime() + " ."
sys.stdout.write(stime)
sys.stdout.flush()
dotcnt = 0
else:
sys.stdout.write('.')
sys.stdout.flush()
return dotcnt
#------------------------------------------------------------------------------
def checkConfig():
"""
Check if both motion track and
timelapse are disabled and error out
"""
if not motionTrackOn and not timelapseOn:
errorText = ("Both Motion and Timelapse are turned OFF"
" - motionTrackOn=%s timelapseOn=%s \n"
% (motionTrackOn, timelapseOn))
if verbose:
logging.error(errorText)
else:
sys.stdout.write(errorText)
sys.exit(1)
#------------------------------------------------------------------------------
def displayInfo(motioncount, timelapsecount):
""" Display variable settings with plugin overlays if required """
if verbose:
print("----------------------------------- Settings "
"-----------------------------------")
print("Config File .. configName=%s configTitle=%s"
% (configName, configTitle))
if pluginEnable:
print(" Plugin .. pluginEnable=%s pluginName=%s"
" (Overlays %s Variable Settings)"
% (pluginEnable, pluginName, configName))
else:
print(" Plugin .. pluginEnable=%s" % pluginEnable)
print("")
print("Image Info ... Size=%ix%i ext=%s Prefix=%s"
" VFlip=%s HFlip=%s Rotation=%i"
% (imageWidth, imageHeight, imageFormat, imageNamePrefix,
imageVFlip, imageHFlip, imageRotation))
print(" imageGrayscale=%s Preview=%s"
% (imageGrayscale, imagePreview))
if imageFormat == '.jpg' or imageFormat == '.jpeg':
print(" JpegQuality=%i where 1=Low 100=High"
% (imageJpegQuality))
print(" Low Light.. nightTwilightThreshold=%i"
" nightDarkThreshold=%i nightBlackThreshold=%i"
% (nightTwilightThreshold, nightDarkThreshold, nightBlackThreshold))
print(" nightMaxShutSec=%.2f nightMaxISO=%i"
" nightDarkAdjust=%.2f nightSleepSec=%i"
% (nightMaxShutSec, nightMaxISO, nightDarkAdjust, nightSleepSec))
print(" No Shots .. noNightShots=%s noDayShots=%s"
% (noNightShots, noDayShots))
if showDateOnImage:
print(" Img Text .. On=%s Bottom=%s (False=Top) WhiteText=%s (False=Black)"
% (showDateOnImage, showTextBottom, showTextWhite))
print(" showTextWhiteNight=%s showTextFontSize=%i px height"
% (showTextWhiteNight, showTextFontSize))
else:
print(" No Text .. showDateOnImage=%s Text on Image is Disabled"
% (showDateOnImage))
print("")
if motionTrackOn:
print("Motion Track.. On=%s Prefix=%s MinArea=%i sqpx"
" TrigLen=%i-%i px TimeOut=%i sec"
% (motionTrackOn, motionPrefix, motionTrackMinArea,
motionTrackTrigLen, TRACK_TRIG_LEN_MAX, motionTrackTimeOut))
print(" motionTrackInfo=%s motionDotsOn=%s imageShowStream=%s"
% (motionTrackInfo, motionDotsOn, imageShowStream))
print(" Stream .... size=%ix%i framerate=%i fps"
" motionStreamStopSec=%.2f QuickPic=%s"
% (CAMERA_WIDTH, CAMERA_HEIGHT, motionTrackFrameRate,
motionStreamStopSec, motionTrackQuickPic))
print(" Img Path .. motionPath=%s motionCamSleep=%.2f sec"
% (motionPath, motionCamSleep))
print(" Sched ..... motionStartAt %s blank=Off or"
" Set Valid Date and/or Time to Start Sequence"
% motionStartAt)
print(" Force ..... forceTimer=%i min (If No Motion)"
% (motionForce/60))
print(" Lockfile .. On=%s Path=%s NOTE: For Motion Images Only."
% (createLockFile, lockFilePath))
if motionNumOn:
print(" Num Seq ... motionNumOn=%s numRecycle=%s"
" numStart=%i numMax=%i current=%s"
% (motionNumOn, motionNumRecycle, motionNumStart,
motionNumMax, motioncount))
print(" Num Path .. motionNumPath=%s " % (motionNumPath))
else:
print(" Date-Time.. motionNumOn=%s Image Numbering is Disabled"
% (motionNumOn))
if motionQuickTLOn:
print(" Quick TL .. motionQuickTLOn=%s motionQuickTLTimer=%i"
" sec motionQuickTLInterval=%i sec (0=fastest)"
% (motionQuickTLOn, motionQuickTLTimer,
motionQuickTLInterval))
else:
print(" Quick TL .. motionQuickTLOn=%s Quick Time Lapse Disabled"
% motionQuickTLOn)
if motionVideoOn:
print(" Video ..... motionVideoOn=%s motionVideoTimer=%i"
" sec motionVideoFPS=%i (superseded by QuickTL)"
% (motionVideoOn, motionVideoTimer, motionVideoFPS))
else:
print(" Video ..... motionVideoOn=%s Motion Video is Disabled"
% motionVideoOn)
print(" Sub-Dir ... motionSubDirMaxHours=%i (0-off)"
" motionSubDirMaxFiles=%i (0=off)"
% (motionSubDirMaxHours, motionSubDirMaxFiles))
print(" Recent .... motionRecentMax=%i (0=off) motionRecentDir=%s"
% (motionRecentMax, motionRecentDir))
else:
print("Motion ....... motionTrackOn=%s Motion Tracking is Disabled)"
% motionTrackOn)
print("")
if timelapseOn:
print("Time Lapse ... On=%s Prefix=%s Timer=%i sec"
" timelapseExitSec=%i (0=Continuous)"
% (timelapseOn, timelapsePrefix,
timelapseTimer, timelapseExitSec))
print(" timelapseMaxFiles=%i" % (timelapseMaxFiles))
print(" Img Path .. timelapsePath=%s timelapseCamSleep=%.2f sec"
% (timelapsePath, timelapseCamSleep))
print(" Sched ..... timelapseStartAt %s blank=Off or"
" Set Valid Date and/or Time to Start Sequence"
% timelapseStartAt)
if timelapseNumOn:
print(" Num Seq ... On=%s numRecycle=%s numStart=%i numMax=%i current=%s"
% (timelapseNumOn, timelapseNumRecycle, timelapseNumStart,
timelapseNumMax, timelapsecount))
print(" Num Path .. numPath=%s" % (timelapseNumPath))
else:
print(" Date-Time.. motionNumOn=%s Numbering Disabled"
% timelapseNumOn)
print(" Sub-Dir ... timelapseSubDirMaxHours=%i (0=off)"
" timelapseSubDirMaxFiles=%i (0=off)"
% (timelapseSubDirMaxHours, timelapseSubDirMaxFiles))
print(" Recent .... timelapseRecentMax=%i (0=off) timelapseRecentDir=%s"
% (timelapseRecentMax, timelapseRecentDir))
else:
print("Time Lapse ... timelapseOn=%s Timelapse is Disabled"
% timelapseOn)
print("")
if spaceTimerHrs > 0: # Check if disk mgmnt is enabled
print("Disk Space .. Enabled - Manage Target Free Disk Space."
" Delete Oldest %s Files if Required"
% (spaceFileExt))
print(" Check Every spaceTimerHrs=%i (0=off)"
" Target spaceFreeMB=%i (min=100 MB) spaceFileExt=%s"
% (spaceTimerHrs, spaceFreeMB, spaceFileExt))
print(" Delete Oldest spaceFileExt=%s spaceMediaDir=%s"
% (spaceFileExt, spaceMediaDir))
else:
print("Disk Space .. spaceTimerHrs=%i "
"(Disabled) - Manage Target Free Disk Space. Delete Oldest %s Files"
% (spaceTimerHrs, spaceFileExt))
print(" .. Check Every spaceTimerHrs=%i (0=Off)"
" Target spaceFreeMB=%i (min=100 MB)"
% (spaceTimerHrs, spaceFreeMB))
print("")
print("Logging ...... verbose=%s (True=Enabled False=Disabled)"
% verbose)
print(" Log Path .. logDataToFile=%s logFilePath=%s"
% (logDataToFile, logFilePath))
print("--------------------------------- Log Activity "
"---------------------------------")
checkConfig()
#------------------------------------------------------------------------------
def subDirLatest(directory):
""" Scan for directories and return most recent """
dirList = ([name for name in
os.listdir(directory) if
os.path.isdir(os.path.join(directory, name))])
if len(dirList) > 0:
lastSubDir = sorted(dirList)[-1]
lastSubDir = os.path.join(directory, lastSubDir)
else:
lastSubDir = directory
return lastSubDir
#------------------------------------------------------------------------------
def subDirCreate(directory, prefix):
"""
Create a subdirectory in directory with
unique name based on prefix and date time
"""
now = datetime.datetime.now()
# Specify folder naming
subDirName = ('%s%d-%02d%02d-%02d%02d' % (prefix,
now.year, now.month, now.day,
now.hour, now.minute))
subDirPath = os.path.join(directory, subDirName)
if not os.path.isdir(subDirPath):
try:
os.makedirs(subDirPath)
except OSError as err:
logging.error('Cannot Create Directory %s - %s, using default location.',
subDirPath, err)
subDirPath = directory
else:
logging.info('Created %s', subDirPath)
else:
subDirPath = directory
return subDirPath
#------------------------------------------------------------------------------
def subDirCheckMaxFiles(directory, filesMax):
""" Count number of files in a folder path """
fileList = glob.glob(directory + '/*jpg')
count = len(fileList)
if count > filesMax:
makeNewDir = True
logging.info('Total Files in %s Exceeds %i', directory, filesMax)
else:
makeNewDir = False
return makeNewDir
#------------------------------------------------------------------------------
def subDirCheckMaxHrs(directory, hrsMax, prefix):
"""
Extract the date-time from the directory name and check whether the
folder age exceeds hrsMax (note: needs more error checking)
"""
dirName = os.path.split(directory)[1] # split dir path and keep dirName
# remove prefix from dirName so just date-time left
dirStr = dirName.replace(prefix, '')
# convert string to datetime
dirDate = datetime.datetime.strptime(dirStr, "%Y-%m%d-%H%M")  # must match subDirCreate naming
rightNow = datetime.datetime.now() # get datetime now
diff = rightNow - dirDate # get time difference between dates
days, seconds = diff.days, diff.seconds
dirAgeHours = days * 24 + seconds // 3600 # convert to hours
if dirAgeHours > hrsMax: # See if hours are exceeded
makeNewDir = True
logging.info('MaxHrs %i Exceeds %i for %s',
dirAgeHours, hrsMax, directory)
else:
makeNewDir = False
return makeNewDir
#------------------------------------------------------------------------------
def subDirChecks(maxHours, maxFiles, directory, prefix):
""" Check if motion SubDir needs to be created """
if maxHours < 1 and maxFiles < 1: # No Checks required
# logging.info('No sub-folders Required in %s', directory)
subDirPath = directory
else:
subDirPath = subDirLatest(directory)
if subDirPath == directory: # No subDir Found
logging.info('No sub folders Found in %s', directory)
subDirPath = subDirCreate(directory, prefix)
# Check MaxHours Folder Age Only
elif (maxHours > 0 and maxFiles < 1):
if subDirCheckMaxHrs(subDirPath, maxHours, prefix):
subDirPath = subDirCreate(directory, prefix)
elif (maxHours < 1 and maxFiles > 0): # Check Max Files Only
if subDirCheckMaxFiles(subDirPath, maxFiles):
subDirPath = subDirCreate(directory, prefix)
elif maxHours > 0 and maxFiles > 0: # Check both Max Files and Age
if subDirCheckMaxHrs(subDirPath, maxHours, prefix):
if subDirCheckMaxFiles(subDirPath, maxFiles):
subDirPath = subDirCreate(directory, prefix)
else:
logging.info('MaxFiles Not Exceeded in %s', subDirPath)
subDirPath = os.path.abspath(subDirPath)
return subDirPath
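# Illustrative call using the motion settings defined above (a sketch of the
# intended usage, not an actual call made at this point):
#     subDirPath = subDirChecks(motionSubDirMaxHours, motionSubDirMaxFiles,
#                               motionPath, motionPrefix)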
#------------------------------------------------------------------------------
def checkMediaPaths():
"""
Checks for image folders and
create them if they do not already exist.
"""
if motionTrackOn:
if not os.path.isdir(motionPath):
logging.info("Create Motion Media Folder %s", motionPath)
try:
os.makedirs(motionPath)
except OSError as err:
logging.error("Could Not Create %s - %s", motionPath, err)
sys.exit(1)
if os.path.isfile(motionNumPath):
logging.info("Delete Motion dat File %s", motionNumPath)
os.remove(motionNumPath)
if timelapseOn:
if not os.path.isdir(timelapsePath):
logging.info("Create TimeLapse Image Folder %s", timelapsePath)
try:
os.makedirs(timelapsePath)
except OSError as err:
logging.error("Could Not Create %s - %s", motionPath, err)
sys.exit(1)
if os.path.isfile(timelapseNumPath):
logging.info("Delete TimeLapse dat file %s", timelapseNumPath)
os.remove(timelapseNumPath)
# Check for Recent Image Folders and create if they do not already exist.
if motionRecentMax > 0:
if not os.path.isdir(motionRecentDir):
logging.info("Create Motion Recent Folder %s", motionRecentDir)
try:
os.makedirs(motionRecentDir)
except OSError as err:
logging.error('Failed to Create %s - %s', motionRecentDir, err)
sys.exit(1)
if timelapseRecentMax > 0:
if not os.path.isdir(timelapseRecentDir):
logging.info("Create TimeLapse Recent Folder %s",
timelapseRecentDir)
try:
os.makedirs(timelapseRecentDir)
except OSError as err:
logging.error('Failed to Create %s - %s',
timelapseRecentDir, err)
sys.exit(1)
#------------------------------------------------------------------------------
def deleteOldFiles(maxFiles, dirPath, prefix):
"""
Delete the oldest files matching the filename prefix until fewer than maxFiles remain
"""
try:
fileList = sorted(glob.glob(os.path.join(dirPath, prefix + '*')), key=os.path.getmtime)
except OSError as err:
logging.error('Problem Reading Directory %s - %s', dirPath, err)
else:
while len(fileList) >= maxFiles:
oldest = fileList[0]
oldestFile = oldest
try: # Remove oldest file in recent folder
fileList.remove(oldest)
logging.info('%s', oldestFile)
os.remove(oldestFile)
except OSError as err:
logging.error('Failed %s err: %s', oldestFile, err)
#------------------------------------------------------------------------------
def saveRecent(recentMax, recentDir, filename, prefix):
"""
Create a symlink file in recent folder (timelapse or motion subfolder)
Delete Oldest symlink file if recentMax exceeded.
"""
src = os.path.abspath(filename) # original file path
dest = os.path.abspath(os.path.join(recentDir,
os.path.basename(filename)))
deleteOldFiles(recentMax, os.path.abspath(recentDir), prefix)
try: # Create symlink in recent folder
logging.info('symlink %s', dest)
os.symlink(src, dest) # Create a symlink to actual file
except OSError as err:
logging.error('symlink %s to %s err: %s', dest, src, err)
#------------------------------------------------------------------------------
def filesToDelete(mediaDirPath, extension=imageFormat):
"""
Returns a list of files with the specified format extension, found by
walking the folder structure from the specified mediaDirPath and sorted
by modification time (newest first)
"""
return sorted(
(os.path.join(dirname, filename)
for dirname, dirnames, filenames in os.walk(mediaDirPath)
for filename in filenames
if filename.endswith(extension)),
key=lambda fn: os.stat(fn).st_mtime, reverse=True)
#------------------------------------------------------------------------------
def freeSpaceUpTo(freeMB, mediaDir, extension=imageFormat):
"""
Walks mediaDir and deletes the oldest files until the target free space
(freeMB) is achieved.
Use this feature with caution.
"""
mediaDirPath = os.path.abspath(mediaDir)
if os.path.isdir(mediaDirPath):
MB2Bytes = 1048576 # Conversion from MB to Bytes
targetFreeBytes = freeMB * MB2Bytes
fileList = filesToDelete(mediaDir, extension)
totFiles = len(fileList)
delcnt = 0
logging.info('Session Started')
while fileList:
statv = os.statvfs(mediaDirPath)
availFreeBytes = statv.f_bfree*statv.f_bsize
if availFreeBytes >= targetFreeBytes:
break
filePath = fileList.pop()
try:
os.remove(filePath)
except OSError as err:
logging.error('Del Failed %s', filePath)
logging.error('Error is %s', err)
else:
delcnt += 1
logging.info('Del %s', filePath)
logging.info('Target=%i MB Avail=%i MB Deleted %i of %i Files ',
targetFreeBytes / MB2Bytes, availFreeBytes / MB2Bytes,
delcnt, totFiles)
# Avoid deleting more than 1/4 of files at one time
if delcnt > totFiles / 4:
logging.warning('Max Deletions Reached %i of %i',
delcnt, totFiles)
logging.warning('Deletions Restricted to 1/4 of '
'total files per session.')
break
logging.info('Session Ended')
else:
logging.error('Directory Not Found - %s', mediaDirPath)
#------------------------------------------------------------------------------
def freeDiskSpaceCheck(lastSpaceCheck):
""" Perform Disk space checking and Clean up
if enabled and return datetime done
to reset ready for next sched date/time"""
if spaceTimerHrs > 0: # Check if disk free space timer hours is enabled
# See if it is time to do disk clean-up check
if (datetime.datetime.now() - lastSpaceCheck).total_seconds() > spaceTimerHrs * 3600:
lastSpaceCheck = datetime.datetime.now()
if spaceFreeMB < 100: # set freeSpaceMB to reasonable value if too low
diskFreeMB = 100
else:
diskFreeMB = spaceFreeMB
logging.info('spaceTimerHrs=%i diskFreeMB=%i spaceMediaDir=%s spaceFileExt=%s',
spaceTimerHrs, diskFreeMB, spaceMediaDir, spaceFileExt)
freeSpaceUpTo(diskFreeMB, spaceMediaDir, spaceFileExt)
return lastSpaceCheck
#------------------------------------------------------------------------------
def getCurrentCount(numberpath, numberstart):
""" Create a .dat file to store currentCount
or read file if it already Exists"""
if not os.path.isfile(numberpath):
# Create numberPath file if it does not exist
logging.info("Creating New File %s numberstart= %s",
numberpath, numberstart)
open(numberpath, 'w').close()
f = open(numberpath, 'w+')
f.write(str(numberstart))
f.close()
# Read the numberPath file to get the last sequence number
with open(numberpath, 'r') as f:
writeCount = f.read()
f.closed
try:
numbercounter = int(writeCount)
# Found Corrupt dat file since cannot convert to integer
except ValueError:
# Try to determine if this is motion or timelapse
if numberpath.find(motionPrefix) > 0:
filePath = motionPath + "/*" + imageFormat
fprefix = motionPath + motionPrefix + imageNamePrefix
else:
filePath = timelapsePath + "/*" + imageFormat
fprefix = timelapsePath + timelapsePrefix + imageNamePrefix
try:
# Scan image folder for most recent file
# and try to extract most recent number counter
newest = max(glob.iglob(filePath), key=os.path.getctime)
writeCount = newest[len(fprefix)+1:newest.find(imageFormat)]
except:
writeCount = numberstart
try:
numbercounter = int(writeCount)+1
except ValueError:
numbercounter = numberstart
logging.warn("Found Invalid Data in %s Resetting Counter to %s",
numberpath, numbercounter)
f = open(numberpath, 'w+')
f.write(str(numbercounter))
f.close()
f = open(numberpath, 'r')
writeCount = f.read()
f.close()
numbercounter = int(writeCount)
return numbercounter
#------------------------------------------------------------------------------
def writeTextToImage(imagename, datetoprint, currentDayMode):
"""
function to write date/time stamp
directly on top or bottom of images.
"""
if showTextWhite:
FOREGROUND = (255, 255, 255) # rgb settings for white text foreground
textColour = "White"
else:
FOREGROUND = (0, 0, 0) # rgb settings for black text foreground
textColour = "Black"
if showTextWhiteNight and (not currentDayMode):
# rgb settings for white text foreground at night
FOREGROUND = (255, 255, 255)
textColour = "White"
img = cv2.imread(imagename)
# Note: channels is unpacked here but not used further (cv2.imread loads BGR by default)
height, width, channels = img.shape
# centre text and compensate for graphics text being wider
x = int((width/2) - (len(imagename)*2))
if showTextBottom:
y = (height - 50) # show text at bottom of image
else:
y = 10 # show text at top of image
TEXT = imageNamePrefix + datetoprint
font_path = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
font = ImageFont.truetype(font_path, showTextFontSize, encoding='unic')
try:
text = TEXT.decode('utf-8') # required for python2
except:
text = TEXT # Just set for python3
img = Image.open(imagename)
# For python3 install of pyexiv2 lib
# See https://github.com/pageauc/pi-timolo/issues/79
try: # Read exif data since ImageDraw does not save this metadata
metadata = pyexiv2.ImageMetadata(imagename)
metadata.read()
except:
pass
draw = ImageDraw.Draw(img)
# draw.text((x, y),"Sample Text",(r,g,b))
draw.text((x, y), text, FOREGROUND, font=font)
img.save(imagename)
logging.info("Added %s Text [ %s ]", textColour, datetoprint)
try:
metadata.write() # Write previously saved exif data to image file
except:
logging.warn("Image EXIF Data Not Transferred.")
logging.info("Saved %s", imagename)
#------------------------------------------------------------------------------
def postImageProcessing(numberon, counterstart, countermax, counter,
recycle, counterpath, filename, currentDaymode):
""" If required process text to display directly on image """
rightNow = datetime.datetime.now()
if showDateOnImage:
dateTimeText = ("%04d%02d%02d_%02d:%02d:%02d"
% (rightNow.year, rightNow.month, rightNow.day,
rightNow.hour, rightNow.minute, rightNow.second))
if numberon:
if not recycle and countermax > 0:
counterStr = "%i/%i " % (counter, counterstart + countermax)
imageText = counterStr + dateTimeText
else:
counterStr = "%i " % (counter)
imageText = counterStr + dateTimeText
else:
imageText = dateTimeText
# Now put the imageText on the current image
try: # This will fail for a video file
writeTextToImage(filename, imageText, currentDaymode)
except:
pass
if createLockFile and motionTrackOn:
createSyncLockFile(filename)
# Process currentCount for next image if number sequence is enabled
if numberon:
counter += 1
if countermax > 0:
if counter > counterstart + countermax:
if recycle:
counter = counterstart
else:
counter = counterstart + countermax + 1
logging.warn("Exceeded Image Count numberMax=%i for %s \n",
countermax, filename)
# write next image counter number to dat file
writeCount = str(counter)
if not os.path.isfile(counterpath):
logging.info("Create New Counter File writeCount=%s %s",
writeCount, counterpath)
open(counterpath, 'w').close()
f = open(counterpath, 'w+')
f.write(str(writeCount))
f.close()
logging.info("Next Counter=%s %s", writeCount, counterpath)
return counter
#------------------------------------------------------------------------------
def getVideoName(path, prefix, numberon, counter):
""" build image file names by number sequence or date/time"""
if numberon:
if motionVideoOn or videoRepeatOn:
filename = os.path.join(path, prefix + str(counter) + ".h264")
else:
if motionVideoOn or videoRepeatOn:
rightNow = datetime.datetime.now()
filename = ("%s/%s%04d%02d%02d-%02d%02d%02d.h264"
% (path, prefix,
rightNow.year, rightNow.month, rightNow.day,
rightNow.hour, rightNow.minute, rightNow.second))
return filename
#------------------------------------------------------------------------------
def getImageName(path, prefix, numberon, counter):
""" build image file names by number sequence or date/time """
if numberon:
filename = os.path.join(path, prefix + str(counter) + imageFormat)
else:
rightNow = datetime.datetime.now()
filename = ("%s/%s%04d%02d%02d-%02d%02d%02d%s"
% (path, prefix,
rightNow.year, rightNow.month, rightNow.day,
rightNow.hour, rightNow.minute, rightNow.second,
imageFormat))
return filename
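# Example (illustrative path and prefix): getImageName('/home/pi/media', 'mo-cam1-', True, 1000)
# returns '/home/pi/media/mo-cam1-1000.jpg' with the default imageFormat, while
# numberon=False yields a date-time name such as '/home/pi/media/mo-cam1-20170701-093045.jpg'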
#------------------------------------------------------------------------------
def takeTrackQuickPic(image, filename):
""" Enlarge and Save stream image if motionTrackQuickPic=True"""
big_image = cv2.resize(image, (bigImageWidth, bigImageHeight))
cv2.imwrite(filename, big_image)
logging.info("Saved %ix%i Image to %s",
bigImageWidth, bigImageHeight, filename)
#------------------------------------------------------------------------------
def showBox(filename):
"""
Show stream image detection area on image to align camera
This is a quick fix for restricting motion detection
to a portion of the final image. Adjust the stream image size via the
streamWidth and streamHeight settings, and set the config.py
motionTrackTrigLen value as required.
"""
working_image = cv2.imread(filename)
x1y1 = (int((imageWidth - CAMERA_WIDTH)/2),
int((imageHeight - CAMERA_HEIGHT)/2))
x2y2 = (x1y1[0] + CAMERA_WIDTH,
x1y1[1] + CAMERA_HEIGHT)
cv2.rectangle(working_image, x1y1, x2y2, LINE_COLOR, LINE_THICKNESS)
cv2.imwrite(filename, working_image)
#------------------------------------------------------------------------------
def takeDayImage(filename, cam_sleep_time):
""" Take a Day image using exp=auto and awb=auto """
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
camera.rotation = imageRotation # Valid values are 0, 90, 180, 270
# Day Automatic Mode
camera.exposure_mode = 'auto'
camera.awb_mode = 'auto'
if imageGrayscale:
camera.color_effects = (128, 128)
time.sleep(cam_sleep_time) # use motion or TL camera sleep to get AWB
if imagePreview:
camera.start_preview()
if imageFormat == ".jpg": # Set quality if image is jpg
camera.capture(filename, quality=imageJpegQuality)
else:
camera.capture(filename)
camera.close()
if imageShowStream: # Show motion area on full image to align camera
showBox(filename)
logging.info("camSleepSec=%.2f exp=auto awb=auto Size=%ix%i ",
cam_sleep_time, imageWidth, imageHeight)
# showDateOnImage displays FilePath so avoid showing twice
if not showDateOnImage:
logging.info("FilePath %s", filename)
if runScriptAfterCapture!=False:
if os.path.isfile(runScriptAfterCapture):
subprocess.call([runScriptAfterCapture])
else:
logging.error("Script to be run after taking an image, "+runScript+", doesn't exist")
#------------------------------------------------------------------------------
def getShut(pxAve):
"""
Calculate a shutter speed based on image pixel average
"""
px = pxAve + 1 # avoid division by zero
offset = nightMaxShut - ((nightMaxShut / float(nightDarkThreshold) * px))
brightness = offset * (1/float(nightDarkAdjust))
# hyperbolic curve + brightness adjust
shut = (nightMaxShut * (1 / float(px))) + brightness
return int(shut)
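# Worked example using the default settings above (nightMaxShutSec=5.9 so
# nightMaxShut=5,900,000 us, nightDarkThreshold=50, nightDarkAdjust=4.7):
# for pxAve=25 -> px=26, offset = 5,900,000 - (5,900,000/50)*26 = 2,832,000
# brightness = 2,832,000/4.7 ~ 602,553
# shut = 5,900,000/26 + 602,553 ~ 829,476 us, i.e. shut2Sec(getShut(25)) ~ "0.8295"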
#------------------------------------------------------------------------------
def takeNightImage(filename, pixelAve):
""" Take low light Twilight or Night image """
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
camera.rotation = imageRotation # valid values are 0, 90, 180, 270
if imageGrayscale:
camera.color_effects = (128, 128)
time.sleep(1)
# Use Twilight Threshold variable framerate_range
if pixelAve >= nightDarkThreshold:
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
time.sleep(1)
camera.iso = nightMaxISO
logging.info("%ix%i TwilightThresh=%i/%i MaxISO=%i uses framerate_range",
imageWidth, imageHeight,
pixelAve, nightTwilightThreshold,
nightMaxISO)
time.sleep(4)
else:
# Set the framerate to a fixed value
camera.framerate = Fraction(1, 6)
time.sleep(1)
camera.iso = nightMaxISO
if pixelAve <= nightBlackThreshold: # Black Threshold (very dark)
camera.shutter_speed = nightMaxShut
logging.info("%ix%i BlackThresh=%i/%i shutSec=%s MaxISO=%i nightSleepSec=%i",
imageWidth, imageHeight,
pixelAve, nightBlackThreshold,
shut2Sec(nightMaxShut), nightMaxISO, nightSleepSec)
else: # Dark Threshold (Between Twilight and Black)
camShut = getShut(pixelAve)
if camShut > nightMaxShut:
camShut = nightMaxShut
# Set the shutter for long exposure
camera.shutter_speed = camShut
logging.info("%ix%i DarkThresh=%i/%i shutSec=%s MaxISO=%i nightSleepSec=%i",
imageWidth, imageHeight,
pixelAve, nightDarkThreshold,
shut2Sec(camShut), nightMaxISO, nightSleepSec)
time.sleep(nightSleepSec)
camera.exposure_mode = 'off'
if imageFormat == ".jpg":
camera.capture(filename, format='jpeg', quality=imageJpegQuality)
else:
camera.capture(filename)
camera.framerate = 5 # Adhoc Fix for Stretch camera freeze issue
# Perform sudo rpi-update
camera.close()
if imageShowStream: # Show motion area on full image to align camera
showBox(filename)
# showDateOnImage displays FilePath to avoid showing twice
if not showDateOnImage:
logging.info("FilePath %s", filename)
if runScriptAfterCapture!=False:
if os.path.isfile(runScriptAfterCapture):
subprocess.call([runScriptAfterCapture])
else:
logging.error("Script to be run after taking an image, "+runScriptAfterCapture+", doesn't exist")
#------------------------------------------------------------------------------
def takeQuickTimeLapse(moPath, imagePrefix, NumOn, motionNumCount,
currentDayMode, NumPath):
""" Take a quick timelapse sequence using yield if motion triggered """
logging.info("Start Sequence for %i sec every %i sec",
motionQuickTLTimer, motionQuickTLInterval)
checkTimeLapseTimer = datetime.datetime.now()
keepTakingImages = True
imgCnt = 0
filename = getImageName(moPath, imagePrefix, NumOn, motionNumCount)
while keepTakingImages:
yield filename
rightNow = datetime.datetime.now()
timelapseDiff = (rightNow - checkTimeLapseTimer).total_seconds()
motionNumCount = postImageProcessing(NumOn,
motionNumStart,
motionNumMax,
motionNumCount,
motionNumRecycle,
NumPath, filename,
currentDayMode)
filename = getImageName(moPath, imagePrefix, NumOn, motionNumCount)
if timelapseDiff > motionQuickTLTimer:
keepTakingImages = False
else:
imgCnt += 1
if motionRecentMax > 0:
saveRecent(motionRecentMax,
motionRecentDir,
filename,
imagePrefix)
time.sleep(motionQuickTLInterval)
logging.info('End Sequence Total %i Images in %i seconds',
imgCnt, timelapseDiff)
#------------------------------------------------------------------------------
def takeVideo(filename, duration, fps=25):
""" Take a short motion video if required """
# Working folder for h264 videos
h264_work = os.path.join(baseDir, "h264_work")
if not os.path.isdir(h264_work):
try:
os.makedirs(h264_work)
except OSError as err:
logging.error('%s err: %s', h264_work, err)
else:
logging.info('Created Dir %s', h264_work)
filePath264 = os.path.join(h264_work, os.path.basename(filename))
# Final destination for mp4 videos
filePathMP4 = os.path.join(os.path.dirname(filename),
os.path.splitext(os.path.basename(filename))[0] + ".mp4")
# command to convert h264 video to mp4
h264_mp4_cmd = ("/usr/bin/MP4Box -add %s:fps=%i -new %s" %
(filePath264, fps, filePathMP4))
logging.info("File : %s", filePath264)
logging.info("Start: Size %ix%i for %i sec at %i fps",
imageWidth, imageHeight, duration, fps)
if motionVideoOn or videoRepeatOn:
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
# rotation can be used if camera is on side
camera.rotation = imageRotation
camera.framerate = fps
if showDateOnImage:
rightNow = datetime.datetime.now()
dateTimeText = (" Started at %04d-%02d-%02d %02d:%02d:%02d "
% (rightNow.year,
rightNow.month,
rightNow.day,
rightNow.hour,
rightNow.minute,
rightNow.second))
camera.annotate_text_size = showTextFontSize
camera.annotate_foreground = picamera.Color('black')
camera.annotate_background = picamera.Color('white')
camera.annotate_text = dateTimeText
camera.start_recording(filePath264)
camera.wait_recording(duration)
camera.stop_recording()
camera.close()
# This creates a subprocess that runs MP4Box to convert h264 file
# to MP4 with the filename as a parameter. Note this will take
# some time so MP4Box logging info will be delayed.
try:
logging.info("MP4Box %s", filePathMP4)
proc = subprocess.Popen(h264_mp4_cmd, shell=True, stdin=None,
stdout=None, stderr=None, close_fds=True)
except IOError:
logging.error("subprocess %s", h264_mp4_cmd)
if motionRecentMax > 0:
saveRecent(motionRecentMax,
motionRecentDir,
filePathMP4,
motionPrefix)
createSyncLockFile(filename)
#------------------------------------------------------------------------------
def createSyncLockFile(imagefilename):
"""
If required create a lock file to indicate file(s) to process
"""
if createLockFile:
if not os.path.isfile(lockFilePath):
open(lockFilePath, 'w').close()
logging.info("Create Lock File %s", lockFilePath)
rightNow = datetime.datetime.now()
now = ("%04d%02d%02d-%02d%02d%02d"
% (rightNow.year, rightNow.month, rightNow.day,
rightNow.hour, rightNow.minute, rightNow.second))
filecontents = (now + " createSyncLockFile - " + imagefilename +
" Ready to sync using sudo ./sync.sh command.")
f = open(lockFilePath, 'w+')
f.write(filecontents)
f.close()
#------------------------------------------------------------------------------
def trackPoint(grayimage1, grayimage2):
"""
Process two cropped grayscale images.
check for motion and return center point
of motion for largest contour.
"""
movementCenterPoint = [] # initialize list of movementCenterPoints
biggestArea = MIN_AREA
# Get differences between the two greyed images
differenceimage = cv2.absdiff(grayimage1, grayimage2)
# Blur difference image to enhance motion vectors
differenceimage = cv2.blur(differenceimage, (BLUR_SIZE, BLUR_SIZE))
# Get threshold of blurred difference image
# based on THRESHOLD_SENSITIVITY variable
retval, thresholdimage = cv2.threshold(differenceimage,
THRESHOLD_SENSITIVITY,
255, cv2.THRESH_BINARY)
try:
# opencv2 syntax default
contours, hierarchy = cv2.findContours(thresholdimage,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
# opencv 3 syntax
thresholdimage, contours, hierarchy = cv2.findContours(thresholdimage,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
if contours:
for c in contours:
cArea = cv2.contourArea(c)
if cArea > biggestArea:
biggestArea = cArea
(x, y, w, h) = cv2.boundingRect(c)
cx = int(x + w/2) # x center point of contour
cy = int(y + h/2) # y center point of contour
movementCenterPoint = [cx, cy]
return movementCenterPoint
#------------------------------------------------------------------------------
def trackDistance(mPoint1, mPoint2):
"""
Return the triangulated distance between two tracking locations
"""
x1, y1 = mPoint1
x2, y2 = mPoint2
trackLen = abs(math.hypot(x2 - x1, y2 - y1))
return trackLen
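# Example: trackDistance([0, 0], [3, 4]) returns 5.0 (pixels).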
#------------------------------------------------------------------------------
def getStreamPixAve(streamData):
"""
Calculate the average pixel values for the specified stream
used for determining day/night or twilight conditions
"""
pixAverage = int(np.average(streamData[..., 1]))
return pixAverage
#------------------------------------------------------------------------------
def checkIfDayStream(currentDayMode, image):
""" Try to determine if it is day, night or twilight."""
dayPixAverage = 0
dayPixAverage = getStreamPixAve(image)
if dayPixAverage > nightTwilightThreshold:
currentDayMode = True
else:
currentDayMode = False
return currentDayMode
#------------------------------------------------------------------------------
def timeToSleep(currentDayMode):
"""
Based on weather it is day or night (exclude twilight)
return sleepMode boolean based on variable
settings for noNightShots or noDayShots config.py variables
Note if both are enabled then no shots will be taken.
"""
if noNightShots:
if currentDayMode:
sleepMode = False
else:
sleepMode = True
elif noDayShots:
if currentDayMode:
sleepMode = True
else:
sleepMode = False
else:
sleepMode = False
return sleepMode
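# Resulting sleepMode (True means skip image capture), matching the logic above:
#   noNightShots=True : day -> False, night -> True
#   noDayShots=True   : day -> True,  night -> False
#   both False        : always False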
#------------------------------------------------------------------------------
def getSchedStart(dateToCheck):
"""
This function will try to extract a valid date/time from a
date time formatted string variable
If date/time is past then try to extract time
and schedule for current date at extracted time
"""
goodDateTime = datetime.datetime.now()
if len(dateToCheck) > 1: # Check if timelapseStartAt is set
try:
# parse and convert string to date/time or return error
goodDateTime = parse(dateToCheck)
except:
# Is there a colon indicating possible time format exists
if ":" in dateToCheck:
timeTry = dateToCheck[dateToCheck.find(":") -2:]
# Try to extract time only from string
try:
# See if a valid time is found returns with current day
goodDateTime = parse(timeTry)
except:
logging.error("Bad Date and/or Time Format %s",
dateToCheck)
logging.error('Use a Valid Date and/or Time '
'Format Eg "DD-MMM-YYYY HH:MM:SS"')
goodDateTime = datetime.datetime.now()
logging.warn("Resetting date/time to Now: %s",
goodDateTime)
# Check if date/time is past
if goodDateTime < datetime.datetime.now():
if ":" in dateToCheck: # Check if there is a time component
# Extract possible time component
timeTry = dateToCheck[dateToCheck.find(":") -2:]
try:
# parse for valid time
# returns current day with parsed time
goodDateTime = parse(timeTry)
except:
pass # Do Nothing
return goodDateTime
#------------------------------------------------------------------------------
def checkSchedStart(schedDate):
"""
Based on schedule date setting see if current
datetime is past and return boolean
to indicate processing can start for
timelapse or motiontracking
"""
startStatus = False
if schedDate < datetime.datetime.now():
startStatus = True # sched date/time has passed so start sequence
return startStatus
#------------------------------------------------------------------------------
def checkForTimelapse(timelapseStart):
""" Check if timelapse timer has expired """
rightNow = datetime.datetime.now()
timeDiff = (rightNow - timelapseStart).total_seconds()
if timeDiff > timelapseTimer:
timelapseStart = rightNow
timelapseFound = True
else:
timelapseFound = False
return timelapseFound
#------------------------------------------------------------------------------
def timolo():
"""
Main motion and or motion tracking
initialization and logic loop
"""
# Counter for showDots() display if not motion found
# shows system is working
dotCount = 0
checkMediaPaths()
timelapseNumCount = 0
motionNumCount = 0
tlstr = "" # Used to display if timelapse is selected
mostr = "" # Used to display if motion is selected
moCnt = "non"
tlCnt = "non"
daymode = False # Keep track of night and day based on dayPixAve
# Forcing motion if no motion for motionForce time exceeded
forceMotion = False
motionFound = False
takeTimeLapse = True
stopTimeLapse = False
takeMotion = True
stopMotion = False
firstTimeLapse = True
timelapseStart = datetime.datetime.now()
timelapseExitStart = timelapseStart
checkMotionTimer = timelapseStart
startTL = getSchedStart(timelapseStartAt)
startMO = getSchedStart(motionStartAt)
trackLen = 0.0
if spaceTimerHrs > 0:
lastSpaceCheck = datetime.datetime.now()
if timelapseOn:
tlstr = "TimeLapse"
        # Check if timelapse subDirs are required and create one if none exists
tlPath = subDirChecks(timelapseSubDirMaxHours,
timelapseSubDirMaxFiles,
timelapseDir, timelapsePrefix)
if timelapseNumOn:
timelapseNumCount = getCurrentCount(timelapseNumPath,
timelapseNumStart)
tlCnt = str(timelapseNumCount)
else:
logging.warn("Timelapse is Suppressed per timelapseOn=%s",
timelapseOn)
stopTimeLapse = True
logging.info("Start PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
if motionTrackOn:
mostr = "Motion Tracking"
        # Check if motion subDirs are required and
        # create one if none exists
moPath = subDirChecks(motionSubDirMaxHours,
motionSubDirMaxFiles,
motionDir,
motionPrefix)
if motionNumOn:
motionNumCount = getCurrentCount(motionNumPath, motionNumStart)
moCnt = str(motionNumCount)
trackTimeout = time.time()
trackTimer = TRACK_TIMEOUT
startPos = []
startTrack = False
image1 = vs.read()
image2 = vs.read()
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
else:
image2 = vs.read() # use video stream to check for daymode
logging.info("Motion Tracking is Suppressed per variable motionTrackOn=%s",
motionTrackOn)
stopMotion = True
daymode = checkIfDayStream(daymode, image2)
pixAve = getStreamPixAve(image2)
if timelapseOn and motionTrackOn:
tlstr = " and " + tlstr
displayInfo(moCnt, tlCnt) # Display config.py settings
if logDataToFile:
logging.info("logDataToFile=%s Logging to Console Disabled.",
logDataToFile)
logging.info("Sending Console Messages to %s", logFilePath)
logging.info("Entering Loop for %s%s", mostr, tlstr)
else:
if pluginEnable:
logging.info("plugin %s - Start %s%s Loop ...",
pluginName, mostr, tlstr)
else:
logging.info("Start %s%s Loop ... ctrl-c Exits", mostr, tlstr)
if motionTrackOn and not checkSchedStart(startMO):
logging.info('Motion Track: motionStartAt = "%s"', motionStartAt)
logging.info("Motion Track: Sched Start Set For %s Please Wait ...",
startMO)
if timelapseOn and not checkSchedStart(startTL):
logging.info('Timelapse : timelapseStartAt = "%s"', timelapseStartAt)
logging.info("Timelapee : Sched Start Set For %s Please Wait ...",
startTL)
logging.info("daymode=%s motionDotsOn=%s ", daymode, motionDotsOn)
dotCount = showDots(motionDotsMax) # reset motion dots
while True: # Start main program Loop.
motionFound = False
forceMotion = False
if (motionTrackOn and (not motionNumRecycle)
and (motionNumCount > motionNumStart + motionNumMax)
and (not stopMotion)):
logging.warning("motionNumRecycle=%s and motionNumCount %i Exceeds %i",
motionNumRecycle, motionNumCount,
motionNumStart + motionNumMax)
logging.warn("Suppressing Further Motion Tracking")
logging.warn("To Reset: Change %s Settings or Archive Images",
configName)
logging.warn("Then Delete %s and Restart %s \n",
motionNumPath, progName)
takeMotion = False
stopMotion = True
if stopTimeLapse and stopMotion:
logging.warn("NOTICE: Both Motion and Timelapse Disabled")
logging.warn("per Num Recycle=False and "
"Max Counter Reached or timelapseExitSec Settings")
logging.warn("Change %s Settings or Archive/Save Media Then",
configName)
logging.warn("Delete appropriate .dat File(s) to Reset Counter(s)")
logging.warn("Exiting %s %s \n", progName, progVer)
sys.exit(1)
# if required check free disk space and delete older files (jpg)
if spaceTimerHrs > 0:
lastSpaceCheck = freeDiskSpaceCheck(lastSpaceCheck)
# use image2 to check daymode as image1 may be average
# that changes slowly, and image1 may not be updated
if motionTrackOn:
if daymode != checkIfDayStream(daymode, image2):
daymode = not daymode
image2 = vs.read()
image1 = image2
else:
image2 = vs.read()
else:
image2 = vs.read()
# if daymode has changed, reset background
# to avoid false motion trigger
if daymode != checkIfDayStream(daymode, image2):
daymode = not daymode
pixAve = getStreamPixAve(image2)
rightNow = datetime.datetime.now() # refresh rightNow time
if not timeToSleep(daymode):
# Don't take images if noNightShots
# or noDayShots settings are valid
if timelapseOn and checkSchedStart(startTL):
# Check for a scheduled date/time to start timelapse
if firstTimeLapse:
firstTimeLapse = False
takeTimeLapse = True
else:
takeTimeLapse = checkForTimelapse(timelapseStart)
if ((not stopTimeLapse) and takeTimeLapse and
timelapseExitSec > 0):
if ((datetime.datetime.now() -
timelapseExitStart).total_seconds() >
timelapseExitSec):
logging.info("timelapseExitSec=%i Exceeded.",
timelapseExitSec)
logging.info("Suppressing Further Timelapse Images")
logging.info("To RESET: Restart %s to Restart "
"timelapseExitSec Timer. \n", progName)
# Suppress further timelapse images
takeTimeLapse = False
stopTimeLapse = True
if ((not stopTimeLapse) and timelapseNumOn
and (not timelapseNumRecycle)):
if (timelapseNumMax > 0 and
timelapseNumCount > (timelapseNumStart + timelapseNumMax)):
logging.warn("timelapseNumRecycle=%s and Counter=%i Exceeds %i",
timelapseNumRecycle, timelapseNumCount,
timelapseNumStart + timelapseNumMax)
logging.warn("Suppressing Further Timelapse Images")
logging.warn("To RESET: Change %s Settings or Archive Images",
configName)
logging.warn("Then Delete %s and Restart %s \n",
timelapseNumPath, progName)
# Suppress further timelapse images
takeTimeLapse = False
stopTimeLapse = True
if takeTimeLapse and (not stopTimeLapse):
if motionDotsOn and motionTrackOn:
# reset motion dots
dotCount = showDots(motionDotsMax + 2)
else:
print("")
if pluginEnable:
if timelapseExitSec > 0:
exitSecProgress = (datetime.datetime.now() -
timelapseExitStart).total_seconds()
logging.info("%s Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i/%i Status",
pluginName, daymode, timelapseTimer,
exitSecProgress, timelapseExitSec)
else:
logging.info("%s Sched TimeLapse daymode=%s"
" Timer=%i sec ExitSec=%i 0=Continuous",
pluginName, daymode,
timelapseTimer, timelapseExitSec)
else:
if timelapseExitSec > 0:
exitSecProgress = (datetime.datetime.now() -
timelapseExitStart).total_seconds()
logging.info("Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i/%i Status",
daymode, timelapseTimer,
exitSecProgress, timelapseExitSec)
else:
logging.info("Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i 0=Continuous",
daymode, timelapseTimer,
timelapseExitSec)
imagePrefix = timelapsePrefix + imageNamePrefix
filename = getImageName(tlPath, imagePrefix,
timelapseNumOn, timelapseNumCount)
logging.info("Stop PiVideoStream ...")
vs.stop()
time.sleep(motionStreamStopSec)
# reset time lapse timer
timelapseStart = datetime.datetime.now()
if daymode:
takeDayImage(filename, timelapseCamSleep)
else:
takeNightImage(filename, pixAve)
timelapseNumCount = postImageProcessing(timelapseNumOn,
timelapseNumStart,
timelapseNumMax,
timelapseNumCount,
timelapseNumRecycle,
timelapseNumPath,
filename, daymode)
if timelapseRecentMax > 0:
saveRecent(timelapseRecentMax, timelapseRecentDir,
filename, imagePrefix)
if timelapseMaxFiles > 0:
deleteOldFiles(timelapseMaxFiles, timelapseDir,
imagePrefix)
dotCount = showDots(motionDotsMax)
logging.info("Restart PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
tlPath = subDirChecks(timelapseSubDirMaxHours,
timelapseSubDirMaxFiles,
timelapseDir, timelapsePrefix)
if motionTrackOn and checkSchedStart(startMO) and takeMotion and (not stopMotion):
# IMPORTANT - Night motion tracking may not work very well
# due to long exposure times and low light
image2 = vs.read()
grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
movePoint1 = trackPoint(grayimage1, grayimage2)
grayimage1 = grayimage2
if movePoint1 and not startTrack:
startTrack = True
trackTimeout = time.time()
startPos = movePoint1
image2 = vs.read()
grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
movePoint2 = trackPoint(grayimage1, grayimage2)
if movePoint2 and startTrack: # Two sets of movement required
trackLen = trackDistance(startPos, movePoint2)
# wait until track well started
if trackLen > TRACK_TRIG_LEN_MIN:
# Reset tracking timer object moved
trackTimeout = time.time()
if motionTrackInfo:
logging.info("Track Progress From(%i,%i) To(%i,%i) trackLen=%i/%i px",
startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
# Track length triggered
if trackLen > TRACK_TRIG_LEN:
# reduce chance of two objects at different positions
if trackLen > TRACK_TRIG_LEN_MAX:
motionFound = False
if motionTrackInfo:
logging.info("TrackLen %i px Exceeded %i px Max Trig Len Allowed.",
trackLen, TRACK_TRIG_LEN_MAX)
else:
motionFound = True
if pluginEnable:
logging.info("%s Motion Triggered Start(%i,%i)"
" End(%i,%i) trackLen=%.i/%i px",
pluginName, startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
else:
logging.info("Motion Triggered Start(%i,%i)"
" End(%i,%i) trackLen=%i/%i px",
startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
startTrack = False
startPos = []
trackLen = 0.0
# Track timed out
if (time.time() - trackTimeout > trackTimer) and startTrack:
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
if motionTrackInfo:
logging.info("Track Timer %.2f sec Exceeded. Reset Track",
trackTimer)
startTrack = False
startPos = []
trackLen = 0.0
rightNow = datetime.datetime.now()
timeDiff = (rightNow - checkMotionTimer).total_seconds()
if motionForce > 0 and timeDiff > motionForce:
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
dotCount = showDots(motionDotsMax + 2) # New Line
logging.info("No Motion Detected for %s minutes. "
"Taking Forced Motion Image.",
(motionForce / 60))
checkMotionTimer = rightNow
forceMotion = True
if motionFound or forceMotion:
imagePrefix = motionPrefix + imageNamePrefix
if motionTrackQuickPic: # Do not stop PiVideoStream
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
takeTrackQuickPic(image2, filename)
motionNumCount = postImageProcessing(motionNumOn,
motionNumStart,
motionNumMax,
motionNumCount,
motionNumRecycle,
motionNumPath,
filename, daymode)
if motionRecentMax > 0:
saveRecent(motionRecentMax,
motionRecentDir,
filename,
imagePrefix)
else:
if motionTrackOn:
logging.info("Stop PiVideoStream ...")
vs.stop()
time.sleep(motionStreamStopSec)
checkMotionTimer = rightNow
if forceMotion:
forceMotion = False
# check if motion Quick Time Lapse option is On.
# This option supersedes motionVideoOn
if motionQuickTLOn and daymode:
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
# valid rotation values 0, 90, 180, 270
camera.rotation = imageRotation
time.sleep(motionCamSleep)
# This uses yield to loop through time lapse
# sequence but does not seem to be faster
# due to writing images
camera.capture_sequence(takeQuickTimeLapse(moPath,
imagePrefix,
motionNumOn,
motionNumCount,
daymode,
motionNumPath))
camera.close()
motionNumCount = getCurrentCount(motionNumPath,
motionNumStart)
else:
if motionVideoOn:
filename = getVideoName(motionPath,
imagePrefix,
motionNumOn,
motionNumCount)
takeVideo(filename, motionVideoTimer,
motionVideoFPS)
else:
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
if daymode:
takeDayImage(filename, motionCamSleep)
else:
takeNightImage(filename, pixAve)
motionNumCount = postImageProcessing(motionNumOn,
motionNumStart,
motionNumMax,
motionNumCount,
motionNumRecycle,
motionNumPath,
filename,
daymode)
if motionRecentMax > 0:
if not motionVideoOn:
# prevent h264 video files from
# being copied to recent
saveRecent(motionRecentMax,
motionRecentDir,
filename,
imagePrefix)
if motionTrackOn:
logging.info("Restart PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1,
cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
trackLen = 0.0
trackTimeout = time.time()
startPos = []
startTrack = False
forceMotion = False
moPath = subDirChecks(motionSubDirMaxHours,
motionSubDirMaxFiles,
motionDir, motionPrefix)
if motionFound and motionCode:
# ===========================================
# Put your user code in userMotionCode() function
# In the File user_motion_code.py
# ===========================================
try:
user_motion_code.userMotionCode(filename)
dotCount = showDots(motionDotsMax)
except ValueError:
logging.error("Problem running userMotionCode function from File %s",
userMotionFilePath)
else:
# show progress dots when no motion found
dotCount = showDots(dotCount)
#------------------------------------------------------------------------------
def videoRepeat():
"""
This is a special dash cam video mode
that overrides both timelapse and motion tracking settings
It has it's own set of settings to manage start, video duration,
number recycle mode, Etc.
"""
# Check if folder exist and create if required
if not os.path.isdir(videoPath):
logging.info("Create videoRepeat Folder %s", videoPath)
os.makedirs(videoPath)
print("--------------------------------------------------------------------")
print("VideoRepeat . videoRepeatOn=%s" % videoRepeatOn)
print(" Info ..... Size=%ix%i videoPrefix=%s videoDuration=%i seconds videoFPS=%i"
% (imageWidth, imageHeight, videoPrefix, videoDuration, videoFPS))
print(" Vid Path . videoPath=%s" % videoPath)
print(" Sched .... videoStartAt=%s blank=Off or Set Valid Date and/or Time to Start Sequence"
% videoStartAt)
print(" Timer .... videoTimer=%i minutes 0=Continuous" % videoTimer)
print(" Num Seq .. videoNumOn=%s videoNumRecycle=%s videoNumStart=%i"
" videoNumMax=%i 0=Continuous"
% (videoNumOn, videoNumRecycle, videoNumStart, videoNumMax))
print("--------------------------------------------------------------------")
print("WARNING: videoRepeatOn=%s Suppresses TimeLapse and Motion Settings."
% videoRepeatOn)
startVideoRepeat = getSchedStart(videoStartAt)
if not checkSchedStart(startVideoRepeat):
logging.info('Video Repeat: videoStartAt = "%s" ', videoStartAt)
logging.info("Video Repeat: Sched Start Set For %s Please Wait ...",
startVideoRepeat)
while not checkSchedStart(startVideoRepeat):
pass
videoStartTime = datetime.datetime.now()
lastSpaceCheck = datetime.datetime.now()
videoCount = 0
videoNumCounter = videoNumStart
keepRecording = True
while keepRecording:
# if required check free disk space and delete older files
# Set variables spaceFileExt='mp4' and
# spaceMediaDir= to appropriate folder path
if spaceTimerHrs > 0:
lastSpaceCheck = freeDiskSpaceCheck(lastSpaceCheck)
filename = getVideoName(videoPath, videoPrefix,
videoNumOn, videoNumCounter)
takeVideo(filename, videoDuration, videoFPS)
timeUsed = (datetime.datetime.now() - videoStartTime).total_seconds()
timeRemaining = (videoTimer*60 - timeUsed) / 60.0
videoCount += 1
if videoNumOn:
videoNumCounter += 1
if videoNumMax > 0:
if videoNumCounter - videoNumStart > videoNumMax:
if videoNumRecycle:
videoNumCounter = videoNumStart
logging.info("Restart Numbering: videoNumRecycle=%s "
"and videoNumMax=%i Exceeded",
videoNumRecycle, videoNumMax)
else:
keepRecording = False
logging.info("Exit since videoNumRecycle=%s "
"and videoNumMax=%i Exceeded %i Videos Recorded",
videoNumRecycle, videoNumMax, videoCount)
logging.info("Recorded %i of %i Videos",
videoCount, videoNumMax)
else:
logging.info("Recorded %i Videos videoNumMax=%i 0=Continuous",
videoCount, videoNumMax)
else:
logging.info("Progress: %i Videos Recorded in Folder %s",
videoCount, videoPath)
if videoTimer > 0:
if timeUsed > videoTimer * 60:
keepRecording = False
errorText = ("Stop Recording Since videoTimer=%i minutes Exceeded \n",
videoTimer)
logging.warn(errorText)
sys.stdout.write(errorText)
else:
logging.info("Remaining Time %.1f of %i minutes",
timeRemaining, videoTimer)
else:
videoStartTime = datetime.datetime.now()
logging.info("Exit: %i Videos Recorded in Folder %s",
videoCount, videoPath)
#------------------------------------------------------------------------------
def getTlNumFromRclone():
    """ Update the timelapse number file from the most recent matching file on the rclone remote """
    tryToDoIt = True
while tryToDoIt:
logging.info("TIMELAPSENUMUPDATE: Trying to update "+timelapseNumPath+" from most recent Rclone file")
rcloneLsLines = subprocess.run(timelapseListRcloneCmd.split(), stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()
i=0
        while i < len(rcloneLsLines):
            outputLine = rcloneLsLines[i]
            i += 1  # advance the index so the loop terminates and the final check works
if "error" in outputLine.lower():
logging.error("TIMELAPSENUMUPDATE: Rclone ls command encountered an error")
if timelapseListRcloneErrorResetNetworking:
logging.info("TIMELAPSENUMUPDATE: Restarting networking")
subprocess.run(["sudo", "-S", "service", "networking", "restart"],input=raspiSudoPassword)
if timelapseListRcloneErrorRetry:
logging.info("TIMELAPSENUMUPDATE: Trying again in"+str(timelapseListRcloneErrorRetrySleep)+"seconds")
time.sleep(timelapseListRcloneErrorRetrySleep)
else:
tryToDoIt=False
break
if not timelapseListRcloneOutputSplit:
filename=outputLine
else:
filename=outputLine.split(timelapseListRcloneOutputSplit)[timelapseListRcloneOutputIndex]
if timelapsePrefix in filename and imageNamePrefix in filename and imageFormat in filename:
tlNumberStr = filename[len(timelapsePrefix+imageNamePrefix):-len(imageFormat)]
try:
tlNumberStr=str(int(tlNumberStr)+1)
except ValueError:
logging.debug("TIMELAPSENUMUPDATE: couldn't extract num from filename "+filename)
continue
f = open(timelapseNumPath, "w+")
f.write(tlNumberStr)
f.close()
logging.info("TIMELAPSENUMUPDATE: updated timelapse num to "+tlNumberStr)
tryToDoIt=False
break
if i==len(rcloneLsLines): tryToDoIt=False
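# Example (hypothetical values, for illustration only): with timelapsePrefix='tl-',
# imageNamePrefix='cam1-' and imageFormat='.jpg', a remote listing entry whose filename is
# 'tl-cam1-000457.jpg' yields the counter text '000457', so '458' is written to timelapseNumPath.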
#------------------------------------------------------------------------------
if __name__ == '__main__':
if timelapseOn and timelapseNumGetFromRcloneRemote: getTlNumFromRclone()
""" Initialization prior to launching appropriate pi-timolo options """
logging.info("Testing if Pi Camera is in Use")
# Test if the pi camera is already in use
ts = PiVideoStream().start()
time.sleep(1)
ts.stop()
time.sleep(motionStreamStopSec)
logging.info("Pi Camera is Available.")
if pluginEnable:
logging.info("Start pi-timolo per %s and plugins/%s.py Settings",
configFilePath, pluginName)
else:
logging.info("Start pi-timolo per %s Settings", configFilePath)
if not verbose:
print("NOTICE: Logging Disabled per variable verbose=False ctrl-c Exits")
try:
if videoRepeatOn:
videoRepeat()
else:
timolo()
except KeyboardInterrupt:
print("")
if verbose:
logging.info("User Pressed Keyboard ctrl-c")
logging.info("Exiting %s %s", progName, progVer)
else:
sys.stdout.write("User Pressed Keyboard ctrl-c \n")
sys.stdout.write("Exiting %s %s \n" % (progName, progVer))
try:
if pluginEnable:
if os.path.isfile(pluginCurrent):
os.remove(pluginCurrent)
pluginCurrentpyc = os.path.join(pluginDir, "current.pyc")
if os.path.isfile(pluginCurrentpyc):
os.remove(pluginCurrentpyc)
except OSError as err:
logging.warn("Failed To Remove File %s - %s", pluginCurrentpyc, err)
sys.exit(1)
|
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from matplotlib.colors import to_rgb, to_rgba
#---------------------------------------------------------------------------------------------------
# matplotlib style settings
#---------------------------------------------------------------------------------------------------
plt.rcParams['axes.linewidth'] = 1.4
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['ytick.minor.size'] = 2.5
plt.rcParams['xtick.minor.size'] = 4.0
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['ytick.labelsize'] = 'large'
plt.rcParams['xtick.labelsize'] = 'large'
#---------------------------------------------------------------------------------------------------
# custom user functions for plotting
#---------------------------------------------------------------------------------------------------
def set_custom_alpha(col_, alpha_):
    # convert any matplotlib color spec to RGBA, then override the alpha channel
    rgb_ = to_rgba(col_)
    return (rgb_[0], rgb_[1], rgb_[2], alpha_)
def rgb2rgba(col_):
_ = []
for c in col_:
_.append(float(c)/255.0)
_.append(1.0)
return tuple(_)
def getNcols(N=3, cmap_='plasma'):
cmap = plt.get_cmap(cmap_)
cols = cmap.colors
arr = []
for i in range(N):
arr.append(cols[int(256*float(i)/float(N))])
return arr
#---------------------------------------------------------------------------------------------------
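# Minimal usage sketch (an illustrative assumption, not part of the original helpers):
# build a legend from three evenly spaced plasma colors with a custom alpha.
if __name__ == '__main__':
    cols = getNcols(N=3, cmap_='plasma')
    handles = [Patch(facecolor=set_custom_alpha(c, 0.5), label='series %d' % i)
               for i, c in enumerate(cols)]
    plt.legend(handles=handles)
    plt.show()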
|
from activist.views import ActivistViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'activists', ActivistViewSet)
urlpatterns = router.urls
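# Assuming ActivistViewSet is a standard ModelViewSet, DefaultRouter generates roughly:
#   activists/       -> list / create                                  (name: activist-list)
#   activists/{pk}/  -> retrieve / update / partial_update / destroy   (name: activist-detail)
# plus a browsable API root view at the router root.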
|
def single_number(integers):
"""
Naive version: Given a non-empty array of integers, every element appears
twice except for one. Find that single one.
Runtime: O(n), Space: O(n)
"""
seen = set()
for integer in integers:
if integer in seen:
seen.remove(integer)
else:
seen.add(integer)
return seen.pop()
def find_unique_elem(arr):
"""
Given a non-empty array of integers, every element appears
twice except for one. Find that single one.
Runtime: O(n), Space: O(1)
"""
result = 0
for item in arr:
# all numbers repeated twice will be equal to 0 after XOR operation
# at the end, only left is the unique integer
result ^= item
return result
arr = [2, 2, 1]
print(find_unique_elem(arr))
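# Quick sanity check (sketch): both implementations should agree on the same input.
assert single_number([4, 1, 2, 1, 2]) == find_unique_elem([4, 1, 2, 1, 2]) == 4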
|
from django.contrib import admin
from .models import Survey, Question, Answer
class SurveyAdmin(admin.ModelAdmin):
list_display = (
'pk',
'title',
'description',
'start_date',
'end_date',
'author')
search_fields = ('title',)
list_filter = ('start_date',)
empty_value_display = '-empty-'
def get_readonly_fields(self, request, obj=None):
if obj:
return ['start_date']
return self.readonly_fields
class QuestionAdmin(admin.ModelAdmin):
list_display = (
'survey',
'pk',
'text',
'answer_type',
'answer_choices',)
    search_fields = ('text',)
list_filter = ('survey',)
class AnswerAdmin(admin.ModelAdmin):
list_display = (
'text',
'anonimously',
'question',
'pk',
'author',)
search_fields = ('text',)
list_filter = ('question','author',)
admin.site.register(Survey, SurveyAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Answer, AnswerAdmin)
|
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
class Backbone(nn.Module):
"""Base class for backbone networks. Handles freezing layers etc.
args:
frozen_layers - Name of layers to freeze. Either list of strings, 'none' or 'all'. Default: 'none'.
"""
def __init__(self, frozen_layers=()):
super().__init__()
if isinstance(frozen_layers, str):
if frozen_layers.lower() == 'none':
frozen_layers = ()
elif frozen_layers.lower() != 'all':
raise ValueError('Unknown option for frozen layers: \"{}\". Should be \"all\", \"none\" or list of layer names.'.format(frozen_layers))
self.frozen_layers = frozen_layers
self._is_frozen_nograd = False
def train(self, mode=True):
super().train(mode)
if mode == True:
self._set_frozen_to_eval()
if not self._is_frozen_nograd:
self._set_frozen_to_nograd()
self._is_frozen_nograd = True
def _set_frozen_to_eval(self):
if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':
self.eval()
else:
for layer in self.frozen_layers:
getattr(self, layer).eval()
def _set_frozen_to_nograd(self):
if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':
for p in self.parameters():
p.requires_grad_(False)
else:
for layer in self.frozen_layers:
for p in getattr(self, layer).parameters():
p.requires_grad_(False)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_bn=True):
super(BasicBlock, self).__init__()
self.use_bn = use_bn
self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
if use_bn:
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
if use_bn:
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.use_bn:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.use_bn:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(Backbone):
""" ResNet network module. Allows extracting specific feature blocks."""
def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1, frozen_layers=()):
self.inplanes = inplanes
super(ResNet, self).__init__(frozen_layers=frozen_layers)
self.output_layers = output_layers
self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]
self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))
self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))
# self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))
self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=1, dilation=2)
# self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)
out_feature_strides = {'conv1': 4, 'layer1': 4, 'layer2': 4*stride[0], 'layer3': 4*stride[0]*stride[1]}
if isinstance(self.layer1[0], BasicBlock):
out_feature_channels = {'conv1': inplanes, 'layer1': inplanes, 'layer2': inplanes*2, 'layer3': inplanes*4}
elif isinstance(self.layer1[0], Bottleneck):
base_num_channels = 4 * inplanes
out_feature_channels = {'conv1': inplanes, 'layer1': base_num_channels, 'layer2': base_num_channels * 2,
'layer3': base_num_channels * 4}
else:
raise Exception('block not supported')
self._out_feature_strides = out_feature_strides
self._out_feature_channels = out_feature_channels
self.num_channels_output = [list(out_feature_channels.values())[-1]]
# self.avgpool = nn.AvgPool2d(7, stride=1)
# self.avgpool = nn.AdaptiveAvgPool2d((1,1))
# self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def out_feature_strides(self, layer=None):
if layer is None:
return self._out_feature_strides
else:
return self._out_feature_strides[layer]
def out_feature_channels(self, layer=None):
if layer is None:
return self._out_feature_channels
else:
return self._out_feature_channels[layer]
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _add_output_and_check(self, name, x, outputs, output_layers, output_NCHW):
if name in output_layers:
if not output_NCHW:
x = x.flatten(start_dim=2)
x = x.transpose(1, 2)
outputs[name] = x
return len(output_layers) == len(outputs)
def forward(self, x, output_layers=None, output_NCHW=False):
""" Forward pass with input x. The output_layers specify the feature blocks which must be returned """
outputs = OrderedDict()
if output_layers is None:
output_layers = self.output_layers
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if self._add_output_and_check('conv1', x, outputs, output_layers, output_NCHW):
return outputs.values()
x = self.maxpool(x)
x = self.layer1(x)
if self._add_output_and_check('layer1', x, outputs, output_layers, output_NCHW):
return outputs.values()
x = self.layer2(x)
if self._add_output_and_check('layer2', x, outputs, output_layers, output_NCHW):
return outputs.values()
x = self.layer3(x)
if self._add_output_and_check('layer3', x, outputs, output_layers, output_NCHW):
return outputs.values()
# x = self.layer4(x)
# if self._add_output_and_check('layer4', x, outputs, output_layers):
# return outputs
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# if self._add_output_and_check('fc', x, outputs, output_layers):
# return outputs
#
# if len(output_layers) == 1 and output_layers[0] == 'default':
# return x
raise ValueError('output_layer is wrong.')
def resnet_baby(output_layers=None, pretrained=False, inplanes=16, **kwargs):
"""Constructs a ResNet-18 model.
"""
if output_layers is None:
output_layers = ['default']
else:
for l in output_layers:
if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
raise ValueError('Unknown layer: {}'.format(l))
model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, inplanes=inplanes, **kwargs)
if pretrained:
raise NotImplementedError
return model
def resnet18(output_layers=None, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
if output_layers is None:
output_layers = ['default']
else:
for l in output_layers:
if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
raise ValueError('Unknown layer: {}'.format(l))
model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet50(output_layers=None, pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
"""
if output_layers is None:
output_layers = ['default']
else:
for l in output_layers:
if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
raise ValueError('Unknown layer: {}'.format(l))
model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
def resnet101(output_layers=None, pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
"""
if output_layers is None:
output_layers = ['default']
else:
for l in output_layers:
if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
raise ValueError('Unknown layer: {}'.format(l))
model = ResNet(Bottleneck, [3, 4, 23, 3], output_layers, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
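# Minimal usage sketch (an illustrative assumption, not part of the original module):
# run a randomly initialised resnet18 backbone on a dummy batch and inspect the
# requested feature maps. Shapes in the comment assume the default inplanes=64.
if __name__ == '__main__':
    net = resnet18(output_layers=['layer2', 'layer3'])
    dummy = torch.randn(1, 3, 224, 224)
    feats = list(net(dummy, output_NCHW=True))
    for name, f in zip(['layer2', 'layer3'], feats):
        # expected approximately (1, 128, 28, 28) for layer2 and (1, 256, 28, 28) for layer3
        print(name, tuple(f.shape))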
|
import asyncio
import logging.config
import warnings
from functools import partial
from typing import Optional
import annofabapi
import requests
from annofabapi.models import ProjectJobType
from annofabcli.common.dataclasses import WaitOptions
from annofabcli.common.exceptions import DownloadingFileNotFoundError, UpdatedFileForDownloadingError
logger = logging.getLogger(__name__)
DOWNLOADING_FILETYPE_DICT = {
    ProjectJobType.GEN_TASKS_LIST: "full task list file",
    ProjectJobType.GEN_INPUTS_LIST: "full input data list file",
    ProjectJobType.GEN_ANNOTATION: "annotation zip",
}
DEFAULT_WAIT_OPTIONS = WaitOptions(interval=60, max_tries=360)
def _get_annofab_error_message(http_error: requests.HTTPError) -> Optional[str]:
obj = http_error.response.json()
errors = obj.get("errors")
if errors is None:
return None
return errors[0].get("message")
class DownloadingFile:
def __init__(self, service: annofabapi.Resource):
self.service = service
@staticmethod
def get_max_wait_minutes(wait_options: WaitOptions):
return wait_options.max_tries * wait_options.interval / 60
def _wait_for_completion(
self,
project_id: str,
job_type: ProjectJobType,
wait_options: Optional[WaitOptions] = None,
job_id: Optional[str] = None,
):
if wait_options is None:
wait_options = DEFAULT_WAIT_OPTIONS
max_wait_minutes = self.get_max_wait_minutes(wait_options)
filetype = DOWNLOADING_FILETYPE_DICT[job_type]
logger.info(f"{filetype}の更新処理が完了するまで、最大{max_wait_minutes}分間待ちます。job_id={job_id}")
result = self.service.wrapper.wait_for_completion(
project_id,
job_type=job_type,
job_access_interval=wait_options.interval,
max_job_access=wait_options.max_tries,
)
if not result:
raise UpdatedFileForDownloadingError(f"{filetype}の更新処理が{max_wait_minutes}分以内に完了しない、または更新処理に失敗しました。")
async def download_annotation_zip_with_async(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
loop = asyncio.get_event_loop()
partial_func = partial(self.download_annotation_zip, project_id, dest_path, is_latest, wait_options)
result = await loop.run_in_executor(None, partial_func)
return result
def download_annotation_zip(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
logger.debug(f"アノテーションzipをダウンロードします。path={dest_path}")
if is_latest:
self.wait_until_updated_annotation_zip(project_id, wait_options)
self.service.wrapper.download_annotation_archive(project_id, dest_path)
else:
try:
self.service.wrapper.download_annotation_archive(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"アノテーションzipが存在しなかったので、アノテーションzipファイルの更新処理を実行します。")
self.wait_until_updated_annotation_zip(project_id, wait_options)
self.service.wrapper.download_annotation_archive(project_id, dest_path)
else:
raise e
def wait_until_updated_annotation_zip(self, project_id: str, wait_options: Optional[WaitOptions] = None):
job_id = None
try:
job = self.service.api.post_annotation_archive_update(project_id)[0]["job"]
job_id = job["job_id"]
except requests.HTTPError as e:
            # If a job is already in progress, ignore the conflict and just wait for it
            if e.response.status_code == requests.codes.conflict:
                logger.warning("Another background job is already running, so skipping the update request.")
                logger.warning(f"{_get_annofab_error_message(e)}")
else:
raise e
self._wait_for_completion(
project_id, job_type=ProjectJobType.GEN_ANNOTATION, wait_options=wait_options, job_id=job_id
)
async def download_input_data_json_with_async(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
loop = asyncio.get_event_loop()
partial_func = partial(self.download_input_data_json, project_id, dest_path, is_latest, wait_options)
result = await loop.run_in_executor(None, partial_func)
return result
def download_input_data_json(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
logger.debug(f"入力データ全件ファイルをダウンロードします。path={dest_path}")
if is_latest:
self.wait_until_updated_input_data_json(project_id, wait_options)
self.service.wrapper.download_project_inputs_url(project_id, dest_path)
else:
try:
self.service.wrapper.download_project_inputs_url(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"入力データ全件ファイルが存在しなかったので、入力データ全件ファイルの更新処理を実行します。")
self.wait_until_updated_input_data_json(project_id, wait_options)
self.service.wrapper.download_project_inputs_url(project_id, dest_path)
else:
raise e
def wait_until_updated_input_data_json(self, project_id: str, wait_options: Optional[WaitOptions] = None):
job_id = None
try:
job = self.service.api.post_project_inputs_update(project_id)[0]["job"]
job_id = job["job_id"]
except requests.HTTPError as e:
            # If a job is already in progress, ignore the conflict and just wait for it
            if e.response.status_code == requests.codes.conflict:
                logger.warning("Another background job is already running, so skipping the update request.")
                logger.warning(f"{_get_annofab_error_message(e)}")
else:
raise e
self._wait_for_completion(
project_id, job_type=ProjectJobType.GEN_INPUTS_LIST, wait_options=wait_options, job_id=job_id
)
async def download_task_json_with_async(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
loop = asyncio.get_event_loop()
partial_func = partial(self.download_task_json, project_id, dest_path, is_latest, wait_options)
result = await loop.run_in_executor(None, partial_func)
return result
def download_task_json(
self, project_id: str, dest_path: str, is_latest: bool = False, wait_options: Optional[WaitOptions] = None
):
logger.debug(f"タスク全件ファイルをダウンロードします。path={dest_path}")
if is_latest:
self.wait_until_updated_task_json(project_id, wait_options)
self.service.wrapper.download_project_tasks_url(project_id, dest_path)
else:
try:
self.service.wrapper.download_project_tasks_url(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"タスク全件ファイルが存在しなかったので、タスク全件ファイルの更新処理を実行します。")
self.wait_until_updated_task_json(project_id, wait_options)
self.service.wrapper.download_project_tasks_url(project_id, dest_path)
else:
raise e
def wait_until_updated_task_json(self, project_id: str, wait_options: Optional[WaitOptions] = None):
job_id = None
try:
job = self.service.api.post_project_tasks_update(project_id)[0]["job"]
job_id = job["job_id"]
except requests.HTTPError as e:
            # If a job is already in progress, ignore the conflict and just wait for it
            if e.response.status_code == requests.codes.conflict:
                logger.warning("Another background job is already running, so skipping the update request.")
                logger.warning(f"{_get_annofab_error_message(e)}")
else:
raise e
self._wait_for_completion(
project_id, job_type=ProjectJobType.GEN_TASKS_LIST, wait_options=wait_options, job_id=job_id
)
async def download_task_history_json_with_async(self, project_id: str, dest_path: str):
"""
非同期でタスク履歴全件ファイルをダウンロードする。
Raises:
DownloadingFileNotFoundError:
"""
return self.download_task_history_json(project_id, dest_path=dest_path)
def download_task_history_json(self, project_id: str, dest_path: str):
"""
タスク履歴全件ファイルをダウンロードする。
Args:
project_id:
dest_path:
Raises:
DownloadingFileNotFoundError:
"""
try:
logger.debug(f"タスク履歴全件ファイルをダウンロードします。path={dest_path}")
self.service.wrapper.download_project_task_histories_url(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"タスク履歴全件ファイルが存在しません。")
raise DownloadingFileNotFoundError("タスク履歴全件ファイルが存在しません。") from e
raise e
def download_task_history_event_json(self, project_id: str, dest_path: str):
"""
タスク履歴イベント全件ファイルをダウンロードする。
.. deprecated:: 0.21.1
Args:
project_id:
dest_path:
Raises:
DownloadingFileNotFoundError:
"""
try:
logger.debug(f"タスク履歴イベント全件ファイルをダウンロードします。path={dest_path}")
self.service.wrapper.download_project_task_history_events_url(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"タスク履歴イベント全件ファイルが存在しません。")
raise DownloadingFileNotFoundError("タスク履歴イベント全件ファイルが存在しません。") from e
raise e
async def download_task_history_event_json_with_async(self, project_id: str, dest_path: str):
"""
非同期で検査コメント全件ファイルをダウンロードする。
.. deprecated:: 0.21.1
Raises:
DownloadingFileNotFoundError:
"""
warnings.warn("deprecated", DeprecationWarning)
return self.download_task_history_event_json(project_id, dest_path=dest_path)
async def download_inspection_json_with_async(self, project_id: str, dest_path: str):
"""
非同期で検査コメント全件ファイルをダウンロードする。
Raises:
DownloadingFileNotFoundError:
"""
return self.download_inspection_json(project_id, dest_path=dest_path)
def download_inspection_json(self, project_id: str, dest_path: str):
"""
検査コメント全件ファイルをダウンロードする。
Raises:
DownloadingFileNotFoundError:
"""
try:
logger.debug(f"検査コメント全件ファイルをダウンロードします。path={dest_path}")
self.service.wrapper.download_project_inspections_url(project_id, dest_path)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logger.info(f"検査コメント全件ファイルが存在しません。")
raise DownloadingFileNotFoundError("タスク履歴全件ファイルが存在しません。") from e
raise e
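# Usage sketch (assumption: `service` is an already authenticated annofabapi.Resource
# and `project_id` is a valid project id; only methods defined above are used):
#
#     downloader = DownloadingFile(service)
#     downloader.download_task_json(project_id, "task.json", is_latest=True)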
|
# Same as script 14, but the algorithm is trained with a number of agents varying from 1 to 20.
import sys
import os
lucas_path = os.environ['LUCAS_PATH']
sys.path.insert(1, lucas_path)
from general import general as gen
from devices.devices import node, base_station, mobile_user, d2d_user, d2d_node_type
from pathloss import pathloss
from plots.plots import plot_positions, plot_spectral_effs
from q_learning.environments.completeEnvironment import CompleteEnvironment
from dqn.agents.dqnAgent import ExternalDQNAgent
from dqn.externalDQNFramework import ExternalDQNFramework
from dqn.replayMemory import ReplayMemory
from dqn.dqn import DQN
from q_learning.q_table import DistributedQTable
from q_learning import rewards
from parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters, LearningParameters
from typing import List
from matplotlib import pyplot as plt
import torch
import math
import numpy as np
import os
import pickle
n_mues = 1 # number of mues
n_d2d = 2 # number of d2d pairs
n_rb = n_mues # number of RBs
bs_radius = 500 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 50 # d2d pair distance in m
p_max = 23 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_train = 6 # mue sinr threshold in dB for training
sinr_threshold_mue = 6 # true mue sinr threshold in dB
mue_margin = .5e4
# conversions from dB to pow
p_max = p_max - 30
p_max = gen.db_to_power(p_max)
noise_power = noise_power - 30
noise_power = gen.db_to_power(noise_power)
bs_gain = gen.db_to_power(bs_gain)
user_gain = gen.db_to_power(user_gain)
sinr_threshold_train = gen.db_to_power(sinr_threshold_train)
# q-learning parameters
STEPS_PER_EPISODE = 25
EPSILON_MIN = 0.05
# MAX_NUM_STEPS = 50
# EPSILON_DECAY = 0.4045*1e-4 # super long training
# EPSILON_DECAY = 0.809*1e-4 # long training
# EPSILON_DECAY = 0.809*1e-4 # medium training
EPSILON_DECAY = 1.675*1e-4 # medium training
# EPSILON_DECAY = 8.09*1e-4 # short training
# MAX_NUM_EPISODES = 40000 # super long training
# MAX_NUM_EPISODES = 20000 # long training
MAX_NUM_EPISODES = 960 # medium training
# MAX_NUM_EPISODES = 2000 # short training
ALPHA = 0.05 # Learning rate
GAMMA = 0.98 # Discount factor
# C = 8000 # C constant for the improved reward function
C = 80 # C constant for the improved reward function
TARGET_UPDATE = 10
MAX_NUMBER_OF_AGENTS = 20
# more parameters
env_params = EnvironmentParameters(rb_bandwidth, d2d_pair_distance, p_max, noise_power, bs_gain, user_gain, sinr_threshold_train,
n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)
train_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(EPSILON_MIN, EPSILON_DECAY, 1, 512, GAMMA)
ext_framework = ExternalDQNFramework(agent_params)
# actions = [i*p_max/10/1000 for i in range(21)] # worst
# actions = [i*0.80*p_max/10/1000 for i in range(21)] # best histogram
reward_function = rewards.dis_reward_tensor
# environment = CompleteEnvironment(env_params, reward_function, early_stop=1e-6, tolerance=10)
environment = CompleteEnvironment(env_params, reward_function)
# training function
# TODO: put the agent and the d2d_device in the same class? or add a d2d_device property on the agent?
def train(framework: ExternalDQNFramework, env: CompleteEnvironment, params: TrainingParameters, agent_params: DQNAgentParameters, max_d2d: int):
best_reward = float('-inf')
device = torch.device('cuda')
rewards_bag = list()
aux_range = range(max_d2d)[1:]
epsilon = agent_params.start_epsilon
for episode in range(params.max_episodes):
        # TODO: currently the users are redistributed randomly every episode. Is this the best approach?
        # Simulate user movement instead?
actions = [i*0.82*p_max/10/1000 for i in range(21)] # best result
n_agents = np.random.choice(aux_range)
agents = [ExternalDQNAgent(agent_params, actions) for i in range(n_agents)] # 1 agent per d2d tx
counts = np.zeros(len(agents))
awaits = list()
await_steps = [2,3,4]
for a in agents:
awaits.append(np.random.choice(await_steps))
a.set_action(torch.tensor(0).long().cuda(), a.actions[0])
a.set_epsilon(epsilon)
env.build_scenario(agents)
done = False
obs = [env.get_state(a) for a in agents]
total_reward = 0.0
i = 0
bag = list()
while not done:
if i >= params.steps_per_episode:
break
else:
actions = torch.zeros([len(agents)], device=device)
for j, agent in enumerate(agents):
if counts[j] < awaits[j]:
counts[j] += 1
else:
agent.get_action(framework, obs[j])
actions[j] = agent.action_index
counts[j] = 0
awaits[j] = np.random.choice(await_steps)
next_obs, rewards, done = env.step(agents)
i += 1
for j, agent in enumerate(agents):
framework.replay_memory.push(obs[j], actions[j], next_obs[j], rewards[j])
framework.learn()
obs = next_obs
total_reward += torch.sum(rewards)
                bag.append(total_reward.item())
if episode % TARGET_UPDATE == 0:
framework.target_net.load_state_dict(framework.policy_net.state_dict())
if total_reward > best_reward:
best_reward = total_reward
print("Episode#:{} sum reward:{} best_sum_reward:{} eps:{}".format(episode,
total_reward, best_reward, agents[0].epsilon))
rewards_bag.append(np.average(bag))
epsilon = agents[0].epsilon
# Return the trained policy
return rewards_bag
# SCRIPT EXEC
# training
rewards = train(ext_framework, environment, train_params, agent_params, MAX_NUMBER_OF_AGENTS)
cwd = os.getcwd()
torch.save(ext_framework.policy_net.state_dict(), f'{cwd}/models/ext_model_dqn_agent_mult.pt')
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
filename = f'{lucas_path}/data/{filename}.pickle'
with open(filename, 'wb') as f:
pickle.dump(ext_framework.bag, f)
plt.figure(1)
plt.plot(ext_framework.bag, '.')
plt.xlabel('Iterations')
plt.ylabel('Average Q-Values')
plt.figure(2)
plt.plot(rewards, '.')
plt.show()
|
from ..functions import loader
from .animation import Animation
from .enemy import Enemy
from .box import Box
from .pig import Pig
# pig sprites loader
load_image = loader("kings_and_pigs/data/sprites/04-Pig Throwing a Box")
class PigThrowingBox(Enemy):
def __init__(self, x, y, type=None, id=None):
idle = load_image("Idle (26x30).png")
run = load_image("Run (26x30).png")
pick = load_image("Picking Box (26x30).png")
# throw = load_image("Throwing Box (26x30).png")
self.animation_left_idle = Animation(idle, 9)
self.animation_right_idle = self.animation_left_idle.flip()
self.animation_left_run = Animation(run, 6)
self.animation_right_run = self.animation_left_run.flip()
self.animation_left_jump = None
self.animation_right_jump = None
self.animation_left_fall = None
self.animation_right_fall = None
self.animation_left_pick = Animation(pick, 5)
self.animation_right_pick = self.animation_left_pick.flip()
# self.animation_left_throw = Animation(throw, 5)
# self.animation_right_throw = self.animation_left_throw.flip()
super().__init__(x, y, self.animation_right_idle, type, id)
self.adjust_hit_box(left=5, right=10, top=5)
self.adjust_direction = 5
self.facing_right = False
self.lives = 2
self.can_throw = True
def get_hit_area(self, chamber=None):
area = self.get_hit_box()
area.left += 10 if self.facing_right else -160
area.top -= 15
area.width += 150
area.height += 15
return area
def jump(self, floors):
pass # cannot jump
def fall_on_ground(self, floor, chamber, force):
pass # do nothing
def attack(self, targets, chamber):
# attack = throw a box
box = Box(self.rect.x + 4, self.rect.y + 3)
pig = Pig(self.rect.x - 4, self.rect.y + 2, self.type, self.id)
pig.facing_right = self.facing_right
box.vy = -8
box.vx = 8 if self.facing_right else -8
self.replace_with = [pig, box]
def hit(self, direction, chamber):
# when hero hits - destroy the box and hit the pig separately
box = Box(self.rect.x + 4, self.rect.y + 3)
pig = Pig(self.rect.x - 4, self.rect.y + 2, self.type, self.id)
pig.facing_right = self.facing_right
box.hit(direction, chamber)
pig.hit(direction, chamber)
self.replace_with = [box, pig]
def murder(self):
box = Box(self.rect.x + 4, self.rect.y + 3)
pig = Pig(self.rect.x - 4, self.rect.y + 2, self.type, self.id)
pig.facing_right = self.facing_right
pig.murder()
self.replace_with = [box, pig]
def pick(self):
if self.in_action:
return
def pick_is_done():
self.in_action = False
self.in_action = True
if self.facing_right:
self.change_animation(self.animation_right_pick)
self.animation_right_pick.on_done(pick_is_done)
else:
self.change_animation(self.animation_left_pick)
self.animation_left_pick.on_done(pick_is_done)
|
tamiyo_1 = "Al parecer la historia era cierta"
jace_2 = "cual historia ! ?"
tamiyo_3 = "el bosque cuenta que tu eliminaste"
tamiyo_4 = "al anciano nicol"
jace_5 = "su nombre era lucin"
tamiyo_6 = "que ingenuo eres, si llegas a mi"
tamiyo_7 = "te contare una historia"
tamiyo_8 = "muajajajajajaja"
|
while True:
a, b = map(int, input().split())
if a == b:
break
else:
print('Crescente' if a < b else 'Decrescente')
|
import torch
from torch import optim
import torch.nn.functional as F
import time
import os
import matplotlib.pyplot as plt
from loss import ContrastiveLoss
import datahandler as dl
from model import SiameseNetwork
from torch.utils import tensorboard
root_dir = r'..\..\Dataset\MVTEC_AD'  # raw string so the backslashes are not treated as escape sequences
epochs = 200
lear_rate = 0.0005
trainbatchsize = 4
validbatchsize = 4
testbatchsize = 1
log_dir = "logs"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
writer = tensorboard.SummaryWriter(log_dir)
train_loader, valid_loader, test_loader = dl.pre_processor(root_dir=root_dir,
trainbatchsize=trainbatchsize,
validbatchsize=validbatchsize,
testbatchsize=1)
net = SiameseNetwork().cuda()
# net.train()
criterion = ContrastiveLoss().cuda()
optimizer = optim.Adam(net.parameters(), lr = lear_rate )
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95)
train_losses = []
val_losses = []
best_val = 0.1
start_time = time.time()
for epoch in range( epochs):
net.train()
running_loss = 0
for i, data in enumerate(train_loader, 0): # giving the argument 0 starts the counter from 0
img0, img1 , label = data
img0, img1 , label = img0.cuda(), img1.cuda() , label.cuda()
# reset the gradients
optimizer.zero_grad()
output1, output2 = net(img0, img1)
loss_contrastive = criterion(output1, output2, label)
loss_contrastive.backward() # backward pass
optimizer.step() # update weights
running_loss += loss_contrastive.item()
writer.add_scalar('train_loss/epoch', running_loss, epoch)
train_losses.append(running_loss/len(train_loader))
if epoch % 2 == 0:
val_loss = 0
net.eval()
with torch.no_grad():
for i , data in enumerate(valid_loader, 0):
img_0, img_1, label_ = data
img_0, img_1 , label_ = img_0.cuda(), img_1.cuda() , label_.cuda()
output1, output2 = net(img_0, img_1)
# distance = torch.sigmoid(F.pairwise_distance(output1, output2))
loss_val = criterion(output1, output2, label_)
val_loss += loss_val.item()
writer.add_scalar('val_loss/epoch', val_loss, epoch)
val_losses.append(val_loss/len(valid_loader))
print('Epoch : ',epoch, "\t Train loss: {:.2f}".format(running_loss/len(train_loader)),
"\t Validation loss: {:.2f}".format(val_loss/len(valid_loader)))
if val_loss/len(valid_loader) < best_val:
best_val = val_loss/len(valid_loader)
PATH = '../models/best_model_val_loss.pth'
torch.save(net, PATH)
print()
print("Saved best model at epoch: ", epoch)
print()
if epoch%2 == 0:
PATH = "../models/saved_epoch_model.pth"
torch.save(net, PATH)
scheduler.step()
print('It took {} seconds to train the model.. '.format(time.time() - start_time))
# plot and save the losses
fig = plt.figure(figsize=(10,5))
plt.title("Training and Validation Loss")
plt.plot(train_losses, label = "train")
plt.plot(val_losses, label = "val")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
fig.savefig('new_Train_&_Val_loss.png')
|
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import requests
from six.moves.urllib import parse
from sushy import exceptions
LOG = logging.getLogger(__name__)
class Connector(object):
def __init__(self, url, username=None, password=None, verify=True):
self._url = url
self._session = requests.Session()
self._session.verify = verify
if username and password:
self._session.auth = (username, password)
def close(self):
"""Close this connector and the associated HTTP session."""
self._session.close()
def _op(self, method, path='', data=None, headers=None):
"""Generic RESTful request handler.
:param method: The HTTP method to be used, e.g: GET, POST,
PUT, PATCH, etc...
:param path: The sub-URI path to the resource.
:param data: Optional JSON data.
:param headers: Optional dictionary of headers.
:returns: The response object from the requests library.
:raises: ConnectionError
:raises: HTTPError
"""
if headers is None:
headers = {}
if data is not None:
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
url = parse.urljoin(self._url, path)
# TODO(lucasagomes): We should mask the data to remove sensitive
# information
LOG.debug('HTTP request: %(method)s %(url)s; '
'headers: %(headers)s; body: %(data)s',
{'method': method, 'url': url, 'headers': headers,
'data': data})
try:
response = self._session.request(method, url, data=data,
headers=headers)
except requests.ConnectionError as e:
raise exceptions.ConnectionError(url=url, error=e)
exceptions.raise_for_response(method, url, response)
LOG.debug('HTTP response for %(method)s %(url)s: '
'status code: %(code)s',
{'method': method, 'url': url,
'code': response.status_code})
return response
def get(self, path='', data=None, headers=None):
"""HTTP GET method.
:param path: Optional sub-URI path to the resource.
:param data: Optional JSON data.
:param headers: Optional dictionary of headers.
:returns: The response object from the requests library.
:raises: ConnectionError
:raises: HTTPError
"""
return self._op('GET', path, data, headers)
def post(self, path='', data=None, headers=None):
"""HTTP POST method.
:param path: Optional sub-URI path to the resource.
:param data: Optional JSON data.
:param headers: Optional dictionary of headers.
:returns: The response object from the requests library.
:raises: ConnectionError
:raises: HTTPError
"""
return self._op('POST', path, data, headers)
def patch(self, path='', data=None, headers=None):
"""HTTP PATCH method.
:param path: Optional sub-URI path to the resource.
:param data: Optional JSON data.
:param headers: Optional dictionary of headers.
:returns: The response object from the requests library.
:raises: ConnectionError
:raises: HTTPError
"""
return self._op('PATCH', path, data, headers)
def __enter__(self):
return self
def __exit__(self, *_args):
self.close()
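# Illustrative usage sketch (not part of the original module; the base URL and
# resource path below are assumptions). Because _op() resolves the target with
# urljoin(self._url, path), relative paths are joined against the base URL:
#
#     with Connector('https://bmc.example.com/redfish/v1/',
#                    username='admin', password='secret', verify=False) as conn:
#         resp = conn.get('Systems')   # GET https://bmc.example.com/redfish/v1/Systems
#         data = resp.json()
#         conn.patch('Systems/1', data={'AssetTag': 'rack-42'})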
|
"""Train A2C agent in ALE game registerd in gym.
Some hyper parametes are from OpenAI baselines:
https://github.com/openai/baselines/blob/master/baselines/a2c/a2c.py
"""
import os
from torch.optim import RMSprop
import rainy
from rainy.agents import A2CAgent
from rainy.envs import Atari, atari_parallel
@rainy.main(A2CAgent, script_path=os.path.realpath(__file__))
def main(envname: str = "Breakout") -> rainy.Config:
c = rainy.Config()
c.set_env(lambda: Atari(envname, frame_stack=False))
c.set_optimizer(lambda params: RMSprop(params, lr=7e-4, alpha=0.99, eps=1e-5))
# c.set_net_fn('actor-critic', rainy.net.actor_critic.conv_shared(rnn=net.GruBlock))
c.set_net_fn("actor-critic", rainy.net.actor_critic.conv_shared())
c.nworkers = 16
c.nsteps = 5
c.set_parallel_env(atari_parallel())
c.grad_clip = 0.5
c.value_loss_weight = 1.0
c.use_gae = False
c.max_steps = int(2e7)
c.eval_env = Atari(envname)
c.eval_deterministic = False
c.episode_log_freq = 100
c.eval_freq = None
c.save_freq = None
return c
if __name__ == "__main__":
main()
|
from bs4 import BeautifulSoup
import urllib2
import shlex
import os
import sys
import wget
from subprocess import Popen, PIPE
def mysoup(link):
url = urllib2.Request(link, headers={ 'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:33.0) Gecko/20100101 Firefox/33.0' })
page = urllib2.urlopen(url)
soup = BeautifulSoup(page.read())
return soup
#def upload_youtube(link,title,desc,keyword):
def upload_youtube(link):
cmd = '/home/hadn/python/bin/python /home/hadn/vtv_detail.py %s' % link
cmd = shlex.split(cmd.encode('utf8'))
print cmd
try:
up = Popen(cmd, stdout=PIPE)
print up.communicate()
except Exception as e:
print str(e)
pass
soup = mysoup("http://vtv.vn/truyen-hinh-truc-tuyen.htm")
videos_new = soup.find("div",{"class":"video-news-box"}).find("ul",{"class":"list-item"}).find_all("li")
links = []
for video in videos_new:
link = "http://vtv.vn"+video.find("a").get("href")
links.append(link)
if not links:
sys.exit(0)
with open('/home/hadn/vtv.txt') as f:
link_traced = f.readlines()[0].split(',')
index = 0
for link in links:
if link in link_traced:
break
index += 1
if index > 0:
with open('/home/hadn/vtv.txt', 'w') as f:
f.write(','.join(links[:index]))
for link in links[:index]:
upload_youtube(link)
|
import numpy as np
import sys
import cvxEDA
import json
import matplotlib.pyplot as plt
import scipy.optimize
import gsr
def bateman(tau0, tau1):
return lambda t: np.exp(-t/tau0) - np.exp(-t/tau1)
ts = np.arange(0, 100, 0.1)
plt.plot(ts, bateman(10.0, 5.0)(ts))
plt.show()
data = []
for line in sys.stdin:
row = json.loads(line)
data.append((row[0]['ts'], row[1]['E']))
data = np.array(data)
data = data[::3]
#data = data[5000:10000]
data = data[data[:,1] > 0]
ts, scr = data.T
scr = 1.0/scr
oscr = scr.copy()
scr -= np.mean(scr)
scr /= np.std(scr)
dt = np.median(np.diff(ts))
ts = np.arange(len(ts))*dt
#plt.plot(data[:,0], 1.0/data[:,1])
def objective(taus):
tau0, tau1 = np.exp(taus)
wtf = list(cvxEDA.cvxEDA(scr, dt, tau0=tau0, tau1=tau1))
print(tau0, tau1, float(wtf[-1]))
return float(wtf[-1])
#print(objective([2.0, 0.7]))
#fit = scipy.optimize.minimize(objective, np.log((10.0, 5.0)))
#print(fit)
#tau0, tau1 = np.exp(fit.x)
#tau0, tau1 = np.exp([ 4.40451525, -1.79824158]) # WTF!!
wtf = list(cvxEDA.cvxEDA(scr, dt))
driver, tonic, kernel = gsr.deconv_baseline(oscr, 1/dt)
ax = plt.subplot(2,1,1)
plt.plot(ts, scr)
recon = scr - wtf[5]
plt.plot(ts, recon)
#plt.plot(ts, wtf[2])
plt.subplot(2,1,2,sharex=ax)
plt.plot(ts, wtf[1]/np.max(wtf[1]))
plt.plot(ts, driver/np.max(driver))
plt.show()
|
from gi.repository import Gtk
from .basewidgets import Child
class Button(Child):
def __init__(self, bananawidget):
self.widget = Gtk.Button()
self.widget.connect('clicked', self._do_click)
super().__init__(bananawidget)
def _do_click(self, button):
self.bananawidget.on_click.run()
def set_text(self, text):
self.widget.set_label(text)
# TODO: set_imagepath.
ImageButton = Button
|
"""
Python Attributed Hierarchical Port Graph.
AHPGraph is intended to be a simple, flexible way to describe
architecture components and their connections. These physical architecture
descriptions can then be converted to SST component graphs or analyzed by other
tools.
"""
from .Device import *
from .DeviceGraph import *
from .SSTGraph import *
|
from framework.core.base import BasePage
class addQuestionPage (BasePage):
questionTextBox = None
showMore = None
todayLink = None
nowLink = None
choiceText1 = None
choiceText2 = None
choiceText3 = None
choiceVotes1 = None
choiceVotes2 = None
choiceVotes3 = None
addChoice = None
saveButton = None
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def locate_elements(self):
self.questionTextBox = self.driver.find_element_by_id("id_question_text")
self.showMore = self.driver.find_element_by_id("fieldsetcollapser0")
self.todayLink = self.driver.find_element_by_link_text("Today")
self.nowLink = self.driver.find_element_by_link_text("Now")
self.choiceText1 = self.driver.find_element_by_name("choice_set-0-choice_text")
self.choiceText2 = self.driver.find_element_by_name("choice_set-1-choice_text")
self.choiceText3 = self.driver.find_element_by_name("choice_set-2-choice_text")
self.choiceVotes1 = self.driver.find_element_by_name("choice_set-0-votes")
self.choiceVotes2 = self.driver.find_element_by_name("choice_set-1-votes")
self.choiceVotes3 = self.driver.find_element_by_name("choice_set-2-votes")
self.addChoice = self.driver.find_element_by_link_text("Add another Choice")
self.saveButton = self.driver.find_element_by_name("_save")
def setQuestionText(self, question_text=''):
self.questionTextBox.send_keys(question_text)
def setNow(self):
self.showMore.click()
self.todayLink.click()
self.nowLink.click()
def setChoicesText(self, choiceTextvalue1='', choiceTextvalue2='', choiceTextvalue3=''):
self.choiceText1.send_keys(choiceTextvalue1)
self.choiceText2.send_keys(choiceTextvalue2)
self.choiceText3.send_keys(choiceTextvalue3)
def setChoiceVotes(self, choiceVotesValue1=0, choiceVotesValue2=0, choiceVotesValue3=0):
self.choiceVotes1.clear()
self.choiceVotes2.clear()
self.choiceVotes3.clear()
self.choiceVotes1.send_keys(choiceVotesValue1)
self.choiceVotes2.send_keys(choiceVotesValue2)
self.choiceVotes3.send_keys(choiceVotesValue3)
def savePoll(self):
self.saveButton.click()
#return mainPage(self.driver)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from controller.node_controller import NodeController
from common import utilities
class NodeCommandImpl:
def __init__(self, config):
self.node_controller = NodeController(config)
def gen_node_config(self):
function = "generate_all_config"
notice_info = "generate config for all nodes"
return self.execute_command(function, notice_info)
def start_all(self):
function = "start_group"
notice_info = "start all nodes of the given group"
return self.execute_command(function, notice_info)
def stop_all(self):
function = "stop_group"
notice_info = "stop all nodes of the given group"
return self.execute_command(function, notice_info)
def upgrade_nodes(self):
function = "upgrade_group"
notice_info = "upgrade all nodes of the given group"
return self.execute_command(function, notice_info)
def deploy_nodes(self):
function = "generate_and_deploy_group_services"
notice_info = "deploy all nodes of the given group"
return self.execute_command(function, notice_info)
def upload_nodes(self):
function = "deploy_group_services"
notice_info = "upload all nodes config of the given group"
return self.execute_command(function, notice_info)
def undeploy_nodes(self):
function = "undeploy_group"
notice_info = "undeploy all nodes of the given group"
return self.execute_command(function, notice_info)
def generate_expand_config(self):
function = "generate_all_expand_config"
notice_info = "generate expand config for the given group"
return self.execute_command(function, notice_info)
def expand_nodes(self):
function = "expand_and_deploy_all_nodes"
notice_info = "expand nodes for the given group"
return self.execute_command(function, notice_info)
def execute_command(self, function, notice_info):
utilities.print_split_info()
utilities.print_badage(notice_info)
ret = getattr(self.node_controller, function)()
if ret is True:
utilities.print_badage("%s success" % notice_info)
else:
utilities.log_error("%s failed" % notice_info)
utilities.print_split_info()
return ret
|
#!/usr/bin/env python
"""
Layered wheel implementation
"""
import base64
import csv
import hashlib
import io
import pprint
import re
import shutil
import subprocess
import zipfile
import email.policy
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
re.VERBOSE,
)
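# Worked example (illustrative filename): matching
#   "beaglevote-1.0-cp37-cp37m-manylinux2010_x86_64.whl"
# yields namever="beaglevote-1.0", name="beaglevote", ver="1.0", build=None,
# pyver="cp37", abi="cp37m", plat="manylinux2010_x86_64".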
class HashStream(io.BufferedIOBase):
"""
Forward operations to an underlying stream, calculating a hash as we go.
For reading *or* writing, not both.
"""
# blake2b 11.39s vs sha256 11.75s vs passthrough 7.68s
def __init__(self, backing: io.BufferedIOBase, callback, algo="blake2s"):
super().__init__()
self.backing = backing
self.length = 0
self.digest = hashlib.new(algo)
self.callback = callback
# one of these methods makes recordReader() fail
# (we haven't written the entire io interface)
# def closed(self):
# return self.backing.closed()
# def readable(self):
# return self.backing.readable()
# def writable(self):
# return self.backing.writable()
def write(self, b):
self.digest.update(b)
self.length += len(b)
return self.backing.write(b)
def read(self, n=None):
data = self.backing.read(n)
self.digest.update(data)
self.length += len(data)
return data
def close(self):
super().close()
self.callback(self)
return self.backing.close()
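# Minimal usage sketch (illustrative, not part of the original module): wrap any
# buffered stream and receive the digest and byte count via the callback when the
# stream is closed.
#
#     def on_done(hs):
#         print(hs.digest.name, hs.digest.hexdigest(), hs.length)
#     out = HashStream(io.BytesIO(), on_done)   # defaults to blake2s
#     out.write(b"hello world")
#     out.close()   # invokes on_done, then closes the backing stream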
class WheelArchiver(zipfile.ZipFile):
"""
Open a wheel file for reading *or* writing, not both.
Wraps ZipFile to handle {namever}/RECORD.
Check read files against RECORD in read mode.
Automatically write RECORD on close in write mode.
Defer writes to dist-info until close? Or raise if non-dist-info files written after any dist-info is written.
Args:
namever (str):
First part of the wheel filename, the package name and version.
e.g. "beaglevote-1.0". Or parsed from filename if given.
"""
def __init__(self, *args, namever=None, **kwargs):
super().__init__(*args, **kwargs)
if namever is None and self.filename:
namever = WHEEL_INFO_RE.match(self.filename).group("namever")
self.namever = namever
self._file_hashes = {} # if mode = 'r' initialize from RECORD
self._file_sizes = {}
@property
def dist_info(self):
return f"{self.namever}.dist-info"
@property
def wheelfile_path(self):
return self.dist_info + "/WHEEL"
@property
def metadata_path(self):
return self.dist_info + "/METADATA"
@property
def record_path(self):
return self.dist_info + "/RECORD"
def open(self, name, *args, **kwargs):
"""
Buffer files written to the dist-info directory and append to wheel on close.
"""
if isinstance(name, zipfile.ZipInfo):
fname = name.filename
else:
fname = name
return HashStream(
super().open(name, *args, **kwargs),
lambda hash: self._hash_callback(fname, hash),
)
def _hash_callback(self, fname, hashwriter: HashStream):
self._file_hashes[fname] = (
hashwriter.digest.name,
urlsafe_b64encode(hashwriter.digest.digest()).decode("charmap"),
)
self._file_sizes[fname] = hashwriter.length
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(b"=")
def urlsafe_b64decode(data):
"""urlsafe_b64decode without padding"""
pad = b"=" * (4 - (len(data) & 3))
return base64.urlsafe_b64decode(data + pad)
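# Quick round-trip example for the padding-free helpers:
#   urlsafe_b64encode(b"\x00\xff")  -> b"AP8"   (standard encoding b"AP8=" with the "=" stripped)
#   urlsafe_b64decode(b"AP8")       -> b"\x00\xff"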
def recordWriter(data):
return csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n")
def recordReader(data):
return csv.reader(data, delimiter=",", quotechar='"', lineterminator="\n")
policy = email.policy.EmailPolicy(utf8=True, max_line_length=0)
def parse_kv(fp):
"""
parse bytes from fp.read() for METADATA or WHEEL.
Overkill for just getting the wheel version.
"""
# HashStream isn't complete enough to send to .parse()
# This will be good for round-tripping (parse / generate), may be
# details getting "normal" Unicode in and out, beware surrogates.
return email.parser.BytesParser(policy=policy).parsebytes(fp.read())
def write_kv(kv):
"""
Return bytes for an EmailMessage (representing WHEEL or METADATA)
Overkill for WHEEL (can generate with a string template), useful
for METADATA.
Would be convenient to also accept a sequence of key-value pairs.
[('Wheel-Version', '1.0'),
('Generator', 'bdist_wheel (0.31.1)'),
('Root-Is-Purelib', 'false'),
('Tag', 'cp37-cp37m-manylinux2010_x86_64')]
"""
return kv.as_bytes(policy=policy)
|
from PyQt5.QtWidgets import (QDesktopWidget,
QMainWindow,
QAction, QFrame)
from PyQt5.QtGui import (QPalette, QPen,
QIcon, QPainter, QColor)
from PyQt5.QtCore import Qt, QBasicTimer, QPoint, QTimer
from gui.communicate import Communicate
from common.state import State
from gui.dialogs.message_dialog import *
from common.computer import Computer
from gui.dialogs.main_window import *
import time
WINDOW_TITLE = "Reversi"
WINDOWHEIGHT = 480
WINDOWWIDTH = 640
SPACE = 50
WIDTH = 8
HEIGHT = 8
X_OFFSET = int((WINDOWWIDTH - 400) / 2)
Y_OFFSET = int((WINDOWHEIGHT - 440) / 2)
PLAYERS_STATES = [State.black, State.white]
class BoardUi(QFrame):
def __init__(self, parent, game):
super().__init__(parent)
self.main_window = parent
self.game = game
def start(self):
self.show()
self.setFocusPolicy(Qt.StrongFocus)
self.timer = QBasicTimer()
self.timer.start(150, self)
if isinstance(self.game.first_player, Computer) \
and isinstance(self.game.second_player, Computer):
self.computers_play()
def computers_play(self):
while not self.game.is_game_won():
self.computer_turn()
#self.show_winner_message()
def init_signals(self):
self.communicate = Communicate()
self.communicate.restart.connect(self.restart_game)
def paintEvent(self, e):
painter = QPainter()
painter.begin(self)
self.draw_board_rectangle(painter)
painter.end()
def draw_board_rectangle(self, painter):
col = QColor(0, 0, 0)
col.setNamedColor('#d4d4d4')
painter.setPen(col)
painter.setBrush(QColor(200, 0, 0))
painter.drawRect(X_OFFSET, Y_OFFSET, 400, 400)
pen = QPen(Qt.black, 2, Qt.SolidLine)
painter.setPen(pen)
for spot in [(x, x) for x in range(WIDTH + 1)]:
left = ((spot[0] * SPACE) + X_OFFSET, Y_OFFSET)
right = ((spot[0] * SPACE) + X_OFFSET, Y_OFFSET + (HEIGHT * SPACE))
up = (X_OFFSET, (spot[1] * SPACE) + Y_OFFSET)
down = (X_OFFSET + (WIDTH * SPACE), (spot[1] * SPACE) + Y_OFFSET)
painter.drawLine(left[0], left[1], right[0], right[1])
painter.drawLine(up[0], up[1], down[0], down[1])
for x in range(WIDTH):
for y in range(HEIGHT):
centerx, centery = self.get_center(x, y)
if self.game.board.pieces[x][y].state in PLAYERS_STATES:
if self.game.board.pieces[x][y].state == State.white:
painter.setBrush(Qt.white)
else:
painter.setBrush(Qt.black)
center = QPoint(centerx, centery)
circle_diameter = int(SPACE / 2) - 5
painter.drawEllipse(
center, circle_diameter, circle_diameter)
def get_center(self, x, y):
return X_OFFSET + x * SPACE + int(SPACE / 2), \
Y_OFFSET + y * SPACE + int(SPACE / 2)
def timerEvent(self, event):
self.update()
def get_clicked_block_position(self, mouseX, mouseY):
for x in range(WIDTH):
for y in range(HEIGHT):
if mouseX > x * SPACE + X_OFFSET and \
mouseX < (x + 1) * SPACE + X_OFFSET and \
mouseY > y * SPACE + Y_OFFSET and \
mouseY < (y + 1) * SPACE + Y_OFFSET:
return (x, y)
return None
def mousePressEvent(self, event):
if isinstance(self.game.get_current_player(), Computer):
return
position = event.pos()
piece_position = self.get_clicked_block_position(
position.x(), position.y())
if piece_position is None:
return
current_player = self.game.get_current_player()
other_player = self.game.get_other_player()
is_valid = current_player.make_move(
piece_position[0], piece_position[1], other_player)
if not is_valid:
return
self.game.change_current_player()
if self.game.is_game_won():
self.show_winner_message()
return
if isinstance(self.game.get_current_player(), Computer):
self.computer_turn()
message = str(
self.get_current_player_message()
+ " | Scores: " + self.get_score_text())
self.main_window.communicate.message_statusbar.emit(message)
def get_current_player_message(self):
if self.game.first_player.colour == self.game.current_player_colour:
return "First player's turn with " \
+ self.game.current_player_colour.name
return "Second player's turn with " \
+ self.game.current_player_colour.name
def show_winner_message(self):
winner = self.game.get_winner()
message = None
if winner is None:
message = "No one won its a tie"
elif winner is self.game.first_player:
message = "First Player has won"
else:
message = "Second Player has won"
self.main_window.communicate.message_statusbar.emit(message)
def get_score_text(self):
text = 'Player 1: %s Player 2: %s'
first_player_score = self.game.first_player.score
second_player_score = self.game.second_player.score
return text % (first_player_score, second_player_score)
def restart_game(self):
self.game.reset_game()
self.update()
message = str(
self.get_current_player_message()
+ " | Scores: " + self.get_score_text())
self.main_window.communicate.message_statusbar.emit(message)
def computer_turn(self):
current_player = self.game.get_current_player()
other_player = self.game.get_other_player()
current_player.make_move(other_player)
self.game.change_current_player()
if self.game.is_game_won():
print("here")
self.show_winner_message()
|
def scrape_facebook_url(url):
pass
|
#Imports for finding urls
import urllib
import json
import urllib.request
#Imports for UI
import tkinter as tk
from tkinter import *
import json, requests
from tkinter.messagebox import showinfo, showwarning
#Imports to scrape video download, thumbnail, title
from pytube import *
import PIL.Image
from PIL import ImageTk
#extras
import os
from pytube.helpers import install_proxy
import ast
#VARS
video_links = []
index = 0
last_title = ""
proxy_use = False
saved_ids = {}
API_KEY = ""
#PROXY
servers = {
"http": "",
'https': ""
}
#READ FILE FROM TXT
file = open("savedIDS.txt")
read_content = file.read()
saved_ids = ast.literal_eval(read_content)
#FUNCTIONS
def addProxy(ip, port):
#proxy settings
global servers
servers["http"] = "http://" + ip + ":" + port
servers["https"] = "https://" + ip + ":" + port
try:
install_proxy(servers)
res = requests.get("https://www.google.com/", proxies=servers)
showinfo("Proxy", "Sucsessfuly connect to porxy at " + ip + ":" + port)
except:
showinfo("Proxy", "Sorry but we cant connect to that IP and Port")
def addID(name, cid):
global saved_ids
#saving ids
saved_ids[name] = cid
#open file and read
with open('savedIDS.txt', 'w') as convert_file:
convert_file.write(json.dumps(saved_ids))
#save ids in current saved_ids dict
file = open("savedIDS.txt")
read_content = file.read()
saved_ids = ast.literal_eval(read_content)
def get_all_video_in_channel(channel_id):
global video_links
#clear video_links
video_links = []
#OUR API KEY
api_key = API_KEY
base_video_url = 'https://www.youtube.com/watch?v='
base_search_url = 'https://www.googleapis.com/youtube/v3/search?'
first_url = base_search_url+'key={}&channelId={}&part=snippet,id&order=date&maxResults=25'.format(api_key, channel_id)
url = first_url
for i in range(1):
print(url)
inp = urllib.request.urlopen(url)
resp = json.load(inp)
for i in resp['items']:
if i['id']['kind'] == "youtube#video":
video_links.append(base_video_url + i['id']['videoId'])
try:
next_page_token = resp['nextPageToken']
url = first_url + '&pageToken={}'.format(next_page_token)
except:
break
def get_video_thumbnail(url):
#Get thumbnail
my_video = YouTube(url)
name = "curtmb.png"
response = requests.get(my_video.thumbnail_url)
file = open("curtmb.png", "wb")
file.write(response.content)
file.close()
return name
def get_video_title(url):
#get title
my_video = YouTube(url)
return(my_video.title)
def download_video(url):
#download video
my_video = YouTube(url)
print(my_video)
my_video = my_video.streams.get_highest_resolution()
my_video.download()
#set status
status.set("Downloaded")
def downlaod_buffer(url):
#set status
status.set("Downloading")
download_video(url)
def delete_video():
global last_title
#set status
status.set("Browsing")
#delete video
os.remove(last_title.replace(".", "") + ".mp4")
def submit_buffer(name, com, cid):
global index, API_KEY
if com == "none":
if API_KEY == "":
showwarning("API KEY INVALID","Your API key is empty, make sure you have submit your key. For more help reference the HOW_TO_RUN.txt file in the folder you downloaded")
else:
index = 0
add_videos(name, com, cid)
else:
API_KEY = cid
def add_videos(name, com, cid):
global main, cthumbnail, index, video_links, last_title, status, saved_ids
#check saved file to grab ids
if cid in saved_ids.keys():
cid = saved_ids[cid]
channel_id = cid
get_all_video_in_channel(channel_id)
if com == "n":
index += 1
status.set('Browsing')
add_videos(get_video_thumbnail(video_links[index]), "none", cid)
elif com == "l":
index -= 1
status.set('Browsing')
add_videos(get_video_thumbnail(video_links[index]), "none", cid)
else:
name = get_video_thumbnail(video_links[index])
#resize img
cthumbnail = PIL.Image.open(name)
cthumbnail = cthumbnail.resize((535,400), PIL.Image.ANTIALIAS)
cthumbnail = ImageTk.PhotoImage(cthumbnail)
#show img
main.create_image(0,0, anchor=NW, image=cthumbnail)
#add title to view
title = get_video_title(video_links[index])
vid_title.set(title)
last_title = title
#SET UP UI
app = tk.Tk()
app.geometry("750x550")
app.configure(bg="lightblue")
#UI
#--------------------MAIN UI--------------------
vid_title = StringVar()
vid_title.set('Please enter the channel ID of your YouTuber below...')
vidLabel = tk.Label(app, textvariable=vid_title, bg="lightblue", font=("Airl", 15))
vidLabel.place(relx=0,rely=0)
ytName = tk.Entry(app, width=70)
ytName.place(relx=.01,rely=0.95)
main = tk.Canvas(app, width=535, height=400)
main.place(relx=0.01,rely=0.06)
cthumbnail = ImageTk.PhotoImage(file="base.png")
main.create_image(0,0, anchor=NW, image=cthumbnail)
Submit = tk.Button(app, width=14, text="Submit ID", command= lambda *args: submit_buffer(ytName, "none", ytName.get()))
Submit.place(relx=0.6,rely=0.94)
keyName = tk.Entry(app, width=70)
keyName.place(relx=.01,rely=0.82)
Submitkey = tk.Button(app, width=14, text="Submit KEY", command= lambda *args: submit_buffer(keyName, "KEY", keyName.get()))
Submitkey.place(relx=0.6,rely=0.81)
#--------------------MAIN UI--------------------
#--------------------STATUS--------------------
statlab = tk.Label(app, text="Status:", bg="lightblue", font=("Airl", 15))
statlab.place(relx=.85, rely=.05)
status = StringVar()
status.set('Browsing')
statusLabel = tk.Label(app, textvariable=status, bg="lightblue", font=("Airl", 15))
statusLabel.place(relx=.835, rely=.1)
#--------------------STATUS--------------------
#--------------------CONTROLS--------------------
download = tk.Button(app, text="Download video", command= lambda *args: downlaod_buffer(video_links[index]))
download.place(relx=0.4,rely=0.88)
delete = tk.Button(app, text="Delete video", command= lambda *args: delete_video())
delete.place(relx=0.55,rely=0.88)
next = tk.Button(app, width=14, text="Next", command= lambda *args: add_videos(ytName, "n", ytName.get()))
next.place(relx=0.18,rely=0.88)
last = tk.Button(app, width=14, text="Last", command= lambda *args: add_videos(ytName, "l", ytName.get()))
last.place(relx=0.01,rely=0.88)
#--------------------CONTROLS--------------------
#--------------------PROXY--------------------
proxlab = tk.Label(app, text="Proxy:", bg="lightblue", font=("Airl", 15))
proxlab.place(relx=.85, rely=.2)
ipLabel = tk.Label(app, text="IP:", bg="lightblue")
ipLabel.place(relx=.79, rely=.3)
ip_ent = tk.Entry(app)
ip_ent.place(relx=.815, rely=.3)
portLabel = tk.Label(app, text="Port:", bg="lightblue")
portLabel.place(relx=.775, rely=.4)
port_ent = tk.Entry(app)
port_ent.place(relx=.815, rely=.4)
Submitprox = tk.Button(app, width=14, text="Submit proxy", command= lambda *args: addProxy(ip_ent.get(), port_ent.get()))
Submitprox.place(relx=0.83,rely=0.46)
#--------------------PROXY--------------------
#--------------------SAVED IDS--------------------
savelab = tk.Label(app, text="Save an ID:", bg="lightblue", font=("Airl", 15))
savelab.place(relx=.82, rely=.6)
nameLabel = tk.Label(app, text="Name:", bg="lightblue")
nameLabel.place(relx=.75, rely=.7)
name_ent = tk.Entry(app)
name_ent.place(relx=.815, rely=.7)
idLabel = tk.Label(app, text="ID:", bg="lightblue")
idLabel.place(relx=.775, rely=.8)
id_ent = tk.Entry(app)
id_ent.place(relx=.815, rely=.8)
Submitprox = tk.Button(app, width=14, text="Save ID", command= lambda *args: addID(name_ent.get(), id_ent.get()))
Submitprox.place(relx=0.83,rely=0.87)
showids = tk.Button(app, width=14, text="Show saved ID's", command= lambda: showinfo("Saved ID's", saved_ids))
showids.place(relx=0.83,rely=0.93)
#--------------------SAVED IDS--------------------
app.mainloop()
|
import os
from django import template
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext
from django.conf import settings
from django_openid.models import UserOpenidAssociation
try:
any
except NameError:
def any(seq):
for x in seq:
if x:
return True
return False
register = template.Library()
def openid_icon(openid, user):
oid = u'%s' % openid
matches = [u.openid == oid for u in UserOpenidAssociation.objects.filter(user=user)]
if any(matches):
return mark_safe(u'<img src="%s" alt="%s" />' % (
os.path.join(settings.STATIC_URL, 'images', 'openid-icon.png'),
ugettext('Logged in with OpenID')
))
else:
return u''
register.simple_tag(openid_icon)
|
# Generated by Django 4.0 on 2022-01-03 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0009_alter_post_thumbnail'),
]
operations = [
migrations.AlterField(
model_name='post',
name='published',
field=models.BooleanField(default=False, verbose_name='Published'),
),
migrations.AlterField(
model_name='post',
name='re_published',
field=models.BooleanField(default=False, verbose_name='Re-published'),
),
]
|
import numpy as np
target = 80
score_beg = np.array([72, 35, 64, 88, 51, 90, 74, 12])
def curve(score_beg):
limit_up = 100
mean_score = score_beg.mean()
added_score = target - mean_score
new_score = score_beg + added_score
return np.clip(new_score, score_beg, limit_up)
print(score_beg)
curve(score_beg)
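# Worked example for the data above: score_beg.mean() == 60.75, so added_score == 19.25
# and new_score == [91.25, 54.25, 83.25, 107.25, 70.25, 109.25, 93.25, 31.25].
# np.clip(new_score, score_beg, 100) then caps the two scores above 100, giving
# [91.25, 54.25, 83.25, 100., 70.25, 100., 93.25, 31.25]; nobody is curved below
# their original score because score_beg is used as the element-wise lower bound.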
|
"""
Copyright 2021 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import pytest
from providers.gitlab.helpers.gitlab_client import GitlabClient
from providers.gitlab.helpers.gitlab_provider import GitlabProvider
@pytest.fixture(scope="package")
def token(lab_config_session: dict) -> str:
return os.getenv(lab_config_session["gitlab"]["configuration"]["token_env_var"])
@pytest.fixture(scope="package")
def base_url(lab_config_session: dict) -> str:
return lab_config_session["gitlab"]["configuration"]["base_url"]
@pytest.fixture(scope="package")
def provider(base_url: str, token: str) -> GitlabProvider:
return GitlabProvider(base_url, token)
@pytest.fixture(scope="package")
def parent_namespace_id(lab_config_session: dict) -> int:
return lab_config_session["gitlab"]["values"]["namespace_id"]
@pytest.fixture(scope="package")
def parent_namespace_path(lab_config_session: dict) -> str:
return lab_config_session["gitlab"]["values"]["namespace_path"]
@pytest.fixture(scope="package")
def project_name(lab_config_session: dict) -> str:
return lab_config_session["gitlab"]["values"]["project_name"]
@pytest.fixture(scope="package")
def global_client(lab_config_session: dict, base_url: str, token: str) -> GitlabClient:
gitlab_values = lab_config_session["gitlab"]["values"]
gitlab_client = GitlabClient(base_url, token)
yield gitlab_client
project_path = os.path.join(
gitlab_values["namespace_path"], gitlab_values["project_name"]
)
gitlab_client.delete_project(project_path)
@pytest.fixture(scope="function")
def gitlab_client(
lab_config_session: dict, global_client: GitlabClient
) -> GitlabClient:
gitlab_values = lab_config_session["gitlab"]["values"]
project_path = os.path.join(
gitlab_values["namespace_path"], gitlab_values["project_name"]
)
global_client.delete_project(project_path)
return global_client
|
#!/usr/bin/env python
import numpy as np
import math
import matplotlib.pyplot as plt
import rospy
import std_msgs.msg
import geometry_msgs.msg
from nav_msgs.msg import Odometry
import itertools
import tf
D = 0.15 # look-ahead distance
L = 0.21 # 24 before
show_animation = True
x_odom = 0.0
y_odom = 0.0
theta_odom = 0.0
#####################################################
# /left_motor/encoder Callback #
#####################################################
def odomCallback(msg):
global x_odom, y_odom, theta_odom
x_odom = msg.pose.pose.position.x
y_odom = msg.pose.pose.position.y
(r, p, y) = tf.transformations.euler_from_quaternion([msg.pose.pose.orientation.x, msg.pose.pose.orientation.y, msg.pose.pose.orientation.z, msg.pose.pose.orientation.w])
theta_odom = y
#####################################################
# Initialize Publisher #
#####################################################
rospy.init_node('path_follower_node', anonymous=True)
pub_path_following_VEL = rospy.Publisher('/keyboard/vel', geometry_msgs.msg.Twist, queue_size=1)
rate = rospy.Rate(50)
# odom subscriber
rospy.Subscriber("/robot_odom", Odometry, odomCallback)
class State:
def __init__(self, x=0.0, y=0.0, yaw=0.0):
self.x = x
self.y = y
self.yaw = yaw
def send_message(LINEAR_VELOCITY, ANGULAR_VELOCITY):
VEL = geometry_msgs.msg.Twist()
if not rospy.is_shutdown():
VEL.linear.x = LINEAR_VELOCITY
VEL.linear.y = 0.0
VEL.linear.z = 0.0
VEL.angular.x = 0.0
VEL.angular.y = 0.0
VEL.angular.z = ANGULAR_VELOCITY
pub_path_following_VEL.publish(VEL)
def pure_pursuit_control(state, path_x, path_y, t_ind_prev):
t_ind = calc_target_index(state, path_x, path_y)
# if the previous target was further away on the path than now, use that instead
if t_ind_prev >= t_ind:
t_ind = t_ind_prev
# target index to coordinates
if t_ind < len(path_x):
tx = path_x[t_ind]
ty = path_y[t_ind]
else:
tx = path_x[-1]
ty = path_y[-1]
t_ind = len(path_x) - 1
# calculate the angle to the target point (relative to heading angle)
alpha = math.atan2(ty - state.y, tx - state.x) - state.yaw
# if reversing, flip the steering angle
#if state.v < 0:
# alpha = math.pi - alpha
D_tot = D
# calculate an appropriate steering angle
delta = math.atan2(2.0 * L * math.sin(alpha) / D_tot, 1.0)
return delta, t_ind
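# Numeric sketch of the steering law above (illustrative values): with wheelbase
# L = 0.21 m, look-ahead D_tot = 0.15 m and a target bearing alpha = 30 deg (~0.5236 rad),
#   delta = atan2(2 * 0.21 * sin(0.5236) / 0.15, 1.0) = atan2(1.4, 1.0) ~= 0.95 rad (~54.5 deg),
# i.e. the further the look-ahead point sits off the current heading, the harder the commanded turn.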
def calc_target_index(state, path_x, path_y):
# find the index of the path closest to the robot
#print("x,y", state.x,state.y)
dx = [state.x - icx for icx in path_x]
dy = [state.y - icy for icy in path_y]
d = [abs(math.sqrt(idx ** 2 + idy ** 2)) for (idx, idy) in zip(dx, dy)]
t_ind = d.index(min(d))
# total look ahead distance (taking the speed into consideration)
D_tot = D
# search length
L = 0.0
# find the index of the look head point on the path (look ahead is the distance ALONG the path, not straight line)
path_length = len(path_x)
while L < D_tot and (t_ind + 1) < path_length:
dx = path_x[t_ind + 1] - path_x[t_ind]
dy = path_y[t_ind + 1] - path_y[t_ind]
L += math.sqrt(dx**2 + dy**2)
t_ind += 1
return t_ind
def main():
state = State(x=0,y=0,yaw=0)
# target course
#path_x = np.arange(0, 3, 0.01)
#path_y = [0.5*math.cos(ix / 0.3)-0.5 for ix in path_x]
path_x1 = np.arange(0, 1, 0.01)
path_x2 = np.empty(100)
path_x2.fill(1)
path_y1 = np.empty(100)
path_y1.fill(0)
path_y2 = np.arange(0, 1, 0.01)
path_x = np.append(path_x1, path_x2)
path_y = np.append(path_y1, path_y2)
print(path_x)
print(path_y)
print(len(path_x))
print(len(path_y))
lastIndex = len(path_x) - 1
x = [0]
y = [0]
yaw = [0]
t = [0.0]
state.x = x_odom
state.y = y_odom
state.yaw = theta_odom
target_ind = calc_target_index(state, path_x, path_y)
while lastIndex > target_ind:
state.x = x_odom
state.y = y_odom
state.yaw = theta_odom
ang_vel, target_ind = pure_pursuit_control(state, path_x, path_y, target_ind)
GAIN = 0.7
lin_vel = 0.07
send_message(lin_vel, ang_vel*GAIN)
#rate.sleep()
print("ang_vel", ang_vel*GAIN)
x.append(x_odom)
y.append(y_odom)
yaw.append(theta_odom)
if show_animation:
plt.cla()
plt.plot(path_x, path_y, ".r", label="course")
plt.plot([x_odom, x_odom + math.cos(theta_odom)], [y_odom, y_odom + math.sin(theta_odom)], "g", label="angle")
plt.plot(x, y, "-b", label="trajectory")
plt.plot(path_x[target_ind], path_y[target_ind], "xg", label="target")
plt.axis("equal")
plt.grid(True)
plt.pause(0.001)
# Test
send_message(0, 0)
assert lastIndex >= target_ind, "Cannot reach goal"
if show_animation:
plt.plot(path_x, path_y, ".r", label="course")
plt.plot(x, y, "-b", label="trajectory")
plt.legend()
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.axis("equal")
plt.grid(True)
plt.show()
if __name__ == '__main__':
print("path tracker started")
main()
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
###########################################################################
#
# Copyright (c) 2018 www.codingchen.com, Inc. All Rights Reserved
#
##########################################################################
'''
@brief leetcode algorithm
@author chenhui(hui.chen6789@gmail.com)
@date 2018/11/08 19:09:33
'''
class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
roman = ''
simbols = ['I', 'V', 'X', 'L', 'C', 'D', 'M']
i = 0
while num > 0:
number = num % 10
if number < 4:
roman = ''.join([simbols[2*i] for _ in range(number)]) + roman
elif number == 4:
roman = simbols[2*i] + simbols[2*i+1] + roman
elif number > 4 and number < 9:
roman = simbols[2*i+1] + ''.join([simbols[2*i] for _ in range(number - 5)]) + roman
elif number == 9:
roman = simbols[2*i] + simbols[2*i+2] + roman
num = num // 10
i += 1
return roman
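# Example trace (digit by digit, least significant first) for intToRoman(1994):
#   digit 4 (i=0): 'I' + 'V'  -> roman = 'IV'
#   digit 9 (i=1): 'X' + 'C'  -> roman = 'XCIV'
#   digit 9 (i=2): 'C' + 'M'  -> roman = 'CMXCIV'
#   digit 1 (i=3): 'M'        -> roman = 'MCMXCIV'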
if __name__ == '__main__':
s = Solution()
print(s.intToRoman(3))
print(s.intToRoman(58))
print(s.intToRoman(1994))
|
from copy import deepcopy
import numpy as np
import random
import itertools
import cPickle as pickle
class agent(object):
'''
This is an abstract agent class, however its abstractness is not enforced
via the ABC module. This is a deliberate design choice.
The brains of the agent class is the pick_action() method.
Within the project, three classes extend agent, namely:
1- manual_agent: where pick_action() asks for human input. This is used
if you would like to play with a hard-coded logic or a trained
TD-learner
2- teacher : where pick_action() follows a (nearly) optimal hard-coded logic.
This is used to train the td-learner
3- td_learner : where pick_action() follows an epsilon greedy policy selection,
and depending on the user input does on-policy (SARSA) or off-policy (Q-learning)
learning
Attributes:
----------------------
name - (str) the name you would like to give to the agent
marker - (int) the marker that the agent will use to play.
This is either 1 or -1 (rather than 'X' and 'O')
board_dimensions - (tuple) The size of the tic-tac-toe board. For now
hard-coded as 3*3
'''
def __init__(self, name, marker):
self.name = name
self.marker = marker
self.oppmarker = -1 if self.marker==1 else 1
self.board_dimensions = (3,3)
def pick_action(self, env):
'''
Given the environment the agent is dealing with, pick one of the legal
environment actions
Parameters:
--------------------
'''
return
class manual_agent(agent):
'''
Agent that relies on human input to pick actions. The pick_action() method
renders the current state of the environment and asks for input from the
human user.
Attributes:
----------------------
name - (str) the name you would like to give to the agent
marker - (int) the marker that the agent will use to play.
This is either 1 or -1 (rather than 'X' and 'O')
board_dimensions - (tuple) The size of the tic-tac-toe board. For now
hard-coded as 3*3
'''
def __init__(self, name, marker):
'''
Return a manual agent object with the indicated
name and marker (-1 or 1 ) of choice. '''
super(manual_agent, self).__init__(name, marker)
self.epsilon = 0 # TO DO: here for plotting convenience / remove
def pick_action(self, obs):
'''
Ask for manual action input from the human user.
If necessary, this would allow printing instructions
for the human as well.
Parameters:
-------------
obs - (np.array) the current environment (board) state
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
'''
action = None
available_actions = np.where(obs==0)[0]
while action not in available_actions:
# print out what to do
self.instruct_human(obs, available_actions)
action = raw_input()
try:
action = int(action)
if action not in available_actions:
print 'Sorry, %s is not a valid action.'%(str(action))
except:
if action.lower().startswith('inst'):
self.print_instructions()
else:
print 'Sorry, \'%s\' is not a valid input.'%(str(action))
return action
def instruct_human(self, obs, available_actions):
'''
Based on the input board configuration and the available
actions, instruct the human to input a valid (available)
action.
Parameters:
--------------
obs -(np.array) the current environment (board) state
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
available_actions - (list) the available actions based on the unoccupied
cells on the board. The player can only pick one of
these.
'''
print 'Current board: '
self.render(obs)
print 'Your marker is %i. What is your next move?'%(self.marker)
print 'Please pick one of %s'%(str(available_actions))
print 'Type: Instructions for locations mapping.'
def print_instructions(self):
'''
Prints the instructions (as below) out to the reader.
'''
inst = '---------------Instructions --------------- \n'\
'Available actions are integers from 0 to 8 on a 3*3 board \n'\
'with the location mapping as below:\n' \
'0 | 1 | 2 \n'\
'----------\n'\
'3 | 4 | 5 \n'\
'----------\n'\
'6 | 7 | 8 \n'\
'If the location you are choosing is already taken (is not 0), the computer \n' \
'will keep on asking you to put in an available location integer. \n'\
'------------------------------------------'
print inst
def render(self,obs):
'''
Hard-coded visualisation of the current state of the board
Parameters:
-------------
obs - (np.array) the current environment (board) state
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
'''
board = [str(i) for i in obs]
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' ' + board[0] + ' | ' + board[1] + ' | ' + board[2])
print('-----------')
print(' ' + board[3] + ' | ' + board[4] + ' | ' + board[5])
print('-----------')
print(' ' + board[6] + ' | ' + board[7] + ' | ' + board[8])
class teacher(agent):
'''
Agent with a hard-coded game-play logic for a 3*3 board configuration.
The logic mainly involves looking one move ahead to see whether the agent can
win the game immediately, or whether it can avoid losing by blocking
an immediate win opportunity by the opponent.
If neither is possible, the agent first tries to choose an unoccupied corner,
then the centre and finally one of the sides (in this preference order).
There is also a special logic to deflect certain moves when the opponent
starts the game by picking one of the corners.
Attributes:
----------------------
name - (str) the name you would like to give to the agent
marker - (int) the marker that the agent will use to play.
This is either 1 or -1 (rather than 'X' and 'O')
board_dimensions - (tuple) The size of the tic-tac-toe board. For now
hard-coded as 3*3
epsilon - (float) the randomness factor. For instance, if this is 0.2,
the teacher object would act randomly 20% of the time and
carries out its hard-coded logic 80% of the time.
This is set to 0.5 during training to allow some state
space exploration for the opponent (a td-learner).
'''
def __init__(self, name, marker, epsilon):
'''
Creates a teacher object that acts on a hard-coded logic as specified
by its pick_action() method.
'''
super(teacher, self).__init__(name, marker)
self.epsilon = epsilon
def pick_action(self, obs):
'''
Given the environment the agent is dealing with, returns one of the legal
environment actions based on the hard-coded logic, whereby the agent
looks one move ahead to see whether the agent can win the game immediately,
or whether it can avoid losing by blocking an immediate win opportunity
by the opponent.
If neither is possible, the agent first tries to choose an unoccupied corner,
then the centre and finally one of the sides (in this preference order).
There is also a special logic to deflect certain moves when the opponent
starts the game by picking one of the corners.
Parameters:
----------------
obs - (np.array) the current (board) state
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
'''
available_actions = np.where(obs==0)[0]
if np.random.rand() < self.epsilon: # random action
return np.random.choice(available_actions)
else:
if len(available_actions) == 1: # agent has no option to pick another action
return available_actions[0]
else: # pick action based on the hard-coded logic
# special logic for when the opponent starts at a corner
if (len(available_actions) ==8) and any(obs[[0,2,6,8]] !=0):
return 4
# special logic for when the opponent starts with opposite corners
elif (len(available_actions) ==6) and (sum(obs[[0,8]]) == 2*self.oppmarker or
sum(obs[[2,6]]) == 2*self.oppmarker):
sides = np.intersect1d(np.array([1, 3, 5, 7]), available_actions)
return random.choice(sides)
else:
# 1. Check if we can win the game with this move
for action in available_actions:
board_copy_attack = self.__considerMove(self.marker, action, obs)
if self.__isWinner(board_copy_attack): # First, attempt to win with this move
return action
# 2. Check if the opponent can win on their next move, and block them.
for action in available_actions:
board_copy_defend = self.__considerMove(self.oppmarker, action, obs)
if self.__isWinner(board_copy_defend): # If not possible, defend
return action
# 3. Take one of the corners, if they are free.
corners = np.intersect1d(np.array([0, 2, 6, 8]), available_actions)
if len(corners)>0:
return random.choice(corners)
# 4. Take the centre if it is free.
if 4 in available_actions:
return 4
# 5. If nothing else is free, take one of the free side locations
sides = np.intersect1d(np.array([1, 3, 5, 7]), available_actions)
return random.choice(sides)
def __considerMove(self, mark, move, obs):
'''
Given a move, a player marker and the current board configuration,
return a (temporary) copy of the board with the considered move applied.
Parameters:
----------------
mark - {-1,1} the numeric marker (equivalent to 'X' or 'O') for the
tic-tac-toe game.
move - (int) a legal move location on the board.
obs - (np.array) the 3*3 board configuration to test a move on
'''
board_copy = deepcopy(obs)
board_copy[move] = mark
return board_copy
def __isWinner(self, board):
'''
Given a board configuration, this method returns True if there is a winner.
Note: There is an equivalent method in the tic_tac_toe class, however in
order to keep the agent and the environment isolated, this class has its
own _isWinner() implementation, at the expense of minor repetition.
Parameters:
-----------------
obs - (np.array) the current (board) state
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
'''
board_mat =board.reshape(self.board_dimensions)
# episode ending criteria
row_complete = any(abs(board_mat.sum(axis=1))==3 )
col_complete = any(abs(board_mat.sum(axis=0))==3 )
diag_complete = abs(np.diag(board_mat).sum())==3
opp_diag_complete = abs(np.diag(np.fliplr(board_mat)).sum()) == 3
# if any of the criteria satisfies, episode complete
if any([row_complete, col_complete, diag_complete, opp_diag_complete]) :
return True
else:
return False
class td_learner(agent):
'''
Epsilon-greedy temporal difference (td) learner that is trained based
on the equation:
Q(s,a) <-- Q(s,a) + alpha * [target - prediction], where:
prediction = Q(s,a),
and
target = r + gamma * max_a'[Q(s',a')] for Q-learning,
or
target = r + gamma * [ (1-epsilon)* max_a'[Q(s',a')] +
epsilon* mean[Q(s',a') |a'!= optimal a'] ] for SARSA.
Attributes:
----------------------
name - (str) the name you would like to give to the agent
marker - (int) the marker that the agent will use to play.
This is either 1 or -1 (rather than 'X' and 'O')
board_dimensions - (tuple) The size of the tic-tac-toe board. For now
hard-coded as 3*3
epsilon - (float) the randomness factor. For instance, if this is 0.2,
the agent would act randomly 20% of the time and pick
the optimal action 80% of the time.
This is annealed from 1 to 0.05 during training to allow
some state space exploration.
In other words, this adjusts the exploration / exploitation
balance.
learning - {'off-policy', 'on-policy'} defines whether update_q_table()
operates off-policy (i.e. Q-Learning) or on-policy (SARSA)
learn_rate - (float) the learning rate (alpha) for the td-update formula
given above.
gamma - (float) the future reward discount factor in the td-update
formula given above. Its choice should be informed by
average episode (game-play) duration.
action_set_size - (int) the number of available actions. By default, if the agent
is used with the tic-tac-toe environment, this is equal to the
state dimension size, i.e. 3*3
q_dict - (dict) the Q-value lookup table of the td-learner, implemented
as a dictionary where keys are tuples of unique states and the
values are the available actions per each state.
This can either be initialised empty or loaded from an existing
pickled q value dictionary.
q_table_dir - (str) the pickled file location of a previously trained
Q value dictionary. If this is not None, instead of creating
an empty Q value dictionary, the object is initialised by
reading in the pickled dictionary at this location.
'''
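# Illustrative numbers for the off-policy (Q-learning) update above: with
# learn_rate = 0.1, gamma = 0.9, reward r = 1, current Q(s,a) = 0.5 and
# max_a' Q(s',a') = 0.8,
#   target = 1 + 0.9 * 0.8 = 1.72
#   Q(s,a) <- 0.5 + 0.1 * (1.72 - 0.5) = 0.622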
def __init__(self, name, marker, state_dimensions, learning, epsilon, learn_rate,
gamma, action_set_size = None, q_table_dir = None):
'''
Creates a Temporal-Difference Learner object. Depending on the user-defined
'learning' parameter, the agent either does off-policy (Q-Learning) or
on-policy (SARSA) learning.
'''
super(td_learner, self).__init__(name, marker)
self.epsilon = epsilon
self.final_epsilon = 0.05 # hard-coded, make dynamic
self.learning_rate = learn_rate
self.gamma = gamma
self.learning = learning
if action_set_size is None:
self.__action_space_size = len(state_dimensions)
else:
self.__action_space_size = action_set_size
if q_table_dir is None:
self.q_dict = self.__create_q_table(state_dimensions)
else:
self.q_dict = self.load_q_dict(q_table_dir)
def __create_q_table(self, state_dimensions):
'''
Create a Q lookup dict by taking the Cartesian product of all the dimensions in the
state space. For the 3*3 tic-tac-toe environment, each cell can have [-1,0,1].
So there are 3**(3*3) configurations. Some of these are not legal game plays,
e.g.(1,1,1,1,1,1,1,1,1) but for the time being (due to time constraints) we do
not worry about these.
Each product is then used as a tuple key, pointing to a numpy array of size 9, each
representing an available location, i.e. action index.
The lookup dictionary is constrained in the sense that when we know a certain state
can not allow an action (i.e. that particular location is already occupied),
we populate the Q value for that action as np.nan.
For instance:
q_dict[(1,1,1,1,1,1,1,1,1)] = array([ nan, nan, nan, nan, nan, nan, nan, nan, nan])
q_dict[(0,0,1,-1,0,0,0,0,0)] = array([-0.06, -0.04, nan, nan, -0.03, -0.03, -0.07, 0.04, 0.06])
etc..
Parameters:
------------
state_dimensions - (list) the state dimensions for the environment that the
agent will interact with. For the tic_tac_toe env, this is
a list of all possible markers [-1,0,1] repeated 3*3 times.
'''
n = self.__action_space_size # for brevity below, create temp variable
q_dict = dict([(element, np.array([(i == 0 and [np.random.uniform(0, 0)] or [np.nan])[0]
for i in element]))
for element in itertools.product(*state_dimensions )])
return q_dict
def set_epsilon(self, val):
'''
Manually adjust the td_learner agent's epsilon. This is not very clean
but is done while the td-learner is playing against a manual_agent (human)
to ensure that we play with a fully exploitative, non-random td-learner agent.
Parameters:
----------------
val - (float) the value we want to set the epsilon to
'''
self.epsilon = val
def save_q_dict(self, name):
'''
Pickles and saves the current Q-value dictionary of the agent
with the provided file name.
This is used to save the results of a trained agent so we can play
with it without having to retrain.
Parameters:
----------------
name - (str) the directory and name that we want to give to the
pickled Q dictionary file. Example: 'on_policy_trained.p'
'''
with open(name, 'wb') as fp:
pickle.dump(self.q_dict, fp)
def load_q_dict(self, dir):
'''
Loads the pickled dictionary at the provided location as the agent's
q-value dictionary. We can initialise a td-learner in this way and
play with it directly, without having to retrain a blank one.
Parameters:
----------------
dir - (str) the directory and name of the pickled Q value dictionary
file that we want to load.
'''
with open(dir, 'rb') as fp:
q_dict = pickle.load(fp)
return q_dict
def pick_action(self, obs):
'''
Pick an action in an epsilon-greedy way. With probability self.epsilon
this returns a random action, and with probability (1-epsilon)
it returns the action with the maximum q-value
for the current environment state.
Parameters:
---------------
obs - (np.array) the current (board) state to pick an action on.
For example: np.array([0,0,0,0,0,0,0,0,0]) for an
empty board.
'''
if np.random.rand() < self.epsilon: # random action
action = np.random.choice(np.where(obs==0)[0])
else: # action with the max q-value
action = np.nanargmax(self.__get_state_vals(obs))
return action
def update_q_table(self, obs, action, reward, next_obs, done, func):
'''
Implementation of the temporal difference learning update:
Q(s,a) <-- Q(s,a) + alpha * [target - prediction].
where:
prediction = Q(s,a),
and
target = r + gamma * max_a'[Q(s',a')] for Q-learning,
or
target = r + gamma * [ (1-epsilon)* max_a'[Q(s',a')] +
epsilon* mean[Q(s',a')] for SARSA.
The definition of the target changes depending on whether the learning is done
off-policy (Q-Learning) or on-policy (SARSA).
Off-policy (Q-Learning) computes the difference between Q(s,a) and the maximum
action value, while on-policy (SARSA) computes the difference between Q(s,a)
and the weighted sum of the average action value and the maximum.
Parameters:
---------------
obs - (np.array), the state we transitioned from (s).
action - (int) the action (a) taken at state=s.
reward - (int) the reward (r) resulting from taking the specific action (a)
at state = s.
next_obs - (np.array) the next state (s') we transitioned into
after taking the action at state=s.
done - (bool) episode termination indicator. If True, target (above) is
only equal to the immediate reward (r) and there is no discounted
future reward
func - (np.nanmax or np.nanmin) the aggregation applied to the next state's
action values: np.nanmax when it is the agent's turn, np.nanmin when it is
the opponent's turn.
'''
if self.learning == 'off-policy': # Q-Learning
if done: # terminal state, just immediate reward
target = reward
else: # within episode
target = reward + self.gamma*func(self.__get_state_vals(next_obs))
prediction = self.__get_state_vals(obs)[action]
updated_q_val = prediction + self.learning_rate *(target - prediction)
# update the q-value for the observed state,action pair
self.__set_q_val(obs, action, updated_q_val)
elif self.learning == 'on-policy': # SARSA
if done: # terminal state, just immediate reward
target = reward
else: # within episode
on_policy_q = self.epsilon * np.nanmean(self.__get_state_vals(next_obs)) + \
(1- self.epsilon) * func(self.__get_state_vals(next_obs))
target = reward + self.gamma*on_policy_q
prediction = self.__get_state_vals(obs)[action]
updated_q_val = prediction + self.learning_rate *(target - prediction)
# update the q-value for the observed state,action pair
self.__set_q_val(obs, action, updated_q_val)
else:
raise ValueError ('Learning method is not known.')
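# Worked example of the off-policy update above (illustrative numbers only): with
# learning_rate = 0.1, gamma = 0.9, Q(s,a) = 0.5, reward r = 0 and max_a' Q(s',a') = 0.8,
# the target is 0 + 0.9 * 0.8 = 0.72 and the new value is
# Q(s,a) <- 0.5 + 0.1 * (0.72 - 0.5) = 0.522.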
def on_policy_q_target(self, next_obs):
'''
Calculate the expectation term used in the target of the TD learning update
Q(s,a) <-- Q(s,a) + alpha * [target - prediction] when the learning is
done on-policy. In this case the target discounts an epsilon-weighted sum of the
greedy (maximum) action value and the mean of the remaining action values. The
weighting is done using self.epsilon:
target = r + gamma * [ (1-epsilon)* max_a'[Q(s',a')] + epsilon* mean[Q(s',a') |a'!= optimal a'] ]
Parameters:
-----------------
next_obs - (np.array) the next state (s') we transitioned into
after taking the action at state=s.
'''
# next action candidates
a_prime_candidates = deepcopy(self.__get_state_vals(next_obs))
# optimum next state action (greedy selection)
optimum_a_prime_idx = np.nanargmax(a_prime_candidates)
# on_policy_q = (1-eps)*optimal a' + eps*E[non_optimal a']
exp_greedy_q = (1 - self.epsilon) * a_prime_candidates[optimum_a_prime_idx]
if all(np.isnan(np.delete(a_prime_candidates, optimum_a_prime_idx))):
exp_random_q = 0
else:
exp_random_q = self.epsilon * np.nanmean(np.delete(a_prime_candidates, optimum_a_prime_idx))
return exp_greedy_q + exp_random_q
def __set_q_val(self, state, action, q_val):
'''
Set the q value for a state-action pair in the object's q val dictionary.
Parameters:
-----------------
state -(list) the state index, for a 3*3 board
action -(int) the action index
q_val -(float) the Q value to appoint to the state-action pair
'''
self.q_dict[tuple(state)][action] = q_val
def __get_state_vals(self, state):
'''
For a given state, look up and return the action values from
the object's q val dictionary. The q values are returned as a numpy array
indexed by action, with np.nan entries for unavailable action locations,
which makes it easy to filter them out downstream (e.g. with np.nanargmax).
Parameters:
-----------------
state -(list) the state index, for a 3*3 board
'''
d = self.q_dict[tuple(state)]
return d
|
from typing import Optional, Dict
from sqlalchemy.sql.sqltypes import DateTime
from repository.models import Section
from datetime import datetime
# Machine
# 1. Solenoid valves (5 ea)
# 2. Water pump for water supply
# 3. Water pump for spraying
# 4. Water pumps for nutrient solution supply (2 ea)
class Machine:
def __init__(
self,
name: str,
pin: int,
id: Optional[int]= None,
createdAt: Optional[DateTime]= None
):
self.id = id
self.name = name
self.pin = pin
self.createdAt = createdAt
@classmethod
def from_dict(cls, adict: Dict) -> object:
return cls(**adict)
def to_dict(self) -> Dict:
return {
"id": self.id,
"name": self.name,
"pin": self.pin,
"createdAt": self.createdAt
}
|
import csv
from pymongo import MongoClient
import sys
from pprint import pprint
client = MongoClient('localhost', 27017)
db = client.test_database
# Find the unique authors and publishers
author_list = []
publisher_list = []
with open("book.csv", newline='') as csvfile:
bookreader = csv.DictReader(csvfile, delimiter=',')
for row in bookreader:
author_list_tmp = row['Author'].split(";")
for author in author_list_tmp:
if author.strip() not in author_list:
author_list.append(author.strip())
publisher = row['publisher'].strip()
if publisher not in publisher_list:
publisher_list.append(publisher)
# print(author_list)
# print(publisher_list)
# creating author collection and create documents
db.author.drop()
author_collection = db.author
db.publisher.drop()
publisher_collection = db.publisher
for author_name in author_list:
print(author_name)
author_dict = {"author": author_name}
author_collection.insert_one(author_dict)
cursor = author_collection.find({})
for document in cursor:
pprint(document)
for publisher_name in publisher_list:
print(publisher_name)
publisher_dict = {"publisher": publisher_name}
publisher_collection.insert_one(publisher_dict)
cursor = publisher_collection.find({})
for document in cursor:
pprint(document)
print(db.list_collection_names())
db.book.drop()
book_collection = db.book
db.inventory.drop()
inventory_collection = db.inventory
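# Second pass over book.csv: build one "book" document per row, replacing the Author and
# publisher strings with references to the ObjectIds created above, and record each book's
# stock level as a separate "inventory" document keyed by the inserted book's _id.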
with open("book.csv", newline='') as csvfile:
bookreader = csv.DictReader(csvfile, delimiter=',')
for row in bookreader:
dict1 = {}
dict2 = {}
print(row)
dict1["title"] = row["Title"]
dict1["ISBN-13"] = row["ISBN-13"]
dict1["ISBN-10"] = row["ISBN-10"]
dict1["review_count"] = row["Reviews"]
dict1["description"] = row["Book Description"]
dict1["published"] = row["published"]
dict1["pages"] = row["pages"]
dict1["price"] = row["price"]
author_list_tmp = row['Author'].split(";")
author_id_list = []
for author in author_list_tmp:
author_id_list.append(author_collection.find({"author": author.strip()})[0]["_id"])
dict1["author_id"] = author_id_list
publisher_id_list = []
publisher_list_tmp = row['publisher'].split(";")
for publisher in publisher_list_tmp:
publisher_id_list.append(publisher_collection.find({"publisher": publisher.strip()})[0]["_id"])
dict1["publisher_id"] = publisher_id_list
result = book_collection.insert_one(dict1)
dict2["_id"] = result.inserted_id
dict2["quantity"] = row["quantity"]
inventory_collection.insert_one(dict2)
cursor = book_collection.find({})
for document in cursor:
pprint(document)
cursor = inventory_collection.find({})
for document in cursor:
pprint(document)
db.customers.drop()
customers_collection = db.customers
with open("customer.csv", newline='') as csvfile:
customersreader = csv.DictReader(csvfile, delimiter=',')
for row in customersreader:
dict1 = {}
dict1["name"] = row["Name"]
dict1["email_address"] = row["email_address"]
dict1["address"] = row["address"]
dict1["phone_number"] = row["phone number"]
customers_collection.insert_one(dict1)
cursor = customers_collection.find({})
for document in cursor:
pprint(document)
db.order.drop()
order_collection = db.order
with open("order.csv", newline='') as csvfile:
orderreader = csv.DictReader(csvfile, delimiter=',')
for row in orderreader:
print(row)
dict1 = {}
book_id_list = []
# email_addr=row["userid"]
# isbn_list_tmp=row['ISBN-13'].split(";")
# for isbn in isbn_list_tmp:
# book_id_list.append(book_collection.find({"ISBN-13":isbn})[0]["_id"])
# dict1["customer_id"]=customer_collection.find({"email_address":email_addr})[0]["_id"]
dict1["order_id"] = row["order_id"]
dict1['email'] = row["email"]
dict1['title'] = row["title"]
dict1["amount"] = row["amount"]
dict1["created_time"] = row["created_time"]
dict1['status'] = row["status"]
dict1["completed_time"] = row["completed_time"]
print(dict1)
order_collection.insert_one(dict1)
cursor = order_collection.find({})
for document in cursor:
pprint(document)
|
# -*- coding: utf-8 -*-
VERSION = "1.5.0" #should keep up with the counterwallet version it works with (for now at least)
DB_VERSION = 22 #a db version increment will cause counterblockd to rebuild its database off of counterpartyd
CAUGHT_UP = False #atomic state variable, set to True when counterpartyd AND counterblockd are caught up
UNIT = 100000000
SUBDIR_ASSET_IMAGES = "asset_img" #goes under the data dir and stores retrieved asset images
SUBDIR_FEED_IMAGES = "feed_img" #goes under the data dir and stores retrieved feed images
MARKET_PRICE_DERIVE_NUM_POINTS = 8 #number of last trades over which to derive the market price (via VWAP)
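# (VWAP here means the volume-weighted average price over those trades:
#  sum(price_i * volume_i) / sum(volume_i))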
# FROM counterpartyd
# NOTE: These constants must match those in counterpartyd/lib/config.py
REGULAR_DUST_SIZE = 5430
MULTISIG_DUST_SIZE = 5430 * 2
ORDER_BTC_DUST_LIMIT_CUTOFF = MULTISIG_DUST_SIZE
mongo_db = None #will be set on server init
BTC = 'BTC'
XCP = 'XCP'
MAX_REORG_NUM_BLOCKS = 10 #max reorg we'd likely ever see
MAX_FORCED_REORG_NUM_BLOCKS = 20 #but let us go deeper when messages are out of sync
ARMORY_UTXSVR_PORT_MAINNET = 6590
ARMORY_UTXSVR_PORT_TESTNET = 6591
QUOTE_ASSETS = ['BTC', 'XBTC', 'XCP'] # define the priority for quote asset
MARKET_LIST_QUOTE_ASSETS = ['XCP', 'XBTC', 'BTC'] # define the order in the market list
DEFAULT_BACKEND_RPC_PORT_TESTNET = 18332
DEFAULT_BACKEND_RPC_PORT = 8332
|
from .interface import Interface
#from . import message
#from . import parsers
|
# If tests is a package, debugging is a bit easier.
|
# coding: utf-8
# # Your first neural network
#
# In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
#
#
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load and prepare the data
#
# A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
# In[2]:
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# In[3]:
rides.head()
# ## Checking out the data
#
# This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.
#
# Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
# In[4]:
rides[:24 * 10].plot(x='dteday', y='cnt')
# ### Dummy variables
# Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
# In[5]:
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# ### Scaling target variables
# To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
#
# The scaling factors are saved so we can go backwards when we use the network for predictions.
# In[6]:
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean) / std
# ### Splitting the data into training, testing, and validation sets
#
# We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
# In[7]:
# Save data for approximately the last 21 days
test_data = data[-21 * 24:]
# Now remove the test data from the data set
data = data[:-21 * 24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
# In[ ]:
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60 * 24], targets[:-60 * 24]
val_features, val_targets = features[-60 * 24:], targets[-60 * 24:]
# ## Time to build the network
#
# Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
#
# <img src="assets/neural_network.png" width=300px>
#
# The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
#
# We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
#
# > **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
# Below, you have these tasks:
# 1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
# 2. Implement the forward pass in the `train` method.
# 3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.
# 4. Implement the forward pass in the `run` method.
#
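# The cell below is an illustrative sketch only, not the NeuralNetwork class you implement
# in my_answers.py: it shows the sigmoid hidden-layer activation, notes that the output
# activation f(x) = x has derivative 1, and runs a single forward pass through a 3-2-1
# network. The weights are copied from the unit tests further down, so the result matches
# the value checked in test_run (~0.0999). All of the names here are made up for the sketch.

def sigmoid_example(x):
    # hidden-layer activation
    return 1 / (1 + np.exp(-x))

def sigmoid_example_prime(x):
    # derivative of the sigmoid, used during backpropagation
    s = sigmoid_example(x)
    return s * (1 - s)

example_inputs = np.array([[0.5, -0.2, 0.1]])       # shape (1, 3)
example_w_i_h = np.array([[0.1, -0.2],
                          [0.4, 0.5],
                          [-0.3, 0.2]])             # shape (3, 2)
example_w_h_o = np.array([[0.3],
                          [-0.1]])                  # shape (2, 1)

hidden_outputs = sigmoid_example(example_inputs.dot(example_w_i_h))   # hidden layer
final_outputs = hidden_outputs.dot(example_w_h_o)                     # output layer: f(x) = x
print(final_outputs)  # ~0.09998924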
# In[ ]:
#############
# In the my_answers.py file, fill out the TODO sections as specified
#############
from my_answers import NeuralNetwork
# In[ ]:
def MSE(y, Y):
return np.mean((y - Y) ** 2)
# ## Unit tests
#
# Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
# In[ ]:
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# ## Training the network
#
# Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
#
# You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
#
# ### Choose the number of iterations
# This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, this process can have sharply diminishing returns and can waste computational resources if you use too many iterations. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. The ideal number of iterations would be a level that stops shortly after the validation loss is no longer decreasing.
#
# ### Choose the learning rate
# This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. Normally a good choice to start at is 0.1; however, if you effectively divide the learning rate by n_records, try starting out with a learning rate of 1. In either case, if the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
#
# ### Choose the number of hidden nodes
# In a model where all the weights are optimized, the more hidden nodes you have, the more accurate the predictions of the model will be. (A fully optimized model could have weights of zero, after all.) However, the more hidden nodes you have, the harder it will be to optimize the weights of the model, and the more likely it will be that suboptimal weights will lead to overfitting. With overfitting, the model will memorize the training data instead of learning the true pattern, and won't generalize well to unseen data.
#
# Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. You'll generally find that the best number of hidden nodes to use ends up being between the number of input and output nodes.
# In[ ]:
import sys
####################
### Set the hyperparameters in your my_answers.py file ###
####################
from my_answers import iterations, learning_rate, hidden_nodes, output_nodes
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write(
"\rProgress: {:2.1f}".format(100 * ii / float(iterations)) + "% ... Training loss: " + str(train_loss)[
:5] + " ... Validation loss: " + str(
val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# In[ ]:
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
# ## Check out your predictions
#
# Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T * std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt'] * std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# ## OPTIONAL: Thinking about your results (this question will not be evaluated in the rubric).
#
# Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
#
# > **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
#
# #### Your answer below
|
from tqdm import tqdm
import torch
def run_epoch(model, iterator,
criterion, optimizer,
metrics,
phase='train', epoch=0,
device='cpu', writer=None):
is_train = (phase == 'train')
if is_train:
model.train()
else:
model.eval()
epoch_loss = 0.0
epoch_metrics = dict((metric_name, 0.0) for metric_name in metrics.keys())
with torch.set_grad_enabled(is_train):
for (images, targets) in tqdm(iterator, desc=f"{phase}", ascii=True):
images, targets = images.to(device), targets.to(device)
predictions = model(images)
loss = criterion(predictions, targets)
if is_train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss += loss.item()
for metric_name in metrics.keys():
epoch_metrics[metric_name] += metrics[metric_name](predictions.detach(), targets)
epoch_loss /= len(iterator)
for metric_name in metrics.keys():
epoch_metrics[metric_name] /= len(iterator)
if writer is not None:
writer.add_scalar(f"loss/{phase}", epoch_loss, epoch)
for metric_name in metrics.keys():
writer.add_scalar(f"{metric_name}/{phase}", epoch_metrics[metric_name], epoch)
return epoch_loss, epoch_metrics
def print_metrics(epoch, train_loss, train_metrics, val_loss, val_metrics):
print(f'Epoch: {epoch + 1:02}')
print(f'\tTrain Loss: {train_loss:.2f} | Train Metrics: ' +
' | '.join([metric_name + ': ' + f"{train_metrics[metric_name]:.2f}"
for metric_name in train_metrics.keys()]))
print(f'\t Val Loss: {val_loss:.2f} | Val Metrics: ' +
' | '.join([metric_name + ': ' + f"{val_metrics[metric_name]:.2f}"
for metric_name in val_metrics.keys()]))
def train(model,
train_dataloader, val_dataloader,
criterion,
optimizer, scheduler,
metrics,
n_epochs,
device,
writer):
best_val_loss = float('+inf')
for epoch in range(n_epochs):
train_loss, train_metrics = run_epoch(model, train_dataloader,
criterion, optimizer,
metrics,
phase='train', epoch=epoch,
device=device, writer=writer)
val_loss, val_metrics = run_epoch(model, val_dataloader,
criterion, None,
metrics,
phase='val', epoch=epoch,
device=device, writer=writer)
if scheduler is not None:
scheduler.step(val_loss)
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), f"{model.name}.best.pth")
print_metrics(epoch, train_loss, train_metrics, val_loss, val_metrics)
if writer is not None:
writer.close()
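# Illustrative call sketch (everything here is assumed/user-supplied; in particular the model
# is expected to expose a `.name` attribute, which the checkpoint filename above relies on):
#
#   metrics = {"accuracy": accuracy_fn}
#   train(model, train_loader, val_loader,
#         criterion=torch.nn.CrossEntropyLoss(),
#         optimizer=torch.optim.Adam(model.parameters()),
#         scheduler=None, metrics=metrics, n_epochs=10,
#         device="cuda" if torch.cuda.is_available() else "cpu",
#         writer=None)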
|
import evfuncs
import pytest
import soundfile
import vocalpy
def test_field_access():
assert False
def test_defaults():
assert False
def test_asdict():
assert False
def test_equality():
assert False
def test_inequality():
assert False
@pytest.mark.parametrize(
'spect_format, with_format_str',
[
('npz', False),
('npz', True),
('mat', False),
('mat', True),
]
)
def test_spectrogram_from_file(spect_format: str,
with_format_str: bool,
specific_spect_list):
spect_list = specific_spect_list(spect_format)
if with_format_str:
format_str = spect_format
else:
format_str = None
for spect_path in spect_list:
spect = vocalpy.Spectrogram.from_file(spect_path, format=format_str)
assert isinstance(spect, vocalpy.Spectrogram)
for spect_attrs in ('s', 't', 'f', 'audio_path'):
assert hasattr(spect, spect_attrs)
# TODO: rewrite to actually load dict and assert that attributes equal what's in dict
# TODO: use an 'assert helper' for this
pytest.fail()
def test_from_mat(spect_list_mat: list):
for spect_path in spect_list_mat:
spect = vocalpy.Spectrogram.from_mat(spect_path)
assert isinstance(spect, vocalpy.Spectrogram)
for spect_attrs in ('s', 't', 'f', 'audio_path'):
assert hasattr(spect, spect_attrs)
def test_from_npz(spect_list_npz: list):
for spect_path in spect_list_npz:
spect = vocalpy.Spectrogram.from_npz(spect_path)
assert isinstance(spect, vocalpy.Spectrogram)
for spect_attrs in ('s', 't', 'f', 'audio_path'):
assert hasattr(spect, spect_attrs)
@pytest.mark.parametrize(
'audio_format',
[
'wav',
'cbin',
]
)
def test_spectrogram_from_arrays(specific_audio_list,
audio_format):
audio_paths = specific_audio_list(audio_format)
for audio_path in audio_paths:
if audio_format == 'wav':
data, samplerate = soundfile.read(audio_path)
elif audio_format == 'cbin':
data, samplerate = evfuncs.load_cbin(audio_path)
s, t, f = vocalpy.signal.spectrogram(data=data, samplerate=samplerate)
spect = vocalpy.Spectrogram(s=s, t=t, f=f)
assert isinstance(spect, vocalpy.Spectrogram)
for spect_attrs in ('s', 't', 'f', 'audio_path'):
assert hasattr(spect, spect_attrs)
def test_to_file():
assert False
|
import TestExports
from TestExports import project_to_json
PROJECT_NAME = 'African Animals'
#Uncomment if running on Windows:
#project_to_json(PROJECT_NAME, 'Windows')
#Uncomment if running on Mac:
project_to_json(PROJECT_NAME, 'Mac')
#Uncomment if running on Linux:
#project_to_json(PROJECT_NAME, 'Linux')
|
# BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
Utilities for use with genshi
@author: Stephen Pascoe
"""
from genshi import *
class RenameElementFilter(object):
"""
Rename the root element in the stream.
Filters of this class will replicate the stream until the first START event
then change the event element's QName. It will then count opening and closing
tags until it finds the matching close tag and replace the QName in that. It
then continues to replicate the stream.
"""
def __init__(self, newQName):
self.newQName = newQName
# status 0: awaiting START, 1: awaiting END, 2: noop
self.status = 0
self.tcount = 0
def __call__(self, stream):
for kind, data, pos in stream:
if self.status == 0:
if kind == Stream.START:
self.status = 1
self.tcount = 1
yield kind, (self.newQName, data[1]), pos
else:
yield kind, data, pos
elif self.status == 1:
if kind == Stream.START:
self.tcount += 1
elif kind == Stream.END:
self.tcount -= 1
if self.tcount == 0:
self.status = 2
yield kind, self.newQName, pos
else:
yield kind, data, pos
elif self.status == 2:
yield kind, data, pos
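# Illustrative usage sketch (the tag names are made up; it relies on genshi's XML() parser
# and the stream "|" filter operator, both pulled in by the wildcard import above):
#
#   stream = XML('<old-root><child/></old-root>')
#   renamed = stream | RenameElementFilter(QName('new-root'))
#   print(renamed.render('xml'))   # -> <new-root><child/></new-root>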
|
import director.applogic as app
import director.objectmodel as om
from director import cameraview
import functools
actionName = 'ActionColorizeLidar'
def setVisProperties(obj, colorModeEnabled):
if colorModeEnabled:
alpha = 1.0
pointSize = 4.0
colorBy = 'rgb'
else:
alpha = 0.5
pointSize = 1.0
colorBy = None
obj.setProperty('Alpha', alpha)
obj.setProperty('Point Size', pointSize)
def colorizePoints(polyData):
cameras = ['CAMERACHEST_RIGHT', 'CAMERACHEST_LEFT', 'CAMERA_LEFT']
for camera in cameras:
cameraview.colorizePoints(polyData, camera)
def colorizeSegmentationLidar(enabled):
obj = om.findObjectByName('pointcloud snapshot')
if not obj:
return
if enabled:
colorizePoints(obj.polyData)
else:
obj.polyData.GetPointData().RemoveArray('rgb')
setVisProperties(obj, enabled)
_colorizeMapNames = ['HEIGHT_MAP_SCENE', 'SCANS_HALF_SWEEP']
def colorizeMapCallback(obj):
if obj and obj.getProperty('Name') in _colorizeMapNames:
colorizePoints(obj.polyData)
obj._updateColorByProperty()
obj.setProperty('Color By', 'rgb')
def colorizeMaps(enabled):
if enabled:
om.findObjectByName('Map Server').source.colorizeCallback = colorizeMapCallback
for name in _colorizeMapNames:
colorizeMapCallback(om.findObjectByName(name))
else:
om.findObjectByName('Map Server').source.colorizeCallback = None
def colorizeMultisense(enabled):
obj = om.findObjectByName('Multisense')
if not obj:
return
setVisProperties(obj, enabled)
colorBy = 'Camera RGB' if enabled else 'Solid Color'
obj.setProperty('Color By', colorBy)
def colorizeMapsOff():
obj = om.findObjectByName('Map Server')
obj.source.colorizeCallback = None
alpha = 0.7
pointSize = 1.0
obj.setProperty('Alpha', alpha)
obj.setProperty('Point Size', pointSize)
def onColorizeLidar():
colorizeEnabled = app.getToolBarActions()[actionName].checked
colorizeMaps(colorizeEnabled)
colorizeMultisense(colorizeEnabled)
colorizeSegmentationLidar(colorizeEnabled)
def initColorizeCallbacks():
obj = om.findObjectByName('Multisense')
assert(obj)
def callback():
colorizePoints(obj.model.polyDataObj.polyData)
obj.model.colorizeCallback = callback
def init():
action = app.getToolBarActions()[actionName]
action.connect(action, 'triggered()', onColorizeLidar)
initColorizeCallbacks()
|
#!/usr/bin/env python
"""
plot position data from a time, position, rotation file
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib.pyplot as plt
import sys
import re
fig = plt.figure()
ax = Axes3D(fig)
# HRC:POINTS: [2448.59; -1015.19; -206.10] [2445.40; -1063.05; -203.92] [2439.18; -1180.01; -203.92] [2433.39; -1298.93; -203.92]
points_re = re.compile(r'HRC:POINTS: \[(.*); (.*); (.*)\] \[(.*);(.*); (.*)\] \[(.*); (.*); (.*)\] \[(.*); (.*); (.*)\]')
good_points = []
bad_points = []
with open('HumanTraceBotOsiris2-good.log') as f:
for line in f:
m = points_re.match(line.strip())
if m:
good_points.append([float(x) for x in m.groups()])
with open('HumanTraceBotOsiris2-bad.log') as f:
for line in f:
m = points_re.match(line.strip())
if m:
bad_points.append([float(x) for x in m.groups()])
label = 'good'
good_points = np.array(good_points)
bad_points = np.array(bad_points)
for r in range(np.size(good_points,0)):
row = good_points[r,3:]
seg = row.reshape((3,3))
ax.plot(seg[:,0], seg[:,1], seg[:,2], label=label,color='green')
label = '_nolabel_'
ax.plot(good_points[:,0], good_points[:,1], good_points[:,2], linestyle='None', marker='x', color='blue', label='good path')
label = 'bad'
for r in range(np.size(bad_points,0)):
row = bad_points[r,3:]
seg = row.reshape((3,3))
ax.plot(seg[:,0], seg[:,1], seg[:,2], label=label,color='red')
label = '_nolabel_'
ax.plot(bad_points[:,0], bad_points[:,1], bad_points[:,2], linestyle='None', marker='x', color='black', label='bad path')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
|
import argparse
import json
import logging
from git import Repo
import os
import sys
import re
import traceback
import services
from services.base_service import Service
from helpers.json_skeletons import JSONSkeleton
from importlib import import_module
from helpers.api import VeracodeAPI
from helpers.exceptions import VeracodeAPIError
from helpers.exceptions import VeracodeError
import configparser
banner = """
_ _ _
__ __ ___ _ __ __ _ ___ ___ __| | ___ ___ | |(_)
\ \ / // _ \| '__|/ _` | / __|/ _ \ / _` | / _ \ _____ / __|| || |
\ V /| __/| | | (_| || (__| (_) || (_| || __/|_____|| (__ | || |
\_/ \___||_| \__,_| \___|\___/ \__,_| \___| \___||_||_|
"""
readme = banner + """This Command Line Interface for Veracode is based on 3 key principles:
* Services and Commands: The Veracode functionality is broken up into
a set of Services (e.g. static, dynamic, admin, etc.). Each Service
has a limited set of Commands that can be executed (e.g. start). The
Commands represent higher level operations than the underlying API
calls (e.g. the start command in the static service will create a
scan, upload files and begin the scanning process).
* Configuration : Config information for each Service is stored
in a JSON formatted file (veracode-cli.config) which is managed within
the Git repository for the application code. Each Service provides
a skeleton command which will generate a default configuration
block for the Service in the config file.
* Branches : Each Veracode Service can be configured and used
differently based on the Git Branch that is being worked on. The
config file supports regular expression matching between the Branch
Name (supplied as an argument) and the configuration blocks in the file.
When the CLI is executed it will produce a JSON output which is either
written to a file (veracode-cli.output) or to the console. The default
option is file. The output can also be used as additional input for
subsequent commands.
For example:
"veracode-cli static start" will produce output that includes the build_id
of the scan that has been started. When "veracode-cli static results" is
executed it will use the build_id to identify which scan to retrieve results
from, and those results will be outputted. Next, when
"veracode-cli static tickets" is executed it will use those results to
synchronise with the configured ticketing system.
For more information use the -h option to show Help.
"""
creds_warning = """There don't seem to be any Veracode API credentials configured. If you
don't have any credentials then see the following page on the Veracode
Help Center for more information.
https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/RUQ3fCrA~jO2ff1G3t0ctg
Alternatively contact your Veracode Admin team or Security Program Manager.
Once you have your credentials there are 3 ways to supply them:
* As Environment Variables
Use VID and VKEY
* As arguments on the command line
veracode-cli --vid=<your_id> --vkey=<your_key>
* In a Credentials File
https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/1EGRCXxGvHuj5wxn6h3eXA
"""
########################################################################################################################
# Main entry point
#
def run():
try:
""" setup the main arugment parser """
parser = argparse.ArgumentParser(prog='veracode-cli',
description='A Command Line Interface for interacting with Veracode Services using a local JSON configuration file to manage the settings that are used. For more information use the readme service.')
parser.add_argument("-v", "--vid", type=str, help="API ID for the Veracode Platform user")
parser.add_argument("-k", "--vkey", type=str, help="API Key for the Veracode Platform user")
parser.add_argument("-s", "--stage", type=str,
help="Stage name to be used to select the activities settings")
parser.add_argument("-b", "--branch", type=str,
help="Branch name to be used to select configuration settings")
parser.add_argument("-c", "--console", action="store_true",
help="Should the output be sent the console. If this is enabled then all other console output will be suppressed")
parser.add_argument("-e", "--error", action="store_true",
help="Should the command fail if the veracode-cli.output file contains an error")
""" add sub-parsers for each of the services """
service_parsers = parser.add_subparsers(dest='service', help='Veracode service description')
readme_parser = service_parsers.add_parser('readme', help='show the detailed readme information')
for service_class in Service.services:
service_class.add_parser(service_parsers)
""" parse the command line """
args = parser.parse_args()
""" set up the output_data object """
output_data = {}
""" Just show the Readme? """
if args.service == 'readme':
""" show the readme file information """
print(readme)
return 0
if not args.console:
print(banner)
""" ------------------------------------------------ """
""" First thing to do is get the VeracodeAPI created """
""" ------------------------------------------------ """
try:
if args.vid is None or args.vkey is None:
""" OK, lets try the environment variables... """
args.vid = os.environ.get("VID")
args.vkey = os.environ.get("VKEY")
if args.vid is None or args.vid == "" or args.vkey is None or args.vkey == "":
""" OK, try for the credentials file instead... """
auth_file = os.path.join(os.path.expanduser("~"), '.veracode', 'credentials')
if not os.path.exists(auth_file):
creds_file = False
else:
creds = configparser.ConfigParser()
creds.read(auth_file)
credentials_section_name = os.environ.get("VERACODE_API_PROFILE", "default")
args.vid = creds.get(credentials_section_name, "VERACODE_API_KEY_ID")
args.vkey = creds.get(credentials_section_name, "VERACODE_API_KEY_SECRET")
if args.vid is None or args.vid == "" or args.vkey is None or args.vkey == "":
""" warning and guidance on credentials """
output_data["error"] = creds_warning
return
else:
try:
""" create the Veracode API instance """
api = VeracodeAPI(None, args.vid, args.vkey)
except:
""" error message about incorrect credentials """
print(f'{"exception":10} : Unexpected Exception #001 : {sys.exc_info()[0]}')
except UnboundLocalError as ule1:
""" Unexpected Exception """
print("Different Unexpected error creating the API object : " + str(ule1))
except:
""" Unexpected Exception """
print(f'{"exception":10} : {"Unexpected Exception #002 :", sys.exc_info()[0]}')
""" -------------------------------------------------------------- """
""" Next we need to load the Configuration data for the Branch """
""" -------------------------------------------------------------- """
try:
with open('veracode-cli.config') as json_file:
config = json.load(json_file)
except FileNotFoundError:
if not args.console:
print( "The veracode-cli.config file was not found at " + os.getcwd())
config = {}
config["error"] = "Config File not found"
except:
""" Unexpected Exception """
print(f'{"exception":10} : {"Unexpected Exception #003 :", sys.exc_info()[0]}')
""" what Branch are we working on """
if args.branch is None:
""" get the current repository"""
repo = Repo(os.path.curdir)
if repo.bare:
raise VeracodeError("No usable Git Repository found. Unable to identify active branch.")
args.branch = repo.active_branch
if not args.console:
print("Using '" + str(args.branch) + "' as active branch")
""" Get the Branch Configuration segment """
branch_config = None
for segment in config["branches"]:
if re.match("^"+segment["branch_pattern"]+"$", str(args.branch)):
""" This is the config to use... """
branch_config = segment
break
if branch_config is None:
""" """
raise VeracodeError("No Static Scan Configuration found for branch '" + str(args.branch) + "'")
""" ------------------------------------------------- """
""" Next we need to load any Context data that exists """
""" ------------------------------------------------- """
if not args.console:
try:
with open('veracode-cli.output') as json_file:
context = json.load(json_file)
except FileNotFoundError:
context = {}
except:
""" Unexpected Exception """
print(f'{"exception":10} : {"Unexpected Exception #004 :", sys.exc_info()[0]}')
else:
""" Load the data from stdin """
context = json.load(sys.stdin.readlines())
""" initialise the output data with the previous context """
output_data = context
""" Was there an error in the previous context? """
if args.error is True and "error" in context and context["error"] is not None:
output_data["error"] = f'{"exception":10} : Error in veracode-cli.context - {context["error"]}'
if not args.console:
print(output_data["error"])
return output_data
output_data["error"] = None
if args.service is None:
""" ------------------------------------------------------------------------- """
""" If no Service was provided as an Argument then use Activities from config """
""" ------------------------------------------------------------------------- """
""" which Activities should we perform? """
if args.stage is None:
if not args.console:
print("No Stage specified. Unable to proceed")
output_data["error"] = "No Stage specified. Unable to proceed"
else:
""" find the right stage in the config """
stage_config = None
for segment in config["stages"]:
if re.match("^" + segment["stage_pattern"] + "$", str(args.stage)):
""" This is the config to use... """
stage_config = segment
break
if stage_config is None:
""" couldn't find the stage """
output_data["error"] = ""
raise VeracodeError("No Stage Configuration found for stage '" + str(args.stage) + "'")
else:
""" --------------------------------------------------------------- """
""" Service was provided as an Argument so execute that one service """
""" --------------------------------------------------------------- """
if not args.console:
print(f'{"service":10} : {args.service}')
print(f'{"command":10} : {args.command}')
for arg in vars(args):
if arg not in ("service", "command", "vid", "vkey", "None"):
print(f'{arg:10} : {getattr(args, arg)}')
print(f'{"context":10} : {context}')
print()
""" load the relevant service class """
service = my_import('services.' + args.service + '.' + args.service)
instance = service()
""" execute the service """
output_data = instance.execute(args, branch_config, api, context)
except KeyboardInterrupt:
if not args.console:
print(f'{"exception":10} : {"Keyboard Interrupt. Exiting..."}')
except VeracodeError as verr:
output_data["error"] = "Veracode Error occurred: " + str(verr)
if not args.console:
print(f'{"exception":10} : {output_data["error"]}')
except UnboundLocalError as ule:
if not args.console:
print(f'{"exception":10} : {"UnboundLocalError - " + str(ule)}')
except AttributeError as ae:
if not args.console:
print(f'{"exception":10} : {"AttributeError - " + str(ae)}')
traceback.print_exc()
except:
""" Unexpected Exception """
print(f'{"exception":10} : Unexpected Exception #005 - {sys.exc_info()[0]}')
traceback.print_exc()
finally:
if not args.console:
print()
""" if there's an error then lets print it out"""
if "error" in output_data:
if output_data["error"] is not None:
print(output_data["error"])
""" send the output to veracode-cli.output """
with open('veracode-cli.output', 'w') as outfile:
json.dump(output_data, outfile, indent=4, sort_keys=True)
""" Always output to the console """
print(output_data)
if "error" in output_data:
if output_data["error"] is not None:
return 1
else:
return 0
else:
return 0
def get_service(name):
kludge = 'services.' + name + '.' + name
components = kludge.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def my_import(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def start():
try:
return run()
except KeyboardInterrupt:
print("\r\nExiting")
if __name__ == "__main__":
error = start()
exit(error)
|
class Solution(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if nums == []:
return []
res = []
sorted_nums = []
rev = nums[::-1]
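# Walk the array from right to left, keeping `sorted_nums` as an ascending list of the
# elements already seen (i.e. those to the right of the current one). The binary search
# below finds the left-most insertion index for `num`, which is exactly the count of
# strictly smaller elements to its right; inserting `num` there keeps the list sorted.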
for num in rev:
l, r = 0, len(res) - 1
while l <= r:
m = l + (r - l) // 2
if num <= sorted_nums[m]:
r = m - 1
else:
l = m + 1
res.insert(0, l)
sorted_nums.insert(l, num)
return res
|
# countdown to the New Year
from time import sleep
for c in range(10, -1, -1):
print(c)
sleep(0.5)
print('BUMM BUMM ')
|
from django.urls import path, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register('', views.UserViewSet, basename='user')
urlpatterns = [
path('register/', views.CreateUserView.as_view(), name='register'),
path('', include(router.urls)),
]
|
import argparse
parser = argparse.ArgumentParser(description='This script is ')
# Test or not args
parser.add_argument('-t', '--test', action='store_true', help='Test or not')
# The coordinate args
parser.add_argument('-lt_x', '--left_top_x', default=0, type=int, help='Left-top x-coordinate')
parser.add_argument('-lt_y', '--left_top_y', default=0, type=int, help='Left-top y-coordinate')
parser.add_argument('-rl_x', '--right_lower_x', default=0, type=int, help='Right-lower x-coordinate')
parser.add_argument('-rl_y', '--right_lower_y', default=0, type=int, help='Right-lower y-coordinate')
args = parser.parse_args()
|
#!/usr/bin/env python
from __future__ import print_function
# Libraries we need
import pyxhook
import time
import subprocess
nounlist = open("nounlist.txt","r")
stringtosearch = ""
# This function is called every time a key is presssed
def kbevent(event):
global running
global stringtosearch
# print key info
#print(event)
#stringtosearch += chr(event.Ascii)
# print (stringtosearch)
# If the ascii value matches spacebar, terminate the while loop
if event.Ascii == 32:
#print (stringtosearch)
if stringtosearch in open("nounlist.txt","r").read():
# p=subprocess.Popen(["firefox","www." + stringtosearch + ".com"])
print ("http://www.wikipedia.org/wiki/" + stringtosearch)
print ("http://www.google.com/#?=" + stringtosearch)
#subprocess.Popen(["firefox", "www.wikipedia.org/wiki/"+ stringtosearch])
stringtosearch = ''
elif event.Ascii == 27:
#print (stringtosearch)
running = False
else:
stringtosearch += chr(event.Ascii)
#print (stringtosearch)
# Create hookmanager
hookman = pyxhook.HookManager()
# Define our callback to fire when a key is pressed down
hookman.KeyDown = kbevent
# Hook the keyboard
hookman.HookKeyboard()
# Start our listener
hookman.start()
# Create a loop to keep the application running
running = True
while running:
time.sleep(0.1)
# Close the listener when we are done
hookman.cancel()
|
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import time
class mqtt_handler:
client = mqtt.Client()
def __init__(self):
return
@classmethod
def subscribe(cls):
cls.client.connect("broker.mqttdashboard.com", port=1883, keepalive=60)
cls.client.loop_start()
@classmethod
def publish(cls, message):
cls.client.publish("robotline/ra1", message)
@classmethod
def cancel(cls):
cls.client.loop_stop()
cls.client.disconnect()
mqtt = mqtt_handler()
mqtt.subscribe()
mqtt.publish("message send")
|
import maze.__maze as __maze
__maze.createMaze()
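# Each room in __maze.mazeLayout is a 5-character string of '0'/'1' flags read by the helpers
# below: index 0 = can move left, 1 = up, 2 = right, 3 = down, and index 4 = 1 marks the exit
# (see isMazeSolved). getInitialDirection relies on the first room having exactly one open side.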
def __canMoveLeft(curPos: []):
room = __maze.mazeLayout[curPos[0]][curPos[1]]
if int(room[0]):
return True
else:
return False
def __canMoveTop(curPos: []):
room = __maze.mazeLayout[curPos[0]][curPos[1]]
if int(room[1]):
return True
else:
return False
def __canMoveRight(curPos: []):
room = __maze.mazeLayout[curPos[0]][curPos[1]]
if int(room[2]):
return True
else:
return False
def __canMoveDown(curPos: []) -> bool:
room = __maze.mazeLayout[curPos[0]][curPos[1]]
if int(room[3]):
return True
else:
return False
def canMoveInDirection(direction: str, curPos: []) -> bool:
if direction == 'left':
return __canMoveLeft(curPos)
if direction == 'right':
return __canMoveRight(curPos)
if direction == 'up':
return __canMoveTop(curPos)
if direction == 'down':
return __canMoveDown(curPos)
raise Exception('Allowed values are left,right,up or down')
def moveInDirection(direction: str,
curPos: []) -> []:
canMove = canMoveInDirection(direction, curPos)
if not canMove:
raise Exception('Cannot move in the direction:%s in the position:[%s,%s]' % (direction, curPos[0], curPos[1]))
if direction == 'left':
curPos[1] = curPos[1] - 1
elif direction == 'right':
curPos[1] = curPos[1] + 1
elif direction == 'up':
curPos[0] = curPos[0] - 1
elif direction == 'down':
curPos[0] = curPos[0] + 1
return curPos
def getInitialDirection():
firstRoom = __maze.mazeLayout[0][0]
if firstRoom == '10000':
return 'left'
if firstRoom == '01000':
return 'up'
if firstRoom == '00100':
return 'right'
if firstRoom == '00010':
return 'down'
raise Exception("The first element of the maze layout should be one of '10000','01000','00100' or '00010'")
def moveInDirectionIfStillInsideMaze(direction: str,curPos: []) -> bool:
try:
moveInDirection(direction, curPos)
except IndexError:
return False
return True
def isMazeSolved(curPos: []) -> bool:
data = __maze.mazeLayout[curPos[0]][curPos[1]]
if int(data[4]) == 1:
return True
return False
|
import dsz
import sqlite3
import sys
if (__name__ == '__main__'):
save_flags = dsz.control.Method()
dsz.control.echo.Off()
if (dsz.script.Env['script_parent_echo_disabled'].lower() != 'false'):
dsz.control.quiet.On()
if (len(sys.argv) != 3):
dsz.ui.Echo(('Invalid number of arguments supplied. Expected 3 (including program name), received %d.' % len(sys.argv)))
print ('For debugging purposes:\n%s' % sys.argv)
sys.exit((-1))
database_file = sys.argv[1]
sql_statement = sys.argv[2]
dsz.script.data.Start('sqlstatementinfo')
dsz.script.data.Add('database_file', database_file, dsz.TYPE_STRING)
dsz.script.data.Add('sql_statement', sql_statement, dsz.TYPE_STRING)
dsz.script.data.Store()
db = sqlite3.connect(database_file)
c = db.cursor()
rows = c.execute(sql_statement).fetchall()
if (len(rows) > 0):
for r in rows:
dsz.script.data.Start('row')
d = 0
for c in r:
dsz.script.data.Add(('column%d' % d), str(c), dsz.TYPE_STRING)
d += 1
dsz.script.data.Store()
|
import pytest
from aioarango import ArangoClient
from aioarango.connection import BasicConnection, JwtConnection, JwtSuperuserConnection
from aioarango.errno import FORBIDDEN, HTTP_UNAUTHORIZED
from aioarango.exceptions import (
JWTAuthError,
JWTSecretListError,
JWTSecretReloadError,
ServerEncryptionError,
ServerTLSError,
ServerTLSReloadError,
ServerVersionError,
)
from tests.helpers import assert_raises, generate_jwt, generate_string
pytestmark = pytest.mark.asyncio
async def test_auth_invalid_method(client: ArangoClient, db_name, username, password):
with assert_raises(ValueError) as err:
await client.db(
name=db_name,
username=username,
password=password,
verify=True,
auth_method="bad_method",
)
assert "invalid auth_method" in str(err.value)
async def test_auth_basic(client: ArangoClient, db, db_name, username, password):
db = await client.db(
name=db_name,
username=username,
password=password,
verify=True,
auth_method="basic",
)
assert isinstance(db.conn, BasicConnection)
assert isinstance(await db.version(), str)
assert isinstance(await db.properties(), dict)
async def test_auth_jwt(client: ArangoClient, db, db_name, username, password):
db = await client.db(
name=db_name,
username=username,
password=password,
verify=True,
auth_method="jwt",
)
assert isinstance(db.conn, JwtConnection)
assert isinstance(await db.version(), str)
assert isinstance(await db.properties(), dict)
bad_password = generate_string()
with assert_raises(JWTAuthError) as err:
await client.db(db_name, username, bad_password, auth_method="jwt")
assert err.value.error_code == HTTP_UNAUTHORIZED
# TODO re-examine commented out code
@pytest.mark.skip(reason="ArangoDB is not configured to use JWT auth.")
async def test_auth_superuser_token(client, db_name, root_password, secret):
token = generate_jwt(secret)
db = await client.db("_system", superuser_token=token)
bad_db = await client.db("_system", superuser_token="bad_token")
assert isinstance(db.conn, JwtSuperuserConnection)
assert isinstance(await db.version(), str)
assert isinstance(await db.properties(), dict)
# # Test get JWT secrets
# secrets = db.jwt_secrets()
# assert 'active' in secrets
# assert 'passive' in secrets
# Test get JWT secrets with bad database
with assert_raises(JWTSecretListError) as err:
await bad_db.jwt_secrets()
assert err.value.error_code == FORBIDDEN
# # Test reload JWT secrets
# secrets = db.reload_jwt_secrets()
# assert 'active' in secrets
# assert 'passive' in secrets
# Test reload JWT secrets with bad database
with assert_raises(JWTSecretReloadError) as err:
await bad_db.reload_jwt_secrets()
assert err.value.error_code == FORBIDDEN
# Test get TLS data
result = await db.tls()
assert isinstance(result, dict)
# Test get TLS data with bad database
with assert_raises(ServerTLSError) as err:
await bad_db.tls()
assert err.value.error_code == FORBIDDEN
# Test reload TLS
result = await db.reload_tls()
assert isinstance(result, dict)
# Test reload TLS with bad database
with assert_raises(ServerTLSReloadError) as err:
await bad_db.reload_tls()
assert err.value.error_code == FORBIDDEN
# # Test get encryption
# result = db.encryption()
# assert isinstance(result, dict)
    # Test reload user-defined encryption keys with bad database.
with assert_raises(ServerEncryptionError) as err:
await bad_db.encryption()
assert err.value.error_code == FORBIDDEN
async def test_auth_jwt_expiry(client, db_name, root_password, secret):
# Test automatic token refresh on expired token.
db = await client.db("_system", "root", root_password, auth_method="jwt")
expired_token = generate_jwt(secret, exp=-1000)
db.conn._token = expired_token
db.conn._auth_header = f"bearer {expired_token}"
assert isinstance(await db.version(), str)
# Test correct error on token expiry.
db = await client.db("_system", superuser_token=expired_token)
with assert_raises(ServerVersionError) as err:
await db.version()
assert err.value.error_code == FORBIDDEN
|
# Source: https://github.com/willwhite/freemail/
# Copyright (c) 2015, Will White <will@mapbox.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
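#
# Usage sketch (not part of the upstream freemail list): `is_free_email` is an
# assumed helper, shown only to illustrate how the frozenset below is typically
# consumed: take the domain part of an address and test it for membership.
#
#     def is_free_email(address: str) -> bool:
#         """Return True if the address uses a known free-mail provider."""
#         domain = address.rsplit("@", 1)[-1].strip().lower()
#         return domain in FREE_EMAIL_DOMAINS
#
#     is_free_email("someone@gmail.com")    # True, "gmail.com" is listed
#     is_free_email("someone@example.org")  # False, not in the set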
FREE_EMAIL_DOMAINS = frozenset(
(
"1033edge.com",
"11mail.com",
"123.com",
"123box.net",
"123india.com",
"123mail.cl",
"123mail.org",
"123qwe.co.uk",
"126.com",
"126.net",
"139.com",
"150mail.com",
"150ml.com",
"15meg4free.com",
"163.com",
"16mail.com",
"188.com",
"189.cn",
"1coolplace.com",
"1freeemail.com",
"1funplace.com",
"1internetdrive.com",
"1mail.net",
"1me.net",
"1mum.com",
"1musicrow.com",
"1netdrive.com",
"1nsyncfan.com",
"1under.com",
"1webave.com",
"1webhighway.com",
"2-mail.com",
"212.com",
"24horas.com",
"2911.net",
"2980.com",
"2bmail.co.uk",
"2d2i.com",
"2die4.com",
"2trom.com",
"3000.it",
"30minutesmail.com",
"3126.com",
"321media.com",
"3675.mooo.com",
"37.com",
"3ammagazine.com",
"3dmail.com",
"3email.com",
"3g.ua",
"3xl.net",
"404: not found",
"444.net",
"4email.com",
"4email.net",
"4mg.com",
"4newyork.com",
"4x4man.com",
"50mail.com",
"5iron.com",
"5star.com",
"74.ru",
"88.am",
"88.com",
"8848.net",
"888.luk2.com",
"888.nu",
"97rock.com",
"99.com",
"a1.net",
"aa.da.mail-temp.com",
"aaamail.zzn.com",
"aamail.net",
"aapt.net.au",
"aaronkwok.net",
"abbeyroadlondon.co.uk",
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk.com",
"abcflash.net",
"abdulnour.com",
"aberystwyth.com",
"abolition-now.com",
"about.com",
"abv.bg",
"abwesend.de",
"academycougars.com",
"acceso.or.cr",
"access4less.net",
"accessgcc.com",
"accountant.com",
"acdcfan.com",
"ace-of-base.com",
"acmecity.com",
"acmemail.net",
"acninc.net",
"activatormail.com",
"activist.com",
"adam.com.au",
"addcom.de",
"address.com",
"adelphia.net",
"adexec.com",
"adfarrow.com",
"adios.net",
"adoption.com",
"ados.fr",
"adrenalinefreak.com",
"advalvas.be",
"aeiou.pt",
"aemail4u.com",
"aeneasmail.com",
"afreeinternet.com",
"africamail.com",
"africamel.net",
"ag.us.to",
"agoodmail.com",
"ahaa.dk",
"ahgae-crews.us.to",
"ai.aax.cloudns.asia",
"aichi.com",
"aim.com",
"aircraftmail.com",
"airforce.net",
"airforceemail.com",
"airmail.cc",
"airpost.net",
"ajacied.com",
"ak47.hu",
"aknet.kg",
"albawaba.com",
"alecsmail.com",
"alex4all.com",
"alexandria.cc",
"algeria.com",
"alhilal.net",
"alibaba.com",
"alice.it",
"alicedsl.de",
"alive.cz",
"aliyun.com",
"allergist.com",
"allmail.net",
"alloymail.com",
"allracing.com",
"allsaintsfan.com",
"alltel.net",
"alpenjodel.de",
"alphafrau.de",
"alskens.dk",
"altavista.com",
"altavista.net",
"altavista.se",
"alternativagratis.com",
"alumni.com",
"alumnidirector.com",
"alvilag.hu",
"amele.com",
"america.hm",
"ameritech.net",
"amnetsal.com",
"amorki.pl",
"amrer.net",
"amuro.net",
"amuromail.com",
"ananzi.co.za",
"ancestry.com",
"andylau.net",
"anfmail.com",
"angelfan.com",
"angelfire.com",
"angelic.com",
"animail.net",
"animal.net",
"animalhouse.com",
"animalwoman.net",
"anjungcafe.com",
"annsmail.com",
"anonymous.to",
"anote.com",
"another.com",
"anotherwin95.com",
"anti-social.com",
"antisocial.com",
"antongijsen.com",
"antwerpen.com",
"anymoment.com",
"anytimenow.com",
"aol.co.uk",
"aol.com",
"aol.fr",
"aon.at",
"apagitu.chickenkiller.com",
"apexmail.com",
"apmail.com",
"apollo.lv",
"aport.ru",
"aport2000.ru",
"apple.dnsabr.com",
"appraiser.net",
"approvers.net",
"arabia.com",
"arabtop.net",
"arcademaster.com",
"archaeologist.com",
"arcor.de",
"arcotronics.bg",
"arcticmail.com",
"argentina.com",
"aristotle.org",
"army.net",
"arnet.com.ar",
"artlover.com",
"artlover.com.au",
"as-if.com",
"asean-mail",
"asean-mail.com",
"asheville.com",
"asia-links.com",
"asia-mail.com",
"asia.com",
"asiafind.com",
"asianavenue.com",
"asiancityweb.com",
"asiansonly.net",
"asianwired.net",
"asiapoint.net",
"assala.com",
"assamesemail.com",
"astroboymail.com",
"astrolover.com",
"astrosfan.com",
"astrosfan.net",
"asurfer.com",
"atheist.com",
"athenachu.net",
"atina.cl",
"atl.lv",
"atlaswebmail.com",
"atlink.com",
"ato.check.com",
"atozasia.com",
"atrus.ru",
"att.net",
"attglobal.net",
"attymail.com",
"au.ru",
"auctioneer.net",
"ausi.com",
"aussiemail.com.au",
"austin.rr.com",
"australia.edu",
"australiamail.com",
"austrosearch.net",
"autoescuelanerja.com",
"autograf.pl",
"automotiveauthority.com",
"autorambler.ru",
"aver.com",
"avh.hu",
"awsom.net",
"axoskate.com",
"ayna.com",
"azimiweb.com",
"azure.cloudns.asia",
"bacapedia.web.id",
"bachelorboy.com",
"bachelorgal.com",
"backpackers.com",
"backstreet-boys.com",
"backstreetboysclub.com",
"backwards.com",
"bagherpour.com",
"bahrainmail.com",
"baldmama.de",
"baldpapa.de",
"ballyfinance.com",
"bangkok.com",
"bangkok2000.com",
"bannertown.net",
"baptistmail.com",
"baptized.com",
"barcelona.com",
"bartender.net",
"baseballmail.com",
"basketballmail.com",
"batuta.net",
"baudoinconsulting.com",
"bboy.zzn.com",
"bcvibes.com",
"beeebank.com",
"beenhad.com",
"beep.ru",
"beer.com",
"beethoven.com",
"belanjaonlineku.web.id",
"belice.com",
"belizehome.com",
"bell.net",
"bellair.net",
"bellsouth.net",
"berkscounty.com",
"berlin.com",
"berlin.de",
"berlinexpo.de",
"bestmail.us",
"betriebsdirektor.de",
"bettergolf.net",
"bharatmail.com",
"bigassweb.com",
"bigblue.net.au",
"bigboab.com",
"bigfoot.com",
"bigfoot.de",
"bigger.com",
"biggerbadder.com",
"bigmailbox.com",
"bigmir.net",
"bigpond.com",
"bigpond.com.au",
"bigpond.net.au",
"bigramp.com",
"bigtokenican2.hmail.us",
"bigtokenican3.hmail.us",
"bikemechanics.com",
"bikeracer.com",
"bikeracers.net",
"bikerider.com",
"billsfan.com",
"billsfan.net",
"bimamail.com",
"bimla.net",
"bin-wieder-da.de",
"bio-muesli.info",
"birdlover.com",
"birdowner.net",
"bisons.com",
"bitmail.com",
"bitpage.net",
"bizhosting.com",
"bk.ru",
"bla-bla.com",
"blackburnmail.com",
"blackplanet.com",
"blader.com",
"blazemail.com",
"bleib-bei-mir.de",
"blockfilter.com",
"bluebottle.com",
"bluehyppo.com",
"bluemail.ch",
"bluemail.dk",
"bluesfan.com",
"blushmail.com",
"bmlsports.net",
"boardermail.com",
"boatracers.com",
"bol.com.br",
"bolando.com",
"bollywoodz.com",
"bolt.com",
"boltonfans.com",
"bombdiggity.com",
"bonbon.net",
"boom.com",
"bootmail.com",
"bornnaked.com",
"bossofthemoss.com",
"bostonoffice.com",
"bounce.net",
"box.az",
"box.ua",
"boxbg.com",
"boxemail.com",
"boxfrog.com",
"boyzoneclub.com",
"bradfordfans.com",
"brasilia.net",
"bratwurst.dnsabr.com",
"brazilmail.com",
"brazilmail.com.br",
"breathe.com",
"bresnan.net",
"brew-master.com",
"brew-meister.com",
"brfree.com.br",
"briefemail.com",
"bright.net",
"britneyclub.com",
"brittonsign.com",
"broadcast.net",
"brokenvalve.com",
"brusseler.com",
"bsdmail.com",
"btinternet.com",
"btopenworld.co.uk",
"buerotiger.de",
"buffymail.com",
"buford.us.to",
"bullsfan.com",
"bullsgame.com",
"bumerang.ro",
"bumrap.com",
"bunko.com",
"buryfans.com",
"business-man.com",
"businessman.net",
"businessweekmail.com",
"busta-rhymes.com",
"busymail.com",
"busymail.comhomeart.com",
"buyersusa.com",
"bvimailbox.com",
"byteme.com",
"c.nut.emailfake.nut.cc",
"c2.hu",
"c2i.net",
"c3.hu",
"c4.com",
"cabacabana.com",
"cableone.net",
"cad.edu.gr",
"caere.it",
"cairomail.com",
"calidifontain.be",
"californiamail.com",
"callnetuk.com",
"callsign.net",
"caltanet.it",
"camidge.com",
"canada-11.com",
"canada.com",
"canadianmail.com",
"canoemail.com",
"canwetalk.com",
"caramail.com",
"care2.com",
"careerbuildermail.com",
"carioca.net",
"cartestraina.ro",
"casablancaresort.com",
"casema.nl",
"cash4u.com",
"cashette.com",
"casino.com",
"catcha.com",
"catchamail.com",
"catholic.org",
"catlover.com",
"catsrule.garfield.com",
"ccnmail.com",
"cd2.com",
"celineclub.com",
"celtic.com",
"center-mail.de",
"centermail.at",
"centermail.de",
"centermail.info",
"centoper.it",
"centralpets.com",
"centrum.cz",
"centrum.sk",
"centurytel.net",
"certifiedmail.com",
"cfl.rr.com",
"cgac.es",
"cghost.s-a-d.de",
"chaiyomail.com",
"chance2mail.com",
"chandrasekar.net",
"charmedmail.com",
"charter.com",
"charter.net",
"chat.ru",
"chattown.com",
"chauhanweb.com",
"check.com",
"check.com12",
"check1check.com",
"cheerful.com",
"chef.net",
"chek.com",
"chello.at",
"chello.nl",
"chemist.com",
"chequemail.com",
"cheyenneweb.com",
"chez.com",
"chickmail.com",
"childrens.md",
"china.com",
"china.net.vg",
"chinalook.com",
"chinamail.com",
"chirk.com",
"chocaholic.com.au",
"christianmail.net",
"churchusa.com",
"cia-agent.com",
"cia.hu",
"ciaoweb.it",
"cicciociccio.com",
"cincinow.net",
"citeweb.net",
"citiz.net",
"citlink.net",
"city-of-bath.org",
"city-of-birmingham.com",
"city-of-brighton.org",
"city-of-cambridge.com",
"city-of-coventry.com",
"city-of-edinburgh.com",
"city-of-lichfield.com",
"city-of-lincoln.com",
"city-of-liverpool.com",
"city-of-manchester.com",
"city-of-nottingham.com",
"city-of-oxford.com",
"city-of-swansea.com",
"city-of-westminster.com",
"city-of-westminster.net",
"city-of-york.net",
"city2city.com",
"cityofcardiff.net",
"cityoflondon.org",
"claramail.com",
"classicalfan.com",
"classicmail.co.za",
"clerk.com",
"cliffhanger.com",
"close2you.ne",
"close2you.net",
"club4x4.net",
"clubalfa.com",
"clubbers.net",
"clubducati.com",
"clubhonda.net",
"clubmember.org",
"clubnetnoir.com",
"clubvdo.net",
"cluemail.com",
"cmpmail.com",
"cnnsimail.com",
"cntv.cn",
"cocaine.ninja",
"codec.ro",
"codec.roemail.ro",
"coder.hu",
"coid.biz",
"cok.3utilities.com",
"coldmail.com",
"collectiblesuperstore.com",
"collector.org",
"collegebeat.com",
"collegeclub.com",
"collegemail.com",
"colleges.com",
"columbus.rr.com",
"columbusrr.com",
"columnist.com",
"comast.com",
"comast.net",
"comcast.com",
"comcast.net",
"comic.com",
"communityconnect.com",
"comprendemail.com",
"compuserve.com",
"computer-freak.com",
"computer4u.com",
"computermail.net",
"conexcol.com",
"conk.com",
"connect4free.net",
"connectbox.com",
"conok.com",
"consultant.com",
"contractor.net",
"contrasto.cu.cc",
"cookiemonster.com",
"cool.br",
"coole-files.de",
"coolgoose.ca",
"coolgoose.com",
"coolkiwi.com",
"coollist.com",
"coolmail.com",
"coolmail.net",
"coolsend.com",
"coolsite.net",
"cooooool.com",
"cooperation.net",
"cooperationtogo.net",
"copacabana.com",
"cornells.com",
"cornerpub.com",
"corporatedirtbag.com",
"correo.terra.com.gt",
"cortinet.com",
"cotas.net",
"counsellor.com",
"countrylover.com",
"cousinit.mooo.com",
"cox.com",
"cox.net",
"coxinet.net",
"cpaonline.net",
"cr3wmail.sytes.net",
"cracker.hu",
"crazedanddazed.com",
"crazysexycool.com",
"creo.cloudns.cc",
"cristianemail.com",
"critterpost.com",
"croeso.com",
"crosshairs.com",
"crosswinds.net",
"crwmail.com",
"cry4helponline.com",
"cs.com",
"csinibaba.hu",
"cuemail.com",
"cum.sborra.tk",
"curio-city.com",
"curtsmail.com",
"cute-girl.com",
"cuteandcuddly.com",
"cutey.com",
"cww.de",
"cyber-africa.net",
"cyber-matrix.com",
"cyber-wizard.com",
"cyber4all.com",
"cyberbabies.com",
"cybercafemaui.com",
"cyberdude.com",
"cyberforeplay.net",
"cybergal.com",
"cybergrrl.com",
"cyberinbox.com",
"cyberleports.com",
"cybermail.net",
"cybernet.it",
"cyberservices.com",
"cyberspace-asia.com",
"cybertrains.org",
"cyclefanz.com",
"cynetcity.com",
"dabsol.net",
"dadacasa.com",
"daha.com",
"dailypioneer.com",
"dallas.theboys.com",
"dallasmail.com",
"dangerous-minds.com",
"dansegulvet.com",
"data54.com",
"davegracey.com",
"dawnsonmail.com",
"dawsonmail.com",
"dazedandconfused.com",
"dbzmail.com",
"deadlymob.org",
"deal-maker.com",
"dearriba.com",
"death-star.com",
"dejanews.com",
"deliveryman.com",
"deneg.net",
"depechemode.com",
"deseretmail.com",
"desertmail.com",
"desilota.com",
"deskmail.com",
"deskpilot.com",
"destin.com",
"detik.com",
"deutschland-net.com",
"devotedcouples.com",
"dezigner.ru",
"dfwatson.com",
"dgd.mail-temp.com",
"di-ve.com",
"die-besten-bilder.de",
"die-genossen.de",
"die-optimisten.de",
"die-optimisten.net",
"diemailbox.de",
"digibel.be",
"digital-filestore.de",
"diplomats.com",
"directbox.com",
"dirtracer.com",
"dirtracers.com",
"disciples.com",
"discofan.com",
"discovery.com",
"discoverymail.com",
"disinfo.net",
"disposable.com",
"dmailman.com",
"dmaji.ddns.net",
"dmtc.edu.pl",
"dnsmadeeasy.com",
"doctor.com",
"dodo.com.au",
"dog.com",
"dogit.com",
"doglover.com",
"dogmail.co.uk",
"dogsnob.net",
"doityourself.com",
"doneasy.com",
"donjuan.com",
"dontgotmail.com",
"dontmesswithtexas.com",
"doramail.com",
"dostmail.com",
"dotcom.fr",
"dott.it",
"download-privat.de",
"dplanet.ch",
"dr.com",
"dragoncon.net",
"dragracer.com",
"dropzone.com",
"drotposta.hu",
"dubaimail.com",
"dublin.com",
"dublin.ie",
"dumpmail.com",
"dunlopdriver.com",
"dunloprider.com",
"duno.com",
"dutchmail.com",
"dvx.dnsabr.com",
"dwp.net",
"dygo.com",
"dynamitemail.com",
"dyndns.org",
"e-apollo.lv",
"e-mail.com.tr",
"e-mail.dk",
"e-mail.ru",
"e-mail.ua",
"e-mailanywhere.com",
"e-mails.ru",
"e-tapaal.com",
"ea.luk2.com",
"earthalliance.com",
"earthcam.net",
"earthdome.com",
"earthling.net",
"earthlink.net",
"earthonline.net",
"eastcoast.co.za",
"eastmail.com",
"easy.to",
"easypost.com",
"eatmydirt.com",
"ecardmail.com",
"ecbsolutions.net",
"echina.com",
"ecompare.com",
"edmail.com",
"ednatx.com",
"edtnmail.com",
"educacao.te.pt",
"educastmail.com",
"ehmail.com",
"eintagsmail.de",
"eircom.net",
"ekidz.com.au",
"elitemail.org",
"elsitio.com",
"elvis.com",
"elvisfan.com",
"email-london.co.uk",
"email.biz",
"email.com",
"email.cz",
"email.ee",
"email.it",
"email.nu",
"email.org",
"email.ro",
"email.ru",
"email.si",
"email.su",
"email.tst",
"email.ua",
"email.women.com",
"email2me.net",
"email4u.info",
"emailacc.com",
"emailaccount.com",
"emailasso.net",
"emailchoice.com",
"emailcorner.net",
"emailem.com",
"emailengine.net",
"emailengine.org",
"emailforyou.net",
"emailgroups.net",
"emailit.com",
"emailpinoy.com",
"emailplanet.com",
"emailplus.org",
"emails.ru",
"emailuser.net",
"emailx.net",
"ematic.com",
"embarqmail.com",
"eml.cc",
"emumail.com",
"end-war.com",
"enel.net",
"engineer.com",
"england.com",
"england.edu",
"englandmail.com",
"epage.ru",
"epatra.com",
"epicgamers.mooo.com",
"epix.net",
"epost.de",
"eposta.hu",
"eqqu.com",
"eqr.luk2.com",
"eramail.co.za",
"eresmas.com",
"eriga.lv",
"estranet.it",
"ethos.st",
"etoast.com",
"etrademail.com",
"eudoramail.com",
"europamel.net",
"europe.com",
"europemail.com",
"euroseek.com",
"eurosport.com",
"every1.net",
"everyday.com.kh",
"everymail.net",
"everyone.net",
"examnotes.net",
"excite.co.jp",
"excite.com",
"excite.it",
"execs.com",
"exemail.com.au",
"expressasia.com",
"extenda.net",
"extended.com",
"eyou.com",
"ezcybersearch.com",
"ezmail.egine.com",
"ezmail.ru",
"ezrs.com",
"f-m.fm",
"f1fans.net",
"facebook.com",
"fahr-zur-hoelle.org",
"falseaddress.com",
"fan.com",
"fan.theboys.com",
"fansonlymail.com",
"fantasticmail.com",
"farang.net",
"farifluset.mailexpire.com",
"faroweb.com",
"fast-email.com",
"fast-mail.org",
"fastem.com",
"fastemail.us",
"fastemailer.com",
"fastermail.com",
"fastest.cc",
"fastimap.com",
"fastmail.ca",
"fastmail.cn",
"fastmail.co.uk",
"fastmail.com",
"fastmail.com.au",
"fastmail.es",
"fastmail.fm",
"fastmail.im",
"fastmail.in",
"fastmail.jp",
"fastmail.mx",
"fastmail.net",
"fastmail.nl",
"fastmail.se",
"fastmail.to",
"fastmail.tw",
"fastmail.us",
"fastmailbox.net",
"fastmessaging.com",
"fastservice.com",
"fatcock.net",
"fathersrightsne.org",
"fax.ru",
"fbi-agent.com",
"fbi.hu",
"fea.st",
"federalcontractors.com",
"feinripptraeger.de",
"felicity.com",
"felicitymail.com",
"femenino.com",
"fetchmail.co.uk",
"fetchmail.com",
"fettabernett.de",
"feyenoorder.com",
"ffanet.com",
"fiberia.com",
"filipinolinks.com",
"financemail.net",
"financier.com",
"findmail.com",
"finebody.com",
"finfin.com",
"fire-brigade.com",
"fireman.net",
"fishburne.org",
"fishfuse.com",
"flashemail.com",
"flashmail.com",
"flashmail.net",
"flipcode.com",
"fmail.co.uk",
"fmailbox.com",
"fmgirl.com",
"fmguy.com",
"fnbmail.co.za",
"fnmail.com",
"folkfan.com",
"foodmail.com",
"football.theboys.com",
"footballmail.com",
"for-president.com",
"force9.co.uk",
"forfree.at",
"forpresident.com",
"fortuncity.com",
"fortunecity.com",
"forum.dk",
"foxmail.com",
"francemel.fr",
"free-online.net",
"free-org.com",
"free.com.pe",
"free.fr",
"freeaccess.nl",
"freeaccount.com",
"freeandsingle.com",
"freebox.com",
"freedom.usa.com",
"freedomlover.com",
"freegates.be",
"freeghana.com",
"freeler.nl",
"freemail.c3.hu",
"freemail.com.au",
"freemail.com.pk",
"freemail.de",
"freemail.et",
"freemail.gr",
"freemail.hu",
"freemail.it",
"freemail.lt",
"freemail.nl",
"freemail.org.mk",
"freenet.de",
"freenet.kg",
"freeola.com",
"freeola.net",
"freeserve.co.uk",
"freestamp.com",
"freestart.hu",
"freesurf.fr",
"freesurf.nl",
"freeuk.com",
"freeuk.net",
"freeukisp.co.uk",
"freeweb.org",
"freewebemail.com",
"freeyellow.com",
"freezone.co.uk",
"fresnomail.com",
"freudenkinder.de",
"friends-cafe.com",
"friendsfan.com",
"from-africa.com",
"from-america.com",
"from-argentina.com",
"from-asia.com",
"from-australia.com",
"from-belgium.com",
"from-brazil.com",
"from-canada.com",
"from-china.net",
"from-england.com",
"from-europe.com",
"from-france.net",
"from-germany.net",
"from-holland.com",
"from-israel.com",
"from-italy.net",
"from-japan.net",
"from-korea.com",
"from-mexico.com",
"from-outerspace.com",
"from-russia.com",
"from-spain.net",
"fromalabama.com",
"fromalaska.com",
"fromarizona.com",
"fromarkansas.com",
"fromcalifornia.com",
"fromcolorado.com",
"fromconnecticut.com",
"fromdelaware.com",
"fromflorida.net",
"fromgeorgia.com",
"fromhawaii.net",
"fromidaho.com",
"fromillinois.com",
"fromindiana.com",
"fromiowa.com",
"fromjupiter.com",
"fromkansas.com",
"fromkentucky.com",
"fromlouisiana.com",
"frommaine.net",
"frommaryland.com",
"frommassachusetts.com",
"frommiami.com",
"frommichigan.com",
"fromminnesota.com",
"frommississippi.com",
"frommissouri.com",
"frommontana.com",
"fromnebraska.com",
"fromnevada.com",
"fromnewhampshire.com",
"fromnewjersey.com",
"fromnewmexico.com",
"fromnewyork.net",
"fromnorthcarolina.com",
"fromnorthdakota.com",
"fromohio.com",
"fromoklahoma.com",
"fromoregon.net",
"frompennsylvania.com",
"fromrhodeisland.com",
"fromru.com",
"fromsouthcarolina.com",
"fromsouthdakota.com",
"fromtennessee.com",
"fromtexas.com",
"fromthestates.com",
"fromutah.com",
"fromvermont.com",
"fromvirginia.com",
"fromwashington.com",
"fromwashingtondc.com",
"fromwestvirginia.com",
"fromwisconsin.com",
"fromwyoming.com",
"front.ru",
"frontier.com",
"frontiernet.net",
"frostbyte.uk.net",
"fsmail.net",
"ftml.net",
"fubuki.shp7.cn",
"fullmail.com",
"funkfan.com",
"funky4.com",
"fuorissimo.com",
"furnitureprovider.com",
"fuse.net",
"fut.es",
"fwnb.com",
"fxsmails.com",
"g.hmail.us",
"galamb.net",
"galaxy5.com",
"galaxyhit.com",
"gamebox.net",
"gamegeek.com",
"gamespotmail.com",
"garbage.com",
"gardener.com",
"gaybrighton.co.uk",
"gaza.net",
"gazeta.pl",
"gazibooks.com",
"gci.net",
"gee-wiz.com",
"geecities.com",
"geek.com",
"geek.hu",
"geeklife.com",
"gencmail.com",
"general-hospital.com",
"gentlemansclub.de",
"geocities.com",
"geography.net",
"geologist.com",
"geopia.com",
"germanymail.com",
"gh2000.com",
"ghanamail.com",
"ghostmail.com",
"giantsfan.com",
"giga4u.de",
"gigileung.org",
"girl4god.com",
"givepeaceachance.com",
"glay.org",
"glendale.net",
"globalfree.it",
"globalpagan.com",
"globalsite.com.br",
"gmail.com",
"gmail.com.br",
"gmail.gr.com",
"gmail.ru",
"gmail.sk",
"gmail.zalvisual.us",
"gmx.at",
"gmx.co.uk",
"gmx.com",
"gmx.de",
"gmx.fr",
"gmx.li",
"gmx.net",
"gnwmail.com",
"go.com",
"go.ro",
"go.ru",
"go2.com.py",
"go2net.com",
"gocollege.com",
"gocubs.com",
"gofree.co.uk",
"goldenmail.ru",
"goldmail.ru",
"goldtoolbox.com",
"golfemail.com",
"golfilla.info",
"golfmail.be",
"gonavy.net",
"goodnewsmail.com",
"goodstick.com",
"googlemail.com",
"goplay.com",
"gorontalo.net",
"gospelfan.com",
"gothere.uk.com",
"gotomy.com",
"gotti.otherinbox.com",
"govolsfan.com",
"gportal.hu",
"grabmail.com",
"graduate.org",
"graffiti.net",
"gramszu.net",
"graphic-designer.com",
"grapplers.com",
"gratisweb.com",
"greenmail.net",
"groupmail.com",
"grungecafe.com",
"gspam.mooo.com",
"gtemail.net",
"gtmc.net",
"gu.luk2.com",
"gua.net",
"guessmail.com",
"guju.net",
"gurlmail.com",
"guy.com",
"guy2.com",
"guyanafriends.com",
"gyorsposta.com",
"gyorsposta.hu",
"h-mail.us",
"hab-verschlafen.de",
"habmalnefrage.de",
"hackermail.com",
"hackermail.net",
"hailmail.net",
"hairdresser.net",
"hamptonroads.com",
"handbag.com",
"handleit.com",
"hang-ten.com",
"hanmail.net",
"happemail.com",
"happycounsel.com",
"happypuppy.com",
"hardcorefreak.com",
"hawaii.rr.com",
"hawaiiantel.net",
"headbone.com",
"heartthrob.com",
"heerschap.com",
"heesun.net",
"hehe.com",
"hello.hu",
"hello.net.au",
"hello.to",
"helter-skelter.com",
"hempseed.com",
"herediano.com",
"heremail.com",
"herono1.com",
"herr-der-mails.de",
"hetnet.nl",
"hey.to",
"hhdevel.com",
"hhh.sytes.net",
"highmilton.com",
"highquality.com",
"highveldmail.co.za",
"hilarious.com",
"hiphopfan.com",
"hispavista.com",
"hitmail.com",
"hitthe.net",
"hkg.net",
"hkstarphoto.com",
"hku.us.to",
"hockeymail.com",
"hollywoodkids.com",
"home-email.com",
"home.de",
"home.nl",
"home.no.net",
"home.ro",
"home.se",
"homeart.com",
"homelocator.com",
"homemail.com",
"homestead.com",
"homeworkcentral.com",
"honduras.com",
"hongkong.com",
"hookup.net",
"hoopsmail.com",
"horrormail.com",
"host-it.com.sg",
"hot-shot.com",
"hot.ee",
"hotbot.com",
"hotbrev.com",
"hotepmail.com",
"hotfire.net",
"hotletter.com",
"hotmail.be",
"hotmail.ca",
"hotmail.ch",
"hotmail.co.il",
"hotmail.co.nz",
"hotmail.co.th",
"hotmail.co.uk",
"hotmail.com",
"hotmail.com.br",
"hotmail.com.hk",
"hotmail.com.tw",
"hotmail.de",
"hotmail.es",
"hotmail.fr",
"hotmail.it",
"hotmail.kg",
"hotmail.kz",
"hotmail.nl",
"hotmail.no",
"hotmail.roor",
"hotmail.ru",
"hotpop3.com",
"hotvoice.com",
"housefan.com",
"housefancom",
"housemail.com",
"hsuchi.net",
"html.tou.com",
"hu2.ru",
"hughes.net",
"humanoid.net",
"hunsa.com",
"hurting.com",
"hush.ai",
"hush.com",
"hushmail.com",
"hypernautica.com",
"i-connect.com",
"i-france.com",
"i-mail.com.au",
"i-mailbox.net",
"i-p.com",
"i.am",
"i.amhey.to",
"i.ua",
"i12.com",
"i6.cloudns.cc",
"i6.cloudns.cx",
"iamawoman.com",
"iamwaiting.com",
"iamwasted.com",
"iamyours.com",
"icestorm.com",
"icfu.mooo.com",
"ich-bin-verrueckt-nach-dir.de",
"ich-will-net.de",
"icloud.com",
"icmsconsultants.com",
"icq.com",
"icqmail.com",
"icrazy.com",
"icu.md",
"id-base.com",
"ididitmyway.com",
"idigjesus.com",
"idirect.com",
"iespana.es",
"ifoward.com",
"ig.com.br",
"ignazio.it",
"ignmail.com",
"ihateclowns.com",
"iinet.net.au",
"ijustdontcare.com",
"ilkposta.com",
"ilovechocolate.com",
"ilovejesus.com",
"ilovethemovies.com",
"ilovetocollect.net",
"ilse.nl",
"imaginemail.com",
"imail.org",
"imail.ru",
"imailbox.com",
"imap-mail.com",
"imap.cc",
"imapmail.org",
"imel.org",
"imneverwrong.com",
"imposter.co.uk",
"imstressed.com",
"imtoosexy.com",
"in-box.net",
"in2jesus.com",
"iname.com",
"inbox.com",
"inbox.lt",
"inbox.lv",
"inbox.net",
"inbox.ru",
"incamail.com",
"includingarabia.com",
"incredimail.com",
"index.ua",
"indexa.fr",
"india.com",
"indiatimes.com",
"indo-mail.com",
"indocities.com",
"indomail.com",
"indyracers.com",
"inerted.com",
"info-media.de",
"info66.com",
"infohq.com",
"infomail.es",
"infomart.or.jp",
"infospacemail.com",
"infovia.com.ar",
"inicia.es",
"inmail.sk",
"inmail24.com",
"inmano.com",
"innocent.com",
"inorbit.com",
"inoutbox.com",
"insidebaltimore.net",
"insight.rr.com",
"instruction.com",
"instructor.net",
"insurer.com",
"interburp.com",
"interfree.it",
"interia.pl",
"interlap.com.ar",
"intermail.co.il",
"internet-club.com",
"internet-e-mail.com",
"internet-mail.org",
"internet-police.com",
"internetbiz.com",
"internetdrive.com",
"internetegypt.com",
"internetemails.net",
"internetmailing.net",
"internode.on.net",
"investormail.com",
"inwind.it",
"iobox.com",
"iobox.fi",
"iol.it",
"iowaemail.com",
"ip3.com",
"iprimus.com.au",
"iqemail.com",
"irangate.net",
"iraqmail.com",
"ireland.com",
"irelandmail.com",
"irj.hu",
"isellcars.com",
"iservejesus.com",
"islamonline.net",
"isleuthmail.com",
"ismart.net",
"isonfire.com",
"isp9.net",
"israelmail.com",
"ist-allein.info",
"ist-einmalig.de",
"ist-ganz-allein.de",
"ist-willig.de",
"italymail.com",
"itloox.com",
"itmom.com",
"ivebeenframed.com",
"ivillage.com",
"iwan-fals.com",
"iwmail.com",
"iwon.com",
"izadpanah.com",
"jahoopa.com",
"jakuza.hu",
"japan.com",
"jaydemail.com",
"jazzandjava.com",
"jazzfan.com",
"jazzgame.com",
"jerusalemmail.com",
"jetable.de",
"jetemail.net",
"jewishmail.com",
"jippii.fi",
"jmail.co.za",
"job4u.com",
"joinme.com",
"jokes.com",
"jordanmail.com",
"journalist.com",
"jovem.te.pt",
"joymail.com",
"jpopmail.com",
"jubiimail.dk",
"jump.com",
"jumpy.it",
"juniormail.com",
"junkmail.com",
"juno.com",
"justemail.net",
"justicemail.com",
"kaazoo.com",
"kaffeeschluerfer.com",
"kaffeeschluerfer.de",
"kaixo.com",
"kalpoint.com",
"kansascity.com",
"kapoorweb.com",
"karachian.com",
"karachioye.com",
"karbasi.com",
"katamail.com",
"kayafmmail.co.za",
"kbjrmail.com",
"kcks.com",
"keftamail.com",
"keg-party.com",
"keko.com.ar",
"kellychen.com",
"keromail.com",
"keyemail.com",
"kgb.hu",
"khosropour.com",
"kickassmail.com",
"killermail.com",
"kimo.com",
"kinglibrary.net",
"kinki-kids.com",
"kissfans.com",
"kittymail.com",
"kitznet.at",
"kiwibox.com",
"kiwitown.com",
"km.ru",
"kmail.com.au",
"kmail.mooo.com",
"kommespaeter.de",
"konx.com",
"korea.com",
"koreamail.com",
"kozmail.com",
"kpnmail.nl",
"krim.ws",
"krongthip.com",
"krunis.com",
"ksanmail.com",
"ksee24mail.com",
"kube93mail.com",
"kukamail.com",
"kumarweb.com",
"kuwait-mail.com",
"la.com",
"ladymail.cz",
"lagerlouts.com",
"lahoreoye.com",
"lakmail.com",
"lamer.hu",
"land.ru",
"lankamail.com",
"laposte.net",
"lass-es-geschehen.de",
"latemodels.com",
"latinmail.com",
"latino.com",
"lavache.com",
"law.com",
"lawyer.com",
"leehom.net",
"legalactions.com",
"legislator.com",
"lenta.ru",
"leonlai.net",
"letsgomets.net",
"letterbox.com",
"letterboxes.org",
"levele.com",
"levele.hu",
"lex.bg",
"lexis-nexis-mail.com",
"libero.it",
"liberomail.com",
"lick101.com",
"liebt-dich.info",
"link.cloudns.asia",
"linkmaster.com",
"linktrader.com",
"linuxfreemail.com",
"linuxmail.org",
"lionsfan.com.au",
"liontrucks.com",
"list.ru",
"listomail.com",
"littleapple.com",
"littleblueroom.com",
"live.at",
"live.be",
"live.ca",
"live.cl",
"live.cn",
"live.co.uk",
"live.co.za",
"live.com",
"live.com.ar",
"live.com.au",
"live.com.mx",
"live.com.my",
"live.com.pt",
"live.com.sg",
"live.de",
"live.dk",
"live.fr",
"live.ie",
"live.in",
"live.it",
"live.jp",
"live.nl",
"live.no",
"live.ru",
"live.se",
"liverpoolfans.com",
"lko.co.kr",
"llandudno.com",
"llangollen.com",
"lmxmail.sk",
"lobbyist.com",
"localbar.com",
"locos.com",
"london.com",
"loobie.com",
"looksmart.co.uk",
"looksmart.com",
"looksmart.com.au",
"lopezclub.com",
"louiskoo.com",
"love.cz",
"loveable.com",
"lovecat.com",
"lovefootball.com",
"lovelygirl.net",
"lovemail.com",
"lover-boy.com",
"lovergirl.com",
"lovethebroncos.com",
"lovethecowboys.com",
"loveyouforever.de",
"lovingjesus.com",
"lowandslow.com",
"luso.pt",
"luukku.com",
"lvey.luk2.com",
"lvie.com.sg",
"lycos.co.uk",
"lycos.com",
"lycos.de",
"lycos.es",
"lycos.it",
"lycos.ne.jp",
"lycosemail.com",
"lycosmail.com",
"m-a-i-l.com",
"m-hmail.com",
"m4.org",
"mac.com",
"macbox.com",
"macfreak.com",
"machinecandy.com",
"macmail.com",
"madcreations.com",
"madonnafan.com",
"madrid.com",
"maennerversteherin.com",
"maennerversteherin.de",
"maffia.hu",
"magicmail.co.za",
"mahmoodweb.com",
"mail-awu.de",
"mail-box.cz",
"mail-center.com",
"mail-central.com",
"mail-me.com",
"mail-page.com",
"mail.austria.com",
"mail.az",
"mail.be",
"mail.bg",
"mail.bulgaria.com",
"mail.byte.it",
"mail.co.za",
"mail.com",
"mail.com.tr",
"mail.defaultdomain.ml",
"mail.ee",
"mail.entrepeneurmag.com",
"mail.freetown.com",
"mail.gr",
"mail.hitthebeach.com",
"mail.htl22.at",
"mail.kmsp.com",
"mail.md",
"mail.misterpinball.de",
"mail.nu",
"mail.org.uk",
"mail.pf",
"mail.pharmacy.com",
"mail.pt",
"mail.r-o-o-t.com",
"mail.ru",
"mail.salu.net",
"mail.sisna.com",
"mail.spaceports.com",
"mail.svenz.eu",
"mail.theboys.com",
"mail.ua",
"mail.usa.com",
"mail.vasarhely.hu",
"mail15.com",
"mail1st.com",
"mail2007.com",
"mail2aaron.com",
"mail2abby.com",
"mail2abc.com",
"mail2actor.com",
"mail2admiral.com",
"mail2adorable.com",
"mail2adoration.com",
"mail2adore.com",
"mail2adventure.com",
"mail2aeolus.com",
"mail2aether.com",
"mail2affection.com",
"mail2afghanistan.com",
"mail2africa.com",
"mail2agent.com",
"mail2aha.com",
"mail2ahoy.com",
"mail2aim.com",
"mail2air.com",
"mail2airbag.com",
"mail2airforce.com",
"mail2airport.com",
"mail2alabama.com",
"mail2alan.com",
"mail2alaska.com",
"mail2albania.com",
"mail2alcoholic.com",
"mail2alec.com",
"mail2alexa.com",
"mail2algeria.com",
"mail2alicia.com",
"mail2alien.com",
"mail2allan.com",
"mail2allen.com",
"mail2allison.com",
"mail2alpha.com",
"mail2alyssa.com",
"mail2amanda.com",
"mail2amazing.com",
"mail2amber.com",
"mail2america.com",
"mail2american.com",
"mail2andorra.com",
"mail2andrea.com",
"mail2andy.com",
"mail2anesthesiologist.com",
"mail2angela.com",
"mail2angola.com",
"mail2ann.com",
"mail2anna.com",
"mail2anne.com",
"mail2anthony.com",
"mail2anything.com",
"mail2aphrodite.com",
"mail2apollo.com",
"mail2april.com",
"mail2aquarius.com",
"mail2arabia.com",
"mail2arabic.com",
"mail2architect.com",
"mail2ares.com",
"mail2argentina.com",
"mail2aries.com",
"mail2arizona.com",
"mail2arkansas.com",
"mail2armenia.com",
"mail2army.com",
"mail2arnold.com",
"mail2art.com",
"mail2artemus.com",
"mail2arthur.com",
"mail2artist.com",
"mail2ashley.com",
"mail2ask.com",
"mail2astronomer.com",
"mail2athena.com",
"mail2athlete.com",
"mail2atlas.com",
"mail2atom.com",
"mail2attitude.com",
"mail2auction.com",
"mail2aunt.com",
"mail2australia.com",
"mail2austria.com",
"mail2azerbaijan.com",
"mail2baby.com",
"mail2bahamas.com",
"mail2bahrain.com",
"mail2ballerina.com",
"mail2ballplayer.com",
"mail2band.com",
"mail2bangladesh.com",
"mail2bank.com",
"mail2banker.com",
"mail2bankrupt.com",
"mail2baptist.com",
"mail2bar.com",
"mail2barbados.com",
"mail2barbara.com",
"mail2barter.com",
"mail2basketball.com",
"mail2batter.com",
"mail2beach.com",
"mail2beast.com",
"mail2beatles.com",
"mail2beauty.com",
"mail2becky.com",
"mail2beijing.com",
"mail2belgium.com",
"mail2belize.com",
"mail2ben.com",
"mail2bernard.com",
"mail2beth.com",
"mail2betty.com",
"mail2beverly.com",
"mail2beyond.com",
"mail2biker.com",
"mail2bill.com",
"mail2billionaire.com",
"mail2billy.com",
"mail2bio.com",
"mail2biologist.com",
"mail2black.com",
"mail2blackbelt.com",
"mail2blake.com",
"mail2blind.com",
"mail2blonde.com",
"mail2blues.com",
"mail2bob.com",
"mail2bobby.com",
"mail2bolivia.com",
"mail2bombay.com",
"mail2bonn.com",
"mail2bookmark.com",
"mail2boreas.com",
"mail2bosnia.com",
"mail2boston.com",
"mail2botswana.com",
"mail2bradley.com",
"mail2brazil.com",
"mail2breakfast.com",
"mail2brian.com",
"mail2bride.com",
"mail2brittany.com",
"mail2broker.com",
"mail2brook.com",
"mail2bruce.com",
"mail2brunei.com",
"mail2brunette.com",
"mail2brussels.com",
"mail2bryan.com",
"mail2bug.com",
"mail2bulgaria.com",
"mail2business.com",
"mail2buy.com",
"mail2ca.com",
"mail2california.com",
"mail2calvin.com",
"mail2cambodia.com",
"mail2cameroon.com",
"mail2canada.com",
"mail2cancer.com",
"mail2capeverde.com",
"mail2capricorn.com",
"mail2cardinal.com",
"mail2cardiologist.com",
"mail2care.com",
"mail2caroline.com",
"mail2carolyn.com",
"mail2casey.com",
"mail2cat.com",
"mail2caterer.com",
"mail2cathy.com",
"mail2catlover.com",
"mail2catwalk.com",
"mail2cell.com",
"mail2chad.com",
"mail2champaign.com",
"mail2charles.com",
"mail2chef.com",
"mail2chemist.com",
"mail2cherry.com",
"mail2chicago.com",
"mail2chile.com",
"mail2china.com",
"mail2chinese.com",
"mail2chocolate.com",
"mail2christian.com",
"mail2christie.com",
"mail2christmas.com",
"mail2christy.com",
"mail2chuck.com",
"mail2cindy.com",
"mail2clark.com",
"mail2classifieds.com",
"mail2claude.com",
"mail2cliff.com",
"mail2clinic.com",
"mail2clint.com",
"mail2close.com",
"mail2club.com",
"mail2coach.com",
"mail2coastguard.com",
"mail2colin.com",
"mail2college.com",
"mail2colombia.com",
"mail2color.com",
"mail2colorado.com",
"mail2columbia.com",
"mail2comedian.com",
"mail2composer.com",
"mail2computer.com",
"mail2computers.com",
"mail2concert.com",
"mail2congo.com",
"mail2connect.com",
"mail2connecticut.com",
"mail2consultant.com",
"mail2convict.com",
"mail2cook.com",
"mail2cool.com",
"mail2cory.com",
"mail2costarica.com",
"mail2country.com",
"mail2courtney.com",
"mail2cowboy.com",
"mail2cowgirl.com",
"mail2craig.com",
"mail2crave.com",
"mail2crazy.com",
"mail2create.com",
"mail2croatia.com",
"mail2cry.com",
"mail2crystal.com",
"mail2cuba.com",
"mail2culture.com",
"mail2curt.com",
"mail2customs.com",
"mail2cute.com",
"mail2cutey.com",
"mail2cynthia.com",
"mail2cyprus.com",
"mail2czechrepublic.com",
"mail2dad.com",
"mail2dale.com",
"mail2dallas.com",
"mail2dan.com",
"mail2dana.com",
"mail2dance.com",
"mail2dancer.com",
"mail2danielle.com",
"mail2danny.com",
"mail2darlene.com",
"mail2darling.com",
"mail2darren.com",
"mail2daughter.com",
"mail2dave.com",
"mail2dawn.com",
"mail2dc.com",
"mail2dealer.com",
"mail2deanna.com",
"mail2dearest.com",
"mail2debbie.com",
"mail2debby.com",
"mail2deer.com",
"mail2delaware.com",
"mail2delicious.com",
"mail2demeter.com",
"mail2democrat.com",
"mail2denise.com",
"mail2denmark.com",
"mail2dennis.com",
"mail2dentist.com",
"mail2derek.com",
"mail2desert.com",
"mail2devoted.com",
"mail2devotion.com",
"mail2diamond.com",
"mail2diana.com",
"mail2diane.com",
"mail2diehard.com",
"mail2dilemma.com",
"mail2dillon.com",
"mail2dinner.com",
"mail2dinosaur.com",
"mail2dionysos.com",
"mail2diplomat.com",
"mail2director.com",
"mail2dirk.com",
"mail2disco.com",
"mail2dive.com",
"mail2diver.com",
"mail2divorced.com",
"mail2djibouti.com",
"mail2doctor.com",
"mail2doglover.com",
"mail2dominic.com",
"mail2dominica.com",
"mail2dominicanrepublic.com",
"mail2don.com",
"mail2donald.com",
"mail2donna.com",
"mail2doris.com",
"mail2dorothy.com",
"mail2doug.com",
"mail2dough.com",
"mail2douglas.com",
"mail2dow.com",
"mail2downtown.com",
"mail2dream.com",
"mail2dreamer.com",
"mail2dude.com",
"mail2dustin.com",
"mail2dyke.com",
"mail2dylan.com",
"mail2earl.com",
"mail2earth.com",
"mail2eastend.com",
"mail2eat.com",
"mail2economist.com",
"mail2ecuador.com",
"mail2eddie.com",
"mail2edgar.com",
"mail2edwin.com",
"mail2egypt.com",
"mail2electron.com",
"mail2eli.com",
"mail2elizabeth.com",
"mail2ellen.com",
"mail2elliot.com",
"mail2elsalvador.com",
"mail2elvis.com",
"mail2emergency.com",
"mail2emily.com",
"mail2engineer.com",
"mail2english.com",
"mail2environmentalist.com",
"mail2eos.com",
"mail2eric.com",
"mail2erica.com",
"mail2erin.com",
"mail2erinyes.com",
"mail2eris.com",
"mail2eritrea.com",
"mail2ernie.com",
"mail2eros.com",
"mail2estonia.com",
"mail2ethan.com",
"mail2ethiopia.com",
"mail2eu.com",
"mail2europe.com",
"mail2eurus.com",
"mail2eva.com",
"mail2evan.com",
"mail2evelyn.com",
"mail2everything.com",
"mail2exciting.com",
"mail2expert.com",
"mail2fairy.com",
"mail2faith.com",
"mail2fanatic.com",
"mail2fancy.com",
"mail2fantasy.com",
"mail2farm.com",
"mail2farmer.com",
"mail2fashion.com",
"mail2fat.com",
"mail2feeling.com",
"mail2female.com",
"mail2fever.com",
"mail2fighter.com",
"mail2fiji.com",
"mail2filmfestival.com",
"mail2films.com",
"mail2finance.com",
"mail2finland.com",
"mail2fireman.com",
"mail2firm.com",
"mail2fisherman.com",
"mail2flexible.com",
"mail2florence.com",
"mail2florida.com",
"mail2floyd.com",
"mail2fly.com",
"mail2fond.com",
"mail2fondness.com",
"mail2football.com",
"mail2footballfan.com",
"mail2found.com",
"mail2france.com",
"mail2frank.com",
"mail2frankfurt.com",
"mail2franklin.com",
"mail2fred.com",
"mail2freddie.com",
"mail2free.com",
"mail2freedom.com",
"mail2french.com",
"mail2freudian.com",
"mail2friendship.com",
"mail2from.com",
"mail2fun.com",
"mail2gabon.com",
"mail2gabriel.com",
"mail2gail.com",
"mail2galaxy.com",
"mail2gambia.com",
"mail2games.com",
"mail2gary.com",
"mail2gavin.com",
"mail2gemini.com",
"mail2gene.com",
"mail2genes.com",
"mail2geneva.com",
"mail2george.com",
"mail2georgia.com",
"mail2gerald.com",
"mail2german.com",
"mail2germany.com",
"mail2ghana.com",
"mail2gilbert.com",
"mail2gina.com",
"mail2girl.com",
"mail2glen.com",
"mail2gloria.com",
"mail2goddess.com",
"mail2gold.com",
"mail2golfclub.com",
"mail2golfer.com",
"mail2gordon.com",
"mail2government.com",
"mail2grab.com",
"mail2grace.com",
"mail2graham.com",
"mail2grandma.com",
"mail2grandpa.com",
"mail2grant.com",
"mail2greece.com",
"mail2green.com",
"mail2greg.com",
"mail2grenada.com",
"mail2gsm.com",
"mail2guard.com",
"mail2guatemala.com",
"mail2guy.com",
"mail2hades.com",
"mail2haiti.com",
"mail2hal.com",
"mail2handhelds.com",
"mail2hank.com",
"mail2hannah.com",
"mail2harold.com",
"mail2harry.com",
"mail2hawaii.com",
"mail2headhunter.com",
"mail2heal.com",
"mail2heather.com",
"mail2heaven.com",
"mail2hebe.com",
"mail2hecate.com",
"mail2heidi.com",
"mail2helen.com",
"mail2hell.com",
"mail2help.com",
"mail2helpdesk.com",
"mail2henry.com",
"mail2hephaestus.com",
"mail2hera.com",
"mail2hercules.com",
"mail2herman.com",
"mail2hermes.com",
"mail2hespera.com",
"mail2hestia.com",
"mail2highschool.com",
"mail2hindu.com",
"mail2hip.com",
"mail2hiphop.com",
"mail2holland.com",
"mail2holly.com",
"mail2hollywood.com",
"mail2homer.com",
"mail2honduras.com",
"mail2honey.com",
"mail2hongkong.com",
"mail2hope.com",
"mail2horse.com",
"mail2hot.com",
"mail2hotel.com",
"mail2houston.com",
"mail2howard.com",
"mail2hugh.com",
"mail2human.com",
"mail2hungary.com",
"mail2hungry.com",
"mail2hygeia.com",
"mail2hyperspace.com",
"mail2hypnos.com",
"mail2ian.com",
"mail2ice-cream.com",
"mail2iceland.com",
"mail2idaho.com",
"mail2idontknow.com",
"mail2illinois.com",
"mail2imam.com",
"mail2in.com",
"mail2india.com",
"mail2indian.com",
"mail2indiana.com",
"mail2indonesia.com",
"mail2infinity.com",
"mail2intense.com",
"mail2iowa.com",
"mail2iran.com",
"mail2iraq.com",
"mail2ireland.com",
"mail2irene.com",
"mail2iris.com",
"mail2irresistible.com",
"mail2irving.com",
"mail2irwin.com",
"mail2isaac.com",
"mail2israel.com",
"mail2italian.com",
"mail2italy.com",
"mail2jackie.com",
"mail2jacob.com",
"mail2jail.com",
"mail2jaime.com",
"mail2jake.com",
"mail2jamaica.com",
"mail2james.com",
"mail2jamie.com",
"mail2jan.com",
"mail2jane.com",
"mail2janet.com",
"mail2janice.com",
"mail2japan.com",
"mail2japanese.com",
"mail2jasmine.com",
"mail2jason.com",
"mail2java.com",
"mail2jay.com",
"mail2jazz.com",
"mail2jed.com",
"mail2jeffrey.com",
"mail2jennifer.com",
"mail2jenny.com",
"mail2jeremy.com",
"mail2jerry.com",
"mail2jessica.com",
"mail2jessie.com",
"mail2jesus.com",
"mail2jew.com",
"mail2jeweler.com",
"mail2jim.com",
"mail2jimmy.com",
"mail2joan.com",
"mail2joann.com",
"mail2joanna.com",
"mail2jody.com",
"mail2joe.com",
"mail2joel.com",
"mail2joey.com",
"mail2john.com",
"mail2join.com",
"mail2jon.com",
"mail2jonathan.com",
"mail2jones.com",
"mail2jordan.com",
"mail2joseph.com",
"mail2josh.com",
"mail2joy.com",
"mail2juan.com",
"mail2judge.com",
"mail2judy.com",
"mail2juggler.com",
"mail2julian.com",
"mail2julie.com",
"mail2jumbo.com",
"mail2junk.com",
"mail2justin.com",
"mail2justme.com",
"mail2k.ru",
"mail2kansas.com",
"mail2karate.com",
"mail2karen.com",
"mail2karl.com",
"mail2karma.com",
"mail2kathleen.com",
"mail2kathy.com",
"mail2katie.com",
"mail2kay.com",
"mail2kazakhstan.com",
"mail2keen.com",
"mail2keith.com",
"mail2kelly.com",
"mail2kelsey.com",
"mail2ken.com",
"mail2kendall.com",
"mail2kennedy.com",
"mail2kenneth.com",
"mail2kenny.com",
"mail2kentucky.com",
"mail2kenya.com",
"mail2kerry.com",
"mail2kevin.com",
"mail2kim.com",
"mail2kimberly.com",
"mail2king.com",
"mail2kirk.com",
"mail2kiss.com",
"mail2kosher.com",
"mail2kristin.com",
"mail2kurt.com",
"mail2kuwait.com",
"mail2kyle.com",
"mail2kyrgyzstan.com",
"mail2la.com",
"mail2lacrosse.com",
"mail2lance.com",
"mail2lao.com",
"mail2larry.com",
"mail2latvia.com",
"mail2laugh.com",
"mail2laura.com",
"mail2lauren.com",
"mail2laurie.com",
"mail2lawrence.com",
"mail2lawyer.com",
"mail2lebanon.com",
"mail2lee.com",
"mail2leo.com",
"mail2leon.com",
"mail2leonard.com",
"mail2leone.com",
"mail2leslie.com",
"mail2letter.com",
"mail2liberia.com",
"mail2libertarian.com",
"mail2libra.com",
"mail2libya.com",
"mail2liechtenstein.com",
"mail2life.com",
"mail2linda.com",
"mail2linux.com",
"mail2lionel.com",
"mail2lipstick.com",
"mail2liquid.com",
"mail2lisa.com",
"mail2lithuania.com",
"mail2litigator.com",
"mail2liz.com",
"mail2lloyd.com",
"mail2lois.com",
"mail2lola.com",
"mail2london.com",
"mail2looking.com",
"mail2lori.com",
"mail2lost.com",
"mail2lou.com",
"mail2louis.com",
"mail2louisiana.com",
"mail2lovable.com",
"mail2love.com",
"mail2lucky.com",
"mail2lucy.com",
"mail2lunch.com",
"mail2lust.com",
"mail2luxembourg.com",
"mail2luxury.com",
"mail2lyle.com",
"mail2lynn.com",
"mail2madagascar.com",
"mail2madison.com",
"mail2madrid.com",
"mail2maggie.com",
"mail2mail4.com",
"mail2maine.com",
"mail2malawi.com",
"mail2malaysia.com",
"mail2maldives.com",
"mail2mali.com",
"mail2malta.com",
"mail2mambo.com",
"mail2man.com",
"mail2mandy.com",
"mail2manhunter.com",
"mail2mankind.com",
"mail2many.com",
"mail2marc.com",
"mail2marcia.com",
"mail2margaret.com",
"mail2margie.com",
"mail2marhaba.com",
"mail2maria.com",
"mail2marilyn.com",
"mail2marines.com",
"mail2mark.com",
"mail2marriage.com",
"mail2married.com",
"mail2marries.com",
"mail2mars.com",
"mail2marsha.com",
"mail2marshallislands.com",
"mail2martha.com",
"mail2martin.com",
"mail2marty.com",
"mail2marvin.com",
"mail2mary.com",
"mail2maryland.com",
"mail2mason.com",
"mail2massachusetts.com",
"mail2matt.com",
"mail2matthew.com",
"mail2maurice.com",
"mail2mauritania.com",
"mail2mauritius.com",
"mail2max.com",
"mail2maxwell.com",
"mail2maybe.com",
"mail2mba.com",
"mail2me4u.com",
"mail2mechanic.com",
"mail2medieval.com",
"mail2megan.com",
"mail2mel.com",
"mail2melanie.com",
"mail2melissa.com",
"mail2melody.com",
"mail2member.com",
"mail2memphis.com",
"mail2methodist.com",
"mail2mexican.com",
"mail2mexico.com",
"mail2mgz.com",
"mail2miami.com",
"mail2michael.com",
"mail2michelle.com",
"mail2michigan.com",
"mail2mike.com",
"mail2milan.com",
"mail2milano.com",
"mail2mildred.com",
"mail2milkyway.com",
"mail2millennium.com",
"mail2millionaire.com",
"mail2milton.com",
"mail2mime.com",
"mail2mindreader.com",
"mail2mini.com",
"mail2minister.com",
"mail2minneapolis.com",
"mail2minnesota.com",
"mail2miracle.com",
"mail2missionary.com",
"mail2mississippi.com",
"mail2missouri.com",
"mail2mitch.com",
"mail2model.com",
"mail2moldova.commail2molly.com",
"mail2mom.com",
"mail2monaco.com",
"mail2money.com",
"mail2mongolia.com",
"mail2monica.com",
"mail2montana.com",
"mail2monty.com",
"mail2moon.com",
"mail2morocco.com",
"mail2morpheus.com",
"mail2mors.com",
"mail2moscow.com",
"mail2moslem.com",
"mail2mouseketeer.com",
"mail2movies.com",
"mail2mozambique.com",
"mail2mp3.com",
"mail2mrright.com",
"mail2msright.com",
"mail2museum.com",
"mail2music.com",
"mail2musician.com",
"mail2muslim.com",
"mail2my.com",
"mail2myboat.com",
"mail2mycar.com",
"mail2mycell.com",
"mail2mygsm.com",
"mail2mylaptop.com",
"mail2mymac.com",
"mail2mypager.com",
"mail2mypalm.com",
"mail2mypc.com",
"mail2myphone.com",
"mail2myplane.com",
"mail2namibia.com",
"mail2nancy.com",
"mail2nasdaq.com",
"mail2nathan.com",
"mail2nauru.com",
"mail2navy.com",
"mail2neal.com",
"mail2nebraska.com",
"mail2ned.com",
"mail2neil.com",
"mail2nelson.com",
"mail2nemesis.com",
"mail2nepal.com",
"mail2netherlands.com",
"mail2network.com",
"mail2nevada.com",
"mail2newhampshire.com",
"mail2newjersey.com",
"mail2newmexico.com",
"mail2newyork.com",
"mail2newzealand.com",
"mail2nicaragua.com",
"mail2nick.com",
"mail2nicole.com",
"mail2niger.com",
"mail2nigeria.com",
"mail2nike.com",
"mail2no.com",
"mail2noah.com",
"mail2noel.com",
"mail2noelle.com",
"mail2normal.com",
"mail2norman.com",
"mail2northamerica.com",
"mail2northcarolina.com",
"mail2northdakota.com",
"mail2northpole.com",
"mail2norway.com",
"mail2notus.com",
"mail2noway.com",
"mail2nowhere.com",
"mail2nuclear.com",
"mail2nun.com",
"mail2ny.com",
"mail2oasis.com",
"mail2oceanographer.com",
"mail2ohio.com",
"mail2ok.com",
"mail2oklahoma.com",
"mail2oliver.com",
"mail2oman.com",
"mail2one.com",
"mail2onfire.com",
"mail2online.com",
"mail2oops.com",
"mail2open.com",
"mail2ophthalmologist.com",
"mail2optometrist.com",
"mail2oregon.com",
"mail2oscars.com",
"mail2oslo.com",
"mail2painter.com",
"mail2pakistan.com",
"mail2palau.com",
"mail2pan.com",
"mail2panama.com",
"mail2paraguay.com",
"mail2paralegal.com",
"mail2paris.com",
"mail2park.com",
"mail2parker.com",
"mail2party.com",
"mail2passion.com",
"mail2pat.com",
"mail2patricia.com",
"mail2patrick.com",
"mail2patty.com",
"mail2paul.com",
"mail2paula.com",
"mail2pay.com",
"mail2peace.com",
"mail2pediatrician.com",
"mail2peggy.com",
"mail2pennsylvania.com",
"mail2perry.com",
"mail2persephone.com",
"mail2persian.com",
"mail2peru.com",
"mail2pete.com",
"mail2peter.com",
"mail2pharmacist.com",
"mail2phil.com",
"mail2philippines.com",
"mail2phoenix.com",
"mail2phonecall.com",
"mail2phyllis.com",
"mail2pickup.com",
"mail2pilot.com",
"mail2pisces.com",
"mail2planet.com",
"mail2platinum.com",
"mail2plato.com",
"mail2pluto.com",
"mail2pm.com",
"mail2podiatrist.com",
"mail2poet.com",
"mail2poland.com",
"mail2policeman.com",
"mail2policewoman.com",
"mail2politician.com",
"mail2pop.com",
"mail2pope.com",
"mail2popular.com",
"mail2portugal.com",
"mail2poseidon.com",
"mail2potatohead.com",
"mail2power.com",
"mail2presbyterian.com",
"mail2president.com",
"mail2priest.com",
"mail2prince.com",
"mail2princess.com",
"mail2producer.com",
"mail2professor.com",
"mail2protect.com",
"mail2psychiatrist.com",
"mail2psycho.com",
"mail2psychologist.com",
"mail2qatar.com",
"mail2queen.com",
"mail2rabbi.com",
"mail2race.com",
"mail2racer.com",
"mail2rachel.com",
"mail2rage.com",
"mail2rainmaker.com",
"mail2ralph.com",
"mail2randy.com",
"mail2rap.com",
"mail2rare.com",
"mail2rave.com",
"mail2ray.com",
"mail2raymond.com",
"mail2realtor.com",
"mail2rebecca.com",
"mail2recruiter.com",
"mail2recycle.com",
"mail2redhead.com",
"mail2reed.com",
"mail2reggie.com",
"mail2register.com",
"mail2rent.com",
"mail2republican.com",
"mail2resort.com",
"mail2rex.com",
"mail2rhodeisland.com",
"mail2rich.com",
"mail2richard.com",
"mail2ricky.com",
"mail2ride.com",
"mail2riley.com",
"mail2rita.com",
"mail2rob.com",
"mail2robert.com",
"mail2roberta.com",
"mail2robin.com",
"mail2rock.com",
"mail2rocker.com",
"mail2rod.com",
"mail2rodney.com",
"mail2romania.com",
"mail2rome.com",
"mail2ron.com",
"mail2ronald.com",
"mail2ronnie.com",
"mail2rose.com",
"mail2rosie.com",
"mail2roy.com",
"mail2rudy.com",
"mail2rugby.com",
"mail2runner.com",
"mail2russell.com",
"mail2russia.com",
"mail2russian.com",
"mail2rusty.com",
"mail2ruth.com",
"mail2rwanda.com",
"mail2ryan.com",
"mail2sa.com",
"mail2sabrina.com",
"mail2safe.com",
"mail2sagittarius.com",
"mail2sail.com",
"mail2sailor.com",
"mail2sal.com",
"mail2salaam.com",
"mail2sam.com",
"mail2samantha.com",
"mail2samoa.com",
"mail2samurai.com",
"mail2sandra.com",
"mail2sandy.com",
"mail2sanfrancisco.com",
"mail2sanmarino.com",
"mail2santa.com",
"mail2sara.com",
"mail2sarah.com",
"mail2sat.com",
"mail2saturn.com",
"mail2saudi.com",
"mail2saudiarabia.com",
"mail2save.com",
"mail2savings.com",
"mail2school.com",
"mail2scientist.com",
"mail2scorpio.com",
"mail2scott.com",
"mail2sean.com",
"mail2search.com",
"mail2seattle.com",
"mail2secretagent.com",
"mail2senate.com",
"mail2senegal.com",
"mail2sensual.com",
"mail2seth.com",
"mail2sevenseas.com",
"mail2sexy.com",
"mail2seychelles.com",
"mail2shane.com",
"mail2sharon.com",
"mail2shawn.com",
"mail2ship.com",
"mail2shirley.com",
"mail2shoot.com",
"mail2shuttle.com",
"mail2sierraleone.com",
"mail2simon.com",
"mail2singapore.com",
"mail2single.com",
"mail2site.com",
"mail2skater.com",
"mail2skier.com",
"mail2sky.com",
"mail2sleek.com",
"mail2slim.com",
"mail2slovakia.com",
"mail2slovenia.com",
"mail2smile.com",
"mail2smith.com",
"mail2smooth.com",
"mail2soccer.com",
"mail2soccerfan.com",
"mail2socialist.com",
"mail2soldier.com",
"mail2somalia.com",
"mail2son.com",
"mail2song.com",
"mail2sos.com",
"mail2sound.com",
"mail2southafrica.com",
"mail2southamerica.com",
"mail2southcarolina.com",
"mail2southdakota.com",
"mail2southkorea.com",
"mail2southpole.com",
"mail2spain.com",
"mail2spanish.com",
"mail2spare.com",
"mail2spectrum.com",
"mail2splash.com",
"mail2sponsor.com",
"mail2sports.com",
"mail2srilanka.com",
"mail2stacy.com",
"mail2stan.com",
"mail2stanley.com",
"mail2star.com",
"mail2state.com",
"mail2stephanie.com",
"mail2steve.com",
"mail2steven.com",
"mail2stewart.com",
"mail2stlouis.com",
"mail2stock.com",
"mail2stockholm.com",
"mail2stockmarket.com",
"mail2storage.com",
"mail2store.com",
"mail2strong.com",
"mail2student.com",
"mail2studio.com",
"mail2studio54.com",
"mail2stuntman.com",
"mail2subscribe.com",
"mail2sudan.com",
"mail2superstar.com",
"mail2surfer.com",
"mail2suriname.com",
"mail2susan.com",
"mail2suzie.com",
"mail2swaziland.com",
"mail2sweden.com",
"mail2sweetheart.com",
"mail2swim.com",
"mail2swimmer.com",
"mail2swiss.com",
"mail2switzerland.com",
"mail2sydney.com",
"mail2sylvia.com",
"mail2syria.com",
"mail2taboo.com",
"mail2taiwan.com",
"mail2tajikistan.com",
"mail2tammy.com",
"mail2tango.com",
"mail2tanya.com",
"mail2tanzania.com",
"mail2tara.com",
"mail2taurus.com",
"mail2taxi.com",
"mail2taxidermist.com",
"mail2taylor.com",
"mail2taz.com",
"mail2teacher.com",
"mail2technician.com",
"mail2ted.com",
"mail2telephone.com",
"mail2teletubbie.com",
"mail2tenderness.com",
"mail2tennessee.com",
"mail2tennis.com",
"mail2tennisfan.com",
"mail2terri.com",
"mail2terry.com",
"mail2test.com",
"mail2texas.com",
"mail2thailand.com",
"mail2therapy.com",
"mail2think.com",
"mail2tickets.com",
"mail2tiffany.com",
"mail2tim.com",
"mail2time.com",
"mail2timothy.com",
"mail2tina.com",
"mail2titanic.com",
"mail2toby.com",
"mail2todd.com",
"mail2togo.com",
"mail2tom.com",
"mail2tommy.com",
"mail2tonga.com",
"mail2tony.com",
"mail2touch.com",
"mail2tourist.com",
"mail2tracey.com",
"mail2tracy.com",
"mail2tramp.com",
"mail2travel.com",
"mail2traveler.com",
"mail2travis.com",
"mail2trekkie.com",
"mail2trex.com",
"mail2triallawyer.com",
"mail2trick.com",
"mail2trillionaire.com",
"mail2troy.com",
"mail2truck.com",
"mail2trump.com",
"mail2try.com",
"mail2tunisia.com",
"mail2turbo.com",
"mail2turkey.com",
"mail2turkmenistan.com",
"mail2tv.com",
"mail2tycoon.com",
"mail2tyler.com",
"mail2u4me.com",
"mail2uae.com",
"mail2uganda.com",
"mail2uk.com",
"mail2ukraine.com",
"mail2uncle.com",
"mail2unsubscribe.com",
"mail2uptown.com",
"mail2uruguay.com",
"mail2usa.com",
"mail2utah.com",
"mail2uzbekistan.com",
"mail2v.com",
"mail2vacation.com",
"mail2valentines.com",
"mail2valerie.com",
"mail2valley.com",
"mail2vamoose.com",
"mail2vanessa.com",
"mail2vanuatu.com",
"mail2venezuela.com",
"mail2venous.com",
"mail2venus.com",
"mail2vermont.com",
"mail2vickie.com",
"mail2victor.com",
"mail2victoria.com",
"mail2vienna.com",
"mail2vietnam.com",
"mail2vince.com",
"mail2virginia.com",
"mail2virgo.com",
"mail2visionary.com",
"mail2vodka.com",
"mail2volleyball.com",
"mail2waiter.com",
"mail2wallstreet.com",
"mail2wally.com",
"mail2walter.com",
"mail2warren.com",
"mail2washington.com",
"mail2wave.com",
"mail2way.com",
"mail2waycool.com",
"mail2wayne.com",
"mail2webmaster.com",
"mail2webtop.com",
"mail2webtv.com",
"mail2weird.com",
"mail2wendell.com",
"mail2wendy.com",
"mail2westend.com",
"mail2westvirginia.com",
"mail2whether.com",
"mail2whip.com",
"mail2white.com",
"mail2whitehouse.com",
"mail2whitney.com",
"mail2why.com",
"mail2wilbur.com",
"mail2wild.com",
"mail2willard.com",
"mail2willie.com",
"mail2wine.com",
"mail2winner.com",
"mail2wired.com",
"mail2wisconsin.com",
"mail2woman.com",
"mail2wonder.com",
"mail2world.com",
"mail2worship.com",
"mail2wow.com",
"mail2www.com",
"mail2wyoming.com",
"mail2xfiles.com",
"mail2xox.com",
"mail2yachtclub.com",
"mail2yahalla.com",
"mail2yemen.com",
"mail2yes.com",
"mail2yugoslavia.com",
"mail2zack.com",
"mail2zambia.com",
"mail2zenith.com",
"mail2zephir.com",
"mail2zeus.com",
"mail2zipper.com",
"mail2zoo.com",
"mail2zoologist.com",
"mail2zurich.com",
"mail3000.com",
"mail4u.info",
"mailandftp.com",
"mailandnews.com",
"mailas.com",
"mailasia.com",
"mailbolt.com",
"mailbomb.net",
"mailboom.com",
"mailbox.as",
"mailbox.co.za",
"mailbox.gr",
"mailbox.hu",
"mailbox.r2.dns-cloud.net",
"mailbr.com.br",
"mailc.net",
"mailcan.com",
"mailcc.com",
"mailchoose.co",
"mailcity.com",
"mailclub.fr",
"mailclub.net",
"mailexcite.com",
"mailforce.net",
"mailftp.com",
"mailgate.gr",
"mailgenie.net",
"mailhaven.com",
"mailhood.com",
"mailinatorzz.mooo.com",
"mailinblack.com",
"mailingaddress.org",
"mailingweb.com",
"mailisent.com",
"mailite.com",
"mailme.dk",
"mailmight.com",
"mailmij.nl",
"mailnew.com",
"mailops.com",
"mailoye.com",
"mailpanda.com",
"mailpokemon.com",
"mailpost.zzn.com",
"mailpride.com",
"mailpuppy.com",
"mailroom.com",
"mailru.com",
"mailsent.net",
"mailservice.ms",
"mailshuttle.com",
"mailstart.com",
"mailstartplus.com",
"mailsurf.com",
"mailtag.com",
"mailto.de",
"mailueberfall.de",
"mailup.net",
"mailwire.com",
"mailworks.org",
"mailz.info.tm",
"maktoob.com",
"malayalamtelevision.net",
"mall.tko.co.kr",
"maltesemail.com",
"manager.de",
"mancity.net",
"mantrafreenet.com",
"mantramail.com",
"mantraonline.com",
"marchmail.com",
"mariah-carey.ml.org",
"mariahc.com",
"marijuana.com",
"marijuana.nl",
"marketing.lu",
"married-not.com",
"marsattack.com",
"martindalemail.com",
"mash4077.com",
"masrawy.com",
"matmail.com",
"mauimail.com",
"mauritius.com",
"maxleft.com",
"maxmail.co.uk",
"mbdnsmail.mooo.com",
"mbox.com.au",
"me-mail.hu",
"me.com",
"medical.net.au",
"medmail.com",
"medscape.com",
"meetingmall.com",
"megago.com",
"megamail.pt",
"megapoint.com",
"mehrani.com",
"mehtaweb.com",
"meine-dateien.info",
"meine-diashow.de",
"meine-fotos.info",
"meine-urlaubsfotos.de",
"mekhong.com",
"melodymail.com",
"meloo.com",
"members.student.com",
"memeware.net",
"mendoan.uu.gl",
"merda.flu.cc",
"merda.igg.biz",
"merda.nut.cc",
"merda.usa.cc",
"message.hu",
"messages.to",
"metacrawler.com",
"metalfan.com",
"metaping.com",
"metta.lk",
"mexicomail.com",
"mhdsl.ddns.net",
"miatadriver.com",
"miesto.sk",
"mighty.co.za",
"miho-nakayama.com",
"mikrotamanet.com",
"millionaireintraining.com",
"millionairemail.com",
"milmail.com",
"milmail.com15",
"mindless.com",
"mindspring.com",
"mini-mail.com",
"minister.com",
"misery.net",
"mittalweb.com",
"mixmail.com",
"mjfrogmail.com",
"ml1.net",
"mm.st",
"mns.ru",
"mobi.web.id",
"mobilbatam.com",
"mochamail.com",
"mohammed.com",
"moldova.cc",
"moldova.com",
"moldovacc.com",
"momslife.com",
"monemail.com",
"money.net",
"montevideo.com.uy",
"moonman.com",
"moose-mail.com",
"mor19.uu.gl",
"mortaza.com",
"mosaicfx.com",
"moscowmail.com",
"most-wanted.com",
"mostlysunny.com",
"motormania.com",
"movemail.com",
"movieluver.com",
"mp4.it",
"mr-potatohead.com",
"mrpost.com",
"mscold.com",
"msgbox.com",
"msn.cn",
"msn.com",
"msn.com.au",
"msn.nl",
"mttestdriver.com",
"multiplechoices",
"mundomail.net",
"munich.com",
"music.com",
"music.com19",
"musician.org",
"musicscene.org",
"muskelshirt.de",
"muslim.com",
"muslimemail.com",
"muslimsonline.com",
"mutantweb.com",
"mxp.dns-cloud.net",
"mxp.dnsabr.com",
"my.com",
"my.safe-mail.gq",
"mybox.it",
"mycabin.com",
"mycampus.com",
"mycity.com",
"mycool.com",
"mydomain.com",
"mydotcomaddress.com",
"myfamily.com",
"myfastmail.com",
"mygo.com",
"myiris.com",
"mymacmail.com",
"mynamedot.com",
"mynet.com",
"mynetaddress.com",
"myownemail.com",
"myownfriends.com",
"mypad.com",
"mypersonalemail.com",
"myplace.com",
"myrambler.ru",
"myrealbox.com",
"myremarq.com",
"myself.com",
"mystupidjob.com",
"mythirdage.com",
"myway.com",
"myworldmail.com",
"n2.com",
"n2baseball.com",
"n2business.com",
"n2mail.com",
"n2soccer.com",
"n2software.com",
"nabc.biz",
"nafe.com",
"nagpal.net",
"nakedgreens.com",
"name.com",
"nameplanet.com",
"nan.us.to",
"nandomail.com",
"naplesnews.net",
"naseej.com",
"nativestar.net",
"nativeweb.net",
"naui.net",
"naver.com",
"navigator.lv",
"navy.org",
"naz.com",
"nchoicemail.com",
"neeva.net",
"nemra1.com",
"nenter.com",
"neo.rr.com",
"nervhq.org",
"net-c.be",
"net-c.ca",
"net-c.cat",
"net-c.com",
"net-c.es",
"net-c.fr",
"net-c.it",
"net-c.lu",
"net-c.nl",
"net-c.pl",
"net-pager.net",
"net-shopping.com",
"net4b.pt",
"net4you.at",
"netbounce.com",
"netbroadcaster.com",
"netby.dk",
"netc.eu",
"netc.fr",
"netc.it",
"netc.lu",
"netc.pl",
"netcenter-vn.net",
"netcmail.com",
"netcourrier.com",
"netexecutive.com",
"netexpressway.com",
"netgenie.com",
"netian.com",
"netizen.com.ar",
"netlane.com",
"netlimit.com",
"netmanor.com",
"netmongol.com",
"netnet.com.sg",
"netnoir.net",
"netpiper.com",
"netposta.net",
"netradiomail.com",
"netralink.com",
"netscape.net",
"netscapeonline.co.uk",
"netspace.net.au",
"netspeedway.com",
"netsquare.com",
"netster.com",
"nettaxi.com",
"nettemail.com",
"netterchef.de",
"netzero.com",
"netzero.net",
"neue-dateien.de",
"neuro.md",
"newmail.com",
"newmail.net",
"newmail.ru",
"newsboysmail.com",
"newyork.com",
"nextmail.ru",
"nexxmail.com",
"nfmail.com",
"nhmail.com",
"nicebush.com",
"nicegal.com",
"nicholastse.net",
"nicolastse.com",
"nightmail.com",
"nikopage.com",
"nimail.com",
"ninfan.com",
"nirvanafan.com",
"noavar.com",
"nonpartisan.com",
"norika-fujiwara.com",
"norikomail.com",
"northgates.net",
"noways.ddns.net",
"nqav95zj0p.kro.kr",
"nqrk.luk2.com",
"ntscan.com",
"null.net",
"nullbox.info",
"nuo.co.kr",
"nur-fuer-spam.de",
"nus.edu.sg",
"nxt.ru",
"ny.com",
"nybella.com",
"nyc.com",
"nycmail.com",
"nzoomail.com",
"o-tay.com",
"o.spamtrap.ro",
"o2.co.uk",
"o2.pl",
"oaklandas-fan.com",
"oath.com",
"oceanfree.net",
"oddpost.com",
"odmail.com",
"office-dateien.de",
"office-email.com",
"officedomain.com",
"offroadwarrior.com",
"oicexchange.com",
"oikrach.com",
"okbank.com",
"okhuman.com",
"okmad.com",
"okmagic.com",
"okname.net",
"okuk.com",
"oldies1041.com",
"oldies104mail.com",
"ole.com",
"olemail.com",
"olympist.net",
"omaninfo.com",
"omeaaa124.ddns.net",
"omen.ru",
"onebox.com",
"onenet.com.ar",
"onet.com.pl",
"onet.eu",
"onet.pl",
"oninet.pt",
"online.de",
"online.ie",
"online.nl",
"onlinewiz.com",
"onmilwaukee.com",
"onobox.com",
"onvillage.com",
"op.pl",
"openmailbox.org",
"operafan.com",
"operamail.com",
"opoczta.pl",
"optician.com",
"optonline.net",
"optusnet.com.au",
"orange.fr",
"orbitel.bg",
"orgmail.net",
"orthodontist.net",
"osite.com.br",
"oso.com",
"otakumail.com",
"our-computer.com",
"our-office.com",
"our.st",
"ourbrisbane.com",
"ournet.md",
"outel.com",
"outgun.com",
"outlook.at",
"outlook.be",
"outlook.cl",
"outlook.co.id",
"outlook.co.il",
"outlook.co.nz",
"outlook.co.th",
"outlook.com",
"outlook.com.au",
"outlook.com.br",
"outlook.com.gr",
"outlook.com.pe",
"outlook.com.tr",
"outlook.com.vn",
"outlook.cz",
"outlook.de",
"outlook.dk",
"outlook.es",
"outlook.fr",
"outlook.hu",
"outlook.ie",
"outlook.in",
"outlook.it",
"outlook.jp",
"outlook.kr",
"outlook.lv",
"outlook.my",
"outlook.nl",
"outlook.ph",
"outlook.pt",
"outlook.sa",
"outlook.sg",
"outlook.sk",
"over-the-rainbow.com",
"ownmail.net",
"ozbytes.net.au",
"ozemail.com.au",
"pacbell.net",
"pacific-ocean.com",
"pacific-re.com",
"pacificwest.com",
"packersfan.com",
"pagina.de",
"pagons.org",
"pakistanmail.com",
"pakistanoye.com",
"palestinemail.com",
"parkjiyoon.com",
"parrot.com",
"parsmail.com",
"partlycloudy.com",
"partybombe.de",
"partyheld.de",
"partynight.at",
"parvazi.com",
"password.nafko.cf",
"passwordmail.com",
"pathfindermail.com",
"pconnections.net",
"pcpostal.com",
"pcsrock.com",
"pcusers.otherinbox.com",
"peachworld.com",
"pediatrician.com",
"pemail.net",
"penpen.com",
"peoplepc.com",
"peopleweb.com",
"perfectmail.com",
"perso.be",
"personal.ro",
"personales.com",
"petlover.com",
"petml.com",
"pettypool.com",
"pezeshkpour.com",
"phayze.com",
"phone.net",
"photographer.net",
"phpbb.uu.gl",
"phreaker.net",
"phus8kajuspa.cu.cc",
"physicist.net",
"pianomail.com",
"pickupman.com",
"picusnet.com",
"pigpig.net",
"pinoymail.com",
"piracha.net",
"pisem.net",
"pkwt.luk2.com",
"planet-mail.com",
"planet.nl",
"planetaccess.com",
"planetall.com",
"planetarymotion.net",
"planetdirect.com",
"planetearthinter.net",
"planetmail.com",
"planetmail.net",
"planetout.com",
"plasa.com",
"playersodds.com",
"playful.com",
"plus.com",
"plusmail.com.br",
"pmail.net",
"pobox.hu",
"pobox.sk",
"pochta.ru",
"poczta.fm",
"poczta.onet.pl",
"poetic.com",
"pokemonpost.com",
"pokepost.com",
"polandmail.com",
"polbox.com",
"policeoffice.com",
"politician.com",
"polizisten-duzer.de",
"pool-sharks.com",
"poond.com",
"popaccount.com",
"popmail.com",
"popsmail.com",
"popstar.com",
"populus.net",
"portableoffice.com",
"portugalmail.com",
"portugalmail.pt",
"portugalnet.com",
"positive-thinking.com",
"post.com",
"post.cz",
"post.sk",
"posta.net",
"posta.ro",
"posta.rosativa.ro.org",
"postaccesslite.com",
"postafree.com",
"postaweb.com",
"postfach.cc",
"postinbox.com",
"postino.ch",
"postmark.net",
"postmaster.co.uk",
"postpro.net",
"pousa.com",
"powerfan.com",
"praize.com",
"pray247.com",
"premiumservice.com",
"presidency.com",
"press.co.jp",
"pride.nafko.cf",
"priest.com",
"primposta.com",
"primposta.hu",
"pro.cloudns.asia",
"pro.hu",
"probemail.com",
"prodigy.net",
"progetplus.it",
"programist.ru",
"programmer.net",
"programozo.hu",
"proinbox.com",
"project2k.com",
"prolaunch.com",
"promessage.com",
"prontomail.com",
"prontomail.compopulus.net",
"protestant.com",
"protonmail.com",
"prydirect.info",
"psv-supporter.com",
"ptd.net",
"public-files.de",
"public.usa.com",
"publicist.com",
"pulp-fiction.com",
"purpleturtle.com",
"pw.r4.dns-cloud.net",
"q.com",
"qatarmail.com",
"qip.ru",
"qmail.com",
"qprfans.com",
"qq.cn",
"qq.com",
"qrio.com",
"qt1.ddns.net",
"quackquack.com",
"quakemail.com",
"qualityservice.com",
"quantentunnel.de",
"qudsmail.com",
"quepasa.com",
"quickhosts.com",
"quicknet.nl",
"quickwebmail.com",
"quiklinks.com",
"quikmail.com",
"qv7.info",
"qwest.net",
"qwestoffice.net",
"r-o-o-t.com",
"r4.dns-cloud.net",
"raakim.com",
"racedriver.com",
"racefanz.com",
"racingfan.com.au",
"racingmail.com",
"radicalz.com",
"radiku.ye.vc",
"radiologist.net",
"ragingbull.com",
"ralib.com",
"rambler.ru",
"ramjane.mooo.com",
"ranmamail.com",
"raspberrypi123.ddns.net",
"rastogi.net",
"ratt-n-roll.com",
"rattle-snake.com",
"raubtierbaendiger.de",
"ravearena.com",
"ravemail.com",
"razormail.com",
"rccgmail.org",
"realemail.net",
"reallyfast.biz",
"reallyfast.info",
"realradiomail.com",
"realtyagent.com",
"reborn.com",
"receiveee.chickenkiller.com",
"recycler.com",
"recyclermail.com",
"rediff.com",
"rediffmail.com",
"rediffmailpro.com",
"rednecks.com",
"redseven.de",
"redsfans.com",
"reggaefan.com",
"reggafan.com",
"regiononline.com",
"registerednurses.com",
"reincarnate.com",
"religious.com",
"renren.com",
"repairman.com",
"reply.hu",
"representative.com",
"rescueteam.com",
"resumemail.com",
"rezai.com",
"richmondhill.com",
"rickymail.com",
"rin.ru",
"riopreto.com.br",
"rk9.chickenkiller.com",
"rn.com",
"ro.ru",
"roadrunner.com",
"roanokemail.com",
"rock.com",
"rocketmail.com",
"rocketship.com",
"rockfan.com",
"rodrun.com",
"rogers.com",
"rome.com",
"romymichele.com",
"roosh.com",
"rootprompt.org",
"rotfl.com",
"roughnet.com",
"rr.com",
"rrohio.com",
"rsub.com",
"rubyridge.com",
"runbox.com",
"rushpost.com",
"ruttolibero.com",
"rvshop.com",
"rxdoc.biz",
"ryo.shp7.cn",
"s-mail.com",
"s6.weprof.it",
"sabreshockey.com",
"sacbeemail.com",
"saeuferleber.de",
"safarimail.com",
"safrica.com",
"sagra.lu",
"sagra.lumarketing.lu",
"sags-per-mail.de",
"sailormoon.com",
"saintly.com",
"saintmail.net",
"sale-sale-sale.com",
"salehi.net",
"salesperson.net",
"samerica.com",
"samilan.net",
"sammimail.com",
"sanfranmail.com",
"sanook.com",
"sapo.pt",
"sativa.ro.org",
"saudia.com",
"sayhi.net",
"sbcglobal.net",
"scandalmail.com",
"scarlet.nl",
"scbox.one.pl",
"schizo.com",
"schmusemail.de",
"schoolemail.com",
"schoolmail.com",
"schoolsucks.com",
"schreib-doch-mal-wieder.de",
"schweiz.org",
"sci.fi",
"science.com.au",
"scientist.com",
"scifianime.com",
"scotland.com",
"scotlandmail.com",
"scottishmail.co.uk",
"scubadiving.com",
"seanet.com",
"search.ua",
"searchwales.com",
"sebil.com",
"seckinmail.com",
"secret-police.com",
"secretary.net",
"secretservices.net",
"seductive.com",
"seekstoyboy.com",
"seguros.com.br",
"send.hu",
"sendme.cz",
"sent.as",
"sent.at",
"sent.com",
"sentrismail.com",
"serga.com.ar",
"servemymail.com",
"sesmail.com",
"sex.dns-cloud.net",
"sexmagnet.com",
"seznam.cz",
"sgbteam.hostingarif.me",
"shahweb.net",
"shaniastuff.com",
"shared-files.de",
"sharewaredevelopers.com",
"sharmaweb.com",
"she.com",
"shinedyoureyes.com",
"shitaway.cu.cc",
"shitaway.usa.cc",
"shockinmytown.cu.cc",
"shootmail.com",
"shortmail.com",
"shotgun.hu",
"shuf.com",
"sialkotcity.com",
"sialkotian.com",
"sialkotoye.com",
"sify.com",
"silkroad.net",
"sina.cn",
"sina.com",
"sinamail.com",
"singapore.com",
"singles4jesus.com",
"singmail.com",
"singnet.com.sg",
"singpost.com",
"skafan.com",
"skim.com",
"skizo.hu",
"sky.com",
"slamdunkfan.com",
"slingshot.com",
"slo.net",
"slotter.com",
"sm.westchestergov.com",
"smapxsmap.net",
"smileyface.comsmithemail.net",
"smoothmail.com",
"sms.at",
"snail-mail.net",
"snail-mail.ney",
"snakebite.com",
"sndt.net",
"snet.net",
"sniper.hu",
"snoopymail.com",
"snowboarding.com",
"snowdonia.net",
"socamail.com",
"socceramerica.net",
"soccermail.com",
"soccermomz.com",
"socialworker.net",
"sociologist.com",
"sofortmail.de",
"softhome.net",
"sogou.com",
"sohu.com",
"sol.dk",
"solcon.nl",
"soldier.hu",
"solution4u.com",
"songwriter.net",
"sonnenkinder.org",
"soon.com",
"soulfoodcookbook.com",
"sp.nl",
"space-bank.com",
"space-man.com",
"space-ship.com",
"space-travel.com",
"space.com",
"spaceart.com",
"spacebank.com",
"spacemart.com",
"spacetowns.com",
"spacewar.com",
"spainmail.com",
"spam.fassagforpresident.ga",
"spameater.com",
"spartapiet.com",
"spazmail.com",
"speedemail.net",
"speedpost.net",
"speedrules.com",
"speedrulz.com",
"speedymail.org",
"sperke.net",
"spils.com",
"spinfinder.com",
"spl.at",
"spoko.pl",
"sportemail.com",
"sportsmail.com",
"sporttruckdriver.com",
"spray.no",
"spray.se",
"spymac.com",
"srilankan.net",
"ssl-mail.com",
"st-davids.net",
"stade.fr",
"stalag13.com",
"stargateradio.com",
"starmail.com",
"starmail.org",
"starmedia.com",
"starplace.com",
"starspath.com",
"start.com.au",
"starting-point.com",
"startrekmail.com",
"stealthmail.com",
"stipte.nl",
"stockracer.com",
"stoned.com",
"stones.com",
"stop-my-spam.pp.ua",
"stopdropandroll.com",
"storksite.com",
"streber24.de",
"stribmail.com",
"strompost.com",
"strongguy.com",
"student.su",
"studentcenter.org",
"subnetwork.com",
"subram.com",
"sudanmail.net",
"suhabi.com",
"suisse.org",
"sukhumvit.net",
"sunpoint.net",
"sunrise-sunset.com",
"sunsgame.com",
"sunumail.sn",
"superdada.com",
"supereva.it",
"supermail.ru",
"surat.com",
"surf3.net",
"surfree.com",
"surfy.net",
"surgical.net",
"surimail.com",
"survivormail.com",
"swbell.net",
"sweb.cz",
"swedenmail.com",
"sweetville.net",
"swift-mail.com",
"swiftdesk.com",
"swingeasyhithard.com",
"swingfan.com",
"swipermail.zzn.com",
"swirve.com",
"swissinfo.org",
"swissmail.com",
"swissmail.net",
"switchboardmail.com",
"switzerland.org",
"sx172.com",
"syom.com",
"syriamail.com",
"t-online.de",
"t2mail.com",
"takuyakimura.com",
"talk21.com",
"talkcity.com",
"tamil.com",
"tampabay.rr.com",
"tankpolice.com",
"tatanova.com",
"taufik.sytes.net",
"taufikrt.ddns.net",
"tbwt.com",
"tds.net",
"teachermail.net",
"teachers.org",
"teamdiscovery.com",
"teamtulsa.net",
"tech-center.com",
"tech4peace.org",
"techemail.com",
"techie.com",
"techindo.web.id",
"technisamail.co.za",
"technologist.com",
"techpointer.com",
"techscout.com",
"techseek.com",
"techspot.com",
"teenagedirtbag.com",
"tele2.at",
"tele2.nl",
"telebot.com",
"telebot.net",
"teleline.es",
"telerymd.com",
"teleserve.dynip.com",
"telfort.nl",
"telfortglasvezel.nl",
"telinco.net",
"telkom.net",
"telpage.net",
"telstra.com",
"telstra.com.au",
"temp.cloudns.asia",
"tempmail.sytes.net",
"temtulsa.net",
"tenchiclub.com",
"tenderkiss.com",
"tennismail.com",
"terminverpennt.de",
"terra.cl",
"terra.com",
"terra.com.ar",
"terra.com.br",
"terra.es",
"test.com",
"test.de",
"tfanus.com.er",
"tfz.net",
"thai.com",
"thaimail.com",
"thaimail.net",
"the-african.com",
"the-airforce.com",
"the-aliens.com",
"the-american.com",
"the-animal.com",
"the-army.com",
"the-astronaut.com",
"the-beauty.com",
"the-big-apple.com",
"the-biker.com",
"the-boss.com",
"the-brazilian.com",
"the-canadian.com",
"the-canuck.com",
"the-captain.com",
"the-chinese.com",
"the-country.com",
"the-cowboy.com",
"the-davis-home.com",
"the-dutchman.com",
"the-eagles.com",
"the-englishman.com",
"the-fastest.net",
"the-fool.com",
"the-frenchman.com",
"the-galaxy.net",
"the-genius.com",
"the-gentleman.com",
"the-german.com",
"the-gremlin.com",
"the-hooligan.com",
"the-italian.com",
"the-japanese.com",
"the-lair.com",
"the-madman.com",
"the-mailinglist.com",
"the-marine.com",
"the-master.com",
"the-mexican.com",
"the-ministry.com",
"the-monkey.com",
"the-newsletter.net",
"the-pentagon.com",
"the-police.com",
"the-prayer.com",
"the-professional.com",
"the-quickest.com",
"the-russian.com",
"the-snake.com",
"the-spaceman.com",
"the-stock-market.com",
"the-student.net",
"the-whitehouse.net",
"the-wild-west.com",
"the18th.com",
"thecoolguy.com",
"thecriminals.com",
"thedarkmaster097.sytes.net",
"thedoghousemail.com",
"thedorm.com",
"theend.hu",
"theglobe.com",
"thegolfcourse.com",
"thegooner.com",
"theheadoffice.com",
"theinternetemail.com",
"thelanddownunder.com",
"themail.com",
"themillionare.net",
"theoffice.net",
"theplate.com",
"thepokerface.com",
"thepostmaster.net",
"theraces.com",
"theracetrack.com",
"therapist.net",
"thestreetfighter.com",
"theteebox.com",
"thewatercooler.com",
"thewebpros.co.uk",
"thewizzard.com",
"thewizzkid.com",
"thezhangs.net",
"thirdage.com",
"thisgirl.com",
"thoic.com",
"thundermail.com",
"tidni.com",
"timein.net",
"tiscali.at",
"tiscali.be",
"tiscali.co.uk",
"tiscali.it",
"tiscali.lu",
"tiscali.se",
"tkcity.com",
"tko.co.kr",
"toast.com",
"toke.com",
"tom.com",
"toolsource.com",
"toothfairy.com",
"topchat.com",
"topgamers.co.uk",
"topletter.com",
"topmail-files.de",
"topmail.com.ar",
"topsurf.com",
"topteam.bg",
"torchmail.com",
"torontomail.com",
"tortenboxer.de",
"totalmail.de",
"totalmusic.net",
"toughguy.net",
"tpg.com.au",
"travel.li",
"trialbytrivia.com",
"trimix.cn",
"tritium.net",
"trmailbox.com",
"tropicalstorm.com",
"truckers.com",
"truckerz.com",
"truckracer.com",
"truckracers.com",
"trust-me.com",
"truth247.com",
"truthmail.com",
"tsamail.co.za",
"ttml.co.in",
"tunisiamail.com",
"turboprinz.de",
"turboprinzessin.de",
"turkey.com",
"tut.by",
"tvstar.com",
"twc.com",
"twinstarsmail.com",
"tycoonmail.com",
"typemail.com",
"u2club.com",
"ua.fm",
"uae.ac",
"uaemail.com",
"ubbi.com",
"ubbi.com.br",
"uboot.com",
"uk2.net",
"uk2k.com",
"uk2net.com",
"uk7.net",
"uk8.net",
"ukbuilder.com",
"ukcool.com",
"ukdreamcast.com",
"ukmail.org",
"ukmax.com",
"ukr.net",
"uku.co.uk",
"ultapulta.com",
"ultrapostman.com",
"ummah.org",
"umpire.com",
"unbounded.com",
"unforgettable.com",
"uni.de",
"uni.demailto.de",
"unican.es",
"unihome.com",
"unite.cloudns.asia",
"universal.pt",
"uno.ee",
"uno.it",
"unofree.it",
"unomail.com",
"unterderbruecke.de",
"uol.com.ar",
"uol.com.br",
"uol.com.co",
"uol.com.mx",
"uol.com.ve",
"uole.com",
"uole.com.ve",
"uolmail.com",
"uomail.com",
"upc.nl",
"upcmail.nl",
"upf.org",
"ureach.com",
"urgentmail.biz",
"usa.com",
"usa.net",
"usaaccess.net",
"usanetmail.com",
"usermail.com",
"username.e4ward.com",
"usma.net",
"usmc.net",
"uswestmail.net",
"utanet.at",
"uymail.com",
"uyuyuy.com",
"v-sexi.com",
"vahoo.com",
"vampirehunter.com",
"varbizmail.com",
"vcmail.com",
"velnet.co.uk",
"velocall.com",
"verizon.net",
"verizonmail.com",
"verlass-mich-nicht.de",
"versatel.nl",
"veryfast.biz",
"veryspeedy.net",
"videotron.ca",
"vinbazar.com",
"violinmakers.co.uk",
"vip.126.com",
"vip.163.com",
"vip.21cn.com",
"vip.citiz.net",
"vip.gr",
"vip.onet.pl",
"vip.qq.com",
"vip.sina.com",
"vipmail.ru",
"virgilio.it",
"virgin.net",
"virginbroadband.com.au",
"virtual-mail.com",
"virtualactive.com",
"virtualmail.com",
"visitmail.com",
"visitweb.com",
"visto.com",
"visualcities.com",
"vivavelocity.com",
"vivianhsu.net",
"vjmail.com",
"vjr.luk2.com",
"vjtimail.com",
"vlmail.com",
"vnet.citiz.net",
"vnn.vn",
"vodafone.nl",
"vodafonethuis.nl",
"volcanomail.com",
"vollbio.de",
"volloeko.de",
"vorscorp.mooo.com",
"vorsicht-bissig.de",
"vorsicht-scharf.de",
"vote-democrats.com",
"vote-hillary.com",
"vote-republicans.com",
"vote4gop.org",
"votenet.com",
"vp.pl",
"vr9.com",
"w3.to",
"wahoye.com",
"wales2000.net",
"walla.co.il",
"walla.com",
"wam.co.za",
"wanadoo.es",
"wanadoo.fr",
"war-im-urlaub.de",
"warmmail.com",
"warpmail.net",
"warrior.hu",
"wasd.dropmail.me",
"waumail.com",
"wbdet.com",
"wearab.net",
"web-mail.com.ar",
"web-police.com",
"web.de",
"webave.com",
"webcammail.com",
"webcity.ca",
"webdream.com",
"webinbox.com",
"webindia123.com",
"webjump.com",
"webmail.bellsouth.net",
"webmail.co.yu",
"webmail.co.za",
"webmail.hu",
"webmails.com",
"webname.com",
"webprogramming.com",
"webstation.com",
"websurfer.co.za",
"webtopmail.com",
"weedmail.com",
"weekmail.com",
"weekonline.com",
"wegas.ru",
"wehshee.com",
"weibsvolk.de",
"weibsvolk.org",
"weinenvorglueck.de",
"welsh-lady.com",
"westnet.com.au",
"whale-mail.com",
"whartontx.com",
"wheelweb.com",
"whipmail.com",
"whoever.com",
"whoopymail.com",
"whtjddn.33mail.com",
"wickedmail.com",
"wideopenwest.com",
"wildmail.com",
"will-hier-weg.de",
"windowslive.com",
"windrivers.net",
"windstream.net",
"wingnutz.com",
"winmail.com.au",
"winning.com",
"wir-haben-nachwuchs.de",
"wir-sind-cool.org",
"wirsindcool.de",
"witty.com",
"wiz.cc",
"wkbwmail.com",
"wo.com.cn",
"woh.rr.com",
"wolf-web.com",
"wolke7.net",
"wombles.com",
"women-at-work.org",
"wonder-net.com",
"wongfaye.com",
"wooow.it",
"worker.com",
"workmail.com",
"worldemail.com",
"worldmailer.com",
"worldnet.att.net",
"wormseo.cn",
"wosaddict.com",
"wouldilie.com",
"wovz.cu.cc",
"wowgirl.com",
"wowmail.com",
"wowway.com",
"wp.pl",
"wptamail.com",
"wrestlingpages.com",
"wrexham.net",
"writeme.com",
"writemeback.com",
"wrongmail.com",
"wsfr.luk2.com",
"wtvhmail.com",
"wwdg.com",
"www.com",
"www.dmtc.edu.pl",
"www.e4ward.com",
"www2000.net",
"wx88.net",
"wxs.net",
"wyrm.supernews.com",
"x-mail.net",
"x-networks.net",
"x5g.com",
"xaker.ru",
"xaynetsss.ddns.net",
"xing886.uu.gl",
"xmail.com",
"xmastime.com",
"xms.nl",
"xmsg.com",
"xnmail.mooo.com",
"xoom.com",
"xoommail.com",
"xpressmail.zzn.com",
"xs4all.nl",
"xsecurity.org",
"xsmail.com",
"xtra.co.nz",
"xuno.com",
"xxx.sytes.net",
"xzapmail.com",
"y7mail.com",
"ya.ru",
"yada-yada.com",
"yaho.com",
"yahoo.ae",
"yahoo.ar",
"yahoo.at",
"yahoo.be",
"yahoo.ca",
"yahoo.ch",
"yahoo.cl",
"yahoo.cn",
"yahoo.co",
"yahoo.co.id",
"yahoo.co.il",
"yahoo.co.in",
"yahoo.co.jp",
"yahoo.co.kr",
"yahoo.co.nz",
"yahoo.co.th",
"yahoo.co.uk",
"yahoo.co.za",
"yahoo.com",
"yahoo.com.ar",
"yahoo.com.au",
"yahoo.com.br",
"yahoo.com.cn",
"yahoo.com.co",
"yahoo.com.hk",
"yahoo.com.is",
"yahoo.com.mx",
"yahoo.com.my",
"yahoo.com.ph",
"yahoo.com.ru",
"yahoo.com.sg",
"yahoo.com.tr",
"yahoo.com.tw",
"yahoo.com.vn",
"yahoo.cz",
"yahoo.de",
"yahoo.dk",
"yahoo.es",
"yahoo.fi",
"yahoo.fr",
"yahoo.gr",
"yahoo.hu",
"yahoo.id",
"yahoo.ie",
"yahoo.in",
"yahoo.it",
"yahoo.jp",
"yahoo.mx",
"yahoo.my",
"yahoo.nl",
"yahoo.no",
"yahoo.ph",
"yahoo.pl",
"yahoo.pt",
"yahoo.ro",
"yahoo.ru",
"yahoo.se",
"yahoo.uk",
"yahoo.vn",
"yahoofs.com",
"yalla.com",
"yalla.com.lb",
"yalook.com",
"yam.com",
"yandex.com",
"yandex.mail",
"yandex.pl",
"yandex.ru",
"yandex.ua",
"yapost.com",
"yawmail.com",
"yclub.com",
"yeah.net",
"yebox.com",
"yehaa.com",
"yehey.com",
"yemenmail.com",
"yepmail.net",
"yert.ye.vc",
"yesbox.net",
"yesey.net",
"yifan.net",
"ymail.com",
"ynnmail.com",
"yogotemail.com",
"yopmail.biz.st",
"yopmail.xxi2.com",
"yopolis.com",
"yopweb.com",
"youareadork.com",
"youmailr.com",
"youpy.com",
"your-house.com",
"your-mail.com",
"yourinbox.com",
"yourlifesucks.cu.cc",
"yourlover.net",
"yourname.ddns.org",
"yourname.freeservers.com",
"yournightmare.com",
"yours.com",
"yourssincerely.com",
"yoursubdomain.findhere.com",
"yoursubdomain.zzn.com",
"yourteacher.net",
"yourwap.com",
"youvegotmail.net",
"yuuhuu.net",
"yyhmail.com",
"zahadum.com",
"zcities.com",
"zdnetmail.com",
"zeeks.com",
"zeepost.nl",
"zensearch.net",
"zhaowei.net",
"ziggo.nl",
"zionweb.org",
"zip.net",
"zipido.com",
"ziplip.com",
"zipmail.com",
"zipmail.com.br",
"zipmax.com",
"zmail.ru",
"zoho.com",
"zonnet.nl",
"zoominternet.net",
"zubee.com",
"zuzzurello.com",
"zwallet.com",
"zweb.in",
"zybermail.com",
"zydecofan.com",
"zzn.com",
"zzom.co.uk",
"zzom.co.uk1033edge.com",
"zzom.co.ukgmail.com",
"zzom.co.ukhitmail.com",
"zzom.co.uksina.com0-mail.com",
)
)
|
import pytest
import pandas._testing as tm
class TestDataFrameTake:
def test_take(self, float_frame):
# homogeneous
order = [3, 1, 2, 0]
for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ["D", "B", "C", "A"]]
tm.assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
tm.assert_frame_equal(result, expected)
result = df.take(order, axis=0)
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ["C", "B", "D"]]
tm.assert_frame_equal(result, expected, check_names=False)
# illegal indices
msg = "indices are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 30], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -31], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 5], axis=1)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -5], axis=1)
def test_take_mixed_type(self, float_string_frame):
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ["foo", "B", "C", "A", "D"]]
tm.assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ["foo", "B", "D"]]
tm.assert_frame_equal(result, expected)
def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame):
# by dtype
order = [1, 2, 0, 3]
for df in [mixed_float_frame, mixed_int_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ["B", "C", "A", "D"]]
tm.assert_frame_equal(result, expected)
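    def test_take_axis0_matches_iloc(self):
        # Hedged sketch, not part of the original suite: positional take along
        # axis 0 should agree with iloc-based positional row selection.
        import pandas as pd
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        result = df.take([2, 0], axis=0)
        expected = df.iloc[[2, 0]]
        tm.assert_frame_equal(result, expected)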
|
import requests
baseURL = 'https://storytelling.blackrock.com/insights/api/stories/'
def getAllPages(cnt):
pages = []
for i in range(1, cnt+1):
params = {'page': i}
resp = requests.get(baseURL, params=params)
pages.append(resp.json())
return pages
def getAllStories(pages):
return [story for page in pages for story in page['results']]
if __name__ == "__main__":
pass
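    # Hedged usage sketch: the real page count is not specified anywhere in this
    # script, so 2 below is only a placeholder value, and running this performs
    # live HTTP requests against baseURL.
    example_pages = getAllPages(2)
    example_stories = getAllStories(example_pages)
    print("Fetched {} stories".format(len(example_stories)))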
|
""" This is the config file for the rig, it contains functions that set
sane defaults for different bits of the rig, at some point this may be
converted so that those defaults can be saved in the database, but that
may not really matter since these measures will be taken elsewhere too
This file will be used by rigIO during init so it is fine to call stuff directly
"""
clx_defaults={
'protocolPath':{
'HILL_RIG':'C:/tom_axon',
},
}
mcc_defaults={
'VC_Holding':-.06,
'VC_HoldingEnable':0,
'VC_PrimarySignal':0,
'VC_PrimarySignalGain':1,
'VC_PrimarySignalLPF':2000,
'IC_Holding':0,
'IC_HoldingEnable':0,
'IC_PrimarySignal':7,
'IC_PrimarySignalGain':1,
'IC_PrimarySignalLPF':2000,
}
esp_defaults={
'Speed':(1,.1),
'FeLim':8,
    #TODO kp, kd, or ki tuning needed to fix the FeLim nonsense
'MotorOn':0,
}
#TODO move the functions themselves into their own file?
def mcc(ctrl):
    # Keys in mcc_defaults have the form '<MODE>_<Setting>', e.g. 'VC_Holding':
    # key[:2] selects the clamp mode ('VC' or 'IC') and key[3:] names the setter,
    # so 'VC_Holding' becomes ctrl.SetMode(0) followed by ctrl.SetHolding(-.06).
    mcc_mode_dict={'VC':0,'IC':2}
    for channel in range(ctrl.mcNum):
        ctrl.selectMC(channel)
        for key,value in mcc_defaults.items():
            ctrl.SetMode(mcc_mode_dict[key[:2]])
            getattr(ctrl,'Set'+key[3:])(value)
def esp(ctrl):
for key,value in esp_defaults.items(): #FIXME some of these things do need state tracking...
if hasattr(value,'__iter__'):
getattr(ctrl,'set'+key)(*value)
else:
getattr(ctrl,'set'+key)(value)
def set_all_defaults(rigio):
#mcc(rigio.ctrlDict['mccControl']) #FIXME WARNING DANGEROUS!!! on a crash this could fry cells!
esp(rigio.ctrlDict['espControl'])
|
from typing import (
Union,
)
from ssz.exceptions import (
DeserializationError,
SerializationError,
)
from ssz.sedes.base import (
BaseCompositeSedes,
)
from ssz.utils import (
merkleize,
pack_bytes,
)
BytesOrByteArray = Union[bytes, bytearray]
class ByteVector(BaseCompositeSedes[BytesOrByteArray, bytes]):
def __init__(self, size: int) -> None:
if size < 0:
raise TypeError("Size cannot be negative")
self.size = size
#
# Size
#
is_fixed_sized = True
def get_fixed_size(self):
return self.size
#
# Serialization
#
def serialize(self, value: BytesOrByteArray) -> bytes:
if len(value) != self.size:
raise SerializationError(
f"Cannot serialize length {len(value)} byte-string as bytes{self.size}"
)
return value
#
# Deserialization
#
def deserialize(self, data: bytes) -> bytes:
if len(data) != self.size:
raise DeserializationError(
f"Cannot deserialize length {len(data)} data as bytes{self.size}"
)
return data
#
# Tree hashing
#
def hash_tree_root(self, value: bytes) -> bytes:
serialized_value = self.serialize(value)
return merkleize(pack_bytes(serialized_value))
bytes4 = ByteVector(4)
bytes32 = ByteVector(32)
bytes48 = ByteVector(48)
bytes96 = ByteVector(96)
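# Hedged usage sketch (not from the source): round-trip a 32-byte value through
# the fixed-size sedes defined above; the zero bytes are placeholder data.
if __name__ == "__main__":
    value = b"\x00" * 32
    encoded = bytes32.serialize(value)
    assert bytes32.deserialize(encoded) == value
    root = bytes32.hash_tree_root(value)
    assert len(root) == 32  # a merkleized root is a single 32-byte chunk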
|
from essence import World, System, Component, DuplicateComponentError, NoSuchComponentError
import pytest
from fixtures import world
def test_can_add_components_to_entities(world):
entity = world.create_entity()
component = Component()
assert not world.has_component(entity, Component)
world.add_component(entity, component)
assert world.has_component(entity, Component)
assert world.get_component(entity, Component) is component
def test_can_remove_components(world):
entity = world.create_entity()
component = Component()
world.add_component(entity, component)
assert world.has_component(entity, Component)
world.remove_component(entity, Component)
assert not world.has_component(entity, Component)
def test_adding_a_duplicate_component_is_an_error(world):
entity = world.create_entity()
world.add_component(entity, Component())
with pytest.raises(DuplicateComponentError):
world.add_component(entity, Component())
def test_getting_a_non_existent_component_is_an_error(world):
entity = world.create_entity()
with pytest.raises(NoSuchComponentError):
world.get_component(entity, Component)
def test_can_pass_a_default_when_getting_a_component(world):
entity = world.create_entity()
assert world.get_component(entity, Component, missing="foo") == "foo"
|
from __future__ import print_function
# Import smorgasbord
import sys
import os
import pdb
current_module = sys.modules[__name__]
import numpy as np
import scipy.stats
import scipy.ndimage
import scipy.spatial
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import astropy.wcs
import astropy.convolution
import ChrisFuncs
# Function to sum all elements in an ellipse centred on the middle of a given array
# Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
# Returns: Numpy array containing the sum of the pixel values in the ellipse, total number of pixels counted, and an array containing the pixel values
def EllipseSum(array, rad, axial_ratio, angle, i_centre, j_centre):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-rad])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+rad])))
j_cutout_min = int(np.floor(max([0, j_centre-rad])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+rad])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
        if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: EllipseSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Calculate flux & pixels in aperture, and store pixel values
ellipse_where = np.where( (ellipse_check<=1) & (np.isnan(array)==False) )
ellipse_tot = sum( array[ ellipse_where ] )
ellipse_count = ellipse_where[0].shape[0]
ellipse_pix = array[ ellipse_where ]
ellipse_nan = np.where( (ellipse_check<=1) & (np.isnan(array)==True) )
# Return results
return [ellipse_tot, ellipse_count, ellipse_pix, ellipse_nan]
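# Hedged usage sketch (not from the source): a circular aperture is just an
# ellipse with axial ratio 1, so a radius-5 sum over a flat array of ones
# returns a total and pixel count of roughly pi*5**2.
def _example_ellipse_sum():
    demo = np.ones([20, 20])
    tot, count, pix, nan_where = EllipseSum(demo, 5.0, 1.0, 0.0, 10, 10)
    return tot, count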
# Function to sum all elements in an annulus centred upon the middle of the given array
# Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
# Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
def AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+(rad_inner+width)])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
        if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: AnnulusSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner)
semi_min_inner = float(semi_maj_inner) / float(axial_ratio)
semi_maj_outer = float(rad_inner) + float(width)
semi_min_outer = float(semi_maj_outer) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create array identifying which coordinates lie within outer ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==False) )
annulus_tot = sum( array[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = array[ annulus_where ]
annulus_nan = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==True) )
# Return results
return [annulus_tot, annulus_count, annulus_pix, annulus_nan]
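# Hedged usage sketch (not from the source): with inner semi-major axis 8 and
# width 3 on a flat array of ones, the returned count is roughly pi*(11**2 - 8**2).
def _example_annulus_sum():
    demo = np.ones([40, 40])
    ann_tot, ann_count, ann_pix, ann_nan = AnnulusSum(demo, 8.0, 3.0, 1.0, 0.0, 20, 20)
    return ann_tot, ann_count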
# Function to make annular photometry faster by pre-preparing arrays of transposed coords that are to be repeatedly used
# Args: Array, position angle (deg), i & j coords of centre of ellipse
# Returns: List containing i & j transposed coords
def AnnulusQuickPrepare(array, angle, i_centre, j_centre):
# Convert input angle to radians
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
# Return results
return (i_trans, j_trans)
# Function to sum all elements in an annulus centred upon the middle of the given array, using pre-prepared transposed coord arrays
# Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, i & j transposed coord arrays
# Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
def AnnulusQuickSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+(rad_inner+width)])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
        if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: AnnulusQuickSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Make corresponding slices of transposed coord arrays
i_trans = i_trans[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
j_trans = j_trans[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner)
semi_min_inner = float(semi_maj_inner) / float(axial_ratio)
semi_maj_outer = float(rad_inner) + float(width)
semi_min_outer = float(semi_maj_outer) / float(axial_ratio)
angle = np.radians(float(angle))
# Use meshgrids to create array identifying which coordinates lie within inner & outer ellipses
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create array identifying which coordinates lie within outer ellipse
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==False) )
annulus_tot = sum( array[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = array[ annulus_where ]
# Return results
return [annulus_tot, annulus_count, annulus_pix]
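# Hedged usage sketch (not from the source): the point of the Prepare/QuickSum
# split is that the transposed coordinate grids are computed once and then
# reused across many annuli that share the same centre and position angle.
def _example_annulus_quick():
    demo = np.ones([50, 50])
    i_trans, j_trans = AnnulusQuickPrepare(demo, 45.0, 25, 25)
    return [AnnulusQuickSum(demo, rad, 2.0, 1.5, 45.0, 25, 25, i_trans, j_trans)
            for rad in (5.0, 10.0, 15.0)]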
# Function to sum all elements in an ellipse centred upon the middle of the given array, using pre-prepared transposed coord arrays
# Args: Array, semi-major axis of ellipse (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, i & j transposed coord arrays
# Returns: Numpy array containing the sum of the pixel values in the ellipse, the total number of pixels counted, and an array containing the pixel values
def EllipseQuickSum(array, rad, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-rad])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+rad])))
j_cutout_min = int(np.floor(max([0, j_centre-rad])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+rad])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
        if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: EllipseQuickSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Make corresponding slices of transposed coord arrays
i_trans = i_trans[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
j_trans = j_trans[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(semi_maj) / float(axial_ratio)
angle = np.radians(float(angle))
# Use meshgrids to create array identifying which coordinates lie within ellipses
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Calculate flux & pixels in aperture, and store pixel values
ellipse_where = np.where( (ellipse_check<=1) & (np.isnan(array)==False) )
ellipse_tot = sum( array[ ellipse_where ] )
ellipse_count = ellipse_where[0].shape[0]
ellipse_pix = array[ ellipse_where ]
# Return results
return [ellipse_tot, ellipse_count, ellipse_pix]
# Function to return a mask identifying all pixels within an ellipse of given parameters
# Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
# Returns: Mask array of same dimensions as input array where pixels that lie within ellipse have value 1
def EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre):
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Create ellipse mask
ellipse_mask = np.zeros([array.shape[0], array.shape[1]])
ellipse_mask[ np.where( ellipse_check<=1 ) ] = 1.0
# Return array
return ellipse_mask
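# Hedged usage sketch (not from the source): the mask sum approximates the
# ellipse area, here roughly pi * 8 * (8/2) pixels.
def _example_ellipse_mask():
    demo = np.zeros([40, 40])
    mask = EllipseMask(demo, 8.0, 2.0, 30.0, 20, 20)
    return mask.sum()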
# Function to sum all pixel elements inside a given circle... the old-fashioned way
# Args: Array to be used, i & j coordinates of centre of circle, radius of circle
# Returns: Sum of elements within circle, number of pixels within circle, and list of the pixel values
def CircleSum(fits, i_centre, j_centre, r):
i_centre, j_centre, r = int(i_centre), int(j_centre), int(r)
ap_sum = 0.0
ap_pix = 0.0
ap_values = []
for i in range(-r, r+1):
for j in range(-r, r+1):
if i**2.0 + j**2.0 <= r**2.0:
try:
ap_sum += fits[i_centre+i, j_centre+j]
ap_pix += 1.0
ap_values.append(fits[i_centre+i, j_centre+j])
except:
continue
return [ap_sum, ap_pix, ap_values]
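# Hedged usage sketch (not from the source): a radius-6 circular aperture on a
# flat array of ones returns a sum and pixel count of roughly pi*6**2.
def _example_circle_sum():
    demo = np.ones([30, 30])
    ap_sum, ap_pix, ap_values = CircleSum(demo, 15, 15, 6)
    return ap_sum, ap_pix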
# Function to sum all pixel elements inside a given circular annulus... the old-fashioned way
# Args: Array to be used, i & j coordinates of centre of annulus, inner radius of annulus (pix), width of annulus (pix)
# Returns: Sum of elements within annulus, number of pixels within annulus, and list of the pixel values
def CircleAnnulusSum(fits, i_centre, j_centre, r, width):
i_centre, j_centre, r, width = int(i_centre), int(j_centre), int(r), int(width)
ann_sum = 0.0
ann_pix = 0.0
ann_values = []
for i in range(-r, r+1):
for j in range(-r, r+1):
if (i**2.0 + j**2.0 > r**2.0) and (i**2.0 + j**2.0 <= (r+width)**2.0):
try:
ann_sum += fits[i_centre+i, j_centre+j]
ann_pix += 1.0
ann_values.append(fits[i_centre+i, j_centre+j])
except:
continue
return [ann_sum, ann_pix, ann_values]
# Function to sum all elements in an ellipse centred on the middle of an array that has been resized to allow better pixel sampling
# Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, upscaling factor
# Returns: Numpy array containing the sum of the pixel values in the ellipse, the total number of pixels counted, and an array containing the pixel values
def EllipseSumUpscale(cutout, rad, axial_ratio, angle, i_centre, j_centre, upscale=1):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-rad])))
i_cutout_max = int(np.ceil(min([(cutout.shape)[0], i_centre+rad])))
j_cutout_min = int(np.floor(max([0, j_centre-rad])))
j_cutout_max = int(np.ceil(min([(cutout.shape)[1], j_centre+rad])))
cutout_slice = cutout[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
# Produce integer versions of values
i_centre_int, j_centre_int = int(round(i_centre)), int(round(j_centre))
i_centre_slice_int, j_centre_slice_int = int(round(i_centre_slice)), int(round(j_centre_slice))
# Check that sliced coordinates are compatible with the original coordinates
    if cutout[i_centre_int,j_centre_int]!=cutout_slice[i_centre_slice_int,j_centre_slice_int]:
        if np.isnan(cutout[i_centre_int,j_centre_int])==False and np.isnan(cutout_slice[i_centre_slice_int,j_centre_slice_int])==False:
print('SEVERE ERROR: EllipseSumUpscale check failed.')
pdb.set_trace()
else:
cutout = cutout_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
    # Resize array to increase pixel sampling, update centre coords, and downscale pixel values accordingly to preserve flux
cutout_inviolate = np.copy(cutout)
cutout = np.zeros([cutout_inviolate.shape[0]*upscale, cutout_inviolate.shape[1]*upscale])
scipy.ndimage.zoom(cutout_inviolate, upscale, output=cutout, order=0)
cutout *= float(upscale)**-2.0
i_centre = float(i_centre) * float(upscale)
j_centre = float(j_centre) * float(upscale)
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad) * float(upscale)
semi_min = semi_maj / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, cutout.shape[0]-1, cutout.shape[0])
j_linespace = np.linspace(0, cutout.shape[1]-1, cutout.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Calculate flux & pixels in aperture, and store pixel values
ellipse_where = np.where( (ellipse_check<=1) & (np.isnan(cutout)==False) )
ellipse_tot = sum( cutout[ ellipse_where ] )
ellipse_count = ellipse_where[0].shape[0]
ellipse_pix = cutout[ ellipse_where ]
# Scale output values down to what they would've been for original array
ellipse_count *= float(upscale)**-2.0
# Return results
return [ellipse_tot, ellipse_count, ellipse_pix]
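# Hedged usage sketch (not from the source): upscaling by a factor of 2 samples
# the aperture edge more finely, while the flux rescaling keeps the returned
# total close to the un-upscaled value (about pi*5**2 for this flat array).
def _example_ellipse_sum_upscale():
    demo = np.ones([20, 20])
    tot, count, pix = EllipseSumUpscale(demo, 5.0, 1.0, 0.0, 10, 10, upscale=2)
    return tot, count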
# Function to sum all elements in an annulus centred upon the middle of an array that has been resized to allow better pixel sampling
# Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, upscaling factor
# Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
def AnnulusSumUpscale(cutout, rad_inner, width, axial_ratio, angle, i_centre, j_centre, upscale=1):
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(cutout.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(cutout.shape)[1], j_centre+(rad_inner+width)])))
cutout_slice = cutout[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
# Produce integer versions of values
i_centre_int, j_centre_int = int(round(i_centre)), int(round(j_centre))
i_centre_slice_int, j_centre_slice_int = int(round(i_centre_slice)), int(round(j_centre_slice))
# Check that sliced coordinates are compatible with the original coordinates
    if cutout[i_centre_int,j_centre_int]!=cutout_slice[i_centre_slice_int,j_centre_slice_int]:
        if np.isnan(cutout[i_centre_int,j_centre_int])==False and np.isnan(cutout_slice[i_centre_slice_int,j_centre_slice_int])==False:
print('SEVERE ERROR: AnnulusSumUpscale check failed.')
pdb.set_trace()
else:
cutout = cutout_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
    # Resize array to increase pixel sampling, update centre coords, and downscale pixel values accordingly to preserve flux
cutout_inviolate = np.copy(cutout)
cutout = np.zeros([cutout_inviolate.shape[0]*upscale, cutout_inviolate.shape[1]*upscale])
scipy.ndimage.zoom(cutout_inviolate, upscale, output=cutout, order=0)
cutout *= float(upscale)**-2.0
i_centre = float(i_centre) * float(upscale)
j_centre = float(j_centre) * float(upscale)
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner) * float(upscale)
semi_min_inner = semi_maj_inner / float(axial_ratio)
semi_maj_outer = ( float(rad_inner) * float(upscale) ) + ( float(width) * float(upscale) )
semi_min_outer = semi_maj_outer / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, cutout.shape[0]-1, cutout.shape[0])
j_linespace = np.linspace(0, cutout.shape[1]-1, cutout.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create cutout identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create cutout identifying which coordinates lie within outer ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(cutout)==False) )
annulus_tot = sum( cutout[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = cutout[ annulus_where ]
# Scale output values down to what they would've been for original array
annulus_count *= float(upscale)**-2.0
# Return results
return [annulus_tot, annulus_count, annulus_pix]
# Function to iteratively calculate SPIRE aperture noise of photometry cutout using randomly-placed (annular-background-subtracted) circular aperture
# Args: Map, area of aperture (pix), boolean of whether or not to sky-subtract the noise apertures, relative radius of inner edge of annulus, relative width of annulus, angle of source ellipse, axial ratio of source ellipse, number of apertures to place
# Returns: Aperture standard deviation, list of mean background values, list of sigma-clipped aperture sums, and list of raw aperture sums
def CircularApertureStandardDeviationFinder(fits, area, ann=True, ann_inner=1.5, ann_width=1.0, angle=0.0, axial_ratio=1.0, apertures=100):
# Calculate aperture's radius from given circular area
rad = np.sqrt(area/np.pi)
# Define width of border
if ann==True:
factor = ann_inner + ann_width
elif ann==False:
factor = 1.0
# Define exclusion zone
semi_maj = ( area / (axial_ratio*np.pi) )**0.5
source_i_centre, source_j_centre = int(round(float(fits.shape[0])/2.0)), int(round(float(fits.shape[1])/2.0))
exclusion_mask = ChrisFuncs.EllipseMask(fits, semi_maj, axial_ratio, angle, source_i_centre, source_j_centre)
    # Define initial values and initiate loop
if rad < 2:
rad = 2
# Define limits for fixed sigma-clip
fits_clip = ChrisFuncs.SigmaClip(fits[ np.where(exclusion_mask==0) ], sigma_thresh=5.0, median=True, no_zeros=True)
clip_upper = fits_clip[1] + (3.0 * fits_clip[0])
clip_lower = fits_clip[1] - (3.0 * fits_clip[0])
# Loop over apertures
ap_sum_array = []
bg_mean_array = []
rejects_array = []
clip_frac_array = []
old_school_ap_sum_array = []
a = 0
b = 0
sum_ratio_array = []
while a<apertures*2.5:
# Generate box coordinates, excluding cutout border, and target source
ap_border = int((factor*rad)+1)
neverending = 1
while neverending > 0:
b += 1
neverending = 1
i_centre = np.random.randint(ap_border, fits.shape[0]-ap_border)
j_centre = np.random.randint(ap_border, fits.shape[1]-ap_border)
# Do crude check that generated coords do not intersect source
if ( abs(0.5*fits.shape[0] - i_centre) < rad ) and ( abs(0.5*fits.shape[1] - j_centre) < rad ):
continue
# Do sophisticated check that generated coords do not intersect source
            exclusion_cutout = exclusion_mask[ int(round(i_centre-(rad+1))):int(round(i_centre+(rad+1))) , int(round(j_centre-(rad+1))):int(round(j_centre+(rad+1))) ]
exclusion_sum = np.sum(exclusion_cutout)
if exclusion_sum>0:
continue
else:
break
# Extract slice around aperture; if slice has either dimension 0, then continue
        cutout = fits[ int(round(i_centre-(rad*factor+1))):int(round(i_centre+(rad*factor+1))) , int(round(j_centre-(rad*factor+1))):int(round(j_centre+(rad*factor+1))) ]
box_centre = int(round(cutout.shape[0]/2.0))
if cutout.shape[0]==0 or cutout.shape[1]==0:
continue
# If background-subtraction required, measure sky annulus
if ann==True:
bg_phot = ChrisFuncs.AnnulusSum(cutout, ann_inner*rad, ann_width*rad, 1.0, 0.0, box_centre, box_centre)
bg_mean = ChrisFuncs.SigmaClip(np.array(bg_phot[2]))[1]
elif ann==False:
bg_mean = 0.0
# Measure sky aperture, and reject if it contains nothing but NaN pixels
ap_phot = ChrisFuncs.EllipseSum(cutout, rad, 1.0, 0.0, box_centre, box_centre)
old_school_ap_sum_array.append(ap_phot[0])
if ap_phot[2].shape[0] == 0:
continue
if np.where( ap_phot[2]==0 )[0].shape[0] == ap_phot[2].shape[0]:
continue
# Perform fixed sigma-clip of sky aperture; reject if >20% of pixels are clipped, otherwise scale flux for removed pixels
rejects_array.append(ap_phot[0])
ap_clipped = ap_phot[2][ np.where( (ap_phot[2]<clip_upper) & (ap_phot[2]>clip_lower) ) ]
ap_clipped = ap_clipped[ np.where( ap_clipped<clip_upper ) ]
ap_clipped = ap_clipped[ np.where( ap_clipped>clip_lower ) ]
clip_frac = float(ap_clipped.shape[0]) / float(ap_phot[2].shape[0])
if clip_frac<0.8:
continue
ap_sum = np.sum(ap_clipped) / clip_frac
sum_ratio_array.append(ap_sum/ap_phot[0])
clip_frac_array.append(clip_frac)
# Store calculated sum and tick up counter
a += 1
ap_sum_array.append(ap_sum)
if ann==True:
bg_mean_array.append(bg_mean)
#pdb.set_trace()
# Clip out the random apertures with the most extremal clip fractions, to get rid of stray light and so forth
clip_frac_threshold = np.median(clip_frac_array) + ( 2.0 * np.std(clip_frac_array) )
    ap_sum_array = np.array(ap_sum_array)
    bg_mean_array = np.array(bg_mean_array)
    clip_frac_array = np.array(clip_frac_array)
    old_school_ap_sum_array = np.array(old_school_ap_sum_array)
ap_sum_array = ap_sum_array[ np.where( (clip_frac_array<clip_frac_threshold) & (np.isnan(ap_sum_array)==False) ) ]
bg_mean_array = bg_mean_array[ np.where( (clip_frac_array<clip_frac_threshold) & (np.isnan(bg_mean_array)==False) ) ]
max_index = np.min([100, int(ap_sum_array.shape[0])])
ap_sum_array = ap_sum_array[:max_index]
# Now take standard deviation of output array to get a noise value as pure as the driven snow
ap_sigma = np.std(ap_sum_array)
rejects_array = np.array(rejects_array)
rejects_array = rejects_array[:max_index]
# Report calculated standard deviation
return ap_sigma, bg_mean_array, ap_sum_array, old_school_ap_sum_array
# Function to find all contiguous pixels that lie above a given flux limit
# Args: Array, radius of guess region (pix), i & j coords of centre of guess region, cutoff value for pixel selection, optional custom structure
# Returns: Array of ones and zeros indicating contiguous region
def ContiguousPixels(cutout, rad_initial, i_centre, j_centre, cutoff, custom_structure=False):
# Create version of cutout where significant pixels have value 1, insignificant pixels have value 0
cont_array_binary = np.zeros([(cutout.shape)[0], (cutout.shape)[1]])
cont_array_binary[np.where(cutout>=cutoff)[0], np.where(cutout>=cutoff)[1]] = 1
# Use SciPy's label function to identify contiguous features in binary map
if isinstance(custom_structure, bool) and custom_structure==False:
cont_structure = np.array([[0,1,0], [1,1,1], [0,1,0]])
else:
cont_structure = custom_structure
cont_array = np.zeros([(cutout.shape)[0], (cutout.shape)[1]])
scipy.ndimage.measurements.label(cont_array_binary, structure=cont_structure, output=cont_array)
# Identify primary contiguous feature within specified radius of given coordinates
cont_array_mask = ChrisFuncs.EllipseMask(cont_array, rad_initial, 1.0, 0.0, i_centre, j_centre)
cont_search_values = cont_array[ np.where( cont_array_mask==1 ) ]
# If no features found, only return central "cross" of pixels; otherwise, identify primary feature
if int(sum(cont_search_values)) == 0:
cont_array = np.zeros([(cutout.shape)[0], (cutout.shape)[1]])
cont_array[int(round(i_centre)), int(round(j_centre))] = 1
cont_array[int(round(i_centre))+1, int(round(j_centre))] = 1
cont_array[int(round(i_centre))-1, int(round(j_centre))] = 1
cont_array[int(round(i_centre)), int(round(j_centre))+1] = 1
cont_array[int(round(i_centre)), int(round(j_centre))-1] = 1
else:
# Take mode of values
cont_search_values = np.array(cont_search_values)
cont_target = scipy.stats.mode(cont_search_values[np.where(cont_search_values>0)])[0][0]
# Remove all features other than primary, set value of primary feature to 1
cont_array[np.where(cont_array!=cont_target)] = 0
cont_array[np.where(cont_array!=0)] = 1
# If feature contains fewer than 5 pixels, once again default to central "cross"
if np.sum(cont_array) < 5:
cont_array = np.zeros([(cutout.shape)[0], (cutout.shape)[1]])
cont_array[int(round(i_centre)), int(round(j_centre))] = 1
cont_array[int(round(i_centre))+1, int(round(j_centre))] = 1
cont_array[int(round(i_centre))-1, int(round(j_centre))] = 1
cont_array[int(round(i_centre)), int(round(j_centre))+1] = 1
cont_array[int(round(i_centre)), int(round(j_centre))-1] = 1
# Report array and count
return cont_array
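# A minimal usage sketch for ContiguousPixels: build a toy cutout containing a single bright blob
# and extract the contiguous feature around its centre. The blob position, size, and cutoff value
# below are arbitrary demo choices, not values used anywhere else in this module.
def _example_contiguous_pixels():
    toy = np.zeros([50, 50])
    toy[20:30, 20:30] = 10.0
    region = ContiguousPixels(toy, rad_initial=10, i_centre=25, j_centre=25, cutoff=5.0)
    return int(np.sum(region))  # number of pixels flagged as belonging to the central feature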
# Function that combines all of the ellipse-fitting steps (finds convex hull, fits ellipse to this, then finds properties of ellipse)
# Args: x & y coordinates to which the ellipse is to be fitted
# Returns: Array of x & y coordinates of ellipse centre, array of ellipse's major & minor axes, ellipse's position angle
def EllipseFit(x,y):
# Find convex hull of points
p = np.zeros([x.shape[0],2])
p[:,0], p[:,1] = x, y
h = []
for s in scipy.spatial.ConvexHull(p).simplices:
h.append(p[s[0]])
h.append(p[s[1]])
h = np.array(h)
x, y = h[:,0], h[:,1]
# Carry out ellipse-fitting witchcraft
x = x[:,np.newaxis]
y = y[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
# Calculate and return properties of ellipse
centre = np.real(ChrisFuncs.EllipseCentre(a))
axes = np.real(ChrisFuncs.EllipseAxes(a))
angle = (180.0/np.pi) * np.real(ChrisFuncs.EllipseAngle(a))
if axes[0]<axes[1]:
angle += 90.0
return np.array([centre, axes, angle, [x,y]])
# Function to calculate the coordinates of the centre of an ellipse produced by EllipseFit
# Args: Ellipse produced by EllipseFit
# Returns: Array of x & y coordinates of ellipse centre
def EllipseCentre(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
num = b*b-a*c
x0=(c*d-b*f)/num
y0=(a*f-b*d)/num
return np.array([x0,y0])
# Function to calculate the lengths of the axes of an ellipse produced by EllipseFit
# Args: Ellipse produced by EllipseFit
# Returns: Array of ellipse's major & minor axes
def EllipseAxes(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1=np.sqrt(up/down1)
res2=np.sqrt(up/down2)
return np.array([res1, res2])
# Function to calculate the position angle of an ellipse produced by EllipseFit
# Args: Ellipse produced by EllipseFit
# Returns: Ellipse's position angle
def EllipseAngle(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
return 0.5*np.arctan(2*b/(a-c))
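# Note on the algebra used by EllipseCentre, EllipseAxes and EllipseAngle above: the fit returns the
# coefficients of the general conic A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0. The helpers halve the
# cross and linear terms (b = B/2, d = D/2, f = E/2) so that the conic reads
#   a*x^2 + 2b*x*y + c*y^2 + 2d*x + 2f*y + g = 0,
# and then apply the standard closed-form expressions for that form, e.g. the centre is
#   x0 = (c*d - b*f) / (b^2 - a*c),  y0 = (a*f - b*d) / (b^2 - a*c),
# and the position angle is 0.5*arctan(2b / (a - c)), exactly as implemented above.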
"""
# GUIDE TO ELLIPTICAL APERTURES
I assume you also know the location of the ellipse's center. Call that (x0,y0).
Let t be the counterclockwise angle the major axis makes with respect to the
x-axis. Let a and b be the semi-major and semi-minor axes, respectively. If
P = (x,y) is an arbitrary point then do this:
X = (x-x0)*cos(t)+(y-y0)*sin(t); % Translate and rotate coords.
Y = -(x-x0)*sin(t)+(y-y0)*cos(t); % to align with ellipse
If
X^2/a^2+Y^2/b^2
is less than 1, the point P lies inside the ellipse. If it equals 1, it is right on
the ellipse. If it is greater than 1, P is outside.
"""
|
# Generated by Django 3.2.8 on 2021-11-05 00:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='created_date',
),
]
|
import gym
import numpy as np
from stable_baselines import DQN
from stable_baselines.common import atari_wrappers
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.vec_env import VecFrameStack
from stable_baselines.results_plotter import load_results, ts2xy, X_TIMESTEPS
from stable_baselines.common.callbacks import BaseCallback
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
env = atari_wrappers.make_atari('PongNoFrameskip-v4')
env = atari_wrappers.wrap_deepmind(env)
MODEL_SAVE_DIR = "."
dqn_agent = DQN(
policy='CnnPolicy', env=env,
exploration_fraction=0.1, exploration_final_eps=0.01, exploration_initial_eps=1.0,
train_freq=4, verbose=1
)
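# Callback that, every check_freq calls, computes the mean reward over the last 100 episodes and
# saves the model whenever that mean improves on the best value seen so far.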
class SaveOnBestTrainingRewardCallback(BaseCallback):
def __init__(self, check_freq, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.best_mean_episode_reward = -np.inf
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
print(self.locals["episode_rewards"][-100:])
mean_100ep_reward = np.mean(self.locals["episode_rewards"][-100:])
if self.verbose > 0:
# print(self.episode_rewards[-100:])
print("Steps: {} | Best mean reward: {:.2f} | Last mean reward per episode: {:.2f}".format(
self.num_timesteps,
self.best_mean_episode_reward,
mean_100ep_reward
))
# New best model, you could save the agent here
if mean_100ep_reward > self.best_mean_episode_reward:
self.best_mean_episode_reward = mean_100ep_reward
saved_file_name = MODEL_SAVE_DIR + "/dqn_pong_{0}_{1}".format(
self.n_calls, int(mean_100ep_reward)
)
# Example for saving best model
if self.verbose > 0:
print("Saving new best model to {}".format(saved_file_name))
self.model.save(saved_file_name)
print()
return True
callback = SaveOnBestTrainingRewardCallback(check_freq=1000)
MAX_STEPS = 1000000
dqn_agent = dqn_agent.learn(total_timesteps=MAX_STEPS, callback=callback)
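# A minimal sketch of restoring a saved checkpoint with DQN.load for later inference; the file name
# below is a hypothetical example of the naming pattern the callback produces, not an actual file.
# best_model = DQN.load(MODEL_SAVE_DIR + "/dqn_pong_100000_18")
# obs = env.reset()
# action, _states = best_model.predict(obs)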
|
import json
import pytest
from unittest import mock
from asynctest import patch
from blebox_uniapi.box import Box
from blebox_uniapi import error
pytestmark = pytest.mark.asyncio
@pytest.fixture
def mock_session():
return mock.MagicMock(host="172.1.2.3", port=80)
@pytest.fixture
def data():
return {
"id": "abcd1234ef",
"type": "airSensor",
"deviceName": "foobar",
"fv": "1.23",
"hv": "4.56",
"apiLevel": "20180403",
}
async def test_json_paths(mock_session, data):
box = Box(mock_session, data)
assert "foo" == box.follow(json.loads("""["foo"]"""), "[0]")
assert 4 == box.follow(
json.loads("""[{"foo":"3", "value":4}]"""), "[foo='3']/value"
)
assert 4 == box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=3]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=bc at .* within .*"):
box.follow(json.loads("""[{"foo":"ab", "value":4}]"""), "[foo='bc']/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""[{"value":4}]"""), "[1]/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "[1]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=7 at .* within .*"):
box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=7]/value")
with pytest.raises(
error.JPathFailed, match=r"item 'foo' not among \['value'\] at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"unexpected item type: 'foo' not in: \[4\] at .* within .*",
):
box.follow(json.loads("""[4]"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"list expected but got {'foo': \[4\]} at .* within .*",
):
box.follow(json.loads("""{"foo": [4]}"""), "[bar=0]/value")
async def test_without_id(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse, match="Device at 172.1.2.3:80 has no id"
):
del data["id"]
Box(mock_session, data)
async def test_without_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="Device:abcd1234ef at 172.1.2.3:80 has no type",
):
del data["type"]
Box(mock_session, data)
async def test_with_unknown_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(unknownBox:abcd1234ef/1.23 at 172.1.2.3:80\) is not a supported type",
):
data["type"] = "unknownBox"
Box(mock_session, data)
async def test_without_name(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="airSensor:abcd1234ef at 172.1.2.3:80 has no name",
):
del data["deviceName"]
Box(mock_session, data)
async def test_without_firmware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef at 172.1.2.3:80\) has no firmware version",
):
del data["fv"]
Box(mock_session, data)
async def test_without_hardware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has no hardware version",
):
del data["hv"]
Box(mock_session, data)
async def test_without_api_level(mock_session, data):
with pytest.raises(
error.UnsupportedBoxVersion,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has unsupported version",
):
del data["apiLevel"]
Box(mock_session, data)
async def test_with_init_failure(mock_session, data):
with patch(
"blebox_uniapi.box.AirQuality", spec_set=True, autospec=True
) as mock_sensor:
mock_sensor.side_effect = KeyError
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) failed to initialize: ",
):
Box(mock_session, data)
async def test_properties(mock_session, data):
box = Box(mock_session, data)
assert "foobar" == box.name
assert None is box.last_data
assert "airSensor" == box.type
assert "airSensor" == box.model
assert "abcd1234ef" == box.unique_id
assert "1.23" == box.firmware_version
assert "4.56" == box.hardware_version
assert "BleBox" == box.brand
assert 20180403 == box.api_version
async def test_validations(mock_session, data):
box = Box(mock_session, data)
with pytest.raises(
error.BadFieldExceedsMax,
match=r"foobar.field1 is 123 which exceeds max \(100\)",
):
box.check_int_range(123, "field1", 100, 0)
with pytest.raises(
error.BadFieldLessThanMin,
match=r"foobar.field1 is 123 which is less than minimum \(200\)",
):
box.check_int_range(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_int(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotANumber, match=r"foobar.field1 is '123' which is not a number"
):
box.check_int("123", "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_hex_str(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_hex_str(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_rgbw(None, "field1")
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_rgbw(123, "field1")
with pytest.raises(
error.BadFieldNotRGBW, match=r"foobar.field1 is 123 which is not a rgbw string"
):
box.check_rgbw("123", "field1")
|
import os
import sys
import glob
import time
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.mplot3d import Axes3D
from keras.utils import Sequence
from keras.callbacks import Callback
def _calc_plot_dim(n, f=0.3):
rows = max(int(np.sqrt(n) - f), 1)
cols = 1
while rows*cols < n:
cols += 1
return rows, cols
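# Keras Sequence that serves pre-saved batches (batch_*.npz files holding arr_0 = inputs and
# arr_1 = targets) and applies the augmentation/preprocessing helpers defined further down in
# this module to each batch on the fly.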
class DataGenerator(Sequence):
def __init__(self, data_path):
self.batch_ids = glob.glob(os.path.join(data_path, 'batch_*.npz'))
def apply_preprocessing(self, batch):
X, Y = batch
add_norm(X)
add_noise(X, c=0.1)
rand_shift_xy(X, c=0.02)
add_cutout(X, n_holes=5)
Y = [Y[:,:,:,i] for i in range(Y.shape[-1])]
minimum_to_zero(Y)
return X,Y
def load_batch(self, index):
file_path = self.batch_ids[index]
batch_object = np.load(file_path)
batch_data = (batch_object['arr_0'], batch_object['arr_1'])
return batch_data
def __len__(self):
return len(self.batch_ids)
def __getitem__(self, index):
batch = self.load_batch(index)
return self.apply_preprocessing(batch)
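# Callback that keeps a running record of the total and per-output training/validation losses
# (resuming from an existing CSV log if one is present) and redraws a log-scale loss plot at the
# end of every epoch.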
class HistoryPlotter(Callback):
def __init__(self, log_path, plot_path, loss_labels):
self.log_path = log_path
self.plot_path = plot_path
self.loss_labels = ['Total_weighted'] + loss_labels
self.read_log()
super(HistoryPlotter, self).__init__()
def read_log(self):
self.losses = []
self.val_losses = []
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as f:
f.readline()
for line in f:
line = line.split(',')
lt = []
lv = []
for i in range(len(self.loss_labels)):
lt.append(float(line[1+i]))
lv.append(float(line[1+len(self.loss_labels)+i]))
self.losses.append(lt)
self.val_losses.append(lv)
def on_epoch_end(self, epoch, logs):
lt = [logs['loss']]
lv = [logs['val_loss']]
for label in self.loss_labels[1:]:
lt.append(logs[label+'_loss'])
lv.append(logs['val_'+label+'_loss'])
self.losses.append(lt)
self.val_losses.append(lv)
self.plot()
def plot(self, show=False):
x = range(1, len(self.losses)+1)
n_rows, n_cols = _calc_plot_dim(len(self.loss_labels), f=0)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(4.5*n_cols, 4*n_rows))
for i, (label, ax) in enumerate(zip(self.loss_labels, axes.flatten())):
ax.semilogy(x, np.array(self.losses)[:,i])
ax.semilogy(x, np.array(self.val_losses)[:,i])
ax.legend(['Training', 'Validation'])
ax.set_xlabel('Epochs')
ax.set_ylabel('Loss')
ax.set_title(label)
fig.tight_layout()
plt.savefig(self.plot_path)
if show:
plt.show()
else:
plt.close()
def make_prediction_plots(preds, true=None, losses=None, descriptors=None, outdir='./predictions/', start_ind=0, verbose=1):
if true is None:
rows = 1
else:
rows = 2
if not isinstance(true, list):
true = [true]
if not isinstance(preds, list):
preds = [preds]
if descriptors is not None:
if len(descriptors) != len(preds):
raise ValueError('len(descriptors) = %d and len(preds) = %d do not match' % (len(descriptors), len(preds)))
if not os.path.exists(outdir):
os.makedirs(outdir)
cols = len(preds)
if losses is not None and losses.ndim < 1:
losses = np.expand_dims(losses, axis=0)
img_ind = start_ind
for i in range(preds[0].shape[0]):
fig, axes = plt.subplots(rows, cols)
fig.set_size_inches(6*cols, 5*rows)
if cols == 1:
axes = np.expand_dims(axes, axis=1)
if losses is not None and losses.ndim < 2:
losses = np.expand_dims(losses, axis=1)
for j in range(cols):
p = preds[j][i]
if true is not None:
t = true[j][i]
ax = axes[:, j]
vmax = np.concatenate([p,t]).flatten().max()
vmin = np.concatenate([p,t]).flatten().min()
else:
ax = [axes[j]]
vmax = p.flatten().max()
vmin = p.flatten().min()
title1 = ''
title2 = ''
cmap = cm.viridis
if descriptors is not None:
descriptor = descriptors[j]
title1 += descriptor+' Prediction'
title2 += descriptor+' Reference'
if descriptor == 'ES':
vmax = max(abs(vmax), abs(vmin))
vmin = -vmax
cmap = cm.coolwarm
if losses is not None:
title1 += '\nMSE = '+'{:.2E}'.format(losses[i,j])
im1 = ax[0].imshow(p, vmax=vmax, vmin=vmin, cmap=cmap, origin='lower')
if true is not None:
im2 = ax[1].imshow(t, vmax=vmax, vmin=vmin, cmap=cmap, origin='lower')
if title1 != '':
ax[0].set_title(title1)
if true is not None:
ax[1].set_title(title2)
for axi in ax:
pos = axi.get_position()
pos_new = [pos.x0, pos.y0, 0.8*(pos.x1-pos.x0), pos.y1-pos.y0]
axi.set_position(pos_new)
pos1 = ax[0].get_position()
if true is not None:
pos2 = ax[1].get_position()
c_pos = [pos1.x1+0.1*(pos1.x1-pos1.x0), pos2.y0, 0.08*(pos1.x1-pos1.x0), pos1.y1-pos2.y0]
else:
c_pos = [pos1.x1+0.1*(pos1.x1-pos1.x0), pos1.y0, 0.08*(pos1.x1-pos1.x0), pos1.y1-pos1.y0]
cbar_ax = fig.add_axes(c_pos)
fig.colorbar(im1, cax=cbar_ax)
save_name = outdir+str(img_ind)+'_pred.png'
plt.savefig(save_name)
plt.close()
if verbose > 0: print('Prediction saved to '+save_name)
img_ind += 1
def make_input_plots(Xs, outdir='./predictions/', start_ind=0, constant_range=True, cmap=cm.viridis, verbose=1):
if not os.path.exists(outdir):
os.makedirs(outdir)
if not isinstance(Xs, list):
Xs = [Xs]
img_ind = start_ind
for i in range(Xs[0].shape[0]):
for j in range(len(Xs)):
x = Xs[j][i]
rows, cols = _calc_plot_dim(x.shape[-1])
fig = plt.figure(figsize=(3.2*cols,2.5*rows))
vmax = x.max()
vmin = x.min()
for k in range(x.shape[-1]):
fig.add_subplot(rows,cols,k+1)
if constant_range:
plt.imshow(x[:,:,k], cmap = cmap, vmin=vmin, vmax=vmax, origin="lower")
else:
plt.imshow(x[:,:,k], cmap = cmap, origin="lower")
plt.colorbar()
save_name = outdir+str(img_ind)+'_input'
if len(Xs) > 1:
save_name += str(j+1)
save_name += '.png'
plt.savefig(save_name)
plt.close()
if verbose > 0: print('Input image saved to '+save_name)
img_ind += 1
def calculate_losses(model, true, preds=None, X=None):
import keras.backend as K
if preds is None and X is None:
raise ValueError('preds and X cannot both be None')
if preds is None:
preds = model.predict_on_batch(X)
if not isinstance(true, list):
true = [true]
if not isinstance(preds, list):
preds = [preds]
losses = np.zeros((true[0].shape[0], len(true)))
for i, (t, p) in enumerate(zip(true, preds)):
t = K.variable(t)
p = K.variable(p)
loss = model.loss_functions[i](t, p)
sh = loss.shape.as_list()
if len(sh) > 1:
loss = K.mean(K.reshape(loss, (sh[0],-1)), axis=1)
losses[:,i] = K.eval(loss)
if losses.shape[1] == 1:
losses = losses[:,0]
if losses.shape[0] == 1 and losses.ndim == 1:
losses = losses[0]
return losses
def minimum_to_zero(Y_):
if isinstance(Y_, list):
Ys = Y_
else:
Ys = [Y_]
for Y in Ys:
sh = Y.shape
for j in range(sh[0]):
Y[j,:,] = Y[j,:,] - np.amin(Y[j,:,])
def add_noise(X_, c=0.1 ):
if isinstance(X_, list):
Xs = X_
else:
Xs = [X_]
for X in Xs:
sh = X.shape
R = np.random.rand( sh[0], sh[1], sh[2], sh[3] ) - 0.5
for j in range(sh[0]):
for i in range(sh[3]):
vmin = X[j,:,:,i].min()
vmax = X[j,:,:,i].max()
X[j,:,:,i] += R[j,:,:,i] * c*(vmax-vmin)
def add_norm(X_):
if isinstance(X_, list):
Xs = X_
else:
Xs = [X_]
for X in Xs:
sh = X.shape
for j in range(sh[0]):
for i in range(sh[3]):
mean=np.mean(X[j,:,:,i])
sigma=np.std(X[j,:,:,i])
X[j,:,:,i]-= mean
X[j,:,:,i]= X[j,:,:,i]/ sigma
def rand_shift_xy(X_, c=0.02):
# c = percentage shift according to size of image in pixels. c=0.05 ~ 5 %
if isinstance(X_, list):
Xs = X_
else:
Xs = [X_]
for X in Xs:
sh= X.shape
max_y_shift=np.floor(sh[1]*c).astype(int)
max_x_shift=np.floor(sh[2]*c).astype(int)
for j in range(sh[0]):
for i in range(sh[3]):
rand_shift_y=random.choice(np.append(np.arange(-max_y_shift,0), np.arange(1,max_y_shift+1)))
rand_shift_x= random.choice(np.append(np.arange(-max_x_shift,0), np.arange(1,max_x_shift+1)))
shift_y=abs(rand_shift_y)
shift_x=abs(rand_shift_x)
a=X[j,:,:,i]
tmp=np.zeros((sh[1]+2*shift_y,sh[2]+2*shift_x))
tmp[shift_y:-shift_y,shift_x:-shift_x]=a
tmp[:shift_y,shift_x:-shift_x]=a[shift_y:0:-1,:]
tmp[-shift_y:,shift_x:-shift_x]=a[-2:-2-shift_y:-1,:]
tmp[:,-shift_x:]=tmp[:,-2-shift_x:-2-2*shift_x:-1]
tmp[:,:shift_x]=tmp[:,2*shift_x:shift_x:-1]
X[j,:,:,i]=tmp[shift_y-rand_shift_y:shift_y-rand_shift_y+sh[1],shift_x-rand_shift_x:shift_x-rand_shift_x+sh[2] ]
def add_cutout(X_, n_holes=5):
def get_random_eraser(input_img,p=0.2, s_l=0.001, s_h=0.01, r_1=0.1, r_2=1/0.1, v_l=0, v_h=0):
'''
p : the probability that random erasing is performed
s_l, s_h : minimum / maximum proportion of erased area against input image
r_1, r_2 : minimum / maximum aspect ratio of erased area
v_l, v_h : minimum / maximum value for erased area
'''
sh = input_img.shape
img_h, img_w= [sh[0],sh[1]]
if np.random.uniform(0, 1) > p:
return input_img
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
c = np.random.uniform(v_l, v_h)
# Fill the erased patch with a value drawn from [v_l, v_h] (0.0 under the default settings)
input_img[top:top + h, left:left + w] = c
return input_img
if isinstance(X_, list):
Xs = X_
else:
Xs = [X_]
for X in Xs:
sh = X.shape
for j in range(sh[0]):
for i in range(sh[3]):
for attempt in range(n_holes):
X[j,:,:,i]=get_random_eraser(X[j,:,:,i])
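# A minimal usage sketch for the augmentation helpers above, which operate in place on batches
# shaped (n_samples, height, width, n_channels); the array sizes and parameter values below are
# arbitrary demo choices.
def _example_augmentation_pipeline():
    X = np.random.rand(2, 64, 64, 3)   # toy input batch
    add_norm(X)                        # per-channel standardisation (zero mean, unit std)
    add_noise(X, c=0.1)                # uniform noise scaled to 10% of each channel's range
    rand_shift_xy(X, c=0.02)           # random mirrored shift of up to ~2% of the image size
    add_cutout(X, n_holes=5)           # up to n_holes random-erasing attempts per channel
    Y = [np.random.rand(2, 64, 64)]    # toy target maps
    minimum_to_zero(Y)                 # shift each target map so that its minimum is zero
    return X, Y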
|