"""
:py:mod:`pymco.ssl`
-------------------
Contains SSL security provider plugin.
"""
from __future__ import print_function
import base64
import os
try:
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
except ImportError as error:
    print('You need to install pycrypto to use the SSL security provider')
    raise error
from .. import exc
from . import SecurityProvider
from .. import utils
class SSLProvider(SecurityProvider):
"""Provide SSL security provider plugin.
See
http://docs.puppetlabs.com/mcollective/reference/plugins/security_ssl.html
for further information.
"""
def __init__(self, config):
super(SSLProvider, self).__init__(config=config)
self._private_key = None
self._server_public_key = None
self._caller_id = None
self._serializer = None
def sign(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.sign`."""
msg[':callerid'] = self.callerid
msg[':hash'] = self.get_hash(msg)
return msg
def verify(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.verify`."""
hash_ = SHA.new(msg[':body'].encode('utf8'))
verifier = PKCS1_v1_5.new(self.server_public_key)
signature = base64.b64decode(msg[':hash'])
if not verifier.verify(hash_, signature):
raise exc.VerificationError(
'Message {0} can\'t be verified'.format(msg))
return msg
def get_hash(self, msg):
"""Get the hash for the given message.
:arg pymco.message.Message msg: message to get hash for.
:return: message hash so the receiver can verify the message.
"""
hashed_signature = SHA.new(msg[':body'].encode('utf8'))
signer = PKCS1_v1_5.new(self.private_key)
hashed_signature = signer.sign(hashed_signature)
return base64.b64encode(hashed_signature)
@property
def callerid(self):
"""Property returning the MCollective SSL caller id.
As MCollective docs states, the caller ID will be the name of public
key filename, without the extension part.
"""
if not self._caller_id:
caller_id = os.path.basename(
self.config['plugin.ssl_client_public']).split('.')[0]
self._caller_id = 'cert={0}'.format(caller_id)
return self._caller_id
    def _load_rsa_key(self, key, cache_attr):
        # Cache the key on the named attribute so the private key and the
        # server public key do not share a single cache slot.
        if not getattr(self, cache_attr):
            setattr(self, cache_attr, utils.load_rsa_key(self.config[key]))
        return getattr(self, cache_attr)
    @property
    def server_public_key(self):
        """Property returning the server public key after being loaded."""
        return self._load_rsa_key(key='plugin.ssl_server_public',
                                  cache_attr='_server_public_key')
    @property
    def private_key(self):
        """Property returning the private key after being loaded."""
        return self._load_rsa_key(key='plugin.ssl_client_private',
                                  cache_attr='_private_key')
@property
def serializer(self):
"""Property returning the serializer object.
        Serializer object should be a subclass of
        :py:class:`pymco.serializer.Serializer`, depending on configuration.
        However, right now only YAML serialization is supported, since the
        default serializer (Marshal) isn't portable.
"""
if not self._serializer:
self._serializer = self.config.get_serializer('plugin.ssl_serializer')
return self._serializer
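# A minimal usage sketch (illustrative only; it assumes a pymco ``Config``
# object whose ``plugin.ssl_client_private`` / ``plugin.ssl_client_public`` /
# ``plugin.ssl_server_public`` options point at existing PEM files; the
# config-loading call is an assumption, not something defined here):
#
#     from pymco.config import Config
#     provider = SSLProvider(Config.from_configfile('client.cfg'))
#     signed = provider.sign({':body': 'ping'})  # adds :callerid and :hash
#     # verify() checks a server-signed reply and raises
#     # pymco.exc.VerificationError when the signature does not match:
#     provider.verify(server_reply)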
|
from shapely.geometry import Polygon
from helper import *
from pyhdf.SD import SD, SDC
from my_functions import *
import numpy as np
class fpar_utils:
def __init__(self):
self.west = -124.457906126032
self.east = -69.2724108461701
self.north = 50.0000009955098
self.south = 30.0000009964079
self.lat_num = 3678
self.lon_num = 10145
self.lon_interval = self.east - self.west
self.lat_interval = self.north - self.south
self.fpar = None
self.qc = None
    # input: the path of the fpar HDF file
    # returns True if the data was read successfully; otherwise falls back to
    # previously loaded data and raises AssertionError if there is none
    def read_fpar(self, path):
        try:
            data = SD(path, SDC.READ)
            self.fpar = np.array(data.select('Fpar_500m')[:])
            self.qc = np.array(data.select('FparExtra_QC')[:])
            self._laiqc = np.array(data.select('FparLai_QC')[:])
            return True
        except Exception:
            assert self.fpar is not None and self.qc is not None
            return False
#get the fpar data indices inside a bounding box
#input lb: left bottom, lu: left up, rb: right bottom , ru: right up coordinates
#return the indices of fpar box
def get_fpar_indices_by_box(self, lu, ru, rb, lb, fpar_data):
p1 = self.coords_to_ind(lu[0], lu[1])
p2 = self.coords_to_ind(ru[0], ru[1])
p3 = self.coords_to_ind(rb[0], rb[1])
p4 = self.coords_to_ind(lb[0], lb[1])
polygon = Polygon((p1, p2, p3, p4))
indices = points_inside_polygon(polygon, p1, p2, p3, p4)
return indices
def coords_to_ind(self, lat, lon):
"""
input latitude and longitude
return the according indices on fpar grid
"""
lon_diff = lon - self.west
lat_diff = self.north - lat
lon_ind = int(lon_diff / self.lon_interval * self.lon_num)
lat_ind = int(lat_diff / self.lat_interval * self.lat_num)
return (lat_ind, lon_ind)
def get_fpar_coords(self, lat_ind, lon_ind):
"""
input lat and lon indices on modis fpar data
return the according latitude and longitude of that data
"""
lat = self.north - lat_ind / self.lat_num * (self.north - self.south)
lon = self.west + lon_ind / self.lon_num * (self.east - self.west)
return (lon, lat)
#input the lat and lon indices on modis data
#output the four coordinates of that bounding box
    def get_fpar_box(self, lat_ind, lon_ind):
        try:
            lu = self.get_fpar_coords(lat_ind, lon_ind)
            lb = self.get_fpar_coords(lat_ind + 1, lon_ind)
            ru = self.get_fpar_coords(lat_ind, lon_ind + 1)
            rb = self.get_fpar_coords(lat_ind + 1, lon_ind + 1)
            return [lb, rb, ru, lu]
        except Exception:
            # invalid indices: signal failure instead of silently asserting
            return None
def get_fpar_bound(self, lat_ind, lon_ind):
"""
        input: lat index, lon index of the fpar data (upper-left corner)
        returns: the [min_lon, max_lon, min_lat, max_lat] of the corresponding fpar bounding box
"""
assert lat_ind >= 0 and lon_ind >= 0
lu = self.get_fpar_coords(lat_ind, lon_ind)
lb = self.get_fpar_coords(lat_ind+1, lon_ind)
ru = self.get_fpar_coords(lat_ind, lon_ind+1)
rb = self.get_fpar_coords(lat_ind+1, lon_ind+1)
max_lat, min_lat = max(lu[1], lb[1]), min(lu[1], lb[1])
max_lon, min_lon = max(lu[0], ru[0]), min(lu[0], ru[0])
return [min_lon, max_lon, min_lat, max_lat]
#input the indices of fpar data
#return the corresponding fpar values and qc values
#parameter: fpar_dat: the global fpar data, fpar_qc: the global qc data, indices: the corresponding indices
def get_fpar_by_indices(self,fpar_dat, fpar_qc, indices):
fp_values = []
qc_values = []
for i in range(len(indices)):
row, col = indices[i, 0], indices[i, 1]
fp_data = fpar_dat[row, col]
fp_qc = fpar_qc[row, col]
fp_values.append(fp_data)
qc_values.append(fp_qc)
return fp_values, qc_values
def get_fpar_directedly(self, lo, hi, left, right):
"""
@input the four borders of the fpar array
@return the sliced fpar matrix
"""
return self.fpar[lo:hi, left:right], self.qc[lo:hi, left:right], self._laiqc[lo:hi, left:right]
def strong_filter_fpar(self, fpar, qc, laiqc):
"""
@given fpar, extra qc and lai qc
@return the filtered fpar
Args:
fpar, qc, laiqc
Returns:
strongly filtered fpar, missing value filled with -1
"""
ret = np.zeros(fpar.shape)
for i in range(qc.shape[0]):
for j in range(qc.shape[1]):
lai_qc_info = convert_binary(laiqc[i,j])
extra_qc_info = convert_binary(qc[i,j])[1:]
if lai_qc_info == '00000000' and (extra_qc_info == '0000000' or extra_qc_info == '0001000'):
ret[i,j] = fpar[i,j]
else:
ret[i,j] = np.NaN
return ret
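# Quick sanity check of the grid/coordinate helpers above. No HDF file is
# needed; the point (40N, 100W) is just an illustrative location inside the
# grid described by the class constants.
if __name__ == "__main__":
    fu = fpar_utils()
    lat_ind, lon_ind = fu.coords_to_ind(40.0, -100.0)
    print("grid indices for (40N, 100W):", lat_ind, lon_ind)
    print("back to (lon, lat):", fu.get_fpar_coords(lat_ind, lon_ind))
    print("cell bounds [min_lon, max_lon, min_lat, max_lat]:",
          fu.get_fpar_bound(lat_ind, lon_ind))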
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
import fibo
fibo.fib(1000)
print(fibo.fib2(100))
print("fibo模块名:", fibo.__name__)
name = input('Please enter your name: ')
print(name != None)
print(name is not None)
if name is not None:
print(name)
else:
print('''not a
nothing''')
print('''
not a
number''')
print()
oid = ObjectId()
print(oid.__str__())
print(oid.__repr__())
for n in range(10):
print(ObjectId().__str__())
print()
# Default arguments should point to immutable objects; otherwise it is easy to fall into a trap
def add_end(L=[]):
L.append('END')
return L
# A modified version of the function above
def add_end2(L=None):
if L is None:
L = []
L.append('END')
return L
print(add_end())
print(add_end())
print()
print(add_end2())
print(add_end2())
print()
def fact(n):
if n == 1:
return 1
return n * fact(n - 1)
print(fact(100))
d = {'a': 1, 'b': 2, 'c': 3}
for key in d:
print(key)
for k, v in d.items():
print(k, ':', v)
|
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import sys
from org.slf4j import LoggerFactory
from com.ziclix.python.sql import zxJDBC
from wherehows.common import Constant
class HdfsLoad:
def __init__(self):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
def load_metadata(self):
"""
Load dataset metadata into final table
:return: nothing
"""
cursor = self.conn_mysql.cursor()
load_cmd = '''
DELETE FROM stg_dict_dataset WHERE db_id = '{db_id}';
LOAD DATA LOCAL INFILE '{source_file}'
INTO TABLE stg_dict_dataset
FIELDS TERMINATED BY '\Z' ESCAPED BY '\0'
(`name`, `schema`, properties, fields, urn, source, sample_partition_full_path, source_created_time, source_modified_time)
SET db_id = {db_id},
wh_etl_exec_id = {wh_etl_exec_id};
-- clear
DELETE FROM stg_dict_dataset
where db_id = {db_id}
AND (length(`name`) = 0
OR name like 'tmp\_%'
OR urn like '%/\_tmp'
OR urn like '%/\_distcp\_%')
;
update stg_dict_dataset
set location_prefix =
case
when source in ('Espresso', 'Oracle', 'Hdfs', 'Kafka', 'Hive')
then substring_index(substring_index(urn, '/', 5), '/', -3) /* get the leading 2 levels */
when urn like '%:///%/%'
then substring_index(substring_index(urn, '/', 4), '/', -2) /* get the leading 1 level */
end
WHERE db_id = {db_id} and location_prefix is null;
-- fix for some edge cases
update stg_dict_dataset
set name = substring_index(urn, '/', -2)
where db_id = {db_id}
and name in ('1.0', '2.0', '3.0', '4.0', '0.1', '0.2', '0.3', '0.4', 'dedup', '1-day', '7-day');
-- update parent name, this depends on the data from source system
update stg_dict_dataset
set parent_name =
case
when urn like 'hdfs:///data/external/gobblin/%'
then substring_index(substring_index(urn, '/', 7), '/', -1)
when (urn like 'hdfs:///%data/databases/%' or urn like 'hdfs:///%data/dbchanges/%' or urn like 'hdfs:///data/external/%')
then substring_index(substring_index(urn, '/', 5), '/', -1)
when (urn like 'hdfs:///%data/tracking/%' or urn like 'hdfs:///data/service/%' or urn like 'hdfs:///%data/derived/%')
then substring_index(substring_index(urn, '/', 4), '/', -1)
else substring_index(substring_index(urn, '/', 4), '/', -1)
end
where db_id = {db_id} and parent_name is null
;
-- insert into final table
INSERT INTO dict_dataset
( `name`,
`schema`,
schema_type,
fields,
properties,
urn,
source,
location_prefix,
parent_name,
storage_type,
ref_dataset_id,
status_id,
dataset_type,
hive_serdes_class,
is_partitioned,
partition_layout_pattern_id,
sample_partition_full_path,
source_created_time,
source_modified_time,
created_time,
wh_etl_exec_id
)
select s.name, s.schema, s.schema_type, s.fields,
s.properties, s.urn,
s.source, s.location_prefix, s.parent_name,
s.storage_type, s.ref_dataset_id, s.status_id,
s.dataset_type, s.hive_serdes_class, s.is_partitioned,
s.partition_layout_pattern_id, s.sample_partition_full_path,
s.source_created_time, s.source_modified_time, UNIX_TIMESTAMP(now()),
s.wh_etl_exec_id
from stg_dict_dataset s
where s.db_id = {db_id}
on duplicate key update
`name`=s.name, `schema`=s.schema, schema_type=s.schema_type, fields=s.fields,
properties=s.properties, source=s.source, location_prefix=s.location_prefix, parent_name=s.parent_name,
storage_type=s.storage_type, ref_dataset_id=s.ref_dataset_id, status_id=s.status_id,
dataset_type=s.dataset_type, hive_serdes_class=s.hive_serdes_class, is_partitioned=s.is_partitioned,
partition_layout_pattern_id=s.partition_layout_pattern_id, sample_partition_full_path=s.sample_partition_full_path,
source_created_time=s.source_created_time, source_modified_time=s.source_modified_time,
modified_time=UNIX_TIMESTAMP(now()), wh_etl_exec_id=s.wh_etl_exec_id
;
analyze table dict_dataset;
'''.format(source_file=self.input_file, db_id=self.db_id, wh_etl_exec_id=self.wh_etl_exec_id)
for state in load_cmd.split(";"):
self.logger.debug(state)
cursor.execute(state)
self.conn_mysql.commit()
cursor.close()
def load_field(self):
cursor = self.conn_mysql.cursor()
load_field_cmd = '''
DELETE FROM stg_dict_field_detail where db_id = {db_id};
LOAD DATA LOCAL INFILE '{source_file}'
INTO TABLE stg_dict_field_detail
FIELDS TERMINATED BY '\Z'
(urn, sort_id, parent_sort_id, parent_path, field_name, data_type,
is_nullable, default_value, data_size, namespace, description)
SET db_id = {db_id};
-- show warnings limit 20;
analyze table stg_dict_field_detail;
update stg_dict_field_detail
set description = null
where db_id = {db_id}
and (char_length(trim(description)) = 0
or description in ('null', 'N/A', 'nothing', 'empty', 'none'));
insert into field_comments (
user_id, comment, created, comment_crc32_checksum
)
select 0 user_id, description, now() created, crc32(description) from
(
select sf.description
from stg_dict_field_detail sf left join field_comments fc
on sf.description = fc.comment
where sf.description is not null
and fc.id is null
and sf.db_id = {db_id}
group by 1 order by 1
) d;
analyze table field_comments;
        -- delete old records that no longer exist in this load batch (but still have a dataset id)
create temporary table if not exists t_deleted_fields (primary key (field_id))
select x.field_id
from stg_dict_field_detail s
join dict_dataset i
on s.urn = i.urn
and s.db_id = {db_id}
right join dict_field_detail x
on i.id = x.dataset_id
and s.field_name = x.field_name
and s.parent_path = x.parent_path
where s.field_name is null
and x.dataset_id in (
select d.id dataset_id
from stg_dict_field_detail k join dict_dataset d
on k.urn = d.urn
and k.db_id = {db_id}
)
; -- run time : ~2min
delete from dict_field_detail where field_id in (select field_id from t_deleted_fields);
        -- update the old record if something changed
update dict_field_detail t join
(
select x.field_id, s.*
from stg_dict_field_detail s join dict_dataset d
on s.urn = d.urn
join dict_field_detail x
on s.field_name = x.field_name
and coalesce(s.parent_path, '*') = coalesce(x.parent_path, '*')
and d.id = x.dataset_id
where s.db_id = {db_id}
and (x.sort_id <> s.sort_id
or x.parent_sort_id <> s.parent_sort_id
or x.data_type <> s.data_type
or x.data_size <> s.data_size or (x.data_size is null XOR s.data_size is null)
or x.data_precision <> s.data_precision or (x.data_precision is null XOR s.data_precision is null)
or x.is_nullable <> s.is_nullable or (x.is_nullable is null XOR s.is_nullable is null)
or x.is_partitioned <> s.is_partitioned or (x.is_partitioned is null XOR s.is_partitioned is null)
or x.is_distributed <> s.is_distributed or (x.is_distributed is null XOR s.is_distributed is null)
or x.default_value <> s.default_value or (x.default_value is null XOR s.default_value is null)
or x.namespace <> s.namespace or (x.namespace is null XOR s.namespace is null)
)
) p
on t.field_id = p.field_id
set t.sort_id = p.sort_id,
t.parent_sort_id = p.parent_sort_id,
t.data_type = p.data_type,
t.data_size = p.data_size,
t.data_precision = p.data_precision,
t.is_nullable = p.is_nullable,
t.is_partitioned = p.is_partitioned,
t.is_distributed = p.is_distributed,
t.default_value = p.default_value,
t.namespace = p.namespace,
t.modified = now()
;
insert into dict_field_detail (
dataset_id, fields_layout_id, sort_id, parent_sort_id, parent_path,
field_name, namespace, data_type, data_size, is_nullable, default_value,
default_comment_id, modified
)
select
d.id, 0, sf.sort_id, sf.parent_sort_id, sf.parent_path,
sf.field_name, sf.namespace, sf.data_type, sf.data_size, sf.is_nullable, sf.default_value,
coalesce(fc.id, t.default_comment_id) fc_id, now()
from stg_dict_field_detail sf join dict_dataset d
on sf.urn = d.urn
left join field_comments fc
on sf.description = fc.comment
left join dict_field_detail t
on d.id = t.dataset_id
and sf.field_name = t.field_name
and sf.parent_path = t.parent_path
where db_id = {db_id} and t.field_id is null
on duplicate key update
data_type = sf.data_type, data_size = sf.data_size,
is_nullable = sf.is_nullable, default_value = sf.default_value,
namespace = sf.namespace,
default_comment_id = coalesce(fc.id, t.default_comment_id),
modified=now()
;
analyze table dict_field_detail;
'''.format(source_file=self.input_field_file, db_id=self.db_id)
for state in load_field_cmd.split(";"):
self.logger.debug(state)
cursor.execute(state)
self.conn_mysql.commit()
cursor.close()
def load_sample(self):
cursor = self.conn_mysql.cursor()
load_sample_cmd = '''
DELETE FROM stg_dict_dataset_sample where db_id = {db_id};
LOAD DATA LOCAL INFILE '{source_file}'
INTO TABLE stg_dict_dataset_sample
FIELDS TERMINATED BY '\Z' ESCAPED BY '\0'
(urn,ref_urn,data)
SET db_id = {db_id};
        -- update reference id in staging table
UPDATE stg_dict_dataset_sample s
LEFT JOIN dict_dataset d ON s.ref_urn = d.urn
SET s.ref_id = d.id
WHERE s.db_id = {db_id};
-- first insert ref_id as 0
INSERT INTO dict_dataset_sample
( `dataset_id`,
`urn`,
`ref_id`,
`data`,
created
)
select d.id as dataset_id, s.urn, s.ref_id, s.data, now()
from stg_dict_dataset_sample s left join dict_dataset d on d.urn = s.urn
where s.db_id = {db_id}
on duplicate key update
`data`=s.data, modified=now();
-- update reference id in final table
UPDATE dict_dataset_sample d
RIGHT JOIN stg_dict_dataset_sample s ON d.urn = s.urn
SET d.ref_id = s.ref_id
WHERE s.db_id = {db_id} AND d.ref_id = 0;
'''.format(source_file=self.input_sample_file, db_id=self.db_id)
for state in load_sample_cmd.split(";"):
self.logger.debug(state)
cursor.execute(state)
self.conn_mysql.commit()
cursor.close()
if __name__ == "__main__":
args = sys.argv[1]
l = HdfsLoad()
# set up connection
username = args[Constant.WH_DB_USERNAME_KEY]
password = args[Constant.WH_DB_PASSWORD_KEY]
JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
JDBC_URL = args[Constant.WH_DB_URL_KEY]
l.input_file = args[Constant.HDFS_SCHEMA_RESULT_KEY]
l.input_field_file = args[Constant.HDFS_FIELD_RESULT_KEY]
l.input_sample_file = args[Constant.HDFS_SAMPLE_LOCAL_PATH_KEY]
l.db_id = args[Constant.DB_ID_KEY]
l.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
l.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
try:
l.load_metadata()
l.load_field()
l.load_sample()
finally:
l.conn_mysql.close()
|
import random
import uuid
import os
class RandomRequest(object):
def __init__(self):
pass
def get_random_user_agent(self):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files/supported_ios_versions.txt')) as f:
ios_versions = f.read().splitlines()
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files/airbnb_versions.txt')) as f:
airbnb_versions = f.read().splitlines()
return "Airbnb/{} iPhone/{} Type/Phone".format(random.choice(airbnb_versions), random.choice(ios_versions))
def get_random_udid(self):
hex_digits = "0123456789abcdef"
return ''.join(random.choice(hex_digits) for _ in range(40))
def get_random_uuid(self):
return str(uuid.uuid4()).upper()
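# Example usage (assumes the two ``files/*.txt`` version lists ship alongside
# this module, as get_random_user_agent() expects):
if __name__ == "__main__":
    req = RandomRequest()
    print(req.get_random_user_agent())
    print(req.get_random_udid())
    print(req.get_random_uuid())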
|
import discord
from discord.ext import commands
from modules import utils
import asyncio
import aiohttp
from http.client import responses
from typing import Optional, Union
class Fortnite(commands.Cog):
def __init__(self, bot):
self.bot = bot
    async def get_fortnite_stats(self, name: str, platform: str) -> Optional[Union[dict, int]]:
headers = {
"TRN-Api-Key": self.bot.CONFIG["trn_api_key"]
}
url = "https://api.fortnitetracker.com/v1/profile/{platform}/{name}".format(platform=platform, name=name)
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as r:
if (r.status != 200):
return r.status
return await r.json()
except Exception:
return None
@staticmethod
def create_fortnite_stats_embed(user: discord.User, stats_data: dict, stats: str, title: str = "") -> discord.Embed:
        embed = discord.Embed(color=discord.Color.dark_green())
embed.title = title
embed.set_footer(text="{platform} | {stats} stats | Powered by fortnitetracker.com".format(
platform=stats_data["platformNameLong"],
stats=stats[0].upper() + stats[1:]))
embed.set_author(name=user.name, icon_url=user.avatar_url)
if (stats == "lifetime"):
data = utils.list_of_pairs_to_dict(stats_data["lifeTimeStats"])
embed.add_field(name=":trophy: Wins", value="{:,}".format(int(data["Wins"])))
embed.add_field(name=":medal: Win %", value=data["Win%"])
embed.add_field(name=":gun: Kills", value="{:,}".format(int(data["Kills"])))
embed.add_field(name=":skull_crossbones: K/D", value=data["K/d"])
embed.add_field(name=":video_game: Matches Played", value=data["Matches Played"])
try:
rank = stats_data["stats"]["p9"]["trnRating"]["rank"]
except KeyError:
pass
else:
embed.add_field(name=":military_medal: Ranking", value="{:,}".format(int(rank)))
else:
stats_options = {"solo": "p2",
"duo": "p10",
"squad": "p9"
}
if ("stats" not in stats_data or stats_options[stats] not in stats_data["stats"]):
return None
data = stats_data["stats"][stats_options[stats]]
embed.add_field(name=":trophy: Wins", value="{:,}".format(int(data["top1"]["value"])))
embed.add_field(name=":medal: Win %", value=(data["winRatio"]["value"] + "%"))
embed.add_field(name=":gun: Kills", value="{:,}".format(int(data["kills"]["value"])))
embed.add_field(name=":skull_crossbones: K/D", value=data["kd"]["value"])
embed.add_field(name=":video_game: Matches Played", value="{:,}".format(int(data["matches"]["value"])))
if (stats == "solo"):
embed.add_field(name=":third_place: Top 10", value="{:,}".format(int(data["top10"]["value"])))
elif (stats == "duo"):
embed.add_field(name=":third_place: Top 5", value="{:,}".format(int(data["top5"]["value"])))
elif (stats == "squad"):
embed.add_field(name=":third_place: Top 3", value="{:,}".format(int(data["top3"]["value"])))
return embed
@commands.command(description="finds Fortnite stats for a user",
brief="finds Fortnite stats for a user",
aliases=["fstats"])
@commands.cooldown(1, 1, commands.BucketType.guild)
async def fortnite(self, ctx, name: str, stats: str = "lifetime"):
async with ctx.channel.typing():
if (not "trn_api_key" in self.bot.CONFIG):
await ctx.send("No Tracker API key found")
return
stats_options = ["lifetime", "solo", "duo", "squad"]
if (stats not in stats_options):
await ctx.send(f"{ctx.author.mention} Invalid stat selection `{stats}`, options are: {', '.join('`{}`'.format(s) for s in stats_options)}")
return
platforms = ["pc", "xbl", "psn"]
success = False
for platform in platforms:
data = await self.get_fortnite_stats(name, platform)
await asyncio.sleep(1) # cooldown in between each request, according to the api's guidelines
if (not data):
continue
if (isinstance(data, int)):
self.bot.bot_utils.log_error_to_file("Failed to get Fortnite stats for \"{name}\" ({platform}) failed with status code {code} ({string})".format(
name=name,
platform=platform,
code=data,
string=responses[data] if (data in responses) else "unknown"), prefix="Fortnite")
continue
try:
data = dict(data)
except Exception as e:
self.bot.bot_utils.log_error_to_file("Failed to find Fortnite stats for \"{}\" ({}) because of exception: {}".format(name, platform, e),
prefix="Fortnite")
continue
if ("error" in data):
if (data["error"] != "Player Not Found"):
self.bot.bot_utils.log_error_to_file("API error for \"{}\" ({}): {}".format(name, platform, data["error"]), prefix="Fortnite")
continue
embed = self.create_fortnite_stats_embed(ctx.message.author,
data,
stats,
title=name)
if (not embed):
await ctx.send(f"{ctx.author.mention} Failed to find `{stats}` Fortnite stats for `{name}`")
return
await ctx.send(embed=embed)
success = True
if (not success):
await ctx.send(f"{ctx.author.mention} Failed to find `{stats}` Fortnite stats for `{name}`")
def setup(bot):
bot.add_cog(Fortnite(bot))
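# Loading this cog (a sketch; the module path below is an assumption about
# where this file lives in the bot, and bot.CONFIG["trn_api_key"] must be set):
#
#     bot.load_extension("cogs.fortnite")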
|
from .utils import run_all, run_pattern, run_list, initialize
__all__ = [
'run_all',
'run_pattern',
'run_list',
'initialize',
]
|
from bs4 import BeautifulSoup
import requests
import json
from urllib.parse import urljoin
import re
#TODO: decorate self.logged_in check
#TODO: get if user is admin
class CTFd(object):
"""
API wrapper for CTFd 2.1.2
"""
PATH_GET_CURRENT_USER = r"/api/v1/users/me"
PATH_GET_CHALLENGES = r"/api/v1/challenges"
PATH_GET_CHALLENGE = r"/api/v1/challenges/%d"
PATH_NONCE_DELETE_CHALLENGE = r"/admin/challenges/%d"
PATH_DELETE_CHALLENGE = r"/api/v1/challenges/%d"
PATH_NONCE_CREATE_CHALLENGE = r"/admin/challenges/new"
PATH_CREATE_CHALLENGE = r"/api/v1/challenges"
PATH_NONCE_PATCH_CHALLENGE = r"/admin/challenges/%d"
PATH_PATCH_CHALLENGE = r"/api/v1/challenges/%d"
PATH_NONCE_CREATE_FLAG = r"/admin/challenges/%d"
PATH_CREATE_FLAG = r"/api/v1/flags"
PATH_GET_FLAG = r"/api/v1/flags/%d"
PATH_NONCE_PATCH_FLAG = r"/admin/challenges/%d"
PATH_PATCH_FLAG = r"/api/v1/flags/%d"
PATH_NONCE_DELETE_FLAG = r"/admin/challenges/%d"
PATH_DELETE_FLAG = r"/api/v1/flags/%d"
PATH_GET_CHALLENGE_FLAGS = r"/api/v1/challenges/%d/flags"
PATH_SETUP = r"/setup"
PATH_GET_TAG = r"/api/v1/tags/%d"
PATH_NONCE_DELETE_TAG = r"/admin/challenges/%d"
PATH_DELETE_TAG = r"/api/v1/tags/%d"
PATH_NONCE_CREATE_TAG = r"/admin/challenges/%d"
PATH_CREATE_TAG = r"/api/v1/tags"
PATH_NONCE_PATCH_TAG = r"/admin/challenges/%d"
PATH_PATCH_TAG = r"/api/v1/tags/%d"
PATH_GET_CHALLENGE_TAGS = r"/api/v1/challenges/%d/tags"
PATH_GET_HINT = r"/api/v1/hints/%d"
PATH_NONCE_DELETE_HINT = r"/admin/challenges/%d"
PATH_DELETE_HINT = r"/api/v1/hints/%d"
PATH_NONCE_CREATE_HINT = r"/admin/challenges/%d"
PATH_CREATE_HINT = r"/api/v1/hints"
PATH_NONCE_PATCH_HINT = r"/admin/challenges/%d"
PATH_PATCH_HINT = r"/api/v1/hints/%d"
PATH_GET_CHALLENGE_HINTS = r"/api/v1/challenges/%d/hints"
PATH_NONCE_CREATE_FILE = r"/admin/challenges/%d"
PATH_CREATE_FILE = r"/api/v1/files"
PATH_NONCE_DELETE_FILE = r"/admin/challenges/%d"
PATH_DELETE_FILE = r"/api/v1/files/%d"
PATH_GET_CHALLENGE_FILES = r"/api/v1/challenges/%d/files"
PATH_GET_FILE = r"/api/v1/files/%d"
def __init__(self, host, verify=True):
"""
host: CTFd URL
"""
self.host = host
self.s = requests.Session()
if verify is False:
self.s.verify = False
self.logged_in = False
def setup(self, **kwargs):
"""
kwargs must be:
{
ctf_name: str, #ctf name
name: str, #admin name,
email: str, #admin email
password: str #admin password
user_mode: str, #("teams", "users")
}
"""
ret = None
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_SETUP
)
)
if r.status_code == 302:
return True
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
        args = {}
        params = ["ctf_name", "name", "email", "password", "user_mode"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["nonce"] = nonce
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_SETUP
),
headers={
"Content-Type": "application/x-www-form-urlencoded",
},
data=args,
allow_redirects=False
)
ret = (r.status_code == 302)
return ret
def login(self, login: str, password: str):
"""
login: CTFd admin login
password CTFd admin password
"""
r = self.s.get(
urljoin(self.host, "/login"),
)
soup = BeautifulSoup(r.text, 'html.parser')
nonce = soup.find(
"input",
attrs={
"type": "hidden",
"name": "nonce"
}
)["value"]
r = self.s.post(
urljoin(self.host, "/login"),
data={
"name": login,
"password": password,
"nonce": nonce
},
headers={
"Content-Type": "application/x-www-form-urlencoded",
},
allow_redirects=False
)
self.logged_in = r.status_code == 302
return self.logged_in
def get_current_user(self):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CURRENT_USER
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenges(self):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGES
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenge(self, cid: int):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGE % (cid)
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def create_challenge(self, **kwargs):
"""
        kwargs must be JSON-serializable as follows:
{
description: str,
category: str,
name: str,
value: int
state: str, ("hidden", "locked", "visible"),
type: str, ("standard", "dynamic"),
decay: int, #only if challenge is dynamic
minimum: int, #only if challenge is dynamic
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_CREATE_CHALLENGE
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["description", "category", "name", "value", "state", "type", "decay", "minimum"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_CREATE_CHALLENGE
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def patch_challenge(self, cid: int, **kwargs):
"""
cid: challenge id
        kwargs must be JSON-serializable as follows:
{
description: str,
category: str,
name: str,
value: int
state: str, ("hidden", "locked", "visible"),
type: str, ("standard", "dynamic"),
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_PATCH_CHALLENGE % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["description", "category", "name", "value", "state", "type", "decay", "minimum"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
r = self.s.patch(
urljoin(
self.host,
self.__class__.PATH_PATCH_CHALLENGE % (cid)
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def delete_challenge(self, cid):
"""
cid: challenge id
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_DELETE_CHALLENGE % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
r = self.s.delete(
urljoin(
self.host,
self.__class__.PATH_DELETE_CHALLENGE % (cid)
),
json={},
headers={
"CSRF-Token": nonce
}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def create_flag(self, cid, **kwargs):
"""
cid: challenge id
        kwargs must be JSON-serializable as follows:
{
content: str,
type: str, ("static", "regex")
challenge: int,
data: "case_insensitive" # not present if case sensitive
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_CREATE_FLAG % (cid)
)
)
            args = {}
            params = ["content", "type", "challenge", "data"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
args["challenge"] = cid
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_CREATE_FLAG
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def patch_flag(self, fid, **kwargs):
"""
        fid: flag id
        kwargs must be JSON-serializable as follows:
{
content: str,
type: str, ("static", "regex")
challenge: int,
data: "case_insensitive" # not present if case sensitive
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_FLAG % (fid)
)
)
challenge_id = r.json()["data"]["challenge_id"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_PATCH_FLAG % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["content", "type", "challenge", "data"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["id"] = fid
r = self.s.patch(
urljoin(
self.host,
self.__class__.PATH_PATCH_FLAG % (fid)
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def delete_flag(self, fid):
"""
fid: flag id
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_FLAG % (fid)
)
)
challenge_id = r.json()["data"]["challenge_id"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_DELETE_FLAG % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
r = self.s.delete(
urljoin(
self.host,
self.__class__.PATH_DELETE_FLAG % (fid)
),
json={},
headers={
"CSRF-Token": nonce
}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenge_flags(self, cid):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGE_FLAGS % (cid)
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def create_tag(self, cid, **kwargs):
"""
cid: challenge id
        kwargs must be JSON-serializable as follows:
{
value: str,
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_CREATE_TAG % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["value"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["challenge"] = cid
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_CREATE_TAG
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def patch_tag(self, tid, **kwargs):
"""
        tid: tag id
        kwargs must be JSON-serializable as follows:
{
value: str,
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_TAG % (tid)
)
)
challenge_id = r.json()["data"]["challenge_id"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_PATCH_TAG % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["value"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["id"] = tid
r = self.s.patch(
urljoin(
self.host,
self.__class__.PATH_PATCH_TAG % (tid)
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def delete_tag(self, tid):
"""
        tid: tag id
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_TAG % (tid)
)
)
challenge_id = r.json()["data"]["challenge_id"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_DELETE_TAG % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
r = self.s.delete(
urljoin(
self.host,
self.__class__.PATH_DELETE_TAG % (tid)
),
json={},
headers={
"CSRF-Token": nonce
}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenge_tags(self, cid):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGE_TAGS % (cid)
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def create_hint(self, cid, **kwargs):
"""
cid: challenge id
        kwargs must be JSON-serializable as follows:
{
content: str,
cost: int
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_CREATE_HINT % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["content", "cost"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["challenge"] = cid
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_CREATE_HINT
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def patch_hint(self, hid, **kwargs):
"""
        hid: hint id
        kwargs must be JSON-serializable as follows:
{
content: str,
cost: int
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_HINT % (hid)
)
)
challenge_id = r.json()["data"]["challenge"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_PATCH_HINT % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
            args = {}
            params = ["content", "cost"]
for key in params:
if key in kwargs.keys():
args[key] = kwargs[key]
args["id"] = hid
r = self.s.patch(
urljoin(
self.host,
self.__class__.PATH_PATCH_HINT % (hid)
),
headers={
"CSRF-Token": nonce
},
json=args
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def delete_hint(self, hid):
"""
        hid: hint id
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_GET_HINT % (hid)
)
)
challenge_id = r.json()["data"]["challenge"]
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_DELETE_HINT % (challenge_id)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
r = self.s.delete(
urljoin(
self.host,
self.__class__.PATH_DELETE_HINT % (hid)
),
json={},
headers={
"CSRF-Token": nonce
}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenge_hints(self, cid):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGE_HINTS % (cid)
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def create_file(self, cid, **kwargs):
"""
cid: challenge id
        kwargs must be JSON-serializable as follows:
{
file: file,
filename: str,
mime: str,
type: str, ("challenge")
}
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_CREATE_FILE % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
args = {
"challenge": cid,
"nonce": nonce,
"type": "challenge"
}
r = self.s.post(
urljoin(
self.host,
self.__class__.PATH_CREATE_FILE
),
data=args,
files={"file": (kwargs["filename"], kwargs["file"], kwargs["mime"])}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def delete_file(self, cid, fid):
"""
        cid: challenge id (used to fetch the CSRF nonce)
        fid: file id
"""
ret = None
if self.logged_in is True:
r = self.s.get(
urljoin(
self.host,
self.__class__.PATH_NONCE_DELETE_FILE % (cid)
)
)
m = re.search(
r'var csrf_nonce = "(.+?)";',
r.text
)
nonce = m.group(1)
r = self.s.delete(
urljoin(
self.host,
self.__class__.PATH_DELETE_FILE % (fid)
),
json={},
headers={
"CSRF-Token": nonce
}
)
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
def get_challenge_files(self, cid):
ret = None
if self.logged_in is True:
r = self.s.get(urljoin(
self.host,
self.__class__.PATH_GET_CHALLENGE_FILES % (cid)
))
if r.status_code == 200:
j = r.json()
ret = j if j["success"] is True else ret
return ret
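# A minimal usage sketch (assumes a reachable CTFd 2.1.x instance and valid
# admin credentials; the URL, credentials and challenge fields below are
# placeholders, and the "data"/"id" layout follows the CTFd v1 API responses):
if __name__ == "__main__":
    ctfd = CTFd("http://localhost:8000")
    if ctfd.login("admin", "password"):
        created = ctfd.create_challenge(name="warmup", category="misc",
                                        description="Say hi", value=100,
                                        state="visible", type="standard")
        if created is not None:
            cid = created["data"]["id"]
            ctfd.create_flag(cid, content="CTF{hello}", type="static")
            ctfd.create_tag(cid, value="intro")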
|
from constants import COLOR, COLOR_RED, COLOR_GREEN
from num_to_asin import num_to_asin
class Tote:
"""
Create Tote object with .audit() method.
"""
def __init__(self, tote_code, df_content):
"""
:param tote_code: str (e.g.: tsX... )
:param df_content: pd DataFrame
"""
self.tote_code = tote_code
self.df_content = df_content # pd DataFrame
self.asins_dict = False # will become dict of ASINs in tote
self.asins_checked = False # will become deep copy of self.asins_dict to count ASINs audited
self.missing_asins = False # will become deep copy of asins_checked but keys with value == 0 removed
self.audited = False
def audit(self, escalation_msg, fc):
print(COLOR + f"Auditing {self.tote_code}. Please scan all items in tote. Insert 'exit' to stop.")
self.df_content = self.df_content.reset_index(drop=True) # reset index to allow enumerate to work
# create dict with ASINs
self.asins_dict = dict() # {Scannable ID: Quantity} add duplicates in pd DataFrame
self.asins_checked = dict() # deep copy of self.asins_dict
# str.rstrip() removes whitespace at end of ASINs
for row, elem in enumerate(self.df_content["FN SKU"].str.rstrip()):
if elem in self.asins_dict:
self.asins_dict[elem] += self.df_content["Quantity"][row]
self.asins_checked[elem] += self.df_content["Quantity"][row]
else:
self.asins_dict[elem] = self.df_content["Quantity"][row]
self.asins_checked[elem] = self.df_content["Quantity"][row]
#
print(COLOR + f"{self.tote_code} contains:")
for asin in self.asins_dict:
print(COLOR + f"{asin} : {self.asins_dict[asin]}")
while sum(self.asins_checked.values()) > 0:
print(COLOR + f"{self.tote_code} contains ", end="")
print(COLOR_GREEN + f"{sum(self.asins_checked.values())}", end="")
print(COLOR + " products that have not been audited. Please scan a product or insert 'exit' to stop.\n->",
end="")
item = input()
if item.lower() == "exit":
for elem in self.asins_checked:
if self.asins_checked[elem] != 0:
print(COLOR + f"{elem} : {self.asins_checked[elem]}")
print(COLOR + f"are still missing. If these items are not in {self.tote_code}, {escalation_msg}")
print(COLOR + f"Do you want to stop auditing {self.tote_code}?(yes/no)\n->", end="")
confirmation = input().strip().lower()
if confirmation == "yes":
break
else:
print(COLOR + f"Please keep auditing {self.tote_code}.")
elif item in self.asins_checked:
if self.asins_checked[item] > 0:
print(COLOR_GREEN + "\nSUCCESS!")
print(COLOR + f"Audited: {item}")
self.asins_checked[item] -= 1
else: # elif tote_content[item] == 0: # over quantity
print(COLOR_RED + "\nERROR!!!")
print(COLOR_RED + f"All the items with this ASIN have been already scanned. {escalation_msg}")
else: # elif item != "exit" and item not in tote_content:
try:
right_code = num_to_asin(item, fc)
if right_code is None:
print(COLOR_RED + f"This product number or ASIN was not recognised. {escalation_msg}")
elif right_code in self.asins_checked:
if self.asins_checked[right_code] > 0:
print(COLOR_GREEN + f"{item} converted to {right_code}.\nSUCCESS!!")
self.asins_checked[right_code] -= 1
else: # over quantity
print(COLOR_RED + f"All the items with this ASIN have been already scanned "
f"{escalation_msg}")
else:
print(COLOR_RED + f"{item} was recognized as ASIN {right_code}, but this item should not be in "
f"{self.tote_code}. {escalation_msg}")
except BaseException as e:
print(COLOR_RED + f"\nERROR!! {e}\n. Wrong ASIN: {item}. {escalation_msg}")
print(COLOR + f"Finished auditing {self.tote_code}")
if sum(self.asins_checked.values()) == 0:
print(COLOR_GREEN + f"{self.tote_code} audit was successful!")
self.audited = "Audit Passed"
else:
answer = False
while not answer:
print(COLOR_RED + f"{self.tote_code} audit was not completed. Please start audit again or start "
f"escalation procedure. Was audit interrupted because audit FAILED? (yes/no)\n->",
end="")
is_failed = input()
is_failed = is_failed.lower()
if 'y' in is_failed:
print(COLOR_RED + f"These items were not found in {self.tote_code}:")
self.missing_asins = dict()
for elem in self.asins_checked:
if self.asins_checked[elem] != 0:
self.missing_asins[elem] = self.asins_checked[elem]
print(COLOR + f"{elem} : {self.asins_checked[elem]}")
self.audited = "Failed. Missing:" + str(self.missing_asins)
answer = True
elif 'n' in is_failed:
self.audited = "Interrupted"
print(COLOR + f"At the end of pallet audit, please explain why {self.tote_code} audit was "
f"interrupted (e.g.: @login requested the pallet to be transshipped immediately).")
answer = True
if __name__ == "__main__":
import pandas as pd
    # FN SKU values must be strings so that .str.rstrip() in audit() works
    df = pd.DataFrame({"FN SKU": ["1", "2"], "Quantity": [3, 4]})
a = Tote("a", df)
a.audit("AA", "MXP5")
|
from skimage.color import rgb2gray
from skimage.transform import resize
from skimage.io import imread, concatenate_images
import os
import numpy as np
import imageio  # used by read_video() below
# function for importing images from a folder
def load_images(path, input_size, output_size):
x_ = []
y_ = []
counter, totalnumber = 1, len(os.listdir(path))
for imgpath in os.listdir(path):
if counter % 100 == 0:
print("Importing image %s of %s (%s%%)" %(counter, totalnumber, round(counter/totalnumber*100)))
y = imread(path + "/" + imgpath)
y = rgb2gray(resize(y, output_size, mode="constant"))
x = resize(y, input_size, mode="constant")
x_.append(x)
y_.append(y)
counter += 1
return concatenate_images(x_), concatenate_images(y_)
# function for importing a video, frame by frame
def read_video(filepath, input_size, output_size):
vid = imageio.get_reader(filepath, "ffmpeg")
video_len = vid.get_length()
counter, totalnumber = 1, video_len
y_ = []
x_ = []
for i in range(0, video_len - 1):
if counter % 100 == 0:
print("Importing frame %s of %s (%s%%)" % (counter, totalnumber, round(counter / totalnumber * 100)))
y_frame = resize(vid.get_data(i), output_size, mode="constant")
y_frame = rgb2gray(y_frame)
x_frame = resize(y_frame, input_size, mode="constant")
y_.append(y_frame)
x_.append(x_frame)
counter += 1
return concatenate_images(x_), concatenate_images(y_)
# defining input and output size
input_size = (64, 64)
output_size = (128, 128)
# loading and reshaping train set
x_train, y_train = load_images("D:\\Users\Pc\Pictures\python\imagedata\\train_faces", input_size, output_size)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
y_train = y_train.reshape(y_train.shape[0], y_train.shape[1], y_train.shape[2], 1)
print(x_train.shape, y_train.shape)
# loading and reshaping validation set
x_test, y_test = load_images("D:\\Users\Pc\Pictures\python\imagedata\\test_faces", input_size, output_size)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
y_test = y_test.reshape(y_test.shape[0], y_test.shape[1], y_test.shape[2], 1)
print(x_test.shape, y_test.shape)
# saving the data in arrays
print("Creating a compressed dataset...")
np.savez_compressed("image",
x_train = x_train,
y_train = y_train,
x_test = x_test,
y_test = y_test)
|
import torch
from .transforms_rotated import bbox2delta_rotated
from ..utils import multi_apply
def bbox_target_rotated(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 5)
bbox_weights = pos_bboxes.new_zeros(num_samples, 5)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta_rotated(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
import unittest
import numpy as np
from dynamic_graph.sot.core.op_point_modifier import OpPointModifier
gaze = np.array((((1.0, 0.0, 0.0, 0.025), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.648), (0.0, 0.0, 0.0, 1.0))))
Jgaze = np.array(
(((1.0, 0.0, 0.0, 0.0, 0.648, 0.0), (0.0, 1.0, 0.0, -0.648, 0.0, 0.025), (0.0, 0.0, 1.0, 0.0, -0.025, 0.0),
(0.0, 0.0, 0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0, 1.0, 0.0), (0.0, 0.0, 0.0, 0.0, 0.0, 1.0))))
I4 = np.array(((1., 0., 0., 0.), (0., 1., 0., 0.), (0., 0., 1., 0.), (0., 0., 0., 1.)))
I6 = np.array(((1., 0., 0., 0., 0., 0.), (0., 1., 0., 0., 0., 0.), (0., 0., 1., 0., 0., 0.), (0., 0., 0., 1., 0., 0.),
(0., 0., 0., 0., 1., 0.), (0., 0., 0., 0., 0., 1.)))
class OpPointModifierTest(unittest.TestCase):
def test_simple(self):
op = OpPointModifier('op')
op.setTransformation(I4)
op.positionIN.value = I4
op.jacobianIN.value = I6
op.position.recompute(0)
op.jacobian.recompute(0)
self.assertTrue((op.getTransformation() == I4).all())
self.assertTrue((op.position.value == I4).all())
self.assertTrue((op.jacobian.value == I6).all())
def test_translation(self):
tx = 11.
ty = 22.
tz = 33.
T = np.array(((1., 0., 0., tx), (0., 1., 0., ty), (0., 0., 1., tz), (0., 0., 0., 1.)))
op = OpPointModifier('op2')
op.setTransformation(T)
op.positionIN.value = gaze
op.jacobianIN.value = Jgaze
op.position.recompute(1)
op.jacobian.recompute(1)
self.assertTrue((op.getTransformation() == T).all())
# w_M_s = w_M_g * g_M_s
w_M_g = gaze
g_M_s = T
w_M_s_ref = w_M_g.dot(g_M_s)
w_M_s = op.position.value
# Check w_M_s == w_M_s_ref
self.assertTrue((w_M_s == w_M_s_ref).all())
twist = np.array([[1., 0., 0., 0., tz, -ty], [0., 1., 0., -tz, 0., tx], [0., 0., 1., ty, -tx, 0.],
[0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 1., 0.], [0., 0., 0., 0., 0., 1.]])
J = op.jacobian.value
J_ref = twist.dot(Jgaze)
        # Check J == J_ref
self.assertTrue((J == J_ref).all())
def test_rotation(self):
T = np.array(((0., 0., 1., 0.), (0., -1., 0., 0.), (1., 0., 0., 0.), (0., 0., 0., 1.)))
op = OpPointModifier('op3')
op.setTransformation(T)
op.positionIN.value = gaze
op.jacobianIN.value = Jgaze
op.position.recompute(1)
op.jacobian.recompute(1)
self.assertTrue((op.getTransformation() == T).all())
# w_M_s = w_M_g * g_M_s
w_M_g = gaze
g_M_s = T
w_M_s_ref = w_M_g.dot(g_M_s)
w_M_s = op.position.value
# Check w_M_s == w_M_s_ref
self.assertTrue((w_M_s == w_M_s_ref).all())
twist = np.array([[0., 0., 1., 0., 0., 0.], [0., -1., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.], [0., 0., 0., 0., -1., 0.], [0., 0., 0., 1., 0., 0.]])
J = op.jacobian.value
J_ref = twist.dot(Jgaze)
        # Check J == J_ref
self.assertTrue((J == J_ref).all())
def test_rotation_translation(self):
tx = 11.
ty = 22.
tz = 33.
T = np.array(((0., 0., 1., tx), (0., -1., 0., ty), (1., 0., 0., tz), (0., 0., 0., 1.)))
op = OpPointModifier('op4')
op.setTransformation(T)
op.positionIN.value = gaze
op.jacobianIN.value = Jgaze
op.position.recompute(1)
op.jacobian.recompute(1)
self.assertTrue((op.getTransformation() == T).all())
# w_M_s = w_M_g * g_M_s
w_M_g = gaze
g_M_s = T
w_M_s_ref = w_M_g.dot(g_M_s)
w_M_s = op.position.value
# Check w_M_s == w_M_s_ref
self.assertTrue((w_M_s == w_M_s_ref).all())
twist = np.array([[0., 0., 1., ty, -tx, 0.], [0., -1., 0., tz, 0., -tx], [1., 0., 0., 0., tz, -ty],
[0., 0., 0., 0., 0., 1.], [0., 0., 0., 0., -1., 0.], [0., 0., 0., 1., 0., 0.]])
J = op.jacobian.value
J_ref = twist.dot(Jgaze)
        # Check J == J_ref
self.assertTrue((J == J_ref).all())
if __name__ == '__main__':
unittest.main()
|
import requests
import json
def get_practitioner(npi):
"""Returns a Practitioner Resource from Djmongo"""
url = "https://registry.npi.io/search/fhir/Practitioner.json" \
"?identifier.value=%s" % (npi)
response = requests.get(url)
    try:
        jr = json.loads(response.text)
        if 'results' not in jr:
            return {'error': 'The lookup failed. Invalid response from server'}
        if not jr['results']:
            return {'error': 'Invalid NPI'}
    except ValueError:
        return {'error':
                'The lookup failed. JSON was not returned from the server.'}
    return jr['results'][0]
def convert_practitioner_fhir_to_form(pract_res, user):
"""Converts a Practitioner Resource into Values for Form"""
data = {}
data['user'] = user
data['first_name']= pract_res['name'][0]['given'][0]
data['last_name']= pract_res['name'][0]['family'][0]
data['npi']= pract_res['identifier'][0]['value']
data['fhir_id']= pract_res['id']
return data
def convert_practitioner_fhir_to_meta(pract_res, user):
"""Converts a Practitioner Resource into Values for Meta"""
data = {}
data['user'] = user
data['npi']= pract_res['identifier'][0]['value']
data['fhir_id']= pract_res['id']
return data
def get_pecos_individual_affliliation(npi):
"""Returns a Pecos Affiliation Resource from Djmongo"""
url = "https://registry.npi.io/search/pecos/compiled_individuals." \
"json?NPI=%s" % (npi)
response = requests.get(url)
    try:
        jr = json.loads(response.text)
        if 'results' not in jr:
            return {'error': 'The lookup failed. Invalid response from server'}
        if not jr['results']:
            return {'error': 'Invalid NPI'}
    except ValueError:
        return {'error':
                'The lookup failed. JSON was not returned from the server.'}
    return jr['results'][0]['works_for']
def convert_pecos_to_form(pecos_res, user):
"""Converts a Practitioner Resource into Values for Form"""
data = {}
data['user'] = user
data['name']= pecos_res[0]['NAME']
data['npi']= pecos_res[0]['NPI']
data['description']= pecos_res[0]['DESCRIPTION']
return data
def convert_pecos_to_meta(pecos_res, user):
"""Converts a Practitioner Resource into Values for Meta"""
data = {}
data['user'] = user
data['npi']= pecos_res['identifier'][0]['value']
data['fhir_id']= pecos_res['id']
return data
# if __name__ == "__main__":
# print(get_pecos_individual_affliliation(1205824083))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, LABS^N
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import print_function
import os
import os.path as op
from copy import deepcopy
import warnings
from shutil import move, copy2
import subprocess
from collections import Counter
import time
import sys
import numpy as np
from scipy import linalg, io as spio
from numpy.testing import assert_allclose
import mne
from mne import (
compute_proj_raw, make_fixed_length_events, Epochs, find_events,
read_events, write_events, concatenate_events, read_cov,
compute_covariance, write_cov, read_forward_solution,
convert_forward_solution, write_proj, read_proj, setup_source_space,
make_forward_solution, write_evokeds, make_sphere_model,
setup_volume_source_space, pick_info, write_source_spaces,
read_source_spaces, write_forward_solution, DipoleFixed,
read_annotations)
from mne.externals.h5io import read_hdf5, write_hdf5
try:
from mne import compute_raw_covariance # up-to-date mne-python
except ImportError:  # old mne-python
from mne import compute_raw_data_covariance as compute_raw_covariance
from mne.preprocessing.ssp import compute_proj_ecg, compute_proj_eog
from mne.preprocessing.maxfilter import fit_sphere_to_headshape
from mne.preprocessing.maxwell import (maxwell_filter,
_trans_sss_basis,
_get_mf_picks, _prep_mf_coils,
_check_regularize,
_regularize)
from mne.utils import verbose, logger
try:
# Experimental version
from mne.preprocessing.maxwell import _prep_regularize
except ImportError:
_prep_regularize = None
from mne.bem import _check_origin
from mne.minimum_norm import make_inverse_operator
from mne.label import read_label
from mne.epochs import combine_event_ids
from mne.chpi import (filter_chpi, read_head_pos, write_head_pos,
_get_hpi_info, _get_hpi_initial_fit, _setup_hpi_struct,
_fit_cHPI_amplitudes, _fit_magnetic_dipole)
from mne.io.proj import _needs_eeg_average_ref_proj
from mne.cov import regularize
try:
from mne.chpi import quat_to_rot, rot_to_quat
except ImportError:
try:
from mne.chpi import (_quat_to_rot as quat_to_rot,
_rot_to_quat as rot_to_quat)
except ImportError:
from mne.io.chpi import (_quat_to_rot as quat_to_rot,
_rot_to_quat as rot_to_quat)
from mne.io import read_raw_fif, concatenate_raws, read_info, write_info
from mne.io.base import _annotations_starts_stops
from mne.io.constants import FIFF
from mne.io.pick import pick_types_forward, pick_types
from mne.io.meas_info import _empty_info
from mne.minimum_norm import write_inverse_operator
from mne.utils import run_subprocess, _time_mask
from mne.viz import plot_drop_log, tight_layout
from mne.externals.six import string_types
from ._paths import (get_raw_fnames, get_event_fnames,
get_epochs_evokeds_fnames, safe_inserter, _regex_convert)
from ._status import print_proc_status
from ._reorder import fix_eeg_channels
from ._report import gen_html_report
from ._scoring import default_score
try:
from functools import reduce
except Exception:
pass
# Class adapted from:
# http://stackoverflow.com/questions/3603502/
class Frozen(object):
__isfrozen = False
def __setattr__(self, key, value):
if self.__isfrozen and not hasattr(self, key):
raise AttributeError('%r is not an attribute of class %s. Call '
'"unfreeze()" to allow addition of new '
'attributes' % (key, self))
object.__setattr__(self, key, value)
def freeze(self):
"""Freeze the object so that only existing properties can be set"""
self.__isfrozen = True
def unfreeze(self):
"""Unfreeze the object so that additional properties can be added"""
self.__isfrozen = False
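# A minimal illustration of the Frozen behavior above (hypothetical class,
# not part of the pipeline): once ``freeze()`` is called, assigning a new
# attribute raises AttributeError until ``unfreeze()`` is called.
#
#     class Point(Frozen):
#         def __init__(self):
#             self.x = 0
#             self.freeze()
#     pt = Point()
#     pt.x = 1       # existing attribute: allowed
#     # pt.y = 2     # would raise AttributeError while frozen
#     pt.unfreeze()
#     pt.y = 2       # now allowed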
# noinspection PyUnresolvedReferences
class Params(Frozen):
"""Make a parameter structure for use with `do_processing`
This is technically a class, but it doesn't currently have any methods
other than init.
Parameters
----------
tmin : float
tmin for events.
tmax : float
tmax for events.
t_adjust : float
        Adjustment for delays (e.g., -4e-3 compensates for a 4 ms delay
        in the trigger).
bmin : float
Lower limit for baseline compensation.
bmax : float
Upper limit for baseline compensation.
n_jobs : int
Number of jobs to use in parallel operations.
lp_cut : float
Cutoff for lowpass filtering.
decim : int
Amount to decimate the data after filtering when epoching data
(e.g., a factor of 5 on 1000 Hz data yields 200 Hz data).
proj_sfreq : float | None
        The sample frequency to use for calculating projectors. Useful since
        time points are not independent following low-pass filtering;
        downsampling also saves computation.
n_jobs_mkl : int
Number of jobs to spawn in parallel for operations that can make
use of MKL threading. If Numpy/Scipy has been compiled with MKL
support, it is best to leave this at 1 or 2 since MKL will
automatically spawn threads. Otherwise, n_cpu is a good choice.
n_jobs_fir : int | str
Number of threads to use for FIR filtering. Can also be 'cuda'
if the system supports CUDA.
n_jobs_resample : int | str
Number of threads to use for resampling. Can also be 'cuda'
if the system supports CUDA.
filter_length : int
Filter length to use in FIR filtering. Longer filters generally
have better roll-off characteristics, but more ringing.
drop_thresh : float
The percentage threshold to use when deciding whether or not to
plot Epochs drop_log.
epochs_type : str | list
Can be 'fif', 'mat', or a list containing both.
fwd_mindist : float
Minimum distance for sources in the brain from the skull in order
for them to be included in the forward solution source space.
bem_type : str
Defaults to ``'5120-5120-5120'``, use ``'5120'`` for a
single-layer BEM.
    auto_bad : float | None
        If not None, bad channels will be automatically excluded if
        they disqualify a proportion of events exceeding ``auto_bad``.
ecg_channel : str | None
The channel to use to detect ECG events. None will use ECG063.
In lieu of an ECG recording, MEG1531 may work.
    eog_channel : str | None
        The channel to use to detect EOG events. None will use EOG*.
        In lieu of an EOG recording, MEG1411 may work.
plot_raw : bool
If True, plot the raw files with the ECG/EOG events overlaid.
match_fun : function | None
If None, standard matching will be performed. If a function,
must_match will be ignored, and ``match_fun`` will be called
to equalize event counts.
hp_cut : float | None
Highpass cutoff in Hz. Use None for no highpassing.
cov_method : str
Covariance calculation method.
ssp_eog_reject : dict | None
Amplitude rejection criteria for EOG SSP computation. None will
use the mne-python default.
ssp_ecg_reject : dict | None
Amplitude rejection criteria for ECG SSP computation. None will
use the mne-python default.
baseline : tuple | None | str
Baseline to use. If "individual", use ``params.bmin`` and
``params.bmax``, otherwise pass as the baseline parameter to
mne-python Epochs. ``params.bmin`` and ``params.bmax`` will always
be used for covariance calculation. This is useful e.g. when using
a high-pass filter and no baselining is desired (but evoked
covariances should still be calculated from the baseline period).
reject_tmin : float | None
Reject minimum time to use when epoching. None will use ``tmin``.
reject_tmax : float | None
Reject maximum time to use when epoching. None will use ``tmax``.
lp_trans : float
Low-pass transition band.
hp_trans : float
High-pass transition band.
Attributes
----------
movecomp : str | None
Movement compensation to use. Can be 'inter' or None.
    sss_type : str
        Signal space separation method. Must be either 'maxfilter' or
        'python'.
    int_order : int
        Order of internal component of spherical expansion. Default is 8.
    ext_order : int
        Order of external component of spherical expansion. Default is 3.
        A value of 6 is recommended for infant data.
    tsss_dur : float | None
        Buffer length (in seconds) for spatiotemporal SSS. Default is 60.,
        however a shorter buffer may be appropriate depending on the system.
        For data containing excessive head movements (e.g., young children)
        a buffer size of 4 s is recommended.
    st_correlation : float
        Correlation limit between inner and outer subspaces used to reject
        intersecting inner/outer signals during spatiotemporal SSS. Default
        is 0.98, however a smaller value of 0.9 is recommended for
        infant/child data.
trans_to : str | array-like, (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, a string path to a FIF file
containing a MEG device to head transformation, or a 3-element
array giving the coordinates to translate to (with no rotations).
Default is median head position across runs.
sss_origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in meters.
Default is center of sphere fit to digitized head points.
fir_design : str
Can be "firwin2" or "firwin".
    autoreject_thresholds : bool
        If True, use the autoreject module to compute global rejection
        thresholds for epoching. Make sure the autoreject module is
        installed. See http://autoreject.github.io/ for instructions.
    autoreject_types : tuple
        Default is ('mag', 'grad', 'eeg'). Can be set to ('mag', 'grad',
        'eeg', 'eog') to use the EOG channel rejection criterion from the
        autoreject module to reject trials on the basis of EOG.
src_pos : float
Default is 7 mm. Defines source grid spacing for volumetric source
space.
    on_missing : str
        Can be set to 'error' | 'warning' | 'ignore'. Default is 'error'.
        Determines what to do if one or several event ids are not found in
        the recording during epoching. See the mne.Epochs docstring for
        further details.
compute_rank : bool
Default is False. Set to True to compute rank of the noise covariance
matrix during inverse kernel computation.
Returns
-------
params : instance of Params
The parameters to use.
See also
--------
do_processing
mne.preprocessing.maxwell_filter
Notes
-----
Params has additional properties. Use ``dir(params)`` to see
all the possible options.
"""
def __init__(self, tmin=None, tmax=None, t_adjust=0, bmin=-0.2, bmax=0.0,
n_jobs=6, lp_cut=55, decim=5, proj_sfreq=None, n_jobs_mkl=1,
n_jobs_fir='cuda', n_jobs_resample='cuda',
filter_length=32768, drop_thresh=1,
epochs_type='fif', fwd_mindist=2.0,
bem_type='5120-5120-5120', auto_bad=None,
ecg_channel=None, eog_channel=None,
plot_raw=False, match_fun=None, hp_cut=None,
cov_method='empirical', ssp_eog_reject=None,
ssp_ecg_reject=None, baseline='individual',
reject_tmin=None, reject_tmax=None,
lp_trans=0.5, hp_trans=0.5):
self.reject = dict(eog=np.inf, grad=1500e-13, mag=5000e-15, eeg=150e-6)
self.flat = dict(eog=0, grad=1e-13, mag=1e-15, eeg=1e-6)
if ssp_eog_reject is None:
ssp_eog_reject = dict(grad=2000e-13, mag=3000e-15,
eeg=500e-6, eog=np.inf)
if ssp_ecg_reject is None:
ssp_ecg_reject = dict(grad=2000e-13, mag=3000e-15,
eeg=50e-6, eog=250e-6)
self.ssp_eog_reject = ssp_eog_reject
self.ssp_ecg_reject = ssp_ecg_reject
self.tmin = tmin
self.tmax = tmax
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.t_adjust = t_adjust
self.baseline = baseline
self.bmin = bmin
self.bmax = bmax
self.run_names = None
self.inv_names = None
self.inv_runs = None
self.work_dir = os.getcwd()
self.n_jobs = n_jobs
self.n_jobs_mkl = n_jobs_mkl
self.n_jobs_fir = n_jobs_fir # Jobs when using method='fir'
self.n_jobs_resample = n_jobs_resample
self.filter_length = filter_length
self.cont_lp = 5
self.lp_cut = lp_cut
self.hp_cut = hp_cut
self.lp_trans = lp_trans
self.hp_trans = hp_trans
self.phase = 'zero-double'
self.fir_window = 'hann'
self.fir_design = 'firwin2'
self.disp_files = True
self.plot_drop_logs = False # plot drop logs after do_preprocessing_
self.proj_sfreq = proj_sfreq
self.decim = decim
self.drop_thresh = drop_thresh
self.bem_type = bem_type
self.match_fun = match_fun
if isinstance(epochs_type, string_types):
epochs_type = (epochs_type,)
if not all([t in ('mat', 'fif') for t in epochs_type]):
raise ValueError('All entries in "epochs_type" must be "mat" '
'or "fif"')
self.epochs_type = epochs_type
self.fwd_mindist = fwd_mindist
self.auto_bad = auto_bad
self.auto_bad_reject = None
self.auto_bad_flat = None
self.auto_bad_meg_thresh = 10
self.auto_bad_eeg_thresh = 10
self.ecg_channel = ecg_channel
self.eog_channel = eog_channel
self.plot_raw = plot_raw
# add standard file tags
self.epochs_dir = 'epochs'
self.cov_dir = 'covariance'
self.inverse_dir = 'inverse'
self.forward_dir = 'forward'
self.list_dir = 'lists'
self.trans_dir = 'trans'
self.bad_dir = 'bads'
self.raw_dir = 'raw_fif'
self.sss_dir = 'sss_fif'
self.pca_dir = 'sss_pca_fif'
self.epochs_tag = '-epo'
self.inv_tag = '-sss'
self.inv_fixed_tag = '-fixed'
self.inv_loose_tag = ''
self.inv_free_tag = '-free'
self.inv_erm_tag = '-erm'
self.eq_tag = 'eq'
self.sss_fif_tag = '_raw_sss.fif'
self.bad_tag = '_post-sss.txt'
self.keep_orig = False
# This is used by fix_eeg_channels to fix original files
self.raw_fif_tag = '_raw.fif'
self.cal_file = None
self.ct_file = None
# SSS denoising params
self.sss_type = 'maxfilter'
self.mf_args = ''
self.tsss_dur = 60.
self.trans_to = 'median' # where to transform head positions to
self.sss_format = 'float' # output type for MaxFilter
self.movecomp = 'inter'
self.int_order = 8
self.ext_order = 3
self.st_correlation = .98
self.sss_origin = 'auto'
self.sss_regularize = 'in'
self.filter_chpi = True
        # optional callback run after each processing step
self.on_process = None
# Use more than EXTRA points to fit headshape
self.dig_with_eeg = False
# Function to pick a subset of events to use to make a covariance
self.pick_events_cov = lambda x: x
self.cov_method = cov_method
self.proj_extra = None
# These should be overridden by the user unless they are only doing
# a small subset, e.g. epoching
self.subjects = []
self.structurals = None
self.dates = None
self.score = None # defaults to passing events through
self.acq_ssh = self.acq_dir = None
self.acq_port = 22
self.sws_ssh = self.sws_dir = None
self.sws_port = 22
self.subject_indices = []
self.get_projs_from = []
self.runs_empty = []
        self.proj_nums = [[0] * 3 for _ in range(3)]  # avoid aliased rows
self.in_names = []
self.in_numbers = []
self.analyses = []
self.out_names = []
self.out_numbers = []
self.must_match = []
self.on_missing = 'error' # for epochs
self.subject_run_indices = None
self.autoreject_thresholds = False
self.autoreject_types = ('mag', 'grad', 'eeg')
self.subjects_dir = None
self.src_pos = 7.
self.report_params = dict(
good_hpi_count=True,
head_movement=True,
psd=True,
ssp_topomaps=True,
source_alignment=True,
bem=True,
source=None,
)
self.rotation_limit = np.inf
self.translation_limit = np.inf
self.coil_bad_count_duration_limit = np.inf # for annotations
self.coil_dist_limit = 0.005
self.coil_t_window = 0.2 # default is same as MF
self.coil_t_step_min = 0.01
self.proj_ave = False
self.compute_rank = False
self.freeze()
@property
def pca_extra(self):
return '_allclean_fil%d' % self.lp_cut
@property
def pca_fif_tag(self):
return self.pca_extra + self.sss_fif_tag
def convert_subjects(self, subj_template, struc_template=None):
"""Helper to convert subject names
Parameters
----------
        subj_template : str | callable
            Subject template (or function) to use.
        struc_template : str | callable | None
            Structural template (or function) to use.
"""
if struc_template is not None:
if isinstance(struc_template, string_types):
def fun(x):
return struc_template % x
else:
fun = struc_template
new = [fun(subj) for subj in self.subjects]
assert all(isinstance(subj, string_types) for subj in new)
self.structurals = new
if isinstance(subj_template, string_types):
def fun(x):
return subj_template % x
else:
fun = subj_template
new = [fun(subj) for subj in self.subjects]
assert all(isinstance(subj, string_types) for subj in new)
self.subjects = new
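# A hedged configuration sketch for Params; every value below is hypothetical
# and would normally be tailored to a given study:
#
#     params = Params(tmin=-0.2, tmax=0.5, n_jobs=2, decim=5,
#                     n_jobs_fir=1, n_jobs_resample=1)
#     params.subjects = ['subj_01', 'subj_02']
#     params.structurals = [None] * len(params.subjects)  # spherical models
#     params.run_names = ['%s_run1']
#     params.convert_subjects('eric_sample_%s')  # -> 'eric_sample_subj_01'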
def _get_baseline(p):
"""Helper to extract baseline from params"""
if p.baseline == 'individual':
baseline = (p.bmin, p.bmax)
else:
baseline = p.baseline
return baseline
def do_processing(p, fetch_raw=False, do_score=False, push_raw=False,
do_sss=False, fetch_sss=False, do_ch_fix=False,
gen_ssp=False, apply_ssp=False,
write_epochs=False, gen_covs=False, gen_fwd=False,
gen_inv=False, gen_report=False, print_status=True):
"""Do M/EEG data processing
Parameters
----------
p : instance of Params
The parameter structure.
fetch_raw : bool
Fetch raw recording files from acquisition machine.
do_score : bool
Do scoring.
push_raw : bool
Push raw recording files to SSS workstation.
do_sss : bool
Run SSS remotely on SSS workstation.
fetch_sss : bool
Fetch SSS files from SSS workstation.
do_ch_fix : bool
Fix channel ordering.
gen_ssp : bool
Generate SSP vectors.
apply_ssp : bool
Apply SSP vectors and filtering.
write_epochs : bool
Write epochs to disk.
gen_covs : bool
Generate covariances.
gen_fwd : bool
Generate forward solutions.
    gen_inv : bool
Generate inverses.
gen_report : bool
Generate HTML reports.
print_status : bool
Print status (determined from file structure).
"""
# Generate requested things
if p.sss_type == 'python':
push_raw = False
fetch_sss = False
bools = [fetch_raw,
do_score,
push_raw,
do_sss,
fetch_sss,
do_ch_fix,
gen_ssp,
apply_ssp,
write_epochs,
gen_covs,
gen_fwd,
gen_inv,
gen_report,
print_status,
]
texts = ['Pulling raw files from acquisition machine',
'Scoring subjects',
'Pushing raw files to remote workstation',
'Running SSS using %s' % p.sss_type,
'Pulling SSS files from remote workstation',
'Fixing EEG order',
'Preprocessing files',
'Applying preprocessing',
'Doing epoch EQ/DQ',
'Generating covariances',
'Generating forward models',
'Generating inverse solutions',
'Generating HTML Reports',
'Status',
]
score_fun = p.score if p.score is not None else default_score
if len(mne.utils._get_args(score_fun)) == 2:
score_fun_two = score_fun
def score_fun(p, subjects, run_indices):
return score_fun_two(p, subjects)
funcs = [fetch_raw_files,
score_fun,
push_raw_files,
run_sss,
fetch_sss_files,
fix_eeg_files,
do_preprocessing_combined,
apply_preprocessing_combined,
save_epochs,
gen_covariances,
gen_forwards,
gen_inverses,
gen_html_report,
print_proc_status,
]
assert len(bools) == len(texts) == len(funcs)
# Only run a subset of subjects
n_subj_orig = len(p.subjects)
sinds = p.subject_indices
if sinds is None:
sinds = np.arange(len(p.subjects))
subjects = np.array(p.subjects)[sinds].tolist()
structurals = p.structurals
if structurals is not None:
assert len(structurals) == n_subj_orig
structurals = np.array(structurals)[sinds].tolist()
dates = p.dates
if dates is not None:
assert len(dates) == n_subj_orig
dates = [tuple([int(dd) for dd in d]) if d is not None else None
for d in np.array(p.dates)[sinds]]
decim = p.decim
if not isinstance(decim, (list, tuple)):
decim = [decim] * len(p.subjects)
assert len(decim) == n_subj_orig
decim = np.array(decim)
assert np.issubdtype(decim.dtype, np.int), (decim.dtype, decim.dtype.char)
assert decim.ndim == 1
assert decim.size == len(p.subjects)
decim = decim[sinds]
run_indices = p.subject_run_indices
if run_indices is None:
run_indices = [None] * len(p.subjects)
assert len(run_indices) == len(p.subjects)
run_indices = [r for ri, r in enumerate(run_indices) if ri in sinds]
assert all(r is None or np.in1d(r, np.arange(len(p.run_names))).all()
for r in run_indices)
# Actually do the work
outs = [None] * len(bools)
for ii, (b, text, func) in enumerate(zip(bools, texts, funcs)):
if b:
t0 = time.time()
print(text + '. ')
if func is None:
raise ValueError('function is None')
if func == fix_eeg_files:
outs[ii] = func(p, subjects, structurals, dates, run_indices)
elif func in (gen_forwards, gen_html_report):
outs[ii] = func(p, subjects, structurals, run_indices)
elif func == save_epochs:
outs[ii] = func(p, subjects, p.in_names, p.in_numbers,
p.analyses, p.out_names, p.out_numbers,
p.must_match, decim, run_indices)
elif func == print_proc_status:
outs[ii] = func(p, subjects, structurals, p.analyses,
run_indices)
else:
outs[ii] = func(p, subjects, run_indices)
print(' (' + timestring(time.time() - t0) + ')')
if p.on_process is not None:
p.on_process(text, func, outs[ii], p)
print("Done")
def _is_dir(d):
"""Safely check for a directory (allowing symlinks)"""
return op.isdir(op.abspath(d))
def fetch_raw_files(p, subjects, run_indices):
"""Fetch remote raw recording files (only designed for *nix platforms)"""
for si, subj in enumerate(subjects):
print(' Checking for proper remote filenames for %s...' % subj)
subj_dir = op.join(p.work_dir, subj)
if not _is_dir(subj_dir):
os.mkdir(subj_dir)
raw_dir = op.join(subj_dir, p.raw_dir)
if not op.isdir(raw_dir):
os.mkdir(raw_dir)
fnames = get_raw_fnames(p, subj, 'raw', True, False,
run_indices[si])
assert len(fnames) > 0
# build remote raw file finder
if isinstance(p.acq_dir, string_types):
use_dir = [p.acq_dir]
else:
use_dir = p.acq_dir
finder_stem = 'find ' + ' '.join(use_dir)
finder = (finder_stem + ' -o '.join([' -type f -regex ' +
_regex_convert(f)
for f in fnames]))
# Ignore "Permission denied" errors:
# https://unix.stackexchange.com/questions/42841/how-to-skip-permission-denied-errors-when-running-find-in-linux # noqa
        finder += ' 2>&1 | grep -v "Permission denied"'
stdout_ = run_subprocess(
['ssh', '-p', str(p.acq_port), p.acq_ssh, finder],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)[0]
remote_fnames = [x.strip() for x in stdout_.splitlines()]
if not any(fname.startswith(rd.rstrip('/') + '/') for rd in use_dir
for fname in remote_fnames):
raise IOError('Unable to find files at remote locations. '
'Check filenames, for example:\n%s'
% remote_fnames[:1])
# make the name "local" to the acq dir, so that the name works
# remotely during rsync and locally during copyfile
remote_dir = [fn[:fn.index(op.basename(fn))]
for fn in remote_fnames][0]
remote_fnames = [op.basename(fname) for fname in remote_fnames]
want = set(op.basename(fname) for fname in fnames)
got = set(op.basename(fname) for fname in remote_fnames)
if want != got.intersection(want):
raise RuntimeError('Could not find all files, missing:\n' +
'\n'.join(sorted(want - got)))
if len(remote_fnames) != len(fnames):
warnings.warn('Found more files than expected on remote server.\n'
'Likely split files were found. Please confirm '
'results.')
print(' Pulling %s files for %s...' % (len(remote_fnames), subj))
cmd = ['rsync', '-ave', 'ssh -p %s' % p.acq_port,
'--prune-empty-dirs', '--partial',
'--include', '*/']
for fname in remote_fnames:
cmd += ['--include', op.basename(fname)]
remote_loc = '%s:%s' % (p.acq_ssh, op.join(remote_dir, ''))
cmd += ['--exclude', '*', remote_loc, op.join(raw_dir, '')]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# move files to root raw_dir
for fname in remote_fnames:
from_ = fname.index(subj)
move(op.join(raw_dir, fname[from_:].lstrip('/')),
op.join(raw_dir, op.basename(fname)))
# prune the extra directories we made
for fname in remote_fnames:
from_ = fname.index(subj)
next_ = op.split(fname[from_:].lstrip('/'))[0]
while len(next_) > 0:
if op.isdir(op.join(raw_dir, next_)):
os.rmdir(op.join(raw_dir, next_)) # safe; goes if empty
next_ = op.split(next_)[0]
def calc_median_hp(p, subj, out_file, ridx):
"""Calculate median head position"""
print(' Estimating median head position ...')
raw_files = get_raw_fnames(p, subj, 'raw', False, False, ridx)
ts = []
qs = []
info = None
for fname in raw_files:
info = read_info(fname)
trans = info['dev_head_t']['trans']
ts.append(trans[:3, 3])
m = trans[:3, :3]
# make sure we are a rotation matrix
assert_allclose(np.dot(m, m.T), np.eye(3), atol=1e-5)
assert_allclose(np.linalg.det(m), 1., atol=1e-5)
qs.append(rot_to_quat(m))
assert info is not None
if len(raw_files) == 1: # only one head position
dev_head_t = info['dev_head_t']
else:
t = np.median(np.array(ts), axis=0)
rot = np.median(quat_to_rot(np.array(qs)), axis=0)
trans = np.r_[np.c_[rot, t[:, np.newaxis]],
np.array([0, 0, 0, 1], t.dtype)[np.newaxis, :]]
dev_head_t = {'to': 4, 'from': 1, 'trans': trans}
info = _empty_info(info['sfreq'])
info['dev_head_t'] = dev_head_t
write_info(out_file, info)
def calc_twa_hp(p, subj, out_file, ridx):
"""Calculate time-weighted average head position."""
if not p.movecomp:
# Eventually we could relax this but probably YAGNI
raise RuntimeError('Cannot use time-weighted average head position '
'when movecomp is off.')
print(' Estimating time-weighted average head position ...')
raw_fnames = get_raw_fnames(p, subj, 'raw', False, False, ridx)
assert len(raw_fnames) >= 1
norm = 0
A = np.zeros((4, 4))
pos = np.zeros(3)
for raw_fname in raw_fnames:
raw = mne.io.read_raw_fif(raw_fname, allow_maxshield='yes',
verbose='error')
hp, annot, _ = _head_pos_annot(p, raw_fname, prefix=' ')
raw.annotations = annot
good = np.ones(len(raw.times))
ts = np.concatenate((hp[:, 0],
[(raw.last_samp + 1) / raw.info['sfreq']]))
ts -= raw.first_samp / raw.info['sfreq']
idx = raw.time_as_index(ts, use_rounding=True)
assert idx[-1] == len(good)
# Mark times bad that are bad according to annotations
onsets, ends = _annotations_starts_stops(raw, 'bad')
for onset, end in zip(onsets, ends):
good[onset:end] = 0
dt = np.diff(np.cumsum(np.concatenate([[0], good]))[idx])
dt = dt / raw.info['sfreq']
del good, idx, ts
pos += np.dot(dt, hp[:, 4:7])
these_qs = hp[:, 1:4]
res = 1 - np.sum(these_qs * these_qs, axis=-1, keepdims=True)
assert (res >= 0).all()
these_qs = np.concatenate((these_qs, np.sqrt(res)), axis=-1)
assert np.allclose(np.linalg.norm(these_qs, axis=1), 1)
these_qs *= dt[:, np.newaxis]
# rank 1 update method
# https://arc.aiaa.org/doi/abs/10.2514/1.28949?journalCode=jgcd
# https://github.com/tolgabirdal/averaging_quaternions/blob/master/wavg_quaternion_markley.m # noqa: E501
# qs.append(these_qs)
outers = np.einsum('ij,ik->ijk', these_qs, these_qs)
A += outers.sum(axis=0)
norm += dt.sum()
A /= norm
best_q = linalg.eigh(A)[1][:, -1] # largest eigenvector is the wavg
# Same as the largest eigenvector from the concatenation of all
# best_q = linalg.svd(np.concatenate(qs).T)[0][:, 0]
best_q = best_q[:3] * np.sign(best_q[-1])
trans = np.eye(4)
trans[:3, :3] = quat_to_rot(best_q)
trans[:3, 3] = pos / norm
dev_head_t = mne.Transform('meg', 'head', trans)
info = _empty_info(raw.info['sfreq'])
info['dev_head_t'] = dev_head_t
write_info(out_file, info)
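# The time-weighted quaternion average above follows the Markley rank-1
# update: each unit quaternion q_i, weighted by its dwell time dt_i,
# contributes dt_i * q_i q_i^T to a 4x4 matrix A, and the average rotation is
# the eigenvector of A with the largest eigenvalue. Minimal standalone sketch
# (quaternions below are hypothetical, in [x, y, z, w] order):
#
#     qs = np.array([[0., 0., 0., 1.], [0.1, 0., 0., np.sqrt(1 - 0.01)]])
#     dts = np.array([10., 20.])
#     A = np.einsum('i,ij,ik->jk', dts, qs, qs) / dts.sum()
#     q_avg = np.linalg.eigh(A)[1][:, -1]  # largest-eigenvalue eigenvector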
def push_raw_files(p, subjects, run_indices):
"""Push raw files to SSS workstation"""
if len(subjects) == 0:
return
print(' Pushing raw files to SSS workstation...')
# do all copies at once to avoid multiple logins
copy2(op.join(op.dirname(__file__), 'run_sss.sh'), p.work_dir)
includes = ['--include', op.sep + 'run_sss.sh']
if not isinstance(p.trans_to, string_types):
raise TypeError(' Illegal head transformation argument to MaxFilter.')
elif p.trans_to not in ('default', 'median'):
_check_trans_file(p)
includes += ['--include', op.sep + p.trans_to]
for si, subj in enumerate(subjects):
subj_dir = op.join(p.work_dir, subj)
raw_dir = op.join(subj_dir, p.raw_dir)
out_pos = op.join(raw_dir, subj + '_center.txt')
if not op.isfile(out_pos):
print(' Determining head center for %s... ' % subj, end='')
in_fif = op.join(raw_dir,
safe_inserter(p.run_names[0], subj) +
p.raw_fif_tag)
if p.dig_with_eeg:
dig_kinds = (FIFF.FIFFV_POINT_EXTRA, FIFF.FIFFV_POINT_LPA,
FIFF.FIFFV_POINT_NASION, FIFF.FIFFV_POINT_RPA,
FIFF.FIFFV_POINT_EEG)
else:
dig_kinds = (FIFF.FIFFV_POINT_EXTRA,)
origin_head = fit_sphere_to_headshape(read_info(in_fif),
dig_kinds=dig_kinds,
units='mm')[1]
out_string = ' '.join(['%0.0f' % np.round(number)
for number in origin_head])
with open(out_pos, 'w') as fid:
fid.write(out_string)
med_pos = op.join(raw_dir, subj + '_median_pos.fif')
if not op.isfile(med_pos):
calc_median_hp(p, subj, med_pos, run_indices[si])
root = op.sep + subj
raw_root = op.join(root, p.raw_dir)
includes += ['--include', root, '--include', raw_root,
'--include', op.join(raw_root, op.basename(out_pos)),
'--include', op.join(raw_root, op.basename(med_pos))]
prebad_file = _prebad(p, subj)
includes += ['--include',
op.join(raw_root, op.basename(prebad_file))]
fnames = get_raw_fnames(p, subj, 'raw', True, True, run_indices[si])
assert len(fnames) > 0
for fname in fnames:
assert op.isfile(fname), fname
includes += ['--include', op.join(raw_root, op.basename(fname))]
assert ' ' not in p.sws_dir
assert ' ' not in p.sws_ssh
cmd = (['rsync', '-aLve', 'ssh -p %s' % p.sws_port, '--partial'] +
includes + ['--exclude', '*'])
cmd += ['.', '%s:%s' % (p.sws_ssh, op.join(p.sws_dir, ''))]
run_subprocess(cmd, cwd=p.work_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def _check_trans_file(p):
"""Helper to make sure our trans_to file exists"""
if not isinstance(p.trans_to, string_types):
raise ValueError('trans_to must be a string')
if p.trans_to not in ('default', 'median'):
if not op.isfile(op.join(p.work_dir, p.trans_to)):
raise ValueError('Trans position file "%s" not found'
% p.trans_to)
def run_sss(p, subjects, run_indices):
"""Run SSS preprocessing remotely (only designed for *nix platforms) or
locally using Maxwell filtering in mne-python"""
if p.sss_type == 'python':
print(' Applying SSS locally using mne-python')
run_sss_locally(p, subjects, run_indices)
else:
for si, subj in enumerate(subjects):
files = get_raw_fnames(p, subj, 'raw', False, True,
run_indices[si])
n_files = len(files)
files = ':'.join([op.basename(f) for f in files])
erm = get_raw_fnames(p, subj, 'raw', 'only', True, run_indices[si])
n_files += len(erm)
erm = ':'.join([op.basename(f) for f in erm])
erm = ' --erm ' + erm if len(erm) > 0 else ''
assert isinstance(p.tsss_dur, float) and p.tsss_dur > 0
st = ' --st %s' % p.tsss_dur
if p.sss_format not in ('short', 'long', 'float'):
raise RuntimeError('format must be short, long, or float')
fmt = ' --format ' + p.sss_format
assert p.movecomp in ['inter', None]
mc = ' --mc %s' % str(p.movecomp).lower()
_check_trans_file(p)
trans = ' --trans ' + p.trans_to
run_sss = (op.join(p.sws_dir, 'run_sss.sh') + st + fmt + trans +
' --subject ' + subj + ' --files ' + files + erm + mc +
' --args=\"%s\"' % p.mf_args)
cmd = ['ssh', '-p', str(p.sws_port), p.sws_ssh, run_sss]
s = 'Remote output for %s on %s files:' % (subj, n_files)
print('-' * len(s))
print(s)
print('-' * len(s))
run_subprocess(cmd, stdout=sys.stdout, stderr=sys.stderr)
print('-' * 70, end='\n\n')
def fetch_sss_files(p, subjects, run_indices):
"""Pull SSS files (only designed for *nix platforms)"""
if len(subjects) == 0:
return
includes = []
for subj in subjects:
includes += ['--include', subj,
'--include', op.join(subj, 'sss_fif'),
'--include', op.join(subj, 'sss_fif', '*'),
'--include', op.join(subj, 'sss_log'),
'--include', op.join(subj, 'sss_log', '*')]
assert ' ' not in p.sws_dir
assert ' ' not in p.sws_ssh
cmd = (['rsync', '-ave', 'ssh -p %s' % p.sws_port, '--partial', '-K'] +
includes + ['--exclude', '*'])
cmd += ['%s:%s' % (p.sws_ssh, op.join(p.sws_dir, '*')), '.']
run_subprocess(cmd, cwd=p.work_dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def run_sss_command(fname_in, options, fname_out, host='kasga', port=22,
fname_pos=None, stdout=None, stderr=None, prefix='',
work_dir='~/'):
"""Run Maxfilter remotely and fetch resulting file
Parameters
----------
fname_in : str
The filename to process.
options : str
The command-line options for Maxfilter.
fname_out : str | None
Output filename to use to store the result on the local machine.
None will output to a temporary file.
    host : str
        The SSH/scp host to run the command on.
    port : int
        The SSH port to use.
    fname_pos : str | None
The ``-hp fname_pos`` to use with MaxFilter.
stdout : file-like | None
Where to send stdout.
stderr : file-like | None
Where to send stderr.
prefix : str
The text to prefix to messages.
work_dir : str
Where to store the temporary files.
"""
# let's make sure we can actually write where we want
if not op.isfile(fname_in):
raise IOError('input file not found: %s' % fname_in)
if not op.isdir(op.dirname(op.abspath(fname_out))):
raise IOError('output directory for output file does not exist')
if any(x in options for x in ('-f ', '-o ', '-hp ')):
raise ValueError('options cannot contain -o, -f, or -hp, these are '
'set automatically')
port = str(int(port))
t0 = time.time()
remote_in = op.join(work_dir, 'temp_%s_raw.fif' % t0)
remote_out = op.join(work_dir, 'temp_%s_raw_sss.fif' % t0)
remote_pos = op.join(work_dir, 'temp_%s_raw_sss.pos' % t0)
print('%sOn %s: copying' % (prefix, host), end='')
fname_in = op.realpath(fname_in) # in case it's a symlink
cmd = ['scp', '-P' + port, fname_in, host + ':' + remote_in]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if fname_pos is not None:
options += ' -hp ' + remote_pos
print(', MaxFilter', end='')
cmd = ['ssh', '-p', port, host,
'maxfilter -f ' + remote_in + ' -o ' + remote_out + ' ' + options]
try:
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(', copying to %s' % (op.basename(fname_out),), end='')
if fname_pos is not None:
try:
cmd = ['scp', '-P' + port, host + ':' + remote_pos, fname_pos]
run_subprocess(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except Exception:
pass
cmd = ['scp', '-P' + port, host + ':' + remote_out, fname_out]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
finally:
print(', cleaning', end='')
files = [remote_in, remote_out]
files += [remote_pos] if fname_pos is not None else []
cmd = ['ssh', '-p', port, host, 'rm -f ' + ' '.join(files)]
try:
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception:
pass
print(' (%i sec)' % (time.time() - t0,))
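# Hedged usage sketch for run_sss_command; host, paths, and MaxFilter options
# below are hypothetical:
#
#     run_sss_command('subj_01_raw.fif', '-st 10 -corr 0.98',
#                     'subj_01_raw_sss.fif', host='kasga', port=22)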
def run_sss_positions(fname_in, fname_out, host='kasga', opts='', port=22,
prefix=' ', work_dir='~/', t_window=None,
t_step_min=None, dist_limit=None):
"""Run Maxfilter remotely and fetch resulting file
Parameters
----------
fname_in : str
The filename to process. Additional ``-1`` files will be
automatically detected.
fname_out : str
Output filename to use to store the resulting head positions
on the local machine.
host : str
The SSH/scp host to run the command on
opts : str
Additional command-line options to pass to MaxFilter.
port : int
The SSH port.
prefix : str
The prefix to use when printing status updates.
work_dir : str
Where to store the temporary files.
    t_window : float | None
        Time window (sec) to use.
    t_step_min : float | None
        Minimum time step (sec) to use.
    dist_limit : float | None
        Distance limit (m) to use.
"""
# let's make sure we can actually write where we want
if not op.isfile(fname_in):
raise IOError('input file not found: %s' % fname_in)
if not op.isdir(op.dirname(op.abspath(fname_out))):
raise IOError('output directory for output file does not exist')
pout = op.dirname(fname_in)
fnames_in = [fname_in]
for ii in range(1, 11):
next_name = op.splitext(fname_in)[0] + '-%s' % ii + '.fif'
if op.isfile(next_name):
fnames_in.append(next_name)
else:
break
if t_window is not None:
opts += ' -hpiwin %d' % (round(1000 * t_window),)
if t_step_min is not None:
opts += ' -hpistep %d' % (round(1000 * t_step_min),)
if dist_limit is not None:
opts += ' -hpie %d' % (round(1000 * dist_limit),)
t0 = time.time()
print('%sOn %s: copying' % (prefix, host), end='')
cmd = ['rsync', '--partial', '-Lave', 'ssh -p %s' % port,
'--include', '*/']
for fname in fnames_in:
cmd += ['--include', op.basename(fname)]
cmd += ['--exclude', '*', op.dirname(fnames_in[0]) + '/',
'%s:%s' % (host, work_dir)]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
remote_ins = [op.join(work_dir, op.basename(f)) for f in fnames_in]
fnames_out = [op.basename(r)[:-4] + '.pos' for r in remote_ins]
for fi, file_out in enumerate(fnames_out):
remote_out = op.join(work_dir, 'temp_%s_raw_quat.fif' % t0)
remote_hp = op.join(work_dir, 'temp_%s_hp.txt' % t0)
print(', running -headpos%s' % opts, end='')
cmd = ['ssh', '-p', str(port), host,
'/neuro/bin/util/maxfilter -f ' + remote_ins[fi] + ' -o ' +
remote_out +
' -headpos -format short -hp ' + remote_hp + ' ' + opts]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(', copying', end='')
cmd = ['scp', '-P' + str(port), host + ':' + remote_hp,
op.join(pout, file_out)]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd = ['ssh', '-p', str(port), host, 'rm -f %s %s %s'
% (remote_ins[fi], remote_hp, remote_out)]
run_subprocess(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# concatenate hp pos file for split raw files if any
data = []
for f in fnames_out:
data.append(read_head_pos(op.join(pout, f)))
os.remove(op.join(pout, f))
pos_data = np.concatenate(np.array(data))
print(', writing', end='')
write_head_pos(fname_out, pos_data)
print(' (%i sec)' % (time.time() - t0,))
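# Hedged usage sketch for run_sss_positions; the filenames and host below are
# hypothetical:
#
#     run_sss_positions('subj_01_raw.fif', 'subj_01_raw.pos', host='kasga',
#                       t_window=0.2, t_step_min=0.01, dist_limit=0.005)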
def run_sss_locally(p, subjects, run_indices):
"""Run SSS locally using maxwell filter in python
See Also
--------
mne.preprocessing.maxwell_filter
"""
data_dir = op.join(op.dirname(__file__), 'data')
if p.cal_file is None:
cal_file = op.join(data_dir, 'sss_cal.dat')
else:
cal_file = p.cal_file
if p.ct_file is None:
ct_file = op.join(data_dir, 'ct_sparse.fif')
else:
ct_file = p.ct_file
assert isinstance(p.tsss_dur, float) and p.tsss_dur > 0
st_duration = p.tsss_dur
assert (isinstance(p.sss_regularize, string_types) or
p.sss_regularize is None)
reg = p.sss_regularize
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Maxwell filtering subject %g/%g (%s).'
% (si + 1, len(subjects), subj))
# locate raw files with splits
sss_dir = op.join(p.work_dir, subj, p.sss_dir)
if not op.isdir(sss_dir):
os.mkdir(sss_dir)
raw_files = get_raw_fnames(p, subj, 'raw', erm=False,
run_indices=run_indices[si])
raw_files_out = get_raw_fnames(p, subj, 'sss', erm=False,
run_indices=run_indices[si])
erm_files = get_raw_fnames(p, subj, 'raw', 'only')
erm_files_out = get_raw_fnames(p, subj, 'sss', 'only')
prebad_file = _prebad(p, subj)
# process raw files
for ii, (r, o) in enumerate(zip(raw_files, raw_files_out)):
if not op.isfile(r):
raise NameError('File not found (' + r + ')')
raw = read_raw_fif(r, preload=True, allow_maxshield='yes')
raw.fix_mag_coil_types()
_load_meg_bads(raw, prebad_file, disp=ii == 0, prefix=' ' * 6)
print(' Processing %s ...' % op.basename(r))
# estimate head position for movement compensation
head_pos, annot, _ = _head_pos_annot(p, r, prefix=' ')
raw.annotations = annot
# get the destination head position
assert isinstance(p.trans_to, (string_types, tuple, type(None)))
trans_to = _load_trans_to(p, subj, run_indices[si])
# filter cHPI signals
if p.filter_chpi:
t0 = time.time()
print(' Filtering cHPI signals ... ', end='')
raw = filter_chpi(raw)
print('%i sec' % (time.time() - t0,))
# apply maxwell filter
t0 = time.time()
print(' Running maxwell_filter ... ', end='')
raw_sss = maxwell_filter(
raw, origin=p.sss_origin, int_order=p.int_order,
ext_order=p.ext_order, calibration=cal_file,
cross_talk=ct_file, st_correlation=p.st_correlation,
st_duration=st_duration, destination=trans_to,
coord_frame='head', head_pos=head_pos, regularize=reg,
bad_condition='warning')
print('%i sec' % (time.time() - t0,))
raw_sss.save(o, overwrite=True, buffer_size_sec=None)
# process erm files if any
for ii, (r, o) in enumerate(zip(erm_files, erm_files_out)):
if not op.isfile(r):
raise NameError('File not found (' + r + ')')
raw = read_raw_fif(r, preload=True, allow_maxshield='yes')
raw.fix_mag_coil_types()
_load_meg_bads(raw, prebad_file, disp=False)
print(' %s ...' % op.basename(r))
t0 = time.time()
print(' Running maxwell_filter ... ', end='')
# apply maxwell filter
raw_sss = maxwell_filter(
raw, int_order=p.int_order, ext_order=p.ext_order,
calibration=cal_file, cross_talk=ct_file,
st_correlation=p.st_correlation, st_duration=st_duration,
destination=None, coord_frame='meg')
print('%i sec' % (time.time() - t0,))
raw_sss.save(o, overwrite=True, buffer_size_sec=None)
def _load_trans_to(p, subj, run_indices, raw=None):
if isinstance(p.trans_to, string_types):
if p.trans_to == 'median':
trans_to = op.join(p.work_dir, subj, p.raw_dir,
subj + '_median_pos.fif')
if not op.isfile(trans_to):
calc_median_hp(p, subj, trans_to, run_indices)
elif p.trans_to == 'twa':
trans_to = op.join(p.work_dir, subj, p.raw_dir,
subj + '_twa_pos.fif')
if not op.isfile(trans_to):
calc_twa_hp(p, subj, trans_to, run_indices)
trans_to = mne.read_trans(trans_to)
elif p.trans_to is None:
trans_to = None if raw is None else raw.info['dev_head_t']
else:
trans_to = np.array(p.trans_to, float)
t = np.eye(4)
if trans_to.shape == (4,):
theta = np.deg2rad(trans_to[3])
t[1:3, 1:3] = [[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]
elif trans_to.shape != (3,):
raise ValueError('trans_to must have 3 or 4 elements, '
'got shape %s' % (trans_to.shape,))
t[:3, 3] = trans_to[:3]
trans_to = mne.Transform('meg', 'head', t)
if trans_to is not None:
trans_to = mne.transforms._ensure_trans(trans_to, 'meg', 'head')
return trans_to
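# Example of the array forms of ``trans_to`` handled above (values are
# illustrative): a 3-element array gives a device-to-head translation in
# meters, and a 4-element array appends a rotation (in degrees) about the
# x axis:
#
#     params.trans_to = (0., 0., 0.04)        # translate 4 cm along z
#     params.trans_to = (0., 0., 0.04, -30.)  # same, plus -30 deg about x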
def _load_meg_bads(raw, prebad_file, disp=True, prefix=' '):
"""Helper to load MEG bad channels from a file (pre-MF)"""
with open(prebad_file, 'r') as fid:
lines = fid.readlines()
lines = [line.strip() for line in lines if len(line.strip()) > 0]
if len(lines) > 0:
try:
int(lines[0][0])
except ValueError:
# MNE-Python type file
bads = lines
else:
# Maxfilter type file
if len(lines) > 1:
raise RuntimeError('Could not parse bad file')
bads = ['MEG%04d' % int(bad) for bad in lines[0].split()]
else:
bads = list()
if disp:
pl = '' if len(bads) == 1 else 's'
print('%sMarking %s bad MEG channel%s using %s'
% (prefix, len(bads), pl, op.basename(prebad_file)))
raw.info['bads'] = bads
raw.info._check_consistency()
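# The prebad file parsed above can take one of two forms (contents below are
# illustrative):
#
#   MNE-Python style, one channel name per line:
#       MEG0113
#       MEG2342
#   MaxFilter style, a single line of bare channel numbers:
#       113 2342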
def extract_expyfun_events(fname, return_offsets=False):
"""Extract expyfun-style serial-coded events from file
Parameters
----------
fname : str
Filename to use.
return_offsets : bool
If True, return the time of each press relative to trial onset
in addition to the press number.
Returns
-------
events : array
Array of events of shape (N, 3), re-coded such that 1 triggers
are renamed according to their binary expyfun representation.
    presses : list of arrays
        List of all press events that occurred after each 1 trigger (i.e.,
        between consecutive 1 triggers). Each array has shape (N_presses,).
        If return_offsets is True, then each array has shape (N_presses, 2),
        with the first column as the time offset from the trial trigger.
orig_events : array
Original events array.
Notes
-----
When this function translates binary event codes into decimal integers, it
adds 1 to the value of all events. This is done to prevent the occurrence
of events with a value of 0 (which downstream processing would treat as
non-events). If you need to convert the integer event codes back to binary,
subtract 1 before doing so to yield the original binary values.
"""
# Read events
raw = read_raw_fif(fname, allow_maxshield='yes', preload=True)
raw.pick_types(meg=False, stim=True)
orig_events = find_events(raw, stim_channel='STI101', shortest_event=0)
events = list()
for ch in range(1, 9):
stim_channel = 'STI%03d' % ch
ev_101 = find_events(raw, stim_channel='STI101', mask=2 ** (ch - 1),
mask_type='and')
if stim_channel in raw.ch_names:
ev = find_events(raw, stim_channel=stim_channel)
if not np.array_equal(ev_101[:, 0], ev[:, 0]):
warnings.warn('Event coding mismatch between STIM channels')
else:
ev = ev_101
ev[:, 2] = 2 ** (ch - 1)
events.append(ev)
events = np.concatenate(events)
events = events[np.argsort(events[:, 0])]
# check for the correct number of trials
aud_idx = np.where(events[:, 2] == 1)[0]
breaks = np.concatenate(([0], aud_idx, [len(events)]))
resps = []
event_nums = []
for ti in range(len(aud_idx)):
# pull out responses (they come *after* 1 trig)
these = events[breaks[ti + 1]:breaks[ti + 2], :]
resp = these[these[:, 2] > 8]
resp = np.c_[(resp[:, 0] - events[ti, 0]) / raw.info['sfreq'],
np.log2(resp[:, 2]) - 3]
resps.append(resp if return_offsets else resp[:, 1])
# look at trial coding, double-check trial type (pre-1 trig)
these = events[breaks[ti + 0]:breaks[ti + 1], 2]
serials = these[np.logical_and(these >= 4, these <= 8)]
en = np.sum(2 ** np.arange(len(serials))[::-1] * (serials == 8)) + 1
event_nums.append(en)
these_events = events[aud_idx]
these_events[:, 2] = event_nums
return these_events, resps, orig_events
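# Worked example of the serial decoding above: if the triggers preceding a
# "1" trigger are [4, 8, 4, 8] (4 encodes binary 0, 8 encodes binary 1), the
# trial code is 0b0101 + 1 = 6:
#
#     serials = np.array([4, 8, 4, 8])
#     en = np.sum(2 ** np.arange(len(serials))[::-1] * (serials == 8)) + 1
#     assert en == 6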
def fix_eeg_files(p, subjects, structurals=None, dates=None, run_indices=None):
"""Reorder EEG channels based on UW cap setup and params
Reorders only the SSS files based on params, to leave the raw files
in an unmodified state.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
structurals : list of str
Subject structural names.
dates : list of tuple
Dates that each subject was run.
run_indices : array-like | None
Run indices to include.
"""
if run_indices is None:
run_indices = [None] * len(subjects)
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Fixing subject %g/%g.' % (si + 1, len(subjects)))
raw_names = get_raw_fnames(p, subj, 'sss', True, False,
run_indices[si])
# Now let's make sure we only run files that actually exist
names = [name for name in raw_names if op.isfile(name)]
# noinspection PyPep8
if structurals is not None and structurals[si] is not None and \
dates is not None:
assert isinstance(structurals[si], string_types)
assert dates[si] is None or (isinstance(dates[si], tuple) and
len(dates[si]) == 3)
assert dates[si] is None or all([isinstance(d, int)
for d in dates[si]])
anon = dict(first_name=subj, last_name=structurals[si],
birthday=dates[si])
else:
anon = None
fix_eeg_channels(names, anon)
def get_fsaverage_medial_vertices(concatenate=True, subjects_dir=None,
vertices=None):
"""Returns fsaverage medial wall vertex numbers
These refer to the standard fsaverage source space
(with vertices from 0 to 2*10242-1).
Parameters
----------
concatenate : bool
        If True, the medial wall vertices are returned as a single array of
        indices into the concatenated left and right hemisphere source
        space. This is useful when treating the source space as a single
        entity (e.g., during clustering).
subjects_dir : str
Directory containing subjects data. If None use
the Freesurfer SUBJECTS_DIR environment variable.
vertices : None | list
Can be None to use ``[np.arange(10242)] * 2``.
Returns
-------
vertices : list of array, or array
The medial wall vertices.
"""
if vertices is None:
vertices = [np.arange(10242), np.arange(10242)]
subjects_dir = mne.utils.get_subjects_dir(subjects_dir, raise_error=True)
label_dir = op.join(subjects_dir, 'fsaverage', 'label')
lh = read_label(op.join(label_dir, 'lh.Medial_wall.label'))
rh = read_label(op.join(label_dir, 'rh.Medial_wall.label'))
if concatenate:
bad_left = np.where(np.in1d(vertices[0], lh.vertices))[0]
bad_right = np.where(np.in1d(vertices[1], rh.vertices))[0]
return np.concatenate((bad_left, bad_right + len(vertices[0])))
else:
return [lh.vertices, rh.vertices]
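# Hedged usage sketch: zero out medial-wall vertices in fsaverage-morphed
# data (``stc_data`` is a hypothetical array of shape (20484, n_times)):
#
#     bad_verts = get_fsaverage_medial_vertices(concatenate=True)
#     stc_data[bad_verts] = 0.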
@verbose
def get_fsaverage_label_operator(parc='aparc.a2009s', remove_bads=True,
combine_medial=False, return_labels=False,
subjects_dir=None, verbose=None):
"""Get a label operator matrix for fsaverage."""
subjects_dir = mne.utils.get_subjects_dir(subjects_dir, raise_error=True)
src = mne.read_source_spaces(op.join(
subjects_dir, 'fsaverage', 'bem', 'fsaverage-5-src.fif'),
verbose=False)
fs_vertices = [np.arange(10242), np.arange(10242)]
assert all(np.array_equal(a['vertno'], b)
for a, b in zip(src, fs_vertices))
labels = mne.read_labels_from_annot('fsaverage', parc)
# Remove bad labels
if remove_bads:
bads = get_fsaverage_medial_vertices(False)
bads = dict(lh=bads[0], rh=bads[1])
assert all(b.size > 1 for b in bads.values())
labels = [label for label in labels
if np.in1d(label.vertices, bads[label.hemi]).mean() < 0.8]
del bads
if combine_medial:
labels = combine_medial_labels(labels)
offsets = dict(lh=0, rh=10242)
rev_op = np.zeros((20484, len(labels)))
for li, label in enumerate(labels):
if isinstance(label, mne.BiHemiLabel):
use_labels = [label.lh, label.rh]
else:
use_labels = [label]
for ll in use_labels:
rev_op[ll.get_vertices_used() + offsets[ll.hemi], li:li + 1] = 1.
# every src vertex is in exactly one label, except medial wall verts
# assert (rev_op.sum(-1) == 1).sum()
label_op = mne.SourceEstimate(np.eye(20484), fs_vertices, 0, 1)
label_op = label_op.extract_label_time_course(labels, src)
out = (label_op, rev_op)
if return_labels:
out += (labels,)
return out
@verbose
def combine_medial_labels(labels, subject='fsaverage', surf='white',
dist_limit=0.02, subjects_dir=None):
subjects_dir = mne.utils.get_subjects_dir(subjects_dir, raise_error=True)
rrs = dict((hemi, mne.read_surface(op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, surf)))[0] / 1000.)
for hemi in ('lh', 'rh'))
use_labels = list()
used = np.zeros(len(labels), bool)
logger.info('Matching medial regions for %s labels on %s %s, d=%0.1f mm'
% (len(labels), subject, surf, 1000 * dist_limit))
for li1, l1 in enumerate(labels):
if used[li1]:
continue
used[li1] = True
use_label = l1.copy()
rr1 = rrs[l1.hemi][l1.vertices]
for li2 in np.where(~used)[0]:
l2 = labels[li2]
same_name = (l2.name.replace(l2.hemi, '') ==
l1.name.replace(l1.hemi, ''))
if l2.hemi != l1.hemi and same_name:
rr2 = rrs[l2.hemi][l2.vertices]
mean_min = np.mean(mne.surface._compute_nearest(
rr1, rr2, return_dists=True)[1])
if mean_min <= dist_limit:
use_label += l2
used[li2] = True
logger.info(' Matched: ' + l1.name)
use_labels.append(use_label)
logger.info('Total %d labels' % (len(use_labels),))
return use_labels
def _restrict_reject_flat(reject, flat, raw):
"""Restrict a reject and flat dict based on channel presence"""
use_reject, use_flat = dict(), dict()
for in_, out in zip([reject, flat], [use_reject, use_flat]):
use_keys = [key for key in in_.keys() if key in raw]
for key in use_keys:
out[key] = in_[key]
return use_reject, use_flat
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
out_numbers, must_match, decim, run_indices):
"""Generate epochs from raw data based on events
Can only complete after preprocessing is complete.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
in_names : list of str
Names of input events.
in_numbers : list of list of int
Event numbers (in scored event files) associated with each name.
analyses : list of str
Lists of analyses of interest.
out_names : list of list of str
Event types to make out of old ones.
out_numbers : list of list of int
Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
three event types, where the first two and last two event types from
the original list get collapsed over).
must_match : list of int
Indices from the original in_names that must match in event counts
before collapsing. Should eventually be expanded to allow for
ratio-based collapsing.
decim : int | list of int
Amount to decimate.
run_indices : array-like | None
Run indices to include.
"""
in_names = np.asanyarray(in_names)
old_dict = dict()
for n, e in zip(in_names, in_numbers):
old_dict[n] = e
# let's do some sanity checks
if len(in_names) != len(in_numbers):
raise RuntimeError('in_names (%d) must have same length as '
'in_numbers (%d)'
% (len(in_names), len(in_numbers)))
if np.any(np.array(in_numbers) <= 0):
raise ValueError('in_numbers must all be > 0')
if len(out_names) != len(out_numbers):
raise RuntimeError('out_names must have same length as out_numbers')
for name, num in zip(out_names, out_numbers):
num = np.array(num)
if len(name) != len(np.unique(num[num > 0])):
raise RuntimeError('each entry in out_names must have length '
'equal to the number of unique elements in the '
'corresponding entry in out_numbers:\n%s\n%s'
% (name, np.unique(num[num > 0])))
if len(num) != len(in_names):
raise RuntimeError('each entry in out_numbers must have the same '
'length as in_names')
if (np.array(num) == 0).any():
raise ValueError('no element of out_numbers can be zero')
ch_namess = list()
drop_logs = list()
sfreqs = set()
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Loading raw files for subject %s.' % subj)
epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
if not op.isdir(epochs_dir):
os.mkdir(epochs_dir)
evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
if not op.isdir(evoked_dir):
os.mkdir(evoked_dir)
# read in raw files
raw_names = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
# read in events
first_samps = []
last_samps = []
for raw_fname in raw_names:
raw = read_raw_fif(raw_fname, preload=False)
first_samps.append(raw._first_samps[0])
last_samps.append(raw._last_samps[-1])
# read in raw files
raw = [read_raw_fif(fname, preload=False) for fname in raw_names]
_fix_raw_eog_cals(raw, raw_names) # EOG epoch scales might be bad!
raw = concatenate_raws(raw)
# read in events
events = [read_events(fname) for fname in
get_event_fnames(p, subj, run_indices[si])]
events = concatenate_events(events, first_samps, last_samps)
# do time adjustment
t_adj = np.zeros((1, 3), dtype='int')
t_adj[0, 0] = np.round(-p.t_adjust * raw.info['sfreq']).astype(int)
events = events.astype(int) + t_adj
new_sfreq = raw.info['sfreq'] / decim[si]
if p.disp_files:
print(' Epoching data (decim=%s -> sfreq=%s Hz).'
% (decim[si], new_sfreq))
if new_sfreq not in sfreqs:
if len(sfreqs) > 0:
warnings.warn('resulting new sampling frequency %s not equal '
'to previous values %s' % (new_sfreq, sfreqs))
sfreqs.add(new_sfreq)
if p.autoreject_thresholds:
from autoreject import get_rejection_threshold
            print('    Using autoreject to compute rejection thresholds')
temp_epochs = Epochs(raw, events, event_id=None, tmin=p.tmin,
tmax=p.tmax, baseline=_get_baseline(p),
proj=True, reject=None, flat=None,
preload=True, decim=decim[si])
new_dict = get_rejection_threshold(temp_epochs)
use_reject = dict()
use_reject.update((k, new_dict[k]) for k in p.autoreject_types)
use_reject, use_flat = _restrict_reject_flat(use_reject,
p.flat, raw)
else:
use_reject, use_flat = _restrict_reject_flat(p.reject, p.flat, raw)
epochs = Epochs(raw, events, event_id=old_dict, tmin=p.tmin,
tmax=p.tmax, baseline=_get_baseline(p),
reject=use_reject, flat=use_flat, proj='delayed',
preload=True, decim=decim[si], on_missing=p.on_missing,
reject_tmin=p.reject_tmin, reject_tmax=p.reject_tmax)
del raw
if epochs.events.shape[0] < 1:
epochs.plot_drop_log()
raise ValueError('No valid epochs')
drop_logs.append(epochs.drop_log)
ch_namess.append(epochs.ch_names)
# only kept trials that were not dropped
sfreq = epochs.info['sfreq']
epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(p, subj,
analyses)
mat_file, fif_file = epochs_fnames
# now deal with conditions to save evoked
if p.disp_files:
print(' Matching trial counts and saving data to disk.')
for var, name in ((out_names, 'out_names'),
(out_numbers, 'out_numbers'),
(must_match, 'must_match'),
(evoked_fnames, 'evoked_fnames')):
if len(var) != len(analyses):
raise ValueError('len(%s) (%s) != len(analyses) (%s)'
% (name, len(var), len(analyses)))
for analysis, names, numbers, match, fn in zip(analyses, out_names,
out_numbers, must_match,
evoked_fnames):
# do matching
numbers = np.asanyarray(numbers)
nn = numbers[numbers >= 0]
new_numbers = []
for num in numbers:
if num > 0 and num not in new_numbers:
# Eventually we could relax this requirement, but not
# having it in place is likely to cause people pain...
if any(num < n for n in new_numbers):
raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
new_numbers.append(num)
new_numbers = np.array(new_numbers)
in_names_match = in_names[match]
# use some variables to allow safe name re-use
offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
safety_str = '__mnefun_copy__'
assert len(new_numbers) == len(names) # checked above
if p.match_fun is None:
# first, equalize trial counts (this will make a copy)
e = epochs[list(in_names[numbers > 0])]
if len(in_names_match) > 1:
e.equalize_event_counts(in_names_match)
# second, collapse relevant types
for num, name in zip(new_numbers, names):
collapse = [x for x in in_names[num == numbers]
if x in e.event_id]
combine_event_ids(e, collapse,
{name + safety_str: num + offset},
copy=False)
for num, name in zip(new_numbers, names):
e.events[e.events[:, 2] == num + offset, 2] -= offset
e.event_id[name] = num
del e.event_id[name + safety_str]
else: # custom matching
e = p.match_fun(epochs.copy(), analysis, nn,
in_names_match, names)
# now make evoked for each out type
evokeds = list()
for name in names:
this_e = e[name]
if len(this_e) > 0:
evokeds.append(this_e.average())
evokeds.append(this_e.standard_error())
write_evokeds(fn, evokeds)
naves = [str(n) for n in sorted(set([evoked.nave
for evoked in evokeds]))]
naves = ', '.join(naves)
if p.disp_files:
print(' Analysis "%s": %s epochs / condition'
% (analysis, naves))
if p.disp_files:
print(' Saving epochs to disk.')
if 'mat' in p.epochs_type:
spio.savemat(mat_file, dict(epochs=epochs.get_data(),
events=epochs.events, sfreq=sfreq,
drop_log=epochs.drop_log),
do_compression=True, oned_as='column')
if 'fif' in p.epochs_type:
epochs.save(fif_file)
if p.plot_drop_logs:
for subj, drop_log in zip(subjects, drop_logs):
plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
def gen_inverses(p, subjects, run_indices):
"""Generate inverses
Can only complete successfully following forward solution
calculation and covariance estimation.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
for si, subj in enumerate(subjects):
out_flags, meg_bools, eeg_bools = [], [], []
if p.disp_files:
print(' Subject %s. ' % subj)
inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
fwd_dir = op.join(p.work_dir, subj, p.forward_dir)
cov_dir = op.join(p.work_dir, subj, p.cov_dir)
if not op.isdir(inv_dir):
os.mkdir(inv_dir)
make_erm_inv = len(p.runs_empty) > 0
# Shouldn't matter which raw file we use
raw_fname = get_raw_fnames(p, subj, 'pca', True, False,
run_indices[si])[0]
raw = read_raw_fif(raw_fname)
meg, eeg = 'meg' in raw, 'eeg' in raw
if meg:
out_flags += ['-meg']
meg_bools += [True]
eeg_bools += [False]
if eeg:
out_flags += ['-eeg']
meg_bools += [False]
eeg_bools += [True]
if meg and eeg:
out_flags += ['-meg-eeg']
meg_bools += [True]
eeg_bools += [True]
if p.compute_rank:
from mne.utils import estimate_rank
epochs_fnames, _ = get_epochs_evokeds_fnames(p, subj, p.analyses)
_, fif_file = epochs_fnames
epochs = mne.read_epochs(fif_file)
rank = dict()
if meg:
eps = epochs.copy().pick_types(meg=meg, eeg=False)
rank['meg'] = estimate_rank(eps._data.transpose([1, 0, 2]).
reshape(len(eps.picks), -1),
tol=1e-6)
if eeg:
eps = epochs.copy().pick_types(meg=False, eeg=eeg)
rank['eeg'] = estimate_rank(eps._data.transpose([1, 0, 2]).
reshape(len(eps.picks), -1),
tol=1e-6)
for k, v in rank.items():
print(' %s : rank %2d\n' % (k, v), end='')
else:
rank = None
if make_erm_inv:
erm_name = op.join(cov_dir, safe_inserter(p.runs_empty[0], subj) +
p.pca_extra + p.inv_tag + '-cov.fif')
empty_cov = read_cov(erm_name)
if empty_cov.get('method', 'empirical') == 'empirical':
empty_cov = regularize(empty_cov, raw.info)
for name in p.inv_names:
s_name = safe_inserter(name, subj)
temp_name = s_name + ('-%d' % p.lp_cut) + p.inv_tag
fwd_name = op.join(fwd_dir, s_name + p.inv_tag + '-fwd.fif')
fwd = read_forward_solution(fwd_name)
fwd = convert_forward_solution(fwd, surf_ori=True)
looses = [1]
tags = [p.inv_free_tag]
fixeds = [False]
depths = [0.8]
if fwd['src'][0]['type'] == 'surf':
looses += [0, 0.2]
tags += [p.inv_fixed_tag, p.inv_loose_tag]
fixeds += [True, False]
depths += [0.8, 0.8]
cov_name = op.join(cov_dir, safe_inserter(name, subj) +
('-%d' % p.lp_cut) + p.inv_tag + '-cov.fif')
cov = read_cov(cov_name)
if cov.get('method', 'empirical') == 'empirical':
cov = regularize(cov, raw.info)
for f, m, e in zip(out_flags, meg_bools, eeg_bools):
fwd_restricted = pick_types_forward(fwd, meg=m, eeg=e)
for l, s, x, d in zip(looses, tags, fixeds, depths):
inv_name = op.join(inv_dir, temp_name + f + s + '-inv.fif')
inv = make_inverse_operator(raw.info, fwd_restricted, cov,
loose=l, depth=d, fixed=x,
use_cps=True, rank=rank)
write_inverse_operator(inv_name, inv)
if (not e) and make_erm_inv:
inv_name = op.join(inv_dir, temp_name + f +
p.inv_erm_tag + s + '-inv.fif')
inv = make_inverse_operator(raw.info, fwd_restricted,
empty_cov, loose=l,
depth=d, fixed=x)
write_inverse_operator(inv_name, inv)
def gen_forwards(p, subjects, structurals, run_indices):
"""Generate forward solutions
Can only complete successfully once coregistration is performed
(usually in mne_analyze).
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
structurals : list (of str or None)
The structural data names for each subject (e.g., ['AKCLEE_101', ...]).
If None, a spherical BEM and volume grid space will be used.
run_indices : array-like | None
Run indices to include.
"""
for si, subj in enumerate(subjects):
struc = structurals[si]
fwd_dir = op.join(p.work_dir, subj, p.forward_dir)
if not op.isdir(fwd_dir):
os.mkdir(fwd_dir)
raw_fname = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])[0]
info = read_info(raw_fname)
bem, src, trans, bem_type = _get_bem_src_trans(p, info, subj, struc)
if not getattr(p, 'translate_positions', True):
raise RuntimeError('Not translating positions is no longer '
'supported')
print(' Creating forward solution(s) using a %s for %s...'
% (bem_type, subj))
# XXX Don't actually need to generate a different fwd for each inv
# anymore, since all runs are included, but changing the filename
# would break a lot of existing pipelines :(
for ii, (inv_name, inv_run) in enumerate(zip(p.inv_names,
p.inv_runs)):
fwd_name = op.join(fwd_dir, safe_inserter(inv_name, subj) +
p.inv_tag + '-fwd.fif')
fwd = make_forward_solution(
info, trans, src, bem, n_jobs=p.n_jobs, mindist=p.fwd_mindist)
write_forward_solution(fwd_name, fwd, overwrite=True)
def _get_bem_src_trans(p, info, subj, struc):
subjects_dir = mne.utils.get_subjects_dir(p.subjects_dir, raise_error=True)
assert isinstance(subjects_dir, string_types)
if struc is None: # spherical case
bem, src, trans = _spherical_conductor(info, subj, p.src_pos)
bem_type = 'spherical-model'
else:
trans = op.join(p.work_dir, subj, p.trans_dir, subj + '-trans.fif')
if not op.isfile(trans):
old = trans
trans = op.join(p.work_dir, subj, p.trans_dir,
subj + '-trans_head2mri.txt')
if not op.isfile(trans):
raise IOError('Unable to find head<->MRI trans files in:\n'
'%s\n%s' % (old, trans))
trans = mne.read_trans(trans)
trans = mne.transforms._ensure_trans(trans, 'mri', 'head')
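        # Look for an existing oct-6 source space (the file naming convention
        # changed across MNE versions); create and save one if none is found.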
for mid in ('oct6', 'oct-6'):
src_space_file = op.join(subjects_dir, struc, 'bem',
'%s-%s-src.fif' % (struc, mid))
if op.isfile(src_space_file):
break
else: # if neither exists, use last filename
print(' Creating source space for %s...' % subj)
src = setup_source_space(struc, spacing='oct6',
n_jobs=p.n_jobs)
write_source_spaces(src_space_file, src)
src = read_source_spaces(src_space_file)
bem = op.join(subjects_dir, struc, 'bem', '%s-%s-bem-sol.fif'
% (struc, p.bem_type))
bem = mne.read_bem_solution(bem, verbose=False)
bem_type = ('%s-layer BEM' % len(bem['surfs']))
return bem, src, trans, bem_type
def gen_covariances(p, subjects, run_indices):
"""Generate forward solutions
Can only complete successfully once preprocessing is performed.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
for si, subj in enumerate(subjects):
print(' Subject %s/%s...' % (si + 1, len(subjects)))
cov_dir = op.join(p.work_dir, subj, p.cov_dir)
if not op.isdir(cov_dir):
os.mkdir(cov_dir)
# Make empty room cov
if p.runs_empty:
if len(p.runs_empty) > 1:
raise ValueError('Too many empty rooms; undefined output!')
new_run = safe_inserter(p.runs_empty[0], subj)
empty_cov_name = op.join(cov_dir, new_run + p.pca_extra +
p.inv_tag + '-cov.fif')
empty_fif = get_raw_fnames(p, subj, 'pca', 'only', False)[0]
raw = read_raw_fif(empty_fif, preload=True)
raw.pick_types(meg=True, eog=True, exclude='bads')
use_reject, use_flat = _restrict_reject_flat(p.reject, p.flat, raw)
cov = compute_raw_covariance(raw, reject=use_reject, flat=use_flat)
write_cov(empty_cov_name, cov)
# Make evoked covariances
for inv_name, inv_run in zip(p.inv_names, p.inv_runs):
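            # Restrict to the runs assigned to this inverse, intersected with
            # any user-supplied run indices for this subject.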
if run_indices[si] is None:
ridx = inv_run
else:
ridx = np.intersect1d(run_indices[si], inv_run)
raw_fnames = get_raw_fnames(p, subj, 'pca', False, False, ridx)
eve_fnames = get_event_fnames(p, subj, ridx)
raws = []
first_samps = []
last_samps = []
for raw_fname in raw_fnames:
raws.append(read_raw_fif(raw_fname, preload=False))
first_samps.append(raws[-1]._first_samps[0])
last_samps.append(raws[-1]._last_samps[-1])
_fix_raw_eog_cals(raws, raw_fnames) # safe b/c cov only needs MEEG
raw = concatenate_raws(raws)
events = [read_events(e) for e in eve_fnames]
old_count = sum(len(e) for e in events)
events = [p.pick_events_cov(e) for e in events]
new_count = sum(len(e) for e in events)
if new_count != old_count:
print(' Using %s instead of %s original events for '
'covariance calculation' % (new_count, old_count))
events = concatenate_events(events, first_samps,
last_samps)
use_reject, use_flat = _restrict_reject_flat(p.reject, p.flat, raw)
epochs = Epochs(raw, events, event_id=None, tmin=p.bmin,
tmax=p.bmax, baseline=(None, None), proj=False,
reject=use_reject, flat=use_flat, preload=True)
epochs.pick_types(meg=True, eeg=True, exclude=[])
cov_name = op.join(cov_dir, safe_inserter(inv_name, subj) +
('-%d' % p.lp_cut) + p.inv_tag + '-cov.fif')
cov = compute_covariance(epochs, method=p.cov_method)
write_cov(cov_name, cov)
def _fix_raw_eog_cals(raws, raw_names):
"""Fix for annoying issue where EOG cals don't match"""
# Warning: this will only produce correct EOG scalings with preloaded
# raw data!
picks = pick_types(raws[0].info, eeg=False, meg=False, eog=True,
exclude=[])
if len(picks) > 0:
first_cals = _cals(raws[0])[picks]
for ri, r in enumerate(raws[1:]):
picks_2 = pick_types(r.info, eeg=False, meg=False, eog=True,
exclude=[])
assert np.array_equal(picks, picks_2)
these_cals = _cals(r)[picks]
if not np.array_equal(first_cals, these_cals):
warnings.warn('Adjusting EOG cals for %s' % raw_names[ri + 1])
_cals(r)[picks] = first_cals
def _cals(raw):
"""Helper to deal with the .cals->._cals attribute change"""
try:
return raw._cals
except AttributeError:
return raw.cals
def _get_fir_kwargs(fir_design):
"""Get FIR kwargs in backward-compatible way."""
fir_kwargs = dict()
old_kwargs = dict()
if 'fir_design' in mne.fixes._get_args(mne.filter.filter_data):
fir_kwargs.update(fir_design=fir_design)
old_kwargs.update(fir_design='firwin2')
elif fir_design != 'firwin2':
raise RuntimeError('cannot use fir_design=%s with old MNE'
% fir_design)
return fir_kwargs, old_kwargs
# noinspection PyPep8Naming
def _raw_LRFCP(raw_names, sfreq, l_freq, h_freq, n_jobs, n_jobs_resample,
projs, bad_file, disp_files=False, method='fir',
filter_length=32768, apply_proj=True, preload=True,
force_bads=False, l_trans=0.5, h_trans=0.5,
allow_maxshield=False, phase='zero-double', fir_window='hann',
fir_design='firwin2', pick=True):
"""Helper to load, filter, concatenate, then project raw files"""
if isinstance(raw_names, str):
raw_names = [raw_names]
if disp_files:
print(' Loading and filtering %d files.' % len(raw_names))
raw = list()
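    # For each run: load, optionally pick MEG/EEG/EOG/ECG channels, apply the
    # bad-channel file, set an average EEG reference if needed, optionally
    # resample, and band-pass filter before concatenation.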
for rn in raw_names:
r = read_raw_fif(rn, preload=True, allow_maxshield='yes')
if pick:
r.pick_types(meg=True, eeg=True, eog=True, ecg=True, exclude=())
r.load_bad_channels(bad_file, force=force_bads)
r.pick_types(meg=True, eeg=True, eog=True, ecg=True, exclude=[])
if _needs_eeg_average_ref_proj(r.info):
r.set_eeg_reference(projection=True)
if sfreq is not None:
r.resample(sfreq, n_jobs=n_jobs_resample, npad='auto')
fir_kwargs = _get_fir_kwargs(fir_design)[0]
if l_freq is not None or h_freq is not None:
r.filter(l_freq=l_freq, h_freq=h_freq, picks=None,
n_jobs=n_jobs, method=method,
filter_length=filter_length, phase=phase,
l_trans_bandwidth=l_trans, h_trans_bandwidth=h_trans,
fir_window=fir_window, **fir_kwargs)
raw.append(r)
_fix_raw_eog_cals(raw, raw_names)
raws_del = raw[1:]
raw = concatenate_raws(raw, preload=preload)
for r in raws_del:
del r
if disp_files and apply_proj and len(projs) > 0:
print(' Adding and applying projectors.')
raw.add_proj(projs)
if apply_proj:
raw.apply_proj()
return raw
def do_preprocessing_combined(p, subjects, run_indices):
"""Do preprocessing on all raw files together
Calculates projection vectors to use to clean data.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
drop_logs = list()
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Preprocessing subject %g/%g (%s).'
% (si + 1, len(subjects), subj))
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
bad_dir = op.join(p.work_dir, subj, p.bad_dir)
# Create SSP projection vectors after marking bad channels
raw_names = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
empty_names = get_raw_fnames(p, subj, 'sss', 'only')
for r in raw_names + empty_names:
if not op.isfile(r):
raise NameError('File not found (' + r + ')')
bad_file = op.join(bad_dir, 'bad_ch_' + subj + p.bad_tag)
fir_kwargs, old_kwargs = _get_fir_kwargs(p.fir_design)
if isinstance(p.auto_bad, float):
print(' Creating bad channel file, marking bad channels:\n'
' %s' % bad_file)
if not op.isdir(bad_dir):
os.mkdir(bad_dir)
# do autobad
raw = _raw_LRFCP(raw_names, p.proj_sfreq, None, None, p.n_jobs_fir,
p.n_jobs_resample, list(), None, p.disp_files,
method='fir', filter_length=p.filter_length,
apply_proj=False, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window,
pick=True, **fir_kwargs)
events = fixed_len_events(p, raw)
# do not mark eog channels bad
meg, eeg = 'meg' in raw, 'eeg' in raw
picks = pick_types(raw.info, meg=meg, eeg=eeg, eog=False,
exclude=[])
assert p.auto_bad_flat is None or isinstance(p.auto_bad_flat, dict)
assert p.auto_bad_reject is None or isinstance(p.auto_bad_reject,
dict)
if p.auto_bad_reject is None and p.auto_bad_flat is None:
raise RuntimeError('Auto bad channel detection active. Noisy '
'and flat channel detection '
'parameters not defined. '
'At least one criterion must be defined.')
epochs = Epochs(raw, events, None, p.tmin, p.tmax,
baseline=_get_baseline(p), picks=picks,
reject=p.auto_bad_reject, flat=p.auto_bad_flat,
proj=True, preload=True, decim=1,
reject_tmin=p.reject_tmin,
reject_tmax=p.reject_tmax)
# channel scores from drop log
scores = Counter([ch for d in epochs.drop_log for ch in d])
ch_names = np.array(list(scores.keys()))
# channel scores expressed as percentile and rank ordered
counts = (100 * np.array([scores[ch] for ch in ch_names], float) /
len(epochs.drop_log))
order = np.argsort(counts)[::-1]
            # boolean mask selecting channels whose drop percentage exceeds p.auto_bad
mask = counts[order] > p.auto_bad
badchs = ch_names[order[mask]]
if len(badchs) > 0:
# Make sure we didn't get too many bad MEG or EEG channels
for m, e, thresh in zip([True, False], [False, True],
[p.auto_bad_meg_thresh,
p.auto_bad_eeg_thresh]):
picks = pick_types(epochs.info, meg=m, eeg=e, exclude=[])
if len(picks) > 0:
ch_names = [epochs.ch_names[pp] for pp in picks]
n_bad_type = sum(ch in ch_names for ch in badchs)
if n_bad_type > thresh:
stype = 'meg' if m else 'eeg'
raise RuntimeError('Too many bad %s channels '
'found: %s > %s'
% (stype, n_bad_type, thresh))
print(' The following channels resulted in greater than '
'{:.0f}% trials dropped:\n'.format(p.auto_bad * 100))
print(badchs)
with open(bad_file, 'w') as f:
f.write('\n'.join(badchs))
if not op.isfile(bad_file):
print(' No bad channel file found, clearing bad channels:\n'
' %s' % bad_file)
bad_file = None
proj_nums = p.proj_nums
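        # Epoch time limits (sec) and band-pass limits (Hz) used below when
        # computing the ECG and EOG projectors.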
eog_t_lims = [-0.25, 0.25]
ecg_t_lims = [-0.08, 0.08]
eog_f_lims = [0, 2]
ecg_f_lims = [5, 35]
ecg_eve = op.join(pca_dir, 'preproc_ecg-eve.fif')
ecg_proj = op.join(pca_dir, 'preproc_ecg-proj.fif')
eog_eve = op.join(pca_dir, 'preproc_blink-eve.fif')
eog_proj = op.join(pca_dir, 'preproc_blink-proj.fif')
cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
if not op.isdir(pca_dir):
os.mkdir(pca_dir)
pre_list = [r for ri, r in enumerate(raw_names)
if ri in p.get_projs_from]
# Calculate and apply continuous projectors if requested
projs = list()
raw_orig = _raw_LRFCP(
raw_names=pre_list, sfreq=p.proj_sfreq, l_freq=None, h_freq=None,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=p.disp_files,
method='fir', filter_length=p.filter_length, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans, phase=p.phase,
fir_window=p.fir_window, pick=True, **fir_kwargs)
# Apply any user-supplied extra projectors
if p.proj_extra is not None:
if p.disp_files:
print(' Adding extra projectors from "%s".' % p.proj_extra)
extra_proj = op.join(pca_dir, p.proj_extra)
projs = read_proj(extra_proj)
# Calculate and apply ERM projectors
proj_nums = np.array(proj_nums, int)
if proj_nums.shape != (3, 3):
raise ValueError('proj_nums must be an array with shape (3, 3), '
                         'got %s' % (proj_nums.shape,))
if any(proj_nums[2]):
if len(empty_names) >= 1:
if p.disp_files:
print(' Computing continuous projectors using ERM.')
# Use empty room(s), but processed the same way
raw = _raw_LRFCP(
raw_names=empty_names, sfreq=p.proj_sfreq,
l_freq=None, h_freq=None, n_jobs=p.n_jobs_fir,
n_jobs_resample=p.n_jobs_resample, projs=projs,
bad_file=bad_file, disp_files=p.disp_files, method='fir',
filter_length=p.filter_length, force_bads=True,
l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, **fir_kwargs)
else:
if p.disp_files:
print(' Computing continuous projectors using data.')
raw = raw_orig.copy()
raw.filter(None, p.cont_lp, n_jobs=p.n_jobs_fir, method='fir',
filter_length=p.filter_length, h_trans_bandwidth=0.5,
fir_window=p.fir_window, phase=p.phase, **fir_kwargs)
raw.add_proj(projs)
raw.apply_proj()
pr = compute_proj_raw(raw, duration=1, n_grad=proj_nums[2][0],
n_mag=proj_nums[2][1], n_eeg=proj_nums[2][2],
reject=None, flat=None, n_jobs=p.n_jobs_mkl)
write_proj(cont_proj, pr)
projs.extend(pr)
del raw
# Calculate and apply the ECG projectors
if any(proj_nums[0]):
if p.disp_files:
print(' Computing ECG projectors.')
raw = raw_orig.copy()
raw.filter(ecg_f_lims[0], ecg_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
**old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
pr, ecg_events, drop_log = \
compute_proj_ecg(raw, n_grad=proj_nums[0][0],
n_jobs=p.n_jobs_mkl,
n_mag=proj_nums[0][1], n_eeg=proj_nums[0][2],
tmin=ecg_t_lims[0], tmax=ecg_t_lims[1],
l_freq=None, h_freq=None, no_proj=True,
qrs_threshold='auto', ch_name=p.ecg_channel,
reject=p.ssp_ecg_reject, return_drop_log=True,
average=p.proj_ave)
n_good = sum(len(d) == 0 for d in drop_log)
if n_good >= 20:
write_events(ecg_eve, ecg_events)
write_proj(ecg_proj, pr)
projs.extend(pr)
else:
plot_drop_log(drop_log)
raw.plot(events=ecg_events)
raise RuntimeError('Only %d/%d good ECG epochs found'
% (n_good, len(ecg_events)))
del raw
# Next calculate and apply the EOG projectors
if any(proj_nums[1]):
if p.disp_files:
print(' Computing EOG projectors.')
raw = raw_orig.copy()
raw.filter(eog_f_lims[0], eog_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
**old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
pr, eog_events = \
compute_proj_eog(raw, n_grad=proj_nums[1][0],
n_jobs=p.n_jobs_mkl,
n_mag=proj_nums[1][1], n_eeg=proj_nums[1][2],
tmin=eog_t_lims[0], tmax=eog_t_lims[1],
l_freq=None, h_freq=None, no_proj=True,
ch_name=p.eog_channel,
reject=p.ssp_eog_reject, average=p.proj_ave)
if eog_events.shape[0] >= 5:
write_events(eog_eve, eog_events)
write_proj(eog_proj, pr)
projs.extend(pr)
else:
warnings.warn('Only %d EOG events!' % eog_events.shape[0])
del raw
# save the projectors
write_proj(all_proj, projs)
# look at raw_orig for trial DQs now, it will be quick
raw_orig.filter(p.hp_cut, p.lp_cut, n_jobs=p.n_jobs_fir, method='fir',
filter_length=p.filter_length,
l_trans_bandwidth=p.hp_trans, phase=p.phase,
h_trans_bandwidth=p.lp_trans, fir_window=p.fir_window,
**fir_kwargs)
raw_orig.add_proj(projs)
raw_orig.apply_proj()
# now let's epoch with 1-sec windows to look for DQs
events = fixed_len_events(p, raw_orig)
use_reject, use_flat = _restrict_reject_flat(p.reject, p.flat,
raw_orig)
epochs = Epochs(raw_orig, events, None, p.tmin, p.tmax, preload=False,
baseline=_get_baseline(p), reject=use_reject,
flat=use_flat, proj=True)
try:
epochs.drop_bad()
except AttributeError: # old way
epochs.drop_bad_epochs()
drop_logs.append(epochs.drop_log)
del raw_orig
del epochs
if p.plot_drop_logs:
for subj, drop_log in zip(subjects, drop_logs):
plot_drop_log(drop_log, p.drop_thresh, subject=subj)
def apply_preprocessing_combined(p, subjects, run_indices):
"""Actually apply and save the preprocessing (projs, filtering)
Can only run after do_preprocessing_combined is done.
Filters data, adds projection vectors, and saves to disk
(overwriting old files).
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
# Now actually save some data
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Applying processing to subject %g/%g.'
% (si + 1, len(subjects)))
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
names_in = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
names_out = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
erm_in = get_raw_fnames(p, subj, 'sss', 'only')
erm_out = get_raw_fnames(p, subj, 'pca', 'only')
bad_dir = op.join(p.work_dir, subj, p.bad_dir)
bad_file = op.join(bad_dir, 'bad_ch_' + subj + p.bad_tag)
bad_file = None if not op.isfile(bad_file) else bad_file
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
projs = read_proj(all_proj)
fir_kwargs = _get_fir_kwargs(p.fir_design)[0]
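        # Empty-room (ERM) runs are filtered and saved first, with the
        # subject's bad channels forced onto them (force_bads=True).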
if len(erm_in) > 0:
for ii, (r, o) in enumerate(zip(erm_in, erm_out)):
if p.disp_files:
print(' Processing erm file %d/%d.'
% (ii + 1, len(erm_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=True, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
for ii, (r, o) in enumerate(zip(names_in, names_out)):
if p.disp_files:
print(' Processing file %d/%d.'
% (ii + 1, len(names_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
# look at raw_clean for ExG events
if p.plot_raw:
_viz_raw_ssp_events(p, subj, run_indices[si])
class FakeEpochs(object):
"""Make iterable epoch-like class, convenient for MATLAB transition"""
def __init__(self, data, ch_names, tmin=-0.2, sfreq=1000.0):
raise RuntimeError('Use mne.EpochsArray instead')
def timestring(t):
"""Reformat time to convenient string
Parameters
----------
t : float
Elapsed time in seconds.
    Returns
    -------
    time : str
The time in HH:MM:SS.
"""
def rediv(ll, b):
return list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60,
60]))
def source_script(script_name):
"""Set environmental variables by source-ing a bash script
Parameters
----------
script_name : str
Path to the script to execute and get the environment variables from.
"""
cmd = ['bash', '-c', 'source ' + script_name + ' > /dev/null && env']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for line in proc.stdout:
(key, _, value) = line.partition("=")
os.environ[key] = value.strip()
proc.communicate()
def fixed_len_events(p, raw):
"""Create fixed length trial events from raw object"""
dur = p.tmax - p.tmin
events = make_fixed_length_events(raw, 1, duration=dur)
return events
def _viz_raw_ssp_events(p, subj, ridx):
"""Helper to plot filtered cleaned raw trace with ExG events"""
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
raw_names = get_raw_fnames(p, subj, 'sss', False, False, ridx)
pre_list = [r for ri, r in enumerate(raw_names)
if ri in p.get_projs_from]
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
projs = read_proj(all_proj)
colors = dict()
ev = np.zeros((0, 3), int)
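    # Collect ECG (event id 999) and blink (event id 998) events if their
    # event files exist, color-coding them red and blue in the plot.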
for n, c, cid in zip(['ecg', 'blink'], ['r', 'b'], [999, 998]):
fname = op.join(pca_dir, 'preproc_%s-eve.fif' % n)
if op.isfile(fname):
ev = np.concatenate((ev, read_events(fname)))
colors[cid] = c
ev = ev[np.argsort(ev[:, 0], axis=0)]
raw = _raw_LRFCP(pre_list, p.proj_sfreq, None, None, p.n_jobs_fir,
p.n_jobs_resample, projs, None, p.disp_files,
method='fir', filter_length=p.filter_length,
force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans)
raw.plot(events=ev, event_color=colors)
def _prebad(p, subj):
"""Helper for locating file containing bad channels during acq"""
prebad_file = op.join(p.work_dir, subj, p.raw_dir, subj + '_prebad.txt')
if not op.isfile(prebad_file): # SSS prebad file
raise RuntimeError('Could not find SSS prebad file: %s' % prebad_file)
return prebad_file
def _head_pos_annot(p, raw_fname, prefix=' '):
"""Locate head position estimation file and do annotations."""
if p.movecomp is None:
return None, None, None
t_window = p.coil_t_window
raw = mne.io.read_raw_fif(raw_fname, allow_maxshield='yes')
if t_window == 'auto':
hpi_freqs, _, _ = _get_hpi_info(raw.info)
# Use the longer of 5 cycles and the difference in HPI freqs.
# This will be 143 ms for 7 Hz spacing (old) and
# 60 ms for 83 Hz lowest freq.
t_window = max(5. / min(hpi_freqs), 1. / np.diff(hpi_freqs).min())
t_window = round(1000 * t_window) / 1000. # round to ms
pos_fname = raw_fname[:-4] + '.pos'
if not op.isfile(pos_fname):
# XXX Someday we can do:
# head_pos = _calculate_chpi_positions(
# raw, t_window=t_window, dist_limit=dist_limit)
# write_head_positions(pos_fname, head_pos)
print('%sEstimating position file %s' % (prefix, pos_fname,))
run_sss_positions(raw_fname, pos_fname,
host=p.sws_ssh, port=p.sws_port, prefix=prefix,
work_dir=p.sws_dir, t_window=t_window,
t_step_min=p.coil_t_step_min,
dist_limit=p.coil_dist_limit)
head_pos = read_head_pos(pos_fname)
# do the coil counts
count_fname = raw_fname[:-4] + '-counts.h5'
if p.coil_dist_limit is None or p.coil_bad_count_duration_limit is None:
fit_data = None
else:
if not op.isfile(count_fname):
fit_t, counts, n_coils = compute_good_coils(
raw, p.coil_t_step_min, t_window, p.coil_dist_limit,
prefix=prefix, verbose=True)
write_hdf5(count_fname,
dict(fit_t=fit_t, counts=counts, n_coils=n_coils,
t_step=p.coil_t_step_min, t_window=t_window,
coil_dist_limit=p.coil_dist_limit), title='mnefun')
fit_data = read_hdf5(count_fname, 'mnefun')
for key, val in (('t_step', p.coil_t_step_min),
('t_window', t_window),
('coil_dist_limit', p.coil_dist_limit)):
if fit_data[key] != val:
raise RuntimeError('Data mismatch %s (%s != %s), set '
'to match existing file or delete it:\n%s'
% (key, val, fit_data[key], count_fname))
# do the annotations
lims = [p.rotation_limit, p.translation_limit, p.coil_dist_limit,
p.coil_t_step_min, t_window, p.coil_bad_count_duration_limit]
annot_fname = raw_fname[:-4] + '-annot.fif'
if not op.isfile(annot_fname):
if np.isfinite(lims[:3]).any() or np.isfinite(lims[5]):
print(prefix.join(['', 'Annotating raw segments with:\n',
u' rotation_limit = %s °/s\n' % lims[0],
u' translation_limit = %s m/s\n' % lims[1],
u' coil_dist_limit = %s m\n' % lims[2],
u' t_step, t_window = %s, %s sec\n'
% (lims[3], lims[4]),
u' 3-good limit = %s sec' % (lims[5],)]))
annot = annotate_head_pos(
raw, head_pos, rotation_limit=lims[0], translation_limit=lims[1],
fit_t=fit_data['fit_t'], counts=fit_data['counts'],
prefix=' ' + prefix, coil_bad_count_duration_limit=lims[5])
if annot is not None:
annot.save(annot_fname)
try:
annot = read_annotations(annot_fname)
except IOError: # no annotations requested
annot = None
return head_pos, annot, fit_data
def info_sss_basis(info, origin='auto', int_order=8, ext_order=3,
coord_frame='head', regularize='in', ignore_ref=True):
"""Compute the SSS basis for a given measurement info structure
Parameters
----------
info : instance of io.Info
The measurement info.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in meters.
The default is ``'auto'``, which means a head-digitization-based
origin fit when ``coord_frame='head'``, and ``(0., 0., 0.)`` when
``coord_frame='meg'``.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
coord_frame : str
The coordinate frame that the ``origin`` is specified in, either
``'meg'`` or ``'head'``. For empty-room recordings that do not have
a head<->meg transform ``info['dev_head_t']``, the MEG coordinate
frame should be used.
regularize : str | None
Basis regularization type, must be "in", "svd" or None.
"in" is the same algorithm as the "-regularize in" option in
MaxFilter™. "svd" (new in v0.13) uses SVD-based regularization by
cutting off singular values of the basis matrix below the minimum
detectability threshold of an ideal head position (usually near
the device origin).
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
"""
if coord_frame not in ('head', 'meg'):
raise ValueError('coord_frame must be either "head" or "meg", not "%s"'
% coord_frame)
origin = _check_origin(origin, info, 'head')
regularize = _check_regularize(regularize, ('in', 'svd'))
meg_picks, mag_picks, grad_picks, good_picks, coil_scale, mag_or_fine = \
_get_mf_picks(info, int_order, ext_order, ignore_ref)
info_good = pick_info(info, good_picks, copy=True)
all_coils = _prep_mf_coils(info_good, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
decomp_coil_scale = coil_scale[good_picks]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
# prepare regularization techniques
if _prep_regularize is None:
raise RuntimeError('mne-python needs to be on the experimental SVD '
'branch to use this function')
_prep_regularize(regularize, all_coils, None, exp, ignore_ref,
coil_scale, grad_picks, mag_picks, mag_or_fine)
# noinspection PyPep8Naming
S = _trans_sss_basis(exp, all_coils, info['dev_head_t'],
coil_scale=decomp_coil_scale)
if regularize is not None:
# noinspection PyPep8Naming
S = _regularize(regularize, exp, S, mag_or_fine, t=0.)[0]
S /= np.linalg.norm(S, axis=0)
return S
def clean_brain(brain_img):
"""Remove borders of a brain image and make transparent."""
bg = (brain_img == brain_img[0, 0]).all(-1)
brain_img = brain_img[(~bg).any(axis=-1)]
brain_img = brain_img[:, (~bg).any(axis=0)]
alpha = 255 * np.ones(brain_img.shape[:-1], np.uint8)
x, y = np.where((brain_img == 255).all(-1))
alpha[x, y] = 0
return np.concatenate((brain_img, alpha[..., np.newaxis]), -1)
def plot_colorbar(pos_lims, ticks=None, ticklabels=None, figsize=(1, 2),
labelsize='small', ticklabelsize='x-small', ax=None,
label='', tickrotation=0., orientation='vertical',
end_labels=None):
import matplotlib.pyplot as plt
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
with plt.rc_context({'axes.labelsize': labelsize,
'xtick.labelsize': ticklabelsize,
'ytick.labelsize': ticklabelsize}):
cmap = mne.viz.utils.mne_analyze_colormap(
limits=pos_lims, format='matplotlib')
adjust = (ax is None)
if ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
else:
fig = ax.figure
norm = Normalize(vmin=-pos_lims[2], vmax=pos_lims[2])
if ticks is None:
ticks = [-pos_lims[2], -pos_lims[1], -pos_lims[0], 0.,
pos_lims[0], pos_lims[1], pos_lims[2]]
if ticklabels is None:
ticklabels = ticks
assert len(ticks) == len(ticklabels)
cbar = ColorbarBase(ax, cmap, norm=norm, ticks=ticks, label=label,
orientation=orientation)
for key in ('left', 'top',
'bottom' if orientation == 'vertical' else 'right'):
ax.spines[key].set_visible(False)
cbar.set_ticklabels(ticklabels)
if orientation == 'horizontal':
plt.setp(ax.xaxis.get_majorticklabels(), rotation=tickrotation)
else:
plt.setp(ax.yaxis.get_majorticklabels(), rotation=tickrotation)
cbar.outline.set_visible(False)
lims = np.array(list(ax.get_xlim()) + list(ax.get_ylim()))
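        # Optionally place end labels slightly (5%) beyond the colorbar ends.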
if end_labels is not None:
if orientation == 'horizontal':
delta = np.diff(lims[:2]) * np.array([-0.05, 0.05])
xs = np.array(lims[:2]) + delta
has = ['right', 'left']
ys = [lims[2:].mean()] * 2
vas = ['center', 'center']
else:
xs = [lims[:2].mean()] * 2
has = ['center'] * 2
delta = np.diff(lims[2:]) * np.array([-0.05, 0.05])
ys = lims[2:] + delta
vas = ['top', 'bottom']
for x, y, l, ha, va in zip(xs, ys, end_labels, has, vas):
ax.text(x, y, l, ha=ha, va=va, fontsize=ticklabelsize)
if adjust:
fig.subplots_adjust(0.01, 0.05, 0.2, 0.95)
return fig
def plot_reconstruction(evoked, origin=(0., 0., 0.04)):
"""Plot the reconstructed data for Evoked
Currently only works for MEG data.
Parameters
----------
evoked : instance of Evoked
The evoked data.
origin : array-like, shape (3,)
The head origin to use.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from mne.forward._field_interpolation import _map_meg_channels
import matplotlib.pyplot as plt
evoked = evoked.copy().pick_types(meg=True, exclude='bads')
info_to = deepcopy(evoked.info)
info_to['projs'] = []
    op = _map_meg_channels(
        evoked.info, info_to, mode='accurate', origin=origin)
fig, axs = plt.subplots(3, 2, squeeze=False)
titles = dict(grad='Gradiometers (fT/cm)', mag='Magnetometers (fT)')
for mi, meg in enumerate(('grad', 'mag')):
picks = pick_types(evoked.info, meg=meg)
kwargs = dict(ylim=dict(grad=[-250, 250], mag=[-600, 600]),
spatial_colors=True, picks=picks)
evoked.plot(axes=axs[0, mi], proj=False,
titles=dict(grad='Proj off', mag=''), **kwargs)
evoked_remap = evoked.copy().apply_proj()
evoked_remap.info['projs'] = []
evoked_remap.plot(axes=axs[1, mi],
titles=dict(grad='Proj on', mag=''), **kwargs)
evoked_remap.data = np.dot(op, evoked_remap.data)
evoked_remap.plot(axes=axs[2, mi],
titles=dict(grad='Recon', mag=''), **kwargs)
axs[0, mi].set_title(titles[meg])
for ii in range(3):
if ii in (0, 1):
axs[ii, mi].set_xlabel('')
if ii in (1, 2):
axs[ii, mi].set_title('')
for ii in range(3):
axs[ii, 1].set_ylabel('')
axs[0, 0].set_ylabel('Original')
axs[1, 0].set_ylabel('Projection')
axs[2, 0].set_ylabel('Reconstruction')
fig.tight_layout()
return fig
def plot_chpi_snr_raw(raw, win_length, n_harmonics=None, show=True):
"""Compute and plot cHPI SNR from raw data
Parameters
----------
    raw : instance of Raw
        The raw data.
    win_length : float
Length of window to use for SNR estimates (seconds). A longer window
will naturally include more low frequency power, resulting in lower
SNR.
n_harmonics : int or None
Number of line frequency harmonics to include in the model. If None,
use all harmonics up to the MEG analog lowpass corner.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
cHPI SNR as function of time, residual variance.
Notes
-----
A general linear model including cHPI and line frequencies is fit into
each data window. The cHPI power obtained from the model is then divided
by the residual variance (variance of signal unexplained by the model) to
obtain the SNR.
The SNR may decrease either due to decrease of cHPI amplitudes (e.g.
head moving away from the helmet), or due to increase in the residual
variance. In case of broadband interference that overlaps with the cHPI
frequencies, the resulting decreased SNR accurately reflects the true
situation. However, increased narrowband interference outside the cHPI
and line frequencies would also cause an increase in the residual variance,
even though it wouldn't necessarily affect estimation of the cHPI
amplitudes. Thus, this method is intended for a rough overview of cHPI
signal quality. A more accurate picture of cHPI quality (at an increased
computational cost) can be obtained by examining the goodness-of-fit of
the cHPI coil fits.
"""
import matplotlib.pyplot as plt
# plotting parameters
legend_fontsize = 10
title_fontsize = 10
tick_fontsize = 10
label_fontsize = 10
# get some info from fiff
sfreq = raw.info['sfreq']
linefreq = raw.info['line_freq']
if n_harmonics is not None:
linefreqs = (np.arange(n_harmonics + 1) + 1) * linefreq
else:
linefreqs = np.arange(linefreq, raw.info['lowpass'], linefreq)
buflen = int(win_length * sfreq)
if buflen <= 0:
raise ValueError('Window length should be >0')
(cfreqs, _, _, _, _) = _get_hpi_info(raw.info)
print('Nominal cHPI frequencies: %s Hz' % cfreqs)
print('Sampling frequency: %s Hz' % sfreq)
print('Using line freqs: %s Hz' % linefreqs)
print('Using buffers of %s samples = %s seconds\n'
% (buflen, buflen/sfreq))
pick_meg = pick_types(raw.info, meg=True, exclude=[])
pick_mag = pick_types(raw.info, meg='mag', exclude=[])
pick_grad = pick_types(raw.info, meg='grad', exclude=[])
nchan = len(pick_meg)
# grad and mag indices into an array that already has meg channels only
pick_mag_ = np.in1d(pick_meg, pick_mag).nonzero()[0]
pick_grad_ = np.in1d(pick_meg, pick_grad).nonzero()[0]
# create general linear model for the data
t = np.arange(buflen) / float(sfreq)
model = np.empty((len(t), 2+2*(len(linefreqs)+len(cfreqs))))
model[:, 0] = t
model[:, 1] = np.ones(t.shape)
# add sine and cosine term for each freq
allfreqs = np.concatenate([linefreqs, cfreqs])
model[:, 2::2] = np.cos(2 * np.pi * t[:, np.newaxis] * allfreqs)
model[:, 3::2] = np.sin(2 * np.pi * t[:, np.newaxis] * allfreqs)
inv_model = linalg.pinv(model)
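    # Least-squares fit of the model to each buffer via the pseudoinverse;
    # the fitted cHPI power is later divided by the residual variance.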
# drop last buffer to avoid overrun
bufs = np.arange(0, raw.n_times, buflen)[:-1]
tvec = bufs/sfreq
snr_avg_grad = np.zeros([len(cfreqs), len(bufs)])
hpi_pow_grad = np.zeros([len(cfreqs), len(bufs)])
snr_avg_mag = np.zeros([len(cfreqs), len(bufs)])
resid_vars = np.zeros([nchan, len(bufs)])
for ind, buf0 in enumerate(bufs):
print('Buffer %s/%s' % (ind+1, len(bufs)))
megbuf = raw[pick_meg, buf0:buf0+buflen][0].T
coeffs = np.dot(inv_model, megbuf)
coeffs_hpi = coeffs[2+2*len(linefreqs):]
resid_vars[:, ind] = np.var(megbuf-np.dot(model, coeffs), 0)
# get total power by combining sine and cosine terms
# sinusoidal of amplitude A has power of A**2/2
hpi_pow = (coeffs_hpi[0::2, :]**2 + coeffs_hpi[1::2, :]**2)/2
hpi_pow_grad[:, ind] = hpi_pow[:, pick_grad_].mean(1)
# divide average HPI power by average variance
snr_avg_grad[:, ind] = hpi_pow_grad[:, ind] / \
resid_vars[pick_grad_, ind].mean()
snr_avg_mag[:, ind] = hpi_pow[:, pick_mag_].mean(1) / \
resid_vars[pick_mag_, ind].mean()
cfreqs_legend = ['%s Hz' % fre for fre in cfreqs]
fig, axs = plt.subplots(4, 1, sharex=True)
# SNR plots for gradiometers and magnetometers
ax = axs[0]
lines1 = ax.plot(tvec, 10*np.log10(snr_avg_grad.T))
lines1_med = ax.plot(tvec, 10*np.log10(np.median(snr_avg_grad, axis=0)),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='SNR (dB)')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power / mean residual variance, gradiometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
ax = axs[1]
lines2 = ax.plot(tvec, 10*np.log10(snr_avg_mag.T))
lines2_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_mag, axis=0)),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='SNR (dB)')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power / mean residual variance, magnetometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
ax = axs[2]
lines3 = ax.plot(tvec, hpi_pow_grad.T)
lines3_med = ax.plot(tvec, np.median(hpi_pow_grad, axis=0),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='Power (T/m)$^2$')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power, gradiometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
# residual (unexplained) variance as function of time
ax = axs[3]
cls = plt.get_cmap('plasma')(np.linspace(0., 0.7, len(pick_meg)))
ax.set_prop_cycle(color=cls)
ax.semilogy(tvec, resid_vars[pick_grad_, :].T, alpha=.4)
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='Var. (T/m)$^2$', xlabel='Time (s)')
ax.xaxis.label.set_fontsize(label_fontsize)
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Residual (unexplained) variance, all gradiometer channels',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
tight_layout(pad=.5, w_pad=.1, h_pad=.2) # from mne.viz
# tight_layout will screw these up
ax = axs[0]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# order curve legends according to mean of data
sind = np.argsort(snr_avg_grad.mean(axis=1))[::-1]
handles = [lines1[i] for i in sind]
handles.append(lines1_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
ax.legend(handles, labels,
prop={'size': legend_fontsize}, bbox_to_anchor=(1.02, 0.5, ),
loc='center left', borderpad=1)
ax = axs[1]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
sind = np.argsort(snr_avg_mag.mean(axis=1))[::-1]
handles = [lines2[i] for i in sind]
handles.append(lines2_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
ax.legend(handles, labels,
prop={'size': legend_fontsize}, bbox_to_anchor=(1.02, 0.5, ),
loc='center left', borderpad=1)
ax = axs[2]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
sind = np.argsort(hpi_pow_grad.mean(axis=1))[::-1]
handles = [lines3[i] for i in sind]
handles.append(lines3_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
ax.legend(handles, labels,
prop={'size': legend_fontsize}, bbox_to_anchor=(1.02, 0.5, ),
loc='center left', borderpad=1)
ax = axs[3]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.show(show)
return fig
@verbose
def compute_good_coils(raw, t_step=0.01, t_window=0.2, dist_limit=0.005,
prefix='', verbose=None):
"""Comute time-varying coil distances."""
from scipy.spatial.distance import cdist
hpi_dig_head_rrs = _get_hpi_initial_fit(raw.info, verbose=False)
n_window = (int(round(t_window * raw.info['sfreq'])) // 2) * 2 + 1
del t_window
hpi = _setup_hpi_struct(raw.info, n_window, verbose=False)
hpi_coil_dists = cdist(hpi_dig_head_rrs, hpi_dig_head_rrs)
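    # Pairwise coil distances from the initial fit serve as the reference
    # geometry; each windowed fit is compared against these distances.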
n_step = int(round(t_step * raw.info['sfreq']))
del t_step
starts = np.arange(0, len(raw.times) - n_window // 2, n_step)
counts = np.empty(len(starts), int)
head_dev_t = mne.transforms.invert_transform(
raw.info['dev_head_t'])['trans']
coil_dev_rrs = mne.transforms.apply_trans(head_dev_t, hpi_dig_head_rrs)
last_fit = None
last = -10.
logger.info('%sComputing %d coil fits in %0.1f ms steps over %0.1f sec'
% (prefix, len(starts), (n_step / raw.info['sfreq']) * 1000,
raw.times[-1]))
for ii, start in enumerate(starts):
time_sl = slice(max(start - n_window // 2, 0), start + n_window // 2)
t = start / raw.info['sfreq']
if t - last >= 10. - 1e-7:
logger.info('%s Fitting %0.1f - %0.1f sec'
% (prefix, t, min(t + 10., raw.times[-1])))
last = t
# Ignore warnings about segments with not enough coils on
sin_fit = _fit_cHPI_amplitudes(raw, time_sl, hpi, t, verbose=False)
# skip this window if it bad.
if sin_fit is None:
counts[ii] = 0
continue
# check if data has sufficiently changed
        if last_fit is not None:  # not the first iteration
# The sign of our fits is arbitrary
flips = np.sign((sin_fit * last_fit).sum(-1, keepdims=True))
sin_fit *= flips
corr = np.corrcoef(sin_fit.ravel(), last_fit.ravel())[0, 1]
# check to see if we need to continue
if corr * corr > 0.98:
# don't need to refit data
counts[ii] = counts[ii - 1]
continue
last_fit = sin_fit.copy()
outs = [_fit_magnetic_dipole(f, pos, hpi['coils'], hpi['scale'],
hpi['method'])
for f, pos in zip(sin_fit, coil_dev_rrs)]
coil_dev_rrs = np.array([o[0] for o in outs])
these_dists = cdist(coil_dev_rrs, coil_dev_rrs)
these_dists = np.abs(hpi_coil_dists - these_dists)
# there is probably a better algorithm for finding the bad ones...
use_mask = np.ones(hpi['n_freqs'], bool)
good = False
while not good:
d = these_dists[use_mask][:, use_mask]
d_bad = (d > dist_limit)
good = not d_bad.any()
if not good:
if use_mask.sum() == 2:
use_mask[:] = False
break # failure
# exclude next worst point
badness = (d * d_bad).sum(axis=0)
exclude_coils = np.where(use_mask)[0][np.argmax(badness)]
use_mask[exclude_coils] = False
counts[ii] = use_mask.sum()
t = (starts + n_window // 2) / raw.info['sfreq']
return t, counts, len(hpi_dig_head_rrs)
@verbose
def plot_good_coils(raw, t_step=1., t_window=0.2, dist_limit=0.005,
show=True, verbose=None):
"""Plot the good coil count as a function of time."""
import matplotlib.pyplot as plt
if isinstance(raw, dict): # fit_data calculated and stored to disk
t = raw['fit_t']
counts = raw['counts']
n_coils = raw['n_coils']
else:
t, counts, n_coils = compute_good_coils(raw, t_step, t_window,
dist_limit)
del t_step, t_window, dist_limit
fig, ax = plt.subplots(figsize=(8, 2))
ax.step(t, counts, zorder=4, color='k', clip_on=False)
ax.set(xlim=t[[0, -1]], ylim=[0, n_coils], xlabel='Time (sec)',
ylabel='Good coils')
ax.set(yticks=np.arange(n_coils + 1))
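    # Shade the background by coil count: green for >= 5 good coils,
    # light green for 4, yellow for 3, and red for <= 2.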
for comp, n, color in ((np.greater_equal, 5, '#2ca02c'),
(np.equal, 4, '#98df8a'),
(np.equal, 3, (1, 1, 0)),
(np.less_equal, 2, (1, 0, 0))):
mask = comp(counts, n)
mask[:-1] |= comp(counts[1:], n)
ax.fill_between(t, 0, n_coils, where=mask,
color=color, edgecolor='none', linewidth=0, zorder=1)
ax.grid(True)
fig.tight_layout()
mne.viz.utils.plt_show(show)
return fig
def compute_auc(dip, tmin=-np.inf, tmax=np.inf):
"""Compute the AUC values for a DipoleFixed object."""
if not isinstance(dip, DipoleFixed):
raise TypeError('dip must be a DipoleFixed, got "%s"' % (type(dip),))
pick = pick_types(dip.info, meg=False, dipole=True)
if len(pick) != 1:
raise RuntimeError('Could not find dipole data')
time_mask = _time_mask(dip.times, tmin, tmax, dip.info['sfreq'])
data = dip.data[pick[0], time_mask]
return np.sum(np.abs(data)) * len(data) * (1. / dip.info['sfreq'])
def _spherical_conductor(info, subject, pos):
"""Helper to make spherical conductor model."""
bem = make_sphere_model(info=info, r0='auto',
head_radius='auto', verbose=False)
src = setup_volume_source_space(subject=subject, sphere=bem,
pos=pos, mindist=1.)
return bem, src, None
def annotate_head_pos(raw, head_pos, rotation_limit=45, translation_limit=0.1,
fit_t=None, counts=None, prefix=' ',
coil_bad_count_duration_limit=0.1):
u"""Annotate a raw instance based on coil counts and head positions.
Parameters
----------
raw : instance of Raw
The raw instance.
head_pos : ndarray | None
The head positions. Can be None if movement compensation is off
to short-circuit the function.
rotation_limit : float
The rotational velocity limit in °/s.
Can be infinite to skip rotation checks.
translation_limit : float
The translational velocity limit in m/s.
Can be infinite to skip translation checks.
fit_t : ndarray
Fit times.
counts : ndarray
Coil counts.
prefix : str
The prefix for printing.
coil_bad_count_duration_limit : float
The lower limit for bad coil counts to remove segments of data.
Returns
-------
annot : instance of Annotations | None
The annotations.
"""
# XXX: Add `sphere_dist_limit` to ensure no sensor collisions at some
# point
do_rotation = np.isfinite(rotation_limit) and head_pos is not None
do_translation = np.isfinite(translation_limit) and head_pos is not None
do_coils = fit_t is not None and counts is not None
if not (do_rotation or do_translation or do_coils):
return None
head_pos_t = head_pos[:, 0]
dt = np.diff(head_pos_t)
annot = mne.Annotations([], [], [])
# Annotate based on bad coil distances
if do_coils:
if np.isfinite(coil_bad_count_duration_limit):
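            # Find onset/offset times where the good-coil count drops below 3.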
changes = np.diff((counts < 3).astype(int))
bad_onsets = fit_t[np.where(changes == 1)[0]]
bad_offsets = fit_t[np.where(changes == -1)[0]]
# Deal with it starting out bad
if counts[0] < 3:
bad_onsets = np.concatenate([[0.], bad_onsets])
if counts[-1] < 3:
bad_offsets = np.concatenate([bad_offsets, [raw.times[-1]]])
assert len(bad_onsets) == len(bad_offsets)
assert (bad_onsets[1:] > bad_offsets[:-1]).all()
count = 0
dur = 0.
for onset, offset in zip(bad_onsets, bad_offsets):
if offset - onset > coil_bad_count_duration_limit - 1e-6:
annot.append(onset, offset - onset, 'BAD_HPI_COUNT')
dur += offset - onset
count += 1
print('%sOmitting %5.1f%% (%3d segments) '
'due to < 3 good coils for over %s sec'
% (prefix, 100 * dur / raw.times[-1], count,
coil_bad_count_duration_limit))
# Annotate based on rotational velocity
if do_rotation:
assert rotation_limit > 0
# Rotational velocity (radians / sec)
r = mne.transforms._angle_between_quats(head_pos[:-1, 1:4],
head_pos[1:, 1:4])
r /= dt
bad_idx = np.where(r >= np.deg2rad(rotation_limit))[0]
bad_pct = 100 * dt[bad_idx].sum() / (head_pos[-1, 0] - head_pos[0, 0])
print(u'%sOmitting %5.1f%% (%3d segments) due to bad rotational '
'velocity (>=%5.1f deg/s), with max %0.2f deg/s'
% (prefix, bad_pct, len(bad_idx), rotation_limit,
np.rad2deg(r.max())))
for idx in bad_idx:
annot.append(head_pos_t[idx], dt[idx], 'BAD_RV')
# Annotate based on translational velocity
if do_translation:
assert translation_limit > 0
v = np.linalg.norm(np.diff(head_pos[:, 4:7], axis=0), axis=-1)
v /= dt
bad_idx = np.where(v >= translation_limit)[0]
bad_pct = 100 * dt[bad_idx].sum() / (head_pos[-1, 0] - head_pos[0, 0])
print(u'%sOmitting %5.1f%% (%3d segments) due to translational '
u'velocity (>=%5.1f m/s), with max %0.4f m/s'
% (prefix, bad_pct, len(bad_idx), translation_limit, v.max()))
for idx in bad_idx:
annot.append(head_pos_t[idx], dt[idx], 'BAD_TV')
# Annotate on distance from the sensors
return annot
|
class Instruccion:
'''This is an abstract class'''
class CrearBD(Instruccion) :
'''
    This class represents the CREATE DATABASE statement; it receives the replace flag, existence check, database name, owner, and mode.
'''
def __init__(self,reemplazar,verificacion,nombre, propietario, modo) :
self.reemplazar = reemplazar
self.verificacion = verificacion
self.nombre = nombre
self.propietario = propietario
self.modo = modo
class CrearTabla(Instruccion) :
'''
    This class represents the CREATE TABLE statement.
    It receives the table name, the list of columns, and a parent table.
'''
def __init__(self, nombre, padre, columnas = []) :
self.nombre = nombre
self.columnas = columnas
self.padre = padre
class CrearType(Instruccion) :
'''
    This class represents the CREATE TYPE statement.
    It receives the type name and a list of values.
'''
def __init__(self, nombre, valores = []) :
self.nombre = nombre
self.valores = valores
class EliminarTabla(Instruccion) :
'''
    This class represents the DROP TABLE statement.
    It receives the existence check (IF EXISTS) and the table name.
'''
def __init__(self, existencia, nombre) :
self.nombre = nombre
self.existencia = existencia
class EliminarDB(Instruccion) :
'''
    This class represents the DROP DATABASE statement.
    It receives the existence check (IF EXISTS) and the database name.
'''
def __init__(self, existencia, nombre) :
self.nombre = nombre
self.existencia = existencia
class columnaTabla(Instruccion) :
'''
    This class represents a column of a table.
'''
def __init__(self, id, tipo, valor,zonahoraria, atributos = []) :
self.id = id
self.tipo = tipo
self.valor = valor
self.zonahoraria = zonahoraria
self.atributos = atributos
class llaveTabla(Instruccion) :
'''
    This class represents the keys of a table, either primary or foreign.
    tipo=True  -> primary key
    tipo=False -> foreign key
'''
def __init__(self, tipo,referencia,columnas = [],columnasRef = []) :
self.tipo = tipo
self.referencia = referencia
self.columnas = columnas
self.columnasRef = columnasRef
class atributoColumna(Instruccion) :
'''
    This class represents the attributes of a column.
'''
def __init__(self, default,constraint,null,unique,primary,check) :
self.default = default
self.constraint = constraint
self.null = null
self.unique = unique
self.primary = primary
self.check = check
class Insertar(Instruccion):
'''
    This class represents the values to insert into a table.
'''
def __init__(self, nombre, columnas, valores=[]) :
self.nombre = nombre
self.columnas = columnas
self.valores = valores
class Actualizar(Instruccion):
'''
    This class represents the values to update in a table.
'''
def __init__(self, nombre, condicion, valores=[]) :
self.nombre = nombre
self.condicion = condicion
self.valores = valores
class columna_actualizar(Instruccion):
'''
    This class represents the columns to update.
'''
def __init__(self, nombre, valor) :
self.nombre = nombre
self.valor = valor
class Eliminar(Instruccion):
'''
    This class represents the DELETE statement (rows removed from a table that match a condition).
'''
def __init__(self, nombre, condicion):
self.nombre = nombre
self.condicion = condicion
class DBElegida(Instruccion):
'''
    This class represents the currently selected database.
'''
def __init__(self,nombre):
self.nombre = nombre
class MostrarDB(Instruccion):
'''
    This class represents the statement that lists the created databases.
'''
class MostrarTB(Instruccion):
'''
    This class represents the statement that lists the tables of a database.
'''
class Limite_Select(Instruccion):
'''
    This class represents the LIMIT/OFFSET clause of a SELECT.
'''
def __init__(self, select, limit, offset):
self.select=select
self.limit=limit
self.offset=offset
class SELECT(Instruccion):
'''
    This class represents a SELECT statement.
'''
def __init__(self, cantidad, parametros, cuerpo, funcion_alias):
self.cantida=cantidad
self.parametros=parametros
self.cuerpo=cuerpo
self.funcion_alias=funcion_alias
class Funcion_Alias(Instruccion):
'''
    This class represents a function together with its alias.
'''
def __init__(self, nombre, alias):
self.nombre=nombre
self.alias=alias
class CUERPO_SELECT(Instruccion):
'''
    This class represents the body of a SELECT (FROM, JOIN, WHERE, GROUP BY, HAVING, ORDER BY).
'''
def __init__(self, b_from, b_join, b_where, b_group, b_having, b_order):
self.b_from=b_from
self.b_join=b_join
self.b_where=b_where
self.b_group=b_group
self.b_having=b_having
self.b_order=b_order
class Orden_Atributo(Instruccion):
'''
    This class represents the ordering applied to an attribute.
'''
def __init__(self, nombre, direccion, rango):
self.nombre=nombre
self.direccion=direccion
self.rango=rango
class SubQuery(Instruccion):
'''
    This class represents a subquery and its comparison against the main query.
'''
def __init__(self, condicion, subquery, alias):
self.condicion=condicion
self.subquery=subquery
self.alias=alias
class Valor_From(Instruccion):
'''
    This class represents the contents of the FROM clause of a query.
'''
def __init__(self, nombre, subquery, alias):
self.nombre=nombre
self.subquery=subquery
self.alias=alias
class SubQuery_IN(Instruccion):
'''
    This class represents whether IN or NOT IN is declared for a subquery.
'''
def __init__(self, exp, tipo):
self.exp=exp
self.tipo=tipo
class Valor_Select(Instruccion):
'''
    This class represents the values selected in a SELECT.
'''
def __init__(self, nombre, tipo, alias, fun_exp):
self.nombre=nombre
self.tipo=tipo
self.alias=alias
self.fun_exp=fun_exp
class Condicion_WHEN_THEN(Instruccion):
'''
    This class represents a WHEN ... THEN condition.
'''
def __init__(self, exp, resultado):
self.exp=exp
self.resultado=resultado
class Case(Instruccion):
'''
    This class represents a CASE expression.
'''
def __init__(self, condicion, sino, alias):
self.condicion=condicion
self.sino=sino
self.alias=alias
# --------------------- ALTER statements
class ALTERDBO(Instruccion):
'''
    ALTER DATABASE <id> <condition> = <value>
'''
def __init__(self, Id, TipoCon,valor):
self.Id = Id
self.TipoCon = TipoCon
self.valor = valor
class ALTERTBO(Instruccion):
'''
ALTER TABLE
'''
def __init__(self, Id,cuerpo):
self.Id = Id
self.cuerpo = cuerpo
class ALTERTBO_RENAME(Instruccion):
'''
ALTER TABLE RENAME
'''
def __init__(self, Id1,Id2,operacion):
self.Id1 = Id1
self.Id2 = Id2
self.operacion = operacion
class ALTERTBO_ALTER_PROPIEDADES(Instruccion):
'''
ALTER TABLE ALTER
'''
def __init__(self, prop1,prop2,prop3,prop4,prop5):
self.prop1 = prop1
self.prop2 = prop2
self.prop3 = prop3
self.prop4 = prop4
self.prop5 = prop5
class ALTERTBO_ALTER(Instruccion):
'''
ALTER TABLE ALTER
'''
def __init__(self, instruccion,id,extra):
self.instruccion = instruccion
self.id = id
self.extra = extra
class ALTERTBO_DROP(Instruccion):
'''
ALTER TABLE DROP
'''
def __init__(self, instruccion,id):
self.instruccion = instruccion
self.id = id
class ALTERTBO_ADD(Instruccion):
'''
ALTER TABLE ADD
'''
def __init__(self, id,tipo,valortipo,instruccion,extra):
self.id = id
self.tipo = tipo
self.valortipo = valortipo
self.instruccion = instruccion
self.extra = extra
class ALTERTBO_ADD_EXTRAS(Instruccion):
'''
ALTER TABLE ADD Extras
'''
def __init__(self, instruccion,contenido, id , contenido2):
self.instruccion = instruccion
self.contenido = contenido
self.id = id
self.contenido2 = contenido2
# added 2020-12-18
class ALTERTBO_ALTER_SERIE(Instruccion):
'''
    ALTER TABLE ALTER (list of serial values)
'''
def __init__(self, listaval):
self.listaval = listaval
|
word = input("Enter the word ")
word = word.lower()
count = 0
with open("batman.txt", "r") as file_txt:
    for each in file_txt:
        count += each.lower().count(word)
print("The word", word, "occurred", count, "times")
|
"""
Runtime: 5834 ms, faster than 34.56% of Python3 online submissions for Count Primes.
Memory Usage: 52.7 MB, less than 85.70% of Python3 online submissions for Count Primes.
"""
from typing import List
from typing import Optional
import math
class Solution:
def countPrimes(self, n: int) -> int:
if n <= 2:
return 0
primes = [True] * n
primes[0] = False
primes[1] = False
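        # Sieve of Eratosthenes: for each prime i <= sqrt(n), mark its
        # multiples (starting at i*i) as composite.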
for i in range(2, int(math.sqrt(n)) + 1):
if primes[i]:
multiple = i**2
while multiple < n:
primes[multiple] = False
multiple += i
return sum(primes)
def main():
sol = Solution()
print('Output:', sol.countPrimes(10))
print('Expected:', 4)
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2021, The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import datasets
import torch
from datasets import load_dataset
from ..tasks.generic_dataset import CQA
from .base_dataset import Split
datasets.logging.set_verbosity('ERROR')
logger = logging.getLogger(__name__)
class HFDataset(CQA):
def __init__(self, data, make_example, **kwargs):
subsample = kwargs.get('subsample')
        skip_cache = kwargs.pop('skip_cache', True)
        cache_name = os.path.join(os.path.dirname(data.cache_files[0]['filename']), data.split._name, str(subsample))
        examples = []
        if os.path.exists(cache_name) and not skip_cache:
            logger.info(f'Loading cached data from {cache_name}')
            examples = torch.load(cache_name)
        else:
            for ex in data:
                examples.append(make_example(ex, **kwargs))
                if subsample is not None and len(examples) >= subsample:
                    break
            os.makedirs(os.path.dirname(cache_name), exist_ok=True)
            logger.info(f'Caching data to {cache_name}')
            torch.save(examples, cache_name)
        super().__init__(examples, **kwargs)
@classmethod
def return_splits(cls, name, root='.data', train='train', validation='validation', test='test', **kwargs):
# download datasets and cache them
train_data, validation_data, test_data = None, None, None
train_path, validation_path, test_path = None, None, None
        if train:
            train_data = load_dataset(name, split=train, cache_dir=root)
            train_path = train_data.cache_files[0]['filename']
        if validation:
            validation_data = load_dataset(name, split=validation, cache_dir=root)
            validation_path = validation_data.cache_files[0]['filename']
        if test:
            test_data = load_dataset(name, split=test, cache_dir=root)
            test_path = test_data.cache_files[0]['filename']
if kwargs.pop('hf_test_overfit', False):
# override validation/ test data with train data
if validation:
validation_data = load_dataset(name, split='train', cache_dir=root)
validation_path = validation_data.cache_files[0]['filename']
if test:
test_data = load_dataset(name, split='train', cache_dir=root)
test_path = test_data.cache_files[0]['filename']
train_data = None if train is None else cls(train_data, **kwargs)
validation_data = None if validation is None else cls(validation_data, **kwargs)
test_data = None if test is None else cls(test_data, **kwargs)
return Split(train=train_data, eval=validation_data, test=test_data), Split(
train=train_path, eval=validation_path, test=test_path
)
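# Usage sketch (not part of the original module); the dataset name and the
# trivial make_example below are hypothetical illustrations only:
#     splits, paths = HFDataset.return_splits(
#         'ag_news', root='.data',
#         make_example=lambda ex, **kw: ex, skip_cache=True,
#     )
#     print(paths.train)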
|
# -*- coding: utf-8 -*-
#
# Scrapy spider which extracts names for today from http://www.nimipaivat.fi.
#
# Author: Jarno Tuovinen
#
# License: MIT
#
import re
import datetime
import scrapy
from extractor.items import NamedayItem
date_pattern = re.compile(r'\d+')
nameday_url = "http://www.nimipaivat.fi/"
# Create URLs for each day
urls = []
mlen = (31,28,31,30,31,30,31,31,30,31,30,31)
for currentmonth in range (len(mlen)):
for day in range (mlen[currentmonth]):
url = "{}{}.{}.".format(nameday_url, day+1, currentmonth+1)
urls.append(url)
class NamedaySpider(scrapy.Spider):
name = "nameday"
allowed_domains = ["nimipaivat.fi"]
start_urls = urls
def parse(self, response):
official_names = []
swedish_names = []
same_names = []
orthodox_names = []
unofficial_names = []
date = response.xpath("/html/body/div/div/div/h1/text()").extract_first()
ps = response.xpath("/html/body/div[@class='container']/div[@class='row']/div[@class='col-md-6']/p")
for p in ps:
if "Nimi" in p.extract():
official_names = p.xpath("strong/a/text()").extract()
elif "Ruotsinkieli" in p.extract():
swedish_names = p.xpath("strong/a/text()").extract()
elif "Saamenkieli" in p.extract():
same_names = p.xpath("strong/a/text()").extract()
elif "Ortodoksista" in p.extract():
orthodox_names = p.xpath("strong/a/text()").extract()
elif "virallista" in p.extract():
unofficial_names = p.xpath("strong/a/text()").extract()
# Extract day and month from date string
extracted_date = date_pattern.findall(date)
# Populate the item
item = NamedayItem()
item['day'] = extracted_date[0]
item['month'] = extracted_date[1]
# Uncomment these lines to make this crawler crawl forbidden names
# item['official_names'] = official_names
# item['swedish_names'] = swedish_names
# item['same_names'] = same_names
item['orthodox_names'] = orthodox_names
item['unofficial_names'] = unofficial_names
# Return item to pipeline
return item
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
py40 PyQt5 tutorial
In this example, we reimplement an
event handler.
author: Jan Bodnar
website: py40.com
last edited: January 2015
"""
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Event handler')
self.show()
    def keyPressEvent(self, e):  # reimplemented base-class handler: pressing ESC (or E) closes the window
if e.key() == Qt.Key_Escape:
self.close()
elif e.key() == Qt.Key_E:
self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
# Copyright (c) 2015-2016, The Authors and Contributors
# <see AUTHORS file>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Generator
import numpy as np
import logging
import pyximport
from supplychainpy._helpers._decorators import log_this
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
#pyximport.install()
from supplychainpy.demand._squared_error import sum_squared_errors_i1, sum_squared_errors_i2, sum_squared_errors_i0
class Forecast:
"""
"""
__simple_exponential_smoothing_forecast = {}
# make keyword args
def __init__(self, orders: list = None, average_orders: float = None, **kwargs):
self.__weighted_moving_average = None
self.__orders = orders
self.__average_orders = sum([int(demand) for demand in self.__orders]) / len(self.__orders)
self.__moving_average = []
self.__total_orders = sum([int(demand) for demand in self.__orders])
@property
def total_orders(self):
return self.__total_orders
@property
def moving_average(self) -> list:
return self.__moving_average
@moving_average.setter
def moving_average(self, forecast: list):
self.__moving_average = forecast
@property
def weighted_moving_average(self) -> list:
return self.__weighted_moving_average
# specify a start position and the forecast will build from this position
def moving_average_forecast(self, average_period: int = 3, forecast_length: int = 2,
base_forecast: bool = False, start_position: int = 0) -> list:
""" Generates a forecast from moving averages.
Generate a forecast from moving averages for as many periods as specified.
Args:
average_period (int): Number of periods to average.
forecast_length (int): Number of periods to forecast for.
base_forecast (bool): Start a moving average forecast from anywhere
in the list. For use when evaluating a forecast.
            start_position (int): Where to start the forecast in the list when base_forecast is True.
Returns:
list: Returns a list of orders including the original and the forecast appended to the end.
The appended forecast is as long as specified by the forecast_length. For example
orders = [1, 3, 5, 67, 4, 65, 242, 50, 48, 24, 34, 20]
d = forecast_demand.Forecast(orders)
d.calculate_moving_average_forecast(forecast_length=3)
print(d.moving_average_forecast)
output:
[1, 3, 5, 67, 4, 65, 242, 50, 48, 24, 34, 20, 26, 27, 24]
orders = [1, 3, 5, 67, 4, 65, 242, 50, 48, 24, 34, 20]
d = forecast_demand.Forecast(orders)
d.calculate_moving_average_forecast(forecast_length=3, base_forecast=True, start_position=3)
print(d.moving_average_forecast):
output:
[3, 67, 65, 45, 60, 80]
Raises:
ValueError: Incorrect number of orders supplied. Please make sure you have enough orders to
calculate an average. The average_period is {}, while the
number of orders supplied is {}. The number of orders supplied should be equal
or greater than the average_period. Either decrease the average_period or
                        increase the start_position in the list.
"""
if base_forecast:
start_period = start_position + average_period
end_period = len(self.__orders)
total_orders = 0
moving_average = []
if len(self.__orders[0:start_position]) < average_period:
raise ValueError("Incorrect number of orders supplied. Please make sure you have enough orders to "
"calculate an average. The average_period is {}, while the \n"
"number of orders supplied is {}. The number of orders supplied should be equal "
"or greater than the average_period.\n Either decrease the average_period or "
"increase the start_position in the list.".format(average_period, start_position))
else:
for i in self.__orders[0:start_position]:
                    moving_average.append(i)
count = 0
average_orders = 0.0
while count < forecast_length:
for items in moving_average[0:end_period]:
total_orders += items
count += 1
average_orders = total_orders / float(average_period)
moving_average.append(round(average_orders))
average_orders = 0.0
total_orders = 0.0
if count < 1:
start_period += len(moving_average) - average_period
else:
start_period += 1
end_period = len(moving_average)
self.__moving_average = moving_average
else:
start_period = len(self.__orders) - average_period
end_period = len(self.__orders)
total_orders = 0
moving_average = self.__orders
count = 0
average_orders = 0.0
while count < forecast_length:
for items in moving_average[start_period:end_period]:
total_orders += items
count += 1
average_orders = total_orders / float(average_period)
moving_average.append(round(average_orders))
average_orders = 0.0
total_orders = 0.0
if count < 1:
start_period += len(moving_average) - average_period
else:
start_period += 1
end_period = len(moving_average)
self.__moving_average = moving_average
return moving_average
def weighted_moving_average_forecast(self, weights: list, average_period: int = 3,
forecast_length: int = 3, base_forecast: bool = False,
start_position=0) -> list:
""" Generates a forecast from moving averages using user supplied weights.
Generate a forecast from moving averages for as many periods as specified and adjusts the forecast based
on the supplied weights.
Args:
average_period (int): Number of periods to average.
forecast_length (int): Number of periods to forecast for.
weights (list): A list of weights that sum up to one.
base_forecast(bool): Start a moving average forecast from anywhere
in the list. For use when evaluating a forecast.
start_position(int): Start position
Returns:
list: Returns a list of orders including the original and the forecast appended to the end.
The appended forecast is as long as specified by the forecast_length. For example
orders = [1, 3, 5, 67, 4, 65, 242, 50, 48, 24, 34, 20]
d = forecast_demand.Forecast(orders)
d.calculate_moving_average_forecast(forecast_length=3)
print(d.moving_average_forecast)
output:
[1, 3, 5, 67, 4, 65, 242, 50, 48, 24, 34, 20, 13, 11, 7]
Raises:
ValueError: The weights should equal 1 and be as long as the average period (default 3).'
The supplied weights total {} and is {} members long. Please check the supplied
weights.'.format(sum(weights), len(weights)))
"""
if sum(weights) != 1 or len(weights) != average_period:
raise ValueError(
'The weights should equal 1 and be as long as the average period (default 3).'
' The supplied weights total {} and is {} members long. Please check the supplied weights.'.format(
sum(weights), len(weights)))
else:
start_period = len(self.__orders) - average_period
if base_forecast:
start_period = start_position + average_period
end_period = len(self.__orders)
total_orders = 0
weighted_moving_average = []
if start_position + 1 < average_period:
raise ValueError("Incorrect number of orders supplied. Please make sure you have enough orders to "
"calculate an average. The average_period is {}, while the \n"
"number of orders supplied is {}. The number of orders supplied should be equal "
"or greater than the average_period.\n Either decrease the average_period or "
"increase the start_position in the list.".format(average_period, start_position))
else:
for i in self.__orders[0:start_position]:
                        weighted_moving_average.append(i)
count = 0
weight_count = 0
while count < forecast_length:
for items in weighted_moving_average[0:end_period]:
total_orders += items
count += 1
average_orders = (total_orders / float(average_period)) * weights[weight_count]
weighted_moving_average.append(round(average_orders))
total_orders = 0.0
if count < 1:
start_period += len(weighted_moving_average) - average_period
else:
start_period += 1
self.__weighted_moving_average = weighted_moving_average
else:
end_period = len(self.__orders)
total_orders = 0
weighted_moving_average = self.__orders
count = 0
average_orders = 0.0
while count < forecast_length:
weight_count = 0
for items in weighted_moving_average[start_period:end_period]:
total_orders += items
average_orders = (total_orders / float(average_period)) * weights[weight_count]
count += 1
weighted_moving_average.append(round(average_orders))
average_orders = 0.0
total_orders = 0.0
if count < 1:
start_period += len(weighted_moving_average) - average_period
else:
start_period += 1
end_period = len(weighted_moving_average)
self.__weighted_moving_average = weighted_moving_average
return weighted_moving_average
    # also used to calculate the MAD for all forecasting methods given a specific length of orders
# TODO-feature fix base_forecast for correct period to period MAD calculation
def mean_absolute_deviation(self, forecasts: list, base_forecast: bool = False, start_period: int = 3) -> np.array:
""" calculates the mean absolute deviation (MAD) for a given forecast.
        Calculates the mean absolute deviation for a forecast. The forecast and the original orders are compared period by period.
Args:
forecasts (list): A previously calculated forecast.
base_forecast (bool): Start a moving average forecast from anywhere
in the list.
start_period (int): The start period of the forecast.
Returns:
np.array
Raises:
ValueError:
"""
if base_forecast:
end_period = len(forecasts) - start_period
forecast_array = np.array(forecasts[:end_period])
orders_array = np.array(self.__orders[start_period: len(forecasts)])
std_array = orders_array - forecast_array
std_array = sum(abs(std_array)) / len(std_array)
else:
forecast_array = np.array(forecasts)
orders_array = np.array(self.__orders)
std_array = orders_array - forecast_array
std_array = sum(abs(std_array)) / len(std_array)
return std_array
@log_this(logging.DEBUG, "Called to compute simple exponential smoothing.")
def simple_exponential_smoothing(self, *alpha)->Generator[dict, None, None]:
""" Generates forecast using simple exponential smoothing (SES).
Args:
alpha(args): A list of smoothing level constants (alpha values)
        Yields:
            dict: Forecast entries such as
{'demand': 165, 'level_estimates': 164.49748124123246, 'alpha': 0.7487406206162335,
'one_step_forecast': 163.0, 'forecast_error': 2.0, 'squared_error': 4.0, 't': 1}
Examples:
forecast_demand = Forecast(orders)
alpha_values = [0.2, 0.3, 0.4, 0.5, 0.6]
ses_forecast = [forecast for forecast in forecast_demand.simple_exponential_smoothing(*alpha_values)]
"""
for arg in alpha:
forecast = {}
current_level_estimate = self.__average_orders
forecast.update({'alpha': arg,
't': 0,
'demand': 0,
'level_estimates': current_level_estimate,
'one_step_forecast': 0,
'forecast_error': 0,
'squared_error': 0})
previous_level_estimate = current_level_estimate
for index, demand in enumerate(tuple(self.__orders), 1):
current_level_estimate = self._level_estimate(previous_level_estimate, arg, demand)
yield {'alpha': arg,
't': index,
'demand': demand,
'level_estimates': current_level_estimate,
'one_step_forecast': previous_level_estimate,
'forecast_error': self._forecast_error(demand, previous_level_estimate),
'squared_error': self._forecast_error(demand, previous_level_estimate) ** 2
}
previous_level_estimate = current_level_estimate
@log_this(logging.DEBUG, "Called HTEC exponential smoothing.")
def holts_trend_corrected_exponential_smoothing(self, alpha: float, gamma: float, intercept: float, slope: float):
forecast = {}
# log.debug('holts ')
current_level_estimate = intercept
forecast.update({'alpha': alpha,
'gamma': gamma,
't': 0,
'demand': 0,
'level_estimates': current_level_estimate,
'trend': slope,
'one_step_forecast': 0,
'forecast_error': 0,
'squared_error': 0
})
previous_trend = slope
previous_level_estimate = current_level_estimate
for index, demand in enumerate(tuple(self.__orders), 1):
log.debug('demand: {}'.format(demand))
one_step = previous_level_estimate + previous_trend
log.debug('one_step: {}'.format(one_step))
forecast_error = self._forecast_error(demand, one_step)
log.debug('forecast_error: {}'.format(forecast_error))
current_trend = self._holts_trend(previous_trend, gamma, alpha, forecast_error)
log.debug('trend: {}'.format(current_trend))
current_level_estimate = self._level_estimate_holts_trend_corrected(previous_level_estimate,
alpha,
previous_trend,
forecast_error)
log.debug('current_level: {}'.format(current_level_estimate))
squared_error = forecast_error ** 2
yield {'alpha': alpha,
'gamma': gamma,
't': index,
'demand': demand,
'trend': current_trend,
'level_estimates': current_level_estimate,
'one_step_forecast': one_step,
'forecast_error': forecast_error,
'squared_error': squared_error
}
log.debug('squared_error: {}'.format(squared_error))
previous_level_estimate = current_level_estimate
previous_trend = current_trend
@staticmethod
def holts_trend_corrected_forecast(forecast: list, forecast_length: int):
"""Creates a forecast for as many periods.
Args:
forecast:
forecast_length:
Returns:
"""
end_of_forecast = len(forecast) - 1
# print(forecast[end_of_forecast])
new_forecast = []
for i in range(forecast_length):
demand_forecast = forecast[end_of_forecast]['level_estimates'] + i * forecast[end_of_forecast]['trend']
new_forecast.append(demand_forecast)
return new_forecast
@staticmethod
def simple_exponential_smoothing_forecast(forecast: list, forecast_length: int):
end_of_forecast = len(forecast) - 1
new_forecast = []
for i in range(forecast_length):
demand_forecast = forecast[end_of_forecast]['level_estimates']
new_forecast.append(demand_forecast)
return new_forecast
@staticmethod
def _holts_trend(previous_trend: float, gamma: float, alpha: float, current_forecast_error: float):
return previous_trend + alpha * gamma * current_forecast_error
@staticmethod
def _level_estimate(lvl: float, smoothing_parameter: float, demand: int):
return lvl + smoothing_parameter * (demand - lvl)
@staticmethod
def _level_estimate_holts_trend_corrected(previous_level: float, smoothing_parameter: float, previous_trend: float,
forecast_error: float):
return previous_level + previous_trend + smoothing_parameter * forecast_error
@staticmethod
def _forecast_error(demand: int, one_step_forecast: float):
return float(demand) - one_step_forecast
@staticmethod
def sum_squared_errors(squared_error: list, smoothing_parameter: float) -> dict:
return sum_squared_errors_i0(squared_error, smoothing_parameter)
@staticmethod
def sum_squared_errors_indi(squared_error: list, smoothing_parameter: float) -> dict:
return sum_squared_errors_i2(squared_error, smoothing_parameter)
@staticmethod
def sum_squared_errors_indi_htces(squared_error: list, alpha: float, gamma: float) -> dict:
return sum_squared_errors_i1(squared_error, alpha, gamma)
@staticmethod
def standard_error(sse: dict, orders_count, smoothing_parameter, df: int = 1) -> float:
return (sse[smoothing_parameter] / (orders_count - df)) ** 0.5
def mean_forecast_error(self):
"""
Args:
forecasts (int): Number of periods to average.
base_forecast (bool): A list of weights that sum up to one.
Returns:
np.array
Raises:
ValueError:
"""
pass
    def mean_aboslute_percentage_error_opt(self, forecast: list) -> float:
""" Calculates the mean absolute percentage error for the optimised forecasts
Args:
forecast (list): forecast generated
Returns:
"""
sum_ape = sum([abs((i['demand'] - i['level_estimates']) / i['demand']) for i in forecast])
mape = (sum_ape / len(forecast)) * 100
return mape
def optimise(self):
"""
Args:
forecasts (int): Number of periods to average.
base_forecast (bool): A list of weights that sum up to one.
Returns:
np.array
Raises:
ValueError:
"""
pass
def linear_regression(self):
"""
Args:
line (int): Number of periods to average.
base_forecast (bool): A list of weights that sum up to one.
Returns:
np.array
Raises:
ValueError:
"""
pass
def autoregressive(self):
"""
Args:
forecasts (int): Number of periods to average.
base_forecast (bool): A list of weights that sum up to one.
Returns:
np.array
Raises:
ValueError:
"""
pass
if __name__ == '__main__':
orders = [165, 171, 147, 143, 164, 160, 152, 150, 159, 169, 173, 203, 169, 166, 162, 147, 188, 161, 162, 169, 185,
188, 200, 229, 189, 218, 185, 199, 210, 193, 211, 208, 216, 218, 264, 304]
total_orders = 0
avg_orders = 0
for order in orders[:12]:
total_orders += order
avg_orders = total_orders / 12
f = Forecast(orders, avg_orders)
alpha = [0.2, 0.3, 0.4, 0.5, 0.6]
s = [i for i in f.simple_exponential_smoothing(*alpha)]
sum_squared_error = f.sum_squared_errors(s, 0.5)
# print(sum_squared_error)
standard_error = f.standard_error(sum_squared_error, len(orders), smoothing_parameter=0.5)
# print(standard_error)
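    # Additional usage sketch (not in the original file): a plain 3-period
    # moving average over the same orders via moving_average_forecast(),
    # using its default base_forecast=False.
    ma = f.moving_average_forecast(average_period=3, forecast_length=2)
    # print(ma)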
|
import requests
import json
def test_add_shipment():
    App_URL = "http://127.0.0.1:8000/shipments/"
    with open('request.json', 'r') as f:
        request_json = json.loads(f.read())
    # Send the payload as JSON rather than form-encoded data.
    response = requests.post(App_URL, json=request_json)
    print(response.text)
def test_list_shipments():
App_URL="http://127.0.0.1:8000/shipments/"
response = requests.get(App_URL)
print(response.text)
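# Note (added): these are smoke tests intended to be run with pytest against a
# service listening on http://127.0.0.1:8000; asserting on
# response.status_code (e.g. assert response.status_code == 200) would surface
# failures instead of only printing the response body.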
|
import os
import struct
import json
import glob
from argparse import ArgumentParser
import UnityPy
from antlr4 import *
from evAssembler import EvCmd, evAssembler
from evLexer import evLexer
from evParser import evParser
from ev_argtype import EvArgType
from ev_cmd import EvCmdType
from function_definitions import FunctionDefinition
class GDataManager:
SCENARIO_MSGS = None
DISABLED_MSGS = False
@classmethod
def getMoveById(cls, moveId):
move_list = cls.getMoveList()
return move_list[moveId]
@classmethod
def getScenarioMsgList(cls):
if cls.DISABLED_MSGS:
return None
if not cls.SCENARIO_MSGS:
scenario1 = []
scenario2 = []
scenario3 = []
try:
with open("AssetFolder/english_Export/english_dp_scenario1.json", "r", encoding='utf-8') as ifobj:
data = json.load(ifobj)
for entry in data["labelDataArray"]:
labelName = entry["labelName"]
scenario1.append(labelName)
with open("AssetFolder/english_Export/english_dp_scenario2.json", "r", encoding='utf-8') as ifobj:
data = json.load(ifobj)
for entry in data["labelDataArray"]:
labelName = entry["labelName"]
scenario2.append(labelName)
with open("AssetFolder/english_Export/english_dp_scenario3.json", "r", encoding='utf-8') as ifobj:
data = json.load(ifobj)
for entry in data["labelDataArray"]:
labelName = entry["labelName"]
scenario3.append(labelName)
except FileNotFoundError as exc:
cls.DISABLED_MSGS = True
print("Warning: english files not found. Message validation will not be enabled: {}".format(exc))
return None
cls.SCENARIO_MSGS = {
'dp_scenario1' : scenario1,
'dp_scenario2' : scenario2,
'dp_scenario3' : scenario3
}
return cls.SCENARIO_MSGS
def jsonDumpUnity(tree, ofpath):
with open(ofpath, "w") as ofobj:
json.dump(tree, ofobj, indent=4)
def validate_talk_msg(cmd: EvCmd, strList):
scenarioMsgList = GDataManager.getScenarioMsgList()
if scenarioMsgList is None:
return
msgIdx = cmd.args[0].data
msg = strList[msgIdx]
splitMsg = msg.split('%')
try:
dataFile = splitMsg[0]
unlocalized_key = splitMsg[1]
except IndexError:
return
# raise RuntimeError('Invalid msg: {} passed to {} at {}: {}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
if dataFile not in scenarioMsgList:
raise RuntimeError('Unknown datafile: {} passed to {} at {}:{}'.format(dataFile, cmd.cmdType.name, cmd.line, cmd.column))
if unlocalized_key not in scenarioMsgList[dataFile]:
raise RuntimeError('Unknown message: {} passed to {} at {}:{}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
def validate_talk_keywait(cmd: EvCmd, strList: list):
scenarioMsgList = GDataManager.getScenarioMsgList()
if scenarioMsgList is None:
return
msgIdx = cmd.args[0].data
msg = strList[msgIdx]
splitMsg = msg.split('%')
try:
dataFile = splitMsg[0]
unlocalized_key = splitMsg[1]
except IndexError:
return
# raise RuntimeError('Invalid msg: {} passed to {} at {}: {}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
if dataFile not in scenarioMsgList:
raise RuntimeError('Unknown datafile: {} passed to {} at {}:{}'.format(dataFile, cmd.cmdType.name, cmd.line, cmd.column))
if unlocalized_key not in scenarioMsgList[dataFile]:
raise RuntimeError('Unknown message: {} passed to {} at {}:{}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
def validate_easy_obj_msg(cmd: EvCmd, strList: list):
scenarioMsgList = GDataManager.getScenarioMsgList()
if scenarioMsgList is None:
return
msgIdx = cmd.args[0].data
msg = strList[msgIdx]
splitMsg = msg.split('%')
try:
dataFile = splitMsg[0]
unlocalized_key = splitMsg[1]
except IndexError:
return
# raise RuntimeError('Invalid msg: {} passed to {} at {}: {}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
if dataFile not in scenarioMsgList:
raise RuntimeError('Unknown datafile: {} passed to {} at {}:{}'.format(dataFile, cmd.cmdType.name, cmd.line, cmd.column))
if unlocalized_key not in scenarioMsgList[dataFile]:
raise RuntimeError('Unknown message: {} passed to {} at {}:{}'.format(msg, cmd.cmdType.name, cmd.line, cmd.column))
VALIDATE_TABLE = {
EvCmdType._TALKMSG : validate_talk_msg,
EvCmdType._TALK_KEYWAIT : validate_talk_keywait,
EvCmdType._EASY_OBJ_MSG : validate_easy_obj_msg,
}
def convertToUnity(ifpath, scripts, strList):
# FunctionDefinition.load("ev_scripts.json")
tree = {}
treeScripts = []
for label, script in scripts.items():
scriptCommands = []
for cmd in script:
evCmdType = cmd.cmdType
# funcDef = FunctionDefinition.getFunctionDefinition(evCmdType)
scriptArgs = [
{
"argType" : EvArgType.CmdType,
"data" : evCmdType.value
}
]
if evCmdType in VALIDATE_TABLE:
valid_func = VALIDATE_TABLE[evCmdType]
try:
valid_func(cmd, strList)
except RuntimeError as exc:
print(exc)
# reqArgs = funcDef.noReqArgs()
# if len(cmd.args) < reqArgs:
# print("[Warning] {}:{} Too few arguments passed in. At least {} required. {} provided.".format(cmd.line, cmd.column, reqArgs, len(cmd.args)))
# noMaxArgs = funcDef.maxArgs()
# if len(cmd.args) > noMaxArgs:
# print("[Warning] {}:{} Too many arguments passed in. At most {} allowed. {} provided.".format(cmd.line, cmd.column, noMaxArgs, len(cmd.args)))
for i, arg in enumerate(cmd.args):
# argDef = funcDef.validArgs[i]
#if arg.argType not in argDef.validArgTypes:
# print("[Warning] {} {}:{} invalid argument".format(ifpath, arg.line, arg.column))
scriptArgs.append({
"argType" : arg.argType,
"data" : arg.data
})
scriptCommands.append({
"Arg" : scriptArgs
})
treeScripts.append({
"Label" : label,
"Commands" : scriptCommands
})
tree["Scripts"] = treeScripts
tree["StrList"] = strList
return tree
def repackUnity(ofpath, script, unityTree):
with open(ofpath, "rb") as ifobj:
bundle = UnityPy.load(ofpath)
for obj in bundle.objects:
if obj.type.name == "MonoBehaviour":
data = obj.read()
if obj.serialized_type.nodes:
tree = obj.read_typetree()
if data.name == script:
tree.update(unityTree)
obj.save_typetree(tree)
with open(ofpath, "wb") as ofobj:
# Thanks Aldo796
ofobj.write(bundle.file.save(packer=(64,2)))
def assemble(ifpath, ofpath, script):
input_stream = FileStream(ifpath)
lexer = evLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = evParser(stream)
tree = parser.prog()
assembler = evAssembler()
walker = ParseTreeWalker()
walker.walk(assembler, tree)
    unityTree = convertToUnity(ifpath, assembler.scripts, assembler.strTbl)
repackUnity(ofpath, script, unityTree)
def repackUnityAll(ifpath, ofpath, scripts):
with open(ifpath, "rb") as ifobj:
bundle = UnityPy.load(ifpath)
for obj in bundle.objects:
if obj.type.name == "MonoBehaviour":
data = obj.read()
if obj.serialized_type.nodes:
tree = obj.read_typetree()
if data.name in scripts:
unityTree = scripts[data.name]
tree.update(unityTree)
obj.save_typetree(tree)
with open(ofpath, "wb") as ofobj:
# Thanks Aldo796
ofobj.write(bundle.file.save(packer=(64,2)))
def assemble_all():
scripts = {}
for ifpath in glob.glob("scripts/*.ev"):
basename = os.path.basename(ifpath)
basename = os.path.splitext(basename)[0]
input_stream = FileStream(ifpath)
lexer = evLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = evParser(stream)
tree = parser.prog()
assembler = evAssembler()
walker = ParseTreeWalker()
walker.walk(assembler, tree)
unityTree = convertToUnity(ifpath, assembler.scripts, assembler.strTbl)
scripts[basename] = unityTree
repackUnityAll("Dpr/ev_script", "bin/ev_script", scripts)
def main():
# parser = ArgumentParser()
# parser.add_argument("-i", "--input", dest='ifpath', action='store', required=True)
# parser.add_argument("-o", "--output", dest='ofpath', action='store', required=True)
# parser.add_argument("-s", "--script", dest='script', action='store', required=True)
# vargs = parser.parse_args()
# assemble(vargs.ifpath, vargs.ofpath, vargs.script)
assemble_all()
if __name__ == "__main__":
main()
|
# Copyright 2019 Willian Fuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
INPUT_TRAIN_SCHEMA = {
'customer_id': tf.FixedLenFeature([], tf.int64),
'skus_list': tf.VarLenFeature(tf.int64),
'actions_list': tf.VarLenFeature(tf.string),
}
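# Usage sketch (not part of the original module), assuming a TF 1.x runtime
# where tf.parse_single_example is available; the feature values below are
# hypothetical:
#     example = tf.train.Example(features=tf.train.Features(feature={
#         'customer_id': tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
#         'skus_list': tf.train.Feature(int64_list=tf.train.Int64List(value=[10, 11])),
#         'actions_list': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'view'])),
#     }))
#     parsed = tf.parse_single_example(example.SerializeToString(), INPUT_TRAIN_SCHEMA)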
|
#!/usr/bin/env python
'''
Usage:
./noob3a.py [options]
Options:
--help -h This help.
--mktest-data=D -D D Output test-data.
'''
import numpy as np;
from struct import pack,unpack;
import os;
__dt = 'float32'
def _pint(i): return pack("i",i);
def _upint(f): return unpack("i",f.read(4))[0];
def output_centered(fname, X,Y,Z,D,order=None):
psh = (len(Z), len(Y), len(X));
if order != 'flip':
psh = (len(X), len(Y), len(Z));
if D.shape != psh:
raise ValueError("invalid shape of D ({} vs ({})".format(D.shape,psh));
D = D.astype(__dt);
X = np.array(X).astype(__dt);
Y = np.array(Y).astype(__dt);
Z = np.array(Z).astype(__dt);
with open(fname, "wb") as f:
f.write(_pint(len(X)));
f.write(memoryview(X));
f.write(_pint(len(Y)));
f.write(memoryview(Y));
f.write(_pint(len(Z)));
f.write(memoryview(Z));
L = D.shape[0]*D.shape[1]*D.shape[2];
f.write(_pint(L));
f.write(memoryview(D));
def load_centered(fname,order=None):
with open(fname, "rb") as f:
xl = _upint(f);
x = np.fromfile(f,dtype=__dt,count=xl);
yl = _upint(f);
y = np.fromfile(f,dtype=__dt,count=yl);
zl = _upint(f);
z = np.fromfile(f,dtype=__dt,count=zl);
Dl = _upint(f);
if Dl != xl*yl*zl:
raise ValueError("invalid file ({} = {}*{}*{})".format(Dl,zl,yl,xl));
D = np.fromfile(f,dtype=__dt, count=Dl);
        if order == 'flip':
            D = D.reshape(zl,yl,xl);
        else:
            D = D.reshape(xl,yl,zl);
return x,y,z,D;
def mktestdata():
x=np.array([-1.0,0.0,1.0]).astype(__dt);
y=np.array([ 0.5,0.5,1.5]).astype(__dt);
z=np.array([ 0.0,0.5,2.5]).astype(__dt);
    Z,Y,X = np.meshgrid(z,y,x,indexing='ij');
D = np.sqrt(X**2+Y**2 + Z);
return x,y,z,D;
def test1(fnametest):
x,y,z,D = mktestdata();
output_centered(fnametest, x,y,z,D);
xp,yp,zp,Dp = load_centered(fnametest);
out = np.max(Dp != D );
out|= np.max(x != xp);
out|= np.max(y != yp);
out|= np.max(z != zp);
if out:
raise ValueError("test failed");
os.remove(fnametest);
print("test passed");
if __name__ == "__main__":
from docopt import docopt;
opts = docopt(__doc__,help=True);
test1("tmp.dat");
if opts['--mktest-data']:
x,y,z,D=mktestdata();
output_centered(opts['--mktest-data'],x,y,z,D);
pass;
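# Note (added): the on-disk layout written by output_centered is
#   int32 len(X), float32 X[...],
#   int32 len(Y), float32 Y[...],
#   int32 len(Z), float32 Z[...],
#   int32 len(D), float32 D.flatten()[...]
# and load_centered reads the fields back in the same order.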
|
#!/bin/env python
# 2014-4-18 created by Linxzh
# to combine additional fqfile with specific fqfile
import os
import argparse
# arguments
parser = argparse.ArgumentParser(description='Merge the fqfiles of the same sample produced by different lane', prog = 'Fqfile Merger')
parser.add_argument('-dira', type = str)
parser.add_argument('-dirb', type = str)
parser.add_argument('-diro', type = str, help = 'output dir')
args = parser.parse_args()
|
import inspect
from django.dispatch import receiver
from django_mvc.signals import django_inited,actions_inited
class Action(object):
"""
    All attribute names should be lower case.
"""
def __init__(self,action,tag="button",tag_body=None,tag_attrs=None,permission=None):
self.permission = permission if permission else None
self.tag = tag.lower()
self.action = action
self.tag_body = tag_body or action
self.tag_attrs = tag_attrs or {}
self.callable_attrs = None
self.cloned_actions = None
self.initialized = False
def initialize(self):
if self.initialized:
return
from django_mvc.forms import widgets
if self.permission and callable(self.permission):
self.permission = self.permission()
if self.tag_body and callable(self.tag_body):
self.tag_body = self.tag_body()
default_attrs = []
if self.tag == "option":
default_attrs=[("value",self.action)]
elif self.tag == "button":
if "onclick" in self.tag_attrs:
default_attrs=[("class","btn btn-primary"),("value",self.action),("name","action__")]
else:
default_attrs=[("class","btn btn-primary"),("type","submit"),("value",self.action),("name","action__")]
for k,v in default_attrs:
if k not in self.tag_attrs:
self.tag_attrs[k] = v
if self.tag_attrs:
for k in self.tag_attrs.keys():
if callable(self.tag_attrs[k]):
argspec = inspect.getfullargspec(self.tag_attrs[k])
if not argspec.args:
self.tag_attrs[k] = self.tag_attrs[k]()
if callable(self.tag_attrs[k]):
#initialized attribute need further initialization
argspec = inspect.getfullargspec(self.tag_attrs[k])
self.callable_attrs = [] if self.callable_attrs is None else self.callable_attrs
self.callable_attrs.append((k,(lambda method,argspec:(lambda kwargs: method(*[kwargs.get(p) for p in argspec.args])))(self.tag_attrs[k],argspec) ))
#delete non-ready attributes which need further initialization from tag_attrs
if self.callable_attrs:
self.html = self._html2
for k,m in self.callable_attrs:
del self.tag_attrs[k]
else:
self.html = self._html1
self._widget = widgets.HtmlTag(self.tag,self.tag_attrs,self.tag_body)
if not self.permission:
self.has_permission = self._always_has_permission
elif isinstance(self.permission,str):
self.permission = RightPermission(self.permission)
self.has_permission = self._check_permission
elif isinstance(self.permission,(list,tuple)):
self.permission = [ RightPermission(perm) if isinstance(perm,str) else perm for perm in self.permission]
self.has_permission = self._check_any_permissions
else:
self.has_permission = self._check_permission
if self.cloned_actions:
for a in self.cloned_actions:
a.initialize()
self.initialized = True
def clone(self,tag_attrs=None,tag_body=None):
attrs = dict(self.tag_attrs)
if tag_attrs:
attrs.update(tag_attrs)
action = Action(self.action,tag=self.tag,tag_body=tag_body or self.tag_body,tag_attrs=attrs,permission=self.permission)
if self.cloned_actions is None:
self.cloned_actions = [action]
else:
self.cloned_actions.append(action)
if self.initialized:
action.initialize()
return action
def _always_has_permission(self,user):
return True;
def _check_permission(self,user):
print("check permission:user={}, permission={}".format(user,self.permission))
if user.is_superuser:
return True
return self.permission.check(user)
def _check_any_permissions(self,user):
print("check permission:user={}, permission={}".format(user,self.permission))
if user.is_superuser:
return True
for perm in self.permission:
if perm.check(user):
return True
return False
@property
def widget(self):
return self._widget
@property
def basehtml(self):
return self.html()
def _html1(self,value = "",**kwargs):
value = value or ""
if value == self.action:
return self._widget.render("selected=\"selected\"")
else:
return self._widget.render()
def _html2(self,value = "",**kwargs):
value = value or ""
attrs = {}
for k,m in self.callable_attrs:
v = m(kwargs)
if v:
attrs[k] = v
if attrs:
attrs = " ".join(["{}=\"{}\"".format(key,value) for key,value in attrs.items()])
else:
attrs = None
if value == self.action:
return self._widget.render("selected=\"selected\"",attrs=attrs)
else:
return self._widget.render(attrs=attrs)
class BasePermission(object):
def initialize(self):
pass
def check(self,user):
return False
class GroupPermission(BasePermission):
def __init__(self,group):
self.group_not_exist = False
self.group = group
def initialize(self):
        from django.contrib.auth.models import Group
        from django.core.exceptions import ObjectDoesNotExist
try:
if isinstance(self.group,str):
self.group = Group.objects.get(name=self.group)
elif isinstance(self.group,int):
self.group = Group.objects.get(id=self.group)
elif isinstance(self.group,Group):
pass
else:
self.group_not_exist = True
except ObjectDoesNotExist as ex:
self.group_not_exist = True
def __str__(self):
return "User Group:{}".format(self.group)
def check(self,user):
if self.group_not_exist:
return False
elif self.group:
return self.group in user.groups.all()
else:
return True
class RightPermission(BasePermission):
def __init__(self,permission):
self.permission = permission
def __str__(self):
return "Permission:{}".format(self.permission)
def check(self,user):
return user.has_perm(self.permission)
class UsernamePermission(BasePermission):
def __init__(self,user,casesensitive=False,exact_match=False,exclusive=False):
self.casesensitive = casesensitive
self.exclusive = exclusive
self.exact_match = exact_match
if isinstance(user,str):
self.user = [user if self.casesensitive else user.upper()]
else:
self.user = [u if self.casesensitive else u.upper() for u in user]
if self.exact_match:
self.is_match = lambda loginuser,user: loginuser == user if self.casesensitive else loginuser.upper() == user
else:
self.is_match = lambda loginuser,user: user in loginuser if self.casesensitive else user in loginuser.upper()
def __str__(self):
return "Login user should {} {} with {}".format("not be" if self.exclusive else "be",self.user,"case sensitive" if self.casesensitive else "case insensitive")
def check(self,user):
if self.exclusive:
for u in self.user:
if self.is_match(user.username,u):
return False
return True
else:
for u in self.user:
if self.is_match(user.username,u):
return True
return False
class AndPermission(BasePermission):
def __init__(self,permissions):
self.permissions = permissions
def initialize(self):
for permission in self.permissions:
permission.initialize()
def check(self,user):
for perm in self.permissions:
if not perm.check(user):
return False
return True
class OrPermission(BasePermission):
def __init__(self,permissions):
self.permissions = permissions
def initialize(self):
for permission in self.permissions:
permission.initialize()
def check(self,user):
for perm in self.permissions:
if perm.check(user):
return True
return False
class GetActionMixin(object):
def get_action(self,action_name):
return BUTTON_ACTIONS.get(action_name) or OPTION_ACTIONS.get(action_name)
class DynamicAction(Action):
def __init__(self,action,extra_attrs):
self.action = action
self.extra_attrs = extra_attrs
def clone(self):
raise NotImplementedError("Not Implemented")
@property
def has_permission(self):
return self.action.has_permission
@property
def widget(self):
return self.action.widget
@property
def basehtml(self):
return self.html()
@property
def tag_attrs(self):
return self.action.tag_attrs
def html(self,value = "",**kwargs):
value = value or ""
if value == self.action.action:
return self.widget.render("selected=\"selected\" {}".format(self.extra_attrs))
else:
return self.widget.render(self.extra_attrs)
BUTTON_ACTIONS = {
"save":Action("save","button","Save",{"class":"btn btn-primary btn-success","type":"submit"}),
"select":Action("select","button","Select",{"class":"btn btn-primary btn-success","type":"submit",}),
"cancel":Action("cancel","a","Cancel",{
"class":"btn btn-danger",
"onclick":lambda nexturl: "window.location='{}';".format(nexturl) if nexturl else "history.go(-1);"
}),
"upload":Action("upload","button","Upload",{"class":"btn btn-success","type":"submit"}),
"download":Action("download","button","Download",{"class":"btn btn-success","type":"submit","style":"width:260px"}),
"deleteconfirm":Action("delete","button","Delete",{"class":"btn btn-success","type":"submit"}),
"deleteconfirmed":Action("deleteconfirm","button","Yes,I'm sure",{"class":"btn btn-success","type":"submit"}),
"archiveconfirm":Action("archive","button","Archive",{"class":"btn btn-success","type":"submit"}),
"archiveconfirmed":Action("archiveconfirm","button","Archive",{"class":"btn btn-success","type":"submit"}),
"close":Action("close","button","Close",{"class":"btn btn-success","type":"submit"}),
"update_filter":Action("search","button","Update",{"class":"btn btn-success","type":"submit","style":"width:100px"}),
}
OPTION_ACTIONS = {
"empty_action":Action("","option","----------"),
"delete_selected_documents":Action("deleteconfirm","option","Delete selected documents",permission="prescription.delete_prescription"),
"archive_selected_documents":Action("archiveconfirm","option","Archive selected documents",permission="document.archive_document"),
}
@receiver(django_inited)
def initialize_actions(sender,**kwargs):
for action in BUTTON_ACTIONS.values():
action.initialize()
for action in OPTION_ACTIONS.values():
action.initialize()
actions_inited.send(sender="actions")
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib import replay_memory
from common import GridAnnotationWindow
import Tkinter
def main():
print("Loading replay memory...")
memory = replay_memory.ReplayMemory.create_instance_supervised()
win = GridAnnotationWindow.create(
memory,
current_anno_attribute_name="current_lane_grid",
save_to_fp="annotations_current_lane.pickle",
every_nth_example=20
)
win.brush_size = 2
win.autosave_every_nth = 100
win.master.wm_title("Annotate current lane")
Tkinter.mainloop()
if __name__ == "__main__":
main()
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
fig, ax = plt.subplots()
# y_lim = 260000000
plt.ylim([0, 25])
plt.xlim([0, 21])
for n in range(1,21):
center = [n,0]
    print(float(n)/100)
fancybox = mpatches.FancyBboxPatch(
center, 0.5, 20,
boxstyle=mpatches.BoxStyle("Round", pad=0.2))
ax.add_patch(fancybox)
plt.savefig('round_corner.png')
|
#base enemy
import character
import animation
import math
import itertools
import random
import Message
class Enemy(character.Character):
def __init__(self, messageLog, currentMap):
super(Enemy, self).__init__(messageLog, currentMap)
self.minLevel = 0
self.baseDanger = 1
self.minGroup = 1
self.team = 1;
self.character = "e"
self.name = "Enemy"
self.maxmp = 0
self.mp = 0
self.mpChargeRate = 0
self.mpAcc = 0
self.ai = "Basic"
self.FlameReady = None
self.State = None
self.Target = None
def update(self):
super().update()
if self.mp < self.maxmp:
self.mpAcc += self.mpChargeRate
if self.mpAcc >= 1:
self.mp += round(self.mpAcc)
self.mpAcc -= round(self.mpAcc)
self.mp = min(self.mp, self.maxmp)
def BasicUpdate(self):
try:
nearestEnemy = min([i for i in self.currentMap.characters if i != self and i.team != self.team],
key = lambda i: abs(self.x - i.x) + abs(self.y - i.y))
dx = 0 if nearestEnemy.x == self.x else (-1 if nearestEnemy.x < self.x else 1)
dy = 0 if nearestEnemy.y == self.y else (-1 if nearestEnemy.y < self.y else 1)
self.tryMove(self.x + dx, self.y + dy)
except ValueError:
self.Wait()
    # Returns something that resembles the risk of dying in this turn. Essentially, how dangerous the place is.
def GetRoomDanger(self):
EnemiesInRoom = [i for i in self.currentMap.characters if i.team != self.team\
and len([j for j in self.currentMap.GetRooms(i.x, i.y)\
if j in self.currentMap.GetRooms(self.x, self.y)]) > 0]
if len(EnemiesInRoom) > 0:
# Calculate enemies average damage per hit to dragon
try:
return sum(map(lambda i: i.GetAverageDamage(self.ToDefend(),
self.level)/max(abs(i.x - self.x ),abs(i.y-self.y)),
EnemiesInRoom))/max(1,self.hp)
except ZeroDivisionError:
return 1
else:
return 0
def CastHeal(self, target):
self.mp -= 3
target.hp += self.level+1
target.hp = min(target.hp, target.maxhp)
if self != target:
self.messageLog.append(Message.Message(self.name + " heals " + target.name, [(self.x, self.y),(target.x, target.y)]))
else:
self.messageLog.append(Message.Message(self.name + " heals himself", [(self.x, self.y)]))
self.ticksUntilTurn = round(200/self.speed)
self.animations.append(animation.HealAnimation((target.x, target.y)))
def PrepareFlameBreath(self, target, radius, distance):
self.ticksUntilTurn = round(200/self.speed)
self.FlameReady = (target, radius, distance)
def FlameBreathMPCost(self, radius, distance):
return radius * distance // 2
def CastFlameBreath(self, target, radius, distance):
self.mp -= self.FlameBreathMPCost(radius, distance)
#Draw animation
self.messageLog.append(Message.Message(self.name + " breathes burning flame", [(self.x, self.y)]))
frames = []
for i in range(distance):
frames.append(self.CastFlame_(self.x, self.y, target[0], target[1], radius, i))
self.animations.append(animation.DragonsBreathAnimation((self.x, self.y), frames))
grid = self.CastFlame_(self.x, self.y, target[0], target[1], radius, i)
width = len(grid)
height = len(grid[0])
for i in range( (width//2) * -1, width // 2):
for j in range ( (height//2) * -1, height // 2):
windowWidth = len(self.currentMap.Map)
windowHeight = len(self.currentMap.Map[0])
if (self.x + i > 0) and (self.x + i < windowWidth) and (self.y + j > 0) and (self.y + j < windowHeight):
x = self.x + i
y = self.y + j
#print (x, y)
monsterInSquare = [l for l in self.currentMap.characters if (l.x == x) and (l.y == y)]
for k in monsterInSquare:
if k != self:
#rint ("BURN CHECK: ", x, y, grid[i][j], k.name)
igniteResult = \
k.Ignite(max(.1, grid[i][j] - \
(0 if (k.leftHandEquipped == None) or (k.leftHandEquipped.ItemClass != 6) else k.leftHandEquipped.ToDefend) - \
(0 if (k.rightHandEquipped == None) or (k.rightHandEquipped.ItemClass != 6) else k.rightHandEquipped.ToDefend)), self.burnDamage, self)
#print (str(max(.1, grid[i][j] - \
# (0 if (k.leftHandEquipped == None) or (k.leftHandEquipped.ItemClass != 6) else k.leftHandEquipped.ToDefend) - \
# (0 if (k.rightHandEquipped == None) or (k.rightHandEquipped.ItemClass != 6) else k.rightHandEquipped.ToDefend))) + "%")
if igniteResult[0]:
self.messageLog.append(Message.Message(k.name + " is ignited by the scorching flame!", [(self.x, self.y),(k.x, k.y)]))
if igniteResult[1]:
self.messageLog.append(Message.Message(k.name + " has been prematurely cremated!", [(self.x, self.y),(k.x, k.y)]))
self.Killed(k)
self.ticksUntilTurn = round(100/self.speed)
def CastFlame_(self, sx, sy, tx, ty, radius, distance):
grid = [[0]*(2*round(distance)+1) for i in range(2*round(distance) + 1)]
angle = math.atan2(ty - sy, tx - sx)
invangle = angle + math.pi/2
endx = math.sin(angle) * distance
endy = math.cos(angle) * distance
angles = []
#print ('-', angle, '-')
for i in range(round(radius) * -1, round (radius)):
nextEndX = endx + math.sin(invangle) * i
nextEndY = endy + math.cos(invangle) * i
#print ("End: ", nextEndX, nextEndY)
angles.append(math.atan2(nextEndY, nextEndX))
for i in angles:
#print (i)
for j in range(1,round(distance)):
x = math.sin(i) * j
y = math.cos(i) * j
xpart, xint = math.modf(x - 0.5)
ypart, yint = math.modf(y - 0.5)
xint = int(xint)
yint = int(yint)
if (not(self.currentMap.Map[round(sx+xint)][round(sy+yint)].walkable)):
break;
xpart = xpart + 1 if xpart < 0 else xpart
ypart = ypart + 1 if ypart < 0 else ypart
if(self.currentMap.Map[round(sx)+xint][round(sy)+yint].walkable):
grid[xint][yint] = grid[xint][yint] + xpart / 8 + ypart / 8
if(self.currentMap.Map[round(sx)+xint+1][round(sy)+yint].walkable):
grid[xint + 1][yint] = grid[xint + 1][yint] + (1 - xpart) / 8 + ypart / 8
if(self.currentMap.Map[round(sx)+xint+1][round(sy)+yint+1].walkable):
grid[xint + 1][yint + 1] = grid[xint + 1 ][yint + 1] + (1 - xpart) / 8 + (1 - ypart) / 8
if(self.currentMap.Map[round(sx)+xint][round(sy)+yint+1].walkable):
grid[xint][yint + 1] = grid[xint][yint + 1] + xpart / 8 + (1 - ypart) / 8
grid[0][0] = 0
return grid
|
from random import randint
print ('\n\n\nVamos jogar JOKENPÔ!!!\n\n\n')
escolha = int(input('Escolha PEDRA, PAPEL ou TESOURA:\n\n[1] PEDRA\n\n[2] PAPEL\n\n[3] TESOURA\n\nFaça a sua escolha: '))
if escolha != 1 and escolha != 2 and escolha != 3:
print('\n\nVocê escolheu: {}\nApenas números entre 1 e 3 são aceitos, tente novamente'.format(escolha))
else:
lista = ['','PEDRA','PAPEL','TESOURA']
print('Você escolheu: {}\n\n'.format(lista[escolha]))
computador = randint(1,3)
print('O computador escolheu: {}\n\n'.format(lista[computador]))
if escolha == 1 and computador == 1:
        print('Empate')
elif escolha == 1 and computador == 2:
print('O computador ganhou')
elif escolha == 1 and computador == 3:
print('Você ganhou')
elif escolha == 2 and computador == 2:
        print('Empate')
elif escolha == 2 and computador == 1:
print('Você ganhou')
elif escolha == 2 and computador == 3:
print('O computador ganhou')
elif escolha == 3 and computador == 3:
        print('Empate')
elif escolha == 3 and computador == 1:
print('O computador ganhou')
elif escolha == 3 and computador == 2:
print('Você ganhou')
print ('\n\n === FIM ===\n\n\n\n')
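# Note (added): the win/draw/lose branches above can also be expressed with
# modular arithmetic: (escolha - computador) % 3 == 0 is a draw, == 1 means
# the player wins, and == 2 means the computer wins.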
|
from functools import wraps
from rest_framework.response import Response
from rest_framework import status
def base_response(func):
# @wraps(func)
def response(serializer=None, data=None, **kwargs):
if serializer is not None and data is None:
data = serializer.data
elif serializer is not None and data is not None:
data = serializer.data
return func(data, **kwargs)
return response
@base_response
def success_response(data, **kwargs):
return Response(data=data, status=status.HTTP_200_OK, **kwargs)
@base_response
def not_found_response(data, **kwargs):
return Response(data=data, status=status.HTTP_404_NOT_FOUND, **kwargs)
@base_response
def created_response(data, **kwargs):
return Response(data=data, status=status.HTTP_201_CREATED, **kwargs)
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from deeppavlov.core.models.estimator import Estimator
from deeppavlov.core.models.serializable import Serializable
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.common.registry import register
from deeppavlov.core.common.file import save_pickle
from deeppavlov.core.common.file import load_pickle
from deeppavlov.core.commands.utils import expand_path, make_all_dirs, is_file_exist
logger = get_logger(__name__)
@register('sentence2vector_w2v_tfidf')
class SentenceW2vVectorizerTfidfWeights(Estimator, Serializable):
"""
Sentence vectorizer which produce one vector as tf-idf weighted sum of words vectors in sentence
Parameters:
save_path: path to save the model
load_path: path to load the model
Returns:
None
"""
def __init__(self, save_path: str = None, load_path: str = None, **kwargs) -> None:
self.save_path = save_path
self.load_path = load_path
if is_file_exist(self.load_path):
self.load()
else:
if kwargs['mode'] != 'train':
self.load()
else:
self.vectorizer = TfidfVectorizer()
def __call__(self, questions: List[str], tokens_fasttext_vectors: List) -> List:
"""Vectorize list of sentences
Parameters:
questions: list of questions/sentences
tokens_fasttext_vectors: fasttext vectors for sentences
Returns:
List of vectorized sentences
"""
if isinstance(questions[0], list):
questions = [' '.join(x) for x in questions]
q_vects = self.vectorizer.transform(questions)
questions_vectors = []
for i, q in enumerate(questions):
q_weights = []
for token in q.split():
if token in self.token2idx:
tfidf_vector = q_vects[i, :]
q_weights.append(tfidf_vector[0, self.token2idx[token]])
else:
q_weights.append(0)
if sum(q_weights) == 0:
questions_vectors.append(None)
else:
questions_vectors.append(np.average(tokens_fasttext_vectors[i], weights=q_weights, axis=0))
return questions_vectors
def fit(self, x_train: List) -> None:
"""Train tf-idf weights
Parameters:
x_train: train sentences
Returns:
None
"""
if isinstance(x_train[0], list):
x_train = [' '.join(x) for x in x_train]
self.vectorizer = TfidfVectorizer()
self.vectorizer.fit(x_train)
self.token2idx = self.vectorizer.vocabulary_
def save(self) -> None:
"""Save model"""
logger.info("Saving tfidf_vectorizer to {}".format(self.save_path))
path = expand_path(self.save_path)
make_all_dirs(path)
save_pickle(self.vectorizer, path)
def load(self) -> None:
"""Load model"""
logger.info("Loading tfidf_vectorizer from {}".format(self.load_path))
self.vectorizer = load_pickle(expand_path(self.load_path))
self.token2idx = self.vectorizer.vocabulary_
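# Usage sketch (not part of the original module); the paths and the toy
# sentences below are hypothetical, and tokens_vectors stands for the
# per-sentence lists of fastText word vectors expected by __call__:
#     vectorizer = SentenceW2vVectorizerTfidfWeights(
#         save_path='tfidf.pkl', load_path='tfidf.pkl', mode='train')
#     vectorizer.fit(['how are you', 'what is your name'])
#     sentence_vectors = vectorizer(['how are you'], tokens_vectors)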
|
import pygame
import cmath
import math as m
from pygame import *
from cmath import *
import matplotlib as mat
import numpy as np
xmax = 20
ymax = 16
inter = 1
interx = 1
tmax = 10
intert = 10
j = sqrt(-1)
WOUT_Last = [ 0, 0]
def ln(x):
try:
return log(abs(x)) - j * ( atan( x.real / ( .0000001 + x.imag)) - pi / 2)
except:
return -100
def Integrate(f, x, D):
Intout = 0
for i in range( -int( D * abs(x)), int( D * abs(x))):
Intout += f(i / D * (x + .00001) / (abs(x) + .00001)) / D
return Intout
def Dedekind(x):
DedeOUT = x
if abs(x) <= 1:
for i in range(1,101):
DedeOUT *= ( 1 - x ** i)
else:
DedeOUT = 0
return x * DedeOUT
def Zeta(z):
ZetaOUT = 0
if z.real == 1 and z.imag == 0:
return 10000
elif z.real >= 0:
for i in range(1,501):
ZetaOUT += (-1) ** i / (i ** z)
return ZetaOUT / (2 ** (1 - z) - 1)
else:
return 0
def Eta(x):
return Zeta(j * x + 1 / 2)
def RiemannZ(n):
i = 0
if n > 0:
while int(n) != 0:
while abs(Eta(i)) > .04:
i += abs(Eta(i)) ** 2 / 5
n += -1
i += .1
else:
while int(n) != 0:
while abs(Eta(-i)) > .04:
i += -abs(Eta(i)) ** 2 / 5
n += 1
i += -.1
return i
def fact(x):
factOUT = 1
for i in range(1, x + 1):
factOUT *= i
if factOUT == 1:
return 1
else:
return factOUT
def liPart(k):
liPOUT = 0
for n in range(0, m.floor((k + 1) / 2)):
liPOUT += 1/( 2 * n + 1)
return liPOUT
def Welliptic(x, w1, w2):
global WOUT2
global WOUT_Last
WOUT1 = 0
for m_1 in range(-50,51):
try:
WOUT1 += sin( pi * ( x - 2 * w2 * m_1) / ( 2 * w1)) ** (-2)
except:
WOUT1 += 100
if WOUT_Last != [ w1, w2]:
WOUT2 = 0
for m_2 in range(-50, 51):
if m_2 == 0:
continue
else:
WOUT2 += sin(pi * w2 * m_2 / w1) ** (-2)
WOUT_Last = [w1, w2]
return (pi / (2 * w1)) ** 2 * ( -1 / 3 + WOUT1 - WOUT2)
def li( z, p = 1):
liOUT = 0
for i in range(1, 100):
liOUT += ( p ** i) * ( ln(z) ** i) * liPart(i) / ( fact(i) * (-2) ** ( i -1))
if z.real == 1 and z.imag == 0:
return -100
else:
return sqrt(z ** p) * liOUT + ln(abs(ln(z))) + ln(p) + np.euler_gamma
def Draw(F):
for i_1 in range( -400, 401, interx):
i_x = i_1 * xmax / 800
draw.line(DisplaySurf, (0, 255, 0), (i_1 + 400, 300 - F(i_x).real * 600 / ymax),
(i_1 + interx + 400, 300 - F(i_x + interx * xmax / 800).real * 600 / ymax), 3)
draw.line(DisplaySurf, (255, 0, 0), (i_1 + 400, 300 - F(i_x).imag * 600 / ymax),
(i_1 + interx + 400, 300 - F(i_x + interx * xmax / 800).imag * 600 / ymax), 3)
def DrawP(G, F):
for i_1 in range( -tmax * intert, tmax * intert, 1):
i_t = i_1 / intert
draw.line(DisplaySurf, (0, 255, 0), ( 400 + G(i_t).real * 400 / xmax, 300 + F(i_t).real * 300 / ymax),
( 400 + G(i_t + 1 / intert).real * 400 / xmax,
300 + F(i_t + 1 / intert).real * 300 / ymax), 3)
draw.line(DisplaySurf, (255, 0, 0), ( 400 + G(i_t).imag * 400 / xmax, 300 + F(i_t).real * 300 / ymax),
( 400 + G(i_t + 1 / intert).imag * 400 / xmax,
300 + F(i_t + 1 / intert).real * 300 / ymax), 3)
def Cloth(x):
if x == 0:
return 0
else:
return tan(x ** 2) + Cloth( x - 1)
init()
DisplaySurf = pygame.display.set_mode((800, 600), 0, 32)
display.set_caption('Graph')
DisplaySurf.fill(( 255, 255, 255))
for i_1 in range( -2 * (xmax), 2 * (xmax + inter), inter):
draw.line(DisplaySurf, (51, 51, 51), (i_1 / 2 * 400 / xmax + 400, 600), (i_1 / 2 * 400 / xmax + 400, 0), 1)
for i_1 in range( -xmax - xmax % inter, xmax + inter, 2 * inter):
draw.line(DisplaySurf, (102, 102, 102), ( i_1 * 400 / xmax + 400, 600), ( i_1 * 400 / xmax + 400, 0), 1)
for i_1 in range( -2 * (ymax), 2 * (ymax + inter), inter):
draw.line(DisplaySurf, (51, 51, 51), ( 0, i_1 / 2 * 300 / ymax + 300), ( 800, i_1 / 2 * 300 / ymax + 300), 1)
for i_1 in range( -ymax - ymax % 2, ymax + inter, 2 * inter):
draw.line(DisplaySurf, (102, 102, 102), ( 0, i_1 * 300 / ymax + 300), ( 800, i_1 * 300 / ymax + 300), 1)
draw.line(DisplaySurf, (0, 0, 0), ( 0, 300), ( 800, 300), 3)
draw.line(DisplaySurf, (0, 0, 0), ( 400, 0), ( 400, 600), 3)
draw.circle(DisplaySurf, ( 0, 0, 0), ( 400, 300), 8, 2)
Draw(lambda x: Integrate( lambda t: t ** 2 * cos(2 * pi * t * int(x)), 30, 10) + j * 5)
while True:
for event in pygame.event.get():
if event.type == QUIT:
quit()
exit()
display.update()
|
from tenzing.core.models import (tenzing_model, model_relation)
from tenzing.core import model_implementations
from tenzing.core import summary
from tenzing.core import plotting
|
"""
test_runner_browndye2.py
Tests for the runner_browndye2.py script(s).
"""
import os
import glob
import seekr2.tests.make_test_model as make_test_model
import seekr2.modules.common_sim_browndye2 as sim_browndye2
import seekr2.modules.runner_browndye2 as runner_browndye2
def test_runner_browndye2_b_surface_default(tmp_path):
model = make_test_model.make_test_model(tmp_path)
b_surface_abs_path = os.path.join(tmp_path, "b_surface")
receptor_pqr_filename = os.path.join(
b_surface_abs_path, model.browndye_settings.receptor_pqr_filename)
ligand_pqr_filename = os.path.join(
b_surface_abs_path, model.browndye_settings.ligand_pqr_filename)
bd_milestone = model.k_on_info.bd_milestones[0]
ghost_index_rec = \
sim_browndye2.add_ghost_atom_to_pqr_from_atoms_center_of_mass(
receptor_pqr_filename, bd_milestone.receptor_indices)
ghost_index_lig = \
sim_browndye2.add_ghost_atom_to_pqr_from_atoms_center_of_mass(
ligand_pqr_filename, bd_milestone.ligand_indices)
assert ghost_index_rec == 148
assert ghost_index_lig == 16
receptor_xml_filename = sim_browndye2.make_pqrxml(receptor_pqr_filename)
ligand_xml_filename = sim_browndye2.make_pqrxml(ligand_pqr_filename)
debye_length, reaction_filename = \
runner_browndye2.make_browndye_input_xml(
model, tmp_path, receptor_xml_filename, ligand_xml_filename,
model.k_on_info.b_surface_num_steps)
model.browndye_settings.debye_length = debye_length
assert os.path.exists(os.path.join(b_surface_abs_path, "apbs_input.xml"))
assert os.path.exists(os.path.join(b_surface_abs_path, "input.xml"))
abs_reaction_path = os.path.join(b_surface_abs_path,
reaction_filename)
runner_browndye2.make_browndye_reaction_xml(model, abs_reaction_path)
assert os.path.exists(abs_reaction_path)
bd_directory = b_surface_abs_path
runner_browndye2.run_bd_top(model.browndye_settings.browndye_bin_dir,
bd_directory)
#runner_browndye2.modify_variables(bd_directory, 10000)
runner_browndye2.run_nam_simulation(
model.browndye_settings.browndye_bin_dir, bd_directory,
model.k_on_info.bd_output_glob)
#assert os.path.exists(os.path.join(b_surface_abs_path, "results.xml"))
assert len(glob.glob(os.path.join(b_surface_abs_path, "results*.xml"))) > 0
"""
bd_milestone_abs_path = os.path.join(tmp_path, bd_milestone.directory)
assert os.path.exists(bd_milestone_abs_path)
runner_browndye2.extract_bd_surface(model, bd_milestone, 10)
bd_directory_list = runner_browndye2.make_fhpd_directories(
model, bd_milestone)
for bd_directory in bd_directory_list:
runner_browndye2.run_bd_top(model.browndye_settings.browndye_bin_dir,
bd_directory)
runner_browndye2.modify_variables(bd_directory, n_trajectories=1000)
runner_browndye2.run_nam_simulation(
model.browndye_settings.browndye_bin_dir, bd_directory,
model.k_on_info.bd_output_glob)
runner_browndye2.combine_fhpd_results(
model, bd_milestone, bd_directory_list)
assert os.path.exists(os.path.join(bd_milestone_abs_path, "results.xml"))
"""
return
# TODO: bd_milestone test runs
|
'''
Python program find the longest string of a given list of strings.
Input:
['cat', 'car', 'fear', 'center']
Output:
center
Input:
['cat', 'dog', 'shatter', 'donut', 'at', 'todo', '']
Output:
shatter
'''
def test(words):
return max(words, key=len)
strs = ['cat', 'car', 'fear', 'center']
print("Original strings:")
print(strs)
print("Longest string of the said list of strings:")
print(test(strs))
strs = ['cat', 'dog', 'shatter', 'donut', 'at', 'todo', '']
print("\nOriginal strings:")
print(strs)
print("Longest string of the said list of strings:")
print(test(strs))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('employee_code', models.CharField(max_length=20)),
('name', models.CharField(max_length=100)),
('qualification', models.CharField(max_length=20)),
('year_of_passing', models.CharField(default=b'', max_length=10)),
('doj', models.DateField(null=True, blank=True)),
('dor', models.DateField(null=True, blank=True)),
('designation', models.CharField(max_length=100)),
('client_name', models.CharField(max_length=100)),
('project_name', models.CharField(max_length=100)),
('father', models.CharField(max_length=100)),
('permanent_address', models.CharField(max_length=1024)),
('temporary_address', models.CharField(max_length=1024)),
('email_id', models.EmailField(max_length=75)),
('pan_no', models.CharField(max_length=20)),
('passport_no', models.CharField(max_length=50)),
('dob', models.DateField(null=True, blank=True)),
('blood_group', models.CharField(max_length=20)),
('emergency_contact', models.CharField(max_length=100)),
('primary_contact', models.CharField(max_length=100)),
('secondary_contact', models.CharField(max_length=100)),
('reference', models.CharField(max_length=100)),
('msys_reference', models.CharField(max_length=100)),
('contact_no', models.CharField(max_length=20)),
('msys_email', models.EmailField(max_length=75)),
('employees_number', models.CharField(max_length=20)),
('emails', models.EmailField(max_length=75)),
('acount_no', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'employee',
},
bases=(models.Model,),
),
]
|
""" unit test
"""
import numpy as np
from searchsorted import searchsorted
__all__ = ['TestSearchSorted']
class TestSearchSorted:
def test1(self):
arr = np.linspace(0, 10, 21)
value = 3.3
        index = searchsorted(arr, value)
        assert arr[index] == 3.
if __name__ == '__main__':
test = TestSearchSorted()
test.test1()
|
#!/usr/bin/python3
'''
NAME:
lf_pdf_search.py
PURPOSE:
lf_pdf_search.py will run a pdf grep looking for specific information in pdf files
"pdfgrep -r --include 'ASA*.pdf' 'ASA End Date'"
EXAMPLE:
lf_pdf_search.py
NOTES:
1. copy lf_pdf_search.py to a directory that has the pdf information
TO DO NOTES:
'''
import datetime
import pprint
import sys
if sys.version_info[0] != 3:
print("This script requires Python3")
exit()
import os
import socket
import logging
import time
from time import sleep
import argparse
import json
import configparser
import subprocess
import csv
import shutil
import os.path
import xlsxwriter
import re
import pandas as pd
class lf_pdf_search():
def __init__(self):
self.renewal_info = ""
self.timeout = 10
self.outfile = "pdf_search"
self.result = ""
self.stdout_log_txt = ""
self.stdout_log = ""
self.stderr_log_txt = ""
self.stderr_log = ""
self.processed_log_txt = ""
self.dataframe = ""
self.pdf_search_csv = ""
def get_data(self):
        # A little overkill here: save the data to files to help debug if something goes wrong
if self.outfile is not None:
self.stdout_log_txt = self.outfile
self.stdout_log_txt = self.stdout_log_txt + "-{}-stdout.txt".format("test")
self.stdout_log = open(self.stdout_log_txt, 'w+')
self.stderr_log_txt = self.outfile
self.stderr_log_txt = self.stderr_log_txt + "-{}-stderr.txt".format("test")
#self.logger.info("stderr_log_txt: {}".format(stderr_log_txt))
self.stderr_log = open(self.stderr_log_txt, 'w+')
print("Names {} {}".format(self.stdout_log.name, self.stderr_log.name))
# have ability to pass in a specific command
command = "pdfgrep -r --include 'ASA*.pdf' 'ASA End Date'"
print("running {}".format(command))
process = subprocess.Popen(['pdfgrep','-r','--include','ASA*.pdf','ASA End Date'], shell=False, stdout=self.stdout_log, stderr=self.stderr_log, universal_newlines=True)
try:
process.wait(timeout=int(self.timeout))
self.result = "SUCCESS"
except subprocess.TimeoutExpired:
process.terminate()
self.result = "TIMEOUT"
self.stdout_log.close()
self.stderr_log.close()
return self.stdout_log_txt
def preprocess_data(self):
pass
    # This method uses a pandas dataframe for the data manipulation;
    # the manipulation could also be done in other ways.
    def datafile_to_dataframe(self):
        # note: error_bad_lines=False below will skip any malformed lines
        try:
            self.dataframe = pd.read_csv(self.stdout_log_txt, delimiter=':')
        except Exception:
            print("one of the files may have a 'SN:' in it that needs to be corrected")
            self.dataframe = pd.read_csv(self.stdout_log_txt, delimiter=':', error_bad_lines=False)
#print(self.dataframe)
print("saving data to .csv")
        # strip the .txt extension
        self.pdf_search_csv = self.stdout_log_txt[:-4]
self.pdf_search_csv = self.pdf_search_csv + ".csv"
self.pdf_search_csv = self.dataframe.to_csv(self.pdf_search_csv,mode='w',index=False)
def main():
# arguments
parser = argparse.ArgumentParser(
prog='lf_pdf_search.py',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
        lf_pdf_search.py : searches pdf files for specific information using pdfgrep
''',
description='''\
lf_pdf_search.py
-----------
Summary :
---------
        show renewals
''')
parser.add_argument('--outfile', help="--outfile <Output Generic Name> used as base name for all files generated", default="")
parser.add_argument('--logfile', help="--logfile <logfile Name> logging for output of lf_pdf_search script", default="lf_pdf_search.log")
args = parser.parse_args()
pdf_search = lf_pdf_search()
output_file = pdf_search.get_data()
pdf_search.datafile_to_dataframe()
print("output file: {}".format(str(output_file)))
print("END lf_pdf_search.py")
if __name__ == "__main__":
main()
|
from feed.spiders.spider import Spider
class AliyunfeWeeklySpider(Spider):
name = 'aliyunfeweekly'
def __init__(self):
Spider.__init__(self,
start_urls=[
'https://github.com/aliyunfe/weekly',
],
index_xpath="//a[contains(text(),'》第')]/@href",
article_title_xpath="//h2//*[@class='final-path']/text()",
article_content_xpath="//div[@id='readme']",
index_limit_count=3,
index_reverse=True
)
|
from driver import ADC
print("-------------------start adc test--------------------")
adc = ADC()
adc.open("ADC0")
value = adc.read()
print(value)
adc.close()
print("-------------------end adc test--------------------")
|
import tempfile, os
import torch
import numpy as np
import cv2
import imageio
"""
Utilities for making visualizations.
"""
def save_video(images, output_fn):
writer = imageio.get_writer(output_fn, fps=20)
for im in images:
writer.append_data(im)
writer.close()
def draw_boxes(img, boxes, grid_size, color):
img = img.copy()
for j in range(boxes.shape[0]):
x, y, w, h = boxes[j] * grid_size
if x == 0 and y == 0 and w == grid_size and h == grid_size:
continue
cv2.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)), color, 2)
return img
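# plot_vid expects `vids` as a (T, C, H, W) tensor normalised with the ImageNet
# mean/std used below; it de-normalises each frame and overlays the ground-truth
# and/or predicted boxes, which are given in fractional [0, 1] coordinates.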
def plot_vid(vids, boxes_gt=None, boxes_pred=None):
vids = vids.cpu().numpy()
vids = np.transpose(vids, [0, 2, 3, 1])
output_imgs = []
for i in range(0, vids.shape[0], 1):
img = np.clip((vids[i] * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])) * 255, 0,
255).astype('uint8').copy()
grid_size = img.shape[0] - 1
if boxes_gt is not None:
img = draw_boxes(img, boxes_gt[i], grid_size, color=(255, 0, 0))
if boxes_pred is not None:
img = draw_boxes(img, boxes_pred[i], grid_size, color=(0, 0, 255))
output_imgs.append(img)
return output_imgs
def save_images(args, t, val_samples, dir_name='val'):
path = os.path.join(args.output_dir, dir_name, str(t))
if not os.path.exists(path):
os.makedirs(path)
vids = val_samples['vids']
boxes = val_samples['gt_boxes']
pred_boxes = val_samples['pred_boxes']
# Video generation and boxes prediction
pred_vid_gtbox = val_samples.get('pred_vids_gt_boxes', None)
pred_vid_gtbox_boxes = val_samples.get('pred_vids_gt_boxes_boxes', None)
pred_vid_predbox = val_samples.get('pred_vids_pred_boxes', None)
pred_vid_predbox_boxes = val_samples.get('pred_vids_pred_boxes_boxes', None)
pred_vid_gtflows = val_samples.get('pred_vids_gt_flows', None)
pred_vid_predflows = val_samples.get('pred_vids_pred_flows', None)
ind = -1
for b in range(len(vids)):
for i in range(vids[b].shape[0]):
try:
# print("Save video id: {}".format(val_samples['video_id'][b][i]))
ind += 1
vids_i = vids[b][i]
boxes_i = boxes[b][i]
pred_boxes_i = pred_boxes[b][i]
boxes_mask = ~((boxes_i == torch.LongTensor([-1, -1, -1, -1]).to(boxes_i)).all(dim=-1) +
(boxes_i == torch.LongTensor([0, 0, 1, 1]).to(boxes_i)).all(dim=-1))
# save gt
boxes_i = [boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(vids_i.clone(), boxes_i.copy(), None)
save_video(output_imgs, os.path.join(path, f"gt_box_{ind}.mp4"))
# save pred
pred_boxes_i = [pred_boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(vids_i.clone(), None, pred_boxes_i.copy())
save_video(output_imgs, os.path.join(path, f"pred_box_{ind}.mp4"))
# save video gt
if pred_vid_gtbox is not None and pred_vid_gtbox_boxes is not None:
pred_vid_gtbox_vid_i = pred_vid_gtbox[b][i]
pred_vid_gtbox_boxes_i = pred_vid_gtbox_boxes[b][i]
pred_vid_gtbox_boxes_i = [pred_vid_gtbox_boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(pred_vid_gtbox_vid_i.clone(), pred_vid_gtbox_boxes_i.copy(), None)
save_video(output_imgs, os.path.join(path, f"pred_vid_gt_box_{ind}.mp4"))
# save gt flows video gt
if pred_vid_gtflows is not None and pred_vid_gtbox_boxes is not None:
pred_vid_gtbox_vid_i = pred_vid_gtflows[b][i]
pred_vid_gtbox_boxes_i = pred_vid_gtbox_boxes[b][i]
pred_vid_gtbox_boxes_i = [pred_vid_gtbox_boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(pred_vid_gtbox_vid_i.clone(), pred_vid_gtbox_boxes_i.copy(), None)
save_video(output_imgs, os.path.join(path, f"pred_vid_gt_flows_gtbox_{ind}.mp4"))
# save pred flows video gt
if pred_vid_predflows is not None and pred_vid_gtbox_boxes is not None:
pred_vid_gtbox_vid_i = pred_vid_predflows[b][i]
pred_vid_gtbox_boxes_i = pred_vid_gtbox_boxes[b][i]
pred_vid_gtbox_boxes_i = [pred_vid_gtbox_boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(pred_vid_gtbox_vid_i.clone(), pred_vid_gtbox_boxes_i.copy(), None)
save_video(output_imgs, os.path.join(path, f"pred_vid_pred_flows_gtbox_{ind}.mp4"))
# save video pred
if pred_vid_predbox is not None and pred_vid_predbox_boxes is not None:
pred_vid_predbox_vid_i = pred_vid_predbox[b][i]
pred_vid_predbox_boxes_i = pred_vid_predbox_boxes[b][i]
pred_vid_predbox_boxes_i = [pred_vid_predbox_boxes_i[j][boxes_mask[j]] for j in range(boxes_mask.shape[0])]
output_imgs = plot_vid(pred_vid_predbox_vid_i.clone(), None, pred_vid_predbox_boxes_i.copy())
save_video(output_imgs, os.path.join(path, f"pred_vid_pred_box_{ind}.mp4"))
except Exception as e:
print("error in saving video: {}".format(e))
|
#!/bin/env python3
import threading
from collections import deque
from time import sleep
from rich.layout import Layout
from rich.live import Live
from console import main_console
from data_structure.host_info import HostInfo
from data_structure.imaging_metrics import ImagingMetrics
from data_structure.log_message_info import LogMessageInfo
from data_structure.shot_running_info import ShotRunningInfo
from data_structure.special_battery_percentage import SpecialBatteryPercentageEnum, MemoryUsage
from data_structure.system_status_info import SystemStatusInfo, MountInfo
from destination.rich_console.device_status_panel import DeviceStatusPanel
from destination.rich_console.footer_panel import FooterPanel
from destination.rich_console.forecast_panel import ForecastPanel
from destination.rich_console.log_panel import LogPanel
from destination.rich_console.mount_panel import MountPanel
from destination.rich_console.progress_panel import ProgressPanel
from destination.rich_console.rich_console_header import RichConsoleHeader
from event_emitter import ee
from event_names import BotEvent
class RichConsoleManager:
"""A console manager powered by rich"""
def __init__(self, config=None):
self.config = config
self.thread = None
self.header = RichConsoleHeader(config=config)
self.layout = None
self.mount_panel = None
self.log_panel = None
self.progress_panel = None
self.device_status_panel = None
self.forecast_panel = None
self.footer_panel = None
self.setup()
# Register events
ee.on(BotEvent.UPDATE_SYSTEM_STATUS.name, self.update_status_panels)
ee.on(BotEvent.APPEND_LOG.name, self.update_log_panel)
ee.on(BotEvent.UPDATE_SHOT_STATUS.name, self.update_shot_status_panel)
ee.on(BotEvent.UPDATE_HOST_INFO.name, self.update_footer_panel)
ee.on(BotEvent.UPDATE_BATTERY_PERCENTAGE.name, self.update_footer_panel)
ee.on(BotEvent.UPDATE_MEMORY_USAGE.name, self.update_footer_panel)
ee.on(BotEvent.UPDATE_METRICS.name, self.update_metrics_panel)
def setup(self):
self.make_layout()
self.log_panel = LogPanel(layout=self.layout['logs'], config=self.config)
self.mount_panel = MountPanel(config=self.config)
self.progress_panel = ProgressPanel()
self.footer_panel = FooterPanel(config=self.config, host_info=HostInfo())
self.device_status_panel = DeviceStatusPanel(config=self.config, layout=self.layout['logs'])
self.forecast_panel = ForecastPanel(layout=self.layout['logs'], config=self.config)
self.layout['forecast'].update(self.forecast_panel)
self.layout['header'].update(self.header)
self.update_status_panels()
self.update_mount_info_panel()
self.layout['logs'].update(self.log_panel)
self.layout['imaging'].update(self.progress_panel)
def run(self):
if self.thread:
return
self.thread = threading.Thread(target=self.run_loop)
self.thread.daemon = True
self.thread.start()
def run_loop(self):
with Live(self.layout, refresh_per_second=4, screen=True, redirect_stderr=False):
while True:
sleep(0.25)
def make_layout(self):
"""Define the layout."""
layout = Layout(name='root')
layout.split(
Layout(name='header', size=3),
Layout(name='status', size=8),
Layout(name='main', ratio=1),
Layout(name='footer', size=1)
)
layout['status'].split_row(
Layout(name='mount_info', size=45), # DEC, RA, ALT, AZ, etc.
Layout(name='forecast', ratio=3), # guiding error, last focusing result, last image HFD, staridx,
Layout(name='imaging', ratio=2), # current_img, sequence_%
)
layout['main'].split_row(
Layout(name='logs', ratio=1), # general logs
Layout(name='device_status', size=24) # status of all connected devices, etc.
)
self.layout = layout
def update_mount_info_panel(self, mount_info: MountInfo = MountInfo()):
# Update mount information sub-panel
self.mount_panel.mount_info = mount_info
self.layout['mount_info'].update(self.mount_panel)
def update_metrics_panel(self, imaging_metircs: ImagingMetrics = ImagingMetrics()):
return
def update_device_status_panel(self, system_status_info: SystemStatusInfo = SystemStatusInfo()):
self.device_status_panel.system_status_info = system_status_info
self.layout['device_status'].update(self.device_status_panel)
def update_status_panels(self, system_status_info: SystemStatusInfo = SystemStatusInfo()):
"""Update 3 panels related to status of the system"""
# Mount Info panel which includes the coordination of the mount pointing at
if system_status_info.device_connection_info.mount_connected:
mount_info = system_status_info.mount_info
else:
            # if the mount is not connected, none of this info can be trusted.
mount_info = MountInfo()
self.update_mount_info_panel(mount_info=mount_info)
# Device Status panel which shows status of all connected devices
self.update_device_status_panel(system_status_info=system_status_info)
# Progress Panel which shows the progress of the imaging session
self.progress_panel.sequence_target = system_status_info.sequence_name
if system_status_info.sequence_total_time_in_sec > 0:
self.progress_panel.sequence_progress.update(
system_status_info.sequence_elapsed_time_in_sec * 100.0 / system_status_info.sequence_total_time_in_sec)
self.layout['imaging'].update(self.progress_panel)
def update_log_panel(self, log: LogMessageInfo = LogMessageInfo()):
if not log:
return
self.log_panel.append_log(log)
self.layout['logs'].update(self.log_panel)
if log.type == 'TITLE' or log.type == 'SUBTITLE':
try:
self.header.show_action_toast(log.message)
self.layout['header'].update(self.header)
except Exception as exception:
main_console.print(exception)
def update_shot_status_panel(self, shot_running_info: ShotRunningInfo = ShotRunningInfo()):
if not shot_running_info:
return
self.progress_panel.update_shot_running_info(shot_running_info=shot_running_info)
self.layout['imaging'].update(self.progress_panel)
def update_footer_panel(self, host_info: HostInfo = None,
battery_percentage: int = SpecialBatteryPercentageEnum.NOT_MONITORED.value,
update: bool = False, memory_history: deque = None,
memory_usage: MemoryUsage = None):
if host_info:
self.footer_panel.host_info = host_info
if update:
self.footer_panel.battery_percentage = battery_percentage
if memory_usage:
self.footer_panel.memory_usage = memory_usage
self.layout['footer'].update(self.footer_panel)
|
d, e, f = 1, 2, 3
a, b, c = (d, e, f)
|
import numpy as np
import random
class ER(object):
def __init__(self, memory_size, state_dim, action_dim, reward_dim, qpos_dim, qvel_dim, batch_size, history_length=1):
self.memory_size = memory_size
self.actions = np.random.normal(scale=0.35, size=(self.memory_size, action_dim))
self.rewards = np.random.normal(scale=0.35, size=(self.memory_size, ))
self.states = np.random.normal(scale=0.35, size=(self.memory_size,) + state_dim)
self.qpos = np.random.normal(scale=0.35, size=(self.memory_size, qpos_dim))
self.qvel = np.random.normal(scale=0.35, size=(self.memory_size, qvel_dim))
self.terminals = np.zeros(self.memory_size, dtype=np.float32)
self.batch_size = batch_size
self.history_length = history_length
self.count = 0
self.current = 0
self.state_dim = state_dim
self.action_dim = action_dim
# pre-allocate prestates and poststates for minibatch
self.prestates = np.empty((self.batch_size, self.history_length) + state_dim, dtype=np.float32)
self.poststates = np.empty((self.batch_size, self.history_length) + state_dim, dtype=np.float32)
self.traj_length = 2
self.traj_states = np.empty((self.batch_size, self.traj_length) + state_dim, dtype=np.float32)
self.traj_actions = np.empty((self.batch_size, self.traj_length-1, action_dim), dtype=np.float32)
def add(self, actions, rewards, next_states, terminals, qposs=[], qvels = []):
# state is post-state, after action and reward
for idx in range(len(actions)):
self.actions[self.current, ...] = actions[idx]
self.rewards[self.current] = rewards[idx]
self.states[self.current, ...] = next_states[idx]
self.terminals[self.current] = terminals[idx]
if len(qposs) == len(actions):
self.qpos[self.current, ...] = qposs[idx]
self.qvel[self.current, ...] = qvels[idx]
self.count = max(self.count, self.current + 1)
self.current = (self.current + 1) % self.memory_size
def get_state(self, index):
assert self.count > 0, "replay memory is empty"
# normalize index to expected range, allows negative indexes
index = index % self.count
# if is not in the beginning of matrix
if index >= self.history_length - 1:
# use faster slicing
return self.states[(index - (self.history_length - 1)):(index + 1), ...]
else:
# otherwise normalize indexes and use slower list based access
indexes = [(index - i) % self.count for i in reversed(range(self.history_length))]
return self.states[indexes, ...]
def sample(self, indexes=None):
# memory must include poststate, prestate and history
assert self.count > self.history_length
if indexes is None:
# sample random indexes
indexes = []
while len(indexes) < self.batch_size:
# find random index
while True:
                    # sample one index (ignore states wrapping over the buffer boundary)
index = random.randint(self.history_length, self.count - 1)
# if wraps over current pointer, then get new one
if index >= self.current > index - self.history_length:
continue
# if wraps over episode end, then get new one
# poststate (last screen) can be terminal state!
if self.terminals[(index - self.history_length):index].any():
continue
# otherwise use this index
break
# having index first is fastest in C-order matrices
self.prestates[len(indexes), ...] = self.get_state(index - 1)
self.poststates[len(indexes), ...] = self.get_state(index)
indexes.append(index)
actions = self.actions[indexes, ...]
rewards = self.rewards[indexes, ...]
if hasattr(self, 'qpos'):
qpos = self.qpos[indexes, ...]
qvels = self.qvel[indexes, ...]
else:
qpos = []
qvels = []
terminals = self.terminals[indexes]
return np.squeeze(self.prestates, axis=1), actions, rewards, \
np.squeeze(self.poststates, axis=1), terminals, qpos, qvels
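if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module): the dimensions
    # below are illustrative and only serve to show the expected call pattern and
    # output shapes of add() and sample().
    er = ER(memory_size=1000, state_dim=(4,), action_dim=2, reward_dim=1,
            qpos_dim=3, qvel_dim=3, batch_size=32)
    for _ in range(200):
        er.add(actions=[np.zeros(2)], rewards=[0.0],
               next_states=[np.zeros(4)], terminals=[0])
    states, actions, rewards, next_states, terminals, qpos, qvels = er.sample()
    print(states.shape, actions.shape)  # (32, 4) (32, 2)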
|
from setuptools import setup, find_packages
DESCRIPTION = '''\
Bob is a suite of implementations of the Scheme language in Python'''
setup(
name='bobscheme',
description=DESCRIPTION,
author="Eli Bendersky",
author_email="eliben@gmail.com",
version='1.0.0',
packages=find_packages(),
license='PD',
entry_points={
"console_scripts": [
'bob = bob.cmd:main',
],
},
)
|
import json
from toggl.TogglPy import Toggl
from os import path, makedirs
import getopt
import sys
from datetime import datetime, timedelta
import logging
logger = logging.getLogger(__name__)
team_report_template = """
---
### Week of {0}
{1}
#### {2}"""
def user_weekly_report(directory, since, until, api_key, users, workspace):
"""
Downloads each users weekly reports as a summary and details pdf into their own directories.
:param workspace:
:param api_key: The toggl api key to use.
:param users: The dictionary are users with the key as the user id and the values as the full name.
:param directory: The root destination directory. Each user will have a sub-directory created for their reports
:param since: The start date in the form yyyy-MM-dd
:param until: the end data in the form yyyy-MM-dd. Files will be prepended with this date string.
:return: None
"""
logging.info("Downloading user weekly reports from {} until {} into {}".format(since, until, directory))
toggl = Toggl()
toggl.setAPIKey(api_key)
for uid, name in users.items():
logger.info("Downloading reports for {}".format(name))
name = users[uid]
data = {
'workspace_id': workspace, # see the next example for getting a workspace id
'since': since,
'until': until,
'user_ids': uid
}
folder = path.join(directory, name)
if not path.exists(folder):
logger.info("Creating the folder {}".format(folder))
makedirs(folder)
details = path.join(folder, until + "-details.pdf")
summary = path.join(folder, until + "-summary.pdf")
try:
toggl.getDetailedReportPDF(data, details)
logger.info("Downloaded {}".format(details))
toggl.getSummaryReportPDF(data, summary)
logger.info("Downloaded {}".format(summary))
except Exception as e:
            logger.error(e)
def team_weekly_report(destination, since, until, api_key, workspace):
logger.info("Downloading the team weekly report from {} until {} into {}".format(since, until, destination))
toggl = Toggl()
toggl.setAPIKey(api_key)
data = {
'workspace_id': workspace, # see the next example for getting a workspace id
'since': since,
'until': until
}
try:
result = toggl.getSummaryReport(data)
except Exception as e:
logger.error("Unable to download the team weekly data {}".format(e))
return
# Calculate hours and minutes
total_ms = result['total_grand']
if total_ms:
hours, minutes = divmod(total_ms / 1000 / 60, 60)
time_str = "Total team hours: {:.0f}h {:.0f}m".format(hours, minutes)
else:
time_str = "Total team hours: No hours recorded"
    # Find all projects worked on
items_worked_on = [item["title"]["time_entry"] for project in result["data"] for item in project["items"]]
if len(items_worked_on) == 0:
items_worked_on = ["No tasks worked on for this time period"]
    # Format a pretty date for the start of the week
date = datetime.strptime(since, "%Y-%m-%d")
formatted_week = date.strftime("%B %d")
formatted_items = "- " + "\n- ".join(items_worked_on)
formatted_team_report = team_report_template.format(formatted_week, formatted_items, time_str)
logger.info("Created team report:")
logger.info(formatted_team_report)
logger.info("Adding to team log file %s", destination)
with open(destination, "a") as report:
report.write(formatted_team_report)
logger.info("Done team report")
if __name__ == '__main__':
logger.setLevel(logging.INFO)
opts, args = getopt.getopt(sys.argv[1:], "hp:s:u:")
now = datetime.now()
f = ""
u = now.strftime("%Y-%m-%d")
s = (now - timedelta(days=7)).strftime("%Y-%m-%d")
for o, a in opts:
if o == "-p":
f = a
if o == "-s":
s = a
if o == "-u":
u = a
if o == "-h":
exit(0)
if not f:
f = path.realpath(path.join(path.dirname(path.realpath(__file__)), 'team-logs.md'))
env_file = path.join(path.dirname(path.realpath(__file__)), '.env')
with open(env_file) as ef:
env = json.load(ef)
# user_weekly_report(f, s, u, env["toggl_api_key"], env["toggl_users"], env["toggl_workspace"])
team_weekly_report(f, s, u, env["toggl_api_key"], env["toggl_workspace"])
|
# Generated by Django 2.1.7 on 2019-11-08 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("ui", "0025_preset_id_textfield"),
]
operations = [
migrations.AddField(
model_name="collection",
name="schedule_retranscode",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="video",
name="schedule_retranscode",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="video",
name="status",
field=models.CharField(
choices=[
("Created", "Created"),
("Uploading", "Uploading"),
("Upload failed", "Upload failed"),
("Transcoding", "Transcoding"),
(
"Transcode failed internal error",
"Transcode failed internal error",
),
("Transcode failed video error", "Transcode failed video error"),
("Retranscoding", "Retranscoding"),
("Retranscode failed error", "Retranscode failed error"),
("Retranscode scheduled", "Retranscode scheduled"),
("Complete", "Complete"),
("Error", "Error"),
],
default="Created",
max_length=50,
),
),
]
|
import typing as t
from .client import Client
from .lexicon import Lex
from .local import LocalClient
from .objects import *
from .query import exp, Expression
__title__ = "valorant"
__author__ = "frissyn"
__doc__ = "Complete Python interface for the Valorant API. Works right out of the box!"
__all__ = ["Client", "exp", "Expression", "LocalClient", "Lex"]
class Version(t.NamedTuple):
major: int
minor: int
micro: int
release: t.Literal["alpha", "beta", "dev"]
version_info = Version(major=1, minor=0, micro=2, release="")
if not version_info.release:
tag = ""
else:
tag = "-" + version_info.release
__version__ = ".".join(str(i) for i in version_info[:3]) + tag
|
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from maya import cmds
import os
import sys
import unittest
class testProxyShapeDrawLighting(unittest.TestCase):
@classmethod
def setUpClass(cls):
# The test USD data is authored Z-up, so make sure Maya is configured
# that way too.
cmds.upAxis(axis='z')
cls._testDir = os.path.abspath('.')
cls._testRootNodeName = 'ProxyShapeDrawLightingTest'
cls._testSceneName = '%s.ma' % cls._testRootNodeName
cls._testSceneFullPath = os.path.abspath(cls._testSceneName)
cls._nativeNodePathName = '|%s|Native' % cls._testRootNodeName
cls._nativeTorusPathName = '%s|Torus' % cls._nativeNodePathName
cls._nativePlanePathName = '%s|Plane' % cls._nativeNodePathName
cls._hydraNodePathName = '|%s|Hydra' % cls._testRootNodeName
cls._hydraTorusPathName = '%s|Torus' % cls._hydraNodePathName
cls._hydraPlanePathName = '%s|Plane' % cls._hydraNodePathName
cls._spotLightNameFormat = 'SpotLight_%d'
cls._directionalLightNameFormat = 'DirectionalLight_%d'
cls._numberOfLights = 4
cls._cameraName = 'MainCamera'
@classmethod
def _HideNativeGeometry(cls):
cmds.hide(cls._nativeTorusPathName)
cmds.hide(cls._nativePlanePathName)
@classmethod
def _HideHydraGeometry(cls):
cmds.hide(cls._hydraTorusPathName)
cmds.hide(cls._hydraPlanePathName)
@classmethod
def _HideAllLights(cls):
for i in range(1, cls._numberOfLights + 1):
cmds.hide(cls._spotLightNameFormat % i)
cmds.hide(cls._directionalLightNameFormat % i)
@classmethod
def _ShowSpotLight(cls, lightIndex):
cmds.showHidden(cls._spotLightNameFormat % lightIndex)
@classmethod
def _ShowDirectionalLight(cls, lightIndex):
cmds.showHidden(cls._directionalLightNameFormat % lightIndex)
@classmethod
def _SetShadowsEnabled(cls, enabled):
for i in range(1, cls._numberOfLights + 1):
cmds.setAttr(
'%sShape.useDepthMapShadows' % cls._spotLightNameFormat % i,
enabled)
cmds.setAttr(
'%sShape.useDepthMapShadows' % cls._directionalLightNameFormat % i,
enabled)
def setUp(self):
# To control where the rendered images are written, we force Maya to
# use the test directory as the workspace.
cmds.workspace(self._testDir, o=True)
cmds.file(self._testSceneFullPath, open=True, force=True)
def _WriteViewportImage(self, outputImageName, suffix):
# Make sure the hardware renderer is available
MAYA_RENDERER_NAME = 'mayaHardware2'
mayaRenderers = cmds.renderer(query=True, namesOfAvailableRenderers=True)
self.assertIn(MAYA_RENDERER_NAME, mayaRenderers)
# Make it the current renderer.
cmds.setAttr('defaultRenderGlobals.currentRenderer', MAYA_RENDERER_NAME,
type='string')
# Set the image format to PNG.
cmds.setAttr('defaultRenderGlobals.imageFormat', 32)
# Set the render mode to shaded and textured.
cmds.setAttr('hardwareRenderingGlobals.renderMode', 4)
# Specify the output image prefix. The path to it is built from the
# workspace directory.
cmds.setAttr('defaultRenderGlobals.imageFilePrefix',
'%s_%s' % (outputImageName, suffix),
type='string')
# Apply the viewer's color transform to the rendered image, otherwise
# it comes out too dark.
cmds.setAttr('defaultColorMgtGlobals.outputTransformEnabled', 1)
# Do the render.
cmds.ogsRender(camera=self._cameraName, currentFrame=True, width=960,
height=540)
def _RenderEachLight(self, enableShadows=False):
self._SetShadowsEnabled(enableShadows)
for i in range(1, self._numberOfLights + 1):
self._HideAllLights()
self._ShowSpotLight(i)
self._WriteViewportImage(self._testName, 'Spot_%d' % i)
self._HideAllLights()
self._ShowDirectionalLight(i)
self._WriteViewportImage(self._testName, 'Directional_%d' % i)
def testMayaNativeTorusAndPlane(self):
"""
Tests performing a hardware render of Maya native geometry.
An image is generated for each light in isolation.
"""
self._testName = 'MayaNativeTorusAndPlane'
self._HideHydraGeometry()
self._RenderEachLight(enableShadows=False)
def testMayaNativeTorusAndPlaneWithShadows(self):
"""
Tests performing a hardware render of Maya native geometry with shadows.
An image is generated for each light in isolation.
"""
self._testName = 'MayaNativeTorusAndPlaneWithShadows'
self._HideHydraGeometry()
self._RenderEachLight(enableShadows=True)
def testHydraTorusAndPlane(self):
"""
Tests performing a hardware render of Hydra-drawn geometry.
Hydra draws USD proxy shapes that reference USD produced by exporting
the Maya native geometry.
An image is generated for each light in isolation.
"""
self._testName = 'HydraTorusAndPlane'
self._HideNativeGeometry()
self._RenderEachLight(enableShadows=False)
def testHydraTorusAndPlaneWithShadows(self):
"""
Tests performing a hardware render of Hydra-drawn geometry with shadows.
Hydra draws USD proxy shapes that reference USD produced by exporting
the Maya native geometry.
An image is generated for each light in isolation.
"""
self._testName = 'HydraTorusAndPlaneWithShadows'
self._HideNativeGeometry()
self._RenderEachLight(enableShadows=True)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(
testProxyShapeDrawLighting)
results = unittest.TextTestRunner(stream=sys.stdout).run(suite)
if results.wasSuccessful():
exitCode = 0
else:
exitCode = 1
# maya running interactively often absorbs all the output. comment out the
# following to prevent maya from exiting and open the script editor to look
# at failures.
cmds.quit(abort=True, exitCode=exitCode)
|
"""
Your task is to complete the implementation of the functions below, which do some kind of
manipulation of linked lists (that is, you need to use the LinkedList class defined in the p0 session)
"""
from m03_sequences.p0.linked_list import LinkedList
from m03_sequences.p0.linked_list import Node
def find(L, e):
"""
This function should look for an element in a linked list and prints on the console the result of the search
e.g. "Element not found in the list", "Element X found in the list at position Y"
:param L: the list
:param e: the element to look for
"""
def search(node, pos):
if node == None:
return "Element not found in the list"
if node._data == e:
return "Element %s found in the list at position %d." % (str(e), pos)
return search(node._next, pos + 1)
print(search(L._head, 0))
def cat(L1,L2):
"""
This function should append the content of one list L2 at the end of the list L1. in other words, this
function "concatenates" L1 and L2.
(Therefore, L1 is modified as an effect of the execution of this function)
Example: L1 = 10 -> 20 -> 5 ; L2 = 89 -> 56 -> 80
After executiing the function, it will be: L1 = 10 -> 20 -> 5 -> 89 -> 56 -> 80 ; L2 = 89 -> 56 -> 80
"""
    if L1._head is None:
        L1._head = L2._head
        return
    head = L1._head
    while head._next is not None:
        head = head._next
    head._next = L2._head
def copy(L):
"""
This function returns a new list which is a copy of the list L.
This function returns a "deep" copy of the list L, that is, a new list whose element are the same
as L (and in the same order).
Hint for the implementation: The LinkedList allows to add elements only at the "head" of a linked list...so...
:param L: the list to copy
:return: a "deep" copy of the list L
"""
C = LinkedList()
root = Node(0)
head = root
node = L._head
while node != None:
head._next = Node(node._data)
head = head._next
node = node._next
C._head = root._next
return C
def copy_and_cat(L1,L2):
"""
This function should return a new list that is the concatenation of two lists L1 and L2.
That is, the lists L1 and L2 should NOT be modified by this function
(Hint: make a "deep copy" of the two lists and build the returned lists using these copies...)
:param L1:
:param L2:
:return: a list containing the concatenation of L1 and L2
"""
C1 = copy(L1)
C2 = copy(L2)
cat(C1, C2)
return C1
def len_recursive(node):
"""
This function should calculate the length of a linked list recursively
:param node: the head of the list
:return: the length of the list
"""
if node == None:
return 0
return 1 + len_recursive(node._next)
""" main() to do some testing"""
if __name__ == '__main__':
L1 = LinkedList()
L2 = LinkedList()
L1.add_first("Jin")
L1.add_first("Jun")
L1.add_first("Jan")
L1.print()
L2.add_first(118)
L2.add_first(115)
L2.add_first(89)
L2.add_first(87)
L2.print()
find(L1,"Jin")
find(L1, 908)
cat(L1,L2)
L1.print()
len_l2 = len_recursive(L2._head)
print("Length of list is: {0}".format(len_l2))
print("Length of list is: {0}".format(len_recursive(L1._head)))
print("""Test deep copy and copy_cat""")
L3 = copy(L1)
L3.print()
L3.add_first("new")
L3.print()
L1.print()
L4 = copy_and_cat(L1,L3)
L4.print()
|
from concurrent.futures.thread import ThreadPoolExecutor
import os
from pydantic import BaseSettings
from rq import Queue
import redis as rd
import logging
logger = logging.getLogger("api")
# Redis Queue for model-prediction jobs
redis = rd.Redis(host="redis", port=6379)
prediction_queue = Queue(os.getenv('NAME'), connection=redis)
class Settings(BaseSettings):
ready_to_predict = False
model_settings = Settings()
image_map = {}
class PredictionException(Exception):
pass
connected = False
shutdown = False
pool = ThreadPoolExecutor(10)
WAIT_TIME = 10
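# Hypothetical usage sketch (the worker function path below is illustrative and
# not defined in this module): jobs are pushed onto the Redis queue and picked up
# by an rq worker.
# job = prediction_queue.enqueue("worker.predict_image", image_id)
# logger.info("queued prediction job %s", job.id)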
|
# Copyright (C) 2014 Andrea Biancini <andrea.biancini@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Andrea Biancini <andrea.biancini@gmail.com>'
"""
Kruskal's algorithm for minimum spanning trees. D. Eppstein, April 2006.
"""
from ..link import Link
parent = dict()
rank = dict()
def _make_set(vertex):
parent[vertex] = vertex
rank[vertex] = 0
def _find(vertex):
if parent[vertex] != vertex:
parent[vertex] = _find(parent[vertex])
return parent[vertex]
def _union(vertex1, vertex2):
root1 = _find(vertex1)
root2 = _find(vertex2)
if root1 != root2:
if rank[root1] > rank[root2]:
parent[root2] = root1
else:
parent[root1] = root2
if rank[root1] == rank[root2]:
rank[root2] += 1
def kruskal(graph):
for vertice in graph['vertices']:
_make_set(vertice)
minimum_spanning_tree = set()
edges = list(graph['edges'])
edges.sort()
for edge in edges:
weight, vertice1, vertice2 = edge
if _find(vertice1) != _find(vertice2):
_union(vertice1, vertice2)
minimum_spanning_tree.add(edge)
return minimum_spanning_tree
def perform(topo_edges):
vertices = []
edges = []
for edge in topo_edges:
vertices.append(edge.src)
vertices.append(edge.dst)
edges.append((edge.cost, edge.src, edge.dst))
graph = { 'vertices': list(set(vertices)), 'edges': set(edges) }
mst = kruskal(graph)
links = []
for curedge in topo_edges:
if (curedge.cost, curedge.src, curedge.dst) in mst or \
(curedge.cost, curedge.dst, curedge.src) in mst:
links.append(curedge)
return links
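# Hypothetical usage sketch (illustrative only, not part of the original module):
# kruskal() takes a graph in the {'vertices': [...], 'edges': {(weight, u, v), ...}}
# form built by perform() above.
#
# example_graph = {
#     'vertices': ['A', 'B', 'C', 'D'],
#     'edges': set([(1, 'A', 'B'), (5, 'A', 'C'), (3, 'B', 'C'), (4, 'C', 'D')]),
# }
# kruskal(example_graph)  # -> {(1, 'A', 'B'), (3, 'B', 'C'), (4, 'C', 'D')}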
|
import argparse
import string
import re
import docx
REGEX = r'\([a-zA-Z][^)\[\]=]*[0-9]\)'
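# e.g. REGEX matches in-text citations such as "(Bartram & Casimir, 2007)" or
# "(Lowe, Kroeck, & Sivasubramaniam, 1996)" while skipping parenthesised spans
# that contain '=', '[' or ']'.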
EXCLUDE = set(string.punctuation) - set('-')
def find_reference_for_cite_regex(cite_regex):
for reference_id, reference in enumerate(references, 0):
result = re.findall(cite_regex, reference)
if result:
assert len(result) == 1
return reference_id, reference
def get_references_as_lines(docx_path):
doc = docx.Document(docx_path)
go = False
result = list()
for p in doc.paragraphs:
if p.text == 'References':
go = True
elif go and p.text.startswith('Table'):
go = False
if go:
result.append(p.text)
return result
def get_text_as_string(docx_path):
doc = docx.Document(docx_path)
go = True
result = list()
for p in doc.paragraphs:
if p.text == 'References':
go = False
if go:
result.append(p.text)
return " ".join(result)
class CitationResult(object):
def __init__(self, citation_in_text, citation, regex, reference):
self.citation_in_text = citation_in_text
self.citation = citation
self.reference = reference
self.regex = regex
self.found = self.reference != None
def __repr__(self):
return (
"{found}\n"
"Citation: {citation}\n"
"Citation In Text: {citation_in_text}\n"
"Reference: {reference}\n"
).format(found="FOUND" if self.found else "NOT FOUND",
citation=self.citation,
citation_in_text=self.citation_in_text,
reference=self.reference)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='docx_path')
args = parser.parse_args()
docx_path = args.docx_path
result = list()
references = get_references_as_lines(docx_path)
found_reference_ids = set()
text = get_text_as_string(docx_path)
citations = re.findall(REGEX, text)
for citation in citations:
cleaned_citation = citation.replace('e.g., ', '')\
.replace('i.e., ', '')\
.replace('see ', '')
try:
cites = cleaned_citation.split(';')
for index, cite in enumerate(cites, 1):
cite = cite.replace('(', '').replace(')', '').strip()
if ' et al' in cite:
name = cite.split(' et al., ')[0].strip()
year = cite.split(' et al., ')[1].strip()
regex = '{}.*\({}\).*'.format(name, year)
reference_index, reference = find_reference_for_cite_regex(regex)
found_reference_ids.add(reference_index)
result.append(CitationResult(citation_in_text=citation,
citation=cite,
regex=regex,
reference=reference))
else:
names_and_year = cite.split(',')
if len(names_and_year) == 2:
# Bartram & Casimir, 2007
year = names_and_year[1].strip()
names = names_and_year[0].split('&')
name_regex = ''.join(['{}.*'.format(n.strip()) for n in names])
regex = name_regex + '\({}\)'.format(year)
reference_index, reference = find_reference_for_cite_regex(regex)
found_reference_ids.add(reference_index)
result.append(CitationResult(citation_in_text=citation,
citation=cite,
regex=regex,
reference=reference))
else:
# Lowe, Kroeck, & Sivasubramaniam, 1996
names_and_year[-2] = names_and_year[-2].replace('&', '')
names = names_and_year[:-1]
year = names_and_year[-1].strip()
name_regex = ''.join(['{}.*'.format(n.strip()) for n in names])
regex = name_regex + '\({}\)'.format(year)
reference_index, reference = find_reference_for_cite_regex(regex)
found_reference_ids.add(reference_index)
result.append(CitationResult(citation_in_text=citation,
citation=cite,
regex=regex,
reference=reference))
except Exception as e:
result.append(CitationResult(citation_in_text=citation,
citation=None,
regex=None,
reference=None))
for r in result:
if not r.found:
print(r)
    obsolete_reference_ids = set(range(len(references))) - found_reference_ids
    if obsolete_reference_ids:
        print()
        print("*"*30)
        print(len(obsolete_reference_ids), "REFERENCES ARE NOT CITED")
        print("*"*30)
        for i in obsolete_reference_ids:
print()
print("REFERENCE NO.", i, "SEEMS NOT TO BE CITED.")
print(references[i])
|
import uno
from com.sun.star.awt.MessageBoxType import MESSAGEBOX, INFOBOX, WARNINGBOX, ERRORBOX, QUERYBOX
from com.sun.star.awt.MessageBoxButtons import BUTTONS_OK, BUTTONS_OK_CANCEL, BUTTONS_YES_NO, BUTTONS_YES_NO_CANCEL, BUTTONS_RETRY_CANCEL, BUTTONS_ABORT_IGNORE_RETRY
from com.sun.star.awt.MessageBoxResults import OK, YES, NO, CANCEL
def MsgBox(doc, msgText, msgTitle, msgType, msgButtons):
"""
Parameter:
msgText : Der Text in der MessageBox
msgTitel : Text der Titelleiste
msgType : MESSAGEBOX = 0
INFOBOX = 1
WARNINGBOX = 2
ERRORBOX = 3
QUERYBOX = 4
msgButtons: BUTTONS_OK = 0
BUTTONS_OK_CANCEL = 1
BUTTONS_YES_NO = 2
BUTTONS_YES_NO_CANCEL = 3
BUTTONS_RETRY_CANCEL = 4
BUTTONS_ABORT_IGNORE_RETRY = 5
Rückgabewerte: CANCEL = 0
OK = 1
YES = 2
NO = 3
RETRY = 4
IGNORE = 5
"""
ctx = uno.getComponentContext()
smgr = ctx.ServiceManager
toolkit = smgr.createInstanceWithContext('com.sun.star.awt.Toolkit', ctx)
parentWin = doc.CurrentController.Frame.ContainerWindow
mBox = toolkit.createMessageBox(parentWin, msgType, msgButtons, msgTitle, msgText)
return mBox.execute()
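# Hypothetical usage sketch (only runs inside a LibreOffice Python macro, where
# the XSCRIPTCONTEXT global is available; the text shown is illustrative):
# def show_info(*args):
#     doc = XSCRIPTCONTEXT.getDocument()
#     return MsgBox(doc, 'Hello from Python', 'Info', INFOBOX, BUTTONS_OK)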
|
import re
import requests
# Regexr: https://regexr.com/
# (cool site I use to real-time hack with regular expressions)
# List of URL's that I want to Regular Expression test:
urls = [
"https://cdn.shopify.com/s/files/1/0021/6504/7351/products/trick-worm-cherry-seed_608x544.jpg?v=1531854672",
"https://cdn.shopify.com/s/files/1/0021/6504/7351/products/trick-worm-cotton-candy_576x544.jpg?v=1531854672",
"https://cdn.shopify.com/s/files/1/0326/6501/products/s-l1600_072c97c9-5dc0-4f44-bd32-779f61ebb566.jpg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/s-l1600_528d332c-50bf-42cb-8095-0ae8c632587d.jpg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/sunfish_1e583ab5-7011-4a85-9894-2d061337ba20.jpg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/yellow_perch_1a63bf6e-1b22-4f7e-9610-4ee6903be355.jpg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/Green_Pumpkin_d024ccf3-349e-4b75-b8b5-97f7cf18156d.jpg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/mo_obsession.png",
"https://cdn.shopify.com/s/files/1/0021/6504/7351/products/12476921905207_448x448.jpg?v=1535733596",
"https://cdn.shopify.com/s/files/1/0021/6504/7351/products/12476921970743_448x448.jpg?v=1535733596",
"https://cdn.shopify.com/s/files/1/0326/6501/products/KGrHqN_p8FC4Jn_EklBRrML9vqrQ_60_1.jpeg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/spro-bbz-1-swimbait-6-floating-blue-black-herring_zoom.jpeg",
"https://cdn.shopify.com/s/files/1/0326/6501/products/gary-yamamoto-shad-shape-worm-green-pumpkin-watermelon-laminate_zoom.jpg"
]
def get_optimal_shopify_url(url, verify=0):
# The magic only works on Shopify URL's
if 'cdn.shopify.com' not in url:
return url
# Clean off the variant URL parameter
new_url = re.sub(r'\?v=([0-9]*)', '', url)
# Build a regex for finding built in dimension URL's
regexp_dims = re.compile(r'_([0-9]*)x([0-9]*)\.')
# If we find any, then substitute for the 1024x1024
if regexp_dims.search(url):
new_url = re.sub(r'_([0-9]*)x([0-9]*)\.', '_1024x1024.', new_url)
# If we don't, add the 1024x1024 to the URL
else:
if ".jpg" in new_url:
new_url = re.sub(r'.jpg', '_1024x1024.jpg', new_url)
if ".jpeg" in new_url:
new_url = re.sub(r'.jpeg', '_1024x1024.jpeg', new_url)
if ".png" in new_url:
new_url = re.sub(r'.png', '_1024x1024.png', new_url)
# If we request to verify the URL, and it turns out the new URL doesn't work, just return the old one.
if verify == 1:
r = requests.get(new_url)
if r.status_code != 200:
return url
return new_url
for url in urls:
start_url = url
print("URL: {}".format(url))
new_url = get_optimal_shopify_url(url,1)
print("URL: {}".format(new_url))
print("")
|
#!/usr/bin/env python3
"""
script for downloading genomes from NCBI
"""
# python modules
import os
import sys
import argparse
import pandas as pd
from tqdm import tqdm
from subprocess import Popen
from glob import glob as glob
from multiprocessing import Pool
from subprocess import Popen, PIPE
def calcMD5(path, md5):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = [md5, path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
line = line.decode('ascii').strip().split()
# check if `md5` output
if line[0] == 'MD5':
yield line[-1]
# else assume md5sum output
else:
yield line[0]
p.wait()
yield False
def md5check(f, ftp, md5, md5p, exclude):
"""
* comfirm that downloaded files match md5 checksum on server
* if md5 is False, only check path for the download
"""
files = glob(f)
# if no md5 file is specified: download files if path does not exist
if md5 is False:
if len(files) == 0:
return False
print('## already downloaded:', f)
return True
# get md5s from server
## path to md5 file on ftp server
md5 = '%s/%s' % (ftp.rsplit('/', 1)[0], md5)
## read md5 table from server
try:
md5 = pd.read_csv(md5, delim_whitespace = True, names = ['ftp md5', 'file'])
except:
return False
## filter for md5 files that match file type
t = f.split('*')[1]
md5 = md5[md5['file'].str.contains(t)]
## remove preceding characters from file paths
md5['file'] = [i.replace('./', '') for i in md5['file']]
## exclude md5s for sub directories
md5 = md5[~md5['file'].str.contains('/')]
## exclude files
md5 = md5[~md5['file'].str.contains(exclude.replace('*', ''))]
# get local md5s
md5['local md5'] = [[j for j in calcMD5(i, md5p)][0] for i in md5['file']]
# return false if md5s do not match
for i, File in md5.iterrows():
if File['ftp md5'] != File['local md5']:
try:
os.remove(File['file'])
return False
except:
return False
print('## already downloaded:', f)
return True
def wget(ftp, f = False, exclude = False, name = False,
md5 = False, md5p = 'md5sum', tries = 10):
"""
download files with wget
"""
# file name
if f is False:
f = ftp.rsplit('/', 1)[-1]
# downloaded file if it does not already exist
# check md5s on server (optional)
t = 0
while md5check(f, ftp, md5, md5p, exclude) is not True:
t += 1
if name is not False:
print('# downloading:', name, f)
if exclude is False:
command = 'wget -q --random-wait %s' % (ftp)
else:
command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
p = Popen(command, shell = True)
p.communicate()
if t >= tries:
print('not downloaded:', name, f)
return [f, False]
return [f, True]
def check(line, queries):
"""
check that at least one of
queries is in list, l
"""
line = line.strip()
spLine = line.replace('.', ' ').split()
matches = set(spLine).intersection(queries)
if len(matches) > 0:
return matches, line.split('\t')
return matches, False
def entrez(db, acc):
"""
search entrez using specified database
and accession
"""
c1 = ['esearch', '-db', db, '-query', acc]
c2 = ['efetch', '-db', 'BioSample', '-format', 'docsum']
p1 = Popen(c1, stdout = PIPE, stderr = PIPE)
p2 = Popen(c2, stdin = p1.stdout, stdout = PIPE, stderr = PIPE)
return p2.communicate()
def searchAccession(acc):
"""
attempt to use NCBI Entrez to get
BioSample ID
"""
# try genbank file
# genome database
out, error = entrez('genome', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# nucleotide database
out, error = entrez('nucleotide', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# assembly database
out, error = entrez('assembly', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
for error in error.splitlines():
error = error.decode('ascii').strip()
if '500 Can' in error:
return (False, acc, 'no network')
return (False, acc, 'efetch failed')
def getFTPs(accessions, ftp, search, exclude, convert = False, threads = 1, attempt = 1,
max_attempts = 2):
"""
download genome info from NCBI
"""
info = wget(ftp)[0]
allMatches = []
for genome in open(info, encoding = 'utf8'):
genome = str(genome)
matches, genomeInfo = check(genome, accessions)
if genomeInfo is not False:
f = genomeInfo[0] + search
Gftp = genomeInfo[19]
Gftp = Gftp + '/' + search
allMatches.extend(matches)
yield [Gftp, f, exclude, matches]
# print accessions that could not be matched
# and whether or not they could be converted (optional)
newAccs = []
missing = accessions.difference(set(allMatches))
if convert is True:
pool = Pool(threads)
pool = pool.imap_unordered(searchAccession, missing)
for newAcc in tqdm(pool, total = len(missing)):
status, accession, newAcc = newAcc
if status is True:
newAccs.append(newAcc)
print('not found:', accession, '->', newAcc)
else:
for accession in missing:
print('not found:', accession)
# re-try after converting accessions (optional)
if len(newAccs) > 0 and attempt <= max_attempts:
print('convert accession attempt', attempt)
attempt += 1
for hit in getFTPs(set(newAccs), ftp, search, exclude, convert,
threads = 1, attempt = attempt):
yield hit
def wgetGenome(pars, md5 = 'md5checksums.txt'):
"""
"""
ftp, f, exclude, matches, md5p = pars
name = ';'.join(list(matches))
return wget(ftp, f, exclude, name, md5 = md5, md5p = md5p)
def download(args):
"""
download genomes from NCBI
"""
accessions, infoFTP = set(args['g']), args['i']
search, exclude, md5p = args['s'], args['e'], args['m']
FTPs = getFTPs(accessions, infoFTP, search, exclude, threads = args['t'],
convert = args['convert'])
FTPs = [ftp + [md5p] for ftp in FTPs]
if args['test'] is True:
for genome in FTPs:
print('found:', ';'.join(genome[-1]), genome[0])
return FTPs
pool = Pool(args['t'])
pool = pool.imap_unordered(wgetGenome, FTPs)
files = []
for f in tqdm(pool, total = len(accessions)):
files.append(f)
return files
if __name__ == '__main__':
ftp = 'ftp://ftp.ncbi.nih.gov/genomes/genbank/assembly_summary_genbank.txt'
parser = argparse.ArgumentParser(description='# download genomes from NCBI')
parser.add_argument(\
'-g', nargs = '*', action = 'store',
required = True, help = 'list of genome accession numbers (- for stdin)')
parser.add_argument(\
'-s', default = '*.fna.gz',
required = False, help = 'search term for download (default = "*.fna.gz")')
parser.add_argument(\
'-e', default = '*from_genomic*',
required = False,
help = 'search exclusion term, or False (default = "*from_genomic*")')
parser.add_argument(\
'-i', default = ftp,
required = False, help = 'genome info FTP (default: %s)' % (ftp))
parser.add_argument(\
'-m', default = 'md5sum', type = str,
required = False, help = 'md5 program (default = md5sum, md5 on Mac)')
parser.add_argument(\
'-t', default = 3, type = int,
required = False, help = 'threads (default = 3)')
parser.add_argument(\
'--convert', action = 'store_true', required = False,
help = 'convert missing accessions using Entrez Direct (slow; requires `esearch` and `efetch`)')
parser.add_argument(\
'--test', action = 'store_true', required = False,
help = 'look for genomes, but do not download them')
args = vars(parser.parse_args())
if args['e'] == 'False' or args['e'] == 'FALSE':
args['e'] = False
if args['g'][0] == '-':
args['g'] = [i.strip() for i in sys.stdin]
print('# downloading genome info:', args['i'])
download(args)
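# Hedged usage example (not part of the original script; the filename below is
# assumed, since it is not shown here):
#   python ncbi_download.py -g GCA_000005845.2 --test
# would download assembly_summary_genbank.txt, report the matching FTP path
# for the accession, and skip the actual genome download.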
|
"""
Linear Regression model
-----------------------
A forecasting model using a linear regression of some of the target series' lags, as well as optionally some
covariate series' lags in order to obtain a forecast.
"""
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from scipy.optimize import linprog
from sklearn.linear_model import LinearRegression, PoissonRegressor, QuantileRegressor
from darts.logging import get_logger
from darts.models.forecasting.regression_model import RegressionModel, _LikelihoodMixin
from darts.timeseries import TimeSeries
logger = get_logger(__name__)
class LinearRegressionModel(RegressionModel, _LikelihoodMixin):
def __init__(
self,
lags: Union[int, list] = None,
lags_past_covariates: Union[int, List[int]] = None,
lags_future_covariates: Union[Tuple[int, int], List[int]] = None,
output_chunk_length: int = 1,
likelihood: str = None,
quantiles: List[float] = None,
random_state: Optional[int] = None,
**kwargs,
):
"""Linear regression model.
Parameters
----------
lags
Lagged target values used to predict the next time step. If an integer is given the last `lags` past lags
are used (from -1 backward). Otherwise a list of integers with lags is required (each lag must be < 0).
lags_past_covariates
Number of lagged past_covariates values used to predict the next time step. If an integer is given the last
`lags_past_covariates` past lags are used (inclusive, starting from lag -1). Otherwise a list of integers
with lags < 0 is required.
lags_future_covariates
            Number of lagged future_covariates values used to predict the next time step. If a tuple (past, future) is
given the last `past` lags in the past are used (inclusive, starting from lag -1) along with the first
`future` future lags (starting from 0 - the prediction time - up to `future - 1` included). Otherwise a list
of integers with lags is required.
output_chunk_length
Number of time steps predicted at once by the internal regression model. Does not have to equal the forecast
horizon `n` used in `predict()`. However, setting `output_chunk_length` equal to the forecast horizon may
be useful if the covariates don't extend far enough into the future.
likelihood
            Can be set to `quantile` or `poisson`. If set, the model will be probabilistic, allowing sampling at
prediction time. If set to `quantile`, the `sklearn.linear_model.QuantileRegressor` is used. Similarly, if
set to `poisson`, the `sklearn.linear_model.PoissonRegressor` is used.
quantiles
Fit the model to these quantiles if the `likelihood` is set to `quantile`.
random_state
            Control the randomness of the sampling. Used as a seed for the
            `numpy.random.Generator <https://numpy.org/doc/stable/reference/random/generator.html#numpy.random.Generator>`_ .
            Ignored when no `likelihood` is set.
Default: ``None``.
**kwargs
Additional keyword arguments passed to `sklearn.linear_model.LinearRegression` (by default), to
`sklearn.linear_model.PoissonRegressor` (if `likelihood="poisson"`), or to
`sklearn.linear_model.QuantileRegressor` (if `likelihood="quantile"`).
"""
self.kwargs = kwargs
self._median_idx = None
self._model_container = None
self.quantiles = None
self.likelihood = likelihood
self._rng = None
# parse likelihood
available_likelihoods = ["quantile", "poisson"] # to be extended
if likelihood is not None:
self._check_likelihood(likelihood, available_likelihoods)
self._rng = np.random.default_rng(seed=random_state)
if likelihood == "poisson":
model = PoissonRegressor(**kwargs)
if likelihood == "quantile":
model = QuantileRegressor(**kwargs)
self.quantiles, self._median_idx = self._prepare_quantiles(quantiles)
self._model_container = self._get_model_container()
else:
model = LinearRegression(**kwargs)
super().__init__(
lags=lags,
lags_past_covariates=lags_past_covariates,
lags_future_covariates=lags_future_covariates,
output_chunk_length=output_chunk_length,
model=model,
)
def __str__(self):
return f"LinearRegression(lags={self.lags})"
def fit(
self,
series: Union[TimeSeries, Sequence[TimeSeries]],
past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
max_samples_per_ts: Optional[int] = None,
n_jobs_multioutput_wrapper: Optional[int] = None,
**kwargs,
):
"""
Fit/train the model on one or multiple series.
Parameters
----------
series
TimeSeries or Sequence[TimeSeries] object containing the target values.
past_covariates
Optionally, a series or sequence of series specifying past-observed covariates
future_covariates
Optionally, a series or sequence of series specifying future-known covariates
max_samples_per_ts
This is an integer upper bound on the number of tuples that can be produced
per time series. It can be used in order to have an upper bound on the total size of the dataset and
ensure proper sampling. If `None`, it will read all of the individual time series in advance (at dataset
creation) to know their sizes, which might be expensive on big datasets.
If some series turn out to have a length that would allow more than `max_samples_per_ts`, only the
most recent `max_samples_per_ts` samples will be considered.
n_jobs_multioutput_wrapper
Number of jobs of the MultiOutputRegressor wrapper to run in parallel. Only used if the model doesn't
support multi-output regression natively.
**kwargs
Additional keyword arguments passed to the `fit` method of the model.
"""
if self.likelihood == "quantile":
# empty model container in case of multiple calls to fit, e.g. when backtesting
self._model_container.clear()
# set solver for linear program
if "solver" not in self.kwargs:
# set default fast solver
self.kwargs["solver"] = "highs"
# test solver availability with dummy problem
c = [1]
try:
linprog(c=c, method=self.kwargs["solver"])
except ValueError as ve:
logger.warning(
f"{ve}. Upgrading scipy enables significantly faster solvers"
)
# set solver to slow legacy
self.kwargs["solver"] = "interior-point"
for quantile in self.quantiles:
self.kwargs["quantile"] = quantile
self.model = QuantileRegressor(**self.kwargs)
super().fit(
series=series,
past_covariates=past_covariates,
future_covariates=future_covariates,
max_samples_per_ts=max_samples_per_ts,
**kwargs,
)
self._model_container[quantile] = self.model
return self
else:
super().fit(
series=series,
past_covariates=past_covariates,
future_covariates=future_covariates,
max_samples_per_ts=max_samples_per_ts,
**kwargs,
)
return self
def predict(
self,
n: int,
series: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
num_samples: int = 1,
**kwargs,
) -> Union[TimeSeries, Sequence[TimeSeries]]:
"""Forecasts values for `n` time steps after the end of the series.
Parameters
----------
n : int
Forecast horizon - the number of time steps after the end of the series for which to produce predictions.
series : TimeSeries or list of TimeSeries, optional
Optionally, one or several input `TimeSeries`, representing the history of the target series whose future
is to be predicted. If specified, the method returns the forecasts of these series. Otherwise, the method
returns the forecast of the (single) training series.
past_covariates : TimeSeries or list of TimeSeries, optional
Optionally, the past-observed covariates series needed as inputs for the model.
They must match the covariates used for training in terms of dimension and type.
future_covariates : TimeSeries or list of TimeSeries, optional
Optionally, the future-known covariates series needed as inputs for the model.
They must match the covariates used for training in terms of dimension and type.
num_samples : int, default: 1
            Specifies the number of samples to obtain from the model. Should be set to 1 if no `likelihood` is specified.
**kwargs : dict, optional
Additional keyword arguments passed to the `predict` method of the model. Only works with
univariate target series.
"""
if self.likelihood == "quantile":
model_outputs = []
for quantile, fitted in self._model_container.items():
self.model = fitted
prediction = super().predict(
n, series, past_covariates, future_covariates, **kwargs
)
model_outputs.append(prediction.all_values(copy=False))
model_outputs = np.concatenate(model_outputs, axis=-1)
samples = self._sample_quantiles(model_outputs, num_samples)
# build timeseries from samples
return self._ts_like(prediction, samples)
elif self.likelihood == "poisson":
prediction = super().predict(
n, series, past_covariates, future_covariates, **kwargs
)
samples = self._sample_poisson(
np.array(prediction.all_values(copy=False)), num_samples
)
# build timeseries from samples
return self._ts_like(prediction, samples)
else:
return super().predict(
n, series, past_covariates, future_covariates, num_samples, **kwargs
)
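# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): fitting a
# probabilistic LinearRegressionModel with a quantile likelihood and drawing
# forecast samples. The toy series below is purely illustrative.
# ----------------------------------------------------------------------------
# import pandas as pd
# from darts import TimeSeries
# values = pd.Series(range(100), index=pd.date_range("2020-01-01", periods=100))
# series = TimeSeries.from_series(values)
# model = LinearRegressionModel(
#     lags=12, likelihood="quantile", quantiles=[0.05, 0.5, 0.95]
# )
# model.fit(series)
# forecast = model.predict(n=10, num_samples=100)  # stochastic samples per step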
|
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from rich.console import Console
from rich.traceback import install
from typing import Union, Optional
import os
import base64
import tarfile
import click
CONSOLE = Console()
install(console=CONSOLE)
@click.group()
@click.option(
"--output_folder",
"-o",
type=str,
default="./encrypted",
help="Output folder for encrypted/decrypted items",
)
@click.option(
"--iterations",
"-i",
type=int,
default=int(1e6),
help="Number of algorithm iterations",
)
@click.option(
"--salt",
"-s",
default=None,
help="Custom salt used when encrypting/decrypting",
)
@click.pass_context
def main(
ctx,
output_folder: str,
iterations: Union[int, float],
salt: Optional[Union[int, float, str]] = None,
):
CONSOLE.rule(title="[bold blue]File Encryption Tool")
CONSOLE.print(
"\n\t[#B0C4DE]Simple tool to encrypt or decrypt your files and folders using the [italic]SHA-2[/italic] algorithm with [italic]512-bit[/italic] hashing."
)
# All context objects here
ctx.obj["output_folder"] = output_folder
ctx.obj["iterations"] = iterations
ctx.obj["salt"] = salt
@main.command(name="encrypt")
@click.option(
"--key-length",
"-k",
    type=int,
    default=32,
    help="Length of the derived key in bytes",
)
@click.option(
"--save-key",
"-S",
type=bool,
default=False,
is_flag=True,
help="Store key file",
)
@click.argument("targets", nargs=-1)
def encrypt_targets(
ctx, key_length: Optional[int] = None, save_key: bool = False, **kwargs,
):
# Get password
def _auth():
password = bytes(
CONSOLE.input(prompt="\nEnter password: ", password=True),
encoding="utf-8",
)
verification = bytes(
CONSOLE.input(prompt="Verify password: ", password=True),
encoding="utf-8",
)
return (password, verification)
password, verification = _auth()
if password != verification:
while password != verification:
CONSOLE.print(
"[bold red]Error:[/bold red] [italic]Passwords do not match.[/italic]"
)
password, verification = _auth()
# Get file buffer
if not "targets" in kwargs:
path_buffer = CONSOLE.input(
"Enter file locations (comma-separated): "
).split(",")
path_buffer = [p.strip() for p in path_buffer]
else:
path_buffer = kwargs["targets"]
# Assert output folder existence
os.makedirs(ctx.obj["output_folder"], exist_ok=True)
# Parse remaining kwargs
key_file = "" if "key_file" not in kwargs else kwargs["key_file"]
key_file = (
os.path.join(key_file, "keyfile")
if os.path.splitext(key_file)[1] == ""
else key_file
)
# Parse potential float input
if isinstance(ctx.obj["iterations"], float):
iterations = int(ctx.obj["iterations"])
# Parse salt
if ctx.obj["salt"]:
if isinstance(ctx.obj["salt"], float):
salt = bytes(int(ctx.obj["salt"]))
elif isinstance(ctx.obj["salt"], str):
salt = bytes(ctx.obj["salt"], encoding="utf-8")
else:
salt = ctx.obj["salt"]
else:
salt = bytes(420)
# Parse key length
if key_length:
if not key_length <= ((2 ** 32) - 1) * hashes.SHA512().digest_size:
raise ValueError("key_length is set too large.")
# Infer target types of paths in buffer
type_buffer = {
path: type
for (path, type) in zip(
path_buffer,
[
"file" if os.path.isfile(p) else "directory"
for p in path_buffer
],
)
}
    # Init Key Derivation Function (warn if the iteration count is low)
    if iterations < int(1e5):
        CONSOLE.log(
            f"[bold red]Warning:[/bold red] Iterations of {iterations} is low. Increase the amount to help mitigate brute-force attempts."
        )
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA512(),
        length=key_length,
        salt=salt,
        iterations=iterations,
        backend=default_backend(),
    )
# Generate key
key = base64.urlsafe_b64encode(kdf.derive(password))
# Save key
if save_key:
with open("./keyfile" if not key_file else key_file, "wb") as f:
f.write(key)
# Fernet instance
fernet = Fernet(key=key)
# Encryption loop
for target, type in type_buffer.items():
# If the target is a directory, tarball and gzip it first
CONSOLE.log(f"Encrypting '{target}'...")
if type == "directory":
CONSOLE.log(f"Processing directory '{target}'...")
tar = tarfile.open(f"{target}.folder", "w:gz")
tar.add(target, arcname=f"{target}")
tar.close()
target = f"{target}.folder"
# Parse target file
with open(target, "rb") as f:
original = f.read()
# Encrypt target file
encrypted = fernet.encrypt(original)
# Write encrypted output
target_name = os.path.split(target)[-1]
with open(
os.path.join(ctx.obj["output_folder"], target_name + ".crypto"),
"wb",
) as f:
f.write(encrypted)
# Teardown
if type == "directory" and os.path.exists(f"./{target_name}"):
CONSOLE.log("Cleaning up iteration temporary files...")
os.unlink(f"./{target_name}")
        # Vertical separator
CONSOLE.print("")
# Report completion
CONSOLE.log(
f"Encryption completed: {len(type_buffer.keys())} targets encrypted."
)
@main.command(name="decrypt")
@click.argument("targets")
def decrypt_targets(
ctx, output_folder: str, salt: Optional[Union[int, float, str]] = None,
):
# Get password
password = bytes(
bytes(
CONSOLE.input(prompt="\nEnter password: ", password=True),
encoding="utf-8",
)
)
# Get file buffer
path_buffer = CONSOLE.input(
"Enter file locations (comma-separated): "
).split(",")
path_buffer = [p.strip() for p in path_buffer]
# Assert output folder existence
os.makedirs(output_folder, exist_ok=True)
# Parse salt
if salt:
if isinstance(salt, float):
salt = bytes(int(salt))
elif isinstance(salt, str):
salt = bytes(salt, encoding="utf-8")
else:
salt = salt
else:
salt = bytes(420)
    # Init Key Derivation Function; length and iterations must match the values used when encrypting
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA512(),
        length=32,
        salt=salt,
        iterations=int(ctx.obj["iterations"]),
        backend=default_backend(),
    )
# Derive key
key = base64.urlsafe_b64encode(kdf.derive(password))
# Fernet instance
fernet = Fernet(key=key)
# Decryption loop
for target in path_buffer:
# Determine target type
if target.find(".folder") > 0:
CONSOLE.log(f"Decrypting directory: '{target}'...")
isdir = True
else:
CONSOLE.log(f"Decrypting file: '{target}'...")
isdir = False
# Open encrypted target file
with open(target, "rb") as f:
encrypted = f.read()
# Decrypt encrypted file
decrypted = fernet.decrypt(encrypted)
# Write decrypted output
target_name = os.path.splitext(os.path.split(target)[-1])[0]
with open(os.path.join(output_folder, target_name), "wb") as f:
f.write(decrypted)
if isdir:
CONSOLE.log("Extracting directory...")
# Extract the compressed target
tar = tarfile.open(
os.path.join(output_folder, f"{target_name}"), "r:gz"
)
tar.extractall(output_folder)
tar.close()
# Delete compressed decrypted file
os.unlink(os.path.join(output_folder, f"{target_name}"))
        # Vertical separator
CONSOLE.print("")
# Report completion
CONSOLE.log(f"Decryption completed: {len(path_buffer)} targets decrypted.")
def interface(header: bool = True):
# Menu
if header:
CONSOLE.rule(title="[bold blue]File Encryption Tool")
CONSOLE.print(
"\n\t[#B0C4DE]Simple tool to encrypt or decrypt your files and folders using the [italic]SHA-2[/italic] algorithm with [italic]512-bit[/italic] hashing."
)
choice = CONSOLE.input(
prompt="\n\t[blue]1.[/blue] [#778899]Encrypt file(s) or folder(s)[/#778899]\n\t[blue]2.[/blue] [#778899]Decrypt file(s) or folder(s)[/#778899]\n\n\t[blue]-[/blue] [#778899]Type[/#778899] [italic]'quit'[/italic] [#778899]to exit.[/#778899]\n\n"
)
# Options
if choice == "1":
CONSOLE.rule(title="[bold purple]Encrypt targets")
encrypt_targets(
output_folder="./encrypted", iterations=1e6, key_length=32
)
elif choice == "2":
CONSOLE.rule(title="[bold green]Decrypt targets")
decrypt_targets(
output_folder="./decrypted", iterations=1e6, key_length=32
)
elif choice == "quit":
CONSOLE.rule(title="[italic #B0C4DE]Application terminated")
else:
CONSOLE.rule(title="\n[italic #B0C4DE]Supported options:\n")
interface(header=False)
def start():
    main(obj={})
if __name__ == "__main__":
# Start application
start()
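# ----------------------------------------------------------------------------
# Hedged sketch (not part of the original tool): the key-derivation and
# encryption round trip the commands above rely on, in isolation. The salt,
# iteration count and password are illustrative; decryption must reuse the
# same salt, iterations and key length as encryption.
# ----------------------------------------------------------------------------
# kdf = PBKDF2HMAC(
#     algorithm=hashes.SHA512(),
#     length=32,
#     salt=b"example-salt",
#     iterations=1_000_000,
#     backend=default_backend(),
# )
# key = base64.urlsafe_b64encode(kdf.derive(b"correct horse battery staple"))
# token = Fernet(key).encrypt(b"secret payload")
# assert Fernet(key).decrypt(token) == b"secret payload"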
|
from ast import literal_eval
from atexit import register
from contextlib import contextmanager
from flask_login import current_user
from importlib.util import module_from_spec, spec_from_file_location
from json import loads
from logging import error, info, warning
from operator import attrgetter
from os import getenv, getpid
from pathlib import Path
from re import search
from sqlalchemy import (
Boolean,
Column,
create_engine,
event,
ForeignKey,
Float,
inspect,
Integer,
PickleType,
String,
Table,
Text,
)
from sqlalchemy.dialects.mysql.base import MSMediumBlob
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.ext.associationproxy import ASSOCIATION_PROXY
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.ext.mutable import MutableDict, MutableList
from sqlalchemy.orm import aliased, configure_mappers, scoped_session, sessionmaker
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.types import JSON
from time import sleep
from traceback import format_exc
from uuid import getnode
from eNMS.variables import vs
class Database:
def __init__(self):
for setting in vs.database.items():
setattr(self, *setting)
self.database_url = getenv("DATABASE_URL", "sqlite:///database.db")
self.dialect = self.database_url.split(":")[0]
self.rbac_error = type("RbacError", (Exception,), {})
self.configure_columns()
self.engine = create_engine(
self.database_url,
**self.engine["common"],
**self.engine.get(self.dialect, {}),
)
self.session = scoped_session(sessionmaker(autoflush=False, bind=self.engine))
self.base = declarative_base(metaclass=self.create_metabase())
self.configure_associations()
self.configure_events()
self.field_conversion = {
"bool": bool,
"dict": self.dict_conversion,
"float": float,
"int": int,
"integer": int,
"json": loads,
"list": str,
"str": str,
"date": str,
}
for retry_type, values in self.transactions["retry"].items():
for parameter, number in values.items():
setattr(self, f"retry_{retry_type}_{parameter}", number)
register(self.cleanup)
def _initialize(self, env):
self.register_custom_models()
try:
self.base.metadata.create_all(bind=self.engine)
except OperationalError:
info(f"Bypassing metadata creation for process {getpid()}")
configure_mappers()
self.configure_model_events(env)
if env.detect_cli():
return
first_init = not self.fetch("user", allow_none=True, name="admin")
if first_init:
admin_user = vs.models["user"](name="admin", is_admin=True)
self.session.add(admin_user)
self.session.commit()
if not admin_user.password:
admin_user.update(password="admin")
self.factory(
"server",
**{
"name": vs.server,
"description": vs.server,
"mac_address": str(getnode()),
"ip_address": vs.server_ip,
"status": "Up",
},
)
parameters = self.factory(
"parameters",
**{
f"banner_{property}": vs.settings["notification_banner"][property]
for property in ("active", "deactivate_on_restart", "properties")
},
)
self.session.commit()
for run in self.fetch(
"run", all_matches=True, allow_none=True, status="Running"
):
run.status = "Aborted (RELOAD)"
run.service.status = "Idle"
parameters = self.fetch("parameters")
if parameters.banner_deactivate_on_restart:
parameters.banner_active = False
self.session.commit()
return first_init
def create_metabase(self):
class SubDeclarativeMeta(DeclarativeMeta):
def __init__(cls, *args): # noqa: N805
DeclarativeMeta.__init__(cls, *args)
if hasattr(cls, "database_init") and "database_init" in cls.__dict__:
cls.database_init()
self.set_custom_properties(cls)
return SubDeclarativeMeta
@staticmethod
def dict_conversion(input):
try:
return literal_eval(input)
except Exception:
return loads(input)
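    # Illustrative note (not in the original): dict_conversion accepts either
    # Python-literal or JSON syntax, e.g.
    #   Database.dict_conversion("{'a': 1}")    -> {'a': 1}    (via literal_eval)
    #   Database.dict_conversion('{"a": null}') -> {'a': None} (json fallback)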
def configure_columns(self):
class CustomPickleType(PickleType):
cache_ok = True
if self.dialect.startswith(("mariadb", "mysql")):
impl = MSMediumBlob
self.Dict = MutableDict.as_mutable(CustomPickleType)
self.List = MutableList.as_mutable(CustomPickleType)
if self.dialect == "postgresql":
self.LargeString = Text
else:
self.LargeString = Text(self.columns["length"]["large_string_length"])
self.SmallString = String(self.columns["length"]["small_string_length"])
self.TinyString = String(self.columns["length"]["tiny_string_length"])
default_ctypes = {
self.Dict: {},
self.List: [],
self.LargeString: "",
self.SmallString: "",
self.TinyString: "",
Text: "",
}
def init_column(column_type, *args, **kwargs):
if "default" not in kwargs and column_type in default_ctypes:
kwargs["default"] = default_ctypes[column_type]
return Column(column_type, *args, **kwargs)
self.Column = init_column
def configure_events(self):
if self.dialect == "sqlite":
@event.listens_for(self.engine, "connect")
def do_begin(connection, _):
def regexp(pattern, value):
return search(pattern, str(value)) is not None
connection.create_function("regexp", 2, regexp)
@event.listens_for(self.base, "mapper_configured", propagate=True)
def model_inspection(mapper, model):
name = model.__tablename__
for col in inspect(model).columns:
if not col.info.get("model_properties", True):
continue
if col.type == PickleType:
is_list = isinstance(col.default.arg, list)
property_type = "list" if is_list else "dict"
else:
property_type = {
Boolean: "bool",
Integer: "int",
Float: "float",
JSON: "dict",
}.get(type(col.type), "str")
vs.model_properties[name][col.key] = property_type
for descriptor in inspect(model).all_orm_descriptors:
if descriptor.extension_type is ASSOCIATION_PROXY:
property = (
descriptor.info.get("name")
or f"{descriptor.target_collection}_{descriptor.value_attr}"
)
vs.model_properties[name][property] = "str"
if hasattr(model, "parent_type"):
vs.model_properties[name].update(vs.model_properties[model.parent_type])
if "service" in name and name != "service":
vs.model_properties[name].update(vs.model_properties["service"])
vs.models.update({name: model, name.lower(): model})
vs.model_properties[name].update(model.model_properties)
for relation in mapper.relationships:
if getattr(relation.mapper.class_, "private", False):
continue
property = str(relation).split(".")[1]
vs.relationships[name][property] = {
"model": relation.mapper.class_.__tablename__,
"list": relation.uselist,
}
def configure_model_events(self, env):
@event.listens_for(self.base, "after_insert", propagate=True)
def log_instance_creation(mapper, connection, target):
if hasattr(target, "name") and target.type != "run":
env.log("info", f"CREATION: {target.type} '{target.name}'")
@event.listens_for(self.base, "before_delete", propagate=True)
def log_instance_deletion(mapper, connection, target):
name = getattr(target, "name", str(target))
env.log("info", f"DELETION: {target.type} '{name}'")
@event.listens_for(self.base, "before_update", propagate=True)
def log_instance_update(mapper, connection, target):
state, changelog = inspect(target), []
for attr in state.attrs:
hist = state.get_history(attr.key, True)
if (
getattr(target, "private", False)
or not getattr(target, "log_changes", True)
or not getattr(state.class_, attr.key).info.get("log_change", True)
or attr.key in vs.private_properties_set
or not hist.has_changes()
):
continue
change = f"{attr.key}: "
property_type = type(getattr(target, attr.key))
if property_type in (InstrumentedList, MutableList):
if property_type == MutableList:
added = [x for x in hist.added[0] if x not in hist.deleted[0]]
deleted = [x for x in hist.deleted[0] if x not in hist.added[0]]
else:
added, deleted = hist.added, hist.deleted
if deleted:
change += f"DELETED: {deleted}"
if added:
change += f"{' / ' if deleted else ''}ADDED: {added}"
else:
change += (
f"'{hist.deleted[0] if hist.deleted else None}' => "
f"'{hist.added[0] if hist.added else None}'"
)
changelog.append(change)
if changelog:
name, changes = (
getattr(target, "name", target.id),
" | ".join(changelog),
)
env.log("info", f"UPDATE: {target.type} '{name}': ({changes})")
for model in vs.models.values():
if "configure_events" in vars(model):
model.configure_events()
if env.use_vault:
for model in vs.private_properties:
@event.listens_for(vs.models[model].name, "set", propagate=True)
def vault_update(target, new_name, old_name, *_):
if new_name == old_name:
return
for property in vs.private_properties[target.class_type]:
path = f"secret/data/{target.type}"
data = env.vault_client.read(f"{path}/{old_name}/{property}")
if not data:
return
env.vault_client.write(
f"{path}/{new_name}/{property}",
data={property: data["data"]["data"][property]},
)
env.vault_client.delete(f"{path}/{old_name}")
def configure_associations(self):
for name, association in self.relationships["associations"].items():
model1, model2 = association["model1"], association["model2"]
setattr(
self,
f"{name}_table",
Table(
f"{name}_association",
self.base.metadata,
Column(
model1["column"],
Integer,
ForeignKey(
f"{model1['foreign_key']}.id", **model1.get("kwargs", {})
),
primary_key=True,
),
Column(
model2["column"],
Integer,
ForeignKey(
f"{model2['foreign_key']}.id", **model2.get("kwargs", {})
),
primary_key=True,
),
),
)
def query(self, model, rbac="read", username=None, properties=None):
if properties:
entity = [getattr(vs.models[model], property) for property in properties]
else:
entity = [vs.models[model]]
query = self.session.query(*entity)
if rbac and model != "user":
user = current_user or self.fetch("user", name=username or "admin")
if user.is_authenticated and not user.is_admin:
if model in vs.rbac["advanced"]["admin_models"].get(rbac, []):
raise self.rbac_error
if (
rbac == "read"
and vs.rbac["advanced"]["deactivate_rbac_on_read"]
and model != "pool"
):
return query
query = vs.models[model].rbac_filter(query, rbac, user)
return query
def fetch(
self,
instance_type,
allow_none=False,
all_matches=False,
rbac="read",
username=None,
**kwargs,
):
query = self.query(instance_type, rbac, username=username).filter(
*(
getattr(vs.models[instance_type], key) == value
for key, value in kwargs.items()
)
)
for index in range(self.retry_fetch_number):
try:
result = query.all() if all_matches else query.first()
break
except Exception as exc:
self.session.rollback()
if index == self.retry_fetch_number - 1:
error(f"Fetch n°{index} failed ({format_exc()})")
raise exc
else:
warning(f"Fetch n°{index} failed ({str(exc)})")
sleep(self.retry_fetch_time * (index + 1))
if result or allow_none:
return result
else:
raise self.rbac_error(
f"There is no {instance_type} in the database "
f"with the following characteristics: {kwargs}"
)
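    # Hedged usage note (not in the original): keyword arguments become
    # equality filters on the model's columns, e.g.
    #   db.fetch("user", name="admin", allow_none=True)
    #   db.fetch("run", all_matches=True, allow_none=True, status="Running")
    # The first returns a single instance (or None), the second a list.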
def delete(self, model, **kwargs):
instance = self.fetch(model, **{"rbac": "edit", **kwargs})
return self.delete_instance(instance)
def fetch_all(self, model, **kwargs):
return self.fetch(model, allow_none=True, all_matches=True, **kwargs)
def objectify(self, model, object_list, **kwargs):
return [self.fetch(model, id=object_id, **kwargs) for object_id in object_list]
def delete_instance(self, instance):
try:
instance.delete()
except Exception as exc:
return {"alert": f"Unable to delete {instance.name} ({exc})."}
serialized_instance = instance.serialized
self.session.delete(instance)
return serialized_instance
def delete_all(self, *models):
for model in models:
for instance in self.fetch_all(model):
self.delete_instance(instance)
self.session.commit()
def export(self, model, private_properties=False):
return [
instance.to_dict(export=True, private_properties=private_properties)
for instance in self.fetch_all(model)
]
def factory(self, _class, commit=False, no_fetch=False, rbac="edit", **kwargs):
def transaction(_class, **kwargs):
characters = set(kwargs.get("name", "") + kwargs.get("scoped_name", ""))
if set("/\\'" + '"') & characters:
raise Exception("Names cannot contain a slash or a quote.")
instance, instance_id = None, kwargs.pop("id", 0)
if instance_id:
instance = self.fetch(_class, id=instance_id, rbac=rbac)
elif "name" in kwargs and not no_fetch:
instance = self.fetch(
_class, allow_none=True, name=kwargs["name"], rbac=rbac
)
if instance and not kwargs.get("must_be_new"):
instance.update(**kwargs)
else:
instance = vs.models[_class](rbac=rbac, **kwargs)
self.session.add(instance)
return instance
if not commit:
instance = transaction(_class, **kwargs)
else:
for index in range(self.retry_commit_number):
try:
instance = transaction(_class, **kwargs)
self.session.commit()
break
except Exception as exc:
self.session.rollback()
if index == self.retry_commit_number - 1:
error(f"Commit n°{index} failed ({format_exc()})")
raise exc
else:
warning(f"Commit n°{index} failed ({str(exc)})")
sleep(self.retry_commit_time * (index + 1))
return instance
def get_credential(
self, username, name=None, device=None, credential_type="any", optional=False
):
pool_alias = aliased(vs.models["pool"])
query = (
self.session.query(vs.models["credential"])
.join(vs.models["pool"], vs.models["credential"].user_pools)
.join(vs.models["user"], vs.models["pool"].users)
)
if device:
query = query.join(pool_alias, vs.models["credential"].device_pools).join(
vs.models["device"], pool_alias.devices
)
query = query.filter(vs.models["user"].name == username)
if name:
query = query.filter(vs.models["credential"].name == name)
if device:
query = query.filter(vs.models["device"].name == device.name)
if credential_type != "any":
query = query.filter(vs.models["credential"].role == credential_type)
credentials = max(query.all(), key=attrgetter("priority"), default=None)
if not credentials and not optional:
raise Exception(f"No matching credentials found for DEVICE '{device.name}'")
return credentials
def register_custom_models(self):
for model in ("device", "link", "service"):
paths = [vs.path / "eNMS" / "models" / f"{model}s"]
load_examples = vs.settings["app"].get("startup_migration") == "examples"
if vs.settings["paths"][f"custom_{model}s"]:
paths.append(Path(vs.settings["paths"][f"custom_{model}s"]))
for path in paths:
for file in path.glob("**/*.py"):
if "init" in str(file):
continue
if not load_examples and "examples" in str(file):
continue
info(f"Loading {model}: {file}")
spec = spec_from_file_location(file.stem, str(file))
try:
spec.loader.exec_module(module_from_spec(spec))
except InvalidRequestError:
error(f"Error loading {model} '{file}'\n{format_exc()}")
@contextmanager
def session_scope(self):
try:
yield self.session
self.session.commit()
except Exception:
self.session.rollback()
raise
finally:
self.session.close()
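    # Hedged usage note (not in the original): session_scope() wraps a block
    # in commit-on-success / rollback-on-error semantics, e.g.
    #   with db.session_scope() as session:
    #       session.add(instance)  # committed if the block exits cleanly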
def set_custom_properties(self, table):
model = getattr(table, "__tablename__", None)
if not model:
return
for property, values in vs.properties["custom"].get(model, {}).items():
if values.get("private", False):
kwargs = {}
else:
if "default_function" in values:
values["default"] = getattr(vs.custom, values['default_function'])
kwargs = {
"default": values["default"],
"info": {"log_change": values.get("log_change", True)},
}
column = self.Column(
{
"bool": Boolean,
"dict": self.Dict,
"float": Float,
"integer": Integer,
"json": JSON,
"str": self.LargeString,
"select": self.SmallString,
"multiselect": self.List,
}[values.get("type", "str")],
**kwargs,
)
if not values.get("serialize", True):
self.dont_serialize[model].append(property)
if not values.get("migrate", True):
self.dont_migrate[model].append(property)
setattr(table, property, column)
return table
def cleanup(self):
self.engine.dispose()
db = Database()
|
#
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Subunit - a streaming test protocol
Overview
++++++++
The ``subunit`` Python package provides a number of ``unittest`` extensions
which can be used to cause tests to output Subunit, to parse Subunit streams
into test activity, perform seamless test isolation within a regular test
case and variously sort, filter and report on test runs.
Key Classes
-----------
The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
extension which will translate a test run into a Subunit stream.
The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
protocol and the ``unittest.TestCase`` object protocol. It is used to translate
a stream into a test run, which regular ``unittest.TestResult`` objects can
process and report/inspect.
Subunit has support for non-blocking usage too, for use with asyncore or
Twisted. See the ``TestProtocolServer`` parser class for more details.
Subunit includes extensions to the Python ``TestResult`` protocol. These are
all done in a compatible manner: ``TestResult`` objects that do not implement
the extension methods will not cause errors to be raised, instead the extension
will either lose fidelity (for instance, folding expected failures to success
in Python versions < 2.7 or 3.1), or discard the extended data (for extra
details, tags, timestamping and progress markers).
The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
which can be used instead of the usual python unittest parameter.
When used the value of details should be a dict from ``string`` to
``testtools.content.Content`` objects. This is a draft API being worked on with
the Python Testing In Python mail list, with the goal of permitting a common
way to provide additional data beyond a traceback, such as captured data from
disk, logging messages etc. The reference for this API is in testtools (0.9.0
and newer).
The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
remove tags in the test run that is currently executing. If called when no
test is in progress (that is, if called outside of the ``startTest``,
``stopTest`` pair), the tags apply to all subsequent tests. If called
when a test is in progress, then the tags only apply to that test.
The ``time(a_datetime)`` method is called (if present) when a ``time:``
directive is encountered in a Subunit stream. This is used to tell a TestResult
about the time that events in the stream occurred at, to allow reconstructing
test timing from a stream.
The ``progress(offset, whence)`` method controls progress data for a stream.
The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
ignore the offset parameter.
Python test support
-------------------
``subunit.run`` is a convenience wrapper to run a Python test suite via
the command line, reporting via Subunit::
$ python -m subunit.run mylib.tests.test_suite
The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
tests, allowing isolation between the test runner and some tests.
Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
tests that will fork() before that individual test is run.
``ExecTestCase`` is a convenience wrapper for running an external
program to get a Subunit stream and then report that back to an arbitrary
result object::
class AggregateTests(subunit.ExecTestCase):
def test_script_one(self):
'./bin/script_one'
def test_script_two(self):
'./bin/script_two'
  # Normally your normal test loading would take care of this automatically,
# It is only spelt out in detail here for clarity.
suite = unittest.TestSuite([AggregateTests("test_script_one"),
AggregateTests("test_script_two")])
# Create any TestResult class you like.
result = unittest._TextTestResult(sys.stdout)
# And run your suite as normal, Subunit will exec each external script as
# needed and report to your result object.
suite.run(result)
Utility modules
---------------
* subunit.chunked contains HTTP chunked encoding/decoding logic.
* subunit.test_results contains TestResult helper classes.
"""
import os
import re
import subprocess
import sys
import unittest
try:
from io import UnsupportedOperation as _UnsupportedOperation
except ImportError:
_UnsupportedOperation = AttributeError
from extras import safe_hasattr
from testtools import content, content_type, ExtendedToOriginalDecorator
from testtools.content import TracebackContent
from testtools.compat import _b, _u, BytesIO, StringIO
try:
from testtools.testresult.real import _StringException
RemoteException = _StringException
except ImportError:
raise ImportError ("testtools.testresult.real does not contain "
"_StringException, check your version.")
from testtools import testresult, CopyStreamResult
from subunit import chunked, details, iso8601, test_results
from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (1, 3, 0, 'final', 0)
PROGRESS_SET = 0
PROGRESS_CUR = 1
PROGRESS_PUSH = 2
PROGRESS_POP = 3
def test_suite():
import subunit.tests
return subunit.tests.test_suite()
def join_dir(base_path, path):
"""
Returns an absolute path to C{path}, calculated relative to the parent
of C{base_path}.
@param base_path: A path to a file or directory.
@param path: An absolute path, or a path relative to the containing
directory of C{base_path}.
@return: An absolute path to C{path}.
"""
return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
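# Illustrative example (not in the original docstring):
#   join_dir("/srv/tests/config.py", "data/fixture.json")
#   -> "/srv/tests/data/fixture.json"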
def tags_to_new_gone(tags):
"""Split a list of tags into a new_set and a gone_set."""
new_tags = set()
gone_tags = set()
for tag in tags:
if tag[0] == '-':
gone_tags.add(tag[1:])
else:
new_tags.add(tag)
return new_tags, gone_tags
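# Illustrative example (not in the original): tags prefixed with '-' are
# treated as removals, e.g.
#   tags_to_new_gone(['fast', '-slow'])  ->  ({'fast'}, {'slow'})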
class DiscardStream(object):
"""A filelike object which discards what is written to it."""
def fileno(self):
raise _UnsupportedOperation()
def write(self, bytes):
pass
def read(self, len=0):
return _b('')
class _ParserState(object):
"""State for the subunit parser."""
def __init__(self, parser):
self.parser = parser
self._test_sym = (_b('test'), _b('testing'))
self._colon_sym = _b(':')
self._error_sym = (_b('error'),)
self._failure_sym = (_b('failure'),)
self._progress_sym = (_b('progress'),)
self._skip_sym = _b('skip')
self._success_sym = (_b('success'), _b('successful'))
self._tags_sym = (_b('tags'),)
self._time_sym = (_b('time'),)
self._xfail_sym = (_b('xfail'),)
self._uxsuccess_sym = (_b('uxsuccess'),)
self._start_simple = _u(" [")
self._start_multipart = _u(" [ multipart")
def addError(self, offset, line):
"""An 'error:' directive has been read."""
self.parser.stdOutLineReceived(line)
def addExpectedFail(self, offset, line):
"""An 'xfail:' directive has been read."""
self.parser.stdOutLineReceived(line)
def addFailure(self, offset, line):
"""A 'failure:' directive has been read."""
self.parser.stdOutLineReceived(line)
def addSkip(self, offset, line):
"""A 'skip:' directive has been read."""
self.parser.stdOutLineReceived(line)
def addSuccess(self, offset, line):
"""A 'success:' directive has been read."""
self.parser.stdOutLineReceived(line)
def lineReceived(self, line):
"""a line has been received."""
parts = line.split(None, 1)
if len(parts) == 2 and line.startswith(parts[0]):
cmd, rest = parts
offset = len(cmd) + 1
cmd = cmd.rstrip(self._colon_sym)
if cmd in self._test_sym:
self.startTest(offset, line)
elif cmd in self._error_sym:
self.addError(offset, line)
elif cmd in self._failure_sym:
self.addFailure(offset, line)
elif cmd in self._progress_sym:
self.parser._handleProgress(offset, line)
elif cmd in self._skip_sym:
self.addSkip(offset, line)
elif cmd in self._success_sym:
self.addSuccess(offset, line)
elif cmd in self._tags_sym:
self.parser._handleTags(offset, line)
self.parser.subunitLineReceived(line)
elif cmd in self._time_sym:
self.parser._handleTime(offset, line)
self.parser.subunitLineReceived(line)
elif cmd in self._xfail_sym:
self.addExpectedFail(offset, line)
elif cmd in self._uxsuccess_sym:
self.addUnexpectedSuccess(offset, line)
else:
self.parser.stdOutLineReceived(line)
else:
self.parser.stdOutLineReceived(line)
def lostConnection(self):
"""Connection lost."""
self.parser._lostConnectionInTest(_u('unknown state of '))
def startTest(self, offset, line):
"""A test start command received."""
self.parser.stdOutLineReceived(line)
class _InTest(_ParserState):
"""State for the subunit parser after reading a test: directive."""
def _outcome(self, offset, line, no_details, details_state):
"""An outcome directive has been read.
:param no_details: Callable to call when no details are presented.
:param details_state: The state to switch to for details
processing of this outcome.
"""
test_name = line[offset:-1].decode('utf8')
if self.parser.current_test_description == test_name:
self.parser._state = self.parser._outside_test
self.parser.current_test_description = None
no_details()
self.parser.client.stopTest(self.parser._current_test)
self.parser._current_test = None
self.parser.subunitLineReceived(line)
elif self.parser.current_test_description + self._start_simple == \
test_name:
self.parser._state = details_state
details_state.set_simple()
self.parser.subunitLineReceived(line)
elif self.parser.current_test_description + self._start_multipart == \
test_name:
self.parser._state = details_state
details_state.set_multipart()
self.parser.subunitLineReceived(line)
else:
self.parser.stdOutLineReceived(line)
def _error(self):
self.parser.client.addError(self.parser._current_test,
details={})
def addError(self, offset, line):
"""An 'error:' directive has been read."""
self._outcome(offset, line, self._error,
self.parser._reading_error_details)
def _xfail(self):
self.parser.client.addExpectedFailure(self.parser._current_test,
details={})
def addExpectedFail(self, offset, line):
"""An 'xfail:' directive has been read."""
self._outcome(offset, line, self._xfail,
self.parser._reading_xfail_details)
def _uxsuccess(self):
self.parser.client.addUnexpectedSuccess(self.parser._current_test)
def addUnexpectedSuccess(self, offset, line):
"""A 'uxsuccess:' directive has been read."""
self._outcome(offset, line, self._uxsuccess,
self.parser._reading_uxsuccess_details)
def _failure(self):
self.parser.client.addFailure(self.parser._current_test, details={})
def addFailure(self, offset, line):
"""A 'failure:' directive has been read."""
self._outcome(offset, line, self._failure,
self.parser._reading_failure_details)
def _skip(self):
self.parser.client.addSkip(self.parser._current_test, details={})
def addSkip(self, offset, line):
"""A 'skip:' directive has been read."""
self._outcome(offset, line, self._skip,
self.parser._reading_skip_details)
def _succeed(self):
self.parser.client.addSuccess(self.parser._current_test, details={})
def addSuccess(self, offset, line):
"""A 'success:' directive has been read."""
self._outcome(offset, line, self._succeed,
self.parser._reading_success_details)
def lostConnection(self):
"""Connection lost."""
self.parser._lostConnectionInTest(_u(''))
class _OutSideTest(_ParserState):
"""State for the subunit parser outside of a test context."""
def lostConnection(self):
"""Connection lost."""
def startTest(self, offset, line):
"""A test start command received."""
self.parser._state = self.parser._in_test
test_name = line[offset:-1].decode('utf8')
self.parser._current_test = RemotedTestCase(test_name)
self.parser.current_test_description = test_name
self.parser.client.startTest(self.parser._current_test)
self.parser.subunitLineReceived(line)
class _ReadingDetails(_ParserState):
"""Common logic for readin state details."""
def endDetails(self):
"""The end of a details section has been reached."""
self.parser._state = self.parser._outside_test
self.parser.current_test_description = None
self._report_outcome()
self.parser.client.stopTest(self.parser._current_test)
def lineReceived(self, line):
"""a line has been received."""
self.details_parser.lineReceived(line)
self.parser.subunitLineReceived(line)
def lostConnection(self):
"""Connection lost."""
self.parser._lostConnectionInTest(_u('%s report of ') %
self._outcome_label())
def _outcome_label(self):
"""The label to describe this outcome."""
raise NotImplementedError(self._outcome_label)
def set_simple(self):
"""Start a simple details parser."""
self.details_parser = details.SimpleDetailsParser(self)
def set_multipart(self):
"""Start a multipart details parser."""
self.details_parser = details.MultipartDetailsParser(self)
class _ReadingFailureDetails(_ReadingDetails):
"""State for the subunit parser when reading failure details."""
def _report_outcome(self):
self.parser.client.addFailure(self.parser._current_test,
details=self.details_parser.get_details())
def _outcome_label(self):
return "failure"
class _ReadingErrorDetails(_ReadingDetails):
"""State for the subunit parser when reading error details."""
def _report_outcome(self):
self.parser.client.addError(self.parser._current_test,
details=self.details_parser.get_details())
def _outcome_label(self):
return "error"
class _ReadingExpectedFailureDetails(_ReadingDetails):
"""State for the subunit parser when reading xfail details."""
def _report_outcome(self):
self.parser.client.addExpectedFailure(self.parser._current_test,
details=self.details_parser.get_details())
def _outcome_label(self):
return "xfail"
class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
"""State for the subunit parser when reading uxsuccess details."""
def _report_outcome(self):
self.parser.client.addUnexpectedSuccess(self.parser._current_test,
details=self.details_parser.get_details())
def _outcome_label(self):
return "uxsuccess"
class _ReadingSkipDetails(_ReadingDetails):
"""State for the subunit parser when reading skip details."""
def _report_outcome(self):
self.parser.client.addSkip(self.parser._current_test,
details=self.details_parser.get_details("skip"))
def _outcome_label(self):
return "skip"
class _ReadingSuccessDetails(_ReadingDetails):
"""State for the subunit parser when reading success details."""
def _report_outcome(self):
self.parser.client.addSuccess(self.parser._current_test,
details=self.details_parser.get_details("success"))
def _outcome_label(self):
return "success"
class TestProtocolServer(object):
"""A parser for subunit.
:ivar tags: The current tags associated with the protocol stream.
"""
def __init__(self, client, stream=None, forward_stream=None):
"""Create a TestProtocolServer instance.
:param client: An object meeting the unittest.TestResult protocol.
:param stream: The stream that lines received which are not part of the
subunit protocol should be written to. This allows custom handling
of mixed protocols. By default, sys.stdout will be used for
convenience. It should accept bytes to its write() method.
:param forward_stream: A stream to forward subunit lines to. This
allows a filter to forward the entire stream while still parsing
and acting on it. By default forward_stream is set to
DiscardStream() and no forwarding happens.
"""
self.client = ExtendedToOriginalDecorator(client)
if stream is None:
stream = sys.stdout
if sys.version_info > (3, 0):
stream = stream.buffer
self._stream = stream
self._forward_stream = forward_stream or DiscardStream()
# state objects we can switch too
self._in_test = _InTest(self)
self._outside_test = _OutSideTest(self)
self._reading_error_details = _ReadingErrorDetails(self)
self._reading_failure_details = _ReadingFailureDetails(self)
self._reading_skip_details = _ReadingSkipDetails(self)
self._reading_success_details = _ReadingSuccessDetails(self)
self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
# start with outside test.
self._state = self._outside_test
# Avoid casts on every call
self._plusminus = _b('+-')
self._push_sym = _b('push')
self._pop_sym = _b('pop')
def _handleProgress(self, offset, line):
"""Process a progress directive."""
line = line[offset:].strip()
if line[0] in self._plusminus:
whence = PROGRESS_CUR
delta = int(line)
elif line == self._push_sym:
whence = PROGRESS_PUSH
delta = None
elif line == self._pop_sym:
whence = PROGRESS_POP
delta = None
else:
whence = PROGRESS_SET
delta = int(line)
self.client.progress(delta, whence)
def _handleTags(self, offset, line):
"""Process a tags command."""
tags = line[offset:].decode('utf8').split()
new_tags, gone_tags = tags_to_new_gone(tags)
self.client.tags(new_tags, gone_tags)
def _handleTime(self, offset, line):
# Accept it, but do not do anything with it yet.
try:
event_time = iso8601.parse_date(line[offset:-1])
except TypeError:
raise TypeError(_u("Failed to parse %r, got %r")
                % (line, sys.exc_info()[1]))
self.client.time(event_time)
def lineReceived(self, line):
"""Call the appropriate local method for the received line."""
self._state.lineReceived(line)
def _lostConnectionInTest(self, state_string):
error_string = _u("lost connection during %stest '%s'") % (
state_string, self.current_test_description)
self.client.addError(self._current_test, RemoteError(error_string))
self.client.stopTest(self._current_test)
def lostConnection(self):
"""The input connection has finished."""
self._state.lostConnection()
def readFrom(self, pipe):
"""Blocking convenience API to parse an entire stream.
:param pipe: A file-like object supporting __iter__.
:return: None.
"""
for line in pipe:
self.lineReceived(line)
self.lostConnection()
def _startTest(self, offset, line):
"""Internal call to change state machine. Override startTest()."""
self._state.startTest(offset, line)
def subunitLineReceived(self, line):
self._forward_stream.write(line)
def stdOutLineReceived(self, line):
self._stream.write(line)
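# Hedged usage sketch (not part of the original module): feeding a v1 subunit
# byte stream into a plain unittest result via readFrom().
#   import io, unittest
#   result = unittest.TestResult()
#   TestProtocolServer(result).readFrom(io.BytesIO(b"test: foo\nsuccess: foo\n"))
#   result.testsRun  # -> 1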
class TestProtocolClient(testresult.TestResult):
"""A TestResult which generates a subunit stream for a test run.
# Get a TestSuite or TestCase to run
suite = make_suite()
# Create a stream (any object with a 'write' method). This should accept
# bytes not strings: subunit is a byte orientated protocol.
stream = file('tests.log', 'wb')
# Create a subunit result object which will output to the stream
result = subunit.TestProtocolClient(stream)
# Optionally, to get timing data for performance analysis, wrap the
# serialiser with a timing decorator
result = subunit.test_results.AutoTimingTestResultDecorator(result)
# Run the test suite reporting to the subunit result object
suite.run(result)
# Close the stream.
stream.close()
"""
def __init__(self, stream):
testresult.TestResult.__init__(self)
stream = make_stream_binary(stream)
self._stream = stream
self._progress_fmt = _b("progress: ")
self._bytes_eol = _b("\n")
self._progress_plus = _b("+")
self._progress_push = _b("push")
self._progress_pop = _b("pop")
self._empty_bytes = _b("")
self._start_simple = _b(" [\n")
self._end_simple = _b("]\n")
def addError(self, test, error=None, details=None):
"""Report an error in test test.
Only one of error and details should be provided: conceptually there
are two separate methods:
addError(self, test, error)
addError(self, test, details)
:param error: Standard unittest positional argument form - an
exc_info tuple.
:param details: New Testing-in-python drafted API; a dict from string
to subunit.Content objects.
"""
self._addOutcome("error", test, error=error, details=details)
if self.failfast:
self.stop()
def addExpectedFailure(self, test, error=None, details=None):
"""Report an expected failure in test test.
Only one of error and details should be provided: conceptually there
are two separate methods:
addError(self, test, error)
addError(self, test, details)
:param error: Standard unittest positional argument form - an
exc_info tuple.
:param details: New Testing-in-python drafted API; a dict from string
to subunit.Content objects.
"""
self._addOutcome("xfail", test, error=error, details=details)
def addFailure(self, test, error=None, details=None):
"""Report a failure in test test.
Only one of error and details should be provided: conceptually there
are two separate methods:
addFailure(self, test, error)
addFailure(self, test, details)
:param error: Standard unittest positional argument form - an
exc_info tuple.
:param details: New Testing-in-python drafted API; a dict from string
to subunit.Content objects.
"""
self._addOutcome("failure", test, error=error, details=details)
if self.failfast:
self.stop()
def _addOutcome(self, outcome, test, error=None, details=None,
error_permitted=True):
"""Report a failure in test test.
Only one of error and details should be provided: conceptually there
are two separate methods:
addOutcome(self, test, error)
addOutcome(self, test, details)
:param outcome: A string describing the outcome - used as the
event name in the subunit stream.
:param error: Standard unittest positional argument form - an
exc_info tuple.
:param details: New Testing-in-python drafted API; a dict from string
to subunit.Content objects.
:param error_permitted: If True then one and only one of error or
details must be supplied. If False then error must not be supplied
and details is still optional. """
self._stream.write(_b("%s: " % outcome) + self._test_id(test))
if error_permitted:
if error is None and details is None:
raise ValueError
else:
if error is not None:
raise ValueError
if error is not None:
self._stream.write(self._start_simple)
tb_content = TracebackContent(error, test)
for bytes in tb_content.iter_bytes():
self._stream.write(bytes)
elif details is not None:
self._write_details(details)
else:
self._stream.write(_b("\n"))
if details is not None or error is not None:
self._stream.write(self._end_simple)
def addSkip(self, test, reason=None, details=None):
"""Report a skipped test."""
if reason is None:
self._addOutcome("skip", test, error=None, details=details)
else:
self._stream.write(_b("skip: %s [\n" % test.id()))
self._stream.write(_b("%s\n" % reason))
self._stream.write(self._end_simple)
def addSuccess(self, test, details=None):
"""Report a success in a test."""
self._addOutcome("successful", test, details=details, error_permitted=False)
def addUnexpectedSuccess(self, test, details=None):
"""Report an unexpected success in test test.
Details can optionally be provided: conceptually there
are two separate methods:
            addUnexpectedSuccess(self, test)
            addUnexpectedSuccess(self, test, details)
:param details: New Testing-in-python drafted API; a dict from string
to subunit.Content objects.
"""
self._addOutcome("uxsuccess", test, details=details,
error_permitted=False)
if self.failfast:
self.stop()
def _test_id(self, test):
result = test.id()
if type(result) is not bytes:
result = result.encode('utf8')
return result
def startTest(self, test):
"""Mark a test as starting its test run."""
super(TestProtocolClient, self).startTest(test)
self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
self._stream.flush()
def stopTest(self, test):
super(TestProtocolClient, self).stopTest(test)
self._stream.flush()
def progress(self, offset, whence):
"""Provide indication about the progress/length of the test run.
:param offset: Information about the number of tests remaining. If
whence is PROGRESS_CUR, then offset increases/decreases the
remaining test count. If whence is PROGRESS_SET, then offset
specifies exactly the remaining test count.
:param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
PROGRESS_POP.
"""
if whence == PROGRESS_CUR and offset > -1:
prefix = self._progress_plus
offset = _b(str(offset))
elif whence == PROGRESS_PUSH:
prefix = self._empty_bytes
offset = self._progress_push
elif whence == PROGRESS_POP:
prefix = self._empty_bytes
offset = self._progress_pop
else:
prefix = self._empty_bytes
offset = _b(str(offset))
self._stream.write(self._progress_fmt + prefix + offset +
self._bytes_eol)
def tags(self, new_tags, gone_tags):
"""Inform the client about tags added/removed from the stream."""
if not new_tags and not gone_tags:
return
tags = set([tag.encode('utf8') for tag in new_tags])
tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
self._stream.write(tag_line)
    def time(self, a_datetime):
        """Inform the client of the time.
        :param a_datetime: A datetime.datetime object.
"""
time = a_datetime.astimezone(iso8601.Utc())
self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
time.year, time.month, time.day, time.hour, time.minute,
time.second, time.microsecond)))
def _write_details(self, details):
"""Output details to the stream.
:param details: An extended details dict for a test outcome.
"""
self._stream.write(_b(" [ multipart\n"))
for name, content in sorted(details.items()):
self._stream.write(_b("Content-Type: %s/%s" %
(content.content_type.type, content.content_type.subtype)))
parameters = content.content_type.parameters
if parameters:
self._stream.write(_b(";"))
param_strs = []
for param, value in parameters.items():
param_strs.append("%s=%s" % (param, value))
self._stream.write(_b(",".join(param_strs)))
self._stream.write(_b("\n%s\n" % name))
encoder = chunked.Encoder(self._stream)
list(map(encoder.write, content.iter_bytes()))
encoder.close()
def done(self):
"""Obey the testtools result.done() interface."""
def RemoteError(description=_u("")):
return (_StringException, _StringException(description), None)
class RemotedTestCase(unittest.TestCase):
"""A class to represent test cases run in child processes.
Instances of this class are used to provide the Python test API a TestCase
that can be printed to the screen, introspected for metadata and so on.
    However, as they are simply a memoisation of a test that was actually
run in the past by a separate process, they cannot perform any interactive
actions.
"""
    def __eq__(self, other):
try:
return self.__description == other.__description
except AttributeError:
return False
    def __init__(self, description):
        """Create a pseudo test case with description description."""
self.__description = description
def error(self, label):
raise NotImplementedError("%s on RemotedTestCases is not permitted." %
label)
def setUp(self):
self.error("setUp")
def tearDown(self):
self.error("tearDown")
def shortDescription(self):
return self.__description
def id(self):
return "%s" % (self.__description,)
def __str__(self):
return "%s (%s)" % (self.__description, self._strclass())
def __repr__(self):
return "<%s description='%s'>" % \
(self._strclass(), self.__description)
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
result.stopTest(self)
def _strclass(self):
cls = self.__class__
return "%s.%s" % (cls.__module__, cls.__name__)
class ExecTestCase(unittest.TestCase):
"""A test case which runs external scripts for test fixtures."""
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
unittest.TestCase.__init__(self, methodName)
testMethod = getattr(self, methodName)
self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
testMethod.__doc__)
def countTestCases(self):
return 1
def run(self, result=None):
if result is None: result = self.defaultTestResult()
self._run(result)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self._run(testresult.TestResult())
def _run(self, result):
protocol = TestProtocolServer(result)
process = subprocess.Popen(self.script, shell=True,
stdout=subprocess.PIPE)
make_stream_binary(process.stdout)
output = process.communicate()[0]
protocol.readFrom(BytesIO(output))
class IsolatedTestCase(unittest.TestCase):
"""A TestCase which executes in a forked process.
Each test gets its own process, which has a performance overhead but will
provide excellent isolation from global state (such as django configs,
zope utilities and so on).
"""
def run(self, result=None):
if result is None: result = self.defaultTestResult()
run_isolated(unittest.TestCase, self, result)
class IsolatedTestSuite(unittest.TestSuite):
"""A TestSuite which runs its tests in a forked process.
This decorator that will fork() before running the tests and report the
results from the child process using a Subunit stream. This is useful for
handling tests that mutate global state, or are testing C extensions that
could crash the VM.
"""
def run(self, result=None):
if result is None: result = testresult.TestResult()
run_isolated(unittest.TestSuite, self, result)
def run_isolated(klass, self, result):
"""Run a test suite or case in a subprocess, using the run method on klass.
"""
c2pread, c2pwrite = os.pipe()
# fixme - error -> result
# now fork
pid = os.fork()
if pid == 0:
# Child
# Close parent's pipe ends
os.close(c2pread)
# Dup fds for child
os.dup2(c2pwrite, 1)
# Close pipe fds.
os.close(c2pwrite)
        # at this point, sys.stdout is redirected to the pipe, now we want
        # to filter it to escape ]'s.
### XXX: test and write that bit.
stream = os.fdopen(1, 'wb')
result = TestProtocolClient(stream)
klass.run(self, result)
stream.flush()
sys.stderr.flush()
# exit HARD, exit NOW.
os._exit(0)
else:
# Parent
# Close child pipe ends
os.close(c2pwrite)
# hookup a protocol engine
protocol = TestProtocolServer(result)
fileobj = os.fdopen(c2pread, 'rb')
protocol.readFrom(fileobj)
os.waitpid(pid, 0)
# TODO return code evaluation.
return result
def TAP2SubUnit(tap, output_stream):
"""Filter a TAP pipe into a subunit pipe.
This should be invoked once per TAP script, as TAP scripts get
mapped to a single runnable case with multiple components.
:param tap: A tap pipe/stream/file object - should emit unicode strings.
    :param output_stream: A pipe/stream/file object to write subunit results to.
:return: The exit code to exit with.
"""
output = StreamResultToBytes(output_stream)
UTF8_TEXT = 'text/plain; charset=UTF8'
BEFORE_PLAN = 0
AFTER_PLAN = 1
SKIP_STREAM = 2
state = BEFORE_PLAN
plan_start = 1
plan_stop = 0
# Test data for the next test to emit
test_name = None
log = []
result = None
def missing_test(plan_start):
output.status(test_id='test %d' % plan_start,
test_status='fail', runnable=False,
mime_type=UTF8_TEXT, eof=True, file_name="tap meta",
file_bytes=b"test missing from TAP output")
def _emit_test():
"write out a test"
if test_name is None:
return
if log:
log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
mime_type = UTF8_TEXT
file_name = 'tap comment'
eof = True
else:
log_bytes = None
mime_type = None
file_name = None
eof = True
del log[:]
output.status(test_id=test_name, test_status=result,
file_bytes=log_bytes, mime_type=mime_type, eof=eof,
file_name=file_name, runnable=False)
for line in tap:
if state == BEFORE_PLAN:
            match = re.match(r"(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
if match:
state = AFTER_PLAN
_, plan_stop, comment = match.groups()
plan_stop = int(plan_stop)
if plan_start > plan_stop and plan_stop == 0:
# skipped file
state = SKIP_STREAM
output.status(test_id='file skip', test_status='skip',
file_bytes=comment.encode('utf8'), eof=True,
file_name='tap comment')
continue
# not a plan line, or have seen one before
        match = re.match(r"(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
if match:
# new test, emit current one.
_emit_test()
status, number, description, directive, directive_comment = match.groups()
if status == 'ok':
result = 'success'
else:
result = "fail"
if description is None:
description = ''
else:
description = ' ' + description
if directive is not None:
if directive.upper() == 'TODO':
result = 'xfail'
elif directive.upper() == 'SKIP':
result = 'skip'
if directive_comment is not None:
log.append(directive_comment)
if number is not None:
number = int(number)
while plan_start < number:
missing_test(plan_start)
plan_start += 1
test_name = "test %d%s" % (plan_start, description)
plan_start += 1
continue
        match = re.match(r"Bail out\!(?:\s*(.*))?\n", line)
if match:
reason, = match.groups()
if reason is None:
extra = ''
else:
extra = ' %s' % reason
_emit_test()
test_name = "Bail out!%s" % extra
result = "fail"
state = SKIP_STREAM
continue
        match = re.match(r"\#.*\n", line)
if match:
log.append(line[:-1])
continue
# Should look at buffering status and binding this to the prior result.
output.status(file_bytes=line.encode('utf8'), file_name='stdout',
mime_type=UTF8_TEXT)
_emit_test()
while plan_start <= plan_stop:
# record missed tests
missing_test(plan_start)
plan_start += 1
return 0
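# Illustrative usage sketch (not exercised anywhere in this module): feeds a tiny,
# invented TAP transcript through TAP2SubUnit above and collects the resulting
# subunit v2 bytes in memory. Call it manually; nothing runs on import.
def _example_tap2subunit():
    from io import BytesIO, StringIO
    tap_text = StringIO("1..2\nok 1 addition\nnot ok 2 subtraction # TODO fix\n")
    subunit_bytes = BytesIO()
    exit_code = TAP2SubUnit(tap_text, subunit_bytes)
    return exit_code, subunit_bytes.getvalue()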
def tag_stream(original, filtered, tags):
"""Alter tags on a stream.
:param original: The input stream.
:param filtered: The output stream.
:param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
'-TAG' commands.
A 'TAG' command will add the tag to the output stream,
and override any existing '-TAG' command in that stream.
Specifically:
* A global 'tags: TAG' will be added to the start of the stream.
* Any tags commands with -TAG will have the -TAG removed.
A '-TAG' command will remove the TAG command from the stream.
Specifically:
* A 'tags: -TAG' command will be added to the start of the stream.
* Any 'tags: TAG' command will have 'TAG' removed from it.
Additionally, any redundant tagging commands (adding a tag globally
present, or removing a tag globally removed) are stripped as a
by-product of the filtering.
:return: 0
"""
new_tags, gone_tags = tags_to_new_gone(tags)
source = ByteStreamToStreamResult(original, non_subunit_name='stdout')
class Tagger(CopyStreamResult):
def status(self, **kwargs):
tags = kwargs.get('test_tags')
if not tags:
tags = set()
tags.update(new_tags)
tags.difference_update(gone_tags)
if tags:
kwargs['test_tags'] = tags
else:
kwargs['test_tags'] = None
super(Tagger, self).status(**kwargs)
output = Tagger([StreamResultToBytes(filtered)])
source.run(output)
return 0
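# Illustrative usage sketch (not exercised anywhere in this module): applies
# tag_stream to an in-memory stream. The input here is an empty subunit v2 stream
# purely for illustration; real input would normally come from a file or pipe.
def _example_tag_stream():
    from io import BytesIO
    original = BytesIO()
    filtered = BytesIO()
    tag_stream(original, filtered, ["quick", "-slow"])
    return filtered.getvalue()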
class ProtocolTestCase(object):
"""Subunit wire protocol to unittest.TestCase adapter.
ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
calling a ProtocolTestCase or invoking the run() method will make a 'test
run' happen. The 'test run' will simply be a replay of the test activity
that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
and ``countTestCases`` methods are not supported because there isn't a
sensible mapping for those methods.
# Get a stream (any object with a readline() method), in this case the
# stream output by the example from ``subunit.TestProtocolClient``.
    stream = open('tests.log', 'rb')
# Create a parser which will read from the stream and emit
# activity to a unittest.TestResult when run() is called.
suite = subunit.ProtocolTestCase(stream)
# Create a result object to accept the contents of that stream.
    result = unittest.TestResult()
# 'run' the tests - process the stream and feed its contents to result.
suite.run(result)
stream.close()
:seealso: TestProtocolServer (the subunit wire protocol parser).
"""
def __init__(self, stream, passthrough=None, forward=None):
"""Create a ProtocolTestCase reading from stream.
:param stream: A filelike object which a subunit stream can be read
from.
:param passthrough: A stream pass non subunit input on to. If not
supplied, the TestProtocolServer default is used.
:param forward: A stream to pass subunit input on to. If not supplied
subunit input is not forwarded.
"""
stream = make_stream_binary(stream)
self._stream = stream
self._passthrough = passthrough
if forward is not None:
forward = make_stream_binary(forward)
self._forward = forward
def __call__(self, result=None):
return self.run(result)
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
protocol = TestProtocolServer(result, self._passthrough, self._forward)
line = self._stream.readline()
while line:
protocol.lineReceived(line)
line = self._stream.readline()
protocol.lostConnection()
class TestResultStats(testresult.TestResult):
"""A pyunit TestResult interface implementation for making statistics.
:ivar total_tests: The total tests seen.
:ivar passed_tests: The tests that passed.
:ivar failed_tests: The tests that failed.
:ivar seen_tags: The tags seen across all tests.
"""
def __init__(self, stream):
"""Create a TestResultStats which outputs to stream."""
testresult.TestResult.__init__(self)
self._stream = stream
self.failed_tests = 0
self.skipped_tests = 0
self.seen_tags = set()
@property
def total_tests(self):
return self.testsRun
def addError(self, test, err, details=None):
self.failed_tests += 1
def addFailure(self, test, err, details=None):
self.failed_tests += 1
def addSkip(self, test, reason, details=None):
self.skipped_tests += 1
def formatStats(self):
self._stream.write("Total tests: %5d\n" % self.total_tests)
self._stream.write("Passed tests: %5d\n" % self.passed_tests)
self._stream.write("Failed tests: %5d\n" % self.failed_tests)
self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
tags = sorted(self.seen_tags)
self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
@property
def passed_tests(self):
return self.total_tests - self.failed_tests - self.skipped_tests
def tags(self, new_tags, gone_tags):
"""Accumulate the seen tags."""
self.seen_tags.update(new_tags)
def wasSuccessful(self):
"""Tells whether or not this result was a success"""
return self.failed_tests == 0
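# Illustrative usage sketch (not exercised anywhere in this module): replays a
# previously recorded subunit v1 stream through ProtocolTestCase and prints the
# aggregate counts via TestResultStats. The file name 'tests.log' is a placeholder.
def _example_test_result_stats():
    import sys
    result = TestResultStats(sys.stdout)
    with open('tests.log', 'rb') as stream:
        ProtocolTestCase(stream).run(result)
    result.formatStats()
    return result.wasSuccessful()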
def read_test_list(path):
"""Read a list of test ids from a file on disk.
:param path: Path to the file
:return: Sequence of test ids
"""
f = open(path, 'rb')
try:
        return [l.rstrip(b"\n") for l in f.readlines()]
finally:
f.close()
def make_stream_binary(stream):
"""Ensure that a stream will be binary safe. See _make_binary_on_windows.
:return: A binary version of the same stream (some streams cannot be
'fixed' but can be unwrapped).
"""
try:
fileno = stream.fileno()
except (_UnsupportedOperation, AttributeError):
pass
else:
_make_binary_on_windows(fileno)
return _unwrap_text(stream)
def _make_binary_on_windows(fileno):
    r"""Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
if sys.platform == "win32":
import msvcrt
msvcrt.setmode(fileno, os.O_BINARY)
def _unwrap_text(stream):
"""Unwrap stream if it is a text stream to get the original buffer."""
exceptions = (_UnsupportedOperation, IOError)
if sys.version_info > (3, 0):
unicode_type = str
else:
unicode_type = unicode
exceptions += (ValueError,)
try:
# Read streams
if type(stream.read(0)) is unicode_type:
return stream.buffer
except exceptions:
# Cannot read from the stream: try via writes
try:
stream.write(_b(''))
except TypeError:
return stream.buffer
return stream
|
# -*- coding: utf-8 -*-
"""
(c) 2018 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import os
import shutil
import sys
import tempfile
import time
import unittest
import pygit2
import six
from mock import ANY, patch, MagicMock, call
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.tasks_services
import pagure.lib.query
import tests
class PagureLibTaskServicestests(tests.Modeltests):
    """ Tests for pagure.lib.tasks_services """
    maxDiff = None
    def setUp(self):
        """ Set up the environment, run before every test. """
super(PagureLibTaskServicestests, self).setUp()
tests.create_projects(self.session)
# Create a fork of test for foo
item = pagure.lib.model.Project(
user_id=2, # foo
name="test",
is_fork=True,
parent_id=1,
description="test project #1",
hook_token="aaabbbccc_foo",
)
item.close_status = [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
]
self.session.add(item)
self.session.commit()
def test_webhook_notification_invalid_project(self):
""" Test the webhook_notification method. """
self.assertRaises(
RuntimeError,
pagure.lib.tasks_services.webhook_notification,
topic="topic",
msg={"payload": ["a", "b", "c"]},
namespace=None,
name="invalid",
user=None,
)
@patch("pagure.lib.tasks_services.call_web_hooks")
def test_webhook_notification_no_webhook(self, call_wh):
""" Test the webhook_notification method. """
output = pagure.lib.tasks_services.webhook_notification(
topic="topic",
msg={"payload": ["a", "b", "c"]},
namespace=None,
name="test",
user=None,
)
self.assertIsNone(output)
call_wh.assert_not_called()
@patch("pagure.lib.git.log_commits_to_db")
def test_log_commit_send_notifications_invalid_project(self, log):
""" Test the log_commit_send_notifications method. """
output = pagure.lib.tasks_services.log_commit_send_notifications(
name="invalid",
commits=[],
abspath=None,
branch=None,
default_branch=None,
namespace=None,
username=None,
)
self.assertIsNone(output)
log.assert_not_called()
@patch("pagure.lib.notify.notify_new_commits")
@patch("pagure.lib.git.log_commits_to_db")
def test_log_commit_send_notifications_valid_project(self, log, notif):
""" Test the log_commit_send_notifications method. """
output = pagure.lib.tasks_services.log_commit_send_notifications(
name="test",
commits=["hash1", "hash2"],
abspath="/path/to/git",
branch="master",
default_branch="master",
namespace=None,
username=None,
)
self.assertIsNone(output)
log.assert_called_once_with(
ANY, ANY, ["hash1", "hash2"], "/path/to/git"
)
notif.assert_called_once_with(
"/path/to/git", ANY, "master", ["hash1", "hash2"]
)
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_invalid_project(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="invalid",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
self.assertIsNone(output)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_not_configured_project(self, trigger_jenk):
""" Test the trigger_ci_build method. """
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.tasks_services.trigger_ci_build,
project_name="test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_not_configured_project_fork(self, trigger_jenk):
""" Test the trigger_ci_build method. """
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.tasks_services.trigger_ci_build,
project_name="forks/foo/test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
trigger_jenk.assert_not_called()
@patch("pagure.lib.query._get_project")
def test_load_json_commits_to_db_invalid_data_type(self, get_project):
""" Test the load_json_commits_to_db method. """
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath="/path/to/git",
data_type="invalid",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
get_project.assert_not_called()
@patch("pagure.lib.tasks_services.get_files_to_load")
def test_load_json_commits_to_db_invalid_project(self, get_files):
""" Test the load_json_commits_to_db method. """
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="invalid",
commits=["hash1", "hash2"],
abspath="/path/to/git",
data_type="ticket",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
get_files.assert_not_called()
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
def test_load_json_commits_to_db_invalid_path(self, up_issue, up_pr):
""" Test the load_json_commits_to_db method. """
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath=self.path,
data_type="ticket",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
up_issue.assert_not_called()
up_pr.assert_not_called()
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
def test_load_json_commits_to_db_invalid_path_one_commit(
self, up_issue, up_pr
):
""" Test the load_json_commits_to_db method. """
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1"],
abspath=self.path,
data_type="ticket",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
up_issue.assert_not_called()
up_pr.assert_not_called()
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
def test_load_json_commits_to_db_no_agent(self, up_issue, up_pr, send):
""" Test the load_json_commits_to_db method. """
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=[],
abspath=None,
data_type="ticket",
agent=None,
namespace=None,
username=None,
)
self.assertIsNone(output)
up_issue.assert_not_called()
up_pr.assert_not_called()
send.assert_not_called()
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
@patch("pagure.lib.git.read_git_lines")
    def test_load_json_commits_to_db_no_agent_with_commits(
self, git, up_issue, up_pr, send
):
""" Test the load_json_commits_to_db method. """
git.side_effect = [["file1"], ["file2"], ["files/image"], ["file1"]]
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath=self.path,
data_type="ticket",
agent=None,
namespace=None,
username=None,
)
self.assertIsNone(output)
up_issue.assert_not_called()
up_pr.assert_not_called()
send.assert_not_called()
@patch("json.loads")
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
@patch("pagure.lib.git.read_git_lines")
def test_load_json_commits_to_db_tickets(
self, git, up_issue, up_pr, send, json_loads
):
""" Test the load_json_commits_to_db method. """
git.side_effect = [["file1"], ["file2"], ["files/image"], ["file1"]]
json_loads.return_value = "foobar"
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath=self.path,
data_type="ticket",
agent=None,
namespace=None,
username=None,
)
self.assertIsNone(output)
calls = [
call(
ANY,
agent=None,
issue_uid="file1",
json_data="foobar",
namespace=None,
reponame="test",
username=None,
),
call(
ANY,
agent=None,
issue_uid="file2",
json_data="foobar",
namespace=None,
reponame="test",
username=None,
),
]
self.assertEqual(calls, up_issue.mock_calls)
up_pr.assert_not_called()
send.assert_not_called()
@patch("json.loads")
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
@patch("pagure.lib.git.read_git_lines")
def test_load_json_commits_to_db_prs(
self, git, up_issue, up_pr, send, json_loads
):
""" Test the load_json_commits_to_db method. """
git.side_effect = [["file1"], ["file2"], ["files/image"], ["file1"]]
json_loads.return_value = "foobar"
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath=self.path,
data_type="pull-request",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
calls = [
call(
ANY,
json_data="foobar",
namespace=None,
reponame="test",
request_uid="file1",
username=None,
),
call(
ANY,
json_data="foobar",
namespace=None,
reponame="test",
request_uid="file2",
username=None,
),
]
up_issue.assert_not_called()
self.assertEqual(calls, up_pr.mock_calls)
calls = [
call(
"Good Morning\n\n"
"This is the log of loading all the files pushed in the git "
"repo into\n"
"the database. It should ignore files that are not JSON files,"
" this\nis fine.\n\n"
"Loading: file1 -- 1/2 ... ... Done\n"
"Loading: file2 -- 2/2 ... ... Done",
"Issue import report",
"bar@pingou.com",
)
]
self.assertEqual(calls, send.mock_calls)
@patch("json.loads")
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
@patch("pagure.lib.git.update_ticket_from_git")
@patch("pagure.lib.git.read_git_lines")
def test_load_json_commits_to_db_prs_raises_error(
self, git, up_issue, up_pr, send, json_loads
):
""" Test the load_json_commits_to_db method. """
git.side_effect = [["file1"], ["file2"], ["files/image"], ["file1"]]
json_loads.return_value = "foobar"
up_pr.side_effect = Exception("foo error")
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=["hash1", "hash2"],
abspath=self.path,
data_type="pull-request",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
calls = [
call(
ANY,
json_data="foobar",
namespace=None,
reponame="test",
request_uid="file1",
username=None,
)
]
up_issue.assert_not_called()
self.assertEqual(calls, up_pr.mock_calls)
calls = [
call(
"Good Morning\n\n"
"This is the log of loading all the files pushed in the git "
"repo into\n"
"the database. It should ignore files that are not JSON files,"
" this\nis fine.\n\n"
"Loading: file1 -- 1/2 ... ... FAILED\n",
"Issue import report",
"bar@pingou.com",
)
]
self.assertEqual(calls, send.mock_calls)
class PagureLibTaskServicesWithWebHooktests(tests.Modeltests):
    """ Tests for pagure.lib.tasks_services """
    maxDiff = None
    def setUp(self):
        """ Set up the environment, run before every test. """
super(PagureLibTaskServicesWithWebHooktests, self).setUp()
pagure.config.config["REQUESTS_FOLDER"] = None
self.sshkeydir = os.path.join(self.path, "sshkeys")
pagure.config.config["MIRROR_SSHKEYS_FOLDER"] = self.sshkeydir
tests.create_projects(self.session)
project = pagure.lib.query._get_project(self.session, "test")
settings = project.settings
settings["Web-hooks"] = "http://foo.com/api/flag\nhttp://bar.org/bar"
project.settings = settings
self.session.add(project)
self.session.commit()
@patch("pagure.lib.tasks_services.call_web_hooks")
def test_webhook_notification_no_webhook(self, call_wh):
""" Test the webhook_notification method. """
output = pagure.lib.tasks_services.webhook_notification(
topic="topic",
msg={"payload": ["a", "b", "c"]},
namespace=None,
name="test",
user=None,
)
self.assertIsNone(output)
project = pagure.lib.query._get_project(self.session, "test")
call_wh.assert_called_once_with(
ANY,
"topic",
{"payload": ["a", "b", "c"]},
["http://foo.com/api/flag", "http://bar.org/bar"],
)
@patch("time.time", MagicMock(return_value=2))
@patch("uuid.uuid4", MagicMock(return_value="not_so_random"))
@patch("datetime.datetime")
@patch("requests.post")
    def test_webhook_notification_with_webhook(self, post, dt):
""" Test the webhook_notification method. """
post.return_value = False
utcnow = MagicMock()
utcnow.year = 2018
dt.utcnow.return_value = utcnow
output = pagure.lib.tasks_services.webhook_notification(
topic="topic",
msg={"payload": ["a", "b", "c"]},
namespace=None,
name="test",
user=None,
)
self.assertIsNone(output)
project = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(post.call_count, 2)
calls = [
call(
"http://bar.org/bar",
data="{"
'"i": 1, '
'"msg": {'
'"pagure_instance": "http://localhost.localdomain/", '
'"payload": ["a", "b", "c"], '
'"project_fullname": "test"}, '
'"msg_id": "2018-not_so_random", '
'"timestamp": 2, '
'"topic": "topic"}',
headers={
"X-Pagure": "http://localhost.localdomain/",
"X-Pagure-project": "test",
"X-Pagure-Signature": "74b12f0b25bf7767014a0c0de9f3c10"
"191e943d8",
"X-Pagure-Signature-256": "f3d757796554466eac49a5282b2"
"4ee32a1ecfb65dedd6c6231fb207240a9fe58",
"X-Pagure-Topic": b"topic",
"Content-Type": "application/json",
},
timeout=60,
),
call(
"http://foo.com/api/flag",
data="{"
'"i": 1, '
'"msg": {'
'"pagure_instance": "http://localhost.localdomain/", '
'"payload": ["a", "b", "c"], '
'"project_fullname": "test"}, '
'"msg_id": "2018-not_so_random", '
'"timestamp": 2, '
'"topic": "topic"}',
headers={
"X-Pagure": "http://localhost.localdomain/",
"X-Pagure-project": "test",
"X-Pagure-Signature": "74b12f0b25bf7767014a0c0de9f3c10"
"191e943d8",
"X-Pagure-Signature-256": "f3d757796554466eac49a5282b2"
"4ee32a1ecfb65dedd6c6231fb207240a9fe58",
"X-Pagure-Topic": b"topic",
"Content-Type": "application/json",
},
timeout=60,
),
]
print(post.mock_calls)
self.assertEqual(calls, post.mock_calls)
class PagureLibTaskServicesJenkinsCItests(tests.Modeltests):
    """ Tests for pagure.lib.tasks_services """
    maxDiff = None
    def setUp(self):
        """ Set up the environment, run before every test. """
super(PagureLibTaskServicesJenkinsCItests, self).setUp()
pagure.config.config["REQUESTS_FOLDER"] = None
self.sshkeydir = os.path.join(self.path, "sshkeys")
pagure.config.config["MIRROR_SSHKEYS_FOLDER"] = self.sshkeydir
tests.create_projects(self.session)
project = pagure.lib.query.get_authorized_project(self.session, "test")
# Install the plugin at the DB level
plugin = pagure.lib.plugins.get_plugin("Pagure CI")
dbobj = plugin.db_object()
dbobj.ci_url = "https://ci.server.org/"
dbobj.ci_job = "pagure"
dbobj.pagure_ci_token = "random_token"
dbobj.project_id = project.id
self.session.add(dbobj)
self.session.commit()
# Create a fork of test for foo
item = pagure.lib.model.Project(
user_id=2, # foo
name="test",
is_fork=True,
parent_id=1,
description="test project #1",
hook_token="aaabbbccc_foo",
)
item.close_status = [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
]
self.session.add(item)
self.session.commit()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_invalid_ci(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="travis",
)
self.assertIsNone(output)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_invalid_ci_fork(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="forks/foo/test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="travis",
)
self.assertIsNone(output)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_valid_project(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
self.assertIsNone(output)
trigger_jenk.assert_called_once_with(
branch="feature",
cause="PR#ID",
ci_password=None,
ci_username=None,
job="pagure",
project_path="test.git",
token="random_token",
url="https://ci.server.org/",
branch_to="master",
)
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_valid_project_fork(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="forks/foo/test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
self.assertIsNone(output)
trigger_jenk.assert_called_once_with(
branch="feature",
cause="PR#ID",
ci_password=None,
ci_username=None,
job="pagure",
project_path="forks/foo/test.git",
token="random_token",
url="https://ci.server.org/",
branch_to="master",
)
class PagureLibTaskServicesJenkinsCIAuthtests(tests.Modeltests):
    """ Tests for pagure.lib.tasks_services """
    maxDiff = None
    def setUp(self):
        """ Set up the environment, run before every test. """
super(PagureLibTaskServicesJenkinsCIAuthtests, self).setUp()
pagure.config.config["REQUESTS_FOLDER"] = None
self.sshkeydir = os.path.join(self.path, "sshkeys")
pagure.config.config["MIRROR_SSHKEYS_FOLDER"] = self.sshkeydir
tests.create_projects(self.session)
project = pagure.lib.query.get_authorized_project(self.session, "test")
# Install the plugin at the DB level
plugin = pagure.lib.plugins.get_plugin("Pagure CI")
dbobj = plugin.db_object()
dbobj.ci_url = "https://ci.server.org/"
dbobj.ci_job = "pagure"
dbobj.ci_username = "jenkins_username"
dbobj.ci_password = "jenkins_password"
dbobj.pagure_ci_token = "random_token"
dbobj.project_id = project.id
self.session.add(dbobj)
self.session.commit()
# Create a fork of test for foo
item = pagure.lib.model.Project(
user_id=2, # foo
name="test",
is_fork=True,
parent_id=1,
description="test project #1",
hook_token="aaabbbccc_foo",
)
item.close_status = [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
]
self.session.add(item)
self.session.commit()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_invalid_ci(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="travis",
)
self.assertIsNone(output)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_invalid_ci_fork(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="forks/foo/test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="travis",
)
self.assertIsNone(output)
trigger_jenk.assert_not_called()
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_valid_project(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
self.assertIsNone(output)
trigger_jenk.assert_called_once_with(
branch="feature",
cause="PR#ID",
ci_password="jenkins_password",
ci_username="jenkins_username",
job="pagure",
project_path="test.git",
token="random_token",
url="https://ci.server.org/",
branch_to="master",
)
@patch("pagure.lib.tasks_services.trigger_jenkins_build")
def test_trigger_ci_build_valid_project_fork(self, trigger_jenk):
""" Test the trigger_ci_build method. """
output = pagure.lib.tasks_services.trigger_ci_build(
project_name="forks/foo/test",
cause="PR#ID",
branch="feature",
branch_to="master",
ci_type="jenkins",
)
self.assertIsNone(output)
trigger_jenk.assert_called_once_with(
branch="feature",
cause="PR#ID",
ci_password="jenkins_password",
ci_username="jenkins_username",
job="pagure",
project_path="forks/foo/test.git",
token="random_token",
url="https://ci.server.org/",
branch_to="master",
)
class PagureLibTaskServicesLoadJsonTickettests(tests.Modeltests):
    """ Tests for pagure.lib.tasks_services """
    maxDiff = None
    def setUp(self):
        """ Set up the environment, run before every test. """
super(PagureLibTaskServicesLoadJsonTickettests, self).setUp()
tests.create_projects(self.session)
self.gitrepo = os.path.join(self.path, "repos", "tickets", "test.git")
repopath = os.path.join(self.path, "repos", "tickets")
os.makedirs(self.gitrepo)
self.repo_obj = pygit2.init_repository(self.gitrepo, bare=True)
project = pagure.lib.query.get_authorized_project(self.session, "test")
# Create an issue to play with
msg = pagure.lib.query.new_issue(
session=self.session,
repo=project,
title="Test issue",
content="We should work on this",
user="pingou",
)
self.assertEqual(msg.title, "Test issue")
issue = pagure.lib.query.search_issues(
self.session, project, issueid=1
)
# Add a couple of comment on the ticket
msg = pagure.lib.query.add_issue_comment(
session=self.session,
issue=issue,
comment="Hey look a comment!",
user="foo",
)
self.session.commit()
self.assertEqual(msg, "Comment added")
commits = [
commit
for commit in self.repo_obj.walk(
self.repo_obj.head.target, pygit2.GIT_SORT_NONE
)
]
# 2 commits: creation - new comment
self.assertEqual(len(commits), 2)
issue = pagure.lib.query.search_issues(
self.session, project, issueid=1
)
self.assertEqual(len(issue.comments), 1)
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_request_from_git")
def test_loading_issue_json(self, up_pr, send):
""" Test loading the JSON file of a ticket. """
project = pagure.lib.query.get_authorized_project(self.session, "test")
issue = pagure.lib.query.search_issues(
self.session, project, issueid=1
)
commits = [
commit.oid.hex
for commit in self.repo_obj.walk(
self.repo_obj.head.target, pygit2.GIT_SORT_NONE
)
]
output = pagure.lib.tasks_services.load_json_commits_to_db(
name="test",
commits=commits,
abspath=self.gitrepo,
data_type="ticket",
agent="pingou",
namespace=None,
username=None,
)
self.assertIsNone(output)
up_pr.assert_not_called()
calls = [
call(
"Good Morning\n\n"
"This is the log of loading all the files pushed in the git "
"repo into\n"
"the database. It should ignore files that are not JSON files,"
" this\nis fine.\n\n"
"Loading: %s -- 1/1 ... ... Done" % issue.uid,
"Issue import report",
"bar@pingou.com",
)
]
self.assertEqual(calls, send.mock_calls)
project = pagure.lib.query.get_authorized_project(self.session, "test")
issue = pagure.lib.query.search_issues(
self.session, project, issueid=1
)
self.assertEqual(len(issue.comments), 1)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
def reverse(num):
    return int(str(num)[::-1])
def main():
    t = int(input())
    for _ in range(t):
        a, b = input().split()
        a = int(a)
        b = int(b)
        # Reverse both operands, add them, then reverse the sum.
        print(reverse(reverse(a) + reverse(b)))
main()
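# Worked example (sketch): for the classic sample input
#   3
#   24 1
#   4358 754
#   305 794
# the program prints 34, 1998 and 1 -- note how the trailing zeros of the
# intermediate sum 1000 disappear when it is reversed back.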
|
"""
Logger
This is a wrapper that handles logs in the system
"""
import os
import sys
from loguru import logger
# configurations for log handling
IS_DEVELOPMENT = os.environ.get("ENV", "development") == "development"
# info log configurations
logger.add(
    sink=sys.stdout,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="INFO",
)
# error logs
logger.add(
    sink=sys.stderr,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="ERROR",
)
# debug logs
logger.add(
    sink=sys.stdout,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="DEBUG",
)
# warning logs
logger.add(
    sink=sys.stdout,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="WARNING",
)
# critical logs
logger.add(
    sink=sys.stderr,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="CRITICAL",
)
# trace logs
logger.add(
    sink=sys.stderr,
    backtrace=IS_DEVELOPMENT,
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    enqueue=True,
    level="TRACE",
)
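# Usage sketch (illustrative only; the module name is assumed): other modules
# would simply import the configured logger, e.g.
#     from logger import logger
#     logger.info("service started")
#     logger.error("something went wrong")
# Note that loguru treats `level` as a minimum threshold per sink, so a single
# INFO record is written by every sink whose level is INFO or lower (here the
# INFO, DEBUG and TRACE sinks), which duplicates output lines.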
|
from jinja2 import Environment, FileSystemLoader
ENV = Environment(loader=FileSystemLoader('.'))
template = ENV.get_template("router_template.j2")
parameters = {
    "hostname": "MELIH1985",
    "loopback10": "10.10.10.10",
    "loopback11": "11.11.11.11",
    "loopback0": "9.9.9.9",
    "vlan8": "8.8.8.8",
}
print(template.render(router=[parameters]))
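# Hedged sketch of what "router_template.j2" might look like; the real template is
# not shown in this script, so this is only an assumed example of how the `router`
# list passed to render() could be consumed:
#     {% for r in router %}
#     hostname {{ r.hostname }}
#     interface Loopback0
#      ip address {{ r.loopback0 }} 255.255.255.255
#     interface Loopback10
#      ip address {{ r.loopback10 }} 255.255.255.255
#     {% endfor %}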
|
def simple_bubble_sort(l):
for j in range(1, len(l))[::-1]:
for i in range(j):
if l[i] > l[i + 1]:
l[i], l[i + 1] = l[i + 1], l[i]
def bubble_sort(l):
j = len(l) - 1
while j:
k = 0
for i in range(j):
if l[i] > l[i + 1]:
l[i], l[i + 1] = l[i + 1], l[i]
k = i
j = k
def _merge_sort(l, left, right):
if right - left > 1:
middle = (left + right + 1) // 2
_merge_sort(l, left, middle)
_merge_sort(l, middle, right)
r = l[middle:right]
while r:
right -= 1
if l[middle - 1] > r[-1]:
middle -= 1
l[right] = l[middle]
if middle == left:
l[left:right] = r
break
else:
l[right] = r.pop()
def merge_sort(l):
_merge_sort(l, 0, len(l))
def simple_insertion_sort(l):
for j in range(1, len(l)):
for i in range(j)[::-1]:
if l[i] > l[i + 1]:
l[i], l[i + 1] = l[i + 1], l[i]
def insertion_sort(l):
for i in range(1, len(l)):
x = l[i]
while i and l[i - 1] > x:
l[i] = l[i - 1]
i -= 1
l[i] = x
def shell_sort(l):
j = 2 * (len(l) // 4) + 1
while True:
for i in range(j, len(l)):
x = l[i]
while i >= j and l[i - j] > x:
l[i] = l[i - j]
i -= j
l[i] = x
if j == 1:
break
j = 2 * ((j - 1) // 4) + 1
def selection_sort(l):
for j in range(1, len(l))[::-1]:
k = j
for i in range(j):
if l[i] > l[k]:
k = i
l[j], l[k] = l[k], l[j]
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Sort lists")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-bs",
"--simple-bubble",
action="store_const",
const=simple_bubble_sort,
help="Use simple bubble sort algorithm",
dest="algorithm",
)
group.add_argument(
"-b",
"--bubble",
action="store_const",
const=bubble_sort,
help="Use bubble sort algorithm",
dest="algorithm",
)
group.add_argument(
"-m",
"--merge",
action="store_const",
const=merge_sort,
help="Use merge sort algorithm",
dest="algorithm",
)
group.add_argument(
"-is",
"--simple-insertion",
action="store_const",
const=simple_insertion_sort,
help="Use simple insertion sort algorithm",
dest="algorithm",
)
group.add_argument(
"-i",
"--insertion",
action="store_const",
const=insertion_sort,
help="Use insertion sort algorithm",
dest="algorithm",
)
group.add_argument(
"-sh",
"--shell",
action="store_const",
const=shell_sort,
help="Use shell sort algorithm",
dest="algorithm",
)
group.add_argument(
"-se",
"--selection",
action="store_const",
const=selection_sort,
help="Use selection sort algorithm",
dest="algorithm",
)
parser.set_defaults(algorithm=merge_sort)
parser.add_argument("list", nargs="+", type=int, help="The list of numbers to sort")
args = parser.parse_args()
args.algorithm(args.list)
print(" ".join(map(str, args.list)))
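# Usage sketch (the file name is assumed for illustration):
#   python sorts.py -i 5 2 9 1    ->  1 2 5 9   (insertion sort)
#   python sorts.py 5 2 9 1       ->  1 2 5 9   (merge sort, the default)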
|
# integration tests
# for unit tests, go to the `tests` folder of each submodule
import time
import pytest
import grpc
import numpy as np
from serving_utils import Client, PredictInput
@pytest.mark.integration
@pytest.mark.asyncio
async def test_client():
test_serving_ports = [
8501,
8502,
8503,
8504,
8505,
8506,
8507,
]
while True:
for serving_port in test_serving_ports:
time.sleep(1)
try:
client = Client(
host="localhost",
port=serving_port,
)
client.predict(None, output_names='wrong_model', model_signature_name='test')
break
except Exception:
continue
else:
break
clients = []
for serving_port in test_serving_ports:
clients.append(Client(
host="localhost",
port=serving_port,
))
# fake model is generated from `train_for_test.py`
model_name = 'test_model'
# test client list_models
for client in clients:
with pytest.raises(grpc.RpcError) as e:
client.list_models()
        assert e.value.code() == grpc.StatusCode.UNIMPLEMENTED
# test client predict correct result
req_data = [
PredictInput(name='a', value=np.int16(2)),
PredictInput(name='b', value=np.int16(3)),
]
output_names = ['c']
expected_output = {'c': 8} # c = a + 2 * b
for client in clients:
actual_output = client.predict(
data=req_data,
output_names=output_names,
model_name=model_name,
model_signature_name='test',
)
assert actual_output == expected_output
actual_output = await client.async_predict(
data=req_data,
output_names=output_names,
model_name=model_name,
model_signature_name='test',
)
assert actual_output == expected_output
# test client predict with simpler format
req_data = {
'a': np.int16(2),
'b': np.int16(3),
}
output_names = ['c']
expected_output = {'c': 8} # c = a + 2 * b
for client in clients:
actual_output = client.predict(
data=req_data,
output_names=output_names,
model_name=model_name,
model_signature_name='test',
)
assert actual_output == expected_output
actual_output = await client.async_predict(
data=req_data,
output_names=output_names,
model_name=model_name,
model_signature_name='test',
)
assert actual_output == expected_output
|
# Find the root of the set that element x belongs to
def find_parent(parent, x):
    # If x is not the root, recursively follow (and compress) the path up to the root
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]
# Union the sets containing the two elements a and b
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
# Read the number of gates
g = int(input())
# Read the number of planes
p = int(input())
parent = [0] * (g + 1)  # Initialize the parent table
# In the parent table, initialize each node's parent to itself
for i in range(1, g + 1):
    parent[i] = i
result = 0
for _ in range(p):
    data = find_parent(parent, int(input()))  # Root of the gate requested by the current plane
    if data == 0:  # If the root is 0, no usable gate remains, so stop
        break
    union_parent(parent, data, data - 1)  # Otherwise union with the gate immediately to the left
result += 1
print(result)
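# Worked example (sketch): with g = 4 gates and p = 3 planes requesting gates
# 4, 1 and 1, the first two planes dock (union 4-3, then 1-0); the third plane's
# request resolves to root 0, so the loop stops and the program prints 2.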
|
import math
from copy import deepcopy
import networkx as nx
import numpy as np
import xxhash
import warnings
budget_eps = 1e-5
class S2VGraph(object):
def __init__(self, g):
self.num_nodes = g.number_of_nodes()
self.node_labels = np.arange(self.num_nodes)
self.all_nodes_set = set(self.node_labels)
x, y = zip(*g.edges())
self.num_edges = len(x)
self.edge_pairs = np.ndarray(shape=(self.num_edges, 2), dtype=np.int32)
self.edge_pairs[:, 0] = x
self.edge_pairs[:, 1] = y
self.edge_pairs = np.ravel(self.edge_pairs)
self.node_degrees = np.array([deg for (node, deg) in sorted(g.degree(), key=lambda deg_pair: deg_pair[0])])
self.first_node = None
self.dynamic_edges = None
def add_edge(self, first_node, second_node):
nx_graph = self.to_networkx()
nx_graph.add_edge(first_node, second_node)
s2v_graph = S2VGraph(nx_graph)
return s2v_graph, 1
def add_edge_dynamically(self, first_node, second_node):
self.dynamic_edges.append((first_node, second_node))
self.node_degrees[first_node] += 1
self.node_degrees[second_node] += 1
return 1
def populate_banned_actions(self, budget=None):
if budget is not None:
if budget < budget_eps:
self.banned_actions = self.all_nodes_set
return
if self.first_node is None:
self.banned_actions = self.get_invalid_first_nodes(budget)
else:
self.banned_actions = self.get_invalid_edge_ends(self.first_node, budget)
def get_invalid_first_nodes(self, budget=None):
return set([node_id for node_id in self.node_labels if self.node_degrees[node_id] == (self.num_nodes - 1)])
def get_invalid_edge_ends(self, query_node, budget=None):
results = set()
results.add(query_node)
existing_edges = self.edge_pairs.reshape(-1, 2)
existing_left = existing_edges[existing_edges[:,0] == query_node]
results.update(np.ravel(existing_left[:,1]))
existing_right = existing_edges[existing_edges[:,1] == query_node]
results.update(np.ravel(existing_right[:,0]))
if self.dynamic_edges is not None:
dynamic_left = [entry[0] for entry in self.dynamic_edges if entry[0] == query_node]
results.update(dynamic_left)
dynamic_right = [entry[1] for entry in self.dynamic_edges if entry[1] == query_node]
results.update(dynamic_right)
return results
def init_dynamic_edges(self):
self.dynamic_edges = []
def apply_dynamic_edges(self):
nx_graph = self.to_networkx()
for edge in self.dynamic_edges:
nx_graph.add_edge(edge[0], edge[1])
return S2VGraph(nx_graph)
def to_networkx(self):
edges = self.convert_edges()
g = nx.Graph()
g.add_edges_from(edges)
return g
def convert_edges(self):
return np.reshape(self.edge_pairs, (self.num_edges, 2))
def display(self, ax=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
nx_graph = self.to_networkx()
nx.draw_shell(nx_graph, with_labels=True, ax=ax)
def display_with_positions(self, node_positions, ax=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
nx_graph = self.to_networkx()
nx.draw(nx_graph, pos=node_positions, with_labels=True, ax=ax)
def draw_to_file(self, filename):
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
fig_size_length = self.num_nodes / 5
figsize = (fig_size_length, fig_size_length)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
self.display(ax=ax)
fig.savefig(filename)
plt.close()
def get_adjacency_matrix(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
nx_graph = self.to_networkx()
            adj_matrix = np.asarray(nx.convert_matrix.to_numpy_array(nx_graph, nodelist=self.node_labels))
return adj_matrix
def copy(self):
return deepcopy(self)
def __repr__(self):
gh = get_graph_hash(self, size=32, include_first=True)
return f"Graph State with hash {gh}"
def get_graph_hash(g, size=32, include_first=False):
if size == 32:
hash_instance = xxhash.xxh32()
elif size == 64:
hash_instance = xxhash.xxh64()
else:
raise ValueError("only 32 or 64-bit hashes supported.")
if include_first:
if g.first_node is not None:
hash_instance.update(np.array([g.first_node]))
else:
hash_instance.update(np.zeros(g.num_nodes))
hash_instance.update(g.edge_pairs)
graph_hash = hash_instance.intdigest()
return graph_hash
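# Illustrative usage sketch (not exercised anywhere in this module): builds an
# S2VGraph from a small networkx graph and hashes it with get_graph_hash above.
def _example_s2v_graph():
    nx_graph = nx.cycle_graph(4)
    graph = S2VGraph(nx_graph)
    graph.populate_banned_actions(budget=1.0)
    return graph.num_nodes, graph.num_edges, get_graph_hash(graph, size=32)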
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Copy primitive: copy files from the host into the container image."""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import os
from .common import container_type
class copy(object):
    """Copy primitive: copy files from the host (or, for Docker, from an
    earlier build stage) into the container image."""
    def __init__(self, **kwargs):
        """Initialize from keyword arguments: `src` (file or list of files),
        `dest` (destination path), and `_from` (Docker-only: copy from a
        previous build stage)."""
#super(copy, self).__init__()
self.dest = kwargs.get('dest', '')
self.src = kwargs.get('src', '')
self.__from = kwargs.get('_from', '') # Docker specific
    def toString(self, ctype):
        """Render the copy for the given container type: a Dockerfile COPY
        instruction or a Singularity %files section."""
if self.dest and self.src:
if ctype == container_type.DOCKER:
# Format:
# COPY src1 \
# src2 \
# src3 \
# dest/
# COPY src dest
copy = ['COPY ']
if self.__from:
copy[0] = copy[0] + '--from={} '.format(self.__from)
if isinstance(self.src, list):
copy[0] = copy[0] + self.src[0]
copy.extend([' {}'.format(x) for x in self.src[1:]])
# Docker requires a trailing slash. Add one if missing.
copy.append(' {}'.format(os.path.join(self.dest, '')))
else:
copy[0] = copy[0] + '{0} {1}'.format(self.src, self.dest)
return ' \\\n'.join(copy)
if ctype == container_type.SINGULARITY:
# Format:
# %files
# src1 dest
# src2 dest
# src3 dest
if self.__from:
logging.warning('The Docker specific "COPY --from" syntax was requested. Singularity does not have an equivalent, so this is probably not going to do what you want.')
# Note: if the source is a file and the destination
# path does not already exist in the container, this
# will likely error. Probably need a '%setup' step to
# first create the directory.
if isinstance(self.src, list):
return '%files\n' + '\n'.join(
[' {0} {1}'.format(x, self.dest) for x in self.src])
else:
return '%files\n {0} {1}'.format(self.src, self.dest)
else:
logging.error('Unknown container type')
return ''
else:
return ''
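# Illustrative usage sketch (not exercised anywhere in this module): renders the
# same copy primitive for both container types; the paths are illustrative only.
def _example_copy_usage():
    c = copy(src=['a.txt', 'b.txt'], dest='/opt/data')
    docker_str = c.toString(container_type.DOCKER)            # multi-line COPY instruction
    singularity_str = c.toString(container_type.SINGULARITY)  # %files section
    return docker_str, singularity_str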
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abstract: Operations on earthquake data as examples for how to use the core concept 'event'
Use Cases:
- get all locations of earthquakes with a magnitude of 4 or higher
- get all earthquakes which origin is 100 m or deeper
- get all earthquakes from 12/01/2014 00:00:00 - 12/6/2014 23:59:59
- get all earthquakes in Alaska
Provided data:
CSV file with all global earthquake events for December 2014.
The fields for each earthquake are:
time,latitude,longitude,depth,mag,magType,nst,gap,dmin,rms,net,id,updated,place,type
"""
__author__ = "Marc Tim Thiemann"
__copyright__ = "Copyright 2014"
__credits__ = ["Marc Tim Thiemann"]
__license__ = ""
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__date__ = "January 2015"
__status__ = "Development"
import sys
sys.path = [ '.', '../..' ] + sys.path
from utils import _init_log
from events import *
import dateutil.parser
from datetime import *
import csv
log = _init_log("example-1")
f = open('../data/events/earthquake_data.csv')
csv_f = csv.reader(f)
events = []
for row in csv_f:
properties = { 'latitude': row[1],
'longitude': row[2],
'depth': row[3],
'mag': row[4],
'magType': row[5],
'nst': row[6],
'gap': row[7],
'dmin': row[8],
'rms': row[9],
'net': row[10],
'id': row[11],
'updated': row[12],
'place': row[13],
'type': row[14]}
dt = dateutil.parser.parse(row[0], fuzzy = True, ignoretz = True)
events.append(PyEvent((dt, dt), properties))
print 'Get all locations of earthquakes with a magnitude of 4 or higher during December 2014'
locations = []
for e in events:
    if(float(e.get('mag')) >= 4):
locations.append((e.get('latitude'), e.get('longitude')))
print 'Get all earthquakes from 12/01/2014 00:00:00 - 12/6/2014 23:59:59'
earthquakesFirstSevenDays = []
for e in events:
    if(e.during((datetime(2014, 12, 1, 0, 0, 0), datetime(2014, 12, 6, 23, 59, 59)))):
earthquakesFirstSevenDays.append(e)
print 'Get all earthquakes in Alaska during December 2014'
earthquakesInAlaska = []
for e in events:
if "Alaska" in e.get('place'):
earthquakesInAlaska.append(e)
print 'Get all earthquakes which origin is 100 m or deeper'
deepEarthQuakes = []
for e in events:
depth = 0.0
try:
depth = float(e.get('depth'))
    except (TypeError, ValueError):
print 'Not a Number!'
if(depth >= 100):
deepEarthQuakes.append(e)
|
from ..api import get_many, ApiVersion, int_field, bool_field, date_field
from datetime import date
from typing import Iterator, List, Optional
class OckovaciZarizeni:
    """ Očkovací zařízení (vaccination facilities)
    This dataset provides the list of vaccination facilities in the Czech Republic as a
    complement to the list of vaccination sites where COVID-19 vaccines are administered.
    It mainly covers general practitioners, but also other facilities where vaccination
    takes place.
    Attributes
    ----------
    zarizeni_kod: str
        Code of the healthcare facility where vaccination is performed (according to the
        register of healthcare providers, at the level of individual facilities).
    zarizeni_nazev: str
        Name of the healthcare facility where vaccination is performed.
    provoz_zahajen: bool
        Flag indicating whether vaccination has started at the facility.
    kraj_nuts_kod: str
        Identifier of the region (NUTS 3 classification) in which the vaccination site is located.
    kraj_nazev: str
        Name of the region in which the vaccination site is located.
    okres_lau_kod: str
        Identifier of the district (LAU 1 classification) in which the vaccination facility is located.
    okres_nazev: str
        Name of the district in which the vaccination facility is located.
    zrizovatel_kod: int
        Code of the facility's founder according to the ÚZIS register.
    zrizovatel_nazev: str
        Name of the facility's founder.
    provoz_ukoncen: date
        Date on which vaccination at the facility ended.
    prakticky_lekar: bool
        Flag indicating whether the facility is a general practitioner.
    """
    def __init__(self, line: List[str]):
self.zarizeni_kod: str = line[0]
self.zarizeni_nazev: str = line[1]
self.provoz_zahajen: bool = bool_field(line[2])
self.kraj_nuts_kod: str = line[3]
self.kraj_nazev: str = line[4]
self.okres_lau_kod: str = line[5]
self.okres_nazev: str = line[6]
self.zrizovatel_kod: int = int_field(line[7])
self.zrizovatel_nazev: str = line[8]
self.provoz_ukoncen: Optional[date] = date_field(line[9])
self.prakticky_lekar: bool = bool_field(line[10])
@staticmethod
    def get(cache_dir: Optional[str]) -> Iterator['OckovaciZarizeni']:
return get_many('ockovaci-zarizeni', OckovaciZarizeni, ApiVersion.V2, cache_dir)
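# Illustrative usage sketch (not part of the dataset wrapper itself): iterates the
# dataset and counts facilities run by general practitioners; passing cache_dir=None
# is assumed to mean "do not cache locally".
def _example_count_general_practitioners() -> int:
    return sum(1 for z in OckovaciZarizeni.get(cache_dir=None) if z.prakticky_lekar)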
|
"""
Copyright (c) 2015-2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import pytest
from flexmock import flexmock
import datetime
import random
import sys
import json
from osbs.build.user_params import BuildUserParams
from osbs.exceptions import OsbsValidationException
from osbs.constants import BUILD_TYPE_WORKER, REACTOR_CONFIG_ARRANGEMENT_VERSION
from tests.constants import (TEST_COMPONENT, TEST_FILESYSTEM_KOJI_TASK_ID,
TEST_GIT_BRANCH, TEST_GIT_REF, TEST_GIT_URI,
TEST_IMAGESTREAM, TEST_KOJI_TASK_ID, TEST_USER)
class TestBuildUserParams(object):
def get_minimal_kwargs(self):
return {
# Params needed to avoid exceptions.
'user': TEST_USER,
'base_image': 'base_image',
'name_label': 'name_label',
'git_uri': TEST_GIT_URI,
'build_from': 'image:buildroot:latest',
}
def test_v2_spec_name2(self):
kwargs = self.get_minimal_kwargs()
kwargs.update({
'git_uri': TEST_GIT_URI,
'git_branch': TEST_GIT_BRANCH,
})
spec = BuildUserParams()
spec.set_params(**kwargs)
assert spec.name.value.startswith('path-master')
@pytest.mark.parametrize('rand,timestr', [
('12345', '20170501123456'),
('67890', '20170731111111'),
])
@pytest.mark.parametrize(('platform'), (
('x86_64'),
(None),
))
def test_v2_image_tag(self, rand, timestr, platform):
kwargs = self.get_minimal_kwargs()
kwargs.update({
'component': 'foo',
'koji_target': 'tothepoint',
})
if platform:
kwargs['platform'] = platform
(flexmock(sys.modules['osbs.build.spec'])
.should_receive('utcnow').once()
.and_return(datetime.datetime.strptime(timestr, '%Y%m%d%H%M%S')))
(flexmock(random)
.should_receive('randrange').once()
.with_args(10**(len(rand) - 1), 10**len(rand))
.and_return(int(rand)))
spec = BuildUserParams()
spec.set_params(**kwargs)
img_tag = '{user}/{component}:{koji_target}-{random_number}-{time_string}'
if platform:
img_tag += '-{platform}'
img_tag = img_tag.format(random_number=rand, time_string=timestr, **kwargs)
assert spec.image_tag.value == img_tag
def test_user_params_bad_json(self):
required_json = json.dumps({
'arrangement_version': 6,
'customize_conf': 'prod_customize.json',
'git_ref': 'master'
}, sort_keys=True)
spec = BuildUserParams()
spec.from_json(None)
assert spec.to_json() == required_json
spec.from_json("")
assert spec.to_json() == required_json
assert '{0}'.format(spec)
@pytest.mark.parametrize(('missing_arg'), (
'name_label',
'base_image',
))
def test_user_params_bad_none_flatpak(self, missing_arg):
kwargs = self.get_minimal_kwargs()
kwargs['flatpak'] = False
kwargs.pop(missing_arg)
spec = BuildUserParams()
with pytest.raises(OsbsValidationException):
spec.set_params(**kwargs)
def test_user_params_bad_compose_ids(self):
kwargs = self.get_minimal_kwargs()
kwargs['compose_ids'] = True
spec = BuildUserParams()
with pytest.raises(OsbsValidationException):
spec.set_params(**kwargs)
@pytest.mark.parametrize(('signing_intent', 'compose_ids', 'yum_repourls', 'exc'), (
('release', [1, 2], ['http://example.com/my.repo'], OsbsValidationException),
('release', [1, 2], None, OsbsValidationException),
(None, [1, 2], ['http://example.com/my.repo'], OsbsValidationException),
('release', None, ['http://example.com/my.repo'], None),
('release', None, None, None),
(None, [1, 2], None, None),
(None, None, ['http://example.com/my.repo'], None),
(None, None, None, None),
))
def test_v2_compose_ids_and_signing_intent(self, signing_intent, compose_ids, yum_repourls,
exc):
kwargs = self.get_minimal_kwargs()
if signing_intent:
kwargs['signing_intent'] = signing_intent
if compose_ids:
kwargs['compose_ids'] = compose_ids
if yum_repourls:
kwargs['yum_repourls'] = yum_repourls
kwargs.update({
'git_uri': 'https://github.com/user/reponame.git',
'git_branch': 'master',
})
spec = BuildUserParams()
if exc:
with pytest.raises(exc):
spec.set_params(**kwargs)
else:
spec.set_params(**kwargs)
if yum_repourls:
assert spec.yum_repourls.value == yum_repourls
if signing_intent:
assert spec.signing_intent.value == signing_intent
if compose_ids:
assert spec.compose_ids.value == compose_ids
def test_v2_all_values_and_json(self):
# all values that BuildUserParams stores
param_kwargs = {
# 'arrangement_version': self.arrangement_version, # calculated value
'base_image': 'buildroot:old',
# 'build_from': 'buildroot:old', # only one of build_*
# 'build_json_dir': self.build_json_dir, # init paramater
'build_image': 'buildroot:latest',
# 'build_imagestream': 'buildroot:name_label',
'build_type': BUILD_TYPE_WORKER,
'component': TEST_COMPONENT,
'compose_ids': [1, 2],
'filesystem_koji_task_id': TEST_FILESYSTEM_KOJI_TASK_ID,
'flatpak': False,
            # 'flatpak_base_image': self.flatpak_base_image, # not used with false flatpak
'git_branch': TEST_GIT_BRANCH,
'git_ref': TEST_GIT_REF,
'git_uri': TEST_GIT_URI,
'image_tag': 'user/None:none-0-0',
'imagestream_name': TEST_IMAGESTREAM,
'isolated': False,
'koji_parent_build': 'fedora-26-9',
'koji_target': 'tothepoint',
# 'name': self.name, # calculated value
'platform': 'x86_64',
'platforms': ['x86_64', ],
'reactor_config_map': 'reactor-config-map',
'reactor_config_override': 'reactor-config-override',
'release': '29',
'scratch': False,
'signing_intent': False,
'task_id': TEST_KOJI_TASK_ID,
'trigger_imagestreamtag': 'base_image:latest',
'user': TEST_USER,
# 'yum_repourls': , # not used with compose_ids
}
# additional values that BuildUserParams requires but stores under different names
param_kwargs.update({
'name_label': 'name_label',
})
rand = '12345'
timestr = '20170731111111'
(flexmock(sys.modules['osbs.build.spec'])
.should_receive('utcnow').once()
.and_return(datetime.datetime.strptime(timestr, '%Y%m%d%H%M%S')))
(flexmock(random)
.should_receive('randrange').once()
.with_args(10**(len(rand) - 1), 10**len(rand))
.and_return(int(rand)))
build_json_dir = 'inputs'
spec = BuildUserParams(build_json_dir)
spec.set_params(**param_kwargs)
expected_json = {
"arrangement_version": REACTOR_CONFIG_ARRANGEMENT_VERSION,
"base_image": "buildroot:old",
"build_image": "buildroot:latest",
"build_json_dir": build_json_dir,
"build_type": "worker",
"component": TEST_COMPONENT,
"compose_ids": [1, 2],
"customize_conf": "prod_customize.json",
"filesystem_koji_task_id": TEST_FILESYSTEM_KOJI_TASK_ID,
"git_branch": TEST_GIT_BRANCH,
"git_ref": TEST_GIT_REF,
"git_uri": TEST_GIT_URI,
"image_tag": "{0}/{1}:tothepoint-{2}-{3}-x86_64".format(TEST_USER, TEST_COMPONENT,
rand, timestr),
"imagestream_name": "name_label",
"koji_parent_build": "fedora-26-9",
"koji_target": "tothepoint",
"name": "path-master-cd1e4",
"platform": "x86_64",
"platforms": ["x86_64"],
"reactor_config_map": "reactor-config-map",
"reactor_config_override": "reactor-config-override",
"release": "29",
"trigger_imagestreamtag": "buildroot:old",
"user": TEST_USER
}
assert spec.to_json() == json.dumps(expected_json, sort_keys=True)
spec2 = BuildUserParams()
spec2.from_json(spec.to_json())
assert spec2.to_json() == json.dumps(expected_json, sort_keys=True)
|
import base64
from aqt.webview import AnkiWebView
def img_div(content, height):
b64_img = str(base64.b64encode(content), 'utf-8')
style = f"max-height:{height};width:auto"
    return f'<img id="graph" style="{style}" src="data:image/png;base64,{b64_img}">'
def error_div(detail: str) -> str:
return f"""
<div style="color: #ba3939; background: #ffe0e0; border: 1px solid #a33a3a; padding: 10px; margin: 10px;">
<b>Error</b>: {detail}
</div>
"""
def display_html(div_content: str, web: AnkiWebView, close_button: bool = True):
"""
Display the given HTML content at the beginning of the card's view, with a button to close.
"""
div = generate_addon_div(div_content, close_button=close_button)
inject_addon_div(div, web)
def generate_addon_div(div_content: str, close_button: bool = True):
close = f"""
<button
type="button" style="float: right" class="close"
onclick="$(this).parent().remove();">
×
</button>"""
div = f"""
<div id="onsei" style="overflow: hidden">
{close if close_button else ""}
{div_content}
</div>
""".replace("\n", "") # Need to remove newlines for the replace below to work
return div
def inject_addon_div(div: str, web: AnkiWebView) -> None:
script = f"""
if ($("#onsei").length > 0)
$('#onsei').replaceWith('{div}');
else
$('body').prepend('{div}');
"""
web.eval(script)
def remove_addon_div(web: AnkiWebView) -> None:
script = f"""
if ($("#onsei").length > 0) $("#onsei").remove();
"""
web.eval(script)
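# A minimal usage sketch (hypothetical note text; only the helpers defined above are
# assumed). generate_addon_div() needs no live webview, while display_html()
# additionally requires an AnkiWebView such as mw.reviewer.web in a running profile.
if __name__ == "__main__":
    print(generate_addon_div(error_div("No recording found for this note")))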
|
from setuptools import find_packages, setup
setup(
name='blns',
version='0.1.7',
url='https://github.com/danggrianto/big-list-of-naughty-strings',
license='MIT',
author='Daniel Anggrianto',
author_email='daniel@anggrianto.com',
    description='Big List of Naughty Strings. Forked from https://github.com/minimaxir/big-list-of-naughty-strings',
    keywords='Big List of Naughty Strings',
packages=['blns'],
platforms='any',
)
|
from django import template
register = template.Library()
@register.filter(name='ordinal')
def ordinal(value):
""" Cardinal to ordinal conversion for the edition field """
    try:
        digit = int(value)
    except (ValueError, TypeError):
        return value.split(' ')[0]
if digit < 1:
return digit
if digit % 100 == 11 or digit % 100 == 12 or digit % 100 == 13:
return value + 'th'
elif digit % 10 == 3:
return value + 'rd'
elif digit % 10 == 2:
return value + 'nd'
elif digit % 10 == 1:
return value + 'st'
else:
return value + 'th'
@register.filter(name='author_join')
def author_join(value):
    d = u', '
    last = u', and '
    two = u' and '
if len(value) == 0:
return ''
elif len(value) == 1:
return value[0]
elif len(value) == 2:
return value[0] + two + value[1]
elif len(value) == 3:
return d.join(value[:-1]) + last + value[-1]
else:
return d.join(value[:3]) + ' <em>et al.</em>'
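# A minimal usage sketch (hypothetical values, for illustration only). In a template
# these filters are applied as {{ edition|ordinal }} and {{ authors|author_join }};
# the registered functions can also be called directly:
if __name__ == '__main__':
    assert ordinal('3') == '3rd'
    assert ordinal('11') == '11th'
    assert author_join([u'Knuth', u'Graham', u'Patashnik']) == u'Knuth, Graham, and Patashnik'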
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphplannerBucket(Model):
"""MicrosoftgraphplannerBucket.
:param id:
:type id: str
:param name:
:type name: str
:param plan_id:
:type plan_id: str
:param order_hint:
:type order_hint: str
:param tasks:
:type tasks: list[~users.models.MicrosoftgraphplannerTask]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'order_hint': {'key': 'orderHint', 'type': 'str'},
'tasks': {'key': 'tasks', 'type': '[MicrosoftgraphplannerTask]'},
}
def __init__(self, id=None, name=None, plan_id=None, order_hint=None, tasks=None):
super(MicrosoftgraphplannerBucket, self).__init__()
self.id = id
self.name = name
self.plan_id = plan_id
self.order_hint = order_hint
self.tasks = tasks
|
from scout.protocol import *
from scout.SteerByWire import *
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
n=int(input())
a=sorted(map(int,input().split()))
print(min([sum(abs(x-y)for x,y in zip(a,range(i,n+1,2)))for i in[1,2]]))
|
from ...os_v3_hek.defs.matg import *
|
"""
attempt two at kata found here
https://www.codewars.com/kata/the-road-kill-detective/train/python
"""
def road_kill(photo):
ANIMALS = ['aardvark', 'alligator', 'armadillo', 'antelope', 'baboon', 'bear', 'bobcat', 'butterfly', 'cat', 'camel', 'cow', 'chameleon', 'dog', 'dolphin', 'duck', 'dragonfly', 'eagle', 'elephant', 'emu', 'echidna', 'fish', 'frog', 'flamingo', 'fox', 'goat', 'giraffe', 'gibbon', 'gecko', 'hyena', 'hippopotamus', 'horse', 'hamster', 'insect', 'impala', 'iguana', 'ibis', 'jackal', 'jaguar', 'jellyfish', 'kangaroo', 'kiwi', 'koala', 'killerwhale', 'lemur', 'leopard', 'llama', 'lion', 'monkey', 'mouse', 'moose', 'meercat', 'numbat', 'newt', 'ostrich', 'otter', 'octopus', 'orangutan', 'penguin', 'panther', 'parrot', 'pig', 'quail', 'quokka', 'quoll', 'rat', 'rhinoceros', 'racoon', 'reindeer', 'rabbit', 'snake', 'squirrel', 'sheep', 'seal', 'turtle', 'tiger', 'turkey', 'tapir', 'unicorn', 'vampirebat', 'vulture', 'wombat', 'walrus', 'wildebeast', 'wallaby', 'yak', 'zebra']
ANIMALS_REVERSED = []
for animal in ANIMALS:
ANIMALS_REVERSED.append(animal[::-1])
def test_photo(photo, animal):
test = False
animal_index = 0
for character in photo:
if character == "=":
pass
elif animal_index < len(animal) and character == animal[animal_index]:
test = True
animal_index += 1
            elif animal_index > 0 and character == animal[animal_index - 1]:
pass
else:
test = False
break
        if test and animal_index == len(animal):
return True
return False
for animal in ANIMALS:
test = test_photo(photo, animal)
if test:
return animal
for animal in ANIMALS_REVERSED:
test = test_photo(photo, animal)
if test:
return animal[::-1]
return "??"
photo = "==========h===yyyyyy===eeee=n==a========"
#photo = "==unnn==Lii===cccccooor==nn"
#photo = "==========h===yyyyyy===eeee=n==a===b====="
#photo = "=====r=rrr=rra=====eee======bb====b======="
print(road_kill(photo))
|
n = int(input())
a = [int(i) for i in input().split()]
a_with_index = []
for i, v in enumerate(a):
a_with_index.append((v, str(i + 1)))
print(' '.join(v[1] for v in sorted(a_with_index)))
|
print ('Enter Your Name')
person = input()
print ('Hello ' + person)
|
from lib2to3 import fixer_base, pytree, patcomp
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Leaf
"""
Fixes:
str
into:
unicode
"""
class FixStr(fixer_base.BaseFix):
PATTERN = """
'str'
"""
def transform(self, node, results):
node.value = 'unicode'
return node
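# A minimal usage sketch (the dotted path 'myfixes.fix_str' below is hypothetical;
# lib2to3 resolves fixers by module name and derives the class name FixStr from it):
#
#     from lib2to3.refactor import RefactoringTool
#     tool = RefactoringTool(['myfixes.fix_str'])
#     print(tool.refactor_string('x = str(42)\n', '<example>'))  # -> x = unicode(42)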
|
import datetime
import logging
from shapely.wkt import loads
logger = logging.getLogger(__name__)
def create_s1_product_specs(product_type='*', polarisation='*', beam='*'):
'''A helper function to create a scihub API compliant product specs string
Args:
product_type (str): the product type to look at
        polarisation (str): the polarisation mode(s) to look for
        beam (str): the beam mode(s) to look for
Returns:
str: Copernicus' scihub compliant product specs string
Notes:
Default values for all product specifications is the *-character,
meaning to look for all kinds of data by default.
'''
# bring product type, polarisation and beam to query format
if product_type == 'GRD':
product_type = 'GRD_HD,GRD_MD,GRD_MS,GRD_HS'
if polarisation == 'VV,VH' or polarisation == 'VV VH':
polarisation = 'VV%2BVH'
    if polarisation == 'HH,HV' or polarisation == 'HH HV':
polarisation = 'HH%2BHV'
if polarisation == '*':
polarisation = 'VV%2BVH'
product_type = "processinglevel={}".format(product_type)
polarisation = "polarization={}".format(polarisation)
beam = "beamSwath={}".format(beam)
return '{}&{}&{}'.format(
product_type, polarisation, beam
)
def create_query(aoi, toi, product_specs):
'''A helper function to create a scihub API compliant query
Args:
aoi (str): a Copernicus scihub compliant AOI string
toi (str): a Copernicus scihub compliant TOI string
product_specs (str): a Copernicus scihub compliant product specs string
Returns:
str: Copernicus' scihub compliant query string (i.e. OpenSearch query)
            formatted as URL query parameters
'''
# construct the final query
satellite = 'platform=SENTINEL-1'
aoi = aoi.replace(' ', '%20')
output = 'output=jsonlite'
query = '{}&{}&{}&{}&{}'.format(satellite, product_specs, aoi, toi, output)
return query
def create_aoi_str(aoi_wkt):
'''A helper function to create a scihub API compliant AOI string
Args:
aoi (str): is WKT representation of the Area Of Interest
Returns:
str: Copernicus' scihub compliant AOI string
'''
geom = loads(aoi_wkt)
if geom.geom_type == 'Point':
aoi_str = "intersectsWith=({}, {})".format(geom.y, geom.x)
else:
# simplify geometry
aoi_convex = geom.convex_hull
# create scihub-confrom aoi string
aoi_str = 'intersectsWith={}'.format(aoi_convex)
return aoi_str
def create_toi_str(start='2014-10-01',
end=datetime.datetime.now().strftime("%Y-%m-%d")
):
'''A helper function to create a scihub API compliant TOI string
Args:
start (str): the start date of the Time Of Interest represented by
a string of a YYYY-MM-DD format string
end (str): the end date of the Time Of Interest represented by
a string of a YYYY-MM-DD format string
Returns:
str: Copernicus' scihub compliant TOI string
'''
# bring start and end date to query format
start = '{}T00:00:01Z'.format(start)
end = '{}T23:59:00Z'.format(end)
toi = ('start={}&end={}'.format(start, end,))
return toi
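# A minimal usage sketch (the AOI and TOI values below are hypothetical, for
# illustration only): compose a full query string from the helpers defined above.
if __name__ == '__main__':
    aoi = create_aoi_str('POINT (24.94 60.17)')
    toi = create_toi_str('2020-01-01', '2020-01-31')
    specs = create_s1_product_specs(product_type='GRD', polarisation='VV,VH')
    print(create_query(aoi, toi, specs))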
|
import json
import sys
import re
from difflib import SequenceMatcher
class HelsinkiGeocoder():
def __init__(self, addr_datafile, filter_func=None):
self.filter_func = filter_func
self.load_data(addr_datafile)
        self.pattern = re.compile(r"^([^\d^/]*)(\s*[-\d]*)(\s*[A-ö]*)")
def argsort(self, seq):
# this is to easily find the best candidate of the fuzzy sequence matching
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
def load_data(self, addr_datafile):
try:
self.addresses = json.load(
open(addr_datafile, 'r',
encoding='iso-8859-15'))['features']
        except (OSError, ValueError, KeyError):
            sys.exit('Couldn\'t read the address data. Is it GeoJSON?')
# construct a dict for searching: top level keys are street names
# value for a street name is a dict: keys are house numbers on that street
self.addr_dict = {}
for place in self.addresses:
# Skip places filter returns false for
if self.filter_func and not self.filter_func(place):
continue
street = place['properties']['katunimi'].lower()
#if key for current street doesn't exist, add it
if street not in self.addr_dict.keys():
self.addr_dict[street.lower()] = {}
#there are a couple of addresses with number None...index those by 0
number = place['properties']['osoitenumero_teksti']
number = number.lower() if number else '0'
self.addr_dict[street][number] = place
def geocode(self, address):
lookup_addr = address.strip().lower()
result = None
# Get address-looking components from the input string
hits = re.search(self.pattern, lookup_addr)
street_name = hits.group(1).strip() #strip the trailing space
building_number = hits.group(2).strip() if hits.group(2) else '0'
next_letter = hits.group(3).strip() if hits.group(3) else None
# === Start searching for a match for the input address ===
# Try to find an exact match for the street name component
try:
street = self.addr_dict[street_name]
# Nothing matching the exact street name found so try fuzzy matching
except KeyError:
all_streets = self.addr_dict.keys()
# filter the streetnames by identical initial letters for performance
candidates = [name for name in all_streets if name[:2] == street_name[:2]]
if not candidates:
# there is nothing that matches even the first 2 letters...
return None
# compute fuzzy matches
ratios = [SequenceMatcher(lambda x: x == " ", street_name, candidate).ratio() for candidate in candidates]
best_index = self.argsort(ratios)[-1]
best_match = candidates[best_index]
# only continue the search if similarity ratio exceeds ARBITRARY threshold
# this just barely allows "hakaniemen halli" to be matched to "hakaniemen kauppahalli"
if ratios[best_index] < 0.84:
print(street_name.encode('ascii', errors='ignore'))
return None
street = self.addr_dict[best_match]
# == Now that we found the street, match cases descending in accuracy ==
# 1. Try to match the exact building number
if building_number in street:
return street[building_number]
# 2. Try to match building number + proceeding letter ("10a" is valid but could by typoed as "10 A")
elif next_letter:
building_numbertext = ''.join([building_number, next_letter.lower()])
try:
return street[building_numbertext]
except KeyError:
pass
# 3. Case "10-12": exact matching didnt work, so try with only the first part of the dashed number pair
if '-' in building_number:
try:
return street[building_number.split('-')[0]]
except KeyError:
pass
# All options exhausted, didn't find anything :(
return None
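# A minimal usage sketch (the file name and address below are hypothetical, for
# illustration only): geocode a single address against a Helsinki address GeoJSON file.
if __name__ == '__main__':
    geocoder = HelsinkiGeocoder('helsinki_addresses.geojson')
    feature = geocoder.geocode('Hakaniemen kauppahalli 1')
    if feature:
        print(feature['properties']['katunimi'], feature['geometry'])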
|
#!/usr/bin/env python
from argparse import ArgumentParser
from pathlib import Path
from shutil import move
from sys import stderr
import regex as re
remap_dict = {}
remap_dict['cross-lingual'] = {}
remap_dict['cross-lingual']['e'] = 'i'
remap_dict['cross-lingual']['ʋ'] = 'v'
parser = ArgumentParser(
description="Filter an IPA text file to remap OOV IPA."
" Used to avoid inconsistency in training/eval."
)
parser.add_argument("--text")
parser.add_argument(
"-d", "--data-dir", help="Path to Kaldi data directory with text file."
)
parser.add_argument(
"-e", "--expname", help="Experiment name for choosing remap_dict"
)
args = parser.parse_args()
text_path = Path(args.text)
norm_text_path = text_path.with_suffix(".remapped")
cur_remap = remap_dict[str(args.expname)]
# Perform IPA remapping
with open(text_path) as fin, open(norm_text_path, "w") as fout:
for idx, line in enumerate(fin):
ret = line.strip().split(sep = " ", maxsplit=1)
if len(ret) == 1:
continue
key = ret[0]
text = ret[1].strip()
if 'Bulgarian' in str(text_path):
text = text.replace('∅','')
for symb in cur_remap.keys():
text = text.replace(symb, cur_remap[symb])
text = re.sub(' +', ' ', text)
print(key, text, file=fout)
backup_path = text_path.with_suffix(".remapped.bak")
move(text_path, backup_path)
move(norm_text_path, text_path)
|
# This is a script for generating all text representations in the ./DATA file.
# This is done to speed up the code.
from all_representations import all_text_representation
from clusters import cluster, DA_icsi_list
# All scenario based extractive summaries in AMI
DA_ami_list = [i+1 for i in range(151)]
for j in [9, 24, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 150]:
DA_ami_list.remove(j)
# You do not need to run this code unless you add a new corpus.
if __name__ == '__main__':
print('*****************************')
for veci in [ "POS"]:#"word2vec", "POS+word2vec", "lexical", "POS"
for num_taxonomy in [3]: #, 4, 5, 7]:
for element in ['icsi']: #''ami']:
text_rep = all_text_representation(corpus=element, text_to_vector_type=veci,n_taxonomy=num_taxonomy, with_label= True )
text_rep.save_abreviated_labels()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 11:25:42 2020
@author: ted
"""
import numpy as np
import pandas as pd
import os
import xarray as xr
dirname = os.path.dirname(__file__)
ARfilepath = os.path.join(dirname, 'ars_6h_daily_0to70S_100Eto120W_025025_197909to201808')
Rainfall_path = os.path.join(dirname, 'ClimateDriverRainfallAndARdateData/Rainfall')
os.chdir(Rainfall_path)
f = pd.read_csv('644_station_with_group.csv')
Station_List = f['Agent']
x1 = f['x1Index']
x2 = f['x2Index']
y1 = f['y1Index']
y2 = f['y2Index']
os.chdir(ARfilepath)
IVTfile = ["era5_6h_daily_0to70S_100Eto120W_025025_ivt_%04d%02d%04d%02d.nc" % (Year, 9, Year+1, 8) for Year in range(1950,2020) ]
ARfile = ["ar_%04d%02d%04d%02d_out.nc" % (Year, 9, Year+1, 8)for Year in range(1950,2020)]
fi = xr.open_mfdataset(IVTfile,concat_dim='time')
fa = xr.open_mfdataset(ARfile,concat_dim='time')
#%%
### Extract IVT,IVTX,IVTY, compute direction, and AR presence (represented by Shape) for grid point
def GetARivtForPoint(fi,fa,x,y):
Shape = fa.shape[:,y,x]
IVT = fi.ivt[:,y,x]
IVTX = fi.ivtx[:,y,x]
IVTY = fi.ivty[:,y,x]
dire = np.arctan2(IVTX,IVTY)*180/np.pi
return IVT, Shape,dire
def GetAllARdata(fa,Point1,Point2,Point3,Point4):
Datetime = fa.time
### Try to convert xarray Dataarray to make up a dataframe, but it is too slow, size of each array is 10,2272
ivt1 = Point1[0].to_masked_array()
ivt2 = Point2[0].to_masked_array()
ivt3 = Point3[0].to_masked_array()
ivt4 = Point4[0].to_masked_array()
dire1 = Point1[2].to_masked_array()
dire2 = Point2[2].to_masked_array()
dire3 = Point3[2].to_masked_array()
dire4 = Point4[2].to_masked_array()
shape1 = Point1[1].to_masked_array()
shape2 = Point2[1].to_masked_array()
shape3 = Point3[1].to_masked_array()
shape4 = Point4[1].to_masked_array()
d = {'Date':Datetime,'IVT1':ivt1,'IVT2':ivt2,'IVT3': ivt3, 'IVT4':ivt4,
'Shape1':shape1,'Shape2': shape2, 'Shape3':shape3,'Shape4': shape4,
'dire1': dire1,'dire2': dire2,'dire3': dire3, 'dire4':dire4}
Alldata = pd.DataFrame(data=d)
Alldata.dropna(inplace=True)
Alldata['MeanIVT']=Alldata[['IVT1','IVT2','IVT3','IVT4']].mean(axis=1)
Alldata['MeanDir']=Alldata[['dire1','dire2','dire3','dire4']].mean(axis=1)
Alldata['MeanDirM']=np.where(Alldata.MeanDir<0,Alldata.MeanDir+360,Alldata.MeanDir)
AllARdata_path = os.path.join(dirname, 'AR_date_arealMean')
os.chdir(AllARdata_path)
WriteFile = str(Station_List[i]) + '_AR_date_arealMean.csv'
Alldata.to_csv(WriteFile,index=False)
for i in range(len(Station_List)):
os.chdir(ARfilepath)
Point1 = GetARivtForPoint(fi,fa,x1[i],y1[i])
Point2 = GetARivtForPoint(fi,fa,x1[i],y2[i])
Point3 = GetARivtForPoint(fi,fa,x2[i],y1[i])
Point4 = GetARivtForPoint(fi,fa,x2[i],y2[i])
All = GetAllARdata(fa,Point1,Point2,Point3,Point4)
|
# -*- coding: utf-8 -*-
#
# rtk.tests.fmea.TestCause.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
"""Test class for testing the Cause class."""
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))) + "/rtk", )
import unittest
from nose.plugins.attrib import attr
from sqlalchemy.orm import scoped_session
from treelib import Tree
import Utilities as Utilities
from Configuration import Configuration
from analyses.fmea import dtmCause
from dao import DAO
from dao import RTKCause
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2014 - 2017 Andrew "weibullguy" Rowland'
class TestCauseDataModel(unittest.TestCase):
"""Class for testing the Cause model class."""
def setUp(self):
"""Set up the test fixture for the Cause model class."""
self.Configuration = Configuration()
self.Configuration.RTK_BACKEND = 'sqlite'
self.Configuration.RTK_PROG_INFO = {'host' : 'localhost',
'socket' : 3306,
'database': '/tmp/TestDB.rtk',
'user' : '',
'password': ''}
self.Configuration.DEBUG_LOG = \
Utilities.create_logger("RTK.debug", 'DEBUG', '/tmp/RTK_debug.log')
self.Configuration.USER_LOG = \
Utilities.create_logger("RTK.user", 'INFO', '/tmp/RTK_user.log')
# Create a data access object and connect to a test database.
self.dao = DAO()
_database = self.Configuration.RTK_BACKEND + ':///' + \
self.Configuration.RTK_PROG_INFO['database']
self.dao.db_connect(_database)
self.DUT = dtmCause(self.dao)
@attr(all=True, unit=True)
def test00_cause_create(self):
"""(TestCauseModel) __init__ should return instance of Cause data model."""
self.assertTrue(isinstance(self.DUT, dtmCause))
self.assertEqual(self.DUT.last_id, None)
@attr(all=True, unit=True)
def test01a_select_all(self):
"""(TestCauseModel): select_all() should return a Tree() object populated with RTKCause instances on success."""
_tree = self.DUT.select_all(1)
self.assertTrue(isinstance(_tree, Tree))
self.assertTrue(isinstance(_tree.get_node(1).data, RTKCause))
@attr(all=True, unit=True)
def test02a_select(self):
"""(TestCauseModel): select() should return an instance of the RTKCause data model on success."""
self.DUT.select_all(1)
_cause = self.DUT.select(1)
self.assertTrue(isinstance(_cause, RTKCause))
self.assertEqual(_cause.cause_id, 1)
self.assertEqual(_cause.description, 'Test Failure Cause #1')
@attr(all=True, unit=True)
def test02b_select_non_existent_id(self):
"""(TestCauseModel): select() should return None when a non-existent Cause ID is requested."""
_cause = self.DUT.select(100)
self.assertEqual(_cause, None)
@attr(all=True, unit=True)
def test03a_insert(self):
"""(TestCauseModel): insert() should return a zero error code on success when inserting a hardware failure Cause."""
self.DUT.select_all(1)
_error_code, _msg = self.DUT.insert(mechanism_id=1)
self.assertEqual(_error_code, 0)
self.assertEqual(_msg, 'RTK SUCCESS: Adding one or more items to '
'the RTK Program database.')
@attr(all=True, unit=True)
def test04a_delete(self):
"""(TestCauseModel): delete() should return a zero error code on success."""
self.DUT.select_all(1)
_error_code, _msg = self.DUT.delete(self.DUT.last_id)
self.assertEqual(_error_code, 0)
self.assertEqual(_msg, 'RTK SUCCESS: Deleting an item from the RTK '
'Program database.')
@attr(all=True, unit=True)
def test04b_delete_non_existent_id(self):
"""(TestCauseModel): delete() should return a non-zero error code when passed a Cause ID that doesn't exist."""
self.DUT.select_all(1)
_error_code, _msg = self.DUT.delete(300)
self.assertEqual(_error_code, 2005)
self.assertEqual(_msg, ' RTK ERROR: Attempted to delete non-existent '
'Cause ID 300.')
@attr(all=True, unit=True)
def test_05a_update(self):
"""(TestCauseModel): update() should return a zero error code on success."""
self.DUT.select_all(1)
_cause = self.DUT.tree.get_node(1).data
_cause.description = 'Test Failure Cause #1'
_error_code, _msg = self.DUT.update(1)
self.assertEqual(_error_code, 0)
self.assertEqual(_msg,
'RTK SUCCESS: Updating the RTK Program database.')
@attr(all=True, unit=True)
def test_05b_update_non_existent_id(self):
"""(TestCauseModel): update() should return a non-zero error code when passed a Cause ID that doesn't exist."""
self.DUT.select_all(1)
_error_code, _msg = self.DUT.update(100)
self.assertEqual(_error_code, 2006)
self.assertEqual(_msg, 'RTK ERROR: Attempted to save non-existent '
'Cause ID 100.')
@attr(all=True, unit=True)
def test06a_update_all(self):
"""(TestCauseModel): update_all() should return a zero error code on success."""
self.DUT.select_all(1)
_error_code, _msg = self.DUT.update_all()
self.assertEqual(_error_code, 0)
self.assertEqual(_msg,
'RTK SUCCESS: Updating the RTK Program database.')
|
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from ledger.accounts import models
from django.contrib.auth.models import Group
from ledgergw import models as ledgergw_models
from ledgergw import common
from django.db.models import Q
import json
import ipaddress
@csrf_exempt
def user_info_search(request, apikey):
jsondata = {'status': 404, 'message': 'API Key Not Found'}
ledger_user_json = {}
if ledgergw_models.API.objects.filter(api_key=apikey,active=1).count():
if common.api_allow(common.get_client_ip(request),apikey) is True:
keyword = request.POST.get('keyword', '')
jsondata = {'status': 200, 'message': 'No Results'}
jsondata['users'] = []
ledger_user_json = {}
search_filter = Q()
query_str_split = keyword.split(" ")
search_filter |= Q(email__icontains=keyword.lower())
#search_filter |= Q(first_name__icontains=query_str_split[0].lower())
if len(query_str_split) == 1:
search_filter |= Q(first_name__icontains=query_str_split[0].lower())
if len(query_str_split) > 1:
search_filter |= Q(Q(first_name__icontains=query_str_split[0].lower()) & Q(last_name__icontains=query_str_split[1].lower()))
#for se_wo in query_str_split:
#
# search_filter |= Q(first_name__icontains=se_wo.lower()) | Q(last_name__icontains=se_wo.lower())
ledger_users = models.EmailUser.objects.filter(search_filter)[:20]
#,last_name__icontains=keyword)
for ledger_obj in ledger_users:
ledger_user_json = {}
#if keyword.lower() in ledger_obj.first_name.lower()+' '+ledger_obj.last_name.lower() or keyword.lower() in ledger_obj.email.lower():
ledger_user_json['ledgerid'] = ledger_obj.id
ledger_user_json['email'] = ledger_obj.email
ledger_user_json['first_name'] = ledger_obj.first_name
ledger_user_json['last_name'] = ledger_obj.last_name
ledger_user_json['is_staff'] = ledger_obj.is_staff
ledger_user_json['is_superuser'] = ledger_obj.is_superuser
ledger_user_json['is_active'] = ledger_obj.is_active
ledger_user_json['date_joined'] = ledger_obj.date_joined.strftime('%d/%m/%Y %H:%M')
ledger_user_json['title'] = ledger_obj.title
if ledger_obj.dob:
ledger_user_json['dob'] = ledger_obj.dob.strftime('%d/%m/%Y %H:%M')
else:
ledger_user_json['dob'] = None
ledger_user_json['phone_number'] = ledger_obj.phone_number
ledger_user_json['position_title'] = ledger_obj.position_title
ledger_user_json['mobile_number'] = ledger_obj.mobile_number
ledger_user_json['fax_number'] = ledger_obj.fax_number
ledger_user_json['organisation'] = ledger_obj.organisation
#ledger_user_json['identification'] = ledger_obj.identification
#ledger_user_json['senior_card'] = ledger_obj.senior_card
ledger_user_json['character_flagged'] = ledger_obj.character_flagged
ledger_user_json['character_comments'] = ledger_obj.character_comments
ledger_user_json['extra_data'] = ledger_obj.extra_data
ledger_user_json['fullname'] = ledger_obj.get_full_name()
if ledger_obj.dob:
ledger_user_json['fullnamedob'] = ledger_obj.get_full_name_dob()
else:
ledger_user_json['fullnamedob'] = None
# Groups
#ledger_user_group = []
#for g in ledger_obj.groups.all():
# ledger_user_group.append({'group_id': g.id, 'group_name': g.name})
#ledger_user_json['groups'] = ledger_user_group
jsondata['users'].append(ledger_user_json)
jsondata['status'] = 200
jsondata['message'] = 'Results'
else:
jsondata['status'] = 403
jsondata['message'] = 'Access Forbidden'
else:
pass
return HttpResponse(json.dumps(jsondata), content_type='application/json')
@csrf_exempt
def user_info_id(request, userid,apikey):
jsondata = {'status': 404, 'message': 'API Key Not Found'}
ledger_user_json = {}
if ledgergw_models.API.objects.filter(api_key=apikey,active=1).count():
if common.api_allow(common.get_client_ip(request),apikey) is True:
ledger_user = models.EmailUser.objects.filter(id=int(userid))
if ledger_user.count() > 0:
ledger_obj = ledger_user[0]
ledger_user_json['ledgerid'] = ledger_obj.id
ledger_user_json['email'] = ledger_obj.email
ledger_user_json['first_name'] = ledger_obj.first_name
ledger_user_json['last_name'] = ledger_obj.last_name
ledger_user_json['is_staff'] = ledger_obj.is_staff
ledger_user_json['is_superuser'] = ledger_obj.is_superuser
ledger_user_json['is_active'] = ledger_obj.is_active
ledger_user_json['date_joined'] = ledger_obj.date_joined.strftime('%d/%m/%Y %H:%M')
ledger_user_json['title'] = ledger_obj.title
if ledger_obj.dob:
ledger_user_json['dob'] = ledger_obj.dob.strftime('%d/%m/%Y %H:%M')
else:
ledger_user_json['dob'] = None
ledger_user_json['phone_number'] = ledger_obj.phone_number
ledger_user_json['position_title'] = ledger_obj.position_title
ledger_user_json['mobile_number'] = ledger_obj.mobile_number
ledger_user_json['fax_number'] = ledger_obj.fax_number
ledger_user_json['organisation'] = ledger_obj.organisation
#ledger_user_json['identification'] = ledger_obj.identification
#ledger_user_json['senior_card'] = ledger_obj.senior_card
ledger_user_json['character_flagged'] = ledger_obj.character_flagged
ledger_user_json['character_comments'] = ledger_obj.character_comments
ledger_user_json['extra_data'] = ledger_obj.extra_data
ledger_user_json['fullname'] = ledger_obj.get_full_name()
if ledger_obj.dob:
ledger_user_json['fullnamedob'] = ledger_obj.get_full_name_dob()
else:
ledger_user_json['fullnamedob'] = None
# Groups
ledger_user_group = []
for g in ledger_obj.groups.all():
ledger_user_group.append({'group_id': g.id, 'group_name': g.name})
ledger_user_json['groups'] = ledger_user_group
jsondata['user'] = ledger_user_json
jsondata['status'] = 200
jsondata['message'] = 'User Found'
else:
                jsondata['status'] = 404
jsondata['message'] = 'User not found'
else:
jsondata['status'] = 403
jsondata['message'] = 'Access Forbidden'
else:
pass
return HttpResponse(json.dumps(jsondata), content_type='application/json')
@csrf_exempt
def user_info(request, ledgeremail,apikey):
jsondata = {'status': 404, 'message': 'API Key Not Found'}
ledger_user_json = {}
if ledgergw_models.API.objects.filter(api_key=apikey,active=1).count():
if common.api_allow(common.get_client_ip(request),apikey) is True:
ledger_user = models.EmailUser.objects.filter(email=ledgeremail)
if ledger_user.count() == 0:
                # create() already saves the new user; re-query so the lookup below succeeds.
                models.EmailUser.objects.create(email=ledgeremail, first_name=request.POST['first_name'], last_name=request.POST['last_name'])
                ledger_user = models.EmailUser.objects.filter(email=ledgeremail)
if ledger_user.count() > 0:
ledger_obj = ledger_user[0]
ledger_user_json['ledgerid'] = ledger_obj.id
ledger_user_json['email'] = ledger_obj.email
ledger_user_json['first_name'] = ledger_obj.first_name
ledger_user_json['last_name'] = ledger_obj.last_name
ledger_user_json['is_staff'] = ledger_obj.is_staff
ledger_user_json['is_superuser'] = ledger_obj.is_superuser
ledger_user_json['is_active'] = ledger_obj.is_active
ledger_user_json['date_joined'] = ledger_obj.date_joined.strftime('%d/%m/%Y %H:%M')
ledger_user_json['title'] = ledger_obj.title
if ledger_obj.dob:
ledger_user_json['dob'] = ledger_obj.dob.strftime('%d/%m/%Y %H:%M')
else:
ledger_user_json['dob'] = None
ledger_user_json['phone_number'] = ledger_obj.phone_number
ledger_user_json['position_title'] = ledger_obj.position_title
ledger_user_json['mobile_number'] = ledger_obj.mobile_number
ledger_user_json['fax_number'] = ledger_obj.fax_number
ledger_user_json['organisation'] = ledger_obj.organisation
#ledger_user_json['residential_address'] = ledger_obj.residential_address
#ledger_user_json['postal_address'] = ledger_obj.postal_address
#ledger_user_json['billing_address'] = ledger_obj.billing_address
#ledger_user_json['identification'] = ledger_obj.identification
#ledger_user_json['senior_card'] = ledger_obj.senior_card
ledger_user_json['character_flagged'] = ledger_obj.character_flagged
ledger_user_json['character_comments'] = ledger_obj.character_comments
ledger_user_json['extra_data'] = ledger_obj.extra_data
ledger_user_json['fullname'] = ledger_obj.get_full_name()
if ledger_obj.dob:
ledger_user_json['fullnamedob'] = ledger_obj.get_full_name_dob()
else:
ledger_user_json['fullnamedob'] = None
# Groups
ledger_user_group = []
for g in ledger_obj.groups.all():
ledger_user_group.append({'group_id': g.id, 'group_name': g.name})
ledger_user_json['groups'] = ledger_user_group
jsondata['user'] = ledger_user_json
jsondata['status'] = 200
jsondata['message'] = 'User Found'
else:
                jsondata['status'] = 404
jsondata['message'] = 'User not found'
else:
jsondata['status'] = 403
jsondata['message'] = 'Access Forbidden'
else:
pass
return HttpResponse(json.dumps(jsondata), content_type='application/json')
def group_info(request, apikey):
ledger_json = {}
jsondata = {'status': 404, 'message': 'API Key Not Found'}
if ledgergw_models.API.objects.filter(api_key=apikey,active=1).count():
if common.api_allow(common.get_client_ip(request),apikey) is True:
groups = Group.objects.all()
ledger_json['groups_list'] = []
ledger_json['groups_id_map'] = {}
ledger_json['groups_name_map'] = {}
for g in groups:
ledger_json['groups_list'].append({'group_id': g.id,'group_name': g.name})
ledger_json['groups_id_map'][g.id] = g.name
ledger_json['groups_name_map'][g.name] = g.id
jsondata['groups_list'] = ledger_json['groups_list']
jsondata['groups_id_map'] = ledger_json['groups_id_map']
jsondata['groups_name_map'] = ledger_json['groups_name_map']
jsondata['status'] = 200
            jsondata['message'] = 'Groups Retrieved'
else:
jsondata['status'] = 403
jsondata['message'] = 'Access Forbidden'
return HttpResponse(json.dumps(jsondata), content_type='application/json')
def ip_check(request):
ledger_json = {}
ipaddress = common.get_client_ip(request)
jsondata = {'status': 200, 'ipaddress': str(ipaddress)}
return HttpResponse(json.dumps(jsondata), content_type='application/json')
|
import enum
import os
import glob
import re
# Please choose a directory
while True:
print("""Which directory?
Example: .""")
# C:\muzudho\picture\2021-08-pg
path = input()
os.chdir(path)
    # List the file names
print(f"""Current directory: {os.getcwd()}
Files
-----""")
files = glob.glob("./*")
    # List them for now
for file in files:
# `file` - Example: `.\20210815shogi67.png`
basename = os.path.basename(file)
print(basename)
print("""
Are you sure this is the right directory (y/n)?""")
answer = input()
if answer == "y":
break
else:
        print("Canceled")
# Enter a regular expression pattern
while True:
print(r"""
Please enter a regular expression pattern.
Example: ^example-([\d\w]+)-([\d\w]+).txt$""")
# ^(.+)shogi(.+)$
patternText = input()
pattern = re.compile(patternText)
    # List the matches for now
for i, file in enumerate(files):
basename = os.path.basename(file)
result = pattern.match(basename)
if result:
# Matched
            # Number of capture groups
groupCount = len(result.groups())
buf = f"({i+1}) {basename}"
for j in range(0, groupCount):
buf += f" \\{j+1}=[{result.group(j+1)}]"
print(buf)
else:
# Unmatched
print(f"( ) {basename}")
print("""
Was there a match (y/n)?""")
answer = input()
if answer == "y":
break
else:
        print("Canceled")
# Simulate the replacement
while True:
print(r"""
Enter the pattern after the conversion.
Example: example-\2-\1.txt""")
# \1pg\2
replacement = input()
print("""
Simulation
----------""")
for i, file in enumerate(files):
basename = os.path.basename(file)
result = pattern.match(basename)
if result:
# Matched
converted = re.sub(patternText, replacement, basename)
print(f"({i+1}) {basename} --> {converted}")
print("""
Do you want to run it (y/n)?""")
answer = input()
if answer == "y":
break
else:
        print("Canceled")
# Perform the replacement
for i, file in enumerate(files):
basename = os.path.basename(file)
result = pattern.match(basename)
if result:
# Matched
converted = re.sub(patternText, replacement, basename)
oldPath = os.path.join(os.getcwd(), basename)
newPath = os.path.join(os.getcwd(), converted)
print(f"({i})Rename {oldPath} --> {newPath}")
os.rename(oldPath, newPath)
|
import pytest
from flask import Flask
from pynamodb.connection.base import Connection
from flask_pynamodb import PynamoDB
def test_invalid_app_at_init():
with pytest.raises(TypeError) as err:
PynamoDB("invalid app instance")
assert str(err.value) == "Invalid Flask app instance."
def test_invalid_app_at_init_app():
db = PynamoDB()
with pytest.raises(TypeError) as err:
db.init_app("Invalid app instance")
assert str(err.value) == "Invalid Flask app instance."
with pytest.raises(TypeError) as err:
db.init_app(None)
assert str(err.value) == "Invalid Flask app instance."
def test_connection_property(app: Flask, db: PynamoDB):
with app.app_context():
assert isinstance(db.connection, Connection)
assert app.extensions["pynamodb"]["connection"] is db.connection
app.extensions.pop("pynamodb")
assert isinstance(db.connection, Connection)
def test_cached_connection(todo: PynamoDB.Model):
todo._connection = todo._get_connection()
assert todo._connection is todo._get_connection()
|
import argparse
import itertools
import logging
import os
import random
import sys
from timeit import default_timer as timer
import pandas as pd
import psutil
import pyklb
import compress_zarr
def build_compressors(threads):
# We only have 2 choices
codecs = ["bzip2", "zlib"]
opts = []
for c, t in itertools.product(codecs, threads):
opts.append({
"name": c,
"threads": t
})
return opts
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--num-tiles", type=int, default=1)
parser.add_argument("-r", "--resolution", type=str, default="1")
parser.add_argument("-s", "--random-seed", type=int, default=None)
parser.add_argument("-i","--input-file", type=str, default="/allen/scratch/aindtemp/data/anatomy/2020-12-01-training-data/2020-12-01-stack-15/images/BrainSlice1_MMStack_Pos33_15_shift.tif")
parser.add_argument("-d","--output-data-file", type=str, default="/allen/scratch/aindtemp/cameron.arshadi/test_file.klb")
parser.add_argument("-o", "--output-metrics-file", type=str, default="/allen/scratch/aindtemp/cameron.arshadi/klb-compression-metrics.csv")
parser.add_argument("-l", "--log-level", type=str, default=logging.INFO)
parser.add_argument("-t", "--threads", type=int, nargs="+", default=[1])
args = parser.parse_args(sys.argv[1:])
print(args)
logging.basicConfig(format='%(asctime)s %(message)s', datefmt="%Y-%m-%d %H:%M")
logging.getLogger().setLevel(args.log_level)
compressors = build_compressors(args.threads)
run(compressors=compressors,
input_file=args.input_file,
num_tiles=args.num_tiles,
resolution=args.resolution,
random_seed=args.random_seed,
output_data_file=args.output_data_file,
output_metrics_file=args.output_metrics_file)
def run(compressors, input_file, num_tiles, resolution, random_seed, output_data_file, output_metrics_file):
if random_seed is not None:
random.seed(random_seed)
total_tests = num_tiles * len(compressors)
all_metrics = []
for ti in range(num_tiles):
data, rslice, read_dur = compress_zarr.read_random_chunk(input_file, resolution)
for c in compressors:
logging.info(f"starting test {len(all_metrics) + 1}/{total_tests}")
logging.info(f"compressor: {c['name']}")
psutil.cpu_percent(interval=None)
start = timer()
pyklb.writefull(data, output_data_file, compression=c['name'], numthreads=c['threads'])
cpu_utilization = psutil.cpu_percent(interval=None)
end = timer()
compress_dur = end - start
# TODO: check if this makes sense
bytes_written = os.path.getsize(output_data_file)
tile_metrics = {
'compressor_name': c['name'],
'tile': rslice,
'threads': c['threads'],
'bytes_read': data.nbytes,
'read_time': read_dur,
'read_bps': data.nbytes / read_dur,
'compress_bps': data.nbytes / compress_dur,
'compress_time': compress_dur,
'bytes_written': bytes_written,
'shape': data.shape,
'cpu_utilization': cpu_utilization,
'storage_ratio': data.nbytes / bytes_written
}
all_metrics.append(tile_metrics)
output_metrics_file = output_metrics_file.replace('.csv', '_' + os.path.basename(input_file) + '.csv')
df = pd.DataFrame.from_records(all_metrics)
df.to_csv(output_metrics_file, index_label='test_number')
if __name__ == "__main__":
main()
|
import inspect
from decorator import decorator
import torch
from torch import nn
from torchdrug import data
class cached_property(property):
"""
Cache the property once computed.
"""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, cls):
result = self.func(obj)
obj.__dict__[self.func.__name__] = result
return result
def cached(func, debug=False):
"""
Cache the result of last function call.
"""
@decorator
def wrapper(forward, self, *args, **kwargs):
def equal(x, y):
if isinstance(x, nn.Parameter):
x = x.data
if isinstance(y, nn.Parameter):
y = y.data
if type(x) != type(y):
return False
if isinstance(x, torch.Tensor):
return x.shape == y.shape and (x == y).all()
elif isinstance(x, data.Graph):
if x.num_node != y.num_node or x.num_edge != y.num_edge or x.num_relation != y.num_relation:
return False
edge_feature = getattr(x, "edge_feature", torch.tensor(0, device=x.device))
y_edge_feature = getattr(y, "edge_feature", torch.tensor(0, device=y.device))
if edge_feature.shape != y_edge_feature.shape:
return False
return (x.edge_list == y.edge_list).all() and (x.edge_weight == y.edge_weight).all() \
and (edge_feature == y_edge_feature).all()
else:
return x == y
if self.training:
return forward(self, *args, **kwargs)
func = inspect.signature(forward)
func = func.bind(self, *args, **kwargs)
func.apply_defaults()
arguments = func.arguments.copy()
arguments.pop(next(iter(arguments.keys())))
if hasattr(self, "_forward_cache"):
hit = True
message = []
for k, v in arguments.items():
if not equal(self._forward_cache[k], v):
hit = False
message.append("%s: miss" % k)
break
message.append("%s: hit" % k)
if debug:
print("[cache] %s" % ", ".join(message))
else:
hit = False
if debug:
print("[cache] cold start")
if hit:
return self._forward_cache["result"]
else:
self._forward_cache = {}
for k, v in arguments.items():
if isinstance(v, torch.Tensor) or isinstance(v, data.Graph):
v = v.detach()
self._forward_cache[k] = v
result = forward(self, *args, **kwargs)
self._forward_cache["result"] = result
return result
return wrapper(func)
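# A minimal usage sketch (hypothetical toy module, not part of torchdrug itself):
# at evaluation time, a second call with an identical tensor input reuses the
# cached result of the decorated forward instead of recomputing it.
if __name__ == "__main__":
    class ToyModel(nn.Module):
        @cached
        def forward(self, input):
            return input.sum()

    model = ToyModel().eval()
    x = torch.ones(3)
    print(model(x), model(x))  # the second call is served from the cache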
|
#Copyright (C) 2013 by Ngan Nguyen
#
#Released under the MIT license, see LICENSE.txt
'''
Get the median models
Input a list of clones
Compute the probability of observing each clone
'''
import os
import sys
import numbers
import re
from math import log10, factorial
import cPickle as pickle
import gzip
import numpy as np
from scipy.stats import poisson
from jobTree.scriptTree.target import Target
from sonLib.bioio import system
import aimseqtk.lib.common as lcommon
import aimseqtk.src.recomb.recomb_common as rcommon
def read_clone_file(file):
clone2names = {}
f = open(file, 'r')
f.readline()
for line in f:
items = line.strip('\n').split('\t')
assert len(items) == 7
clone = items[0]
in_names = items[5].split(',')
out_names = items[6].split(',')
clone2names[clone] = (in_names, out_names)
f.close()
return clone2names
def get_ntclones(vaaj, sams, db_dir):
# get all nt sequences that result in v_aa_j
sam2ntclones = {}
items = vaaj.split('_')
assert len(items) == 3
v = items[0]
for sam in sams:
if not sam:
continue
ntclones = []
clonefile = os.path.join(db_dir, sam, v)
clones = pickle.load(gzip.open(clonefile, "rb"))
for clone in clones:
if clone.get_vseqj() == vaaj:
if clone.vdel is not None:
ntclones.append(clone)
sam2ntclones[sam] = ntclones
return sam2ntclones
def n_choose_k(n, k):
numerator = factorial(n)
denominator = (factorial(k) * factorial(n - k))
return numerator / denominator
def get_group_likelihood(log_p, allsams, presentsams, sam2total):
llhs = []
for sam in allsams:
logmu = log_p + log10(sam2total[sam])
llh = poisson.logsf(1, 10 ** logmu)
llhs.append(llh)
sum_llh = sum([10 ** l for l in llhs])
x = len(presentsams)
return poisson.logsf(x, sum_llh)
def aaclones_likelihood(clone2sams, model, db_dir, sam2total, group2sams,
outfile, ingroup, outgroup):
f = open(outfile, 'w')
f.write("sample\tnum_ntclones\tprob_observed\n")
for clone, (insams, outsams) in clone2sams.iteritems():
f.write("#%s\n" % clone)
events = []
event_llhs = []
for i, sams in enumerate([insams, outsams]):
if not sams:
continue
sam2ntclones = get_ntclones(clone, sams, db_dir)
f.write("#Group_%d\n" % (i + 1))
for sam, ntclones in sam2ntclones.iteritems():
total = sam2total[sam]
llhoods = []
for ntclone in ntclones:
clonellhood = rcommon.ntclone_likelihood(ntclone, model)
#prob_observed = clonellhood + log10(total)
logmu = log10(total) + clonellhood
prob_observed = poisson.logsf(1, 10 ** logmu) # prob. observing >=1 ntclone
llhoods.append(prob_observed)
if not rcommon.visited_event(events, ntclone):
events.append(ntclone)
event_llhs.append(clonellhood)
#if clonellhood != float(-inf):
# event_llhs.append(clonellhood)
llhoods_str = ",".join(["%f" % llh for llh in llhoods])
f.write("%s\t%d\t%s\n" % (sam, len(ntclones), llhoods_str))
# calc prob to observe the aa clones (sum of all nt events)
if sum([10**llh for llh in event_llhs]) > 0:
aa_llh = log10(sum([10**llh for llh in event_llhs]))
avr_total = (sam2total[ingroup] + sam2total[outgroup]) / 2
avr_logmu = aa_llh + log10(avr_total)
avr_aa_llh = poisson.logsf(1, 10 ** avr_logmu)
f.write("#Clone_log_likelihood: %f, %f\n" % (aa_llh, avr_aa_llh))
ingroup_llh = get_group_likelihood(aa_llh, group2sams[ingroup],
insams, sam2total)
outgroup_llh = get_group_likelihood(aa_llh, group2sams[outgroup],
outsams, sam2total)
f.write("#Ingrp vs Outgrp: %f vs %f\n#\n" % (ingroup_llh, outgroup_llh))
f.close()
def read_clonesize(file):
s2clones = {}
g2sams = {}
f = open(file, 'r')
f.readline()
sams = []
for line in f:
items = line.strip().split('\t')
sample = items[0]
clones = int(float(items[1]))
if re.search("Avr", sample):
group = sample.replace("_Avr", "")
g2sams[group] = sams
sams = []
s2clones[group] = clones
else:
s2clones[sample] = clones
sams.append(sample)
f.close()
return s2clones, g2sams
def main():
clone_file = sys.argv[1]
model_dir = sys.argv[2]
db_dir = sys.argv[3]
numclone_file = sys.argv[4]
outfile = sys.argv[5]
ingroup = sys.argv[6]
outgroup = sys.argv[7]
clone2sams = read_clone_file(clone_file)
model = rcommon.get_median_model(model_dir)
sam2total, group2sams = read_clonesize(numclone_file)
aaclones_likelihood(clone2sams, model, db_dir, sam2total, group2sams,
outfile, ingroup, outgroup)
if __name__ == '__main__':
main()
|
# REFERENCE (python requests): https://realpython.com/python-requests/
# REFERENCE (Hacker News API): https://github.com/HackerNews/API
print("Hacker News CLI by Topher Pedersen")
print("loading...")
from lxml import html
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
story_content = []
story_title = []
story_link = []
def get_top_stories(first_story_index, last_story_index):
global story_content
global story_title
global story_count
global story_link
story_content = []
story_title = []
story_count = -1
story_link = []
top_stories = requests.get('https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty')
top_stories_str = top_stories.text
top_stories_json = top_stories.json()
for id in top_stories_json:
story_count = story_count + 1
if story_count < first_story_index:
continue
elif story_count > last_story_index:
break
url = 'https://hacker-news.firebaseio.com/v0/item/' + str(id) + '.json?print=pretty'
story = requests.get(url)
story_json = story.json()
story_title.append(story_json['title'])
try:
story_url = story_json['url']
story_link.append(story_url)
story_html = requests.get(story_url).content
story_content.append(story_html)
        except (KeyError, requests.RequestException):
            story_content.append("nothing to see here, sorry")
print("loading...")
def show_titles():
global story_title
    for title in story_title:
        print(title)
def show_story(raw_html):
story_beautiful_soup = BeautifulSoup(raw_html, 'html.parser')
ptag = story_beautiful_soup.find_all('p')
for p in ptag:
paragraph_length = len(p.text)
line = [] # create array to hold individual lines of text
line.append("")
i = 0 # i keeps track of characters in paragraph
j = 0 # j keeps track of characters in individual lines
k = 0 # k keeps track of number of lines per paragraph
while i < paragraph_length:
line[k] = line[k] + p.text[i]
i = i + 1
if j < 80:
j = j + 1
else:
j = 0
k = k + 1
line.append("") # append new string for new line to list
for line in line:
print(line)
print("")
# print(p.text)
def select_story(selection):
global story_content
show_story(story_content[selection])
def show_top_stories():
i = 0
for title in story_title:
print(str(i) + ": " + title)
i = i + 1
keep_reading = True
while keep_reading == True:
page = input('Which page of stories would you like to view?: ')
page = int(page)
if page == 1:
get_top_stories(0, 9)
elif page == 2:
get_top_stories(10, 19)
elif page == 3:
get_top_stories(20, 29)
elif page == 4:
get_top_stories(30, 39)
elif page == 5:
get_top_stories(40, 49)
elif page == 6:
get_top_stories(50, 59)
elif page == 7:
get_top_stories(60, 69)
elif page == 8:
get_top_stories(70, 79)
elif page == 9:
get_top_stories(80, 89)
elif page == 10:
get_top_stories(90, 99)
show_top_stories()
story_selected = input("Select Story To Read: ")
story_selected = int(story_selected)
select_story(story_selected)
keep_reading = input('Keep reading? Enter y for yes, no for no:')
if keep_reading == "no" or keep_reading == "n":
keep_reading = False
else:
keep_reading = True
|
import copy
import datetime
import logging
from .config import config
from .fetch.federal_holidays import FederalHolidays
from .fetch.fixed_feasts import FixedFeasts
from .fetch.floating_feasts import FloatingFeasts
from .fetch.moveable_feasts import MoveableFeasts
from .season import YearIterator
from .static import StaticYear
from . import utils
from .valid_dates import valid_in_list
class Resolution:
"""Calculate the liturgical calendar for a full year"""
def __init__(self, session):
"""Constructor"""
self.session = session
self.logger = logging.getLogger(__name__)
def calculate_year(self, year):
"""Calculates the liturgical calendar and returns it as a StaticYear"""
# Start up Resolution for this year
self.logger.info('Starting resolution for ' + str(year))
resolution = ResolutionYear(year, self.session, self.logger)
# Set up the season framework
self.logger.info('Importing seasons...')
resolution.import_seasons()
self.logger.info('done')
# Add moveable feasts
self.logger.info('Adding moveable feasts...')
resolution.import_moveable_feasts()
self.logger.info('done')
# Add fixed feasts
self.logger.info('Adding fixed feasts...')
resolution.import_fixed_feasts()
self.logger.info('done')
# Add federal holidays
self.logger.info('Adding federal holidays...')
resolution.import_federal_holidays()
self.logger.info('done')
# Resolve
self.logger.info('Resolving...')
for cdate in sorted(resolution.full_year.iterkeys()):
resolution.full_year[cdate].resolve()
self.logger.info('done')
# Add floating feasts and re-resolve
self.logger.info('Adding floating feasts...')
resolution.import_floating_feasts()
for cdate in sorted(resolution.full_year.iterkeys()):
resolution.full_year[cdate].resolve()
self.logger.info('done')
# Freeze the current state
self.logger.info('Freezing current state...')
static = resolution.freeze()
self.logger.info('done')
return static
class ResolutionYear:
"""Describes a year in the resolution process"""
def __init__(self, year, session, logger):
"""Constructor"""
self.year = year
self.session = session
self.logger = logger
self.full_year = {}
self.extras = {}
def import_seasons(self):
"""Walk though the year and lay down our defaults according to the season on that date"""
self.season_ticker = YearIterator(self.session, self.year)
self.logger.debug('Fetched season iterator and began initial walk')
while self.season_ticker.day.year == self.year:
cdate = utils.day_to_lookup(self.season_ticker.day)
self.full_year[cdate] = ResolutionDay(self.season_ticker.day, self)
self.full_year[cdate].set_season(self.season_ticker.current(), self.season_ticker.sunday_count, self.season_ticker.is_last_week())
self.season_ticker.advance_by_day()
self.logger.debug('Completed initial walk')
# We also need one day of the next year, in case of vigil
alt_season_ticker = YearIterator(self.session, self.year + 1)
self.logger.debug('Fetched season iterator for day one of next year')
self.next_year_first = ResolutionDay(alt_season_ticker.day, self)
self.next_year_first.set_season(alt_season_ticker.current(), alt_season_ticker.sunday_count, alt_season_ticker.is_last_week())
def import_moveable_feasts(self):
"""Fetch moveable feasts and place them on the proper days"""
self.moveable = MoveableFeasts(self.session, self.year)
mf = self.moveable.feasts_by_date()
self.logger.debug('Found moveable feasts on {days} days'.format(days=len(mf)))
for info in mf:
cdate = utils.day_to_lookup(info['day'])
if cdate in self.full_year:
self.full_year[cdate].add_feast(ResolutionFeast(info['feasts'], info['day']))
else:
if cdate not in self.extras:
self.extras[cdate] = []
self.extras[cdate].append(ResolutionFeast(info['feasts'], info['day']))
self.logger.debug('Added moveable feasts')
def import_fixed_feasts(self):
"""Fetch fixed feasts and place them on the proper days"""
self.fixed = FixedFeasts(self.session, self.year)
ff = self.fixed.feasts_by_date()
self.logger.debug('Found fixed feasts on {days} days'.format(days=len(ff)))
for info in ff:
cdate = utils.day_to_lookup(info['day'])
if cdate in self.full_year:
self.full_year[cdate].add_feast(ResolutionFeast(info['feasts'], info['day']))
else:
if cdate not in self.extras:
self.extras[cdate] = []
self.extras[cdate].append(ResolutionFeast(info['feasts'], info['day']))
self.logger.debug('Added fixed feasts')
def import_floating_feasts(self):
"""Fetch floating feasts and place them on the proper days"""
self.floating = FloatingFeasts(self.session, self.year)
ff = self.floating.get_for_year(self.year, self.full_year)
self.logger.debug('Found floating feasts on {days} days'.format(days=len(ff)))
for info in ff:
cdate = utils.day_to_lookup(info['day'])
if cdate in self.full_year:
self.full_year[cdate].add_feast(ResolutionFeast(info['feasts'], info['day']))
else:
if cdate not in self.extras:
self.extras[cdate] = []
self.extras[cdate].append(ResolutionFeast(info['feasts'], info['day']))
self.logger.debug('Added floating feasts')
def import_federal_holidays(self):
"""Fetch federal holidays and place them on the proper days"""
self.federal = FederalHolidays(self.session, self.year)
fh = self.federal.holidays_by_date()
self.logger.debug('Found federal holidays on {days} days'.format(days=len(fh)))
for info in fh:
cdate = utils.day_to_lookup(info['day'])
if cdate in self.full_year:
self.full_year[cdate].add_holiday(ResolutionHoliday(info['holidays'], info['day']))
else:
if cdate not in self.extras:
self.extras[cdate] = []
self.extras[cdate].append(ResolutionHoliday(info['holidays'], info['day']))
self.logger.debug('Added federal holidays')
def freeze(self):
"""Freezes the current resolution and returns the static year"""
static_year = StaticYear(self.year, self.session)
overrides = []
        for cdate in sorted(self.full_year):
res_day = self.full_year[cdate]
if res_day.base_block is not None:
overrides.append(self._block_to_override(res_day.day, res_day.base_block, False))
if res_day.vigil_block is not None:
overrides.append(self._block_to_override(res_day.day, res_day.vigil_block, True))
static_year.override(overrides)
self.logger.debug('Froze year')
return static_year
def _block_to_override(self, day, block, is_vigil):
"""Converts a resolution block into a dictionary suitable for static overrides"""
override = {}
override['day'] = day
if is_vigil:
override['target_block'] = 'vigil'
else:
override['target_block'] = 'base'
override['color'] = block.color
override['name'] = block.name
override['note'] = block.note
override['services'] = []
for service in block.services:
override['services'].append({ 'name': service.name, 'start_time': service.start_time })
return override
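    # For reference, _block_to_override() returns a dict shaped like the
    # following (values are illustrative only):
    #     {'day': <date>, 'target_block': 'base' or 'vigil', 'color': 'white',
    #      'name': 'Eve of Christmas', 'note': None,
    #      'services': [{'name': 'Evening Service', 'start_time': <time>}]}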
def before(self, day):
"""Returns the day before the day given"""
target = copy.deepcopy(day)
target = target - datetime.timedelta(days=1)
return self.full_year[utils.day_to_lookup(target)]
def after(self, day):
"""Returns the day after the day given"""
target = copy.deepcopy(day)
target = target + datetime.timedelta(days=1)
cdate = utils.day_to_lookup(target)
if cdate in self.full_year:
return self.full_year[cdate]
class ResolutionDay:
"""Describes a day in the resolution process"""
def __init__(self, day, year):
"""Sets up a day within a year"""
self.day = copy.deepcopy(day)
self.year = year
self.season = None
self.current_feast = None
self.current_precedence = 100
self.has_vigil = False
self.feasts = []
self.holidays = []
self.base_block = None
self.vigil_block = None
self.logger = year.logger
def set_season(self, season, sunday_count, is_last_week):
"""Sets the season info for this day"""
self.season = season
self.sunday_count = sunday_count
self.is_last_week = is_last_week
def add_feast(self, feast):
"""Adds a feast"""
feast.change_day(self.day)
self.feasts.append(feast)
def add_holiday(self, holiday):
"""Adds a holiday"""
holiday.change_day(self.day)
self.holidays.append(holiday)
def resolve(self):
"""Look at today's season and any feasts, then set the main and vigil blocks accordingly"""
if self.season is None:
            self.logger.warning('No season data for ' + utils.day_to_lookup(self.day))
return
pattern = self.season.pattern(self.day)
if pattern.has_vigil(self.day):
self._set_vigil_for_season()
else:
self._make_block_from_season()
self.current_precedence = self.season.precedence(self.day)
        current_feast = None
        # iterate over a copy: feasts that get transferred are removed from self.feasts below
        for feast in list(self.feasts):
            if feast.precedence() <= self.current_precedence:
                self.current_precedence = feast.precedence()
                current_feast = feast
            elif feast.precedence() <= config['transfer_precedence']:
                tomorrow = self.year.after(self.day)
                tomorrow.add_feast(feast)
                self.feasts.remove(feast)
                self.logger.debug('Transferring ' + feast.name() + ' to ' + utils.day_to_lookup(tomorrow.day))
if current_feast is not None:
self.current_feast = current_feast
self._make_block_from_feast()
if self.current_feast.has_eve():
# Look back to yesterday and set the vigil
yesterday = self.year.before(self.day)
yesterday._set_vigil_for_feast(self.current_feast)
for holiday in self.holidays:
            if holiday.open_time() is not None or holiday.close_time() is not None:
                # guard against a missing base block (no schedule could be built for this day)
                if self.base_block is not None:
                    if self.base_block.check_open_hours(holiday.open_time(), holiday.close_time()):
                        self.base_block.set_open_hours(holiday.open_time(), holiday.close_time())
                    else:
                        self.base_block = None
                if self.vigil_block is not None:
                    if self.vigil_block.check_open_hours(holiday.open_time(), holiday.close_time()):
                        self.vigil_block.set_open_hours(holiday.open_time(), holiday.close_time())
                    else:
                        self.vigil_block = None
if self.base_block is not None:
note_lines = []
if not holiday.skip_name():
note_lines.append(holiday.name())
if holiday.note() is not None:
note_lines.append(holiday.note())
if self.base_block.note is not None and len(note_lines) > 0:
self.base_block.note = "\n".join(note_lines) + "\n" + self.base_block.note
elif len(note_lines) > 0:
self.base_block.note = "\n".join(note_lines)
def _make_block_from_feast(self):
"""Use the current feast to set today's main block"""
if self.current_feast is None:
            self.logger.warning('Attempting to make block from missing feast on ' + utils.day_to_lookup(self.day))
return
        pattern = self.current_feast.pattern()
        if pattern is None:
            pattern = self.season.pattern(self.day)
        if pattern is None:
            self.logger.warning('Missing pattern for ' + self.current_feast.name() + ' on ' + utils.day_to_lookup(self.day))
            return
        schedule = pattern.schedule(self.day, has_vigil = self.has_vigil)
        if schedule is None:
            self.logger.warning('Missing schedule for ' + self.current_feast.name() + ' on ' + utils.day_to_lookup(self.day))
            return
self.base_block = ResolutionBlock(
color = self.current_feast.color(),
name = self.current_feast.name(),
note = self.current_feast.note(),
schedule = schedule
)
def _make_block_from_season(self):
"""Use the current season to set today's main block"""
if self.season is None:
            self.logger.warning('Attempting to make block from missing season on ' + utils.day_to_lookup(self.day))
return
        pattern = self.season.pattern(self.day)
        if pattern is None:
            self.logger.warning('Missing season pattern on ' + utils.day_to_lookup(self.day))
            return
        schedule = pattern.schedule(self.day, has_vigil = self.has_vigil)
        if schedule is None:
            self.logger.warning('Missing season schedule on ' + utils.day_to_lookup(self.day))
            return
self.base_block = ResolutionBlock(
color = self.season.color,
name = self.season.day_name(self.day, sunday_count = self.sunday_count, is_last = self.is_last_week),
note = self.season.day_note(self.day),
schedule = schedule
)
def _set_vigil_for_feast(self, feast):
"""Sets the vigil block on this day for a feast happening tomorrow"""
pattern = feast.eve_pattern()
if pattern is None:
pattern = self.season.pattern(self.day)
vigil_name = feast.eve_name()
if vigil_name is None:
vigil_name = 'Eve of ' + feast.name()
vigil_schedule = pattern.schedule(self.day, is_vigil = True)
if vigil_schedule is None:
            self.logger.warning('Missing vigil schedule for ' + feast.name() + ' on ' + utils.day_to_lookup(self.day))
return
cp = self.season.precedence(self.day)
for f in self.feasts:
if cp > f.precedence():
cp = f.precedence()
if cp < feast.precedence():
return
self.has_vigil = True
self.vigil_block = ResolutionBlock(
color = feast.color(),
name = vigil_name,
schedule = vigil_schedule
)
if self.current_feast:
self._make_block_from_feast()
else:
self._make_block_from_season()
def _set_vigil_for_season(self):
"""Sets the vigil block on this day using tomorrow's season info"""
tomorrow = self.year.after(self.day)
if tomorrow is None:
self.logger.info('Setting vigil from the first day of next year')
tomorrow = self.year.next_year_first
self.has_vigil = True
tpattern = tomorrow.season.pattern(self.day)
self.vigil_block = ResolutionBlock(
color = tomorrow.season.color,
name = tomorrow.season.day_name(self.day, is_vigil = True, sunday_count = tomorrow.sunday_count, is_last = tomorrow.is_last_week),
schedule = tpattern.schedule(self.day, is_vigil = True)
)
if self.current_feast:
self._make_block_from_feast()
else:
self._make_block_from_season()
def __repr__(self):
"""Displays the day as a string"""
rep = self.day.strftime('%Y-%m-%d') + ' (' + utils.weekday(self.day) + '):'
rep += "\n\t" + str(self.base_block)
if self.vigil_block is not None:
rep += "\n\t" + str(self.vigil_block)
return rep
class ResolutionBlock:
"""Describes a service block on a particular day"""
def __init__(self, **kwargs):
"""Constructor"""
if 'color' in kwargs:
self.color = kwargs['color']
else:
self.color = None
if 'name' in kwargs:
self.name = kwargs['name']
else:
self.name = None
if 'note' in kwargs:
self.note = kwargs['note']
else:
self.note = None
if 'schedule' in kwargs:
self.schedule = kwargs['schedule']
self.services = self.schedule.sort_services()
else:
self.schedule = None
self.services = []
def check_open_hours(self, open_time, close_time):
"""Check the open hours against the service times to see if this block will have any services left"""
ok = []
if open_time is None:
open_time = datetime.time(0, 0, 0)
if close_time is None:
close_time = datetime.time(23, 59, 59)
for s in self.services:
n = s.start_time.replace(tzinfo=None)
if n > open_time and n < close_time:
ok.append(s)
return len(ok) > 0
def set_open_hours(self, open_time, close_time):
"""Slice off services outside the open hours"""
ok = []
if open_time is None:
open_time = datetime.time(0, 0, 0)
if close_time is None:
close_time = datetime.time(23, 59, 59)
for s in self.services:
n = s.start_time.replace(tzinfo=None)
if n > open_time and n < close_time:
ok.append(s)
self.services = ok
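    # Worked example (illustrative): with open_time 09:00 and close_time 15:00,
    # services starting at 07:30 or 17:00 are dropped and a 10:00 service is
    # kept; check_open_hours() reports whether anything would survive before
    # set_open_hours() actually trims the list.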
def __repr__(self):
"""Displays the block as a string"""
rep = '[' + str(self.color) + '] ' + str(self.name)
if self.note is not None:
lines = self.note.split("\n")
rep += "\n\t\t(" + "\n\t\t ".join(lines) + ')'
for service in self.services:
rep += "\n\t\t* " + str(service)
return rep
class ResolutionFeast:
"""Describes a feast we're putting on the calendar"""
def __init__(self, feasts, day):
"""Constructor"""
self.feasts = feasts
self.original_day = day
self.current_day = day
def _get_for_day(self, day):
"""Selects the feast model that's valid for a given day"""
return valid_in_list(self.feasts, day)
def change_day(self, day):
"""Changes the current day"""
self.current_day = day
def precedence(self):
"""Selects the precedence for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.otype.precedence
else:
return -1
def code(self):
"""Selects the code for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.code
else:
return ''
def color(self):
"""Selects the color for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.color
else:
return ''
def name(self):
"""Selects the name for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.name
else:
return ''
def note(self):
"""Selects the note for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.note
else:
return ''
def pattern(self):
"""Selects the pattern for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.pattern(self.current_day)
else:
return ''
def has_eve(self):
"""Selects the eve flag for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.has_eve
else:
return ''
def eve_name(self):
"""Selects the eve name for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.eve_name
else:
return ''
def eve_pattern(self):
"""Selects the eve pattern for this feast"""
        f = self._get_for_day(self.current_day)
if f:
return f.eve_pattern(self.current_day)
else:
return ''
def __repr__(self):
"""Displays the feast as a string"""
rep = '[' + str(self.color()) + '] ' + str(self.name())
if self.note() is not None:
lines = self.note().split("\n")
rep += "\n\t\t(" + "\n\t\t ".join(lines) + ')'
if self.pattern():
rep += "\n\t\t* " + str(self.pattern().schedule(self.current_day))
return rep
class ResolutionHoliday:
"""Describes a holiday we're putting on the calendar"""
def __init__(self, holidays, day):
"""Constructor"""
self.holidays = holidays
self.original_day = day
self.current_day = day
def _get_for_day(self, day):
"""Selects the holiday model that's valid for a given day"""
return valid_in_list(self.holidays, day)
def change_day(self, day):
"""Changes the current day"""
self.current_day = day
def name(self):
"""Selects the name for this holiday"""
        h = self._get_for_day(self.current_day)
if h:
return h.name
else:
return ''
def code(self):
"""Selects the code for this holiday"""
        h = self._get_for_day(self.current_day)
if h:
return h.code
else:
return ''
def note(self):
"""Selects the note for this holiday"""
        h = self._get_for_day(self.current_day)
if h:
return h.note
else:
return ''
def open_time(self):
"""Selects the church open time for this holiday"""
        h = self._get_for_day(self.current_day)
        if h:
            return h.open_time
        else:
            # return None (not '') so the "is not None" checks in resolve() skip missing holidays
            return None
def close_time(self):
"""Selects the church close time for this holiday"""
        h = self._get_for_day(self.current_day)
        if h:
            return h.close_time
        else:
            # return None (not '') so the "is not None" checks in resolve() skip missing holidays
            return None
def skip_name(self):
"""Selects the skip-name flag for this holiday"""
        h = self._get_for_day(self.current_day)
if h:
return h.skip_name
else:
return False
def __repr__(self):
"""Displays the holiday as a string"""
rep = str(self.name()) + ' <' + str(self.code()) + '>: Church open ' + str(self.open_time()) + ' to ' + str(self.close_time())
if self.note() is not None:
lines = self.note().split("\n")
rep += "\n\t\t(" + "\n\t\t ".join(lines) + ')'
return rep
|
# import pandas plus the feature_engine and scikit-learn tools used below
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_regression,\
mutual_info_regression
pd.set_option('display.width', 75)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.3f}'.format
# load the NLS data
nls97wages = pd.read_csv("data/nls97wages.csv")
feature_cols = ['satverbal','satmath','gpascience',
'gpaenglish','gpamath','gpaoverall','gender','motherhighgrade',
'fatherhighgrade','parentincome','completedba']
X_train, X_test, y_train, y_test = \
train_test_split(nls97wages[feature_cols],\
nls97wages[['wageincome']], test_size=0.3, random_state=0)
# encode and scale the data
ohe = OneHotEncoder(drop_last=True, variables=['gender'])
X_train_enc = ohe.fit_transform(X_train)
scaler = StandardScaler()
standcols = X_train_enc.iloc[:,:-1].columns
X_train_enc = \
pd.DataFrame(scaler.fit_transform(X_train_enc[standcols]),
columns=standcols, index=X_train_enc.index).\
join(X_train_enc[['gender_Male']])
# scale the target with its own scaler so the feature scaler's fit is preserved
y_scaler = StandardScaler()
y_train = \
  pd.DataFrame(y_scaler.fit_transform(y_train),
  columns=['wageincome'], index=y_train.index)
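# quick sanity check (illustrative): the standardized columns should now have
# mean ~0 and standard deviation ~1 on the training data
X_train_enc[standcols].agg(['mean','std']).round(2)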
# select 5 best features for predicting wage income
ksel = SelectKBest(score_func=f_regression, k=5)
ksel.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[ksel.get_support()]
selcols
pd.DataFrame({'score': ksel.scores_,
'feature': X_train_enc.columns},
columns=['feature','score']).\
sort_values(['score'], ascending=False)
# select the 5 best features using mutual information
ksel = SelectKBest(score_func=mutual_info_regression, k=5)
ksel.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[ksel.get_support()]
selcols
pd.DataFrame({'score': ksel.scores_,
'feature': X_train_enc.columns},
columns=['feature','score']).\
sort_values(['score'], ascending=False)
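# optional check of the k=5 choice (illustrative sketch): wrap SelectKBest in a
# pipeline with a simple linear model and look at cross-validated R-squared on
# the training data
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
pipe = make_pipeline(SelectKBest(score_func=f_regression, k=5),
  LinearRegression())
cross_val_score(pipe, X_train_enc, y_train.values.ravel(),
  cv=5, scoring='r2').mean()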
|